From 325d8369364e109ed26972f76382ecd46e87b425 Mon Sep 17 00:00:00 2001 From: emanuel-schmid Date: Thu, 12 Sep 2024 12:02:57 +0200 Subject: [PATCH 01/12] eccodes changed variable names: i.p., gust -> i10fg --- climada/hazard/storm_europe.py | 2 +- requirements/env_climada.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/climada/hazard/storm_europe.py b/climada/hazard/storm_europe.py index 359c3d8a3..52841bd8e 100644 --- a/climada/hazard/storm_europe.py +++ b/climada/hazard/storm_europe.py @@ -490,7 +490,7 @@ def from_icon_grib(cls, run_datetime, event_date=None, model_name='icon-eu-eps', # Create Hazard haz = cls( - intensity=sparse.csr_matrix(stacked['gust'].T), + intensity=sparse.csr_matrix((stacked.get('gust') or stacked.get('i10fg')).T), centroids=cls._centroids_from_nc(nc_centroids_file), event_id=event_id, date=date, diff --git a/requirements/env_climada.yml b/requirements/env_climada.yml index 9ebc16c2b..531fde511 100644 --- a/requirements/env_climada.yml +++ b/requirements/env_climada.yml @@ -8,7 +8,7 @@ dependencies: - cfgrib>=0.9.9,<0.9.10 # 0.9.10 cannot read the icon_grib files from https://opendata.dwd.de - contextily>=1.6 - dask>=2024.5 - - eccodes>=2.27,<2.28 # 2.28 changed some labels, in particular: gust -> i20fg + - eccodes>=2.27 # 2.28 changed some labels, in particular: gust -> i10fg (i20fg?) - gdal>=3.6 - geopandas>=0.14 - h5py>=3.8 From 6afb267ad098213b7b4f9f1e3f2b2d5d1464cded Mon Sep 17 00:00:00 2001 From: emanuel-schmid Date: Thu, 12 Sep 2024 14:05:24 +0200 Subject: [PATCH 02/12] requirements: apparently the need for fiona has ended with eccodes>=2.28 --- requirements/env_climada.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/requirements/env_climada.yml b/requirements/env_climada.yml index 531fde511..1271c7c31 100644 --- a/requirements/env_climada.yml +++ b/requirements/env_climada.yml @@ -8,7 +8,8 @@ dependencies: - cfgrib>=0.9.9,<0.9.10 # 0.9.10 cannot read the icon_grib files from https://opendata.dwd.de - contextily>=1.6 - dask>=2024.5 - - eccodes>=2.27 # 2.28 changed some labels, in particular: gust -> i10fg (i20fg?) + - eccodes>=2.27 # 2.28 changed some labels, in particular: gust -> i10fg (i20fg?) + - fiona>=1.9 - gdal>=3.6 - geopandas>=0.14 - h5py>=3.8 From 33e2706d024147c07a2e16ae290f7a17bffd2d11 Mon Sep 17 00:00:00 2001 From: emanuel-schmid Date: Thu, 12 Sep 2024 16:52:05 +0200 Subject: [PATCH 03/12] pin geopandas down to 0.x for the time being --- requirements/env_climada.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/env_climada.yml b/requirements/env_climada.yml index 1271c7c31..86ecff83e 100644 --- a/requirements/env_climada.yml +++ b/requirements/env_climada.yml @@ -11,7 +11,7 @@ dependencies: - eccodes>=2.27 # 2.28 changed some labels, in particular: gust -> i10fg (i20fg?) 
- fiona>=1.9 - gdal>=3.6 - - geopandas>=0.14 + - geopandas>=0.14,<1.0 - h5py>=3.8 - haversine>=2.8 - matplotlib-base>=3.9 From d718501a119818794bcee03c6e8faf4ce0f2b3aa Mon Sep 17 00:00:00 2001 From: emanuel-schmid Date: Fri, 13 Sep 2024 15:16:42 +0200 Subject: [PATCH 04/12] dependencies: as long as geopandas is <1, fiona is automatically installed --- requirements/env_climada.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/requirements/env_climada.yml b/requirements/env_climada.yml index 86ecff83e..52722f3d4 100644 --- a/requirements/env_climada.yml +++ b/requirements/env_climada.yml @@ -9,7 +9,6 @@ dependencies: - contextily>=1.6 - dask>=2024.5 - eccodes>=2.27 # 2.28 changed some labels, in particular: gust -> i10fg (i20fg?) - - fiona>=1.9 - gdal>=3.6 - geopandas>=0.14,<1.0 - h5py>=3.8 From 822c7f1d2301ac7bface42ec0f742d1ed3d417e4 Mon Sep 17 00:00:00 2001 From: emanuel-schmid Date: Sun, 20 Oct 2024 15:50:26 +0200 Subject: [PATCH 05/12] format --- climada/__init__.py | 65 +- climada/_version.py | 2 +- climada/engine/__init__.py | 5 +- climada/engine/calibration_opt.py | 401 ++-- climada/engine/cost_benefit.py | 1037 +++++++--- climada/engine/forecast.py | 61 +- climada/engine/impact.py | 891 +++++--- climada/engine/impact_calc.py | 193 +- climada/engine/impact_data.py | 1030 ++++++---- climada/engine/test/test_cost_benefit.py | 1262 ++++++++---- climada/engine/test/test_forecast.py | 311 +-- climada/engine/test/test_impact.py | 317 +-- climada/engine/test/test_impact_calc.py | 444 ++-- climada/engine/test/test_impact_data.py | 258 ++- climada/engine/unsequa/__init__.py | 6 +- climada/engine/unsequa/calc_base.py | 349 ++-- climada/engine/unsequa/calc_cost_benefit.py | 265 +-- climada/engine/unsequa/calc_delta_climate.py | 24 +- climada/engine/unsequa/calc_impact.py | 143 +- climada/engine/unsequa/input_var.py | 391 ++-- climada/engine/unsequa/test/test_unsequa.py | 755 +++---- climada/engine/unsequa/unc_output.py | 639 +++--- climada/entity/__init__.py | 5 +- climada/entity/disc_rates/__init__.py | 1 + climada/entity/disc_rates/base.py | 91 +- climada/entity/disc_rates/test/test_base.py | 89 +- climada/entity/entity_def.py | 25 +- climada/entity/exposures/__init__.py | 2 +- climada/entity/exposures/base.py | 738 ++++--- climada/entity/exposures/litpop/__init__.py | 4 +- .../entity/exposures/litpop/gpw_population.py | 71 +- climada/entity/exposures/litpop/litpop.py | 822 +++++--- climada/entity/exposures/litpop/nightlight.py | 304 +-- climada/entity/exposures/test/test_base.py | 507 +++-- climada/entity/exposures/test/test_litpop.py | 323 +-- climada/entity/exposures/test/test_mat.py | 95 +- .../entity/exposures/test/test_nightlight.py | 85 +- climada/entity/impact_funcs/__init__.py | 1 + climada/entity/impact_funcs/base.py | 95 +- .../entity/impact_funcs/impact_func_set.py | 207 +- climada/entity/impact_funcs/storm_europe.py | 77 +- climada/entity/impact_funcs/test/test_base.py | 65 +- .../impact_funcs/test/test_imp_fun_set.py | 282 +-- climada/entity/impact_funcs/test/test_tc.py | 95 +- climada/entity/impact_funcs/test/test_ws.py | 88 +- climada/entity/impact_funcs/trop_cyclone.py | 732 +++++-- climada/entity/measures/__init__.py | 1 + climada/entity/measures/base.py | 151 +- climada/entity/measures/measure_set.py | 325 +-- climada/entity/measures/test/test_base.py | 756 +++++-- climada/entity/measures/test/test_meas_set.py | 379 ++-- climada/entity/tag/__init__.py | 10 +- climada/entity/tag/tag.py | 45 +- climada/entity/tag/test/test_tag.py | 41 +- climada/entity/test/test_entity.py 
| 30 +- climada/hazard/__init__.py | 7 +- climada/hazard/base.py | 437 ++-- climada/hazard/centroids/__init__.py | 1 + climada/hazard/centroids/centr.py | 356 ++-- climada/hazard/centroids/test/test_centr.py | 564 ++--- climada/hazard/io.py | 483 +++-- climada/hazard/isimip_data.py | 11 +- climada/hazard/plot.py | 132 +- climada/hazard/storm_europe.py | 631 +++--- climada/hazard/tc_clim_change.py | 1172 +++++++++-- climada/hazard/tc_tracks.py | 1715 ++++++++++------ climada/hazard/tc_tracks_synth.py | 828 +++++--- climada/hazard/test/__init__.py | 18 +- climada/hazard/test/test_base.py | 958 +++++---- climada/hazard/test/test_io.py | 35 +- climada/hazard/test/test_storm_europe.py | 97 +- climada/hazard/test/test_tc_cc.py | 126 +- climada/hazard/test/test_tc_tracks.py | 1228 ++++++----- climada/hazard/test/test_tc_tracks_synth.py | 829 +++++--- climada/hazard/test/test_trop_cyclone.py | 386 +++- .../test/test_trop_cyclone_windfields.py | 357 ++-- climada/hazard/trop_cyclone/__init__.py | 24 +- climada/hazard/trop_cyclone/trop_cyclone.py | 266 +-- .../trop_cyclone/trop_cyclone_windfields.py | 229 ++- climada/test/__init__.py | 30 +- climada/test/test_api_client.py | 301 ++- climada/test/test_calibration.py | 63 +- climada/test/test_engine.py | 24 +- climada/test/test_hazard.py | 150 +- climada/test/test_litpop_integr.py | 395 ++-- climada/test/test_nightlight.py | 333 +-- climada/test/test_plot.py | 173 +- climada/test/test_util.py | 5 +- climada/test/test_util_calibrate.py | 18 +- climada/util/__init__.py | 5 +- climada/util/api_client.py | 13 +- climada/util/calibrate/__init__.py | 2 +- climada/util/calibrate/base.py | 17 +- climada/util/calibrate/bayesian_optimizer.py | 21 +- climada/util/calibrate/scipy_optimizer.py | 6 +- climada/util/calibrate/test/test_base.py | 10 +- .../calibrate/test/test_bayesian_optimizer.py | 12 +- .../calibrate/test/test_scipy_optimizer.py | 6 +- climada/util/checker.py | 40 +- climada/util/config.py | 101 +- climada/util/constants.py | 1346 +++++++----- climada/util/coordinates.py | 899 +++++--- climada/util/dates_times.py | 12 +- climada/util/dwd_icon_loader.py | 259 +-- climada/util/earth_engine.py | 38 +- climada/util/files_handler.py | 49 +- climada/util/finance.py | 227 ++- climada/util/hdf5_handler.py | 158 +- climada/util/interpolation.py | 103 +- climada/util/lines_polys_handler.py | 237 ++- climada/util/plot.py | 535 +++-- climada/util/save.py | 31 +- climada/util/scalebar_plot.py | 56 +- climada/util/select.py | 4 +- climada/util/test/test__init__.py | 29 +- climada/util/test/test_checker.py | 70 +- climada/util/test/test_config.py | 33 +- climada/util/test/test_coordinates.py | 1807 +++++++++++------ climada/util/test/test_dates_times.py | 47 +- climada/util/test/test_dwd_icon.py | 91 +- climada/util/test/test_files.py | 85 +- climada/util/test/test_finance.py | 127 +- climada/util/test/test_hdf5.py | 140 +- climada/util/test/test_interpolation.py | 125 +- climada/util/test/test_lines_polys_handler.py | 1126 +++++++--- climada/util/test/test_plot.py | 202 +- climada/util/test/test_save.py | 22 +- climada/util/test/test_select.py | 15 +- .../util/test/test_value_representation.py | 116 +- climada/util/test/test_yearsets.py | 62 +- climada/util/value_representation.py | 4 +- climada/util/yearsets.py | 105 +- 132 files changed, 22881 insertions(+), 13285 deletions(-) diff --git a/climada/__init__.py b/climada/__init__.py index 8fc4b8764..4a10de199 100755 --- a/climada/__init__.py +++ b/climada/__init__.py @@ -18,17 +18,17 @@ climada init """ -from 
shutil import copyfile + from pathlib import Path +from shutil import copyfile from .util.config import CONFIG from .util.constants import * - -GSDP_DIR = SYSTEM_DIR.joinpath('GSDP') +GSDP_DIR = SYSTEM_DIR.joinpath("GSDP") REPO_DATA = { - 'climada/data/system': [ + "climada/data/system": [ ISIMIP_GPWV3_NATID_150AS, GLB_CENTROIDS_MAT, ENT_TEMPLATE_XLS, @@ -36,20 +36,34 @@ RIVER_FLOOD_REGIONS_CSV, NATEARTH_CENTROIDS[150], NATEARTH_CENTROIDS[360], - SYSTEM_DIR.joinpath('WEALTH2GDP_factors_CRI_2016.csv'), - SYSTEM_DIR.joinpath('GDP_TWN_IMF_WEO_data.csv'), - SYSTEM_DIR.joinpath('FAOSTAT_data_country_codes.csv'), - SYSTEM_DIR.joinpath('rcp_db.xls'), - SYSTEM_DIR.joinpath('tc_impf_cal_v01_TDR1.0.csv'), - SYSTEM_DIR.joinpath('tc_impf_cal_v01_EDR.csv'), - SYSTEM_DIR.joinpath('tc_impf_cal_v01_RMSF.csv'), + SYSTEM_DIR.joinpath("WEALTH2GDP_factors_CRI_2016.csv"), + SYSTEM_DIR.joinpath("GDP_TWN_IMF_WEO_data.csv"), + SYSTEM_DIR.joinpath("FAOSTAT_data_country_codes.csv"), + SYSTEM_DIR.joinpath("rcp_db.xls"), + SYSTEM_DIR.joinpath("tc_impf_cal_v01_TDR1.0.csv"), + SYSTEM_DIR.joinpath("tc_impf_cal_v01_EDR.csv"), + SYSTEM_DIR.joinpath("tc_impf_cal_v01_RMSF.csv"), ], - 'climada/data/system/GSDP': [ - GSDP_DIR.joinpath(f'{cc}_GSDP.xls') - for cc in ['AUS', 'BRA', 'CAN', 'CHE', 'CHN', 'DEU', 'FRA', 'IDN', 'IND', 'JPN', 'MEX', - 'TUR', 'USA', 'ZAF'] + "climada/data/system/GSDP": [ + GSDP_DIR.joinpath(f"{cc}_GSDP.xls") + for cc in [ + "AUS", + "BRA", + "CAN", + "CHE", + "CHN", + "DEU", + "FRA", + "IDN", + "IND", + "JPN", + "MEX", + "TUR", + "USA", + "ZAF", + ] ], - 'climada/data/demo': [ + "climada/data/demo": [ ENT_DEMO_TODAY, ENT_DEMO_FUTURE, EXP_DEMO_H5, @@ -57,9 +71,10 @@ HAZ_DEMO_MAT, HAZ_DEMO_H5, TC_ANDREW_FL, - DEMO_DIR.joinpath('demo_emdat_impact_data_2020.csv'), - DEMO_DIR.joinpath('nl_rails.gpkg'), - ] + WS_DEMO_NC + DEMO_DIR.joinpath("demo_emdat_impact_data_2020.csv"), + DEMO_DIR.joinpath("nl_rails.gpkg"), + ] + + WS_DEMO_NC, } @@ -68,10 +83,13 @@ def test_installation(): If the invoked tests pass and an OK is printed out, the installation was successfull. 
""" from unittest import TestLoader, TextTestRunner - suite = TestLoader().discover(start_dir='climada.engine.test', - pattern='test_cost_benefit.py') - suite.addTest(TestLoader().discover(start_dir='climada.engine.test', - pattern='test_impact.py')) + + suite = TestLoader().discover( + start_dir="climada.engine.test", pattern="test_cost_benefit.py" + ) + suite.addTest( + TestLoader().discover(start_dir="climada.engine.test", pattern="test_impact.py") + ) TextTestRunner(verbosity=2).run(suite) @@ -98,4 +116,5 @@ def setup_climada_data(reload=False): src = Path(__file__).parent.parent.joinpath(src_dir, path.name) copyfile(src, path) + setup_climada_data() diff --git a/climada/_version.py b/climada/_version.py index 80952dacb..824c821f5 100644 --- a/climada/_version.py +++ b/climada/_version.py @@ -1 +1 @@ -__version__ = '5.0.1-dev' +__version__ = "5.0.1-dev" diff --git a/climada/engine/__init__.py b/climada/engine/__init__.py index 5ed316ca2..ef8292f75 100755 --- a/climada/engine/__init__.py +++ b/climada/engine/__init__.py @@ -18,6 +18,7 @@ init engine """ -from .impact import * + from .cost_benefit import * -from .impact_calc import * \ No newline at end of file +from .impact import * +from .impact_calc import * diff --git a/climada/engine/calibration_opt.py b/climada/engine/calibration_opt.py index ab9d6a688..5f174b5f7 100644 --- a/climada/engine/calibration_opt.py +++ b/climada/engine/calibration_opt.py @@ -20,55 +20,61 @@ Optimization and manual calibration """ -import datetime as dt import copy +import datetime as dt import itertools import logging + import numpy as np import pandas as pd from scipy import interpolate from scipy.optimize import minimize from climada.engine import ImpactCalc +from climada.engine.impact_data import emdat_impact_yearlysum # , emdat_impact_event from climada.entity import ImpactFuncSet, ImpfTropCyclone, impact_funcs -from climada.engine.impact_data import emdat_impact_yearlysum #, emdat_impact_event LOGGER = logging.getLogger(__name__) +def calib_instance( + hazard, + exposure, + impact_func, + df_out=pd.DataFrame(), + yearly_impact=False, + return_cost="False", +): + """calculate one impact instance for the calibration algorithm and write + to given DataFrame -def calib_instance(hazard, exposure, impact_func, df_out=pd.DataFrame(), - yearly_impact=False, return_cost='False'): + Parameters + ---------- + hazard : Hazard + exposure : Exposure + impact_func : ImpactFunc + df_out : Dataframe, optional + Output DataFrame with headers of columns defined and optionally with + first row (index=0) defined with values. If columns "impact", + "event_id", or "year" are not included, they are created here. + Data like reported impacts or impact function parameters can be + given here; values are preserved. + yearly_impact : boolean, optional + if set True, impact is returned per year, not per event + return_cost : str, optional + if not 'False' but any of 'R2', 'logR2', + cost is returned instead of df_out - """calculate one impact instance for the calibration algorithm and write - to given DataFrame - - Parameters - ---------- - hazard : Hazard - exposure : Exposure - impact_func : ImpactFunc - df_out : Dataframe, optional - Output DataFrame with headers of columns defined and optionally with - first row (index=0) defined with values. If columns "impact", - "event_id", or "year" are not included, they are created here. - Data like reported impacts or impact function parameters can be - given here; values are preserved. 
- yearly_impact : boolean, optional - if set True, impact is returned per year, not per event - return_cost : str, optional - if not 'False' but any of 'R2', 'logR2', - cost is returned instead of df_out - - Returns - ------- - df_out: DataFrame - DataFrame with modelled impact written to rows for each year - or event. + Returns + ------- + df_out: DataFrame + DataFrame with modelled impact written to rows for each year + or event. """ ifs = ImpactFuncSet([impact_func]) - impacts = ImpactCalc(exposures=exposure, impfset=ifs, hazard=hazard)\ - .impact(assign_centroids=False) + impacts = ImpactCalc(exposures=exposure, impfset=ifs, hazard=hazard).impact( + assign_centroids=False + ) if yearly_impact: # impact per year iys = impacts.impact_per_year(all_years=True) # Loop over whole year range: @@ -77,43 +83,49 @@ def calib_instance(hazard, exposure, impact_func, df_out=pd.DataFrame(), if cnt_ > 0: df_out.loc[cnt_] = df_out.loc[0] # copy info from first row if year in iys: - df_out.loc[cnt_, 'impact_CLIMADA'] = iys[year] + df_out.loc[cnt_, "impact_CLIMADA"] = iys[year] else: - df_out.loc[cnt_, 'impact_CLIMADA'] = 0.0 - df_out.loc[cnt_, 'year'] = year + df_out.loc[cnt_, "impact_CLIMADA"] = 0.0 + df_out.loc[cnt_, "year"] = year else: - years_in_common = df_out.loc[df_out['year'].isin(np.sort(list((iys.keys())))), 'year'] + years_in_common = df_out.loc[ + df_out["year"].isin(np.sort(list((iys.keys())))), "year" + ] for cnt_, year in years_in_common.iteritems(): - df_out.loc[df_out['year'] == year, 'impact_CLIMADA'] = iys[year] - + df_out.loc[df_out["year"] == year, "impact_CLIMADA"] = iys[year] else: # impact per event if df_out.empty | df_out.index.shape[0] == 1: for cnt_, impact in enumerate(impacts.at_event): if cnt_ > 0: df_out.loc[cnt_] = df_out.loc[0] # copy info from first row - df_out.loc[cnt_, 'impact_CLIMADA'] = impact - df_out.loc[cnt_, 'event_id'] = int(impacts.event_id[cnt_]) - df_out.loc[cnt_, 'event_name'] = impacts.event_name[cnt_] - df_out.loc[cnt_, 'year'] = \ - dt.datetime.fromordinal(impacts.date[cnt_]).year - df_out.loc[cnt_, 'date'] = impacts.date[cnt_] + df_out.loc[cnt_, "impact_CLIMADA"] = impact + df_out.loc[cnt_, "event_id"] = int(impacts.event_id[cnt_]) + df_out.loc[cnt_, "event_name"] = impacts.event_name[cnt_] + df_out.loc[cnt_, "year"] = dt.datetime.fromordinal( + impacts.date[cnt_] + ).year + df_out.loc[cnt_, "date"] = impacts.date[cnt_] elif df_out.index.shape[0] == impacts.at_event.shape[0]: for cnt_, (impact, ind) in enumerate(zip(impacts.at_event, df_out.index)): - df_out.loc[ind, 'impact_CLIMADA'] = impact - df_out.loc[ind, 'event_id'] = int(impacts.event_id[cnt_]) - df_out.loc[ind, 'event_name'] = impacts.event_name[cnt_] - df_out.loc[ind, 'year'] = \ - dt.datetime.fromordinal(impacts.date[cnt_]).year - df_out.loc[ind, 'date'] = impacts.date[cnt_] + df_out.loc[ind, "impact_CLIMADA"] = impact + df_out.loc[ind, "event_id"] = int(impacts.event_id[cnt_]) + df_out.loc[ind, "event_name"] = impacts.event_name[cnt_] + df_out.loc[ind, "year"] = dt.datetime.fromordinal( + impacts.date[cnt_] + ).year + df_out.loc[ind, "date"] = impacts.date[cnt_] else: - raise ValueError('adding simulated impacts to reported impacts not' - ' yet implemented. use yearly_impact=True or run' - ' without init_impact_data.') - if return_cost != 'False': + raise ValueError( + "adding simulated impacts to reported impacts not" + " yet implemented. use yearly_impact=True or run" + " without init_impact_data." 
+ ) + if return_cost != "False": df_out = calib_cost_calc(df_out, return_cost) return df_out + def init_impf(impf_name_or_instance, param_dict, df_out=pd.DataFrame(index=[0])): """create an ImpactFunc based on the parameters in param_dict using the method specified in impf_parameterisation_name and document it in df_out. @@ -139,20 +151,21 @@ def init_impf(impf_name_or_instance, param_dict, df_out=pd.DataFrame(index=[0])) """ impact_func_final = None if isinstance(impf_name_or_instance, str): - if impf_name_or_instance == 'emanuel': + if impf_name_or_instance == "emanuel": impact_func_final = ImpfTropCyclone.from_emanuel_usa(**param_dict) - impact_func_final.haz_type = 'TC' + impact_func_final.haz_type = "TC" impact_func_final.id = 1 - df_out['impact_function'] = impf_name_or_instance + df_out["impact_function"] = impf_name_or_instance elif isinstance(impf_name_or_instance, impact_funcs.ImpactFunc): impact_func_final = change_impf(impf_name_or_instance, param_dict) - df_out['impact_function'] = ('given_' + - impact_func_final.haz_type + - str(impact_func_final.id)) + df_out["impact_function"] = ( + "given_" + impact_func_final.haz_type + str(impact_func_final.id) + ) for key, val in param_dict.items(): df_out[key] = val return impact_func_final, df_out + def change_impf(impf_instance, param_dict): """apply a shifting or a scaling defined in param_dict to the impact function in impf_istance and return it as a new ImpactFunc object. @@ -173,60 +186,71 @@ def change_impf(impf_instance, param_dict): """ ImpactFunc_new = copy.deepcopy(impf_instance) # create higher resolution impact functions (intensity, mdd ,paa) - paa_func = interpolate.interp1d(ImpactFunc_new.intensity, - ImpactFunc_new.paa, - fill_value='extrapolate') - mdd_func = interpolate.interp1d(ImpactFunc_new.intensity, - ImpactFunc_new.mdd, - fill_value='extrapolate') + paa_func = interpolate.interp1d( + ImpactFunc_new.intensity, ImpactFunc_new.paa, fill_value="extrapolate" + ) + mdd_func = interpolate.interp1d( + ImpactFunc_new.intensity, ImpactFunc_new.mdd, fill_value="extrapolate" + ) temp_dict = dict() - temp_dict['paa_intensity_ext'] = np.linspace(ImpactFunc_new.intensity.min(), - ImpactFunc_new.intensity.max(), - (ImpactFunc_new.intensity.shape[0] + 1) * 10 + 1) - temp_dict['mdd_intensity_ext'] = np.linspace(ImpactFunc_new.intensity.min(), - ImpactFunc_new.intensity.max(), - (ImpactFunc_new.intensity.shape[0] + 1) * 10 + 1) - temp_dict['paa_ext'] = paa_func(temp_dict['paa_intensity_ext']) - temp_dict['mdd_ext'] = mdd_func(temp_dict['mdd_intensity_ext']) + temp_dict["paa_intensity_ext"] = np.linspace( + ImpactFunc_new.intensity.min(), + ImpactFunc_new.intensity.max(), + (ImpactFunc_new.intensity.shape[0] + 1) * 10 + 1, + ) + temp_dict["mdd_intensity_ext"] = np.linspace( + ImpactFunc_new.intensity.min(), + ImpactFunc_new.intensity.max(), + (ImpactFunc_new.intensity.shape[0] + 1) * 10 + 1, + ) + temp_dict["paa_ext"] = paa_func(temp_dict["paa_intensity_ext"]) + temp_dict["mdd_ext"] = mdd_func(temp_dict["mdd_intensity_ext"]) # apply changes given in param_dict for key, val in param_dict.items(): - field_key, action = key.split('_') - if action == 'shift': - shift_absolut = ( - ImpactFunc_new.intensity[np.nonzero(getattr(ImpactFunc_new, field_key))[0][0]] - * (val - 1)) - temp_dict[field_key + '_intensity_ext'] = \ - temp_dict[field_key + '_intensity_ext'] + shift_absolut - elif action == 'scale': - temp_dict[field_key + '_ext'] = \ - np.clip(temp_dict[field_key + '_ext'] * val, - a_min=0, - a_max=1) + field_key, action = 
key.split("_") + if action == "shift": + shift_absolut = ImpactFunc_new.intensity[ + np.nonzero(getattr(ImpactFunc_new, field_key))[0][0] + ] * (val - 1) + temp_dict[field_key + "_intensity_ext"] = ( + temp_dict[field_key + "_intensity_ext"] + shift_absolut + ) + elif action == "scale": + temp_dict[field_key + "_ext"] = np.clip( + temp_dict[field_key + "_ext"] * val, a_min=0, a_max=1 + ) else: - raise AttributeError('keys in param_dict not recognized. Use only:' - 'paa_shift, paa_scale, mdd_shift, mdd_scale') + raise AttributeError( + "keys in param_dict not recognized. Use only:" + "paa_shift, paa_scale, mdd_shift, mdd_scale" + ) # map changed, high resolution impact functions back to initial resolution - ImpactFunc_new.intensity = np.linspace(ImpactFunc_new.intensity.min(), - ImpactFunc_new.intensity.max(), - (ImpactFunc_new.intensity.shape[0] + 1) * 10 + 1) - paa_func_new = interpolate.interp1d(temp_dict['paa_intensity_ext'], - temp_dict['paa_ext'], - fill_value='extrapolate') - mdd_func_new = interpolate.interp1d(temp_dict['mdd_intensity_ext'], - temp_dict['mdd_ext'], - fill_value='extrapolate') + ImpactFunc_new.intensity = np.linspace( + ImpactFunc_new.intensity.min(), + ImpactFunc_new.intensity.max(), + (ImpactFunc_new.intensity.shape[0] + 1) * 10 + 1, + ) + paa_func_new = interpolate.interp1d( + temp_dict["paa_intensity_ext"], temp_dict["paa_ext"], fill_value="extrapolate" + ) + mdd_func_new = interpolate.interp1d( + temp_dict["mdd_intensity_ext"], temp_dict["mdd_ext"], fill_value="extrapolate" + ) ImpactFunc_new.paa = paa_func_new(ImpactFunc_new.intensity) ImpactFunc_new.mdd = mdd_func_new(ImpactFunc_new.intensity) return ImpactFunc_new -def init_impact_data(hazard_type, - region_ids, - year_range, - source_file, - reference_year, - impact_data_source='emdat', - yearly_impact=True): + +def init_impact_data( + hazard_type, + region_ids, + year_range, + source_file, + reference_year, + impact_data_source="emdat", + yearly_impact=True, +): """creates a dataframe containing the recorded impact data for one hazard type and one area (countries, country or local split) @@ -253,18 +277,25 @@ def init_impact_data(hazard_type, Dataframe with recorded impact written to rows for each year or event. """ - if impact_data_source == 'emdat': + if impact_data_source == "emdat": if yearly_impact: - em_data = emdat_impact_yearlysum(source_file, countries=region_ids, - hazard=hazard_type, - year_range=year_range, - reference_year=reference_year) + em_data = emdat_impact_yearlysum( + source_file, + countries=region_ids, + hazard=hazard_type, + year_range=year_range, + reference_year=reference_year, + ) else: - raise ValueError('init_impact_data not yet implemented for yearly_impact = False.') - #em_data = emdat_impact_event(source_file) + raise ValueError( + "init_impact_data not yet implemented for yearly_impact = False." + ) + # em_data = emdat_impact_event(source_file) else: - raise ValueError('init_impact_data not yet implemented for other impact_data_sources ' - 'than emdat.') + raise ValueError( + "init_impact_data not yet implemented for other impact_data_sources " + "than emdat." 
+ ) return em_data @@ -285,23 +316,34 @@ def calib_cost_calc(df_out, cost_function): The results of the cost function when comparing modelled and reported impact """ - if cost_function == 'R2': - cost = np.sum((pd.to_numeric(df_out['impact_scaled']) - - pd.to_numeric(df_out['impact_CLIMADA']))**2) - elif cost_function == 'logR2': - impact1 = pd.to_numeric(df_out['impact_scaled']) + if cost_function == "R2": + cost = np.sum( + ( + pd.to_numeric(df_out["impact_scaled"]) + - pd.to_numeric(df_out["impact_CLIMADA"]) + ) + ** 2 + ) + elif cost_function == "logR2": + impact1 = pd.to_numeric(df_out["impact_scaled"]) impact1[impact1 <= 0] = 1 - impact2 = pd.to_numeric(df_out['impact_CLIMADA']) + impact2 = pd.to_numeric(df_out["impact_CLIMADA"]) impact2[impact2 <= 0] = 1 - cost = np.sum((np.log(impact1) - - np.log(impact2))**2) + cost = np.sum((np.log(impact1) - np.log(impact2)) ** 2) else: - raise ValueError('This cost function is not implemented.') + raise ValueError("This cost function is not implemented.") return cost -def calib_all(hazard, exposure, impf_name_or_instance, param_full_dict, - impact_data_source, year_range, yearly_impact=True): +def calib_all( + hazard, + exposure, + impf_name_or_instance, + param_full_dict, + impact_data_source, + year_range, + yearly_impact=True, +): """portrait the difference between modelled and reported impacts for all impact functions described in param_full_dict and impf_name_or_instance @@ -337,30 +379,46 @@ def calib_all(hazard, exposure, impf_name_or_instance, param_full_dict, if isinstance(impact_data_source, pd.DataFrame): df_impact_data = impact_data_source else: - if list(impact_data_source.keys()) == ['emdat']: - df_impact_data = init_impact_data(hazard_type, region_ids, year_range, - impact_data_source['emdat'], year_range[-1]) + if list(impact_data_source.keys()) == ["emdat"]: + df_impact_data = init_impact_data( + hazard_type, + region_ids, + year_range, + impact_data_source["emdat"], + year_range[-1], + ) else: - raise ValueError('other impact data sources not yet implemented.') - params_generator = (dict(zip(param_full_dict, x)) - for x in itertools.product(*param_full_dict.values())) + raise ValueError("other impact data sources not yet implemented.") + params_generator = ( + dict(zip(param_full_dict, x)) + for x in itertools.product(*param_full_dict.values()) + ) for param_dict in params_generator: print(param_dict) df_out = copy.deepcopy(df_impact_data) impact_func_final, df_out = init_impf(impf_name_or_instance, param_dict, df_out) - df_out = calib_instance(hazard, exposure, impact_func_final, df_out, yearly_impact) + df_out = calib_instance( + hazard, exposure, impact_func_final, df_out, yearly_impact + ) if df_result is None: df_result = copy.deepcopy(df_out) else: df_result = df_result.append(df_out, input) - return df_result -def calib_optimize(hazard, exposure, impf_name_or_instance, param_dict, - impact_data_source, year_range, yearly_impact=True, - cost_fucntion='R2', show_details=False): +def calib_optimize( + hazard, + exposure, + impf_name_or_instance, + param_dict, + impact_data_source, + year_range, + yearly_impact=True, + cost_fucntion="R2", + show_details=False, +): """portrait the difference between modelled and reported impacts for all impact functions described in param_full_dict and impf_name_or_instance @@ -403,48 +461,67 @@ def calib_optimize(hazard, exposure, impf_name_or_instance, param_dict, if isinstance(impact_data_source, pd.DataFrame): df_impact_data = impact_data_source else: - if list(impact_data_source.keys()) 
== ['emdat']: - df_impact_data = init_impact_data(hazard_type, region_ids, year_range, - impact_data_source['emdat'], year_range[-1]) + if list(impact_data_source.keys()) == ["emdat"]: + df_impact_data = init_impact_data( + hazard_type, + region_ids, + year_range, + impact_data_source["emdat"], + year_range[-1], + ) else: - raise ValueError('other impact data sources not yet implemented.') + raise ValueError("other impact data sources not yet implemented.") + # definie specific function to def specific_calib(values): param_dict_temp = dict(zip(param_dict.keys(), values)) print(param_dict_temp) - return calib_instance(hazard, exposure, - init_impf(impf_name_or_instance, param_dict_temp)[0], - df_impact_data, - yearly_impact=yearly_impact, return_cost=cost_fucntion) + return calib_instance( + hazard, + exposure, + init_impf(impf_name_or_instance, param_dict_temp)[0], + df_impact_data, + yearly_impact=yearly_impact, + return_cost=cost_fucntion, + ) + # define constraints - if impf_name_or_instance == 'emanuel': - cons = [{'type': 'ineq', 'fun': lambda x: -x[0] + x[1]}, - {'type': 'ineq', 'fun': lambda x: -x[2] + 0.9999}, - {'type': 'ineq', 'fun': lambda x: x[2]}] + if impf_name_or_instance == "emanuel": + cons = [ + {"type": "ineq", "fun": lambda x: -x[0] + x[1]}, + {"type": "ineq", "fun": lambda x: -x[2] + 0.9999}, + {"type": "ineq", "fun": lambda x: x[2]}, + ] else: - cons = [{'type': 'ineq', 'fun': lambda x: -x[0] + 2}, - {'type': 'ineq', 'fun': lambda x: x[0]}, - {'type': 'ineq', 'fun': lambda x: -x[1] + 2}, - {'type': 'ineq', 'fun': lambda x: x[1]}] - + cons = [ + {"type": "ineq", "fun": lambda x: -x[0] + 2}, + {"type": "ineq", "fun": lambda x: x[0]}, + {"type": "ineq", "fun": lambda x: -x[1] + 2}, + {"type": "ineq", "fun": lambda x: x[1]}, + ] values = list(param_dict.values()) - res = minimize(specific_calib, values, - # bounds=bounds, - # bounds=((0.0, np.inf), (0.0, np.inf), (0.0, 1.0)), - constraints=cons, - # method='SLSQP', - method='trust-constr', - options={'xtol': 1e-5, 'disp': True, 'maxiter': 500}) + res = minimize( + specific_calib, + values, + # bounds=bounds, + # bounds=((0.0, np.inf), (0.0, np.inf), (0.0, 1.0)), + constraints=cons, + # method='SLSQP', + method="trust-constr", + options={"xtol": 1e-5, "disp": True, "maxiter": 500}, + ) param_dict_result = dict(zip(param_dict.keys(), res.x)) if res.success: - LOGGER.info('Optimization successfully finished.') + LOGGER.info("Optimization successfully finished.") else: - LOGGER.info('Opimization did not finish successfully. Check you input' - ' or consult the detailed returns (with argument' - 'show_details=True) for further information.') + LOGGER.info( + "Opimization did not finish successfully. Check you input" + " or consult the detailed returns (with argument" + "show_details=True) for further information." + ) if show_details: return param_dict_result, res diff --git a/climada/engine/cost_benefit.py b/climada/engine/cost_benefit.py index ea1c771b2..ef3e1ec3a 100644 --- a/climada/engine/cost_benefit.py +++ b/climada/engine/cost_benefit.py @@ -19,20 +19,20 @@ Define CostBenefit class. 
""" -__all__ = ['CostBenefit', 'risk_aai_agg', 'risk_rp_100', 'risk_rp_250'] +__all__ = ["CostBenefit", "risk_aai_agg", "risk_rp_100", "risk_rp_250"] import copy import logging -from typing import Optional, Dict, Tuple, Union +from typing import Dict, Optional, Tuple, Union -import numpy as np import matplotlib.colors as colors import matplotlib.pyplot as plt -from matplotlib.patches import Rectangle, FancyArrowPatch +import numpy as np +from matplotlib.patches import FancyArrowPatch, Rectangle from tabulate import tabulate -from climada.engine.impact_calc import ImpactCalc from climada.engine import Impact, ImpactFreqCurve +from climada.engine.impact_calc import ImpactCalc LOGGER = logging.getLogger(__name__) @@ -42,9 +42,10 @@ DEF_FUTURE_YEAR = 2030 """Default future reference year""" -NO_MEASURE = 'no measure' +NO_MEASURE = "no measure" """Name of risk metrics when no measure is applied""" + def risk_aai_agg(impact): """Risk measurement as average annual impact aggregated. @@ -59,6 +60,7 @@ def risk_aai_agg(impact): """ return impact.aai_agg + def risk_rp_100(impact): """Risk measurement as exceedance impact at 100 years return period. @@ -76,6 +78,7 @@ def risk_rp_100(impact): return efc.impact[0] return 0 + def risk_rp_250(impact): """Risk measurement as exceedance impact at 250 years return period. @@ -93,7 +96,8 @@ def risk_rp_250(impact): return efc.impact[0] return 0 -class CostBenefit(): + +class CostBenefit: """Impact definition. Compute from an entity (exposures and impact functions) and hazard. @@ -142,14 +146,16 @@ def __init__( present_year: int = DEF_PRESENT_YEAR, future_year: int = DEF_FUTURE_YEAR, tot_climate_risk: float = 0.0, - unit: str = 'USD', + unit: str = "USD", color_rgb: Optional[Dict[str, np.ndarray]] = None, benefit: Optional[Dict[str, float]] = None, cost_ben_ratio: Optional[Dict[str, float]] = None, - imp_meas_present: Optional[Dict[str, - Union[float, Tuple[float, float], Impact, ImpactFreqCurve]]] = None, - imp_meas_future: Optional[Dict[str, - Union[float, Tuple[float, float], Impact, ImpactFreqCurve]]] = None, + imp_meas_present: Optional[ + Dict[str, Union[float, Tuple[float, float], Impact, ImpactFreqCurve]] + ] = None, + imp_meas_future: Optional[ + Dict[str, Union[float, Tuple[float, float], Impact, ImpactFreqCurve]] + ] = None, ): """Initilization""" self.present_year = present_year @@ -171,11 +177,25 @@ def __init__( # 'risk': risk measurement, # 'efc': ImpactFreqCurve # (optionally) 'impact': Impact - self.imp_meas_future = imp_meas_future if imp_meas_future is not None else dict() - self.imp_meas_present = imp_meas_present if imp_meas_present is not None else dict() + self.imp_meas_future = ( + imp_meas_future if imp_meas_future is not None else dict() + ) + self.imp_meas_present = ( + imp_meas_present if imp_meas_present is not None else dict() + ) - def calc(self, hazard, entity, haz_future=None, ent_future=None, future_year=None, - risk_func=risk_aai_agg, imp_time_depen=None, save_imp=False, assign_centroids=True): + def calc( + self, + hazard, + entity, + haz_future=None, + ent_future=None, + future_year=None, + risk_func=risk_aai_agg, + imp_time_depen=None, + save_imp=False, + assign_centroids=True, + ): """Compute cost-benefit ratio for every measure provided current and, optionally, future conditions. Present and future measures need to have the same name. The measures costs need to be discounted by the user. 
@@ -222,7 +242,7 @@ def calc(self, hazard, entity, haz_future=None, ent_future=None, future_year=Non # save measure colors for meas in entity.measures.get_measure(hazard.haz_type): self.color_rgb[meas.name] = meas.color_rgb - self.color_rgb[NO_MEASURE] = colors.to_rgb('deepskyblue') + self.color_rgb[NO_MEASURE] = colors.to_rgb("deepskyblue") if future_year is None and ent_future is None: future_year = entity.exposures.ref_year @@ -237,37 +257,74 @@ def calc(self, hazard, entity, haz_future=None, ent_future=None, future_year=Non if not haz_future and not ent_future: self.future_year = future_year - self._calc_impact_measures(hazard, entity.exposures, - entity.measures, entity.impact_funcs, 'future', - risk_func, save_imp) + self._calc_impact_measures( + hazard, + entity.exposures, + entity.measures, + entity.impact_funcs, + "future", + risk_func, + save_imp, + ) else: if imp_time_depen is None: imp_time_depen = 1 - self._calc_impact_measures(hazard, entity.exposures, - entity.measures, entity.impact_funcs, 'present', - risk_func, save_imp) + self._calc_impact_measures( + hazard, + entity.exposures, + entity.measures, + entity.impact_funcs, + "present", + risk_func, + save_imp, + ) if haz_future and ent_future: self.future_year = ent_future.exposures.ref_year - self._calc_impact_measures(haz_future, ent_future.exposures, - ent_future.measures, ent_future.impact_funcs, 'future', - risk_func, save_imp) + self._calc_impact_measures( + haz_future, + ent_future.exposures, + ent_future.measures, + ent_future.impact_funcs, + "future", + risk_func, + save_imp, + ) elif haz_future: self.future_year = future_year - self._calc_impact_measures(haz_future, entity.exposures, - entity.measures, entity.impact_funcs, 'future', - risk_func, save_imp) + self._calc_impact_measures( + haz_future, + entity.exposures, + entity.measures, + entity.impact_funcs, + "future", + risk_func, + save_imp, + ) else: self.future_year = ent_future.exposures.ref_year - self._calc_impact_measures(hazard, ent_future.exposures, - ent_future.measures, ent_future.impact_funcs, 'future', - risk_func, save_imp) + self._calc_impact_measures( + hazard, + ent_future.exposures, + ent_future.measures, + ent_future.impact_funcs, + "future", + risk_func, + save_imp, + ) self._calc_cost_benefit(entity.disc_rates, imp_time_depen) self._print_results() self._print_npv() - def combine_measures(self, in_meas_names, new_name, new_color, disc_rates, - imp_time_depen=None, risk_func=risk_aai_agg): + def combine_measures( + self, + in_meas_names, + new_name, + new_color, + disc_rates, + imp_time_depen=None, + risk_func=risk_aai_agg, + ): """Compute cost-benefit of the combination of measures previously computed by calc with save_imp=True. The benefits of the measures per event are added. 
To combine with risk transfer options use @@ -309,24 +366,37 @@ def combine_measures(self, in_meas_names, new_name, new_color, disc_rates, new_cb.color_rgb[new_name] = new_color # compute impacts for imp_meas_future and imp_meas_present - self._combine_imp_meas(new_cb, in_meas_names, new_name, risk_func, when='future') + self._combine_imp_meas( + new_cb, in_meas_names, new_name, risk_func, when="future" + ) if self.imp_meas_present: new_cb.imp_meas_present[NO_MEASURE] = self.imp_meas_present[NO_MEASURE] if imp_time_depen is None: imp_time_depen = 1 - self._combine_imp_meas(new_cb, in_meas_names, new_name, risk_func, when='present') + self._combine_imp_meas( + new_cb, in_meas_names, new_name, risk_func, when="present" + ) # cost-benefit computation: fill measure's benefit and cost_ben_ratio time_dep = new_cb._time_dependency_array(imp_time_depen) - new_cb._cost_ben_one(new_name, new_cb.imp_meas_future[new_name], disc_rates, - time_dep) + new_cb._cost_ben_one( + new_name, new_cb.imp_meas_future[new_name], disc_rates, time_dep + ) new_cb._print_results() new_cb._print_npv() return new_cb - def apply_risk_transfer(self, meas_name, attachment, cover, disc_rates, - cost_fix=0, cost_factor=1, imp_time_depen=None, - risk_func=risk_aai_agg): + def apply_risk_transfer( + self, + meas_name, + attachment, + cover, + disc_rates, + cost_fix=0, + cost_factor=1, + imp_time_depen=None, + risk_func=risk_aai_agg, + ): """Applies risk transfer to given measure computed before with saved impact and compares it to when no measure is applied. Appended to dictionaries of measures. @@ -354,52 +424,64 @@ def apply_risk_transfer(self, meas_name, attachment, cover, disc_rates, function describing risk measure given an Impact. Default: average annual impact (aggregated). """ - m_transf_name = 'risk transfer (' + meas_name + ')' - self.color_rgb[m_transf_name] = np.maximum(np.minimum(self.color_rgb[meas_name] - - np.ones(3) * 0.2, 1), 0) + m_transf_name = "risk transfer (" + meas_name + ")" + self.color_rgb[m_transf_name] = np.maximum( + np.minimum(self.color_rgb[meas_name] - np.ones(3) * 0.2, 1), 0 + ) - _, layer_no = self.imp_meas_future[NO_MEASURE]['impact']. \ - calc_risk_transfer(attachment, cover) + _, layer_no = self.imp_meas_future[NO_MEASURE]["impact"].calc_risk_transfer( + attachment, cover + ) layer_no = risk_func(layer_no) - imp, layer = self.imp_meas_future[meas_name]['impact']. \ - calc_risk_transfer(attachment, cover) + imp, layer = self.imp_meas_future[meas_name]["impact"].calc_risk_transfer( + attachment, cover + ) self.imp_meas_future[m_transf_name] = dict() - self.imp_meas_future[m_transf_name]['risk_transf'] = risk_func(layer) - self.imp_meas_future[m_transf_name]['impact'] = imp - self.imp_meas_future[m_transf_name]['risk'] = risk_func(imp) - self.imp_meas_future[m_transf_name]['cost'] = (cost_fix, cost_factor) - self.imp_meas_future[m_transf_name]['efc'] = imp.calc_freq_curve() + self.imp_meas_future[m_transf_name]["risk_transf"] = risk_func(layer) + self.imp_meas_future[m_transf_name]["impact"] = imp + self.imp_meas_future[m_transf_name]["risk"] = risk_func(imp) + self.imp_meas_future[m_transf_name]["cost"] = (cost_fix, cost_factor) + self.imp_meas_future[m_transf_name]["efc"] = imp.calc_freq_curve() if self.imp_meas_present: if imp_time_depen is None: imp_time_depen = 1 time_dep = self._time_dependency_array(imp_time_depen) - _, pres_layer_no = self.imp_meas_present[NO_MEASURE]['impact']. 
\ - calc_risk_transfer(attachment, cover) + _, pres_layer_no = self.imp_meas_present[NO_MEASURE][ + "impact" + ].calc_risk_transfer(attachment, cover) pres_layer_no = risk_func(pres_layer_no) layer_no = pres_layer_no + (layer_no - pres_layer_no) * time_dep - imp, layer = self.imp_meas_present[meas_name]['impact']. \ - calc_risk_transfer(attachment, cover) + imp, layer = self.imp_meas_present[meas_name]["impact"].calc_risk_transfer( + attachment, cover + ) self.imp_meas_present[m_transf_name] = dict() - self.imp_meas_present[m_transf_name]['risk_transf'] = risk_func(layer) - self.imp_meas_present[m_transf_name]['impact'] = imp - self.imp_meas_present[m_transf_name]['risk'] = risk_func(imp) - self.imp_meas_present[m_transf_name]['cost'] = (cost_fix, cost_factor) - self.imp_meas_present[m_transf_name]['efc'] = imp.calc_freq_curve() + self.imp_meas_present[m_transf_name]["risk_transf"] = risk_func(layer) + self.imp_meas_present[m_transf_name]["impact"] = imp + self.imp_meas_present[m_transf_name]["risk"] = risk_func(imp) + self.imp_meas_present[m_transf_name]["cost"] = (cost_fix, cost_factor) + self.imp_meas_present[m_transf_name]["efc"] = imp.calc_freq_curve() else: time_dep = self._time_dependency_array(imp_time_depen) layer_no = time_dep * layer_no - self._cost_ben_one(m_transf_name, self.imp_meas_future[m_transf_name], - disc_rates, time_dep, ini_state=meas_name) + self._cost_ben_one( + m_transf_name, + self.imp_meas_future[m_transf_name], + disc_rates, + time_dep, + ini_state=meas_name, + ) # compare layer no measure - layer_no = disc_rates.net_present_value(self.present_year, - self.future_year, layer_no) - layer = ((self.cost_ben_ratio[m_transf_name] * self.benefit[m_transf_name] - cost_fix) - / cost_factor) + layer_no = disc_rates.net_present_value( + self.present_year, self.future_year, layer_no + ) + layer = ( + self.cost_ben_ratio[m_transf_name] * self.benefit[m_transf_name] - cost_fix + ) / cost_factor self._print_results() self._print_risk_transfer(layer, layer_no, cost_fix, cost_factor) self._print_npv() @@ -438,37 +520,74 @@ def plot_cost_benefit(self, cb_list=None, axis=None, **kwargs): matplotlib.axes._subplots.AxesSubplot """ if cb_list: - if 'alpha' not in kwargs: - kwargs['alpha'] = 0.5 + if "alpha" not in kwargs: + kwargs["alpha"] = 0.5 cb_uncer = [self] cb_uncer.extend(cb_list) axis = self._plot_list_cost_ben(cb_uncer, axis, **kwargs) return axis - if 'alpha' not in kwargs: - kwargs['alpha'] = 1.0 + if "alpha" not in kwargs: + kwargs["alpha"] = 1.0 axis = self._plot_list_cost_ben([self], axis, **kwargs) norm_fact, norm_name = _norm_values(self.tot_climate_risk + 0.01) - text_pos = self.imp_meas_future[NO_MEASURE]['risk'] / norm_fact - axis.scatter(text_pos, 0, c='r', zorder=200, clip_on=False) - axis.text(text_pos, 0, ' AAI', horizontalalignment='center', - verticalalignment='bottom', rotation=90, fontsize=12, color='r') + text_pos = self.imp_meas_future[NO_MEASURE]["risk"] / norm_fact + axis.scatter(text_pos, 0, c="r", zorder=200, clip_on=False) + axis.text( + text_pos, + 0, + " AAI", + horizontalalignment="center", + verticalalignment="bottom", + rotation=90, + fontsize=12, + color="r", + ) if abs(text_pos - self.tot_climate_risk / norm_fact) > 1: - axis.scatter(self.tot_climate_risk / norm_fact, 0, c='r', zorder=200, clip_on=False) - axis.text(self.tot_climate_risk / norm_fact, 0, ' Tot risk', - horizontalalignment='center', verticalalignment='bottom', rotation=90, - fontsize=12, color='r') - - axis.set_xlim(0, max(self.tot_climate_risk / norm_fact, - 
np.array(list(self.benefit.values())).sum() / norm_fact)) - axis.set_ylim(0, int(1 / np.nanmin(np.ma.masked_equal(np.array(list( - self.cost_ben_ratio.values())), 0))) + 1) - - x_label = ('NPV averted damage over ' + str(self.future_year - self.present_year + 1) - + ' years (' + self.unit + ' ' + norm_name + ')') + axis.scatter( + self.tot_climate_risk / norm_fact, 0, c="r", zorder=200, clip_on=False + ) + axis.text( + self.tot_climate_risk / norm_fact, + 0, + " Tot risk", + horizontalalignment="center", + verticalalignment="bottom", + rotation=90, + fontsize=12, + color="r", + ) + + axis.set_xlim( + 0, + max( + self.tot_climate_risk / norm_fact, + np.array(list(self.benefit.values())).sum() / norm_fact, + ), + ) + axis.set_ylim( + 0, + int( + 1 + / np.nanmin( + np.ma.masked_equal(np.array(list(self.cost_ben_ratio.values())), 0) + ) + ) + + 1, + ) + + x_label = ( + "NPV averted damage over " + + str(self.future_year - self.present_year + 1) + + " years (" + + self.unit + + " " + + norm_name + + ")" + ) axis.set_xlabel(x_label) - axis.set_ylabel('Benefit/Cost ratio') + axis.set_ylabel("Benefit/Cost ratio") return axis def plot_event_view(self, return_per=(10, 25, 100), axis=None, **kwargs): @@ -489,46 +608,61 @@ def plot_event_view(self, return_per=(10, 25, 100), axis=None, **kwargs): matplotlib.axes._subplots.AxesSubplot """ if not self.imp_meas_future: - raise ValueError('Compute CostBenefit.calc() first') + raise ValueError("Compute CostBenefit.calc() first") if not axis: _, axis = plt.subplots(1, 1) avert_rp = dict() for meas_name, meas_val in self.imp_meas_future.items(): if meas_name == NO_MEASURE: continue - interp_imp = np.interp(return_per, meas_val['efc'].return_per, - meas_val['efc'].impact) + interp_imp = np.interp( + return_per, meas_val["efc"].return_per, meas_val["efc"].impact + ) # check if measure over no measure or combined with another measure try: - ref_meas = meas_name[meas_name.index('(') + 1:meas_name.index(')')] + ref_meas = meas_name[meas_name.index("(") + 1 : meas_name.index(")")] except ValueError: ref_meas = NO_MEASURE - ref_imp = np.interp(return_per, - self.imp_meas_future[ref_meas]['efc'].return_per, - self.imp_meas_future[ref_meas]['efc'].impact) + ref_imp = np.interp( + return_per, + self.imp_meas_future[ref_meas]["efc"].return_per, + self.imp_meas_future[ref_meas]["efc"].impact, + ) avert_rp[meas_name] = ref_imp - interp_imp m_names = list(self.cost_ben_ratio.keys()) sort_cb = np.argsort(np.array([self.cost_ben_ratio[name] for name in m_names])) names_sort = [m_names[i] for i in sort_cb] color_sort = [self.color_rgb[name] for name in names_sort] - ref_imp = np.interp(return_per, self.imp_meas_future[NO_MEASURE]['efc'].return_per, - self.imp_meas_future[NO_MEASURE]['efc'].impact) + ref_imp = np.interp( + return_per, + self.imp_meas_future[NO_MEASURE]["efc"].return_per, + self.imp_meas_future[NO_MEASURE]["efc"].impact, + ) for rp_i, _ in enumerate(return_per): val_i = [avert_rp[name][rp_i] for name in names_sort] cum_effect = np.cumsum(np.array([0] + val_i)) - for (eff, color) in zip(cum_effect[::-1][:-1], color_sort[::-1]): + for eff, color in zip(cum_effect[::-1][:-1], color_sort[::-1]): axis.bar(rp_i + 1, eff, color=color, **kwargs) - axis.bar(rp_i + 1, ref_imp[rp_i], edgecolor='k', fc=(1, 0, 0, 0), zorder=100) - axis.set_xlabel('Return Period (%s)' % str(self.future_year)) - axis.set_ylabel('Impact (' + self.unit + ')') + axis.bar( + rp_i + 1, ref_imp[rp_i], edgecolor="k", fc=(1, 0, 0, 0), zorder=100 + ) + axis.set_xlabel("Return Period (%s)" % 
str(self.future_year)) + axis.set_ylabel("Impact (" + self.unit + ")") axis.set_xticks(np.arange(len(return_per)) + 1) axis.set_xticklabels([str(per) for per in return_per]) return axis @staticmethod - def plot_waterfall(hazard, entity, haz_future, ent_future, - risk_func=risk_aai_agg, axis=None, **kwargs): + def plot_waterfall( + hazard, + entity, + haz_future, + ent_future, + risk_func=risk_aai_agg, + axis=None, + **kwargs + ): """Plot waterfall graph at future with given risk metric. Can be called before and after calc(). @@ -554,16 +688,18 @@ def plot_waterfall(hazard, entity, haz_future, ent_future, matplotlib.axes._subplots.AxesSubplot """ if ent_future.exposures.ref_year == entity.exposures.ref_year: - raise ValueError('Same reference years for future and present entities.') + raise ValueError("Same reference years for future and present entities.") present_year = entity.exposures.ref_year future_year = ent_future.exposures.ref_year - imp = ImpactCalc(entity.exposures, entity.impact_funcs, hazard)\ - .impact(assign_centroids=hazard.centr_exp_col not in entity.exposures.gdf) + imp = ImpactCalc(entity.exposures, entity.impact_funcs, hazard).impact( + assign_centroids=hazard.centr_exp_col not in entity.exposures.gdf + ) curr_risk = risk_func(imp) - imp = ImpactCalc(ent_future.exposures, ent_future.impact_funcs, haz_future)\ - .impact(assign_centroids=hazard.centr_exp_col not in ent_future.exposures.gdf) + imp = ImpactCalc( + ent_future.exposures, ent_future.impact_funcs, haz_future + ).impact(assign_centroids=hazard.centr_exp_col not in ent_future.exposures.gdf) fut_risk = risk_func(imp) if not axis: @@ -571,50 +707,100 @@ def plot_waterfall(hazard, entity, haz_future, ent_future, norm_fact, norm_name = _norm_values(curr_risk) # current situation - LOGGER.info('Risk at {:d}: {:.3e}'.format(present_year, curr_risk)) + LOGGER.info("Risk at {:d}: {:.3e}".format(present_year, curr_risk)) # changing future # socio-economic dev - imp = ImpactCalc(ent_future.exposures, ent_future.impact_funcs, hazard)\ - .impact(assign_centroids=False) + imp = ImpactCalc(ent_future.exposures, ent_future.impact_funcs, hazard).impact( + assign_centroids=False + ) risk_dev = risk_func(imp) - LOGGER.info('Risk with development at {:d}: {:.3e}'.format(future_year, risk_dev)) + LOGGER.info( + "Risk with development at {:d}: {:.3e}".format(future_year, risk_dev) + ) # socioecon + cc - LOGGER.info('Risk with development and climate change at {:d}: {:.3e}'. 
- format(future_year, fut_risk)) + LOGGER.info( + "Risk with development and climate change at {:d}: {:.3e}".format( + future_year, fut_risk + ) + ) axis.bar(1, curr_risk / norm_fact, **kwargs) - axis.text(1, curr_risk / norm_fact, str(int(round(curr_risk / norm_fact))), - horizontalalignment='center', verticalalignment='bottom', - fontsize=12, color='k') - axis.bar(2, height=(risk_dev - curr_risk) / norm_fact, - bottom=curr_risk / norm_fact, **kwargs) - axis.text(2, curr_risk / norm_fact + (risk_dev - curr_risk) / norm_fact / 2, - str(int(round((risk_dev - curr_risk) / norm_fact))), - horizontalalignment='center', verticalalignment='center', fontsize=12, color='k') - axis.bar(3, height=(fut_risk - risk_dev) / norm_fact, - bottom=risk_dev / norm_fact, **kwargs) - axis.text(3, risk_dev / norm_fact + (fut_risk - risk_dev) / norm_fact / 2, - str(int(round((fut_risk - risk_dev) / norm_fact))), - horizontalalignment='center', verticalalignment='center', fontsize=12, - color='k') + axis.text( + 1, + curr_risk / norm_fact, + str(int(round(curr_risk / norm_fact))), + horizontalalignment="center", + verticalalignment="bottom", + fontsize=12, + color="k", + ) + axis.bar( + 2, + height=(risk_dev - curr_risk) / norm_fact, + bottom=curr_risk / norm_fact, + **kwargs + ) + axis.text( + 2, + curr_risk / norm_fact + (risk_dev - curr_risk) / norm_fact / 2, + str(int(round((risk_dev - curr_risk) / norm_fact))), + horizontalalignment="center", + verticalalignment="center", + fontsize=12, + color="k", + ) + axis.bar( + 3, + height=(fut_risk - risk_dev) / norm_fact, + bottom=risk_dev / norm_fact, + **kwargs + ) + axis.text( + 3, + risk_dev / norm_fact + (fut_risk - risk_dev) / norm_fact / 2, + str(int(round((fut_risk - risk_dev) / norm_fact))), + horizontalalignment="center", + verticalalignment="center", + fontsize=12, + color="k", + ) axis.bar(4, height=fut_risk / norm_fact, **kwargs) - axis.text(4, fut_risk / norm_fact, str(int(round(fut_risk / norm_fact))), - horizontalalignment='center', verticalalignment='bottom', - fontsize=12, color='k') + axis.text( + 4, + fut_risk / norm_fact, + str(int(round(fut_risk / norm_fact))), + horizontalalignment="center", + verticalalignment="bottom", + fontsize=12, + color="k", + ) axis.set_xticks(np.arange(4) + 1) - axis.set_xticklabels(['Risk ' + str(present_year), - 'Economic \ndevelopment', - 'Climate \nchange', - 'Risk ' + str(future_year)]) - axis.set_ylabel('Impact (' + imp.unit + ' ' + norm_name + ')') - axis.set_title('Risk at {:d} and {:d}'.format(present_year, future_year)) + axis.set_xticklabels( + [ + "Risk " + str(present_year), + "Economic \ndevelopment", + "Climate \nchange", + "Risk " + str(future_year), + ] + ) + axis.set_ylabel("Impact (" + imp.unit + " " + norm_name + ")") + axis.set_title("Risk at {:d} and {:d}".format(present_year, future_year)) return axis - def plot_arrow_averted(self, axis, in_meas_names=None, accumulate=False, combine=False, - risk_func=risk_aai_agg, disc_rates=None, imp_time_depen=1, **kwargs): + def plot_arrow_averted( + self, + axis, + in_meas_names=None, + accumulate=False, + combine=False, + risk_func=risk_aai_agg, + disc_rates=None, + imp_time_depen=1, + **kwargs + ): """Plot waterfall graph with accumulated values from present to future year. Call after calc() with save_imp=True. 
@@ -651,32 +837,59 @@ def plot_arrow_averted(self, axis, in_meas_names=None, accumulate=False, combine tot_benefit = np.array([self.benefit[meas] for meas in in_meas_names]).sum() norm_fact = self.tot_climate_risk / bars[3].get_height() else: - tot_benefit = np.array([risk_func(self.imp_meas_future[NO_MEASURE]['impact']) - - risk_func(self.imp_meas_future[meas]['impact']) - for meas in in_meas_names]).sum() - norm_fact = (risk_func(self.imp_meas_future['no measure']['impact']) - / bars[3].get_height()) + tot_benefit = np.array( + [ + risk_func(self.imp_meas_future[NO_MEASURE]["impact"]) + - risk_func(self.imp_meas_future[meas]["impact"]) + for meas in in_meas_names + ] + ).sum() + norm_fact = ( + risk_func(self.imp_meas_future["no measure"]["impact"]) + / bars[3].get_height() + ) if combine: try: - LOGGER.info('Combining measures %s', in_meas_names) - all_meas = self.combine_measures(in_meas_names, 'combine', - colors.to_rgba('black'), disc_rates, - imp_time_depen, risk_func) + LOGGER.info("Combining measures %s", in_meas_names) + all_meas = self.combine_measures( + in_meas_names, + "combine", + colors.to_rgba("black"), + disc_rates, + imp_time_depen, + risk_func, + ) except KeyError: - LOGGER.warning('Use calc() with save_imp=True to get a more accurate ' - 'approximation of total averted damage,') + LOGGER.warning( + "Use calc() with save_imp=True to get a more accurate " + "approximation of total averted damage," + ) if accumulate: - tot_benefit = all_meas.benefit['combine'] + tot_benefit = all_meas.benefit["combine"] else: - tot_benefit = risk_func(all_meas.imp_meas_future[NO_MEASURE]['impact']) - \ - risk_func(all_meas.imp_meas_future['combine']['impact']) - - self._plot_averted_arrow(axis, bars[3], tot_benefit, bars[3].get_height() * norm_fact, - norm_fact, **kwargs) + tot_benefit = risk_func( + all_meas.imp_meas_future[NO_MEASURE]["impact"] + ) - risk_func(all_meas.imp_meas_future["combine"]["impact"]) + + self._plot_averted_arrow( + axis, + bars[3], + tot_benefit, + bars[3].get_height() * norm_fact, + norm_fact, + **kwargs + ) - def plot_waterfall_accumulated(self, hazard, entity, ent_future, - risk_func=risk_aai_agg, imp_time_depen=1, - axis=None, **kwargs): + def plot_waterfall_accumulated( + self, + hazard, + entity, + ent_future, + risk_func=risk_aai_agg, + imp_time_depen=1, + axis=None, + **kwargs + ): """Plot waterfall graph with accumulated values from present to future year. Call after calc() with save_imp=True. Provide same inputs as in calc. 
@@ -702,72 +915,131 @@ def plot_waterfall_accumulated(self, hazard, entity, ent_future, matplotlib.axes._subplots.AxesSubplot """ if not self.imp_meas_future or not self.imp_meas_present: - raise ValueError('Compute CostBenefit.calc() first') + raise ValueError("Compute CostBenefit.calc() first") if ent_future.exposures.ref_year == entity.exposures.ref_year: - raise ValueError('Same reference years for future and present entities.') + raise ValueError("Same reference years for future and present entities.") self.present_year = entity.exposures.ref_year self.future_year = ent_future.exposures.ref_year # current situation - curr_risk = self.imp_meas_present[NO_MEASURE]['risk'] + curr_risk = self.imp_meas_present[NO_MEASURE]["risk"] time_dep = self._time_dependency_array() - risk_curr = self._npv_unaverted_impact(curr_risk, entity.disc_rates, - time_dep) - LOGGER.info('Current total risk at {:d}: {:.3e}'.format(self.future_year, - risk_curr)) + risk_curr = self._npv_unaverted_impact(curr_risk, entity.disc_rates, time_dep) + LOGGER.info( + "Current total risk at {:d}: {:.3e}".format(self.future_year, risk_curr) + ) # changing future time_dep = self._time_dependency_array(imp_time_depen) # socio-economic dev - imp = ImpactCalc(ent_future.exposures, ent_future.impact_funcs, hazard)\ - .impact(assign_centroids=False) - risk_dev = self._npv_unaverted_impact(risk_func(imp), entity.disc_rates, - time_dep, curr_risk) - LOGGER.info('Total risk with development at {:d}: {:.3e}'.format( - self.future_year, risk_dev)) + imp = ImpactCalc(ent_future.exposures, ent_future.impact_funcs, hazard).impact( + assign_centroids=False + ) + risk_dev = self._npv_unaverted_impact( + risk_func(imp), entity.disc_rates, time_dep, curr_risk + ) + LOGGER.info( + "Total risk with development at {:d}: {:.3e}".format( + self.future_year, risk_dev + ) + ) # socioecon + cc - risk_tot = self._npv_unaverted_impact(self.imp_meas_future[NO_MEASURE]['risk'], - entity.disc_rates, time_dep, curr_risk) - LOGGER.info('Total risk with development and climate change at {:d}: {:.3e}'. 
- format(self.future_year, risk_tot)) + risk_tot = self._npv_unaverted_impact( + self.imp_meas_future[NO_MEASURE]["risk"], + entity.disc_rates, + time_dep, + curr_risk, + ) + LOGGER.info( + "Total risk with development and climate change at {:d}: {:.3e}".format( + self.future_year, risk_tot + ) + ) # plot if not axis: _, axis = plt.subplots(1, 1) norm_fact, norm_name = _norm_values(curr_risk) axis.bar(1, risk_curr / norm_fact, **kwargs) - axis.text(1, risk_curr / norm_fact, str(int(round(risk_curr / norm_fact))), - horizontalalignment='center', verticalalignment='bottom', - fontsize=12, color='k') - axis.bar(2, height=(risk_dev - risk_curr) / norm_fact, - bottom=risk_curr / norm_fact, **kwargs) - axis.text(2, risk_curr / norm_fact + (risk_dev - risk_curr) / norm_fact / 2, - str(int(round((risk_dev - risk_curr) / norm_fact))), - horizontalalignment='center', verticalalignment='center', fontsize=12, color='k') - axis.bar(3, height=(risk_tot - risk_dev) / norm_fact, - bottom=risk_dev / norm_fact, **kwargs) - axis.text(3, risk_dev / norm_fact + (risk_tot - risk_dev) / norm_fact / 2, - str(int(round((risk_tot - risk_dev) / norm_fact))), - horizontalalignment='center', verticalalignment='center', fontsize=12, color='k') + axis.text( + 1, + risk_curr / norm_fact, + str(int(round(risk_curr / norm_fact))), + horizontalalignment="center", + verticalalignment="bottom", + fontsize=12, + color="k", + ) + axis.bar( + 2, + height=(risk_dev - risk_curr) / norm_fact, + bottom=risk_curr / norm_fact, + **kwargs + ) + axis.text( + 2, + risk_curr / norm_fact + (risk_dev - risk_curr) / norm_fact / 2, + str(int(round((risk_dev - risk_curr) / norm_fact))), + horizontalalignment="center", + verticalalignment="center", + fontsize=12, + color="k", + ) + axis.bar( + 3, + height=(risk_tot - risk_dev) / norm_fact, + bottom=risk_dev / norm_fact, + **kwargs + ) + axis.text( + 3, + risk_dev / norm_fact + (risk_tot - risk_dev) / norm_fact / 2, + str(int(round((risk_tot - risk_dev) / norm_fact))), + horizontalalignment="center", + verticalalignment="center", + fontsize=12, + color="k", + ) axis.bar(4, height=risk_tot / norm_fact, **kwargs) - axis.text(4, risk_tot / norm_fact, str(int(round(risk_tot / norm_fact))), - horizontalalignment='center', verticalalignment='bottom', - fontsize=12, color='k') + axis.text( + 4, + risk_tot / norm_fact, + str(int(round(risk_tot / norm_fact))), + horizontalalignment="center", + verticalalignment="bottom", + fontsize=12, + color="k", + ) axis.set_xticks(np.arange(4) + 1) - axis.set_xticklabels(['Risk ' + str(self.present_year), - 'Economic \ndevelopment', - 'Climate \nchange', - 'Risk ' + str(self.future_year)]) - axis.set_ylabel('Impact (' + self.unit + ' ' + norm_name + ')') - axis.set_title('Total accumulated impact from {:d} to {:d}'.format( - self.present_year, self.future_year)) + axis.set_xticklabels( + [ + "Risk " + str(self.present_year), + "Economic \ndevelopment", + "Climate \nchange", + "Risk " + str(self.future_year), + ] + ) + axis.set_ylabel("Impact (" + self.unit + " " + norm_name + ")") + axis.set_title( + "Total accumulated impact from {:d} to {:d}".format( + self.present_year, self.future_year + ) + ) return axis - def _calc_impact_measures(self, hazard, exposures, meas_set, imp_fun_set, - when='future', risk_func=risk_aai_agg, save_imp=False): + def _calc_impact_measures( + self, + hazard, + exposures, + meas_set, + imp_fun_set, + when="future", + risk_func=risk_aai_agg, + save_imp=False, + ): """Compute impact of each measure and transform it to input risk measurement. 
Set reference year from exposures value. @@ -792,31 +1064,37 @@ def _calc_impact_measures(self, hazard, exposures, meas_set, imp_fun_set, impact_meas = dict() # compute impact without measures - LOGGER.debug('%s impact with no measure.', when) - imp_tmp = ImpactCalc(exposures, imp_fun_set, hazard).impact(assign_centroids=False) + LOGGER.debug("%s impact with no measure.", when) + imp_tmp = ImpactCalc(exposures, imp_fun_set, hazard).impact( + assign_centroids=False + ) impact_meas[NO_MEASURE] = dict() - impact_meas[NO_MEASURE]['cost'] = (0, 0) - impact_meas[NO_MEASURE]['risk'] = risk_func(imp_tmp) - impact_meas[NO_MEASURE]['risk_transf'] = 0.0 - impact_meas[NO_MEASURE]['efc'] = imp_tmp.calc_freq_curve() + impact_meas[NO_MEASURE]["cost"] = (0, 0) + impact_meas[NO_MEASURE]["risk"] = risk_func(imp_tmp) + impact_meas[NO_MEASURE]["risk_transf"] = 0.0 + impact_meas[NO_MEASURE]["efc"] = imp_tmp.calc_freq_curve() if save_imp: - impact_meas[NO_MEASURE]['impact'] = imp_tmp + impact_meas[NO_MEASURE]["impact"] = imp_tmp # compute impact for each measure for measure in meas_set.get_measure(hazard.haz_type): - LOGGER.debug('%s impact of measure %s.', when, measure.name) - imp_tmp, risk_transf = measure.calc_impact(exposures, imp_fun_set, hazard, - assign_centroids=False) + LOGGER.debug("%s impact of measure %s.", when, measure.name) + imp_tmp, risk_transf = measure.calc_impact( + exposures, imp_fun_set, hazard, assign_centroids=False + ) impact_meas[measure.name] = dict() - impact_meas[measure.name]['cost'] = (measure.cost, measure.risk_transf_cost_factor) - impact_meas[measure.name]['risk'] = risk_func(imp_tmp) - impact_meas[measure.name]['risk_transf'] = risk_func(risk_transf) - impact_meas[measure.name]['efc'] = imp_tmp.calc_freq_curve() + impact_meas[measure.name]["cost"] = ( + measure.cost, + measure.risk_transf_cost_factor, + ) + impact_meas[measure.name]["risk"] = risk_func(imp_tmp) + impact_meas[measure.name]["risk_transf"] = risk_func(risk_transf) + impact_meas[measure.name]["efc"] = imp_tmp.calc_freq_curve() if save_imp: - impact_meas[measure.name]['impact'] = imp_tmp + impact_meas[measure.name]["impact"] = imp_tmp # if present reference provided save it - if when == 'future': + if when == "future": self.imp_meas_future = impact_meas else: self.imp_meas_present = impact_meas @@ -831,15 +1109,20 @@ def _calc_cost_benefit(self, disc_rates, imp_time_depen=None): imp_time_depen : float, optional parameter which represent time evolution of impact """ - LOGGER.info('Computing cost benefit from years %s to %s.', - str(self.present_year), str(self.future_year)) + LOGGER.info( + "Computing cost benefit from years %s to %s.", + str(self.present_year), + str(self.future_year), + ) if self.future_year - self.present_year + 1 <= 0: - raise ValueError('Wrong year range: %s - %s.' - % (str(self.present_year), str(self.future_year))) + raise ValueError( + "Wrong year range: %s - %s." 
+ % (str(self.present_year), str(self.future_year)) + ) if not self.imp_meas_future: - raise ValueError('Compute first _calc_impact_measures') + raise ValueError("Compute first _calc_impact_measures") time_dep = self._time_dependency_array(imp_time_depen) @@ -849,18 +1132,22 @@ def _calc_cost_benefit(self, disc_rates, imp_time_depen=None): # npv of the full unaverted damages if self.imp_meas_present: self.tot_climate_risk = self._npv_unaverted_impact( - self.imp_meas_future[NO_MEASURE]['risk'], - disc_rates, time_dep, self.imp_meas_present[NO_MEASURE]['risk']) + self.imp_meas_future[NO_MEASURE]["risk"], + disc_rates, + time_dep, + self.imp_meas_present[NO_MEASURE]["risk"], + ) else: self.tot_climate_risk = self._npv_unaverted_impact( - self.imp_meas_future[NO_MEASURE]['risk'], - disc_rates, time_dep) + self.imp_meas_future[NO_MEASURE]["risk"], disc_rates, time_dep + ) continue self._cost_ben_one(meas_name, meas_val, disc_rates, time_dep) - def _cost_ben_one(self, meas_name, meas_val, disc_rates, time_dep, - ini_state=NO_MEASURE): + def _cost_ben_one( + self, meas_name, meas_val, disc_rates, time_dep, ini_state=NO_MEASURE + ): """Compute cost and benefit for given measure with time dependency Parameters @@ -878,28 +1165,33 @@ def _cost_ben_one(self, meas_name, meas_val, disc_rates, time_dep, name of the measure to which to compute benefit. Default: 'no measure' """ - fut_benefit = self.imp_meas_future[ini_state]['risk'] - meas_val['risk'] - fut_risk_tr = meas_val['risk_transf'] + fut_benefit = self.imp_meas_future[ini_state]["risk"] - meas_val["risk"] + fut_risk_tr = meas_val["risk_transf"] if self.imp_meas_present: - pres_benefit = self.imp_meas_present[ini_state]['risk'] - \ - self.imp_meas_present[meas_name]['risk'] + pres_benefit = ( + self.imp_meas_present[ini_state]["risk"] + - self.imp_meas_present[meas_name]["risk"] + ) meas_ben = pres_benefit + (fut_benefit - pres_benefit) * time_dep - pres_risk_tr = self.imp_meas_present[meas_name]['risk_transf'] + pres_risk_tr = self.imp_meas_present[meas_name]["risk_transf"] risk_tr = pres_risk_tr + (fut_risk_tr - pres_risk_tr) * time_dep else: meas_ben = time_dep * fut_benefit risk_tr = time_dep * fut_risk_tr # discount - meas_ben = disc_rates.net_present_value(self.present_year, - self.future_year, meas_ben) - risk_tr = disc_rates.net_present_value(self.present_year, - self.future_year, risk_tr) + meas_ben = disc_rates.net_present_value( + self.present_year, self.future_year, meas_ben + ) + risk_tr = disc_rates.net_present_value( + self.present_year, self.future_year, risk_tr + ) self.benefit[meas_name] = meas_ben - with np.errstate(divide='ignore'): - self.cost_ben_ratio[meas_name] = (meas_val['cost'][0] - + meas_val['cost'][1] * risk_tr) / meas_ben + with np.errstate(divide="ignore"): + self.cost_ben_ratio[meas_name] = ( + meas_val["cost"][0] + meas_val["cost"][1] * risk_tr + ) / meas_ben def _time_dependency_array(self, imp_time_depen=None): """Construct time dependency array. 
Each year contains a value in [0,1] @@ -917,14 +1209,16 @@ def _time_dependency_array(self, imp_time_depen=None): """ n_years = self.future_year - self.present_year + 1 if imp_time_depen: - time_dep = np.arange(n_years)**imp_time_depen / \ - (n_years - 1)**imp_time_depen + time_dep = ( + np.arange(n_years) ** imp_time_depen / (n_years - 1) ** imp_time_depen + ) else: time_dep = np.ones(n_years) return time_dep - def _npv_unaverted_impact(self, risk_future, disc_rates, time_dep, - risk_present=None): + def _npv_unaverted_impact( + self, risk_future, disc_rates, time_dep, risk_present=None + ): """Net present value of total unaverted damages Parameters @@ -944,16 +1238,18 @@ def _npv_unaverted_impact(self, risk_future, disc_rates, time_dep, """ if risk_present: tot_climate_risk = risk_present + (risk_future - risk_present) * time_dep - tot_climate_risk = disc_rates.net_present_value(self.present_year, - self.future_year, - tot_climate_risk) + tot_climate_risk = disc_rates.net_present_value( + self.present_year, self.future_year, tot_climate_risk + ) else: - tot_climate_risk = disc_rates.net_present_value(self.present_year, - self.future_year, - time_dep * risk_future) + tot_climate_risk = disc_rates.net_present_value( + self.present_year, self.future_year, time_dep * risk_future + ) return tot_climate_risk - def _combine_imp_meas(self, new_cb, in_meas_names, new_name, risk_func, when='future'): + def _combine_imp_meas( + self, new_cb, in_meas_names, new_name, risk_func, when="future" + ): """Compute impacts combined measures assuming they are independent, i.e. their benefit can be added. Costs are also added. For the new measure the dictionary imp_meas_future if when='future' and imp_meas_present @@ -973,59 +1269,90 @@ def _combine_imp_meas(self, new_cb, in_meas_names, new_name, risk_func, when='fu to fill (imp_meas_present or imp_meas_future respectively) default: 'future' """ - if when == 'future': + if when == "future": imp_dict = self.imp_meas_future new_imp_dict = new_cb.imp_meas_future else: imp_dict = self.imp_meas_present new_imp_dict = new_cb.imp_meas_present - sum_ben = np.sum([ - imp_dict[NO_MEASURE]['impact'].at_event - imp_dict[name]['impact'].at_event - for name in in_meas_names - ], axis=0) - new_imp = copy.deepcopy(imp_dict[in_meas_names[0]]['impact']) - new_imp.at_event = np.maximum(imp_dict[NO_MEASURE]['impact'].at_event - - sum_ben, 0) + sum_ben = np.sum( + [ + imp_dict[NO_MEASURE]["impact"].at_event + - imp_dict[name]["impact"].at_event + for name in in_meas_names + ], + axis=0, + ) + new_imp = copy.deepcopy(imp_dict[in_meas_names[0]]["impact"]) + new_imp.at_event = np.maximum( + imp_dict[NO_MEASURE]["impact"].at_event - sum_ben, 0 + ) new_imp.eai_exp = np.array([]) new_imp.aai_agg = sum(new_imp.at_event * new_imp.frequency) new_imp_dict[new_name] = dict() - new_imp_dict[new_name]['impact'] = new_imp - new_imp_dict[new_name]['efc'] = new_imp.calc_freq_curve() - new_imp_dict[new_name]['risk'] = risk_func(new_imp) - new_imp_dict[new_name]['cost'] = ( - np.array([imp_dict[name]['cost'][0] for name in in_meas_names]).sum(), - 1) - new_imp_dict[new_name]['risk_transf'] = 0 + new_imp_dict[new_name]["impact"] = new_imp + new_imp_dict[new_name]["efc"] = new_imp.calc_freq_curve() + new_imp_dict[new_name]["risk"] = risk_func(new_imp) + new_imp_dict[new_name]["cost"] = ( + np.array([imp_dict[name]["cost"][0] for name in in_meas_names]).sum(), + 1, + ) + new_imp_dict[new_name]["risk_transf"] = 0 def _print_results(self): """Print table with main results""" norm_fact, norm_name = 
_norm_values(np.array(list(self.benefit.values())).max()) - norm_name = '(' + self.unit + ' ' + norm_name + ')' + norm_name = "(" + self.unit + " " + norm_name + ")" table = [] - headers = ['Measure', 'Cost ' + norm_name, 'Benefit ' + norm_name, 'Benefit/Cost'] + headers = [ + "Measure", + "Cost " + norm_name, + "Benefit " + norm_name, + "Benefit/Cost", + ] for meas_name in self.benefit: - if not np.isnan(self.cost_ben_ratio[meas_name]) and \ - not np.isinf(self.cost_ben_ratio[meas_name]): - cost = self.cost_ben_ratio[meas_name] * self.benefit[meas_name] / norm_fact + if not np.isnan(self.cost_ben_ratio[meas_name]) and not np.isinf( + self.cost_ben_ratio[meas_name] + ): + cost = ( + self.cost_ben_ratio[meas_name] * self.benefit[meas_name] / norm_fact + ) else: - cost = self.imp_meas_future[meas_name]['cost'][0] / norm_fact - table.append([meas_name, cost, self.benefit[meas_name] / norm_fact, - 1 / self.cost_ben_ratio[meas_name]]) + cost = self.imp_meas_future[meas_name]["cost"][0] / norm_fact + table.append( + [ + meas_name, + cost, + self.benefit[meas_name] / norm_fact, + 1 / self.cost_ben_ratio[meas_name], + ] + ) print() print(tabulate(table, headers, tablefmt="simple")) table = [] - table.append(['Total climate risk:', - self.tot_climate_risk / norm_fact, norm_name]) - table.append(['Average annual risk:', - self.imp_meas_future[NO_MEASURE]['risk'] / norm_fact, norm_name]) - table.append(['Residual risk:', - (self.tot_climate_risk - - np.array(list(self.benefit.values())).sum()) / norm_fact, norm_name]) + table.append( + ["Total climate risk:", self.tot_climate_risk / norm_fact, norm_name] + ) + table.append( + [ + "Average annual risk:", + self.imp_meas_future[NO_MEASURE]["risk"] / norm_fact, + norm_name, + ] + ) + table.append( + [ + "Residual risk:", + (self.tot_climate_risk - np.array(list(self.benefit.values())).sum()) + / norm_fact, + norm_name, + ] + ) print() print(tabulate(table, tablefmt="simple")) @@ -1047,8 +1374,8 @@ def _plot_list_cost_ben(cb_list, axis=None, **kwargs): ------- matplotlib.axes._subplots.AxesSubplot """ - if 'alpha' not in kwargs: - kwargs['alpha'] = 0.5 + if "alpha" not in kwargs: + kwargs["alpha"] = 0.5 norm_fact = [_norm_values(cb_res.tot_climate_risk)[0] for cb_res in cb_list] norm_fact = np.array(norm_fact).mean() _, norm_name = _norm_values(norm_fact + 0.01) @@ -1056,40 +1383,67 @@ def _plot_list_cost_ben(cb_list, axis=None, **kwargs): if not axis: _, axis = plt.subplots(1, 1) m_names = list(cb_list[0].cost_ben_ratio.keys()) - sort_cb = np.argsort(np.array([cb_list[0].cost_ben_ratio[name] for name in m_names])) + sort_cb = np.argsort( + np.array([cb_list[0].cost_ben_ratio[name] for name in m_names]) + ) xy_lim = [0, 0] for i_cb, cb_res in enumerate(cb_list): xmin = 0 for meas_id in sort_cb: meas_n = m_names[meas_id] - axis.add_patch(Rectangle((xmin, 0), - cb_res.benefit[meas_n] / norm_fact, - 1 / cb_res.cost_ben_ratio[meas_n], - color=cb_res.color_rgb[meas_n], **kwargs)) + axis.add_patch( + Rectangle( + (xmin, 0), + cb_res.benefit[meas_n] / norm_fact, + 1 / cb_res.cost_ben_ratio[meas_n], + color=cb_res.color_rgb[meas_n], + **kwargs + ) + ) if i_cb == 0: - axis.text(xmin + (cb_res.benefit[meas_n] / norm_fact) / 2, - 0, ' ' + meas_n, horizontalalignment='center', - verticalalignment='bottom', rotation=90, fontsize=12) + axis.text( + xmin + (cb_res.benefit[meas_n] / norm_fact) / 2, + 0, + " " + meas_n, + horizontalalignment="center", + verticalalignment="bottom", + rotation=90, + fontsize=12, + ) xmin += cb_res.benefit[meas_n] / norm_fact - xy_lim[0] = 
max(xy_lim[0], - max(int(cb_res.tot_climate_risk / norm_fact), - np.array(list(cb_res.benefit.values())).sum() / norm_fact)) + xy_lim[0] = max( + xy_lim[0], + max( + int(cb_res.tot_climate_risk / norm_fact), + np.array(list(cb_res.benefit.values())).sum() / norm_fact, + ), + ) try: - with np.errstate(divide='ignore'): - xy_lim[1] = max(xy_lim[1], int(1 / cb_res.cost_ben_ratio[ - m_names[sort_cb[0]]]) + 1) + with np.errstate(divide="ignore"): + xy_lim[1] = max( + xy_lim[1], + int(1 / cb_res.cost_ben_ratio[m_names[sort_cb[0]]]) + 1, + ) except (ValueError, OverflowError): - xy_lim[1] = max(xy_lim[1], - int(1 / np.array(list(cb_res.cost_ben_ratio.values())).max()) + 1) + xy_lim[1] = max( + xy_lim[1], + int(1 / np.array(list(cb_res.cost_ben_ratio.values())).max()) + 1, + ) axis.set_xlim(0, xy_lim[0]) axis.set_ylim(0, xy_lim[1]) - axis.set_xlabel('NPV averted damage over ' + - str(cb_list[0].future_year - cb_list[0].present_year + 1) + - ' years (' + cb_list[0].unit + ' ' + norm_name + ')') - axis.set_ylabel('Benefit/Cost ratio') + axis.set_xlabel( + "NPV averted damage over " + + str(cb_list[0].future_year - cb_list[0].present_year + 1) + + " years (" + + cb_list[0].unit + + " " + + norm_name + + ")" + ) + axis.set_ylabel("Benefit/Cost ratio") return axis @staticmethod @@ -1113,20 +1467,33 @@ def _plot_averted_arrow(axis, bar_4, tot_benefit, risk_tot, norm_fact, **kwargs) arguments for bar matplotlib function, e.g. alpha=0.5 """ bar_bottom, bar_top = bar_4.get_bbox().get_points() - axis.text(bar_top[0] - (bar_top[0] - bar_bottom[0]) / 2, bar_top[1], - "Averted", ha="center", va="top", rotation=270, size=15) + axis.text( + bar_top[0] - (bar_top[0] - bar_bottom[0]) / 2, + bar_top[1], + "Averted", + ha="center", + va="top", + rotation=270, + size=15, + ) arrow_len = min(tot_benefit / norm_fact, risk_tot / norm_fact) - if 'color' not in kwargs: - kwargs['color'] = 'k' - if 'alpha' not in kwargs: - kwargs['alpha'] = 0.4 - if 'mutation_scale' not in kwargs: - kwargs['mutation_scale'] = 100 - axis.add_patch(FancyArrowPatch( - (bar_top[0] - (bar_top[0] - bar_bottom[0]) / 2, bar_top[1]), - (bar_top[0] - (bar_top[0] - bar_bottom[0]) / 2, risk_tot / norm_fact - arrow_len), - **kwargs)) + if "color" not in kwargs: + kwargs["color"] = "k" + if "alpha" not in kwargs: + kwargs["alpha"] = 0.4 + if "mutation_scale" not in kwargs: + kwargs["mutation_scale"] = 100 + axis.add_patch( + FancyArrowPatch( + (bar_top[0] - (bar_top[0] - bar_bottom[0]) / 2, bar_top[1]), + ( + bar_top[0] - (bar_top[0] - bar_bottom[0]) / 2, + risk_tot / norm_fact - arrow_len, + ), + **kwargs + ) + ) def _print_risk_transfer(self, layer, layer_no, cost_fix, cost_factor): """Print comparative of risk transfer with and without measure @@ -1139,20 +1506,32 @@ def _print_risk_transfer(self, layer, layer_no, cost_fix, cost_factor): expected insurance layer without measure """ norm_fact, norm_name = _norm_values(np.array(list(self.benefit.values())).max()) - norm_name = '(' + self.unit + ' ' + norm_name + ')' - headers = ['Risk transfer', 'Expected damage in \n insurance layer ' + - norm_name, 'Price ' + norm_name] - table = [['without measure', layer_no / norm_fact, - (cost_fix + layer_no * cost_factor) / norm_fact], - ['with measure', layer / norm_fact, - (cost_fix + layer * cost_factor) / norm_fact]] + norm_name = "(" + self.unit + " " + norm_name + ")" + headers = [ + "Risk transfer", + "Expected damage in \n insurance layer " + norm_name, + "Price " + norm_name, + ] + table = [ + [ + "without measure", + layer_no / norm_fact, + (cost_fix + 
layer_no * cost_factor) / norm_fact, + ], + [ + "with measure", + layer / norm_fact, + (cost_fix + layer * cost_factor) / norm_fact, + ], + ] print() print(tabulate(table, headers, tablefmt="simple")) print() @staticmethod def _print_npv(): - print('Net Present Values') + print("Net Present Values") + def _norm_values(value): """Compute normalization value and name @@ -1166,15 +1545,15 @@ def _norm_values(value): norm_fact: float norm_name: float """ - norm_fact = 1. - norm_name = '' + norm_fact = 1.0 + norm_name = "" if value / 1.0e9 > 1: norm_fact = 1.0e9 - norm_name = 'bn' + norm_name = "bn" elif value / 1.0e6 > 1: norm_fact = 1.0e6 - norm_name = 'm' + norm_name = "m" elif value / 1.0e3 > 1: norm_fact = 1.0e3 - norm_name = 'k' + norm_name = "k" return norm_fact, norm_name diff --git a/climada/engine/forecast.py b/climada/engine/forecast.py index 1be74cb4c..f123a67ed 100644 --- a/climada/engine/forecast.py +++ b/climada/engine/forecast.py @@ -22,29 +22,30 @@ __all__ = ["Forecast"] -import logging import datetime as dt +import logging from typing import Dict, Optional -import numpy as np -import matplotlib.pyplot as plt -from matplotlib.patches import Patch -from matplotlib.ticker import PercentFormatter, ScalarFormatter -from matplotlib.colors import ListedColormap, BoundaryNorm + import cartopy.crs as ccrs -from matplotlib import colormaps as cm +import matplotlib.pyplot as plt +import numpy as np import pyproj import shapely from cartopy.io import shapereader +from matplotlib import colormaps as cm +from matplotlib.colors import BoundaryNorm, ListedColormap +from matplotlib.patches import Patch +from matplotlib.ticker import PercentFormatter, ScalarFormatter from mpl_toolkits.axes_grid1 import make_axes_locatable -from climada.hazard import Hazard +import climada.util.coordinates as u_coord +import climada.util.plot as u_plot +from climada.engine import ImpactCalc from climada.entity import Exposures from climada.entity.impact_funcs import ImpactFuncSet -from climada.engine import ImpactCalc -import climada.util.plot as u_plot +from climada.hazard import Hazard from climada.util.config import CONFIG from climada.util.files_handler import to_list -import climada.util.coordinates as u_coord from climada.util.value_representation import ( value_to_monetary_unit as u_value_to_monetary_unit, ) @@ -140,7 +141,7 @@ def __init__( exposure: Exposures, impact_funcs: ImpactFuncSet, haz_model: str = "NWP", - exposure_name: Optional[str] = None + exposure_name: Optional[str] = None, ): """Initialization with hazard, exposure and vulnerability. 
@@ -308,8 +309,9 @@ def calc(self, force_reassign=False): if self.hazard: self.exposure.assign_centroids(self.hazard[0], overwrite=force_reassign) for ind_i, haz_i in enumerate(self.hazard): - self._impact[ind_i] = ImpactCalc(self.exposure, self.vulnerability, haz_i)\ - .impact(save_mat=True, assign_centroids=False) + self._impact[ind_i] = ImpactCalc( + self.exposure, self.vulnerability, haz_i + ).impact(save_mat=True, assign_centroids=False) def plot_imp_map( self, @@ -323,7 +325,7 @@ def plot_imp_map( figsize=(9, 13), adapt_fontsize=True, ): - """ plot a map of the impacts + """plot a map of the impacts Parameters ---------- @@ -378,7 +380,11 @@ def plot_imp_map( "run_start": ( run_datetime.strftime("%d.%m.%Y %HUTC +") + lead_time_str + "d" ), - "explain_text": "mean building damage caused by wind" if explain_str is None else explain_str, + "explain_text": ( + "mean building damage caused by wind" + if explain_str is None + else explain_str + ), "model_text": "CLIMADA IMPACT", } fig, axes = self._plot_imp_map( @@ -539,7 +545,7 @@ def plot_hist( close_fig=False, figsize=(9, 8), ): - """ plot histogram of the forecasted impacts all ensemble members + """plot histogram of the forecasted impacts all ensemble members Parameters ---------- @@ -618,7 +624,7 @@ def plot_hist( axes.xaxis.set_ticks(x_ticks) axes.xaxis.set_ticklabels(x_ticklabels) plt.xticks(rotation=15, horizontalalignment="right") - plt.xlim([(10 ** -0.25) * bins[0], (10 ** 0.25) * bins[-1]]) + plt.xlim([(10**-0.25) * bins[0], (10**0.25) * bins[-1]]) lead_time_str = "{:.0f}".format( self.lead_time(run_datetime).days @@ -629,7 +635,9 @@ def plot_hist( "run_start": ( run_datetime.strftime("%d.%m.%Y %HUTC +") + lead_time_str + "d" ), - "explain_text": ("total building damage") if explain_str is None else explain_str, + "explain_text": ( + ("total building damage") if explain_str is None else explain_str + ), "model_text": "CLIMADA IMPACT", } title_position = { @@ -673,7 +681,8 @@ def plot_hist( 0.85, "mean impact:\n " + self._number_to_str(self._impact[haz_ind].at_event.mean()) - + ' ' + self._impact[haz_ind].unit, + + " " + + self._impact[haz_ind].unit, horizontalalignment="center", verticalalignment="center", transform=axes.transAxes, @@ -780,10 +789,10 @@ def plot_exceedence_prob( run_datetime.strftime("%d.%m.%Y %HUTC +") + lead_time_str + "d" ), "explain_text": ( - "threshold: " + str(threshold) + " " + self._impact[haz_ind].unit - ) - if explain_str is None - else explain_str, + ("threshold: " + str(threshold) + " " + self._impact[haz_ind].unit) + if explain_str is None + else explain_str + ), "model_text": "Exceedance probability map", } cbar_label = "probabilty of reaching threshold" @@ -1102,7 +1111,9 @@ def _plot_warn( decision_dict_functions[aggregation] = np.mean else: raise ValueError( - "Parameter " + aggregation + " of " + "Parameter " + + aggregation + + " of " + "Forecast.plot_warn_map() must eiter be " + "a float between [0..1], which " + "specifys a quantile. or 'sum' or 'mean'." diff --git a/climada/engine/impact.py b/climada/engine/impact.py index f357538e5..58292ab9c 100644 --- a/climada/engine/impact.py +++ b/climada/engine/impact.py @@ -19,42 +19,43 @@ Define Impact and ImpactFreqCurve classes. 
""" -__all__ = ['ImpactFreqCurve', 'Impact'] +__all__ = ["ImpactFreqCurve", "Impact"] -from dataclasses import dataclass, field -import logging import copy import csv -import warnings import datetime as dt -from itertools import zip_longest -from typing import Any, Iterable, Union +import logging +import warnings from collections.abc import Collection +from dataclasses import dataclass, field +from itertools import zip_longest from pathlib import Path +from typing import Any, Iterable, Union import contextily as ctx -import numpy as np -from scipy import sparse -import matplotlib.pyplot as plt +import h5py import matplotlib.animation as animation +import matplotlib.pyplot as plt +import numpy as np import pandas as pd import xlsxwriter -from tqdm import tqdm -import h5py from pyproj import CRS as pyprojCRS from rasterio.crs import CRS as rasterioCRS # pylint: disable=no-name-in-module +from scipy import sparse +from tqdm import tqdm -from climada.entity import Exposures -from climada import CONFIG -from climada.util.constants import DEF_CRS, CMAP_IMPACT, DEF_FREQ_UNIT import climada.util.coordinates as u_coord import climada.util.dates_times as u_dt import climada.util.plot as u_plot +from climada import CONFIG +from climada.entity import Exposures +from climada.util.constants import CMAP_IMPACT, DEF_CRS, DEF_FREQ_UNIT from climada.util.select import get_attributes_with_matching_dimension LOGGER = logging.getLogger(__name__) -class Impact(): + +class Impact: """Impact definition. Compute from an entity (exposures and impact functions) and hazard. @@ -91,21 +92,23 @@ class Impact(): the hazard type of the hazard """ - def __init__(self, - event_id=None, - event_name=None, - date=None, - frequency=None, - frequency_unit=DEF_FREQ_UNIT, - coord_exp=None, - crs=DEF_CRS, - eai_exp=None, - at_event=None, - tot_value=0, - aai_agg=0, - unit='', - imp_mat=None, - haz_type=''): + def __init__( + self, + event_id=None, + event_name=None, + date=None, + frequency=None, + frequency_unit=DEF_FREQ_UNIT, + coord_exp=None, + crs=DEF_CRS, + eai_exp=None, + at_event=None, + tot_value=0, + aai_agg=0, + unit="", + imp_mat=None, + haz_type="", + ): """ Init Impact object @@ -152,7 +155,7 @@ def __init__(self, self.crs = crs.to_wkt() if isinstance(crs, (pyprojCRS, rasterioCRS)) else crs self.eai_exp = np.array([], float) if eai_exp is None else eai_exp self.at_event = np.array([], float) if at_event is None else at_event - self.frequency = np.array([],float) if frequency is None else frequency + self.frequency = np.array([], float) if frequency is None else frequency self.frequency_unit = frequency_unit self._tot_value = tot_value self.aai_agg = aai_agg @@ -160,52 +163,65 @@ def __init__(self, if len(self.event_id) != len(self.event_name): raise AttributeError( - f'Hazard event ids {len(self.event_id)} and event names' - f' {len(self.event_name)} are not of the same length') + f"Hazard event ids {len(self.event_id)} and event names" + f" {len(self.event_name)} are not of the same length" + ) if len(self.event_id) != len(self.date): raise AttributeError( - f'Hazard event ids {len(self.event_id)} and event dates' - f' {len(self.date)} are not of the same length') + f"Hazard event ids {len(self.event_id)} and event dates" + f" {len(self.date)} are not of the same length" + ) if len(self.event_id) != len(self.frequency): raise AttributeError( - f'Hazard event ids {len(self.event_id)} and event frequency' - f' {len(self.frequency)} are not of the same length') + f"Hazard event ids {len(self.event_id)} and event 
frequency" + f" {len(self.frequency)} are not of the same length" + ) if len(self.event_id) != len(self.at_event): raise AttributeError( - f'Number of hazard event ids {len(self.event_id)} is different ' - f'from number of at_event values {len(self.at_event)}') + f"Number of hazard event ids {len(self.event_id)} is different " + f"from number of at_event values {len(self.at_event)}" + ) if len(self.coord_exp) != len(self.eai_exp): - raise AttributeError('Number of exposures points is different from' - 'number of eai_exp values') + raise AttributeError( + "Number of exposures points is different from" + "number of eai_exp values" + ) if imp_mat is not None: self.imp_mat = imp_mat if self.imp_mat.size > 0: if len(self.event_id) != self.imp_mat.shape[0]: raise AttributeError( - f'The number of rows {imp_mat.shape[0]} of the impact ' + - f'matrix is inconsistent with the number {len(event_id)} ' - 'of hazard events.') + f"The number of rows {imp_mat.shape[0]} of the impact " + + f"matrix is inconsistent with the number {len(event_id)} " + "of hazard events." + ) if len(self.coord_exp) != self.imp_mat.shape[1]: raise AttributeError( - f'The number of columns {imp_mat.shape[1]} of the impact' + - f' matrix is inconsistent with the number {len(coord_exp)}' - ' exposures points.') + f"The number of columns {imp_mat.shape[1]} of the impact" + + f" matrix is inconsistent with the number {len(coord_exp)}" + " exposures points." + ) else: self.imp_mat = sparse.csr_matrix(np.empty((0, 0))) - def calc(self, exposures, impact_funcs, hazard, save_mat=False, assign_centroids=True): - """This function is deprecated, use ``ImpactCalc.impact`` instead. - """ - LOGGER.warning("The use of Impact().calc() is deprecated." - " Use ImpactCalc().impact() instead.") - from climada.engine.impact_calc import ImpactCalc # pylint: disable=import-outside-toplevel + def calc( + self, exposures, impact_funcs, hazard, save_mat=False, assign_centroids=True + ): + """This function is deprecated, use ``ImpactCalc.impact`` instead.""" + LOGGER.warning( + "The use of Impact().calc() is deprecated." + " Use ImpactCalc().impact() instead." 
+ ) + from climada.engine.impact_calc import ( + ImpactCalc, # pylint: disable=import-outside-toplevel + ) + impcalc = ImpactCalc(exposures, impact_funcs, hazard) self.__dict__ = impcalc.impact( - save_mat=save_mat, - assign_centroids=assign_centroids + save_mat=save_mat, assign_centroids=assign_centroids ).__dict__ -#TODO: new name + # TODO: new name @classmethod def from_eih(cls, exposures, hazard, at_event, eai_exp, aai_agg, imp_mat=None): """ @@ -238,23 +254,24 @@ def from_eih(cls, exposures, hazard, at_event, eai_exp, aai_agg, imp_mat=None): impact with all risk metrics set based on the given impact matrix """ return cls( - event_id = hazard.event_id, - event_name = hazard.event_name, - date = hazard.date, - frequency = hazard.frequency, - frequency_unit = hazard.frequency_unit, - coord_exp = np.stack([exposures.gdf['latitude'].values, - exposures.gdf['longitude'].values], - axis=1), - crs = exposures.crs, - unit = exposures.value_unit, - tot_value = exposures.centroids_total_value(hazard), - eai_exp = eai_exp, - at_event = at_event, - aai_agg = aai_agg, - imp_mat = imp_mat if imp_mat is not None else sparse.csr_matrix((0, 0)), - haz_type = hazard.haz_type, - ) + event_id=hazard.event_id, + event_name=hazard.event_name, + date=hazard.date, + frequency=hazard.frequency, + frequency_unit=hazard.frequency_unit, + coord_exp=np.stack( + [exposures.gdf["latitude"].values, exposures.gdf["longitude"].values], + axis=1, + ), + crs=exposures.crs, + unit=exposures.value_unit, + tot_value=exposures.centroids_total_value(hazard), + eai_exp=eai_exp, + at_event=at_event, + aai_agg=aai_agg, + imp_mat=imp_mat if imp_mat is not None else sparse.csr_matrix((0, 0)), + haz_type=hazard.haz_type, + ) @property def tot_value(self): @@ -264,19 +281,23 @@ def tot_value(self): Use :py:meth:`climada.entity.exposures.base.Exposures.affected_total_value` instead. """ - LOGGER.warning("The Impact.tot_value attribute is deprecated." - "Use Exposures.affected_total_value to calculate the affected " - "total exposure value based on a specific hazard intensity " - "threshold") + LOGGER.warning( + "The Impact.tot_value attribute is deprecated." + "Use Exposures.affected_total_value to calculate the affected " + "total exposure value based on a specific hazard intensity " + "threshold" + ) return self._tot_value @tot_value.setter def tot_value(self, value): """Set the total exposure value close to a hazard""" - LOGGER.warning("The Impact.tot_value attribute is deprecated." - "Use Exposures.affected_total_value to calculate the affected " - "total exposure value based on a specific hazard intensity " - "threshold") + LOGGER.warning( + "The Impact.tot_value attribute is deprecated." + "Use Exposures.affected_total_value to calculate the affected " + "total exposure value based on a specific hazard intensity " + "threshold" + ) self._tot_value = value def transfer_risk(self, attachment, cover): @@ -336,7 +357,7 @@ def residual_risk(self, attachment, cover): residual_aai_agg = np.sum(residual_at_event * self.frequency) return residual_at_event, residual_aai_agg -#TODO: rewrite and deprecate method + # TODO: rewrite and deprecate method def calc_risk_transfer(self, attachment, cover): """Compute traaditional risk transfer over impact. 
Returns new impact with risk transfer applied and the insurance layer resulting @@ -392,8 +413,7 @@ def impact_per_year(self, all_years=True, year_range=None): if year_range is None: year_range = [] - orig_year = np.array([dt.datetime.fromordinal(date).year - for date in self.date]) + orig_year = np.array([dt.datetime.fromordinal(date).year for date in self.date]) if orig_year.size == 0 and len(year_range) == 0: return dict() if orig_year.size == 0 or (len(year_range) > 0 and all_years): @@ -461,13 +481,15 @@ def impact_at_reg(self, agg_regions=None): return at_reg_event - def calc_impact_year_set(self,all_years=True, year_range=None): + def calc_impact_year_set(self, all_years=True, year_range=None): """This function is deprecated, use Impact.impact_per_year instead.""" - LOGGER.warning("The use of Impact.calc_impact_year_set is deprecated." - "Use Impact.impact_per_year instead.") + LOGGER.warning( + "The use of Impact.calc_impact_year_set is deprecated." + "Use Impact.impact_per_year instead." + ) return self.impact_per_year(all_years=all_years, year_range=year_range) -#TODO: rewrite and deprecate method + # TODO: rewrite and deprecate method def local_exceedance_imp(self, return_periods=(25, 50, 100, 250)): """Compute exceedance impact map for given return periods. Requires attribute imp_mat. @@ -482,26 +504,35 @@ def local_exceedance_imp(self, return_periods=(25, 50, 100, 250)): ------- np.array """ - LOGGER.info('Computing exceedance impact map for return periods: %s', - return_periods) + LOGGER.info( + "Computing exceedance impact map for return periods: %s", return_periods + ) if self.imp_mat.size == 0: - raise ValueError('Attribute imp_mat is empty. Recalculate Impact' - 'instance with parameter save_mat=True') + raise ValueError( + "Attribute imp_mat is empty. 
Recalculate Impact" + "instance with parameter save_mat=True" + ) num_cen = self.imp_mat.shape[1] imp_stats = np.zeros((len(return_periods), num_cen)) cen_step = CONFIG.max_matrix_size.int() // self.imp_mat.shape[0] if not cen_step: - raise ValueError('Increase max_matrix_size configuration parameter to > ' - f'{self.imp_mat.shape[0]}') + raise ValueError( + "Increase max_matrix_size configuration parameter to > " + f"{self.imp_mat.shape[0]}" + ) # separte in chunks chk = -1 for chk in range(int(num_cen / cen_step)): - self._loc_return_imp(np.array(return_periods), - self.imp_mat[:, chk * cen_step:(chk + 1) * cen_step].toarray(), - imp_stats[:, chk * cen_step:(chk + 1) * cen_step]) - self._loc_return_imp(np.array(return_periods), - self.imp_mat[:, (chk + 1) * cen_step:].toarray(), - imp_stats[:, (chk + 1) * cen_step:]) + self._loc_return_imp( + np.array(return_periods), + self.imp_mat[:, chk * cen_step : (chk + 1) * cen_step].toarray(), + imp_stats[:, chk * cen_step : (chk + 1) * cen_step], + ) + self._loc_return_imp( + np.array(return_periods), + self.imp_mat[:, (chk + 1) * cen_step :].toarray(), + imp_stats[:, (chk + 1) * cen_step :], + ) return imp_stats @@ -536,21 +567,29 @@ def calc_freq_curve(self, return_per=None): impact=ifc_impact, unit=self.unit, frequency_unit=self.frequency_unit, - label='Exceedance frequency curve' + label="Exceedance frequency curve", ) def _eai_title(self): - if self.frequency_unit in ['1/year', 'annual', '1/y', '1/a']: - return 'Expected annual impact' - if self.frequency_unit in ['1/day', 'daily', '1/d']: - return 'Expected daily impact' - if self.frequency_unit in ['1/month', 'monthly', '1/m']: - return 'Expected monthly impact' - return f'Expected impact ({self.frequency_unit})' - - def plot_scatter_eai_exposure(self, mask=None, ignore_zero=False, - pop_name=True, buffer=0.0, extend='neither', - axis=None, adapt_fontsize=True, **kwargs): + if self.frequency_unit in ["1/year", "annual", "1/y", "1/a"]: + return "Expected annual impact" + if self.frequency_unit in ["1/day", "daily", "1/d"]: + return "Expected daily impact" + if self.frequency_unit in ["1/month", "monthly", "1/m"]: + return "Expected monthly impact" + return f"Expected impact ({self.frequency_unit})" + + def plot_scatter_eai_exposure( + self, + mask=None, + ignore_zero=False, + pop_name=True, + buffer=0.0, + extend="neither", + axis=None, + adapt_fontsize=True, + **kwargs, + ): """Plot scatter expected impact within a period of 1/frequency_unit of each exposure. Parameters @@ -580,18 +619,34 @@ def plot_scatter_eai_exposure(self, mask=None, ignore_zero=False, ------- cartopy.mpl.geoaxes.GeoAxesSubplot """ - if 'cmap' not in kwargs: - kwargs['cmap'] = CMAP_IMPACT + if "cmap" not in kwargs: + kwargs["cmap"] = CMAP_IMPACT eai_exp = self._build_exp() - axis = eai_exp.plot_scatter(mask, ignore_zero, pop_name, buffer, - extend, axis=axis, adapt_fontsize=adapt_fontsize, **kwargs) + axis = eai_exp.plot_scatter( + mask, + ignore_zero, + pop_name, + buffer, + extend, + axis=axis, + adapt_fontsize=adapt_fontsize, + **kwargs, + ) axis.set_title(self._eai_title()) return axis - def plot_hexbin_eai_exposure(self, mask=None, ignore_zero=False, - pop_name=True, buffer=0.0, extend='neither', - axis=None, adapt_fontsize=True, **kwargs): + def plot_hexbin_eai_exposure( + self, + mask=None, + ignore_zero=False, + pop_name=True, + buffer=0.0, + extend="neither", + axis=None, + adapt_fontsize=True, + **kwargs, + ): """Plot hexbin expected impact within a period of 1/frequency_unit of each exposure. 
Parameters @@ -621,19 +676,34 @@ def plot_hexbin_eai_exposure(self, mask=None, ignore_zero=False, ------- cartopy.mpl.geoaxes.GeoAxesSubplot """ - if 'cmap' not in kwargs: - kwargs['cmap'] = CMAP_IMPACT + if "cmap" not in kwargs: + kwargs["cmap"] = CMAP_IMPACT eai_exp = self._build_exp() - axis = eai_exp.plot_hexbin(mask, ignore_zero, pop_name, buffer, - extend, axis=axis, adapt_fontsize=adapt_fontsize, **kwargs) + axis = eai_exp.plot_hexbin( + mask, + ignore_zero, + pop_name, + buffer, + extend, + axis=axis, + adapt_fontsize=adapt_fontsize, + **kwargs, + ) axis.set_title(self._eai_title()) return axis - def plot_raster_eai_exposure(self, res=None, raster_res=None, save_tiff=None, - raster_f=lambda x: np.log10((np.fmax(x + 1, 1))), - label='value (log10)', axis=None, adapt_fontsize=True, - **kwargs): + def plot_raster_eai_exposure( + self, + res=None, + raster_res=None, + save_tiff=None, + raster_f=lambda x: np.log10((np.fmax(x + 1, 1))), + label="value (log10)", + axis=None, + adapt_fontsize=True, + **kwargs, + ): """Plot raster expected impact within a period of 1/frequency_unit of each exposure. Parameters @@ -666,15 +736,31 @@ def plot_raster_eai_exposure(self, res=None, raster_res=None, save_tiff=None, # we need to set geometry points because the `plot_raster` method accesses the # exposures' `gdf.crs` property, which raises an error when geometry is not set eai_exp.set_geometry_points() - axis = eai_exp.plot_raster(res, raster_res, save_tiff, raster_f, - label, axis=axis, adapt_fontsize=adapt_fontsize, **kwargs) + axis = eai_exp.plot_raster( + res, + raster_res, + save_tiff, + raster_f, + label, + axis=axis, + adapt_fontsize=adapt_fontsize, + **kwargs, + ) axis.set_title(self._eai_title()) return axis - def plot_basemap_eai_exposure(self, mask=None, ignore_zero=False, pop_name=True, - buffer=0.0, extend='neither', zoom=10, - url=ctx.providers.CartoDB.Positron, - axis=None, **kwargs): + def plot_basemap_eai_exposure( + self, + mask=None, + ignore_zero=False, + pop_name=True, + buffer=0.0, + extend="neither", + zoom=10, + url=ctx.providers.CartoDB.Positron, + axis=None, + **kwargs, + ): """Plot basemap expected impact of each exposure within a period of 1/frequency_unit. Parameters @@ -705,17 +791,27 @@ def plot_basemap_eai_exposure(self, mask=None, ignore_zero=False, pop_name=True, ------- cartopy.mpl.geoaxes.GeoAxesSubplot """ - if 'cmap' not in kwargs: - kwargs['cmap'] = CMAP_IMPACT + if "cmap" not in kwargs: + kwargs["cmap"] = CMAP_IMPACT eai_exp = self._build_exp() - axis = eai_exp.plot_basemap(mask, ignore_zero, pop_name, buffer, - extend, zoom, url, axis=axis, **kwargs) + axis = eai_exp.plot_basemap( + mask, ignore_zero, pop_name, buffer, extend, zoom, url, axis=axis, **kwargs + ) axis.set_title(self._eai_title()) return axis - def plot_hexbin_impact_exposure(self, event_id=1, mask=None, ignore_zero=False, - pop_name=True, buffer=0.0, extend='neither', - axis=None, adapt_fontsize=True, **kwargs): + def plot_hexbin_impact_exposure( + self, + event_id=1, + mask=None, + ignore_zero=False, + pop_name=True, + buffer=0.0, + extend="neither", + axis=None, + adapt_fontsize=True, + **kwargs, + ): """Plot hexbin impact of an event at each exposure. Requires attribute imp_mat. @@ -750,22 +846,39 @@ def plot_hexbin_impact_exposure(self, event_id=1, mask=None, ignore_zero=False, cartopy.mpl.geoaxes.GeoAxesSubplot """ if self.imp_mat.size == 0: - raise ValueError('Attribute imp_mat is empty. 
Recalculate Impact' - 'instance with parameter save_mat=True') - if 'cmap' not in kwargs: - kwargs['cmap'] = CMAP_IMPACT + raise ValueError( + "Attribute imp_mat is empty. Recalculate Impact" + "instance with parameter save_mat=True" + ) + if "cmap" not in kwargs: + kwargs["cmap"] = CMAP_IMPACT impact_at_events_exp = self._build_exp_event(event_id) - axis = impact_at_events_exp.plot_hexbin(mask, ignore_zero, pop_name, - buffer, extend, axis=axis, - adapt_fontsize=adapt_fontsize, - **kwargs) + axis = impact_at_events_exp.plot_hexbin( + mask, + ignore_zero, + pop_name, + buffer, + extend, + axis=axis, + adapt_fontsize=adapt_fontsize, + **kwargs, + ) return axis - def plot_basemap_impact_exposure(self, event_id=1, mask=None, ignore_zero=False, - pop_name=True, buffer=0.0, extend='neither', zoom=10, - url=ctx.providers.CartoDB.Positron, - axis=None, **kwargs): + def plot_basemap_impact_exposure( + self, + event_id=1, + mask=None, + ignore_zero=False, + pop_name=True, + buffer=0.0, + extend="neither", + zoom=10, + url=ctx.providers.CartoDB.Positron, + axis=None, + **kwargs, + ): """Plot basemap impact of an event at each exposure. Requires attribute imp_mat. @@ -801,21 +914,30 @@ def plot_basemap_impact_exposure(self, event_id=1, mask=None, ignore_zero=False, cartopy.mpl.geoaxes.GeoAxesSubplot """ if self.imp_mat.size == 0: - raise ValueError('Attribute imp_mat is empty. Recalculate Impact' - 'instance with parameter save_mat=True') + raise ValueError( + "Attribute imp_mat is empty. Recalculate Impact" + "instance with parameter save_mat=True" + ) if event_id not in self.event_id: - raise ValueError(f'Event ID {event_id} not found') - if 'cmap' not in kwargs: - kwargs['cmap'] = CMAP_IMPACT + raise ValueError(f"Event ID {event_id} not found") + if "cmap" not in kwargs: + kwargs["cmap"] = CMAP_IMPACT impact_at_events_exp = self._build_exp_event(event_id) - axis = impact_at_events_exp.plot_basemap(mask, ignore_zero, pop_name, - buffer, extend, zoom, url, axis=axis, **kwargs) + axis = impact_at_events_exp.plot_basemap( + mask, ignore_zero, pop_name, buffer, extend, zoom, url, axis=axis, **kwargs + ) return axis - def plot_rp_imp(self, return_periods=(25, 50, 100, 250), - log10_scale=True, smooth=True, axis=None, **kwargs): + def plot_rp_imp( + self, + return_periods=(25, 50, 100, 250), + log10_scale=True, + smooth=True, + axis=None, + **kwargs, + ): """Compute and plot exceedance impact maps for different return periods. Calls local_exceedance_imp. @@ -839,26 +961,35 @@ def plot_rp_imp(self, return_periods=(25, 50, 100, 250), """ imp_stats = self.local_exceedance_imp(np.array(return_periods)) if imp_stats.size == 0: - raise ValueError('Error: Attribute imp_mat is empty. Recalculate Impact' - 'instance with parameter save_mat=True') + raise ValueError( + "Error: Attribute imp_mat is empty. 
Recalculate Impact" + "instance with parameter save_mat=True" + ) if log10_scale: if np.min(imp_stats) < 0: imp_stats_log = np.log10(abs(imp_stats) + 1) - colbar_name = 'Log10(abs(Impact)+1) (' + self.unit + ')' + colbar_name = "Log10(abs(Impact)+1) (" + self.unit + ")" elif np.min(imp_stats) < 1: imp_stats_log = np.log10(imp_stats + 1) - colbar_name = 'Log10(Impact+1) (' + self.unit + ')' + colbar_name = "Log10(Impact+1) (" + self.unit + ")" else: imp_stats_log = np.log10(imp_stats) - colbar_name = 'Log10(Impact) (' + self.unit + ')' + colbar_name = "Log10(Impact) (" + self.unit + ")" else: imp_stats_log = imp_stats - colbar_name = 'Impact (' + self.unit + ')' + colbar_name = "Impact (" + self.unit + ")" title = list() for ret in return_periods: - title.append('Return period: ' + str(ret) + ' years') - axis = u_plot.geo_im_from_array(imp_stats_log, self.coord_exp, - colbar_name, title, smooth=smooth, axes=axis, **kwargs) + title.append("Return period: " + str(ret) + " years") + axis = u_plot.geo_im_from_array( + imp_stats_log, + self.coord_exp, + colbar_name, + title, + smooth=smooth, + axes=axis, + **kwargs, + ) return axis, imp_stats @@ -870,18 +1001,43 @@ def write_csv(self, file_name): file_name : str absolute path of the file """ - LOGGER.info('Writing %s', file_name) - with open(file_name, "w", encoding='utf-8') as imp_file: + LOGGER.info("Writing %s", file_name) + with open(file_name, "w", encoding="utf-8") as imp_file: imp_wr = csv.writer(imp_file) - imp_wr.writerow(["haz_type", "unit", "tot_value", "aai_agg", "event_id", - "event_name", "event_date", "event_frequency", "frequency_unit", - "at_event", "eai_exp", "exp_lat", "exp_lon", "exp_crs"]) - csv_data = [[self.haz_type], - [self.unit], [self._tot_value], [self.aai_agg], - self.event_id, self.event_name, self.date, - self.frequency, [self.frequency_unit], self.at_event, - self.eai_exp, self.coord_exp[:, 0], self.coord_exp[:, 1], - [str(self.crs)]] + imp_wr.writerow( + [ + "haz_type", + "unit", + "tot_value", + "aai_agg", + "event_id", + "event_name", + "event_date", + "event_frequency", + "frequency_unit", + "at_event", + "eai_exp", + "exp_lat", + "exp_lon", + "exp_crs", + ] + ) + csv_data = [ + [self.haz_type], + [self.unit], + [self._tot_value], + [self.aai_agg], + self.event_id, + self.event_name, + self.date, + self.frequency, + [self.frequency_unit], + self.at_event, + self.eai_exp, + self.coord_exp[:, 0], + self.coord_exp[:, 1], + [str(self.crs)], + ] for values in zip_longest(*csv_data): imp_wr.writerow(values) @@ -893,7 +1049,8 @@ def write_excel(self, file_name): file_name : str absolute path of the file """ - LOGGER.info('Writing %s', file_name) + LOGGER.info("Writing %s", file_name) + def write_col(i_col, imp_ws, xls_data): """Write one measure""" row_ini = 1 @@ -904,9 +1061,22 @@ def write_col(i_col, imp_ws, xls_data): imp_wb = xlsxwriter.Workbook(file_name) imp_ws = imp_wb.add_worksheet() - header = ["haz_type", "unit", "tot_value", "aai_agg", "event_id", - "event_name", "event_date", "event_frequency", "frequency_unit", - "at_event", "eai_exp", "exp_lat", "exp_lon", "exp_crs"] + header = [ + "haz_type", + "unit", + "tot_value", + "aai_agg", + "event_id", + "event_name", + "event_date", + "event_frequency", + "frequency_unit", + "at_event", + "eai_exp", + "exp_lat", + "exp_lon", + "exp_crs", + ] for icol, head_dat in enumerate(header): imp_ws.write(0, icol, head_dat) data = [str(self.haz_type)] @@ -927,7 +1097,7 @@ def write_col(i_col, imp_ws, xls_data): imp_wb.close() - def write_hdf5(self, file_path: Union[str, 
Path], dense_imp_mat: bool=False): + def write_hdf5(self, file_path: Union[str, Path], dense_imp_mat: bool = False): """Write the data stored in this object into an H5 file. Try to write all attributes of this class into H5 datasets or attributes. @@ -1043,9 +1213,14 @@ def write_csr(group, name, value): def write_sparse_csr(self, file_name): """Write imp_mat matrix in numpy's npz format.""" - LOGGER.info('Writing %s', file_name) - np.savez(file_name, data=self.imp_mat.data, indices=self.imp_mat.indices, - indptr=self.imp_mat.indptr, shape=self.imp_mat.shape) + LOGGER.info("Writing %s", file_name) + np.savez( + file_name, + data=self.imp_mat.data, + indices=self.imp_mat.indices, + indptr=self.imp_mat.indptr, + shape=self.imp_mat.shape, + ) @staticmethod def read_sparse_csr(file_name): @@ -1059,10 +1234,11 @@ def read_sparse_csr(file_name): ------- sparse.csr_matrix """ - LOGGER.info('Reading %s', file_name) + LOGGER.info("Reading %s", file_name) loader = np.load(file_name) return sparse.csr_matrix( - (loader['data'], loader['indices'], loader['indptr']), shape=loader['shape']) + (loader["data"], loader["indices"], loader["indptr"]), shape=loader["shape"] + ) @classmethod def from_csv(cls, file_name): @@ -1079,27 +1255,28 @@ def from_csv(cls, file_name): Impact from csv file """ # pylint: disable=no-member - LOGGER.info('Reading %s', file_name) + LOGGER.info("Reading %s", file_name) imp_df = pd.read_csv(file_name) - imp = cls(haz_type=imp_df['haz_type'][0]) - imp.unit = imp_df['unit'][0] - imp.tot_value = imp_df['tot_value'][0] - imp.aai_agg = imp_df['aai_agg'][0] - imp.event_id = imp_df['event_id'][~np.isnan(imp_df['event_id'])].values + imp = cls(haz_type=imp_df["haz_type"][0]) + imp.unit = imp_df["unit"][0] + imp.tot_value = imp_df["tot_value"][0] + imp.aai_agg = imp_df["aai_agg"][0] + imp.event_id = imp_df["event_id"][~np.isnan(imp_df["event_id"])].values num_ev = imp.event_id.size - imp.event_name = imp_df['event_name'][:num_ev].values.tolist() - imp.date = imp_df['event_date'][:num_ev].values - imp.at_event = imp_df['at_event'][:num_ev].values - imp.frequency = imp_df['event_frequency'][:num_ev].values - imp.frequency_unit = imp_df['frequency_unit'][0] if 'frequency_unit' in imp_df \ - else DEF_FREQ_UNIT - imp.eai_exp = imp_df['eai_exp'][~np.isnan(imp_df['eai_exp'])].values + imp.event_name = imp_df["event_name"][:num_ev].values.tolist() + imp.date = imp_df["event_date"][:num_ev].values + imp.at_event = imp_df["at_event"][:num_ev].values + imp.frequency = imp_df["event_frequency"][:num_ev].values + imp.frequency_unit = ( + imp_df["frequency_unit"][0] if "frequency_unit" in imp_df else DEF_FREQ_UNIT + ) + imp.eai_exp = imp_df["eai_exp"][~np.isnan(imp_df["eai_exp"])].values num_exp = imp.eai_exp.size imp.coord_exp = np.zeros((num_exp, 2)) - imp.coord_exp[:, 0] = imp_df['exp_lat'][:num_exp] - imp.coord_exp[:, 1] = imp_df['exp_lon'][:num_exp] + imp.coord_exp[:, 0] = imp_df["exp_lat"][:num_exp] + imp.coord_exp[:, 1] = imp_df["exp_lon"][:num_exp] try: - imp.crs = u_coord.to_crs_user_input(imp_df['exp_crs'].values[0]) + imp.crs = u_coord.to_crs_user_input(imp_df["exp_crs"].values[0]) except AttributeError: imp.crs = DEF_CRS @@ -1107,8 +1284,9 @@ def from_csv(cls, file_name): def read_csv(self, *args, **kwargs): """This function is deprecated, use Impact.from_csv instead.""" - LOGGER.warning("The use of Impact.read_csv is deprecated." - "Use Impact.from_csv instead.") + LOGGER.warning( + "The use of Impact.read_csv is deprecated." "Use Impact.from_csv instead." 
+ ) self.__dict__ = Impact.from_csv(*args, **kwargs).__dict__ @classmethod @@ -1125,27 +1303,29 @@ def from_excel(cls, file_name): imp : climada.engine.impact.Impact Impact from excel file """ - LOGGER.info('Reading %s', file_name) + LOGGER.info("Reading %s", file_name) dfr = pd.read_excel(file_name) - imp = cls(haz_type=str(dfr['haz_type'][0])) - - imp.unit = dfr['unit'][0] - imp.tot_value = dfr['tot_value'][0] - imp.aai_agg = dfr['aai_agg'][0] - - imp.event_id = dfr['event_id'][~np.isnan(dfr['event_id'].values)].values - imp.event_name = dfr['event_name'][:imp.event_id.size].values - imp.date = dfr['event_date'][:imp.event_id.size].values - imp.frequency = dfr['event_frequency'][:imp.event_id.size].values - imp.frequency_unit = dfr['frequency_unit'][0] if 'frequency_unit' in dfr else DEF_FREQ_UNIT - imp.at_event = dfr['at_event'][:imp.event_id.size].values + imp = cls(haz_type=str(dfr["haz_type"][0])) + + imp.unit = dfr["unit"][0] + imp.tot_value = dfr["tot_value"][0] + imp.aai_agg = dfr["aai_agg"][0] + + imp.event_id = dfr["event_id"][~np.isnan(dfr["event_id"].values)].values + imp.event_name = dfr["event_name"][: imp.event_id.size].values + imp.date = dfr["event_date"][: imp.event_id.size].values + imp.frequency = dfr["event_frequency"][: imp.event_id.size].values + imp.frequency_unit = ( + dfr["frequency_unit"][0] if "frequency_unit" in dfr else DEF_FREQ_UNIT + ) + imp.at_event = dfr["at_event"][: imp.event_id.size].values - imp.eai_exp = dfr['eai_exp'][~np.isnan(dfr['eai_exp'].values)].values + imp.eai_exp = dfr["eai_exp"][~np.isnan(dfr["eai_exp"].values)].values imp.coord_exp = np.zeros((imp.eai_exp.size, 2)) - imp.coord_exp[:, 0] = dfr['exp_lat'].values[:imp.eai_exp.size] - imp.coord_exp[:, 1] = dfr['exp_lon'].values[:imp.eai_exp.size] + imp.coord_exp[:, 0] = dfr["exp_lat"].values[: imp.eai_exp.size] + imp.coord_exp[:, 1] = dfr["exp_lon"].values[: imp.eai_exp.size] try: - imp.crs = u_coord.to_csr_user_input(dfr['exp_crs'].values[0]) + imp.crs = u_coord.to_csr_user_input(dfr["exp_crs"].values[0]) except AttributeError: imp.crs = DEF_CRS @@ -1153,8 +1333,10 @@ def from_excel(cls, file_name): def read_excel(self, *args, **kwargs): """This function is deprecated, use Impact.from_excel instead.""" - LOGGER.warning("The use of Impact.read_excel is deprecated." - "Use Impact.from_excel instead.") + LOGGER.warning( + "The use of Impact.read_excel is deprecated." + "Use Impact.from_excel instead." + ) self.__dict__ = Impact.from_excel(*args, **kwargs).__dict__ @classmethod @@ -1259,10 +1441,18 @@ def from_hdf5(cls, file_path: Union[str, Path]): return cls(**kwargs) @staticmethod - def video_direct_impact(exp, impf_set, haz_list, file_name='', - writer=animation.PillowWriter(bitrate=500), - imp_thresh=0, args_exp=None, args_imp=None, - ignore_zero=False, pop_name=False): + def video_direct_impact( + exp, + impf_set, + haz_list, + file_name="", + writer=animation.PillowWriter(bitrate=500), + imp_thresh=0, + args_exp=None, + args_imp=None, + ignore_zero=False, + pop_name=False, + ): """ Computes and generates video of accumulated impact per input events over exposure. 
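
The I/O methods reformatted in the preceding hunks (``write_csv``, ``write_excel``, ``write_hdf5``, ``write_sparse_csr`` and their ``from_*``/``read_*`` counterparts) round-trip an ``Impact`` to disk. A minimal sketch, assuming ``imp`` is an Impact computed beforehand (e.g. via ``ImpactCalc(...).impact(save_mat=True)``); the file names are placeholders:

    from climada.engine import Impact

    imp.write_csv("impact.csv")
    imp.write_hdf5("impact.h5")          # dense_imp_mat=True would store imp_mat densely
    imp.write_sparse_csr("imp_mat.npz")  # impact matrix only, npz format

    imp_from_csv = Impact.from_csv("impact.csv")
    imp_from_h5 = Impact.from_hdf5("impact.h5")
    imp_mat = Impact.read_sparse_csr("imp_mat.npz")
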
@@ -1299,7 +1489,9 @@ def video_direct_impact(exp, impf_set, haz_list, file_name='', ------- list of Impact """ - from climada.engine.impact_calc import ImpactCalc # pylint: disable=import-outside-toplevel + from climada.engine.impact_calc import ( + ImpactCalc, # pylint: disable=import-outside-toplevel + ) if args_exp is None: args_exp = dict() @@ -1311,7 +1503,9 @@ def video_direct_impact(exp, impf_set, haz_list, file_name='', # assign centroids once for all exp.assign_centroids(haz_list[0]) for i_time, _ in enumerate(haz_list): - imp_tmp = ImpactCalc(exp, impf_set, haz_list[i_time]).impact(assign_centroids=False) + imp_tmp = ImpactCalc(exp, impf_set, haz_list[i_time]).impact( + assign_centroids=False + ) imp_arr = np.maximum(imp_arr, imp_tmp.eai_exp) # remove not impacted exposures save_exp = imp_arr > imp_thresh @@ -1320,63 +1514,84 @@ def video_direct_impact(exp, impf_set, haz_list, file_name='', imp_list.append(imp_tmp) exp_list.append(~save_exp) - v_lim = [np.array([haz.intensity.min() for haz in haz_list]).min(), - np.array([haz.intensity.max() for haz in haz_list]).max()] + v_lim = [ + np.array([haz.intensity.min() for haz in haz_list]).min(), + np.array([haz.intensity.max() for haz in haz_list]).max(), + ] - if 'vmin' not in args_exp: - args_exp['vmin'] = exp.gdf['value'].values.min() + if "vmin" not in args_exp: + args_exp["vmin"] = exp.gdf["value"].values.min() - if 'vmin' not in args_imp: - args_imp['vmin'] = np.array([imp.eai_exp.min() for imp in imp_list - if imp.eai_exp.size]).min() + if "vmin" not in args_imp: + args_imp["vmin"] = np.array( + [imp.eai_exp.min() for imp in imp_list if imp.eai_exp.size] + ).min() - if 'vmax' not in args_exp: - args_exp['vmax'] = exp.gdf['value'].values.max() + if "vmax" not in args_exp: + args_exp["vmax"] = exp.gdf["value"].values.max() - if 'vmax' not in args_imp: - args_imp['vmax'] = np.array([imp.eai_exp.max() for imp in imp_list - if imp.eai_exp.size]).max() + if "vmax" not in args_imp: + args_imp["vmax"] = np.array( + [imp.eai_exp.max() for imp in imp_list if imp.eai_exp.size] + ).max() - if 'cmap' not in args_exp: - args_exp['cmap'] = 'winter_r' - - if 'cmap' not in args_imp: - args_imp['cmap'] = 'autumn_r' + if "cmap" not in args_exp: + args_exp["cmap"] = "winter_r" + if "cmap" not in args_imp: + args_imp["cmap"] = "autumn_r" plot_raster = False if exp.meta: plot_raster = True def run(i_time): - haz_list[i_time].plot_intensity(1, axis=axis, cmap='Greys', vmin=v_lim[0], - vmax=v_lim[1], alpha=0.8) + haz_list[i_time].plot_intensity( + 1, axis=axis, cmap="Greys", vmin=v_lim[0], vmax=v_lim[1], alpha=0.8 + ) if plot_raster: - exp.plot_hexbin(axis=axis, mask=exp_list[i_time], ignore_zero=ignore_zero, - pop_name=pop_name, **args_exp) + exp.plot_hexbin( + axis=axis, + mask=exp_list[i_time], + ignore_zero=ignore_zero, + pop_name=pop_name, + **args_exp, + ) if imp_list[i_time].coord_exp.size: - imp_list[i_time].plot_hexbin_eai_exposure(axis=axis, pop_name=pop_name, - **args_imp) + imp_list[i_time].plot_hexbin_eai_exposure( + axis=axis, pop_name=pop_name, **args_imp + ) fig.delaxes(fig.axes[1]) else: - exp.plot_scatter(axis=axis, mask=exp_list[i_time], ignore_zero=ignore_zero, - pop_name=pop_name, **args_exp) + exp.plot_scatter( + axis=axis, + mask=exp_list[i_time], + ignore_zero=ignore_zero, + pop_name=pop_name, + **args_exp, + ) if imp_list[i_time].coord_exp.size: - imp_list[i_time].plot_scatter_eai_exposure(axis=axis, pop_name=pop_name, - **args_imp) + imp_list[i_time].plot_scatter_eai_exposure( + axis=axis, pop_name=pop_name, **args_imp + ) 
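# A small sketch (toy values, not part of the patch) of the accumulation used by
# video_direct_impact above: the running impact per exposure point is the element-wise
# maximum of eai_exp over the events processed so far, and only points exceeding
# imp_thresh are kept for plotting.
import numpy as np

imp_thresh = 1.0
eai_per_event = [np.array([0.0, 2.0, 0.5]), np.array([3.0, 1.0, 0.2])]  # assumed eai_exp values
imp_arr = np.zeros(3)
for eai in eai_per_event:
    imp_arr = np.maximum(imp_arr, eai)  # accumulate the largest impact seen so far
save_exp = imp_arr > imp_thresh         # mask of exposures above the threshold
# imp_arr -> [3.0, 2.0, 0.5]; save_exp -> [True, True, False]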
fig.delaxes(fig.axes[1]) fig.delaxes(fig.axes[1]) fig.delaxes(fig.axes[1]) - axis.set_xlim(haz_list[-1].centroids.lon.min(), haz_list[-1].centroids.lon.max()) - axis.set_ylim(haz_list[-1].centroids.lat.min(), haz_list[-1].centroids.lat.max()) + axis.set_xlim( + haz_list[-1].centroids.lon.min(), haz_list[-1].centroids.lon.max() + ) + axis.set_ylim( + haz_list[-1].centroids.lat.min(), haz_list[-1].centroids.lat.max() + ) axis.set_title(haz_list[i_time].event_name[0]) pbar.update() if file_name: - LOGGER.info('Generating video %s', file_name) + LOGGER.info("Generating video %s", file_name) fig, axis, _fontsize = u_plot.make_map() - ani = animation.FuncAnimation(fig, run, frames=len(haz_list), - interval=500, blit=False) + ani = animation.FuncAnimation( + fig, run, frames=len(haz_list), interval=500, blit=False + ) pbar = tqdm(total=len(haz_list)) fig.tight_layout() ani.save(file_name, writer=writer) @@ -1384,7 +1599,7 @@ def run(i_time): return imp_list -#TODO: rewrite and deprecate method + # TODO: rewrite and deprecate method def _loc_return_imp(self, return_periods, imp, exc_imp): """Compute local exceedence impact for given return period. @@ -1411,20 +1626,20 @@ def _loc_return_imp(self, return_periods, imp, exc_imp): for cen_idx in range(imp.shape[1]): exc_imp[:, cen_idx] = self._cen_return_imp( - imp_sort[:, cen_idx], freq_sort[:, cen_idx], - 0, return_periods) + imp_sort[:, cen_idx], freq_sort[:, cen_idx], 0, return_periods + ) def _build_exp(self): return Exposures( data={ - 'value': self.eai_exp, - 'latitude': self.coord_exp[:, 0], - 'longitude': self.coord_exp[:, 1], + "value": self.eai_exp, + "latitude": self.coord_exp[:, 0], + "longitude": self.coord_exp[:, 1], }, crs=self.crs, value_unit=self.unit, ref_year=0, - meta=None + meta=None, ) def _build_exp_event(self, event_id): @@ -1438,14 +1653,14 @@ def _build_exp_event(self, event_id): [[idx]] = (self.event_id == event_id).nonzero() return Exposures( data={ - 'value': self.imp_mat[idx].toarray().ravel(), - 'latitude': self.coord_exp[:, 0], - 'longitude': self.coord_exp[:, 1], + "value": self.imp_mat[idx].toarray().ravel(), + "latitude": self.coord_exp[:, 0], + "longitude": self.coord_exp[:, 1], }, crs=self.crs, value_unit=self.unit, ref_year=0, - meta=None + meta=None, ) @staticmethod @@ -1481,7 +1696,7 @@ def _cen_return_imp(imp, freq, imp_th, return_periods): pol_coef = np.polyfit(np.log(freq_cen), imp_cen, deg=0) imp_fit = np.polyval(pol_coef, np.log(1 / return_periods)) wrong_inten = (return_periods > np.max(1 / freq_cen)) & np.isnan(imp_fit) - imp_fit[wrong_inten] = 0. + imp_fit[wrong_inten] = 0.0 return imp_fit @@ -1491,7 +1706,7 @@ def select( event_names=None, dates=None, coord_exp=None, - reset_frequency=False + reset_frequency=False, ): """ Select a subset of events and/or exposure points from the impact. @@ -1544,18 +1759,22 @@ def select( nb_exp = len(self.coord_exp) if self.imp_mat.shape != (nb_events, nb_exp): - raise ValueError("The impact matrix is missing or incomplete. " - "The eai_exp and aai_agg cannot be computed. " - "Please recompute impact.calc() with save_mat=True " - "before using impact.select()") + raise ValueError( + "The impact matrix is missing or incomplete. " + "The eai_exp and aai_agg cannot be computed. " + "Please recompute impact.calc() with save_mat=True " + "before using impact.select()" + ) if nb_events == nb_exp: - LOGGER.warning("The number of events is equal to the number of " - "exposure points. It is not possible to " - "differentiate events and exposures attributes. 
" - "Please add/remove one event/exposure point. " - "This is a purely technical limitation of this " - "method.") + LOGGER.warning( + "The number of events is equal to the number of " + "exposure points. It is not possible to " + "differentiate events and exposures attributes. " + "Please add/remove one event/exposure point. " + "This is a purely technical limitation of this " + "method." + ) return None imp = copy.deepcopy(self) @@ -1571,10 +1790,12 @@ def select( if value.ndim == 1: setattr(imp, attr, value[sel_ev]) else: - LOGGER.warning("Found a multidimensional numpy array " - "with one dimension matching the number of events. " - "But multidimensional numpy arrays are not handled " - "in impact.select") + LOGGER.warning( + "Found a multidimensional numpy array " + "with one dimension matching the number of events. " + "But multidimensional numpy arrays are not handled " + "in impact.select" + ) elif isinstance(value, sparse.csr_matrix): setattr(imp, attr, value[sel_ev, :]) elif isinstance(value, list) and value: @@ -1582,9 +1803,11 @@ def select( else: pass - LOGGER.info("The eai_exp and aai_agg are computed for the " - "selected subset of events WITHOUT modification of " - "the frequencies.") + LOGGER.info( + "The eai_exp and aai_agg are computed for the " + "selected subset of events WITHOUT modification of " + "the frequencies." + ) # apply exposure selection to impact attributes if coord_exp is not None: @@ -1595,20 +1818,35 @@ def select( # .A1 reduce 1d matrix to 1d array imp.at_event = imp.imp_mat.sum(axis=1).A1 imp.tot_value = None - LOGGER.info("The total value cannot be re-computed for a " - "subset of exposures and is set to None.") + LOGGER.info( + "The total value cannot be re-computed for a " + "subset of exposures and is set to None." + ) # reset frequency if date span has changed (optional): if reset_frequency: - if self.frequency_unit not in ['1/year', 'annual', '1/y', '1/a']: - LOGGER.warning("Resetting the frequency is based on the calendar year of given" + if self.frequency_unit not in ["1/year", "annual", "1/y", "1/a"]: + LOGGER.warning( + "Resetting the frequency is based on the calendar year of given" " dates but the frequency unit here is %s. 
Consider setting the frequency" " manually for the selection or changing the frequency unit to %s.", - self.frequency_unit, DEF_FREQ_UNIT) - year_span_old = np.abs(dt.datetime.fromordinal(self.date.max()).year - - dt.datetime.fromordinal(self.date.min()).year) + 1 - year_span_new = np.abs(dt.datetime.fromordinal(imp.date.max()).year - - dt.datetime.fromordinal(imp.date.min()).year) + 1 + self.frequency_unit, + DEF_FREQ_UNIT, + ) + year_span_old = ( + np.abs( + dt.datetime.fromordinal(self.date.max()).year + - dt.datetime.fromordinal(self.date.min()).year + ) + + 1 + ) + year_span_new = ( + np.abs( + dt.datetime.fromordinal(imp.date.max()).year + - dt.datetime.fromordinal(imp.date.min()).year + ) + + 1 + ) imp.frequency = imp.frequency * year_span_old / year_span_new # cast frequency vector into 2d array for sparse matrix multiplication @@ -1632,10 +1870,10 @@ def _selected_events_idx(self, event_ids, event_names, dates, nb_events): if isinstance(date_ini, str): date_ini = u_dt.str_to_date(date_ini) date_end = u_dt.str_to_date(date_end) - mask_dt &= (date_ini <= self.date) - mask_dt &= (self.date <= date_end) + mask_dt &= date_ini <= self.date + mask_dt &= self.date <= date_end if not np.any(mask_dt): - LOGGER.info('No impact event in given date range %s.', dates) + LOGGER.info("No impact event in given date range %s.", dates) sel_dt = mask_dt.nonzero()[0] # Convert bool to indices @@ -1646,7 +1884,7 @@ def _selected_events_idx(self, event_ids, event_names, dates, nb_events): (sel_id,) = np.isin(self.event_id, event_ids).nonzero() # pylint: disable=no-member if sel_id.size == 0: - LOGGER.info('No impact event with given ids %s found.', event_ids) + LOGGER.info("No impact event with given ids %s found.", event_ids) # filter events by name if event_names is None: @@ -1655,7 +1893,7 @@ def _selected_events_idx(self, event_ids, event_names, dates, nb_events): (sel_na,) = np.isin(self.event_name, event_names).nonzero() # pylint: disable=no-member if sel_na.size == 0: - LOGGER.info('No impact event with given names %s found.', event_names) + LOGGER.info("No impact event with given names %s found.", event_names) # select events with machting id, name or date field. sel_ev = np.unique(np.concatenate([sel_dt, sel_id, sel_na])) @@ -1713,6 +1951,7 @@ def concat(cls, imp_list: Iterable, reset_event_ids: bool = False): - Concatenation of impacts with different exposure (e.g. different countries) could also be implemented here in the future. """ + def check_unique_attr(attr_name: str): """Check if an attribute is unique among all impacts""" if len({getattr(imp, attr_name) for imp in imp_list}) > 1: @@ -1778,8 +2017,9 @@ def stack_attribute(attr_name: str) -> np.ndarray: **kwargs, ) - def match_centroids(self, hazard, distance='euclidean', - threshold=u_coord.NEAREST_NEIGHBOR_THRESHOLD): + def match_centroids( + self, hazard, distance="euclidean", threshold=u_coord.NEAREST_NEIGHBOR_THRESHOLD + ): """ Finds the closest hazard centroid for each impact coordinate. Creates a temporary GeoDataFrame and uses ``u_coord.match_centroids()``. @@ -1809,26 +2049,27 @@ def match_centroids(self, hazard, distance='euclidean', self._build_exp().gdf, hazard.centroids, distance=distance, - threshold=threshold) + threshold=threshold, + ) + @dataclass -class ImpactFreqCurve(): - """Impact exceedence frequency curve. 
- """ +class ImpactFreqCurve: + """Impact exceedence frequency curve.""" - return_per : np.ndarray = field(default_factory=lambda: np.empty(0)) + return_per: np.ndarray = field(default_factory=lambda: np.empty(0)) """return period""" - impact : np.ndarray = field(default_factory=lambda: np.empty(0)) + impact: np.ndarray = field(default_factory=lambda: np.empty(0)) """impact exceeding frequency""" - unit : str = '' + unit: str = "" """value unit used (given by exposures unit)""" - frequency_unit : str = DEF_FREQ_UNIT + frequency_unit: str = DEF_FREQ_UNIT """value unit used (given by exposures unit)""" - label : str = '' + label: str = "" """string describing source data""" def plot(self, axis=None, log_frequency=False, **kwargs): @@ -1850,12 +2091,12 @@ def plot(self, axis=None, log_frequency=False, **kwargs): if not axis: _, axis = plt.subplots(1, 1) axis.set_title(self.label) - axis.set_ylabel('Impact (' + self.unit + ')') + axis.set_ylabel("Impact (" + self.unit + ")") if log_frequency: - axis.set_xlabel(f'Exceedance frequency ({self.frequency_unit})') - axis.set_xscale('log') + axis.set_xlabel(f"Exceedance frequency ({self.frequency_unit})") + axis.set_xscale("log") axis.plot(self.return_per**-1, self.impact, **kwargs) else: - axis.set_xlabel('Return period (year)') + axis.set_xlabel("Return period (year)") axis.plot(self.return_per, self.impact, **kwargs) return axis diff --git a/climada/engine/impact_calc.py b/climada/engine/impact_calc.py index d0fc05286..cf7da1b30 100644 --- a/climada/engine/impact_calc.py +++ b/climada/engine/impact_calc.py @@ -19,12 +19,13 @@ Define ImpactCalc class. """ -__all__ = ['ImpactCalc'] +__all__ = ["ImpactCalc"] import logging + +import geopandas as gpd import numpy as np from scipy import sparse -import geopandas as gpd from climada import CONFIG from climada.engine import Impact @@ -32,15 +33,12 @@ LOGGER = logging.getLogger(__name__) -class ImpactCalc(): +class ImpactCalc: """ Class to compute impacts from exposures, impact function set and hazard """ - def __init__(self, - exposures, - impfset, - hazard): + def __init__(self, exposures, impfset, hazard): """ ImpactCalc constructor @@ -78,8 +76,13 @@ def n_events(self): """Number of hazard events (size of event_id array)""" return self.hazard.size - def impact(self, save_mat=True, assign_centroids=True, - ignore_cover=False, ignore_deductible=False): + def impact( + self, + save_mat=True, + assign_centroids=True, + ignore_cover=False, + ignore_deductible=False, + ): """Compute the impact of a hazard on exposures. Parameters @@ -117,50 +120,70 @@ def impact(self, save_mat=True, assign_centroids=True, apply_cover_to_mat : apply cover to impact matrix """ # check for compatibility of exposures and hazard type - if all(name not in self.exposures.gdf.columns for - name in ['if_', f'if_{self.hazard.haz_type}', - 'impf_', f'impf_{self.hazard.haz_type}']): + if all( + name not in self.exposures.gdf.columns + for name in [ + "if_", + f"if_{self.hazard.haz_type}", + "impf_", + f"impf_{self.hazard.haz_type}", + ] + ): raise AttributeError( "Impact calculation not possible. No impact functions found " f"for hazard type {self.hazard.haz_type} in exposures." - ) + ) # check for compatibility of impact function and hazard type if not self.impfset.get_func(haz_type=self.hazard.haz_type): raise AttributeError( "Impact calculation not possible. No impact functions found " f"for hazard type {self.hazard.haz_type} in impf_set." 
- ) + ) impf_col = self.exposures.get_impf_column(self.hazard.haz_type) known_impact_functions = self.impfset.get_ids(haz_type=self.hazard.haz_type) # check for compatibility of impact function id between impact function set and exposure if not all(self.exposures.gdf[impf_col].isin(known_impact_functions)): - unknown_impact_functions = list(self.exposures.gdf[ + unknown_impact_functions = list( + self.exposures.gdf[ ~self.exposures.gdf[impf_col].isin(known_impact_functions) - ][impf_col].drop_duplicates().astype(int).astype(str)) + ][impf_col] + .drop_duplicates() + .astype(int) + .astype(str) + ) raise ValueError( f"The associated impact function(s) with id(s) " f"{', '.join(unknown_impact_functions)} have no match in impact function set for" - f" hazard type \'{self.hazard.haz_type}\'.\nPlease make sure that all exposure " + f" hazard type '{self.hazard.haz_type}'.\nPlease make sure that all exposure " "points are associated with an impact function that is included in the impact " - "function set.") + "function set." + ) - exp_gdf = self.minimal_exp_gdf(impf_col, assign_centroids, ignore_cover, ignore_deductible) + exp_gdf = self.minimal_exp_gdf( + impf_col, assign_centroids, ignore_cover, ignore_deductible + ) if exp_gdf.size == 0: return self._return_empty(save_mat) - LOGGER.info('Calculating impact for %s assets (>0) and %s events.', - exp_gdf.size, self.n_events) + LOGGER.info( + "Calculating impact for %s assets (>0) and %s events.", + exp_gdf.size, + self.n_events, + ) imp_mat_gen = self.imp_mat_gen(exp_gdf, impf_col) - insured = ('cover' in exp_gdf and exp_gdf['cover'].max() >= 0) \ - or ('deductible' in exp_gdf and exp_gdf['deductible'].max() > 0) + insured = ("cover" in exp_gdf and exp_gdf["cover"].max() >= 0) or ( + "deductible" in exp_gdf and exp_gdf["deductible"].max() > 0 + ) if insured: - LOGGER.info("cover and/or deductible columns detected," - " going to calculate insured impact") -#TODO: make a better impact matrix generator for insured impacts when -# the impact matrix is already present + LOGGER.info( + "cover and/or deductible columns detected," + " going to calculate insured impact" + ) + # TODO: make a better impact matrix generator for insured impacts when + # the impact matrix is already present imp_mat_gen = self.insured_mat_gen(imp_mat_gen, exp_gdf, impf_col) return self._return_impact(imp_mat_gen, save_mat) @@ -187,8 +210,9 @@ def _return_impact(self, imp_mat_gen, save_mat): """ if save_mat: imp_mat = self.stitch_impact_matrix(imp_mat_gen) - at_event, eai_exp, aai_agg = \ - self.risk_metrics(imp_mat, self.hazard.frequency) + at_event, eai_exp, aai_agg = self.risk_metrics( + imp_mat, self.hazard.frequency + ) else: imp_mat = None at_event, eai_exp, aai_agg = self.stitch_risk_metrics(imp_mat_gen) @@ -214,16 +238,18 @@ def _return_empty(self, save_mat): eai_exp = np.zeros(self.n_exp_pnt) aai_agg = 0.0 if save_mat: - imp_mat = sparse.csr_matrix(( - self.n_events, self.n_exp_pnt), dtype=np.float64 - ) + imp_mat = sparse.csr_matrix( + (self.n_events, self.n_exp_pnt), dtype=np.float64 + ) else: imp_mat = None return Impact.from_eih( self.exposures, self.hazard, at_event, eai_exp, aai_agg, imp_mat ) - def minimal_exp_gdf(self, impf_col, assign_centroids, ignore_cover, ignore_deductible): + def minimal_exp_gdf( + self, impf_col, assign_centroids, ignore_cover, ignore_deductible + ): """Get minimal exposures geodataframe for impact computation Parameters @@ -248,29 +274,36 @@ def minimal_exp_gdf(self, impf_col, assign_centroids, ignore_cover, ignore_deduc if 
assign_centroids: self.exposures.assign_centroids(self.hazard, overwrite=True) elif self.hazard.centr_exp_col not in self.exposures.gdf.columns: - raise ValueError("'assign_centroids' is set to 'False' but no centroids are assigned" - f" for the given hazard type ({self.hazard.haz_type})." - " Run 'exposures.assign_centroids()' beforehand or set" - " 'assign_centroids' to 'True'") + raise ValueError( + "'assign_centroids' is set to 'False' but no centroids are assigned" + f" for the given hazard type ({self.hazard.haz_type})." + " Run 'exposures.assign_centroids()' beforehand or set" + " 'assign_centroids' to 'True'" + ) mask = ( - (self.exposures.gdf['value'].values == self.exposures.gdf['value'].values)# value != NaN - & (self.exposures.gdf['value'].values != 0) # value != 0 - & (self.exposures.gdf[self.hazard.centr_exp_col].values >= 0) # centroid assigned + ( + self.exposures.gdf["value"].values == self.exposures.gdf["value"].values + ) # value != NaN + & (self.exposures.gdf["value"].values != 0) # value != 0 + & ( + self.exposures.gdf[self.hazard.centr_exp_col].values >= 0 + ) # centroid assigned ) - columns = ['value', impf_col, self.hazard.centr_exp_col] - if not ignore_cover and 'cover' in self.exposures.gdf: - columns.append('cover') - if not ignore_deductible and 'deductible' in self.exposures.gdf: - columns.append('deductible') + columns = ["value", impf_col, self.hazard.centr_exp_col] + if not ignore_cover and "cover" in self.exposures.gdf: + columns.append("cover") + if not ignore_deductible and "deductible" in self.exposures.gdf: + columns.append("deductible") exp_gdf = gpd.GeoDataFrame( - {col: self.exposures.gdf[col].values[mask] - for col in columns}, - ) + {col: self.exposures.gdf[col].values[mask] for col in columns}, + ) if exp_gdf.size == 0: LOGGER.warning("No exposures with value >0 in the vicinity of the hazard.") - self._orig_exp_idx = mask.nonzero()[0] # update index of kept exposures points in exp_gdf - # within the full exposures + self._orig_exp_idx = mask.nonzero()[ + 0 + ] # update index of kept exposures points in exp_gdf + # within the full exposures return exp_gdf def imp_mat_gen(self, exp_gdf, impf_col): @@ -302,9 +335,9 @@ def imp_mat_gen(self, exp_gdf, impf_col): """ def _chunk_exp_idx(haz_size, idx_exp_impf): - ''' + """ Chunk computations in sizes that roughly fit into memory - ''' + """ max_size = CONFIG.max_matrix_size.int() if haz_size > max_size: raise ValueError( @@ -315,17 +348,12 @@ def _chunk_exp_idx(haz_size, idx_exp_impf): return np.array_split(idx_exp_impf, n_chunks) for impf_id in exp_gdf[impf_col].dropna().unique(): - impf = self.impfset.get_func( - haz_type=self.hazard.haz_type, fun_id=impf_id - ) + impf = self.impfset.get_func(haz_type=self.hazard.haz_type, fun_id=impf_id) idx_exp_impf = (exp_gdf[impf_col].values == impf_id).nonzero()[0] for exp_idx in _chunk_exp_idx(self.hazard.size, idx_exp_impf): - exp_values = exp_gdf['value'].values[exp_idx] + exp_values = exp_gdf["value"].values[exp_idx] cent_idx = exp_gdf[self.hazard.centr_exp_col].values[exp_idx] - yield ( - self.impact_matrix(exp_values, cent_idx, impf), - exp_idx - ) + yield (self.impact_matrix(exp_values, cent_idx, impf), exp_idx) def insured_mat_gen(self, imp_mat_gen, exp_gdf, impf_col): """ @@ -359,14 +387,14 @@ def insured_mat_gen(self, imp_mat_gen, exp_gdf, impf_col): for mat, exp_idx in imp_mat_gen: impf_id = exp_gdf[impf_col][exp_idx[0]] cent_idx = exp_gdf[self.hazard.centr_exp_col].values[exp_idx] - impf = self.impfset.get_func( - haz_type=self.hazard.haz_type, - 
fun_id=impf_id) - if 'deductible' in exp_gdf: - deductible = exp_gdf['deductible'].values[exp_idx] - mat = self.apply_deductible_to_mat(mat, deductible, self.hazard, cent_idx, impf) - if 'cover' in exp_gdf: - cover = exp_gdf['cover'].values[exp_idx] + impf = self.impfset.get_func(haz_type=self.hazard.haz_type, fun_id=impf_id) + if "deductible" in exp_gdf: + deductible = exp_gdf["deductible"].values[exp_idx] + mat = self.apply_deductible_to_mat( + mat, deductible, self.hazard, cent_idx, impf + ) + if "cover" in exp_gdf: + cover = exp_gdf["cover"].values[exp_idx] mat = self.apply_cover_to_mat(mat, cover) yield (mat, exp_idx) @@ -392,11 +420,11 @@ def impact_matrix(self, exp_values, cent_idx, impf): Impact per event (rows) per exposure point (columns) """ n_exp_pnt = len(cent_idx) # implicitly checks in matrix assignement whether - # len(cent_idx) == len(exp_values) + # len(cent_idx) == len(exp_values) mdr = self.hazard.get_mdr(cent_idx, impf) exp_values_csr = sparse.csr_matrix( # vector 1 x exp_size - (exp_values, np.arange(n_exp_pnt), [0, n_exp_pnt]), - shape=(1, n_exp_pnt)) + (exp_values, np.arange(n_exp_pnt), [0, n_exp_pnt]), shape=(1, n_exp_pnt) + ) fract = self.hazard._get_fraction(cent_idx) # pylint: disable=protected-access if fract is None: return mdr.multiply(exp_values_csr) @@ -409,13 +437,15 @@ def stitch_impact_matrix(self, imp_mat_gen): """ # rows: events index # cols: exposure point index within self.exposures - data, row, col = np.hstack([ - (mat.data, mat.nonzero()[0], self._orig_exp_idx[idx][mat.nonzero()[1]]) - for mat, idx in imp_mat_gen - ]) + data, row, col = np.hstack( + [ + (mat.data, mat.nonzero()[0], self._orig_exp_idx[idx][mat.nonzero()[1]]) + for mat, idx in imp_mat_gen + ] + ) return sparse.csr_matrix( (data, (row, col)), shape=(self.n_events, self.n_exp_pnt) - ) + ) def stitch_risk_metrics(self, imp_mat_gen): """Compute the impact metrics from an impact sub-matrix generator @@ -442,8 +472,9 @@ def stitch_risk_metrics(self, imp_mat_gen): eai_exp = np.zeros(self.n_exp_pnt) for sub_imp_mat, idx in imp_mat_gen: at_event += self.at_event_from_mat(sub_imp_mat) - eai_exp[self._orig_exp_idx[idx]] += \ - self.eai_exp_from_mat(sub_imp_mat, self.hazard.frequency) + eai_exp[self._orig_exp_idx[idx]] += self.eai_exp_from_mat( + sub_imp_mat, self.hazard.frequency + ) aai_agg = self.aai_agg_from_eai_exp(eai_exp) return at_event, eai_exp, aai_agg @@ -523,9 +554,9 @@ def eai_exp_from_mat(mat, freq): expected impact within a period of 1/frequency_unit for each exposure """ n_events = freq.size - freq_csr = sparse.csr_matrix( #vector n_events x 1 - (freq, np.zeros(n_events), np.arange(n_events + 1)), - shape=(n_events, 1)) + freq_csr = sparse.csr_matrix( # vector n_events x 1 + (freq, np.zeros(n_events), np.arange(n_events + 1)), shape=(n_events, 1) + ) return mat.multiply(freq_csr).sum(axis=0).A1 @staticmethod diff --git a/climada/engine/impact_data.py b/climada/engine/impact_data.py index 98ca074e5..21b6a2851 100644 --- a/climada/engine/impact_data.py +++ b/climada/engine/impact_data.py @@ -18,156 +18,175 @@ Functions to merge EMDAT damages to hazard events. 
""" + import logging import pickle from datetime import datetime from pathlib import Path -import pandas as pd + import numpy as np +import pandas as pd from cartopy.io import shapereader -from climada.util.finance import gdp -from climada.util.constants import DEF_CRS import climada.util.coordinates as u_coord from climada.engine import Impact +from climada.util.constants import DEF_CRS +from climada.util.finance import gdp LOGGER = logging.getLogger(__name__) -PERIL_SUBTYPE_MATCH_DICT = dict(TC=['Tropical cyclone'], - FL=['Coastal flood'], - EQ=['Ground movement', 'Earthquake'], - RF=['Riverine flood', 'Flood'], - WS=['Extra-tropical storm', 'Storm'], - DR=['Drought'], - LS=['Landslide'], - BF=['Forest fire', 'Wildfire', 'Land fire (Brush, Bush, Pastur'] - ) - -PERIL_TYPE_MATCH_DICT = dict(DR=['Drought'], - EQ=['Earthquake'], - FL=['Flood'], - LS=['Landslide'], - VQ=['Volcanic activity'], - BF=['Wildfire'], - HW=['Extreme temperature'] - ) - -VARNAMES_EMDAT = \ - {2018: {'Dis No': 'Disaster No.', - 'Disaster Type': 'Disaster type', - 'Disaster Subtype': 'Disaster subtype', - 'Event Name': 'Disaster name', - 'Country': 'Country', - 'ISO': 'ISO', - 'Location': 'Location', - 'Associated Dis': 'Associated disaster', - 'Associated Dis2': 'Associated disaster2', - 'Dis Mag Value': 'Magnitude value', - 'Dis Mag Scale': 'Magnitude scale', - 'Latitude': 'Latitude', - 'Longitude': 'Longitude', - 'Total Deaths': 'Total deaths', - 'Total Affected': 'Total affected', - "Insured Damages ('000 US$)": "Insured losses ('000 US$)", - "Total Damages ('000 US$)": "Total damage ('000 US$)"}, - 2020: {'Dis No': 'Dis No', - 'Year': 'Year', - 'Seq': 'Seq', - 'Disaster Group': 'Disaster Group', - 'Disaster Subgroup': 'Disaster Subgroup', - 'Disaster Type': 'Disaster Type', - 'Disaster Subtype': 'Disaster Subtype', - 'Disaster Subsubtype': 'Disaster Subsubtype', - 'Event Name': 'Event Name', - 'Entry Criteria': 'Entry Criteria', - 'Country': 'Country', - 'ISO': 'ISO', - 'Region': 'Region', - 'Continent': 'Continent', - 'Location': 'Location', - 'Origin': 'Origin', - 'Associated Dis': 'Associated Dis', - 'Associated Dis2': 'Associated Dis2', - 'OFDA Response': 'OFDA Response', - 'Appeal': 'Appeal', - 'Declaration': 'Declaration', - 'Aid Contribution': 'Aid Contribution', - 'Dis Mag Value': 'Dis Mag Value', - 'Dis Mag Scale': 'Dis Mag Scale', - 'Latitude': 'Latitude', - 'Longitude': 'Longitude', - 'Local Time': 'Local Time', - 'River Basin': 'River Basin', - 'Start Year': 'Start Year', - 'Start Month': 'Start Month', - 'Start Day': 'Start Day', - 'End Year': 'End Year', - 'End Month': 'End Month', - 'End Day': 'End Day', - 'Total Deaths': 'Total Deaths', - 'No Injured': 'No Injured', - 'No Affected': 'No Affected', - 'No Homeless': 'No Homeless', - 'Total Affected': 'Total Affected', - "Reconstruction Costs ('000 US$)": "Reconstruction Costs ('000 US$)", - "Insured Damages ('000 US$)": "Insured Damages ('000 US$)", - "Total Damages ('000 US$)": "Total Damages ('000 US$)", - 'CPI': 'CPI'}, - 2023: {'Dis No': 'Dis No', - 'Year': 'Year', - 'Seq': 'Seq', - 'Glide': 'Glide', - 'Disaster Group': 'Disaster Group', - 'Disaster Subgroup': 'Disaster Subgroup', - 'Disaster Type': 'Disaster Type', - 'Disaster Subtype': 'Disaster Subtype', - 'Disaster Subsubtype': 'Disaster Subsubtype', - 'Event Name': 'Event Name', - 'Country': 'Country', - 'ISO': 'ISO', - 'Region': 'Region', - 'Continent': 'Continent', - 'Location': 'Location', - 'Origin': 'Origin', - 'Associated Dis': 'Associated Dis', - 'Associated Dis2': 'Associated Dis2', 
- 'OFDA Response': 'OFDA Response', - 'Appeal': 'Appeal', - 'Declaration': 'Declaration', - "AID Contribution ('000 US$)": "AID Contribution ('000 US$)", - 'Dis Mag Value': 'Dis Mag Value', - 'Dis Mag Scale': 'Dis Mag Scale', - 'Latitude': 'Latitude', - 'Longitude': 'Longitude', - 'Local Time': 'Local Time', - 'River Basin': 'River Basin', - 'Start Year': 'Start Year', - 'Start Month': 'Start Month', - 'Start Day': 'Start Day', - 'End Year': 'End Year', - 'End Month': 'End Month', - 'End Day': 'End Day', - 'Total Deaths': 'Total Deaths', - 'No Injured': 'No Injured', - 'No Affected': 'No Affected', - 'No Homeless': 'No Homeless', - 'Total Affected': 'Total Affected', - "Reconstruction Costs ('000 US$)": "Reconstruction Costs ('000 US$)", - "Reconstruction Costs, Adjusted ('000 US$)": "Reconstruction Costs, Adjusted ('000 US$)", - "Insured Damages ('000 US$)": "Insured Damages ('000 US$)", - "Insured Damages, Adjusted ('000 US$)": "Insured Damages, Adjusted ('000 US$)", - "Total Damages ('000 US$)": "Total Damages ('000 US$)", - "Total Damages, Adjusted ('000 US$)": "Total Damages, Adjusted ('000 US$)", - 'CPI': 'CPI', - 'Adm Level': 'Adm Level', - 'Admin1 Code': 'Admin1 Code', - 'Admin2 Code': 'Admin2 Code', - 'Geo Locations': 'Geo Locations'}} - - -def assign_hazard_to_emdat(certainty_level, intensity_path_haz, names_path_haz, - reg_id_path_haz, date_path_haz, emdat_data, - start_time, end_time, keep_checks=False): +PERIL_SUBTYPE_MATCH_DICT = dict( + TC=["Tropical cyclone"], + FL=["Coastal flood"], + EQ=["Ground movement", "Earthquake"], + RF=["Riverine flood", "Flood"], + WS=["Extra-tropical storm", "Storm"], + DR=["Drought"], + LS=["Landslide"], + BF=["Forest fire", "Wildfire", "Land fire (Brush, Bush, Pastur"], +) + +PERIL_TYPE_MATCH_DICT = dict( + DR=["Drought"], + EQ=["Earthquake"], + FL=["Flood"], + LS=["Landslide"], + VQ=["Volcanic activity"], + BF=["Wildfire"], + HW=["Extreme temperature"], +) + +VARNAMES_EMDAT = { + 2018: { + "Dis No": "Disaster No.", + "Disaster Type": "Disaster type", + "Disaster Subtype": "Disaster subtype", + "Event Name": "Disaster name", + "Country": "Country", + "ISO": "ISO", + "Location": "Location", + "Associated Dis": "Associated disaster", + "Associated Dis2": "Associated disaster2", + "Dis Mag Value": "Magnitude value", + "Dis Mag Scale": "Magnitude scale", + "Latitude": "Latitude", + "Longitude": "Longitude", + "Total Deaths": "Total deaths", + "Total Affected": "Total affected", + "Insured Damages ('000 US$)": "Insured losses ('000 US$)", + "Total Damages ('000 US$)": "Total damage ('000 US$)", + }, + 2020: { + "Dis No": "Dis No", + "Year": "Year", + "Seq": "Seq", + "Disaster Group": "Disaster Group", + "Disaster Subgroup": "Disaster Subgroup", + "Disaster Type": "Disaster Type", + "Disaster Subtype": "Disaster Subtype", + "Disaster Subsubtype": "Disaster Subsubtype", + "Event Name": "Event Name", + "Entry Criteria": "Entry Criteria", + "Country": "Country", + "ISO": "ISO", + "Region": "Region", + "Continent": "Continent", + "Location": "Location", + "Origin": "Origin", + "Associated Dis": "Associated Dis", + "Associated Dis2": "Associated Dis2", + "OFDA Response": "OFDA Response", + "Appeal": "Appeal", + "Declaration": "Declaration", + "Aid Contribution": "Aid Contribution", + "Dis Mag Value": "Dis Mag Value", + "Dis Mag Scale": "Dis Mag Scale", + "Latitude": "Latitude", + "Longitude": "Longitude", + "Local Time": "Local Time", + "River Basin": "River Basin", + "Start Year": "Start Year", + "Start Month": "Start Month", + "Start Day": "Start Day", 
+ "End Year": "End Year", + "End Month": "End Month", + "End Day": "End Day", + "Total Deaths": "Total Deaths", + "No Injured": "No Injured", + "No Affected": "No Affected", + "No Homeless": "No Homeless", + "Total Affected": "Total Affected", + "Reconstruction Costs ('000 US$)": "Reconstruction Costs ('000 US$)", + "Insured Damages ('000 US$)": "Insured Damages ('000 US$)", + "Total Damages ('000 US$)": "Total Damages ('000 US$)", + "CPI": "CPI", + }, + 2023: { + "Dis No": "Dis No", + "Year": "Year", + "Seq": "Seq", + "Glide": "Glide", + "Disaster Group": "Disaster Group", + "Disaster Subgroup": "Disaster Subgroup", + "Disaster Type": "Disaster Type", + "Disaster Subtype": "Disaster Subtype", + "Disaster Subsubtype": "Disaster Subsubtype", + "Event Name": "Event Name", + "Country": "Country", + "ISO": "ISO", + "Region": "Region", + "Continent": "Continent", + "Location": "Location", + "Origin": "Origin", + "Associated Dis": "Associated Dis", + "Associated Dis2": "Associated Dis2", + "OFDA Response": "OFDA Response", + "Appeal": "Appeal", + "Declaration": "Declaration", + "AID Contribution ('000 US$)": "AID Contribution ('000 US$)", + "Dis Mag Value": "Dis Mag Value", + "Dis Mag Scale": "Dis Mag Scale", + "Latitude": "Latitude", + "Longitude": "Longitude", + "Local Time": "Local Time", + "River Basin": "River Basin", + "Start Year": "Start Year", + "Start Month": "Start Month", + "Start Day": "Start Day", + "End Year": "End Year", + "End Month": "End Month", + "End Day": "End Day", + "Total Deaths": "Total Deaths", + "No Injured": "No Injured", + "No Affected": "No Affected", + "No Homeless": "No Homeless", + "Total Affected": "Total Affected", + "Reconstruction Costs ('000 US$)": "Reconstruction Costs ('000 US$)", + "Reconstruction Costs, Adjusted ('000 US$)": "Reconstruction Costs, Adjusted ('000 US$)", + "Insured Damages ('000 US$)": "Insured Damages ('000 US$)", + "Insured Damages, Adjusted ('000 US$)": "Insured Damages, Adjusted ('000 US$)", + "Total Damages ('000 US$)": "Total Damages ('000 US$)", + "Total Damages, Adjusted ('000 US$)": "Total Damages, Adjusted ('000 US$)", + "CPI": "CPI", + "Adm Level": "Adm Level", + "Admin1 Code": "Admin1 Code", + "Admin2 Code": "Admin2 Code", + "Geo Locations": "Geo Locations", + }, +} + + +def assign_hazard_to_emdat( + certainty_level, + intensity_path_haz, + names_path_haz, + reg_id_path_haz, + date_path_haz, + emdat_data, + start_time, + end_time, + keep_checks=False, +): """assign_hazard_to_emdat: link EMdat event to hazard Parameters @@ -196,19 +215,24 @@ def assign_hazard_to_emdat(certainty_level, intensity_path_haz, names_path_haz, pd.dataframe with EMdat entries linked to a hazard """ # check valid certainty level - certainty_levels = ['high', 'low'] + certainty_levels = ["high", "low"] if certainty_level not in certainty_levels: - raise ValueError("Invalid certainty level. Expected one of: %s" % certainty_levels) + raise ValueError( + "Invalid certainty level. Expected one of: %s" % certainty_levels + ) # prepare hazard set print("Start preparing hazard set") - hit_countries = hit_country_per_hazard(intensity_path_haz, names_path_haz, - reg_id_path_haz, date_path_haz) + hit_countries = hit_country_per_hazard( + intensity_path_haz, names_path_haz, reg_id_path_haz, date_path_haz + ) # prepare damage set # adjust emdat_data to the path!! 
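# A toy illustration (not part of the patch, dates are assumed example values) of the
# delta_t window applied by the emdat_possible_hit calls below: for a matching country,
# a track counts as a possible hit when the EM-DAT start date is no earlier than the
# track start and less than delta_t days after it.
from datetime import datetime

em_start = datetime(2017, 9, 6).toordinal()     # EM-DAT entry start (example)
track_start = datetime(2017, 9, 4).toordinal()  # hazard track start (example)
delta_t = 5
possible_hit = 0 <= (em_start - track_start) < delta_t  # True: 2 days apart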
print("Start preparing damage set") - lookup = create_lookup(emdat_data, start_time, end_time, disaster_subtype='Tropical cyclone') + lookup = create_lookup( + emdat_data, start_time, end_time, disaster_subtype="Tropical cyclone" + ) # calculate possible hits print("Calculate possible hits") hit5 = emdat_possible_hit(lookup=lookup, hit_countries=hit_countries, delta_t=5) @@ -229,49 +253,120 @@ def assign_hazard_to_emdat(certainty_level, intensity_path_haz, names_path_haz, # assign only tracks with high certainty print("Assign tracks") - if certainty_level == 'high': - lookup = assign_track_to_em(lookup=lookup, possible_tracks_1=hit10_match, - possible_tracks_2=hit50_match, level=1) - lookup = assign_track_to_em(lookup=lookup, possible_tracks_1=hit15_match, - possible_tracks_2=hit50_match, level=2) - lookup = assign_track_to_em(lookup=lookup, possible_tracks_1=hit25_match, - possible_tracks_2=hit50_match, level=3) - lookup = assign_track_to_em(lookup=lookup, possible_tracks_1=hit10_match, - possible_tracks_2=hit25_match, level=4) - lookup = assign_track_to_em(lookup=lookup, possible_tracks_1=hit15_match, - possible_tracks_2=hit25_match, level=5) + if certainty_level == "high": + lookup = assign_track_to_em( + lookup=lookup, + possible_tracks_1=hit10_match, + possible_tracks_2=hit50_match, + level=1, + ) + lookup = assign_track_to_em( + lookup=lookup, + possible_tracks_1=hit15_match, + possible_tracks_2=hit50_match, + level=2, + ) + lookup = assign_track_to_em( + lookup=lookup, + possible_tracks_1=hit25_match, + possible_tracks_2=hit50_match, + level=3, + ) + lookup = assign_track_to_em( + lookup=lookup, + possible_tracks_1=hit10_match, + possible_tracks_2=hit25_match, + level=4, + ) + lookup = assign_track_to_em( + lookup=lookup, + possible_tracks_1=hit15_match, + possible_tracks_2=hit25_match, + level=5, + ) # assign all tracks - elif certainty_level == 'low': - lookup = assign_track_to_em(lookup=lookup, possible_tracks_1=hit5_match, - possible_tracks_2=hit50_match, level=1) - lookup = assign_track_to_em(lookup=lookup, possible_tracks_1=hit10_match, - possible_tracks_2=hit50_match, level=2) - lookup = assign_track_to_em(lookup=lookup, possible_tracks_1=hit15_match, - possible_tracks_2=hit50_match, level=3) - lookup = assign_track_to_em(lookup=lookup, possible_tracks_1=hit5_match, - possible_tracks_2=hit25_match, level=4) - lookup = assign_track_to_em(lookup=lookup, possible_tracks_1=hit10_match, - possible_tracks_2=hit25_match, level=5) - lookup = assign_track_to_em(lookup=lookup, possible_tracks_1=hit15_match, - possible_tracks_2=hit25_match, level=6) - lookup = assign_track_to_em(lookup=lookup, possible_tracks_1=hit5_match, - possible_tracks_2=hit15_match, level=7) - lookup = assign_track_to_em(lookup=lookup, possible_tracks_1=hit10_match, - possible_tracks_2=hit15_match, level=8) - lookup = assign_track_to_em(lookup=lookup, possible_tracks_1=hit5_match, - possible_tracks_2=hit10_match, level=9) - lookup = assign_track_to_em(lookup=lookup, possible_tracks_1=hit15_match, - possible_tracks_2=hit15_match, level=10) - lookup = assign_track_to_em(lookup=lookup, possible_tracks_1=hit10_match, - possible_tracks_2=hit10_match, level=11) - lookup = assign_track_to_em(lookup=lookup, possible_tracks_1=hit5_match, - possible_tracks_2=hit5_match, level=12) + elif certainty_level == "low": + lookup = assign_track_to_em( + lookup=lookup, + possible_tracks_1=hit5_match, + possible_tracks_2=hit50_match, + level=1, + ) + lookup = assign_track_to_em( + lookup=lookup, + possible_tracks_1=hit10_match, + 
possible_tracks_2=hit50_match, + level=2, + ) + lookup = assign_track_to_em( + lookup=lookup, + possible_tracks_1=hit15_match, + possible_tracks_2=hit50_match, + level=3, + ) + lookup = assign_track_to_em( + lookup=lookup, + possible_tracks_1=hit5_match, + possible_tracks_2=hit25_match, + level=4, + ) + lookup = assign_track_to_em( + lookup=lookup, + possible_tracks_1=hit10_match, + possible_tracks_2=hit25_match, + level=5, + ) + lookup = assign_track_to_em( + lookup=lookup, + possible_tracks_1=hit15_match, + possible_tracks_2=hit25_match, + level=6, + ) + lookup = assign_track_to_em( + lookup=lookup, + possible_tracks_1=hit5_match, + possible_tracks_2=hit15_match, + level=7, + ) + lookup = assign_track_to_em( + lookup=lookup, + possible_tracks_1=hit10_match, + possible_tracks_2=hit15_match, + level=8, + ) + lookup = assign_track_to_em( + lookup=lookup, + possible_tracks_1=hit5_match, + possible_tracks_2=hit10_match, + level=9, + ) + lookup = assign_track_to_em( + lookup=lookup, + possible_tracks_1=hit15_match, + possible_tracks_2=hit15_match, + level=10, + ) + lookup = assign_track_to_em( + lookup=lookup, + possible_tracks_1=hit10_match, + possible_tracks_2=hit10_match, + level=11, + ) + lookup = assign_track_to_em( + lookup=lookup, + possible_tracks_1=hit5_match, + possible_tracks_2=hit5_match, + level=12, + ) if not keep_checks: - lookup = lookup.drop(['Date_start_EM_ordinal', 'possible_track', - 'possible_track_all'], axis=1) - lookup.groupby('allocation_level').count() - print('(%d/%s) tracks allocated' % ( - len(lookup[lookup.allocation_level.notnull()]), len(lookup))) + lookup = lookup.drop( + ["Date_start_EM_ordinal", "possible_track", "possible_track_all"], axis=1 + ) + lookup.groupby("allocation_level").count() + print( + "(%d/%s) tracks allocated" + % (len(lookup[lookup.allocation_level.notnull()]), len(lookup)) + ) return lookup @@ -294,19 +389,19 @@ def hit_country_per_hazard(intensity_path, names_path, reg_id_path, date_path): ------- pd.DataFrame with all hit countries per hazard """ - with open(intensity_path, 'rb') as filef: + with open(intensity_path, "rb") as filef: inten = pickle.load(filef) - with open(names_path, 'rb') as filef: + with open(names_path, "rb") as filef: names = pickle.load(filef) - with open(reg_id_path, 'rb') as filef: + with open(reg_id_path, "rb") as filef: reg_id = pickle.load(filef) - with open(date_path, 'rb') as filef: + with open(date_path, "rb") as filef: date = pickle.load(filef) # loop over the tracks (over the rows of the intensity matrix) all_hits = [] for track in range(0, len(names)): # select track - tc_track = inten[track, ] + tc_track = inten[track,] # select only indices that are not zero hits = tc_track.nonzero()[1] # get the country of these indices and remove dublicates @@ -315,7 +410,7 @@ def hit_country_per_hazard(intensity_path, names_path, reg_id_path, date_path): all_hits.append(hits) # create data frame for output - hit_countries = pd.DataFrame(columns=['hit_country', 'Date_start', 'ibtracsID']) + hit_countries = pd.DataFrame(columns=["hit_country", "Date_start", "ibtracsID"]) for track, _ in enumerate(names): # Check if track has hit any country else go to the next track if len(all_hits[track]) > 0: @@ -324,54 +419,68 @@ def hit_country_per_hazard(intensity_path, names_path, reg_id_path, date_path): # Hit country ISO ctry_iso = u_coord.country_to_iso(all_hits[track][hit], "alpha3") # create entry for each country a hazard has hit - hit_countries = hit_countries.append({'hit_country': ctry_iso, - 'Date_start': date[track], - 
'ibtracsID': names[track]}, - ignore_index=True) + hit_countries = hit_countries.append( + { + "hit_country": ctry_iso, + "Date_start": date[track], + "ibtracsID": names[track], + }, + ignore_index=True, + ) # retrun data frame with all hit countries per hazard return hit_countries -def create_lookup(emdat_data, start, end, disaster_subtype='Tropical cyclone'): +def create_lookup(emdat_data, start, end, disaster_subtype="Tropical cyclone"): """create_lookup: prepare a lookup table of EMdat events to which hazards can be assigned - Parameters - ---------- - emdat_data: pd.DataFrame - with EMdat data - start : str - start date of events to be assigned 'yyyy-mm-dd' - end : str - end date of events to be assigned 'yyyy-mm-dd' - disaster_subtype : str - EMdat disaster subtype - - Returns - ------- - pd.DataFrame - """ - data = emdat_data[emdat_data['Disaster_subtype'] == disaster_subtype] - lookup = pd.DataFrame(columns=['hit_country', 'Date_start_EM', - 'Date_start_EM_ordinal', 'Disaster_name', - 'EM_ID', 'ibtracsID', 'allocation_level', - 'possible_track', 'possible_track_all']) - lookup['hit_country'] = data['ISO'] - lookup['Date_start_EM'] = data['Date_start_clean'] - lookup['Disaster_name'] = data['Disaster_name'] - lookup['EM_ID'] = data['Disaster_No'] + Parameters + ---------- + emdat_data: pd.DataFrame + with EMdat data + start : str + start date of events to be assigned 'yyyy-mm-dd' + end : str + end date of events to be assigned 'yyyy-mm-dd' + disaster_subtype : str + EMdat disaster subtype + + Returns + ------- + pd.DataFrame + """ + data = emdat_data[emdat_data["Disaster_subtype"] == disaster_subtype] + lookup = pd.DataFrame( + columns=[ + "hit_country", + "Date_start_EM", + "Date_start_EM_ordinal", + "Disaster_name", + "EM_ID", + "ibtracsID", + "allocation_level", + "possible_track", + "possible_track_all", + ] + ) + lookup["hit_country"] = data["ISO"] + lookup["Date_start_EM"] = data["Date_start_clean"] + lookup["Disaster_name"] = data["Disaster_name"] + lookup["EM_ID"] = data["Disaster_No"] lookup = lookup.reset_index(drop=True) # create ordinals - for i in range(0, len(data['Date_start_clean'].values)): - lookup['Date_start_EM_ordinal'][i] = datetime.toordinal( - datetime.strptime(lookup['Date_start_EM'].values[i], '%Y-%m-%d')) + for i in range(0, len(data["Date_start_clean"].values)): + lookup["Date_start_EM_ordinal"][i] = datetime.toordinal( + datetime.strptime(lookup["Date_start_EM"].values[i], "%Y-%m-%d") + ) # ordinals to numeric - lookup['Date_start_EM_ordinal'] = pd.to_numeric(lookup['Date_start_EM_ordinal']) + lookup["Date_start_EM_ordinal"] = pd.to_numeric(lookup["Date_start_EM_ordinal"]) # select time - emdat_start = datetime.toordinal(datetime.strptime(start, '%Y-%m-%d')) - emdat_end = datetime.toordinal(datetime.strptime(end, '%Y-%m-%d')) + emdat_start = datetime.toordinal(datetime.strptime(start, "%Y-%m-%d")) + emdat_end = datetime.toordinal(datetime.strptime(end, "%Y-%m-%d")) - lookup = lookup[lookup['Date_start_EM_ordinal'].values > emdat_start] - lookup = lookup[lookup['Date_start_EM_ordinal'].values < emdat_end] + lookup = lookup[lookup["Date_start_EM_ordinal"].values > emdat_start] + lookup = lookup[lookup["Date_start_EM_ordinal"].values < emdat_end] return lookup @@ -397,16 +506,20 @@ def emdat_possible_hit(lookup, hit_countries, delta_t): # tracks: processed IBtracks with info which track hit which country # delta_t: time difference of start of EMdat and IBrtacks possible_hit_all = [] - for i in range(0, len(lookup['EM_ID'].values)): + for i in range(0, 
len(lookup["EM_ID"].values)): possible_hit = [] country_tracks = hit_countries[ - hit_countries['hit_country'] == lookup['hit_country'].values[i]] - for j in range(0, len(country_tracks['Date_start'].values)): - if (lookup['Date_start_EM_ordinal'].values[i] - - country_tracks['Date_start'].values[j]) < \ - delta_t and (lookup['Date_start_EM_ordinal'].values[i] - - country_tracks['Date_start'].values[j]) >= 0: - possible_hit.append(country_tracks['ibtracsID'].values[j]) + hit_countries["hit_country"] == lookup["hit_country"].values[i] + ] + for j in range(0, len(country_tracks["Date_start"].values)): + if ( + lookup["Date_start_EM_ordinal"].values[i] + - country_tracks["Date_start"].values[j] + ) < delta_t and ( + lookup["Date_start_EM_ordinal"].values[i] + - country_tracks["Date_start"].values[j] + ) >= 0: + possible_hit.append(country_tracks["ibtracsID"].values[j]) possible_hit_all.append(possible_hit) return possible_hit_all @@ -416,27 +529,27 @@ def emdat_possible_hit(lookup, hit_countries, delta_t): def match_em_id(lookup, poss_hit): """function to check if EM_ID has been assigned already and combine possible hits - Parameters - ---------- - lookup : pd.dataframe - to relate EMdatID to hazard - poss_hit : list - with possible hits - - Returns - ------- - list - with all possible hits per EMdat ID - """ + Parameters + ---------- + lookup : pd.dataframe + to relate EMdatID to hazard + poss_hit : list + with possible hits + + Returns + ------- + list + with all possible hits per EMdat ID + """ possible_hit_all = [] - for i in range(0, len(lookup['EM_ID'].values)): + for i in range(0, len(lookup["EM_ID"].values)): possible_hit = [] # lookup without line i - #lookup_match = lookup.drop(i) + # lookup_match = lookup.drop(i) lookup_match = lookup # Loop over check if EM dat ID is the same - for i_match in range(0, len(lookup_match['EM_ID'].values)): - if lookup['EM_ID'].values[i] == lookup_match['EM_ID'].values[i_match]: + for i_match in range(0, len(lookup_match["EM_ID"].values)): + if lookup["EM_ID"].values[i] == lookup_match["EM_ID"].values[i_match]: possible_hit.append(poss_hit[i]) possible_hit_all.append(possible_hit) return possible_hit_all @@ -444,86 +557,102 @@ def match_em_id(lookup, poss_hit): def assign_track_to_em(lookup, possible_tracks_1, possible_tracks_2, level): """function to assign a hazard to an EMdat event - to get some confidene into the procedure, hazards get only assigned - if there is no other hazard occuring at a bigger time interval in that country - Thus a track of possible_tracks_1 gets only assigned if there are no other - tracks in possible_tracks_2. - The confidence can be expressed with a certainty level - - Parameters - ---------- - lookup : pd.DataFrame - to relate EMdatID to hazard - possible_tracks_1 : list - list of possible hits with smaller time horizon - possible_tracks_2 : list - list of possible hits with larger time horizon - level : int - level of confidence - - Returns - ------- - pd.DataFrame - lookup with assigend tracks and possible hits + to get some confidene into the procedure, hazards get only assigned + if there is no other hazard occuring at a bigger time interval in that country + Thus a track of possible_tracks_1 gets only assigned if there are no other + tracks in possible_tracks_2. 
+ The confidence can be expressed with a certainty level + + Parameters + ---------- + lookup : pd.DataFrame + to relate EMdatID to hazard + possible_tracks_1 : list + list of possible hits with smaller time horizon + possible_tracks_2 : list + list of possible hits with larger time horizon + level : int + level of confidence + + Returns + ------- + pd.DataFrame + lookup with assigend tracks and possible hits """ for i, _ in enumerate(possible_tracks_1): - if np.isnan(lookup['allocation_level'].values[i]): + if np.isnan(lookup["allocation_level"].values[i]): number_emdat_id = len(possible_tracks_1[i]) # print(number_emdat_id) for j in range(0, number_emdat_id): # check that number of possible track stays the same at given # time difference and that list is not empty - if len(possible_tracks_1[i][j]) == len(possible_tracks_2[i][j]) == 1 \ - and possible_tracks_1[i][j] != []: + if ( + len(possible_tracks_1[i][j]) == len(possible_tracks_2[i][j]) == 1 + and possible_tracks_1[i][j] != [] + ): # check that all tracks are the same - if all(possible_tracks_1[i][0] == possible_tracks_1[i][k] - for k in range(0, len(possible_tracks_1[i]))): + if all( + possible_tracks_1[i][0] == possible_tracks_1[i][k] + for k in range(0, len(possible_tracks_1[i])) + ): # check that track ID has not been assigned to that country already - ctry_lookup = lookup[lookup['hit_country'] - == lookup['hit_country'].values[i]] - if possible_tracks_1[i][0][0] not in ctry_lookup['ibtracsID'].values: - lookup['ibtracsID'].values[i] = possible_tracks_1[i][0][0] - lookup['allocation_level'].values[i] = level + ctry_lookup = lookup[ + lookup["hit_country"] == lookup["hit_country"].values[i] + ] + if ( + possible_tracks_1[i][0][0] + not in ctry_lookup["ibtracsID"].values + ): + lookup["ibtracsID"].values[i] = possible_tracks_1[i][0][0] + lookup["allocation_level"].values[i] = level elif possible_tracks_1[i][j] != []: - lookup['possible_track'].values[i] = possible_tracks_1[i] + lookup["possible_track"].values[i] = possible_tracks_1[i] else: - lookup['possible_track_all'].values[i] = possible_tracks_1[i] + lookup["possible_track_all"].values[i] = possible_tracks_1[i] return lookup def check_assigned_track(lookup, checkset): """compare lookup with assigned tracks to a set with checked sets - Parameters - ---------- - lookup: pd.DataFrame - dataframe to relate EMdatID to hazard - checkset: pd.DataFrame - dataframe with already checked hazards + Parameters + ---------- + lookup: pd.DataFrame + dataframe to relate EMdatID to hazard + checkset: pd.DataFrame + dataframe with already checked hazards - Returns - ------- - error scores + Returns + ------- + error scores """ # merge checkset and lookup - check = pd.merge(checkset, lookup[['hit_country', 'EM_ID', 'ibtracsID']], - on=['hit_country', 'EM_ID']) - check_size = len(check['ibtracsID'].values) + check = pd.merge( + checkset, + lookup[["hit_country", "EM_ID", "ibtracsID"]], + on=["hit_country", "EM_ID"], + ) + check_size = len(check["ibtracsID"].values) # not assigned values] - not_assigned = check['ibtracsID'].isnull().sum(axis=0) + not_assigned = check["ibtracsID"].isnull().sum(axis=0) # correct assigned values - correct = sum(check['ibtracsID'].values == check['IBtracsID_checked'].values) + correct = sum(check["ibtracsID"].values == check["IBtracsID_checked"].values) # wrongly assigned values - wrong = len(check['ibtracsID'].values) - not_assigned - correct - print('%.1f%% tracks assigned correctly, %.1f%% wrongly, %.1f%% not assigned' - % (correct / check_size * 100, - wrong / 
check_size * 100, - not_assigned / check_size * 100)) - - -def clean_emdat_df(emdat_file, countries=None, hazard=None, year_range=None, - target_version=None): + wrong = len(check["ibtracsID"].values) - not_assigned - correct + print( + "%.1f%% tracks assigned correctly, %.1f%% wrongly, %.1f%% not assigned" + % ( + correct / check_size * 100, + wrong / check_size * 100, + not_assigned / check_size * 100, + ) + ) + + +def clean_emdat_df( + emdat_file, countries=None, hazard=None, year_range=None, target_version=None +): """ Get a clean and standardized DataFrame from EM-DAT-CSV-file (1) load EM-DAT data from CSV to DataFrame and remove header/footer, @@ -563,7 +692,7 @@ def clean_emdat_df(emdat_file, countries=None, hazard=None, year_range=None, if isinstance(emdat_file, (str, Path)): df_emdat = pd.read_csv(emdat_file, encoding="ISO-8859-1", header=0) counter = 0 - while not ('Country' in df_emdat.columns and 'ISO' in df_emdat.columns): + while not ("Country" in df_emdat.columns and "ISO" in df_emdat.columns): counter += 1 df_emdat = pd.read_csv(emdat_file, encoding="ISO-8859-1", header=counter) if counter == 10: @@ -572,7 +701,7 @@ def clean_emdat_df(emdat_file, countries=None, hazard=None, year_range=None, elif isinstance(emdat_file, pd.DataFrame): df_emdat = emdat_file else: - raise TypeError('emdat_file needs to be str or DataFrame') + raise TypeError("emdat_file needs to be str or DataFrame") # drop rows with 9 or more NaN values (e.g. footer): df_emdat = df_emdat.dropna(thresh=9) @@ -580,26 +709,32 @@ def clean_emdat_df(emdat_file, countries=None, hazard=None, year_range=None, # (2.1) identify underlying EMDAT version of csv: version = None for vers in sorted(VARNAMES_EMDAT.keys()): - if len(df_emdat.columns) >= len(VARNAMES_EMDAT[vers]) and \ - all(item in list(df_emdat.columns) for item in VARNAMES_EMDAT[vers].values()): + if len(df_emdat.columns) >= len(VARNAMES_EMDAT[vers]) and all( + item in list(df_emdat.columns) for item in VARNAMES_EMDAT[vers].values() + ): version = vers if not version: - raise ValueError("the given emdat_file contains unexpected columns and cannot be" - " associated with any known EM-DAT file structure") + raise ValueError( + "the given emdat_file contains unexpected columns and cannot be" + " associated with any known EM-DAT file structure" + ) # (2.2) create new DataFrame df_data with column names as target version target_version = target_version or version - df_data = pd.DataFrame(index=df_emdat.index.values, - columns=VARNAMES_EMDAT[target_version].values()) - if 'Year' not in df_data.columns: # make sure column "Year" exists - df_data['Year'] = np.nan + df_data = pd.DataFrame( + index=df_emdat.index.values, columns=VARNAMES_EMDAT[target_version].values() + ) + if "Year" not in df_data.columns: # make sure column "Year" exists + df_data["Year"] = np.nan for _, col in enumerate(df_data.columns): # loop over columns if col in VARNAMES_EMDAT[version]: df_data[col] = df_emdat[VARNAMES_EMDAT[version][col]] elif col in df_emdat.columns: df_data[col] = df_emdat[col] - elif col == 'Year' and version <= 2018: + elif col == "Year" and version <= 2018: years_list = list() - for _, disaster_no in enumerate(df_emdat[VARNAMES_EMDAT[version]['Dis No']]): + for _, disaster_no in enumerate( + df_emdat[VARNAMES_EMDAT[version]["Dis No"]] + ): if isinstance(disaster_no, str): years_list.append(int(disaster_no[0:4])) else: @@ -613,33 +748,33 @@ def clean_emdat_df(emdat_file, countries=None, hazard=None, year_range=None, year_list = list() month_list = list() day_list = 
list() - for year in list(df_data['Year']): + for year in list(df_data["Year"]): if not np.isnan(year): - date_list.append(datetime.strptime(str(year), '%Y')) + date_list.append(datetime.strptime(str(year), "%Y")) else: - date_list.append(datetime.strptime(str('0001'), '%Y')) + date_list.append(datetime.strptime(str("0001"), "%Y")) boolean_warning = True - for idx, datestr in enumerate(list(df_emdat['Start date'])): + for idx, datestr in enumerate(list(df_emdat["Start date"])): try: - date_list[idx] = datetime.strptime(datestr[-7:], '%m/%Y') + date_list[idx] = datetime.strptime(datestr[-7:], "%m/%Y") except ValueError: if boolean_warning: - LOGGER.warning('EM_DAT CSV contains invalid time formats') + LOGGER.warning("EM_DAT CSV contains invalid time formats") boolean_warning = False try: - date_list[idx] = datetime.strptime(datestr, '%d/%m/%Y') + date_list[idx] = datetime.strptime(datestr, "%d/%m/%Y") except ValueError: if boolean_warning: - LOGGER.warning('EM_DAT CSV contains invalid time formats') + LOGGER.warning("EM_DAT CSV contains invalid time formats") boolean_warning = False day_list.append(date_list[idx].day) month_list.append(date_list[idx].month) year_list.append(date_list[idx].year) - df_data['Start Month'] = np.array(month_list, dtype='int') - df_data['Start Day'] = np.array(day_list, dtype='int') - df_data['Start Year'] = np.array(year_list, dtype='int') - for var in ['Disaster Subtype', 'Disaster Type', 'Country']: - df_data[VARNAMES_EMDAT[target_version][var]].fillna('None', inplace=True) + df_data["Start Month"] = np.array(month_list, dtype="int") + df_data["Start Day"] = np.array(day_list, dtype="int") + df_data["Start Year"] = np.array(year_list, dtype="int") + for var in ["Disaster Subtype", "Disaster Type", "Country"]: + df_data[VARNAMES_EMDAT[target_version][var]].fillna("None", inplace=True) # (3) Filter by countries, year range, and disaster type # (3.1) Countries: @@ -649,15 +784,17 @@ def clean_emdat_df(emdat_file, countries=None, hazard=None, year_range=None, for idx, country in enumerate(countries): # convert countries to iso3 alpha code: countries[idx] = u_coord.country_to_iso(country, "alpha3") - df_data = df_data[df_data['ISO'].isin(countries)].reset_index(drop=True) + df_data = df_data[df_data["ISO"].isin(countries)].reset_index(drop=True) # (3.2) Year range: if year_range: for idx in df_data.index: - if np.isnan(df_data.loc[0, 'Year']): - df_data.loc[0, 'Year'] = \ - df_data.loc[0, VARNAMES_EMDAT[target_version]['Start Year']] - df_data = df_data[(df_data['Year'] >= min(year_range)) & - (df_data['Year'] <= max(year_range))] + if np.isnan(df_data.loc[0, "Year"]): + df_data.loc[0, "Year"] = df_data.loc[ + 0, VARNAMES_EMDAT[target_version]["Start Year"] + ] + df_data = df_data[ + (df_data["Year"] >= min(year_range)) & (df_data["Year"] <= max(year_range)) + ] # (3.3) Disaster type: if hazard and isinstance(hazard, str): @@ -666,17 +803,29 @@ def clean_emdat_df(emdat_file, countries=None, hazard=None, year_range=None, disaster_types = list() disaster_subtypes = list() for idx, haz in enumerate(hazard): - if haz in df_data[VARNAMES_EMDAT[target_version]['Disaster Type']].unique(): + if haz in df_data[VARNAMES_EMDAT[target_version]["Disaster Type"]].unique(): disaster_types.append(haz) - if haz in df_data[VARNAMES_EMDAT[target_version]['Disaster Subtype']].unique(): + if ( + haz + in df_data[VARNAMES_EMDAT[target_version]["Disaster Subtype"]].unique() + ): disaster_subtypes.append(haz) if haz in PERIL_TYPE_MATCH_DICT.keys(): disaster_types += 
PERIL_TYPE_MATCH_DICT[haz] if haz in PERIL_SUBTYPE_MATCH_DICT.keys(): disaster_subtypes += PERIL_SUBTYPE_MATCH_DICT[haz] df_data = df_data[ - (df_data[VARNAMES_EMDAT[target_version]['Disaster Type']].isin(disaster_types)) | - (df_data[VARNAMES_EMDAT[target_version]['Disaster Subtype']].isin(disaster_subtypes))] + ( + df_data[VARNAMES_EMDAT[target_version]["Disaster Type"]].isin( + disaster_types + ) + ) + | ( + df_data[VARNAMES_EMDAT[target_version]["Disaster Subtype"]].isin( + disaster_subtypes + ) + ) + ] return df_data.reset_index(drop=True) @@ -709,13 +858,13 @@ def emdat_countries_by_hazard(emdat_file_csv, hazard=None, year_range=None): List of names of countries impacted by the disaster (sub-)types """ df_data = clean_emdat_df(emdat_file_csv, hazard=hazard, year_range=year_range) - countries_iso3a = list(df_data['ISO'].unique()) + countries_iso3a = list(df_data["ISO"].unique()) countries_names = list() for iso3a in countries_iso3a: try: countries_names.append(u_coord.country_to_iso(iso3a, "name")) except LookupError: - countries_names.append('NA') + countries_names.append("NA") return countries_iso3a, countries_names @@ -753,17 +902,26 @@ def scale_impact2refyear(impact_values, year_values, iso3a_values, reference_yea gdp_years[country][year] = gdp(country, year)[1] # loop through each value and apply scaling: for idx, val in enumerate(impact_values): - impact_values[idx] = val * gdp_ref[iso3a_values[idx]] / \ - gdp_years[iso3a_values[idx]][year_values[idx]] + impact_values[idx] = ( + val + * gdp_ref[iso3a_values[idx]] + / gdp_years[iso3a_values[idx]][year_values[idx]] + ) return list(impact_values) if not reference_year: return impact_values - raise ValueError('Invalid reference_year') - - -def emdat_impact_yearlysum(emdat_file_csv, countries=None, hazard=None, year_range=None, - reference_year=None, imp_str="Total Damages ('000 US$)", - version=None): + raise ValueError("Invalid reference_year") + + +def emdat_impact_yearlysum( + emdat_file_csv, + countries=None, + hazard=None, + year_range=None, + reference_year=None, + imp_str="Total Damages ('000 US$)", + version=None, +): """function to load EM-DAT data and sum impact per year Parameters @@ -798,38 +956,45 @@ def emdat_impact_yearlysum(emdat_file_csv, countries=None, hazard=None, year_ran """ version = version or max(VARNAMES_EMDAT.keys()) imp_str = VARNAMES_EMDAT[version][imp_str] - df_data = clean_emdat_df(emdat_file_csv, countries=countries, hazard=hazard, - year_range=year_range, target_version=version) - - df_data[imp_str + " scaled"] = scale_impact2refyear(df_data[imp_str].values, - df_data['Year'].values, - df_data['ISO'].values, - reference_year=reference_year) + df_data = clean_emdat_df( + emdat_file_csv, + countries=countries, + hazard=hazard, + year_range=year_range, + target_version=version, + ) + + df_data[imp_str + " scaled"] = scale_impact2refyear( + df_data[imp_str].values, + df_data["Year"].values, + df_data["ISO"].values, + reference_year=reference_year, + ) def country_df(df_data): - for data_iso in df_data['ISO'].unique(): + for data_iso in df_data["ISO"].unique(): country = u_coord.country_to_iso(data_iso, "alpha3") - df_country = df_data.loc[df_data['ISO'] == country] + df_country = df_data.loc[df_data["ISO"] == country] if not df_country.size: continue # Retrieve impact data for all years - all_years = np.arange(min(df_data['Year']), max(df_data['Year']) + 1) + all_years = np.arange(min(df_data["Year"]), max(df_data["Year"]) + 1) data_out = pd.DataFrame.from_records( [ ( year, - 
np.nansum(df_country[df_country['Year'].isin([year])][imp_str]), + np.nansum(df_country[df_country["Year"].isin([year])][imp_str]), np.nansum( - df_country[df_country['Year'].isin([year])][ + df_country[df_country["Year"].isin([year])][ imp_str + " scaled" ] ), ) for year in all_years ], - columns=["year", "impact", "impact_scaled"] + columns=["year", "impact", "impact_scaled"], ) # Add static data @@ -848,9 +1013,15 @@ def country_df(df_data): return out -def emdat_impact_event(emdat_file_csv, countries=None, hazard=None, year_range=None, - reference_year=None, imp_str="Total Damages ('000 US$)", - version=None): +def emdat_impact_event( + emdat_file_csv, + countries=None, + hazard=None, + year_range=None, + reference_year=None, + imp_str="Total Damages ('000 US$)", + version=None, +): """function to load EM-DAT data return impact per event Parameters @@ -892,29 +1063,45 @@ def emdat_impact_event(emdat_file_csv, countries=None, hazard=None, year_range=N """ version = version or max(VARNAMES_EMDAT.keys()) imp_str = VARNAMES_EMDAT[version][imp_str] - df_data = clean_emdat_df(emdat_file_csv, hazard=hazard, year_range=year_range, - countries=countries, target_version=version) - df_data['year'] = df_data['Year'] - df_data['reference_year'] = reference_year - df_data['impact'] = df_data[imp_str] - df_data['impact_scaled'] = scale_impact2refyear(df_data[imp_str].values, df_data['Year'].values, - df_data['ISO'].values, - reference_year=reference_year) - df_data['region_id'] = np.nan - for country in df_data['ISO'].unique(): + df_data = clean_emdat_df( + emdat_file_csv, + hazard=hazard, + year_range=year_range, + countries=countries, + target_version=version, + ) + df_data["year"] = df_data["Year"] + df_data["reference_year"] = reference_year + df_data["impact"] = df_data[imp_str] + df_data["impact_scaled"] = scale_impact2refyear( + df_data[imp_str].values, + df_data["Year"].values, + df_data["ISO"].values, + reference_year=reference_year, + ) + df_data["region_id"] = np.nan + for country in df_data["ISO"].unique(): try: - df_data.loc[df_data['ISO'] == country, 'region_id'] = \ + df_data.loc[df_data["ISO"] == country, "region_id"] = ( u_coord.country_to_iso(country, "numeric") + ) except LookupError: - LOGGER.warning('ISO3alpha code not found in iso_country: %s', country) - if '000 US' in imp_str: - df_data['impact'] *= 1e3 - df_data['impact_scaled'] *= 1e3 + LOGGER.warning("ISO3alpha code not found in iso_country: %s", country) + if "000 US" in imp_str: + df_data["impact"] *= 1e3 + df_data["impact_scaled"] *= 1e3 return df_data.reset_index(drop=True) -def emdat_to_impact(emdat_file_csv, hazard_type_climada, year_range=None, countries=None, - hazard_type_emdat=None, reference_year=None, imp_str="Total Damages"): +def emdat_to_impact( + emdat_file_csv, + hazard_type_climada, + year_range=None, + countries=None, + hazard_type_emdat=None, + reference_year=None, + imp_str="Total Damages", +): """function to load EM-DAT data return impact per event Parameters @@ -975,30 +1162,40 @@ def emdat_to_impact(emdat_file_csv, hazard_type_climada, year_range=None, countr impact_instance = Impact(haz_type=hazard_type_climada) # Load EM-DAT impact data by event: - em_data = emdat_impact_event(emdat_file_csv, countries=countries, hazard=hazard_type_emdat, - year_range=year_range, reference_year=reference_year, - imp_str=imp_str, version=version) + em_data = emdat_impact_event( + emdat_file_csv, + countries=countries, + hazard=hazard_type_emdat, + year_range=year_range, + reference_year=reference_year, + 
imp_str=imp_str, + version=version, + ) if isinstance(countries, str): countries = [countries] elif not countries: - countries = emdat_countries_by_hazard(emdat_file_csv, year_range=year_range, - hazard=hazard_type_emdat)[0] + countries = emdat_countries_by_hazard( + emdat_file_csv, year_range=year_range, hazard=hazard_type_emdat + )[0] if em_data.empty: return impact_instance, countries impact_instance.event_id = np.array(em_data.index, int) - impact_instance.event_name = list( - em_data[VARNAMES_EMDAT[version]['Dis No']]) + impact_instance.event_name = list(em_data[VARNAMES_EMDAT[version]["Dis No"]]) date_list = list() - for year in list(em_data['Year']): - date_list.append(datetime.toordinal(datetime.strptime(str(year), '%Y'))) - if 'Start Year' in em_data.columns and 'Start Month' in em_data.columns \ - and 'Start Day' in em_data.columns: + for year in list(em_data["Year"]): + date_list.append(datetime.toordinal(datetime.strptime(str(year), "%Y"))) + if ( + "Start Year" in em_data.columns + and "Start Month" in em_data.columns + and "Start Day" in em_data.columns + ): idx = 0 - for year, month, day in zip(em_data['Start Year'], em_data['Start Month'], - em_data['Start Day']): + for year, month, day in zip( + em_data["Start Year"], em_data["Start Month"], em_data["Start Day"] + ): if np.isnan(year): idx += 1 continue @@ -1006,8 +1203,9 @@ def emdat_to_impact(emdat_file_csv, hazard_type_climada, year_range=None, countr month = 1 if np.isnan(day): day = 1 - date_list[idx] = datetime.toordinal(datetime.strptime( - '%02i/%02i/%04i' % (day, month, year), '%d/%m/%Y')) + date_list[idx] = datetime.toordinal( + datetime.strptime("%02i/%02i/%04i" % (day, month, year), "%d/%m/%Y") + ) idx += 1 impact_instance.date = np.array(date_list, int) impact_instance.crs = DEF_CRS @@ -1018,18 +1216,20 @@ def emdat_to_impact(emdat_file_csv, hazard_type_climada, year_range=None, countr impact_instance.at_event = np.array(em_data["impact_scaled"]) impact_instance.at_event[np.isnan(impact_instance.at_event)] = 0 if not year_range: - year_range = [em_data['Year'].min(), em_data['Year'].max()] + year_range = [em_data["Year"].min(), em_data["Year"].max()] impact_instance.frequency = np.ones(em_data.shape[0]) / (1 + np.diff(year_range)) - impact_instance.frequency_unit = '1/year' + impact_instance.frequency_unit = "1/year" impact_instance.tot_value = 0 - impact_instance.aai_agg = np.nansum(impact_instance.at_event * impact_instance.frequency) - impact_instance.unit = 'USD' + impact_instance.aai_agg = np.nansum( + impact_instance.at_event * impact_instance.frequency + ) + impact_instance.unit = "USD" impact_instance.imp_mat = [] # init rough exposure with central point per country - shp = shapereader.natural_earth(resolution='110m', - category='cultural', - name='admin_0_countries') + shp = shapereader.natural_earth( + resolution="110m", category="cultural", name="admin_0_countries" + ) shp = shapereader.Reader(shp) countries_reg_id = list() countries_lat = list() @@ -1039,10 +1239,10 @@ def emdat_to_impact(emdat_file_csv, hazard_type_climada, year_range=None, countr try: cntry = u_coord.country_to_iso(cntry, "alpha3") except LookupError: - LOGGER.warning('Country not found in iso_country: %s', cntry) + LOGGER.warning("Country not found in iso_country: %s", cntry) cntry_boolean = False for rec in shp.records(): - if rec.attributes['ADM0_A3'].casefold() == cntry.casefold(): + if rec.attributes["ADM0_A3"].casefold() == cntry.casefold(): bbox = rec.geometry.bounds cntry_boolean = True break @@ -1056,13 +1256,15 @@ def 
emdat_to_impact(emdat_file_csv, hazard_type_climada, year_range=None, countr countries_reg_id.append(u_coord.country_to_iso(cntry, "numeric")) except LookupError: countries_reg_id.append(0) - df_tmp = em_data[em_data[VARNAMES_EMDAT[version]['ISO']].str.contains(cntry)] + df_tmp = em_data[em_data[VARNAMES_EMDAT[version]["ISO"]].str.contains(cntry)] if not reference_year: - impact_instance.eai_exp[idx] = sum(np.array(df_tmp["impact"]) * - impact_instance.frequency[0]) + impact_instance.eai_exp[idx] = sum( + np.array(df_tmp["impact"]) * impact_instance.frequency[0] + ) else: - impact_instance.eai_exp[idx] = sum(np.array(df_tmp["impact_scaled"]) * - impact_instance.frequency[0]) + impact_instance.eai_exp[idx] = sum( + np.array(df_tmp["impact_scaled"]) * impact_instance.frequency[0] + ) impact_instance.coord_exp = np.stack([countries_lat, countries_lon], axis=1) return impact_instance, countries diff --git a/climada/engine/test/test_cost_benefit.py b/climada/engine/test/test_cost_benefit.py index 095716fc9..ecf100f74 100644 --- a/climada/engine/test/test_cost_benefit.py +++ b/climada/engine/test/test_cost_benefit.py @@ -18,28 +18,35 @@ Test CostBenefit class. """ -from pathlib import Path + import copy import unittest +from pathlib import Path + import numpy as np -from climada.entity.entity_def import Entity +from climada.engine import ImpactCalc +from climada.engine.cost_benefit import ( + CostBenefit, + _norm_values, + risk_aai_agg, + risk_rp_100, + risk_rp_250, +) from climada.entity.disc_rates import DiscRates +from climada.entity.entity_def import Entity from climada.hazard.base import Hazard -from climada.engine.cost_benefit import CostBenefit, risk_aai_agg, \ - risk_rp_100, risk_rp_250, _norm_values -from climada.engine import ImpactCalc -from climada.util.constants import ENT_DEMO_FUTURE, ENT_DEMO_TODAY -from climada.util.api_client import Client - from climada.test import get_test_file +from climada.util.api_client import Client +from climada.util.constants import ENT_DEMO_FUTURE, ENT_DEMO_TODAY +ENT_TEST_MAT = get_test_file("demo_today", file_format="MAT-file") +HAZ_TEST_TC: Path = get_test_file("test_tc_florida") -ENT_TEST_MAT = get_test_file('demo_today', file_format='MAT-file') -HAZ_TEST_TC :Path = get_test_file('test_tc_florida') class TestSteps(unittest.TestCase): """Test intermediate steps""" + def test_calc_impact_measures_pass(self): """Test _calc_impact_measures against reference value""" self.assertTrue(HAZ_TEST_TC.is_file(), "{} is not a file".format(HAZ_TEST_TC)) @@ -48,16 +55,22 @@ def test_calc_impact_measures_pass(self): self.assertTrue(ENT_TEST_MAT.is_file(), "{} is not a file".format(ENT_TEST_MAT)) entity = Entity.from_mat(ENT_TEST_MAT) entity.check() - entity.measures._data['TC'] = entity.measures._data.pop('XX') - for meas in entity.measures.get_measure('TC'): - meas.haz_type = 'TC' + entity.measures._data["TC"] = entity.measures._data.pop("XX") + for meas in entity.measures.get_measure("TC"): + meas.haz_type = "TC" entity.check() entity.exposures.assign_centroids(hazard) cost_ben = CostBenefit() - cost_ben._calc_impact_measures(hazard, entity.exposures, entity.measures, - entity.impact_funcs, when='future', - risk_func=risk_aai_agg, save_imp=True) + cost_ben._calc_impact_measures( + hazard, + entity.exposures, + entity.measures, + entity.impact_funcs, + when="future", + risk_func=risk_aai_agg, + save_imp=True, + ) self.assertEqual(cost_ben.imp_meas_present, dict()) self.assertEqual(cost_ben.cost_ben_ratio, dict()) @@ -66,131 +79,250 @@ def 
test_calc_impact_measures_pass(self): self.assertEqual(cost_ben.present_year, 2016) self.assertEqual(cost_ben.future_year, 2030) - self.assertEqual(cost_ben.imp_meas_future['no measure']['cost'], (0, 0)) - self.assertAlmostEqual(cost_ben.imp_meas_future['no measure']['risk'], - 6.51220115756442e+09, places=3) - new_efc = cost_ben.imp_meas_future['no measure']['impact'].calc_freq_curve() + self.assertEqual(cost_ben.imp_meas_future["no measure"]["cost"], (0, 0)) + self.assertAlmostEqual( + cost_ben.imp_meas_future["no measure"]["risk"], + 6.51220115756442e09, + places=3, + ) + new_efc = cost_ben.imp_meas_future["no measure"]["impact"].calc_freq_curve() self.assertTrue( - np.allclose(new_efc.return_per, - cost_ben.imp_meas_future['no measure']['efc'].return_per)) + np.allclose( + new_efc.return_per, + cost_ben.imp_meas_future["no measure"]["efc"].return_per, + ) + ) self.assertTrue( - np.allclose(new_efc.impact, cost_ben.imp_meas_future['no measure']['efc'].impact)) + np.allclose( + new_efc.impact, cost_ben.imp_meas_future["no measure"]["efc"].impact + ) + ) self.assertEqual( - cost_ben.imp_meas_future['no measure']['impact'].at_event.nonzero()[0].size, - 841) - self.assertAlmostEqual(cost_ben.imp_meas_future['no measure']['impact'].at_event[14082], - 8.801682862431524e+06, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_future['no measure']['impact'].tot_value, - 6.570532945599105e+11, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_future['no measure']['impact'].aai_agg, - 6.51220115756442e+09, places=3) - - self.assertAlmostEqual(cost_ben.imp_meas_future['Mangroves']['cost'][0], - 1.3117683608515418e+09, places=3) - self.assertEqual(cost_ben.imp_meas_future['Mangroves']['cost'][1], 1) - self.assertAlmostEqual(cost_ben.imp_meas_future['Mangroves']['risk'], - 4.850407096284983e+09, places=3) - new_efc = cost_ben.imp_meas_future['Mangroves']['impact'].calc_freq_curve() + cost_ben.imp_meas_future["no measure"]["impact"].at_event.nonzero()[0].size, + 841, + ) + self.assertAlmostEqual( + cost_ben.imp_meas_future["no measure"]["impact"].at_event[14082], + 8.801682862431524e06, + places=3, + ) + self.assertAlmostEqual( + cost_ben.imp_meas_future["no measure"]["impact"].tot_value, + 6.570532945599105e11, + places=3, + ) + self.assertAlmostEqual( + cost_ben.imp_meas_future["no measure"]["impact"].aai_agg, + 6.51220115756442e09, + places=3, + ) + + self.assertAlmostEqual( + cost_ben.imp_meas_future["Mangroves"]["cost"][0], + 1.3117683608515418e09, + places=3, + ) + self.assertEqual(cost_ben.imp_meas_future["Mangroves"]["cost"][1], 1) + self.assertAlmostEqual( + cost_ben.imp_meas_future["Mangroves"]["risk"], + 4.850407096284983e09, + places=3, + ) + new_efc = cost_ben.imp_meas_future["Mangroves"]["impact"].calc_freq_curve() self.assertTrue( - np.allclose(new_efc.return_per, - cost_ben.imp_meas_future['Mangroves']['efc'].return_per)) + np.allclose( + new_efc.return_per, + cost_ben.imp_meas_future["Mangroves"]["efc"].return_per, + ) + ) self.assertTrue( - np.allclose(new_efc.impact, cost_ben.imp_meas_future['Mangroves']['efc'].impact)) + np.allclose( + new_efc.impact, cost_ben.imp_meas_future["Mangroves"]["efc"].impact + ) + ) self.assertEqual( - cost_ben.imp_meas_future['Mangroves']['impact'].at_event.nonzero()[0].size, - 665) - self.assertAlmostEqual(cost_ben.imp_meas_future['Mangroves']['impact'].at_event[13901], - 1.29576562770977e+09, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_future['Mangroves']['impact'].tot_value, - 6.570532945599105e+11, places=3) - 
self.assertAlmostEqual(cost_ben.imp_meas_future['Mangroves']['impact'].aai_agg, - 4.850407096284983e+09, places=3) - - self.assertAlmostEqual(cost_ben.imp_meas_future['Beach nourishment']['cost'][0], - 1.728000000000000e+09, places=3) - self.assertEqual(cost_ben.imp_meas_future['Beach nourishment']['cost'][1], 1) - self.assertAlmostEqual(cost_ben.imp_meas_future['Beach nourishment']['risk'], - 5.188921355413834e+09, places=3) - new_efc = cost_ben.imp_meas_future['Beach nourishment']['impact'].calc_freq_curve() + cost_ben.imp_meas_future["Mangroves"]["impact"].at_event.nonzero()[0].size, + 665, + ) + self.assertAlmostEqual( + cost_ben.imp_meas_future["Mangroves"]["impact"].at_event[13901], + 1.29576562770977e09, + places=3, + ) + self.assertAlmostEqual( + cost_ben.imp_meas_future["Mangroves"]["impact"].tot_value, + 6.570532945599105e11, + places=3, + ) + self.assertAlmostEqual( + cost_ben.imp_meas_future["Mangroves"]["impact"].aai_agg, + 4.850407096284983e09, + places=3, + ) + + self.assertAlmostEqual( + cost_ben.imp_meas_future["Beach nourishment"]["cost"][0], + 1.728000000000000e09, + places=3, + ) + self.assertEqual(cost_ben.imp_meas_future["Beach nourishment"]["cost"][1], 1) + self.assertAlmostEqual( + cost_ben.imp_meas_future["Beach nourishment"]["risk"], + 5.188921355413834e09, + places=3, + ) + new_efc = cost_ben.imp_meas_future["Beach nourishment"][ + "impact" + ].calc_freq_curve() self.assertTrue( - np.allclose(new_efc.return_per, - cost_ben.imp_meas_future['Beach nourishment']['efc'].return_per)) + np.allclose( + new_efc.return_per, + cost_ben.imp_meas_future["Beach nourishment"]["efc"].return_per, + ) + ) self.assertTrue( - np.allclose(new_efc.impact, - cost_ben.imp_meas_future['Beach nourishment']['efc'].impact)) + np.allclose( + new_efc.impact, + cost_ben.imp_meas_future["Beach nourishment"]["efc"].impact, + ) + ) self.assertEqual( - cost_ben.imp_meas_future['Beach nourishment']['impact'].at_event.nonzero()[0].size, - 702) - self.assertEqual(cost_ben.imp_meas_future['Beach nourishment']['impact'].at_event[1110], - 0.0) - self.assertAlmostEqual(cost_ben.imp_meas_future['Beach nourishment']['impact'].eai_exp[5], - 1.1133679079730146e+08, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_future['Beach nourishment']['impact'].tot_value, - 6.570532945599105e+11, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_future['Beach nourishment']['impact'].aai_agg, - 5.188921355413834e+09, places=3) - - self.assertAlmostEqual(cost_ben.imp_meas_future['Seawall']['cost'][0], - 8.878779433630093e+09, places=3) - self.assertEqual(cost_ben.imp_meas_future['Seawall']['cost'][1], 1) - self.assertAlmostEqual(cost_ben.imp_meas_future['Seawall']['risk'], - 4.736400526119911e+09, places=3) - new_efc = cost_ben.imp_meas_future['Seawall']['impact'].calc_freq_curve() - self.assertTrue(np.allclose(new_efc.return_per, - cost_ben.imp_meas_future['Seawall']['efc'].return_per)) - self.assertTrue(np.allclose(new_efc.impact, - cost_ben.imp_meas_future['Seawall']['efc'].impact)) - self.assertEqual(cost_ben.imp_meas_future['Seawall']['impact'].at_event.nonzero()[0].size, - 73) - self.assertEqual(cost_ben.imp_meas_future['Seawall']['impact'].at_event[1229], 0.0) - self.assertAlmostEqual(cost_ben.imp_meas_future['Seawall']['impact'].tot_value, - 6.570532945599105e+11, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_future['Seawall']['impact'].aai_agg, - 4.736400526119911e+09, places=3) - - self.assertAlmostEqual(cost_ben.imp_meas_future['Building code']['cost'][0], - 9.200000000000000e+09, places=3) - 
self.assertEqual(cost_ben.imp_meas_future['Building code']['cost'][1], 1) - self.assertAlmostEqual(cost_ben.imp_meas_future['Building code']['risk'], - 4.884150868173321e+09, places=3) - new_efc = cost_ben.imp_meas_future['Building code']['impact'].calc_freq_curve() - self.assertTrue(np.allclose(new_efc.return_per, - cost_ben.imp_meas_future['Building code']['efc'].return_per)) - self.assertTrue(np.allclose(new_efc.impact, - cost_ben.imp_meas_future['Building code']['efc'].impact)) + cost_ben.imp_meas_future["Beach nourishment"]["impact"] + .at_event.nonzero()[0] + .size, + 702, + ) self.assertEqual( - cost_ben.imp_meas_future['Building code']['impact'].at_event.nonzero()[0].size, - 841) - self.assertEqual(cost_ben.imp_meas_future['Building code']['impact'].at_event[122], 0.0) - self.assertAlmostEqual(cost_ben.imp_meas_future['Building code']['impact'].eai_exp[11], - 7.757060129393841e+07, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_future['Building code']['impact'].tot_value, - 6.570532945599105e+11, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_future['Building code']['impact'].aai_agg, - 4.884150868173321e+09, places=3) + cost_ben.imp_meas_future["Beach nourishment"]["impact"].at_event[1110], 0.0 + ) + self.assertAlmostEqual( + cost_ben.imp_meas_future["Beach nourishment"]["impact"].eai_exp[5], + 1.1133679079730146e08, + places=3, + ) + self.assertAlmostEqual( + cost_ben.imp_meas_future["Beach nourishment"]["impact"].tot_value, + 6.570532945599105e11, + places=3, + ) + self.assertAlmostEqual( + cost_ben.imp_meas_future["Beach nourishment"]["impact"].aai_agg, + 5.188921355413834e09, + places=3, + ) + + self.assertAlmostEqual( + cost_ben.imp_meas_future["Seawall"]["cost"][0], + 8.878779433630093e09, + places=3, + ) + self.assertEqual(cost_ben.imp_meas_future["Seawall"]["cost"][1], 1) + self.assertAlmostEqual( + cost_ben.imp_meas_future["Seawall"]["risk"], 4.736400526119911e09, places=3 + ) + new_efc = cost_ben.imp_meas_future["Seawall"]["impact"].calc_freq_curve() + self.assertTrue( + np.allclose( + new_efc.return_per, + cost_ben.imp_meas_future["Seawall"]["efc"].return_per, + ) + ) + self.assertTrue( + np.allclose( + new_efc.impact, cost_ben.imp_meas_future["Seawall"]["efc"].impact + ) + ) + self.assertEqual( + cost_ben.imp_meas_future["Seawall"]["impact"].at_event.nonzero()[0].size, 73 + ) + self.assertEqual( + cost_ben.imp_meas_future["Seawall"]["impact"].at_event[1229], 0.0 + ) + self.assertAlmostEqual( + cost_ben.imp_meas_future["Seawall"]["impact"].tot_value, + 6.570532945599105e11, + places=3, + ) + self.assertAlmostEqual( + cost_ben.imp_meas_future["Seawall"]["impact"].aai_agg, + 4.736400526119911e09, + places=3, + ) + + self.assertAlmostEqual( + cost_ben.imp_meas_future["Building code"]["cost"][0], + 9.200000000000000e09, + places=3, + ) + self.assertEqual(cost_ben.imp_meas_future["Building code"]["cost"][1], 1) + self.assertAlmostEqual( + cost_ben.imp_meas_future["Building code"]["risk"], + 4.884150868173321e09, + places=3, + ) + new_efc = cost_ben.imp_meas_future["Building code"]["impact"].calc_freq_curve() + self.assertTrue( + np.allclose( + new_efc.return_per, + cost_ben.imp_meas_future["Building code"]["efc"].return_per, + ) + ) + self.assertTrue( + np.allclose( + new_efc.impact, cost_ben.imp_meas_future["Building code"]["efc"].impact + ) + ) + self.assertEqual( + cost_ben.imp_meas_future["Building code"]["impact"] + .at_event.nonzero()[0] + .size, + 841, + ) + self.assertEqual( + cost_ben.imp_meas_future["Building code"]["impact"].at_event[122], 0.0 + ) + 
self.assertAlmostEqual( + cost_ben.imp_meas_future["Building code"]["impact"].eai_exp[11], + 7.757060129393841e07, + places=3, + ) + self.assertAlmostEqual( + cost_ben.imp_meas_future["Building code"]["impact"].tot_value, + 6.570532945599105e11, + places=3, + ) + self.assertAlmostEqual( + cost_ben.imp_meas_future["Building code"]["impact"].aai_agg, + 4.884150868173321e09, + places=3, + ) def test_cb_one_meas_pres_pass(self): """Test _cost_ben_one with different future""" - meas_name = 'Mangroves' + meas_name = "Mangroves" meas_val = dict() - meas_val['cost'] = (1.3117683608515418e+09, 1) - meas_val['risk'] = 4.826231151473135e+10 - meas_val['efc'] = None - meas_val['risk_transf'] = 0 + meas_val["cost"] = (1.3117683608515418e09, 1) + meas_val["risk"] = 4.826231151473135e10 + meas_val["efc"] = None + meas_val["risk_transf"] = 0 imp_meas_present = dict() - imp_meas_present['no measure'] = dict() - imp_meas_present['no measure']['risk'] = 6.51220115756442e+09 - imp_meas_present['Mangroves'] = dict() - imp_meas_present['Mangroves']['risk'] = 4.850407096284983e+09 - imp_meas_present['Mangroves']['risk_transf'] = 0 + imp_meas_present["no measure"] = dict() + imp_meas_present["no measure"]["risk"] = 6.51220115756442e09 + imp_meas_present["Mangroves"] = dict() + imp_meas_present["Mangroves"]["risk"] = 4.850407096284983e09 + imp_meas_present["Mangroves"]["risk_transf"] = 0 imp_meas_future = dict() - imp_meas_future['no measure'] = dict() - imp_meas_future['no measure']['risk'] = 5.9506659786664024e+10 + imp_meas_future["no measure"] = dict() + imp_meas_future["no measure"]["risk"] = 5.9506659786664024e10 - cb = CostBenefit(present_year=2018, future_year=2040, imp_meas_present=imp_meas_present, - imp_meas_future=imp_meas_future) + cb = CostBenefit( + present_year=2018, + future_year=2040, + imp_meas_present=imp_meas_present, + imp_meas_future=imp_meas_future, + ) disc_rates = DiscRates() disc_rates.years = np.arange(2016, 2051) @@ -204,18 +336,20 @@ def test_cb_one_meas_pres_pass(self): def test_cb_one_meas_fut_pass(self): """Test _cost_ben_one with same future""" - meas_name = 'Mangroves' + meas_name = "Mangroves" meas_val = dict() - meas_val['cost'] = (1.3117683608515418e+09, 1) - meas_val['risk'] = 4.850407096284983e+09 - meas_val['efc'] = None - meas_val['risk_transf'] = 0 + meas_val["cost"] = (1.3117683608515418e09, 1) + meas_val["risk"] = 4.850407096284983e09 + meas_val["efc"] = None + meas_val["risk_transf"] = 0 imp_meas_future = dict() - imp_meas_future['no measure'] = dict() - imp_meas_future['no measure']['risk'] = 6.51220115756442e+09 + imp_meas_future["no measure"] = dict() + imp_meas_future["no measure"]["risk"] = 6.51220115756442e09 - cb = CostBenefit(present_year=2018, future_year=2040, imp_meas_future=imp_meas_future) + cb = CostBenefit( + present_year=2018, future_year=2040, imp_meas_future=imp_meas_future + ) years = np.arange(2000, 2051) rates = np.ones(years.size) * 0.02 @@ -224,23 +358,29 @@ def test_cb_one_meas_fut_pass(self): time_dep = cb._time_dependency_array() cb._cost_ben_one(meas_name, meas_val, disc_rates, time_dep) - self.assertAlmostEqual(cb.benefit[meas_name], 3.100583368954022e+10, places=3) + self.assertAlmostEqual(cb.benefit[meas_name], 3.100583368954022e10, places=3) self.assertAlmostEqual(cb.cost_ben_ratio[meas_name], 0.04230714690616641) def test_calc_cb_no_change_pass(self): """Test _calc_cost_benefit without present value against reference value""" hazard = Hazard.from_hdf5(HAZ_TEST_TC) entity = Entity.from_mat(ENT_TEST_MAT) - entity.measures._data['TC'] = 
entity.measures._data.pop('XX') - for meas in entity.measures.get_measure('TC'): - meas.haz_type = 'TC' + entity.measures._data["TC"] = entity.measures._data.pop("XX") + for meas in entity.measures.get_measure("TC"): + meas.haz_type = "TC" entity.check() entity.exposures.assign_centroids(hazard) cost_ben = CostBenefit() - cost_ben._calc_impact_measures(hazard, entity.exposures, entity.measures, - entity.impact_funcs, when='future', - risk_func=risk_aai_agg, save_imp=True) + cost_ben._calc_impact_measures( + hazard, + entity.exposures, + entity.measures, + entity.impact_funcs, + when="future", + risk_func=risk_aai_agg, + save_imp=True, + ) cost_ben.present_year = 2018 cost_ben.future_year = 2040 @@ -251,33 +391,54 @@ def test_calc_cb_no_change_pass(self): self.assertEqual(cost_ben.present_year, 2018) self.assertEqual(cost_ben.future_year, 2040) - self.assertAlmostEqual(cost_ben.cost_ben_ratio['Mangroves'], 0.04230714690616641) - self.assertAlmostEqual(cost_ben.cost_ben_ratio['Beach nourishment'], 0.06998836431681373) - self.assertAlmostEqual(cost_ben.cost_ben_ratio['Seawall'], 0.2679741183248266) - self.assertAlmostEqual(cost_ben.cost_ben_ratio['Building code'], 0.30286828677985717) + self.assertAlmostEqual( + cost_ben.cost_ben_ratio["Mangroves"], 0.04230714690616641 + ) + self.assertAlmostEqual( + cost_ben.cost_ben_ratio["Beach nourishment"], 0.06998836431681373 + ) + self.assertAlmostEqual(cost_ben.cost_ben_ratio["Seawall"], 0.2679741183248266) + self.assertAlmostEqual( + cost_ben.cost_ben_ratio["Building code"], 0.30286828677985717 + ) - self.assertAlmostEqual(cost_ben.benefit['Mangroves'], 3.100583368954022e+10, places=3) - self.assertAlmostEqual(cost_ben.benefit['Beach nourishment'], - 2.468981832719974e+10, places=3) - self.assertAlmostEqual(cost_ben.benefit['Seawall'], 3.3132973770502796e+10, places=3) - self.assertAlmostEqual(cost_ben.benefit['Building code'], 3.0376240767284798e+10, places=3) + self.assertAlmostEqual( + cost_ben.benefit["Mangroves"], 3.100583368954022e10, places=3 + ) + self.assertAlmostEqual( + cost_ben.benefit["Beach nourishment"], 2.468981832719974e10, places=3 + ) + self.assertAlmostEqual( + cost_ben.benefit["Seawall"], 3.3132973770502796e10, places=3 + ) + self.assertAlmostEqual( + cost_ben.benefit["Building code"], 3.0376240767284798e10, places=3 + ) - self.assertAlmostEqual(cost_ben.tot_climate_risk, 1.2150496306913972e+11, places=3) + self.assertAlmostEqual( + cost_ben.tot_climate_risk, 1.2150496306913972e11, places=3 + ) def test_calc_cb_change_pass(self): """Test _calc_cost_benefit with present value against reference value""" hazard = Hazard.from_hdf5(HAZ_TEST_TC) entity = Entity.from_mat(ENT_TEST_MAT) - entity.measures._data['TC'] = entity.measures._data.pop('XX') - for meas in entity.measures.get_measure('TC'): - meas.haz_type = 'TC' + entity.measures._data["TC"] = entity.measures._data.pop("XX") + for meas in entity.measures.get_measure("TC"): + meas.haz_type = "TC" entity.check() entity.exposures.assign_centroids(hazard) cost_ben = CostBenefit() - cost_ben._calc_impact_measures(hazard, entity.exposures, entity.measures, - entity.impact_funcs, when='present', - risk_func=risk_aai_agg, save_imp=False) + cost_ben._calc_impact_measures( + hazard, + entity.exposures, + entity.measures, + entity.impact_funcs, + when="present", + risk_func=risk_aai_agg, + save_imp=False, + ) ent_future = Entity.from_excel(ENT_DEMO_FUTURE) ent_future.check() @@ -286,9 +447,15 @@ def test_calc_cb_change_pass(self): haz_future.intensity.data += 25 
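As a quick consistency check on the reference values asserted in the no-change test above, the cost/benefit ratio reported there is consistent with simply dividing the measure's cost by its benefit; a minimal sketch using the Mangroves figures that appear earlier in this test file (the tolerance is an assumption for illustration):

cost_mangroves = 1.3117683608515418e9      # cost asserted for 'Mangroves'
benefit_mangroves = 3.100583368954022e10   # benefit asserted for 'Mangroves'
ratio = cost_mangroves / benefit_mangroves
assert abs(ratio - 0.04230714690616641) < 1e-7  # matches the asserted cost_ben_ratio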
ent_future.exposures.assign_centroids(haz_future) - cost_ben._calc_impact_measures(haz_future, ent_future.exposures, ent_future.measures, - ent_future.impact_funcs, when='future', - risk_func=risk_aai_agg, save_imp=False) + cost_ben._calc_impact_measures( + haz_future, + ent_future.exposures, + ent_future.measures, + ent_future.impact_funcs, + when="future", + risk_func=risk_aai_agg, + save_imp=False, + ) cost_ben.present_year = 2018 cost_ben.future_year = 2040 @@ -296,39 +463,79 @@ def test_calc_cb_change_pass(self): self.assertEqual(cost_ben.present_year, 2018) self.assertEqual(cost_ben.future_year, 2040) - self.assertAlmostEqual(cost_ben.tot_climate_risk, 5.768659152882021e+11, places=3) - - self.assertAlmostEqual(cost_ben.imp_meas_present['no measure']['risk'], - 6.51220115756442e+09, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_present['Mangroves']['risk'], - 4.850407096284983e+09, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_present['Beach nourishment']['risk'], - 5.188921355413834e+09, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_present['Seawall']['risk'], - 4.736400526119911e+09, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_present['Building code']['risk'], - 4.884150868173321e+09, places=3) - - self.assertAlmostEqual(cost_ben.imp_meas_future['no measure']['risk'], - 5.9506659786664024e+10, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_future['Mangroves']['risk'], - 4.826231151473135e+10, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_future['Beach nourishment']['risk'], - 5.0647250923231674e+10, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_future['Seawall']['risk'], - 21089567135.7345, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_future['Building code']['risk'], - 4.462999483999791e+10, places=3) - - self.assertAlmostEqual(cost_ben.benefit['Mangroves'], 113345027690.81276, places=2) - self.assertAlmostEqual(cost_ben.benefit['Beach nourishment'], 89444869971.53653, places=2) - self.assertAlmostEqual(cost_ben.benefit['Seawall'], 347977469896.1333, places=2) - self.assertAlmostEqual(cost_ben.benefit['Building code'], 144216478822.05154, places=2) - - self.assertAlmostEqual(cost_ben.cost_ben_ratio['Mangroves'], 0.011573232523528404) - self.assertAlmostEqual(cost_ben.cost_ben_ratio['Beach nourishment'], 0.01931916274851638) - self.assertAlmostEqual(cost_ben.cost_ben_ratio['Seawall'], 0.025515385913577368) - self.assertAlmostEqual(cost_ben.cost_ben_ratio['Building code'], 0.06379298728650741) + self.assertAlmostEqual( + cost_ben.tot_climate_risk, 5.768659152882021e11, places=3 + ) + + self.assertAlmostEqual( + cost_ben.imp_meas_present["no measure"]["risk"], + 6.51220115756442e09, + places=3, + ) + self.assertAlmostEqual( + cost_ben.imp_meas_present["Mangroves"]["risk"], + 4.850407096284983e09, + places=3, + ) + self.assertAlmostEqual( + cost_ben.imp_meas_present["Beach nourishment"]["risk"], + 5.188921355413834e09, + places=3, + ) + self.assertAlmostEqual( + cost_ben.imp_meas_present["Seawall"]["risk"], 4.736400526119911e09, places=3 + ) + self.assertAlmostEqual( + cost_ben.imp_meas_present["Building code"]["risk"], + 4.884150868173321e09, + places=3, + ) + + self.assertAlmostEqual( + cost_ben.imp_meas_future["no measure"]["risk"], + 5.9506659786664024e10, + places=3, + ) + self.assertAlmostEqual( + cost_ben.imp_meas_future["Mangroves"]["risk"], + 4.826231151473135e10, + places=3, + ) + self.assertAlmostEqual( + cost_ben.imp_meas_future["Beach nourishment"]["risk"], + 5.0647250923231674e10, + places=3, + ) + 
self.assertAlmostEqual( + cost_ben.imp_meas_future["Seawall"]["risk"], 21089567135.7345, places=3 + ) + self.assertAlmostEqual( + cost_ben.imp_meas_future["Building code"]["risk"], + 4.462999483999791e10, + places=3, + ) + + self.assertAlmostEqual( + cost_ben.benefit["Mangroves"], 113345027690.81276, places=2 + ) + self.assertAlmostEqual( + cost_ben.benefit["Beach nourishment"], 89444869971.53653, places=2 + ) + self.assertAlmostEqual(cost_ben.benefit["Seawall"], 347977469896.1333, places=2) + self.assertAlmostEqual( + cost_ben.benefit["Building code"], 144216478822.05154, places=2 + ) + + self.assertAlmostEqual( + cost_ben.cost_ben_ratio["Mangroves"], 0.011573232523528404 + ) + self.assertAlmostEqual( + cost_ben.cost_ben_ratio["Beach nourishment"], 0.01931916274851638 + ) + self.assertAlmostEqual(cost_ben.cost_ben_ratio["Seawall"], 0.025515385913577368) + self.assertAlmostEqual( + cost_ben.cost_ben_ratio["Building code"], 0.06379298728650741 + ) self.assertAlmostEqual(cost_ben.tot_climate_risk, 576865915288.2021, places=3) @@ -348,8 +555,12 @@ def test_time_array_pres_pass(self): n_years = cb.future_year - cb.present_year + 1 self.assertEqual(time_arr.size, n_years) - self.assertTrue(np.allclose(time_arr, np.arange(n_years)**imp_time_depen / - (n_years - 1)**imp_time_depen)) + self.assertTrue( + np.allclose( + time_arr, + np.arange(n_years) ** imp_time_depen / (n_years - 1) ** imp_time_depen, + ) + ) def test_time_array_no_pres_pass(self): """Test _time_dependency_array""" @@ -368,12 +579,16 @@ def test_npv_unaverted_no_pres_pass(self): rates = np.ones(years.size) * 0.025 disc_rates = DiscRates(years=years, rates=rates) time_dep = np.linspace(0, 1, disc_rates.years.size) - res = cb._npv_unaverted_impact(risk_future, disc_rates, time_dep, - risk_present=None) + res = cb._npv_unaverted_impact( + risk_future, disc_rates, time_dep, risk_present=None + ) self.assertEqual( res, - disc_rates.net_present_value(cb.present_year, cb.future_year, time_dep * risk_future)) + disc_rates.net_present_value( + cb.present_year, cb.future_year, time_dep * risk_future + ), + ) def test_npv_unaverted_pres_pass(self): """Test _npv_unaverted_impact""" @@ -387,11 +602,13 @@ def test_npv_unaverted_pres_pass(self): time_dep = np.linspace(0, 1, disc_rates.years.size) res = cb._npv_unaverted_impact(risk_future, disc_rates, time_dep, risk_present) - tot_climate_risk = risk_present + (risk_future - risk_present) * time_dep - self.assertEqual(res, disc_rates.net_present_value(cb.present_year, - cb.future_year, - tot_climate_risk)) + self.assertEqual( + res, + disc_rates.net_present_value( + cb.present_year, cb.future_year, tot_climate_risk + ), + ) def test_norm_value(self): """Test _norm_values""" @@ -447,50 +664,89 @@ def test_combine_fut_pass(self): fut_haz = copy.deepcopy(hazard) cost_ben = CostBenefit() - cost_ben.calc(hazard, entity, fut_haz, fut_ent, future_year=None, - risk_func=risk_aai_agg, imp_time_depen=None, save_imp=True) - - new_name = 'combine' + cost_ben.calc( + hazard, + entity, + fut_haz, + fut_ent, + future_year=None, + risk_func=risk_aai_agg, + imp_time_depen=None, + save_imp=True, + ) + + new_name = "combine" new_color = np.array([0.1, 0.1, 0.1]) - new_cb = cost_ben.combine_measures(['Mangroves', 'Seawall'], new_name, new_color, - entity.disc_rates, imp_time_depen=None, - risk_func=risk_aai_agg) + new_cb = cost_ben.combine_measures( + ["Mangroves", "Seawall"], + new_name, + new_color, + entity.disc_rates, + imp_time_depen=None, + risk_func=risk_aai_agg, + ) 
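For reference, a minimal sketch of the arithmetic exercised by the time-dependency and NPV tests above; the years, discount rate and risk figure below are assumed for illustration only:

import numpy as np
from climada.entity.disc_rates import DiscRates

present_year, future_year = 2018, 2040
n_years = future_year - present_year + 1
imp_time_depen = 1.0  # 1.0 means impacts ramp up linearly between the two years

# weight applied to the future risk in every intermediate year
time_dep = np.arange(n_years) ** imp_time_depen / (n_years - 1) ** imp_time_depen

years = np.arange(2016, 2051)
disc = DiscRates(years=years, rates=np.ones(years.size) * 0.02)

risk_future = 6.51220115756442e9  # e.g. the 'no measure' annual risk used above
npv = disc.net_present_value(present_year, future_year, time_dep * risk_future)

# with a non-zero present-day risk, the series to discount would instead be
# risk_present + (risk_future - risk_present) * time_dep, as in the _npv tests.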
self.assertTrue(np.allclose(new_cb.color_rgb[new_name], new_color)) - new_imp = cost_ben.imp_meas_future['no measure']['impact'].at_event - \ - cost_ben.imp_meas_future['Mangroves']['impact'].at_event - new_imp += cost_ben.imp_meas_future['no measure']['impact'].at_event - \ - cost_ben.imp_meas_future['Seawall']['impact'].at_event - new_imp = np.maximum(cost_ben.imp_meas_future['no measure']['impact'].at_event - new_imp, - 0) - - self.assertTrue(np.allclose(new_cb.imp_meas_present[new_name]['impact'].at_event, new_imp)) - self.assertAlmostEqual( - new_cb.imp_meas_present[new_name]['risk'], - np.sum(new_imp * cost_ben.imp_meas_present['no measure']['impact'].frequency), 5) - self.assertAlmostEqual(new_cb.imp_meas_present[new_name]['cost'][0], - cost_ben.imp_meas_present['Mangroves']['cost'][0] + - cost_ben.imp_meas_present['Seawall']['cost'][0]) - self.assertAlmostEqual(new_cb.imp_meas_present[new_name]['cost'][1], 1) - self.assertTrue(np.allclose( - new_cb.imp_meas_present[new_name]['efc'].impact, - new_cb.imp_meas_present[new_name]['impact'].calc_freq_curve().impact)) - self.assertAlmostEqual(new_cb.imp_meas_present[new_name]['risk_transf'], 0) - - self.assertTrue(np.allclose(new_cb.imp_meas_future[new_name]['impact'].at_event, new_imp)) - self.assertAlmostEqual( - new_cb.imp_meas_future[new_name]['risk'], - np.sum(new_imp * cost_ben.imp_meas_future['no measure']['impact'].frequency), 5) - self.assertAlmostEqual( - new_cb.imp_meas_future[new_name]['cost'][0], - cost_ben.imp_meas_future['Mangroves']['cost'][0] - + cost_ben.imp_meas_future['Seawall']['cost'][0]) - self.assertAlmostEqual(new_cb.imp_meas_future[new_name]['cost'][1], 1) - self.assertTrue(np.allclose( - new_cb.imp_meas_future[new_name]['efc'].impact, - new_cb.imp_meas_future[new_name]['impact'].calc_freq_curve().impact)) - self.assertAlmostEqual(new_cb.imp_meas_future[new_name]['risk_transf'], 0) + new_imp = ( + cost_ben.imp_meas_future["no measure"]["impact"].at_event + - cost_ben.imp_meas_future["Mangroves"]["impact"].at_event + ) + new_imp += ( + cost_ben.imp_meas_future["no measure"]["impact"].at_event + - cost_ben.imp_meas_future["Seawall"]["impact"].at_event + ) + new_imp = np.maximum( + cost_ben.imp_meas_future["no measure"]["impact"].at_event - new_imp, 0 + ) + + self.assertTrue( + np.allclose(new_cb.imp_meas_present[new_name]["impact"].at_event, new_imp) + ) + self.assertAlmostEqual( + new_cb.imp_meas_present[new_name]["risk"], + np.sum( + new_imp * cost_ben.imp_meas_present["no measure"]["impact"].frequency + ), + 5, + ) + self.assertAlmostEqual( + new_cb.imp_meas_present[new_name]["cost"][0], + cost_ben.imp_meas_present["Mangroves"]["cost"][0] + + cost_ben.imp_meas_present["Seawall"]["cost"][0], + ) + self.assertAlmostEqual(new_cb.imp_meas_present[new_name]["cost"][1], 1) + self.assertTrue( + np.allclose( + new_cb.imp_meas_present[new_name]["efc"].impact, + new_cb.imp_meas_present[new_name]["impact"].calc_freq_curve().impact, + ) + ) + self.assertAlmostEqual(new_cb.imp_meas_present[new_name]["risk_transf"], 0) + + self.assertTrue( + np.allclose(new_cb.imp_meas_future[new_name]["impact"].at_event, new_imp) + ) + self.assertAlmostEqual( + new_cb.imp_meas_future[new_name]["risk"], + np.sum( + new_imp * cost_ben.imp_meas_future["no measure"]["impact"].frequency + ), + 5, + ) + self.assertAlmostEqual( + new_cb.imp_meas_future[new_name]["cost"][0], + cost_ben.imp_meas_future["Mangroves"]["cost"][0] + + cost_ben.imp_meas_future["Seawall"]["cost"][0], + ) + self.assertAlmostEqual(new_cb.imp_meas_future[new_name]["cost"][1], 
1) + self.assertTrue( + np.allclose( + new_cb.imp_meas_future[new_name]["efc"].impact, + new_cb.imp_meas_future[new_name]["impact"].calc_freq_curve().impact, + ) + ) + self.assertAlmostEqual(new_cb.imp_meas_future[new_name]["risk_transf"], 0) self.assertAlmostEqual(new_cb.benefit[new_name], 51781337529.07264, places=3) self.assertAlmostEqual(new_cb.cost_ben_ratio[new_name], 0.19679962474434248) @@ -502,36 +758,62 @@ def test_combine_current_pass(self): entity.check() entity.exposures.ref_year = 2018 cost_ben = CostBenefit() - cost_ben.calc(hazard, entity, future_year=2040, risk_func=risk_aai_agg, - imp_time_depen=None, save_imp=True) - - new_name = 'combine' + cost_ben.calc( + hazard, + entity, + future_year=2040, + risk_func=risk_aai_agg, + imp_time_depen=None, + save_imp=True, + ) + + new_name = "combine" new_color = np.array([0.1, 0.1, 0.1]) - new_cb = cost_ben.combine_measures(['Mangroves', 'Seawall'], new_name, new_color, - entity.disc_rates, imp_time_depen=None, - risk_func=risk_aai_agg) + new_cb = cost_ben.combine_measures( + ["Mangroves", "Seawall"], + new_name, + new_color, + entity.disc_rates, + imp_time_depen=None, + risk_func=risk_aai_agg, + ) self.assertTrue(np.allclose(new_cb.color_rgb[new_name], new_color)) self.assertEqual(len(new_cb.imp_meas_present), 0) - new_imp = cost_ben.imp_meas_future['no measure']['impact'].at_event - \ - cost_ben.imp_meas_future['Mangroves']['impact'].at_event - new_imp += cost_ben.imp_meas_future['no measure']['impact'].at_event - \ - cost_ben.imp_meas_future['Seawall']['impact'].at_event - new_imp = np.maximum(cost_ben.imp_meas_future['no measure']['impact'].at_event - new_imp, - 0) - self.assertTrue(np.allclose(new_cb.imp_meas_future[new_name]['impact'].at_event, new_imp)) - self.assertAlmostEqual( - new_cb.imp_meas_future[new_name]['risk'], - np.sum(new_imp * cost_ben.imp_meas_future['no measure']['impact'].frequency), 5) - self.assertAlmostEqual( - new_cb.imp_meas_future[new_name]['cost'][0], - cost_ben.imp_meas_future['Mangroves']['cost'][0] - + cost_ben.imp_meas_future['Seawall']['cost'][0]) - self.assertAlmostEqual(new_cb.imp_meas_future[new_name]['cost'][1], 1) - self.assertTrue(np.allclose( - new_cb.imp_meas_future[new_name]['efc'].impact, - new_cb.imp_meas_future[new_name]['impact'].calc_freq_curve().impact)) - self.assertAlmostEqual(new_cb.imp_meas_future[new_name]['risk_transf'], 0) + new_imp = ( + cost_ben.imp_meas_future["no measure"]["impact"].at_event + - cost_ben.imp_meas_future["Mangroves"]["impact"].at_event + ) + new_imp += ( + cost_ben.imp_meas_future["no measure"]["impact"].at_event + - cost_ben.imp_meas_future["Seawall"]["impact"].at_event + ) + new_imp = np.maximum( + cost_ben.imp_meas_future["no measure"]["impact"].at_event - new_imp, 0 + ) + self.assertTrue( + np.allclose(new_cb.imp_meas_future[new_name]["impact"].at_event, new_imp) + ) + self.assertAlmostEqual( + new_cb.imp_meas_future[new_name]["risk"], + np.sum( + new_imp * cost_ben.imp_meas_future["no measure"]["impact"].frequency + ), + 5, + ) + self.assertAlmostEqual( + new_cb.imp_meas_future[new_name]["cost"][0], + cost_ben.imp_meas_future["Mangroves"]["cost"][0] + + cost_ben.imp_meas_future["Seawall"]["cost"][0], + ) + self.assertAlmostEqual(new_cb.imp_meas_future[new_name]["cost"][1], 1) + self.assertTrue( + np.allclose( + new_cb.imp_meas_future[new_name]["efc"].impact, + new_cb.imp_meas_future[new_name]["impact"].calc_freq_curve().impact, + ) + ) + self.assertAlmostEqual(new_cb.imp_meas_future[new_name]["risk_transf"], 0) 
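The expected combined impact re-derived in the two combine_measures tests above follows a simple rule: sum the impact averted by each individual measure and subtract that from the baseline, floored at zero. A small numeric sketch with made-up per-event impacts:

import numpy as np

no_measure = np.array([100.0, 50.0, 10.0])  # baseline impact per event (illustrative)
mangroves = np.array([60.0, 30.0, 8.0])     # impact with the first measure applied
seawall = np.array([80.0, 10.0, 9.0])       # impact with the second measure applied

averted = (no_measure - mangroves) + (no_measure - seawall)
combined = np.maximum(no_measure - averted, 0.0)  # -> array([40., 0., 7.])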
self.assertAlmostEqual(new_cb.benefit[new_name], 51781337529.07264, places=3) self.assertAlmostEqual(new_cb.cost_ben_ratio[new_name], 0.19679962474434248) @@ -542,45 +824,81 @@ def test_apply_transf_current_pass(self): entity.check() entity.exposures.ref_year = 2018 cost_ben = CostBenefit() - cost_ben.calc(hazard, entity, future_year=2040, risk_func=risk_aai_agg, - imp_time_depen=None, save_imp=True) - - new_name = 'combine' + cost_ben.calc( + hazard, + entity, + future_year=2040, + risk_func=risk_aai_agg, + imp_time_depen=None, + save_imp=True, + ) + + new_name = "combine" new_color = np.array([0.1, 0.1, 0.1]) risk_transf = (1.0e7, 15.0e11, 1) - new_cb = cost_ben.combine_measures(['Mangroves', 'Seawall'], new_name, new_color, - entity.disc_rates, imp_time_depen=None, - risk_func=risk_aai_agg) - new_cb.apply_risk_transfer(new_name, risk_transf[0], risk_transf[1], - entity.disc_rates, cost_fix=0, cost_factor=risk_transf[2], - imp_time_depen=1, - risk_func=risk_aai_agg) - - tr_name = 'risk transfer (' + new_name + ')' - new_imp = cost_ben.imp_meas_future['no measure']['impact'].at_event - \ - cost_ben.imp_meas_future['Mangroves']['impact'].at_event - new_imp += cost_ben.imp_meas_future['no measure']['impact'].at_event - \ - cost_ben.imp_meas_future['Seawall']['impact'].at_event - new_imp = np.maximum(cost_ben.imp_meas_future['no measure']['impact'].at_event - new_imp, - 0) + new_cb = cost_ben.combine_measures( + ["Mangroves", "Seawall"], + new_name, + new_color, + entity.disc_rates, + imp_time_depen=None, + risk_func=risk_aai_agg, + ) + new_cb.apply_risk_transfer( + new_name, + risk_transf[0], + risk_transf[1], + entity.disc_rates, + cost_fix=0, + cost_factor=risk_transf[2], + imp_time_depen=1, + risk_func=risk_aai_agg, + ) + + tr_name = "risk transfer (" + new_name + ")" + new_imp = ( + cost_ben.imp_meas_future["no measure"]["impact"].at_event + - cost_ben.imp_meas_future["Mangroves"]["impact"].at_event + ) + new_imp += ( + cost_ben.imp_meas_future["no measure"]["impact"].at_event + - cost_ben.imp_meas_future["Seawall"]["impact"].at_event + ) + new_imp = np.maximum( + cost_ben.imp_meas_future["no measure"]["impact"].at_event - new_imp, 0 + ) imp_layer = np.minimum(np.maximum(new_imp - risk_transf[0], 0), risk_transf[1]) risk_transfer = np.sum( - imp_layer * cost_ben.imp_meas_future['no measure']['impact'].frequency) + imp_layer * cost_ben.imp_meas_future["no measure"]["impact"].frequency + ) new_imp = np.maximum(new_imp - imp_layer, 0) self.assertTrue(np.allclose(new_cb.color_rgb[new_name], new_color)) self.assertEqual(len(new_cb.imp_meas_present), 0) - self.assertTrue(np.allclose(new_cb.imp_meas_future[tr_name]['impact'].at_event, new_imp)) + self.assertTrue( + np.allclose(new_cb.imp_meas_future[tr_name]["impact"].at_event, new_imp) + ) self.assertAlmostEqual( - new_cb.imp_meas_future[tr_name]['risk'], - np.sum(new_imp * cost_ben.imp_meas_future['no measure']['impact'].frequency), 5) + new_cb.imp_meas_future[tr_name]["risk"], + np.sum( + new_imp * cost_ben.imp_meas_future["no measure"]["impact"].frequency + ), + 5, + ) self.assertAlmostEqual( new_cb.cost_ben_ratio[tr_name] * new_cb.benefit[tr_name], - 32106013195.316242, places=3) - self.assertTrue(np.allclose( - new_cb.imp_meas_future[tr_name]['efc'].impact, - new_cb.imp_meas_future[tr_name]['impact'].calc_freq_curve().impact)) - self.assertAlmostEqual(new_cb.imp_meas_future[tr_name]['risk_transf'], risk_transfer) + 32106013195.316242, + places=3, + ) + self.assertTrue( + np.allclose( + new_cb.imp_meas_future[tr_name]["efc"].impact, + 
new_cb.imp_meas_future[tr_name]["impact"].calc_freq_curve().impact, + ) + ) + self.assertAlmostEqual( + new_cb.imp_meas_future[tr_name]["risk_transf"], risk_transfer + ) # benefit = impact layer self.assertAlmostEqual(new_cb.benefit[tr_name], 32106013195.316242, 4) self.assertAlmostEqual(new_cb.cost_ben_ratio[tr_name], 1) @@ -592,43 +910,81 @@ def test_apply_transf_cost_fact_pass(self): entity.check() entity.exposures.ref_year = 2018 cost_ben = CostBenefit() - cost_ben.calc(hazard, entity, future_year=2040, risk_func=risk_aai_agg, - imp_time_depen=None, save_imp=True) - - new_name = 'combine' + cost_ben.calc( + hazard, + entity, + future_year=2040, + risk_func=risk_aai_agg, + imp_time_depen=None, + save_imp=True, + ) + + new_name = "combine" new_color = np.array([0.1, 0.1, 0.1]) risk_transf = (1.0e7, 15.0e11, 2) - new_cb = cost_ben.combine_measures(['Mangroves', 'Seawall'], new_name, new_color, - entity.disc_rates, imp_time_depen=None, - risk_func=risk_aai_agg) - new_cb.apply_risk_transfer(new_name, risk_transf[0], risk_transf[1], - entity.disc_rates, cost_fix=0, cost_factor=risk_transf[2], - imp_time_depen=1, risk_func=risk_aai_agg) - - tr_name = 'risk transfer (' + new_name + ')' - new_imp = cost_ben.imp_meas_future['no measure']['impact'].at_event - \ - cost_ben.imp_meas_future['Mangroves']['impact'].at_event - new_imp += cost_ben.imp_meas_future['no measure']['impact'].at_event - \ - cost_ben.imp_meas_future['Seawall']['impact'].at_event - new_imp = np.maximum(cost_ben.imp_meas_future['no measure']['impact'].at_event - new_imp, - 0) + new_cb = cost_ben.combine_measures( + ["Mangroves", "Seawall"], + new_name, + new_color, + entity.disc_rates, + imp_time_depen=None, + risk_func=risk_aai_agg, + ) + new_cb.apply_risk_transfer( + new_name, + risk_transf[0], + risk_transf[1], + entity.disc_rates, + cost_fix=0, + cost_factor=risk_transf[2], + imp_time_depen=1, + risk_func=risk_aai_agg, + ) + + tr_name = "risk transfer (" + new_name + ")" + new_imp = ( + cost_ben.imp_meas_future["no measure"]["impact"].at_event + - cost_ben.imp_meas_future["Mangroves"]["impact"].at_event + ) + new_imp += ( + cost_ben.imp_meas_future["no measure"]["impact"].at_event + - cost_ben.imp_meas_future["Seawall"]["impact"].at_event + ) + new_imp = np.maximum( + cost_ben.imp_meas_future["no measure"]["impact"].at_event - new_imp, 0 + ) imp_layer = np.minimum(np.maximum(new_imp - risk_transf[0], 0), risk_transf[1]) risk_transfer = np.sum( - imp_layer * cost_ben.imp_meas_future['no measure']['impact'].frequency) + imp_layer * cost_ben.imp_meas_future["no measure"]["impact"].frequency + ) new_imp = np.maximum(new_imp - imp_layer, 0) self.assertTrue(np.allclose(new_cb.color_rgb[new_name], new_color)) self.assertEqual(len(new_cb.imp_meas_present), 0) - self.assertTrue(np.allclose(new_cb.imp_meas_future[tr_name]['impact'].at_event, new_imp)) + self.assertTrue( + np.allclose(new_cb.imp_meas_future[tr_name]["impact"].at_event, new_imp) + ) self.assertAlmostEqual( - new_cb.imp_meas_future[tr_name]['risk'], - np.sum(new_imp * cost_ben.imp_meas_future['no measure']['impact'].frequency), 5) - self.assertAlmostEqual(new_cb.cost_ben_ratio[tr_name] * new_cb.benefit[tr_name], - risk_transf[2] * 32106013195.316242, 4) + new_cb.imp_meas_future[tr_name]["risk"], + np.sum( + new_imp * cost_ben.imp_meas_future["no measure"]["impact"].frequency + ), + 5, + ) + self.assertAlmostEqual( + new_cb.cost_ben_ratio[tr_name] * new_cb.benefit[tr_name], + risk_transf[2] * 32106013195.316242, + 4, + ) self.assertTrue( - 
np.allclose(new_cb.imp_meas_future[tr_name]['efc'].impact, - new_cb.imp_meas_future[tr_name]['impact'].calc_freq_curve().impact)) - self.assertAlmostEqual(new_cb.imp_meas_future[tr_name]['risk_transf'], risk_transfer) + np.allclose( + new_cb.imp_meas_future[tr_name]["efc"].impact, + new_cb.imp_meas_future[tr_name]["impact"].calc_freq_curve().impact, + ) + ) + self.assertAlmostEqual( + new_cb.imp_meas_future[tr_name]["risk_transf"], risk_transfer + ) # benefit = impact layer self.assertAlmostEqual(new_cb.benefit[tr_name], 32106013195.316242, 4) self.assertAlmostEqual(new_cb.cost_ben_ratio[tr_name], risk_transf[2]) @@ -644,47 +1000,91 @@ def test_apply_transf_future_pass(self): fut_ent.exposures.ref_year = 2040 cost_ben = CostBenefit() - cost_ben.calc(hazard, entity, ent_future=fut_ent, risk_func=risk_aai_agg, - imp_time_depen=None, save_imp=True) - - new_name = 'combine' + cost_ben.calc( + hazard, + entity, + ent_future=fut_ent, + risk_func=risk_aai_agg, + imp_time_depen=None, + save_imp=True, + ) + + new_name = "combine" new_color = np.array([0.1, 0.1, 0.1]) risk_transf = (1.0e7, 15.0e11, 1) - new_cb = cost_ben.combine_measures(['Mangroves', 'Seawall'], new_name, new_color, - entity.disc_rates, imp_time_depen=None, - risk_func=risk_aai_agg) - new_cb.apply_risk_transfer(new_name, risk_transf[0], risk_transf[1], - entity.disc_rates, cost_fix=0, cost_factor=risk_transf[2], - imp_time_depen=1, risk_func=risk_aai_agg) - - tr_name = 'risk transfer (' + new_name + ')' - new_imp = cost_ben.imp_meas_future['no measure']['impact'].at_event - \ - cost_ben.imp_meas_future['Mangroves']['impact'].at_event - new_imp += cost_ben.imp_meas_future['no measure']['impact'].at_event - \ - cost_ben.imp_meas_future['Seawall']['impact'].at_event - new_imp = np.maximum(cost_ben.imp_meas_future['no measure']['impact'].at_event - new_imp, - 0) + new_cb = cost_ben.combine_measures( + ["Mangroves", "Seawall"], + new_name, + new_color, + entity.disc_rates, + imp_time_depen=None, + risk_func=risk_aai_agg, + ) + new_cb.apply_risk_transfer( + new_name, + risk_transf[0], + risk_transf[1], + entity.disc_rates, + cost_fix=0, + cost_factor=risk_transf[2], + imp_time_depen=1, + risk_func=risk_aai_agg, + ) + + tr_name = "risk transfer (" + new_name + ")" + new_imp = ( + cost_ben.imp_meas_future["no measure"]["impact"].at_event + - cost_ben.imp_meas_future["Mangroves"]["impact"].at_event + ) + new_imp += ( + cost_ben.imp_meas_future["no measure"]["impact"].at_event + - cost_ben.imp_meas_future["Seawall"]["impact"].at_event + ) + new_imp = np.maximum( + cost_ben.imp_meas_future["no measure"]["impact"].at_event - new_imp, 0 + ) imp_layer = np.minimum(np.maximum(new_imp - risk_transf[0], 0), risk_transf[1]) risk_transfer = np.sum( - imp_layer * cost_ben.imp_meas_future['no measure']['impact'].frequency) + imp_layer * cost_ben.imp_meas_future["no measure"]["impact"].frequency + ) new_imp = np.maximum(new_imp - imp_layer, 0) self.assertTrue(np.allclose(new_cb.color_rgb[new_name], new_color)) self.assertEqual(len(new_cb.imp_meas_present), 3) - self.assertTrue(np.allclose(new_cb.imp_meas_future[tr_name]['impact'].at_event, new_imp)) - self.assertTrue(np.allclose(new_cb.imp_meas_present[tr_name]['impact'].at_event, new_imp)) + self.assertTrue( + np.allclose(new_cb.imp_meas_future[tr_name]["impact"].at_event, new_imp) + ) + self.assertTrue( + np.allclose(new_cb.imp_meas_present[tr_name]["impact"].at_event, new_imp) + ) self.assertAlmostEqual( - new_cb.imp_meas_future[tr_name]['risk'], - np.sum(new_imp * cost_ben.imp_meas_future['no 
measure']['impact'].frequency), 5) + new_cb.imp_meas_future[tr_name]["risk"], + np.sum( + new_imp * cost_ben.imp_meas_future["no measure"]["impact"].frequency + ), + 5, + ) self.assertAlmostEqual( - new_cb.imp_meas_present[tr_name]['risk'], - np.sum(new_imp * cost_ben.imp_meas_future['no measure']['impact'].frequency), 5) - self.assertAlmostEqual(new_cb.cost_ben_ratio[tr_name] * new_cb.benefit[tr_name], - 69715165679.7042, places=3) + new_cb.imp_meas_present[tr_name]["risk"], + np.sum( + new_imp * cost_ben.imp_meas_future["no measure"]["impact"].frequency + ), + 5, + ) + self.assertAlmostEqual( + new_cb.cost_ben_ratio[tr_name] * new_cb.benefit[tr_name], + 69715165679.7042, + places=3, + ) self.assertTrue( - np.allclose(new_cb.imp_meas_future[tr_name]['efc'].impact, - new_cb.imp_meas_future[tr_name]['impact'].calc_freq_curve().impact)) - self.assertAlmostEqual(new_cb.imp_meas_future[tr_name]['risk_transf'], risk_transfer) + np.allclose( + new_cb.imp_meas_future[tr_name]["efc"].impact, + new_cb.imp_meas_future[tr_name]["impact"].calc_freq_curve().impact, + ) + ) + self.assertAlmostEqual( + new_cb.imp_meas_future[tr_name]["risk_transf"], risk_transfer + ) # benefit = impact layer self.assertAlmostEqual(new_cb.benefit[tr_name], 69715165679.7042, 4) self.assertAlmostEqual(new_cb.cost_ben_ratio[tr_name], 1) @@ -696,10 +1096,16 @@ def test_remove_measure(self): entity.check() entity.exposures.ref_year = 2018 cost_ben = CostBenefit() - cost_ben.calc(hazard, entity, future_year=2040, risk_func=risk_aai_agg, - imp_time_depen=None, save_imp=True) - - to_remove = 'Mangroves' + cost_ben.calc( + hazard, + entity, + future_year=2040, + risk_func=risk_aai_agg, + imp_time_depen=None, + save_imp=True, + ) + + to_remove = "Mangroves" self.assertTrue(to_remove in cost_ben.benefit.keys()) cost_ben.remove_measure(to_remove) self.assertTrue(to_remove not in cost_ben.color_rgb.keys()) @@ -713,6 +1119,7 @@ def test_remove_measure(self): self.assertEqual(len(cost_ben.cost_ben_ratio), 3) self.assertEqual(len(cost_ben.benefit), 3) + class TestCalc(unittest.TestCase): """Test calc""" @@ -721,7 +1128,7 @@ def test_calc_change_pass(self): # present hazard = Hazard.from_hdf5(HAZ_TEST_TC) entity = Entity.from_excel(ENT_DEMO_TODAY) - entity.exposures.gdf.rename(columns={'impf_': 'impf_TC'}, inplace=True) + entity.exposures.gdf.rename(columns={"impf_": "impf_TC"}, inplace=True) entity.check() entity.exposures.ref_year = 2018 @@ -738,39 +1145,79 @@ def test_calc_change_pass(self): self.assertEqual(cost_ben.present_year, 2018) self.assertEqual(cost_ben.future_year, 2040) - self.assertAlmostEqual(cost_ben.tot_climate_risk, 5.768659152882021e+11, places=3) - - self.assertAlmostEqual(cost_ben.imp_meas_present['no measure']['risk'], - 6.51220115756442e+09, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_present['Mangroves']['risk'], - 4.850407096284983e+09, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_present['Beach nourishment']['risk'], - 5.188921355413834e+09, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_present['Seawall']['risk'], - 4.736400526119911e+09, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_present['Building code']['risk'], - 4.884150868173321e+09, places=3) - - self.assertAlmostEqual(cost_ben.imp_meas_future['no measure']['risk'], - 5.9506659786664024e+10, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_future['Mangroves']['risk'], - 4.826231151473135e+10, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_future['Beach nourishment']['risk'], - 5.0647250923231674e+10, places=3) - 
self.assertAlmostEqual(cost_ben.imp_meas_future['Seawall']['risk'], - 21089567135.7345, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_future['Building code']['risk'], - 4.462999483999791e+10, places=3) - - self.assertAlmostEqual(cost_ben.benefit['Mangroves'], 113345027690.81276, places=2) - self.assertAlmostEqual(cost_ben.benefit['Beach nourishment'], 89444869971.53653, places=2) - self.assertAlmostEqual(cost_ben.benefit['Seawall'], 347977469896.1333, places=2) - self.assertAlmostEqual(cost_ben.benefit['Building code'], 144216478822.05154, places=2) - - self.assertAlmostEqual(cost_ben.cost_ben_ratio['Mangroves'], 0.011573232523528404) - self.assertAlmostEqual(cost_ben.cost_ben_ratio['Beach nourishment'], 0.01931916274851638) - self.assertAlmostEqual(cost_ben.cost_ben_ratio['Seawall'], 0.025515385913577368) - self.assertAlmostEqual(cost_ben.cost_ben_ratio['Building code'], 0.06379298728650741) + self.assertAlmostEqual( + cost_ben.tot_climate_risk, 5.768659152882021e11, places=3 + ) + + self.assertAlmostEqual( + cost_ben.imp_meas_present["no measure"]["risk"], + 6.51220115756442e09, + places=3, + ) + self.assertAlmostEqual( + cost_ben.imp_meas_present["Mangroves"]["risk"], + 4.850407096284983e09, + places=3, + ) + self.assertAlmostEqual( + cost_ben.imp_meas_present["Beach nourishment"]["risk"], + 5.188921355413834e09, + places=3, + ) + self.assertAlmostEqual( + cost_ben.imp_meas_present["Seawall"]["risk"], 4.736400526119911e09, places=3 + ) + self.assertAlmostEqual( + cost_ben.imp_meas_present["Building code"]["risk"], + 4.884150868173321e09, + places=3, + ) + + self.assertAlmostEqual( + cost_ben.imp_meas_future["no measure"]["risk"], + 5.9506659786664024e10, + places=3, + ) + self.assertAlmostEqual( + cost_ben.imp_meas_future["Mangroves"]["risk"], + 4.826231151473135e10, + places=3, + ) + self.assertAlmostEqual( + cost_ben.imp_meas_future["Beach nourishment"]["risk"], + 5.0647250923231674e10, + places=3, + ) + self.assertAlmostEqual( + cost_ben.imp_meas_future["Seawall"]["risk"], 21089567135.7345, places=3 + ) + self.assertAlmostEqual( + cost_ben.imp_meas_future["Building code"]["risk"], + 4.462999483999791e10, + places=3, + ) + + self.assertAlmostEqual( + cost_ben.benefit["Mangroves"], 113345027690.81276, places=2 + ) + self.assertAlmostEqual( + cost_ben.benefit["Beach nourishment"], 89444869971.53653, places=2 + ) + self.assertAlmostEqual(cost_ben.benefit["Seawall"], 347977469896.1333, places=2) + self.assertAlmostEqual( + cost_ben.benefit["Building code"], 144216478822.05154, places=2 + ) + + self.assertAlmostEqual( + cost_ben.cost_ben_ratio["Mangroves"], 0.011573232523528404 + ) + self.assertAlmostEqual( + cost_ben.cost_ben_ratio["Beach nourishment"], 0.01931916274851638 + ) + self.assertAlmostEqual(cost_ben.cost_ben_ratio["Seawall"], 0.025515385913577368) + self.assertAlmostEqual( + cost_ben.cost_ben_ratio["Building code"], 0.06379298728650741 + ) self.assertAlmostEqual(cost_ben.tot_climate_risk, 576865915288.2021, places=3) @@ -788,18 +1235,34 @@ def test_calc_no_change_pass(self): self.assertEqual(cost_ben.present_year, 2018) self.assertEqual(cost_ben.future_year, 2040) - self.assertAlmostEqual(cost_ben.cost_ben_ratio['Mangroves'], 0.04230714690616641) - self.assertAlmostEqual(cost_ben.cost_ben_ratio['Beach nourishment'], 0.06998836431681373) - self.assertAlmostEqual(cost_ben.cost_ben_ratio['Seawall'], 0.2679741183248266) - self.assertAlmostEqual(cost_ben.cost_ben_ratio['Building code'], 0.30286828677985717) + self.assertAlmostEqual( + cost_ben.cost_ben_ratio["Mangroves"], 
0.04230714690616641 + ) + self.assertAlmostEqual( + cost_ben.cost_ben_ratio["Beach nourishment"], 0.06998836431681373 + ) + self.assertAlmostEqual(cost_ben.cost_ben_ratio["Seawall"], 0.2679741183248266) + self.assertAlmostEqual( + cost_ben.cost_ben_ratio["Building code"], 0.30286828677985717 + ) + + self.assertAlmostEqual( + cost_ben.benefit["Mangroves"], 3.100583368954022e10, places=3 + ) + self.assertAlmostEqual( + cost_ben.benefit["Beach nourishment"], 2.468981832719974e10, places=3 + ) + self.assertAlmostEqual( + cost_ben.benefit["Seawall"], 3.3132973770502796e10, places=3 + ) + self.assertAlmostEqual( + cost_ben.benefit["Building code"], 3.0376240767284798e10, places=3 + ) - self.assertAlmostEqual(cost_ben.benefit['Mangroves'], 3.100583368954022e+10, places=3) - self.assertAlmostEqual(cost_ben.benefit['Beach nourishment'], - 2.468981832719974e+10, places=3) - self.assertAlmostEqual(cost_ben.benefit['Seawall'], 3.3132973770502796e+10, places=3) - self.assertAlmostEqual(cost_ben.benefit['Building code'], 3.0376240767284798e+10, places=3) + self.assertAlmostEqual( + cost_ben.tot_climate_risk, 1.2150496306913972e11, places=3 + ) - self.assertAlmostEqual(cost_ben.tot_climate_risk, 1.2150496306913972e+11, places=3) class TestRiskFuncs(unittest.TestCase): """Test risk functions definitions""" @@ -815,8 +1278,8 @@ def test_risk_aai_agg_pass(self): """Test risk_aai_agg""" impact = self.test_impact() risk = risk_aai_agg(impact) - self.assertAlmostEqual(6.512201157564421e+09, risk, 5) - self.assertTrue(np.isclose(6.512201157564421e+09, risk)) + self.assertAlmostEqual(6.512201157564421e09, risk, 5) + self.assertTrue(np.isclose(6.512201157564421e09, risk)) def test_risk_rp_100_pass(self): """Test risk_rp_100""" @@ -834,6 +1297,7 @@ def test_risk_rp_200_pass(self): risk = risk_rp_250(impact) self.assertAlmostEqual(exc_freq.impact[0], risk) + # Execute Tests if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestRiskFuncs) diff --git a/climada/engine/test/test_forecast.py b/climada/engine/test/test_forecast.py index ef249ae20..8e80cb4fe 100644 --- a/climada/engine/test/test_forecast.py +++ b/climada/engine/test/test_forecast.py @@ -19,58 +19,62 @@ Test Forecast class """ -import unittest import datetime as dt -import numpy as np +import unittest +from pathlib import Path + +import fiona import geopandas as gpd import matplotlib.pyplot as plt -import fiona +import numpy as np from cartopy.io import shapereader -from pathlib import Path from climada import CONFIG -from climada.hazard.storm_europe import StormEurope -from climada.entity.exposures.base import Exposures, INDICATOR_IMPF +from climada.engine.forecast import FORECAST_PLOT_DIR, Forecast from climada.entity import ImpactFuncSet +from climada.entity.exposures.base import INDICATOR_IMPF, Exposures from climada.entity.impact_funcs.storm_europe import ImpfStormEurope -from climada.engine.forecast import Forecast, FORECAST_PLOT_DIR +from climada.hazard.storm_europe import StormEurope from climada.util.constants import WS_DEMO_NC HAZ_DIR = CONFIG.hazard.test_data.dir() + class TestCalc(unittest.TestCase): """Test calc and propety functions from the Forecast class""" def test_Forecast_calc_properties(self): """Test calc and propety functions from the Forecast class""" - #hazard + # hazard haz = StormEurope.from_cosmoe_file( - HAZ_DIR.joinpath('storm_europe_cosmoe_forecast_vmax_testfile.nc'), - run_datetime=dt.datetime(2018,1,1), - event_date=dt.datetime(2018,1,3)) - #exposure + 
HAZ_DIR.joinpath("storm_europe_cosmoe_forecast_vmax_testfile.nc"), + run_datetime=dt.datetime(2018, 1, 1), + event_date=dt.datetime(2018, 1, 3), + ) + # exposure data = {} - data['latitude'] = haz.centroids.lat - data['longitude'] = haz.centroids.lon - data['value'] = np.ones_like(data['latitude']) * 100000 - data['deductible'] = np.zeros_like(data['latitude']) - data[INDICATOR_IMPF + 'WS'] = np.ones_like(data['latitude']) - data['region_id'] = np.ones_like(data['latitude'],dtype=int) * 756 + data["latitude"] = haz.centroids.lat + data["longitude"] = haz.centroids.lon + data["value"] = np.ones_like(data["latitude"]) * 100000 + data["deductible"] = np.zeros_like(data["latitude"]) + data[INDICATOR_IMPF + "WS"] = np.ones_like(data["latitude"]) + data["region_id"] = np.ones_like(data["latitude"], dtype=int) * 756 expo = Exposures(gpd.GeoDataFrame(data=data)) - #vulnerability - #generate vulnerability + # vulnerability + # generate vulnerability impact_function = ImpfStormEurope.from_welker() impact_function_set = ImpactFuncSet([impact_function]) - #create and calculate Forecast - forecast = Forecast({dt.datetime(2018,1,1): haz}, expo, impact_function_set) + # create and calculate Forecast + forecast = Forecast({dt.datetime(2018, 1, 1): haz}, expo, impact_function_set) forecast.calc() # test self.assertEqual(len(forecast.run_datetime), 1) - self.assertEqual(forecast.run_datetime[0], dt.datetime(2018,1,1)) - self.assertEqual(forecast.event_date, dt.datetime(2018,1,3)) - self.assertEqual(forecast.lead_time().days,2) - self.assertEqual(forecast.summary_str(), - 'WS_NWP_run2018010100_event20180103_Switzerland') + self.assertEqual(forecast.run_datetime[0], dt.datetime(2018, 1, 1)) + self.assertEqual(forecast.event_date, dt.datetime(2018, 1, 3)) + self.assertEqual(forecast.lead_time().days, 2) + self.assertEqual( + forecast.summary_str(), "WS_NWP_run2018010100_event20180103_Switzerland" + ) self.assertAlmostEqual(forecast.ai_agg(), 26.347, places=1) self.assertAlmostEqual(forecast.ei_exp()[1], 7.941, places=1) self.assertEqual(len(forecast.hazard), 1) @@ -80,23 +84,23 @@ def test_Forecast_calc_properties(self): def test_Forecast_init_raise(self): """Test calc and propety functions from the Forecast class""" - #hazard with several event dates + # hazard with several event dates storms = StormEurope.from_footprints(WS_DEMO_NC) - #exposure + # exposure data = {} - data['latitude'] = np.array([1, 2, 3]) - data['longitude'] = np.array([1, 2, 3]) - data['value'] = np.ones_like(data['latitude']) * 100000 - data['deductible'] = np.zeros_like(data['latitude']) - data[INDICATOR_IMPF + 'WS'] = np.ones_like(data['latitude']) - data['region_id'] = np.ones_like(data['latitude'],dtype=int) * 756 + data["latitude"] = np.array([1, 2, 3]) + data["longitude"] = np.array([1, 2, 3]) + data["value"] = np.ones_like(data["latitude"]) * 100000 + data["deductible"] = np.zeros_like(data["latitude"]) + data[INDICATOR_IMPF + "WS"] = np.ones_like(data["latitude"]) + data["region_id"] = np.ones_like(data["latitude"], dtype=int) * 756 expo = Exposures(gpd.GeoDataFrame(data=data)) - #vulnerability - #generate vulnerability + # vulnerability + # generate vulnerability impact_function_set = ImpactFuncSet() - #create and calculate Forecast + # create and calculate Forecast with self.assertRaises(ValueError): - Forecast({dt.datetime(2018,1,1): storms}, expo, impact_function_set) + Forecast({dt.datetime(2018, 1, 1): storms}, expo, impact_function_set) class TestPlot(unittest.TestCase): @@ -105,149 +109,162 @@ class 
TestPlot(unittest.TestCase): def test_Forecast_plot(self): """Test cplotting functions from the Forecast class""" ## given a forecast based on hazard exposure and vulnerability - #hazard + # hazard haz1 = StormEurope.from_cosmoe_file( - HAZ_DIR.joinpath('storm_europe_cosmoe_forecast_vmax_testfile.nc'), - run_datetime=dt.datetime(2018,1,1), - event_date=dt.datetime(2018,1,3)) + HAZ_DIR.joinpath("storm_europe_cosmoe_forecast_vmax_testfile.nc"), + run_datetime=dt.datetime(2018, 1, 1), + event_date=dt.datetime(2018, 1, 3), + ) haz1.centroids.gdf.geometry = haz1.centroids.gdf.geometry.translate(-1.2, 0.6) haz2 = StormEurope.from_cosmoe_file( - HAZ_DIR.joinpath('storm_europe_cosmoe_forecast_vmax_testfile.nc'), - run_datetime=dt.datetime(2018,1,1), - event_date=dt.datetime(2018,1,3)) + HAZ_DIR.joinpath("storm_europe_cosmoe_forecast_vmax_testfile.nc"), + run_datetime=dt.datetime(2018, 1, 1), + event_date=dt.datetime(2018, 1, 3), + ) haz2.centroids.gdf.geometry = haz2.centroids.gdf.geometry.translate(-1.2, 0.6) - #exposure + # exposure data = {} - data['latitude'] = haz1.centroids.lat - data['longitude'] = haz1.centroids.lon - data['value'] = np.ones_like(data['latitude']) * 100000 - data['deductible'] = np.zeros_like(data['latitude']) - data[INDICATOR_IMPF + 'WS'] = np.ones_like(data['latitude']) - data['region_id'] = np.ones_like(data['latitude'],dtype=int) * 756 + data["latitude"] = haz1.centroids.lat + data["longitude"] = haz1.centroids.lon + data["value"] = np.ones_like(data["latitude"]) * 100000 + data["deductible"] = np.zeros_like(data["latitude"]) + data[INDICATOR_IMPF + "WS"] = np.ones_like(data["latitude"]) + data["region_id"] = np.ones_like(data["latitude"], dtype=int) * 756 expo = Exposures(gpd.GeoDataFrame(data=data)) - #vulnerability - #generate vulnerability + # vulnerability + # generate vulnerability impact_function = ImpfStormEurope.from_welker() impact_function_set = ImpactFuncSet([impact_function]) - #create and calculate Forecast - forecast = Forecast({dt.datetime(2018,1,2): haz1, - dt.datetime(2017,12,31): haz2}, - expo, - impact_function_set) + # create and calculate Forecast + forecast = Forecast( + {dt.datetime(2018, 1, 2): haz1, dt.datetime(2017, 12, 31): haz2}, + expo, + impact_function_set, + ) forecast.calc() - #create a file containing the polygons of Swiss cantons using natural earth - cantons_file = CONFIG.local_data.save_dir.dir() / 'CHE_cantons.shp' - adm1_shape_file = shapereader.natural_earth(resolution='10m', - category='cultural', - name='admin_1_states_provinces') + # create a file containing the polygons of Swiss cantons using natural earth + cantons_file = CONFIG.local_data.save_dir.dir() / "CHE_cantons.shp" + adm1_shape_file = shapereader.natural_earth( + resolution="10m", category="cultural", name="admin_1_states_provinces" + ) if not cantons_file.exists(): - with fiona.open(adm1_shape_file, 'r') as source: - with fiona.open( - cantons_file, 'w', - **source.meta) as sink: + with fiona.open(adm1_shape_file, "r") as source: + with fiona.open(cantons_file, "w", **source.meta) as sink: for f in source: - if f['properties']['adm0_a3'] == 'CHE': + if f["properties"]["adm0_a3"] == "CHE": sink.write(f) ## test plotting functions # should save plot without failing - forecast.plot_imp_map(run_datetime=dt.datetime(2017,12,31), - explain_str='test text', - polygon_file=str(cantons_file), - save_fig=True, close_fig=True) - map_file_name = (forecast.summary_str(dt.datetime(2017,12,31)) + - '_impact_map' + - '.jpeg') + forecast.plot_imp_map( + 
run_datetime=dt.datetime(2017, 12, 31), + explain_str="test text", + polygon_file=str(cantons_file), + save_fig=True, + close_fig=True, + ) + map_file_name = ( + forecast.summary_str(dt.datetime(2017, 12, 31)) + "_impact_map" + ".jpeg" + ) map_file_name_full = Path(FORECAST_PLOT_DIR) / map_file_name map_file_name_full.absolute().unlink(missing_ok=False) - #should contain title strings - ax = forecast.plot_hist(run_datetime=dt.datetime(2017,12,31), - explain_str='test text', - save_fig=False, close_fig=False) + # should contain title strings + ax = forecast.plot_hist( + run_datetime=dt.datetime(2017, 12, 31), + explain_str="test text", + save_fig=False, + close_fig=False, + ) title_artists = ax.get_figure().get_children() title_texts = [x.get_text() for x in title_artists if isinstance(x, plt.Text)] - self.assertIn('test text', title_texts) - self.assertIn('Wed 03 Jan 2018 00-24UTC', title_texts) - self.assertIn('31.12.2017 00UTC +3d', title_texts) - #should contain average impact in axes + self.assertIn("test text", title_texts) + self.assertIn("Wed 03 Jan 2018 00-24UTC", title_texts) + self.assertIn("31.12.2017 00UTC +3d", title_texts) + # should contain average impact in axes artists = ax.get_children() texts = [x.get_text() for x in artists if type(x) == plt.Text] - self.assertIn('mean impact:\n 26 USD', texts) + self.assertIn("mean impact:\n 26 USD", texts) ax.get_figure().clf() - #should contain title strings - ax = forecast.plot_exceedence_prob(run_datetime=dt.datetime(2017,12,31), - threshold=5000, explain_str='test text exceedence', - save_fig=False, close_fig=False)[0][0] + # should contain title strings + ax = forecast.plot_exceedence_prob( + run_datetime=dt.datetime(2017, 12, 31), + threshold=5000, + explain_str="test text exceedence", + save_fig=False, + close_fig=False, + )[0][0] title_artists = ax.get_figure().get_children() title_texts = [x.get_text() for x in title_artists if isinstance(x, plt.Text)] - self.assertIn('test text exceedence', title_texts) - self.assertIn('Wed 03 Jan 2018 00-24UTC', title_texts) - self.assertIn('31.12.2017 00UTC +3d', title_texts) + self.assertIn("test text exceedence", title_texts) + self.assertIn("Wed 03 Jan 2018 00-24UTC", title_texts) + self.assertIn("31.12.2017 00UTC +3d", title_texts) ax.get_figure().clf() - forecast.plot_warn_map(str(cantons_file), - decision_level = 'polygon', - thresholds=[100000,500000, - 1000000,5000000], - probability_aggregation='mean', - area_aggregation='sum', - title="Building damage warning", - explain_text="warn level based on aggregated damages", - save_fig=False, - close_fig=True) - forecast.plot_warn_map(str(cantons_file), - decision_level = 'exposure_point', - thresholds=[1,1000, - 5000,5000000], - probability_aggregation=0.2, - area_aggregation=0.2, - title="Building damage warning", - explain_text="warn level based on aggregated damages", - run_datetime=dt.datetime(2017,12,31), - save_fig=False, - close_fig=True) + forecast.plot_warn_map( + str(cantons_file), + decision_level="polygon", + thresholds=[100000, 500000, 1000000, 5000000], + probability_aggregation="mean", + area_aggregation="sum", + title="Building damage warning", + explain_text="warn level based on aggregated damages", + save_fig=False, + close_fig=True, + ) + forecast.plot_warn_map( + str(cantons_file), + decision_level="exposure_point", + thresholds=[1, 1000, 5000, 5000000], + probability_aggregation=0.2, + area_aggregation=0.2, + title="Building damage warning", + explain_text="warn level based on aggregated damages", + 
run_datetime=dt.datetime(2017, 12, 31), + save_fig=False, + close_fig=True, + ) forecast.plot_hexbin_ei_exposure() plt.close() # should fail because of invalid decision_level with self.assertRaises(ValueError) as cm: - forecast.plot_warn_map(str(cantons_file), - decision_level='test_fail', - probability_aggregation=0.2, - area_aggregation=0.2, - title="Building damage warning", - explain_text="warn level based on aggregated damages", - save_fig=False, - close_fig=True) + forecast.plot_warn_map( + str(cantons_file), + decision_level="test_fail", + probability_aggregation=0.2, + area_aggregation=0.2, + title="Building damage warning", + explain_text="warn level based on aggregated damages", + save_fig=False, + close_fig=True, + ) plt.close() - self.assertIn( - "Parameter decision_level", str(cm.exception) - ) + self.assertIn("Parameter decision_level", str(cm.exception)) # should fail because of invalid probability_aggregation with self.assertRaises(ValueError) as cm: - forecast.plot_warn_map(str(cantons_file), - decision_level='exposure_point', - probability_aggregation='test_fail', - area_aggregation=0.2, - title="Building damage warning", - explain_text="warn level based on aggregated damages", - save_fig=False, - close_fig=True) + forecast.plot_warn_map( + str(cantons_file), + decision_level="exposure_point", + probability_aggregation="test_fail", + area_aggregation=0.2, + title="Building damage warning", + explain_text="warn level based on aggregated damages", + save_fig=False, + close_fig=True, + ) plt.close() - self.assertIn( - "Parameter probability_aggregation", str(cm.exception) - ) + self.assertIn("Parameter probability_aggregation", str(cm.exception)) # should fail because of invalid area_aggregation with self.assertRaises(ValueError) as cm: - forecast.plot_warn_map(str(cantons_file), - decision_level='exposure_point', - probability_aggregation=0.2, - area_aggregation='test_fail', - title="Building damage warning", - explain_text="warn level based on aggregated damages", - save_fig=False, - close_fig=True) + forecast.plot_warn_map( + str(cantons_file), + decision_level="exposure_point", + probability_aggregation=0.2, + area_aggregation="test_fail", + title="Building damage warning", + explain_text="warn level based on aggregated damages", + save_fig=False, + close_fig=True, + ) plt.close() - self.assertIn( - "Parameter area_aggregation", str(cm.exception) - ) + self.assertIn("Parameter area_aggregation", str(cm.exception)) # Execute Tests diff --git a/climada/engine/test/test_impact.py b/climada/engine/test/test_impact.py index 7b7d6fc9b..6c901f989 100644 --- a/climada/engine/test/test_impact.py +++ b/climada/engine/test/test_impact.py @@ -18,30 +18,30 @@ Test Impact class. 
""" + +import datetime as dt import unittest from pathlib import Path from tempfile import TemporaryDirectory + +import h5py import numpy as np import numpy.testing as npt -from scipy import sparse -import h5py from pyproj import CRS from rasterio.crs import CRS as rCRS -import datetime as dt +from scipy import sparse +import climada.util.coordinates as u_coord +from climada.engine import Impact, ImpactCalc from climada.entity.entity_def import Entity from climada.hazard.base import Hazard -from climada.engine import Impact, ImpactCalc -from climada.util.constants import ENT_DEMO_TODAY, DEF_CRS, DEMO_DIR, DEF_FREQ_UNIT -import climada.util.coordinates as u_coord - from climada.hazard.test.test_base import HAZ_TEST_TC +from climada.util.constants import DEF_CRS, DEF_FREQ_UNIT, DEMO_DIR, ENT_DEMO_TODAY +ENT: Entity = Entity.from_excel(ENT_DEMO_TODAY) +HAZ: Hazard = Hazard.from_hdf5(HAZ_TEST_TC) -ENT :Entity = Entity.from_excel(ENT_DEMO_TODAY) -HAZ :Hazard = Hazard.from_hdf5(HAZ_TEST_TC) - -DATA_FOLDER :Path = DEMO_DIR / 'test-results' +DATA_FOLDER: Path = DEMO_DIR / "test-results" DATA_FOLDER.mkdir(exist_ok=True) STR_DT = h5py.special_dtype(vlen=str) @@ -68,16 +68,17 @@ def dummy_impact(): haz_type="TC", ) + def dummy_impact_yearly(): """Return an impact containing events in multiple years""" imp = dummy_impact() - years = np.arange(2010,2010+len(imp.date)) + years = np.arange(2010, 2010 + len(imp.date)) # Edit the date and frequency - imp.date = np.array([dt.date(year,1,1).toordinal() for year in years]) + imp.date = np.array([dt.date(year, 1, 1).toordinal() for year in years]) imp.frequency_unit = "1/year" - imp.frequency = np.ones(len(years))/len(years) + imp.frequency = np.ones(len(years)) / len(years) # Calculate the correct expected annual impact freq_mat = imp.frequency.reshape(len(imp.frequency), 1) @@ -88,7 +89,8 @@ def dummy_impact_yearly(): class TestImpact(unittest.TestCase): - """"Test initialization and more""" + """ "Test initialization and more""" + def test_from_eih_pass(self): exp = ENT.exposures exp.assign_centroids(HAZ) @@ -111,8 +113,8 @@ def test_from_eih_pass(self): np.testing.assert_array_almost_equal(imp.at_event, fake_at_event) np.testing.assert_array_almost_equal( imp.coord_exp, - np.stack([exp.gdf['latitude'].values, exp.gdf['longitude'].values], axis=1) - ) + np.stack([exp.gdf["latitude"].values, exp.gdf["longitude"].values], axis=1), + ) def test_pyproj_crs(self): """Check if initializing with a pyproj.CRS transforms it into a string""" @@ -126,6 +128,7 @@ def test_rasterio_crs(self): impact = Impact(crs=crs) self.assertEqual(impact.crs, crs.to_wkt()) + class TestImpactConcat(unittest.TestCase): """test Impact.concat""" @@ -242,6 +245,7 @@ def test_results(self): class TestFreqCurve(unittest.TestCase): """Test exceedence frequency curve computation""" + def test_ref_value_pass(self): """Test result against reference value""" imp = Impact() @@ -257,8 +261,8 @@ def test_ref_value_pass(self): imp.at_event[7] = 0.381063674256423e9 imp.at_event[8] = 0.569142464157450e9 imp.at_event[9] = 0.467572545849132e9 - imp.unit = 'USD' - imp.frequency_unit = '1/day' + imp.unit = "USD" + imp.frequency_unit = "1/day" ifc = imp.calc_freq_curve() self.assertEqual(10, len(ifc.return_per)) @@ -283,9 +287,9 @@ def test_ref_value_pass(self): self.assertEqual(0.400665463736549e9, ifc.impact[2]) self.assertEqual(0.381063674256423e9, ifc.impact[1]) self.assertEqual(0, ifc.impact[0]) - self.assertEqual('Exceedance frequency curve', ifc.label) - self.assertEqual('USD', ifc.unit) - 
self.assertEqual('1/day', ifc.frequency_unit) + self.assertEqual("Exceedance frequency curve", ifc.label) + self.assertEqual("USD", ifc.unit) + self.assertEqual("1/day", ifc.frequency_unit) def test_ref_value_rp_pass(self): """Test result against reference value with given return periods""" @@ -302,8 +306,8 @@ def test_ref_value_rp_pass(self): imp.at_event[7] = 0.381063674256423e9 imp.at_event[8] = 0.569142464157450e9 imp.at_event[9] = 0.467572545849132e9 - imp.unit = 'USD' - imp.frequency_unit = '1/week' + imp.unit = "USD" + imp.frequency_unit = "1/week" ifc = imp.calc_freq_curve(np.array([100, 500, 1000])) self.assertEqual(3, len(ifc.return_per)) @@ -314,9 +318,10 @@ def test_ref_value_rp_pass(self): self.assertEqual(0, ifc.impact[0]) self.assertEqual(2320408028.5695677, ifc.impact[1]) self.assertEqual(3287314329.129928, ifc.impact[2]) - self.assertEqual('Exceedance frequency curve', ifc.label) - self.assertEqual('USD', ifc.unit) - self.assertEqual('1/week', ifc.frequency_unit) + self.assertEqual("Exceedance frequency curve", ifc.label) + self.assertEqual("USD", ifc.unit) + self.assertEqual("1/week", ifc.frequency_unit) + class TestImpactPerYear(unittest.TestCase): """Test calc_impact_year_set method""" @@ -336,18 +341,32 @@ def test_impact_per_year_sum(self): imp.at_event[7] = 0.381063674256423e9 imp.at_event[8] = 0.569142464157450e9 imp.at_event[9] = 0.467572545849132e9 - imp.date = np.array([732801, 716160, 718313, 712468, 732802, - 729285, 732931, 715419, 722404, 718351]) + imp.date = np.array( + [ + 732801, + 716160, + 718313, + 712468, + 732802, + 729285, + 732931, + 715419, + 722404, + 718351, + ] + ) iys_all = imp.impact_per_year() iys = imp.impact_per_year(all_years=False) iys_all_yr = imp.impact_per_year(year_range=(1975, 2000)) iys_yr = imp.impact_per_year(all_years=False, year_range=[1975, 2000]) iys_all_yr_1940 = imp.impact_per_year(all_years=True, year_range=[1940, 2000]) - self.assertEqual(np.around(sum([iys[year] for year in iys])), - np.around(sum(imp.at_event))) - self.assertEqual(sum([iys[year] for year in iys]), - sum([iys_all[year] for year in iys_all])) + self.assertEqual( + np.around(sum([iys[year] for year in iys])), np.around(sum(imp.at_event)) + ) + self.assertEqual( + sum([iys[year] for year in iys]), sum([iys_all[year] for year in iys_all]) + ) self.assertEqual(len(iys), 7) self.assertEqual(len(iys_all), 57) self.assertIn(1951 and 1959 and 2007, iys_all) @@ -358,8 +377,10 @@ def test_impact_per_year_sum(self): # year range (yr): self.assertEqual(len(iys_yr), 2) self.assertEqual(len(iys_all_yr), 26) - self.assertEqual(sum([iys_yr[year] for year in iys_yr]), - sum([iys_all_yr[year] for year in iys_all_yr])) + self.assertEqual( + sum([iys_yr[year] for year in iys_yr]), + sum([iys_all_yr[year] for year in iys_all_yr]), + ) self.assertIn(1997 and 1978, iys_yr) self.assertFalse(2007 in iys_yr) self.assertFalse(1959 in iys_yr) @@ -373,6 +394,7 @@ def test_impact_per_year_empty(self): self.assertEqual(len(iys), 0) self.assertEqual(len(iys_all), 0) + class TestIO(unittest.TestCase): """Test impact input/output methods.""" @@ -381,9 +403,9 @@ def test_write_read_ev_test(self): # Create impact object num_ev = 10 num_exp = 5 - imp_write = Impact(haz_type='TC') + imp_write = Impact(haz_type="TC") imp_write.event_id = np.arange(num_ev) - imp_write.event_name = ['event_' + str(num) for num in imp_write.event_id] + imp_write.event_name = ["event_" + str(num) for num in imp_write.event_id] imp_write.date = np.ones(num_ev) imp_write.coord_exp = np.zeros((num_exp, 2)) 
imp_write.coord_exp[:, 0] = 1.5 @@ -393,10 +415,10 @@ def test_write_read_ev_test(self): imp_write.frequency = np.ones(num_ev) * 0.1 imp_write.tot_value = 1000 imp_write.aai_agg = 1001 - imp_write.unit = 'USD' - imp_write.frequency_unit = '1/month' + imp_write.unit = "USD" + imp_write.frequency_unit = "1/month" - file_name = DATA_FOLDER.joinpath('test.csv') + file_name = DATA_FOLDER.joinpath("test.csv") imp_write.write_csv(file_name) imp_read = Impact.from_csv(file_name) @@ -411,16 +433,20 @@ def test_write_read_ev_test(self): self.assertEqual(imp_write.unit, imp_read.unit) self.assertEqual(imp_write.frequency_unit, imp_read.frequency_unit) self.assertEqual( - 0, len([i for i, j in zip(imp_write.event_name, imp_read.event_name) if i != j])) + 0, + len( + [i for i, j in zip(imp_write.event_name, imp_read.event_name) if i != j] + ), + ) def test_write_read_exp_test(self): """Test result against reference value""" # Create impact object num_ev = 5 num_exp = 10 - imp_write = Impact(haz_type='TC') + imp_write = Impact(haz_type="TC") imp_write.event_id = np.arange(num_ev) - imp_write.event_name = ['event_' + str(num) for num in imp_write.event_id] + imp_write.event_name = ["event_" + str(num) for num in imp_write.event_id] imp_write.date = np.ones(num_ev) imp_write.coord_exp = np.zeros((num_exp, 2)) imp_write.coord_exp[:, 0] = 1.5 @@ -430,10 +456,10 @@ def test_write_read_exp_test(self): imp_write.frequency = np.ones(num_ev) * 0.1 imp_write.tot_value = 1000 imp_write.aai_agg = 1001 - imp_write.unit = 'USD' - imp_write.frequency_unit = '1/month' + imp_write.unit = "USD" + imp_write.frequency_unit = "1/month" - file_name = DATA_FOLDER.joinpath('test.csv') + file_name = DATA_FOLDER.joinpath("test.csv") imp_write.write_csv(file_name) imp_read = Impact.from_csv(file_name) @@ -448,7 +474,11 @@ def test_write_read_exp_test(self): self.assertEqual(imp_write.unit, imp_read.unit) self.assertEqual(imp_write.frequency_unit, imp_read.frequency_unit) self.assertEqual( - 0, len([i for i, j in zip(imp_write.event_name, imp_read.event_name) if i != j])) + 0, + len( + [i for i, j in zip(imp_write.event_name, imp_read.event_name) if i != j] + ), + ) self.assertIsInstance(imp_read.crs, str) def test_excel_io(self): @@ -459,7 +489,7 @@ def test_excel_io(self): hazard = Hazard.from_hdf5(HAZ_TEST_TC) imp_write = ImpactCalc(ent.exposures, ent.impact_funcs, hazard).impact() - file_name = DATA_FOLDER.joinpath('test.xlsx') + file_name = DATA_FOLDER.joinpath("test.xlsx") imp_write.write_excel(file_name) imp_read = Impact.from_excel(file_name) @@ -467,15 +497,23 @@ def test_excel_io(self): np.testing.assert_array_equal(imp_write.event_id, imp_read.event_id) np.testing.assert_array_equal(imp_write.date, imp_read.date) np.testing.assert_array_equal(imp_write.coord_exp, imp_read.coord_exp) - np.testing.assert_array_almost_equal_nulp(imp_write.eai_exp, imp_read.eai_exp, nulp=5) - np.testing.assert_array_almost_equal_nulp(imp_write.at_event, imp_read.at_event, nulp=5) + np.testing.assert_array_almost_equal_nulp( + imp_write.eai_exp, imp_read.eai_exp, nulp=5 + ) + np.testing.assert_array_almost_equal_nulp( + imp_write.at_event, imp_read.at_event, nulp=5 + ) np.testing.assert_array_equal(imp_write.frequency, imp_read.frequency) self.assertEqual(imp_write.tot_value, imp_read.tot_value) self.assertEqual(imp_write.aai_agg, imp_read.aai_agg) self.assertEqual(imp_write.unit, imp_read.unit) self.assertEqual(imp_write.frequency_unit, imp_read.frequency_unit) self.assertEqual( - 0, len([i for i, j in zip(imp_write.event_name, 
imp_read.event_name) if i != j])) + 0, + len( + [i for i, j in zip(imp_write.event_name, imp_read.event_name) if i != j] + ), + ) self.assertIsInstance(imp_read.crs, str) def test_write_imp_mat(self): @@ -489,15 +527,18 @@ def test_write_imp_mat(self): impact.imp_mat[4, :] = np.arange(4) * 5 impact.imp_mat = sparse.csr_matrix(impact.imp_mat) - file_name = DATA_FOLDER.joinpath('test_imp_mat') + file_name = DATA_FOLDER.joinpath("test_imp_mat") impact.write_sparse_csr(file_name) - read_imp_mat = Impact().read_sparse_csr(f'{file_name}.npz') + read_imp_mat = Impact().read_sparse_csr(f"{file_name}.npz") for irow in range(5): np.testing.assert_array_equal( - read_imp_mat[irow, :].toarray(), impact.imp_mat[irow, :].toarray()) + read_imp_mat[irow, :].toarray(), impact.imp_mat[irow, :].toarray() + ) + class TestRPmatrix(unittest.TestCase): """Test computation of impact per return period for whole exposure""" + def test_local_exceedance_imp_pass(self): """Test calc local impacts per return period""" # Read default entity values @@ -508,12 +549,14 @@ def test_local_exceedance_imp_pass(self): hazard = Hazard.from_hdf5(HAZ_TEST_TC) # Compute the impact over the whole exposures - impact = ImpactCalc(ent.exposures, ent.impact_funcs, hazard).impact(save_mat=True) + impact = ImpactCalc(ent.exposures, ent.impact_funcs, hazard).impact( + save_mat=True + ) # Compute the impact per return period over the whole exposures impact_rp = impact.local_exceedance_imp(return_periods=(10, 40)) self.assertIsInstance(impact_rp, np.ndarray) - self.assertEqual(impact_rp.size, 2 * ent.exposures.gdf['value'].size) + self.assertEqual(impact_rp.size, 2 * ent.exposures.gdf["value"].size) self.assertAlmostEqual(np.max(impact_rp), 2916964966.388219, places=5) self.assertAlmostEqual(np.min(impact_rp), 444457580.131494, places=5) @@ -595,6 +638,7 @@ def test_no_imp_mat(self): class TestRiskTrans(unittest.TestCase): """Test risk transfer methods""" + def test_risk_trans_pass(self): """Test calc_risk_transfer""" # Create impact object @@ -609,8 +653,8 @@ def test_risk_trans_pass(self): imp.frequency = np.ones(10) / 5 imp.tot_value = 10 imp.aai_agg = 100 - imp.unit = 'USD' - imp.frequency_unit = '1/month' + imp.unit = "USD" + imp.frequency_unit = "1/month" imp.imp_mat = sparse.csr_matrix(np.empty((0, 0))) new_imp, imp_rt = imp.calc_risk_transfer(2, 10) @@ -624,7 +668,9 @@ def test_risk_trans_pass(self): np.testing.assert_array_almost_equal_nulp(new_imp.frequency, imp.frequency) np.testing.assert_array_almost_equal_nulp(new_imp.coord_exp, []) np.testing.assert_array_almost_equal_nulp(new_imp.eai_exp, []) - np.testing.assert_array_almost_equal_nulp(new_imp.at_event, [0, 1, 2, 2, 2, 2, 2, 2, 2, 5]) + np.testing.assert_array_almost_equal_nulp( + new_imp.at_event, [0, 1, 2, 2, 2, 2, 2, 2, 2, 5] + ) self.assertAlmostEqual(new_imp.aai_agg, 4.0) self.assertEqual(imp_rt.unit, imp.unit) @@ -637,7 +683,9 @@ def test_risk_trans_pass(self): np.testing.assert_array_almost_equal_nulp(imp_rt.frequency, imp.frequency) np.testing.assert_array_almost_equal_nulp(imp_rt.coord_exp, []) np.testing.assert_array_almost_equal_nulp(imp_rt.eai_exp, []) - np.testing.assert_array_almost_equal_nulp(imp_rt.at_event, [0, 0, 0, 1, 2, 3, 4, 5, 6, 10]) + np.testing.assert_array_almost_equal_nulp( + imp_rt.at_event, [0, 0, 0, 1, 2, 3, 4, 5, 6, 10] + ) self.assertAlmostEqual(imp_rt.aai_agg, 6.2) def test_transfer_risk_pass(self): @@ -661,6 +709,7 @@ def test_residual_risk_pass(self): class TestSelect(unittest.TestCase): """Test select method""" + def 
test_select_event_id_pass(self): """Test select by event id""" @@ -672,14 +721,18 @@ def test_select_event_id_pass(self): self.assertEqual(sel_imp.frequency_unit, imp.frequency_unit) np.testing.assert_array_equal(sel_imp.event_id, [10, 11, 12]) - self.assertEqual(sel_imp.event_name, [0, 1, 'two']) + self.assertEqual(sel_imp.event_name, [0, 1, "two"]) np.testing.assert_array_equal(sel_imp.date, [0, 1, 2]) - np.testing.assert_array_almost_equal_nulp(sel_imp.frequency, [1/6, 1/6, 1]) + np.testing.assert_array_almost_equal_nulp(sel_imp.frequency, [1 / 6, 1 / 6, 1]) np.testing.assert_array_equal(sel_imp.at_event, [0, 2, 4]) - np.testing.assert_array_equal(sel_imp.imp_mat.todense(), [[0,0], [1,1], [2,2]]) - np.testing.assert_array_almost_equal_nulp(sel_imp.eai_exp, [1/6+2, 1/6+2]) - self.assertEqual(sel_imp.aai_agg, 4+2/6) + np.testing.assert_array_equal( + sel_imp.imp_mat.todense(), [[0, 0], [1, 1], [2, 2]] + ) + np.testing.assert_array_almost_equal_nulp( + sel_imp.eai_exp, [1 / 6 + 2, 1 / 6 + 2] + ) + self.assertEqual(sel_imp.aai_agg, 4 + 2 / 6) self.assertEqual(sel_imp.tot_value, 7) np.testing.assert_array_equal(sel_imp.coord_exp, [[1, 2], [1.5, 2.5]]) @@ -691,21 +744,25 @@ def test_select_event_name_pass(self): """Test select by event name""" imp = dummy_impact() - sel_imp = imp.select(event_names=[0, 1, 'two']) + sel_imp = imp.select(event_names=[0, 1, "two"]) self.assertTrue(u_coord.equal_crs(sel_imp.crs, imp.crs)) self.assertEqual(sel_imp.unit, imp.unit) self.assertEqual(sel_imp.frequency_unit, imp.frequency_unit) np.testing.assert_array_equal(sel_imp.event_id, [10, 11, 12]) - self.assertEqual(sel_imp.event_name, [0, 1, 'two']) + self.assertEqual(sel_imp.event_name, [0, 1, "two"]) np.testing.assert_array_equal(sel_imp.date, [0, 1, 2]) - np.testing.assert_array_almost_equal_nulp(sel_imp.frequency, [1/6, 1/6, 1]) + np.testing.assert_array_almost_equal_nulp(sel_imp.frequency, [1 / 6, 1 / 6, 1]) np.testing.assert_array_equal(sel_imp.at_event, [0, 2, 4]) - np.testing.assert_array_equal(sel_imp.imp_mat.todense(), [[0,0], [1,1], [2,2]]) - np.testing.assert_array_almost_equal_nulp(sel_imp.eai_exp, [1/6+2, 1/6+2]) - self.assertEqual(sel_imp.aai_agg, 4+2/6) + np.testing.assert_array_equal( + sel_imp.imp_mat.todense(), [[0, 0], [1, 1], [2, 2]] + ) + np.testing.assert_array_almost_equal_nulp( + sel_imp.eai_exp, [1 / 6 + 2, 1 / 6 + 2] + ) + self.assertEqual(sel_imp.aai_agg, 4 + 2 / 6) self.assertEqual(sel_imp.tot_value, 7) np.testing.assert_array_equal(sel_imp.coord_exp, [[1, 2], [1.5, 2.5]]) @@ -724,14 +781,18 @@ def test_select_dates_pass(self): self.assertEqual(sel_imp.frequency_unit, imp.frequency_unit) np.testing.assert_array_equal(sel_imp.event_id, [10, 11, 12]) - self.assertEqual(sel_imp.event_name, [0, 1, 'two']) + self.assertEqual(sel_imp.event_name, [0, 1, "two"]) np.testing.assert_array_equal(sel_imp.date, [0, 1, 2]) - np.testing.assert_array_almost_equal_nulp(sel_imp.frequency, [1/6, 1/6, 1]) + np.testing.assert_array_almost_equal_nulp(sel_imp.frequency, [1 / 6, 1 / 6, 1]) np.testing.assert_array_equal(sel_imp.at_event, [0, 2, 4]) - np.testing.assert_array_equal(sel_imp.imp_mat.todense(), [[0,0], [1,1], [2,2]]) - np.testing.assert_array_almost_equal_nulp(sel_imp.eai_exp, [1/6+2, 1/6+2]) - self.assertEqual(sel_imp.aai_agg, 4+2/6) + np.testing.assert_array_equal( + sel_imp.imp_mat.todense(), [[0, 0], [1, 1], [2, 2]] + ) + np.testing.assert_array_almost_equal_nulp( + sel_imp.eai_exp, [1 / 6 + 2, 1 / 6 + 2] + ) + self.assertEqual(sel_imp.aai_agg, 4 + 2 / 6) self.assertEqual(sel_imp.tot_value, 
7) np.testing.assert_array_equal(sel_imp.coord_exp, [[1, 2], [1.5, 2.5]]) @@ -740,10 +801,10 @@ def test_select_dates_pass(self): self.assertIsInstance(sel_imp.imp_mat, sparse.csr_matrix) def test_select_coord_exp_pass(self): - """ test select by exp coordinates """ + """test select by exp coordinates""" imp = dummy_impact() - sel_imp = imp.select(coord_exp=np.array([1,2])) + sel_imp = imp.select(coord_exp=np.array([1, 2])) self.assertTrue(u_coord.equal_crs(sel_imp.crs, imp.crs)) self.assertEqual(sel_imp.unit, imp.unit) @@ -755,9 +816,13 @@ def test_select_coord_exp_pass(self): np.testing.assert_array_equal(sel_imp.frequency, imp.frequency) np.testing.assert_array_equal(sel_imp.at_event, [0, 1, 2, 3, 30, 31]) - np.testing.assert_array_equal(sel_imp.imp_mat.todense(), [[0], [1], [2], [3], [30], [31]]) - np.testing.assert_array_almost_equal_nulp(sel_imp.eai_exp, [1/6+2+3+1+31/30]) - self.assertEqual(sel_imp.aai_agg, 1/6+2+3+1+31/30) + np.testing.assert_array_equal( + sel_imp.imp_mat.todense(), [[0], [1], [2], [3], [30], [31]] + ) + np.testing.assert_array_almost_equal_nulp( + sel_imp.eai_exp, [1 / 6 + 2 + 3 + 1 + 31 / 30] + ) + self.assertEqual(sel_imp.aai_agg, 1 / 6 + 2 + 3 + 1 + 31 / 30) self.assertEqual(sel_imp.tot_value, None) np.testing.assert_array_equal(sel_imp.coord_exp, [[1, 2]]) @@ -766,7 +831,7 @@ def test_select_coord_exp_pass(self): self.assertIsInstance(sel_imp.imp_mat, sparse.csr_matrix) def test_select_event_identity_pass(self): - """ test select same impact with event name, id and date """ + """test select same impact with event name, id and date""" # Read default entity values ent = Entity.from_excel(ENT_DEMO_TODAY) @@ -780,12 +845,14 @@ def test_select_event_identity_pass(self): # Compute the impact over the whole exposures imp = ImpactCalc(ent.exposures, ent.impact_funcs, hazard).impact( - save_mat=True, assign_centroids=False) + save_mat=True, assign_centroids=False + ) - sel_imp = imp.select(event_ids=imp.event_id, - event_names=imp.event_name, - dates=(min(imp.date), max(imp.date)) - ) + sel_imp = imp.select( + event_ids=imp.event_id, + event_names=imp.event_name, + dates=(min(imp.date), max(imp.date)), + ) self.assertTrue(u_coord.equal_crs(sel_imp.crs, imp.crs)) self.assertEqual(sel_imp.unit, imp.unit) @@ -799,7 +866,7 @@ def test_select_event_identity_pass(self): np.testing.assert_array_equal(sel_imp.at_event, imp.at_event) np.testing.assert_array_equal(sel_imp.imp_mat.todense(), imp.imp_mat.todense()) np.testing.assert_array_equal(sel_imp.eai_exp, imp.eai_exp) - self.assertAlmostEqual(round(sel_imp.aai_agg,5), round(imp.aai_agg,5)) + self.assertAlmostEqual(round(sel_imp.aai_agg, 5), round(imp.aai_agg, 5)) self.assertEqual(sel_imp.tot_value, imp.tot_value) np.testing.assert_array_equal(sel_imp.coord_exp, imp.coord_exp) @@ -807,29 +874,32 @@ def test_select_event_identity_pass(self): self.assertIsInstance(sel_imp, Impact) self.assertIsInstance(sel_imp.imp_mat, sparse.csr_matrix) - def test_select_new_attributes(self): - """Test if impact has new attributes """ + """Test if impact has new attributes""" imp = dummy_impact() - imp.new_per_ev = ['a', 'b', 'c', 'd', 'e', 'f'] - sel_imp = imp.select(event_names=[0, 1, 'two']) + imp.new_per_ev = ["a", "b", "c", "d", "e", "f"] + sel_imp = imp.select(event_names=[0, 1, "two"]) - self.assertEqual(sel_imp.new_per_ev, ['a', 'b', 'c']) + self.assertEqual(sel_imp.new_per_ev, ["a", "b", "c"]) self.assertTrue(u_coord.equal_crs(sel_imp.crs, imp.crs)) self.assertEqual(sel_imp.unit, imp.unit) self.assertEqual(sel_imp.frequency_unit, 
imp.frequency_unit) np.testing.assert_array_equal(sel_imp.event_id, [10, 11, 12]) - self.assertEqual(sel_imp.event_name, [0, 1, 'two']) + self.assertEqual(sel_imp.event_name, [0, 1, "two"]) np.testing.assert_array_equal(sel_imp.date, [0, 1, 2]) - np.testing.assert_array_almost_equal_nulp(sel_imp.frequency, [1/6, 1/6, 1]) + np.testing.assert_array_almost_equal_nulp(sel_imp.frequency, [1 / 6, 1 / 6, 1]) np.testing.assert_array_equal(sel_imp.at_event, [0, 2, 4]) - np.testing.assert_array_equal(sel_imp.imp_mat.todense(), [[0,0], [1,1], [2,2]]) - np.testing.assert_array_almost_equal_nulp(sel_imp.eai_exp, [1/6+2, 1/6+2]) - self.assertEqual(sel_imp.aai_agg, 4+2/6) + np.testing.assert_array_equal( + sel_imp.imp_mat.todense(), [[0, 0], [1, 1], [2, 2]] + ) + np.testing.assert_array_almost_equal_nulp( + sel_imp.eai_exp, [1 / 6 + 2, 1 / 6 + 2] + ) + self.assertEqual(sel_imp.aai_agg, 4 + 2 / 6) self.assertEqual(sel_imp.tot_value, 7) np.testing.assert_array_equal(sel_imp.coord_exp, [[1, 2], [1.5, 2.5]]) @@ -858,21 +928,25 @@ def test_select_id_name_dates_pass(self): """Test select by event ids, names, and dates""" imp = dummy_impact() - sel_imp = imp.select(event_ids=[0], event_names=[1, 'two'], dates=(0, 2)) + sel_imp = imp.select(event_ids=[0], event_names=[1, "two"], dates=(0, 2)) self.assertTrue(u_coord.equal_crs(sel_imp.crs, imp.crs)) self.assertEqual(sel_imp.unit, imp.unit) self.assertEqual(sel_imp.frequency_unit, imp.frequency_unit) np.testing.assert_array_equal(sel_imp.event_id, [10, 11, 12]) - self.assertEqual(sel_imp.event_name, [0, 1, 'two']) + self.assertEqual(sel_imp.event_name, [0, 1, "two"]) np.testing.assert_array_equal(sel_imp.date, [0, 1, 2]) - np.testing.assert_array_almost_equal_nulp(sel_imp.frequency, [1/6, 1/6, 1]) + np.testing.assert_array_almost_equal_nulp(sel_imp.frequency, [1 / 6, 1 / 6, 1]) np.testing.assert_array_equal(sel_imp.at_event, [0, 2, 4]) - np.testing.assert_array_equal(sel_imp.imp_mat.todense(), [[0,0], [1,1], [2,2]]) - np.testing.assert_array_almost_equal_nulp(sel_imp.eai_exp, [1/6+2, 1/6+2]) - self.assertEqual(sel_imp.aai_agg, 4+2/6) + np.testing.assert_array_equal( + sel_imp.imp_mat.todense(), [[0, 0], [1, 1], [2, 2]] + ) + np.testing.assert_array_almost_equal_nulp( + sel_imp.eai_exp, [1 / 6 + 2, 1 / 6 + 2] + ) + self.assertEqual(sel_imp.aai_agg, 4 + 2 / 6) self.assertEqual(sel_imp.tot_value, 7) np.testing.assert_array_equal(sel_imp.coord_exp, [[1, 2], [1.5, 2.5]]) @@ -886,22 +960,25 @@ def test_select_imp_map_fail(self): imp = dummy_impact() imp.imp_mat = sparse.csr_matrix(np.empty((0, 0))) with self.assertRaises(ValueError): - imp.select(event_ids=[0], event_names=[1, 'two'], dates=(0, 2)) + imp.select(event_ids=[0], event_names=[1, "two"], dates=(0, 2)) def test_select_reset_frequency(self): """Test that reset_frequency option works correctly""" - imp = dummy_impact_yearly() # 6 events, 1 per year + imp = dummy_impact_yearly() # 6 events, 1 per year # select first 4 events n_yr = 4 - sel_imp = imp.select(dates=(imp.date[0],imp.date[n_yr-1]), reset_frequency=True) + sel_imp = imp.select( + dates=(imp.date[0], imp.date[n_yr - 1]), reset_frequency=True + ) # check frequency-related attributes - np.testing.assert_array_equal(sel_imp.frequency, [1/n_yr]*n_yr) - self.assertEqual(sel_imp.aai_agg,imp.at_event[0:n_yr].sum()/n_yr) - np.testing.assert_array_equal(sel_imp.eai_exp, - imp.imp_mat[0:n_yr,:].todense().sum(axis=0).A1/n_yr) + np.testing.assert_array_equal(sel_imp.frequency, [1 / n_yr] * n_yr) + self.assertEqual(sel_imp.aai_agg, imp.at_event[0:n_yr].sum() / 
n_yr) + np.testing.assert_array_equal( + sel_imp.eai_exp, imp.imp_mat[0:n_yr, :].todense().sum(axis=0).A1 / n_yr + ) class TestConvertExp(unittest.TestCase): @@ -910,9 +987,9 @@ def test__build_exp(self): imp = dummy_impact() exp = imp._build_exp() - np.testing.assert_array_equal(imp.eai_exp, exp.gdf['value']) - np.testing.assert_array_equal(imp.coord_exp[:, 0], exp.gdf['latitude']) - np.testing.assert_array_equal(imp.coord_exp[:, 1], exp.gdf['longitude']) + np.testing.assert_array_equal(imp.eai_exp, exp.gdf["value"]) + np.testing.assert_array_equal(imp.coord_exp[:, 0], exp.gdf["latitude"]) + np.testing.assert_array_equal(imp.coord_exp[:, 1], exp.gdf["longitude"]) self.assertTrue(u_coord.equal_crs(exp.crs, imp.crs)) self.assertEqual(exp.value_unit, imp.unit) self.assertEqual(exp.ref_year, 0) @@ -923,13 +1000,14 @@ def test__exp_build_event(self): imp = dummy_impact() event_id = imp.event_id[1] exp = imp._build_exp_event(event_id=event_id) - np.testing.assert_array_equal(imp.imp_mat[1].todense().A1, exp.gdf['value']) - np.testing.assert_array_equal(imp.coord_exp[:, 0], exp.gdf['latitude']) - np.testing.assert_array_equal(imp.coord_exp[:, 1], exp.gdf['longitude']) + np.testing.assert_array_equal(imp.imp_mat[1].todense().A1, exp.gdf["value"]) + np.testing.assert_array_equal(imp.coord_exp[:, 0], exp.gdf["latitude"]) + np.testing.assert_array_equal(imp.coord_exp[:, 1], exp.gdf["longitude"]) self.assertTrue(u_coord.equal_crs(exp.crs, imp.crs)) self.assertEqual(exp.value_unit, imp.unit) self.assertEqual(exp.ref_year, 0) + class TestMatchCentroids(unittest.TestCase): def test_match_centroids(self): @@ -941,7 +1019,7 @@ def test_match_centroids(self): fake_aai_agg = np.sum(fake_eai_exp) imp = Impact.from_eih(exp, HAZ, fake_at_event, fake_eai_exp, fake_aai_agg) imp_centr = imp.match_centroids(HAZ) - np.testing.assert_array_equal(imp_centr, exp.gdf['centr_TC']) + np.testing.assert_array_equal(imp_centr, exp.gdf["centr_TC"]) class TestImpactH5IO(unittest.TestCase): @@ -1072,7 +1150,7 @@ def test_read_hdf5_full(self): tot_value = 100 aai_agg = 200 unit = "unit" - haz_type="haz_type" + haz_type = "haz_type" # Write the data with h5py.File(self.filepath, "w") as file: @@ -1132,6 +1210,7 @@ def test_read_hdf5_full(self): self.assertIn("'event_name' is not stored as strings", cm.output[0]) self.assertListEqual(impact.event_name, ["1.2", "2.0"]) + # Execute Tests if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestFreqCurve) diff --git a/climada/engine/test/test_impact_calc.py b/climada/engine/test/test_impact_calc.py index d8a96747a..489f66a00 100644 --- a/climada/engine/test/test_impact_calc.py +++ b/climada/engine/test/test_impact_calc.py @@ -18,31 +18,31 @@ Test Impact class. 
""" + import unittest -from unittest.mock import create_autospec, MagicMock, call, patch -import numpy as np -from scipy import sparse -import geopandas as gpd from copy import deepcopy from pathlib import Path +from unittest.mock import MagicMock, call, create_autospec, patch + +import geopandas as gpd +import numpy as np +from scipy import sparse from climada import CONFIG -from climada.entity.entity_def import Entity -from climada.entity import Exposures, ImpactFuncSet, ImpactFunc, ImpfTropCyclone -from climada.hazard.base import Hazard, Centroids -from climada.engine import ImpactCalc, Impact +from climada.engine import Impact, ImpactCalc from climada.engine.impact_calc import LOGGER as ILOG -from climada.util.constants import ENT_DEMO_TODAY, DEMO_DIR +from climada.entity import Exposures, ImpactFunc, ImpactFuncSet, ImpfTropCyclone +from climada.entity.entity_def import Entity +from climada.hazard.base import Centroids, Hazard +from climada.test import get_test_file from climada.util.api_client import Client from climada.util.config import Config - -from climada.test import get_test_file - +from climada.util.constants import DEMO_DIR, ENT_DEMO_TODAY ENT = Entity.from_excel(ENT_DEMO_TODAY) -HAZ = Hazard.from_hdf5(get_test_file('test_tc_florida')) +HAZ = Hazard.from_hdf5(get_test_file("test_tc_florida")) -DATA_FOLDER = DEMO_DIR / 'test-results' +DATA_FOLDER = DEMO_DIR / "test-results" DATA_FOLDER.mkdir(exist_ok=True) @@ -50,18 +50,18 @@ def check_impact(self, imp, haz, exp, aai_agg, eai_exp, at_event, imp_mat_array= """Test properties of imapcts""" self.assertEqual(len(haz.event_id), len(imp.at_event)) self.assertIsInstance(imp, Impact) - np.testing.assert_allclose(imp.coord_exp[:,0], exp.gdf['latitude']) - np.testing.assert_allclose(imp.coord_exp[:,1], exp.gdf['longitude']) + np.testing.assert_allclose(imp.coord_exp[:, 0], exp.gdf["latitude"]) + np.testing.assert_allclose(imp.coord_exp[:, 1], exp.gdf["longitude"]) self.assertAlmostEqual(imp.aai_agg, aai_agg, 3) np.testing.assert_allclose(imp.eai_exp, eai_exp, rtol=1e-5) np.testing.assert_allclose(imp.at_event, at_event, rtol=1e-5) if imp_mat_array is not None: - np.testing.assert_allclose(imp.imp_mat.toarray().ravel(), - imp_mat_array.ravel()) + np.testing.assert_allclose(imp.imp_mat.toarray().ravel(), imp_mat_array.ravel()) class TestImpactCalc(unittest.TestCase): """Test Impact calc methods""" + def test_init(self): icalc = ImpactCalc(ENT.exposures, ENT.impact_funcs, HAZ) self.assertEqual(icalc.n_exp_pnt, ENT.exposures.gdf.shape[0]) @@ -82,11 +82,8 @@ def test_init(self): def test_metrics(self): """Test methods to get impact metrics""" - mat = sparse.csr_matrix(np.array( - [[1, 0, 1], - [2, 2, 0]] - )) - freq = np.array([1, 1/10]) + mat = sparse.csr_matrix(np.array([[1, 0, 1], [2, 2, 0]])) + freq = np.array([1, 1 / 10]) at_event = ImpactCalc.at_event_from_mat(mat) eai_exp = ImpactCalc.eai_exp_from_mat(mat, freq) aai_agg = ImpactCalc.aai_agg_from_eai_exp(eai_exp) @@ -101,58 +98,63 @@ def test_metrics(self): def test_apply_cover_to_mat(self): """Test methods to get insured metrics""" - mat = sparse.csr_matrix(np.array( - [[1, 0, 1], - [2, 2, 0]] - )) + mat = sparse.csr_matrix(np.array([[1, 0, 1], [2, 2, 0]])) cover = np.array([0, 1, 10]) imp = ImpactCalc.apply_cover_to_mat(mat, cover) - np.testing.assert_array_equal( - imp.todense(), np.array([[0, 0, 1], [0, 1, 0]]) - ) + np.testing.assert_array_equal(imp.todense(), np.array([[0, 0, 1], [0, 1, 0]])) def test_error_handling_mismatch_haz_type(self): """Test error handling in case hazard 
type of hazard does not appear in impf_set or exposures""" - haz_tc = Hazard('TC') + haz_tc = Hazard("TC") exp_tc = Exposures() - exp_tc.gdf['impf_TC'] = 1 + exp_tc.gdf["impf_TC"] = 1 exp_ws = Exposures() - exp_ws.gdf['impf_WS'] = 2 + exp_ws.gdf["impf_WS"] = 2 impf = ImpactFunc() impf.id = 1 impf.intensity = np.array([0, 20]) impf.paa = np.array([0, 1]) impf.mdd = np.array([0, 0.5]) - impf.haz_type = 'TC' + impf.haz_type = "TC" impfset_tc = ImpactFuncSet([impf]) - impf.haz_type = 'WS' + impf.haz_type = "WS" impfset_ws = ImpactFuncSet([impf]) - impf.haz_type = '' + impf.haz_type = "" impfset_undef = ImpactFuncSet([impf]) try: ImpactCalc(exp_ws, impfset_tc, haz_tc).impact() except Exception as e: - self.assertEqual(str(e), "Impact calculation not possible. No impact " - "functions found for hazard type TC in exposures.") + self.assertEqual( + str(e), + "Impact calculation not possible. No impact " + "functions found for hazard type TC in exposures.", + ) try: ImpactCalc(exp_tc, impfset_ws, haz_tc).impact() except Exception as e: - self.assertEqual(str(e), "Impact calculation not possible. No impact " - "functions found for hazard type TC in impf_set.") + self.assertEqual( + str(e), + "Impact calculation not possible. No impact " + "functions found for hazard type TC in impf_set.", + ) try: ImpactCalc(exp_tc, impfset_undef, haz_tc).impact() except Exception as e: - self.assertEqual(str(e), "Impact calculation not possible. No impact " - "functions found for hazard type TC in impf_set.") + self.assertEqual( + str(e), + "Impact calculation not possible. No impact " + "functions found for hazard type TC in impf_set.", + ) + def test_error_handling_mismatch_impf_ids(self): """Test error handling in case impf ids in exposures does not appear in impf_set""" - haz = Hazard('TC') + haz = Hazard("TC") exp = Exposures() - exp.gdf.loc[0,'impf_TC'] = 1 - exp.gdf.loc[1,'impf_TC'] = 2 - impf_exp = ImpactFunc(haz_type='TC', id=1) + exp.gdf.loc[0, "impf_TC"] = 1 + exp.gdf.loc[1, "impf_TC"] = 2 + impf_exp = ImpactFunc(haz_type="TC", id=1) impf_noexp = deepcopy(impf_exp) impf_noexp.id = 3 impfset = ImpactFuncSet([impf_exp, impf_noexp]) @@ -160,11 +162,13 @@ def test_error_handling_mismatch_impf_ids(self): with self.assertRaises(ValueError) as cm: ImpactCalc(exp, impfset, haz).impact() the_exception = cm.exception - self.assertEqual(the_exception.args[0], - "The associated impact function(s) with id(s) 2 have no match in " - "impact function set for hazard type \'TC\'.\nPlease make sure " - "that all exposure points are associated with an impact " - "function that is included in the impact function set.") + self.assertEqual( + the_exception.args[0], + "The associated impact function(s) with id(s) 2 have no match in " + "impact function set for hazard type 'TC'.\nPlease make sure " + "that all exposure points are associated with an impact " + "function that is included in the impact function set.", + ) def test_calc_impact_TC_pass(self): """Test compute impact""" @@ -173,15 +177,15 @@ def test_calc_impact_TC_pass(self): self.assertEqual(icalc.n_events, len(impact.at_event)) self.assertEqual(0, impact.at_event[0]) self.assertEqual(0, impact.at_event[7225]) - self.assertAlmostEqual(1.472482938320243e+08, impact.at_event[13809], delta=1) - self.assertAlmostEqual(7.076504723057620e+10, impact.at_event[12147], delta=1) + self.assertAlmostEqual(1.472482938320243e08, impact.at_event[13809], delta=1) + self.assertAlmostEqual(7.076504723057620e10, impact.at_event[12147], delta=1) self.assertEqual(0, 
impact.at_event[14449]) self.assertEqual(icalc.n_exp_pnt, len(impact.eai_exp)) - self.assertAlmostEqual(1.518553670803242e+08, impact.eai_exp[0], delta=1) - self.assertAlmostEqual(1.373490457046383e+08, impact.eai_exp[25], 6) - self.assertAlmostEqual(1.066837260150042e+08, impact.eai_exp[49], 6) - self.assertAlmostEqual(6.570532945599105e+11, impact.tot_value) - self.assertAlmostEqual(6.512201157564421e+09, impact.aai_agg, 5) + self.assertAlmostEqual(1.518553670803242e08, impact.eai_exp[0], delta=1) + self.assertAlmostEqual(1.373490457046383e08, impact.eai_exp[25], 6) + self.assertAlmostEqual(1.066837260150042e08, impact.eai_exp[49], 6) + self.assertAlmostEqual(6.570532945599105e11, impact.tot_value) + self.assertAlmostEqual(6.512201157564421e09, impact.aai_agg, 5) x = 0.6 HAZf = deepcopy(HAZ) @@ -192,53 +196,134 @@ def test_calc_impact_TC_pass(self): self.assertEqual(icalc.n_events, len(impact.at_event)) self.assertEqual(0, impact.at_event[0]) self.assertEqual(0, impact.at_event[7225]) - self.assertAlmostEqual(1.472482938320243e+08 * x, impact.at_event[13809], delta=1) - self.assertAlmostEqual(7.076504723057620e+10 * x, impact.at_event[12147], delta=1) + self.assertAlmostEqual( + 1.472482938320243e08 * x, impact.at_event[13809], delta=1 + ) + self.assertAlmostEqual( + 7.076504723057620e10 * x, impact.at_event[12147], delta=1 + ) self.assertEqual(0, impact.at_event[14449]) self.assertEqual(icalc.n_exp_pnt, len(impact.eai_exp)) - self.assertAlmostEqual(1.518553670803242e+08 * x, impact.eai_exp[0], delta=1) - self.assertAlmostEqual(1.373490457046383e+08 * x, impact.eai_exp[25], 6) - self.assertAlmostEqual(1.066837260150042e+08 * x, impact.eai_exp[49], 6) - self.assertAlmostEqual(6.570532945599105e+11, impact.tot_value) - self.assertAlmostEqual(6.512201157564421e+09 * x, impact.aai_agg, 5) + self.assertAlmostEqual(1.518553670803242e08 * x, impact.eai_exp[0], delta=1) + self.assertAlmostEqual(1.373490457046383e08 * x, impact.eai_exp[25], 6) + self.assertAlmostEqual(1.066837260150042e08 * x, impact.eai_exp[49], 6) + self.assertAlmostEqual(6.570532945599105e11, impact.tot_value) + self.assertAlmostEqual(6.512201157564421e09 * x, impact.aai_agg, 5) def test_calc_impact_RF_pass(self): - haz = Hazard.from_hdf5(get_test_file('test_hazard_US_flood_random_locations')) - exp = Exposures.from_hdf5(get_test_file('test_exposure_US_flood_random_locations')) - impf_set = ImpactFuncSet.from_excel(Path(__file__).parent / 'data' / 'flood_imp_func_set.xls') + haz = Hazard.from_hdf5(get_test_file("test_hazard_US_flood_random_locations")) + exp = Exposures.from_hdf5( + get_test_file("test_exposure_US_flood_random_locations") + ) + impf_set = ImpactFuncSet.from_excel( + Path(__file__).parent / "data" / "flood_imp_func_set.xls" + ) icalc = ImpactCalc(exp, impf_set, haz) impact = icalc.impact(assign_centroids=False) aai_agg = 161436.05112960344 - eai_exp = np.array([ - 1.61159701e+05, 1.33742847e+02, 0.00000000e+00, 4.21352988e-01, - 1.42185609e+02, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00 - ]) - at_event = np.array([ - 0.00000000e+00, 0.00000000e+00, 9.85233619e+04, 3.41245461e+04, - 7.73566566e+07, 0.00000000e+00, 0.00000000e+00 - ]) - imp_mat_array = np.array([ - [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, - 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00], - [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, - 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00], - [0.00000000e+00, 6.41965663e+04, 0.00000000e+00, 2.02249434e+02, - 3.41245461e+04, 
0.00000000e+00, 0.00000000e+00, 0.00000000e+00], - [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, - 3.41245461e+04, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00], - [7.73566566e+07, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, - 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00], - [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, - 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00], - [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, - 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00] - ]) + eai_exp = np.array( + [ + 1.61159701e05, + 1.33742847e02, + 0.00000000e00, + 4.21352988e-01, + 1.42185609e02, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + ] + ) + at_event = np.array( + [ + 0.00000000e00, + 0.00000000e00, + 9.85233619e04, + 3.41245461e04, + 7.73566566e07, + 0.00000000e00, + 0.00000000e00, + ] + ) + imp_mat_array = np.array( + [ + [ + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + ], + [ + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + ], + [ + 0.00000000e00, + 6.41965663e04, + 0.00000000e00, + 2.02249434e02, + 3.41245461e04, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + ], + [ + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 3.41245461e04, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + ], + [ + 7.73566566e07, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + ], + [ + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + ], + [ + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + ], + ] + ) check_impact(self, impact, haz, exp, aai_agg, eai_exp, at_event, imp_mat_array) def test_empty_impact(self): """Check that empty impact is returned if no centroids match the exposures""" exp = ENT.exposures.copy() - exp.gdf['centr_TC'] = -1 + exp.gdf["centr_TC"] = -1 icalc = ImpactCalc(exp, ENT.impact_funcs, HAZ) impact = icalc.impact(assign_centroids=False) aai_agg = 0.0 @@ -261,8 +346,9 @@ def test_single_event_impact(self): check_impact(self, impact, haz, ENT.exposures, aai_agg, eai_exp, at_event, None) impact = icalc.impact(save_mat=True, assign_centroids=False) imp_mat_array = sparse.csr_matrix((haz.size, len(ENT.exposures.gdf))).toarray() - check_impact(self, impact, haz, ENT.exposures, aai_agg, eai_exp, at_event, imp_mat_array) - + check_impact( + self, impact, haz, ENT.exposures, aai_agg, eai_exp, at_event, imp_mat_array + ) def test_calc_impact_save_mat_pass(self): """Test compute impact with impact matrix""" @@ -270,34 +356,39 @@ def test_calc_impact_save_mat_pass(self): impact = icalc.impact() self.assertIsInstance(impact.imp_mat, sparse.csr_matrix) - self.assertEqual(impact.imp_mat.shape, (HAZ.event_id.size, - ENT.exposures.gdf['value'].size)) + self.assertEqual( + impact.imp_mat.shape, (HAZ.event_id.size, ENT.exposures.gdf["value"].size) + ) np.testing.assert_array_almost_equal_nulp( - np.array(impact.imp_mat.sum(axis=1)).ravel(), impact.at_event, nulp=5) + np.array(impact.imp_mat.sum(axis=1)).ravel(), impact.at_event, nulp=5 + ) np.testing.assert_array_almost_equal_nulp( - np.sum(impact.imp_mat.toarray() * impact.frequency[:, None], 
axis=0).reshape(-1), - impact.eai_exp) + np.sum( + impact.imp_mat.toarray() * impact.frequency[:, None], axis=0 + ).reshape(-1), + impact.eai_exp, + ) self.assertEqual(icalc.n_events, len(impact.at_event)) self.assertEqual(0, impact.at_event[0]) self.assertEqual(0, impact.at_event[7225]) - self.assertAlmostEqual(1.472482938320243e+08, impact.at_event[13809], delta=1) - self.assertAlmostEqual(7.076504723057620e+10, impact.at_event[12147], delta=1) + self.assertAlmostEqual(1.472482938320243e08, impact.at_event[13809], delta=1) + self.assertAlmostEqual(7.076504723057620e10, impact.at_event[12147], delta=1) self.assertEqual(0, impact.at_event[14449]) self.assertEqual(icalc.n_exp_pnt, len(impact.eai_exp)) - self.assertAlmostEqual(1.518553670803242e+08, impact.eai_exp[0], delta=1) - self.assertAlmostEqual(1.373490457046383e+08, impact.eai_exp[25], 6) - self.assertAlmostEqual(1.066837260150042e+08, impact.eai_exp[49], 6) - self.assertAlmostEqual(6.570532945599105e+11, impact.tot_value) - self.assertAlmostEqual(6.512201157564421e+09, impact.aai_agg, 5) + self.assertAlmostEqual(1.518553670803242e08, impact.eai_exp[0], delta=1) + self.assertAlmostEqual(1.373490457046383e08, impact.eai_exp[25], 6) + self.assertAlmostEqual(1.066837260150042e08, impact.eai_exp[49], 6) + self.assertAlmostEqual(6.570532945599105e11, impact.tot_value) + self.assertAlmostEqual(6.512201157564421e09, impact.aai_agg, 5) def test_calc_insured_impact_pass(self): """Test compute insured impact""" exp = ENT.exposures.copy() - exp.gdf['cover'] /= 1e3 - exp.gdf['deductible'] += 1e5 + exp.gdf["cover"] /= 1e3 + exp.gdf["deductible"] += 1e5 icalc = ImpactCalc(exp, ENT.impact_funcs, HAZ) - with self.assertLogs(ILOG, level='INFO') as logs: + with self.assertLogs(ILOG, level="INFO") as logs: impact = icalc.impact() self.assertIn("cover and/or deductible columns detected", logs.output[1]) self.assertEqual(icalc.n_events, len(impact.at_event)) @@ -310,16 +401,16 @@ def test_calc_insured_impact_pass(self): self.assertAlmostEqual(3072092, impact.eai_exp[0], delta=1) self.assertAlmostEqual(2778593, impact.eai_exp[25], delta=1) self.assertAlmostEqual(2716548, impact.eai_exp[49], delta=1) - self.assertAlmostEqual(6.570532945599105e+11, impact.tot_value) + self.assertAlmostEqual(6.570532945599105e11, impact.tot_value) self.assertAlmostEqual(143180396, impact.aai_agg, delta=1) def test_calc_insured_impact_no_cover(self): """Test compute insured impact""" exp = ENT.exposures.copy() - exp.gdf['cover'] /= 1e3 - exp.gdf['deductible'] += 1e5 + exp.gdf["cover"] /= 1e3 + exp.gdf["deductible"] += 1e5 icalc = ImpactCalc(exp, ENT.impact_funcs, HAZ) - with self.assertLogs(ILOG, level='INFO') as logs: + with self.assertLogs(ILOG, level="INFO") as logs: impact = icalc.impact(ignore_cover=True) self.assertIn("cover and/or deductible columns detected", logs.output[1]) self.assertEqual(icalc.n_events, len(impact.at_event)) @@ -332,16 +423,16 @@ def test_calc_insured_impact_no_cover(self): self.assertAlmostEqual(151847975, impact.eai_exp[0], delta=1) self.assertAlmostEqual(137341654, impact.eai_exp[25], delta=1) self.assertAlmostEqual(106676521, impact.eai_exp[49], delta=1) - self.assertAlmostEqual(6.570532945599105e+11, impact.tot_value) + self.assertAlmostEqual(6.570532945599105e11, impact.tot_value) self.assertAlmostEqual(6511839456, impact.aai_agg, delta=1) def test_calc_insured_impact_no_deductible(self): """Test compute insured impact""" exp = ENT.exposures.copy() - exp.gdf['cover'] /= 1e3 - exp.gdf['deductible'] += 1e5 + exp.gdf["cover"] /= 1e3 + 
exp.gdf["deductible"] += 1e5 icalc = ImpactCalc(exp, ENT.impact_funcs, HAZ) - with self.assertLogs(ILOG, level='INFO') as logs: + with self.assertLogs(ILOG, level="INFO") as logs: impact = icalc.impact(ignore_deductible=True) self.assertIn("cover and/or deductible columns detected", logs.output[1]) self.assertEqual(icalc.n_events, len(impact.at_event)) @@ -354,20 +445,23 @@ def test_calc_insured_impact_no_deductible(self): self.assertAlmostEqual(3072413, impact.eai_exp[0], delta=1) self.assertAlmostEqual(2778914, impact.eai_exp[25], delta=1) self.assertAlmostEqual(2716831, impact.eai_exp[49], delta=1) - self.assertAlmostEqual(6.570532945599105e+11, impact.tot_value) + self.assertAlmostEqual(6.570532945599105e11, impact.tot_value) self.assertAlmostEqual(143195738, impact.aai_agg, delta=1) def test_calc_insured_impact_no_insurance(self): """Test compute insured impact""" exp = ENT.exposures.copy() - exp.gdf['cover'] /= 1e3 - exp.gdf['deductible'] += 1e5 + exp.gdf["cover"] /= 1e3 + exp.gdf["deductible"] += 1e5 icalc = ImpactCalc(exp, ENT.impact_funcs, HAZ) - with self.assertLogs(ILOG, level='INFO') as logs: + with self.assertLogs(ILOG, level="INFO") as logs: impact = icalc.impact(ignore_cover=True, ignore_deductible=True) - self.assertEqual(logs.output, [ - "INFO:climada.engine.impact_calc:Calculating impact for 150 assets (>0) and 14450 events." - ]) + self.assertEqual( + logs.output, + [ + "INFO:climada.engine.impact_calc:Calculating impact for 150 assets (>0) and 14450 events." + ], + ) self.assertEqual(icalc.n_events, len(impact.at_event)) self.assertEqual(0, impact.at_event[0]) self.assertEqual(0, impact.at_event[7225]) @@ -378,25 +472,30 @@ def test_calc_insured_impact_no_insurance(self): self.assertAlmostEqual(151855367, impact.eai_exp[0], delta=1) self.assertAlmostEqual(137349045, impact.eai_exp[25], delta=1) self.assertAlmostEqual(106683726, impact.eai_exp[49], delta=1) - self.assertAlmostEqual(6.570532945599105e+11, impact.tot_value) + self.assertAlmostEqual(6.570532945599105e11, impact.tot_value) self.assertAlmostEqual(6512201157, impact.aai_agg, delta=1) def test_calc_insured_impact_save_mat_pass(self): """Test compute impact with impact matrix""" exp = ENT.exposures.copy() - exp.gdf['cover'] /= 1e3 - exp.gdf['deductible'] += 1e5 + exp.gdf["cover"] /= 1e3 + exp.gdf["deductible"] += 1e5 icalc = ImpactCalc(exp, ENT.impact_funcs, HAZ) impact = icalc.impact(save_mat=True) self.assertIsInstance(impact.imp_mat, sparse.csr_matrix) - self.assertEqual(impact.imp_mat.shape, (HAZ.event_id.size, - ENT.exposures.gdf['value'].size)) + self.assertEqual( + impact.imp_mat.shape, (HAZ.event_id.size, ENT.exposures.gdf["value"].size) + ) np.testing.assert_array_almost_equal_nulp( - np.array(impact.imp_mat.sum(axis=1)).ravel(), impact.at_event, nulp=5) + np.array(impact.imp_mat.sum(axis=1)).ravel(), impact.at_event, nulp=5 + ) np.testing.assert_array_almost_equal_nulp( - np.sum(impact.imp_mat.toarray() * impact.frequency[:, None], axis=0).reshape(-1), - impact.eai_exp) + np.sum( + impact.imp_mat.toarray() * impact.frequency[:, None], axis=0 + ).reshape(-1), + impact.eai_exp, + ) self.assertEqual(icalc.n_events, len(impact.at_event)) self.assertEqual(0, impact.at_event[0]) @@ -408,23 +507,31 @@ def test_calc_insured_impact_save_mat_pass(self): self.assertAlmostEqual(3072092, impact.eai_exp[0], delta=1) self.assertAlmostEqual(2778593, impact.eai_exp[25], delta=1) self.assertAlmostEqual(2716548, impact.eai_exp[49], delta=1) - self.assertAlmostEqual(6.570532945599105e+11, impact.tot_value) + 
self.assertAlmostEqual(6.570532945599105e11, impact.tot_value) self.assertAlmostEqual(143180396, impact.aai_agg, delta=1) def test_minimal_exp_gdf(self): """Test obtain minimal exposures gdf""" icalc = ImpactCalc(ENT.exposures, ENT.impact_funcs, HAZ) - exp_min_gdf = icalc.minimal_exp_gdf('impf_TC', assign_centroids=True, - ignore_cover=True, ignore_deductible=True) - self.assertSetEqual(set(exp_min_gdf.columns), - set(['value', 'impf_TC', 'centr_TC'])) - np.testing.assert_array_equal(exp_min_gdf['value'], ENT.exposures.gdf['value']) - np.testing.assert_array_equal(exp_min_gdf['impf_TC'], ENT.exposures.gdf['impf_TC']) - np.testing.assert_array_equal(exp_min_gdf['centr_TC'], ENT.exposures.gdf['centr_TC']) + exp_min_gdf = icalc.minimal_exp_gdf( + "impf_TC", assign_centroids=True, ignore_cover=True, ignore_deductible=True + ) + self.assertSetEqual( + set(exp_min_gdf.columns), set(["value", "impf_TC", "centr_TC"]) + ) + np.testing.assert_array_equal(exp_min_gdf["value"], ENT.exposures.gdf["value"]) + np.testing.assert_array_equal( + exp_min_gdf["impf_TC"], ENT.exposures.gdf["impf_TC"] + ) + np.testing.assert_array_equal( + exp_min_gdf["centr_TC"], ENT.exposures.gdf["centr_TC"] + ) def test_stitch_impact_matrix(self): """Check how sparse matrices from a generator are stitched together""" - icalc = ImpactCalc(Exposures({'blank': [1, 2, 3, 4]}), ImpactFuncSet(), Hazard()) + icalc = ImpactCalc( + Exposures({"blank": [1, 2, 3, 4]}), ImpactFuncSet(), Hazard() + ) icalc.hazard.event_id = np.array([1, 2, 3]) icalc._orig_exp_idx = np.array([0, 1, 2, 3]) @@ -449,13 +556,15 @@ def test_apply_deductible_to_mat(self): centr_idx = np.ones(2) impf = None - mat = ImpactCalc.apply_deductible_to_mat(mat, deductible, hazard, centr_idx, impf) + mat = ImpactCalc.apply_deductible_to_mat( + mat, deductible, hazard, centr_idx, impf + ) np.testing.assert_array_equal(mat.toarray(), [[9.0, 20.0], [29.9, 39.5]]) hazard.get_paa.assert_called_once_with(centr_idx, impf) def test_stitch_risk_metrics(self): """Test computing risk metrics from an impact matrix generator""" - icalc = ImpactCalc(Exposures({'blank': [1, 2, 3]}), ImpactFuncSet(), Hazard()) + icalc = ImpactCalc(Exposures({"blank": [1, 2, 3]}), ImpactFuncSet(), Hazard()) icalc.hazard.event_id = np.array([1, 2]) icalc.hazard.frequency = np.array([2, 0.5]) icalc._orig_exp_idx = np.array([0, 1, 2]) @@ -477,20 +586,19 @@ def test_single_exp_zero_mdr(self): haz = Hazard( intensity=sparse.csr_matrix(np.array([[31.5], [19.0]])), event_id=np.arange(2), - event_name=[0,1], + event_name=[0, 1], frequency=np.ones(2) / 2, - fraction=sparse.csr_matrix(np.zeros((2,1))), + fraction=sparse.csr_matrix(np.zeros((2, 1))), date=np.array([0, 1]), centroids=centroids, - haz_type='TC' + haz_type="TC", + ) + exp = Exposures( + {"value": [1.0], "longitude": 28.22, "latitude": -26.17, "impf_TC": 1}, + crs="EPSG:4326", ) - exp = Exposures({'value': [1.], - 'longitude': 28.22, - 'latitude': -26.17, - 'impf_TC': 1}, - crs="EPSG:4326") imp_evt = 0.00250988804927603 - aai_agg = imp_evt/2 + aai_agg = imp_evt / 2 eai_exp = np.array([aai_agg]) at_event = np.array([imp_evt, 0]) exp.set_geometry_points() @@ -500,6 +608,7 @@ def test_single_exp_zero_mdr(self): imp = ImpactCalc(exp, impf_set, haz).impact(save_mat=True) check_impact(self, imp, haz, exp, aai_agg, eai_exp, at_event, at_event) + class TestImpactMatrixCalc(unittest.TestCase): """Verify the computation of the impact matrix""" @@ -546,7 +655,7 @@ class TestImpactMatrixGenerator(unittest.TestCase): """Check the impact matrix generator""" def 
setUp(self): - """"Initialize mocks""" + """ "Initialize mocks""" # Alter the default config to enable chunking self._max_matrix_size = CONFIG.max_matrix_size.int() CONFIG.max_matrix_size = Config(val=1, root=CONFIG) @@ -588,7 +697,10 @@ def test_selection(self): # Verify calls self.impfset.get_func.assert_has_calls( - [call(haz_type="haz_type", fun_id=0), call(haz_type="haz_type", fun_id=11),] + [ + call(haz_type="haz_type", fun_id=0), + call(haz_type="haz_type", fun_id=11), + ] ) self.icalc.impact_matrix.assert_has_calls( [ @@ -629,7 +741,9 @@ def test_chunk_error(self): def test_empty_exp(self): """imp_mat_gen should return an empty iterator for an empty dataframe""" - exp_gdf = gpd.GeoDataFrame({"impact_functions": [], "centr_col": [], "value": []}) + exp_gdf = gpd.GeoDataFrame( + {"impact_functions": [], "centr_col": [], "value": []} + ) self.assertEqual( [], list(self.icalc.imp_mat_gen(exp_gdf=exp_gdf, impf_col="impact_functions")), @@ -638,8 +752,9 @@ def test_empty_exp(self): class TestInsuredImpactMatrixGenerator(unittest.TestCase): """Verify the computation of the insured impact matrix""" + def setUp(self): - """"Initialize mocks""" + """ "Initialize mocks""" hazard = create_autospec(HAZ) self.icalc = ImpactCalc(ENT.exposures, ENT.impact_funcs, hazard) self.icalc._orig_exp_idx = np.array([0, 1]) @@ -656,8 +771,13 @@ def setUp(self): def test_insured_mat_gen(self): """Test insured impact matrix generator""" exp_gdf = gpd.GeoDataFrame( - {"impact_functions": [0, 2], "centr_col": [0, 10], "value": [1.0, 2.0], - "deductible": [10.0, 20.0], "cover": [1.0, 100.0]} + { + "impact_functions": [0, 2], + "centr_col": [0, 10], + "value": [1.0, 2.0], + "deductible": [10.0, 20.0], + "cover": [1.0, 100.0], + } ) imp_mat_gen = ((i, np.array([i])) for i in range(2)) gen = self.icalc.insured_mat_gen(imp_mat_gen, exp_gdf, "impact_functions") @@ -672,7 +792,10 @@ def test_insured_mat_gen(self): # Check if correct impf_id was selected self.icalc.impfset.get_func.assert_has_calls( - [call(haz_type="haz_type", fun_id=0), call(haz_type="haz_type", fun_id=2),] + [ + call(haz_type="haz_type", fun_id=0), + call(haz_type="haz_type", fun_id=2), + ] ) # Check if correct deductible and cent_idx were selected self.icalc.apply_deductible_to_mat.assert_has_calls( @@ -692,6 +815,7 @@ def test_insured_mat_gen(self): class TestImpactMatrix(unittest.TestCase): """Test Impact matrix computation""" + def setUp(self): """Initialize mock""" hazard = create_autospec(HAZ) diff --git a/climada/engine/test/test_impact_data.py b/climada/engine/test/test_impact_data.py index 5391a3b14..05ad8ea41 100644 --- a/climada/engine/test/test_impact_data.py +++ b/climada/engine/test/test_impact_data.py @@ -18,130 +18,152 @@ Test Impact class. 
""" + import unittest -import numpy as np import warnings -from climada import CONFIG -from climada.util.constants import DEMO_DIR +import numpy as np import climada.engine.impact_data as im_d +from climada import CONFIG +from climada.util.constants import DEMO_DIR DATA_DIR = CONFIG.engine.test_data.dir() -EMDAT_TEST_CSV = DATA_DIR.joinpath('emdat_testdata_BGD_USA_1970-2017.csv') -EMDAT_TEST_CSV_FAKE = DATA_DIR.joinpath('emdat_testdata_fake_2007-2011.csv') -EMDAT_2020_CSV_DEMO = DEMO_DIR.joinpath('demo_emdat_impact_data_2020.csv') +EMDAT_TEST_CSV = DATA_DIR.joinpath("emdat_testdata_BGD_USA_1970-2017.csv") +EMDAT_TEST_CSV_FAKE = DATA_DIR.joinpath("emdat_testdata_fake_2007-2011.csv") +EMDAT_2020_CSV_DEMO = DEMO_DIR.joinpath("demo_emdat_impact_data_2020.csv") + class TestEmdatImport(unittest.TestCase): """Test import of EM-DAT data (as CSV) for impact data analysis""" def test_clean_emdat_df_2018_load(self): """load selected sub sample from CSV, return DataFrame. - here: from 2018 EM-DAT version to 2018 target_version""" + here: from 2018 EM-DAT version to 2018 target_version""" - df = im_d.clean_emdat_df(EMDAT_TEST_CSV, countries=['Bangladesh'], hazard='TC', - year_range=[2000, 2017], target_version=2018) - self.assertIn('ISO', df.columns) - self.assertIn('Year', df.columns) - iso3 = list(df['ISO'].unique()) - years = np.arange(df['Year'].min(), df['Year'].max() + 1) + df = im_d.clean_emdat_df( + EMDAT_TEST_CSV, + countries=["Bangladesh"], + hazard="TC", + year_range=[2000, 2017], + target_version=2018, + ) + self.assertIn("ISO", df.columns) + self.assertIn("Year", df.columns) + iso3 = list(df["ISO"].unique()) + years = np.arange(df["Year"].min(), df["Year"].max() + 1) - self.assertListEqual(['BGD'], iso3) + self.assertListEqual(["BGD"], iso3) self.assertEqual(18, len(years)) self.assertEqual(2017, years[-1]) self.assertEqual(2010, years[10]) self.assertEqual(450, df.size) - self.assertEqual(8978541, df['Total affected'].max()) - self.assertIn('Tropical cyclone', list(df['Disaster subtype'])) - self.assertFalse(False in list(df['Disaster subtype'] == 'Tropical cyclone')) - self.assertFalse('Flood' in list(df['Disaster subtype'])) + self.assertEqual(8978541, df["Total affected"].max()) + self.assertIn("Tropical cyclone", list(df["Disaster subtype"])) + self.assertFalse(False in list(df["Disaster subtype"] == "Tropical cyclone")) + self.assertFalse("Flood" in list(df["Disaster subtype"])) def test_emdat_df_2018_to_2020_load(self): """load selected sub sample from CSV, return DataFrame - here: from 2018 EM-DAT version to 2020 target_version""" - df = im_d.clean_emdat_df(EMDAT_TEST_CSV, countries=['USA'], hazard='TC', - year_range=[2000, 2017], target_version=2020) - self.assertIn('ISO', df.columns) - self.assertIn('Year', df.columns) - iso3 = list(df['ISO'].unique()) - years = np.arange(df['Year'].min(), df['Year'].max() + 1) - self.assertListEqual(['USA'], iso3) + here: from 2018 EM-DAT version to 2020 target_version""" + df = im_d.clean_emdat_df( + EMDAT_TEST_CSV, + countries=["USA"], + hazard="TC", + year_range=[2000, 2017], + target_version=2020, + ) + self.assertIn("ISO", df.columns) + self.assertIn("Year", df.columns) + iso3 = list(df["ISO"].unique()) + years = np.arange(df["Year"].min(), df["Year"].max() + 1) + self.assertListEqual(["USA"], iso3) self.assertEqual(18, len(years)) self.assertEqual(2017, years[-1]) self.assertEqual(2010, years[10]) self.assertEqual(1634, df.size) self.assertEqual(60000000, df["Insured Damages ('000 US$)"].max()) - self.assertIn('Tropical cyclone', 
list(df['Disaster Subtype'])) - self.assertFalse(False in list(df['Disaster Subtype'] == 'Tropical cyclone')) - self.assertFalse('Flood' in list(df['Disaster Subtype'])) + self.assertIn("Tropical cyclone", list(df["Disaster Subtype"])) + self.assertFalse(False in list(df["Disaster Subtype"] == "Tropical cyclone")) + self.assertFalse("Flood" in list(df["Disaster Subtype"])) def test_emdat_df_2020_load(self): """load selected sub sample from CSV, return DataFrame - here: from 2020 EM-DAT version to 2020 target_version""" - df = im_d.clean_emdat_df(EMDAT_2020_CSV_DEMO, countries=['THA', 'Viet Nam'], hazard='TC', - year_range=[2005, 2008], target_version=2020) - self.assertIn('ISO', df.columns) - self.assertIn('Year', df.columns) - iso3 = list(df['ISO'].unique()) - years = np.arange(df['Year'].min(), df['Year'].max() + 1) - self.assertIn('THA', iso3) - self.assertIn('VNM', iso3) - self.assertNotIn('USA', iso3) - self.assertNotIn('TWN', iso3) + here: from 2020 EM-DAT version to 2020 target_version""" + df = im_d.clean_emdat_df( + EMDAT_2020_CSV_DEMO, + countries=["THA", "Viet Nam"], + hazard="TC", + year_range=[2005, 2008], + target_version=2020, + ) + self.assertIn("ISO", df.columns) + self.assertIn("Year", df.columns) + iso3 = list(df["ISO"].unique()) + years = np.arange(df["Year"].min(), df["Year"].max() + 1) + self.assertIn("THA", iso3) + self.assertIn("VNM", iso3) + self.assertNotIn("USA", iso3) + self.assertNotIn("TWN", iso3) self.assertEqual(4, len(years)) self.assertEqual(2008, years[-1]) self.assertEqual(2006, years[1]) self.assertEqual(43, df.columns.size) self.assertEqual(688, df.size) self.assertEqual(624000, df["Total Damages ('000 US$)"].max()) - self.assertIn('Tropical cyclone', list(df['Disaster Subtype'])) - self.assertFalse(False in list(df['Disaster Subtype'] == 'Tropical cyclone')) - self.assertFalse('Flood' in list(df['Disaster Subtype'])) + self.assertIn("Tropical cyclone", list(df["Disaster Subtype"])) + self.assertFalse(False in list(df["Disaster Subtype"] == "Tropical cyclone")) + self.assertFalse("Flood" in list(df["Disaster Subtype"])) + class TestEmdatProcessing(unittest.TestCase): def test_emdat_impact_event_2018(self): """test emdat_impact_event event impact data extraction, version 2018""" - df = im_d.emdat_impact_event(EMDAT_TEST_CSV, countries=['Bangladesh', 'USA'], - hazard='Drought', year_range=[2015, 2017], - reference_year=2017, version=2018) + df = im_d.emdat_impact_event( + EMDAT_TEST_CSV, + countries=["Bangladesh", "USA"], + hazard="Drought", + year_range=[2015, 2017], + reference_year=2017, + version=2018, + ) self.assertEqual(46, df.size) - self.assertEqual('2017-9550', df['Disaster No.'][1]) - self.assertEqual(df["Total damage ('000 US$)"][0], - df["impact"][0] * 1e-3) - self.assertEqual(df["impact_scaled"][1], - df["impact"][1]) + self.assertEqual("2017-9550", df["Disaster No."][1]) + self.assertEqual(df["Total damage ('000 US$)"][0], df["impact"][0] * 1e-3) + self.assertEqual(df["impact_scaled"][1], df["impact"][1]) self.assertEqual(df["Total damage ('000 US$)"][1], 2500000) self.assertEqual(df["Total damage ('000 US$)"][0], 1800000) # scaled impact value might change if worldbank input data changes, # check magnitude and adjust if test failes in the following 1 lines: - self.assertAlmostEqual(df["impact_scaled"][0] * 1e-7, - 192.7868, places=0) - self.assertIn('USA', list(df['ISO'])) - self.assertIn('Drought', list(df['Disaster type'])) - self.assertEqual(2017, df['reference_year'].min()) + self.assertAlmostEqual(df["impact_scaled"][0] * 1e-7, 
192.7868, places=0) + self.assertIn("USA", list(df["ISO"])) + self.assertIn("Drought", list(df["Disaster type"])) + self.assertEqual(2017, df["reference_year"].min()) def test_emdat_impact_event_2020(self): """test emdat_impact_event event impact data extraction, version 2020""" - df = im_d.emdat_impact_event(EMDAT_TEST_CSV, countries=['Bangladesh', 'USA'], - hazard='Drought', year_range=[2015, 2017], - reference_year=2000, version=2020) + df = im_d.emdat_impact_event( + EMDAT_TEST_CSV, + countries=["Bangladesh", "USA"], + hazard="Drought", + year_range=[2015, 2017], + reference_year=2000, + version=2020, + ) self.assertEqual(96, df.size) - self.assertEqual('2017-9550', df['Dis No'][1]) - self.assertEqual(df["Total Damages ('000 US$)"][0], - df["impact"][0] * 1e-3) - self.assertNotEqual(df["impact_scaled"][1], - df["impact"][1]) + self.assertEqual("2017-9550", df["Dis No"][1]) + self.assertEqual(df["Total Damages ('000 US$)"][0], df["impact"][0] * 1e-3) + self.assertNotEqual(df["impact_scaled"][1], df["impact"][1]) self.assertEqual(df["Total Damages ('000 US$)"][1], 2500000) self.assertEqual(df["Total Damages ('000 US$)"][0], 1800000) # scaled impact value might change if worldbank input data changes, # check magnitude and adjust if test failes in the following line: - self.assertAlmostEqual(df["impact_scaled"][0] * 1e-9, - 1.012, places=0) - self.assertIn('USA', list(df['ISO'])) - self.assertIn('Drought', list(df['Disaster Type'])) - self.assertEqual(2000, df['reference_year'].min()) + self.assertAlmostEqual(df["impact_scaled"][0] * 1e-9, 1.012, places=0) + self.assertIn("USA", list(df["ISO"])) + self.assertIn("Drought", list(df["Disaster Type"])) + self.assertEqual(2000, df["reference_year"].min()) def test_emdat_impact_yearlysum_no_futurewarning(self): """Ensure that no FutureWarning about `DataFrame.append` being deprecated is issued""" @@ -159,32 +181,39 @@ def test_emdat_impact_yearlysum_no_futurewarning(self): def test_emdat_affected_yearlysum(self): """test emdat_impact_yearlysum yearly impact data extraction""" - df = im_d.emdat_impact_yearlysum(EMDAT_TEST_CSV, countries=['Bangladesh', 'USA'], - hazard='Flood', year_range=(2015, 2017), - reference_year=None, imp_str="Total Affected") + df = im_d.emdat_impact_yearlysum( + EMDAT_TEST_CSV, + countries=["Bangladesh", "USA"], + hazard="Flood", + year_range=(2015, 2017), + reference_year=None, + imp_str="Total Affected", + ) self.assertEqual(36, df.size) self.assertEqual(df["impact"][1], 91000) - self.assertEqual(df['impact'].sum(), 11517946) + self.assertEqual(df["impact"].sum(), 11517946) self.assertEqual(df["year"][5], 2017) - self.assertIn('USA', list(df['ISO'])) - self.assertIn('BGD', list(df['ISO'])) + self.assertIn("USA", list(df["ISO"])) + self.assertIn("BGD", list(df["ISO"])) def test_emdat_countries_by_hazard_2020_pass(self): """test to get list of countries impacted by tropical cyclones from 2000 to 2019""" - iso3_codes, country_names = im_d.emdat_countries_by_hazard(EMDAT_2020_CSV_DEMO, - hazard='TC', - year_range=(2000, 2019)) + iso3_codes, country_names = im_d.emdat_countries_by_hazard( + EMDAT_2020_CSV_DEMO, hazard="TC", year_range=(2000, 2019) + ) - self.assertIn('Réunion', country_names) - self.assertEqual('Sri Lanka', country_names[4]) - self.assertEqual('BLZ', iso3_codes[3]) + self.assertIn("Réunion", country_names) + self.assertEqual("Sri Lanka", country_names[4]) + self.assertEqual("BLZ", iso3_codes[3]) self.assertEqual(len(country_names), len(iso3_codes)) self.assertEqual(100, len(iso3_codes)) + class 
TestEmdatToImpact(unittest.TestCase): """Test import of EM-DAT data (as CSV) to Impact-instance (CLIMADA)""" + def test_emdat_to_impact_all_countries_pass(self): """test import EM-DAT to Impact() for all countries in CSV""" # ===================================================================== @@ -194,37 +223,44 @@ def test_emdat_to_impact_all_countries_pass(self): # ===================================================================== # file 1: version 2020 - _impact_emdat2020, countries2020 = im_d.emdat_to_impact(EMDAT_2020_CSV_DEMO, 'TC') + _impact_emdat2020, countries2020 = im_d.emdat_to_impact( + EMDAT_2020_CSV_DEMO, "TC" + ) # file 2: version 2018 - impact_emdat, countries = im_d.emdat_to_impact(EMDAT_TEST_CSV, 'TC') + impact_emdat, countries = im_d.emdat_to_impact(EMDAT_TEST_CSV, "TC") self.assertEqual(142, impact_emdat.event_id.size) self.assertEqual(141, impact_emdat.event_id[-1]) self.assertEqual(0, impact_emdat.event_id[0]) - self.assertIn('2013-0138', impact_emdat.event_name) - self.assertEqual('USA', countries[0]) - self.assertEqual('BGD', countries[1]) + self.assertIn("2013-0138", impact_emdat.event_name) + self.assertEqual("USA", countries[0]) + self.assertEqual("BGD", countries[1]) self.assertEqual(len(countries), len(impact_emdat.eai_exp)) self.assertEqual(2, len(impact_emdat.eai_exp)) self.assertEqual(impact_emdat.date.size, impact_emdat.frequency.size) - self.assertAlmostEqual(555861710000 * 1e-5, np.sum(impact_emdat.at_event) * 1e-5, places=0) - self.assertAlmostEqual(0.0208333333333, np.unique(impact_emdat.frequency)[0], places=7) + self.assertAlmostEqual( + 555861710000 * 1e-5, np.sum(impact_emdat.at_event) * 1e-5, places=0 + ) + self.assertAlmostEqual( + 0.0208333333333, np.unique(impact_emdat.frequency)[0], places=7 + ) self.assertAlmostEqual(11580452291.666666, impact_emdat.aai_agg, places=0) self.assertAlmostEqual(109456249.99999999, impact_emdat.eai_exp[1], places=0) self.assertAlmostEqual(11470996041.666666, impact_emdat.eai_exp[0], places=0) - self.assertIn('SPI', countries2020) - self.assertNotIn('SPI', countries) + self.assertIn("SPI", countries2020) + self.assertNotIn("SPI", countries) def test_emdat_to_impact_fakedata(self): """test import TC EM-DAT to Impact() for all countries in CSV""" - impact_emdat, countries = im_d.emdat_to_impact(EMDAT_TEST_CSV_FAKE, 'FL', - hazard_type_emdat='Flood') + impact_emdat, countries = im_d.emdat_to_impact( + EMDAT_TEST_CSV_FAKE, "FL", hazard_type_emdat="Flood" + ) self.assertEqual(6, impact_emdat.event_id.size) self.assertEqual(5, impact_emdat.event_id[-1]) self.assertEqual(0, impact_emdat.event_id[0]) - self.assertIn('2008-0001', impact_emdat.event_name) - self.assertEqual('CHE', countries[0]) - self.assertEqual('DEU', countries[1]) + self.assertIn("2008-0001", impact_emdat.event_name) + self.assertEqual("CHE", countries[0]) + self.assertEqual("DEU", countries[1]) self.assertEqual(len(countries), len(impact_emdat.eai_exp)) self.assertEqual(2, len(impact_emdat.eai_exp)) self.assertAlmostEqual(11000000.0, np.sum(impact_emdat.at_event)) @@ -235,24 +271,34 @@ def test_emdat_to_impact_fakedata(self): def test_emdat_to_impact_2020format(self): """test import TC EM-DAT to Impact() from new 2020 EMDAT format CSV""" - df1 = im_d.clean_emdat_df(EMDAT_2020_CSV_DEMO, hazard='TC', - countries='PHL', year_range=(2013, 2013)) - df2 = im_d.emdat_impact_event(EMDAT_2020_CSV_DEMO, countries='PHL', hazard='TC', - year_range=(2013, 2013), reference_year=None, - imp_str='Total Affected') - impact_emdat, _countries = 
im_d.emdat_to_impact(EMDAT_2020_CSV_DEMO, 'TC', - countries='PHL', - year_range=(2013, 2013), - imp_str="Total Affected") + df1 = im_d.clean_emdat_df( + EMDAT_2020_CSV_DEMO, hazard="TC", countries="PHL", year_range=(2013, 2013) + ) + df2 = im_d.emdat_impact_event( + EMDAT_2020_CSV_DEMO, + countries="PHL", + hazard="TC", + year_range=(2013, 2013), + reference_year=None, + imp_str="Total Affected", + ) + impact_emdat, _countries = im_d.emdat_to_impact( + EMDAT_2020_CSV_DEMO, + "TC", + countries="PHL", + year_range=(2013, 2013), + imp_str="Total Affected", + ) # compare number of entries for all steps: self.assertEqual(len(df1.index), len(df2.index)) self.assertEqual(impact_emdat.event_id.size, len(df1.index)) # TC events in EM-DAT in the Philipppines, 2013: self.assertEqual(8, impact_emdat.event_id.size) # People affected by TC events in the Philippines in 2013 (AAI): - self.assertAlmostEqual(17944571., impact_emdat.aai_agg, places=0) + self.assertAlmostEqual(17944571.0, impact_emdat.aai_agg, places=0) # People affected by Typhoon Hayian in the Philippines: - self.assertAlmostEqual(1.610687e+07, impact_emdat.at_event[4], places=0) + self.assertAlmostEqual(1.610687e07, impact_emdat.at_event[4], places=0) + # Execute Tests if __name__ == "__main__": diff --git a/climada/engine/unsequa/__init__.py b/climada/engine/unsequa/__init__.py index 7241979eb..84bf9d7a3 100755 --- a/climada/engine/unsequa/__init__.py +++ b/climada/engine/unsequa/__init__.py @@ -17,9 +17,9 @@ --- """ -from .unc_output import * -from .input_var import * from .calc_base import * -from .calc_impact import * from .calc_cost_benefit import * from .calc_delta_climate import * +from .calc_impact import * +from .input_var import * +from .unc_output import * diff --git a/climada/engine/unsequa/calc_base.py b/climada/engine/unsequa/calc_base.py index 10c302959..fd7f73f89 100644 --- a/climada/engine/unsequa/calc_base.py +++ b/climada/engine/unsequa/calc_base.py @@ -19,22 +19,21 @@ Define Calc (uncertainty calculate) class. """ -import logging import copy -import itertools - import datetime as dt +import itertools +import logging -import pandas as pd import numpy as np +import pandas as pd -from climada.util.value_representation import sig_dig as u_sig_dig from climada.engine.unsequa import UncOutput +from climada.util.value_representation import sig_dig as u_sig_dig LOGGER = logging.getLogger(__name__) -class Calc(): +class Calc: """ Base class for uncertainty quantification @@ -107,13 +106,14 @@ def check_distr(self): f"The input parameter {input_param_name}" " is shared among two input variables with" " different distributions." - ) + ) LOGGER.warning( "\n\nThe input parameter %s is shared " "among at least 2 input variables. Their uncertainty is " "thus computed with the same samples for this " - "input paramter.\n\n", input_param_name - ) + "input paramter.\n\n", + input_param_name, + ) distr_dict[input_param_name] = input_param_func return True @@ -171,20 +171,23 @@ def est_comp_time(self, n_samples, time_one_run, processes=None): """ time_one_run = u_sig_dig(time_one_run, n_sig_dig=3) if time_one_run > 5: - LOGGER.warning("Computation time for one set of parameters is " + LOGGER.warning( + "Computation time for one set of parameters is " "%.2fs. This is rather long." "Potential reasons: InputVars are loading data, centroids have " "been assigned to exp before defining input_var, ..." 
"\n If computation cannot be reduced, consider using" - " a surrogate model https://www.uqlab.com/", time_one_run) + " a surrogate model https://www.uqlab.com/", + time_one_run, + ) total_time = n_samples * time_one_run / processes - LOGGER.info("\n\nEstimated computaion time: %s\n", - dt.timedelta(seconds=total_time)) + LOGGER.info( + "\n\nEstimated computaion time: %s\n", dt.timedelta(seconds=total_time) + ) return total_time - def make_sample(self, N, sampling_method='saltelli', - sampling_kwargs = None): + def make_sample(self, N, sampling_method="saltelli", sampling_kwargs=None): """ Make samples of the input variables @@ -238,39 +241,37 @@ def make_sample(self, N, sampling_method='saltelli', param_labels = list(self.distr_dict.keys()) problem_sa = { - 'num_vars' : len(param_labels), - 'names' : param_labels, - 'bounds' : [[0, 1]]*len(param_labels) - } - #for the ff sampler, no value of N is needed. For API consistency the user - #must input a value that is ignored and a warning is given. - if sampling_method == 'ff': - LOGGER.warning("You are using the 'ff' sampler which does not require " - "a value for N. The entered N value will be ignored" - "in the sampling process.") - uniform_base_sample = self._make_uniform_base_sample(N, problem_sa, - sampling_method, - sampling_kwargs) + "num_vars": len(param_labels), + "names": param_labels, + "bounds": [[0, 1]] * len(param_labels), + } + # for the ff sampler, no value of N is needed. For API consistency the user + # must input a value that is ignored and a warning is given. + if sampling_method == "ff": + LOGGER.warning( + "You are using the 'ff' sampler which does not require " + "a value for N. The entered N value will be ignored" + "in the sampling process." + ) + uniform_base_sample = self._make_uniform_base_sample( + N, problem_sa, sampling_method, sampling_kwargs + ) df_samples = pd.DataFrame(uniform_base_sample, columns=param_labels) for param in list(df_samples): - df_samples[param] = df_samples[param].apply( - self.distr_dict[param].ppf - ) + df_samples[param] = df_samples[param].apply(self.distr_dict[param].ppf) - sampling_kwargs = { - key: str(val) - for key, val in sampling_kwargs.items() - } - df_samples.attrs['sampling_method'] = sampling_method - df_samples.attrs['sampling_kwargs'] = tuple(sampling_kwargs.items()) + sampling_kwargs = {key: str(val) for key, val in sampling_kwargs.items()} + df_samples.attrs["sampling_method"] = sampling_method + df_samples.attrs["sampling_kwargs"] = tuple(sampling_kwargs.items()) unc_output = UncOutput(df_samples) LOGGER.info("Effective number of made samples: %d", unc_output.n_samples) return unc_output - def _make_uniform_base_sample(self, N, problem_sa, sampling_method, - sampling_kwargs): + def _make_uniform_base_sample( + self, N, problem_sa, sampling_method, sampling_kwargs + ): """ Make a uniform distributed [0,1] sample for the defined uncertainty parameters (self.param_labels) with the chosen @@ -304,29 +305,37 @@ def _make_uniform_base_sample(self, N, problem_sa, sampling_method, if sampling_kwargs is None: sampling_kwargs = {} - #Import the named submodule from the SALib sample module - #From the workings of __import__ the use of 'from_list' is necessary - #c.f. 
https://stackoverflow.com/questions/2724260/why-does-pythons-import-require-fromlist - import importlib # pylint: disable=import-outside-toplevel - salib_sampling_method = importlib.import_module(f'SALib.sample.{sampling_method}') + # Import the named submodule from the SALib sample module + # From the workings of __import__ the use of 'from_list' is necessary + # c.f. https://stackoverflow.com/questions/2724260/why-does-pythons-import-require-fromlist + import importlib # pylint: disable=import-outside-toplevel - if sampling_method == 'ff': #the ff sampling has a fixed sample size and - #does not require the N parameter - if problem_sa['num_vars'] & (problem_sa['num_vars'] - 1) != 0: - raise ValueError("The number of parameters must be a power of 2. " - "To use the ff sampling method, you can generate " - "dummy parameters to overcome this limitation." - " See https://salib.readthedocs.io/en/latest/api.html") + salib_sampling_method = importlib.import_module( + f"SALib.sample.{sampling_method}" + ) + + if sampling_method == "ff": # the ff sampling has a fixed sample size and + # does not require the N parameter + if problem_sa["num_vars"] & (problem_sa["num_vars"] - 1) != 0: + raise ValueError( + "The number of parameters must be a power of 2. " + "To use the ff sampling method, you can generate " + "dummy parameters to overcome this limitation." + " See https://salib.readthedocs.io/en/latest/api.html" + ) sample_uniform = salib_sampling_method.sample( - problem = problem_sa, **sampling_kwargs) + problem=problem_sa, **sampling_kwargs + ) else: sample_uniform = salib_sampling_method.sample( - problem = problem_sa, N = N, **sampling_kwargs) + problem=problem_sa, N=N, **sampling_kwargs + ) return sample_uniform - def sensitivity(self, unc_output, sensitivity_method = 'sobol', - sensitivity_kwargs = None): + def sensitivity( + self, unc_output, sensitivity_method="sobol", sensitivity_kwargs=None + ): """ Compute the sensitivity indices using SALib. @@ -378,34 +387,38 @@ def sensitivity(self, unc_output, sensitivity_method = 'sobol', if sensitivity_kwargs is None: sensitivity_kwargs = {} - #Check compatibility of sampling and sensitivity methods + # Check compatibility of sampling and sensitivity methods unc_output.check_salib(sensitivity_method) - #Import the named submodule from the SALib analyse module - #From the workings of __import__ the use of 'from_list' is necessary - #c.f. https://stackoverflow.com/questions/2724260/why-does-pythons-import-require-fromlist + # Import the named submodule from the SALib analyse module + # From the workings of __import__ the use of 'from_list' is necessary + # c.f. 
https://stackoverflow.com/questions/2724260/why-does-pythons-import-require-fromlist method = getattr( - __import__('SALib.analyze', - fromlist=[sensitivity_method] - ), - sensitivity_method - ) + __import__("SALib.analyze", fromlist=[sensitivity_method]), + sensitivity_method, + ) sens_output = copy.deepcopy(unc_output) - #Certain Salib method required model input (X) and output (Y), others - #need only ouput (Y) - salib_kwargs = method.analyze.__code__.co_varnames # obtain all kwargs of the salib method - X = unc_output.samples_df.to_numpy() if 'X' in salib_kwargs else None + # Certain Salib method required model input (X) and output (Y), others + # need only ouput (Y) + salib_kwargs = ( + method.analyze.__code__.co_varnames + ) # obtain all kwargs of the salib method + X = unc_output.samples_df.to_numpy() if "X" in salib_kwargs else None for metric_name in self._metric_names: unc_df = unc_output.get_unc_df(metric_name) - sens_df = _calc_sens_df(method, unc_output.problem_sa, sensitivity_kwargs, - unc_output.param_labels, X, unc_df) + sens_df = _calc_sens_df( + method, + unc_output.problem_sa, + sensitivity_kwargs, + unc_output.param_labels, + X, + unc_df, + ) sens_output.set_sens_df(metric_name, sens_df) - sensitivity_kwargs = { - key: str(val) - for key, val in sensitivity_kwargs.items()} + sensitivity_kwargs = {key: str(val) for key, val in sensitivity_kwargs.items()} sens_output.sensitivity_method = sensitivity_method sens_output.sensitivity_kwargs = tuple(sensitivity_kwargs.items()) @@ -434,9 +447,7 @@ def _multiprocess_chunksize(samples_df, processes): int the number of samples in each chunk """ - return np.ceil( - samples_df.shape[0] / processes - ).astype(int) + return np.ceil(samples_df.shape[0] / processes).astype(int) def _transpose_chunked_data(metrics): @@ -463,10 +474,7 @@ def _transpose_chunked_data(metrics): calc_cost_benefits._map_costben_calc map for cost benefit uncertainty """ - return [ - list(itertools.chain.from_iterable(x)) - for x in zip(*metrics) - ] + return [list(itertools.chain.from_iterable(x)) for x in zip(*metrics)] def _sample_parallel_iterator(samples, chunksize, **kwargs): @@ -487,17 +495,18 @@ def _sample_parallel_iterator(samples, chunksize, **kwargs): suitable for methods _map_impact_calc and _map_costben_calc """ + def _chunker(df, size): """ Divide the dataframe into chunks of size number of lines """ for pos in range(0, len(df), size): - yield df.iloc[pos:pos + size] + yield df.iloc[pos : pos + size] return zip( _chunker(samples, chunksize), - *(itertools.repeat(item) for item in kwargs.values()) - ) + *(itertools.repeat(item) for item in kwargs.values()), + ) def _calc_sens_df(method, problem_sa, sensitivity_kwargs, param_labels, X, unc_df): @@ -525,85 +534,104 @@ def _calc_sens_df(method, problem_sa, sensitivity_kwargs, param_labels, X, unc_d """ sens_first_order_dict = {} sens_second_order_dict = {} - for (submetric_name, metric_unc) in unc_df.items(): + for submetric_name, metric_unc in unc_df.items(): Y = metric_unc.to_numpy() if X is not None: - sens_indices = method.analyze(problem_sa, X, Y, - **sensitivity_kwargs) + sens_indices = method.analyze(problem_sa, X, Y, **sensitivity_kwargs) else: - sens_indices = method.analyze(problem_sa, Y, - **sensitivity_kwargs) - #refactor incoherent SALib output + sens_indices = method.analyze(problem_sa, Y, **sensitivity_kwargs) + # refactor incoherent SALib output nparams = len(param_labels) - if method.__name__[-3:] == '.ff': #ff method - if sensitivity_kwargs['second_order']: - #parse interaction terms 
of sens_indices to a square matrix - #to ensure consistency with unsequa - interaction_names = sens_indices.pop('interaction_names') + if method.__name__[-3:] == ".ff": # ff method + if sensitivity_kwargs["second_order"]: + # parse interaction terms of sens_indices to a square matrix + # to ensure consistency with unsequa + interaction_names = sens_indices.pop("interaction_names") interactions = np.full((nparams, nparams), np.nan) - #loop over interaction names and extract each param pair, - #then match to the corresponding param from param_labels - for i,interaction_name in enumerate(interaction_names): - interactions[param_labels.index(interaction_name[0]), - param_labels.index(interaction_name[1])] = sens_indices['IE'][i] - sens_indices['IE'] = interactions - - if method.__name__[-5:] == '.hdmr': #hdmr method - #first, remove variables that are incompatible with unsequa output - keys_to_remove = ['Em','Term','select', 'RT', 'Y_em', 'idx', 'X', 'Y'] - sens_indices = {k: v for k, v in sens_indices.items() - if k not in keys_to_remove} - names = sens_indices.pop('names') #names of terms - - #second, refactor to 2D + # loop over interaction names and extract each param pair, + # then match to the corresponding param from param_labels + for i, interaction_name in enumerate(interaction_names): + interactions[ + param_labels.index(interaction_name[0]), + param_labels.index(interaction_name[1]), + ] = sens_indices["IE"][i] + sens_indices["IE"] = interactions + + if method.__name__[-5:] == ".hdmr": # hdmr method + # first, remove variables that are incompatible with unsequa output + keys_to_remove = ["Em", "Term", "select", "RT", "Y_em", "idx", "X", "Y"] + sens_indices = { + k: v for k, v in sens_indices.items() if k not in keys_to_remove + } + names = sens_indices.pop("names") # names of terms + + # second, refactor to 2D for si, si_val_array in sens_indices.items(): - if (np.array(si_val_array).ndim == 1 and #for everything that is 1d and has - np.array(si_val_array).size > nparams): #lentgh > n params, refactor to 2D + if ( + np.array(si_val_array).ndim + == 1 # for everything that is 1d and has + and np.array(si_val_array).size > nparams + ): # lentgh > n params, refactor to 2D si_new_array = np.full((nparams, nparams), np.nan) - np.fill_diagonal(si_new_array, si_val_array[0:nparams]) #simple terms go on diag - for i,interaction_name in enumerate(names[nparams:]): - t1, t2 = interaction_name.split('/') #interaction terms - si_new_array[param_labels.index(t1), - param_labels.index(t2)] = si_val_array[nparams+i] + np.fill_diagonal( + si_new_array, si_val_array[0:nparams] + ) # simple terms go on diag + for i, interaction_name in enumerate(names[nparams:]): + t1, t2 = interaction_name.split("/") # interaction terms + si_new_array[param_labels.index(t1), param_labels.index(t2)] = ( + si_val_array[nparams + i] + ) sens_indices[si] = si_new_array - - sens_first_order = np.array([ - np.array(si_val_array) - for si, si_val_array in sens_indices.items() - if (np.array(si_val_array).ndim == 1 # dirty trick due to Salib incoherent output - and si!='names' - and np.array(si_val_array).size == len(param_labels)) - ]).ravel() + sens_first_order = np.array( + [ + np.array(si_val_array) + for si, si_val_array in sens_indices.items() + if ( + np.array(si_val_array).ndim + == 1 # dirty trick due to Salib incoherent output + and si != "names" + and np.array(si_val_array).size == len(param_labels) + ) + ] + ).ravel() sens_first_order_dict[submetric_name] = sens_first_order - sens_second_order = np.array([ - 
np.array(si_val_array) - for si_val_array in sens_indices.values() - if np.array(si_val_array).ndim == 2 - ]).ravel() + sens_second_order = np.array( + [ + np.array(si_val_array) + for si_val_array in sens_indices.values() + if np.array(si_val_array).ndim == 2 + ] + ).ravel() sens_second_order_dict[submetric_name] = sens_second_order sens_first_order_df = pd.DataFrame(sens_first_order_dict, dtype=np.number) if not sens_first_order_df.empty: - si_names_first_order, param_names_first_order = _si_param_first(param_labels, sens_indices) - sens_first_order_df.insert(0, 'si', si_names_first_order) - sens_first_order_df.insert(1, 'param', param_names_first_order) - sens_first_order_df.insert(2, 'param2', None) - + si_names_first_order, param_names_first_order = _si_param_first( + param_labels, sens_indices + ) + sens_first_order_df.insert(0, "si", si_names_first_order) + sens_first_order_df.insert(1, "param", param_names_first_order) + sens_first_order_df.insert(2, "param2", None) sens_second_order_df = pd.DataFrame(sens_second_order_dict) if not sens_second_order_df.empty: - si_names_second_order, param_names_second_order, param_names_second_order_2 = \ + si_names_second_order, param_names_second_order, param_names_second_order_2 = ( _si_param_second(param_labels, sens_indices) - sens_second_order_df.insert(0, 'si', si_names_second_order,) - sens_second_order_df.insert(1, 'param', param_names_second_order) - sens_second_order_df.insert(2, 'param2', param_names_second_order_2) + ) + sens_second_order_df.insert( + 0, + "si", + si_names_second_order, + ) + sens_second_order_df.insert(1, "param", param_names_second_order) + sens_second_order_df.insert(2, "param2", param_names_second_order_2) - sens_df = pd.concat( - [sens_first_order_df, sens_second_order_df] - ).reset_index(drop=True) + sens_df = pd.concat([sens_first_order_df, sens_second_order_df]).reset_index( + drop=True + ) return sens_df @@ -624,18 +652,18 @@ def _si_param_first(param_labels, sens_indices): Names of the sensivity indices of first order for all input parameters and Parameter names for each sentivity index """ - n_params = len(param_labels) + n_params = len(param_labels) si_name_first_order_list = [ key for key, array in sens_indices.items() - if (np.array(array).ndim == 1 and key!='names') # dirty trick due to Salib incoherent output - ] + if ( + np.array(array).ndim == 1 and key != "names" + ) # dirty trick due to Salib incoherent output + ] si_names_first_order = [ - si - for si in si_name_first_order_list - for _ in range(n_params) - ] + si for si in si_name_first_order_list for _ in range(n_params) + ] param_names_first_order = param_labels * len(si_name_first_order_list) return si_names_first_order, param_names_first_order @@ -656,22 +684,17 @@ def _si_param_second(param_labels, sens_indices): Names of the sensivity indices of second order for all input parameters and Pairs of parameter names for each 2nd order sentivity index """ - n_params = len(param_labels) + n_params = len(param_labels) si_name_second_order_list = [ - key - for key, array in sens_indices.items() - if np.array(array).ndim == 2 - ] + key for key, array in sens_indices.items() if np.array(array).ndim == 2 + ] si_names_second_order = [ - si - for si in si_name_second_order_list - for _ in range(n_params**2) - ] - param_names_second_order_2 = param_labels \ - * len(si_name_second_order_list) * n_params + si for si in si_name_second_order_list for _ in range(n_params**2) + ] + param_names_second_order_2 = ( + param_labels * len(si_name_second_order_list) * 
n_params + ) param_names_second_order = [ - param - for param in param_labels - for _ in range(n_params) - ] * len(si_name_second_order_list) + param for param in param_labels for _ in range(n_params) + ] * len(si_name_second_order_list) return si_names_second_order, param_names_second_order, param_names_second_order_2 diff --git a/climada/engine/unsequa/calc_cost_benefit.py b/climada/engine/unsequa/calc_cost_benefit.py index 36f1fe2d1..2078eaf89 100644 --- a/climada/engine/unsequa/calc_cost_benefit.py +++ b/climada/engine/unsequa/calc_cost_benefit.py @@ -19,31 +19,38 @@ Define Uncertainty Cost Benefit class """ -__all__ = ['CalcCostBenefit'] +__all__ = ["CalcCostBenefit"] +import itertools import logging import time -import itertools - from typing import Optional, Union -import pandas as pd + import numpy as np +import pandas as pd import pathos.multiprocessing as mp -# use pathos.multiprocess fork of multiprocessing for compatibility -# wiht notebooks and other environments https://stackoverflow.com/a/65001152/12454103 from climada.engine.cost_benefit import CostBenefit from climada.engine.unsequa import Calc, InputVar, UncCostBenefitOutput -from climada.engine.unsequa.calc_base import _sample_parallel_iterator, _multiprocess_chunksize, _transpose_chunked_data -from climada.util import log_level -from climada.hazard import Hazard +from climada.engine.unsequa.calc_base import ( + _multiprocess_chunksize, + _sample_parallel_iterator, + _transpose_chunked_data, +) from climada.entity import Entity +from climada.hazard import Hazard +from climada.util import log_level + +# use pathos.multiprocess fork of multiprocessing for compatibility +# wiht notebooks and other environments https://stackoverflow.com/a/65001152/12454103 + LOGGER = logging.getLogger(__name__) # Future planed features: # - Add 'efc' (frequency curve) to UncCostBenenfit + class CalcCostBenefit(Calc): """ Cost Benefit uncertainty analysis class @@ -74,19 +81,19 @@ class CalcCostBenefit(Calc): """ _input_var_names = ( - 'haz_input_var', - 'ent_input_var', - 'haz_fut_input_var', - 'ent_fut_input_var', + "haz_input_var", + "ent_input_var", + "haz_fut_input_var", + "ent_fut_input_var", ) """Names of the required uncertainty variables""" _metric_names = ( - 'tot_climate_risk', - 'benefit', - 'cost_ben_ratio', - 'imp_meas_present', - 'imp_meas_future', + "tot_climate_risk", + "benefit", + "cost_ben_ratio", + "imp_meas_present", + "imp_meas_future", ) """Names of the cost benefit output metrics""" @@ -132,13 +139,9 @@ def __init__( self.value_unit = self.ent_input_var.evaluate().exposures.value_unit self.check_distr() - - - def uncertainty(self, - unc_sample, - processes=1, - chunksize=None, - **cost_benefit_kwargs): + def uncertainty( + self, unc_sample, processes=1, chunksize=None, **cost_benefit_kwargs + ): """ Computes the cost benefit for each sample in unc_output.sample_df. @@ -193,8 +196,10 @@ def uncertainty(self, """ if unc_sample.samples_df.empty: - raise ValueError("No sample was found. Please create one first" + - "using UncImpact.make_sample(N)") + raise ValueError( + "No sample was found. Please create one first" + + "using UncImpact.make_sample(N)" + ) # copy may not be needed, but is kept to prevent potential # data corruption issues. The computational cost should be @@ -205,81 +210,92 @@ def uncertainty(self, chunksize = _multiprocess_chunksize(samples_df, processes) unit = self.value_unit - LOGGER.info("The freq_curve is not saved. 
Please " - "change the risk_func (see climada.engine.cost_benefit) " - "if return period information is needed") + LOGGER.info( + "The freq_curve is not saved. Please " + "change the risk_func (see climada.engine.cost_benefit) " + "if return period information is needed" + ) one_sample = samples_df.iloc[0:1] start = time.time() - self._compute_cb_metrics(one_sample, cost_benefit_kwargs, chunksize=1, processes=1) - elapsed_time = (time.time() - start) + self._compute_cb_metrics( + one_sample, cost_benefit_kwargs, chunksize=1, processes=1 + ) + elapsed_time = time.time() - start self.est_comp_time(unc_sample.n_samples, elapsed_time, processes) - #Compute impact distributions + # Compute impact distributions [ imp_meas_present, imp_meas_future, tot_climate_risk, benefit, - cost_ben_ratio - ] = self._compute_cb_metrics(samples_df, cost_benefit_kwargs, chunksize, processes) + cost_ben_ratio, + ] = self._compute_cb_metrics( + samples_df, cost_benefit_kwargs, chunksize, processes + ) # Assign computed impact distribution data to self - tot_climate_risk_unc_df = \ - pd.DataFrame(tot_climate_risk, columns = ['tot_climate_risk']) + tot_climate_risk_unc_df = pd.DataFrame( + tot_climate_risk, columns=["tot_climate_risk"] + ) benefit_unc_df = pd.DataFrame(benefit) benefit_unc_df.columns = [ - column + ' Benef' - for column in benefit_unc_df.columns] + column + " Benef" for column in benefit_unc_df.columns + ] cost_ben_ratio_unc_df = pd.DataFrame(cost_ben_ratio) cost_ben_ratio_unc_df.columns = [ - column + ' CostBen' - for column in cost_ben_ratio_unc_df.columns] + column + " CostBen" for column in cost_ben_ratio_unc_df.columns + ] - imp_metric_names = ['risk', 'risk_transf', 'cost_meas', - 'cost_ins'] + imp_metric_names = ["risk", "risk_transf", "cost_meas", "cost_ins"] im_periods = dict() - for imp_meas, period in zip([imp_meas_present, imp_meas_future], - ['present', 'future']): + for imp_meas, period in zip( + [imp_meas_present, imp_meas_future], ["present", "future"] + ): df_imp_meas = pd.DataFrame() - name = 'imp_meas_' + period + name = "imp_meas_" + period if imp_meas[0]: for imp in imp_meas: met_dic = {} for meas, imp_dic in imp.items(): - metrics = [imp_dic['risk'], - imp_dic['risk_transf'], - *imp_dic['cost']] - dic_tmp = {meas + ' - ' + m_name + ' - ' + period: [m_value] - for m_name, m_value - in zip(imp_metric_names, metrics) - } + metrics = [ + imp_dic["risk"], + imp_dic["risk_transf"], + *imp_dic["cost"], + ] + dic_tmp = { + meas + " - " + m_name + " - " + period: [m_value] + for m_name, m_value in zip(imp_metric_names, metrics) + } met_dic.update(dic_tmp) df_imp_meas = pd.concat( [df_imp_meas, pd.DataFrame(met_dic)], ignore_index=True, - sort=False - ) - im_periods[name + '_unc_df'] = df_imp_meas + sort=False, + ) + im_periods[name + "_unc_df"] = df_imp_meas cost_benefit_kwargs = { - key: str(val) - for key, val in cost_benefit_kwargs.items()} + key: str(val) for key, val in cost_benefit_kwargs.items() + } cost_benefit_kwargs = tuple(cost_benefit_kwargs.items()) - return UncCostBenefitOutput(samples_df=samples_df, - imp_meas_present_unc_df=im_periods['imp_meas_present_unc_df'], - imp_meas_future_unc_df=im_periods['imp_meas_future_unc_df'], - tot_climate_risk_unc_df=tot_climate_risk_unc_df, - cost_ben_ratio_unc_df=cost_ben_ratio_unc_df, - benefit_unc_df=benefit_unc_df, - unit=unit, - cost_benefit_kwargs=cost_benefit_kwargs) + return UncCostBenefitOutput( + samples_df=samples_df, + imp_meas_present_unc_df=im_periods["imp_meas_present_unc_df"], + 
imp_meas_future_unc_df=im_periods["imp_meas_future_unc_df"], + tot_climate_risk_unc_df=tot_climate_risk_unc_df, + cost_ben_ratio_unc_df=cost_ben_ratio_unc_df, + benefit_unc_df=benefit_unc_df, + unit=unit, + cost_benefit_kwargs=cost_benefit_kwargs, + ) def _compute_cb_metrics( - self, samples_df, cost_benefit_kwargs, chunksize, processes - ): + self, samples_df, cost_benefit_kwargs, chunksize, processes + ): """Compute the uncertainty metrics Parameters @@ -298,7 +314,7 @@ def _compute_cb_metrics( list values of impact metrics per sample """ - with log_level(level='ERROR', name_prefix='climada'): + with log_level(level="ERROR", name_prefix="climada"): p_iterator = _sample_parallel_iterator( samples=samples_df, chunksize=chunksize, @@ -306,55 +322,55 @@ def _compute_cb_metrics( haz_input_var=self.haz_input_var, ent_fut_input_var=self.ent_fut_input_var, haz_fut_input_var=self.haz_fut_input_var, - cost_benefit_kwargs=cost_benefit_kwargs + cost_benefit_kwargs=cost_benefit_kwargs, ) - if processes>1: + if processes > 1: with mp.Pool(processes=processes) as pool: - LOGGER.info('Using %s CPUs.', processes) - cb_metrics = pool.starmap( - _map_costben_calc, p_iterator - ) + LOGGER.info("Using %s CPUs.", processes) + cb_metrics = pool.starmap(_map_costben_calc, p_iterator) else: - cb_metrics = itertools.starmap( - _map_costben_calc, p_iterator - ) + cb_metrics = itertools.starmap(_map_costben_calc, p_iterator) - #Perform the actual computation - with log_level(level='ERROR', name_prefix='climada'): + # Perform the actual computation + with log_level(level="ERROR", name_prefix="climada"): return _transpose_chunked_data(cb_metrics) def _map_costben_calc( - sample_chunks, ent_input_var, haz_input_var, - ent_fut_input_var, haz_fut_input_var, cost_benefit_kwargs - ): + sample_chunks, + ent_input_var, + haz_input_var, + ent_fut_input_var, + haz_fut_input_var, + cost_benefit_kwargs, +): """ - Map to compute cost benefit for all parameter samples in parallel - - Parameters - ---------- - sample_chunks : pd.DataFrame - Dataframe of the parameter samples - haz_input_var : InputVar - Hazard uncertainty variable or Hazard for the present Hazard - in climada.engine.CostBenefit.calc - ent_input_var : InputVar - Entity uncertainty variable or Entity for the present Entity - in climada.engine.CostBenefit.calc - haz_fut_input_var: InputVar - Hazard uncertainty variable or Hazard for the future Hazard - ent_fut_input_var : InputVar - Entity uncertainty variable or Entity for the future Entity - in climada.engine.CostBenefit.calc - cost_benefit_kwargs : - Keyword arguments passed on to climada.engine.CostBenefit.calc() - - Returns - ------- - list - icost benefit metrics list for all samples containing - imp_meas_present, imp_meas_future, tot_climate_risk, - benefit, cost_ben_ratio + Map to compute cost benefit for all parameter samples in parallel + + Parameters + ---------- + sample_chunks : pd.DataFrame + Dataframe of the parameter samples + haz_input_var : InputVar + Hazard uncertainty variable or Hazard for the present Hazard + in climada.engine.CostBenefit.calc + ent_input_var : InputVar + Entity uncertainty variable or Entity for the present Entity + in climada.engine.CostBenefit.calc + haz_fut_input_var: InputVar + Hazard uncertainty variable or Hazard for the future Hazard + ent_fut_input_var : InputVar + Entity uncertainty variable or Entity for the future Entity + in climada.engine.CostBenefit.calc + cost_benefit_kwargs : + Keyword arguments passed on to climada.engine.CostBenefit.calc() + + Returns + 
------- + list + icost benefit metrics list for all samples containing + imp_meas_present, imp_meas_future, tot_climate_risk, + benefit, cost_ben_ratio """ @@ -373,17 +389,28 @@ def _map_costben_calc( cb = CostBenefit() ent.exposures.assign_centroids(haz, overwrite=False) if ent_fut: - ent_fut.exposures.assign_centroids(haz_fut if haz_fut else haz, overwrite=False) - cb.calc(hazard=haz, entity=ent, haz_future=haz_fut, ent_future=ent_fut, - save_imp=False, assign_centroids=False, **cost_benefit_kwargs) + ent_fut.exposures.assign_centroids( + haz_fut if haz_fut else haz, overwrite=False + ) + cb.calc( + hazard=haz, + entity=ent, + haz_future=haz_fut, + ent_future=ent_fut, + save_imp=False, + assign_centroids=False, + **cost_benefit_kwargs + ) # Extract from climada.impact the chosen metrics - uncertainty_values.append([ - cb.imp_meas_present, - cb.imp_meas_future, - cb.tot_climate_risk, - cb.benefit, - cb.cost_ben_ratio - ]) + uncertainty_values.append( + [ + cb.imp_meas_present, + cb.imp_meas_future, + cb.tot_climate_risk, + cb.benefit, + cb.cost_ben_ratio, + ] + ) # Transpose list return list(zip(*uncertainty_values)) diff --git a/climada/engine/unsequa/calc_delta_climate.py b/climada/engine/unsequa/calc_delta_climate.py index 9de9ddae6..1c56c3fba 100644 --- a/climada/engine/unsequa/calc_delta_climate.py +++ b/climada/engine/unsequa/calc_delta_climate.py @@ -21,23 +21,20 @@ __all__ = ["CalcDeltaImpact"] +import itertools import logging import time from typing import Union -import itertools -import pandas as pd import numpy as np +import pandas as pd import pathos.multiprocessing as mp -# use pathos.multiprocess fork of multiprocessing for compatibility -# wiht notebooks and other environments https://stackoverflow.com/a/65001152/12454103 - from climada.engine import ImpactCalc from climada.engine.unsequa import Calc, InputVar, UncImpactOutput from climada.engine.unsequa.calc_base import ( - _sample_parallel_iterator, _multiprocess_chunksize, + _sample_parallel_iterator, _transpose_chunked_data, ) from climada.entity import Exposures, ImpactFuncSet @@ -45,6 +42,9 @@ from climada.util import log_level from climada.util.value_representation import safe_divide +# use pathos.multiprocess fork of multiprocessing for compatibility +# wiht notebooks and other environments https://stackoverflow.com/a/65001152/12454103 + LOGGER = logging.getLogger(__name__) @@ -430,18 +430,12 @@ def _map_impact_calc( else: delta_func = lambda x, y: x - y - delta_aai_agg = delta_func( - imp_final.aai_agg, imp_initial.aai_agg - ) + delta_aai_agg = delta_func(imp_final.aai_agg, imp_initial.aai_agg) - delta_freq_curve = delta_func( - freq_curve_final, freq_curve_initial - ) + delta_freq_curve = delta_func(freq_curve_final, freq_curve_initial) delta_eai_exp = ( - delta_func(eai_exp_final, eai_exp_initial) - if calc_eai_exp - else np.array([]) + delta_func(eai_exp_final, eai_exp_initial) if calc_eai_exp else np.array([]) ) delta_at_event = ( diff --git a/climada/engine/unsequa/calc_impact.py b/climada/engine/unsequa/calc_impact.py index a82f5cae5..6b6a8773c 100644 --- a/climada/engine/unsequa/calc_impact.py +++ b/climada/engine/unsequa/calc_impact.py @@ -19,32 +19,35 @@ Define Uncertainty Impact class """ -__all__ = ['CalcImpact'] +__all__ = ["CalcImpact"] +import itertools import logging import time from typing import Union -import itertools -import pandas as pd import numpy as np +import pandas as pd import pathos.multiprocessing as mp -# use pathos.multiprocess fork of multiprocessing for compatibility -# wiht 
notebooks and other environments https://stackoverflow.com/a/65001152/12454103 from climada.engine import ImpactCalc from climada.engine.unsequa import Calc, InputVar, UncImpactOutput from climada.engine.unsequa.calc_base import ( - _sample_parallel_iterator, _multiprocess_chunksize, + _sample_parallel_iterator, _transpose_chunked_data, ) from climada.entity import Exposures, ImpactFuncSet from climada.hazard import Hazard from climada.util import log_level +# use pathos.multiprocess fork of multiprocessing for compatibility +# wiht notebooks and other environments https://stackoverflow.com/a/65001152/12454103 + + LOGGER = logging.getLogger(__name__) + class CalcImpact(Calc): """ Impact uncertainty caclulation class. @@ -77,18 +80,13 @@ class CalcImpact(Calc): """ _input_var_names = ( - 'exp_input_var', - 'impf_input_var', - 'haz_input_var', + "exp_input_var", + "impf_input_var", + "haz_input_var", ) """Names of the required uncertainty variables""" - _metric_names = ( - 'aai_agg', - 'freq_curve', - 'at_event', - 'eai_exp' - ) + _metric_names = ("aai_agg", "freq_curve", "at_event", "eai_exp") """Names of the cost benefit output metrics""" def __init__( @@ -114,22 +112,22 @@ def __init__( """ Calc.__init__(self) - self.exp_input_var = InputVar.var_to_inputvar(exp_input_var) - self.impf_input_var = InputVar.var_to_inputvar(impf_input_var) - self.haz_input_var = InputVar.var_to_inputvar(haz_input_var) + self.exp_input_var = InputVar.var_to_inputvar(exp_input_var) + self.impf_input_var = InputVar.var_to_inputvar(impf_input_var) + self.haz_input_var = InputVar.var_to_inputvar(haz_input_var) self.value_unit = self.exp_input_var.evaluate().value_unit self.check_distr() - - def uncertainty(self, - unc_sample, - rp=None, - calc_eai_exp=False, - calc_at_event=False, - processes=1, - chunksize=None - ): + def uncertainty( + self, + unc_sample, + rp=None, + calc_eai_exp=False, + calc_at_event=False, + processes=1, + chunksize=None, + ): """ Computes the impact for each sample in unc_data.sample_df. @@ -196,9 +194,10 @@ def uncertainty(self, """ if unc_sample.samples_df.empty: - raise ValueError("No sample was found. Please create one first" - "using UncImpact.make_sample(N)") - + raise ValueError( + "No sample was found. Please create one first" + "using UncImpact.make_sample(N)" + ) # copy may not be needed, but is kept to prevent potential # data corruption issues. 
The computational cost should be @@ -210,7 +209,7 @@ def uncertainty(self, unit = self.value_unit if rp is None: - rp=[5, 10, 20, 50, 100, 250] + rp = [5, 10, 20, 50, 100, 250] self.rp = rp self.calc_eai_exp = calc_eai_exp @@ -218,44 +217,40 @@ def uncertainty(self, one_sample = samples_df.iloc[0:1] start = time.time() - self._compute_imp_metrics( - one_sample, chunksize=1, processes=1 - ) - elapsed_time = (time.time() - start) + self._compute_imp_metrics(one_sample, chunksize=1, processes=1) + elapsed_time = time.time() - start self.est_comp_time(unc_sample.n_samples, elapsed_time, processes) - [ - aai_agg_list, - freq_curve_list, - eai_exp_list, - at_event_list - ] = self._compute_imp_metrics( - samples_df, chunksize=chunksize, processes=processes + [aai_agg_list, freq_curve_list, eai_exp_list, at_event_list] = ( + self._compute_imp_metrics( + samples_df, chunksize=chunksize, processes=processes ) + ) # Assign computed impact distribution data to self - aai_agg_unc_df = pd.DataFrame(aai_agg_list, - columns = ['aai_agg']) - freq_curve_unc_df = pd.DataFrame(freq_curve_list, - columns=['rp' + str(n) for n in rp]) - eai_exp_unc_df = pd.DataFrame(eai_exp_list) + aai_agg_unc_df = pd.DataFrame(aai_agg_list, columns=["aai_agg"]) + freq_curve_unc_df = pd.DataFrame( + freq_curve_list, columns=["rp" + str(n) for n in rp] + ) + eai_exp_unc_df = pd.DataFrame(eai_exp_list) # Note: sparse dataframes are not used as they are not nativel y compatible with .to_hdf5 at_event_unc_df = pd.DataFrame(at_event_list) if calc_eai_exp: exp = self.exp_input_var.evaluate() - coord_df = exp.gdf[['latitude', 'longitude']] + coord_df = exp.gdf[["latitude", "longitude"]] else: coord_df = pd.DataFrame([]) - return UncImpactOutput(samples_df=samples_df, - unit=unit, - aai_agg_unc_df=aai_agg_unc_df, - freq_curve_unc_df=freq_curve_unc_df, - eai_exp_unc_df=eai_exp_unc_df, - at_event_unc_df=at_event_unc_df, - coord_df=coord_df - ) + return UncImpactOutput( + samples_df=samples_df, + unit=unit, + aai_agg_unc_df=aai_agg_unc_df, + freq_curve_unc_df=freq_curve_unc_df, + eai_exp_unc_df=eai_exp_unc_df, + at_event_unc_df=at_event_unc_df, + coord_df=coord_df, + ) def _compute_imp_metrics(self, samples_df, chunksize, processes): """Compute the uncertainty metrics @@ -274,8 +269,8 @@ def _compute_imp_metrics(self, samples_df, chunksize, processes): list values of impact metrics per sample """ - #Compute impact distributions - with log_level(level='ERROR', name_prefix='climada'): + # Compute impact distributions + with log_level(level="ERROR", name_prefix="climada"): p_iterator = _sample_parallel_iterator( samples=samples_df, chunksize=chunksize, @@ -288,24 +283,25 @@ def _compute_imp_metrics(self, samples_df, chunksize, processes): ) if processes > 1: with mp.Pool(processes=processes) as pool: - LOGGER.info('Using %s CPUs.', processes) - imp_metrics = pool.starmap( - _map_impact_calc, p_iterator - ) + LOGGER.info("Using %s CPUs.", processes) + imp_metrics = pool.starmap(_map_impact_calc, p_iterator) else: - imp_metrics = itertools.starmap( - _map_impact_calc, p_iterator - ) + imp_metrics = itertools.starmap(_map_impact_calc, p_iterator) - #Perform the actual computation - with log_level(level='ERROR', name_prefix='climada'): + # Perform the actual computation + with log_level(level="ERROR", name_prefix="climada"): return _transpose_chunked_data(imp_metrics) def _map_impact_calc( - sample_chunks, exp_input_var, impf_input_var, haz_input_var, - rp, calc_eai_exp, calc_at_event - ): + sample_chunks, + exp_input_var, + impf_input_var, + 
haz_input_var, + rp, + calc_eai_exp, + calc_at_event, +): """ Map to compute impact for all parameter samples in parallel @@ -345,8 +341,9 @@ def _map_impact_calc( haz = haz_input_var.evaluate(**haz_samples) exp.assign_centroids(haz, overwrite=False) - imp = ImpactCalc(exposures=exp, impfset=impf, hazard=haz)\ - .impact(assign_centroids=False, save_mat=False) + imp = ImpactCalc(exposures=exp, impfset=impf, hazard=haz).impact( + assign_centroids=False, save_mat=False + ) # Extract from climada.impact the chosen metrics freq_curve = imp.calc_freq_curve(rp).impact @@ -357,7 +354,7 @@ def _map_impact_calc( eai_exp = np.array([]) if calc_at_event: - at_event= imp.at_event + at_event = imp.at_event else: at_event = np.array([]) diff --git a/climada/engine/unsequa/input_var.py b/climada/engine/unsequa/input_var.py index 62d7729f4..56a47fe84 100644 --- a/climada/engine/unsequa/input_var.py +++ b/climada/engine/unsequa/input_var.py @@ -20,24 +20,25 @@ """ import copy +import logging from functools import partial from itertools import zip_longest -import logging from typing import Dict -import scipy as sp -import numpy as np import matplotlib.pyplot as plt +import numpy as np +import scipy as sp -from climada.entity import Entity, DiscRates +from climada.entity import DiscRates, Entity LOGGER = logging.getLogger(__name__) -__all__ = ['InputVar'] +__all__ = ["InputVar"] + +FIG_W, FIG_H = 8, 5 # default figize width/heigh column/work multiplicators -FIG_W, FIG_H = 8, 5 #default figize width/heigh column/work multiplicators -class InputVar(): +class InputVar: """ Input variable for the uncertainty analysis @@ -148,13 +149,9 @@ def evaluate(self, **params): """ if not params: - params = { - param: distr.mean() - for param, distr in self.distr_dict.items() - } + params = {param: distr.mean() for param, distr in self.distr_dict.items()} return self.func(**params) - def plot(self, figsize=None): """ Plot the distributions of the parameters of the uncertainty variable. 
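# A minimal usage sketch of the InputVar pattern shown in the hunks above,
# assuming a current CLIMADA install. The helper `impf_set_func` and its
# parameter `x_mdd` are made-up stand-ins, not part of this module; only
# `InputVar`, `evaluate` and `plot` come from the code being reformatted here.
import numpy as np
import scipy as sp

from climada.entity import ImpactFunc, ImpactFuncSet
from climada.engine.unsequa import InputVar


def impf_set_func(x_mdd=1.0):
    """Toy impact function set whose mean damage degree is scaled by x_mdd."""
    intensity = np.linspace(0, 150, num=100)
    impf = ImpactFunc(
        haz_type="TC",
        id=1,
        intensity=intensity,
        mdd=np.ones(intensity.size) * x_mdd,
        paa=np.linspace(0, 1, num=100),
        intensity_unit="m/s",
    )
    return ImpactFuncSet([impf])


# One scipy distribution per parameter of the wrapped function
impf_iv = InputVar(impf_set_func, {"x_mdd": sp.stats.uniform(0.8, 0.4)})

impf_set_mean = impf_iv.evaluate()          # no arguments: distribution means
impf_set_low = impf_iv.evaluate(x_mdd=0.8)  # explicit parameter value
axes = impf_iv.plot()                       # one pdf/pmf subplot per parameter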
@@ -184,29 +181,28 @@ def plot(self, figsize=None): flat_axes = axes.flatten() else: flat_axes = np.array([axes]) - for ax, name_distr in zip_longest(flat_axes, - self.distr_dict.items(), - fillvalue=None): + for ax, name_distr in zip_longest( + flat_axes, self.distr_dict.items(), fillvalue=None + ): if name_distr is None: ax.remove() continue (param_name, distr) = name_distr low = distr.ppf(1e-10) - high = distr.ppf(1-1e-10) + high = distr.ppf(1 - 1e-10) n = 100 try: x = np.linspace(low, high, n) ax.plot(x, distr.pdf(x), label=param_name) except AttributeError: if (high - low) > n: - x = np.arange(low, high, int((high-low) / n)) + x = np.arange(low, high, int((high - low) / n)) else: - x = np.arange(low, high+1) + x = np.arange(low, high + 1) ax.vlines(x, 0, distr.pmf(x), label=param_name) ax.legend() return axes - @staticmethod def var_to_inputvar(var): """ @@ -229,7 +225,6 @@ def var_to_inputvar(var): return InputVar(func=lambda: var, distr_dict={}) - @staticmethod def haz(haz_list, n_ev=None, bounds_int=None, bounds_frac=None, bounds_freq=None): """ @@ -282,21 +277,21 @@ def haz(haz_list, n_ev=None, bounds_int=None, bounds_frac=None, bounds_freq=None """ n_haz = len(haz_list) - kwargs = {'haz_list': haz_list, 'n_ev': n_ev} + kwargs = {"haz_list": haz_list, "n_ev": n_ev} if n_ev is None: - kwargs['HE'] = None + kwargs["HE"] = None if bounds_int is None: - kwargs['HI'] = None + kwargs["HI"] = None if bounds_frac is None: - kwargs['HA'] = None + kwargs["HA"] = None if bounds_freq is None: - kwargs['HF'] = None + kwargs["HF"] = None if n_haz == 1: - kwargs['HL'] = 0 + kwargs["HL"] = 0 return InputVar( partial(_haz_uncfunc, **kwargs), - _haz_unc_dict(n_ev, bounds_int, bounds_frac, bounds_freq, n_haz) - ) + _haz_unc_dict(n_ev, bounds_int, bounds_frac, bounds_freq, n_haz), + ) @staticmethod def exp(exp_list, bounds_totval=None, bounds_noise=None): @@ -339,23 +334,28 @@ def exp(exp_list, bounds_totval=None, bounds_noise=None): """ n_exp = len(exp_list) - kwargs = {'exp_list': exp_list, 'bounds_noise': bounds_noise} + kwargs = {"exp_list": exp_list, "bounds_noise": bounds_noise} if bounds_noise is None: - kwargs['EN'] = None + kwargs["EN"] = None if bounds_totval is None: - kwargs['ET'] = None + kwargs["ET"] = None if n_exp == 1: - kwargs['EL'] = 0 + kwargs["EL"] = 0 return InputVar( partial(_exp_uncfunc, **kwargs), - _exp_unc_dict(bounds_totval=bounds_totval, - bounds_noise=bounds_noise, - n_exp=n_exp) - ) + _exp_unc_dict( + bounds_totval=bounds_totval, bounds_noise=bounds_noise, n_exp=n_exp + ), + ) @staticmethod - def impfset(impf_set_list, haz_id_dict= None, bounds_mdd=None, bounds_paa=None, - bounds_impfi=None): + def impfset( + impf_set_list, + haz_id_dict=None, + bounds_mdd=None, + bounds_paa=None, + bounds_impfi=None, + ): """ Helper wrapper for basic impact function set uncertainty input variable. 
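# A short sketch of the `haz` and `exp` helper wrappers above, assuming the
# CLIMADA demo files; the bounds and event count are illustrative. "ET"/"EN"
# and "HE"/"HI"/"HF" are the sampled parameters built by the module's
# *_unc_dict helpers further below.
from climada.entity import Exposures
from climada.hazard import Hazard
from climada.engine.unsequa import InputVar
from climada.util.constants import EXP_DEMO_H5, HAZ_DEMO_H5

exp = Exposures.from_hdf5(EXP_DEMO_H5)
haz = Hazard.from_hdf5(HAZ_DEMO_H5)

# Total exposed value scaled by a uniform factor on [0.9, 1.1] ("ET") and
# per-point noise drawn from [0.3, 1.9] with a random seed ("EN").
exp_iv = InputVar.exp([exp], bounds_totval=[0.9, 1.1], bounds_noise=[0.3, 1.9])

# Sub-sample 10 events per draw ("HE" is the sub-sampling seed) and scale
# intensity ("HI") and frequency ("HF") by uniform factors.
haz_iv = InputVar.haz(
    [haz], n_ev=10, bounds_int=[0.9, 1.2], bounds_freq=[0.8, 1.1]
)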
@@ -411,30 +411,37 @@ def impfset(impf_set_list, haz_id_dict= None, bounds_mdd=None, bounds_paa=None, """ n_impf_set = len(impf_set_list) - kwargs = {'impf_set_list': impf_set_list} + kwargs = {"impf_set_list": impf_set_list} if bounds_mdd is None: - kwargs['MDD'] = None + kwargs["MDD"] = None if bounds_paa is None: - kwargs['PAA'] = None + kwargs["PAA"] = None if bounds_impfi is None: - kwargs['IFi'] = None + kwargs["IFi"] = None if haz_id_dict is None: haz_id_dict = impf_set_list[0].get_ids() if n_impf_set == 1: - kwargs['IL'] = 0 + kwargs["IL"] = 0 return InputVar( - partial( - _impfset_uncfunc, haz_id_dict=haz_id_dict, - **kwargs - ), - _impfset_unc_dict(bounds_impfi, bounds_mdd, bounds_paa, n_impf_set) + partial(_impfset_uncfunc, haz_id_dict=haz_id_dict, **kwargs), + _impfset_unc_dict(bounds_impfi, bounds_mdd, bounds_paa, n_impf_set), ) @staticmethod - def ent(impf_set_list, disc_rate, exp_list, meas_set, haz_id_dict, - bounds_disc=None, bounds_cost=None, bounds_totval=None, - bounds_noise=None, bounds_mdd=None, bounds_paa=None, - bounds_impfi=None): + def ent( + impf_set_list, + disc_rate, + exp_list, + meas_set, + haz_id_dict, + bounds_disc=None, + bounds_cost=None, + bounds_totval=None, + bounds_noise=None, + bounds_mdd=None, + bounds_paa=None, + bounds_impfi=None, + ): """ Helper wrapper for basic entity set uncertainty input variable. @@ -532,42 +539,61 @@ def ent(impf_set_list, disc_rate, exp_list, meas_set, haz_id_dict, kwargs = {} if bounds_mdd is None: - kwargs['MDD'] = None + kwargs["MDD"] = None if bounds_paa is None: - kwargs['PAA'] = None + kwargs["PAA"] = None if bounds_impfi is None: - kwargs['IFi'] = None - if n_impf_set== 1: - kwargs['IL'] = 0 + kwargs["IFi"] = None + if n_impf_set == 1: + kwargs["IL"] = 0 if bounds_disc is None: - kwargs['DR'] = None + kwargs["DR"] = None if bounds_cost is None: - kwargs['CO'] = None + kwargs["CO"] = None if bounds_totval is None: - kwargs['ET'] = None + kwargs["ET"] = None if bounds_noise is None: - kwargs['EN'] = None + kwargs["EN"] = None if n_exp == 1: - kwargs['EL'] = 0 + kwargs["EL"] = 0 return InputVar( - partial(_ent_unc_func, - impf_set_list=impf_set_list, haz_id_dict=haz_id_dict, - disc_rate=disc_rate, bounds_noise=bounds_noise, - exp_list=exp_list, meas_set=meas_set, **kwargs - ), - _ent_unc_dict(bounds_totval=bounds_totval, bounds_noise=bounds_noise, - bounds_impfi=bounds_impfi, n_impf_set=n_impf_set, - bounds_mdd=bounds_mdd, - bounds_paa=bounds_paa, bounds_disc=bounds_disc, - bounds_cost=bounds_cost, n_exp=n_exp,) + partial( + _ent_unc_func, + impf_set_list=impf_set_list, + haz_id_dict=haz_id_dict, + disc_rate=disc_rate, + bounds_noise=bounds_noise, + exp_list=exp_list, + meas_set=meas_set, + **kwargs + ), + _ent_unc_dict( + bounds_totval=bounds_totval, + bounds_noise=bounds_noise, + bounds_impfi=bounds_impfi, + n_impf_set=n_impf_set, + bounds_mdd=bounds_mdd, + bounds_paa=bounds_paa, + bounds_disc=bounds_disc, + bounds_cost=bounds_cost, + n_exp=n_exp, + ), ) @staticmethod - def entfut(impf_set_list, exp_list, meas_set, haz_id_dict, - bounds_cost=None, bounds_eg=None, bounds_noise=None, - bounds_impfi=None, bounds_mdd=None, bounds_paa=None, - ): + def entfut( + impf_set_list, + exp_list, + meas_set, + haz_id_dict, + bounds_cost=None, + bounds_eg=None, + bounds_noise=None, + bounds_impfi=None, + bounds_mdd=None, + bounds_paa=None, + ): """ Helper wrapper for basic future entity set uncertainty input variable. 
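# A sketch of the `impfset` and `ent` wrappers, assuming the demo entity
# shipped with CLIMADA; the bounds and the {"TC": [1]} id mapping are
# illustrative. As implemented in _impfset_uncfunc below, MDD and PAA are
# multiplied by the sampled factors (capped at 1.0) and the intensity is
# shifted additively by "IFi".
from climada.entity import Entity
from climada.engine.unsequa import InputVar
from climada.util.constants import ENT_DEMO_TODAY

ent = Entity.from_excel(ENT_DEMO_TODAY)

# Impact-function uncertainty only
impf_iv = InputVar.impfset(
    impf_set_list=[ent.impact_funcs],
    bounds_mdd=[0.8, 1.0],
    bounds_paa=[0.8, 1.0],
    bounds_impfi=[-5, 5],
    haz_id_dict={"TC": [1]},
)

# Full present-day entity: adds exposure ("ET", "EN"), discount-rate ("DR")
# and measure-cost ("CO") uncertainty on top of the impact-function one.
ent_iv = InputVar.ent(
    impf_set_list=[ent.impact_funcs],
    disc_rate=ent.disc_rates,
    exp_list=[ent.exposures],
    meas_set=ent.measures,
    haz_id_dict={"TC": [1]},
    bounds_cost=[0.5, 1.5],
    bounds_totval=[0.9, 1.1],
    bounds_impfi=[-2, 5],
)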
@@ -656,35 +682,46 @@ def entfut(impf_set_list, exp_list, meas_set, haz_id_dict, kwargs = {} if bounds_mdd is None: - kwargs['MDD'] = None + kwargs["MDD"] = None if bounds_paa is None: - kwargs['PAA'] = None + kwargs["PAA"] = None if bounds_impfi is None: - kwargs['IFi'] = None + kwargs["IFi"] = None if n_impf_set == 1: - kwargs['IL'] = 0 + kwargs["IL"] = 0 if bounds_cost is None: - kwargs['CO'] = None + kwargs["CO"] = None if bounds_eg is None: - kwargs['EG'] = None + kwargs["EG"] = None if bounds_noise is None: - kwargs['EN'] = None + kwargs["EN"] = None if n_exp == 1: - kwargs['EL'] = 0 + kwargs["EL"] = 0 return InputVar( - partial(_entfut_unc_func, haz_id_dict=haz_id_dict, - bounds_noise=bounds_noise, impf_set_list=impf_set_list, - exp_list=exp_list, meas_set=meas_set, **kwargs), - _entfut_unc_dict(bounds_eg=bounds_eg, bounds_noise=bounds_noise, - bounds_impfi=bounds_impfi, n_impf_set=n_impf_set, - bounds_paa=bounds_paa, - bounds_mdd=bounds_mdd, bounds_cost=bounds_cost, - n_exp=n_exp) + partial( + _entfut_unc_func, + haz_id_dict=haz_id_dict, + bounds_noise=bounds_noise, + impf_set_list=impf_set_list, + exp_list=exp_list, + meas_set=meas_set, + **kwargs + ), + _entfut_unc_dict( + bounds_eg=bounds_eg, + bounds_noise=bounds_noise, + bounds_impfi=bounds_impfi, + n_impf_set=n_impf_set, + bounds_paa=bounds_paa, + bounds_mdd=bounds_mdd, + bounds_cost=bounds_cost, + n_exp=n_exp, + ), ) -#Hazard +# Hazard def _haz_uncfunc(HE, HI, HA, HF, HL, haz_list, n_ev): haz_tmp = copy.deepcopy(haz_list[int(HL)]) if HE is not None: @@ -699,46 +736,50 @@ def _haz_uncfunc(HE, HI, HA, HF, HL, haz_list, n_ev): haz_tmp.frequency = np.multiply(haz_tmp.frequency, HF) return haz_tmp + def _haz_unc_dict(n_ev, bounds_int, bounds_frac, bounds_freq, n_haz): hud = {} if n_ev is not None: - hud['HE'] = sp.stats.randint(0, 2**32 - 1) #seed for rnd generator + hud["HE"] = sp.stats.randint(0, 2**32 - 1) # seed for rnd generator if bounds_int is not None: imin, idelta = bounds_int[0], bounds_int[1] - bounds_int[0] - hud['HI'] = sp.stats.uniform(imin, idelta) + hud["HI"] = sp.stats.uniform(imin, idelta) if bounds_frac is not None: amin, adelta = bounds_frac[0], bounds_frac[1] - bounds_frac[0] - hud['HA'] = sp.stats.uniform(amin, adelta) + hud["HA"] = sp.stats.uniform(amin, adelta) if bounds_freq is not None: fmin, fdelta = bounds_freq[0], bounds_freq[1] - bounds_freq[0] - hud['HF'] = sp.stats.uniform(fmin, fdelta) + hud["HF"] = sp.stats.uniform(fmin, fdelta) if n_haz > 1: - hud['HL'] = sp.stats.randint(0, n_haz) + hud["HL"] = sp.stats.randint(0, n_haz) return hud -#Exposure + +# Exposure def _exp_uncfunc(EN, ET, EL, exp_list, bounds_noise): exp_tmp = exp_list[int(EL)].copy(deep=True) if EN is not None: rng = np.random.RandomState(int(EN)) - rnd_vals = rng.uniform(bounds_noise[0], bounds_noise[1], size = len(exp_tmp.gdf)) - exp_tmp.gdf['value'] *= rnd_vals + rnd_vals = rng.uniform(bounds_noise[0], bounds_noise[1], size=len(exp_tmp.gdf)) + exp_tmp.gdf["value"] *= rnd_vals if ET is not None: - exp_tmp.gdf['value'] *= ET + exp_tmp.gdf["value"] *= ET return exp_tmp + def _exp_unc_dict(bounds_totval, bounds_noise, n_exp): eud = {} if bounds_totval is not None: tmin, tmax = bounds_totval[0], bounds_totval[1] - bounds_totval[0] - eud['ET'] = sp.stats.uniform(tmin, tmax) + eud["ET"] = sp.stats.uniform(tmin, tmax) if bounds_noise is not None: - eud['EN'] = sp.stats.randint(0, 2**32 - 1) #seed for rnd generator + eud["EN"] = sp.stats.randint(0, 2**32 - 1) # seed for rnd generator if n_exp > 1: - eud['EL'] = sp.stats.randint(0, n_exp) + 
eud["EL"] = sp.stats.randint(0, n_exp) return eud -#Impact function set + +# Impact function set def _impfset_uncfunc(IFi, MDD, PAA, IL, impf_set_list, haz_id_dict): impf_set_tmp = copy.deepcopy(impf_set_list[int(IL)]) for haz_type, fun_id_list in haz_id_dict.items(): @@ -746,50 +787,57 @@ def _impfset_uncfunc(IFi, MDD, PAA, IL, impf_set_list, haz_id_dict): if MDD is not None: new_mdd = np.minimum( impf_set_tmp.get_func(haz_type=haz_type, fun_id=fun_id).mdd * MDD, - 1.0 - ) + 1.0, + ) impf_set_tmp.get_func(haz_type=haz_type, fun_id=fun_id).mdd = new_mdd if PAA is not None: new_paa = np.minimum( impf_set_tmp.get_func(haz_type=haz_type, fun_id=fun_id).paa * PAA, - 1.0 - ) + 1.0, + ) impf_set_tmp.get_func(haz_type=haz_type, fun_id=fun_id).paa = new_paa if IFi is not None: new_int = np.maximum( - impf_set_tmp.get_func(haz_type=haz_type, fun_id=fun_id).intensity + IFi, - 0.0 - ) - impf_set_tmp.get_func(haz_type=haz_type, fun_id=fun_id).intensity = new_int + impf_set_tmp.get_func(haz_type=haz_type, fun_id=fun_id).intensity + + IFi, + 0.0, + ) + impf_set_tmp.get_func(haz_type=haz_type, fun_id=fun_id).intensity = ( + new_int + ) return impf_set_tmp + def _impfset_unc_dict(bounds_impfi, bounds_mdd, bounds_paa, n_impf_set): iud = {} if bounds_impfi is not None: xmin, xdelta = bounds_impfi[0], bounds_impfi[1] - bounds_impfi[0] - iud['IFi'] = sp.stats.uniform(xmin, xdelta) + iud["IFi"] = sp.stats.uniform(xmin, xdelta) if bounds_paa is not None: xmin, xdelta = bounds_paa[0], bounds_paa[1] - bounds_paa[0] - iud['PAA'] = sp.stats.uniform(xmin, xdelta) + iud["PAA"] = sp.stats.uniform(xmin, xdelta) if bounds_mdd is not None: xmin, xdelta = bounds_mdd[0], bounds_mdd[1] - bounds_mdd[0] - iud['MDD'] = sp.stats.uniform(xmin, xdelta) + iud["MDD"] = sp.stats.uniform(xmin, xdelta) if n_impf_set > 1: - iud['IL'] = sp.stats.randint(0, n_impf_set) + iud["IL"] = sp.stats.randint(0, n_impf_set) return iud -#Entity + +# Entity def _disc_uncfunc(DR, disc_rate): disc = copy.deepcopy(disc_rate) if DR is not None: disc.rates = np.ones(disc.years.size) * DR return disc + def _disc_unc_dict(bounds_disk): if bounds_disk is None: return {} dmin, ddelta = bounds_disk[0], bounds_disk[1] - bounds_disk[0] - return {'DR': sp.stats.uniform(dmin, ddelta)} + return {"DR": sp.stats.uniform(dmin, ddelta)} + def _meas_set_uncfunc(CO, meas_set): meas_set_tmp = copy.deepcopy(meas_set) @@ -799,48 +847,105 @@ def _meas_set_uncfunc(CO, meas_set): meas.cost *= CO return meas_set_tmp + def _meas_set_unc_dict(bounds_cost): cmin, cdelta = bounds_cost[0], bounds_cost[1] - bounds_cost[0] - return {'CO': sp.stats.uniform(cmin, cdelta)} - -def _ent_unc_func(EN, ET, EL, IFi, IL, MDD, PAA, CO, DR, bounds_noise, - impf_set_list, haz_id_dict, disc_rate, exp_list, meas_set): + return {"CO": sp.stats.uniform(cmin, cdelta)} + + +def _ent_unc_func( + EN, + ET, + EL, + IFi, + IL, + MDD, + PAA, + CO, + DR, + bounds_noise, + impf_set_list, + haz_id_dict, + disc_rate, + exp_list, + meas_set, +): exposures = _exp_uncfunc(EN, ET, EL, exp_list, bounds_noise) - impact_func_set = _impfset_uncfunc(IFi, MDD, PAA, IL, impf_set_list=impf_set_list, - haz_id_dict=haz_id_dict) + impact_func_set = _impfset_uncfunc( + IFi, MDD, PAA, IL, impf_set_list=impf_set_list, haz_id_dict=haz_id_dict + ) measure_set = _meas_set_uncfunc(CO, meas_set=meas_set) disc_rates = _disc_uncfunc(DR, disc_rate) return Entity(exposures, disc_rates, impact_func_set, measure_set) -def _ent_unc_dict(bounds_totval, bounds_noise, bounds_impfi, bounds_mdd, - bounds_paa, n_impf_set, bounds_disc, 
bounds_cost, n_exp): + +def _ent_unc_dict( + bounds_totval, + bounds_noise, + bounds_impfi, + bounds_mdd, + bounds_paa, + n_impf_set, + bounds_disc, + bounds_cost, + n_exp, +): ent_unc_dict = _exp_unc_dict(bounds_totval, bounds_noise, n_exp) - ent_unc_dict.update(_impfset_unc_dict(bounds_impfi, bounds_mdd, bounds_paa, n_impf_set)) + ent_unc_dict.update( + _impfset_unc_dict(bounds_impfi, bounds_mdd, bounds_paa, n_impf_set) + ) ent_unc_dict.update(_disc_unc_dict(bounds_disc)) ent_unc_dict.update(_meas_set_unc_dict(bounds_cost)) - return ent_unc_dict - -def _entfut_unc_func(EN, EG, EL, IFi, IL, MDD, PAA, CO, bounds_noise, - impf_set_list, haz_id_dict, exp_list, meas_set): - exposures = _exp_uncfunc(EN=EN, ET=EG, EL=EL, exp_list=exp_list, bounds_noise=bounds_noise) - impact_funcs = _impfset_uncfunc(IFi, MDD, PAA, IL, impf_set_list=impf_set_list, - haz_id_dict=haz_id_dict) + return ent_unc_dict + + +def _entfut_unc_func( + EN, + EG, + EL, + IFi, + IL, + MDD, + PAA, + CO, + bounds_noise, + impf_set_list, + haz_id_dict, + exp_list, + meas_set, +): + exposures = _exp_uncfunc( + EN=EN, ET=EG, EL=EL, exp_list=exp_list, bounds_noise=bounds_noise + ) + impact_funcs = _impfset_uncfunc( + IFi, MDD, PAA, IL, impf_set_list=impf_set_list, haz_id_dict=haz_id_dict + ) measures = _meas_set_uncfunc(CO, meas_set=meas_set) - disc_rates = DiscRates() #Disc rate of future entity ignored in cost_benefit.calc() + disc_rates = ( + DiscRates() + ) # Disc rate of future entity ignored in cost_benefit.calc() return Entity(exposures, disc_rates, impact_funcs, measures) -def _entfut_unc_dict(bounds_impfi, bounds_mdd, - bounds_paa, n_impf_set, bounds_eg, bounds_noise, - bounds_cost, n_exp): + +def _entfut_unc_dict( + bounds_impfi, + bounds_mdd, + bounds_paa, + n_impf_set, + bounds_eg, + bounds_noise, + bounds_cost, + n_exp, +): eud = {} if bounds_eg is not None: gmin, gmax = bounds_eg[0], bounds_eg[1] - bounds_eg[0] - eud['EG'] = sp.stats.uniform(gmin, gmax) + eud["EG"] = sp.stats.uniform(gmin, gmax) if bounds_noise is not None: - eud['EN'] = sp.stats.randint(0, 2**32 - 1) #seed for rnd generator + eud["EN"] = sp.stats.randint(0, 2**32 - 1) # seed for rnd generator if n_exp > 1: - eud['EL'] = sp.stats.randint(0, n_exp) + eud["EL"] = sp.stats.randint(0, n_exp) eud.update(_impfset_unc_dict(bounds_impfi, bounds_mdd, bounds_paa, n_impf_set)) if bounds_cost is not None: eud.update(_meas_set_unc_dict(bounds_cost)) diff --git a/climada/engine/unsequa/test/test_unsequa.py b/climada/engine/unsequa/test/test_unsequa.py index 0bc05f0bb..c53162e8a 100755 --- a/climada/engine/unsequa/test/test_unsequa.py +++ b/climada/engine/unsequa/test/test_unsequa.py @@ -19,39 +19,50 @@ Test uncertainty module. 
""" -import unittest import copy import time +import unittest +import matplotlib.pyplot as plt import numpy as np import pandas as pd -import matplotlib.pyplot as plt import scipy as sp - from tables.exceptions import HDF5ExtError -from climada.entity import ImpactFunc, ImpactFuncSet -from climada.entity.entity_def import Entity -from climada.entity import Exposures -from climada.hazard import Hazard from climada.engine import ImpactCalc -from climada.engine.unsequa import InputVar, CalcImpact, UncOutput, CalcCostBenefit, CalcDeltaImpact +from climada.engine.unsequa import ( + CalcCostBenefit, + CalcDeltaImpact, + CalcImpact, + InputVar, + UncOutput, +) from climada.engine.unsequa.calc_base import LOGGER - - -from climada.util.constants import (EXP_DEMO_H5, HAZ_DEMO_H5, ENT_DEMO_TODAY, ENT_DEMO_FUTURE, - TEST_UNC_OUTPUT_IMPACT, TEST_UNC_OUTPUT_COSTBEN) +from climada.entity import Exposures, ImpactFunc, ImpactFuncSet +from climada.entity.entity_def import Entity +from climada.hazard import Hazard from climada.util.api_client import Client - - -test_unc_output_impact = Client().get_dataset_file(name=TEST_UNC_OUTPUT_IMPACT, status='test_dataset') -test_unc_output_costben = Client().get_dataset_file(name=TEST_UNC_OUTPUT_COSTBEN, status='test_dataset') +from climada.util.constants import ( + ENT_DEMO_FUTURE, + ENT_DEMO_TODAY, + EXP_DEMO_H5, + HAZ_DEMO_H5, + TEST_UNC_OUTPUT_COSTBEN, + TEST_UNC_OUTPUT_IMPACT, +) + +test_unc_output_impact = Client().get_dataset_file( + name=TEST_UNC_OUTPUT_IMPACT, status="test_dataset" +) +test_unc_output_costben = Client().get_dataset_file( + name=TEST_UNC_OUTPUT_COSTBEN, status="test_dataset" +) def impf_dem(x_paa=1, x_mdd=1): - haz_type = 'TC' + haz_type = "TC" id = 1 - intensity_unit = 'm/s' + intensity_unit = "m/s" intensity = np.linspace(0, 150, num=100) mdd = np.repeat(1, len(intensity)) * x_mdd paa = np.arange(0, len(intensity)) / len(intensity) * x_paa @@ -69,7 +80,7 @@ def exp_dem(x_exp=1, exp=None): # possibly raised by pd.HDFStore when the file is locked by another process due to multiprocessing time.sleep(0.1) exp_tmp = exp.copy(deep=True) - exp_tmp.gdf['value'] *= x_exp + exp_tmp.gdf["value"] *= x_exp return exp_tmp @@ -83,19 +94,19 @@ def haz_dem(x_haz=1, haz=None): def make_input_vars(): exp = exp_dem - exp_distr = {"x_exp": sp.stats.uniform(0.8,2), - } + exp_distr = { + "x_exp": sp.stats.uniform(0.8, 2), + } exp_unc = InputVar(exp, exp_distr) impf = impf_dem - impf_distr = {"x_paa": sp.stats.beta(0.5, 1), - "x_mdd": sp.stats.uniform(0.8, 1.2) - } + impf_distr = {"x_paa": sp.stats.beta(0.5, 1), "x_mdd": sp.stats.uniform(0.8, 1.2)} impf_unc = InputVar(impf, impf_distr) haz = haz_dem - haz_distr = {"x_haz": sp.stats.alpha(a=2, loc=1, scale=1), - } + haz_distr = { + "x_haz": sp.stats.alpha(a=2, loc=1, scale=1), + } haz_unc = InputVar(haz, haz_distr) return exp_unc, impf_unc, haz_unc @@ -119,120 +130,96 @@ def make_costben_iv(): entdem = ent_dem() ent_iv = InputVar.ent( - impf_set_list = [entdem.impact_funcs], - disc_rate = entdem.disc_rates, - exp_list = [entdem.exposures], - meas_set = entdem.measures, + impf_set_list=[entdem.impact_funcs], + disc_rate=entdem.disc_rates, + exp_list=[entdem.exposures], + meas_set=entdem.measures, bounds_noise=[0.3, 1.9], bounds_cost=[0.5, 1.5], bounds_impfi=[-2, 5], - haz_id_dict={'TC': [1]} - ) + haz_id_dict={"TC": [1]}, + ) entfutdem = ent_fut_dem() entfut_iv = InputVar.entfut( - impf_set_list = [entfutdem.impact_funcs], - exp_list = [entfutdem.exposures], - meas_set = entfutdem.measures, + 
impf_set_list=[entfutdem.impact_funcs], + exp_list=[entfutdem.exposures], + meas_set=entfutdem.measures, bounds_eg=[0.8, 1.5], bounds_mdd=[0.7, 0.9], bounds_paa=[1.3, 2], - haz_id_dict={'TC': [1]} - ) + haz_id_dict={"TC": [1]}, + ) return ent_iv, entfut_iv class TestInputVar(unittest.TestCase): - """ Test UncVar class """ + """Test UncVar class""" def test_init_pass(self): impf = impf_dem - distr_dict = {"x_paa": sp.stats.beta(0.5, 1), - "x_mdd": sp.stats.uniform(0.8, 1.2) - } + distr_dict = { + "x_paa": sp.stats.beta(0.5, 1), + "x_mdd": sp.stats.uniform(0.8, 1.2), + } impf_iv = InputVar(impf, distr_dict) - self.assertListEqual(impf_iv.labels, ['x_paa', 'x_mdd']) + self.assertListEqual(impf_iv.labels, ["x_paa", "x_mdd"]) self.assertTrue(isinstance(impf_iv.distr_dict, dict)) def test_evaluate_pass(self): impf = impf_dem - distr_dict = {"x_paa": sp.stats.beta(0.5, 1), - "x_mdd": sp.stats.uniform(0.8, 0.4) - } + distr_dict = { + "x_paa": sp.stats.beta(0.5, 1), + "x_mdd": sp.stats.uniform(0.8, 0.4), + } impf_iv = InputVar(impf, distr_dict) - #Direct function evaluate - impf_eval = impf_iv.func(**{'x_paa': 0.8, 'x_mdd': 1.1}) + # Direct function evaluate + impf_eval = impf_iv.func(**{"x_paa": 0.8, "x_mdd": 1.1}) impf_true = impf_dem(x_paa=0.8, x_mdd=1.1) self.assertEqual(impf_eval.size(), impf_true.size()) - impf_func1 = impf_eval.get_func()['TC'][1] - impf_func2 = impf_true.get_func()['TC'][1] - np.testing.assert_array_equal( - impf_func1.intensity, - impf_func2.intensity - ) - np.testing.assert_array_equal( - impf_func1.mdd, - impf_func2.mdd - ) - np.testing.assert_array_equal( - impf_func1.paa, - impf_func2.paa - ) + impf_func1 = impf_eval.get_func()["TC"][1] + impf_func2 = impf_true.get_func()["TC"][1] + np.testing.assert_array_equal(impf_func1.intensity, impf_func2.intensity) + np.testing.assert_array_equal(impf_func1.mdd, impf_func2.mdd) + np.testing.assert_array_equal(impf_func1.paa, impf_func2.paa) self.assertEqual(impf_func1.id, impf_func2.id) self.assertEqual(impf_func1.haz_type, impf_func2.haz_type) - #Specific evaluate + # Specific evaluate impf_eval = impf_iv.evaluate(x_paa=0.8, x_mdd=1.1) impf_true = impf_dem(x_paa=0.8, x_mdd=1.1) self.assertEqual(impf_eval.size(), impf_true.size()) - impf_func1 = impf_eval.get_func()['TC'][1] - impf_func2 = impf_true.get_func()['TC'][1] - np.testing.assert_array_equal( - impf_func1.intensity, - impf_func2.intensity - ) - np.testing.assert_array_equal( - impf_func1.mdd, - impf_func2.mdd - ) - np.testing.assert_array_equal( - impf_func1.paa, - impf_func2.paa - ) + impf_func1 = impf_eval.get_func()["TC"][1] + impf_func2 = impf_true.get_func()["TC"][1] + np.testing.assert_array_equal(impf_func1.intensity, impf_func2.intensity) + np.testing.assert_array_equal(impf_func1.mdd, impf_func2.mdd) + np.testing.assert_array_equal(impf_func1.paa, impf_func2.paa) self.assertEqual(impf_func1.id, impf_func2.id) self.assertEqual(impf_func1.haz_type, impf_func2.haz_type) - #Average evaluate (default) + # Average evaluate (default) impf_eval = impf_iv.evaluate() impf_true = impf_dem(x_paa=0.3333333333333333, x_mdd=1.0) self.assertEqual(impf_eval.size(), impf_true.size()) - impf_func1 = impf_eval.get_func()['TC'][1] - impf_func2 = impf_true.get_func()['TC'][1] - np.testing.assert_array_almost_equal( - impf_func1.intensity, - impf_func2.intensity - ) - np.testing.assert_array_almost_equal( - impf_func1.mdd, - impf_func2.mdd - ) - np.testing.assert_array_almost_equal( - impf_func1.paa, - impf_func2.paa - ) + impf_func1 = impf_eval.get_func()["TC"][1] + impf_func2 = 
impf_true.get_func()["TC"][1] + np.testing.assert_array_almost_equal(impf_func1.intensity, impf_func2.intensity) + np.testing.assert_array_almost_equal(impf_func1.mdd, impf_func2.mdd) + np.testing.assert_array_almost_equal(impf_func1.paa, impf_func2.paa) self.assertEqual(impf_func1.id, impf_func2.id) self.assertEqual(impf_func1.haz_type, impf_func2.haz_type) def test_plot_pass(self): impf = impf_dem() - distr_dict = {"x_paa": sp.stats.beta(0.5, 1), - "x_mdd": sp.stats.uniform(0.8, 1.2), - "x_lit": sp.stats.randint(0, 10) - } + distr_dict = { + "x_paa": sp.stats.beta(0.5, 1), + "x_mdd": sp.stats.uniform(0.8, 1.2), + "x_lit": sp.stats.randint(0, 10), + } impf_iv = InputVar(impf, distr_dict) self.assertIsNotNone(impf_iv.plot()) plt.close() @@ -240,8 +227,7 @@ def test_plot_pass(self): def test_var_to_inputvar(self): exp = exp_dem() - distr_dict = {"x_exp": sp.stats.uniform(0.8,1.2) - } + distr_dict = {"x_exp": sp.stats.uniform(0.8, 1.2)} var = InputVar.var_to_inputvar(exp) self.assertDictEqual(var.distr_dict, {}) @@ -251,6 +237,7 @@ def test_var_to_inputvar(self): self.assertDictEqual(iv_var.distr_dict, distr_dict) self.assertTrue(isinstance(iv_var, InputVar)) + class TestOutput(unittest.TestCase): """Test the output class""" @@ -274,7 +261,7 @@ def test_plot_unc_imp(self): plt_sens = unc_output.plot_rp_uncertainty() self.assertIsNotNone(plt_sens) plt.close() - plt_sens_2 = unc_output.plot_sensitivity_second_order(salib_si='S1') + plt_sens_2 = unc_output.plot_sensitivity_second_order(salib_si="S1") self.assertIsNotNone(plt_sens_2) plt.close() plt_map = unc_output.plot_sensitivity_map() @@ -288,7 +275,9 @@ def test_save_load_pass(self): haz = haz_dem() unc_calc = CalcImpact(exp_unc, impf_unc, haz) - unc_data_save = unc_calc.make_sample(N=2, sampling_kwargs={'calc_second_order': True}) + unc_data_save = unc_calc.make_sample( + N=2, sampling_kwargs={"calc_second_order": True} + ) filename = unc_data_save.to_hdf5() unc_data_load = UncOutput.from_hdf5(filename) for attr_save, val_save in unc_data_save.__dict__.items(): @@ -299,8 +288,9 @@ def test_save_load_pass(self): self.assertEqual(unc_data_load.sampling_kwargs, unc_data_save.sampling_kwargs) filename.unlink() - unc_data_save = unc_calc.uncertainty(unc_data_save, calc_eai_exp=True, - calc_at_event=False) + unc_data_save = unc_calc.uncertainty( + unc_data_save, calc_eai_exp=True, calc_at_event=False + ) filename = unc_data_save.to_hdf5() unc_data_load = UncOutput.from_hdf5(filename) for attr_save, val_save in unc_data_save.__dict__.items(): @@ -312,9 +302,8 @@ def test_save_load_pass(self): filename.unlink() unc_data_save = unc_calc.sensitivity( - unc_data_save, - sensitivity_kwargs = {'calc_second_order': True} - ) + unc_data_save, sensitivity_kwargs={"calc_second_order": True} + ) filename = unc_data_save.to_hdf5() unc_data_load = UncOutput.from_hdf5(filename) for attr_save, val_save in unc_data_save.__dict__.items(): @@ -323,10 +312,15 @@ def test_save_load_pass(self): self.assertTrue(df_load.equals(val_save)) self.assertEqual(unc_data_load.sampling_method, unc_data_save.sampling_method) self.assertEqual(unc_data_load.sampling_kwargs, unc_data_save.sampling_kwargs) - self.assertEqual(unc_data_load.sensitivity_method, unc_data_save.sensitivity_method) - self.assertEqual(unc_data_load.sensitivity_kwargs, unc_data_save.sensitivity_kwargs) + self.assertEqual( + unc_data_load.sensitivity_method, unc_data_save.sensitivity_method + ) + self.assertEqual( + unc_data_load.sensitivity_kwargs, unc_data_save.sensitivity_kwargs + ) filename.unlink() + 
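# End-to-end sketch of the workflow exercised by the test classes below,
# built from the module-level helpers above (make_input_vars, haz_dem) and the
# imported CalcImpact/UncOutput classes; N, the return periods and the Sobol
# second-order option are illustrative choices, not fixed by the tests.
exp_iv_demo, impf_iv_demo, _ = make_input_vars()
haz_demo = haz_dem()

calc_demo = CalcImpact(exp_iv_demo, impf_iv_demo, haz_demo)
# default Saltelli sampling; second order needed for the S2 Sobol indices
sample_demo = calc_demo.make_sample(
    N=2, sampling_kwargs={"calc_second_order": True}
)
output_demo = calc_demo.uncertainty(sample_demo, rp=[10, 100])
output_demo = calc_demo.sensitivity(
    output_demo, sensitivity_kwargs={"calc_second_order": True}
)
hdf5_file = output_demo.to_hdf5()            # round-trip through HDF5
output_demo = UncOutput.from_hdf5(hdf5_file)
hdf5_file.unlink()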
class TestCalcDelta(unittest.TestCase): """Test the calcluate delta impact uncertainty class""" @@ -336,12 +330,16 @@ def test_calc_uncertainty_pass(self): exp_unc, impf_unc, _ = make_input_vars() haz = haz_dem() haz2 = haz_dem() - haz2.intensity *=2 + haz2.intensity *= 2 unc_calc = CalcDeltaImpact(exp_unc, impf_dem(), haz, exp_dem(), impf_unc, haz2) unc_data = unc_calc.make_sample(N=2) - unc_data = unc_calc.uncertainty(unc_data, calc_eai_exp=False, calc_at_event=False) + unc_data = unc_calc.uncertainty( + unc_data, calc_eai_exp=False, calc_at_event=False + ) - for [x_exp, x_paa, x_mdd], delta_aai_aag in zip(unc_data.samples_df.values, unc_data.aai_agg_unc_df.values): + for [x_exp, x_paa, x_mdd], delta_aai_aag in zip( + unc_data.samples_df.values, unc_data.aai_agg_unc_df.values + ): exp1 = exp_unc.evaluate(x_exp=x_exp) exp2 = exp_dem() impf1 = impf_dem() @@ -351,13 +349,18 @@ def test_calc_uncertainty_pass(self): imp1 = ImpactCalc(exp1, impf1, haz1).impact() imp2 = ImpactCalc(exp2, impf2, haz2).impact() - self.assertAlmostEqual((imp2.aai_agg - imp1.aai_agg)/imp1.aai_agg, delta_aai_aag) + self.assertAlmostEqual( + (imp2.aai_agg - imp1.aai_agg) / imp1.aai_agg, delta_aai_aag + ) - #test when computing absolute delta - unc_data = unc_calc.uncertainty(unc_data, calc_eai_exp=False, calc_at_event=False, - relative_delta=False) + # test when computing absolute delta + unc_data = unc_calc.uncertainty( + unc_data, calc_eai_exp=False, calc_at_event=False, relative_delta=False + ) - for [x_exp, x_paa, x_mdd], delta_aai_aag in zip(unc_data.samples_df.values, unc_data.aai_agg_unc_df.values): + for [x_exp, x_paa, x_mdd], delta_aai_aag in zip( + unc_data.samples_df.values, unc_data.aai_agg_unc_df.values + ): exp1 = exp_unc.evaluate(x_exp=x_exp) exp2 = exp_dem() impf1 = impf_dem() @@ -374,14 +377,10 @@ def test_calc_uncertainty_pass(self): self.assertEqual(unc_calc.calc_eai_exp, False) self.assertEqual(unc_calc.calc_at_event, False) + self.assertEqual(unc_data.aai_agg_unc_df.size, unc_data.n_samples) self.assertEqual( - unc_data.aai_agg_unc_df.size, - unc_data.n_samples - ) - self.assertEqual( - unc_data.freq_curve_unc_df.size, - unc_data.n_samples * len(unc_calc.rp) - ) + unc_data.freq_curve_unc_df.size, unc_data.n_samples * len(unc_calc.rp) + ) self.assertTrue(unc_data.eai_exp_unc_df.empty) self.assertTrue(unc_data.at_event_unc_df.empty) @@ -394,38 +393,39 @@ def test_calc_sensitivity_pass(self): haz2.intensity *= 2 unc_calc = CalcDeltaImpact(exp_unc, impf_dem(), haz, exp_dem(), impf_unc, haz2) unc_data = unc_calc.make_sample(N=4) - unc_data = unc_calc.uncertainty(unc_data, calc_eai_exp=False, calc_at_event=False) + unc_data = unc_calc.uncertainty( + unc_data, calc_eai_exp=False, calc_at_event=False + ) unc_data = unc_calc.sensitivity( - unc_data, - sensitivity_kwargs = {'calc_second_order': True} - ) + unc_data, sensitivity_kwargs={"calc_second_order": True} + ) - self.assertEqual(unc_data.sensitivity_method, 'sobol') - self.assertTupleEqual(unc_data.sensitivity_kwargs, - tuple({'calc_second_order': 'True'}.items()) - ) + self.assertEqual(unc_data.sensitivity_method, "sobol") + self.assertTupleEqual( + unc_data.sensitivity_kwargs, tuple({"calc_second_order": "True"}.items()) + ) for name, attr in unc_data.__dict__.items(): - if 'sens_df' in name: - if 'eai' in name: + if "sens_df" in name: + if "eai" in name: self.assertTrue(attr.empty) - elif 'at_event' in name: + elif "at_event" in name: self.assertTrue(attr.empty) else: np.testing.assert_array_equal( - attr.param.unique(), - np.array(['x_exp', 
'x_paa', 'x_mdd']) - ) + attr.param.unique(), np.array(["x_exp", "x_paa", "x_mdd"]) + ) np.testing.assert_array_equal( attr.si.unique(), - np.array(['S1', 'S1_conf', 'ST', 'ST_conf', 'S2', 'S2_conf']) - ) + np.array(["S1", "S1_conf", "ST", "ST_conf", "S2", "S2_conf"]), + ) + + self.assertEqual( + len(attr), len(unc_data.param_labels) * (4 + 3 + 3) + ) - self.assertEqual(len(attr), - len(unc_data.param_labels) * (4 + 3 + 3) - ) class TestCalcImpact(unittest.TestCase): """Test the calcluate impact uncertainty class""" @@ -438,83 +438,81 @@ def test_init_pass(self): self.assertTupleEqual( unc_calc._input_var_names, - ('exp_input_var', 'impf_input_var', 'haz_input_var') - ) + ("exp_input_var", "impf_input_var", "haz_input_var"), + ) self.assertTupleEqual( - unc_calc._metric_names, - ('aai_agg', 'freq_curve', 'at_event', 'eai_exp') - ) + unc_calc._metric_names, ("aai_agg", "freq_curve", "at_event", "eai_exp") + ) self.assertEqual(unc_calc.value_unit, exp_iv.evaluate().value_unit) self.assertTrue( - unc_calc.exp_input_var.evaluate(x_exp=1).gdf.equals( - exp_dem(1).gdf) - ) - impf1 = unc_calc.impf_input_var.evaluate(x_paa=1, x_mdd=1).get_func()['TC'][1] - impf2 = impf_dem(1, 1).get_func()['TC'][1] + unc_calc.exp_input_var.evaluate(x_exp=1).gdf.equals(exp_dem(1).gdf) + ) + impf1 = unc_calc.impf_input_var.evaluate(x_paa=1, x_mdd=1).get_func()["TC"][1] + impf2 = impf_dem(1, 1).get_func()["TC"][1] np.testing.assert_array_almost_equal( - impf1.calc_mdr(impf1.intensity), - impf2.calc_mdr(impf2.intensity) - ) + impf1.calc_mdr(impf1.intensity), impf2.calc_mdr(impf2.intensity) + ) haz1 = unc_calc.haz_input_var.evaluate(x_haz=1) haz2 = haz_dem(1) - self.assertListEqual( - haz1.event_name, haz2.event_name - ) + self.assertListEqual(haz1.event_name, haz2.event_name) def test_make_sample_pass(self): """Test generate sample""" - exp_unc, _ , haz_unc = make_input_vars() + exp_unc, _, haz_unc = make_input_vars() impf = impf_dem() unc_calc = CalcImpact(exp_unc, impf, haz_unc) - #default sampling saltelli - unc_data = unc_calc.make_sample(N=2, sampling_kwargs = {'calc_second_order': True}) - self.assertEqual(unc_data.n_samples, 2*(2*2+2)) # N * (2 * D + 2) + # default sampling saltelli + unc_data = unc_calc.make_sample( + N=2, sampling_kwargs={"calc_second_order": True} + ) + self.assertEqual(unc_data.n_samples, 2 * (2 * 2 + 2)) # N * (2 * D + 2) self.assertTrue(isinstance(unc_data.samples_df, pd.DataFrame)) np.testing.assert_array_equal( - unc_data.samples_df.columns.values, - np.array(['x_exp', 'x_haz']) - ) + unc_data.samples_df.columns.values, np.array(["x_exp", "x_haz"]) + ) - #latin sampling - unc_data = unc_calc.make_sample(N=1, sampling_method='latin', - sampling_kwargs = {'seed': 11245}) + # latin sampling + unc_data = unc_calc.make_sample( + N=1, sampling_method="latin", sampling_kwargs={"seed": 11245} + ) self.assertEqual(unc_data.n_samples, 1) self.assertTrue(isinstance(unc_data.samples_df, pd.DataFrame)) np.testing.assert_array_equal( - unc_data.samples_df.columns.values, - np.array(['x_exp', 'x_haz']) - ) + unc_data.samples_df.columns.values, np.array(["x_exp", "x_haz"]) + ) def test_make_sample_ff_fail(self): - """Test for warning and error messages when sampling using the 'ff' method""" + """Test for warning and error messages when sampling using the 'ff' method""" - exp_unc, impf_unc, haz_unc = make_input_vars() - haz = haz_dem() + exp_unc, impf_unc, haz_unc = make_input_vars() + haz = haz_dem() - # Warning ff sampling - unc_calc = CalcImpact(exp_unc, impf_unc, haz_unc) - warning_msg = "You are 
using the 'ff' sampler which does not require " - "a value for N. The entered N value will be ignored" - "in the sampling process." - - with self.assertLogs(LOGGER, level='WARNING') as logs: - unc_data = unc_calc.make_sample(N=4, sampling_method='ff') - self.assertEqual(len(logs.output), 1) - self.assertIn(warning_msg, logs.output[0]) - - # Error ff sampling - unc_calc = CalcImpact(exp_unc, impf_unc, haz) - with self.assertRaises(ValueError) as cm: - unc_data = unc_calc.make_sample(N=4, sampling_method='ff') - the_exception = cm.exception - self.assertEqual(the_exception.args[0], - "The number of parameters must be a power of 2. " - "To use the ff sampling method, you can generate " - "dummy parameters to overcome this limitation." - " See https://salib.readthedocs.io/en/latest/api.html") + # Warning ff sampling + unc_calc = CalcImpact(exp_unc, impf_unc, haz_unc) + warning_msg = "You are using the 'ff' sampler which does not require " + "a value for N. The entered N value will be ignored" + "in the sampling process." + + with self.assertLogs(LOGGER, level="WARNING") as logs: + unc_data = unc_calc.make_sample(N=4, sampling_method="ff") + self.assertEqual(len(logs.output), 1) + self.assertIn(warning_msg, logs.output[0]) + + # Error ff sampling + unc_calc = CalcImpact(exp_unc, impf_unc, haz) + with self.assertRaises(ValueError) as cm: + unc_data = unc_calc.make_sample(N=4, sampling_method="ff") + the_exception = cm.exception + self.assertEqual( + the_exception.args[0], + "The number of parameters must be a power of 2. " + "To use the ff sampling method, you can generate " + "dummy parameters to overcome this limitation." + " See https://salib.readthedocs.io/en/latest/api.html", + ) def test_calc_uncertainty_pass(self): """Test compute the uncertainty distribution for an impact""" @@ -522,23 +520,21 @@ def test_calc_uncertainty_pass(self): exp_unc, impf_unc, _ = make_input_vars() haz = haz_dem() unc_calc = CalcImpact(exp_unc, impf_unc, haz) - unc_data = unc_calc.make_sample( N=2) - unc_data = unc_calc.uncertainty(unc_data, calc_eai_exp=False, calc_at_event=False) + unc_data = unc_calc.make_sample(N=2) + unc_data = unc_calc.uncertainty( + unc_data, calc_eai_exp=False, calc_at_event=False + ) self.assertEqual(unc_data.unit, exp_dem().value_unit) self.assertListEqual(unc_calc.rp, [5, 10, 20, 50, 100, 250]) self.assertEqual(unc_calc.calc_eai_exp, False) self.assertEqual(unc_calc.calc_at_event, False) - self.assertEqual( - unc_data.aai_agg_unc_df.size, - unc_data.n_samples - ) + self.assertEqual(unc_data.aai_agg_unc_df.size, unc_data.n_samples) self.assertEqual( - unc_data.freq_curve_unc_df.size, - unc_data.n_samples * len(unc_calc.rp) - ) + unc_data.freq_curve_unc_df.size, unc_data.n_samples * len(unc_calc.rp) + ) self.assertTrue(unc_data.eai_exp_unc_df.empty) self.assertTrue(unc_data.at_event_unc_df.empty) @@ -552,153 +548,155 @@ def test_calc_uncertainty_pool_pass(self): unc_data = unc_calc.uncertainty( unc_data, calc_eai_exp=False, calc_at_event=False, processes=4 - ) + ) self.assertEqual(unc_data.unit, exp_dem().value_unit) self.assertListEqual(unc_calc.rp, [5, 10, 20, 50, 100, 250]) self.assertEqual(unc_calc.calc_eai_exp, False) self.assertEqual(unc_calc.calc_at_event, False) - self.assertEqual( - unc_data.aai_agg_unc_df.size, - unc_data.n_samples - ) + self.assertEqual(unc_data.aai_agg_unc_df.size, unc_data.n_samples) self.assertEqual( - unc_data.freq_curve_unc_df.size, - unc_data.n_samples * len(unc_calc.rp) - ) + unc_data.freq_curve_unc_df.size, unc_data.n_samples * len(unc_calc.rp) + ) 
self.assertTrue(unc_data.eai_exp_unc_df.empty) self.assertTrue(unc_data.at_event_unc_df.empty) def test_calc_sensitivity_all_pass(self): """Test compute sensitivity using all different sensitivity methods""" - #define input_vars + # define input_vars exp_unc, impf_unc, haz_unc = make_input_vars() # dict to store the parameters and expected results for the tests test_dict = { - 'pawn': { - 'sampling_method' : 'saltelli', - 'sampling_kwargs' : {}, - 'N' : 4, - 'sensitivity_kwargs' : { - 'S' : 10, - 'seed' : 12345 - }, - 'test_param_name' : ['x_exp',0], - 'test_si_name' : ['CV', 16], - 'test_si_value' : [0.25000, 2] - }, - 'hdmr': { - 'sampling_method' : 'saltelli', - 'sampling_kwargs' : {}, - 'N' : 100, - 'sensitivity_kwargs' : {}, - 'test_param_name' : ['x_exp', 2], - 'test_si_name' : ['Sa', 4], - 'test_si_value' : [0.004658, 3] - }, - 'ff': { - - 'sampling_method' : 'ff', - 'sampling_kwargs' : {'seed' : 12345}, - 'N' : 4, - 'sensitivity_kwargs' : {'second_order': True}, - 'test_param_name' : ['x_exp', 0], - 'test_si_name' : ['IE', 4], - 'test_si_value' : [865181825.901295, 10] - }, - 'sobol': { - 'sampling_method' : 'saltelli', - 'sampling_kwargs' : {}, - 'N' : 4, - 'sensitivity_kwargs' : {}, - 'test_param_name' : ['x_paa', 5], - 'test_si_name' : ['ST', 8], - 'test_si_value' : [0.313025, 10] - }, - - 'dgsm': { - 'sampling_method' : 'finite_diff', - 'N' : 4, - 'sampling_kwargs' : {'seed':12345}, - 'sensitivity_kwargs' : {'num_resamples': 100, - 'conf_level': 0.95, 'seed': 12345}, - 'test_param_name' : ['x_exp',0], - 'test_si_name' : ['dgsm', 8], - 'test_si_value' : [1.697516e-01, 9] + "pawn": { + "sampling_method": "saltelli", + "sampling_kwargs": {}, + "N": 4, + "sensitivity_kwargs": {"S": 10, "seed": 12345}, + "test_param_name": ["x_exp", 0], + "test_si_name": ["CV", 16], + "test_si_value": [0.25000, 2], }, - 'fast': { - 'sampling_method' : 'fast_sampler', - 'sampling_kwargs' : {'M' : 4, 'seed' : 12345}, - 'N' : 256, - 'sensitivity_kwargs' : {'M': 4, 'seed': 12345}, - 'test_param_name' : ['x_exp',0], - 'test_si_name' : ['S1_conf',8], - 'test_si_value' : [0.671396, 1] + "hdmr": { + "sampling_method": "saltelli", + "sampling_kwargs": {}, + "N": 100, + "sensitivity_kwargs": {}, + "test_param_name": ["x_exp", 2], + "test_si_name": ["Sa", 4], + "test_si_value": [0.004658, 3], }, - - 'rbd_fast': { - 'sampling_method' : 'saltelli', - 'sampling_kwargs' : {}, - 'N' : 24, - 'sensitivity_kwargs' : {'M': 4, 'seed': 12345}, - 'test_param_name' : ['x_exp', 0], - 'test_si_name' : ['S1_conf', 4], - 'test_si_value' : [0.152609, 4] + "ff": { + "sampling_method": "ff", + "sampling_kwargs": {"seed": 12345}, + "N": 4, + "sensitivity_kwargs": {"second_order": True}, + "test_param_name": ["x_exp", 0], + "test_si_name": ["IE", 4], + "test_si_value": [865181825.901295, 10], }, - - 'morris': { - 'sampling_method' : 'morris', - 'sampling_kwargs' : {'seed': 12345}, - 'N' : 4, - 'sensitivity_kwargs' : {}, - 'test_param_name' : ['x_exp', 0], - 'test_si_name' : ['mu', 1], - 'test_si_value' : [5066460029.63911, 8] + "sobol": { + "sampling_method": "saltelli", + "sampling_kwargs": {}, + "N": 4, + "sensitivity_kwargs": {}, + "test_param_name": ["x_paa", 5], + "test_si_name": ["ST", 8], + "test_si_value": [0.313025, 10], + }, + "dgsm": { + "sampling_method": "finite_diff", + "N": 4, + "sampling_kwargs": {"seed": 12345}, + "sensitivity_kwargs": { + "num_resamples": 100, + "conf_level": 0.95, + "seed": 12345, + }, + "test_param_name": ["x_exp", 0], + "test_si_name": ["dgsm", 8], + "test_si_value": [1.697516e-01, 9], + }, + 
"fast": { + "sampling_method": "fast_sampler", + "sampling_kwargs": {"M": 4, "seed": 12345}, + "N": 256, + "sensitivity_kwargs": {"M": 4, "seed": 12345}, + "test_param_name": ["x_exp", 0], + "test_si_name": ["S1_conf", 8], + "test_si_value": [0.671396, 1], + }, + "rbd_fast": { + "sampling_method": "saltelli", + "sampling_kwargs": {}, + "N": 24, + "sensitivity_kwargs": {"M": 4, "seed": 12345}, + "test_param_name": ["x_exp", 0], + "test_si_name": ["S1_conf", 4], + "test_si_value": [0.152609, 4], + }, + "morris": { + "sampling_method": "morris", + "sampling_kwargs": {"seed": 12345}, + "N": 4, + "sensitivity_kwargs": {}, + "test_param_name": ["x_exp", 0], + "test_si_name": ["mu", 1], + "test_si_value": [5066460029.63911, 8], }, } - def test_sensitivity_method(exp_unc, impf_unc, haz_unc, sensitivity_method, param_dict): + def test_sensitivity_method( + exp_unc, impf_unc, haz_unc, sensitivity_method, param_dict + ): """Function to test each seaprate sensitivity method""" unc_calc = CalcImpact(exp_unc, impf_unc, haz_unc) - unc_data = unc_calc.make_sample(N=param_dict['N'], - sampling_method=param_dict['sampling_method'], - sampling_kwargs=param_dict['sampling_kwargs']) - unc_data = unc_calc.uncertainty(unc_data, calc_eai_exp=False, calc_at_event=False) + unc_data = unc_calc.make_sample( + N=param_dict["N"], + sampling_method=param_dict["sampling_method"], + sampling_kwargs=param_dict["sampling_kwargs"], + ) + unc_data = unc_calc.uncertainty( + unc_data, calc_eai_exp=False, calc_at_event=False + ) # Call the sensitivity method with each method's specific arguments unc_data = unc_calc.sensitivity( unc_data, sensitivity_method=sensitivity_method, - sensitivity_kwargs=param_dict['sensitivity_kwargs']) - - self.assertEqual(param_dict['test_param_name'][0], - unc_data.aai_agg_sens_df['param'][param_dict['test_param_name'][1]]) - self.assertEqual(param_dict['test_si_name'][0], - unc_data.aai_agg_sens_df['si'][param_dict['test_si_name'][1]]) - self.assertAlmostEqual(param_dict['test_si_value'][0], - unc_data.aai_agg_sens_df['aai_agg'][param_dict['test_si_value'][1]], - places=5) + sensitivity_kwargs=param_dict["sensitivity_kwargs"], + ) self.assertEqual( - unc_data.aai_agg_unc_df.size, - unc_data.n_samples - ) + param_dict["test_param_name"][0], + unc_data.aai_agg_sens_df["param"][param_dict["test_param_name"][1]], + ) + self.assertEqual( + param_dict["test_si_name"][0], + unc_data.aai_agg_sens_df["si"][param_dict["test_si_name"][1]], + ) + self.assertAlmostEqual( + param_dict["test_si_value"][0], + unc_data.aai_agg_sens_df["aai_agg"][param_dict["test_si_value"][1]], + places=5, + ) + + self.assertEqual(unc_data.aai_agg_unc_df.size, unc_data.n_samples) self.assertEqual( - unc_data.freq_curve_unc_df.size, - unc_data.n_samples * len(unc_calc.rp) - ) + unc_data.freq_curve_unc_df.size, unc_data.n_samples * len(unc_calc.rp) + ) self.assertTrue(unc_data.eai_exp_unc_df.empty) self.assertTrue(unc_data.at_event_unc_df.empty) # loop over each method and do test for sensitivity_method, method_params in test_dict.items(): - test_sensitivity_method(exp_unc, impf_unc, haz_unc, - sensitivity_method, method_params) + test_sensitivity_method( + exp_unc, impf_unc, haz_unc, sensitivity_method, method_params + ) class TestCalcCostBenefit(unittest.TestCase): @@ -714,58 +712,73 @@ def test_init_pass(self): self.assertTupleEqual( unc_calc._input_var_names, - ('haz_input_var', 'ent_input_var', - 'haz_fut_input_var', 'ent_fut_input_var') - ) + ( + "haz_input_var", + "ent_input_var", + "haz_fut_input_var", + "ent_fut_input_var", 
+ ), + ) self.assertTupleEqual( unc_calc._metric_names, - ('tot_climate_risk', 'benefit', 'cost_ben_ratio', - 'imp_meas_present', 'imp_meas_future') - ) + ( + "tot_climate_risk", + "benefit", + "cost_ben_ratio", + "imp_meas_present", + "imp_meas_future", + ), + ) self.assertEqual(unc_calc.value_unit, ent_dem().exposures.value_unit) self.assertTrue( - unc_calc.ent_input_var.evaluate(CO=None, IFi=None, EN=None, EL=0).exposures.gdf.equals( - ent_dem().exposures.gdf) - ) + unc_calc.ent_input_var.evaluate( + CO=None, IFi=None, EN=None, EL=0 + ).exposures.gdf.equals(ent_dem().exposures.gdf) + ) haz1 = unc_calc.haz_input_var.evaluate(x_haz=1) haz2 = haz_dem(1) - self.assertListEqual( - haz1.event_name, haz2.event_name - ) + self.assertListEqual(haz1.event_name, haz2.event_name) unc_calc = CalcCostBenefit(haz_iv, ent_iv, haz_iv, ent_fut_iv) self.assertTupleEqual( unc_calc._input_var_names, - ('haz_input_var', 'ent_input_var', - 'haz_fut_input_var', 'ent_fut_input_var') - ) + ( + "haz_input_var", + "ent_input_var", + "haz_fut_input_var", + "ent_fut_input_var", + ), + ) self.assertTupleEqual( unc_calc._metric_names, - ('tot_climate_risk', 'benefit', 'cost_ben_ratio', - 'imp_meas_present', 'imp_meas_future') - ) + ( + "tot_climate_risk", + "benefit", + "cost_ben_ratio", + "imp_meas_present", + "imp_meas_future", + ), + ) self.assertEqual(unc_calc.value_unit, ent_dem().exposures.value_unit) self.assertTrue( - unc_calc.ent_input_var.evaluate(CO=None, IFi=None, EN=None).exposures.gdf.equals( - ent_dem().exposures.gdf) - ) + unc_calc.ent_input_var.evaluate( + CO=None, IFi=None, EN=None + ).exposures.gdf.equals(ent_dem().exposures.gdf) + ) self.assertTrue( - unc_calc.ent_fut_input_var.evaluate(EG=None, MDD=None, PAA=None).exposures.gdf.equals( - ent_fut_dem().exposures.gdf) - ) + unc_calc.ent_fut_input_var.evaluate( + EG=None, MDD=None, PAA=None + ).exposures.gdf.equals(ent_fut_dem().exposures.gdf) + ) haz1 = unc_calc.haz_input_var.evaluate(x_haz=1) haz2 = haz_dem(1) - self.assertListEqual( - haz1.event_name, haz2.event_name - ) + self.assertListEqual(haz1.event_name, haz2.event_name) haz3 = unc_calc.haz_fut_input_var.evaluate(x_haz=1) - self.assertListEqual( - haz3.event_name, haz2.event_name - ) + self.assertListEqual(haz3.event_name, haz2.event_name) def test_make_sample_pass(self): """Test generate sample""" @@ -775,46 +788,49 @@ def test_make_sample_pass(self): unc_calc = CalcCostBenefit(haz_iv, ent_iv) - #default sampling saltelli - unc_data = unc_calc.make_sample(N=2, sampling_kwargs = {'calc_second_order': True}) - self.assertEqual(unc_data.n_samples, 2*(2*4+2)) # N * (2 * D + 2) + # default sampling saltelli + unc_data = unc_calc.make_sample( + N=2, sampling_kwargs={"calc_second_order": True} + ) + self.assertEqual(unc_data.n_samples, 2 * (2 * 4 + 2)) # N * (2 * D + 2) self.assertTrue(isinstance(unc_data.samples_df, pd.DataFrame)) np.testing.assert_array_equal( - unc_data.samples_df.columns.values, - np.array(['x_haz', 'EN', 'IFi', 'CO']) - ) + unc_data.samples_df.columns.values, np.array(["x_haz", "EN", "IFi", "CO"]) + ) # #latin sampling - unc_data = unc_calc.make_sample(N=1, sampling_method='latin', - sampling_kwargs = {'seed': 11245}) + unc_data = unc_calc.make_sample( + N=1, sampling_method="latin", sampling_kwargs={"seed": 11245} + ) self.assertEqual(unc_data.n_samples, 1) self.assertTrue(isinstance(unc_data.samples_df, pd.DataFrame)) np.testing.assert_array_equal( - unc_data.samples_df.columns.values, - np.array(['x_haz', 'EN', 'IFi', 'CO']) - ) - + unc_data.samples_df.columns.values, 
np.array(["x_haz", "EN", "IFi", "CO"]) + ) unc_calc = CalcCostBenefit(haz_iv, ent_iv, haz_iv, ent_fut_iv) - #default sampling saltelli - unc_data = unc_calc.make_sample(N=2, sampling_kwargs = {'calc_second_order': True}) - self.assertEqual(unc_data.n_samples, 2*(2*7+2)) # N * (2 * D + 2) + # default sampling saltelli + unc_data = unc_calc.make_sample( + N=2, sampling_kwargs={"calc_second_order": True} + ) + self.assertEqual(unc_data.n_samples, 2 * (2 * 7 + 2)) # N * (2 * D + 2) self.assertTrue(isinstance(unc_data.samples_df, pd.DataFrame)) np.testing.assert_array_equal( unc_data.samples_df.columns.values, - np.array(['x_haz', 'EN', 'IFi', 'CO', 'EG', 'PAA', 'MDD']) - ) + np.array(["x_haz", "EN", "IFi", "CO", "EG", "PAA", "MDD"]), + ) # #latin sampling - unc_data = unc_calc.make_sample(N=1, sampling_method='latin', - sampling_kwargs = {'seed': 11245}) + unc_data = unc_calc.make_sample( + N=1, sampling_method="latin", sampling_kwargs={"seed": 11245} + ) self.assertEqual(unc_data.n_samples, 1) self.assertTrue(isinstance(unc_data.samples_df, pd.DataFrame)) np.testing.assert_array_equal( unc_data.samples_df.columns.values, - np.array(['x_haz', 'EN', 'IFi', 'CO', 'EG', 'PAA', 'MDD']) - ) + np.array(["x_haz", "EN", "IFi", "CO", "EG", "PAA", "MDD"]), + ) def test_calc_uncertainty_pool_pass(self): """Test compute the uncertainty distribution for an impact""" @@ -828,22 +844,17 @@ def test_calc_uncertainty_pool_pass(self): self.assertEqual(unc_data.unit, ent_dem().exposures.value_unit) - self.assertEqual( - unc_data.tot_climate_risk_unc_df.size, - unc_data.n_samples - ) + self.assertEqual(unc_data.tot_climate_risk_unc_df.size, unc_data.n_samples) self.assertEqual( unc_data.cost_ben_ratio_unc_df.size, - unc_data.n_samples * 4 #number of measures - ) - self.assertEqual( - unc_data.imp_meas_present_unc_df.size, - 0 - ) + unc_data.n_samples * 4, # number of measures + ) + self.assertEqual(unc_data.imp_meas_present_unc_df.size, 0) self.assertEqual( unc_data.imp_meas_future_unc_df.size, - unc_data.n_samples * 4 * 5 #All measures 4 and risks/benefits 5 - ) + unc_data.n_samples * 4 * 5, # All measures 4 and risks/benefits 5 + ) + if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestInputVar) diff --git a/climada/engine/unsequa/unc_output.py b/climada/engine/unsequa/unc_output.py index 4a833df2b..d9c68fe69 100644 --- a/climada/engine/unsequa/unc_output.py +++ b/climada/engine/unsequa/unc_output.py @@ -19,57 +19,59 @@ Define Uncertainty class. 
""" -__all__ = ['UncOutput', 'UncCostBenefitOutput', 'UncImpactOutput', 'UncDeltaImpactOutput'] +__all__ = [ + "UncOutput", + "UncCostBenefitOutput", + "UncImpactOutput", + "UncDeltaImpactOutput", +] -import logging import datetime as dt - +import logging from itertools import zip_longest from pathlib import Path - import h5py -import pandas as pd -import numpy as np -import matplotlib.pyplot as plt import matplotlib as mpl +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd from matplotlib import colormaps as cm +import climada.util.hdf5_handler as u_hdf5 from climada import CONFIG - -from climada.util.value_representation import value_to_monetary_unit as u_vtm -from climada.util.value_representation import convert_monetary_value as u_cmv from climada.util import plot as u_plot -import climada.util.hdf5_handler as u_hdf5 +from climada.util.value_representation import convert_monetary_value as u_cmv +from climada.util.value_representation import value_to_monetary_unit as u_vtm LOGGER = logging.getLogger(__name__) # Metrics that are multi-dimensional -METRICS_2D = ['eai_exp', 'at_event'] +METRICS_2D = ["eai_exp", "at_event"] DATA_DIR = CONFIG.engine.uncertainty.local_data.user_data.dir() -FIG_W, FIG_H = 8, 5 #default figize width/heigh column/work multiplicators +FIG_W, FIG_H = 8, 5 # default figize width/heigh column/work multiplicators -MAP_CMAP = 'Dark2' #Default color map for the sensitivity map +MAP_CMAP = "Dark2" # Default color map for the sensitivity map -#Table of recommended pairing between salib sampling and sensitivity methods +# Table of recommended pairing between salib sampling and sensitivity methods # NEEDS TO BE UPDATED REGULARLY!! https://salib.readthedocs.io/en/latest/api.html # Currently, we do not support the 'delta' method due to Singular matrix issues, SALIB_COMPATIBILITY = { #'delta': ['fast_sampler', 'ff', 'finite_diff', 'latin', 'morris', 'saltelli'], - 'dgsm': ['finite_diff'], - 'fast': ['fast_sampler'], - 'ff': ['ff'], - 'hdmr': ['fast_sampler', 'ff', 'finite_diff', 'latin', 'morris', 'saltelli'], - 'morris': ['morris'], - 'pawn': ['fast_sampler', 'ff', 'finite_diff', 'latin', 'morris', 'saltelli'], - 'rbd_fast': ['fast_sampler', 'ff', 'finite_diff', 'latin', 'morris', 'saltelli'], - 'sobol': ['saltelli', 'sobol'] - } - - -class UncOutput(): + "dgsm": ["finite_diff"], + "fast": ["fast_sampler"], + "ff": ["ff"], + "hdmr": ["fast_sampler", "ff", "finite_diff", "latin", "morris", "saltelli"], + "morris": ["morris"], + "pawn": ["fast_sampler", "ff", "finite_diff", "latin", "morris", "saltelli"], + "rbd_fast": ["fast_sampler", "ff", "finite_diff", "latin", "morris", "saltelli"], + "sobol": ["saltelli", "sobol"], +} + + +class UncOutput: """ Class to store and plot uncertainty and sensitivity analysis output data @@ -98,8 +100,12 @@ class UncOutput(): https://salib.readthedocs.io/en/latest/basics.html. 
""" - _metadata = ['sampling_method', 'sampling_kwargs', 'sensitivity_method', - 'sensitivity_kwargs'] + _metadata = [ + "sampling_method", + "sampling_kwargs", + "sensitivity_method", + "sensitivity_kwargs", + ] def __init__(self, samples_df, unit=None): """ @@ -112,7 +118,7 @@ def __init__(self, samples_df, unit=None): unit : str, optional value unit """ - #Data + # Data self.samples_df = samples_df self.unit = unit @@ -135,19 +141,19 @@ def order_samples(self, by_parameters): self.samples_df.sort_values(by=by_parameters, inplace=True, axis=0) def get_samples_df(self): - return getattr(self, 'samples_df') + return getattr(self, "samples_df") def get_unc_df(self, metric_name): - return getattr(self, f'{metric_name}_unc_df') + return getattr(self, f"{metric_name}_unc_df") def set_unc_df(self, metric_name, unc_df): - setattr(self, f'{metric_name}_unc_df', unc_df) + setattr(self, f"{metric_name}_unc_df", unc_df) def get_sens_df(self, metric_name): - return getattr(self, f'{metric_name}_sens_df') + return getattr(self, f"{metric_name}_sens_df") def set_sens_df(self, metric_name, sens_df): - setattr(self, f'{metric_name}_sens_df', sens_df) + setattr(self, f"{metric_name}_sens_df", sens_df) def check_salib(self, sensitivity_method): """ @@ -171,12 +177,14 @@ def check_salib(self, sensitivity_method): """ if self.sampling_method not in SALIB_COMPATIBILITY[sensitivity_method]: - LOGGER.warning("The chosen combination of sensitivity method (%s)" + LOGGER.warning( + "The chosen combination of sensitivity method (%s)" " and sampling method (%s) does not correspond to the" " recommendation of the salib pacakge." "\n https://salib.readthedocs.io/en/latest/api.html", - self.sampling_method, sensitivity_method - ) + self.sampling_method, + sensitivity_method, + ) return False return True @@ -191,7 +199,7 @@ def sampling_method(self): Sampling method name """ - return self.samples_df.attrs['sampling_method'] + return self.samples_df.attrs["sampling_method"] @property def sampling_kwargs(self): @@ -204,7 +212,7 @@ def sampling_kwargs(self): Dictionary of arguments for SALib sampling method """ - return self.samples_df.attrs['sampling_kwargs'] + return self.samples_df.attrs["sampling_kwargs"] @property def n_samples(self): @@ -246,10 +254,10 @@ def problem_sa(self): """ return { - 'num_vars' : len(self.param_labels), - 'names' : self.param_labels, - 'bounds' : [[0, 1]]*len(self.param_labels) - } + "num_vars": len(self.param_labels), + "names": self.param_labels, + "bounds": [[0, 1]] * len(self.param_labels), + } @property def uncertainty_metrics(self): @@ -314,9 +322,8 @@ def get_uncertainty(self, metric_list=None): metric_list = self.uncertainty_metrics try: unc_df = pd.concat( - [self.get_unc_df(metric) for metric in metric_list], - axis=1 - ) + [self.get_unc_df(metric) for metric in metric_list], axis=1 + ) except AttributeError: return pd.DataFrame([]) return unc_df @@ -358,14 +365,14 @@ def get_sensitivity(self, salib_si, metric_list=None): for metric in metric_list: submetric_df = self.get_sens_df(metric) if not submetric_df.empty: - submetric_df = submetric_df[submetric_df['si'] == salib_si] + submetric_df = submetric_df[submetric_df["si"] == salib_si] df_all = pd.concat( - [df_all, submetric_df.select_dtypes('number')], - axis=1 - ) + [df_all, submetric_df.select_dtypes("number")], axis=1 + ) if df_meta.empty: df_meta = submetric_df.drop( - submetric_df.select_dtypes('number').columns, axis=1) + submetric_df.select_dtypes("number").columns, axis=1 + ) return pd.concat([df_meta, df_all], 
axis=1).reset_index(drop=True) def get_largest_si(self, salib_si, metric_list=None, threshold=0.01): @@ -394,25 +401,27 @@ def get_largest_si(self, salib_si, metric_list=None, threshold=0.01): si_df = self.get_sensitivity(salib_si, metric_list) - #get max index - si_df_num = si_df.select_dtypes('number') - si_df_num[si_df_num 1: flat_axes = axes.flatten() else: flat_axes = np.array([axes]) - for ax, col, orig_val in zip_longest(flat_axes, cols, orig_list, fillvalue=None): + for ax, col, orig_val in zip_longest( + flat_axes, cols, orig_list, fillvalue=None + ): if col is None: if ax is not None: ax.remove() @@ -569,11 +588,17 @@ def plot_uncertainty(self, metric_list=None, orig_list=None, figsize=None, if data.empty or data.isna().all() or data.dropna().shape[0] < 2: print(f"No data to plot for '{col}'.") if ax is not None: - ax.text(0.5, 0.5, 'No data to plot', fontsize=18, - horizontalalignment='center', verticalalignment='center', - transform=ax.transAxes) + ax.text( + 0.5, + 0.5, + "No data to plot", + fontsize=18, + horizontalalignment="center", + verticalalignment="center", + transform=ax.transAxes, + ) ax.set_xlabel(col) - ax.set_ylabel('density of samples') + ax.set_ylabel("density of samples") ax.tick_params(labelsize=fontsize) for item in [ax.title, ax.xaxis.label, ax.yaxis.label]: item.set_fontsize(fontsize) @@ -583,10 +608,16 @@ def plot_uncertainty(self, metric_list=None, orig_list=None, figsize=None, if data.empty: ax.remove() continue - data.hist(ax=ax, bins=30, density=True, histtype='bar', - color='lightsteelblue', edgecolor='black') + data.hist( + ax=ax, + bins=30, + density=True, + histtype="bar", + color="lightsteelblue", + edgecolor="black", + ) try: - data.plot.kde(ax=ax, color='darkblue', linewidth=4, label='') + data.plot.kde(ax=ax, color="darkblue", linewidth=4, label="") except np.linalg.LinAlgError: pass avg, std = data.mean(), data.std() @@ -595,33 +626,46 @@ def plot_uncertainty(self, metric_list=None, orig_list=None, figsize=None, avg_plot = np.log10(avg) else: avg_plot = avg - ax.axvline(avg_plot, color='darkorange', linestyle='dashed', linewidth=2, - label="avg=%.2f%s" %(avg, m_unit)) + ax.axvline( + avg_plot, + color="darkorange", + linestyle="dashed", + linewidth=2, + label="avg=%.2f%s" % (avg, m_unit), + ) if orig_val is not None: if log: orig_plot = np.log10(orig_val) else: orig_plot = orig_val [orig_plot] = u_cmv(orig_plot, m_unit) - ax.axvline(orig_plot, color='green', linestyle='dotted', linewidth=2, - label="orig=%.2f%s" %(orig_plot, m_unit)) + ax.axvline( + orig_plot, + color="green", + linestyle="dotted", + linewidth=2, + label="orig=%.2f%s" % (orig_plot, m_unit), + ) if log: std_m, std_p = np.log10(avg - std), np.log10(avg + std) else: std_m, std_p = avg - std, avg + std - ax.plot([std_m, std_p], - [0.3 * ymax, 0.3 * ymax], color='black', - label="std=%.2f%s" %(std, m_unit)) - xlabel = col + ' [' + m_unit + ' ' + self.unit + '] ' + ax.plot( + [std_m, std_p], + [0.3 * ymax, 0.3 * ymax], + color="black", + label="std=%.2f%s" % (std, m_unit), + ) + xlabel = col + " [" + m_unit + " " + self.unit + "] " if calc_delta: # Modify the xlabel when calc_delta is True - xlabel = col + ' change [%]' + xlabel = col + " change [%]" if log: - ax.set_xlabel( xlabel + ' (log10 scale)') + ax.set_xlabel(xlabel + " (log10 scale)") else: ax.set_xlabel(xlabel) - ax.set_ylabel('density of samples') - ax.legend(fontsize=fontsize-2) + ax.set_ylabel("density of samples") + ax.legend(fontsize=fontsize - 2) ax.tick_params(labelsize=fontsize) for item in [ax.title, 
ax.xaxis.label, ax.yaxis.label]: @@ -631,9 +675,9 @@ def plot_uncertainty(self, metric_list=None, orig_list=None, figsize=None, return axes - - def plot_rp_uncertainty(self, orig_list=None, figsize=(16, 6), axes=None, - calc_delta=False): + def plot_rp_uncertainty( + self, orig_list=None, figsize=(16, 6), axes=None, calc_delta=False + ): """ Plot the distribution of return period uncertainty @@ -667,23 +711,27 @@ def plot_rp_uncertainty(self, orig_list=None, figsize=(16, 6), axes=None, except AttributeError: unc_df = None if unc_df is None or unc_df.empty: - raise ValueError("No return period uncertainty data present " - "Please run an uncertainty analysis with the desired " - "return period specified.") + raise ValueError( + "No return period uncertainty data present " + "Please run an uncertainty analysis with the desired " + "return period specified." + ) - add_orig=True + add_orig = True if orig_list is None: - add_orig=False + add_orig = False if axes is None: _fig, axes = plt.subplots(figsize=figsize, nrows=1, ncols=2) - [min_l, max_l], m_unit = u_vtm([unc_df.min().min(), unc_df.max().max()], n_sig_dig=4) + [min_l, max_l], m_unit = u_vtm( + [unc_df.min().min(), unc_df.max().max()], n_sig_dig=4 + ) # Plotting for the first axes ax = axes[0] - prop_cycle = plt.rcParams['axes.prop_cycle'] - colors = prop_cycle.by_key()['color'] + prop_cycle = plt.rcParams["axes.prop_cycle"] + colors = prop_cycle.by_key()["color"] for n, (_name, values) in enumerate(unc_df.items()): if values.isna().all() or len(values.dropna()) < 2: @@ -692,67 +740,92 @@ def plot_rp_uncertainty(self, orig_list=None, figsize=(16, 6), axes=None, values = u_cmv(values, m_unit, n_sig_dig=4) count, division = np.histogram(values, bins=100) count = count / count.max() - losses = [(bin_i + bin_f)/2 for (bin_i, bin_f) in zip(division[:-1], division[1:])] - ax.plot([min_l, max_l], [2*n, 2*n], color='k', alpha=0.5) - ax.fill_between(losses, count + 2*n, 2*n) + losses = [ + (bin_i + bin_f) / 2 + for (bin_i, bin_f) in zip(division[:-1], division[1:]) + ] + ax.plot([min_l, max_l], [2 * n, 2 * n], color="k", alpha=0.5) + ax.fill_between(losses, count + 2 * n, 2 * n) if add_orig: [orig_val] = u_cmv(orig_list[n], m_unit, n_sig_dig=4) ax.plot( - [orig_val, orig_val], [2*n, 2*(n+1)], - color=colors[n], linestyle='dotted', linewidth=2, - label="orig=%.2f%s" %(orig_val, m_unit) + [orig_val, orig_val], + [2 * n, 2 * (n + 1)], + color=colors[n], + linestyle="dotted", + linewidth=2, + label="orig=%.2f%s" % (orig_val, m_unit), ) ax.set_xlim(min_l, max_l) - ax.set_ylim(0, 2*unc_df.shape[1]) - ax.set_yticks(np.arange(0, 2*unc_df.shape[1], 2)) + ax.set_ylim(0, 2 * unc_df.shape[1]) + ax.set_yticks(np.arange(0, 2 * unc_df.shape[1], 2)) ax.set_yticklabels([s[2:] for s in unc_df.columns]) - ax.legend(loc='lower right') + ax.legend(loc="lower right") # Set x-axis label for the first axes if calc_delta: - ax.set_xlabel('Impact change [%]') + ax.set_xlabel("Impact change [%]") else: - ax.set_xlabel('Impact [%s %s]' % (m_unit, self.unit)) + ax.set_xlabel("Impact [%s %s]" % (m_unit, self.unit)) - ax.set_ylabel('Return period [years]') + ax.set_ylabel("Return period [years]") # Plotting for the second axes ax = axes[1] - high = u_cmv(self.get_unc_df('freq_curve').quantile(0.95).values, - m_unit, n_sig_dig=4) - middle = u_cmv(self.get_unc_df('freq_curve').quantile(0.5).values, - m_unit, n_sig_dig=4) - low = u_cmv(self.get_unc_df('freq_curve').quantile(0.05).values, - m_unit, n_sig_dig=4) + high = u_cmv( + self.get_unc_df("freq_curve").quantile(0.95).values, 
m_unit, n_sig_dig=4 + ) + middle = u_cmv( + self.get_unc_df("freq_curve").quantile(0.5).values, m_unit, n_sig_dig=4 + ) + low = u_cmv( + self.get_unc_df("freq_curve").quantile(0.05).values, m_unit, n_sig_dig=4 + ) x = [float(rp[2:]) for rp in unc_df.columns] - ax.plot(x, high, linestyle='--', color='blue', alpha=0.5, - label='0.95 percentile') - ax.plot(x, middle, label='0.5 percentile') - ax.plot(x, low, linestyle='dashdot', color='blue', alpha=0.5, - label='0.05 percentile') + ax.plot( + x, high, linestyle="--", color="blue", alpha=0.5, label="0.95 percentile" + ) + ax.plot(x, middle, label="0.5 percentile") + ax.plot( + x, + low, + linestyle="dashdot", + color="blue", + alpha=0.5, + label="0.05 percentile", + ) ax.fill_between(x, low, high, alpha=0.2) if add_orig: - ax.plot(x, u_cmv(orig_list, m_unit, n_sig_dig=4), color='green', - linestyle='dotted', label='orig') - ax.set_xlabel('Return period [year]') + ax.plot( + x, + u_cmv(orig_list, m_unit, n_sig_dig=4), + color="green", + linestyle="dotted", + label="orig", + ) + ax.set_xlabel("Return period [year]") # Set y-axis label for the second axes if calc_delta: - ax.set_ylabel('Impact change [%]') + ax.set_ylabel("Impact change [%]") else: - ax.set_ylabel('Impact [' + m_unit + ' ' + self.unit + ']') + ax.set_ylabel("Impact [" + m_unit + " " + self.unit + "]") ax.legend() return axes - - - def plot_sensitivity(self, salib_si='S1', salib_si_conf='S1_conf', - metric_list=None, figsize=None, axes=None, - **kwargs): + def plot_sensitivity( + self, + salib_si="S1", + salib_si_conf="S1_conf", + metric_list=None, + figsize=None, + axes=None, + **kwargs, + ): """ Bar plot of a first order sensitivity index @@ -813,54 +886,63 @@ def plot_sensitivity(self, salib_si='S1', salib_si_conf='S1_conf', """ if not self.sensitivity_metrics: - raise ValueError("No sensitivity present. " - "Please run a sensitivity analysis first.") + raise ValueError( + "No sensitivity present. " "Please run a sensitivity analysis first." 
+ ) if metric_list is None: metric_list = [ metric for metric in self.sensitivity_metrics if metric not in METRICS_2D - ] + ] nplots = len(metric_list) nrows, ncols = int(np.ceil(nplots / 2)), min(nplots, 2) if axes is None: if not figsize: figsize = (ncols * FIG_W, nrows * FIG_H) - _fig, axes = plt.subplots(nrows = nrows, - ncols = ncols, - figsize = figsize) + _fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=figsize) if nplots > 1: flat_axes = axes.flatten() else: flat_axes = np.array([axes]) for ax, metric in zip(flat_axes, metric_list): - df_S = self.get_sensitivity(salib_si, [metric]).select_dtypes('number') + df_S = self.get_sensitivity(salib_si, [metric]).select_dtypes("number") if not df_S.columns[df_S.isnull().all()].empty: - LOGGER.warning("All-NaN columns encountered: %s", - list(df_S.columns[df_S.isnull().all()])) + LOGGER.warning( + "All-NaN columns encountered: %s", + list(df_S.columns[df_S.isnull().all()]), + ) df_S = df_S.loc[:, df_S.notnull().any()] if df_S.empty: - ax.set_xlabel('Input parameter') + ax.set_xlabel("Input parameter") ax.remove() continue - df_S_conf = self.get_sensitivity(salib_si_conf, [metric]).select_dtypes('number') + df_S_conf = self.get_sensitivity(salib_si_conf, [metric]).select_dtypes( + "number" + ) df_S_conf = df_S_conf.loc[:, df_S.columns] if df_S_conf.empty: - df_S.plot(ax=ax, kind='bar', **kwargs) - df_S.plot(ax=ax, kind='bar', yerr=df_S_conf, **kwargs) + df_S.plot(ax=ax, kind="bar", **kwargs) + df_S.plot(ax=ax, kind="bar", yerr=df_S_conf, **kwargs) ax.set_xticklabels(self.param_labels, rotation=0) - ax.set_xlabel('Input parameter') + ax.set_xlabel("Input parameter") ax.set_ylabel(salib_si) plt.tight_layout() return axes - def plot_sensitivity_second_order(self, salib_si='S2', salib_si_conf='S2_conf', - metric_list=None, figsize=None, axes=None, - **kwargs): + def plot_sensitivity_second_order( + self, + salib_si="S2", + salib_si_conf="S2_conf", + metric_list=None, + figsize=None, + axes=None, + **kwargs, + ): """ Plot second order sensitivity indices as matrix. @@ -916,33 +998,34 @@ def plot_sensitivity_second_order(self, salib_si='S2', salib_si_conf='S2_conf', """ if not self.sensitivity_metrics: - raise ValueError("No sensitivity present for this metrics. " - "Please run a sensitivity analysis first.") + raise ValueError( + "No sensitivity present for this metrics. " + "Please run a sensitivity analysis first." + ) if metric_list is None: metric_list = [ metric for metric in self.sensitivity_metrics if metric not in METRICS_2D - ] - + ] - if 'cmap' not in kwargs.keys(): - kwargs['cmap'] = 'summer' + if "cmap" not in kwargs.keys(): + kwargs["cmap"] = "summer" - #all the lowest level metrics (e.g. rp10) directly or as - #submetrics of the metrics in metrics_list - df_S = self.get_sensitivity(salib_si, metric_list).select_dtypes('number') - df_S_conf = self.get_sensitivity(salib_si_conf, metric_list).select_dtypes('number') + # all the lowest level metrics (e.g. 
rp10) directly or as + # submetrics of the metrics in metrics_list + df_S = self.get_sensitivity(salib_si, metric_list).select_dtypes("number") + df_S_conf = self.get_sensitivity(salib_si_conf, metric_list).select_dtypes( + "number" + ) nplots = len(df_S.columns) nrows, ncols = int(np.ceil(nplots / 3)), min(nplots, 3) if axes is None: if not figsize: figsize = (ncols * 5, nrows * 5) - _fig, axes = plt.subplots(nrows = nrows, - ncols = ncols, - figsize = figsize) + _fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=figsize) if nplots > 1: flat_axes = axes.flatten() @@ -950,37 +1033,46 @@ def plot_sensitivity_second_order(self, salib_si='S2', salib_si_conf='S2_conf', flat_axes = np.array([axes]) for ax, submetric in zip(flat_axes, df_S.columns): - #Make matrix symmetric + # Make matrix symmetric s2_matrix = np.triu( - np.reshape( - df_S[submetric].to_numpy(), - (len(self.param_labels), -1) - ) - ) + np.reshape(df_S[submetric].to_numpy(), (len(self.param_labels), -1)) + ) s2_matrix = s2_matrix + s2_matrix.T - np.diag(np.diag(s2_matrix)) ax.imshow(s2_matrix, **kwargs) s2_conf_matrix = np.triu( np.reshape( - df_S_conf[submetric].to_numpy(), - (len(self.param_labels), -1) - ) + df_S_conf[submetric].to_numpy(), (len(self.param_labels), -1) ) - s2_conf_matrix = s2_conf_matrix + s2_conf_matrix.T - \ - np.diag(np.diag(s2_conf_matrix)) + ) + s2_conf_matrix = ( + s2_conf_matrix + s2_conf_matrix.T - np.diag(np.diag(s2_conf_matrix)) + ) for i in range(len(s2_matrix)): for j in range(len(s2_matrix)): if np.isnan(s2_matrix[i, j]): - ax.text(j, i, np.nan, - ha="center", va="center", - color="k", fontsize='medium') + ax.text( + j, + i, + np.nan, + ha="center", + va="center", + color="k", + fontsize="medium", + ) else: - ax.text(j, i, - str(round(s2_matrix[i, j], 2)) + u'\n\u00B1' + #\u00B1 = +- - str(round(s2_conf_matrix[i, j], 2)), - ha="center", va="center", - color="k", fontsize='medium') - - ax.set_title(salib_si + ' - ' + submetric, fontsize=18) + ax.text( + j, + i, + str(round(s2_matrix[i, j], 2)) + + "\n\u00B1" # \u00B1 = +- + + str(round(s2_conf_matrix[i, j], 2)), + ha="center", + va="center", + color="k", + fontsize="medium", + ) + + ax.set_title(salib_si + " - " + submetric, fontsize=18) labels = self.param_labels ax.set_xticks(np.arange(len(labels))) ax.set_yticks(np.arange(len(labels))) @@ -990,7 +1082,7 @@ def plot_sensitivity_second_order(self, salib_si='S2', salib_si_conf='S2_conf', return axes - def plot_sensitivity_map(self, salib_si='S1', **kwargs): + def plot_sensitivity_map(self, salib_si="S1", **kwargs): """ Plot a map of the largest sensitivity index in each exposure point @@ -1022,36 +1114,34 @@ def plot_sensitivity_map(self, salib_si='S1', **kwargs): """ - eai_max_si_df = self.get_largest_si(salib_si, metric_list=['eai_exp']) - - plot_val = eai_max_si_df['param'] - coord = np.array([self.coord_df['latitude'], self.coord_df['longitude']]).transpose() # pylint: disable=no-member - if 'var_name' not in kwargs: - kwargs['var_name'] = 'Input parameter with largest ' + salib_si - if 'title' not in kwargs: - kwargs['title'] = '' - if 'figsize' not in kwargs: - kwargs['figsize'] = (8,6) - if 'cmap' not in kwargs: + eai_max_si_df = self.get_largest_si(salib_si, metric_list=["eai_exp"]) + + plot_val = eai_max_si_df["param"] + coord = np.array( + [self.coord_df["latitude"], self.coord_df["longitude"]] + ).transpose() # pylint: disable=no-member + if "var_name" not in kwargs: + kwargs["var_name"] = "Input parameter with largest " + salib_si + if "title" not in kwargs: + kwargs["title"] 
= "" + if "figsize" not in kwargs: + kwargs["figsize"] = (8, 6) + if "cmap" not in kwargs: labels = np.unique(plot_val) - n=np.where(labels=='None')[0] - if len(n) > 0 : + n = np.where(labels == "None")[0] + if len(n) > 0: n = n[0] cmap = mpl.colors.ListedColormap( - cm.get_cmap(MAP_CMAP).colors[:len(labels)] - ) + cm.get_cmap(MAP_CMAP).colors[: len(labels)] + ) colors = list(cmap.colors) colors[n] = tuple(np.repeat(0.93, 3)) cmap.colors = tuple(colors) - kwargs['cmap'] = cmap - ax = u_plot.geo_scatter_categorical( - plot_val, coord, - **kwargs - ) + kwargs["cmap"] = cmap + ax = u_plot.geo_scatter_categorical(plot_val, coord, **kwargs) return ax - def to_hdf5(self, filename=None): """ Save output to .hdf5 @@ -1070,29 +1160,27 @@ def to_hdf5(self, filename=None): """ if filename is None: - filename = "unc_output" + dt.datetime.now().strftime( - "%Y-%m-%d-%H%M%S" - ) + filename = "unc_output" + dt.datetime.now().strftime("%Y-%m-%d-%H%M%S") filename = Path(DATA_DIR) / Path(filename) save_path = Path(filename) - save_path = save_path.with_suffix('.hdf5') + save_path = save_path.with_suffix(".hdf5") - LOGGER.info('Writing %s', save_path) - store = pd.HDFStore(save_path, mode='w') - for (var_name, var_val) in self.__dict__.items(): + LOGGER.info("Writing %s", save_path) + store = pd.HDFStore(save_path, mode="w") + for var_name, var_val in self.__dict__.items(): if isinstance(var_val, pd.DataFrame): - store.put(var_name, var_val, format='fixed', complevel=9) - store.get_storer('/samples_df').attrs.metadata = self.samples_df.attrs + store.put(var_name, var_val, format="fixed", complevel=9) + store.get_storer("/samples_df").attrs.metadata = self.samples_df.attrs store.close() str_dt = h5py.special_dtype(vlen=str) - with h5py.File(save_path, 'a') as fh: - if getattr(self, 'unit'): - fh['impact_unit'] = [self.unit] - if hasattr(self, 'sensitivity_method'): + with h5py.File(save_path, "a") as fh: + if getattr(self, "unit"): + fh["impact_unit"] = [self.unit] + if hasattr(self, "sensitivity_method"): if self.sensitivity_method: - fh['sensitivity_method'] = [self.sensitivity_method] - if hasattr(self, 'sensitivity_kwargs'): + fh["sensitivity_method"] = [self.sensitivity_method] + if hasattr(self, "sensitivity_kwargs"): if self.sensitivity_kwargs: grp = fh.create_group("sensitivity_kwargs") for key, value in dict(self.sensitivity_kwargs).items(): @@ -1115,41 +1203,50 @@ def from_hdf5(filename): unc_output: climada.engine.uncertainty.unc_output.UncOutput Uncertainty and sensitivity data loaded from .hdf5 file. 
""" - filename = Path(filename).with_suffix('.hdf5') + filename = Path(filename).with_suffix(".hdf5") if not filename.exists(): - LOGGER.info('File not found') + LOGGER.info("File not found") return None unc_data = UncOutput(pd.DataFrame()) - LOGGER.info('Reading %s', filename) - store = pd.HDFStore(filename, mode='r') + LOGGER.info("Reading %s", filename) + store = pd.HDFStore(filename, mode="r") for var_name in store.keys(): setattr(unc_data, var_name[1:], store.get(var_name)) - unc_data.samples_df.attrs = store.get_storer('/samples_df').attrs.metadata + unc_data.samples_df.attrs = store.get_storer("/samples_df").attrs.metadata store.close() - with h5py.File(filename, 'r') as fh: - if 'impact_unit' in list(fh.keys()): - unc_data.unit = fh.get('impact_unit')[0].decode('UTF-8') - if 'sensitivity_method' in list(fh.keys()): - unc_data.sensitivity_method = \ - fh.get('sensitivity_method')[0].decode('UTF-8') - if 'sensitivity_kwargs' in list(fh.keys()): + with h5py.File(filename, "r") as fh: + if "impact_unit" in list(fh.keys()): + unc_data.unit = fh.get("impact_unit")[0].decode("UTF-8") + if "sensitivity_method" in list(fh.keys()): + unc_data.sensitivity_method = fh.get("sensitivity_method")[0].decode( + "UTF-8" + ) + if "sensitivity_kwargs" in list(fh.keys()): grp = fh["sensitivity_kwargs"] sens_kwargs = { - key: u_hdf5.to_string(grp.get(key)[0]) - for key in grp.keys() - } + key: u_hdf5.to_string(grp.get(key)[0]) for key in grp.keys() + } unc_data.sensitivity_kwargs = tuple(sens_kwargs.items()) return unc_data class UncImpactOutput(UncOutput): """Extension of UncOutput specific for CalcImpact, returned by the - uncertainty() method. + uncertainty() method. """ - def __init__(self, samples_df, unit, aai_agg_unc_df, freq_curve_unc_df, - eai_exp_unc_df, at_event_unc_df, coord_df): + + def __init__( + self, + samples_df, + unit, + aai_agg_unc_df, + freq_curve_unc_df, + eai_exp_unc_df, + at_event_unc_df, + coord_df, + ): """Constructor Uncertainty output values from impact.calc for each sample @@ -1186,11 +1283,21 @@ def __init__(self, samples_df, unit, aai_agg_unc_df, freq_curve_unc_df, self.at_event_sens_df = None self.coord_df = coord_df + class UncDeltaImpactOutput(UncOutput): - """Extension of UncOutput specific for CalcDeltaImpact, returned by the uncertainty() method. - """ - def __init__(self, samples_df, unit, aai_agg_unc_df, freq_curve_unc_df, eai_exp_unc_df, - at_event_initial_unc_df, at_event_final_unc_df, coord_df): + """Extension of UncOutput specific for CalcDeltaImpact, returned by the uncertainty() method.""" + + def __init__( + self, + samples_df, + unit, + aai_agg_unc_df, + freq_curve_unc_df, + eai_exp_unc_df, + at_event_initial_unc_df, + at_event_final_unc_df, + coord_df, + ): """Constructor Uncertainty output values from impact.calc for each sample @@ -1234,11 +1341,19 @@ def __init__(self, samples_df, unit, aai_agg_unc_df, freq_curve_unc_df, eai_exp_ class UncCostBenefitOutput(UncOutput): - """Extension of UncOutput specific for CalcCostBenefit, returned by the uncertainty() method. 
- """ - def __init__(self, samples_df, unit, imp_meas_present_unc_df, imp_meas_future_unc_df, - tot_climate_risk_unc_df, benefit_unc_df, cost_ben_ratio_unc_df, - cost_benefit_kwargs): + """Extension of UncOutput specific for CalcCostBenefit, returned by the uncertainty() method.""" + + def __init__( + self, + samples_df, + unit, + imp_meas_present_unc_df, + imp_meas_future_unc_df, + tot_climate_risk_unc_df, + benefit_unc_df, + cost_ben_ratio_unc_df, + cost_benefit_kwargs, + ): """Constructor Uncertainty output values from cost_benefit.calc for each sample @@ -1270,9 +1385,9 @@ def __init__(self, samples_df, unit, imp_meas_present_unc_df, imp_meas_future_un """ super().__init__(samples_df, unit) - self.imp_meas_present_unc_df= imp_meas_present_unc_df + self.imp_meas_present_unc_df = imp_meas_present_unc_df self.imp_meas_present_sens_df = None - self.imp_meas_future_unc_df= imp_meas_future_unc_df + self.imp_meas_future_unc_df = imp_meas_future_unc_df self.imp_meas_future_sens_df = None self.tot_climate_risk_unc_df = tot_climate_risk_unc_df self.tot_climate_risk_sens_df = None diff --git a/climada/entity/__init__.py b/climada/entity/__init__.py index 985d78c2a..7b830c2b7 100755 --- a/climada/entity/__init__.py +++ b/climada/entity/__init__.py @@ -18,8 +18,9 @@ init entity """ + +from .disc_rates import * +from .entity_def import * from .exposures import * from .impact_funcs import * -from .disc_rates import * from .measures import * -from .entity_def import * diff --git a/climada/entity/disc_rates/__init__.py b/climada/entity/disc_rates/__init__.py index 744aaa982..2dd6148b9 100755 --- a/climada/entity/disc_rates/__init__.py +++ b/climada/entity/disc_rates/__init__.py @@ -18,4 +18,5 @@ init disc_rates """ + from .base import * diff --git a/climada/entity/disc_rates/base.py b/climada/entity/disc_rates/base.py index 0d51e7597..e18daaf91 100755 --- a/climada/entity/disc_rates/base.py +++ b/climada/entity/disc_rates/base.py @@ -19,15 +19,16 @@ Define DiscRates class. """ -__all__ = ['DiscRates'] +__all__ = ["DiscRates"] import copy -from array import array import logging +from array import array from typing import Optional + +import matplotlib.pyplot as plt import numpy as np import pandas as pd -import matplotlib.pyplot as plt import xlsxwriter import climada.util.checker as u_check @@ -37,22 +38,20 @@ LOGGER = logging.getLogger(__name__) """MATLAB variable names""" -DEF_VAR_MAT = {'sup_field_name': 'entity', - 'field_name': 'discount', - 'var_name': {'year': 'year', - 'disc': 'discount_rate' - } - } +DEF_VAR_MAT = { + "sup_field_name": "entity", + "field_name": "discount", + "var_name": {"year": "year", "disc": "discount_rate"}, +} """Excel variable names""" -DEF_VAR_EXCEL = {'sheet_name': 'discount', - 'col_name': {'year': 'year', - 'disc': 'discount_rate' - } - } +DEF_VAR_EXCEL = { + "sheet_name": "discount", + "col_name": {"year": "year", "disc": "discount_rate"}, +} -class DiscRates(): +class DiscRates: """ Defines discount rates and basic methods. Loads from files with format defined in FILE_EXT. 
@@ -66,10 +65,8 @@ class DiscRates(): """ def __init__( - self, - years : Optional[np.ndarray] = None, - rates : Optional[np.ndarray] = None - ): + self, years: Optional[np.ndarray] = None, rates: Optional[np.ndarray] = None + ): """ Fill discount rates with values and check consistency data @@ -100,7 +97,7 @@ def check(self): ------ ValueError """ - u_check.size(len(self.years), self.rates, 'DiscRates.rates') + u_check.size(len(self.years), self.rates, "DiscRates.rates") def select(self, year_range): """ @@ -116,12 +113,11 @@ def select(self, year_range): """ pos_year = np.isin(year_range, self.years) if not np.all(pos_year): - LOGGER.info('No discount rates for given years.') + LOGGER.info("No discount rates for given years.") return None pos_year = np.isin(self.years, year_range) - return DiscRates(years=self.years[pos_year], - rates=self.rates[pos_year]) + return DiscRates(years=self.years[pos_year], rates=self.rates[pos_year]) def append(self, disc_rates): """ @@ -142,8 +138,8 @@ def append(self, disc_rates): self.__dict__ = copy.deepcopy(disc_rates.__dict__) return - new_year = array('l') - new_rate = array('d') + new_year = array("l") + new_rate = array("d") for year, rate in zip(disc_rates.years, disc_rates.rates): found = np.where(year == self.years)[0] if found.size > 0: @@ -176,13 +172,14 @@ def net_present_value(self, ini_year, end_year, val_years): """ year_range = np.arange(ini_year, end_year + 1) if year_range.size != val_years.size: - raise ValueError('Wrong size of yearly values.') + raise ValueError("Wrong size of yearly values.") sel_disc = self.select(year_range) if sel_disc is None: - raise ValueError('No information of discount rates for provided years:' - f' {ini_year} - {end_year}') - return u_fin.net_present_value(sel_disc.years, sel_disc.rates, - val_years) + raise ValueError( + "No information of discount rates for provided years:" + f" {ini_year} - {end_year}" + ) + return u_fin.net_present_value(sel_disc.years, sel_disc.rates, val_years) def plot(self, axis=None, figsize=(6, 8), **kwargs): """ @@ -205,9 +202,9 @@ def plot(self, axis=None, figsize=(6, 8), **kwargs): if not axis: _, axis = plt.subplots(1, 1, figsize=figsize) - axis.set_title('Discount rates') - axis.set_xlabel('Year') - axis.set_ylabel('discount rate (%)') + axis.set_title("Discount rates") + axis.set_xlabel("Year") + axis.set_ylabel("discount rate (%)") axis.plot(self.years, self.rates * 100, **kwargs) axis.set_xlim((self.years.min(), self.years.max())) return axis @@ -244,15 +241,16 @@ def from_mat(cls, file_name, var_names=None): var_names = DEF_VAR_MAT disc = u_hdf5.read(file_name) try: - disc = disc[var_names['sup_field_name']] + disc = disc[var_names["sup_field_name"]] except KeyError: pass try: - disc = disc[var_names['field_name']] - years = np.squeeze(disc[var_names['var_name']['year']]). \ - astype(int, copy=False) - rates = np.squeeze(disc[var_names['var_name']['disc']]) + disc = disc[var_names["field_name"]] + years = np.squeeze(disc[var_names["var_name"]["year"]]).astype( + int, copy=False + ) + rates = np.squeeze(disc[var_names["var_name"]["disc"]]) except KeyError as err: raise KeyError("Not existing variable: %s" % str(err)) from err @@ -295,11 +293,10 @@ def from_excel(cls, file_name, var_names=None): """ if var_names is None: var_names = DEF_VAR_EXCEL - dfr = pd.read_excel(file_name, var_names['sheet_name']) + dfr = pd.read_excel(file_name, var_names["sheet_name"]) try: - years = dfr[var_names['col_name']['year']].values. 
\ - astype(int, copy=False) - rates = dfr[var_names['col_name']['disc']].values + years = dfr[var_names["col_name"]["year"]].values.astype(int, copy=False) + rates = dfr[var_names["col_name"]["disc"]].values except KeyError as err: raise KeyError("Not existing variable: %s" % str(err)) from err @@ -307,8 +304,10 @@ def from_excel(cls, file_name, var_names=None): def read_excel(self, *args, **kwargs): """This function is deprecated, use DiscRates.from_excel instead.""" - LOGGER.warning("The use of DiscRates.read_excel is deprecated." - "Use DiscRates.from_excel instead.") + LOGGER.warning( + "The use of DiscRates.read_excel is deprecated." + "Use DiscRates.from_excel instead." + ) self.__dict__ = DiscRates.from_excel(*args, **kwargs).__dict__ def write_excel(self, file_name, var_names=None): @@ -333,9 +332,9 @@ def write_excel(self, file_name, var_names=None): if var_names is None: var_names = DEF_VAR_EXCEL disc_wb = xlsxwriter.Workbook(file_name) - disc_ws = disc_wb.add_worksheet(var_names['sheet_name']) + disc_ws = disc_wb.add_worksheet(var_names["sheet_name"]) - header = [var_names['col_name']['year'], var_names['col_name']['disc']] + header = [var_names["col_name"]["year"], var_names["col_name"]["disc"]] for icol, head_dat in enumerate(header): disc_ws.write(0, icol, head_dat) for i_yr, (disc_yr, disc_rt) in enumerate(zip(self.years, self.rates), 1): diff --git a/climada/entity/disc_rates/test/test_base.py b/climada/entity/disc_rates/test/test_base.py index 2458a7546..7815a63ea 100644 --- a/climada/entity/disc_rates/test/test_base.py +++ b/climada/entity/disc_rates/test/test_base.py @@ -18,48 +18,51 @@ Test DiscRates class. """ -import unittest -import numpy as np + import copy +import unittest from pathlib import Path from tempfile import TemporaryDirectory +import numpy as np + from climada import CONFIG from climada.entity.disc_rates.base import DiscRates -from climada.util.constants import ENT_TEMPLATE_XLS, ENT_DEMO_TODAY +from climada.util.constants import ENT_DEMO_TODAY, ENT_TEMPLATE_XLS + +ENT_TEST_MAT = CONFIG.exposures.test_data.dir().joinpath("demo_today.mat") -ENT_TEST_MAT = CONFIG.exposures.test_data.dir().joinpath('demo_today.mat') class TestChecker(unittest.TestCase): """Test discount rates attributes checker""" def test_check_wrongRates_fail(self): """Wrong discount rates definition""" - disc_rate = DiscRates( - rates=np.array([3, 4]), - years=np.array([1]) - ) + disc_rate = DiscRates(rates=np.array([3, 4]), years=np.array([1])) with self.assertRaises(ValueError) as cm: disc_rate.check() - self.assertIn('Invalid DiscRates.rates size: 1 != 2.', str(cm.exception)) + self.assertIn("Invalid DiscRates.rates size: 1 != 2.", str(cm.exception)) + class TestConstructor(unittest.TestCase): """Test discount rates attributes.""" + def test_attributes_all(self): """All attributes are defined""" disc_rate = DiscRates() - self.assertTrue(hasattr(disc_rate, 'years')) - self.assertTrue(hasattr(disc_rate, 'rates')) + self.assertTrue(hasattr(disc_rate, "years")) + self.assertTrue(hasattr(disc_rate, "rates")) + class TestAppend(unittest.TestCase): """Check append function""" + def test_append_to_empty_same(self): """Append DiscRates to empty one.""" disc_rate = DiscRates() disc_rate_add = DiscRates( - years=np.array([2000, 2001, 2002]), - rates=np.array([0.1, 0.2, 0.3]) + years=np.array([2000, 2001, 2002]), rates=np.array([0.1, 0.2, 0.3]) ) disc_rate.append(disc_rate_add) @@ -88,34 +91,32 @@ def test_append_different_append(self): years are overwritten.""" disc_rate = DiscRates( - 
years=np.array([2000, 2001, 2002]), - rates=np.array([0.1, 0.2, 0.3]) + years=np.array([2000, 2001, 2002]), rates=np.array([0.1, 0.2, 0.3]) ) disc_rate_add = DiscRates( - years=np.array([2000, 2001, 2003]), - rates=np.array([0.11, 0.22, 0.33]) + years=np.array([2000, 2001, 2003]), rates=np.array([0.11, 0.22, 0.33]) ) disc_rate.append(disc_rate_add) disc_rate.check() - self.assertTrue(np.array_equal(disc_rate.years, - np.array([2000, 2001, 2002, 2003]))) - self.assertTrue(np.array_equal(disc_rate.rates, - np.array([0.11, 0.22, 0.3, 0.33]))) + self.assertTrue( + np.array_equal(disc_rate.years, np.array([2000, 2001, 2002, 2003])) + ) + self.assertTrue( + np.array_equal(disc_rate.rates, np.array([0.11, 0.22, 0.3, 0.33])) + ) class TestSelect(unittest.TestCase): """Test select method""" + def test_select_pass(self): """Test select right time range.""" - years=np.arange(2000, 2050) - rates=np.arange(years.size) - disc_rate = DiscRates( - years=years, - rates=rates - ) + years = np.arange(2000, 2050) + rates = np.arange(years.size) + disc_rate = DiscRates(years=years, rates=rates) year_range = np.arange(2010, 2020) sel_disc = disc_rate.select(year_range) @@ -125,33 +126,25 @@ def test_select_pass(self): def test_select_wrong_pass(self): """Test select wrong time range.""" - disc_rate = DiscRates( - years=np.arange(2000, 2050), - rates=np.arange(50) - ) + disc_rate = DiscRates(years=np.arange(2000, 2050), rates=np.arange(50)) year_range = np.arange(2050, 2060) self.assertEqual(None, disc_rate.select(year_range)) class TestNetPresValue(unittest.TestCase): """Test select method""" + def test_net_present_value_pass(self): """Test net_present_value right time range.""" - disc_rate = DiscRates( - years=np.arange(2000, 2050), - rates=np.ones(50) * 0.02 - ) + disc_rate = DiscRates(years=np.arange(2000, 2050), rates=np.ones(50) * 0.02) val_years = np.ones(23) * 6.512201157564418e9 res = disc_rate.net_present_value(2018, 2040, val_years) - self.assertEqual(res, 1.215049630691397e+11) + self.assertEqual(res, 1.215049630691397e11) def test_net_present_value_wrong_pass(self): """Test net_present_value wrong time range.""" - disc_rate = DiscRates( - years=np.arange(2000, 2050), - rates=np.arange(50) * 0.02 - ) + disc_rate = DiscRates(years=np.arange(2000, 2050), rates=np.arange(50) * 0.02) val_years = np.ones(11) * 6.512201157564418e9 with self.assertRaises(ValueError): disc_rate.net_present_value(2050, 2060, val_years) @@ -167,12 +160,12 @@ def test_demo_file_pass(self): # Check results n_rates = 51 - self.assertIn('int', str(disc_rate.years.dtype)) + self.assertIn("int", str(disc_rate.years.dtype)) self.assertEqual(disc_rate.years.shape, (n_rates,)) self.assertEqual(disc_rate.years[0], 2000) self.assertEqual(disc_rate.years[n_rates - 1], 2050) - self.assertIn('float', str(disc_rate.rates.dtype)) + self.assertIn("float", str(disc_rate.rates.dtype)) self.assertEqual(disc_rate.rates.shape, (n_rates,)) self.assertEqual(disc_rate.rates.min(), 0.02) self.assertEqual(disc_rate.rates.max(), 0.02) @@ -184,12 +177,12 @@ def test_template_file_pass(self): # Check results n_rates = 102 - self.assertIn('int', str(disc_rate.years.dtype)) + self.assertIn("int", str(disc_rate.years.dtype)) self.assertEqual(disc_rate.years.shape, (n_rates,)) self.assertEqual(disc_rate.years[0], 2000) self.assertEqual(disc_rate.years[n_rates - 1], 2101) - self.assertIn('float', str(disc_rate.rates.dtype)) + self.assertIn("float", str(disc_rate.rates.dtype)) self.assertEqual(disc_rate.rates.shape, (n_rates,)) 
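        # Usage sketch with placeholder values, mirroring what these tests verify:
        # build DiscRates directly and discount a stream of yearly values.
        #
        #     import numpy as np
        #     from climada.entity import DiscRates
        #
        #     disc = DiscRates(years=np.arange(2000, 2051), rates=np.full(51, 0.02))
        #     disc.check()
        #     npv = disc.net_present_value(2018, 2040, np.ones(23) * 6.51e9)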
self.assertEqual(disc_rate.rates.min(), 0.02) self.assertEqual(disc_rate.rates.max(), 0.02) @@ -207,12 +200,12 @@ def test_demo_file_pass(self): # Check results n_rates = 51 - self.assertIn('int', str(disc_rate.years.dtype)) + self.assertIn("int", str(disc_rate.years.dtype)) self.assertEqual(len(disc_rate.years), n_rates) self.assertEqual(disc_rate.years[0], 2000) self.assertEqual(disc_rate.years[n_rates - 1], 2050) - self.assertIn('float', str(disc_rate.rates.dtype)) + self.assertIn("float", str(disc_rate.rates.dtype)) self.assertEqual(len(disc_rate.rates), n_rates) self.assertEqual(disc_rate.rates.min(), 0.02) self.assertEqual(disc_rate.rates.max(), 0.02) @@ -236,7 +229,7 @@ def test_write_read_excel_pass(self): rates = np.ones(years.size) * 0.03 disc_rate = DiscRates(years=years, rates=rates) - file_name = self.tempdir.joinpath('test_disc.xlsx') + file_name = self.tempdir.joinpath("test_disc.xlsx") disc_rate.write_excel(file_name) disc_read = DiscRates.from_excel(file_name) @@ -250,7 +243,7 @@ def test_write_read_csv_pass(self): rates = np.ones(years.size) * 0.03 disc_rate = DiscRates(years=years, rates=rates) - file_name = self.tempdir.joinpath('test_disc.csv') + file_name = self.tempdir.joinpath("test_disc.csv") disc_rate.write_csv(file_name) disc_read = DiscRates.from_csv(file_name) diff --git a/climada/entity/entity_def.py b/climada/entity/entity_def.py index 542ca2992..d58af9efe 100755 --- a/climada/entity/entity_def.py +++ b/climada/entity/entity_def.py @@ -19,19 +19,21 @@ Define Entity Class. """ -__all__ = ['Entity'] +__all__ = ["Entity"] import logging from typing import Optional + import pandas as pd -from climada.entity.impact_funcs.impact_func_set import ImpactFuncSet from climada.entity.disc_rates.base import DiscRates -from climada.entity.measures.measure_set import MeasureSet from climada.entity.exposures.base import Exposures +from climada.entity.impact_funcs.impact_func_set import ImpactFuncSet +from climada.entity.measures.measure_set import MeasureSet LOGGER = logging.getLogger(__name__) + class Entity: """Collects exposures, impact functions, measures and discount rates. Default values set when empty constructor. @@ -55,7 +57,7 @@ def __init__( exposures: Optional[Exposures] = None, disc_rates: Optional[DiscRates] = None, impact_func_set: Optional[ImpactFuncSet] = None, - measure_set: Optional[MeasureSet] = None + measure_set: Optional[MeasureSet] = None, ): """ Initialize entity @@ -73,7 +75,9 @@ def __init__( """ self.exposures = Exposures() if exposures is None else exposures self.disc_rates = DiscRates() if disc_rates is None else disc_rates - self.impact_funcs = ImpactFuncSet() if impact_func_set is None else impact_func_set + self.impact_funcs = ( + ImpactFuncSet() if impact_func_set is None else impact_func_set + ) self.measures = MeasureSet() if measure_set is None else measure_set @classmethod @@ -100,8 +104,9 @@ def from_mat(cls, file_name): def read_mat(self, *args, **kwargs): """This function is deprecated, use Entity.from_mat instead.""" - LOGGER.warning("The use of Entity.read_mat is deprecated." - "Use Entity.from_mat instead.") + LOGGER.warning( + "The use of Entity.read_mat is deprecated." "Use Entity.from_mat instead." + ) self.__dict__ = Entity.from_mat(*args, **kwargs).__dict__ @classmethod @@ -138,8 +143,10 @@ def from_excel(cls, file_name): def read_excel(self, *args, **kwargs): """This function is deprecated, use Entity.from_excel instead.""" - LOGGER.warning("The use of Entity.read_excel is deprecated." 
- " Use Entity.from_excel instead.") + LOGGER.warning( + "The use of Entity.read_excel is deprecated." + " Use Entity.from_excel instead." + ) self.__dict__ = Entity.from_excel(*args, **kwargs).__dict__ def write_excel(self, file_name): diff --git a/climada/entity/exposures/__init__.py b/climada/entity/exposures/__init__.py index 7e78173ec..509d0f00d 100755 --- a/climada/entity/exposures/__init__.py +++ b/climada/entity/exposures/__init__.py @@ -18,6 +18,6 @@ init exposures """ + from .base import * from .litpop import * - diff --git a/climada/entity/exposures/base.py b/climada/entity/exposures/base.py index 645127f7c..5087a237f 100644 --- a/climada/entity/exposures/base.py +++ b/climada/entity/exposures/base.py @@ -19,65 +19,68 @@ Define Exposures class. """ -__all__ = ['Exposures', 'add_sea', 'INDICATOR_IMPF', 'INDICATOR_CENTR'] +__all__ = ["Exposures", "add_sea", "INDICATOR_IMPF", "INDICATOR_CENTR"] -import logging import copy -from pathlib import Path +import logging import warnings +from pathlib import Path +import cartopy.crs as ccrs +import contextily as ctx +import matplotlib.pyplot as plt import numpy as np import pandas as pd -import matplotlib.pyplot as plt -from mpl_toolkits.axes_grid1 import make_axes_locatable -from geopandas import GeoDataFrame import rasterio +from geopandas import GeoDataFrame +from mpl_toolkits.axes_grid1 import make_axes_locatable from rasterio.warp import Resampling -import contextily as ctx -import cartopy.crs as ccrs -from climada.hazard import Hazard -import climada.util.hdf5_handler as u_hdf5 -from climada.util.constants import ONE_LAT_KM, DEF_CRS, CMAP_RASTER import climada.util.coordinates as u_coord +import climada.util.hdf5_handler as u_hdf5 import climada.util.plot as u_plot from climada import CONFIG +from climada.hazard import Hazard +from climada.util.constants import CMAP_RASTER, DEF_CRS, ONE_LAT_KM LOGGER = logging.getLogger(__name__) -INDICATOR_IMPF_OLD = 'if_' +INDICATOR_IMPF_OLD = "if_" """Previously used name of the column containing the impact functions id of specified hazard""" -INDICATOR_IMPF = 'impf_' +INDICATOR_IMPF = "impf_" """Name of the column containing the impact functions id of specified hazard""" -INDICATOR_CENTR = 'centr_' +INDICATOR_CENTR = "centr_" """Name of the column containing the centroids id of specified hazard""" DEF_REF_YEAR = CONFIG.exposures.def_ref_year.int() """Default reference year""" -DEF_VALUE_UNIT = 'USD' +DEF_VALUE_UNIT = "USD" """Default value unit""" -DEF_VAR_MAT = {'sup_field_name': 'entity', - 'field_name': 'assets', - 'var_name': {'lat': 'lat', - 'lon': 'lon', - 'val': 'Value', - 'ded': 'Deductible', - 'cov': 'Cover', - 'impf': 'DamageFunID', - 'cat': 'Category_ID', - 'reg': 'Region_ID', - 'uni': 'Value_unit', - 'ass': 'centroid_index', - 'ref': 'reference_year' - } - } +DEF_VAR_MAT = { + "sup_field_name": "entity", + "field_name": "assets", + "var_name": { + "lat": "lat", + "lon": "lon", + "val": "Value", + "ded": "Deductible", + "cov": "Cover", + "impf": "DamageFunID", + "cat": "Category_ID", + "reg": "Region_ID", + "uni": "Value_unit", + "ass": "centroid_index", + "ref": "reference_year", + }, +} """MATLAB variable names""" -class Exposures(): + +class Exposures: """geopandas GeoDataFrame with metada and columns (pd.Series) defined in Attributes. @@ -120,29 +123,44 @@ class Exposures(): TC. There might be different hazards defined: centr_TC, centr_FL, ... Computed in method assign_centroids(). 
""" - _metadata = ['description', 'ref_year', 'value_unit', 'meta'] - vars_oblig = ['value', 'latitude', 'longitude'] + _metadata = ["description", "ref_year", "value_unit", "meta"] + + vars_oblig = ["value", "latitude", "longitude"] """Name of the variables needed to compute the impact.""" vars_def = [INDICATOR_IMPF, INDICATOR_IMPF_OLD] """Name of variables that can be computed.""" - vars_opt = [INDICATOR_CENTR, 'deductible', 'cover', 'category_id', - 'region_id', 'geometry'] + vars_opt = [ + INDICATOR_CENTR, + "deductible", + "cover", + "category_id", + "region_id", + "geometry", + ] """Name of the variables that aren't need to compute the impact.""" @property def crs(self): """Coordinate Reference System, refers to the crs attribute of the inherent GeoDataFrame""" try: - return self.gdf.geometry.crs or self.meta.get('crs') + return self.gdf.geometry.crs or self.meta.get("crs") except AttributeError: # i.e., no geometry, crs is assumed to be a property # In case of gdf without geometry, empty or before set_geometry_points was called - return self.meta.get('crs') + return self.meta.get("crs") - def __init__(self, *args, meta=None, description=None, ref_year=DEF_REF_YEAR, - value_unit=DEF_VALUE_UNIT, crs=None, **kwargs): + def __init__( + self, + *args, + meta=None, + description=None, + ref_year=DEF_REF_YEAR, + value_unit=DEF_VALUE_UNIT, + crs=None, + **kwargs, + ): """Creates an Exposures object from a GeoDataFrame Parameters @@ -167,10 +185,17 @@ def __init__(self, *args, meta=None, description=None, ref_year=DEF_REF_YEAR, self.meta = {} if meta is None else meta if not isinstance(self.meta, dict): raise ValueError("meta must be a dictionary") - self.description = self.meta.get('description') if description is None else description - self.ref_year = self.meta.get('ref_year', DEF_REF_YEAR) if ref_year is None else ref_year - self.value_unit = (self.meta.get('value_unit', DEF_VALUE_UNIT) - if value_unit is None else value_unit) + self.description = ( + self.meta.get("description") if description is None else description + ) + self.ref_year = ( + self.meta.get("ref_year", DEF_REF_YEAR) if ref_year is None else ref_year + ) + self.value_unit = ( + self.meta.get("value_unit", DEF_VALUE_UNIT) + if value_unit is None + else value_unit + ) # remaining generic attributes from derived classes for mda in type(self)._metadata: @@ -183,7 +208,7 @@ def __init__(self, *args, meta=None, description=None, ref_year=DEF_REF_YEAR, setattr(self, mda, None) # crs (property) and geometry - data = args[0] if args else kwargs.get('data', {}) + data = args[0] if args else kwargs.get("data", {}) try: data_crs = data.geometry.crs except AttributeError: @@ -191,34 +216,48 @@ def __init__(self, *args, meta=None, description=None, ref_year=DEF_REF_YEAR, if data_crs and data.crs and not u_coord.equal_crs(data_crs, data.crs): raise ValueError("Inconsistent crs definition in data and data.geometry") - crs = (crs if crs is not None - else self.meta['crs'] if 'crs' in self.meta - else data_crs if data_crs - else None) - if 'crs' in self.meta and not u_coord.equal_crs(self.meta['crs'], crs): - raise ValueError("Inconsistent CRS definition, crs and meta arguments don't match") + crs = ( + crs + if crs is not None + else ( + self.meta["crs"] + if "crs" in self.meta + else data_crs if data_crs else None + ) + ) + if "crs" in self.meta and not u_coord.equal_crs(self.meta["crs"], crs): + raise ValueError( + "Inconsistent CRS definition, crs and meta arguments don't match" + ) if data_crs and not u_coord.equal_crs(data_crs, 
crs): - raise ValueError("Inconsistent CRS definition, data doesn't match meta or crs argument") + raise ValueError( + "Inconsistent CRS definition, data doesn't match meta or crs argument" + ) if not crs: crs = DEF_CRS - geometry = kwargs.get('geometry') + geometry = kwargs.get("geometry") if geometry and isinstance(geometry, str): - raise ValueError("Exposures is not able to handle customized 'geometry' column names.") + raise ValueError( + "Exposures is not able to handle customized 'geometry' column names." + ) # make the data frame self.set_gdf(GeoDataFrame(*args, **kwargs), crs=crs) def __str__(self): - return '\n'.join( - [f"{md}: {self.__dict__[md]}" for md in type(self)._metadata] + - [f"crs: {self.crs}", "data:", str(self.gdf)] + return "\n".join( + [f"{md}: {self.__dict__[md]}" for md in type(self)._metadata] + + [f"crs: {self.crs}", "data:", str(self.gdf)] ) def _access_item(self, *args): - raise TypeError("Since CLIMADA 2.0, Exposures objects are not subscriptable. Data " - "fields of Exposures objects are accessed using the `gdf` attribute. " - "For example, `expo['value']` is replaced by `expo.gdf['value']`.") + raise TypeError( + "Since CLIMADA 2.0, Exposures objects are not subscriptable. Data " + "fields of Exposures objects are accessed using the `gdf` attribute. " + "For example, `expo['value']` is replaced by `expo.gdf['value']`." + ) + __getitem__ = _access_item __setitem__ = _access_item __delitem__ = _access_item @@ -236,7 +275,9 @@ def check(self): raise ValueError(f"{var} missing in gdf") # computable columns except impf_* - for var in sorted(set(self.vars_def).difference([INDICATOR_IMPF, INDICATOR_IMPF_OLD])): + for var in sorted( + set(self.vars_def).difference([INDICATOR_IMPF, INDICATOR_IMPF_OLD]) + ): if not var in self.gdf.columns: LOGGER.info("%s not set.", var) @@ -248,9 +289,10 @@ def check(self): default_impf_present = True if not default_impf_present and not [ - col for col in self.gdf.columns - if col.startswith(INDICATOR_IMPF) or col.startswith(INDICATOR_IMPF_OLD) - ]: + col + for col in self.gdf.columns + if col.startswith(INDICATOR_IMPF) or col.startswith(INDICATOR_IMPF_OLD) + ]: LOGGER.info("Setting %s to default impact functions ids 1.", INDICATOR_IMPF) self.gdf[INDICATOR_IMPF] = 1 @@ -267,16 +309,22 @@ def check(self): LOGGER.info("%s not set.", INDICATOR_CENTR) # check if CRS is consistent - if self.crs != self.meta.get('crs'): - raise ValueError(f"Inconsistent CRS definition, gdf ({self.crs}) attribute doesn't " - f"match meta ({self.meta.get('crs')}) attribute.") + if self.crs != self.meta.get("crs"): + raise ValueError( + f"Inconsistent CRS definition, gdf ({self.crs}) attribute doesn't " + f"match meta ({self.meta.get('crs')}) attribute." + ) # check whether geometry corresponds to lat/lon try: - if (self.gdf.geometry.values[0].x != self.gdf['longitude'].values[0] or - self.gdf.geometry.values[0].y != self.gdf['latitude'].values[0]): - raise ValueError("Geometry values do not correspond to latitude and" + - " longitude. Use set_geometry_points() or set_lat_lon().") + if ( + self.gdf.geometry.values[0].x != self.gdf["longitude"].values[0] + or self.gdf.geometry.values[0].y != self.gdf["latitude"].values[0] + ): + raise ValueError( + "Geometry values do not correspond to latitude and" + + " longitude. Use set_geometry_points() or set_lat_lon()." + ) except AttributeError: # no geometry column pass @@ -291,21 +339,21 @@ def set_crs(self, crs=None): if the original value is None it will be set to the default CRS. 
""" # clear the meta dictionary entry - if 'crs' in self.meta: - old_crs = self.meta.pop('crs') + if "crs" in self.meta: + old_crs = self.meta.pop("crs") crs = crs if crs else self.crs if self.crs else DEF_CRS # adjust the dataframe - if 'geometry' in self.gdf.columns: + if "geometry" in self.gdf.columns: try: self.gdf.set_crs(crs, inplace=True) except ValueError: # restore popped crs and leave - self.meta['crs'] = old_crs + self.meta["crs"] = old_crs raise # store the value - self.meta['crs'] = crs + self.meta["crs"] = crs - def set_gdf(self, gdf:GeoDataFrame, crs=None): + def set_gdf(self, gdf: GeoDataFrame, crs=None): """Set the `gdf` GeoDataFrame and update the CRS Parameters @@ -323,7 +371,7 @@ def set_gdf(self, gdf:GeoDataFrame, crs=None): # update the coordinate reference system self.set_crs(crs) - def get_impf_column(self, haz_type=''): + def get_impf_column(self, haz_type=""): """Find the best matching column name in the exposures dataframe for a given hazard type, Parameters @@ -350,24 +398,37 @@ def get_impf_column(self, haz_type=''): if INDICATOR_IMPF + haz_type in self.gdf.columns: return INDICATOR_IMPF + haz_type if INDICATOR_IMPF_OLD + haz_type in self.gdf.columns: - LOGGER.info("Impact function column name 'if_%s' is not according to current" - " naming conventions. It's suggested to use 'impf_%s' instead.", - haz_type, haz_type) + LOGGER.info( + "Impact function column name 'if_%s' is not according to current" + " naming conventions. It's suggested to use 'impf_%s' instead.", + haz_type, + haz_type, + ) return INDICATOR_IMPF_OLD + haz_type if INDICATOR_IMPF in self.gdf.columns: - LOGGER.info("No specific impact function column found for hazard %s." - " Using the anonymous 'impf_' column.", haz_type) + LOGGER.info( + "No specific impact function column found for hazard %s." + " Using the anonymous 'impf_' column.", + haz_type, + ) return INDICATOR_IMPF if INDICATOR_IMPF_OLD in self.gdf.columns: - LOGGER.info("No specific impact function column found for hazard %s. Using the" - " anonymous 'if_' column, which is not according to current naming" - " conventions. It's suggested to use 'impf_' instead.", haz_type) + LOGGER.info( + "No specific impact function column found for hazard %s. Using the" + " anonymous 'if_' column, which is not according to current naming" + " conventions. It's suggested to use 'impf_' instead.", + haz_type, + ) return INDICATOR_IMPF_OLD raise ValueError(f"Missing exposures impact functions {INDICATOR_IMPF}.") - def assign_centroids(self, hazard, distance='euclidean', - threshold=u_coord.NEAREST_NEIGHBOR_THRESHOLD, - overwrite=True): + def assign_centroids( + self, + hazard, + distance="euclidean", + threshold=u_coord.NEAREST_NEIGHBOR_THRESHOLD, + overwrite=True, + ): """Assign for each exposure coordinate closest hazard coordinate. The Exposures ``gdf`` will be altered by this method. It will have an additional (or modified) column named ``centr_[hazard.HAZ_TYPE]`` after the call. 
@@ -420,25 +481,28 @@ def assign_centroids(self, hazard, distance='euclidean', haz_type = hazard.haz_type centr_haz = INDICATOR_CENTR + haz_type if centr_haz in self.gdf: - LOGGER.info('Exposures matching centroids already found for %s', haz_type) + LOGGER.info("Exposures matching centroids already found for %s", haz_type) if overwrite: - LOGGER.info('Existing centroids will be overwritten for %s', haz_type) + LOGGER.info("Existing centroids will be overwritten for %s", haz_type) else: return - LOGGER.info('Matching %s exposures with %s centroids.', - str(self.gdf.shape[0]), str(hazard.centroids.size)) + LOGGER.info( + "Matching %s exposures with %s centroids.", + str(self.gdf.shape[0]), + str(hazard.centroids.size), + ) if not u_coord.equal_crs(self.crs, hazard.centroids.crs): - raise ValueError('Set hazard and exposure to same CRS first!') + raise ValueError("Set hazard and exposure to same CRS first!") # Note: equal_crs is tested here, rather than within match_centroids(), # because exp.gdf.crs may not be defined, but exp.crs must be defined. - assigned_centr = u_coord.match_centroids(self.gdf, hazard.centroids, - distance=distance, threshold=threshold) + assigned_centr = u_coord.match_centroids( + self.gdf, hazard.centroids, distance=distance, threshold=threshold + ) self.gdf[centr_haz] = assigned_centr - def set_geometry_points(self, scheduler=None): """Set geometry attribute of GeoDataFrame with Points from latitude and longitude attributes. @@ -453,20 +517,32 @@ def set_geometry_points(self, scheduler=None): def set_lat_lon(self): """Set latitude and longitude attributes from geometry attribute.""" - LOGGER.info('Setting latitude and longitude attributes.') - self.gdf['latitude'] = self.gdf.geometry[:].y - self.gdf['longitude'] = self.gdf.geometry[:].x + LOGGER.info("Setting latitude and longitude attributes.") + self.gdf["latitude"] = self.gdf.geometry[:].y + self.gdf["longitude"] = self.gdf.geometry[:].x def set_from_raster(self, *args, **kwargs): """This function is deprecated, use Exposures.from_raster instead.""" - LOGGER.warning("The use of Exposures.set_from_raster is deprecated." - "Use Exposures.from_raster instead.") + LOGGER.warning( + "The use of Exposures.set_from_raster is deprecated." + "Use Exposures.from_raster instead." 
+ ) self.__dict__ = Exposures.from_raster(*args, **kwargs).__dict__ @classmethod - def from_raster(cls, file_name, band=1, src_crs=None, window=None, - geometry=None, dst_crs=None, transform=None, - width=None, height=None, resampling=Resampling.nearest): + def from_raster( + cls, + file_name, + band=1, + src_crs=None, + window=None, + geometry=None, + dst_crs=None, + transform=None, + width=None, + height=None, + resampling=Resampling.nearest, + ): """Read raster data and set latitude, longitude, value and meta Parameters @@ -498,28 +574,47 @@ def from_raster(cls, file_name, band=1, src_crs=None, window=None, -------- Exposures """ - meta, value = u_coord.read_raster(file_name, [band], src_crs, window, - geometry, dst_crs, transform, width, - height, resampling) - ulx, xres, _, uly, _, yres = meta['transform'].to_gdal() - lrx = ulx + meta['width'] * xres - lry = uly + meta['height'] * yres - x_grid, y_grid = np.meshgrid(np.arange(ulx + xres / 2, lrx, xres), - np.arange(uly + yres / 2, lry, yres)) + meta, value = u_coord.read_raster( + file_name, + [band], + src_crs, + window, + geometry, + dst_crs, + transform, + width, + height, + resampling, + ) + ulx, xres, _, uly, _, yres = meta["transform"].to_gdal() + lrx = ulx + meta["width"] * xres + lry = uly + meta["height"] * yres + x_grid, y_grid = np.meshgrid( + np.arange(ulx + xres / 2, lrx, xres), np.arange(uly + yres / 2, lry, yres) + ) return cls( { - 'longitude': x_grid.flatten(), - 'latitude': y_grid.flatten(), - 'value': value.reshape(-1), + "longitude": x_grid.flatten(), + "latitude": y_grid.flatten(), + "value": value.reshape(-1), }, meta=meta, - crs=meta['crs'], + crs=meta["crs"], ) - - def plot_scatter(self, mask=None, ignore_zero=False, pop_name=True, - buffer=0.0, extend='neither', axis=None, figsize=(9, 13), - adapt_fontsize=True, title=None, **kwargs): + def plot_scatter( + self, + mask=None, + ignore_zero=False, + pop_name=True, + buffer=0.0, + extend="neither", + axis=None, + figsize=(9, 13), + adapt_fontsize=True, + title=None, + **kwargs, + ): """Plot exposures geometry's value sum scattered over Earth's map. The plot will we projected according to the current crs. 
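# A minimal usage sketch for the from_raster constructor reformatted above;
# "exposure_values.tif" is a placeholder file name, not a file shipped with CLIMADA.
from climada.entity import Exposures

exp = Exposures.from_raster("exposure_values.tif", band=1)  # one value per raster cell
exp.check()                                                 # sets the default impf_ column if missing
exp.gdf[["longitude", "latitude", "value"]].head()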
@@ -560,28 +655,45 @@ def plot_scatter(self, mask=None, ignore_zero=False, pop_name=True, if mask is None: mask = np.ones((self.gdf.shape[0],), dtype=bool) if ignore_zero: - pos_vals = self.gdf['value'][mask].values > 0 + pos_vals = self.gdf["value"][mask].values > 0 else: - pos_vals = np.ones((self.gdf['value'][mask].values.size,), dtype=bool) - value = self.gdf['value'][mask][pos_vals].values - coord = np.stack([self.gdf['latitude'][mask][pos_vals].values, - self.gdf['longitude'][mask][pos_vals].values], axis=1) - return u_plot.geo_scatter_from_array(array_sub=value, - geo_coord=coord, - var_name=f'Value ({self.value_unit})', - title=title, - pop_name=pop_name, - buffer=buffer, - extend=extend, - proj=crs_epsg, - axes=axis, - figsize=figsize, - adapt_fontsize=adapt_fontsize, - **kwargs) - - def plot_hexbin(self, mask=None, ignore_zero=False, pop_name=True, - buffer=0.0, extend='neither', axis=None, figsize=(9, 13), - adapt_fontsize=True, title=None, **kwargs): + pos_vals = np.ones((self.gdf["value"][mask].values.size,), dtype=bool) + value = self.gdf["value"][mask][pos_vals].values + coord = np.stack( + [ + self.gdf["latitude"][mask][pos_vals].values, + self.gdf["longitude"][mask][pos_vals].values, + ], + axis=1, + ) + return u_plot.geo_scatter_from_array( + array_sub=value, + geo_coord=coord, + var_name=f"Value ({self.value_unit})", + title=title, + pop_name=pop_name, + buffer=buffer, + extend=extend, + proj=crs_epsg, + axes=axis, + figsize=figsize, + adapt_fontsize=adapt_fontsize, + **kwargs, + ) + + def plot_hexbin( + self, + mask=None, + ignore_zero=False, + pop_name=True, + buffer=0.0, + extend="neither", + axis=None, + figsize=(9, 13), + adapt_fontsize=True, + title=None, + **kwargs, + ): """Plot exposures geometry's value sum binned over Earth's map. An other function for the bins can be set through the key reduce_C_function. The plot will we projected according to the current crs. 
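# A minimal plotting sketch, assuming `exp` is an Exposures instance with the
# latitude, longitude and value columns set (for instance the object built above);
# the output file name is a placeholder.
ax = exp.plot_scatter(pop_name=False, buffer=0.5)  # scatter of individual point values
ax = exp.plot_hexbin(buffer=0.5)                   # hexagonal bins, values summed per bin (np.sum by default)
ax.figure.savefig("exposure_map.png")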
@@ -624,34 +736,51 @@ def plot_hexbin(self, mask=None, ignore_zero=False, pop_name=True, crs_epsg, _ = u_plot.get_transformation(self.crs) if title is None: title = self.description or "" - if 'reduce_C_function' not in kwargs: - kwargs['reduce_C_function'] = np.sum + if "reduce_C_function" not in kwargs: + kwargs["reduce_C_function"] = np.sum if mask is None: mask = np.ones((self.gdf.shape[0],), dtype=bool) if ignore_zero: - pos_vals = self.gdf['value'][mask].values > 0 + pos_vals = self.gdf["value"][mask].values > 0 else: - pos_vals = np.ones((self.gdf['value'][mask].values.size,), dtype=bool) - value = self.gdf['value'][mask][pos_vals].values - coord = np.stack([self.gdf['latitude'][mask][pos_vals].values, - self.gdf['longitude'][mask][pos_vals].values], axis=1) - return u_plot.geo_bin_from_array(array_sub=value, - geo_coord=coord, - var_name=f'Value ({self.value_unit})', - title=title, - pop_name=pop_name, - buffer=buffer, - extend=extend, - proj=crs_epsg, - axes=axis, - figsize=figsize, - adapt_fontsize=adapt_fontsize, - **kwargs) - - def plot_raster(self, res=None, raster_res=None, save_tiff=None, - raster_f=lambda x: np.log10((np.fmax(x + 1, 1))), - label='value (log10)', scheduler=None, axis=None, - figsize=(9, 13), fill=True, adapt_fontsize=True, **kwargs): + pos_vals = np.ones((self.gdf["value"][mask].values.size,), dtype=bool) + value = self.gdf["value"][mask][pos_vals].values + coord = np.stack( + [ + self.gdf["latitude"][mask][pos_vals].values, + self.gdf["longitude"][mask][pos_vals].values, + ], + axis=1, + ) + return u_plot.geo_bin_from_array( + array_sub=value, + geo_coord=coord, + var_name=f"Value ({self.value_unit})", + title=title, + pop_name=pop_name, + buffer=buffer, + extend=extend, + proj=crs_epsg, + axes=axis, + figsize=figsize, + adapt_fontsize=adapt_fontsize, + **kwargs, + ) + + def plot_raster( + self, + res=None, + raster_res=None, + save_tiff=None, + raster_f=lambda x: np.log10((np.fmax(x + 1, 1))), + label="value (log10)", + scheduler=None, + axis=None, + figsize=(9, 13), + fill=True, + adapt_fontsize=True, + **kwargs, + ): """Generate raster from points geometry and plot it using log10 scale `np.log10((np.fmax(raster+1, 1)))`. 
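# A minimal sketch for plot_raster, assuming `exp` holds point exposures; values are
# rasterized and shown on a log10 scale via the default raster_f argument. The 0.05 degree
# resolution and the output file name are illustrative assumptions.
ax = exp.plot_raster(res=0.05)
ax = exp.plot_raster(res=0.05, save_tiff="exposure_raster.tif")  # also writes the raw, untransformed raster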
@@ -691,23 +820,35 @@ def plot_raster(self, res=None, raster_res=None, save_tiff=None, ------- matplotlib.figure.Figure, cartopy.mpl.geoaxes.GeoAxesSubplot """ - if self.meta and self.meta.get('height', 0) * self.meta.get('height', 0) == len(self.gdf): - raster = self.gdf['value'].values.reshape((self.meta['height'], - self.meta['width'])) + if self.meta and self.meta.get("height", 0) * self.meta.get("height", 0) == len( + self.gdf + ): + raster = self.gdf["value"].values.reshape( + (self.meta["height"], self.meta["width"]) + ) # check raster starts by upper left corner - if self.gdf['latitude'].values[0] < self.gdf['latitude'].values[-1]: + if self.gdf["latitude"].values[0] < self.gdf["latitude"].values[-1]: raster = np.flip(raster, axis=0) - if self.gdf['longitude'].values[0] > self.gdf['longitude'].values[-1]: - raise ValueError('Points are not ordered according to meta raster.') + if self.gdf["longitude"].values[0] > self.gdf["longitude"].values[-1]: + raise ValueError("Points are not ordered according to meta raster.") else: - raster, meta = u_coord.points_to_raster(self.gdf, ['value'], res, raster_res, scheduler) - raster = raster.reshape((meta['height'], meta['width'])) + raster, meta = u_coord.points_to_raster( + self.gdf, ["value"], res, raster_res, scheduler + ) + raster = raster.reshape((meta["height"], meta["width"])) # save tiff if save_tiff is not None: - with rasterio.open(save_tiff, 'w', driver='GTiff', - height=meta['height'], width=meta['width'], count=1, - dtype=np.float32, crs=self.crs, transform=meta['transform'] - ) as ras_tiff: + with rasterio.open( + save_tiff, + "w", + driver="GTiff", + height=meta["height"], + width=meta["width"], + count=1, + dtype=np.float32, + crs=self.crs, + transform=meta["transform"], + ) as ras_tiff: ras_tiff.write(raster.astype(np.float32), 1) # make plot proj_data, _ = u_plot.get_transformation(self.crs) @@ -715,28 +856,40 @@ def plot_raster(self, res=None, raster_res=None, save_tiff=None, if isinstance(proj_data, ccrs.PlateCarree): # use different projections for plot and data to shift the central lon in the plot xmin, ymin, xmax, ymax = u_coord.latlon_bounds( - self.gdf['latitude'].values, self.gdf['longitude'].values) + self.gdf["latitude"].values, self.gdf["longitude"].values + ) proj_plot = ccrs.PlateCarree(central_longitude=0.5 * (xmin + xmax)) else: - xmin, ymin, xmax, ymax = (self.gdf['longitude'].min(), self.gdf['latitude'].min(), - self.gdf['longitude'].max(), self.gdf['latitude'].max()) + xmin, ymin, xmax, ymax = ( + self.gdf["longitude"].min(), + self.gdf["latitude"].min(), + self.gdf["longitude"].max(), + self.gdf["latitude"].max(), + ) if not axis: - _, axis, fontsize = u_plot.make_map(proj=proj_plot, figsize=figsize, - adapt_fontsize=adapt_fontsize) + _, axis, fontsize = u_plot.make_map( + proj=proj_plot, figsize=figsize, adapt_fontsize=adapt_fontsize + ) else: fontsize = None - cbar_ax = make_axes_locatable(axis).append_axes('right', size="6.5%", - pad=0.1, axes_class=plt.Axes) + cbar_ax = make_axes_locatable(axis).append_axes( + "right", size="6.5%", pad=0.1, axes_class=plt.Axes + ) axis.set_extent((xmin, xmax, ymin, ymax), crs=proj_data) u_plot.add_shapes(axis) if not fill: raster = np.where(raster == 0, np.nan, raster) raster_f = lambda x: np.log10((np.maximum(x + 1, 1))) - if 'cmap' not in kwargs: - kwargs['cmap'] = CMAP_RASTER - imag = axis.imshow(raster_f(raster), **kwargs, origin='upper', - extent=(xmin, xmax, ymin, ymax), transform=proj_data) + if "cmap" not in kwargs: + kwargs["cmap"] = CMAP_RASTER + imag = 
axis.imshow( + raster_f(raster), + **kwargs, + origin="upper", + extent=(xmin, xmax, ymin, ymax), + transform=proj_data, + ) cbar = plt.colorbar(imag, cax=cbar_ax, label=label) plt.colorbar(imag, cax=cbar_ax, label=label) plt.tight_layout() @@ -748,9 +901,18 @@ def plot_raster(self, res=None, raster_res=None, save_tiff=None, item.set_fontsize(fontsize) return axis - def plot_basemap(self, mask=None, ignore_zero=False, pop_name=True, - buffer=0.0, extend='neither', zoom=10, - url=ctx.providers.CartoDB.Positron, axis=None, **kwargs): + def plot_basemap( + self, + mask=None, + ignore_zero=False, + pop_name=True, + buffer=0.0, + extend="neither", + zoom=10, + url=ctx.providers.CartoDB.Positron, + axis=None, + **kwargs, + ): """Scatter points over satellite image using contextily Parameters @@ -783,13 +945,21 @@ def plot_basemap(self, mask=None, ignore_zero=False, pop_name=True, ------- matplotlib.figure.Figure, cartopy.mpl.geoaxes.GeoAxesSubplot """ - if 'geometry' not in self.gdf: + if "geometry" not in self.gdf: self.set_geometry_points() crs_ori = self.crs self.to_crs(epsg=3857, inplace=True) - axis = self.plot_scatter(mask, ignore_zero, pop_name, buffer, - extend, shapes=False, axis=axis, **kwargs) - ctx.add_basemap(axis, zoom, source=url, origin='upper') + axis = self.plot_scatter( + mask, + ignore_zero, + pop_name, + buffer, + extend, + shapes=False, + axis=axis, + **kwargs, + ) + ctx.add_basemap(axis, zoom, source=url, origin="upper") axis.set_axis_off() self.to_crs(crs_ori, inplace=True) return axis @@ -802,8 +972,8 @@ def write_hdf5(self, file_name): file_name : str (path and) file name to write to. """ - LOGGER.info('Writing %s', file_name) - store = pd.HDFStore(file_name, mode='w') + LOGGER.info("Writing %s", file_name) + store = pd.HDFStore(file_name, mode="w") pandas_df = pd.DataFrame(self.gdf) for col in pandas_df.columns: if str(pandas_df[col].dtype) == "geometry": @@ -813,19 +983,21 @@ def write_hdf5(self, file_name): with warnings.catch_warnings(): warnings.simplefilter("ignore", category=pd.errors.PerformanceWarning) # Write dataframe - store.put('exposures', pandas_df) + store.put("exposures", pandas_df) var_meta = {} for var in type(self)._metadata: var_meta[var] = getattr(self, var) - store.get_storer('exposures').attrs.metadata = var_meta + store.get_storer("exposures").attrs.metadata = var_meta store.close() def read_hdf5(self, *args, **kwargs): """This function is deprecated, use Exposures.from_hdf5 instead.""" - LOGGER.warning("The use of Exposures.read_hdf5 is deprecated." - "Use Exposures.from_hdf5 instead.") + LOGGER.warning( + "The use of Exposures.read_hdf5 is deprecated." + "Use Exposures.from_hdf5 instead." 
+ ) self.__dict__ = Exposures.from_hdf5(*args, **kwargs).__dict__ @classmethod @@ -844,28 +1016,32 @@ def from_hdf5(cls, file_name): ------- Exposures """ - LOGGER.info('Reading %s', file_name) + LOGGER.info("Reading %s", file_name) if not Path(file_name).is_file(): raise FileNotFoundError(str(file_name)) - with pd.HDFStore(file_name, mode='r') as store: - metadata = store.get_storer('exposures').attrs.metadata + with pd.HDFStore(file_name, mode="r") as store: + metadata = store.get_storer("exposures").attrs.metadata # in previous versions of CLIMADA and/or geopandas, the CRS was stored in '_crs'/'crs' - crs = metadata.get('crs', metadata.get('_crs')) - if crs is None and metadata.get('meta'): - crs = metadata['meta'].get('crs') - exp = cls(store['exposures'], crs=crs) + crs = metadata.get("crs", metadata.get("_crs")) + if crs is None and metadata.get("meta"): + crs = metadata["meta"].get("crs") + exp = cls(store["exposures"], crs=crs) for key, val in metadata.items(): - if key in type(exp)._metadata: # pylint: disable=protected-access + if key in type(exp)._metadata: # pylint: disable=protected-access setattr(exp, key, val) - if key == 'tag': # for backwards compatitbility with climada <= 3.x - descriptions = [u_hdf5.to_string(x) for x in getattr(val, 'description', [])] + if key == "tag": # for backwards compatitbility with climada <= 3.x + descriptions = [ + u_hdf5.to_string(x) for x in getattr(val, "description", []) + ] exp.description = "\n".join(descriptions) if descriptions else None return exp def read_mat(self, *args, **kwargs): """This function is deprecated, use Exposures.from_mat instead.""" - LOGGER.warning("The use of Exposures.read_mat is deprecated." - "Use Exposures.from_mat instead.") + LOGGER.warning( + "The use of Exposures.read_mat is deprecated." + "Use Exposures.from_mat instead." 
+ ) self.__dict__ = Exposures.from_mat(*args, **kwargs).__dict__ @classmethod @@ -884,25 +1060,26 @@ def from_mat(cls, file_name, var_names=None): ------- Exposures """ - LOGGER.info('Reading %s', file_name) + LOGGER.info("Reading %s", file_name) if not var_names: var_names = DEF_VAR_MAT data = u_hdf5.read(file_name) try: - data = data[var_names['sup_field_name']] + data = data[var_names["sup_field_name"]] except KeyError: pass try: - data = data[var_names['field_name']] + data = data[var_names["field_name"]] exposures = dict() _read_mat_obligatory(exposures, data, var_names) _read_mat_optional(exposures, data, var_names) except KeyError as var_err: - raise KeyError(f"Variable not in MAT file: {var_names.get('field_name')}")\ - from var_err + raise KeyError( + f"Variable not in MAT file: {var_names.get('field_name')}" + ) from var_err exp = cls(data=exposures) _read_mat_metadata(exp, data, file_name, var_names) @@ -942,7 +1119,7 @@ def to_crs(self, crs=None, epsg=None, inplace=False): if inplace: self.gdf.to_crs(crs, epsg, True) - self.meta['crs'] = crs or f'EPSG:{epsg}' + self.meta["crs"] = crs or f"EPSG:{epsg}" self.set_lat_lon() return None @@ -966,16 +1143,13 @@ def copy(self, deep=True): Exposures """ gdf = self.gdf.copy(deep=deep) - metadata = dict([ - (md, copy.deepcopy(self.__dict__[md])) for md in type(self)._metadata - ]) - metadata['crs'] = self.crs - return type(self)( - gdf, - **metadata + metadata = dict( + [(md, copy.deepcopy(self.__dict__[md])) for md in type(self)._metadata] ) + metadata["crs"] = self.crs + return type(self)(gdf, **metadata) - def write_raster(self, file_name, value_name='value', scheduler=None): + def write_raster(self, file_name, value_name="value", scheduler=None): """Write value data into raster file with GeoTiff format Parameters @@ -983,17 +1157,20 @@ def write_raster(self, file_name, value_name='value', scheduler=None): file_name : str name output file in tif format """ - if self.meta and self.meta['height'] * self.meta['width'] == len(self.gdf): - raster = self.gdf[value_name].values.reshape((self.meta['height'], - self.meta['width'])) + if self.meta and self.meta["height"] * self.meta["width"] == len(self.gdf): + raster = self.gdf[value_name].values.reshape( + (self.meta["height"], self.meta["width"]) + ) # check raster starts by upper left corner - if self.gdf['latitude'].values[0] < self.gdf['latitude'].values[-1]: + if self.gdf["latitude"].values[0] < self.gdf["latitude"].values[-1]: raster = np.flip(raster, axis=0) - if self.gdf['longitude'].values[0] > self.gdf['longitude'].values[-1]: - raise ValueError('Points are not ordered according to meta raster.') + if self.gdf["longitude"].values[0] > self.gdf["longitude"].values[-1]: + raise ValueError("Points are not ordered according to meta raster.") u_coord.write_raster(file_name, raster, self.meta) else: - raster, meta = u_coord.points_to_raster(self.gdf, [value_name], scheduler=scheduler) + raster, meta = u_coord.points_to_raster( + self.gdf, [value_name], scheduler=scheduler + ) u_coord.write_raster(file_name, raster, meta) @staticmethod @@ -1015,12 +1192,10 @@ def concat(exposures_list): exp = Exposures(exp) exp.check() - df_list = [ - ex.gdf if isinstance(ex, Exposures) else ex - for ex in exposures_list - ] + df_list = [ex.gdf if isinstance(ex, Exposures) else ex for ex in exposures_list] crss = [ - ex.crs for ex in exposures_list + ex.crs + for ex in exposures_list if isinstance(ex, (Exposures, GeoDataFrame)) and hasattr(ex, "crs") and ex.crs is not None @@ -1032,9 +1207,9 @@ def 
concat(exposures_list): else: crs = None - exp.set_gdf(GeoDataFrame( - pd.concat(df_list, ignore_index=True, sort=False) - ), crs=crs) + exp.set_gdf( + GeoDataFrame(pd.concat(df_list, ignore_index=True, sort=False)), crs=crs + ) return exp @@ -1060,11 +1235,10 @@ def centroids_total_value(self, hazard): a centroids is assigned """ - nz_mask = ( - (self.gdf['value'].values > 0) - & (self.gdf[hazard.centr_exp_col].values >= 0) + nz_mask = (self.gdf["value"].values > 0) & ( + self.gdf[hazard.centr_exp_col].values >= 0 ) - return np.sum(self.gdf['value'].values[nz_mask]) + return np.sum(self.gdf["value"].values[nz_mask]) def affected_total_value( self, @@ -1109,7 +1283,7 @@ def affected_total_value( """ self.assign_centroids(hazard=hazard, overwrite=overwrite_assigned_centroids) assigned_centroids = self.gdf[hazard.centr_exp_col] - nz_mask = (self.gdf['value'].values > 0) & (assigned_centroids.values >= 0) + nz_mask = (self.gdf["value"].values > 0) & (assigned_centroids.values >= 0) cents = np.unique(assigned_centroids[nz_mask]) cent_with_inten_above_thres = ( hazard.intensity[:, cents].max(axis=0) > threshold_affected @@ -1117,7 +1291,7 @@ def affected_total_value( above_thres_mask = np.isin( self.gdf[hazard.centr_exp_col].values, cents[cent_with_inten_above_thres] ) - return np.sum(self.gdf['value'].values[above_thres_mask]) + return np.sum(self.gdf["value"].values[above_thres_mask]) def add_sea(exposures, sea_res, scheduler=None): @@ -1140,15 +1314,18 @@ def add_sea(exposures, sea_res, scheduler=None): ------- Exposures """ - LOGGER.info("Adding sea at %s km resolution and %s km distance from coast.", - str(sea_res[1]), str(sea_res[0])) + LOGGER.info( + "Adding sea at %s km resolution and %s km distance from coast.", + str(sea_res[1]), + str(sea_res[0]), + ) sea_res = (sea_res[0] / ONE_LAT_KM, sea_res[1] / ONE_LAT_KM) - min_lat = max(-90, float(exposures.gdf['latitude'].min()) - sea_res[0]) - max_lat = min(90, float(exposures.gdf['latitude'].max()) + sea_res[0]) - min_lon = max(-180, float(exposures.gdf['longitude'].min()) - sea_res[0]) - max_lon = min(180, float(exposures.gdf['longitude'].max()) + sea_res[0]) + min_lat = max(-90, float(exposures.gdf["latitude"].min()) - sea_res[0]) + max_lat = min(90, float(exposures.gdf["latitude"].max()) + sea_res[0]) + min_lon = max(-180, float(exposures.gdf["longitude"].min()) - sea_res[0]) + max_lon = min(180, float(exposures.gdf["longitude"].max()) + sea_res[0]) lat_arr = np.arange(min_lat, max_lat + sea_res[1], sea_res[1]) lon_arr = np.arange(min_lon, max_lon + sea_res[1], sea_res[1]) @@ -1158,17 +1335,20 @@ def add_sea(exposures, sea_res, scheduler=None): on_land = ~u_coord.coord_on_land(lat_mgrid, lon_mgrid) sea_exp_gdf = GeoDataFrame() - sea_exp_gdf['latitude'] = lat_mgrid[on_land] - sea_exp_gdf['longitude'] = lon_mgrid[on_land] - sea_exp_gdf['region_id'] = np.zeros(sea_exp_gdf['latitude'].size, int) - 1 + sea_exp_gdf["latitude"] = lat_mgrid[on_land] + sea_exp_gdf["longitude"] = lon_mgrid[on_land] + sea_exp_gdf["region_id"] = np.zeros(sea_exp_gdf["latitude"].size, int) - 1 - if 'geometry' in exposures.gdf.columns: - u_coord.set_df_geometry_points(sea_exp_gdf, crs=exposures.crs, scheduler=scheduler) + if "geometry" in exposures.gdf.columns: + u_coord.set_df_geometry_points( + sea_exp_gdf, crs=exposures.crs, scheduler=scheduler + ) for var_name in exposures.gdf.columns: - if var_name not in ('latitude', 'longitude', 'region_id', 'geometry'): - sea_exp_gdf[var_name] = np.zeros(sea_exp_gdf['latitude'].size, - exposures.gdf[var_name].dtype) + if 
var_name not in ("latitude", "longitude", "region_id", "geometry"): + sea_exp_gdf[var_name] = np.zeros( + sea_exp_gdf["latitude"].size, exposures.gdf[var_name].dtype + ) return Exposures( pd.concat([exposures.gdf, sea_exp_gdf], ignore_index=True, sort=False), @@ -1182,41 +1362,46 @@ def add_sea(exposures, sea_res, scheduler=None): def _read_mat_obligatory(exposures, data, var_names): """Fill obligatory variables.""" - exposures['value'] = np.squeeze(data[var_names['var_name']['val']]) + exposures["value"] = np.squeeze(data[var_names["var_name"]["val"]]) - exposures['latitude'] = data[var_names['var_name']['lat']].reshape(-1) - exposures['longitude'] = data[var_names['var_name']['lon']].reshape(-1) + exposures["latitude"] = data[var_names["var_name"]["lat"]].reshape(-1) + exposures["longitude"] = data[var_names["var_name"]["lon"]].reshape(-1) - exposures[INDICATOR_IMPF] = np.squeeze( - data[var_names['var_name']['impf']]).astype(int, copy=False) + exposures[INDICATOR_IMPF] = np.squeeze(data[var_names["var_name"]["impf"]]).astype( + int, copy=False + ) def _read_mat_optional(exposures, data, var_names): """Fill optional parameters.""" try: - exposures['deductible'] = np.squeeze(data[var_names['var_name']['ded']]) + exposures["deductible"] = np.squeeze(data[var_names["var_name"]["ded"]]) except KeyError: pass try: - exposures['cover'] = np.squeeze(data[var_names['var_name']['cov']]) + exposures["cover"] = np.squeeze(data[var_names["var_name"]["cov"]]) except KeyError: pass try: - exposures['category_id'] = \ - np.squeeze(data[var_names['var_name']['cat']]).astype(int, copy=False) + exposures["category_id"] = np.squeeze( + data[var_names["var_name"]["cat"]] + ).astype(int, copy=False) except KeyError: pass try: - exposures['region_id'] = \ - np.squeeze(data[var_names['var_name']['reg']]).astype(int, copy=False) + exposures["region_id"] = np.squeeze(data[var_names["var_name"]["reg"]]).astype( + int, copy=False + ) except KeyError: pass try: - assigned = np.squeeze(data[var_names['var_name']['ass']]).astype(int, copy=False) + assigned = np.squeeze(data[var_names["var_name"]["ass"]]).astype( + int, copy=False + ) if assigned.size > 0: exposures[INDICATOR_CENTR] = assigned except KeyError: @@ -1226,12 +1411,13 @@ def _read_mat_optional(exposures, data, var_names): def _read_mat_metadata(exposures, data, file_name, var_names): """Fill metadata in DataFrame object""" try: - exposures.ref_year = int(np.squeeze(data[var_names['var_name']['ref']])) + exposures.ref_year = int(np.squeeze(data[var_names["var_name"]["ref"]])) except KeyError: exposures.ref_year = DEF_REF_YEAR try: exposures.value_unit = u_hdf5.get_str_from_ref( - file_name, data[var_names['var_name']['uni']][0][0]) + file_name, data[var_names["var_name"]["uni"]][0][0] + ) except KeyError: exposures.value_unit = DEF_VALUE_UNIT diff --git a/climada/entity/exposures/litpop/__init__.py b/climada/entity/exposures/litpop/__init__.py index 322ef3c75..0724ad4b9 100755 --- a/climada/entity/exposures/litpop/__init__.py +++ b/climada/entity/exposures/litpop/__init__.py @@ -18,7 +18,7 @@ init litpop """ -from .litpop import * + from .gpw_population import * +from .litpop import * from .nightlight import * - diff --git a/climada/entity/exposures/litpop/gpw_population.py b/climada/entity/exposures/litpop/gpw_population.py index 51e7d35ae..fbb34b464 100644 --- a/climada/entity/exposures/litpop/gpw_population.py +++ b/climada/entity/exposures/litpop/gpw_population.py @@ -18,19 +18,21 @@ Import data from Global Population of the World (GPW) datasets """ + 
import logging -import rasterio import numpy as np +import rasterio -from climada.util.constants import SYSTEM_DIR from climada import CONFIG +from climada.util.constants import SYSTEM_DIR LOGGER = logging.getLogger(__name__) -def load_gpw_pop_shape(geometry, reference_year, gpw_version, - data_dir=SYSTEM_DIR, layer=0, verbose=True): +def load_gpw_pop_shape( + geometry, reference_year, gpw_version, data_dir=SYSTEM_DIR, layer=0, verbose=True +): """Read gridded population data from TIFF and crop to given shape(s). Note: A (free) NASA Earthdata login is necessary to download the data. @@ -73,22 +75,29 @@ def load_gpw_pop_shape(geometry, reference_year, gpw_version, """ # check whether GPW input file exists and get file path - file_path = get_gpw_file_path(gpw_version, reference_year, data_dir=data_dir, verbose=verbose) + file_path = get_gpw_file_path( + gpw_version, reference_year, data_dir=data_dir, verbose=verbose + ) # open TIFF and extract cropped data from input file: - with rasterio.open(file_path, 'r') as src: + with rasterio.open(file_path, "r") as src: global_transform = src.transform - pop_data, out_transform = rasterio.mask.mask(src, [geometry], crop=True, nodata=0) + pop_data, out_transform = rasterio.mask.mask( + src, [geometry], crop=True, nodata=0 + ) # extract and update meta data for cropped data and close src: meta = src.meta - meta.update({ - "driver": "GTiff", - "height": pop_data.shape[1], - "width": pop_data.shape[2], - "transform": out_transform, - }) - return pop_data[layer,:,:], meta, global_transform + meta.update( + { + "driver": "GTiff", + "height": pop_data.shape[1], + "width": pop_data.shape[2], + "transform": out_transform, + } + ) + return pop_data[layer, :, :], meta, global_transform + def get_gpw_file_path(gpw_version, reference_year, data_dir=None, verbose=True): """Check available GPW population data versions and year closest to @@ -118,24 +127,36 @@ def get_gpw_file_path(gpw_version, reference_year, data_dir=None, verbose=True): data_dir = SYSTEM_DIR # get years available in GPW data from CONFIG and convert to array: - years_available = np.array([ - year.int() for year in CONFIG.exposures.litpop.gpw_population.years_available.list() - ]) + years_available = np.array( + [ + year.int() + for year in CONFIG.exposures.litpop.gpw_population.years_available.list() + ] + ) # find closest year to reference_year with data available: year = years_available[np.abs(years_available - reference_year).argmin()] if verbose and year != reference_year: - LOGGER.warning('Reference year: %i. Using nearest available year for GPW data: %i', - reference_year, year) + LOGGER.warning( + "Reference year: %i. 
Using nearest available year for GPW data: %i", + reference_year, + year, + ) # check if file is available for given GPW version, construct GPW file path from CONFIG: # if available, return full path to file: - gpw_dirname = CONFIG.exposures.litpop.gpw_population.dirname_gpw.str() % (gpw_version, year) - gpw_filename = CONFIG.exposures.litpop.gpw_population.filename_gpw.str() % (gpw_version, year) + gpw_dirname = CONFIG.exposures.litpop.gpw_population.dirname_gpw.str() % ( + gpw_version, + year, + ) + gpw_filename = CONFIG.exposures.litpop.gpw_population.filename_gpw.str() % ( + gpw_version, + year, + ) for file_path in [data_dir / gpw_filename, data_dir / gpw_dirname / gpw_filename]: if file_path.is_file(): if verbose: - LOGGER.info('GPW Version v4.%2i', gpw_version) + LOGGER.info("GPW Version v4.%2i", gpw_version) return file_path # if the file was not found, an exception is raised with instructions on how to obtain it @@ -146,7 +167,7 @@ def get_gpw_file_path(gpw_version, reference_year, data_dir=None, verbose=True): f"{gpw_dirname}.zip" ) raise FileNotFoundError( - f'The file {file_path} could not be found. Please download the file first or choose a' - f' different folder. The data can be downloaded from {sedac_browse_url}, e.g.,' - f' {sedac_file_url} (Free NASA Earthdata login required).' + f"The file {file_path} could not be found. Please download the file first or choose a" + f" different folder. The data can be downloaded from {sedac_browse_url}, e.g.," + f" {sedac_file_url} (Free NASA Earthdata login required)." ) diff --git a/climada/entity/exposures/litpop/litpop.py b/climada/entity/exposures/litpop/litpop.py index 7fe2047bd..372e58533 100644 --- a/climada/entity/exposures/litpop/litpop.py +++ b/climada/entity/exposures/litpop/litpop.py @@ -17,28 +17,31 @@ --- Define LitPop class. """ + import logging from pathlib import Path + +import geopandas import numpy as np +import pandas as pd import rasterio -import geopandas -from shapefile import Shape import shapely -import pandas as pd +from shapefile import Shape import climada.util.coordinates as u_coord import climada.util.finance as u_fin - -from climada.entity.exposures.litpop import nightlight as nl_util +from climada import CONFIG +from climada.entity.exposures.base import DEF_REF_YEAR, INDICATOR_IMPF, Exposures from climada.entity.exposures.litpop import gpw_population as pop_util -from climada.entity.exposures.base import Exposures, INDICATOR_IMPF, DEF_REF_YEAR +from climada.entity.exposures.litpop import nightlight as nl_util from climada.util.constants import SYSTEM_DIR -from climada import CONFIG + LOGGER = logging.getLogger(__name__) GPW_VERSION = CONFIG.exposures.litpop.gpw_population.gpw_version.int() """Version of Gridded Population of the World (GPW) input data. Check for updates.""" + class LitPop(Exposures): """ Holds geopandas GeoDataFrame with metada and columns (pd.Series) defined in @@ -65,19 +68,30 @@ class LitPop(Exposures): Version number of GPW population data, e.g. 11 for v4.11. The default is defined in GPW_VERSION. """ - _metadata = Exposures._metadata + ['exponents', 'fin_mode', 'gpw_version'] + + _metadata = Exposures._metadata + ["exponents", "fin_mode", "gpw_version"] def set_countries(self, *args, **kwargs): """This function is deprecated, use LitPop.from_countries instead.""" - LOGGER.warning("The use of LitPop.set_countries is deprecated." - "Use LitPop.from_countries instead.") + LOGGER.warning( + "The use of LitPop.set_countries is deprecated." + "Use LitPop.from_countries instead." 
+ ) self.__dict__ = LitPop.from_countries(*args, **kwargs).__dict__ @classmethod - def from_countries(cls, countries, res_arcsec=30, exponents=(1,1), - fin_mode='pc', total_values=None, admin1_calc=False, - reference_year=DEF_REF_YEAR, gpw_version=GPW_VERSION, - data_dir=SYSTEM_DIR): + def from_countries( + cls, + countries, + res_arcsec=30, + exponents=(1, 1), + fin_mode="pc", + total_values=None, + admin1_calc=False, + reference_year=DEF_REF_YEAR, + gpw_version=GPW_VERSION, + data_dir=SYSTEM_DIR, + ): """Init new LitPop exposure object for a list of countries (admin 0). Sets attributes `ref_year`, `crs`, `value`, `geometry`, `meta`, @@ -138,53 +152,74 @@ def from_countries(cls, countries, res_arcsec=30, exponents=(1,1), LitPop instance with exposure for given countries """ if isinstance(countries, (int, str)): - countries = [countries] # for backward compatibility + countries = [countries] # for backward compatibility - if total_values is None: # init list with total values per countries + if total_values is None: # init list with total values per countries total_values = [None] * len(countries) elif len(total_values) != len(countries): - raise ValueError("'countries' and 'total_values' must be lists of same length") + raise ValueError( + "'countries' and 'total_values' must be lists of same length" + ) # litpop_list is initiated, a list containing one Exposure instance per # country and None for countries that could not be identified: - if admin1_calc: # each admin 1 region is initiated separately, - # with total value share based on subnational GDP share. - # This requires GRP (Gross Regional Product) data in the - # GSDP data folder. - litpop_list = [_calc_admin1_one_country(country, res_arcsec, exponents, - fin_mode, tot_value, reference_year, - gpw_version, data_dir, - ) - for tot_value, country in zip(total_values, countries)] - - else: # else, as default, country is initiated as a whole: + if admin1_calc: # each admin 1 region is initiated separately, + # with total value share based on subnational GDP share. + # This requires GRP (Gross Regional Product) data in the + # GSDP data folder. + litpop_list = [ + _calc_admin1_one_country( + country, + res_arcsec, + exponents, + fin_mode, + tot_value, + reference_year, + gpw_version, + data_dir, + ) + for tot_value, country in zip(total_values, countries) + ] + + else: # else, as default, country is initiated as a whole: # loop over countries: litpop is initiated for each individual polygon # within each country and combined at the end. 
- litpop_list = \ - [cls._from_country(country, - res_arcsec=res_arcsec, - exponents=exponents, - fin_mode=fin_mode, - total_value=total_values[idc], - reference_year=reference_year, - gpw_version=gpw_version, - data_dir=data_dir) - for idc, country in enumerate(countries)] + litpop_list = [ + cls._from_country( + country, + res_arcsec=res_arcsec, + exponents=exponents, + fin_mode=fin_mode, + total_value=total_values[idc], + reference_year=reference_year, + gpw_version=gpw_version, + data_dir=data_dir, + ) + for idc, country in enumerate(countries) + ] # make lists of countries with Exposure initaited and those ignored: - countries_in = \ - [country for lp, country in zip(litpop_list, countries) if lp is not None] - countries_out = \ - [country for lp, country in zip(litpop_list, countries) if lp is None] + countries_in = [ + country for lp, country in zip(litpop_list, countries) if lp is not None + ] + countries_out = [ + country for lp, country in zip(litpop_list, countries) if lp is None + ] if not countries_in: - raise ValueError('No valid country identified in %s, aborting.' % countries) + raise ValueError("No valid country identified in %s, aborting." % countries) litpop_list = [exp for exp in litpop_list if exp is not None] if countries_out: - LOGGER.warning('Some countries could not be identified and are ignored: ' - '%s. Litpop only initiated for: %s', countries_out, countries_in) + LOGGER.warning( + "Some countries could not be identified and are ignored: " + "%s. Litpop only initiated for: %s", + countries_out, + countries_in, + ) - description = (f'LitPop Exposure for {countries_in} at {res_arcsec} as,' - f' year: {reference_year}, financial mode: {fin_mode},' - f' exp: {exponents}, admin1_calc: {admin1_calc}') + description = ( + f"LitPop Exposure for {countries_in} at {res_arcsec} as," + f" year: {reference_year}, financial mode: {fin_mode}," + f" exp: {exponents}, admin1_calc: {admin1_calc}" + ) exp = cls( data=Exposures.concat(litpop_list).gdf, @@ -194,36 +229,51 @@ def from_countries(cls, countries, res_arcsec=30, exponents=(1,1), exponents=exponents, gpw_version=gpw_version, fin_mode=fin_mode, - description=description + description=description, ) try: rows, cols, ras_trans = u_coord.pts_to_raster_meta( - (exp.gdf['longitude'].min(), exp.gdf['latitude'].min(), - exp.gdf['longitude'].max(), exp.gdf['latitude'].max()), - u_coord.get_resolution(exp.gdf['longitude'], exp.gdf['latitude'])) + ( + exp.gdf["longitude"].min(), + exp.gdf["latitude"].min(), + exp.gdf["longitude"].max(), + exp.gdf["latitude"].max(), + ), + u_coord.get_resolution(exp.gdf["longitude"], exp.gdf["latitude"]), + ) exp.meta = { - 'width': cols, - 'height': rows, - 'crs': exp.crs, - 'transform': ras_trans, + "width": cols, + "height": rows, + "crs": exp.crs, + "transform": ras_trans, } except ValueError: - LOGGER.warning('Could not write attribute meta, because exposure' - ' has only 1 data point') - exp.meta = {'crs': exp.crs} + LOGGER.warning( + "Could not write attribute meta, because exposure" + " has only 1 data point" + ) + exp.meta = {"crs": exp.crs} exp.check() return exp def set_nightlight_intensity(self, *args, **kwargs): """This function is deprecated, use LitPop.from_nightlight_intensity instead.""" - LOGGER.warning("The use of LitPop.set_nightlight_intensity is deprecated." - "Use LitPop.from_nightlight_intensity instead.") + LOGGER.warning( + "The use of LitPop.set_nightlight_intensity is deprecated." + "Use LitPop.from_nightlight_intensity instead." 
+ ) self.__dict__ = LitPop.from_nightlight_intensity(*args, **kwargs).__dict__ @classmethod - def from_nightlight_intensity(cls, countries=None, shape=None, res_arcsec=15, - reference_year=DEF_REF_YEAR, data_dir=SYSTEM_DIR): + def from_nightlight_intensity( + cls, + countries=None, + shape=None, + res_arcsec=15, + reference_year=DEF_REF_YEAR, + data_dir=SYSTEM_DIR, + ): """ Wrapper around `from_countries` / `from_shape`. @@ -258,33 +308,56 @@ def from_nightlight_intensity(cls, countries=None, shape=None, res_arcsec=15, if countries is None and shape is None: raise ValueError("Either `countries` or `shape` required. Aborting.") if countries is not None and shape is not None: - raise ValueError("Not allowed to set both `countries` and `shape`. Aborting.") + raise ValueError( + "Not allowed to set both `countries` and `shape`. Aborting." + ) if countries is not None: - exp = cls.from_countries(countries, res_arcsec=res_arcsec, - exponents=(1,0), fin_mode='none', - reference_year=reference_year, gpw_version=GPW_VERSION, - data_dir=data_dir) + exp = cls.from_countries( + countries, + res_arcsec=res_arcsec, + exponents=(1, 0), + fin_mode="none", + reference_year=reference_year, + gpw_version=GPW_VERSION, + data_dir=data_dir, + ) else: - exp = cls.from_shape(shape, None, res_arcsec=res_arcsec, - exponents=(1,0), value_unit='', - reference_year=reference_year, - gpw_version=GPW_VERSION, data_dir=SYSTEM_DIR) - LOGGER.warning("Note: set_nightlight_intensity sets values to raw nightlight intensity, " - "not to USD. " - "To disaggregate asset value proportionally to nightlights^m, " - "call from_countries or from_shape with exponents=(m,0).") + exp = cls.from_shape( + shape, + None, + res_arcsec=res_arcsec, + exponents=(1, 0), + value_unit="", + reference_year=reference_year, + gpw_version=GPW_VERSION, + data_dir=SYSTEM_DIR, + ) + LOGGER.warning( + "Note: set_nightlight_intensity sets values to raw nightlight intensity, " + "not to USD. " + "To disaggregate asset value proportionally to nightlights^m, " + "call from_countries or from_shape with exponents=(m,0)." + ) return exp def set_population(self, *args, **kwargs): """This function is deprecated, use LitPop.from_population instead.""" - LOGGER.warning("The use of LitPop.set_population is deprecated." - "Use LitPop.from_population instead.") + LOGGER.warning( + "The use of LitPop.set_population is deprecated." + "Use LitPop.from_population instead." + ) self.__dict__ = LitPop.from_population(*args, **kwargs).__dict__ @classmethod - def from_population(cls, countries=None, shape=None, res_arcsec=30, - reference_year=DEF_REF_YEAR, gpw_version=GPW_VERSION, - data_dir=SYSTEM_DIR): + def from_population( + cls, + countries=None, + shape=None, + res_arcsec=30, + reference_year=DEF_REF_YEAR, + gpw_version=GPW_VERSION, + data_dir=SYSTEM_DIR, + ): """ Wrapper around `from_countries` / `from_shape`. @@ -321,28 +394,53 @@ def from_population(cls, countries=None, shape=None, res_arcsec=30, if countries is None and shape is None: raise ValueError("Either `countries` or `shape` required. Aborting.") if countries is not None and shape is not None: - raise ValueError("Not allowed to set both `countries` and `shape`. Aborting.") + raise ValueError( + "Not allowed to set both `countries` and `shape`. Aborting." 
+ ) if countries is not None: - exp = cls.from_countries(countries, res_arcsec=res_arcsec, - exponents=(0,1), fin_mode='pop', - reference_year=reference_year, gpw_version=gpw_version, - data_dir=data_dir) + exp = cls.from_countries( + countries, + res_arcsec=res_arcsec, + exponents=(0, 1), + fin_mode="pop", + reference_year=reference_year, + gpw_version=gpw_version, + data_dir=data_dir, + ) else: - exp = cls.from_shape(shape, None, res_arcsec=res_arcsec, exponents=(0,1), - value_unit='people', reference_year=reference_year, - gpw_version=gpw_version, data_dir=data_dir) + exp = cls.from_shape( + shape, + None, + res_arcsec=res_arcsec, + exponents=(0, 1), + value_unit="people", + reference_year=reference_year, + gpw_version=gpw_version, + data_dir=data_dir, + ) return exp def set_custom_shape_from_countries(self, *args, **kwargs): """This function is deprecated, use LitPop.from_shape_and_countries instead.""" - LOGGER.warning("The use of LitPop.set_custom_shape_from_countries is deprecated." - "Use LitPop.from_shape_and_countries instead.") + LOGGER.warning( + "The use of LitPop.set_custom_shape_from_countries is deprecated." + "Use LitPop.from_shape_and_countries instead." + ) self.__dict__ = LitPop.from_shape_and_countries(*args, **kwargs).__dict__ @classmethod - def from_shape_and_countries(cls, shape, countries, res_arcsec=30, exponents=(1,1), - fin_mode='pc', admin1_calc=False, reference_year=DEF_REF_YEAR, - gpw_version=GPW_VERSION, data_dir=SYSTEM_DIR): + def from_shape_and_countries( + cls, + shape, + countries, + res_arcsec=30, + exponents=(1, 1), + fin_mode="pc", + admin1_calc=False, + reference_year=DEF_REF_YEAR, + gpw_version=GPW_VERSION, + data_dir=SYSTEM_DIR, + ): """ create LitPop exposure for `country` and then crop to given shape. @@ -398,62 +496,89 @@ def from_shape_and_countries(cls, shape, countries, res_arcsec=30, exponents=(1, The exposure LitPop within shape """ # init countries' exposure: - exp = cls.from_countries(countries, res_arcsec=res_arcsec, exponents=exponents, - fin_mode=fin_mode, reference_year=reference_year, - gpw_version=gpw_version, data_dir=data_dir) + exp = cls.from_countries( + countries, + res_arcsec=res_arcsec, + exponents=exponents, + fin_mode=fin_mode, + reference_year=reference_year, + gpw_version=gpw_version, + data_dir=data_dir, + ) if isinstance(shape, Shape): # get gdf with geometries of points within shape: - shape_gdf, _ = _get_litpop_single_polygon(shape, reference_year, - res_arcsec, data_dir, - gpw_version, exponents, - ) + shape_gdf, _ = _get_litpop_single_polygon( + shape, + reference_year, + res_arcsec, + data_dir, + gpw_version, + exponents, + ) shape_gdf = shape_gdf.drop( - columns=shape_gdf.columns[shape_gdf.columns != 'geometry']) + columns=shape_gdf.columns[shape_gdf.columns != "geometry"] + ) # extract gdf with data points within shape: - gdf = geopandas.sjoin(exp.gdf, shape_gdf, how='right') - gdf = gdf.drop(columns=['index_left']) - elif isinstance(shape, (shapely.geometry.MultiPolygon, shapely.geometry.Polygon)): + gdf = geopandas.sjoin(exp.gdf, shape_gdf, how="right") + gdf = gdf.drop(columns=["index_left"]) + elif isinstance( + shape, (shapely.geometry.MultiPolygon, shapely.geometry.Polygon) + ): # works if shape is Polygon or MultiPolygon gdf = exp.gdf.loc[exp.gdf.geometry.within(shape)] elif isinstance(shape, (geopandas.GeoSeries, list)): gdf = geopandas.GeoDataFrame(columns=exp.gdf.columns) for shp in shape: - if isinstance(shp, (shapely.geometry.MultiPolygon, - shapely.geometry.Polygon)): + if isinstance( + shp, 
(shapely.geometry.MultiPolygon, shapely.geometry.Polygon) + ): gdf = gdf.append(exp.gdf.loc[exp.gdf.geometry.within(shp)]) else: - raise NotImplementedError('Not implemented for list or GeoSeries containing ' - f'objects of type {type(shp)} as `shape`') + raise NotImplementedError( + "Not implemented for list or GeoSeries containing " + f"objects of type {type(shp)} as `shape`" + ) else: - raise NotImplementedError('Not implemented for `shape` of type {type(shape)}') + raise NotImplementedError( + "Not implemented for `shape` of type {type(shape)}" + ) - exp.description = (f'LitPop Exposure for custom shape in {countries} at' - f' {res_arcsec} as, year: {reference_year}, financial mode:' - f' {fin_mode}, exp: {exponents}, admin1_calc: {admin1_calc}') + exp.description = ( + f"LitPop Exposure for custom shape in {countries} at" + f" {res_arcsec} as, year: {reference_year}, financial mode:" + f" {fin_mode}, exp: {exponents}, admin1_calc: {admin1_calc}" + ) exp.set_gdf(gdf.reset_index()) try: rows, cols, ras_trans = u_coord.pts_to_raster_meta( - (exp.gdf['longitude'].min(), exp.gdf['latitude'].min(), - exp.gdf['longitude'].max(), exp.gdf['latitude'].max()), - u_coord.get_resolution(exp.gdf['longitude'], exp.gdf['latitude'])) + ( + exp.gdf["longitude"].min(), + exp.gdf["latitude"].min(), + exp.gdf["longitude"].max(), + exp.gdf["latitude"].max(), + ), + u_coord.get_resolution(exp.gdf["longitude"], exp.gdf["latitude"]), + ) exp.meta = { - 'width': cols, - 'height': rows, - 'crs': exp.crs, - 'transform': ras_trans, + "width": cols, + "height": rows, + "crs": exp.crs, + "transform": ras_trans, } except ValueError as err: - LOGGER.warning('Could not write attribute meta with ValueError: ') + LOGGER.warning("Could not write attribute meta with ValueError: ") LOGGER.warning(err.args[0]) - exp.meta = {'crs': exp.crs} + exp.meta = {"crs": exp.crs} return exp def set_custom_shape(self, *args, **kwargs): """This function is deprecated, use LitPop.from_shape instead.""" - LOGGER.warning("The use of LitPop.set_custom_shape is deprecated." - "Use LitPop.from_shape instead.") + LOGGER.warning( + "The use of LitPop.set_custom_shape is deprecated." + "Use LitPop.from_shape instead." + ) self.__dict__ = LitPop.from_shape(*args, **kwargs).__dict__ @classmethod @@ -462,8 +587,8 @@ def from_shape( shape, total_value, res_arcsec=30, - exponents=(1,1), - value_unit='USD', + exponents=(1, 1), + value_unit="USD", region_id=None, reference_year=DEF_REF_YEAR, gpw_version=GPW_VERSION, @@ -521,8 +646,10 @@ def from_shape( The exposure LitPop within shape """ if isinstance(shape, (geopandas.GeoSeries, list)): - raise NotImplementedError('Not implemented for `shape` of type list or ' - 'GeoSeries. Loop over elements of series outside method.') + raise NotImplementedError( + "Not implemented for `shape` of type list or " + "GeoSeries. Loop over elements of series outside method." 
+ ) litpop_gdf, _ = _get_litpop_single_polygon( shape, @@ -531,18 +658,21 @@ def from_shape( data_dir, gpw_version, exponents, - region_id + region_id, ) # disaggregate total value proportional to LitPop values: if isinstance(total_value, (float, int)): - litpop_gdf['value'] = np.divide(litpop_gdf['value'], - litpop_gdf['value'].sum()) * total_value + litpop_gdf["value"] = ( + np.divide(litpop_gdf["value"], litpop_gdf["value"].sum()) * total_value + ) elif total_value is not None: raise TypeError("total_value must be int, float or None.") - description = (f'LitPop Exposure for custom shape at {res_arcsec} as,' - f' year: {reference_year}, exp: {exponents}') + description = ( + f"LitPop Exposure for custom shape at {res_arcsec} as," + f" year: {reference_year}, exp: {exponents}" + ) litpop_gdf[INDICATOR_IMPF] = 1 @@ -554,31 +684,48 @@ def from_shape( exponents=exponents, gpw_version=gpw_version, fin_mode=None, - description=description + description=description, ) - if min(len(exp.gdf['latitude'].unique()), len(exp.gdf['longitude'].unique())) > 1: - #if exp.gdf.shape[0] > 1 and len(exp.gdf.latitude.unique()) > 1: + if ( + min(len(exp.gdf["latitude"].unique()), len(exp.gdf["longitude"].unique())) + > 1 + ): + # if exp.gdf.shape[0] > 1 and len(exp.gdf.latitude.unique()) > 1: rows, cols, ras_trans = u_coord.pts_to_raster_meta( - (exp.gdf['longitude'].min(), exp.gdf['latitude'].min(), - exp.gdf['longitude'].max(), exp.gdf['latitude'].max()), - u_coord.get_resolution(exp.gdf['longitude'], exp.gdf['latitude'])) + ( + exp.gdf["longitude"].min(), + exp.gdf["latitude"].min(), + exp.gdf["longitude"].max(), + exp.gdf["latitude"].max(), + ), + u_coord.get_resolution(exp.gdf["longitude"], exp.gdf["latitude"]), + ) exp.meta = { - 'width': cols, - 'height': rows, - 'crs': exp.crs, - 'transform': ras_trans, + "width": cols, + "height": rows, + "crs": exp.crs, + "transform": ras_trans, } else: - LOGGER.warning('Could not write attribute meta because coordinates' - ' are either only one point or do not extend in lat and lon') - exp.meta = {'crs': exp.crs} + LOGGER.warning( + "Could not write attribute meta because coordinates" + " are either only one point or do not extend in lat and lon" + ) + exp.meta = {"crs": exp.crs} return exp @staticmethod - def _from_country(country, res_arcsec=30, exponents=(1,1), fin_mode=None, - total_value=None, reference_year=DEF_REF_YEAR, - gpw_version=GPW_VERSION, data_dir=SYSTEM_DIR): + def _from_country( + country, + res_arcsec=30, + exponents=(1, 1), + fin_mode=None, + total_value=None, + reference_year=DEF_REF_YEAR, + gpw_version=GPW_VERSION, + data_dir=SYSTEM_DIR, + ): """init LitPop exposure object for one single country See docstring of from_countries() for detailled description of parameters. 
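For orientation, a typical call to the `from_shape` classmethod assembled above looks as follows. This is an illustrative sketch only: the polygon coordinates and the total value are made up, and actually running it needs the GPW population and NASA BlackMarble input data available locally.

    from shapely.geometry import Polygon

    from climada.entity import LitPop

    # made-up lon/lat polygon and placeholder total value
    shape = Polygon([(8.0, 47.2), (8.8, 47.2), (8.8, 47.6), (8.0, 47.6)])
    exp = LitPop.from_shape(
        shape,
        total_value=1e9,  # disaggregated proportionally to Lit^1 * Pop^1
        res_arcsec=30,
        exponents=(1, 1),
    )
    print(exp.gdf["value"].sum())  # ~1e9, up to floating point error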
@@ -610,14 +757,13 @@ def _from_country(country, res_arcsec=30, exponents=(1,1), fin_mode=None, iso3a = u_coord.country_to_iso(country, representation="alpha3") iso3n = u_coord.country_to_iso(country, representation="numeric") except LookupError: - LOGGER.error('Country not identified: %s.', country) + LOGGER.error("Country not identified: %s.", country) return None country_geometry = u_coord.get_land_geometry([iso3a]) - if not country_geometry.bounds: # check for empty shape - LOGGER.error('No geometry found for country: %s.', country) + if not country_geometry.bounds: # check for empty shape + LOGGER.error("No geometry found for country: %s.", country) return None - LOGGER.info('\n LitPop: Init Exposure for country: %s (%i)...\n', - iso3a, iso3n) + LOGGER.info("\n LitPop: Init Exposure for country: %s (%i)...\n", iso3a, iso3n) litpop_gdf = geopandas.GeoDataFrame() total_population = 0 @@ -627,31 +773,39 @@ def _from_country(country, res_arcsec=30, exponents=(1,1), fin_mode=None, # loop over single polygons in country shape object: for idx, polygon in enumerate(country_geometry.geoms): # get litpop data for each polygon and combine into GeoDataFrame: - gdf_tmp, meta_tmp, = \ - _get_litpop_single_polygon(polygon, reference_year, - res_arcsec, data_dir, - gpw_version, exponents, - verbose=(idx > 0), - region_id=iso3n - ) + ( + gdf_tmp, + meta_tmp, + ) = _get_litpop_single_polygon( + polygon, + reference_year, + res_arcsec, + data_dir, + gpw_version, + exponents, + verbose=(idx > 0), + region_id=iso3n, + ) if gdf_tmp is None: - LOGGER.debug(f'Skipping polygon with index {idx} for' + - f' country {iso3a}.') + LOGGER.debug( + f"Skipping polygon with index {idx} for" + f" country {iso3a}." + ) continue - total_population += meta_tmp['total_population'] + total_population += meta_tmp["total_population"] litpop_gdf = pd.concat([litpop_gdf, gdf_tmp]) - litpop_gdf.crs = meta_tmp['crs'] + litpop_gdf.crs = meta_tmp["crs"] # set total value for disaggregation if not provided: - if total_value is None and fin_mode == 'pop': - total_value = total_population # population count is taken from pop-data. + if total_value is None and fin_mode == "pop": + total_value = total_population # population count is taken from pop-data. elif total_value is None: total_value = _get_total_value_per_country(iso3a, fin_mode, reference_year) # disaggregate total value proportional to LitPop values: if isinstance(total_value, (float, int)): - litpop_gdf['value'] = np.divide(litpop_gdf['value'], - litpop_gdf['value'].sum()) * total_value + litpop_gdf["value"] = ( + np.divide(litpop_gdf["value"], litpop_gdf["value"].sum()) * total_value + ) elif total_value is not None: raise TypeError("total_value must be int or float.") @@ -663,8 +817,17 @@ def _from_country(country, res_arcsec=30, exponents=(1,1), fin_mode=None, # Alias method names for backward compatibility: set_country = set_countries -def _get_litpop_single_polygon(polygon, reference_year, res_arcsec, data_dir, - gpw_version, exponents, region_id=None, verbose=False): + +def _get_litpop_single_polygon( + polygon, + reference_year, + res_arcsec, + data_dir, + gpw_version, + exponents, + region_id=None, + verbose=False, +): """load nightlight (nl) and population (pop) data in rastered 2d arrays and apply rescaling (resolution reprojection) and LitPop core calculation, i.e. combination of nl and pop per grid cell. 
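The core combination referred to in this docstring can be written down in a few lines of plain numpy. This is a minimal sketch with made-up arrays, not the CLIMADA implementation (the actual computation is `gridpoints_core_calc`, followed by masking of cells outside the polygon):

    import numpy as np

    nlight = np.array([[0.0, 10.0], [5.0, 20.0]])  # nightlight intensity per cell (made up)
    pop = np.array([[100.0, 0.0], [50.0, 200.0]])  # population count per cell (made up)
    offsets = (1, 0)    # offsets added before exponentiation
    exponents = (1, 1)  # (m, n) in Lit^m * Pop^n

    # LitPop weight per grid cell: (nl + off_nl)**m * (pop + off_pop)**n
    litpop = (nlight + offsets[0]) ** exponents[0] * (pop + offsets[1]) ** exponents[1]

    # disaggregate a given total value proportionally to these weights:
    total_value = 1.0e6
    values = litpop / litpop.sum() * total_value
    print(values.sum())  # ~1e6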
@@ -714,80 +877,84 @@ def _get_litpop_single_polygon(polygon, reference_year, res_arcsec, data_dir, offsets = (1, 0) # import population data (2d array), meta data, and global grid info, # global_transform defines the origin (corner points) of the global traget grid: - pop, meta_pop, global_transform = \ - pop_util.load_gpw_pop_shape(polygon, - reference_year, - gpw_version=gpw_version, - data_dir=data_dir, - verbose=verbose, - ) + pop, meta_pop, global_transform = pop_util.load_gpw_pop_shape( + polygon, + reference_year, + gpw_version=gpw_version, + data_dir=data_dir, + verbose=verbose, + ) total_population = pop.sum() # import nightlight data (2d array) and associated meta data: - nlight, meta_nl = nl_util.load_nasa_nl_shape(polygon, - reference_year, - data_dir=data_dir, - dtype=float - ) + nlight, meta_nl = nl_util.load_nasa_nl_shape( + polygon, reference_year, data_dir=data_dir, dtype=float + ) # if resolution is the same as for lit (15 arcsec), set grid same as lit: - if res_arcsec==15: + if res_arcsec == 15: i_align = 1 - global_origins = (meta_nl['transform'][2], # lon - meta_nl['transform'][5]) # lat - else: # align grid for resampling to grid of population data (pop) + global_origins = ( + meta_nl["transform"][2], # lon + meta_nl["transform"][5], + ) # lat + else: # align grid for resampling to grid of population data (pop) i_align = 0 - global_origins=(global_transform[2], - global_transform[5]) + global_origins = (global_transform[2], global_transform[5]) # reproject Lit and Pop input data to aligned grid with target resolution: try: - [pop, nlight], meta_out = reproject_input_data([pop, nlight], - [meta_pop, meta_nl], - i_align=i_align, # pop defines grid - target_res_arcsec=res_arcsec, - global_origins=global_origins, - ) + [pop, nlight], meta_out = reproject_input_data( + [pop, nlight], + [meta_pop, meta_nl], + i_align=i_align, # pop defines grid + target_res_arcsec=res_arcsec, + global_origins=global_origins, + ) except ValueError as err: - if ("height must be > 0" in str(err) or "width must be > 0" in str(err) # rasterio 1.2 - or "Invalid dataset dimensions :" in str(err)): # rasterio 1.3 + if ( + "height must be > 0" in str(err) + or "width must be > 0" in str(err) # rasterio 1.2 + or "Invalid dataset dimensions :" in str(err) + ): # rasterio 1.3 # no grid point within shape after reprojection, None is returned. 
if verbose: - LOGGER.info('No data point on destination grid within polygon.') - return None, {'crs': meta_pop['crs']} + LOGGER.info("No data point on destination grid within polygon.") + return None, {"crs": meta_pop["crs"]} raise err # calculate Lit^m * Pop^n (but not yet disaggregate any total value to grid): - litpop_array = gridpoints_core_calc([nlight, pop], - offsets=offsets, - exponents=exponents, - total_val_rescale=None) + litpop_array = gridpoints_core_calc( + [nlight, pop], offsets=offsets, exponents=exponents, total_val_rescale=None + ) # mask entries outside polygon (set to NaN) and set total population: - litpop_array = u_coord.mask_raster_with_geometry(litpop_array, meta_out['transform'], - [polygon], nodata=np.nan) - meta_out['total_population'] = total_population + litpop_array = u_coord.mask_raster_with_geometry( + litpop_array, meta_out["transform"], [polygon], nodata=np.nan + ) + meta_out["total_population"] = total_population # extract coordinates as meshgrid arrays: - lon, lat = u_coord.raster_to_meshgrid(meta_out['transform'], - meta_out['width'], - meta_out['height']) + lon, lat = u_coord.raster_to_meshgrid( + meta_out["transform"], meta_out["width"], meta_out["height"] + ) # init GeoDataFrame from data and coordinates: latitude = np.round(lat.flatten(), decimals=8) longitude = np.round(lon.flatten(), decimals=8) gdf = geopandas.GeoDataFrame( {"value": litpop_array.flatten(), "latitude": latitude, "longitude": longitude}, - crs=meta_out['crs'], + crs=meta_out["crs"], geometry=geopandas.points_from_xy(longitude, latitude), ) - if region_id is not None: # set region_id - gdf['region_id'] = region_id + if region_id is not None: # set region_id + gdf["region_id"] = region_id else: - gdf['region_id'] = u_coord.get_country_code( - gdf['latitude'], gdf['longitude'], gridded=True + gdf["region_id"] = u_coord.get_country_code( + gdf["latitude"], gdf["longitude"], gridded=True ) # remove entries outside polygon with `dropna` and return GeoDataFrame: return gdf.dropna(), meta_out + def get_value_unit(fin_mode): """get `value_unit` depending on `fin_mode` @@ -800,11 +967,12 @@ def get_value_unit(fin_mode): value_unit : str """ - if fin_mode in ['none', 'norm']: - return '' - if fin_mode == 'pop': - return 'people' - return 'USD' + if fin_mode in ["none", "norm"]: + return "" + if fin_mode == "pop": + return "people" + return "USD" + def _get_total_value_per_country(cntry_iso3a, fin_mode, reference_year): """ @@ -840,31 +1008,36 @@ def _get_total_value_per_country(cntry_iso3a, fin_mode, reference_year): ------- total_value : float """ - if fin_mode == 'none': + if fin_mode == "none": return None - if fin_mode == 'pop': - raise NotImplementedError("`_get_total_value_per_country` is not " - "implemented for `fin_mode` == 'pop'.") - if fin_mode == 'pc': - return(u_fin.world_bank_wealth_account(cntry_iso3a, reference_year, - no_land=True)[1]) + if fin_mode == "pop": + raise NotImplementedError( + "`_get_total_value_per_country` is not " + "implemented for `fin_mode` == 'pop'." 
+ ) + if fin_mode == "pc": + return u_fin.world_bank_wealth_account( + cntry_iso3a, reference_year, no_land=True + )[1] # here, total_asset_val is Produced Capital "pc" # no_land=True returns value w/o the mark-up of 24% for land value - if fin_mode == 'pc_land': - return(u_fin.world_bank_wealth_account(cntry_iso3a, reference_year, - no_land=False)[1]) + if fin_mode == "pc_land": + return u_fin.world_bank_wealth_account( + cntry_iso3a, reference_year, no_land=False + )[1] # no_land=False returns pc value incl. the mark-up of 24% for land value - if fin_mode == 'norm': + if fin_mode == "norm": return 1 # GDP based total values: gdp_value = u_fin.gdp(cntry_iso3a, reference_year)[1] - if fin_mode == 'gdp': + if fin_mode == "gdp": return gdp_value - if fin_mode == 'income_group': # gdp * (income group + 1) - return gdp_value*(u_fin.income_group(cntry_iso3a, reference_year)[1]+1) - if fin_mode in ('nfw', 'tw'): - wealthtogdp_factor = u_fin.wealth2gdp(cntry_iso3a, fin_mode == 'nfw', - reference_year)[1] + if fin_mode == "income_group": # gdp * (income group + 1) + return gdp_value * (u_fin.income_group(cntry_iso3a, reference_year)[1] + 1) + if fin_mode in ("nfw", "tw"): + wealthtogdp_factor = u_fin.wealth2gdp( + cntry_iso3a, fin_mode == "nfw", reference_year + )[1] if np.isnan(wealthtogdp_factor): LOGGER.warning("Missing wealth-to-gdp factor for country %s.", cntry_iso3a) LOGGER.warning("Using GDP instead as total value.") @@ -872,12 +1045,16 @@ def _get_total_value_per_country(cntry_iso3a, fin_mode, reference_year): return gdp_value * wealthtogdp_factor raise ValueError(f"Unsupported fin_mode: {fin_mode}") -def reproject_input_data(data_array_list, meta_list, - i_align=0, - target_res_arcsec=None, - global_origins=(-180.0, 89.99999999999991), - resampling=rasterio.warp.Resampling.bilinear, - conserve=None): + +def reproject_input_data( + data_array_list, + meta_list, + i_align=0, + target_res_arcsec=None, + global_origins=(-180.0, 89.99999999999991), + resampling=rasterio.warp.Resampling.bilinear, + conserve=None, +): """ LitPop-sepcific wrapper around u_coord.align_raster_data. 
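A quick plausibility check of the resolution handling in `reproject_input_data`, in plain Python: the target resolution is passed in arc seconds, converted to degrees, and the reference layer is left untouched when its grid spacing already matches after rounding. The reference resolution below is a made-up value standing in for `transform[0]` of the reference grid.

    import numpy as np

    target_res_arcsec = 30
    res_degree = target_res_arcsec / 3600  # 30 arcsec == 1/120 degree
    reference_res_degree = 0.008333333     # made-up grid spacing of the reference layer

    skip_reprojection = np.round(reference_res_degree, decimals=7) == np.round(
        res_degree, decimals=7
    )
    print(res_degree, skip_reprojection)  # 0.008333..., True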
@@ -935,40 +1112,50 @@ def reproject_input_data(data_array_list, meta_list, # target resolution in degree lon,lat: if target_res_arcsec is None: - res_degree = meta_list[i_align]['transform'][0] # reference grid + res_degree = meta_list[i_align]["transform"][0] # reference grid else: res_degree = target_res_arcsec / 3600 - dst_crs = meta_list[i_align]['crs'] + dst_crs = meta_list[i_align]["crs"] # loop over data arrays, do transformation where required: data_out_list = [None] * len(data_array_list) - meta_out = {'dtype': meta_list[i_align]['dtype'], - 'nodata': meta_list[i_align]['nodata'], - 'crs': dst_crs} + meta_out = { + "dtype": meta_list[i_align]["dtype"], + "nodata": meta_list[i_align]["nodata"], + "crs": dst_crs, + } for idx, data in enumerate(data_array_list): # if target resolution corresponds to reference data resolution, # the reference data is not transformed: - if idx==i_align and ((target_res_arcsec is None) or \ - (np.round(meta_list[i_align]['transform'][0], - decimals=7)==np.round(res_degree, decimals=7))): + if idx == i_align and ( + (target_res_arcsec is None) + or ( + np.round(meta_list[i_align]["transform"][0], decimals=7) + == np.round(res_degree, decimals=7) + ) + ): data_out_list[idx] = data continue # reproject data grid: - dst_bounds = rasterio.transform.array_bounds(meta_list[i_align]['height'], - meta_list[i_align]['width'], - meta_list[i_align]['transform']) - data_out_list[idx], meta_out['transform'] = \ - u_coord.align_raster_data(data_array_list[idx], meta_list[idx]['crs'], - meta_list[idx]['transform'], - dst_crs=dst_crs, - dst_resolution=(res_degree, res_degree), - dst_bounds=dst_bounds, - global_origin=global_origins, - resampling=resampling, - conserve=conserve) - meta_out['height'] = data_out_list[-1].shape[0] - meta_out['width'] = data_out_list[-1].shape[1] + dst_bounds = rasterio.transform.array_bounds( + meta_list[i_align]["height"], + meta_list[i_align]["width"], + meta_list[i_align]["transform"], + ) + data_out_list[idx], meta_out["transform"] = u_coord.align_raster_data( + data_array_list[idx], + meta_list[idx]["crs"], + meta_list[idx]["transform"], + dst_crs=dst_crs, + dst_resolution=(res_degree, res_degree), + dst_bounds=dst_bounds, + global_origin=global_origins, + resampling=resampling, + conserve=conserve, + ) + meta_out["height"] = data_out_list[-1].shape[0] + meta_out["width"] = data_out_list[-1].shape[1] return data_out_list, meta_out @@ -1078,17 +1265,18 @@ def _check_excel_exists(file_path, file_name, xlsx_before_xls=True): """ try_ext = [] if xlsx_before_xls: - try_ext.append('.xlsx') - try_ext.append('.xls') + try_ext.append(".xlsx") + try_ext.append(".xls") else: - try_ext.append('.xls') - try_ext.append('.xlsx') + try_ext.append(".xls") + try_ext.append(".xlsx") path_name = Path(file_path, file_name).stem for i in try_ext: if Path(file_path, path_name + i).is_file(): return str(Path(file_path, path_name + i)) return None + def _grp_read(country_iso3, admin1_info=None, data_dir=SYSTEM_DIR): """Retrieves the Gross Regional Product (GRP) aka Gross State Domestic Product (GSDP) data for a certain country. 
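The spreadsheet matching in `_grp_read` below boils down to comparing region names character by character after dropping whitespace and punctuation. A short sketch with made-up names:

    record_name = "Tamil Nadu"  # admin-1 name from the shape record (made up)
    xls_name = "Tamil-Nadu "    # same region as spelled in the GSDP spreadsheet (made up)

    match = [c for c in record_name if c.isalpha() or c.isnumeric()] == [
        c for c in xls_name if c.isalpha()
    ]
    print(match)  # True: blanks and hyphens are ignored in the comparison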
It requires an excel file in a subfolder @@ -1116,44 +1304,61 @@ def _grp_read(country_iso3, admin1_info=None, data_dir=SYSTEM_DIR): if admin1_info is None: admin1_info, _ = u_coord.get_admin1_info(country_iso3) admin1_info = admin1_info[country_iso3] - file_name = _check_excel_exists(data_dir.joinpath('GSDP'), str(country_iso3 + '_GSDP')) + file_name = _check_excel_exists( + data_dir.joinpath("GSDP"), str(country_iso3 + "_GSDP") + ) if file_name is not None: # open spreadsheet and identify relevant columns: admin1_xls_data = pd.read_excel(file_name) - if admin1_xls_data.get('State_Province') is None: + if admin1_xls_data.get("State_Province") is None: admin1_xls_data = admin1_xls_data.rename( - columns={admin1_xls_data.columns[0]: 'State_Province'}) - if admin1_xls_data.get('GSDP_ref') is None: + columns={admin1_xls_data.columns[0]: "State_Province"} + ) + if admin1_xls_data.get("GSDP_ref") is None: admin1_xls_data = admin1_xls_data.rename( - columns={admin1_xls_data.columns[-1]: 'GSDP_ref'}) + columns={admin1_xls_data.columns[-1]: "GSDP_ref"} + ) # initiate dictionary with admin 1 names as keys: - out_dict = dict.fromkeys([record['name'] for record in admin1_info]) - postals = [record['postal'] for record in admin1_info] + out_dict = dict.fromkeys([record["name"] for record in admin1_info]) + postals = [record["postal"] for record in admin1_info] # first nested loop. outer loop over region names in admin1_info: for record_name in out_dict: # inner loop over region names in spreadsheet, find matches - for idx, xls_name in enumerate(admin1_xls_data['State_Province'].tolist()): - subnat_shape_str = [c for c in record_name if c.isalpha() or c.isnumeric()] + for idx, xls_name in enumerate(admin1_xls_data["State_Province"].tolist()): + subnat_shape_str = [ + c for c in record_name if c.isalpha() or c.isnumeric() + ] subnat_xls_str = [c for c in xls_name if c.isalpha()] if subnat_shape_str == subnat_xls_str: - out_dict[record_name] = admin1_xls_data['GSDP_ref'][idx] + out_dict[record_name] = admin1_xls_data["GSDP_ref"][idx] break # second nested loop to detect matched empty entries for idx1, country_name in enumerate(out_dict.keys()): if out_dict[country_name] is None: - for idx2, xls_name in enumerate(admin1_xls_data['State_Province'].tolist()): + for idx2, xls_name in enumerate( + admin1_xls_data["State_Province"].tolist() + ): subnat_xls_str = [c for c in xls_name if c.isalpha()] postals_str = [c for c in postals[idx1] if c.isalpha()] if subnat_xls_str == postals_str: - out_dict[country_name] = admin1_xls_data['GSDP_ref'][idx2] + out_dict[country_name] = admin1_xls_data["GSDP_ref"][idx2] return out_dict - LOGGER.warning('No file for %s could be found in %s.', country_iso3, data_dir) - LOGGER.warning('No admin1 data is calculated in this case.') + LOGGER.warning("No file for %s could be found in %s.", country_iso3, data_dir) + LOGGER.warning("No admin1 data is calculated in this case.") return None -def _calc_admin1_one_country(country, res_arcsec, exponents, fin_mode, total_value, - reference_year, gpw_version, data_dir): + +def _calc_admin1_one_country( + country, + res_arcsec, + exponents, + fin_mode, + total_value, + reference_year, + gpw_version, + data_dir, +): """ Calculates the LitPop on admin1 level for provinces/states where such information are available (i.e. GDP is distributed on a subnational instead of a national level). 
Requires @@ -1181,14 +1386,15 @@ def _calc_admin1_one_country(country, res_arcsec, exponents, fin_mode, total_val Exposure instance """ - if fin_mode == 'pop': - raise NotImplementedError('`_calc_admin1_one_country` not implemented for '+ - "`fin_mode` == 'pop'.") + if fin_mode == "pop": + raise NotImplementedError( + "`_calc_admin1_one_country` not implemented for " + "`fin_mode` == 'pop'." + ) # Determine ISO 3166 representation of country and get geometry: try: iso3a = u_coord.country_to_iso(country, representation="alpha3") except LookupError: - LOGGER.error('Country not identified: %s. Skippig.', country) + LOGGER.error("Country not identified: %s. Skippig.", country) return None # get records and shapes on admin 1 level: admin1_info, admin1_shapes = u_coord.get_admin1_info(iso3a) @@ -1197,31 +1403,39 @@ def _calc_admin1_one_country(country, res_arcsec, exponents, fin_mode, total_val # get subnational Gross Regional Product (GRP) data for country: grp_values = _grp_read(iso3a, admin1_info=admin1_info, data_dir=data_dir) if grp_values is None: - LOGGER.error("No subnational GRP data found for calc_admin1" - " for country %s. Skipping.", country) + LOGGER.error( + "No subnational GRP data found for calc_admin1" + " for country %s. Skipping.", + country, + ) return None # normalize GRP values: - sum_vals = sum(filter(None, grp_values.values())) # get total - grp_values = {key: (value / sum_vals if value is not None else None) - for (key, value) in grp_values.items()} + sum_vals = sum(filter(None, grp_values.values())) # get total + grp_values = { + key: (value / sum_vals if value is not None else None) + for (key, value) in grp_values.items() + } # get total value of country: total_value = _get_total_value_per_country(iso3a, fin_mode, reference_year) exp_list = [] for idx, record in enumerate(admin1_info): - if grp_values[record['name']] is None: + if grp_values[record["name"]] is None: continue - LOGGER.info(record['name']) + LOGGER.info(record["name"]) # init exposure for province and add to list # total value is defined from country multiplied by grp_share: - exp_list.append(LitPop.from_shape(admin1_shapes[idx], - total_value * grp_values[record['name']], - res_arcsec=res_arcsec, - exponents=exponents, - reference_year=reference_year, - gpw_version=gpw_version, - data_dir=data_dir) - ) - exp_list[-1].gdf['admin1'] = record['name'] + exp_list.append( + LitPop.from_shape( + admin1_shapes[idx], + total_value * grp_values[record["name"]], + res_arcsec=res_arcsec, + exponents=exponents, + reference_year=reference_year, + gpw_version=gpw_version, + data_dir=data_dir, + ) + ) + exp_list[-1].gdf["admin1"] = record["name"] return Exposures.concat(exp_list) diff --git a/climada/entity/exposures/litpop/nightlight.py b/climada/entity/exposures/litpop/nightlight.py index f25b4fca6..d875b26a9 100644 --- a/climada/entity/exposures/litpop/nightlight.py +++ b/climada/entity/exposures/litpop/nightlight.py @@ -18,27 +18,28 @@ Define nightlight reader and cutting functions. 
""" + import glob -import shutil -import tarfile import gzip -import pickle import logging +import pickle +import shutil +import tarfile from pathlib import Path -import rasterio +import matplotlib.pyplot as plt import numpy as np +import rasterio import scipy.sparse as sparse -import matplotlib.pyplot as plt from osgeo import gdal from PIL import Image from shapefile import Shape +from climada import CONFIG from climada.util import ureg from climada.util.constants import SYSTEM_DIR from climada.util.files_handler import download_file from climada.util.save import save -from climada import CONFIG Image.MAX_IMAGE_PIXELS = 1e9 @@ -56,18 +57,20 @@ NOAA_BORDER = (-180, -65, 180, 75) """NOAA nightlights border (min_lon, min_lat, max_lon, max_lat)""" -BM_FILENAMES = ['BlackMarble_%i_A1_geo_gray.tif', - 'BlackMarble_%i_A2_geo_gray.tif', - 'BlackMarble_%i_B1_geo_gray.tif', - 'BlackMarble_%i_B2_geo_gray.tif', - 'BlackMarble_%i_C1_geo_gray.tif', - 'BlackMarble_%i_C2_geo_gray.tif', - 'BlackMarble_%i_D1_geo_gray.tif', - 'BlackMarble_%i_D2_geo_gray.tif' - ] +BM_FILENAMES = [ + "BlackMarble_%i_A1_geo_gray.tif", + "BlackMarble_%i_A2_geo_gray.tif", + "BlackMarble_%i_B1_geo_gray.tif", + "BlackMarble_%i_B2_geo_gray.tif", + "BlackMarble_%i_C1_geo_gray.tif", + "BlackMarble_%i_C2_geo_gray.tif", + "BlackMarble_%i_D1_geo_gray.tif", + "BlackMarble_%i_D2_geo_gray.tif", +] """Nightlight NASA files which generate the whole earth when put together.""" -def load_nasa_nl_shape(geometry, year, data_dir=SYSTEM_DIR, dtype='float32'): + +def load_nasa_nl_shape(geometry, year, data_dir=SYSTEM_DIR, dtype="float32"): """Read nightlight data from NASA BlackMarble tiles cropped to given shape(s) and combine arrays from each tile. @@ -107,63 +110,74 @@ def load_nasa_nl_shape(geometry, year, data_dir=SYSTEM_DIR, dtype='float32'): bounds = geometry.bounds # get years available in BlackMarble data from CONFIG and convert to array: - years_available = [year.int() for year in \ - CONFIG.exposures.litpop.nightlights.blackmarble_years.list() - ] + years_available = [ + year.int() + for year in CONFIG.exposures.litpop.nightlights.blackmarble_years.list() + ] # get year closest to year with BlackMarble data available: year = min(years_available, key=lambda x: abs(x - year)) # determin black marble tiles with coordinates containing the bounds: req_files = get_required_nl_files(bounds) # check wether required files exist locally: - files_exist = check_nl_local_file_exists(required_files=req_files, - check_path=data_dir, year=year) + files_exist = check_nl_local_file_exists( + required_files=req_files, check_path=data_dir, year=year + ) # download data that is missing: download_nl_files(req_files, files_exist, data_dir, year) # convert `req_files` to sorted list of indices: - req_files = np.where(req_files ==1)[0] + req_files = np.where(req_files == 1)[0] # init empty lists for tiles depending on position in global grid: - results_array_north = list() # tiles A1, B1, C1, D1 (Nothern Hemisphere) - results_array_south = list() # tiles A2, B2, C2, D2 (Southern Hemisphere) + results_array_north = list() # tiles A1, B1, C1, D1 (Nothern Hemisphere) + results_array_south = list() # tiles A2, B2, C2, D2 (Southern Hemisphere) # loop through required files, load and crop data for each: for idx, i_file in enumerate(req_files): # read cropped data from source file (src) to np.ndarray: - out_image, meta_tmp = load_nasa_nl_shape_single_tile(geometry, - data_dir / (BM_FILENAMES[i_file] %(year))) + out_image, meta_tmp = load_nasa_nl_shape_single_tile( + 
geometry, data_dir / (BM_FILENAMES[i_file] % (year)) + ) # sort indicies to northenr and southern hemisphere: - if i_file in [0,2,4,6]: # indicies of northern hemisphere files + if i_file in [0, 2, 4, 6]: # indicies of northern hemisphere files results_array_north.append(out_image) - elif i_file in [1,3,5,7]: # indicies of southern hemisphere files + elif i_file in [1, 3, 5, 7]: # indicies of southern hemisphere files results_array_south.append(out_image) # from first (top left) of tiles, meta is initiated, incl. origin: if idx == 0: meta = meta_tmp # set correct CRS from local tile's CRS to global WGS 84: - meta.update({"crs": rasterio.crs.CRS.from_epsg(4326), - "dtype": dtype}) - if len(req_files) == 1: # only one tile required: + meta.update({"crs": rasterio.crs.CRS.from_epsg(4326), "dtype": dtype}) + if len(req_files) == 1: # only one tile required: return np.array(out_image, dtype=dtype), meta # Else, combine data from multiple input files (BlackMarble tiles) - # concatenate arrays from west to east and from north to south: del out_image - if results_array_north: # northern hemisphere west to east + if results_array_north: # northern hemisphere west to east results_array_north = np.concatenate(results_array_north, axis=1) - if results_array_south: # southern hemisphere west to east + if results_array_south: # southern hemisphere west to east results_array_south = np.concatenate(results_array_south, axis=1) - if isinstance(results_array_north, np.ndarray) and isinstance(results_array_south, np.ndarray): + if isinstance(results_array_north, np.ndarray) and isinstance( + results_array_south, np.ndarray + ): # north to south if both hemispheres are involved - results_array_north = np.concatenate([results_array_north, results_array_south], axis=0) - elif isinstance(results_array_south, np.ndarray): # only southern hemisphere + results_array_north = np.concatenate( + [results_array_north, results_array_south], axis=0 + ) + elif isinstance(results_array_south, np.ndarray): # only southern hemisphere results_array_north = results_array_south del results_array_south # update number of elements per axis in meta dictionary: - meta.update({"height": results_array_north.shape[0], - "width": results_array_north.shape[1], - "dtype": dtype}) + meta.update( + { + "height": results_array_north.shape[0], + "width": results_array_north.shape[1], + "dtype": dtype, + } + ) return np.array(results_array_north, dtype=dtype), meta + def get_required_nl_files(bounds): """Determines which of the satellite pictures are necessary for a certain bounding box (e.g. country) @@ -186,16 +200,22 @@ def get_required_nl_files(bounds): """ # check if bounds are valid: if (np.size(bounds) != 4) or (bounds[0] > bounds[2]) or (bounds[1] > bounds[3]): - raise ValueError('Invalid bounds supplied. `bounds` must be tuple'+ - ' with (min_lon, min_lat, max_lon, max_lat).') + raise ValueError( + "Invalid bounds supplied. `bounds` must be tuple" + + " with (min_lon, min_lat, max_lon, max_lat)." + ) min_lon, min_lat, max_lon, max_lat = bounds # longitude first. The width of all tiles is 90 degrees tile_width = 90 - req_files = np.zeros(np.count_nonzero(BM_FILENAMES),) + req_files = np.zeros( + np.count_nonzero(BM_FILENAMES), + ) # determine the staring tile - first_tile_lon = min(np.floor((min_lon - (-180)) / tile_width), 3) # "normalise" to zero + first_tile_lon = min( + np.floor((min_lon - (-180)) / tile_width), 3 + ) # "normalise" to zero last_tile_lon = min(np.floor((max_lon - (-180)) / tile_width), 3) # Now latitude. 
The height of all tiles is the same as the height. @@ -213,8 +233,8 @@ def get_required_nl_files(bounds): continue return req_files -def check_nl_local_file_exists(required_files=None, check_path=SYSTEM_DIR, - year=2016): + +def check_nl_local_file_exists(required_files=None, check_path=SYSTEM_DIR, year=2016): """Checks if BM Satellite files are avaialbe and returns a vector denoting the missing files. @@ -237,38 +257,60 @@ def check_nl_local_file_exists(required_files=None, check_path=SYSTEM_DIR, Boolean array that denotes if the required files exist. """ if required_files is None: - required_files = np.ones(len(BM_FILENAMES),) + required_files = np.ones( + len(BM_FILENAMES), + ) if np.size(required_files) < np.count_nonzero(BM_FILENAMES): - required_files = np.ones(np.count_nonzero(BM_FILENAMES),) - LOGGER.warning('The parameter \'required_files\' was too short and ' - 'is ignored.') + required_files = np.ones( + np.count_nonzero(BM_FILENAMES), + ) + LOGGER.warning( + "The parameter 'required_files' was too short and " "is ignored." + ) if isinstance(check_path, str): check_path = Path(check_path) if not check_path.is_dir(): - raise ValueError(f'The given path does not exist: {check_path}') - files_exist = np.zeros(np.count_nonzero(BM_FILENAMES),) + raise ValueError(f"The given path does not exist: {check_path}") + files_exist = np.zeros( + np.count_nonzero(BM_FILENAMES), + ) for num_check, name_check in enumerate(BM_FILENAMES): if required_files[num_check] == 0: continue - curr_file = check_path.joinpath(name_check %(year)) + curr_file = check_path.joinpath(name_check % (year)) if curr_file.is_file(): files_exist[num_check] = 1 if sum(files_exist) == sum(required_files): - LOGGER.debug('Found all required satellite data (%s files) in folder %s', - int(sum(required_files)), check_path) + LOGGER.debug( + "Found all required satellite data (%s files) in folder %s", + int(sum(required_files)), + check_path, + ) elif sum(files_exist) == 0: - LOGGER.info('No satellite files found locally in %s', check_path) + LOGGER.info("No satellite files found locally in %s", check_path) else: - LOGGER.debug('Not all satellite files available. ' - 'Found %d out of %d required files in %s', - int(sum(files_exist)), int(sum(required_files)), check_path) + LOGGER.debug( + "Not all satellite files available. " + "Found %d out of %d required files in %s", + int(sum(files_exist)), + int(sum(required_files)), + check_path, + ) return files_exist -def download_nl_files(req_files=np.ones(len(BM_FILENAMES),), - files_exist=np.zeros(len(BM_FILENAMES),), - dwnl_path=SYSTEM_DIR, year=2016): + +def download_nl_files( + req_files=np.ones( + len(BM_FILENAMES), + ), + files_exist=np.zeros( + len(BM_FILENAMES), + ), + dwnl_path=SYSTEM_DIR, + year=2016, +): """Attempts to download nightlight files from NASA webpage. Parameters @@ -297,43 +339,52 @@ def download_nl_files(req_files=np.ones(len(BM_FILENAMES),), """ if (len(req_files) != len(files_exist)) or (len(req_files) != len(BM_FILENAMES)): - raise ValueError('The given arguments are invalid. req_files and ' - 'files_exist must both be as long as there are files to download' - ' (' + str(len(BM_FILENAMES)) + ').') + raise ValueError( + "The given arguments are invalid. req_files and " + "files_exist must both be as long as there are files to download" + " (" + str(len(BM_FILENAMES)) + ")." + ) if not Path(dwnl_path).is_dir(): - raise ValueError(f'The folder {dwnl_path} does not exist. Operation aborted.') + raise ValueError(f"The folder {dwnl_path} does not exist. 
Operation aborted.") if np.all(req_files == files_exist): - LOGGER.debug('All required files already exist. No downloads necessary.') + LOGGER.debug("All required files already exist. No downloads necessary.") return dwnl_path try: for num_files in range(0, np.count_nonzero(BM_FILENAMES)): if req_files[num_files] == 0 or files_exist[num_files] == 1: - continue # file already available or not required + continue # file already available or not required path_check = False # loop through different possible URLs defined in CONFIG: value_err = None for url in CONFIG.exposures.litpop.nightlights.nasa_sites.list(): - try: # control for ValueError due to wrong URL - curr_file = url.str() + BM_FILENAMES[num_files] %(year) - LOGGER.info('Attempting to download file from %s', curr_file) + try: # control for ValueError due to wrong URL + curr_file = url.str() + BM_FILENAMES[num_files] % (year) + LOGGER.info("Attempting to download file from %s", curr_file) path_check = download_file(curr_file, download_dir=dwnl_path) - break # leave loop if sucessful + break # leave loop if sucessful except ValueError as err: value_err = err - if path_check: # download succesful + if path_check: # download succesful continue if value_err: - raise ValueError("Download failed," - " check URLs inCONFIG.exposures.litpop.nightlights.nasa_sites!\n" - f" Last error message:\n {value_err.args[0]}") + raise ValueError( + "Download failed," + " check URLs inCONFIG.exposures.litpop.nightlights.nasa_sites!\n" + f" Last error message:\n {value_err.args[0]}" + ) else: - raise ValueError("Download failed, file not found and no nasa sites configured," - " check URLs in CONFIG.exposures.litpop.nightlights.nasa_sites!") + raise ValueError( + "Download failed, file not found and no nasa sites configured," + " check URLs in CONFIG.exposures.litpop.nightlights.nasa_sites!" + ) except Exception as exc: - raise RuntimeError('Download failed. Please check the network ' - 'connection and whether filenames are still valid.') from exc + raise RuntimeError( + "Download failed. Please check the network " + "connection and whether filenames are still valid." + ) from exc return dwnl_path + def load_nasa_nl_shape_single_tile(geometry, path, layer=0): """Read nightlight data from single NASA BlackMarble tile and crop to given shape. @@ -356,19 +407,26 @@ def load_nasa_nl_shape_single_tile(geometry, path, layer=0): rasterio meta """ # open tif source file with raterio: - with rasterio.open(path, 'r') as src: + with rasterio.open(path, "r") as src: # read cropped data from source file (src) to np.ndarray: out_image, transform = rasterio.mask.mask(src, [geometry], crop=True) - LOGGER.debug('Read cropped %s as np.ndarray.', path.name) + LOGGER.debug("Read cropped %s as np.ndarray.", path.name) if out_image.shape[0] < layer: - raise IndexError(f"{path.name} has only {out_image.shape[0]} layers," - f" layer {layer} can't be accessed.") + raise IndexError( + f"{path.name} has only {out_image.shape[0]} layers," + f" layer {layer} can't be accessed." + ) meta = src.meta - meta.update({"driver": "GTiff", - "height": out_image.shape[1], - "width": out_image.shape[2], - "transform": transform}) - return out_image[layer,:,:], meta + meta.update( + { + "driver": "GTiff", + "height": out_image.shape[1], + "width": out_image.shape[2], + "transform": transform, + } + ) + return out_image[layer, :, :], meta + def load_nightlight_nasa(bounds, req_files, year): """Get nightlight from NASA repository that contain input boundary. 
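`load_nasa_nl_shape_single_tile` above follows a generic rasterio pattern: crop a GeoTIFF to a shapely geometry and keep the adjusted transform. A self-contained sketch; the file name and polygon are placeholders, not CLIMADA paths:

    import rasterio
    import rasterio.mask
    from shapely.geometry import box

    geometry = box(-10.0, 40.0, 0.0, 50.0)  # made-up bounding box in the raster's CRS
    with rasterio.open("tile.tif", "r") as src:  # placeholder file name
        out_image, transform = rasterio.mask.mask(src, [geometry], crop=True)
        meta = src.meta
    meta.update(
        {"height": out_image.shape[1], "width": out_image.shape[2], "transform": transform}
    )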
@@ -410,16 +468,16 @@ def load_nightlight_nasa(bounds, req_files, year): continue extent = np.int64(np.clip(extent, 0, tile_size[None] - 1)) # pylint: disable=unsubscriptable-object - im_nl, _ = read_bm_file(SYSTEM_DIR, fname %(year)) + im_nl, _ = read_bm_file(SYSTEM_DIR, fname % (year)) im_nl = np.flipud(im_nl) im_nl = sparse.csc.csc_matrix(im_nl) - im_nl = im_nl[extent[0, 0]:extent[1, 0] + 1, extent[0, 1]:extent[1, 1] + 1] + im_nl = im_nl[extent[0, 0] : extent[1, 0] + 1, extent[0, 1] : extent[1, 1] + 1] nightlight.append((tile_coord, im_nl)) tile_coords = np.array([n[0] for n in nightlight]) shape = tile_coords.max(axis=0) - tile_coords.min(axis=0) + 1 - nightlight = np.array([n[1] for n in nightlight]).reshape(shape, order='F') - nightlight = sparse.bmat(np.flipud(nightlight), format='csr') + nightlight = np.array([n[1] for n in nightlight]).reshape(shape, order="F") + nightlight = sparse.bmat(np.flipud(nightlight), format="csr") coord_nl = np.vstack([coord_min, coord_h]).T coord_nl[:, 0] += global_idx[0, :] * coord_h[:] @@ -447,13 +505,16 @@ def read_bm_file(bm_path, filename): Additional info from which coordinates can be calculated. """ path = Path(bm_path, filename) - LOGGER.debug('Importing%s.', path) + LOGGER.debug("Importing%s.", path) if not path.exists(): - raise FileNotFoundError('Invalid path: check that the path to BlackMarble file is correct.') + raise FileNotFoundError( + "Invalid path: check that the path to BlackMarble file is correct." + ) curr_file = gdal.Open(str(path)) arr1 = curr_file.GetRasterBand(1).ReadAsArray() return arr1, curr_file + def unzip_tif_to_py(file_gz): """Unzip image file, read it, flip the x axis, save values as pickle and remove tif. @@ -471,8 +532,8 @@ def unzip_tif_to_py(file_gz): """ LOGGER.info("Unzipping file %s.", file_gz) file_name = Path(Path(file_gz).stem) - with gzip.open(file_gz, 'rb') as f_in: - with file_name.open('wb') as f_out: + with gzip.open(file_gz, "rb") as f_in: + with file_name.open("wb") as f_out: shutil.copyfileobj(f_in, f_out) nightlight = sparse.csc_matrix(plt.imread(file_name)) # flip X axis @@ -484,6 +545,7 @@ def unzip_tif_to_py(file_gz): return file_name, nightlight + def untar_noaa_stable_nightlight(f_tar_ini): """Move input tar file to SYSTEM_DIR and extract stable light file. Returns absolute path of stable light file in format tif.gz. 
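The extraction step in `untar_noaa_stable_nightlight` below is standard `tarfile` usage: pick the archive member whose name ends with the stable-lights suffix and extract it. A sketch with a placeholder archive name:

    import tarfile

    f_tar = "F182013.v4.tar"  # placeholder NOAA archive name
    with tarfile.open(f_tar) as tar_file:
        members = [
            name
            for name in tar_file.getnames()
            if name.endswith("stable_lights.avg_vis.tif.gz")
        ]
        if not members:
            raise ValueError(f"No stable light intensities found in {f_tar}")
        tar_file.extract(members[0], ".")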
@@ -503,14 +565,22 @@ def untar_noaa_stable_nightlight(f_tar_ini): shutil.move(f_tar_ini, f_tar_dest) # extract stable_lights.avg_vis.tif with tarfile.open(f_tar_ini) as tar_file: - extract_name = [name for name in tar_file.getnames() - if name.endswith('stable_lights.avg_vis.tif.gz')] + extract_name = [ + name + for name in tar_file.getnames() + if name.endswith("stable_lights.avg_vis.tif.gz") + ] if len(extract_name) == 0: - raise ValueError('No stable light intensities for selected year and satellite ' - f'in file {f_tar_ini}') + raise ValueError( + "No stable light intensities for selected year and satellite " + f"in file {f_tar_ini}" + ) if len(extract_name) > 1: - LOGGER.warning('found more than one potential intensity file in %s %s', - f_tar_ini, extract_name) + LOGGER.warning( + "found more than one potential intensity file in %s %s", + f_tar_ini, + extract_name, + ) tar_file.extract(extract_name[0], SYSTEM_DIR) return SYSTEM_DIR.joinpath(extract_name[0]) @@ -536,15 +606,17 @@ def load_nightlight_noaa(ref_year=2013, sat_name=None): # NOAA's URL used to retrieve nightlight satellite images: noaa_url = CONFIG.exposures.litpop.nightlights.noaa_url.str() if sat_name is None: - fn_light = str(SYSTEM_DIR.joinpath('*' + - str(ref_year) + '*.stable_lights.avg_vis')) + fn_light = str( + SYSTEM_DIR.joinpath("*" + str(ref_year) + "*.stable_lights.avg_vis") + ) else: - fn_light = str(SYSTEM_DIR.joinpath(sat_name + - str(ref_year) + '*.stable_lights.avg_vis')) + fn_light = str( + SYSTEM_DIR.joinpath(sat_name + str(ref_year) + "*.stable_lights.avg_vis") + ) # check if file exists in SYSTEM_DIR, download if not if glob.glob(fn_light + ".p"): fn_light = sorted(glob.glob(fn_light + ".p"))[0] - with open(fn_light, 'rb') as f_nl: + with open(fn_light, "rb") as f_nl: nightlight = pickle.load(f_nl) elif glob.glob(fn_light + ".tif.gz"): fn_light = sorted(glob.glob(fn_light + ".tif.gz"))[0] @@ -554,22 +626,26 @@ def load_nightlight_noaa(ref_year=2013, sat_name=None): if sat_name is None: ini_pre, end_pre = 18, 9 for pre_i in np.arange(ini_pre, end_pre, -1): - url = noaa_url + 'F' + str(pre_i) + str(ref_year) + '.v4.tar' + url = noaa_url + "F" + str(pre_i) + str(ref_year) + ".v4.tar" try: file_down = download_file(url, download_dir=SYSTEM_DIR) break except ValueError: pass - if 'file_down' not in locals(): - raise ValueError(f'Nightlight for reference year {ref_year} not available. ' - 'Try a different year.') + if "file_down" not in locals(): + raise ValueError( + f"Nightlight for reference year {ref_year} not available. " + "Try a different year." + ) else: - url = noaa_url + sat_name + str(ref_year) + '.v4.tar' + url = noaa_url + sat_name + str(ref_year) + ".v4.tar" try: file_down = download_file(url, download_dir=SYSTEM_DIR) except ValueError as err: - raise ValueError(f'Nightlight intensities for year {ref_year} and satellite' - f' {sat_name} do not exist.') from err + raise ValueError( + f"Nightlight intensities for year {ref_year} and satellite" + f" {sat_name} do not exist." + ) from err fn_light = untar_noaa_stable_nightlight(file_down) fn_light, nightlight = unzip_tif_to_py(fn_light) diff --git a/climada/entity/exposures/test/test_base.py b/climada/entity/exposures/test/test_base.py index 867e39c29..6650719a5 100644 --- a/climada/entity/exposures/test/test_base.py +++ b/climada/entity/exposures/test/test_base.py @@ -18,40 +18,50 @@ Test Exposure base class. 
""" + import unittest + +import geopandas as gpd import numpy as np import pandas as pd -import geopandas as gpd -from sklearn.metrics import DistanceMetric import rasterio -from rasterio.windows import Window import scipy as sp +from rasterio.windows import Window +from sklearn.metrics import DistanceMetric +import climada.util.coordinates as u_coord from climada import CONFIG -from climada.entity.exposures.base import Exposures, INDICATOR_IMPF, \ - INDICATOR_CENTR, add_sea, DEF_REF_YEAR, DEF_VALUE_UNIT from climada.entity import LitPop -from climada.hazard.base import Hazard, Centroids -from climada.util.constants import ENT_TEMPLATE_XLS, ONE_LAT_KM, DEF_CRS, HAZ_DEMO_FL -import climada.util.coordinates as u_coord +from climada.entity.exposures.base import ( + DEF_REF_YEAR, + DEF_VALUE_UNIT, + INDICATOR_CENTR, + INDICATOR_IMPF, + Exposures, + add_sea, +) +from climada.hazard.base import Centroids, Hazard +from climada.util.constants import DEF_CRS, ENT_TEMPLATE_XLS, HAZ_DEMO_FL, ONE_LAT_KM DATA_DIR = CONFIG.exposures.test_data.dir() + def good_exposures(): """Followng values are defined for each exposure""" data = {} - data['latitude'] = np.array([1, 2, 3]) - data['longitude'] = np.array([2, 3, 4]) - data['value'] = np.array([1, 2, 3]) - data['deductible'] = np.array([1, 2, 3]) - data[INDICATOR_IMPF + 'NA'] = np.array([1, 2, 3]) - data['category_id'] = np.array([1, 2, 3]) - data['region_id'] = np.array([1, 2, 3]) - data[INDICATOR_CENTR + 'TC'] = np.array([1, 2, 3]) + data["latitude"] = np.array([1, 2, 3]) + data["longitude"] = np.array([2, 3, 4]) + data["value"] = np.array([1, 2, 3]) + data["deductible"] = np.array([1, 2, 3]) + data[INDICATOR_IMPF + "NA"] = np.array([1, 2, 3]) + data["category_id"] = np.array([1, 2, 3]) + data["region_id"] = np.array([1, 2, 3]) + data[INDICATOR_CENTR + "TC"] = np.array([1, 2, 3]) expo = Exposures(gpd.GeoDataFrame(data=data)) return expo + class TestFuncs(unittest.TestCase): """Check assign function""" @@ -59,16 +69,26 @@ def test_assign_pass(self): """Check that attribute `assigned` is correctly set.""" np_rand = np.random.RandomState(123456789) - haz = Hazard.from_raster([HAZ_DEMO_FL], haz_type='FL', window=Window(10, 20, 50, 60)) + haz = Hazard.from_raster( + [HAZ_DEMO_FL], haz_type="FL", window=Window(10, 20, 50, 60) + ) ncentroids = haz.centroids.size exp = Exposures(crs=haz.centroids.crs) # some are matching exactly, some are geographically close - exp.gdf['longitude'] = np.concatenate([ - haz.centroids.lon, haz.centroids.lon + 0.001 * (-0.5 + np_rand.rand(ncentroids))]) - exp.gdf['latitude'] = np.concatenate([ - haz.centroids.lat, haz.centroids.lat + 0.001 * (-0.5 + np_rand.rand(ncentroids))]) + exp.gdf["longitude"] = np.concatenate( + [ + haz.centroids.lon, + haz.centroids.lon + 0.001 * (-0.5 + np_rand.rand(ncentroids)), + ] + ) + exp.gdf["latitude"] = np.concatenate( + [ + haz.centroids.lat, + haz.centroids.lat + 0.001 * (-0.5 + np_rand.rand(ncentroids)), + ] + ) expected_result = np.concatenate([np.arange(ncentroids), np.arange(ncentroids)]) # make sure that it works for both float32 and float64 @@ -76,25 +96,30 @@ def test_assign_pass(self): haz.centroids.gdf["lat"] = haz.centroids.lat.astype(test_dtype) haz.centroids.gdf["lon"] = haz.centroids.lon.astype(test_dtype) exp.assign_centroids(haz) - self.assertEqual(exp.gdf.shape[0], len(exp.gdf[INDICATOR_CENTR + 'FL'])) - np.testing.assert_array_equal(exp.gdf[INDICATOR_CENTR + 'FL'].values, expected_result) + self.assertEqual(exp.gdf.shape[0], len(exp.gdf[INDICATOR_CENTR + "FL"])) + 
np.testing.assert_array_equal( + exp.gdf[INDICATOR_CENTR + "FL"].values, expected_result + ) exp.assign_centroids(Hazard(), overwrite=False) - self.assertEqual(exp.gdf.shape[0], len(exp.gdf[INDICATOR_CENTR + 'FL'])) - np.testing.assert_array_equal(exp.gdf[INDICATOR_CENTR + 'FL'].values, expected_result) + self.assertEqual(exp.gdf.shape[0], len(exp.gdf[INDICATOR_CENTR + "FL"])) + np.testing.assert_array_equal( + exp.gdf[INDICATOR_CENTR + "FL"].values, expected_result + ) def test__init__meta_type(self): - """ Check if meta of type list raises a ValueError in __init__""" + """Check if meta of type list raises a ValueError in __init__""" with self.assertRaises(ValueError) as cm: Exposures(meta=[]) - self.assertEqual("meta must be a dictionary", - str(cm.exception)) + self.assertEqual("meta must be a dictionary", str(cm.exception)) def test__init__geometry_type(self): """Check that initialization fails when `geometry` is given as a `str` argument""" with self.assertRaises(ValueError) as cm: - Exposures(geometry='myname') - self.assertEqual("Exposures is not able to handle customized 'geometry' column names.", - str(cm.exception)) + Exposures(geometry="myname") + self.assertEqual( + "Exposures is not able to handle customized 'geometry' column names.", + str(cm.exception), + ) def test__init__mda_in_kwargs(self): """Check if `_metadata` attributes are instantiated correctly for sub-classes of @@ -109,66 +134,134 @@ def test_read_raster_pass(self): exp = Exposures.from_raster(HAZ_DEMO_FL, window=Window(10, 20, 50, 60)) exp.check() self.assertTrue(u_coord.equal_crs(exp.crs, DEF_CRS)) - self.assertAlmostEqual(exp.gdf['latitude'].max(), - 10.248220966978932 - 0.009000000000000341 / 2) - self.assertAlmostEqual(exp.gdf['latitude'].min(), - 10.248220966978932 - 0.009000000000000341 - / 2 - 59 * 0.009000000000000341) - self.assertAlmostEqual(exp.gdf['longitude'].min(), - -69.2471495969998 + 0.009000000000000341 / 2) - self.assertAlmostEqual(exp.gdf['longitude'].max(), - -69.2471495969998 + 0.009000000000000341 - / 2 + 49 * 0.009000000000000341) + self.assertAlmostEqual( + exp.gdf["latitude"].max(), 10.248220966978932 - 0.009000000000000341 / 2 + ) + self.assertAlmostEqual( + exp.gdf["latitude"].min(), + 10.248220966978932 - 0.009000000000000341 / 2 - 59 * 0.009000000000000341, + ) + self.assertAlmostEqual( + exp.gdf["longitude"].min(), -69.2471495969998 + 0.009000000000000341 / 2 + ) + self.assertAlmostEqual( + exp.gdf["longitude"].max(), + -69.2471495969998 + 0.009000000000000341 / 2 + 49 * 0.009000000000000341, + ) self.assertEqual(len(exp.gdf), 60 * 50) - self.assertAlmostEqual(exp.gdf['value'].values.reshape((60, 50))[25, 12], 0.056825936) + self.assertAlmostEqual( + exp.gdf["value"].values.reshape((60, 50))[25, 12], 0.056825936 + ) def test_assign_raster_pass(self): """Test assign_centroids with raster hazard""" # explicit, easy-to-understand raster centroids for hazard meta = { - 'count': 1, 'crs': DEF_CRS, - 'width': 20, 'height': 10, - 'transform': rasterio.Affine(1.5, 0.0, -20, 0.0, -1.4, 8) + "count": 1, + "crs": DEF_CRS, + "width": 20, + "height": 10, + "transform": rasterio.Affine(1.5, 0.0, -20, 0.0, -1.4, 8), } - haz = Hazard('FL', centroids=Centroids.from_meta(meta)) + haz = Hazard("FL", centroids=Centroids.from_meta(meta)) # explicit points with known results (see `expected_result` for details) exp = Exposures(crs=DEF_CRS) - exp.gdf['longitude'] = np.array([ - -20.1, -20.0, -19.8, -19.0, -18.6, -18.4, - -19.0, -19.0, -19.0, -19.0, - -20.1, 0.0, 10.1, 10.1, 10.1, 0.0, -20.2, -20.3, - 
-6.4, 9.8, 0.0, - ]) - exp.gdf['latitude'] = np.array([ - 7.3, 7.3, 7.3, 7.3, 7.3, 7.3, - 8.1, 7.9, 6.7, 6.5, - 8.1, 8.2, 8.3, 0.0, -6.1, -6.2, -6.3, 0.0, - -1.9, -1.7, 0.0, - ]) + exp.gdf["longitude"] = np.array( + [ + -20.1, + -20.0, + -19.8, + -19.0, + -18.6, + -18.4, + -19.0, + -19.0, + -19.0, + -19.0, + -20.1, + 0.0, + 10.1, + 10.1, + 10.1, + 0.0, + -20.2, + -20.3, + -6.4, + 9.8, + 0.0, + ] + ) + exp.gdf["latitude"] = np.array( + [ + 7.3, + 7.3, + 7.3, + 7.3, + 7.3, + 7.3, + 8.1, + 7.9, + 6.7, + 6.5, + 8.1, + 8.2, + 8.3, + 0.0, + -6.1, + -6.2, + -6.3, + 0.0, + -1.9, + -1.7, + 0.0, + ] + ) exp.assign_centroids(haz) expected_result = [ # constant y-value, varying x-value - 0, 0, 0, 0, 0, 1, + 0, + 0, + 0, + 0, + 0, + 1, # constant x-value, varying y-value - 0, 0, 0, 20, + 0, + 0, + 0, + 20, # out of bounds: topleft, top, topright, right, bottomright, bottom, bottomleft, left - -1, -1, -1, -1, -1, -1, -1, -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, # some explicit points within the raster - 149, 139, 113, + 149, + 139, + 113, ] - np.testing.assert_array_equal(exp.gdf[INDICATOR_CENTR + 'FL'].values, expected_result) - + np.testing.assert_array_equal( + exp.gdf[INDICATOR_CENTR + "FL"].values, expected_result + ) def test_assign_raster_same_pass(self): """Test assign_centroids with raster hazard""" exp = Exposures.from_raster(HAZ_DEMO_FL, window=Window(10, 20, 50, 60)) exp.check() - haz = Hazard.from_raster([HAZ_DEMO_FL], haz_type='FL', window=Window(10, 20, 50, 60)) + haz = Hazard.from_raster( + [HAZ_DEMO_FL], haz_type="FL", window=Window(10, 20, 50, 60) + ) exp.assign_centroids(haz) - np.testing.assert_array_equal(exp.gdf[INDICATOR_CENTR + 'FL'].values, - np.arange(haz.centroids.size, dtype=int)) + np.testing.assert_array_equal( + exp.gdf[INDICATOR_CENTR + "FL"].values, + np.arange(haz.centroids.size, dtype=int), + ) # Test fails because exposures stores the crs in the meta attribute as rasterio object, # while the centroids stores the crs in the geodataframe, which is not a rasterio object. 
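The tests in this file repeatedly exercise one pattern: build a raster-based Hazard, a point Exposures, and map each exposure point to its hazard centroid via `assign_centroids`. Condensed into one runnable snippet (requires a CLIMADA installation; the raster meta and the coordinates reuse the toy values from `test_assign_raster_pass` above):

    import numpy as np
    import rasterio

    from climada.entity.exposures.base import INDICATOR_CENTR, Exposures
    from climada.hazard.base import Centroids, Hazard
    from climada.util.constants import DEF_CRS

    meta = {
        "count": 1,
        "crs": DEF_CRS,
        "width": 20,
        "height": 10,
        "transform": rasterio.Affine(1.5, 0.0, -20, 0.0, -1.4, 8),
    }
    haz = Hazard("FL", centroids=Centroids.from_meta(meta))

    exp = Exposures(crs=DEF_CRS)
    exp.gdf["longitude"] = np.array([-20.1, -18.4, -19.0, 10.1])
    exp.gdf["latitude"] = np.array([7.3, 7.3, 6.5, 8.3])
    exp.assign_centroids(haz)
    print(exp.gdf[INDICATOR_CENTR + "FL"].values)  # -> 0, 1, 20, -1 (-1 marks a point outside the raster)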
@@ -176,14 +269,20 @@ def test_assign_raster_same_pass(self): def test_assign_large_hazard_subset_pass(self): """Test assign_centroids with raster hazard""" exp = Exposures.from_raster(HAZ_DEMO_FL, window=Window(10, 20, 50, 60)) - exp.gdf['latitude'][[0, 1]] = exp.gdf['latitude'][[1, 0]] - exp.gdf['longitude'][[0, 1]] = exp.gdf['longitude'][[1, 0]] + exp.gdf["latitude"][[0, 1]] = exp.gdf["latitude"][[1, 0]] + exp.gdf["longitude"][[0, 1]] = exp.gdf["longitude"][[1, 0]] exp.check() - haz = Hazard.from_raster([HAZ_DEMO_FL], haz_type='FL') + haz = Hazard.from_raster([HAZ_DEMO_FL], haz_type="FL") exp.assign_centroids(haz) - assigned_centroids = haz.centroids.select(sel_cen=exp.gdf[INDICATOR_CENTR + 'FL'].values) - np.testing.assert_array_equal(np.unique(assigned_centroids.lat), np.unique(exp.gdf['latitude'])) - np.testing.assert_array_equal(np.unique(assigned_centroids.lon), np.unique(exp.gdf['longitude'])) + assigned_centroids = haz.centroids.select( + sel_cen=exp.gdf[INDICATOR_CENTR + "FL"].values + ) + np.testing.assert_array_equal( + np.unique(assigned_centroids.lat), np.unique(exp.gdf["latitude"]) + ) + np.testing.assert_array_equal( + np.unique(assigned_centroids.lon), np.unique(exp.gdf["longitude"]) + ) def test_affected_total_value(self): haz_type = "RF" @@ -206,15 +305,15 @@ def test_affected_total_value(self): tot_val = exp.affected_total_value( haz, threshold_affected=0, overwrite_assigned_centroids=False ) - self.assertEqual(tot_val, np.sum(exp.gdf['value'][[1, 2, 3, 5]])) + self.assertEqual(tot_val, np.sum(exp.gdf["value"][[1, 2, 3, 5]])) tot_val = exp.affected_total_value( haz, threshold_affected=3, overwrite_assigned_centroids=False ) - self.assertEqual(tot_val, np.sum(exp.gdf['value'][[3]])) + self.assertEqual(tot_val, np.sum(exp.gdf["value"][[3]])) tot_val = exp.affected_total_value( haz, threshold_affected=-2, overwrite_assigned_centroids=False ) - self.assertEqual(tot_val, np.sum(exp.gdf['value'][[0, 1, 2, 3, 5]])) + self.assertEqual(tot_val, np.sum(exp.gdf["value"][[0, 1, 2, 3, 5]])) tot_val = exp.affected_total_value( haz, threshold_affected=11, overwrite_assigned_centroids=False ) @@ -234,17 +333,18 @@ def test_affected_total_value(self): ) self.assertEqual(tot_val, 4) + class TestChecker(unittest.TestCase): """Test logs of check function""" def test_error_logs_fail(self): """Wrong exposures definition""" expo = good_exposures() - expo.gdf.drop(['longitude'], inplace=True, axis=1) + expo.gdf.drop(["longitude"], inplace=True, axis=1) with self.assertRaises(ValueError) as cm: expo.check() - self.assertIn('longitude missing', str(cm.exception)) + self.assertIn("longitude missing", str(cm.exception)) def test_error_logs_wrong_crs(self): """Ambiguous crs definition""" @@ -252,39 +352,49 @@ def test_error_logs_wrong_crs(self): expo.set_geometry_points() # sets crs to 4326 # all good - _expo = Exposures(expo.gdf, meta={'crs':4326}, crs=DEF_CRS) + _expo = Exposures(expo.gdf, meta={"crs": 4326}, crs=DEF_CRS) with self.assertRaises(ValueError) as cm: - _expo = Exposures(expo.gdf, meta={'crs':4230}, crs=4326) - self.assertIn("Inconsistent CRS definition, crs and meta arguments don't match", - str(cm.exception)) + _expo = Exposures(expo.gdf, meta={"crs": 4230}, crs=4326) + self.assertIn( + "Inconsistent CRS definition, crs and meta arguments don't match", + str(cm.exception), + ) with self.assertRaises(ValueError) as cm: - _expo = Exposures(expo.gdf, meta={'crs':4230}) - self.assertIn("Inconsistent CRS definition, data doesn't match meta or crs argument", - str(cm.exception)) + _expo 
= Exposures(expo.gdf, meta={"crs": 4230}) + self.assertIn( + "Inconsistent CRS definition, data doesn't match meta or crs argument", + str(cm.exception), + ) with self.assertRaises(ValueError) as cm: - _expo = Exposures(expo.gdf, crs='epsg:4230') - self.assertIn("Inconsistent CRS definition, data doesn't match meta or crs argument", - str(cm.exception)) + _expo = Exposures(expo.gdf, crs="epsg:4230") + self.assertIn( + "Inconsistent CRS definition, data doesn't match meta or crs argument", + str(cm.exception), + ) _expo = Exposures(expo.gdf) - _expo.meta['crs'] = 'epsg:4230' + _expo.meta["crs"] = "epsg:4230" with self.assertRaises(ValueError) as cm: _expo.check() - self.assertIn("Inconsistent CRS definition, gdf (EPSG:4326) attribute doesn't match " - "meta (epsg:4230) attribute.", str(cm.exception)) + self.assertIn( + "Inconsistent CRS definition, gdf (EPSG:4326) attribute doesn't match " + "meta (epsg:4230) attribute.", + str(cm.exception), + ) def test_error_geometry_fail(self): """Wrong exposures definition""" expo = good_exposures() expo.set_geometry_points() - expo.gdf['latitude'].values[0] = 5 + expo.gdf["latitude"].values[0] = 5 with self.assertRaises(ValueError): expo.check() + class TestIO(unittest.TestCase): """Check constructor Exposures through DataFrames readers""" @@ -294,7 +404,7 @@ def test_read_template_pass(self): exp_df = Exposures(df) # set metadata exp_df.ref_year = 2020 - exp_df.value_unit = 'XSD' + exp_df.value_unit = "XSD" exp_df.check() def test_io_hdf5_pass(self): @@ -304,13 +414,14 @@ def test_io_hdf5_pass(self): exp_df.check() # set metadata exp_df.ref_year = 2020 - exp_df.value_unit = 'XSD' + exp_df.value_unit = "XSD" - file_name = DATA_DIR.joinpath('test_hdf5_exp.h5') + file_name = DATA_DIR.joinpath("test_hdf5_exp.h5") # pd.errors.PerformanceWarning should be suppressed. 
Therefore, make sure that # PerformanceWarning would result in test failure here import warnings + with warnings.catch_warnings(): warnings.simplefilter("error", category=pd.errors.PerformanceWarning) exp_df.write_hdf5(file_name) @@ -323,36 +434,62 @@ def test_io_hdf5_pass(self): self.assertTrue(u_coord.equal_crs(exp_df.crs, exp_read.crs)) self.assertTrue(u_coord.equal_crs(exp_df.gdf.crs, exp_read.gdf.crs)) self.assertEqual(exp_df.description, exp_read.description) - np.testing.assert_array_equal(exp_df.gdf['latitude'].values, exp_read.gdf['latitude'].values) - np.testing.assert_array_equal(exp_df.gdf['longitude'].values, exp_read.gdf['longitude'].values) - np.testing.assert_array_equal(exp_df.gdf['value'].values, exp_read.gdf['value'].values) - np.testing.assert_array_equal(exp_df.gdf['deductible'].values, exp_read.gdf['deductible'].values) - np.testing.assert_array_equal(exp_df.gdf['cover'].values, exp_read.gdf['cover'].values) - np.testing.assert_array_equal(exp_df.gdf['region_id'].values, exp_read.gdf['region_id'].values) - np.testing.assert_array_equal(exp_df.gdf['category_id'].values, exp_read.gdf['category_id'].values) - np.testing.assert_array_equal(exp_df.gdf['impf_TC'].values, exp_read.gdf['impf_TC'].values) - np.testing.assert_array_equal(exp_df.gdf['centr_TC'].values, exp_read.gdf['centr_TC'].values) - np.testing.assert_array_equal(exp_df.gdf['impf_FL'].values, exp_read.gdf['impf_FL'].values) - np.testing.assert_array_equal(exp_df.gdf['centr_FL'].values, exp_read.gdf['centr_FL'].values) - - for point_df, point_read in zip(exp_df.gdf.geometry.values, exp_read.gdf.geometry.values): + np.testing.assert_array_equal( + exp_df.gdf["latitude"].values, exp_read.gdf["latitude"].values + ) + np.testing.assert_array_equal( + exp_df.gdf["longitude"].values, exp_read.gdf["longitude"].values + ) + np.testing.assert_array_equal( + exp_df.gdf["value"].values, exp_read.gdf["value"].values + ) + np.testing.assert_array_equal( + exp_df.gdf["deductible"].values, exp_read.gdf["deductible"].values + ) + np.testing.assert_array_equal( + exp_df.gdf["cover"].values, exp_read.gdf["cover"].values + ) + np.testing.assert_array_equal( + exp_df.gdf["region_id"].values, exp_read.gdf["region_id"].values + ) + np.testing.assert_array_equal( + exp_df.gdf["category_id"].values, exp_read.gdf["category_id"].values + ) + np.testing.assert_array_equal( + exp_df.gdf["impf_TC"].values, exp_read.gdf["impf_TC"].values + ) + np.testing.assert_array_equal( + exp_df.gdf["centr_TC"].values, exp_read.gdf["centr_TC"].values + ) + np.testing.assert_array_equal( + exp_df.gdf["impf_FL"].values, exp_read.gdf["impf_FL"].values + ) + np.testing.assert_array_equal( + exp_df.gdf["centr_FL"].values, exp_read.gdf["centr_FL"].values + ) + + for point_df, point_read in zip( + exp_df.gdf.geometry.values, exp_read.gdf.geometry.values + ): self.assertEqual(point_df.x, point_read.x) self.assertEqual(point_df.y, point_read.y) + class TestAddSea(unittest.TestCase): """Check constructor Exposures through DataFrames readers""" + def test_add_sea_pass(self): """Test add_sea function with fake data.""" exp = Exposures() - exp.gdf['value'] = np.arange(0, 1.0e6, 1.0e5) + exp.gdf["value"] = np.arange(0, 1.0e6, 1.0e5) min_lat, max_lat = 27.5, 30 min_lon, max_lon = -18, -12 - exp.gdf['latitude'] = np.linspace(min_lat, max_lat, 10) - exp.gdf['longitude'] = np.linspace(min_lon, max_lon, 10) - exp.gdf['region_id'] = np.ones(10) - exp.gdf['impf_TC'] = np.ones(10) + exp.gdf["latitude"] = np.linspace(min_lat, max_lat, 10) + exp.gdf["longitude"] = 
np.linspace(min_lon, max_lon, 10) + exp.gdf["region_id"] = np.ones(10) + exp.gdf["impf_TC"] = np.ones(10) exp.ref_year = 2015 - exp.value_unit = 'XSD' + exp.value_unit = "XSD" exp.check() sea_coast = 100 @@ -368,38 +505,52 @@ def test_add_sea_pass(self): max_lat = max_lat + sea_coast min_lon = min_lon - sea_coast max_lon = max_lon + sea_coast - self.assertEqual(np.min(exp_sea.gdf['latitude']), min_lat) - self.assertEqual(np.min(exp_sea.gdf['longitude']), min_lon) - np.testing.assert_array_equal(exp_sea.gdf.value.values[:10], np.arange(0, 1.0e6, 1.0e5)) + self.assertEqual(np.min(exp_sea.gdf["latitude"]), min_lat) + self.assertEqual(np.min(exp_sea.gdf["longitude"]), min_lon) + np.testing.assert_array_equal( + exp_sea.gdf.value.values[:10], np.arange(0, 1.0e6, 1.0e5) + ) self.assertEqual(exp_sea.ref_year, exp.ref_year) self.assertEqual(exp_sea.value_unit, exp.value_unit) - on_sea_lat = exp_sea.gdf['latitude'].values[11:] - on_sea_lon = exp_sea.gdf['longitude'].values[11:] + on_sea_lat = exp_sea.gdf["latitude"].values[11:] + on_sea_lon = exp_sea.gdf["longitude"].values[11:] res_on_sea = u_coord.coord_on_land(on_sea_lat, on_sea_lon) res_on_sea = ~res_on_sea self.assertTrue(np.all(res_on_sea)) - dist = DistanceMetric.get_metric('haversine') - self.assertAlmostEqual(dist.pairwise([ - [exp_sea.gdf['longitude'].values[-1], exp_sea.gdf['latitude'].values[-1]], - [exp_sea.gdf['longitude'].values[-2], exp_sea.gdf['latitude'].values[-2]], - ])[0][1], sea_res_km) + dist = DistanceMetric.get_metric("haversine") + self.assertAlmostEqual( + dist.pairwise( + [ + [ + exp_sea.gdf["longitude"].values[-1], + exp_sea.gdf["latitude"].values[-1], + ], + [ + exp_sea.gdf["longitude"].values[-2], + exp_sea.gdf["latitude"].values[-2], + ], + ] + )[0][1], + sea_res_km, + ) class TestConcat(unittest.TestCase): """Check constructor Exposures through DataFrames readers""" + def setUp(self): - exp = Exposures(crs='epsg:3395') - exp.gdf['value'] = np.arange(0, 1.0e6, 1.0e5) + exp = Exposures(crs="epsg:3395") + exp.gdf["value"] = np.arange(0, 1.0e6, 1.0e5) min_lat, max_lat = 27.5, 30 min_lon, max_lon = -18, -12 - exp.gdf['latitude'] = np.linspace(min_lat, max_lat, 10) - exp.gdf['longitude'] = np.linspace(min_lon, max_lon, 10) - exp.gdf['region_id'] = np.ones(10) - exp.gdf['impf_TC'] = np.ones(10) + exp.gdf["latitude"] = np.linspace(min_lat, max_lat, 10) + exp.gdf["longitude"] = np.linspace(min_lon, max_lon, 10) + exp.gdf["region_id"] = np.ones(10) + exp.gdf["impf_TC"] = np.ones(10) exp.ref_year = 2015 - exp.value_unit = 'XSD' + exp.value_unit = "XSD" self.dummy = exp def test_concat_pass(self): @@ -407,20 +558,30 @@ def test_concat_pass(self): self.dummy.check() - catexp = Exposures.concat([self.dummy, self.dummy.gdf, pd.DataFrame(self.dummy.gdf.values, columns=self.dummy.gdf.columns), self.dummy]) - self.assertEqual(self.dummy.gdf.shape, (10,5)) - self.assertEqual(catexp.gdf.shape, (40,5)) - self.assertTrue(u_coord.equal_crs(catexp.crs, 'epsg:3395')) + catexp = Exposures.concat( + [ + self.dummy, + self.dummy.gdf, + pd.DataFrame(self.dummy.gdf.values, columns=self.dummy.gdf.columns), + self.dummy, + ] + ) + self.assertEqual(self.dummy.gdf.shape, (10, 5)) + self.assertEqual(catexp.gdf.shape, (40, 5)) + self.assertTrue(u_coord.equal_crs(catexp.crs, "epsg:3395")) def test_concat_fail(self): """Test failing concat function with fake data.""" with self.assertRaises(TypeError): - Exposures.concat([self.dummy, self.dummy.gdf, self.dummy.gdf.values, self.dummy]) + Exposures.concat( + [self.dummy, self.dummy.gdf, 
self.dummy.gdf.values, self.dummy] + ) class TestGeoDFFuncs(unittest.TestCase): """Check constructor Exposures through DataFrames readers""" + def test_copy_pass(self): """Test copy function.""" exp = good_exposures() @@ -431,17 +592,21 @@ def test_copy_pass(self): self.assertEqual(exp_copy.ref_year, exp.ref_year) self.assertEqual(exp_copy.value_unit, exp.value_unit) self.assertEqual(exp_copy.description, exp.description) - np.testing.assert_array_equal(exp_copy.gdf['latitude'].values, exp.gdf['latitude'].values) - np.testing.assert_array_equal(exp_copy.gdf['longitude'].values, exp.gdf['longitude'].values) + np.testing.assert_array_equal( + exp_copy.gdf["latitude"].values, exp.gdf["latitude"].values + ) + np.testing.assert_array_equal( + exp_copy.gdf["longitude"].values, exp.gdf["longitude"].values + ) def test_to_crs_inplace_pass(self): """Test to_crs function inplace.""" exp = good_exposures() exp.set_geometry_points() exp.check() - exp.to_crs('epsg:3395', inplace=True) + exp.to_crs("epsg:3395", inplace=True) self.assertIsInstance(exp, Exposures) - self.assertTrue(u_coord.equal_crs(exp.crs, 'epsg:3395')) + self.assertTrue(u_coord.equal_crs(exp.crs, "epsg:3395")) self.assertEqual(exp.ref_year, DEF_REF_YEAR) self.assertEqual(exp.value_unit, DEF_VALUE_UNIT) self.assertEqual(exp.description, None) @@ -451,10 +616,10 @@ def test_to_crs_pass(self): exp = good_exposures() exp.set_geometry_points() exp.check() - exp_tr = exp.to_crs('epsg:3395') + exp_tr = exp.to_crs("epsg:3395") self.assertIsInstance(exp, Exposures) self.assertTrue(u_coord.equal_crs(exp.crs, DEF_CRS)) - self.assertTrue(u_coord.equal_crs(exp_tr.crs, 'epsg:3395')) + self.assertTrue(u_coord.equal_crs(exp_tr.crs, "epsg:3395")) self.assertEqual(exp_tr.ref_year, DEF_REF_YEAR) self.assertEqual(exp_tr.value_unit, DEF_VALUE_UNIT) self.assertEqual(exp_tr.description, None) @@ -462,17 +627,17 @@ def test_to_crs_pass(self): def test_constructor_pass(self): """Test initialization with input GeoDataFrame""" in_gpd = gpd.GeoDataFrame() - in_gpd['value'] = np.zeros(10) + in_gpd["value"] = np.zeros(10) in_gpd.ref_year = 2015 in_exp = Exposures(in_gpd, ref_year=2015) self.assertEqual(in_exp.ref_year, 2015) - np.testing.assert_array_equal(in_exp.gdf['value'], np.zeros(10)) + np.testing.assert_array_equal(in_exp.gdf["value"], np.zeros(10)) def test_error_on_access_item(self): """Test error output when trying to access items as in CLIMADA 1.x""" expo = good_exposures() with self.assertRaises(TypeError) as err: - expo['value'] = 3 + expo["value"] = 3 self.assertIn("CLIMADA 2", str(err.exception)) self.assertIn("gdf", str(err.exception)) @@ -481,7 +646,7 @@ def test_set_gdf(self): empty_gdf = gpd.GeoDataFrame() gdf_without_geometry = good_exposures().gdf good_exp = good_exposures() - good_exp.set_crs(crs='epsg:3395') + good_exp.set_crs(crs="epsg:3395") good_exp.set_geometry_points() gdf_with_geometry = good_exp.gdf @@ -495,8 +660,8 @@ def test_set_gdf(self): probe.set_gdf(gdf_with_geometry) self.assertTrue(probe.gdf.equals(gdf_with_geometry)) - self.assertTrue(u_coord.equal_crs('epsg:3395', probe.crs)) - self.assertTrue(u_coord.equal_crs('epsg:3395', probe.gdf.crs)) + self.assertTrue(u_coord.equal_crs("epsg:3395", probe.crs)) + self.assertTrue(u_coord.equal_crs("epsg:3395", probe.gdf.crs)) probe.set_gdf(gdf_without_geometry) self.assertTrue(probe.gdf.equals(good_exposures().gdf)) @@ -513,61 +678,63 @@ def test_set_crs(self): probe = Exposures(gdf_without_geometry) self.assertTrue(u_coord.equal_crs(DEF_CRS, probe.crs)) - probe.set_crs('epsg:3395') - 
self.assertTrue(u_coord.equal_crs('epsg:3395', probe.crs)) + probe.set_crs("epsg:3395") + self.assertTrue(u_coord.equal_crs("epsg:3395", probe.crs)) probe = Exposures(gdf_with_geometry) self.assertTrue(u_coord.equal_crs(DEF_CRS, probe.crs)) probe.set_crs(DEF_CRS) self.assertTrue(u_coord.equal_crs(DEF_CRS, probe.crs)) - self.assertRaises(ValueError, probe.set_crs, 'epsg:3395') - self.assertTrue(u_coord.equal_crs('EPSG:4326', probe.meta.get('crs'))) + self.assertRaises(ValueError, probe.set_crs, "epsg:3395") + self.assertTrue(u_coord.equal_crs("EPSG:4326", probe.meta.get("crs"))) def test_to_crs_epsg_crs(self): - """ Check that if crs and epsg are both provided a ValueError is raised""" + """Check that if crs and epsg are both provided a ValueError is raised""" with self.assertRaises(ValueError) as cm: - Exposures.to_crs(self, crs='GCS', epsg=26915) + Exposures.to_crs(self, crs="GCS", epsg=26915) self.assertEqual("one of crs or epsg must be None", str(cm.exception)) + class TestImpactFunctions(unittest.TestCase): """Test impact function handling""" + def test_get_impf_column(self): """Test the get_impf_column""" expo = good_exposures() # impf column is 'impf_NA' - self.assertEqual('impf_NA', expo.get_impf_column('NA')) + self.assertEqual("impf_NA", expo.get_impf_column("NA")) self.assertRaises(ValueError, expo.get_impf_column) - self.assertRaises(ValueError, expo.get_impf_column, 'HAZ') + self.assertRaises(ValueError, expo.get_impf_column, "HAZ") # removed impf column - expo.gdf.drop(columns='impf_NA', inplace=True) - self.assertRaises(ValueError, expo.get_impf_column, 'NA') + expo.gdf.drop(columns="impf_NA", inplace=True) + self.assertRaises(ValueError, expo.get_impf_column, "NA") self.assertRaises(ValueError, expo.get_impf_column) # default (anonymous) impf column expo.check() - self.assertEqual('impf_', expo.get_impf_column()) - self.assertEqual('impf_', expo.get_impf_column('HAZ')) + self.assertEqual("impf_", expo.get_impf_column()) + self.assertEqual("impf_", expo.get_impf_column("HAZ")) # rename impf column to old style column name - expo.gdf.rename(columns={'impf_': 'if_'}, inplace=True) + expo.gdf.rename(columns={"impf_": "if_"}, inplace=True) expo.check() - self.assertEqual('if_', expo.get_impf_column()) - self.assertEqual('if_', expo.get_impf_column('HAZ')) + self.assertEqual("if_", expo.get_impf_column()) + self.assertEqual("if_", expo.get_impf_column("HAZ")) # rename impf column to old style column name - expo.gdf.rename(columns={'if_': 'if_NA'}, inplace=True) + expo.gdf.rename(columns={"if_": "if_NA"}, inplace=True) expo.check() - self.assertEqual('if_NA', expo.get_impf_column('NA')) + self.assertEqual("if_NA", expo.get_impf_column("NA")) self.assertRaises(ValueError, expo.get_impf_column) - self.assertRaises(ValueError, expo.get_impf_column, 'HAZ') + self.assertRaises(ValueError, expo.get_impf_column, "HAZ") # add anonymous impf column - expo.gdf['impf_'] = expo.gdf['region_id'] - self.assertEqual('if_NA', expo.get_impf_column('NA')) - self.assertEqual('impf_', expo.get_impf_column()) - self.assertEqual('impf_', expo.get_impf_column('HAZ')) + expo.gdf["impf_"] = expo.gdf["region_id"] + self.assertEqual("if_NA", expo.get_impf_column("NA")) + self.assertEqual("impf_", expo.get_impf_column()) + self.assertEqual("impf_", expo.get_impf_column("HAZ")) # Execute Tests diff --git a/climada/entity/exposures/test/test_litpop.py b/climada/entity/exposures/test/test_litpop.py index d8ec001cd..72360bc8d 100644 --- a/climada/entity/exposures/test/test_litpop.py +++ 
b/climada/entity/exposures/test/test_litpop.py @@ -19,10 +19,12 @@ Unit Tests for LitPop class. """ -import numpy as np import unittest -from rasterio.crs import CRS + +import numpy as np from rasterio import Affine +from rasterio.crs import CRS + from climada.entity.exposures.litpop import litpop as lp @@ -30,73 +32,86 @@ def data_arrays_demo(number_of_arrays=2): """init demo data arrays (2d) for LitPop core calculations""" data_arrays = list() if number_of_arrays > 0: - data_arrays.append(np.array([[0,1,2], [3,4,5]])) + data_arrays.append(np.array([[0, 1, 2], [3, 4, 5]])) # array([[0, 1, 2], # [3, 4, 5]]) if number_of_arrays > 1: - data_arrays.append(np.array([[10,10,10], [1,1,1]])) + data_arrays.append(np.array([[10, 10, 10], [1, 1, 1]])) # array([[10, 10, 10], # [1, 1, 1]]) if number_of_arrays > 2: - data_arrays.append(np.array([[0,1,10], [0,1,10]])) + data_arrays.append(np.array([[0, 1, 10], [0, 1, 10]])) # array([[0, 1, 10], # [0, 1, 10]]) if number_of_arrays > 3: - data_arrays.append([[0,1,10,100], [0,1,10,100]]) + data_arrays.append([[0, 1, 10, 100], [0, 1, 10, 100]]) # [[0, 1, 10, 100], # [0, 1, 10, 100]] return data_arrays + def data_arrays_resampling_demo(): """init demo data arrays (2d) and meta data for resampling""" data_arrays = list() # demo pop: - data_arrays.append(np.array([[0,1,2], [3,4,5]], dtype='float32')) - data_arrays.append(np.array([[0,1,2], [3,4,5]], dtype='float32')) - # array([[0, 1, 2], - # [3, 4, 5]]) + data_arrays.append(np.array([[0, 1, 2], [3, 4, 5]], dtype="float32")) + data_arrays.append(np.array([[0, 1, 2], [3, 4, 5]], dtype="float32")) + # array([[0, 1, 2], + # [3, 4, 5]]) # demo nightlight: - data_arrays.append(np.array([[2,10,0, 0, 0, 0], [10,2,10, 0, 0, 0], - [0,0,0, 0, 1, 1], [1,0,0, 0, 1, 1]], - dtype='float32')) - # array([[ 2., 10., 0., 0., 0., 0.], - # [10., 2., 10., 0., 0., 0.], - # [ 0., 0., 0., 0., 1., 1.], - # [ 1., 0., 0., 0., 1., 1.]], dtype=float32)] - - meta_list = [{'driver': 'GTiff', - 'dtype': 'float32', - 'nodata': -3.4028230607370965e+38, - 'width': 3, - 'height': 2, - 'count': 1, - 'crs': CRS.from_epsg(4326), - #'crs': CRS.from_epsg(4326), - 'transform': Affine(1, 0.0, -10, - 0.0, -1, 40), - }, - {'driver': 'GTiff', - 'dtype': 'float32', - 'nodata': -3.4028230607370965e+38, - 'width': 3, - 'height': 2, - 'count': 1, - 'crs': CRS.from_epsg(4326), - #'crs': CRS.from_epsg(4326), - 'transform': Affine(1, 0.0, -10, - 0.0, -1, 41), # shifted by 1 degree latitude to the north - }, - {'driver': 'GTiff', - 'dtype': 'float32', - 'nodata': None, - 'width': 6, - 'height': 4, - 'count': 1, - 'crs': CRS.from_epsg(4326), - # 'crs': CRS.from_epsg(32662), - 'transform': Affine(.5, 0.0, -10, - 0.0, -.5, 40), # higher resolution - }] + data_arrays.append( + np.array( + [ + [2, 10, 0, 0, 0, 0], + [10, 2, 10, 0, 0, 0], + [0, 0, 0, 0, 1, 1], + [1, 0, 0, 0, 1, 1], + ], + dtype="float32", + ) + ) + # array([[ 2., 10., 0., 0., 0., 0.], + # [10., 2., 10., 0., 0., 0.], + # [ 0., 0., 0., 0., 1., 1.], + # [ 1., 0., 0., 0., 1., 1.]], dtype=float32)] + + meta_list = [ + { + "driver": "GTiff", + "dtype": "float32", + "nodata": -3.4028230607370965e38, + "width": 3, + "height": 2, + "count": 1, + "crs": CRS.from_epsg(4326), + #'crs': CRS.from_epsg(4326), + "transform": Affine(1, 0.0, -10, 0.0, -1, 40), + }, + { + "driver": "GTiff", + "dtype": "float32", + "nodata": -3.4028230607370965e38, + "width": 3, + "height": 2, + "count": 1, + "crs": CRS.from_epsg(4326), + #'crs': CRS.from_epsg(4326), + "transform": Affine( + 1, 0.0, -10, 0.0, -1, 41 + ), # shifted by 1 
degree latitude to the north + }, + { + "driver": "GTiff", + "dtype": "float32", + "nodata": None, + "width": 6, + "height": 4, + "count": 1, + "crs": CRS.from_epsg(4326), + # 'crs': CRS.from_epsg(32662), + "transform": Affine(0.5, 0.0, -10, 0.0, -0.5, 40), # higher resolution + }, + ] return data_arrays, meta_list @@ -108,29 +123,36 @@ def test_reproject_input_data_downsample(self): (default resampling for LitPop)""" data_in, meta_list = data_arrays_resampling_demo() # - data_out, meta_out = lp.reproject_input_data(data_in, meta_list, - i_align=0, - target_res_arcsec=None, - global_origins=(-180, 90) - ) + data_out, meta_out = lp.reproject_input_data( + data_in, + meta_list, + i_align=0, + target_res_arcsec=None, + global_origins=(-180, 90), + ) # test reference data unchanged: np.testing.assert_array_equal(data_in[0], data_out[0]) # test northward shift: - np.testing.assert_array_equal(data_in[1][1,:], data_out[1][0,:]) + np.testing.assert_array_equal(data_in[1][1, :], data_out[1][0, :]) # test reprojected nl data: - reference_array = np.array([[5.020408 , 2.267857 , 0.12244898], - [1.1224489 , 0.6785714 , 0.7346939 ]], dtype='float32') + reference_array = np.array( + [[5.020408, 2.267857, 0.12244898], [1.1224489, 0.6785714, 0.7346939]], + dtype="float32", + ) np.testing.assert_array_almost_equal_nulp(reference_array, data_out[2]) def test_reproject_input_data_downsample_conserve_sum(self): """test function reproject_input_data downsampling with conservation of sum""" data_in, meta_list = data_arrays_resampling_demo() # - data_out, meta_out = lp.reproject_input_data(data_in, meta_list, - i_align=0, - target_res_arcsec=None, - global_origins=(-180, 90), - conserve='sum') + data_out, meta_out = lp.reproject_input_data( + data_in, + meta_list, + i_align=0, + target_res_arcsec=None, + global_origins=(-180, 90), + conserve="sum", + ) # test reference data unchanged: np.testing.assert_array_equal(data_in[0], data_out[0]) # test conserve sum: @@ -141,11 +163,14 @@ def test_reproject_input_data_downsample_conserve_mean(self): """test function reproject_input_data downsampling with conservation of sum""" data_in, meta_list = data_arrays_resampling_demo() # - data_out, meta_out = lp.reproject_input_data(data_in, meta_list, - i_align=1, - target_res_arcsec=None, - global_origins=(-180, 90), - conserve='mean') + data_out, meta_out = lp.reproject_input_data( + data_in, + meta_list, + i_align=1, + target_res_arcsec=None, + global_origins=(-180, 90), + conserve="mean", + ) # test reference data unchanged: np.testing.assert_array_equal(data_in[1], data_out[1]) # test conserve sum: @@ -157,36 +182,45 @@ def test_reproject_input_data_upsample(self): (usually not required for LitPop)""" data_in, meta_list = data_arrays_resampling_demo() # - data_out, meta_out = lp.reproject_input_data(data_in, meta_list, - i_align=2, # high res data as reference - target_res_arcsec=None, - global_origins=(-180, 90) - ) + data_out, meta_out = lp.reproject_input_data( + data_in, + meta_list, + i_align=2, # high res data as reference + target_res_arcsec=None, + global_origins=(-180, 90), + ) # test reference data unchanged: np.testing.assert_array_equal(data_in[2], data_out[2]) # test northward shift: - np.testing.assert_array_equal(data_out[0][2,:], data_out[1][0,:]) - np.testing.assert_array_equal(data_out[0][3,:], data_out[1][1,:]) + np.testing.assert_array_equal(data_out[0][2, :], data_out[1][0, :]) + np.testing.assert_array_equal(data_out[0][3, :], data_out[1][1, :]) # test reprojected nl data: - reference_array = 
np.array([[0. , 0.25, 0.75, 1.25, 1.75, 2. ], - [0.75, 1. , 1.5 , 2. , 2.5 , 2.75], - [2.25, 2.5 , 3. , 3.5 , 4. , 4.25], - [3. , 3.25, 3.75, 4.25, 4.75, 5. ]], dtype='float32') + reference_array = np.array( + [ + [0.0, 0.25, 0.75, 1.25, 1.75, 2.0], + [0.75, 1.0, 1.5, 2.0, 2.5, 2.75], + [2.25, 2.5, 3.0, 3.5, 4.0, 4.25], + [3.0, 3.25, 3.75, 4.25, 4.75, 5.0], + ], + dtype="float32", + ) np.testing.assert_array_equal(reference_array, data_out[0]) def test_reproject_input_data_odd_downsample(self): """test function reproject_input_data with odd downsampling""" data_in, meta_list = data_arrays_resampling_demo() # - data_out, meta_out = \ - lp.reproject_input_data(data_in, meta_list, - i_align=0, # high res data as reference - target_res_arcsec=6120, # 1.7 degree - global_origins=(-180, 90), - ) - self.assertEqual(1.7, meta_out['transform'][0]) # check resolution - reference_array = np.array([[0.425 , 1.7631578], - [3.425 , 4.763158 ]], dtype='float32') + data_out, meta_out = lp.reproject_input_data( + data_in, + meta_list, + i_align=0, # high res data as reference + target_res_arcsec=6120, # 1.7 degree + global_origins=(-180, 90), + ) + self.assertEqual(1.7, meta_out["transform"][0]) # check resolution + reference_array = np.array( + [[0.425, 1.7631578], [3.425, 4.763158]], dtype="float32" + ) np.testing.assert_array_equal(reference_array, data_out[0]) def test_gridpoints_core_calc_input_errors(self): @@ -195,10 +229,10 @@ def test_gridpoints_core_calc_input_errors(self): data = data_arrays_demo(2) # negative offset: with self.assertRaises(ValueError): - lp.gridpoints_core_calc(data, offsets=[2,-1]) + lp.gridpoints_core_calc(data, offsets=[2, -1]) # negative exponents: with self.assertRaises(ValueError): - lp.gridpoints_core_calc(data, exponents=[2,-1]) + lp.gridpoints_core_calc(data, exponents=[2, -1]) # different shapes: with self.assertRaises(ValueError): @@ -206,33 +240,32 @@ def test_gridpoints_core_calc_input_errors(self): # wrong format: with self.assertRaises(ValueError): - lp.gridpoints_core_calc(data, exponents=['a', 'b']) - data.append('hello i am a string') + lp.gridpoints_core_calc(data, exponents=["a", "b"]) + data.append("hello i am a string") with self.assertRaises(ValueError): lp.gridpoints_core_calc(data) def test_gridpoints_core_calc_default_1(self): """test function gridpoints_core_calc, i.e. core data combination on grid point level with default exponents and offsets - 1 array""" - data_arrays = data_arrays_demo(1) # get list with 1 demo array + data_arrays = data_arrays_demo(1) # get list with 1 demo array result_array = lp.gridpoints_core_calc(data_arrays) results_check = data_arrays[0] self.assertEqual(result_array.shape, results_check.shape) - self.assertEqual(result_array[1,1], results_check[1,1]) + self.assertEqual(result_array[1, 1], results_check[1, 1]) np.testing.assert_array_almost_equal_nulp(result_array, results_check) def test_gridpoints_core_calc_default_2(self): """test function gridpoints_core_calc, i.e. 
core data combination on grid point level with default exponents and offsets- 2 arrays""" - data_arrays = data_arrays_demo(2) # get list with 2 demo arrays + data_arrays = data_arrays_demo(2) # get list with 2 demo arrays result_array = lp.gridpoints_core_calc(data_arrays) results_check = data_arrays[0] * data_arrays[1] - self.assertEqual(result_array[0,0], results_check[0,0]) + self.assertEqual(result_array[0, 0], results_check[0, 0]) np.testing.assert_array_almost_equal_nulp(result_array, results_check) - def test_gridpoints_core_calc_default_3(self): """test function gridpoints_core_calc, i.e. core data combination on grid point level with default exponents and offsets- 3 arrays""" @@ -241,133 +274,141 @@ def test_gridpoints_core_calc_default_3(self): results_check = data_arrays[0] * data_arrays[1] * data_arrays[2] self.assertEqual(result_array.shape, results_check.shape) - self.assertEqual(result_array[1,1], results_check[1,1]) + self.assertEqual(result_array[1, 1], results_check[1, 1]) np.testing.assert_array_almost_equal_nulp(result_array, results_check) - #self.assertEqual(result_array, data_arrays[0] * data_arrays[1]) + # self.assertEqual(result_array, data_arrays[0] * data_arrays[1]) def test_gridpoints_core_calc_exp(self): """test function gridpoints_core_calc, i.e. core data combination on grid point level with changed exponents""" - data_arrays = data_arrays_demo(2) # get list with 2 demo arrays + data_arrays = data_arrays_demo(2) # get list with 2 demo arrays exp = [2, 1] result_array = lp.gridpoints_core_calc(data_arrays, exponents=exp) results_check = data_arrays[0] * data_arrays[0] * data_arrays[1] self.assertEqual(result_array.shape, results_check.shape) - self.assertEqual(result_array[0,2], results_check[0,2]) + self.assertEqual(result_array[0, 2], results_check[0, 2]) np.testing.assert_array_almost_equal_nulp(result_array, results_check) - exp = [2, .1] + exp = [2, 0.1] result_array = lp.gridpoints_core_calc(data_arrays, exponents=exp) - results_check = data_arrays[0] * data_arrays[0] * (data_arrays[1] ** .1) + results_check = data_arrays[0] * data_arrays[0] * (data_arrays[1] ** 0.1) np.testing.assert_array_almost_equal_nulp(result_array, results_check) def test_gridpoints_core_calc_offsets(self): """test function gridpoints_core_calc, i.e. core data combination on grid point level with changed offsets""" - data_arrays = data_arrays_demo(2) # get list with 2 demo arrays + data_arrays = data_arrays_demo(2) # get list with 2 demo arrays offsets = [1, 10] result_array = lp.gridpoints_core_calc(data_arrays, offsets=offsets) - results_check = (data_arrays[0]+1) * (10 + data_arrays[1]) + results_check = (data_arrays[0] + 1) * (10 + data_arrays[1]) self.assertEqual(result_array.shape, results_check.shape) - self.assertEqual(result_array[0,2], results_check[0,2]) + self.assertEqual(result_array[0, 2], results_check[0, 2]) np.testing.assert_array_almost_equal_nulp(result_array, results_check) def test_gridpoints_core_calc_offsets_exp(self): """test function gridpoints_core_calc, i.e. 
core data combination on grid point level with changed offsets and exponents""" - data_arrays = data_arrays_demo(2) # get list with 2 demo arrays + data_arrays = data_arrays_demo(2) # get list with 2 demo arrays offsets = [0, 10] exp = [2, 1] - result_array = lp.gridpoints_core_calc(data_arrays, offsets=offsets, - exponents=exp) - results_check = (data_arrays[0]) * (data_arrays[0]) * (10+data_arrays[1]) - results_check2 = np.array([[0, 20, 80],[99, 176, 275]]) + result_array = lp.gridpoints_core_calc( + data_arrays, offsets=offsets, exponents=exp + ) + results_check = (data_arrays[0]) * (data_arrays[0]) * (10 + data_arrays[1]) + results_check2 = np.array([[0, 20, 80], [99, 176, 275]]) self.assertEqual(result_array.shape, results_check.shape) - self.assertEqual(result_array[0,2], results_check[0,2]) + self.assertEqual(result_array[0, 2], results_check[0, 2]) np.testing.assert_array_almost_equal_nulp(result_array, results_check) - self.assertEqual(result_array[1,2], results_check2[1,2]) + self.assertEqual(result_array[1, 2], results_check2[1, 2]) np.testing.assert_array_almost_equal_nulp(result_array, results_check2) def test_gridpoints_core_calc_rescale(self): """test function gridpoints_core_calc, i.e. core data combination on grid point level with rescaling (default exponents and offsets)""" - data_arrays = data_arrays_demo(2) # get list with 2 demo arrays + data_arrays = data_arrays_demo(2) # get list with 2 demo arrays result_array = lp.gridpoints_core_calc(data_arrays, total_val_rescale=2.5) - results_check = (data_arrays[0]*data_arrays[1]) * 2.5/np.sum(data_arrays[0]*data_arrays[1]) + results_check = ( + (data_arrays[0] * data_arrays[1]) + * 2.5 + / np.sum(data_arrays[0] * data_arrays[1]) + ) self.assertAlmostEqual(result_array.sum(), 2.5) - self.assertEqual(result_array[0,1], results_check[0,1]) + self.assertEqual(result_array[0, 1], results_check[0, 1]) np.testing.assert_array_almost_equal_nulp(result_array, results_check) def test_gridpoints_core_calc_offsets_exp_rescale(self): """test function gridpoints_core_calc, i.e. 
core data combination on grid point level with changed offsets and exponents and rescaling""" - data_arrays = data_arrays_demo(2) # get list with 2 demo arrays + data_arrays = data_arrays_demo(2) # get list with 2 demo arrays offsets = [0.2, 3] - exp = [.5, 1.7] + exp = [0.5, 1.7] tot = -7 - result_array = lp.gridpoints_core_calc(data_arrays, offsets=offsets, - exponents=exp, total_val_rescale=tot) - results_check = np.array(data_arrays[0]+.2, dtype=float)**exp[0] * \ - (np.array(data_arrays[1]+3., dtype=float)**exp[1]) + result_array = lp.gridpoints_core_calc( + data_arrays, offsets=offsets, exponents=exp, total_val_rescale=tot + ) + results_check = np.array(data_arrays[0] + 0.2, dtype=float) ** exp[0] * ( + np.array(data_arrays[1] + 3.0, dtype=float) ** exp[1] + ) results_check = results_check * tot / results_check.sum() self.assertEqual(result_array.shape, results_check.shape) self.assertAlmostEqual(result_array.sum(), tot) - self.assertEqual(result_array[1,2], results_check[1,2]) + self.assertEqual(result_array[1, 2], results_check[1, 2]) np.testing.assert_allclose(result_array, results_check) def test_grp_read_pass(self): """test _grp_read() to pass and return either dict with admin1 values or None""" - result = lp._grp_read('JPN') + result = lp._grp_read("JPN") if result is not None: self.assertIsInstance(result, dict) - self.assertIn('Fukuoka', result.keys()) - self.assertIsInstance(result['Saga'], float) + self.assertIn("Fukuoka", result.keys()) + self.assertIsInstance(result["Saga"], float) def test_fail_get_total_value_per_country_pop(self): "test _get_total_value_per_country fails for pop" with self.assertRaises(NotImplementedError): - lp._get_total_value_per_country('XXX', 'pop', None) + lp._get_total_value_per_country("XXX", "pop", None) def test_get_total_value_per_country_none(self): "test _get_total_value_per_country pass with None" - value = lp._get_total_value_per_country('XXX', 'none', None) + value = lp._get_total_value_per_country("XXX", "none", None) self.assertEqual(value, None) def test_get_total_value_per_country_norm(self): "test _get_total_value_per_country pass with 1" - value = lp._get_total_value_per_country('XXX', 'norm', None) + value = lp._get_total_value_per_country("XXX", "norm", None) self.assertEqual(value, 1) def test_get_total_value_per_country_gdp(self): "test _get_total_value_per_country get number for gdp" - gdp_togo = lp._get_total_value_per_country('TGO', 'gdp', 2010) - gdp_switzerland = lp._get_total_value_per_country('CHE', 'gdp', 2222) - value_switzerland = lp._get_total_value_per_country('CHE', 'income_group', 2222) + gdp_togo = lp._get_total_value_per_country("TGO", "gdp", 2010) + gdp_switzerland = lp._get_total_value_per_country("CHE", "gdp", 2222) + value_switzerland = lp._get_total_value_per_country("CHE", "income_group", 2222) self.assertIsInstance(gdp_togo, float) # value for income_group = gdp * income group: - self.assertEqual(value_switzerland, 5*gdp_switzerland) + self.assertEqual(value_switzerland, 5 * gdp_switzerland) def test_get_total_value_per_country_pc(self): "test _get_total_value_per_country get number for pc of Poland" - value = lp._get_total_value_per_country('POL', 'pc', 2015) + value = lp._get_total_value_per_country("POL", "pc", 2015) self.assertIsInstance(value, float) def test_get_total_value_per_country_nfw(self): "test _get_total_value_per_country get number for pc of Poland" - value = lp._get_total_value_per_country('POL', 'nfw', 2015) + value = lp._get_total_value_per_country("POL", "nfw", 2015) 
self.assertIsInstance(value, float) def test_get_value_unit_pass(self): """test get_value_unit pass""" - self.assertEqual(lp.get_value_unit('pop'), 'people') - self.assertEqual(lp.get_value_unit('gdp'), 'USD') - self.assertEqual(lp.get_value_unit('pc'), 'USD') - self.assertEqual(lp.get_value_unit('nfw'), 'USD') - self.assertEqual(lp.get_value_unit('none'), '') + self.assertEqual(lp.get_value_unit("pop"), "people") + self.assertEqual(lp.get_value_unit("gdp"), "USD") + self.assertEqual(lp.get_value_unit("pc"), "USD") + self.assertEqual(lp.get_value_unit("nfw"), "USD") + self.assertEqual(lp.get_value_unit("none"), "") + if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestLitPop) diff --git a/climada/entity/exposures/test/test_mat.py b/climada/entity/exposures/test/test_mat.py index c6fbe09f8..540b92c88 100644 --- a/climada/entity/exposures/test/test_mat.py +++ b/climada/entity/exposures/test/test_mat.py @@ -18,13 +18,15 @@ Test Exposures from MATLAB file. """ -import unittest + import copy +import unittest from climada import CONFIG -from climada.entity.exposures.base import Exposures, DEF_VAR_MAT +from climada.entity.exposures.base import DEF_VAR_MAT, Exposures + +ENT_TEST_MAT = CURR_DIR = CONFIG.exposures.test_data.dir().joinpath("demo_today.mat") -ENT_TEST_MAT = CURR_DIR = CONFIG.exposures.test_data.dir().joinpath('demo_today.mat') class TestReader(unittest.TestCase): """Test reader functionality of the ExposuresMat class""" @@ -41,43 +43,44 @@ def test_read_demo_pass(self): self.assertEqual(expo.gdf.index[0], 0) self.assertEqual(expo.gdf.index[n_expos - 1], n_expos - 1) - self.assertEqual(expo.gdf['value'].shape, (n_expos,)) - self.assertEqual(expo.gdf['value'][0], 13927504367.680632) - self.assertEqual(expo.gdf['value'][n_expos - 1], 12624818493.687229) + self.assertEqual(expo.gdf["value"].shape, (n_expos,)) + self.assertEqual(expo.gdf["value"][0], 13927504367.680632) + self.assertEqual(expo.gdf["value"][n_expos - 1], 12624818493.687229) - self.assertEqual(expo.gdf['deductible'].shape, (n_expos,)) - self.assertEqual(expo.gdf['deductible'][0], 0) - self.assertEqual(expo.gdf['deductible'][n_expos - 1], 0) + self.assertEqual(expo.gdf["deductible"].shape, (n_expos,)) + self.assertEqual(expo.gdf["deductible"][0], 0) + self.assertEqual(expo.gdf["deductible"][n_expos - 1], 0) - self.assertEqual(expo.gdf['cover'].shape, (n_expos,)) - self.assertEqual(expo.gdf['cover'][0], 13927504367.680632) - self.assertEqual(expo.gdf['cover'][n_expos - 1], 12624818493.687229) + self.assertEqual(expo.gdf["cover"].shape, (n_expos,)) + self.assertEqual(expo.gdf["cover"][0], 13927504367.680632) + self.assertEqual(expo.gdf["cover"][n_expos - 1], 12624818493.687229) - self.assertIn('int', str(expo.gdf['impf_'].dtype)) - self.assertEqual(expo.gdf['impf_'].shape, (n_expos,)) - self.assertEqual(expo.gdf['impf_'][0], 1) - self.assertEqual(expo.gdf['impf_'][n_expos - 1], 1) + self.assertIn("int", str(expo.gdf["impf_"].dtype)) + self.assertEqual(expo.gdf["impf_"].shape, (n_expos,)) + self.assertEqual(expo.gdf["impf_"][0], 1) + self.assertEqual(expo.gdf["impf_"][n_expos - 1], 1) - self.assertIn('int', str(expo.gdf['category_id'].dtype)) - self.assertEqual(expo.gdf['category_id'].shape, (n_expos,)) - self.assertEqual(expo.gdf['category_id'][0], 1) - self.assertEqual(expo.gdf['category_id'][n_expos - 1], 1) + self.assertIn("int", str(expo.gdf["category_id"].dtype)) + self.assertEqual(expo.gdf["category_id"].shape, (n_expos,)) + self.assertEqual(expo.gdf["category_id"][0], 1) + 
self.assertEqual(expo.gdf["category_id"][n_expos - 1], 1) - self.assertIn('int', str(expo.gdf['centr_'].dtype)) - self.assertEqual(expo.gdf['centr_'].shape, (n_expos,)) - self.assertEqual(expo.gdf['centr_'][0], 47) - self.assertEqual(expo.gdf['centr_'][n_expos - 1], 46) + self.assertIn("int", str(expo.gdf["centr_"].dtype)) + self.assertEqual(expo.gdf["centr_"].shape, (n_expos,)) + self.assertEqual(expo.gdf["centr_"][0], 47) + self.assertEqual(expo.gdf["centr_"][n_expos - 1], 46) - self.assertTrue('region_id' not in expo.gdf) + self.assertTrue("region_id" not in expo.gdf) - self.assertEqual(expo.gdf['latitude'].shape, (n_expos,)) - self.assertEqual(expo.gdf['latitude'][0], 26.93389900000) - self.assertEqual(expo.gdf['latitude'][n_expos - 1], 26.34795700000) - self.assertEqual(expo.gdf['longitude'][0], -80.12879900000) - self.assertEqual(expo.gdf['longitude'][n_expos - 1], -80.15885500000) + self.assertEqual(expo.gdf["latitude"].shape, (n_expos,)) + self.assertEqual(expo.gdf["latitude"][0], 26.93389900000) + self.assertEqual(expo.gdf["latitude"][n_expos - 1], 26.34795700000) + self.assertEqual(expo.gdf["longitude"][0], -80.12879900000) + self.assertEqual(expo.gdf["longitude"][n_expos - 1], -80.15885500000) self.assertEqual(expo.ref_year, 2016) - self.assertEqual(expo.value_unit, 'USD') + self.assertEqual(expo.value_unit, "USD") + class TestObligatories(unittest.TestCase): """Test reading exposures obligatory values.""" @@ -85,77 +88,79 @@ class TestObligatories(unittest.TestCase): def test_no_value_fail(self): """Error if no values.""" new_var_names = copy.deepcopy(DEF_VAR_MAT) - new_var_names['var_name']['val'] = 'no valid value' + new_var_names["var_name"]["val"] = "no valid value" with self.assertRaises(KeyError): Exposures.from_mat(ENT_TEST_MAT, var_names=new_var_names) def test_no_impact_fail(self): """Error if no impact ids.""" new_var_names = copy.deepcopy(DEF_VAR_MAT) - new_var_names['var_name']['impf'] = 'no valid value' + new_var_names["var_name"]["impf"] = "no valid value" with self.assertRaises(KeyError): Exposures.from_mat(ENT_TEST_MAT, var_names=new_var_names) def test_no_coord_fail(self): """Error if no coordinates.""" new_var_names = copy.deepcopy(DEF_VAR_MAT) - new_var_names['var_name']['lat'] = 'no valid Latitude' + new_var_names["var_name"]["lat"] = "no valid Latitude" with self.assertRaises(KeyError): Exposures.from_mat(ENT_TEST_MAT, var_names=new_var_names) - new_var_names['var_name']['lat'] = 'nLatitude' - new_var_names['var_name']['lon'] = 'no valid Longitude' + new_var_names["var_name"]["lat"] = "nLatitude" + new_var_names["var_name"]["lon"] = "no valid Longitude" with self.assertRaises(KeyError): Exposures.from_mat(ENT_TEST_MAT, var_names=new_var_names) + class TestOptionals(unittest.TestCase): """Test reading exposures optional values.""" def test_no_category_pass(self): """Not error if no category id.""" new_var_names = copy.deepcopy(DEF_VAR_MAT) - new_var_names['var_name']['cat'] = 'no valid category' + new_var_names["var_name"]["cat"] = "no valid category" exp = Exposures.from_mat(ENT_TEST_MAT, var_names=new_var_names) # Check results - self.assertTrue('category_id' not in exp.gdf) + self.assertTrue("category_id" not in exp.gdf) def test_no_region_pass(self): """Not error if no region id.""" new_var_names = copy.deepcopy(DEF_VAR_MAT) - new_var_names['var_name']['reg'] = 'no valid region' + new_var_names["var_name"]["reg"] = "no valid region" exp = Exposures.from_mat(ENT_TEST_MAT, var_names=new_var_names) # Check results - self.assertTrue('region_id' not in 
exp.gdf) + self.assertTrue("region_id" not in exp.gdf) def test_no_unit_pass(self): """Not error if no value unit.""" new_var_names = copy.deepcopy(DEF_VAR_MAT) - new_var_names['var_name']['uni'] = 'no valid unit' + new_var_names["var_name"]["uni"] = "no valid unit" exp = Exposures.from_mat(ENT_TEST_MAT, var_names=new_var_names) # Check results - self.assertEqual('USD', exp.value_unit) + self.assertEqual("USD", exp.value_unit) def test_no_assigned_pass(self): """Not error if no value unit.""" new_var_names = copy.deepcopy(DEF_VAR_MAT) - new_var_names['var_name']['ass'] = 'no valid assign' + new_var_names["var_name"]["ass"] = "no valid assign" exp = Exposures.from_mat(ENT_TEST_MAT, var_names=new_var_names) # Check results - self.assertTrue('centr_' not in exp.gdf) + self.assertTrue("centr_" not in exp.gdf) def test_no_refyear_pass(self): """Not error if no value unit.""" new_var_names = copy.deepcopy(DEF_VAR_MAT) - new_var_names['var_name']['ref'] = 'no valid ref' + new_var_names["var_name"]["ref"] = "no valid ref" exp = Exposures.from_mat(ENT_TEST_MAT, var_names=new_var_names) # Check results self.assertEqual(2018, exp.ref_year) + # Execute Tests if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestReader) diff --git a/climada/entity/exposures/test/test_nightlight.py b/climada/entity/exposures/test/test_nightlight.py index f7b83b6a4..e70028991 100644 --- a/climada/entity/exposures/test/test_nightlight.py +++ b/climada/entity/exposures/test/test_nightlight.py @@ -18,43 +18,63 @@ Test Nightlight module. """ + import unittest +from pathlib import Path + import numpy as np from climada.entity.exposures.litpop import nightlight from climada.util.constants import SYSTEM_DIR -from pathlib import Path BM_FILENAMES = nightlight.BM_FILENAMES + class TestNightLight(unittest.TestCase): """Test nightlight functions.""" def test_required_files(self): """Test get_required_nl_files function with various countries.""" # Switzerland - bbox = (5.954809204000128, 45.82071848599999, 10.466626831000013, 47.801166077000076) + bbox = ( + 5.954809204000128, + 45.82071848599999, + 10.466626831000013, + 47.801166077000076, + ) # min_lon, min_lat, max_lon, max_lat = bbox - np.testing.assert_array_equal(nightlight.get_required_nl_files(bbox), - [0., 0., 0., 0., 1., 0., 0., 0.]) + np.testing.assert_array_equal( + nightlight.get_required_nl_files(bbox), + [0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0], + ) # UK - bbox = (-13.69131425699993, 49.90961334800005, 1.7711694670000497, 60.84788646000004) - np.testing.assert_array_equal(nightlight.get_required_nl_files(bbox), - [0., 0., 1., 0., 1., 0., 0., 0.]) + bbox = ( + -13.69131425699993, + 49.90961334800005, + 1.7711694670000497, + 60.84788646000004, + ) + np.testing.assert_array_equal( + nightlight.get_required_nl_files(bbox), + [0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0], + ) # entire world bbox = (-180, -90, 180, 90) - np.testing.assert_array_equal(nightlight.get_required_nl_files(bbox), - [1., 1., 1., 1., 1., 1., 1., 1.]) + np.testing.assert_array_equal( + nightlight.get_required_nl_files(bbox), + [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], + ) # Invalid coordinate order or bbox length - self.assertRaises(ValueError, nightlight.get_required_nl_files, - (-180, 90, 180, -90)) - self.assertRaises(ValueError, nightlight.get_required_nl_files, - (180, -90, -180, 90)) - self.assertRaises(ValueError, nightlight.get_required_nl_files, - (-90, 90)) + self.assertRaises( + ValueError, nightlight.get_required_nl_files, (-180, 90, 180, -90) + ) + 
self.assertRaises( + ValueError, nightlight.get_required_nl_files, (180, -90, -180, 90) + ) + self.assertRaises(ValueError, nightlight.get_required_nl_files, (-90, 90)) def test_download_nightlight_files(self): """Test check_nightlight_local_file_exists""" @@ -62,19 +82,19 @@ def test_download_nightlight_files(self): self.assertRaises(ValueError, nightlight.download_nl_files, (1, 0, 1), (1, 1)) # The same length but not the correct length - self.assertRaises(ValueError, nightlight.download_nl_files, (1, 0, 1), (1, 1, 1)) + self.assertRaises( + ValueError, nightlight.download_nl_files, (1, 0, 1), (1, 1, 1) + ) def test_get_required_nl_files(self): - """ get_required_nl_files return a boolean matrix of 0 and 1 - indicating which tile of NASA nighlight files are needed giving - a bounding box. This test check a few configuration of tiles - and check that a value error is raised if the bounding box are - incorrect """ + """get_required_nl_files return a boolean matrix of 0 and 1 + indicating which tile of NASA nighlight files are needed giving + a bounding box. This test check a few configuration of tiles + and check that a value error is raised if the bounding box are + incorrect""" # incorrect bounds: bounds size =! 4, min lon > max lon, min lat > min lat - BOUNDS = [(20, 30, 40), - (120, -20, 110, 30), - (-120, 50, 130, 10)] + BOUNDS = [(20, 30, 40), (120, -20, 110, 30), (-120, 50, 130, 10)] # correct bounds bounds_c1 = (-120, -20, 0, 40) bounds_c2 = (-70, -20, 10, 40) @@ -83,25 +103,28 @@ def test_get_required_nl_files(self): for bounds in BOUNDS: with self.assertRaises(ValueError) as cm: - nightlight.get_required_nl_files(bounds = bounds) + nightlight.get_required_nl_files(bounds=bounds) - self.assertEqual('Invalid bounds supplied. `bounds` must be tuple' - ' with (min_lon, min_lat, max_lon, max_lat).', - str(cm.exception)) + self.assertEqual( + "Invalid bounds supplied. `bounds` must be tuple" + " with (min_lon, min_lat, max_lon, max_lat).", + str(cm.exception), + ) # test first correct bounds configurations - req_files = nightlight.get_required_nl_files(bounds = bounds_c1) + req_files = nightlight.get_required_nl_files(bounds=bounds_c1) bool = np.array_equal(np.array([1, 1, 1, 1, 1, 1, 0, 0]), req_files) self.assertTrue(bool) # second correct configuration - req_files = nightlight.get_required_nl_files(bounds = bounds_c2) + req_files = nightlight.get_required_nl_files(bounds=bounds_c2) bool = np.array_equal(np.array([0, 0, 1, 1, 1, 1, 0, 0]), req_files) self.assertTrue(bool) # third correct configuration - req_files = nightlight.get_required_nl_files(bounds = bounds_c3) + req_files = nightlight.get_required_nl_files(bounds=bounds_c3) bool = np.array_equal(np.array([0, 0, 0, 0, 0, 0, 1, 0]), req_files) self.assertTrue(bool) + # Execute Tests if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestNightLight) diff --git a/climada/entity/impact_funcs/__init__.py b/climada/entity/impact_funcs/__init__.py index f4d0aee3f..8672a17fa 100755 --- a/climada/entity/impact_funcs/__init__.py +++ b/climada/entity/impact_funcs/__init__.py @@ -18,6 +18,7 @@ init impact functions """ + from .base import * from .impact_func_set import * from .trop_cyclone import * diff --git a/climada/entity/impact_funcs/base.py b/climada/entity/impact_funcs/base.py index 71f3d1af4..287391a79 100644 --- a/climada/entity/impact_funcs/base.py +++ b/climada/entity/impact_funcs/base.py @@ -19,19 +19,20 @@ Define ImpactFunc class. 
""" -__all__ = ['ImpactFunc'] +__all__ = ["ImpactFunc"] import logging from typing import Optional, Union -import numpy as np + import matplotlib.pyplot as plt +import numpy as np import climada.util.checker as u_check LOGGER = logging.getLogger(__name__) -class ImpactFunc(): +class ImpactFunc: """Contains the definition of one impact function. Attributes @@ -109,9 +110,10 @@ def calc_mdr(self, inten: Union[float, np.ndarray]) -> np.ndarray: ------- np.array """ -# return np.interp(inten, self.intensity, self.mdd * self.paa) - return np.interp(inten, self.intensity, self.paa) * \ - np.interp(inten, self.intensity, self.mdd) + # return np.interp(inten, self.intensity, self.mdd * self.paa) + return np.interp(inten, self.intensity, self.paa) * np.interp( + inten, self.intensity, self.mdd + ) def plot(self, axis=None, **kwargs): """Plot the impact functions MDD, MDR and PAA in one graph, where @@ -131,15 +133,17 @@ def plot(self, axis=None, **kwargs): if axis is None: _, axis = plt.subplots(1, 1) - title = '%s %s' % (self.haz_type, str(self.id)) + title = "%s %s" % (self.haz_type, str(self.id)) if self.name != str(self.id): - title += ': %s' % self.name - axis.set_xlabel('Intensity (' + self.intensity_unit + ')') - axis.set_ylabel('Impact (%)') + title += ": %s" % self.name + axis.set_xlabel("Intensity (" + self.intensity_unit + ")") + axis.set_ylabel("Impact (%)") axis.set_title(title) - axis.plot(self.intensity, self.mdd * 100, 'b', label='MDD', **kwargs) - axis.plot(self.intensity, self.paa * 100, 'r', label='PAA', **kwargs) - axis.plot(self.intensity, self.mdd * self.paa * 100, 'k--', label='MDR', **kwargs) + axis.plot(self.intensity, self.mdd * 100, "b", label="MDD", **kwargs) + axis.plot(self.intensity, self.paa * 100, "r", label="PAA", **kwargs) + axis.plot( + self.intensity, self.mdd * self.paa * 100, "k--", label="MDR", **kwargs + ) axis.set_xlim((self.intensity.min(), self.intensity.max())) axis.legend() @@ -153,12 +157,16 @@ def check(self): ValueError """ num_exp = len(self.intensity) - u_check.size(num_exp, self.mdd, 'ImpactFunc.mdd') - u_check.size(num_exp, self.paa, 'ImpactFunc.paa') + u_check.size(num_exp, self.mdd, "ImpactFunc.mdd") + u_check.size(num_exp, self.paa, "ImpactFunc.paa") if num_exp == 0: - LOGGER.warning("%s impact function with name '%s' (id=%s) has empty" - " intensity.", self.haz_type, self.name, self.id) + LOGGER.warning( + "%s impact function with name '%s' (id=%s) has empty" " intensity.", + self.haz_type, + self.name, + self.id, + ) return @classmethod @@ -169,9 +177,9 @@ def from_step_impf( mdd: tuple[float, float] = (0, 1), paa: tuple[float, float] = (1, 1), impf_id: int = 1, - **kwargs): - - """ Step function type impact function. + **kwargs + ): + """Step function type impact function. By default, the impact is 100% above the step. Useful for high resolution modelling. @@ -204,13 +212,21 @@ def from_step_impf( mdd_min, mdd_max = mdd mdd = np.array([mdd_min, mdd_min, mdd_max, mdd_max]) - return cls(haz_type=haz_type, id=impf_id, - intensity=intensity, mdd=mdd, paa=paa, **kwargs) + return cls( + haz_type=haz_type, + id=impf_id, + intensity=intensity, + mdd=mdd, + paa=paa, + **kwargs + ) def set_step_impf(self, *args, **kwargs): """This function is deprecated, use ImpactFunc.from_step_impf instead.""" - LOGGER.warning("The use of ImpactFunc.set_step_impf is deprecated." + - "Use ImpactFunc.from_step_impf instead.") + LOGGER.warning( + "The use of ImpactFunc.set_step_impf is deprecated." + + "Use ImpactFunc.from_step_impf instead." 
+ ) self.__dict__ = ImpactFunc.from_step_impf(*args, **kwargs).__dict__ @classmethod @@ -222,7 +238,8 @@ def from_sigmoid_impf( x0: float, haz_type: str, impf_id: int = 1, - **kwargs): + **kwargs + ): r"""Sigmoid type impact function hinging on three parameter. This type of impact function is very flexible for any sort of study, @@ -264,13 +281,21 @@ def from_sigmoid_impf( paa = np.ones(len(intensity)) mdd = L / (1 + np.exp(-k * (intensity - x0))) - return cls(haz_type=haz_type, id=impf_id, intensity=intensity, - paa=paa, mdd=mdd, **kwargs) + return cls( + haz_type=haz_type, + id=impf_id, + intensity=intensity, + paa=paa, + mdd=mdd, + **kwargs + ) def set_sigmoid_impf(self, *args, **kwargs): """This function is deprecated, use LitPop.from_countries instead.""" - LOGGER.warning("The use of ImpactFunc.set_sigmoid_impf is deprecated." - "Use ImpactFunc.from_sigmoid_impf instead.") + LOGGER.warning( + "The use of ImpactFunc.set_sigmoid_impf is deprecated." + "Use ImpactFunc.from_sigmoid_impf instead." + ) self.__dict__ = ImpactFunc.from_sigmoid_impf(*args, **kwargs).__dict__ @classmethod @@ -283,7 +308,8 @@ def from_poly_s_shape( exponent: float, haz_type: str, impf_id: int = 1, - **kwargs): + **kwargs + ): r"""S-shape polynomial impact function hinging on four parameter. .. math:: @@ -336,7 +362,7 @@ def from_poly_s_shape( s-shaped polynomial impact function """ if exponent < 0: - raise ValueError('Exponent value must larger than 0') + raise ValueError("Exponent value must larger than 0") inten = np.linspace(*intensity) @@ -349,11 +375,6 @@ def from_poly_s_shape( paa = np.ones_like(inten) impf = cls( - haz_type=haz_type, - id=impf_id, - intensity=inten, - paa=paa, - mdd=mdd, - **kwargs + haz_type=haz_type, id=impf_id, intensity=inten, paa=paa, mdd=mdd, **kwargs ) return impf diff --git a/climada/entity/impact_funcs/impact_func_set.py b/climada/entity/impact_funcs/impact_func_set.py index 6bba81d56..e94ff8b82 100755 --- a/climada/entity/impact_funcs/impact_func_set.py +++ b/climada/entity/impact_funcs/impact_func_set.py @@ -19,48 +19,54 @@ Define ImpactFuncSet class. 
""" -__all__ = ['ImpactFuncSet'] +__all__ = ["ImpactFuncSet"] import copy import logging -from typing import Optional, Iterable from itertools import repeat +from typing import Iterable, Optional + +import matplotlib.pyplot as plt import numpy as np import pandas as pd -import matplotlib.pyplot as plt import xlsxwriter -from climada.entity.impact_funcs.base import ImpactFunc -import climada.util.plot as u_plot import climada.util.hdf5_handler as u_hdf5 +import climada.util.plot as u_plot +from climada.entity.impact_funcs.base import ImpactFunc LOGGER = logging.getLogger(__name__) -DEF_VAR_EXCEL = {'sheet_name': 'impact_functions', - 'col_name': {'func_id': 'impact_fun_id', - 'inten': 'intensity', - 'mdd': 'mdd', - 'paa': 'paa', - 'name': 'name', - 'unit': 'intensity_unit', - 'peril': 'peril_id' - } - } +DEF_VAR_EXCEL = { + "sheet_name": "impact_functions", + "col_name": { + "func_id": "impact_fun_id", + "inten": "intensity", + "mdd": "mdd", + "paa": "paa", + "name": "name", + "unit": "intensity_unit", + "peril": "peril_id", + }, +} """Excel and csv variable names""" -DEF_VAR_MAT = {'sup_field_name': 'entity', - 'field_name': 'damagefunctions', - 'var_name': {'fun_id': 'DamageFunID', - 'inten': 'Intensity', - 'mdd': 'MDD', - 'paa': 'PAA', - 'name': 'name', - 'unit': 'Intensity_unit', - 'peril': 'peril_ID' - } - } +DEF_VAR_MAT = { + "sup_field_name": "entity", + "field_name": "damagefunctions", + "var_name": { + "fun_id": "DamageFunID", + "inten": "Intensity", + "mdd": "MDD", + "paa": "PAA", + "name": "name", + "unit": "Intensity_unit", + "peril": "peril_ID", + }, +} """MATLAB variable names""" + class ImpactFuncSet: """Contains impact functions of type ImpactFunc. Loads from files with format defined in FILE_EXT. @@ -72,10 +78,7 @@ class ImpactFuncSet: directly accessed. Use the class methods instead. """ - def __init__( - self, - impact_funcs: Optional[Iterable[ImpactFunc]] = None - ): + def __init__(self, impact_funcs: Optional[Iterable[ImpactFunc]] = None): """Initialization. Build an impact function set from an iterable of ImpactFunc. @@ -147,8 +150,9 @@ def remove_func(self, haz_type=None, fun_id=None): try: del self._data[haz_type][fun_id] except KeyError: - LOGGER.warning("No ImpactFunc with hazard %s and id %s.", - haz_type, fun_id) + LOGGER.warning( + "No ImpactFunc with hazard %s and id %s.", haz_type, fun_id + ) elif haz_type is not None: try: del self._data[haz_type] @@ -261,8 +265,11 @@ def size(self, haz_type=None, fun_id=None): ------- int """ - if (haz_type is not None) and (fun_id is not None) and \ - (isinstance(self.get_func(haz_type, fun_id), ImpactFunc)): + if ( + (haz_type is not None) + and (fun_id is not None) + and (isinstance(self.get_func(haz_type, fun_id), ImpactFunc)) + ): return 1 if (haz_type is not None) or (fun_id is not None): return len(self.get_func(haz_type, fun_id)) @@ -277,12 +284,14 @@ def check(self): """ for key_haz, vul_dict in self._data.items(): for fun_id, vul in vul_dict.items(): - if (fun_id != vul.id) | (fun_id == ''): - raise ValueError("Wrong ImpactFunc.id: %s != %s." - % (fun_id, vul.id)) - if (key_haz != vul.haz_type) | (key_haz == ''): - raise ValueError("Wrong ImpactFunc.haz_type: %s != %s." - % (key_haz, vul.haz_type)) + if (fun_id != vul.id) | (fun_id == ""): + raise ValueError( + "Wrong ImpactFunc.id: %s != %s." % (fun_id, vul.id) + ) + if (key_haz != vul.haz_type) | (key_haz == ""): + raise ValueError( + "Wrong ImpactFunc.haz_type: %s != %s." 
% (key_haz, vul.haz_type) + ) vul.check() def extend(self, impact_funcs): @@ -368,7 +377,7 @@ def from_excel(cls, file_name, var_names=None): """ if var_names is None: var_names = DEF_VAR_EXCEL - dfr = pd.read_excel(file_name, var_names['sheet_name']) + dfr = pd.read_excel(file_name, var_names["sheet_name"]) imp_func_set = cls() imp_func_set._fill_dfr(dfr, var_names) @@ -376,8 +385,10 @@ def from_excel(cls, file_name, var_names=None): def read_excel(self, *args, **kwargs): """This function is deprecated, use ImpactFuncSet.from_excel instead.""" - LOGGER.warning("The use of ImpactFuncSet.read_excel is deprecated." - " Use ImpactFuncSet.from_excel instead.") + LOGGER.warning( + "The use of ImpactFuncSet.read_excel is deprecated." + " Use ImpactFuncSet.from_excel instead." + ) self.__dict__ = ImpactFuncSet.from_excel(*args, **kwargs).__dict__ @classmethod @@ -400,12 +411,16 @@ def from_mat(cls, file_name, var_names=None): """ if var_names is None: var_names = DEF_VAR_MAT + def _get_hdf5_funcs(imp, file_name, var_names): """Get rows that fill every impact function and its name.""" func_pos = dict() for row, (fun_id, fun_type) in enumerate( - zip(imp[var_names['var_name']['fun_id']].squeeze(), - imp[var_names['var_name']['peril']].squeeze())): + zip( + imp[var_names["var_name"]["fun_id"]].squeeze(), + imp[var_names["var_name"]["peril"]].squeeze(), + ) + ): type_str = u_hdf5.get_str_from_ref(file_name, fun_type) key = (type_str, int(fun_id)) if key not in func_pos: @@ -421,17 +436,19 @@ def _get_hdf5_str(imp, idxs, file_name, var_name): if prev_str == "": prev_str = cur_str elif prev_str != cur_str: - raise ValueError("Impact function with two different %s." % var_name) + raise ValueError( + "Impact function with two different %s." % var_name + ) return prev_str imp = u_hdf5.read(file_name) try: - imp = imp[var_names['sup_field_name']] + imp = imp[var_names["sup_field_name"]] except KeyError: pass try: - imp = imp[var_names['field_name']] + imp = imp[var_names["field_name"]] funcs_idx = _get_hdf5_funcs(imp, file_name, var_names) impact_funcs = [] for imp_key, imp_rows in funcs_idx.items(): @@ -442,19 +459,26 @@ def _get_hdf5_str(imp, idxs, file_name, var_name): # check that this function only has one intensity unit, if provided try: impf_kwargs["intensity_unit"] = _get_hdf5_str( - imp, imp_rows, file_name, var_names['var_name']['unit']) + imp, imp_rows, file_name, var_names["var_name"]["unit"] + ) except KeyError: pass # check that this function only has one name try: impf_kwargs["name"] = _get_hdf5_str( - imp, imp_rows, file_name, var_names['var_name']['name']) + imp, imp_rows, file_name, var_names["var_name"]["name"] + ) except KeyError: impf_kwargs["name"] = str(impf_kwargs["idx"]) impf_kwargs["intensity"] = np.take( - imp[var_names['var_name']['inten']], imp_rows) - impf_kwargs["mdd"] = np.take(imp[var_names['var_name']['mdd']], imp_rows) - impf_kwargs["paa"] = np.take(imp[var_names['var_name']['paa']], imp_rows) + imp[var_names["var_name"]["inten"]], imp_rows + ) + impf_kwargs["mdd"] = np.take( + imp[var_names["var_name"]["mdd"]], imp_rows + ) + impf_kwargs["paa"] = np.take( + imp[var_names["var_name"]["paa"]], imp_rows + ) impact_funcs.append(ImpactFunc(**impf_kwargs)) except KeyError as err: raise KeyError("Not existing variable: %s" % str(err)) from err @@ -463,8 +487,10 @@ def _get_hdf5_str(imp, idxs, file_name, var_name): def read_mat(self, *args, **kwargs): """This function is deprecated, use ImpactFuncSet.from_mat instead.""" - LOGGER.warning("The use of ImpactFuncSet.read_mat is 
deprecated." - "Use ImpactFuncSet.from_mat instead.") + LOGGER.warning( + "The use of ImpactFuncSet.read_mat is deprecated." + "Use ImpactFuncSet.from_mat instead." + ) self.__dict__ = ImpactFuncSet.from_mat(*args, **kwargs).__dict__ def write_excel(self, file_name, var_names=None): @@ -479,6 +505,7 @@ def write_excel(self, file_name, var_names=None): """ if var_names is None: var_names = DEF_VAR_EXCEL + def write_impf(row_ini, imp_ws, xls_data): """Write one impact function""" for icol, col_dat in enumerate(xls_data): @@ -486,22 +513,32 @@ def write_impf(row_ini, imp_ws, xls_data): imp_ws.write(irow, icol, data) imp_wb = xlsxwriter.Workbook(file_name) - imp_ws = imp_wb.add_worksheet(var_names['sheet_name']) - - header = [var_names['col_name']['func_id'], var_names['col_name']['inten'], - var_names['col_name']['mdd'], var_names['col_name']['paa'], - var_names['col_name']['peril'], var_names['col_name']['unit'], - var_names['col_name']['name']] + imp_ws = imp_wb.add_worksheet(var_names["sheet_name"]) + + header = [ + var_names["col_name"]["func_id"], + var_names["col_name"]["inten"], + var_names["col_name"]["mdd"], + var_names["col_name"]["paa"], + var_names["col_name"]["peril"], + var_names["col_name"]["unit"], + var_names["col_name"]["name"], + ] for icol, head_dat in enumerate(header): imp_ws.write(0, icol, head_dat) row_ini = 1 for fun_haz_id, fun_haz in self._data.items(): for fun_id, fun in fun_haz.items(): n_inten = fun.intensity.size - xls_data = [repeat(fun_id, n_inten), fun.intensity, fun.mdd, - fun.paa, repeat(fun_haz_id, n_inten), - repeat(fun.intensity_unit, n_inten), - repeat(fun.name, n_inten)] + xls_data = [ + repeat(fun_id, n_inten), + fun.intensity, + fun.mdd, + fun.paa, + repeat(fun_haz_id, n_inten), + repeat(fun.intensity_unit, n_inten), + repeat(fun.name, n_inten), + ] write_impf(row_ini, imp_ws, xls_data) row_ini += n_inten imp_wb.close() @@ -511,8 +548,10 @@ def _fill_dfr(self, dfr, var_names): def _get_xls_funcs(dfr, var_names): """Parse individual impact functions.""" dist_func = [] - for (haz_type, imp_id) in zip(dfr[var_names['col_name']['peril']], - dfr[var_names['col_name']['func_id']]): + for haz_type, imp_id in zip( + dfr[var_names["col_name"]["peril"]], + dfr[var_names["col_name"]["func_id"]], + ): if (haz_type, imp_id) not in dist_func: dist_func.append((haz_type, imp_id)) return dist_func @@ -520,9 +559,8 @@ def _get_xls_funcs(dfr, var_names): try: dist_func = _get_xls_funcs(dfr, var_names) for haz_type, imp_id in dist_func: - df_func = dfr[dfr[var_names['col_name']['peril']] == haz_type] - df_func = df_func[df_func[var_names['col_name']['func_id']] - == imp_id] + df_func = dfr[dfr[var_names["col_name"]["peril"]] == haz_type] + df_func = df_func[df_func[var_names["col_name"]["func_id"]] == imp_id] # Store arguments in a dict (missing ones will be default) impf_kwargs = dict() @@ -530,26 +568,31 @@ def _get_xls_funcs(dfr, var_names): impf_kwargs["id"] = imp_id # check that the unit of the intensity is the same try: - if len(df_func[var_names['col_name']['name']].unique()) != 1: - raise ValueError('Impact function with two different names.') - impf_kwargs["name"] = df_func[var_names['col_name'] - ['name']].values[0] + if len(df_func[var_names["col_name"]["name"]].unique()) != 1: + raise ValueError("Impact function with two different names.") + impf_kwargs["name"] = df_func[var_names["col_name"]["name"]].values[ + 0 + ] except KeyError: impf_kwargs["name"] = str(impf_kwargs["id"]) # check that the unit of the intensity is the same, if provided try: - if 
len(df_func[var_names['col_name']['unit']].unique()) != 1: - raise ValueError('Impact function with two different' - ' intensity units.') - impf_kwargs["intensity_unit"] = df_func[var_names['col_name'] - ['unit']].values[0] + if len(df_func[var_names["col_name"]["unit"]].unique()) != 1: + raise ValueError( + "Impact function with two different" " intensity units." + ) + impf_kwargs["intensity_unit"] = df_func[ + var_names["col_name"]["unit"] + ].values[0] except KeyError: pass - impf_kwargs["intensity"] = df_func[var_names['col_name']['inten']].values - impf_kwargs["mdd"] = df_func[var_names['col_name']['mdd']].values - impf_kwargs["paa"] = df_func[var_names['col_name']['paa']].values + impf_kwargs["intensity"] = df_func[ + var_names["col_name"]["inten"] + ].values + impf_kwargs["mdd"] = df_func[var_names["col_name"]["mdd"]].values + impf_kwargs["paa"] = df_func[var_names["col_name"]["paa"]].values self.append(ImpactFunc(**impf_kwargs)) diff --git a/climada/entity/impact_funcs/storm_europe.py b/climada/entity/impact_funcs/storm_europe.py index f021f4957..76973c3df 100644 --- a/climada/entity/impact_funcs/storm_europe.py +++ b/climada/entity/impact_funcs/storm_europe.py @@ -19,24 +19,25 @@ Define impact functions for extratropical storms (mainly windstorms in Europe). """ -__all__ = ['ImpfStormEurope', 'IFStormEurope'] +__all__ = ["ImpfStormEurope", "IFStormEurope"] import logging -from deprecation import deprecated + import numpy as np +from deprecation import deprecated -from climada.entity.impact_funcs.base import ImpactFunc from climada.engine.calibration_opt import init_impf - +from climada.entity.impact_funcs.base import ImpactFunc LOGGER = logging.getLogger(__name__) + class ImpfStormEurope(ImpactFunc): """Impact functions for tropical cyclones.""" def __init__(self): ImpactFunc.__init__(self) - self.haz_type = 'WS' + self.haz_type = "WS" @classmethod def from_schwierz(cls, impf_id=1): @@ -50,16 +51,42 @@ def from_schwierz(cls, impf_id=1): """ impf = cls() - impf.name = 'Schwierz 2010' + impf.name = "Schwierz 2010" impf.id = impf_id - impf.intensity_unit = 'm/s' + impf.intensity_unit = "m/s" impf.intensity = np.array([0, 20, 25, 30, 35, 40, 45, 50, 55, 60, 80, 100]) - impf.paa = np.array([0., 0., 0.001, 0.00676, - 0.03921, 0.10707, 0.25357, 0.48869, - 0.82907, 1., 1., 1.]) - impf.mdd = np.array([0., 0., 0.001, 0.00177515, - 0.00367253, 0.00749977, 0.01263556, 0.01849639, - 0.02370487, 0.037253, 0.037253, 0.037253]) + impf.paa = np.array( + [ + 0.0, + 0.0, + 0.001, + 0.00676, + 0.03921, + 0.10707, + 0.25357, + 0.48869, + 0.82907, + 1.0, + 1.0, + 1.0, + ] + ) + impf.mdd = np.array( + [ + 0.0, + 0.0, + 0.001, + 0.00177515, + 0.00367253, + 0.00749977, + 0.01263556, + 0.01849639, + 0.02370487, + 0.037253, + 0.037253, + 0.037253, + ] + ) impf.check() return impf @@ -77,11 +104,11 @@ def from_welker(cls, impf_id=1): """ temp_Impf = ImpfStormEurope.from_schwierz() - scaling_factor = {'paa_scale': 1.332518, 'mdd_scale': 1.332518} + scaling_factor = {"paa_scale": 1.332518, "mdd_scale": 1.332518} temp_Impf = init_impf(temp_Impf, scaling_factor)[0] - temp_Impf.name = 'Welker 2021' + temp_Impf.name = "Welker 2021" temp_Impf.id = impf_id - temp_Impf.intensity_unit = 'm/s' + temp_Impf.intensity_unit = "m/s" temp_Impf.check() return temp_Impf @@ -90,8 +117,10 @@ def set_schwierz(self, impf_id=1): This function is deprecated, use ImpfStormEurope.from_schwierz instead. """ - LOGGER.warning("The use of ImpfStormEurope.set_schwierz is deprecated." 
- "Use ImpfStormEurope.from_schwierz instead.") + LOGGER.warning( + "The use of ImpfStormEurope.set_schwierz is deprecated." + "Use ImpfStormEurope.from_schwierz instead." + ) self.__dict__ = ImpfStormEurope.from_schwierz(impf_id=impf_id).__dict__ def set_welker(self, impf_id=1): @@ -99,12 +128,16 @@ def set_welker(self, impf_id=1): This function is deprecated, use ImpfStormEurope.from_welker instead. """ - LOGGER.warning("The use of ImpfStormEurope.set_welker is deprecated." - "Use ImpfStormEurope.from_welker instead.") + LOGGER.warning( + "The use of ImpfStormEurope.set_welker is deprecated." + "Use ImpfStormEurope.from_welker instead." + ) self.__dict__ = ImpfStormEurope.from_welker(impf_id=impf_id).__dict__ -@deprecated(details="The class name IFStormEurope is deprecated and won't be supported in a future " - +"version. Use ImpfStormEurope instead") +@deprecated( + details="The class name IFStormEurope is deprecated and won't be supported in a future " + + "version. Use ImpfStormEurope instead" +) class IFStormEurope(ImpfStormEurope): """Is ImpfStormEurope now""" diff --git a/climada/entity/impact_funcs/test/test_base.py b/climada/entity/impact_funcs/test/test_base.py index 3f2e0460b..b0652a1be 100644 --- a/climada/entity/impact_funcs/test/test_base.py +++ b/climada/entity/impact_funcs/test/test_base.py @@ -20,10 +20,12 @@ """ import unittest + import numpy as np from climada.entity.impact_funcs.base import ImpactFunc + class TestInterpolation(unittest.TestCase): """Impact function interpolation test""" @@ -39,36 +41,36 @@ def test_calc_mdr_pass(self): def test_from_step(self): """Check default impact function: step function""" inten = (0, 5, 10) - imp_fun = ImpactFunc.from_step_impf( - intensity=inten, haz_type='TC', impf_id=2) + imp_fun = ImpactFunc.from_step_impf(intensity=inten, haz_type="TC", impf_id=2) self.assertTrue(np.array_equal(imp_fun.paa, np.ones(4))) self.assertTrue(np.array_equal(imp_fun.mdd, np.array([0, 0, 1, 1]))) self.assertTrue(np.array_equal(imp_fun.intensity, np.array([0, 5, 5, 10]))) - self.assertEqual(imp_fun.haz_type, 'TC') + self.assertEqual(imp_fun.haz_type, "TC") self.assertEqual(imp_fun.id, 2) def test_from_sigmoid(self): """Check default impact function: sigmoid function""" inten = (0, 100, 5) imp_fun = ImpactFunc.from_sigmoid_impf( - inten, L=1.0, k=2., x0=50., haz_type='RF', impf_id=2) + inten, L=1.0, k=2.0, x0=50.0, haz_type="RF", impf_id=2 + ) self.assertTrue(np.array_equal(imp_fun.paa, np.ones(20))) self.assertEqual(imp_fun.mdd[10], 0.5) self.assertEqual(imp_fun.mdd[-1], 1.0) self.assertTrue(np.array_equal(imp_fun.intensity, np.arange(0, 100, 5))) - self.assertEqual(imp_fun.haz_type, 'RF') + self.assertEqual(imp_fun.haz_type, "RF") self.assertEqual(imp_fun.id, 2) def test_from_poly_s_shape(self): """Check default impact function: polynomial s-shape""" - haz_type = 'RF' + haz_type = "RF" threshold = 0.2 half_point = 1 scale = 0.8 exponent = 4 impf_id = 2 - unit = 'm' + unit = "m" intensity = (0, 5, 5) def test_aux_vars(impf): @@ -79,9 +81,15 @@ def test_aux_vars(impf): self.assertEqual(impf.intensity_unit, unit) impf = ImpactFunc.from_poly_s_shape( - intensity=intensity, threshold=threshold, half_point=half_point, scale=scale, - exponent=exponent, haz_type=haz_type, impf_id=impf_id, intensity_unit=unit - ) + intensity=intensity, + threshold=threshold, + half_point=half_point, + scale=scale, + exponent=exponent, + haz_type=haz_type, + impf_id=impf_id, + intensity_unit=unit, + ) # True value can easily be computed with a calculator correct_mdd = 
np.array([0, 0.59836395, 0.78845941, 0.79794213, 0.79938319]) np.testing.assert_array_almost_equal(impf.mdd, correct_mdd) @@ -89,29 +97,46 @@ def test_aux_vars(impf): # If threshold > half_point, mdd should all be 0 impf = ImpactFunc.from_poly_s_shape( - intensity=intensity, threshold=half_point*2, half_point=half_point, scale=scale, - exponent=exponent, haz_type=haz_type, impf_id=impf_id, intensity_unit=unit - ) + intensity=intensity, + threshold=half_point * 2, + half_point=half_point, + scale=scale, + exponent=exponent, + haz_type=haz_type, + impf_id=impf_id, + intensity_unit=unit, + ) np.testing.assert_array_almost_equal(impf.mdd, np.zeros(5)) test_aux_vars(impf) # If exponent = 0, mdd should be constant impf = ImpactFunc.from_poly_s_shape( - intensity=intensity, threshold=threshold, half_point=half_point, scale=scale, - exponent=0, haz_type=haz_type, impf_id=impf_id, intensity_unit=unit - ) + intensity=intensity, + threshold=threshold, + half_point=half_point, + scale=scale, + exponent=0, + haz_type=haz_type, + impf_id=impf_id, + intensity_unit=unit, + ) np.testing.assert_array_almost_equal(impf.mdd, np.ones(5) * scale / 2) test_aux_vars(impf) # If exponent < 0, raise error. with self.assertRaisesRegex(ValueError, "Exponent value"): ImpactFunc.from_poly_s_shape( - intensity=intensity, threshold=half_point, - half_point=half_point, scale=scale, - exponent=-1, haz_type=haz_type, - impf_id=impf_id, intensity_unit=unit + intensity=intensity, + threshold=half_point, + half_point=half_point, + scale=scale, + exponent=-1, + haz_type=haz_type, + impf_id=impf_id, + intensity_unit=unit, ) + # Execute Tests if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestInterpolation) diff --git a/climada/entity/impact_funcs/test/test_imp_fun_set.py b/climada/entity/impact_funcs/test/test_imp_fun_set.py index e3804f849..3bc60559b 100644 --- a/climada/entity/impact_funcs/test/test_imp_fun_set.py +++ b/climada/entity/impact_funcs/test/test_imp_fun_set.py @@ -18,43 +18,51 @@ Test ImpactFuncSet class. 
""" + import unittest + import numpy as np from climada import CONFIG -from climada.entity.impact_funcs.impact_func_set import ImpactFuncSet, ImpactFunc -from climada.util.constants import ENT_TEMPLATE_XLS, ENT_DEMO_TODAY +from climada.entity.impact_funcs.impact_func_set import ImpactFunc, ImpactFuncSet +from climada.util.constants import ENT_DEMO_TODAY, ENT_TEMPLATE_XLS + +ENT_TEST_MAT = CONFIG.exposures.test_data.dir().joinpath("demo_today.mat") -ENT_TEST_MAT = CONFIG.exposures.test_data.dir().joinpath('demo_today.mat') class TestConstructor(unittest.TestCase): """Test impact function attributes.""" + def test_attributes_all(self): """All attributes are defined""" imp_fun = ImpactFuncSet() vulner_1 = ImpactFunc("TC", "2") - self.assertTrue(hasattr(imp_fun, '_data')) - self.assertTrue(hasattr(vulner_1, 'haz_type')) - self.assertTrue(hasattr(vulner_1, 'name')) - self.assertTrue(hasattr(vulner_1, 'id')) - self.assertTrue(hasattr(vulner_1, 'intensity_unit')) - self.assertTrue(hasattr(vulner_1, 'mdd')) - self.assertTrue(hasattr(vulner_1, 'paa')) + self.assertTrue(hasattr(imp_fun, "_data")) + self.assertTrue(hasattr(vulner_1, "haz_type")) + self.assertTrue(hasattr(vulner_1, "name")) + self.assertTrue(hasattr(vulner_1, "id")) + self.assertTrue(hasattr(vulner_1, "intensity_unit")) + self.assertTrue(hasattr(vulner_1, "mdd")) + self.assertTrue(hasattr(vulner_1, "paa")) + class TestContainer(unittest.TestCase): """Test ImpactFuncSet as container.""" + def test_add_wrong_error(self): """Test error is raised when wrong ImpactFunc provided.""" imp_fun = ImpactFuncSet() vulner_1 = ImpactFunc() - with self.assertLogs('climada.entity.impact_funcs.impact_func_set', - level='WARNING') as cm: + with self.assertLogs( + "climada.entity.impact_funcs.impact_func_set", level="WARNING" + ) as cm: imp_fun.append(vulner_1) self.assertIn("Input ImpactFunc's hazard type not set.", cm.output[0]) vulner_1 = ImpactFunc("TC") - with self.assertLogs('climada.entity.impact_funcs.impact_func_set', - level='WARNING') as cm: + with self.assertLogs( + "climada.entity.impact_funcs.impact_func_set", level="WARNING" + ) as cm: imp_fun.append(vulner_1) self.assertIn("Input ImpactFunc's id not set.", cm.output[0]) @@ -71,29 +79,33 @@ def test_remove_func_pass(self): def test_remove_wrong_error(self): """Test error is raised when invalid inputs.""" imp_fun = ImpactFuncSet([ImpactFunc("TC", 1)]) - with self.assertLogs('climada.entity.impact_funcs.impact_func_set', level='WARNING') as cm: - imp_fun.remove_func('FL') - self.assertIn('No ImpactFunc with hazard FL.', cm.output[0]) - with self.assertLogs('climada.entity.impact_funcs.impact_func_set', level='WARNING') as cm: + with self.assertLogs( + "climada.entity.impact_funcs.impact_func_set", level="WARNING" + ) as cm: + imp_fun.remove_func("FL") + self.assertIn("No ImpactFunc with hazard FL.", cm.output[0]) + with self.assertLogs( + "climada.entity.impact_funcs.impact_func_set", level="WARNING" + ) as cm: imp_fun.remove_func(fun_id=3) - self.assertIn('No ImpactFunc with id 3.', cm.output[0]) + self.assertIn("No ImpactFunc with id 3.", cm.output[0]) def test_get_hazards_pass(self): """Test get_hazard_types function.""" imp_fun = ImpactFuncSet([ImpactFunc("TC", 1)]) self.assertEqual(1, len(imp_fun.get_hazard_types())) - self.assertEqual(['TC'], imp_fun.get_hazard_types()) + self.assertEqual(["TC"], imp_fun.get_hazard_types()) vulner_2 = ImpactFunc("TC", 1) imp_fun.append(vulner_2) self.assertEqual(1, len(imp_fun.get_hazard_types())) - self.assertEqual(['TC'], imp_fun.get_hazard_types()) 
+ self.assertEqual(["TC"], imp_fun.get_hazard_types()) vulner_3 = ImpactFunc("FL", 1) imp_fun.append(vulner_3) self.assertEqual(2, len(imp_fun.get_hazard_types())) - self.assertIn('TC', imp_fun.get_hazard_types()) - self.assertIn('FL', imp_fun.get_hazard_types()) + self.assertIn("TC", imp_fun.get_hazard_types()) + self.assertIn("FL", imp_fun.get_hazard_types()) def test_get_ids_pass(self): """Test normal functionality of get_ids method.""" @@ -103,63 +115,63 @@ def test_get_ids_pass(self): vulner_1 = ImpactFunc("TC", 1) imp_fun.append(vulner_1) self.assertEqual(1, len(imp_fun.get_ids())) - self.assertIn('TC', imp_fun.get_ids()) - self.assertEqual(1, len(imp_fun.get_ids('TC'))) - self.assertEqual([1], imp_fun.get_ids('TC')) + self.assertIn("TC", imp_fun.get_ids()) + self.assertEqual(1, len(imp_fun.get_ids("TC"))) + self.assertEqual([1], imp_fun.get_ids("TC")) vulner_2 = ImpactFunc("TC", 3) imp_fun.append(vulner_2) self.assertEqual(1, len(imp_fun.get_ids())) - self.assertIn('TC', imp_fun.get_ids()) - self.assertEqual(2, len(imp_fun.get_ids('TC'))) - self.assertEqual([1, 3], imp_fun.get_ids('TC')) + self.assertIn("TC", imp_fun.get_ids()) + self.assertEqual(2, len(imp_fun.get_ids("TC"))) + self.assertEqual([1, 3], imp_fun.get_ids("TC")) vulner_3 = ImpactFunc("FL", 3) imp_fun.append(vulner_3) self.assertEqual(2, len(imp_fun.get_ids())) - self.assertIn('TC', imp_fun.get_ids()) - self.assertIn('FL', imp_fun.get_ids()) - self.assertEqual(2, len(imp_fun.get_ids('TC'))) - self.assertEqual([1, 3], imp_fun.get_ids('TC')) - self.assertEqual(1, len(imp_fun.get_ids('FL'))) - self.assertEqual([3], imp_fun.get_ids('FL')) + self.assertIn("TC", imp_fun.get_ids()) + self.assertIn("FL", imp_fun.get_ids()) + self.assertEqual(2, len(imp_fun.get_ids("TC"))) + self.assertEqual([1, 3], imp_fun.get_ids("TC")) + self.assertEqual(1, len(imp_fun.get_ids("FL"))) + self.assertEqual([3], imp_fun.get_ids("FL")) def test_get_ids_wrong_zero(self): """Test get_ids method with wrong inputs.""" imp_fun = ImpactFuncSet([ImpactFunc("WS", 56)]) - self.assertEqual([], imp_fun.get_ids('TC')) + self.assertEqual([], imp_fun.get_ids("TC")) def test_get_func_pass(self): """Test normal functionality of get_func method.""" imp_fun = ImpactFuncSet() vulner_1 = ImpactFunc("WS", 56) imp_fun.append(vulner_1) - self.assertEqual(1, len(imp_fun.get_func('WS'))) + self.assertEqual(1, len(imp_fun.get_func("WS"))) self.assertEqual(1, len(imp_fun.get_func(fun_id=56))) - self.assertIs(vulner_1, imp_fun.get_func('WS', 56)) + self.assertIs(vulner_1, imp_fun.get_func("WS", 56)) vulner_2 = ImpactFunc("WS", 6) imp_fun.append(vulner_2) - self.assertEqual(2, len(imp_fun.get_func('WS'))) + self.assertEqual(2, len(imp_fun.get_func("WS"))) self.assertEqual(1, len(imp_fun.get_func(fun_id=6))) - self.assertIs(vulner_2, imp_fun.get_func('WS', 6)) + self.assertIs(vulner_2, imp_fun.get_func("WS", 6)) vulner_3 = ImpactFunc("TC", 6) imp_fun.append(vulner_3) self.assertEqual(2, len(imp_fun.get_func(fun_id=6))) self.assertEqual(1, len(imp_fun.get_func(fun_id=56))) - self.assertEqual(2, len(imp_fun.get_func('WS'))) - self.assertEqual(1, len(imp_fun.get_func('TC'))) - self.assertIs(vulner_3, imp_fun.get_func('TC', 6)) + self.assertEqual(2, len(imp_fun.get_func("WS"))) + self.assertEqual(1, len(imp_fun.get_func("TC"))) + self.assertIs(vulner_3, imp_fun.get_func("TC", 6)) self.assertEqual(2, len(imp_fun.get_func().keys())) - self.assertEqual(1, len(imp_fun.get_func()['TC'].keys())) - self.assertEqual(2, len(imp_fun.get_func()['WS'].keys())) + self.assertEqual(1, 
len(imp_fun.get_func()["TC"].keys())) + self.assertEqual(2, len(imp_fun.get_func()["WS"].keys())) def test_get_func_wrong_error(self): """Test get_func method with wrong inputs.""" imp_fun = ImpactFuncSet([ImpactFunc("WS", 56)]) - self.assertEqual([], imp_fun.get_func('TC')) + self.assertEqual([], imp_fun.get_func("TC")) def test_size_pass(self): """Test size function.""" @@ -169,37 +181,37 @@ def test_size_pass(self): vulner_1 = ImpactFunc("WS", 56) imp_fun.append(vulner_1) self.assertEqual(1, imp_fun.size()) - self.assertEqual(1, imp_fun.size('WS', 56)) - self.assertEqual(1, imp_fun.size('WS')) + self.assertEqual(1, imp_fun.size("WS", 56)) + self.assertEqual(1, imp_fun.size("WS")) self.assertEqual(1, imp_fun.size(fun_id=56)) imp_fun.append(vulner_1) self.assertEqual(1, imp_fun.size()) - self.assertEqual(1, imp_fun.size('WS', 56)) - self.assertEqual(1, imp_fun.size('WS')) + self.assertEqual(1, imp_fun.size("WS", 56)) + self.assertEqual(1, imp_fun.size("WS")) self.assertEqual(1, imp_fun.size(fun_id=56)) vulner_2 = ImpactFunc("WS", 5) imp_fun.append(vulner_2) self.assertEqual(2, imp_fun.size()) - self.assertEqual(1, imp_fun.size('WS', 56)) - self.assertEqual(2, imp_fun.size('WS')) + self.assertEqual(1, imp_fun.size("WS", 56)) + self.assertEqual(2, imp_fun.size("WS")) self.assertEqual(1, imp_fun.size(fun_id=56)) self.assertEqual(1, imp_fun.size(fun_id=5)) vulner_3 = ImpactFunc("TC", 5) imp_fun.append(vulner_3) self.assertEqual(3, imp_fun.size()) - self.assertEqual(1, imp_fun.size('TC', 5)) - self.assertEqual(2, imp_fun.size('WS')) - self.assertEqual(1, imp_fun.size('TC')) + self.assertEqual(1, imp_fun.size("TC", 5)) + self.assertEqual(2, imp_fun.size("WS")) + self.assertEqual(1, imp_fun.size("TC")) self.assertEqual(1, imp_fun.size(fun_id=56)) self.assertEqual(2, imp_fun.size(fun_id=5)) def test_size_wrong_zero(self): """Test size method with wrong inputs.""" imp_fun = ImpactFuncSet() - self.assertEqual(0, imp_fun.size('TC')) - self.assertEqual(0, imp_fun.size('TC', 3)) + self.assertEqual(0, imp_fun.size("TC")) + self.assertEqual(0, imp_fun.size("TC", 3)) self.assertEqual(0, imp_fun.size(fun_id=3)) def test_append_pass(self): @@ -208,31 +220,32 @@ def test_append_pass(self): vulner_1 = ImpactFunc("TC", 1) imp_fun.append(vulner_1) self.assertEqual(1, len(imp_fun._data)) - self.assertIn('TC', imp_fun._data.keys()) - self.assertEqual(1, len(imp_fun._data['TC'])) - self.assertIn(1, imp_fun._data['TC'].keys()) + self.assertIn("TC", imp_fun._data.keys()) + self.assertEqual(1, len(imp_fun._data["TC"])) + self.assertIn(1, imp_fun._data["TC"].keys()) vulner_2 = ImpactFunc("TC", 3) imp_fun.append(vulner_2) self.assertEqual(1, len(imp_fun._data)) - self.assertIn('TC', imp_fun._data.keys()) - self.assertEqual(2, len(imp_fun._data['TC'])) - self.assertIn(1, imp_fun._data['TC'].keys()) - self.assertIn(3, imp_fun._data['TC'].keys()) + self.assertIn("TC", imp_fun._data.keys()) + self.assertEqual(2, len(imp_fun._data["TC"])) + self.assertIn(1, imp_fun._data["TC"].keys()) + self.assertIn(3, imp_fun._data["TC"].keys()) vulner_3 = ImpactFunc("FL", 3) imp_fun.append(vulner_3) self.assertEqual(2, len(imp_fun._data)) - self.assertIn('TC', imp_fun._data.keys()) - self.assertIn('FL', imp_fun._data.keys()) - self.assertEqual(2, len(imp_fun._data['TC'])) - self.assertEqual(1, len(imp_fun._data['FL'])) - self.assertIn(1, imp_fun._data['TC'].keys()) - self.assertIn(3, imp_fun._data['TC'].keys()) - self.assertIn(3, imp_fun._data['FL'].keys()) + self.assertIn("TC", imp_fun._data.keys()) + self.assertIn("FL", 
imp_fun._data.keys()) + self.assertEqual(2, len(imp_fun._data["TC"])) + self.assertEqual(1, len(imp_fun._data["FL"])) + self.assertIn(1, imp_fun._data["TC"].keys()) + self.assertIn(3, imp_fun._data["TC"].keys()) + self.assertIn(3, imp_fun._data["FL"].keys()) def test_init_with_iterable(self): """Check that initializing with iterables works""" + def _check_contents(imp_fun): self.assertEqual(imp_fun.size("TC"), 2) self.assertEqual(imp_fun.size("FL"), 1) @@ -247,11 +260,17 @@ def _check_contents(imp_fun): self.assertFalse(impf_set.get_ids("TC")) # Initialize with list - _check_contents(ImpactFuncSet( - [ImpactFunc("TC", 1), ImpactFunc("TC", 3), ImpactFunc("FL", 3)])) + _check_contents( + ImpactFuncSet( + [ImpactFunc("TC", 1), ImpactFunc("TC", 3), ImpactFunc("FL", 3)] + ) + ) # Initialize with tuple - _check_contents(ImpactFuncSet( - (ImpactFunc("TC", 1), ImpactFunc("TC", 3), ImpactFunc("FL", 3)))) + _check_contents( + ImpactFuncSet( + (ImpactFunc("TC", 1), ImpactFunc("TC", 3), ImpactFunc("FL", 3)) + ) + ) def test_remove_add_pass(self): """Test ImpactFunc can be added after removing.""" @@ -264,12 +283,14 @@ def test_remove_add_pass(self): imp_fun.append(vulner_1) self.assertEqual(1, len(imp_fun.get_hazard_types())) - self.assertEqual('TC', imp_fun.get_hazard_types()[0]) + self.assertEqual("TC", imp_fun.get_hazard_types()[0]) self.assertEqual(1, len(imp_fun.get_ids())) - self.assertEqual([1], imp_fun.get_ids('TC')) + self.assertEqual([1], imp_fun.get_ids("TC")) + class TestChecker(unittest.TestCase): """Test loading funcions from the ImpactFuncSet class""" + def test_check_wrongPAA_fail(self): """Wrong PAA definition""" intensity = np.array([1, 2, 3]) @@ -280,7 +301,7 @@ def test_check_wrongPAA_fail(self): with self.assertRaises(ValueError) as cm: imp_fun.check() - self.assertIn('Invalid ImpactFunc.paa size: 3 != 2.', str(cm.exception)) + self.assertIn("Invalid ImpactFunc.paa size: 3 != 2.", str(cm.exception)) def test_check_wrongMDD_fail(self): """Wrong MDD definition""" @@ -292,21 +313,24 @@ def test_check_wrongMDD_fail(self): with self.assertRaises(ValueError) as cm: imp_fun.check() - self.assertIn('Invalid ImpactFunc.mdd size: 3 != 2.', str(cm.exception)) + self.assertIn("Invalid ImpactFunc.mdd size: 3 != 2.", str(cm.exception)) + class TestExtend(unittest.TestCase): """Check extend function""" + def test_extend_to_empty_same(self): """Extend ImpactFuncSet to empty one.""" imp_fun = ImpactFuncSet() imp_fun_add = ImpactFuncSet( - (ImpactFunc("TC", 1), ImpactFunc("TC", 3), ImpactFunc("FL", 3))) + (ImpactFunc("TC", 1), ImpactFunc("TC", 3), ImpactFunc("FL", 3)) + ) imp_fun.extend(imp_fun_add) imp_fun.check() self.assertEqual(imp_fun.size(), 3) - self.assertEqual(imp_fun.size('TC'), 2) - self.assertEqual(imp_fun.size('FL'), 1) + self.assertEqual(imp_fun.size("TC"), 2) + self.assertEqual(imp_fun.size("FL"), 1) def test_extend_equal_same(self): """Extend the same ImpactFuncSet. The inital ImpactFuncSet is obtained.""" @@ -318,7 +342,7 @@ def test_extend_equal_same(self): imp_fun.check() self.assertEqual(imp_fun.size(), 1) - self.assertEqual(imp_fun.size('TC'), 1) + self.assertEqual(imp_fun.size("TC"), 1) def test_extend_different_extend(self): """Extend ImpactFuncSet with same and new values. 
The vulnerabilities @@ -334,14 +358,16 @@ def test_extend_different_extend(self): imp_fun.append(vulner_3) imp_fun_add = ImpactFuncSet( - (ImpactFunc("TC", 1), ImpactFunc("WS", 1), ImpactFunc("FL", 3))) + (ImpactFunc("TC", 1), ImpactFunc("WS", 1), ImpactFunc("FL", 3)) + ) imp_fun.extend(imp_fun_add) imp_fun.check() self.assertEqual(imp_fun.size(), 4) - self.assertEqual(imp_fun.size('TC'), 2) - self.assertEqual(imp_fun.size('FL'), 1) - self.assertEqual(imp_fun.size('WS'), 1) + self.assertEqual(imp_fun.size("TC"), 2) + self.assertEqual(imp_fun.size("FL"), 1) + self.assertEqual(imp_fun.size("WS"), 1) + class TestReaderMat(unittest.TestCase): """Test reader functionality of the imp_funcsFuncsExcel class""" @@ -353,7 +379,7 @@ def test_demo_file_pass(self): # Check results n_funcs = 2 - hazard = 'TC' + hazard = "TC" first_id = 1 second_id = 3 @@ -362,13 +388,12 @@ def test_demo_file_pass(self): # first function self.assertEqual(imp_funcs._data[hazard][first_id].id, 1) - self.assertEqual(imp_funcs._data[hazard][first_id].name, - 'Tropical cyclone default') - self.assertEqual(imp_funcs._data[hazard][first_id].intensity_unit, - 'm/s') + self.assertEqual( + imp_funcs._data[hazard][first_id].name, "Tropical cyclone default" + ) + self.assertEqual(imp_funcs._data[hazard][first_id].intensity_unit, "m/s") - self.assertEqual(imp_funcs._data[hazard][first_id].intensity.shape, - (9,)) + self.assertEqual(imp_funcs._data[hazard][first_id].intensity.shape, (9,)) self.assertEqual(imp_funcs._data[hazard][first_id].intensity[0], 0) self.assertEqual(imp_funcs._data[hazard][first_id].intensity[1], 20) self.assertEqual(imp_funcs._data[hazard][first_id].intensity[2], 30) @@ -389,13 +414,10 @@ def test_demo_file_pass(self): # second function self.assertEqual(imp_funcs._data[hazard][second_id].id, 3) - self.assertEqual(imp_funcs._data[hazard][second_id].name, - 'TC Building code') - self.assertEqual(imp_funcs._data[hazard][first_id].intensity_unit, - 'm/s') + self.assertEqual(imp_funcs._data[hazard][second_id].name, "TC Building code") + self.assertEqual(imp_funcs._data[hazard][first_id].intensity_unit, "m/s") - self.assertEqual(imp_funcs._data[hazard][second_id].intensity.shape, - (9,)) + self.assertEqual(imp_funcs._data[hazard][second_id].intensity.shape, (9,)) self.assertEqual(imp_funcs._data[hazard][second_id].intensity[0], 0) self.assertEqual(imp_funcs._data[hazard][second_id].intensity[1], 20) self.assertEqual(imp_funcs._data[hazard][second_id].intensity[2], 30) @@ -426,7 +448,7 @@ def test_demo_file_pass(self): # Check results n_funcs = 2 - hazard = 'TC' + hazard = "TC" first_id = 1 second_id = 3 @@ -435,13 +457,12 @@ def test_demo_file_pass(self): # first function self.assertEqual(imp_funcs._data[hazard][first_id].id, 1) - self.assertEqual(imp_funcs._data[hazard][first_id].name, - 'Tropical cyclone default') - self.assertEqual(imp_funcs._data[hazard][first_id].intensity_unit, - 'm/s') + self.assertEqual( + imp_funcs._data[hazard][first_id].name, "Tropical cyclone default" + ) + self.assertEqual(imp_funcs._data[hazard][first_id].intensity_unit, "m/s") - self.assertEqual(imp_funcs._data[hazard][first_id].intensity.shape, - (9,)) + self.assertEqual(imp_funcs._data[hazard][first_id].intensity.shape, (9,)) self.assertEqual(imp_funcs._data[hazard][first_id].intensity[0], 0) self.assertEqual(imp_funcs._data[hazard][first_id].intensity[1], 20) self.assertEqual(imp_funcs._data[hazard][first_id].intensity[2], 30) @@ -462,13 +483,10 @@ def test_demo_file_pass(self): # second function 
self.assertEqual(imp_funcs._data[hazard][second_id].id, 3) - self.assertEqual(imp_funcs._data[hazard][second_id].name, - 'TC Building code') - self.assertEqual(imp_funcs._data[hazard][first_id].intensity_unit, - 'm/s') + self.assertEqual(imp_funcs._data[hazard][second_id].name, "TC Building code") + self.assertEqual(imp_funcs._data[hazard][first_id].intensity_unit, "m/s") - self.assertEqual(imp_funcs._data[hazard][second_id].intensity.shape, - (9,)) + self.assertEqual(imp_funcs._data[hazard][second_id].intensity.shape, (9,)) self.assertEqual(imp_funcs._data[hazard][second_id].intensity[0], 0) self.assertEqual(imp_funcs._data[hazard][second_id].intensity[1], 20) self.assertEqual(imp_funcs._data[hazard][second_id].intensity[2], 30) @@ -492,9 +510,10 @@ def test_template_file_pass(self): imp_funcs = ImpactFuncSet.from_excel(ENT_TEMPLATE_XLS) # Check some results self.assertEqual(len(imp_funcs._data), 10) - self.assertEqual(len(imp_funcs._data['TC'][3].paa), 9) - self.assertEqual(len(imp_funcs._data['EQ'][1].intensity), 14) - self.assertEqual(len(imp_funcs._data['HS'][1].mdd), 16) + self.assertEqual(len(imp_funcs._data["TC"][3].paa), 9) + self.assertEqual(len(imp_funcs._data["EQ"][1].intensity), 14) + self.assertEqual(len(imp_funcs._data["HS"][1].mdd), 16) + class TestWriter(unittest.TestCase): """Test reader functionality of the imp_funcsFuncsExcel class""" @@ -505,9 +524,9 @@ def test_write_read_pass(self): imp_funcs = ImpactFuncSet() idx = 1 - name = 'code 1' - intensity_unit = 'm/s' - haz_type = 'TC' + name = "code 1" + intensity_unit = "m/s" + haz_type = "TC" intensity = np.arange(100) mdd = np.arange(100) * 0.5 paa = np.ones(100) @@ -515,7 +534,7 @@ def test_write_read_pass(self): imp_funcs.append(imp1) idx = 2 - name = 'code 2' + name = "code 2" intensity = np.arange(102) mdd = np.arange(102) * 0.25 paa = np.ones(102) @@ -523,9 +542,9 @@ def test_write_read_pass(self): imp_funcs.append(imp2) idx = 1 - name = 'code 1' - intensity_unit = 'm' - haz_type = 'FL' + name = "code 1" + intensity_unit = "m" + haz_type = "FL" intensity = np.arange(86) mdd = np.arange(86) * 0.15 paa = np.ones(86) @@ -533,16 +552,16 @@ def test_write_read_pass(self): imp_funcs.append(imp3) idx = 15 - name = 'code 15' - intensity_unit = 'K' - haz_type = 'DR' + name = "code 15" + intensity_unit = "K" + haz_type = "DR" intensity = np.arange(5) mdd = np.arange(5) paa = np.ones(5) imp4 = ImpactFunc(haz_type, idx, intensity, mdd, paa, intensity_unit, name) imp_funcs.append(imp4) - file_name = CONFIG.impact_funcs.test_data.dir().joinpath('test_write.xlsx') + file_name = CONFIG.impact_funcs.test_data.dir().joinpath("test_write.xlsx") imp_funcs.write_excel(file_name) imp_res = ImpactFuncSet.from_excel(file_name) @@ -550,13 +569,13 @@ def test_write_read_pass(self): # first function for fun_haz, fun_dict in imp_res.get_func().items(): for fun_id, fun in fun_dict.items(): - if fun_haz == 'TC' and fun_id == 1: + if fun_haz == "TC" and fun_id == 1: ref_fun = imp1 - elif fun_haz == 'TC' and fun_id == 2: + elif fun_haz == "TC" and fun_id == 2: ref_fun = imp2 - elif fun_haz == 'FL' and fun_id == 1: + elif fun_haz == "FL" and fun_id == 1: ref_fun = imp3 - elif fun_haz == 'DR' and fun_id == 15: + elif fun_haz == "DR" and fun_id == 15: ref_fun = imp4 else: self.assertEqual(1, 0) @@ -569,6 +588,7 @@ def test_write_read_pass(self): self.assertTrue(np.allclose(ref_fun.mdd, fun.mdd)) self.assertTrue(np.allclose(ref_fun.paa, fun.paa)) + # Execute Tests if __name__ == "__main__": TESTS = 
unittest.TestLoader().loadTestsFromTestCase(TestContainer) diff --git a/climada/entity/impact_funcs/test/test_tc.py b/climada/entity/impact_funcs/test/test_tc.py index e2db9e609..ffa502b51 100644 --- a/climada/entity/impact_funcs/test/test_tc.py +++ b/climada/entity/impact_funcs/test/test_tc.py @@ -20,11 +20,12 @@ """ import unittest + import numpy as np import pandas as pd -from climada.entity.impact_funcs.trop_cyclone import ImpfTropCyclone -from climada.entity.impact_funcs.trop_cyclone import ImpfSetTropCyclone +from climada.entity.impact_funcs.trop_cyclone import ImpfSetTropCyclone, ImpfTropCyclone + class TestEmanuelFormula(unittest.TestCase): """Impact function interpolation test""" @@ -32,10 +33,10 @@ class TestEmanuelFormula(unittest.TestCase): def test_default_values_pass(self): """Compute mdr interpolating values.""" imp_fun = ImpfTropCyclone.from_emanuel_usa() - self.assertEqual(imp_fun.name, 'Emanuel 2011') - self.assertEqual(imp_fun.haz_type, 'TC') + self.assertEqual(imp_fun.name, "Emanuel 2011") + self.assertEqual(imp_fun.haz_type, "TC") self.assertEqual(imp_fun.id, 1) - self.assertEqual(imp_fun.intensity_unit, 'm/s') + self.assertEqual(imp_fun.intensity_unit, "m/s") self.assertTrue(np.array_equal(imp_fun.intensity, np.arange(0, 121, 5))) self.assertTrue(np.array_equal(imp_fun.paa, np.ones((25,)))) self.assertTrue(np.array_equal(imp_fun.mdd[0:6], np.zeros((6,)))) @@ -66,38 +67,43 @@ def test_default_values_pass(self): def test_values_pass(self): """Compute mdr interpolating values.""" - imp_fun = ImpfTropCyclone.from_emanuel_usa(impf_id=5, - intensity=np.arange(0, 6, 1), - v_thresh=2, - v_half=5, - scale=0.5) - self.assertEqual(imp_fun.name, 'Emanuel 2011') - self.assertEqual(imp_fun.haz_type, 'TC') + imp_fun = ImpfTropCyclone.from_emanuel_usa( + impf_id=5, intensity=np.arange(0, 6, 1), v_thresh=2, v_half=5, scale=0.5 + ) + self.assertEqual(imp_fun.name, "Emanuel 2011") + self.assertEqual(imp_fun.haz_type, "TC") self.assertEqual(imp_fun.id, 5) - self.assertEqual(imp_fun.intensity_unit, 'm/s') + self.assertEqual(imp_fun.intensity_unit, "m/s") self.assertTrue(np.array_equal(imp_fun.intensity, np.arange(0, 6, 1))) self.assertTrue(np.array_equal(imp_fun.paa, np.ones((6,)))) self.assertTrue(np.array_equal(imp_fun.mdd[0:3], np.zeros((3,)))) - self.assertTrue(np.array_equal(imp_fun.mdd[3:], - np.array([0.017857142857142853, 0.11428571428571425, - 0.250000000000000]))) + self.assertTrue( + np.array_equal( + imp_fun.mdd[3:], + np.array( + [0.017857142857142853, 0.11428571428571425, 0.250000000000000] + ), + ) + ) def test_wrong_shape(self): """Set shape parameters.""" with self.assertRaises(ValueError): - imp_fun = ImpfTropCyclone.from_emanuel_usa(impf_id=5, v_thresh=2, - v_half=1, - intensity=np.arange(0, 6, 1)) + imp_fun = ImpfTropCyclone.from_emanuel_usa( + impf_id=5, v_thresh=2, v_half=1, intensity=np.arange(0, 6, 1) + ) def test_wrong_scale(self): """Set shape parameters.""" with self.assertRaises(ValueError): - imp_fun = ImpfTropCyclone.from_emanuel_usa(impf_id=5, scale=2, - intensity=np.arange(0, 6, 1)) + imp_fun = ImpfTropCyclone.from_emanuel_usa( + impf_id=5, scale=2, intensity=np.arange(0, 6, 1) + ) + class TestCalibratedImpfSet(unittest.TestCase): """Test inititation of IFS with regional calibrated TC IFs - based on Eberenz et al. (2020)""" + based on Eberenz et al. 
(2020)""" def test_default_values_pass(self): """Test return TDR optimized IFs (TDR=1)""" @@ -105,31 +111,33 @@ def test_default_values_pass(self): v_halfs = ImpfSetTropCyclone.calibrated_regional_vhalf() # extract IF for region WP4 impf_wp4 = impfs.get_func(fun_id=9)[0] - self.assertIn('TC', impfs.get_ids().keys()) + self.assertIn("TC", impfs.get_ids().keys()) self.assertEqual(impfs.size(), 10) - self.assertEqual(impfs.get_ids()['TC'], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) - self.assertEqual(impf_wp4.intensity_unit, 'm/s') - self.assertEqual(impf_wp4.name, 'North West Pacific (WP4)') - self.assertAlmostEqual(v_halfs['WP2'], 188.4, places=7) - self.assertAlmostEqual(v_halfs['ROW'], 110.1, places=7) + self.assertEqual(impfs.get_ids()["TC"], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + self.assertEqual(impf_wp4.intensity_unit, "m/s") + self.assertEqual(impf_wp4.name, "North West Pacific (WP4)") + self.assertAlmostEqual(v_halfs["WP2"], 188.4, places=7) + self.assertAlmostEqual(v_halfs["ROW"], 110.1, places=7) self.assertListEqual(list(impf_wp4.intensity), list(np.arange(0, 121, 5))) - self.assertEqual(impf_wp4.paa.min(), 1.) + self.assertEqual(impf_wp4.paa.min(), 1.0) self.assertEqual(impf_wp4.mdd.min(), 0.0) self.assertAlmostEqual(impf_wp4.mdd.max(), 0.15779133833203, places=5) self.assertAlmostEqual(impf_wp4.calc_mdr(75), 0.02607326527808, places=5) def test_RMSF_pass(self): """Test return RMSF optimized impact function set (RMSF=minimum)""" - impfs = ImpfSetTropCyclone.from_calibrated_regional_ImpfSet('RMSF') - v_halfs = ImpfSetTropCyclone.calibrated_regional_vhalf(calibration_approach='RMSF') + impfs = ImpfSetTropCyclone.from_calibrated_regional_ImpfSet("RMSF") + v_halfs = ImpfSetTropCyclone.calibrated_regional_vhalf( + calibration_approach="RMSF" + ) # extract IF for region NA1 impf_na1 = impfs.get_func(fun_id=1)[0] self.assertEqual(impfs.size(), 10) - self.assertEqual(impfs.get_ids()['TC'], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) - self.assertEqual(impf_na1.intensity_unit, 'm/s') - self.assertEqual(impf_na1.name, 'Caribbean and Mexico (NA1)') - self.assertAlmostEqual(v_halfs['NA1'], 59.6, places=7) - self.assertAlmostEqual(v_halfs['ROW'], 73.4, places=7) + self.assertEqual(impfs.get_ids()["TC"], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + self.assertEqual(impf_na1.intensity_unit, "m/s") + self.assertEqual(impf_na1.name, "Caribbean and Mexico (NA1)") + self.assertAlmostEqual(v_halfs["NA1"], 59.6, places=7) + self.assertAlmostEqual(v_halfs["ROW"], 73.4, places=7) self.assertListEqual(list(impf_na1.intensity), list(np.arange(0, 121, 5))) self.assertEqual(impf_na1.mdd.min(), 0.0) self.assertAlmostEqual(impf_na1.mdd.max(), 0.95560418241669, places=5) @@ -137,15 +145,15 @@ def test_RMSF_pass(self): def test_quantile_pass(self): """Test return impact function set from quantile of inidividual event fitting (EDR=1)""" - impfs = ImpfSetTropCyclone.from_calibrated_regional_ImpfSet('EDR') - impfs_p10 = ImpfSetTropCyclone.from_calibrated_regional_ImpfSet('EDR', q=.1) + impfs = ImpfSetTropCyclone.from_calibrated_regional_ImpfSet("EDR") + impfs_p10 = ImpfSetTropCyclone.from_calibrated_regional_ImpfSet("EDR", q=0.1) # extract IF for region SI impf_si = impfs.get_func(fun_id=5)[0] impf_si_p10 = impfs_p10.get_func(fun_id=5)[0] self.assertEqual(impfs.size(), 10) self.assertEqual(impfs_p10.size(), 10) - self.assertEqual(impf_si.intensity_unit, 'm/s') - self.assertEqual(impf_si_p10.name, 'South Indian (SI)') + self.assertEqual(impf_si.intensity_unit, "m/s") + self.assertEqual(impf_si_p10.name, "South Indian (SI)") 
self.assertAlmostEqual(impf_si_p10.mdd.max(), 0.99999999880, places=5) self.assertAlmostEqual(impf_si.calc_mdr(30), 0.01620503041, places=5) intensity = np.random.randint(26, impf_si.intensity.max()) @@ -154,11 +162,12 @@ def test_quantile_pass(self): def test_get_countries_per_region(self): """Test static get_countries_per_region()""" ifs = ImpfSetTropCyclone() - out = ifs.get_countries_per_region('NA2') - self.assertEqual(out[0], 'USA and Canada') + out = ifs.get_countries_per_region("NA2") + self.assertEqual(out[0], "USA and Canada") self.assertEqual(out[1], 2) self.assertListEqual(out[2], [124, 840]) - self.assertListEqual(out[3], ['CAN', 'USA']) + self.assertListEqual(out[3], ["CAN", "USA"]) + # Execute Tests if __name__ == "__main__": diff --git a/climada/entity/impact_funcs/test/test_ws.py b/climada/entity/impact_funcs/test/test_ws.py index 4b2f79c39..61258a537 100644 --- a/climada/entity/impact_funcs/test/test_ws.py +++ b/climada/entity/impact_funcs/test/test_ws.py @@ -20,39 +20,89 @@ """ import unittest + import numpy as np from climada.entity.impact_funcs.storm_europe import ImpfStormEurope + class TestStormEuropeDefault(unittest.TestCase): """Impact function interpolation test""" def test_default_values_pass(self): """Compute mdr interpolating values.""" imp_fun = ImpfStormEurope.from_schwierz() - self.assertEqual(imp_fun.name, 'Schwierz 2010') - self.assertEqual(imp_fun.haz_type, 'WS') + self.assertEqual(imp_fun.name, "Schwierz 2010") + self.assertEqual(imp_fun.haz_type, "WS") self.assertEqual(imp_fun.id, 1) - self.assertEqual(imp_fun.intensity_unit, 'm/s') - self.assertTrue(np.array_equal(imp_fun.intensity, np.array([0, 20, 25, 30, 35, 40, 45, 50, 55, 60, 80, 100]))) - self.assertTrue(np.array_equal(imp_fun.paa[4:8], np.array([0.03921, 0.10707, 0.25357, 0.48869]))) - self.assertTrue(np.array_equal(imp_fun.mdd[4:8], np.array([0.00367253, 0.00749977, 0.01263556, 0.01849639]))) + self.assertEqual(imp_fun.intensity_unit, "m/s") + self.assertTrue( + np.array_equal( + imp_fun.intensity, + np.array([0, 20, 25, 30, 35, 40, 45, 50, 55, 60, 80, 100]), + ) + ) + self.assertTrue( + np.array_equal( + imp_fun.paa[4:8], np.array([0.03921, 0.10707, 0.25357, 0.48869]) + ) + ) + self.assertTrue( + np.array_equal( + imp_fun.mdd[4:8], + np.array([0.00367253, 0.00749977, 0.01263556, 0.01849639]), + ) + ) imp_fun2 = ImpfStormEurope.from_welker() - self.assertEqual(imp_fun2.name, 'Welker 2021') - self.assertEqual(imp_fun2.haz_type, 'WS') + self.assertEqual(imp_fun2.name, "Welker 2021") + self.assertEqual(imp_fun2.haz_type, "WS") self.assertEqual(imp_fun2.id, 1) - self.assertEqual(imp_fun2.intensity_unit, 'm/s') - self.assertTrue(np.array_equal(imp_fun2.intensity[np.arange(0, 120, 13)], - np.array([0., 10., 20., 30., 40., 50., 60., 70., 80., 90.]))) - self.assertTrue(np.allclose(imp_fun2.paa[np.arange(0, 120, 13)], - np.array([0., 0., 0., 0.00900782, 0.1426727, - 0.65118822, 1., 1., 1., 1.]))) - self.assertTrue(np.allclose(imp_fun2.mdd[np.arange(0, 120, 13)], - np.array([0., 0., 0., 0.00236542, 0.00999358, - 0.02464677, 0.04964029, 0.04964029, 0.04964029, 0.04964029]))) - - + self.assertEqual(imp_fun2.intensity_unit, "m/s") + self.assertTrue( + np.array_equal( + imp_fun2.intensity[np.arange(0, 120, 13)], + np.array([0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0]), + ) + ) + self.assertTrue( + np.allclose( + imp_fun2.paa[np.arange(0, 120, 13)], + np.array( + [ + 0.0, + 0.0, + 0.0, + 0.00900782, + 0.1426727, + 0.65118822, + 1.0, + 1.0, + 1.0, + 1.0, + ] + ), + ) + ) + self.assertTrue( + 
np.allclose( + imp_fun2.mdd[np.arange(0, 120, 13)], + np.array( + [ + 0.0, + 0.0, + 0.0, + 0.00236542, + 0.00999358, + 0.02464677, + 0.04964029, + 0.04964029, + 0.04964029, + 0.04964029, + ] + ), + ) + ) # Execute Tests diff --git a/climada/entity/impact_funcs/trop_cyclone.py b/climada/entity/impact_funcs/trop_cyclone.py index ab432f625..18492bbb1 100644 --- a/climada/entity/impact_funcs/trop_cyclone.py +++ b/climada/entity/impact_funcs/trop_cyclone.py @@ -19,12 +19,13 @@ Define impact functions for tropical cyclnes . """ -__all__ = ['ImpfTropCyclone', 'ImpfSetTropCyclone', 'IFTropCyclone'] +__all__ = ["ImpfTropCyclone", "ImpfSetTropCyclone", "IFTropCyclone"] import logging -from deprecation import deprecated + import numpy as np import pandas as pd +from deprecation import deprecated from climada.entity.impact_funcs.base import ImpactFunc from climada.entity.impact_funcs.impact_func_set import ImpactFuncSet @@ -32,22 +33,31 @@ LOGGER = logging.getLogger(__name__) + class ImpfTropCyclone(ImpactFunc): """Impact functions for tropical cyclones.""" def __init__(self): ImpactFunc.__init__(self) - self.haz_type = 'TC' + self.haz_type = "TC" def set_emanuel_usa(self, *args, **kwargs): """This function is deprecated, use from_emanuel_usa() instead.""" - LOGGER.warning("The use of ImpfTropCyclone.set_emanuel_usa is deprecated." - "Use ImpfTropCyclone.from_emanuel_usa instead.") + LOGGER.warning( + "The use of ImpfTropCyclone.set_emanuel_usa is deprecated." + "Use ImpfTropCyclone.from_emanuel_usa instead." + ) self.__dict__ = ImpfTropCyclone.from_emanuel_usa(*args, **kwargs).__dict__ @classmethod - def from_emanuel_usa(cls, impf_id=1, intensity=np.arange(0, 121, 5), - v_thresh=25.7, v_half=74.7, scale=1.0): + def from_emanuel_usa( + cls, + impf_id=1, + intensity=np.arange(0, 121, 5), + v_thresh=25.7, + v_half=74.7, + scale=1.0, + ): """ Init TC impact function using the formula of Kerry Emanuel, 2011: 'Global Warming Effects on U.S. Hurricane Damage', @@ -81,16 +91,16 @@ def from_emanuel_usa(cls, impf_id=1, intensity=np.arange(0, 121, 5), TC impact function instance based on formula by Emanuel (2011) """ if v_half <= v_thresh: - raise ValueError('Shape parameters out of range: v_half <= v_thresh.') + raise ValueError("Shape parameters out of range: v_half <= v_thresh.") if v_thresh < 0 or v_half < 0: - raise ValueError('Negative shape parameter.') + raise ValueError("Negative shape parameter.") if scale > 1 or scale <= 0: - raise ValueError('Scale parameter out of range.') + raise ValueError("Scale parameter out of range.") impf = cls() - impf.name = 'Emanuel 2011' + impf.name = "Emanuel 2011" impf.id = impf_id - impf.intensity_unit = 'm/s' + impf.intensity_unit = "m/s" impf.intensity = intensity impf.paa = np.ones(intensity.shape) v_temp = (impf.intensity - v_thresh) / (v_half - v_thresh) @@ -99,6 +109,7 @@ def from_emanuel_usa(cls, impf_id=1, intensity=np.arange(0, 121, 5), impf.mdd *= scale return impf + class ImpfSetTropCyclone(ImpactFuncSet): """Impact function set (ImpfS) for tropical cyclones.""" @@ -107,15 +118,19 @@ def __init__(self): def set_calibrated_regional_ImpfSet(self, *args, **kwargs): """This function is deprecated, use from_calibrated_regional_ImpfSet() instead.""" - LOGGER.warning("ImpfSetTropCyclone.set_calibrated_regional_ImpfSet is deprecated." 
- "Use ImpfSetTropCyclone.from_calibrated_regional_ImpfSet instead.") - self.__dict__ = \ - ImpfSetTropCyclone.from_calibrated_regional_ImpfSet(*args, **kwargs).__dict__ + LOGGER.warning( + "ImpfSetTropCyclone.set_calibrated_regional_ImpfSet is deprecated." + "Use ImpfSetTropCyclone.from_calibrated_regional_ImpfSet instead." + ) + self.__dict__ = ImpfSetTropCyclone.from_calibrated_regional_ImpfSet( + *args, **kwargs + ).__dict__ return ImpfSetTropCyclone.calibrated_regional_vhalf(*args, **kwargs) @classmethod - def from_calibrated_regional_ImpfSet(cls, calibration_approach='TDR', q=.5, - input_file_path=None, version=1): + def from_calibrated_regional_ImpfSet( + cls, calibration_approach="TDR", q=0.5, input_file_path=None, version=1 + ): """Calibrated regional TC wind impact functions Based on Eberenz et al. 2021: https://doi.org/10.5194/nhess-21-393-2021 @@ -154,38 +169,41 @@ def from_calibrated_regional_ImpfSet(cls, calibration_approach='TDR', q=.5, q=q, input_file_path=input_file_path, version=version, - ) + ) # define regions and parameters: v_0 = 25.7 # v_threshold based on Emanuel (2011) scale = 1.0 regions_long = dict() - regions_long['NA1'] = 'Caribbean and Mexico (NA1)' - regions_long['NA2'] = 'USA and Canada (NA2)' - regions_long['NI'] = 'North Indian (NI)' - regions_long['OC'] = 'Oceania (OC)' - regions_long['SI'] = 'South Indian (SI)' - regions_long['WP1'] = 'South East Asia (WP1)' - regions_long['WP2'] = 'Philippines (WP2)' - regions_long['WP3'] = 'China Mainland (WP3)' - regions_long['WP4'] = 'North West Pacific (WP4)' - regions_long['ROW'] = 'Global' + regions_long["NA1"] = "Caribbean and Mexico (NA1)" + regions_long["NA2"] = "USA and Canada (NA2)" + regions_long["NI"] = "North Indian (NI)" + regions_long["OC"] = "Oceania (OC)" + regions_long["SI"] = "South Indian (SI)" + regions_long["WP1"] = "South East Asia (WP1)" + regions_long["WP2"] = "Philippines (WP2)" + regions_long["WP3"] = "China Mainland (WP3)" + regions_long["WP4"] = "North West Pacific (WP4)" + regions_long["ROW"] = "Global" # init impact function set impf_set = cls() for idx, region in enumerate(reg_v_half.keys()): - impf_tc = ImpfTropCyclone.from_emanuel_usa(impf_id=int(idx + 1), - v_thresh=v_0, - v_half=reg_v_half[region], - scale=scale) + impf_tc = ImpfTropCyclone.from_emanuel_usa( + impf_id=int(idx + 1), + v_thresh=v_0, + v_half=reg_v_half[region], + scale=scale, + ) impf_tc.name = regions_long[region] impf_set.append(impf_tc) return impf_set @staticmethod - def calibrated_regional_vhalf(calibration_approach='TDR', q=.5, - input_file_path=None, version=1): + def calibrated_regional_vhalf( + calibration_approach="TDR", q=0.5, input_file_path=None, version=1 + ): """Calibrated TC wind impact function slope parameter v_half per region Based on Eberenz et al., 2021: https://doi.org/10.5194/nhess-21-393-2021 @@ -224,40 +242,46 @@ def calibrated_regional_vhalf(calibration_approach='TDR', q=.5, TC impact function slope parameter v_half per region """ calibration_approach = calibration_approach.upper() - if calibration_approach not in ['TDR', 'TDR1.0', 'TDR1.5', 'RMSF', 'EDR']: - raise ValueError('calibration_approach is invalid') - if 'EDR' in calibration_approach and (q < 0. 
or q > 1.): - raise ValueError('Quantile q out of range [0, 1]') - if calibration_approach == 'TDR': - calibration_approach = 'TDR1.0' + if calibration_approach not in ["TDR", "TDR1.0", "TDR1.5", "RMSF", "EDR"]: + raise ValueError("calibration_approach is invalid") + if "EDR" in calibration_approach and (q < 0.0 or q > 1.0): + raise ValueError("Quantile q out of range [0, 1]") + if calibration_approach == "TDR": + calibration_approach = "TDR1.0" # load calibration results depending on approach: if isinstance(input_file_path, str): - df_calib_results = pd.read_csv(input_file_path, - encoding="ISO-8859-1", header=0) + df_calib_results = pd.read_csv( + input_file_path, encoding="ISO-8859-1", header=0 + ) elif isinstance(input_file_path, pd.DataFrame): df_calib_results = input_file_path else: df_calib_results = pd.read_csv( SYSTEM_DIR.joinpath( - 'tc_impf_cal_v%02.0f_%s.csv' % (version, calibration_approach)), - encoding="ISO-8859-1", header=0) + "tc_impf_cal_v%02.0f_%s.csv" % (version, calibration_approach) + ), + encoding="ISO-8859-1", + header=0, + ) - regions_short = ['NA1', 'NA2', 'NI', 'OC', 'SI', 'WP1', 'WP2', 'WP3', 'WP4'] + regions_short = ["NA1", "NA2", "NI", "OC", "SI", "WP1", "WP2", "WP3", "WP4"] # loop over calibration regions (column cal_region2 in df): reg_v_half = dict() for region in regions_short: df_reg = df_calib_results.loc[df_calib_results.cal_region2 == region] df_reg = df_reg.reset_index(drop=True) - reg_v_half[region] = np.round(df_reg['v_half'].quantile(q=q), 5) + reg_v_half[region] = np.round(df_reg["v_half"].quantile(q=q), 5) # rest of the world (ROW), calibrated by all data: - regions_short = regions_short + ['ROW'] - if calibration_approach == 'EDR': - reg_v_half[regions_short[-1]] = np.round(df_calib_results['v_half'].quantile(q=q), 5) + regions_short = regions_short + ["ROW"] + if calibration_approach == "EDR": + reg_v_half[regions_short[-1]] = np.round( + df_calib_results["v_half"].quantile(q=q), 5 + ) else: - df_reg = df_calib_results.loc[df_calib_results.cal_region2 == 'GLB'] + df_reg = df_calib_results.loc[df_calib_results.cal_region2 == "GLB"] df_reg = df_reg.reset_index(drop=True) - reg_v_half[regions_short[-1]] = np.round(df_reg['v_half'].values[0], 5) + reg_v_half[regions_short[-1]] = np.round(df_reg["v_half"].values[0], 5) return reg_v_half @staticmethod @@ -286,95 +310,531 @@ def get_countries_per_region(region=None): numerical ISO3codes (=region_id) per region """ if not region: - region = 'all' - iso3n = {'NA1': [660, 28, 32, 533, 44, 52, 84, 60, 68, 132, 136, - 152, 170, 188, 192, 212, 214, 218, 222, 238, 254, - 308, 312, 320, 328, 332, 340, 388, 474, 484, 500, - 558, 591, 600, 604, 630, 654, 659, 662, 670, 534, - 740, 780, 796, 858, 862, 92, 850], - 'NA2': [124, 840], - 'NI': [4, 51, 31, 48, 50, 64, 262, 232, - 231, 268, 356, 364, 368, 376, 400, 398, 414, 417, - 422, 462, 496, 104, 524, 512, 586, 634, 682, 706, - 144, 760, 762, 795, 800, 784, 860, 887], - 'OC': [16, 36, 184, 242, 258, 316, 296, 584, 583, 520, - 540, 554, 570, 574, 580, 585, 598, 612, 882, 90, - 626, 772, 776, 798, 548, 876], - 'SI': [174, 180, 748, 450, 454, 466, 480, 508, 710, 834, - 716], - 'WP1': [116, 360, 418, 458, 764, 704], - 'WP2': [608], - 'WP3': [156], - 'WP4': [344, 392, 410, 446, 158], - 'ROW': [8, 12, 20, 24, 10, 40, 112, 56, 204, 535, 70, 72, - 74, 76, 86, 96, 100, 854, 108, 120, 140, 148, 162, - 166, 178, 191, 531, 196, 203, 384, 208, 818, 226, - 233, 234, 246, 250, 260, 266, 270, 276, 288, 292, - 300, 304, 831, 324, 624, 334, 336, 348, 352, 372, - 833, 380, 832, 
404, 408, 983, 428, 426, 430, 434, - 438, 440, 442, 470, 478, 175, 498, 492, 499, 504, - 516, 528, 562, 566, 807, 578, 275, 616, 620, 642, - 643, 646, 638, 652, 663, 666, 674, 678, 686, 688, - 690, 694, 702, 703, 705, 239, 728, 724, 729, 744, - 752, 756, 768, 788, 792, 804, 826, 581, 732, 894, - 248]} - iso3a = {'NA1': ['AIA', 'ATG', 'ARG', 'ABW', 'BHS', 'BRB', 'BLZ', 'BMU', - 'BOL', 'CPV', 'CYM', 'CHL', 'COL', 'CRI', 'CUB', 'DMA', - 'DOM', 'ECU', 'SLV', 'FLK', 'GUF', 'GRD', 'GLP', 'GTM', - 'GUY', 'HTI', 'HND', 'JAM', 'MTQ', 'MEX', 'MSR', 'NIC', - 'PAN', 'PRY', 'PER', 'PRI', 'SHN', 'KNA', 'LCA', 'VCT', - 'SXM', 'SUR', 'TTO', 'TCA', 'URY', 'VEN', 'VGB', 'VIR'], - 'NA2': ['CAN', 'USA'], - 'NI': ['AFG', 'ARM', 'AZE', 'BHR', 'BGD', 'BTN', 'DJI', 'ERI', - 'ETH', 'GEO', 'IND', 'IRN', 'IRQ', 'ISR', 'JOR', 'KAZ', - 'KWT', 'KGZ', 'LBN', 'MDV', 'MNG', 'MMR', 'NPL', 'OMN', - 'PAK', 'QAT', 'SAU', 'SOM', 'LKA', 'SYR', 'TJK', 'TKM', - 'UGA', 'ARE', 'UZB', 'YEM'], - 'OC': ['ASM', 'AUS', 'COK', 'FJI', 'PYF', 'GUM', 'KIR', 'MHL', - 'FSM', 'NRU', 'NCL', 'NZL', 'NIU', 'NFK', 'MNP', 'PLW', - 'PNG', 'PCN', 'WSM', 'SLB', 'TLS', 'TKL', 'TON', 'TUV', - 'VUT', 'WLF'], - 'SI': ['COM', 'COD', 'SWZ', 'MDG', 'MWI', 'MLI', 'MUS', 'MOZ', - 'ZAF', 'TZA', 'ZWE'], - 'WP1': ['KHM', 'IDN', 'LAO', 'MYS', 'THA', 'VNM'], - 'WP2': ['PHL'], - 'WP3': ['CHN'], - 'WP4': ['HKG', 'JPN', 'KOR', 'MAC', 'TWN'], - 'ROW': ['ALB', 'DZA', 'AND', 'AGO', 'ATA', 'AUT', 'BLR', 'BEL', - 'BEN', 'BES', 'BIH', 'BWA', 'BVT', 'BRA', 'IOT', 'BRN', - 'BGR', 'BFA', 'BDI', 'CMR', 'CAF', 'TCD', 'CXR', 'CCK', - 'COG', 'HRV', 'CUW', 'CYP', 'CZE', 'CIV', 'DNK', 'EGY', - 'GNQ', 'EST', 'FRO', 'FIN', 'FRA', 'ATF', 'GAB', 'GMB', - 'DEU', 'GHA', 'GIB', 'GRC', 'GRL', 'GGY', 'GIN', 'GNB', - 'HMD', 'VAT', 'HUN', 'ISL', 'IRL', 'IMN', 'ITA', 'JEY', - 'KEN', 'PRK', 'XKX', 'LVA', 'LSO', 'LBR', 'LBY', 'LIE', - 'LTU', 'LUX', 'MLT', 'MRT', 'MYT', 'MDA', 'MCO', 'MNE', - 'MAR', 'NAM', 'NLD', 'NER', 'NGA', 'MKD', 'NOR', 'PSE', - 'POL', 'PRT', 'ROU', 'RUS', 'RWA', 'REU', 'BLM', 'MAF', - 'SPM', 'SMR', 'STP', 'SEN', 'SRB', 'SYC', 'SLE', 'SGP', - 'SVK', 'SVN', 'SGS', 'SSD', 'ESP', 'SDN', 'SJM', 'SWE', - 'CHE', 'TGO', 'TUN', 'TUR', 'UKR', 'GBR', 'UMI', 'ESH', - 'ZMB', 'ALA']} - impf_id = {'NA1': 1, 'NA2': 2, 'NI': 3, 'OC': 4, 'SI': 5, - 'WP1': 6, 'WP2': 7, 'WP3': 8, 'WP4': 9, 'ROW': 10} + region = "all" + iso3n = { + "NA1": [ + 660, + 28, + 32, + 533, + 44, + 52, + 84, + 60, + 68, + 132, + 136, + 152, + 170, + 188, + 192, + 212, + 214, + 218, + 222, + 238, + 254, + 308, + 312, + 320, + 328, + 332, + 340, + 388, + 474, + 484, + 500, + 558, + 591, + 600, + 604, + 630, + 654, + 659, + 662, + 670, + 534, + 740, + 780, + 796, + 858, + 862, + 92, + 850, + ], + "NA2": [124, 840], + "NI": [ + 4, + 51, + 31, + 48, + 50, + 64, + 262, + 232, + 231, + 268, + 356, + 364, + 368, + 376, + 400, + 398, + 414, + 417, + 422, + 462, + 496, + 104, + 524, + 512, + 586, + 634, + 682, + 706, + 144, + 760, + 762, + 795, + 800, + 784, + 860, + 887, + ], + "OC": [ + 16, + 36, + 184, + 242, + 258, + 316, + 296, + 584, + 583, + 520, + 540, + 554, + 570, + 574, + 580, + 585, + 598, + 612, + 882, + 90, + 626, + 772, + 776, + 798, + 548, + 876, + ], + "SI": [174, 180, 748, 450, 454, 466, 480, 508, 710, 834, 716], + "WP1": [116, 360, 418, 458, 764, 704], + "WP2": [608], + "WP3": [156], + "WP4": [344, 392, 410, 446, 158], + "ROW": [ + 8, + 12, + 20, + 24, + 10, + 40, + 112, + 56, + 204, + 535, + 70, + 72, + 74, + 76, + 86, + 96, + 100, + 854, + 108, + 120, + 140, + 148, + 162, + 166, + 178, + 191, + 531, + 196, + 
203, + 384, + 208, + 818, + 226, + 233, + 234, + 246, + 250, + 260, + 266, + 270, + 276, + 288, + 292, + 300, + 304, + 831, + 324, + 624, + 334, + 336, + 348, + 352, + 372, + 833, + 380, + 832, + 404, + 408, + 983, + 428, + 426, + 430, + 434, + 438, + 440, + 442, + 470, + 478, + 175, + 498, + 492, + 499, + 504, + 516, + 528, + 562, + 566, + 807, + 578, + 275, + 616, + 620, + 642, + 643, + 646, + 638, + 652, + 663, + 666, + 674, + 678, + 686, + 688, + 690, + 694, + 702, + 703, + 705, + 239, + 728, + 724, + 729, + 744, + 752, + 756, + 768, + 788, + 792, + 804, + 826, + 581, + 732, + 894, + 248, + ], + } + iso3a = { + "NA1": [ + "AIA", + "ATG", + "ARG", + "ABW", + "BHS", + "BRB", + "BLZ", + "BMU", + "BOL", + "CPV", + "CYM", + "CHL", + "COL", + "CRI", + "CUB", + "DMA", + "DOM", + "ECU", + "SLV", + "FLK", + "GUF", + "GRD", + "GLP", + "GTM", + "GUY", + "HTI", + "HND", + "JAM", + "MTQ", + "MEX", + "MSR", + "NIC", + "PAN", + "PRY", + "PER", + "PRI", + "SHN", + "KNA", + "LCA", + "VCT", + "SXM", + "SUR", + "TTO", + "TCA", + "URY", + "VEN", + "VGB", + "VIR", + ], + "NA2": ["CAN", "USA"], + "NI": [ + "AFG", + "ARM", + "AZE", + "BHR", + "BGD", + "BTN", + "DJI", + "ERI", + "ETH", + "GEO", + "IND", + "IRN", + "IRQ", + "ISR", + "JOR", + "KAZ", + "KWT", + "KGZ", + "LBN", + "MDV", + "MNG", + "MMR", + "NPL", + "OMN", + "PAK", + "QAT", + "SAU", + "SOM", + "LKA", + "SYR", + "TJK", + "TKM", + "UGA", + "ARE", + "UZB", + "YEM", + ], + "OC": [ + "ASM", + "AUS", + "COK", + "FJI", + "PYF", + "GUM", + "KIR", + "MHL", + "FSM", + "NRU", + "NCL", + "NZL", + "NIU", + "NFK", + "MNP", + "PLW", + "PNG", + "PCN", + "WSM", + "SLB", + "TLS", + "TKL", + "TON", + "TUV", + "VUT", + "WLF", + ], + "SI": [ + "COM", + "COD", + "SWZ", + "MDG", + "MWI", + "MLI", + "MUS", + "MOZ", + "ZAF", + "TZA", + "ZWE", + ], + "WP1": ["KHM", "IDN", "LAO", "MYS", "THA", "VNM"], + "WP2": ["PHL"], + "WP3": ["CHN"], + "WP4": ["HKG", "JPN", "KOR", "MAC", "TWN"], + "ROW": [ + "ALB", + "DZA", + "AND", + "AGO", + "ATA", + "AUT", + "BLR", + "BEL", + "BEN", + "BES", + "BIH", + "BWA", + "BVT", + "BRA", + "IOT", + "BRN", + "BGR", + "BFA", + "BDI", + "CMR", + "CAF", + "TCD", + "CXR", + "CCK", + "COG", + "HRV", + "CUW", + "CYP", + "CZE", + "CIV", + "DNK", + "EGY", + "GNQ", + "EST", + "FRO", + "FIN", + "FRA", + "ATF", + "GAB", + "GMB", + "DEU", + "GHA", + "GIB", + "GRC", + "GRL", + "GGY", + "GIN", + "GNB", + "HMD", + "VAT", + "HUN", + "ISL", + "IRL", + "IMN", + "ITA", + "JEY", + "KEN", + "PRK", + "XKX", + "LVA", + "LSO", + "LBR", + "LBY", + "LIE", + "LTU", + "LUX", + "MLT", + "MRT", + "MYT", + "MDA", + "MCO", + "MNE", + "MAR", + "NAM", + "NLD", + "NER", + "NGA", + "MKD", + "NOR", + "PSE", + "POL", + "PRT", + "ROU", + "RUS", + "RWA", + "REU", + "BLM", + "MAF", + "SPM", + "SMR", + "STP", + "SEN", + "SRB", + "SYC", + "SLE", + "SGP", + "SVK", + "SVN", + "SGS", + "SSD", + "ESP", + "SDN", + "SJM", + "SWE", + "CHE", + "TGO", + "TUN", + "TUR", + "UKR", + "GBR", + "UMI", + "ESH", + "ZMB", + "ALA", + ], + } + impf_id = { + "NA1": 1, + "NA2": 2, + "NI": 3, + "OC": 4, + "SI": 5, + "WP1": 6, + "WP2": 7, + "WP3": 8, + "WP4": 9, + "ROW": 10, + } region_name = dict() - region_name['NA1'] = 'Caribbean and Mexico' - region_name['NA2'] = 'USA and Canada' - region_name['NI'] = 'North Indian' - region_name['OC'] = 'Oceania' - region_name['SI'] = 'South Indian' - region_name['WP1'] = 'South East Asia' - region_name['WP2'] = 'Philippines' - region_name['WP3'] = 'China Mainland' - region_name['WP4'] = 'North West Pacific' - - if region == 'all': + region_name["NA1"] = "Caribbean and 
Mexico" + region_name["NA2"] = "USA and Canada" + region_name["NI"] = "North Indian" + region_name["OC"] = "Oceania" + region_name["SI"] = "South Indian" + region_name["WP1"] = "South East Asia" + region_name["WP2"] = "Philippines" + region_name["WP3"] = "China Mainland" + region_name["WP4"] = "North West Pacific" + + if region == "all": return region_name, impf_id, iso3n, iso3a return region_name[region], impf_id[region], iso3n[region], iso3a[region] -@deprecated(details="The class name IFTropCyclone is deprecated and won't be supported in a future " - +"version. Use ImpfTropCyclone instead") +@deprecated( + details="The class name IFTropCyclone is deprecated and won't be supported in a future " + + "version. Use ImpfTropCyclone instead" +) class IFTropCyclone(ImpfTropCyclone): """Is ImpfTropCyclone now""" diff --git a/climada/entity/measures/__init__.py b/climada/entity/measures/__init__.py index 24cc74455..36d925045 100755 --- a/climada/entity/measures/__init__.py +++ b/climada/entity/measures/__init__.py @@ -18,5 +18,6 @@ init measures """ + from .base import * from .measure_set import * diff --git a/climada/entity/measures/base.py b/climada/entity/measures/base.py index 40c4cac4e..93505feb3 100755 --- a/climada/entity/measures/base.py +++ b/climada/entity/measures/base.py @@ -19,7 +19,7 @@ Define Measure class. """ -__all__ = ['Measure'] +__all__ = ["Measure"] import copy import logging @@ -30,20 +30,21 @@ import pandas as pd from geopandas import GeoDataFrame -from climada.entity.exposures.base import Exposures, INDICATOR_IMPF, INDICATOR_CENTR -from climada.hazard.base import Hazard import climada.util.checker as u_check +from climada.entity.exposures.base import INDICATOR_CENTR, INDICATOR_IMPF, Exposures +from climada.hazard.base import Hazard LOGGER = logging.getLogger(__name__) IMPF_ID_FACT = 1000 """Factor internally used as id for impact functions when region selected.""" -NULL_STR = 'nil' +NULL_STR = "nil" """String considered as no path in measures exposures_set and hazard_set or no string in imp_fun_map""" -class Measure(): + +class Measure: """ Contains the definition of one measure. @@ -99,7 +100,7 @@ def __init__( risk_transf_attach: float = 0, risk_transf_cover: float = 0, risk_transf_cost_factor: float = 1, - color_rgb: Optional[np.ndarray] = None + color_rgb: Optional[np.ndarray] = None, ): """Initialize a Measure object with given values. 
@@ -173,10 +174,10 @@ def check(self): ------ ValueError """ - u_check.size([3, 4], self.color_rgb, 'Measure.color_rgb') - u_check.size(2, self.hazard_inten_imp, 'Measure.hazard_inten_imp') - u_check.size(2, self.mdd_impact, 'Measure.mdd_impact') - u_check.size(2, self.paa_impact, 'Measure.paa_impact') + u_check.size([3, 4], self.color_rgb, "Measure.color_rgb") + u_check.size(2, self.hazard_inten_imp, "Measure.hazard_inten_imp") + u_check.size(2, self.mdd_impact, "Measure.mdd_impact") + u_check.size(2, self.paa_impact, "Measure.paa_impact") def calc_impact(self, exposures, imp_fun_set, hazard, assign_centroids=True): """ @@ -240,7 +241,8 @@ def apply(self, exposures, imp_fun_set, hazard): new_haz = self._cutoff_hazard_damage(new_exp, new_impfs, new_haz) # apply all previous changes only to the selected exposures new_exp, new_impfs, new_haz = self._filter_exposures( - exposures, imp_fun_set, hazard, new_exp, new_impfs, new_haz) + exposures, imp_fun_set, hazard, new_exp, new_impfs, new_haz + ) return new_exp, new_impfs, new_haz @@ -260,9 +262,13 @@ def _calc_impact(self, new_exp, new_impfs, new_haz, assign_centroids): ------- climada.engine.Impact """ - from climada.engine.impact_calc import ImpactCalc # pylint: disable=import-outside-toplevel - imp = ImpactCalc(new_exp, new_impfs, new_haz)\ - .impact(save_mat=False, assign_centroids=assign_centroids) + from climada.engine.impact_calc import ( + ImpactCalc, # pylint: disable=import-outside-toplevel + ) + + imp = ImpactCalc(new_exp, new_impfs, new_haz).impact( + save_mat=False, assign_centroids=assign_centroids + ) return imp.calc_risk_transfer(self.risk_transf_attach, self.risk_transf_cover) def _change_all_hazard(self, hazard): @@ -282,7 +288,7 @@ def _change_all_hazard(self, hazard): if self.hazard_set == NULL_STR: return hazard - LOGGER.debug('Setting new hazard %s', self.hazard_set) + LOGGER.debug("Setting new hazard %s", self.hazard_set) new_haz = Hazard.from_hdf5(self.hazard_set) new_haz.check() return new_haz @@ -305,21 +311,26 @@ def _change_all_exposures(self, exposures): return exposures if isinstance(self.exposures_set, (str, Path)): - LOGGER.debug('Setting new exposures %s', self.exposures_set) + LOGGER.debug("Setting new exposures %s", self.exposures_set) new_exp = Exposures.from_hdf5(self.exposures_set) new_exp.check() elif isinstance(self.exposures_set, Exposures): - LOGGER.debug('Setting new exposures. ') + LOGGER.debug("Setting new exposures. 
") new_exp = self.exposures_set.copy(deep=True) new_exp.check() else: - raise ValueError(f'{self.exposures_set} is neither a string nor an Exposures object') + raise ValueError( + f"{self.exposures_set} is neither a string nor an Exposures object" + ) - if not np.array_equal(np.unique(exposures.gdf['latitude'].values), - np.unique(new_exp.gdf['latitude'].values)) or \ - not np.array_equal(np.unique(exposures.gdf['longitude'].values), - np.unique(new_exp.gdf['longitude'].values)): - LOGGER.warning('Exposures locations have changed.') + if not np.array_equal( + np.unique(exposures.gdf["latitude"].values), + np.unique(new_exp.gdf["latitude"].values), + ) or not np.array_equal( + np.unique(exposures.gdf["longitude"].values), + np.unique(new_exp.gdf["longitude"].values), + ): + LOGGER.warning("Exposures locations have changed.") return new_exp @@ -340,10 +351,10 @@ def _change_exposures_impf(self, exposures): if self.imp_fun_map == NULL_STR: return exposures - LOGGER.debug('Setting new exposures impact functions%s', self.imp_fun_map) + LOGGER.debug("Setting new exposures impact functions%s", self.imp_fun_map) new_exp = exposures.copy(deep=True) - from_id = int(self.imp_fun_map[0:self.imp_fun_map.find('to')]) - to_id = int(self.imp_fun_map[self.imp_fun_map.find('to') + 2:]) + from_id = int(self.imp_fun_map[0 : self.imp_fun_map.find("to")]) + to_id = int(self.imp_fun_map[self.imp_fun_map.find("to") + 2 :]) try: exp_change = np.argwhere( new_exp.gdf[INDICATOR_IMPF + self.haz_type].values == from_id @@ -371,22 +382,29 @@ def _change_imp_func(self, imp_set): ImpactFuncSet with measure applied to each impact function according to the defined hazard type """ - if self.hazard_inten_imp == (1, 0) and self.mdd_impact == (1, 0)\ - and self.paa_impact == (1, 0): + if ( + self.hazard_inten_imp == (1, 0) + and self.mdd_impact == (1, 0) + and self.paa_impact == (1, 0) + ): return imp_set new_imp_set = copy.deepcopy(imp_set) for imp_fun in new_imp_set.get_func(self.haz_type): - LOGGER.debug('Transforming impact functions.') + LOGGER.debug("Transforming impact functions.") imp_fun.intensity = np.maximum( - imp_fun.intensity * self.hazard_inten_imp[0] - self.hazard_inten_imp[1], 0.0) + imp_fun.intensity * self.hazard_inten_imp[0] - self.hazard_inten_imp[1], + 0.0, + ) imp_fun.mdd = np.maximum( - imp_fun.mdd * self.mdd_impact[0] + self.mdd_impact[1], 0.0) + imp_fun.mdd * self.mdd_impact[0] + self.mdd_impact[1], 0.0 + ) imp_fun.paa = np.maximum( - imp_fun.paa * self.paa_impact[0] + self.paa_impact[1], 0.0) + imp_fun.paa * self.paa_impact[0] + self.paa_impact[1], 0.0 + ) if not new_imp_set.size(): - LOGGER.info('No impact function of hazard %s found.', self.haz_type) + LOGGER.info("No impact function of hazard %s found.", self.haz_type) return new_imp_set @@ -415,31 +433,39 @@ def _cutoff_hazard_damage(self, exposures, impf_set, hazard): if self.exp_region_id: # compute impact only in selected region in_reg = np.logical_or.reduce( - [exposures.gdf['region_id'].values == reg for reg in self.exp_region_id] + [exposures.gdf["region_id"].values == reg for reg in self.exp_region_id] ) exp_imp = Exposures(exposures.gdf[in_reg], crs=exposures.crs) else: exp_imp = exposures - from climada.engine.impact_calc import ImpactCalc # pylint: disable=import-outside-toplevel - imp = ImpactCalc(exp_imp, impf_set, hazard)\ - .impact(assign_centroids=hazard.centr_exp_col not in exp_imp.gdf) + from climada.engine.impact_calc import ( + ImpactCalc, # pylint: disable=import-outside-toplevel + ) + + imp = ImpactCalc(exp_imp, impf_set, 
hazard).impact( + assign_centroids=hazard.centr_exp_col not in exp_imp.gdf + ) - LOGGER.debug('Cutting events whose damage have a frequency > %s.', - self.hazard_freq_cutoff) + LOGGER.debug( + "Cutting events whose damage have a frequency > %s.", + self.hazard_freq_cutoff, + ) new_haz = copy.deepcopy(hazard) sort_idxs = np.argsort(imp.at_event)[::-1] exceed_freq = np.cumsum(imp.frequency[sort_idxs]) cutoff = exceed_freq > self.hazard_freq_cutoff sel_haz = sort_idxs[cutoff] for row in sel_haz: - new_haz.intensity.data[new_haz.intensity.indptr[row]: - new_haz.intensity.indptr[row + 1]] = 0 + new_haz.intensity.data[ + new_haz.intensity.indptr[row] : new_haz.intensity.indptr[row + 1] + ] = 0 new_haz.intensity.eliminate_zeros() return new_haz - def _filter_exposures(self, exposures, imp_set, hazard, new_exp, new_impfs, - new_haz): + def _filter_exposures( + self, exposures, imp_set, hazard, new_exp, new_impfs, new_haz + ): """ Incorporate changes of new elements to previous ones only for the selected exp_region_id. If exp_region_id is [], all new changes @@ -479,38 +505,49 @@ def _filter_exposures(self, exposures, imp_set, hazard, new_exp, new_impfs, fun_ids = list(new_impfs.get_func()[self.haz_type].keys()) for key in fun_ids: new_impfs.get_func()[self.haz_type][key].id = key + IMPF_ID_FACT - new_impfs.get_func()[self.haz_type][key + IMPF_ID_FACT] = \ - new_impfs.get_func()[self.haz_type][key] + new_impfs.get_func()[self.haz_type][ + key + IMPF_ID_FACT + ] = new_impfs.get_func()[self.haz_type][key] try: new_exp.gdf[INDICATOR_IMPF + self.haz_type] += IMPF_ID_FACT except KeyError: new_exp.gdf[INDICATOR_IMPF] += IMPF_ID_FACT # collect old impact functions as well (used by exposures) - new_impfs.get_func()[self.haz_type].update(imp_set.get_func()[self.haz_type]) + new_impfs.get_func()[self.haz_type].update( + imp_set.get_func()[self.haz_type] + ) # get the indices for changing and inert regions - chg_reg = exposures.gdf['region_id'].isin(self.exp_region_id) + chg_reg = exposures.gdf["region_id"].isin(self.exp_region_id) no_chg_reg = ~chg_reg - LOGGER.debug('Number of changed exposures: %s', chg_reg.sum()) + LOGGER.debug("Number of changed exposures: %s", chg_reg.sum()) # concatenate previous and new exposures new_exp.set_gdf( GeoDataFrame( - pd.concat([ - exposures.gdf[no_chg_reg], # old values for inert regions - new_exp.gdf[chg_reg] # new values for changing regions - ]).loc[exposures.gdf.index,:], # re-establish old order + pd.concat( + [ + exposures.gdf[no_chg_reg], # old values for inert regions + new_exp.gdf[chg_reg], # new values for changing regions + ] + ).loc[ + exposures.gdf.index, : + ], # re-establish old order ), - crs=exposures.crs + crs=exposures.crs, ) # set missing values of centr_ - if INDICATOR_CENTR + self.haz_type in new_exp.gdf.columns \ - and np.isnan(new_exp.gdf[INDICATOR_CENTR + self.haz_type].values).any(): + if ( + INDICATOR_CENTR + self.haz_type in new_exp.gdf.columns + and np.isnan(new_exp.gdf[INDICATOR_CENTR + self.haz_type].values).any() + ): new_exp.gdf.drop(columns=INDICATOR_CENTR + self.haz_type, inplace=True) - elif INDICATOR_CENTR in new_exp.gdf.columns \ - and np.isnan(new_exp.gdf[INDICATOR_CENTR].values).any(): + elif ( + INDICATOR_CENTR in new_exp.gdf.columns + and np.isnan(new_exp.gdf[INDICATOR_CENTR].values).any() + ): new_exp.gdf.drop(columns=INDICATOR_CENTR, inplace=True) # put hazard intensities outside region to previous intensities diff --git a/climada/entity/measures/measure_set.py b/climada/entity/measures/measure_set.py index 31a413797..90a2bb43c 
100755 --- a/climada/entity/measures/measure_set.py +++ b/climada/entity/measures/measure_set.py @@ -19,70 +19,75 @@ Define MeasureSet class. """ -__all__ = ['MeasureSet'] +__all__ = ["MeasureSet"] import ast import copy import logging -from typing import Optional, List +from typing import List, Optional -from matplotlib import colormaps as cm import numpy as np import pandas as pd import xlsxwriter +from matplotlib import colormaps as cm -from climada.entity.measures.base import Measure import climada.util.hdf5_handler as u_hdf5 +from climada.entity.measures.base import Measure LOGGER = logging.getLogger(__name__) -DEF_VAR_MAT = {'sup_field_name': 'entity', - 'field_name': 'measures', - 'var_name': {'name': 'name', - 'color': 'color', - 'cost': 'cost', - 'haz_int_a': 'hazard_intensity_impact_a', - 'haz_int_b': 'hazard_intensity_impact_b', - 'haz_frq': 'hazard_high_frequency_cutoff', - 'haz_set': 'hazard_event_set', - 'mdd_a': 'MDD_impact_a', - 'mdd_b': 'MDD_impact_b', - 'paa_a': 'PAA_impact_a', - 'paa_b': 'PAA_impact_b', - 'fun_map': 'damagefunctions_map', - 'exp_set': 'assets_file', - 'exp_reg': 'Region_ID', - 'risk_att': 'risk_transfer_attachement', - 'risk_cov': 'risk_transfer_cover', - 'haz': 'peril_ID' - } - } +DEF_VAR_MAT = { + "sup_field_name": "entity", + "field_name": "measures", + "var_name": { + "name": "name", + "color": "color", + "cost": "cost", + "haz_int_a": "hazard_intensity_impact_a", + "haz_int_b": "hazard_intensity_impact_b", + "haz_frq": "hazard_high_frequency_cutoff", + "haz_set": "hazard_event_set", + "mdd_a": "MDD_impact_a", + "mdd_b": "MDD_impact_b", + "paa_a": "PAA_impact_a", + "paa_b": "PAA_impact_b", + "fun_map": "damagefunctions_map", + "exp_set": "assets_file", + "exp_reg": "Region_ID", + "risk_att": "risk_transfer_attachement", + "risk_cov": "risk_transfer_cover", + "haz": "peril_ID", + }, +} """MATLAB variable names""" -DEF_VAR_EXCEL = {'sheet_name': 'measures', - 'col_name': {'name': 'name', - 'color': 'color', - 'cost': 'cost', - 'haz_int_a': 'hazard intensity impact a', - 'haz_int_b': 'hazard intensity impact b', - 'haz_frq': 'hazard high frequency cutoff', - 'haz_set': 'hazard event set', - 'mdd_a': 'MDD impact a', - 'mdd_b': 'MDD impact b', - 'paa_a': 'PAA impact a', - 'paa_b': 'PAA impact b', - 'fun_map': 'damagefunctions map', - 'exp_set': 'assets file', - 'exp_reg': 'Region_ID', - 'risk_att': 'risk transfer attachement', - 'risk_cov': 'risk transfer cover', - 'risk_fact': 'risk transfer cost factor', - 'haz': 'peril_ID' - } - } +DEF_VAR_EXCEL = { + "sheet_name": "measures", + "col_name": { + "name": "name", + "color": "color", + "cost": "cost", + "haz_int_a": "hazard intensity impact a", + "haz_int_b": "hazard intensity impact b", + "haz_frq": "hazard high frequency cutoff", + "haz_set": "hazard event set", + "mdd_a": "MDD impact a", + "mdd_b": "MDD impact b", + "paa_a": "PAA impact a", + "paa_b": "PAA impact b", + "fun_map": "damagefunctions map", + "exp_set": "assets file", + "exp_reg": "Region_ID", + "risk_att": "risk transfer attachement", + "risk_cov": "risk transfer cover", + "risk_fact": "risk transfer cost factor", + "haz": "peril_ID", + }, +} """Excel variable names""" -class MeasureSet(): + +class MeasureSet: """Contains measures of type Measure. Loads from files with format defined in FILE_EXT. @@ -93,10 +98,7 @@ class MeasureSet(): Use the available methods instead. 
""" - def __init__( - self, - measure_list: Optional[List[Measure]] = None - ): + def __init__(self, measure_list: Optional[List[Measure]] = None): """Initialize a new MeasureSet object with specified data. Parameters @@ -136,7 +138,9 @@ def clear(self, _data: Optional[dict] = None): A dict containing the Measure objects. For internal use only: It's not suppossed to be set directly. Use the class methods instead. """ - self._data = _data if _data is not None else dict() # {hazard_type : {name: Measure()}} + self._data = ( + _data if _data is not None else dict() + ) # {hazard_type : {name: Measure()}} def append(self, meas): """Append an Measure. Override if same name and haz_type. @@ -175,8 +179,7 @@ def remove_measure(self, haz_type=None, name=None): try: del self._data[haz_type][name] except KeyError: - LOGGER.info("No Measure with hazard %s and id %s.", - haz_type, name) + LOGGER.info("No Measure with hazard %s and id %s.", haz_type, name) elif haz_type is not None: try: del self._data[haz_type] @@ -212,8 +215,7 @@ def get_measure(self, haz_type=None, name=None): try: return self._data[haz_type][name] except KeyError: - LOGGER.info("No Measure with hazard %s and id %s.", - haz_type, name) + LOGGER.info("No Measure with hazard %s and id %s.", haz_type, name) return list() elif haz_type is not None: try: @@ -295,8 +297,11 @@ def size(self, haz_type=None, name=None): ------- int """ - if (haz_type is not None) and (name is not None) and \ - (isinstance(self.get_measure(haz_type, name), Measure)): + if ( + (haz_type is not None) + and (name is not None) + and (isinstance(self.get_measure(haz_type, name), Measure)) + ): return 1 if (haz_type is not None) or (name is not None): return len(self.get_measure(haz_type, name)) @@ -310,14 +315,16 @@ def check(self): ValueError """ for key_haz, meas_dict in self._data.items(): - def_color = cm.get_cmap('Greys').resampled(len(meas_dict)) + def_color = cm.get_cmap("Greys").resampled(len(meas_dict)) for i_meas, (name, meas) in enumerate(meas_dict.items()): - if (name != meas.name) | (name == ''): - raise ValueError("Wrong Measure.name: %s != %s." - % (name, meas.name)) + if (name != meas.name) | (name == ""): + raise ValueError( + "Wrong Measure.name: %s != %s." % (name, meas.name) + ) if key_haz != meas.haz_type: - raise ValueError("Wrong Measure.haz_type: %s != %s." - % (key_haz, meas.haz_type)) + raise ValueError( + "Wrong Measure.haz_type: %s != %s." 
% (key_haz, meas.haz_type) + ) # set default color if not set if np.array_equal(meas.color_rgb, np.zeros(3)): meas.color_rgb = def_color(i_meas) @@ -366,46 +373,60 @@ def from_mat(cls, file_name, var_names=None): """ if var_names is None: var_names = DEF_VAR_MAT + def read_att_mat(measures, data, file_name, var_names): """Read MATLAB measures attributes""" - num_mes = len(data[var_names['var_name']['name']]) + num_mes = len(data[var_names["var_name"]["name"]]) for idx in range(0, num_mes): color_str = u_hdf5.get_str_from_ref( - file_name, data[var_names['var_name']['color']][idx][0]) + file_name, data[var_names["var_name"]["color"]][idx][0] + ) try: hazard_inten_imp = ( - data[var_names['var_name']['haz_int_a']][idx][0], - data[var_names['var_name']['haz_int_b']][0][idx]) + data[var_names["var_name"]["haz_int_a"]][idx][0], + data[var_names["var_name"]["haz_int_b"]][0][idx], + ) except KeyError: hazard_inten_imp = ( - data[var_names['var_name']['haz_int_a'][:-2]][idx][0], 0) + data[var_names["var_name"]["haz_int_a"][:-2]][idx][0], + 0, + ) meas_kwargs = dict( name=u_hdf5.get_str_from_ref( - file_name, data[var_names['var_name']['name']][idx][0]), - color_rgb=np.fromstring(color_str, dtype=float, sep=' '), - cost=data[var_names['var_name']['cost']][idx][0], + file_name, data[var_names["var_name"]["name"]][idx][0] + ), + color_rgb=np.fromstring(color_str, dtype=float, sep=" "), + cost=data[var_names["var_name"]["cost"]][idx][0], haz_type=u_hdf5.get_str_from_ref( - file_name, data[var_names['var_name']['haz']][idx][0]), - hazard_freq_cutoff=data[var_names['var_name']['haz_frq']][idx][0], + file_name, data[var_names["var_name"]["haz"]][idx][0] + ), + hazard_freq_cutoff=data[var_names["var_name"]["haz_frq"]][idx][0], hazard_set=u_hdf5.get_str_from_ref( - file_name, data[var_names['var_name']['haz_set']][idx][0]), + file_name, data[var_names["var_name"]["haz_set"]][idx][0] + ), hazard_inten_imp=hazard_inten_imp, # different convention of signs followed in MATLAB! 
- mdd_impact=(data[var_names['var_name']['mdd_a']][idx][0], - data[var_names['var_name']['mdd_b']][idx][0]), - paa_impact=(data[var_names['var_name']['paa_a']][idx][0], - data[var_names['var_name']['paa_b']][idx][0]), + mdd_impact=( + data[var_names["var_name"]["mdd_a"]][idx][0], + data[var_names["var_name"]["mdd_b"]][idx][0], + ), + paa_impact=( + data[var_names["var_name"]["paa_a"]][idx][0], + data[var_names["var_name"]["paa_b"]][idx][0], + ), imp_fun_map=u_hdf5.get_str_from_ref( - file_name, data[var_names['var_name']['fun_map']][idx][0]), + file_name, data[var_names["var_name"]["fun_map"]][idx][0] + ), exposures_set=u_hdf5.get_str_from_ref( - file_name, data[var_names['var_name']['exp_set']][idx][0]), - risk_transf_attach=data[var_names['var_name']['risk_att']][idx][0], - risk_transf_cover=data[var_names['var_name']['risk_cov']][idx][0], + file_name, data[var_names["var_name"]["exp_set"]][idx][0] + ), + risk_transf_attach=data[var_names["var_name"]["risk_att"]][idx][0], + risk_transf_cover=data[var_names["var_name"]["risk_cov"]][idx][0], ) - exp_region_id = data[var_names['var_name']['exp_reg']][idx][0] + exp_region_id = data[var_names["var_name"]["exp_reg"]][idx][0] if exp_region_id: meas_kwargs["exp_region_id"] = [exp_region_id] @@ -414,12 +435,12 @@ def read_att_mat(measures, data, file_name, var_names): data = u_hdf5.read(file_name) meas_set = cls() try: - data = data[var_names['sup_field_name']] + data = data[var_names["sup_field_name"]] except KeyError: pass try: - data = data[var_names['field_name']] + data = data[var_names["field_name"]] read_att_mat(meas_set, data, file_name, var_names) except KeyError as var_err: raise KeyError("Variable not in MAT file: " + str(var_err)) from var_err @@ -428,8 +449,10 @@ def read_att_mat(measures, data, file_name, var_names): def read_mat(self, *args, **kwargs): """This function is deprecated, use MeasureSet.from_mat instead.""" - LOGGER.warning("The use of MeasureSet.read_mat is deprecated." - "Use MeasureSet.from_mat instead.") + LOGGER.warning( + "The use of MeasureSet.read_mat is deprecated." + "Use MeasureSet.from_mat instead." 
+ ) self.__dict__ = MeasureSet.from_mat(*args, **kwargs).__dict__ @classmethod @@ -452,63 +475,76 @@ def from_excel(cls, file_name, var_names=None): """ if var_names is None: var_names = DEF_VAR_EXCEL + def read_att_excel(measures, dfr, var_names): """Read Excel measures attributes""" num_mes = len(dfr.index) for idx in range(0, num_mes): # Search for (a, b) values, put a=1 otherwise try: - hazard_inten_imp = (dfr[var_names['col_name']['haz_int_a']][idx], - dfr[var_names['col_name']['haz_int_b']][idx]) + hazard_inten_imp = ( + dfr[var_names["col_name"]["haz_int_a"]][idx], + dfr[var_names["col_name"]["haz_int_b"]][idx], + ) except KeyError: - hazard_inten_imp = (1, dfr['hazard intensity impact'][idx]) + hazard_inten_imp = (1, dfr["hazard intensity impact"][idx]) meas_kwargs = dict( - name=dfr[var_names['col_name']['name']][idx], - cost=dfr[var_names['col_name']['cost']][idx], - hazard_freq_cutoff=dfr[var_names['col_name']['haz_frq']][idx], - hazard_set=dfr[var_names['col_name']['haz_set']][idx], + name=dfr[var_names["col_name"]["name"]][idx], + cost=dfr[var_names["col_name"]["cost"]][idx], + hazard_freq_cutoff=dfr[var_names["col_name"]["haz_frq"]][idx], + hazard_set=dfr[var_names["col_name"]["haz_set"]][idx], hazard_inten_imp=hazard_inten_imp, - mdd_impact=(dfr[var_names['col_name']['mdd_a']][idx], - dfr[var_names['col_name']['mdd_b']][idx]), - paa_impact=(dfr[var_names['col_name']['paa_a']][idx], - dfr[var_names['col_name']['paa_b']][idx]), - imp_fun_map=dfr[var_names['col_name']['fun_map']][idx], - risk_transf_attach=dfr[var_names['col_name']['risk_att']][idx], - risk_transf_cover=dfr[var_names['col_name']['risk_cov']][idx], + mdd_impact=( + dfr[var_names["col_name"]["mdd_a"]][idx], + dfr[var_names["col_name"]["mdd_b"]][idx], + ), + paa_impact=( + dfr[var_names["col_name"]["paa_a"]][idx], + dfr[var_names["col_name"]["paa_b"]][idx], + ), + imp_fun_map=dfr[var_names["col_name"]["fun_map"]][idx], + risk_transf_attach=dfr[var_names["col_name"]["risk_att"]][idx], + risk_transf_cover=dfr[var_names["col_name"]["risk_cov"]][idx], color_rgb=np.fromstring( - dfr[var_names['col_name']['color']][idx], dtype=float, sep=' '), + dfr[var_names["col_name"]["color"]][idx], dtype=float, sep=" " + ), ) try: - meas_kwargs["haz_type"] = dfr[var_names['col_name']['haz']][idx] + meas_kwargs["haz_type"] = dfr[var_names["col_name"]["haz"]][idx] except KeyError: pass try: - meas_kwargs["exposures_set"] = dfr[var_names['col_name']['exp_set']][idx] + meas_kwargs["exposures_set"] = dfr[ + var_names["col_name"]["exp_set"] + ][idx] except KeyError: pass try: meas_kwargs["exp_region_id"] = ast.literal_eval( - dfr[var_names['col_name']['exp_reg']][idx]) + dfr[var_names["col_name"]["exp_reg"]][idx] + ) except KeyError: pass except ValueError: - meas_kwargs["exp_region_id"] = dfr[var_names['col_name']['exp_reg']][idx] + meas_kwargs["exp_region_id"] = dfr[ + var_names["col_name"]["exp_reg"] + ][idx] try: - meas_kwargs["risk_transf_cost_factor"] = ( - dfr[var_names['col_name']['risk_fact']][idx] - ) + meas_kwargs["risk_transf_cost_factor"] = dfr[ + var_names["col_name"]["risk_fact"] + ][idx] except KeyError: pass measures.append(Measure(**meas_kwargs)) - dfr = pd.read_excel(file_name, var_names['sheet_name']) - dfr = dfr.fillna('') + dfr = pd.read_excel(file_name, var_names["sheet_name"]) + dfr = dfr.fillna("") meas_set = cls() try: read_att_excel(meas_set, dfr, var_names) @@ -519,8 +555,10 @@ def read_att_excel(measures, dfr, var_names): def read_excel(self, *args, **kwargs): """This function is deprecated, use 
MeasureSet.from_excel instead.""" - LOGGER.warning("The use ofMeasureSet.read_excel is deprecated." - "Use MeasureSet.from_excel instead.") + LOGGER.warning( + "The use ofMeasureSet.read_excel is deprecated." + "Use MeasureSet.from_excel instead." + ) self.__dict__ = MeasureSet.from_excel(*args, **kwargs).__dict__ def write_excel(self, file_name, var_names=None): @@ -535,33 +573,56 @@ def write_excel(self, file_name, var_names=None): """ if var_names is None: var_names = DEF_VAR_EXCEL + def write_meas(row_ini, imp_ws, xls_data): """Write one measure""" for icol, col_dat in enumerate(xls_data): imp_ws.write(row_ini, icol, col_dat) meas_wb = xlsxwriter.Workbook(file_name) - mead_ws = meas_wb.add_worksheet(var_names['sheet_name']) - - header = [var_names['col_name']['name'], var_names['col_name']['color'], - var_names['col_name']['cost'], var_names['col_name']['haz_int_a'], - var_names['col_name']['haz_int_b'], var_names['col_name']['haz_frq'], - var_names['col_name']['haz_set'], var_names['col_name']['mdd_a'], - var_names['col_name']['mdd_b'], var_names['col_name']['paa_a'], - var_names['col_name']['paa_b'], var_names['col_name']['fun_map'], - var_names['col_name']['exp_set'], var_names['col_name']['exp_reg'], - var_names['col_name']['risk_att'], var_names['col_name']['risk_cov'], - var_names['col_name']['haz']] + mead_ws = meas_wb.add_worksheet(var_names["sheet_name"]) + + header = [ + var_names["col_name"]["name"], + var_names["col_name"]["color"], + var_names["col_name"]["cost"], + var_names["col_name"]["haz_int_a"], + var_names["col_name"]["haz_int_b"], + var_names["col_name"]["haz_frq"], + var_names["col_name"]["haz_set"], + var_names["col_name"]["mdd_a"], + var_names["col_name"]["mdd_b"], + var_names["col_name"]["paa_a"], + var_names["col_name"]["paa_b"], + var_names["col_name"]["fun_map"], + var_names["col_name"]["exp_set"], + var_names["col_name"]["exp_reg"], + var_names["col_name"]["risk_att"], + var_names["col_name"]["risk_cov"], + var_names["col_name"]["haz"], + ] for icol, head_dat in enumerate(header): mead_ws.write(0, icol, head_dat) for row_ini, (_, haz_dict) in enumerate(self._data.items(), 1): for meas_name, meas in haz_dict.items(): - xls_data = [meas_name, ' '.join(list(map(str, meas.color_rgb))), - meas.cost, meas.hazard_inten_imp[0], - meas.hazard_inten_imp[1], meas.hazard_freq_cutoff, - meas.hazard_set, meas.mdd_impact[0], meas.mdd_impact[1], - meas.paa_impact[0], meas.paa_impact[1], meas.imp_fun_map, - meas.exposures_set, str(meas.exp_region_id), meas.risk_transf_attach, - meas.risk_transf_cover, meas.haz_type] + xls_data = [ + meas_name, + " ".join(list(map(str, meas.color_rgb))), + meas.cost, + meas.hazard_inten_imp[0], + meas.hazard_inten_imp[1], + meas.hazard_freq_cutoff, + meas.hazard_set, + meas.mdd_impact[0], + meas.mdd_impact[1], + meas.paa_impact[0], + meas.paa_impact[1], + meas.imp_fun_map, + meas.exposures_set, + str(meas.exp_region_id), + meas.risk_transf_attach, + meas.risk_transf_cover, + meas.haz_type, + ] write_meas(row_ini, mead_ws, xls_data) meas_wb.close() diff --git a/climada/entity/measures/test/test_base.py b/climada/entity/measures/test/test_base.py index 84ac988c4..520229ffc 100644 --- a/climada/entity/measures/test/test_base.py +++ b/climada/entity/measures/test/test_base.py @@ -18,73 +18,131 @@ Test MeasureSet and Measure classes. 
""" -import unittest + import copy +import unittest from pathlib import Path import numpy as np +import climada.entity.exposures.test as exposures_test +import climada.util.coordinates as u_coord from climada import CONFIG -from climada.hazard.base import Hazard from climada.entity.entity_def import Entity -from climada.entity.exposures.base import Exposures, INDICATOR_IMPF -from climada.entity.impact_funcs.impact_func_set import ImpactFuncSet +from climada.entity.exposures.base import INDICATOR_IMPF, Exposures from climada.entity.impact_funcs.base import ImpactFunc +from climada.entity.impact_funcs.impact_func_set import ImpactFuncSet +from climada.entity.measures.base import IMPF_ID_FACT, Measure from climada.entity.measures.measure_set import MeasureSet -from climada.entity.measures.base import Measure, IMPF_ID_FACT -from climada.util.constants import EXP_DEMO_H5, HAZ_DEMO_H5 +from climada.hazard.base import Hazard from climada.test import get_test_file -import climada.util.coordinates as u_coord -import climada.entity.exposures.test as exposures_test +from climada.util.constants import EXP_DEMO_H5, HAZ_DEMO_H5 DATA_DIR = CONFIG.measures.test_data.dir() -HAZ_TEST_TC :Path = get_test_file('test_tc_florida', file_format='hdf5') +HAZ_TEST_TC: Path = get_test_file("test_tc_florida", file_format="hdf5") """ Hazard test file from Data API: Hurricanes from 1851 to 2011 over Florida with 100 centroids. Fraction is empty. Format: HDF5. """ -ENT_TEST_MAT = Path(exposures_test.__file__).parent / 'data' / 'demo_today.mat' +ENT_TEST_MAT = Path(exposures_test.__file__).parent / "data" / "demo_today.mat" + class TestApply(unittest.TestCase): """Test implement measures functions.""" + def test_change_imp_func_pass(self): """Test _change_imp_func""" meas = MeasureSet.from_mat(ENT_TEST_MAT) - act_1 = meas.get_measure(name='Mangroves')[0] + act_1 = meas.get_measure(name="Mangroves")[0] - haz_type = 'XX' + haz_type = "XX" idx = 1 intensity = np.arange(10, 100, 10) - intensity[0] = 0. - intensity[-1] = 100. 
- mdd = np.array([0.0, 0.0, 0.021857142857143, 0.035887500000000, - 0.053977415307403, 0.103534246575342, 0.180414000000000, - 0.410796000000000, 0.410796000000000]) - paa = np.array([0, 0.005000000000000, 0.042000000000000, 0.160000000000000, - 0.398500000000000, 0.657000000000000, 1.000000000000000, - 1.000000000000000, 1.000000000000000]) + intensity[0] = 0.0 + intensity[-1] = 100.0 + mdd = np.array( + [ + 0.0, + 0.0, + 0.021857142857143, + 0.035887500000000, + 0.053977415307403, + 0.103534246575342, + 0.180414000000000, + 0.410796000000000, + 0.410796000000000, + ] + ) + paa = np.array( + [ + 0, + 0.005000000000000, + 0.042000000000000, + 0.160000000000000, + 0.398500000000000, + 0.657000000000000, + 1.000000000000000, + 1.000000000000000, + 1.000000000000000, + ] + ) imp_tc = ImpactFunc(haz_type, idx, intensity, mdd, paa) imp_set = ImpactFuncSet([imp_tc]) - new_imp = act_1._change_imp_func(imp_set).get_func('XX')[0] - - self.assertTrue(np.array_equal(new_imp.intensity, np.array([4., 24., 34., 44., - 54., 64., 74., 84., 104.]))) - self.assertTrue(np.array_equal(new_imp.mdd, np.array([0, 0, 0.021857142857143, 0.035887500000000, - 0.053977415307403, 0.103534246575342, 0.180414000000000, 0.410796000000000, 0.410796000000000]))) - self.assertTrue(np.array_equal(new_imp.paa, np.array([0, 0.005000000000000, 0.042000000000000, - 0.160000000000000, 0.398500000000000, 0.657000000000000, 1.000000000000000, - 1.000000000000000, 1.000000000000000]))) + new_imp = act_1._change_imp_func(imp_set).get_func("XX")[0] + + self.assertTrue( + np.array_equal( + new_imp.intensity, + np.array([4.0, 24.0, 34.0, 44.0, 54.0, 64.0, 74.0, 84.0, 104.0]), + ) + ) + self.assertTrue( + np.array_equal( + new_imp.mdd, + np.array( + [ + 0, + 0, + 0.021857142857143, + 0.035887500000000, + 0.053977415307403, + 0.103534246575342, + 0.180414000000000, + 0.410796000000000, + 0.410796000000000, + ] + ), + ) + ) + self.assertTrue( + np.array_equal( + new_imp.paa, + np.array( + [ + 0, + 0.005000000000000, + 0.042000000000000, + 0.160000000000000, + 0.398500000000000, + 0.657000000000000, + 1.000000000000000, + 1.000000000000000, + 1.000000000000000, + ] + ), + ) + ) self.assertFalse(id(new_imp) == id(imp_tc)) def test_cutoff_hazard_pass(self): """Test _cutoff_hazard_damage""" meas = MeasureSet.from_mat(ENT_TEST_MAT) - act_1 = meas.get_measure(name='Seawall')[0] + act_1 = meas.get_measure(name="Seawall")[0] haz = Hazard.from_hdf5(HAZ_TEST_TC) exp = Exposures.from_mat(ENT_TEST_MAT) - exp.gdf.rename(columns={'impf': 'impf_TC'}, inplace=True) + exp.gdf.rename(columns={"impf": "impf_TC"}, inplace=True) exp.check() exp.assign_centroids(haz) @@ -94,32 +152,99 @@ def test_cutoff_hazard_pass(self): self.assertFalse(id(new_haz) == id(haz)) - pos_no_null = np.array([6249, 7697, 9134, 13500, 13199, 5944, 9052, 9050, 2429, - 5139, 9053, 7102, 4096, 1070, 5948, 1076, 5947, 7432, - 5949, 11694, 5484, 6246, 12147, 778, 3326, 7199, 12498, - 11698, 6245, 5327, 4819, 8677, 5970, 7101, 779, 3894, - 9051, 5976, 3329, 5978, 4282, 11697, 7193, 5351, 7310, - 7478, 5489, 5526, 7194, 4283, 7191, 5328, 4812, 5528, - 5527, 5488, 7475, 5529, 776, 5758, 4811, 6223, 7479, - 7470, 5480, 5325, 7477, 7318, 7317, 11696, 7313, 13165, - 6221]) + pos_no_null = np.array( + [ + 6249, + 7697, + 9134, + 13500, + 13199, + 5944, + 9052, + 9050, + 2429, + 5139, + 9053, + 7102, + 4096, + 1070, + 5948, + 1076, + 5947, + 7432, + 5949, + 11694, + 5484, + 6246, + 12147, + 778, + 3326, + 7199, + 12498, + 11698, + 6245, + 5327, + 4819, + 8677, + 5970, + 7101, + 779, + 3894, + 
9051, + 5976, + 3329, + 5978, + 4282, + 11697, + 7193, + 5351, + 7310, + 7478, + 5489, + 5526, + 7194, + 4283, + 7191, + 5328, + 4812, + 5528, + 5527, + 5488, + 7475, + 5529, + 776, + 5758, + 4811, + 6223, + 7479, + 7470, + 5480, + 5325, + 7477, + 7318, + 7317, + 11696, + 7313, + 13165, + 6221, + ] + ) all_haz = np.arange(haz.intensity.shape[0]) all_haz[pos_no_null] = -1 pos_null = np.argwhere(all_haz > 0).reshape(-1) for i_ev in pos_null: self.assertEqual(new_haz.intensity[i_ev, :].max(), 0) - def test_cutoff_hazard_region_pass(self): """Test _cutoff_hazard_damage in specific region""" meas = MeasureSet.from_mat(ENT_TEST_MAT) - act_1 = meas.get_measure(name='Seawall')[0] + act_1 = meas.get_measure(name="Seawall")[0] act_1.exp_region_id = [1] haz = Hazard.from_hdf5(HAZ_TEST_TC) exp = Exposures.from_mat(ENT_TEST_MAT) - exp.gdf['region_id'] = np.zeros(exp.gdf.shape[0]) - exp.gdf['region_id'].values[10:] = 1 + exp.gdf["region_id"] = np.zeros(exp.gdf.shape[0]) + exp.gdf["region_id"].values[10:] = 1 exp.check() exp.assign_centroids(haz) @@ -129,27 +254,95 @@ def test_cutoff_hazard_region_pass(self): self.assertFalse(id(new_haz) == id(haz)) - pos_no_null = np.array([6249, 7697, 9134, 13500, 13199, 5944, 9052, 9050, 2429, - 5139, 9053, 7102, 4096, 1070, 5948, 1076, 5947, 7432, - 5949, 11694, 5484, 6246, 12147, 778, 3326, 7199, 12498, - 11698, 6245, 5327, 4819, 8677, 5970, 7101, 779, 3894, - 9051, 5976, 3329, 5978, 4282, 11697, 7193, 5351, 7310, - 7478, 5489, 5526, 7194, 4283, 7191, 5328, 4812, 5528, - 5527, 5488, 7475, 5529, 776, 5758, 4811, 6223, 7479, - 7470, 5480, 5325, 7477, 7318, 7317, 11696, 7313, 13165, - 6221]) + pos_no_null = np.array( + [ + 6249, + 7697, + 9134, + 13500, + 13199, + 5944, + 9052, + 9050, + 2429, + 5139, + 9053, + 7102, + 4096, + 1070, + 5948, + 1076, + 5947, + 7432, + 5949, + 11694, + 5484, + 6246, + 12147, + 778, + 3326, + 7199, + 12498, + 11698, + 6245, + 5327, + 4819, + 8677, + 5970, + 7101, + 779, + 3894, + 9051, + 5976, + 3329, + 5978, + 4282, + 11697, + 7193, + 5351, + 7310, + 7478, + 5489, + 5526, + 7194, + 4283, + 7191, + 5328, + 4812, + 5528, + 5527, + 5488, + 7475, + 5529, + 776, + 5758, + 4811, + 6223, + 7479, + 7470, + 5480, + 5325, + 7477, + 7318, + 7317, + 11696, + 7313, + 13165, + 6221, + ] + ) all_haz = np.arange(haz.intensity.shape[0]) all_haz[pos_no_null] = -1 pos_null = np.argwhere(all_haz > 0).reshape(-1) - centr_null = np.unique(exp.gdf['centr_'][exp.gdf['region_id'] == 0]) + centr_null = np.unique(exp.gdf["centr_"][exp.gdf["region_id"] == 0]) for i_ev in pos_null: self.assertEqual(new_haz.intensity[i_ev, centr_null].max(), 0) def test_change_exposures_impf_pass(self): """Test _change_exposures_impf""" meas = Measure( - imp_fun_map='1to3', - haz_type='TC', + imp_fun_map="1to3", + haz_type="TC", ) imp_set = ImpactFuncSet() @@ -170,11 +363,26 @@ def test_change_exposures_impf_pass(self): self.assertEqual(new_exp.ref_year, exp.ref_year) self.assertEqual(new_exp.value_unit, exp.value_unit) self.assertEqual(new_exp.description, exp.description) - self.assertTrue(np.array_equal(new_exp.gdf['value'].values, exp.gdf['value'].values)) - self.assertTrue(np.array_equal(new_exp.gdf['latitude'].values, exp.gdf['latitude'].values)) - self.assertTrue(np.array_equal(new_exp.gdf['longitude'].values, exp.gdf['longitude'].values)) - self.assertTrue(np.array_equal(exp.gdf[INDICATOR_IMPF + 'TC'].values, np.ones(new_exp.gdf.shape[0]))) - self.assertTrue(np.array_equal(new_exp.gdf[INDICATOR_IMPF + 'TC'].values, np.ones(new_exp.gdf.shape[0]) * 3)) + self.assertTrue( + 
np.array_equal(new_exp.gdf["value"].values, exp.gdf["value"].values) + ) + self.assertTrue( + np.array_equal(new_exp.gdf["latitude"].values, exp.gdf["latitude"].values) + ) + self.assertTrue( + np.array_equal(new_exp.gdf["longitude"].values, exp.gdf["longitude"].values) + ) + self.assertTrue( + np.array_equal( + exp.gdf[INDICATOR_IMPF + "TC"].values, np.ones(new_exp.gdf.shape[0]) + ) + ) + self.assertTrue( + np.array_equal( + new_exp.gdf[INDICATOR_IMPF + "TC"].values, + np.ones(new_exp.gdf.shape[0]) * 3, + ) + ) def test_change_all_hazard_pass(self): """Test _change_all_hazard method""" @@ -182,14 +390,16 @@ def test_change_all_hazard_pass(self): ref_haz = Hazard.from_hdf5(HAZ_DEMO_H5) - hazard = Hazard('TC') + hazard = Hazard("TC") new_haz = meas._change_all_hazard(hazard) self.assertEqual(new_haz.haz_type, ref_haz.haz_type) self.assertTrue(np.array_equal(new_haz.frequency, ref_haz.frequency)) self.assertTrue(np.array_equal(new_haz.date, ref_haz.date)) self.assertTrue(np.array_equal(new_haz.orig, ref_haz.orig)) - self.assertTrue(np.array_equal(new_haz.centroids.coord, ref_haz.centroids.coord)) + self.assertTrue( + np.array_equal(new_haz.centroids.coord, ref_haz.centroids.coord) + ) self.assertTrue(np.array_equal(new_haz.intensity.data, ref_haz.intensity.data)) self.assertTrue(np.array_equal(new_haz.fraction.data, ref_haz.fraction.data)) @@ -200,16 +410,26 @@ def test_change_all_exposures_pass(self): ref_exp = Exposures.from_hdf5(EXP_DEMO_H5) exposures = Exposures() - exposures.gdf['latitude'] = np.ones(10) - exposures.gdf['longitude'] = np.ones(10) + exposures.gdf["latitude"] = np.ones(10) + exposures.gdf["longitude"] = np.ones(10) new_exp = meas._change_all_exposures(exposures) self.assertEqual(new_exp.ref_year, ref_exp.ref_year) self.assertEqual(new_exp.value_unit, ref_exp.value_unit) self.assertEqual(new_exp.description, ref_exp.description) - self.assertTrue(np.array_equal(new_exp.gdf['value'].values, ref_exp.gdf['value'].values)) - self.assertTrue(np.array_equal(new_exp.gdf['latitude'].values, ref_exp.gdf['latitude'].values)) - self.assertTrue(np.array_equal(new_exp.gdf['longitude'].values, ref_exp.gdf['longitude'].values)) + self.assertTrue( + np.array_equal(new_exp.gdf["value"].values, ref_exp.gdf["value"].values) + ) + self.assertTrue( + np.array_equal( + new_exp.gdf["latitude"].values, ref_exp.gdf["latitude"].values + ) + ) + self.assertTrue( + np.array_equal( + new_exp.gdf["longitude"].values, ref_exp.gdf["longitude"].values + ) + ) def test_not_filter_exposures_pass(self): """Test _filter_exposures method with []""" @@ -217,14 +437,15 @@ def test_not_filter_exposures_pass(self): exp = Exposures() imp_set = ImpactFuncSet() - haz = Hazard('TC') + haz = Hazard("TC") new_exp = Exposures() new_impfs = ImpactFuncSet() - new_haz = Hazard('TC') + new_haz = Hazard("TC") - res_exp, res_ifs, res_haz = meas._filter_exposures(exp, imp_set, haz, - new_exp, new_impfs, new_haz) + res_exp, res_ifs, res_haz = meas._filter_exposures( + exp, imp_set, haz, new_exp, new_impfs, new_haz + ) self.assertTrue(res_exp is new_exp) self.assertTrue(res_ifs is new_impfs) @@ -238,14 +459,14 @@ def test_filter_exposures_pass(self): """Test _filter_exposures method with two values""" meas = Measure( exp_region_id=[3, 4], - haz_type='TC', + haz_type="TC", ) exp = Exposures.from_mat(ENT_TEST_MAT) - exp.gdf.rename(columns={'impf_': 'impf_TC', 'centr_': 'centr_TC'}, inplace=True) - exp.gdf['region_id'] = np.ones(exp.gdf.shape[0]) - exp.gdf['region_id'].values[:exp.gdf.shape[0] // 2] = 3 - exp.gdf['region_id'][0] = 4 
+ exp.gdf.rename(columns={"impf_": "impf_TC", "centr_": "centr_TC"}, inplace=True) + exp.gdf["region_id"] = np.ones(exp.gdf.shape[0]) + exp.gdf["region_id"].values[: exp.gdf.shape[0] // 2] = 3 + exp.gdf["region_id"][0] = 4 exp.check() imp_set = ImpactFuncSet.from_mat(ENT_TEST_MAT) @@ -254,20 +475,21 @@ def test_filter_exposures_pass(self): exp.assign_centroids(haz) new_exp = copy.deepcopy(exp) - new_exp.gdf['value'] *= 3 - new_exp.gdf['impf_TC'].values[:20] = 2 - new_exp.gdf['impf_TC'].values[20:40] = 3 - new_exp.gdf['impf_TC'].values[40:] = 1 + new_exp.gdf["value"] *= 3 + new_exp.gdf["impf_TC"].values[:20] = 2 + new_exp.gdf["impf_TC"].values[20:40] = 3 + new_exp.gdf["impf_TC"].values[40:] = 1 new_ifs = copy.deepcopy(imp_set) - new_ifs.get_func('TC')[1].intensity += 1 + new_ifs.get_func("TC")[1].intensity += 1 ref_ifs = copy.deepcopy(new_ifs) new_haz = copy.deepcopy(haz) new_haz.intensity *= 4 - res_exp, res_ifs, res_haz = meas._filter_exposures(exp, imp_set, haz, - new_exp.copy(deep=True), new_ifs, new_haz) + res_exp, res_ifs, res_haz = meas._filter_exposures( + exp, imp_set, haz, new_exp.copy(deep=True), new_ifs, new_haz + ) # unchanged meta data self.assertEqual(res_exp.ref_year, exp.ref_year) @@ -278,93 +500,267 @@ def test_filter_exposures_pass(self): self.assertFalse(hasattr(res_exp.gdf, "crs")) # regions (that is just input data, no need for testing, but it makes the changed and unchanged parts obious) - self.assertTrue(np.array_equal(res_exp.gdf['region_id'].values[0], 4)) - self.assertTrue(np.array_equal(res_exp.gdf['region_id'].values[1:25], np.ones(24) * 3)) - self.assertTrue(np.array_equal(res_exp.gdf['region_id'].values[25:], np.ones(25))) + self.assertTrue(np.array_equal(res_exp.gdf["region_id"].values[0], 4)) + self.assertTrue( + np.array_equal(res_exp.gdf["region_id"].values[1:25], np.ones(24) * 3) + ) + self.assertTrue( + np.array_equal(res_exp.gdf["region_id"].values[25:], np.ones(25)) + ) # changed exposures - self.assertTrue(np.array_equal(res_exp.gdf['value'].values[:25], new_exp.gdf['value'].values[:25])) - self.assertTrue(np.all(np.not_equal(res_exp.gdf['value'].values[:25], exp.gdf['value'].values[:25]))) - self.assertTrue(np.all(np.not_equal(res_exp.gdf['impf_TC'].values[:25], new_exp.gdf['impf_TC'].values[:25]))) - self.assertTrue(np.array_equal(res_exp.gdf['latitude'].values[:25], new_exp.gdf['latitude'].values[:25])) - self.assertTrue(np.array_equal(res_exp.gdf['longitude'].values[:25], new_exp.gdf['longitude'].values[:25])) + self.assertTrue( + np.array_equal( + res_exp.gdf["value"].values[:25], new_exp.gdf["value"].values[:25] + ) + ) + self.assertTrue( + np.all( + np.not_equal( + res_exp.gdf["value"].values[:25], exp.gdf["value"].values[:25] + ) + ) + ) + self.assertTrue( + np.all( + np.not_equal( + res_exp.gdf["impf_TC"].values[:25], + new_exp.gdf["impf_TC"].values[:25], + ) + ) + ) + self.assertTrue( + np.array_equal( + res_exp.gdf["latitude"].values[:25], new_exp.gdf["latitude"].values[:25] + ) + ) + self.assertTrue( + np.array_equal( + res_exp.gdf["longitude"].values[:25], + new_exp.gdf["longitude"].values[:25], + ) + ) # unchanged exposures - self.assertTrue(np.array_equal(res_exp.gdf['value'].values[25:], exp.gdf['value'].values[25:])) - self.assertTrue(np.all(np.not_equal(res_exp.gdf['value'].values[25:], new_exp.gdf['value'].values[25:]))) - self.assertTrue(np.array_equal(res_exp.gdf['impf_TC'].values[25:], exp.gdf['impf_TC'].values[25:])) - self.assertTrue(np.array_equal(res_exp.gdf['latitude'].values[25:], exp.gdf['latitude'].values[25:])) - 
self.assertTrue(np.array_equal(res_exp.gdf['longitude'].values[25:], exp.gdf['longitude'].values[25:])) + self.assertTrue( + np.array_equal( + res_exp.gdf["value"].values[25:], exp.gdf["value"].values[25:] + ) + ) + self.assertTrue( + np.all( + np.not_equal( + res_exp.gdf["value"].values[25:], new_exp.gdf["value"].values[25:] + ) + ) + ) + self.assertTrue( + np.array_equal( + res_exp.gdf["impf_TC"].values[25:], exp.gdf["impf_TC"].values[25:] + ) + ) + self.assertTrue( + np.array_equal( + res_exp.gdf["latitude"].values[25:], exp.gdf["latitude"].values[25:] + ) + ) + self.assertTrue( + np.array_equal( + res_exp.gdf["longitude"].values[25:], exp.gdf["longitude"].values[25:] + ) + ) # unchanged impact functions self.assertEqual(list(res_ifs.get_func().keys()), [meas.haz_type]) - self.assertEqual(res_ifs.get_func()[meas.haz_type][1].id, imp_set.get_func()[meas.haz_type][1].id) - self.assertTrue(np.array_equal(res_ifs.get_func()[meas.haz_type][1].intensity, - imp_set.get_func()[meas.haz_type][1].intensity)) - self.assertEqual(res_ifs.get_func()[meas.haz_type][3].id, imp_set.get_func()[meas.haz_type][3].id) - self.assertTrue(np.array_equal(res_ifs.get_func()[meas.haz_type][3].intensity, - imp_set.get_func()[meas.haz_type][3].intensity)) + self.assertEqual( + res_ifs.get_func()[meas.haz_type][1].id, + imp_set.get_func()[meas.haz_type][1].id, + ) + self.assertTrue( + np.array_equal( + res_ifs.get_func()[meas.haz_type][1].intensity, + imp_set.get_func()[meas.haz_type][1].intensity, + ) + ) + self.assertEqual( + res_ifs.get_func()[meas.haz_type][3].id, + imp_set.get_func()[meas.haz_type][3].id, + ) + self.assertTrue( + np.array_equal( + res_ifs.get_func()[meas.haz_type][3].intensity, + imp_set.get_func()[meas.haz_type][3].intensity, + ) + ) # changed impact functions - self.assertTrue(np.array_equal(res_ifs.get_func()[meas.haz_type][1 + IMPF_ID_FACT].intensity, - ref_ifs.get_func()[meas.haz_type][1].intensity)) - self.assertTrue(np.array_equal(res_ifs.get_func()[meas.haz_type][1 + IMPF_ID_FACT].paa, - ref_ifs.get_func()[meas.haz_type][1].paa)) - self.assertTrue(np.array_equal(res_ifs.get_func()[meas.haz_type][1 + IMPF_ID_FACT].mdd, - ref_ifs.get_func()[meas.haz_type][1].mdd)) - self.assertTrue(np.array_equal(res_ifs.get_func()[meas.haz_type][3 + IMPF_ID_FACT].intensity, - ref_ifs.get_func()[meas.haz_type][3].intensity)) - self.assertTrue(np.array_equal(res_ifs.get_func()[meas.haz_type][3 + IMPF_ID_FACT].paa, - ref_ifs.get_func()[meas.haz_type][3].paa)) - self.assertTrue(np.array_equal(res_ifs.get_func()[meas.haz_type][3 + IMPF_ID_FACT].mdd, - ref_ifs.get_func()[meas.haz_type][3].mdd)) + self.assertTrue( + np.array_equal( + res_ifs.get_func()[meas.haz_type][1 + IMPF_ID_FACT].intensity, + ref_ifs.get_func()[meas.haz_type][1].intensity, + ) + ) + self.assertTrue( + np.array_equal( + res_ifs.get_func()[meas.haz_type][1 + IMPF_ID_FACT].paa, + ref_ifs.get_func()[meas.haz_type][1].paa, + ) + ) + self.assertTrue( + np.array_equal( + res_ifs.get_func()[meas.haz_type][1 + IMPF_ID_FACT].mdd, + ref_ifs.get_func()[meas.haz_type][1].mdd, + ) + ) + self.assertTrue( + np.array_equal( + res_ifs.get_func()[meas.haz_type][3 + IMPF_ID_FACT].intensity, + ref_ifs.get_func()[meas.haz_type][3].intensity, + ) + ) + self.assertTrue( + np.array_equal( + res_ifs.get_func()[meas.haz_type][3 + IMPF_ID_FACT].paa, + ref_ifs.get_func()[meas.haz_type][3].paa, + ) + ) + self.assertTrue( + np.array_equal( + res_ifs.get_func()[meas.haz_type][3 + IMPF_ID_FACT].mdd, + ref_ifs.get_func()[meas.haz_type][3].mdd, + ) + ) # unchanged hazard - 
self.assertTrue(np.array_equal(res_haz.intensity[:, :36].toarray(), - haz.intensity[:, :36].toarray())) - self.assertTrue(np.array_equal(res_haz.intensity[:, 37:46].toarray(), - haz.intensity[:, 37:46].toarray())) - self.assertTrue(np.array_equal(res_haz.intensity[:, 47:].toarray(), - haz.intensity[:, 47:].toarray())) + self.assertTrue( + np.array_equal( + res_haz.intensity[:, :36].toarray(), haz.intensity[:, :36].toarray() + ) + ) + self.assertTrue( + np.array_equal( + res_haz.intensity[:, 37:46].toarray(), haz.intensity[:, 37:46].toarray() + ) + ) + self.assertTrue( + np.array_equal( + res_haz.intensity[:, 47:].toarray(), haz.intensity[:, 47:].toarray() + ) + ) # changed hazard - self.assertTrue(np.array_equal(res_haz.intensity[[36, 46]].toarray(), - new_haz.intensity[[36, 46]].toarray())) + self.assertTrue( + np.array_equal( + res_haz.intensity[[36, 46]].toarray(), + new_haz.intensity[[36, 46]].toarray(), + ) + ) def test_apply_ref_pass(self): """Test apply method: apply all measures but insurance""" hazard = Hazard.from_hdf5(HAZ_TEST_TC) entity = Entity.from_mat(ENT_TEST_MAT) - entity.measures._data['TC'] = entity.measures._data.pop('XX') - for meas in entity.measures.get_measure('TC'): - meas.haz_type = 'TC' + entity.measures._data["TC"] = entity.measures._data.pop("XX") + for meas in entity.measures.get_measure("TC"): + meas.haz_type = "TC" entity.check() - new_exp, new_ifs, new_haz = entity.measures.get_measure('TC', 'Mangroves').apply(entity.exposures, - entity.impact_funcs, hazard) + new_exp, new_ifs, new_haz = entity.measures.get_measure( + "TC", "Mangroves" + ).apply(entity.exposures, entity.impact_funcs, hazard) self.assertTrue(new_exp is entity.exposures) self.assertTrue(new_haz is hazard) self.assertFalse(new_ifs is entity.impact_funcs) - new_imp = new_ifs.get_func('TC')[0] - self.assertTrue(np.array_equal(new_imp.intensity, np.array([4., 24., 34., 44., - 54., 64., 74., 84., 104.]))) - self.assertTrue(np.allclose(new_imp.mdd, np.array([0, 0, 0.021857142857143, 0.035887500000000, - 0.053977415307403, 0.103534246575342, 0.180414000000000, 0.410796000000000, 0.410796000000000]))) - self.assertTrue(np.allclose(new_imp.paa, np.array([0, 0.005000000000000, 0.042000000000000, - 0.160000000000000, 0.398500000000000, 0.657000000000000, 1.000000000000000, - 1.000000000000000, 1.000000000000000]))) - - new_imp = new_ifs.get_func('TC')[1] - self.assertTrue(np.array_equal(new_imp.intensity, np.array([4., 24., 34., 44., - 54., 64., 74., 84., 104.]))) - self.assertTrue(np.allclose(new_imp.mdd, np.array([0, 0, 0, 0.025000000000000, - 0.054054054054054, 0.104615384615385, 0.211764705882353, 0.400000000000000, 0.400000000000000]))) - self.assertTrue(np.allclose(new_imp.paa, np.array([0, 0.004000000000000, 0, 0.160000000000000, - 0.370000000000000, 0.650000000000000, 0.850000000000000, 1.000000000000000, - 1.000000000000000]))) + new_imp = new_ifs.get_func("TC")[0] + self.assertTrue( + np.array_equal( + new_imp.intensity, + np.array([4.0, 24.0, 34.0, 44.0, 54.0, 64.0, 74.0, 84.0, 104.0]), + ) + ) + self.assertTrue( + np.allclose( + new_imp.mdd, + np.array( + [ + 0, + 0, + 0.021857142857143, + 0.035887500000000, + 0.053977415307403, + 0.103534246575342, + 0.180414000000000, + 0.410796000000000, + 0.410796000000000, + ] + ), + ) + ) + self.assertTrue( + np.allclose( + new_imp.paa, + np.array( + [ + 0, + 0.005000000000000, + 0.042000000000000, + 0.160000000000000, + 0.398500000000000, + 0.657000000000000, + 1.000000000000000, + 1.000000000000000, + 1.000000000000000, + ] + ), + ) + ) + + new_imp = 
new_ifs.get_func("TC")[1] + self.assertTrue( + np.array_equal( + new_imp.intensity, + np.array([4.0, 24.0, 34.0, 44.0, 54.0, 64.0, 74.0, 84.0, 104.0]), + ) + ) + self.assertTrue( + np.allclose( + new_imp.mdd, + np.array( + [ + 0, + 0, + 0, + 0.025000000000000, + 0.054054054054054, + 0.104615384615385, + 0.211764705882353, + 0.400000000000000, + 0.400000000000000, + ] + ), + ) + ) + self.assertTrue( + np.allclose( + new_imp.paa, + np.array( + [ + 0, + 0.004000000000000, + 0, + 0.160000000000000, + 0.370000000000000, + 0.650000000000000, + 0.850000000000000, + 1.000000000000000, + 1.000000000000000, + ] + ), + ) + ) def test_calc_impact_pass(self): """Test calc_impact method: apply all measures but insurance""" @@ -372,46 +768,50 @@ def test_calc_impact_pass(self): hazard = Hazard.from_hdf5(HAZ_TEST_TC) entity = Entity.from_mat(ENT_TEST_MAT) - entity.exposures.gdf.rename(columns={'impf': 'impf_TC'}, inplace=True) - entity.measures._data['TC'] = entity.measures._data.pop('XX') - entity.measures.get_measure(name='Mangroves', haz_type='TC').haz_type = 'TC' - for meas in entity.measures.get_measure('TC'): - meas.haz_type = 'TC' + entity.exposures.gdf.rename(columns={"impf": "impf_TC"}, inplace=True) + entity.measures._data["TC"] = entity.measures._data.pop("XX") + entity.measures.get_measure(name="Mangroves", haz_type="TC").haz_type = "TC" + for meas in entity.measures.get_measure("TC"): + meas.haz_type = "TC" entity.check() - imp, risk_transf = entity.measures.get_measure('TC', 'Mangroves').calc_impact( - entity.exposures, entity.impact_funcs, hazard) + imp, risk_transf = entity.measures.get_measure("TC", "Mangroves").calc_impact( + entity.exposures, entity.impact_funcs, hazard + ) - self.assertAlmostEqual(imp.aai_agg, 4.850407096284983e+09, delta=1) + self.assertAlmostEqual(imp.aai_agg, 4.850407096284983e09, delta=1) self.assertAlmostEqual(imp.at_event[0], 0) - self.assertAlmostEqual(imp.at_event[12], 1.470194187501225e+07) - self.assertAlmostEqual(imp.at_event[41], 4.7226357936631286e+08) - self.assertAlmostEqual(imp.at_event[11890], 1.742110428135755e+07) - self.assertTrue(np.array_equal(imp.coord_exp[:, 0], entity.exposures.gdf['latitude'])) - self.assertTrue(np.array_equal(imp.coord_exp[:, 1], entity.exposures.gdf['longitude'])) - self.assertAlmostEqual(imp.eai_exp[0], 1.15677655725858e+08) - self.assertAlmostEqual(imp.eai_exp[-1], 7.528669956120645e+07) - self.assertAlmostEqual(imp.tot_value, 6.570532945599105e+11) - self.assertEqual(imp.unit, 'USD') + self.assertAlmostEqual(imp.at_event[12], 1.470194187501225e07) + self.assertAlmostEqual(imp.at_event[41], 4.7226357936631286e08) + self.assertAlmostEqual(imp.at_event[11890], 1.742110428135755e07) + self.assertTrue( + np.array_equal(imp.coord_exp[:, 0], entity.exposures.gdf["latitude"]) + ) + self.assertTrue( + np.array_equal(imp.coord_exp[:, 1], entity.exposures.gdf["longitude"]) + ) + self.assertAlmostEqual(imp.eai_exp[0], 1.15677655725858e08) + self.assertAlmostEqual(imp.eai_exp[-1], 7.528669956120645e07) + self.assertAlmostEqual(imp.tot_value, 6.570532945599105e11) + self.assertEqual(imp.unit, "USD") self.assertEqual(imp.imp_mat.shape, (0, 0)) self.assertTrue(np.array_equal(imp.event_id, hazard.event_id)) self.assertTrue(np.array_equal(imp.date, hazard.date)) self.assertEqual(imp.event_name, hazard.event_name) self.assertEqual(risk_transf.aai_agg, 0) - def test_calc_impact_transf_pass(self): """Test calc_impact method: apply all measures and insurance""" hazard = Hazard.from_hdf5(HAZ_TEST_TC) entity = Entity.from_mat(ENT_TEST_MAT) - 
entity.exposures.gdf.rename(columns={'impf': 'impf_TC'}, inplace=True) - entity.measures._data['TC'] = entity.measures._data.pop('XX') - for meas in entity.measures.get_measure('TC'): - meas.haz_type = 'TC' - meas = entity.measures.get_measure(name='Beach nourishment', haz_type='TC') - meas.haz_type = 'TC' + entity.exposures.gdf.rename(columns={"impf": "impf_TC"}, inplace=True) + entity.measures._data["TC"] = entity.measures._data.pop("XX") + for meas in entity.measures.get_measure("TC"): + meas.haz_type = "TC" + meas = entity.measures.get_measure(name="Beach nourishment", haz_type="TC") + meas.haz_type = "TC" meas.hazard_inten_imp = (1, 0) meas.mdd_impact = (1, 0) meas.paa_impact = (1, 0) @@ -419,23 +819,25 @@ def test_calc_impact_transf_pass(self): meas.risk_transf_cover = 1.0e9 entity.check() - imp, risk_transf = entity.measures.get_measure(name='Beach nourishment', haz_type='TC').calc_impact( - entity.exposures, entity.impact_funcs, hazard) + imp, risk_transf = entity.measures.get_measure( + name="Beach nourishment", haz_type="TC" + ).calc_impact(entity.exposures, entity.impact_funcs, hazard) - self.assertAlmostEqual(imp.aai_agg, 6.280804242609713e+09) + self.assertAlmostEqual(imp.aai_agg, 6.280804242609713e09) self.assertAlmostEqual(imp.at_event[0], 0) - self.assertAlmostEqual(imp.at_event[12], 8.648764833437817e+07) + self.assertAlmostEqual(imp.at_event[12], 8.648764833437817e07) self.assertAlmostEqual(imp.at_event[41], 500000000) - self.assertAlmostEqual(imp.at_event[11890], 6.498096646836635e+07) + self.assertAlmostEqual(imp.at_event[11890], 6.498096646836635e07) self.assertTrue(np.array_equal(imp.coord_exp, np.array([]))) self.assertTrue(np.array_equal(imp.eai_exp, np.array([]))) - self.assertAlmostEqual(imp.tot_value, 6.570532945599105e+11) - self.assertEqual(imp.unit, 'USD') + self.assertAlmostEqual(imp.tot_value, 6.570532945599105e11) + self.assertEqual(imp.unit, "USD") self.assertEqual(imp.imp_mat.shape, (0, 0)) self.assertTrue(np.array_equal(imp.event_id, hazard.event_id)) self.assertTrue(np.array_equal(imp.date, hazard.date)) self.assertEqual(imp.event_name, hazard.event_name) - self.assertEqual(risk_transf.aai_agg, 2.3139691495470852e+08) + self.assertEqual(risk_transf.aai_agg, 2.3139691495470852e08) + # Execute Tests if __name__ == "__main__": diff --git a/climada/entity/measures/test/test_meas_set.py b/climada/entity/measures/test/test_meas_set.py index fe2caa1bf..a2cbdc3f1 100644 --- a/climada/entity/measures/test/test_meas_set.py +++ b/climada/entity/measures/test/test_meas_set.py @@ -18,40 +18,48 @@ Test MeasureSet and Measure classes. 
""" + import unittest + import numpy as np from climada import CONFIG from climada.entity.measures.base import Measure from climada.entity.measures.measure_set import MeasureSet -from climada.util.constants import ENT_TEMPLATE_XLS, ENT_DEMO_TODAY +from climada.util.constants import ENT_DEMO_TODAY, ENT_TEMPLATE_XLS DATA_DIR = CONFIG.measures.test_data.dir() -ENT_TEST_MAT = CONFIG.exposures.test_data.dir().joinpath('demo_today.mat') +ENT_TEST_MAT = CONFIG.exposures.test_data.dir().joinpath("demo_today.mat") + class TestConstructor(unittest.TestCase): """Test impact function attributes.""" + def test_attributes_all(self): """All attributes are defined""" meas = MeasureSet() - act_1 = Measure(name='Seawall') - self.assertTrue(hasattr(meas, '_data')) - self.assertTrue(hasattr(act_1, 'name')) - self.assertTrue(hasattr(act_1, 'color_rgb')) - self.assertTrue(hasattr(act_1, 'cost')) - self.assertTrue(hasattr(act_1, 'hazard_freq_cutoff')) - self.assertTrue(hasattr(act_1, 'hazard_inten_imp')) - self.assertTrue(hasattr(act_1, 'mdd_impact')) - self.assertTrue(hasattr(act_1, 'paa_impact')) - self.assertTrue(hasattr(act_1, 'risk_transf_attach')) - self.assertTrue(hasattr(act_1, 'risk_transf_cover')) + act_1 = Measure(name="Seawall") + self.assertTrue(hasattr(meas, "_data")) + self.assertTrue(hasattr(act_1, "name")) + self.assertTrue(hasattr(act_1, "color_rgb")) + self.assertTrue(hasattr(act_1, "cost")) + self.assertTrue(hasattr(act_1, "hazard_freq_cutoff")) + self.assertTrue(hasattr(act_1, "hazard_inten_imp")) + self.assertTrue(hasattr(act_1, "mdd_impact")) + self.assertTrue(hasattr(act_1, "paa_impact")) + self.assertTrue(hasattr(act_1, "risk_transf_attach")) + self.assertTrue(hasattr(act_1, "risk_transf_cover")) + class TestContainer(unittest.TestCase): """Test MeasureSet as container.""" + def test_add_wrong_error(self): """Test error is raised when wrong ImpactFunc provided.""" meas = MeasureSet() - with self.assertLogs('climada.entity.measures.measure_set', level='WARNING') as cm: + with self.assertLogs( + "climada.entity.measures.measure_set", level="WARNING" + ) as cm: meas.append(Measure()) self.assertIn("Input Measure's hazard type not set.", cm.output[0]) @@ -61,196 +69,224 @@ def test_add_wrong_error(self): def test_remove_measure_pass(self): """Test remove_measure removes Measure of MeasureSet correcty.""" - meas = MeasureSet(measure_list=[Measure(name='Mangrove', haz_type='FL')]) - meas.remove_measure(name='Mangrove') + meas = MeasureSet(measure_list=[Measure(name="Mangrove", haz_type="FL")]) + meas.remove_measure(name="Mangrove") self.assertEqual(0, meas.size()) def test_remove_wrong_error(self): """Test error is raised when invalid inputs.""" - meas = MeasureSet(measure_list=[Measure(name='Mangrove', haz_type='FL')]) - with self.assertLogs('climada.entity.measures.measure_set', level='INFO') as cm: - meas.remove_measure(name='Seawall') - self.assertIn('No Measure with name Seawall.', cm.output[0]) + meas = MeasureSet(measure_list=[Measure(name="Mangrove", haz_type="FL")]) + with self.assertLogs("climada.entity.measures.measure_set", level="INFO") as cm: + meas.remove_measure(name="Seawall") + self.assertIn("No Measure with name Seawall.", cm.output[0]) def test_get_names_pass(self): """Test get_names function.""" - meas = MeasureSet(measure_list=[Measure(name='Mangrove', haz_type='FL')]) + meas = MeasureSet(measure_list=[Measure(name="Mangrove", haz_type="FL")]) self.assertEqual(1, len(meas.get_names())) - self.assertEqual({'FL': ['Mangrove']}, meas.get_names()) + self.assertEqual({"FL": 
["Mangrove"]}, meas.get_names()) - meas.append(Measure( - name='Seawall', - haz_type='FL', - )) - self.assertEqual(2, len(meas.get_names('FL'))) - self.assertIn('Mangrove', meas.get_names('FL')) - self.assertIn('Seawall', meas.get_names('FL')) + meas.append( + Measure( + name="Seawall", + haz_type="FL", + ) + ) + self.assertEqual(2, len(meas.get_names("FL"))) + self.assertIn("Mangrove", meas.get_names("FL")) + self.assertIn("Seawall", meas.get_names("FL")) def test_get_measure_pass(self): """Test normal functionality of get_measure method.""" act_1 = Measure( - name='Mangrove', - haz_type='FL', + name="Mangrove", + haz_type="FL", ) meas = MeasureSet(measure_list=[act_1]) - self.assertIs(act_1, meas.get_measure(name='Mangrove')[0]) + self.assertIs(act_1, meas.get_measure(name="Mangrove")[0]) act_2 = Measure( - name='Seawall', - haz_type='FL', + name="Seawall", + haz_type="FL", ) meas.append(act_2) - self.assertIs(act_1, meas.get_measure(name='Mangrove')[0]) - self.assertIs(act_2, meas.get_measure(name='Seawall')[0]) - self.assertEqual(2, len(meas.get_measure('FL'))) + self.assertIs(act_1, meas.get_measure(name="Mangrove")[0]) + self.assertIs(act_2, meas.get_measure(name="Seawall")[0]) + self.assertEqual(2, len(meas.get_measure("FL"))) def test_get_measure_wrong_error(self): """Test get_measure method with wrong inputs.""" - meas = MeasureSet(measure_list=[Measure(name='Seawall', haz_type='FL')]) - self.assertEqual([], meas.get_measure('Mangrove')) + meas = MeasureSet(measure_list=[Measure(name="Seawall", haz_type="FL")]) + self.assertEqual([], meas.get_measure("Mangrove")) def test_num_measures_pass(self): """Test num_measures function.""" meas = MeasureSet() self.assertEqual(0, meas.size()) act_1 = Measure( - name='Mangrove', - haz_type='FL', + name="Mangrove", + haz_type="FL", ) meas.append(act_1) self.assertEqual(1, meas.size()) meas.append(act_1) self.assertEqual(1, meas.size()) - meas.append(Measure( - name='Seawall', - haz_type='FL', - )) + meas.append( + Measure( + name="Seawall", + haz_type="FL", + ) + ) self.assertEqual(2, meas.size()) + class TestChecker(unittest.TestCase): """Test check functionality of the MeasureSet class""" def test_check_wronginten_fail(self): """Wrong intensity definition""" - meas = MeasureSet(measure_list=[ - Measure( - haz_type='TC', - name='Mangrove', - hazard_inten_imp=(1, 2, 3), - color_rgb=np.array([1, 1, 1]), - mdd_impact=(1, 2), - paa_impact=(1, 2), - ), - ]) + meas = MeasureSet( + measure_list=[ + Measure( + haz_type="TC", + name="Mangrove", + hazard_inten_imp=(1, 2, 3), + color_rgb=np.array([1, 1, 1]), + mdd_impact=(1, 2), + paa_impact=(1, 2), + ), + ] + ) with self.assertRaises(ValueError) as cm: meas.check() - self.assertIn('Invalid Measure.hazard_inten_imp size: 2 != 3.', str(cm.exception)) + self.assertIn( + "Invalid Measure.hazard_inten_imp size: 2 != 3.", str(cm.exception) + ) def test_check_wrongColor_fail(self): """Wrong measures definition""" - meas = MeasureSet(measure_list=[ - Measure( - name='Mangrove', - haz_type='DR', - color_rgb=(1, 2), - mdd_impact=(1, 2), - paa_impact=(1, 2), - hazard_inten_imp=(1, 2), - ), - ]) + meas = MeasureSet( + measure_list=[ + Measure( + name="Mangrove", + haz_type="DR", + color_rgb=(1, 2), + mdd_impact=(1, 2), + paa_impact=(1, 2), + hazard_inten_imp=(1, 2), + ), + ] + ) with self.assertRaises(ValueError) as cm: meas.check() - self.assertIn('Invalid Measure.color_rgb size: 2 not in [3, 4].', str(cm.exception)) + self.assertIn( + "Invalid Measure.color_rgb size: 2 not in [3, 4].", str(cm.exception) + ) def 
test_check_wrongMDD_fail(self): """Wrong measures definition""" - meas = MeasureSet(measure_list=[ - Measure( - name='Mangrove', - haz_type='DR', - color_rgb=np.array([1, 1, 1]), - mdd_impact=(1), - paa_impact=(1, 2), - hazard_inten_imp=(1, 2), - ), - ]) + meas = MeasureSet( + measure_list=[ + Measure( + name="Mangrove", + haz_type="DR", + color_rgb=np.array([1, 1, 1]), + mdd_impact=(1), + paa_impact=(1, 2), + hazard_inten_imp=(1, 2), + ), + ] + ) with self.assertRaises(ValueError) as cm: meas.check() - self.assertIn('Measure.mdd_impact has wrong size.', str(cm.exception)) + self.assertIn("Measure.mdd_impact has wrong size.", str(cm.exception)) def test_check_wrongPAA_fail(self): """Wrong measures definition""" - meas = MeasureSet(measure_list=[ - Measure( - name='Mangrove', - haz_type='TC', - color_rgb=np.array([1, 1, 1]), - mdd_impact=(1, 2), - paa_impact=(1, 2, 3, 4), - hazard_inten_imp=(1, 2), - ), - ]) + meas = MeasureSet( + measure_list=[ + Measure( + name="Mangrove", + haz_type="TC", + color_rgb=np.array([1, 1, 1]), + mdd_impact=(1, 2), + paa_impact=(1, 2, 3, 4), + hazard_inten_imp=(1, 2), + ), + ] + ) with self.assertRaises(ValueError) as cm: meas.check() - self.assertIn('Invalid Measure.paa_impact size: 2 != 4.', str(cm.exception)) + self.assertIn("Invalid Measure.paa_impact size: 2 != 4.", str(cm.exception)) def test_check_name_fail(self): """Wrong measures definition""" meas = MeasureSet() - meas._data['FL'] = dict() - meas._data['FL']['LoLo'] = Measure( - name='LaLa', - haz_type='FL', + meas._data["FL"] = dict() + meas._data["FL"]["LoLo"] = Measure( + name="LaLa", + haz_type="FL", ) with self.assertRaises(ValueError) as cm: meas.check() - self.assertIn('Wrong Measure.name: LoLo != LaLa', str(cm.exception)) + self.assertIn("Wrong Measure.name: LoLo != LaLa", str(cm.exception)) def test_def_color(self): """Test default grey scale used when no color set""" - meas = MeasureSet(measure_list=[ - Measure(name='LaLa', haz_type='FL'), - Measure(name='LoLo', haz_type='FL'), - ]) + meas = MeasureSet( + measure_list=[ + Measure(name="LaLa", haz_type="FL"), + Measure(name="LoLo", haz_type="FL"), + ] + ) meas.check() - self.assertTrue(np.array_equal(meas.get_measure('FL', 'LaLa').color_rgb, np.ones(4))) - self.assertTrue(np.allclose(meas.get_measure('FL', 'LoLo').color_rgb, - np.array([0., 0., 0., 1.0]))) + self.assertTrue( + np.array_equal(meas.get_measure("FL", "LaLa").color_rgb, np.ones(4)) + ) + self.assertTrue( + np.allclose( + meas.get_measure("FL", "LoLo").color_rgb, np.array([0.0, 0.0, 0.0, 1.0]) + ) + ) + class TestExtend(unittest.TestCase): """Check extend function""" + def test_extend_to_empty_same(self): """Extend MeasureSet to empty one.""" meas = MeasureSet() - meas_add = MeasureSet(measure_list=[ + meas_add = MeasureSet( + measure_list=[ Measure( - name='Mangrove', - haz_type='TC', + name="Mangrove", + haz_type="TC", color_rgb=np.array([1, 1, 1]), mdd_impact=(1, 2), paa_impact=(1, 2), hazard_inten_imp=(1, 2), ), - ]) + ] + ) meas.extend(meas_add) meas.check() self.assertEqual(meas.size(), 1) - self.assertEqual(meas.get_names(), {'TC': ['Mangrove']}) + self.assertEqual(meas.get_names(), {"TC": ["Mangrove"]}) def test_extend_equal_same(self): """Extend the same MeasureSet. 
The inital MeasureSet is obtained.""" act_1 = Measure( - name='Mangrove', - haz_type='TC', + name="Mangrove", + haz_type="TC", color_rgb=np.array([1, 1, 1]), mdd_impact=(1, 2), paa_impact=(1, 2), @@ -263,14 +299,14 @@ def test_extend_equal_same(self): meas.check() self.assertEqual(meas.size(), 1) - self.assertEqual(meas.get_names(), {'TC': ['Mangrove']}) + self.assertEqual(meas.get_names(), {"TC": ["Mangrove"]}) def test_extend_different_extend(self): """Extend MeasureSet with same and new values. The actions with repeated name are overwritten.""" act_1 = Measure( - name='Mangrove', - haz_type='TC', + name="Mangrove", + haz_type="TC", color_rgb=np.array([1, 1, 1]), mdd_impact=(1, 2), paa_impact=(1, 2), @@ -278,8 +314,8 @@ def test_extend_different_extend(self): ) act_11 = Measure( - name='Mangrove', - haz_type='TC', + name="Mangrove", + haz_type="TC", color_rgb=np.array([1, 1, 1]), mdd_impact=(1, 2), paa_impact=(1, 3), @@ -287,8 +323,8 @@ def test_extend_different_extend(self): ) act_2 = Measure( - name='Anything', - haz_type='TC', + name="Anything", + haz_type="TC", color_rgb=np.array([1, 1, 1]), mdd_impact=(1, 2), paa_impact=(1, 2), @@ -302,8 +338,11 @@ def test_extend_different_extend(self): meas.check() self.assertEqual(meas.size(), 2) - self.assertEqual(meas.get_names(), {'TC': ['Mangrove', 'Anything']}) - self.assertEqual(meas.get_measure(name=act_1.name)[0].paa_impact, act_11.paa_impact) + self.assertEqual(meas.get_names(), {"TC": ["Mangrove", "Anything"]}) + self.assertEqual( + meas.get_measure(name=act_1.name)[0].paa_impact, act_11.paa_impact + ) + class TestReaderExcel(unittest.TestCase): """Test reader functionality of the MeasuresExcel class""" @@ -317,8 +356,8 @@ def test_demo_file(self): self.assertEqual(meas.size(), n_meas) - act_man = meas.get_measure(name='Mangroves')[0] - self.assertEqual(act_man.name, 'Mangroves') + act_man = meas.get_measure(name="Mangroves")[0] + self.assertEqual(act_man.name, "Mangroves") self.assertEqual(type(act_man.color_rgb), np.ndarray) self.assertEqual(len(act_man.color_rgb), 3) self.assertEqual(act_man.color_rgb[0], 0.1529) @@ -332,8 +371,8 @@ def test_demo_file(self): self.assertEqual(act_man.risk_transf_attach, 0) self.assertEqual(act_man.risk_transf_cover, 0) - act_buil = meas.get_measure(name='Building code')[0] - self.assertEqual(act_buil.name, 'Building code') + act_buil = meas.get_measure(name="Building code")[0] + self.assertEqual(act_buil.name, "Building code") self.assertEqual(type(act_buil.color_rgb), np.ndarray) self.assertEqual(len(act_buil.color_rgb), 3) self.assertEqual(act_buil.color_rgb[0], 0.6980) @@ -353,69 +392,77 @@ def test_template_file_pass(self): self.assertEqual(meas.size(), 7) - name = 'elevate existing buildings' + name = "elevate existing buildings" act_buil = meas.get_measure(name=name)[0] self.assertEqual(act_buil.name, name) - self.assertEqual(act_buil.haz_type, 'TS') - self.assertTrue(np.array_equal(act_buil.color_rgb, np.array([0.84, 0.89, 0.70]))) + self.assertEqual(act_buil.haz_type, "TS") + self.assertTrue( + np.array_equal(act_buil.color_rgb, np.array([0.84, 0.89, 0.70])) + ) self.assertEqual(act_buil.cost, 3911963265.476649) - self.assertEqual(act_buil.hazard_set, 'nil') + self.assertEqual(act_buil.hazard_set, "nil") self.assertEqual(act_buil.hazard_freq_cutoff, 0) self.assertEqual(act_buil.hazard_inten_imp, (1, -2)) - self.assertEqual(act_buil.exposures_set, 'nil') + self.assertEqual(act_buil.exposures_set, "nil") self.assertEqual(act_buil.exp_region_id, 0) self.assertEqual(act_buil.paa_impact, (0.9, 0)) 
self.assertEqual(act_buil.mdd_impact, (0.9, -0.1)) - self.assertEqual(act_buil.imp_fun_map, 'nil') + self.assertEqual(act_buil.imp_fun_map, "nil") self.assertEqual(act_buil.risk_transf_attach, 0) self.assertEqual(act_buil.risk_transf_cover, 0) self.assertEqual(act_buil.risk_transf_cost_factor, 1) - name = 'vegetation management' + name = "vegetation management" act_buil = meas.get_measure(name=name)[0] self.assertEqual(act_buil.name, name) - self.assertEqual(act_buil.haz_type, 'TC') - self.assertTrue(np.array_equal(act_buil.color_rgb, np.array([0.76, 0.84, 0.60]))) + self.assertEqual(act_buil.haz_type, "TC") + self.assertTrue( + np.array_equal(act_buil.color_rgb, np.array([0.76, 0.84, 0.60])) + ) self.assertEqual(act_buil.cost, 63968125.00687534) - self.assertEqual(act_buil.hazard_set, 'nil') + self.assertEqual(act_buil.hazard_set, "nil") self.assertEqual(act_buil.hazard_freq_cutoff, 0) self.assertEqual(act_buil.hazard_inten_imp, (1, -1)) - self.assertEqual(act_buil.exposures_set, 'nil') + self.assertEqual(act_buil.exposures_set, "nil") self.assertEqual(act_buil.exp_region_id, 0) self.assertEqual(act_buil.paa_impact, (0.8, 0)) self.assertEqual(act_buil.mdd_impact, (1, 0)) - self.assertEqual(act_buil.imp_fun_map, 'nil') + self.assertEqual(act_buil.imp_fun_map, "nil") self.assertEqual(act_buil.risk_transf_attach, 0) self.assertEqual(act_buil.risk_transf_cover, 0) self.assertEqual(act_buil.risk_transf_cost_factor, 1) - self.assertEqual(meas.get_measure(name='enforce building code')[0].imp_fun_map, '1to3') + self.assertEqual( + meas.get_measure(name="enforce building code")[0].imp_fun_map, "1to3" + ) - name = 'risk transfer' + name = "risk transfer" act_buil = meas.get_measure(name=name)[0] self.assertEqual(act_buil.name, name) - self.assertEqual(act_buil.haz_type, 'TC') - self.assertTrue(np.array_equal(act_buil.color_rgb, np.array([0.90, 0.72, 0.72]))) + self.assertEqual(act_buil.haz_type, "TC") + self.assertTrue( + np.array_equal(act_buil.color_rgb, np.array([0.90, 0.72, 0.72])) + ) self.assertEqual(act_buil.cost, 21000000) - self.assertEqual(act_buil.hazard_set, 'nil') + self.assertEqual(act_buil.hazard_set, "nil") self.assertEqual(act_buil.hazard_freq_cutoff, 0) self.assertEqual(act_buil.hazard_inten_imp, (1, 0)) - self.assertEqual(act_buil.exposures_set, 'nil') + self.assertEqual(act_buil.exposures_set, "nil") self.assertEqual(act_buil.exp_region_id, 0) self.assertEqual(act_buil.paa_impact, (1, 0)) self.assertEqual(act_buil.mdd_impact, (1, 0)) - self.assertEqual(act_buil.imp_fun_map, 'nil') + self.assertEqual(act_buil.imp_fun_map, "nil") self.assertEqual(act_buil.risk_transf_attach, 500000000) self.assertEqual(act_buil.risk_transf_cover, 1000000000) @@ -434,9 +481,9 @@ def test_demo_file(self): self.assertEqual(meas.size(), n_meas) - act_man = meas.get_measure(name='Mangroves')[0] - self.assertEqual(act_man.name, 'Mangroves') - self.assertEqual(act_man.haz_type, 'XX') + act_man = meas.get_measure(name="Mangroves")[0] + self.assertEqual(act_man.name, "Mangroves") + self.assertEqual(act_man.haz_type, "XX") self.assertEqual(type(act_man.color_rgb), np.ndarray) self.assertEqual(len(act_man.color_rgb), 3) self.assertEqual(act_man.color_rgb[0], 0.1529) @@ -445,23 +492,22 @@ def test_demo_file(self): self.assertEqual(act_man.cost, 1311768360.8515418) self.assertEqual(act_man.hazard_freq_cutoff, 0) - self.assertEqual(act_man.hazard_set, 'nil') + self.assertEqual(act_man.hazard_set, "nil") self.assertEqual(act_man.hazard_inten_imp, (1, -4)) - self.assertEqual(act_man.exposures_set, 'nil') + 
self.assertEqual(act_man.exposures_set, "nil") self.assertEqual(act_man.exp_region_id, []) self.assertEqual(act_man.mdd_impact, (1, 0)) self.assertEqual(act_man.paa_impact, (1, 0)) - self.assertEqual(act_man.imp_fun_map, 'nil') + self.assertEqual(act_man.imp_fun_map, "nil") self.assertEqual(act_man.risk_transf_attach, 0) self.assertEqual(act_man.risk_transf_cover, 0) - - act_buil = meas.get_measure(name='Building code')[0] - self.assertEqual(act_buil.name, 'Building code') - self.assertEqual(act_buil.haz_type, 'XX') + act_buil = meas.get_measure(name="Building code")[0] + self.assertEqual(act_buil.name, "Building code") + self.assertEqual(act_buil.haz_type, "XX") self.assertEqual(type(act_buil.color_rgb), np.ndarray) self.assertEqual(len(act_buil.color_rgb), 3) self.assertEqual(act_buil.color_rgb[0], 0.6980) @@ -470,15 +516,15 @@ def test_demo_file(self): self.assertEqual(act_buil.cost, 9200000000.0000000) self.assertEqual(act_buil.hazard_freq_cutoff, 0) - self.assertEqual(act_buil.hazard_set, 'nil') + self.assertEqual(act_buil.hazard_set, "nil") self.assertEqual(act_buil.hazard_inten_imp, (1, 0)) - self.assertEqual(act_buil.exposures_set, 'nil') + self.assertEqual(act_buil.exposures_set, "nil") self.assertEqual(act_buil.exp_region_id, []) self.assertEqual(act_buil.mdd_impact, (0.75, 0)) self.assertEqual(act_buil.paa_impact, (1, 0)) - self.assertEqual(act_man.imp_fun_map, 'nil') + self.assertEqual(act_man.imp_fun_map, "nil") self.assertEqual(act_buil.risk_transf_attach, 0) self.assertEqual(act_buil.risk_transf_cover, 0) @@ -491,8 +537,8 @@ def test_write_read_file(self): """Write and read excel file""" act_1 = Measure( - name='Mangrove', - haz_type='TC', + name="Mangrove", + haz_type="TC", color_rgb=np.array([1, 1, 1]), cost=10, mdd_impact=(1, 2), @@ -502,8 +548,8 @@ def test_write_read_file(self): ) act_11 = Measure( - name='Something', - haz_type='TC', + name="Something", + haz_type="TC", color_rgb=np.array([1, 1, 1]), mdd_impact=(1, 2), paa_impact=(1, 3), @@ -512,32 +558,32 @@ def test_write_read_file(self): ) act_2 = Measure( - name='Anything', - haz_type='FL', + name="Anything", + haz_type="FL", color_rgb=np.array([1, 1, 1]), mdd_impact=(1, 2), paa_impact=(1, 2), hazard_inten_imp=(1, 2), hazard_freq_cutoff=30, - imp_fun_map='map', + imp_fun_map="map", ) meas_set = MeasureSet(measure_list=[act_1, act_11, act_2]) - file_name = DATA_DIR.joinpath('test_meas.xlsx') + file_name = DATA_DIR.joinpath("test_meas.xlsx") meas_set.write_excel(file_name) meas_read = MeasureSet.from_excel(file_name) - meas_list = meas_read.get_measure('TC') - meas_list.extend(meas_read.get_measure('FL')) + meas_list = meas_read.get_measure("TC") + meas_list.extend(meas_read.get_measure("FL")) for meas in meas_list: - if meas.name == 'Mangrove': + if meas.name == "Mangrove": meas_ref = act_1 - elif meas.name == 'Something': + elif meas.name == "Something": meas_ref = act_11 - elif meas.name == 'Anything': + elif meas.name == "Anything": meas_ref = act_2 self.assertEqual(meas_ref.name, meas.name) @@ -555,6 +601,7 @@ def test_write_read_file(self): self.assertEqual(meas_ref.risk_transf_attach, meas.risk_transf_attach) self.assertEqual(meas_ref.risk_transf_cover, meas.risk_transf_cover) + # Execute Tests if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestContainer) diff --git a/climada/entity/tag/__init__.py b/climada/entity/tag/__init__.py index 4d386fd09..374022317 100644 --- a/climada/entity/tag/__init__.py +++ b/climada/entity/tag/__init__.py @@ -30,9 +30,11 @@ # # 
@deprecated(details="This class is not supported anymore.") class Tag(_Tag): - """kept for backwards compatibility with climada <= 3.3 - """ - @deprecated(details="This class is not supported anymore and will be removed in the next" - " version of climada.") + """kept for backwards compatibility with climada <= 3.3""" + + @deprecated( + details="This class is not supported anymore and will be removed in the next" + " version of climada." + ) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) diff --git a/climada/entity/tag/tag.py b/climada/entity/tag/tag.py index 4ec1a2bef..01f9db19c 100644 --- a/climada/entity/tag/tag.py +++ b/climada/entity/tag/tag.py @@ -18,13 +18,14 @@ Define Tag class. """ + from __future__ import annotations + from pathlib import Path -from typing import Union, List +from typing import List, Union import h5py - STR_DT = h5py.special_dtype(vlen=str) @@ -39,7 +40,7 @@ def _distinct_list_of_str(list_of_str: list, arg: Union[list, str, object]): return list_of_str -class Tag(): +class Tag: """Deprecated since climada 4.*. This class is only used for unpickling, e.g., when reading Exposures hdf5 data files that have been created with climada <=3.*. @@ -51,9 +52,11 @@ class Tag(): description of the data """ - def __init__(self, - file_name: Union[List[str], str] = None, - description: Union[List[str], str] = None): + def __init__( + self, + file_name: Union[List[str], str] = None, + description: Union[List[str], str] = None, + ): """Initialize values. Parameters @@ -71,7 +74,7 @@ def __getattribute__(self, name): # the attribute assignment there is not done neither via __init__ nor via __setattr__. # The outcome is e.g., a description of type str val = super().__getattribute__(name) - if name in ['file_name', 'description'] and not isinstance(val, list): + if name in ["file_name", "description"] and not isinstance(val, list): if not val: return [] return [str(val)] @@ -84,18 +87,19 @@ def append(self, tag: Tag): def join_file_names(self): """Get a string with the joined file names.""" - return ' + '.join([ - Path(single_name).stem - for single_name in self.file_name - ]) + return " + ".join([Path(single_name).stem for single_name in self.file_name]) def join_descriptions(self): """Get a string with the joined descriptions.""" - return ' + '.join(self.description) + return " + ".join(self.description) def __str__(self): - return ' File: ' + self.join_file_names() + \ - '\n Description: ' + self.join_descriptions() + return ( + " File: " + + self.join_file_names() + + "\n Description: " + + self.join_descriptions() + ) __repr__ = __str__ @@ -107,10 +111,14 @@ def to_hdf5(self, hf_data): hf_data : h5py.File will be updated during the call """ - hf_str = hf_data.create_dataset('file_name', (len(self.file_name),), dtype=STR_DT) + hf_str = hf_data.create_dataset( + "file_name", (len(self.file_name),), dtype=STR_DT + ) for i, name in enumerate(self.file_name): hf_str[i] = name - hf_str = hf_data.create_dataset('description', (len(self.description),), dtype=STR_DT) + hf_str = hf_data.create_dataset( + "description", (len(self.description),), dtype=STR_DT + ) for i, desc in enumerate(self.description): hf_str[i] = desc @@ -127,5 +135,6 @@ def from_hdf5(cls, hf_data): Tag """ return cls( - file_name=[x.decode() for x in hf_data.get('file_name')], - description=[x.decode() for x in hf_data.get('description')]) + file_name=[x.decode() for x in hf_data.get("file_name")], + description=[x.decode() for x in hf_data.get("description")], + ) diff --git 
a/climada/entity/tag/test/test_tag.py b/climada/entity/tag/test/test_tag.py index 8dc37590d..dfd1f454b 100644 --- a/climada/entity/tag/test/test_tag.py +++ b/climada/entity/tag/test/test_tag.py @@ -23,48 +23,53 @@ from climada.entity.tag import Tag + class TestAppend(unittest.TestCase): """Test loading funcions from the Hazard class""" def test_append_different_increase(self): """Appends an other tag correctly.""" - tag1 = Tag('file_name1.mat', 'dummy file 1') - self.assertEqual(['file_name1.mat'], tag1.file_name) - self.assertEqual(['dummy file 1'], tag1.description) + tag1 = Tag("file_name1.mat", "dummy file 1") + self.assertEqual(["file_name1.mat"], tag1.file_name) + self.assertEqual(["dummy file 1"], tag1.description) - tag2 = Tag('file_name2.mat', 'dummy file 2') + tag2 = Tag("file_name2.mat", "dummy file 2") tag1.append(tag2) - self.assertEqual(['file_name1.mat', 'file_name2.mat'], tag1.file_name) - self.assertEqual(['dummy file 1', 'dummy file 2'], tag1.description) - self.assertEqual(' File: file_name1 + file_name2\n' - ' Description: dummy file 1 + dummy file 2', str(tag1)) + self.assertEqual(["file_name1.mat", "file_name2.mat"], tag1.file_name) + self.assertEqual(["dummy file 1", "dummy file 2"], tag1.description) + self.assertEqual( + " File: file_name1 + file_name2\n" + " Description: dummy file 1 + dummy file 2", + str(tag1), + ) def test_append_equal_same(self): """Appends an other tag correctly.""" - tag1 = Tag('file_name1.mat', 'dummy file 1') - tag2 = Tag('file_name1.mat', 'dummy file 1') + tag1 = Tag("file_name1.mat", "dummy file 1") + tag2 = Tag("file_name1.mat", "dummy file 1") tag1.append(tag2) - self.assertEqual(['file_name1.mat'], tag1.file_name) - self.assertEqual(['dummy file 1'], tag1.description) + self.assertEqual(["file_name1.mat"], tag1.file_name) + self.assertEqual(["dummy file 1"], tag1.description) def test_append_empty(self): """Appends an other tag correctly.""" - tag1 = Tag('file_name1.mat', 'dummy file 1') + tag1 = Tag("file_name1.mat", "dummy file 1") tag2 = Tag() tag1.append(tag2) - self.assertEqual(['file_name1.mat'], tag1.file_name) - self.assertEqual(['dummy file 1'], tag1.description) + self.assertEqual(["file_name1.mat"], tag1.file_name) + self.assertEqual(["dummy file 1"], tag1.description) tag1 = Tag() - tag2 = Tag('file_name1.mat', 'dummy file 1') + tag2 = Tag("file_name1.mat", "dummy file 1") tag1.append(tag2) - self.assertEqual(['file_name1.mat'], tag1.file_name) - self.assertEqual(['dummy file 1'], tag1.description) + self.assertEqual(["file_name1.mat"], tag1.file_name) + self.assertEqual(["dummy file 1"], tag1.description) + # Execute Tests if __name__ == "__main__": diff --git a/climada/entity/test/test_entity.py b/climada/entity/test/test_entity.py index 46e712c57..7805a24e7 100644 --- a/climada/entity/test/test_entity.py +++ b/climada/entity/test/test_entity.py @@ -18,18 +18,20 @@ Test Entity class. 
""" + import unittest + import numpy as np from climada import CONFIG +from climada.entity.disc_rates.base import DiscRates from climada.entity.entity_def import Entity from climada.entity.exposures.base import Exposures -from climada.entity.disc_rates.base import DiscRates from climada.entity.impact_funcs.impact_func_set import ImpactFuncSet from climada.entity.measures.measure_set import MeasureSet from climada.util.constants import ENT_TEMPLATE_XLS -ENT_TEST_MAT = CONFIG.exposures.test_data.dir().joinpath('demo_today.mat') +ENT_TEST_MAT = CONFIG.exposures.test_data.dir().joinpath("demo_today.mat") class TestReader(unittest.TestCase): @@ -41,12 +43,12 @@ def test_default_pass(self): def_entity = Entity.from_excel(ENT_TEMPLATE_XLS) # Check default demo excel file has been loaded - self.assertEqual(len(def_entity.exposures.gdf['deductible']), 24) - self.assertEqual(def_entity.exposures.gdf['value'][2], 12596064143.542929) + self.assertEqual(len(def_entity.exposures.gdf["deductible"]), 24) + self.assertEqual(def_entity.exposures.gdf["value"][2], 12596064143.542929) - self.assertEqual(len(def_entity.impact_funcs.get_func('TC', 1).mdd), 25) + self.assertEqual(len(def_entity.impact_funcs.get_func("TC", 1).mdd), 25) - self.assertIn('risk transfer', def_entity.measures.get_names('TC')) + self.assertIn("risk transfer", def_entity.measures.get_names("TC")) self.assertEqual(def_entity.disc_rates.years[5], 2005) @@ -70,27 +72,27 @@ class TestCheck(unittest.TestCase): def test_wrongMeas_fail(self): """Wrong measures""" ent = Entity.from_excel(ENT_TEMPLATE_XLS) - actions = ent.measures.get_measure('TC') + actions = ent.measures.get_measure("TC") actions[0].color_rgb = np.array([1, 2]) with self.assertRaises(ValueError) as cm: ent.check() - self.assertIn('Measure.color_rgb', str(cm.exception)) + self.assertIn("Measure.color_rgb", str(cm.exception)) with self.assertRaises(ValueError) as cm: ent.measures = Exposures() - self.assertIn('MeasureSet', str(cm.exception)) + self.assertIn("MeasureSet", str(cm.exception)) def test_wrongImpFun_fail(self): """Wrong impact functions""" ent = Entity.from_excel(ENT_TEMPLATE_XLS) - ent.impact_funcs.get_func('TC', 1).paa = np.array([1, 2]) + ent.impact_funcs.get_func("TC", 1).paa = np.array([1, 2]) with self.assertRaises(ValueError) as cm: ent.check() - self.assertIn('ImpactFunc.paa', str(cm.exception)) + self.assertIn("ImpactFunc.paa", str(cm.exception)) with self.assertRaises(ValueError) as cm: ent.impact_funcs = Exposures() - self.assertIn('ImpactFuncSet', str(cm.exception)) + self.assertIn("ImpactFuncSet", str(cm.exception)) def test_wrongDisc_fail(self): """Wrong discount rates""" @@ -98,11 +100,11 @@ def test_wrongDisc_fail(self): ent.disc_rates.rates = np.array([1, 2]) with self.assertRaises(ValueError) as cm: ent.check() - self.assertIn('DiscRates.rates', str(cm.exception)) + self.assertIn("DiscRates.rates", str(cm.exception)) with self.assertRaises(ValueError) as cm: ent.disc_rates = Exposures() - self.assertIn('DiscRates', str(cm.exception)) + self.assertIn("DiscRates", str(cm.exception)) # Execute Tests diff --git a/climada/hazard/__init__.py b/climada/hazard/__init__.py index 8201c40fa..0231ec112 100755 --- a/climada/hazard/__init__.py +++ b/climada/hazard/__init__.py @@ -18,8 +18,9 @@ init hazard """ -from .centroids import * + from .base import * -from .trop_cyclone import * -from .tc_tracks import * +from .centroids import * from .storm_europe import * +from .tc_tracks import * +from .trop_cyclone import * diff --git a/climada/hazard/base.py 
b/climada/hazard/base.py index 877a22f2d..f8d379315 100644 --- a/climada/hazard/base.py +++ b/climada/hazard/base.py @@ -19,28 +19,27 @@ Define Hazard. """ -__all__ = ['Hazard'] +__all__ = ["Hazard"] import copy import datetime as dt import logging -from typing import Optional,List import warnings +from typing import List, Optional import geopandas as gpd import numpy as np from pathos.pools import ProcessPool as Pool from scipy import sparse -from climada import CONFIG -from climada.hazard.plot import HazardPlot -from climada.hazard.io import HazardIO -from climada.hazard.centroids.centr import Centroids import climada.util.checker as u_check import climada.util.constants as u_const import climada.util.coordinates as u_coord import climada.util.dates_times as u_dt - +from climada import CONFIG +from climada.hazard.centroids.centr import Centroids +from climada.hazard.io import HazardIO +from climada.hazard.plot import HazardPlot LOGGER = logging.getLogger(__name__) @@ -98,26 +97,24 @@ class Hazard(HazardIO, HazardPlot): If empty (all 0), it is ignored in the impact computations (i.e., is equivalent to fraction is 1 everywhere). """ + intensity_thres = 10 """Intensity threshold per hazard used to filter lower intensities. To be set for every hazard type""" - vars_oblig = {'units', - 'centroids', - 'event_id', - 'frequency', - 'intensity', - 'fraction' - } + vars_oblig = { + "units", + "centroids", + "event_id", + "frequency", + "intensity", + "fraction", + } """Name of the variables needed to compute the impact. Types: scalar, str, list, 1dim np.array of size num_events, scipy.sparse matrix of shape num_events x num_centroids, Centroids.""" - vars_def = {'date', - 'orig', - 'event_name', - 'frequency_unit' - } + vars_def = {"date", "orig", "event_name", "frequency_unit"} """Name of the variables used in impact calculation whose value is descriptive and can therefore be set with default values. Types: scalar, string, list, 1dim np.array of size num_events. @@ -127,19 +124,21 @@ class Hazard(HazardIO, HazardPlot): """Name of the variables that aren't need to compute the impact. Types: scalar, string, list, 1dim np.array of size num_events.""" - def __init__(self, - haz_type: str = "", - pool: Optional[Pool] = None, - units: str = "", - centroids: Optional[Centroids] = None, - event_id: Optional[np.ndarray] = None, - frequency: Optional[np.ndarray] = None, - frequency_unit: str = u_const.DEF_FREQ_UNIT, - event_name: Optional[List[str]] = None, - date: Optional[np.ndarray] = None, - orig: Optional[np.ndarray] = None, - intensity: Optional[sparse.csr_matrix] = None, - fraction: Optional[sparse.csr_matrix] = None): + def __init__( + self, + haz_type: str = "", + pool: Optional[Pool] = None, + units: str = "", + centroids: Optional[Centroids] = None, + event_id: Optional[np.ndarray] = None, + frequency: Optional[np.ndarray] = None, + frequency_unit: str = u_const.DEF_FREQ_UNIT, + event_name: Optional[List[str]] = None, + date: Optional[np.ndarray] = None, + orig: Optional[np.ndarray] = None, + intensity: Optional[sparse.csr_matrix] = None, + fraction: Optional[sparse.csr_matrix] = None, + ): """ Initialize values. 
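# A minimal construction sketch for the Hazard signature shown above; all concrete
# values ("TC", "m/s", coordinates, dates, frequencies) are made-up examples, and
# `fraction` is left to its default all-zero matrix, which the class treats as
# "fraction = 1 everywhere".
import numpy as np
from scipy import sparse
from climada.hazard import Centroids, Hazard

centroids = Centroids(
    lat=np.array([10.0, 10.5, 11.0]), lon=np.array([-60.0, -60.5, -61.0])
)
haz = Hazard(
    haz_type="TC",
    units="m/s",
    centroids=centroids,
    event_id=np.array([1, 2]),
    event_name=["ev_1", "ev_2"],
    date=np.array([730300, 730665]),     # proleptic ordinals, one per event
    orig=np.array([True, False]),        # historical vs. synthetic event
    frequency=np.array([0.1, 0.1]),      # events per year
    intensity=sparse.csr_matrix(
        np.array([[30.0, 45.0, 0.0], [0.0, 25.0, 60.0]])  # events x centroids
    ),
)
haz.check()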
@@ -187,25 +186,31 @@ def __init__(self, """ self.haz_type = haz_type self.units = units - self.centroids = centroids if centroids is not None else Centroids( - lat=np.empty(0), lon=np.empty(0)) + self.centroids = ( + centroids + if centroids is not None + else Centroids(lat=np.empty(0), lon=np.empty(0)) + ) # following values are defined for each event self.event_id = event_id if event_id is not None else np.array([], int) - self.frequency = frequency if frequency is not None else np.array( - [], float) + self.frequency = frequency if frequency is not None else np.array([], float) self.frequency_unit = frequency_unit self.event_name = event_name if event_name is not None else list() self.date = date if date is not None else np.array([], int) self.orig = orig if orig is not None else np.array([], bool) # following values are defined for each event and centroid - self.intensity = intensity if intensity is not None else sparse.csr_matrix( - np.empty((0, 0))) # events x centroids - self.fraction = fraction if fraction is not None else sparse.csr_matrix( - self.intensity.shape) # events x centroids + self.intensity = ( + intensity if intensity is not None else sparse.csr_matrix(np.empty((0, 0))) + ) # events x centroids + self.fraction = ( + fraction + if fraction is not None + else sparse.csr_matrix(self.intensity.shape) + ) # events x centroids self.pool = pool if self.pool: - LOGGER.info('Using %s CPUs.', self.pool.ncpus) + LOGGER.info("Using %s CPUs.", self.pool.ncpus) def check_matrices(self): """Ensure that matrices are consistently shaped and stored @@ -248,7 +253,7 @@ def get_default(cls, attribute): Any """ return { - 'frequency_unit': u_const.DEF_FREQ_UNIT, + "frequency_unit": u_const.DEF_FREQ_UNIT, }.get(attribute) def check(self): @@ -271,8 +276,16 @@ def reproject_vector(self, dst_crs): self.centroids.gdf.to_crs(dst_crs, inplace=True) self.check() - def select(self, event_names=None, event_id=None, date=None, orig=None, - reg_id=None, extent=None, reset_frequency=False): + def select( + self, + event_names=None, + event_id=None, + date=None, + orig=None, + reg_id=None, + extent=None, + reset_frequency=False, + ): """Select events matching provided criteria The frequency of events may need to be recomputed (see `reset_frequency`)! 
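# A usage sketch for the selection API above, reusing the `haz` object from the
# earlier sketch; the date strings, the extent tuple (lon_min, lon_max, lat_min,
# lat_max) and the other criteria are example values, not requirements.
sub_haz = haz.select(
    date=("2000-01-01", "2009-12-31"),  # keep events falling in this date range
    orig=True,                          # keep only historical (non-synthetic) events
    extent=(-62.0, -59.0, 9.0, 12.0),   # keep only centroids inside this lon/lat box
    reset_frequency=True,               # rescale frequencies to the new year span
)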
@@ -308,7 +321,7 @@ def select(self, event_names=None, event_id=None, date=None, orig=None, else: haz = self.__class__() - #filter events + # filter events sel_ev = np.ones(self.event_id.size, dtype=bool) # filter events by date @@ -319,14 +332,14 @@ def select(self, event_names=None, event_id=None, date=None, orig=None, date_end = u_dt.str_to_date(date[1]) sel_ev &= (date_ini <= self.date) & (self.date <= date_end) if not np.any(sel_ev): - LOGGER.info('No hazard in date range %s.', date) + LOGGER.info("No hazard in date range %s.", date) return None # filter events hist/synthetic if orig is not None: - sel_ev &= (self.orig.astype(bool) == orig) + sel_ev &= self.orig.astype(bool) == orig if not np.any(sel_ev): - LOGGER.info('No hazard with %s original events.', str(orig)) + LOGGER.info("No hazard with %s original events.", str(orig)) return None # filter events based on name @@ -337,38 +350,43 @@ def select(self, event_names=None, event_id=None, date=None, orig=None, new_sel = [filtered_events.index(n) for n in event_names] except ValueError as err: name = str(err).replace(" is not in list", "") - LOGGER.info('No hazard with name %s', name) + LOGGER.info("No hazard with name %s", name) return None sel_ev = sel_ev[new_sel] # filter events based on id if event_id is not None: # preserves order of event_id - sel_ev = np.array([ - np.argwhere(self.event_id == n)[0,0] - for n in event_id - if n in self.event_id[sel_ev] - ]) + sel_ev = np.array( + [ + np.argwhere(self.event_id == n)[0, 0] + for n in event_id + if n in self.event_id[sel_ev] + ] + ) # filter centroids sel_cen = self.centroids.select_mask(reg_id=reg_id, extent=extent) if not np.any(sel_cen): - LOGGER.info('No hazard centroids within extent and region') + LOGGER.info("No hazard centroids within extent and region") return None # Sanitize fraction, because we check non-zero entries later self.fraction.eliminate_zeros() # Perform attribute selection - for (var_name, var_val) in self.__dict__.items(): - if isinstance(var_val, np.ndarray) and var_val.ndim == 1 \ - and var_val.size > 0: + for var_name, var_val in self.__dict__.items(): + if ( + isinstance(var_val, np.ndarray) + and var_val.ndim == 1 + and var_val.size > 0 + ): setattr(haz, var_name, var_val[sel_ev]) elif isinstance(var_val, sparse.csr_matrix): setattr(haz, var_name, var_val[sel_ev, :][:, sel_cen]) elif isinstance(var_val, list) and var_val: setattr(haz, var_name, [var_val[idx] for idx in sel_ev]) - elif var_name == 'centroids': + elif var_name == "centroids": if reg_id is None and extent is None: new_cent = var_val else: @@ -379,15 +397,28 @@ def select(self, event_names=None, event_id=None, date=None, orig=None, # reset frequency if date span has changed (optional): if reset_frequency: - if self.frequency_unit not in ['1/year', 'annual', '1/y', '1/a']: - LOGGER.warning("Resetting the frequency is based on the calendar year of given" + if self.frequency_unit not in ["1/year", "annual", "1/y", "1/a"]: + LOGGER.warning( + "Resetting the frequency is based on the calendar year of given" " dates but the frequency unit here is %s. 
Consider setting the frequency" " manually for the selection or changing the frequency unit to %s.", - self.frequency_unit, u_const.DEF_FREQ_UNIT) - year_span_old = np.abs(dt.datetime.fromordinal(self.date.max()).year - - dt.datetime.fromordinal(self.date.min()).year) + 1 - year_span_new = np.abs(dt.datetime.fromordinal(haz.date.max()).year - - dt.datetime.fromordinal(haz.date.min()).year) + 1 + self.frequency_unit, + u_const.DEF_FREQ_UNIT, + ) + year_span_old = ( + np.abs( + dt.datetime.fromordinal(self.date.max()).year + - dt.datetime.fromordinal(self.date.min()).year + ) + + 1 + ) + year_span_new = ( + np.abs( + dt.datetime.fromordinal(haz.date.max()).year + - dt.datetime.fromordinal(haz.date.min()).year + ) + + 1 + ) haz.frequency = haz.frequency * year_span_old / year_span_new # Check if new fraction is zero everywhere @@ -405,8 +436,11 @@ def select(self, event_names=None, event_id=None, date=None, orig=None, haz.sanitize_event_ids() return haz - def select_tight(self, buffer=u_coord.NEAREST_NEIGHBOR_THRESHOLD / u_const.ONE_LAT_KM, - val='intensity'): + def select_tight( + self, + buffer=u_coord.NEAREST_NEIGHBOR_THRESHOLD / u_const.ONE_LAT_KM, + val="intensity", + ): """ Reduce hazard to those centroids spanning a minimal box which contains all non-zero intensity or fraction points. @@ -435,15 +469,17 @@ def select_tight(self, buffer=u_coord.NEAREST_NEIGHBOR_THRESHOLD / u_const.ONE_L """ - if val == 'intensity': + if val == "intensity": cent_nz = (self.intensity != 0).sum(axis=0).nonzero()[1] - if val == 'fraction': + if val == "fraction": cent_nz = (self.fraction != 0).sum(axis=0).nonzero()[1] lon_nz = self.centroids.lon[cent_nz] lat_nz = self.centroids.lat[cent_nz] - return self.select(extent=u_coord.toggle_extent_bounds( - u_coord.latlon_bounds(lat=lat_nz, lon=lon_nz, buffer=buffer) - )) + return self.select( + extent=u_coord.toggle_extent_bounds( + u_coord.latlon_bounds(lat=lat_nz, lon=lon_nz, buffer=buffer) + ) + ) def local_exceedance_inten(self, return_periods=(25, 50, 100, 250)): """Compute exceedance intensity map for given return periods. @@ -460,40 +496,49 @@ def local_exceedance_inten(self, return_periods=(25, 50, 100, 250)): # warn if return period is above return period of rarest event: for period in return_periods: if period > 1 / self.frequency.min(): - LOGGER.warning('Return period %1.1f exceeds max. event return period.', period) - LOGGER.info('Computing exceedance intenstiy map for return periods: %s', - return_periods) + LOGGER.warning( + "Return period %1.1f exceeds max. 
event return period.", period + ) + LOGGER.info( + "Computing exceedance intenstiy map for return periods: %s", return_periods + ) num_cen = self.intensity.shape[1] inten_stats = np.zeros((len(return_periods), num_cen)) cen_step = CONFIG.max_matrix_size.int() // self.intensity.shape[0] if not cen_step: - raise ValueError('Increase max_matrix_size configuration parameter to >' - f' {self.intensity.shape[0]}') + raise ValueError( + "Increase max_matrix_size configuration parameter to >" + f" {self.intensity.shape[0]}" + ) # separte in chunks chk = -1 for chk in range(int(num_cen / cen_step)): self._loc_return_inten( np.array(return_periods), - self.intensity[:, chk * cen_step:(chk + 1) * cen_step].toarray(), - inten_stats[:, chk * cen_step:(chk + 1) * cen_step]) + self.intensity[:, chk * cen_step : (chk + 1) * cen_step].toarray(), + inten_stats[:, chk * cen_step : (chk + 1) * cen_step], + ) self._loc_return_inten( np.array(return_periods), - self.intensity[:, (chk + 1) * cen_step:].toarray(), - inten_stats[:, (chk + 1) * cen_step:]) + self.intensity[:, (chk + 1) * cen_step :].toarray(), + inten_stats[:, (chk + 1) * cen_step :], + ) # set values below 0 to zero if minimum of hazard.intensity >= 0: if np.min(inten_stats) < 0 <= self.intensity.min(): - LOGGER.warning('Exceedance intenstiy values below 0 are set to 0. \ - Reason: no negative intensity values were found in hazard.') + LOGGER.warning( + "Exceedance intenstiy values below 0 are set to 0. \ + Reason: no negative intensity values were found in hazard." + ) inten_stats[inten_stats < 0] = 0 return inten_stats def sanitize_event_ids(self): """Make sure that event ids are unique""" if np.unique(self.event_id).size != self.event_id.size: - LOGGER.debug('Resetting event_id.') + LOGGER.debug("Resetting event_id.") self.event_id = np.arange(1, self.event_id.size + 1) - def local_return_period(self, threshold_intensities=(5., 10., 20.)): + def local_return_period(self, threshold_intensities=(5.0, 10.0, 20.0)): """Compute local return periods for given hazard intensities. The used method is fitting the ordered intensitites per centroid to the corresponding cummulated frequency with a step function. 
@@ -516,17 +561,20 @@ def local_return_period(self, threshold_intensities=(5., 10., 20.)): column_label : function Column-label-generating function, for reporting and plotting """ - #check frequency unit - if self.frequency_unit in ['1/year', 'annual', '1/y', '1/a']: - rp_unit = 'Years' - elif self.frequency_unit in ['1/month', 'monthly', '1/m']: - rp_unit = 'Months' - elif self.frequency_unit in ['1/week', 'weekly', '1/w']: - rp_unit = 'Weeks' + # check frequency unit + if self.frequency_unit in ["1/year", "annual", "1/y", "1/a"]: + rp_unit = "Years" + elif self.frequency_unit in ["1/month", "monthly", "1/m"]: + rp_unit = "Months" + elif self.frequency_unit in ["1/week", "weekly", "1/w"]: + rp_unit = "Weeks" else: - LOGGER.warning("Hazard's frequency unit %s is not known, " - "years will be used as return period unit.", self.frequency_unit) - rp_unit = 'Years' + LOGGER.warning( + "Hazard's frequency unit %s is not known, " + "years will be used as return period unit.", + self.frequency_unit, + ) + rp_unit = "Years" # Ensure threshold_intensities is a numpy array threshold_intensities = np.array(threshold_intensities) @@ -534,31 +582,34 @@ def local_return_period(self, threshold_intensities=(5., 10., 20.)): num_cen = self.intensity.shape[1] return_periods = np.zeros((len(threshold_intensities), num_cen)) - # batch_centroids = number of centroids that are handled in parallel: + # batch_centroids = number of centroids that are handled in parallel: # batch_centroids = maximal matrix size // number of events batch_centroids = CONFIG.max_matrix_size.int() // self.intensity.shape[0] if batch_centroids < 1: - raise ValueError('Increase max_matrix_size configuration parameter to >' - f'{self.intensity.shape[0]}') + raise ValueError( + "Increase max_matrix_size configuration parameter to >" + f"{self.intensity.shape[0]}" + ) # Process the intensities in chunks of centroids for start_col in range(0, num_cen, batch_centroids): end_col = min(start_col + batch_centroids, num_cen) return_periods[:, start_col:end_col] = self._loc_return_period( - threshold_intensities, - self.intensity[:, start_col:end_col].toarray() - ) + threshold_intensities, self.intensity[:, start_col:end_col].toarray() + ) # create the output GeoDataFrame - gdf = gpd.GeoDataFrame(geometry = self.centroids.gdf['geometry'], - crs = self.centroids.gdf.crs) - col_names = [f'{tresh_inten}' for tresh_inten in threshold_intensities] + gdf = gpd.GeoDataFrame( + geometry=self.centroids.gdf["geometry"], crs=self.centroids.gdf.crs + ) + col_names = [f"{tresh_inten}" for tresh_inten in threshold_intensities] gdf[col_names] = return_periods.T # create label and column_label - label = f'Return Periods ({rp_unit})' - column_label = lambda column_names: [f'Threshold Intensity: {col} {self.units}' - for col in column_names] + label = f"Return Periods ({rp_unit})" + column_label = lambda column_names: [ + f"Threshold Intensity: {col} {self.units}" for col in column_names + ] return gdf, label, column_label @@ -575,8 +626,13 @@ def get_event_id(self, event_name): ------- list_id: np.array(int) """ - list_id = self.event_id[[i_name for i_name, val_name in enumerate(self.event_name) - if val_name == event_name]] + list_id = self.event_id[ + [ + i_name + for i_name, val_name in enumerate(self.event_name) + if val_name == event_name + ] + ] if list_id.size == 0: raise ValueError(f"No event with name: {event_name}") return list_id @@ -598,8 +654,7 @@ def get_event_name(self, event_id): ValueError """ try: - return self.event_name[np.argwhere( - 
self.event_id == event_id)[0][0]] + return self.event_name[np.argwhere(self.event_id == event_id)[0][0]] except IndexError as err: raise ValueError(f"No event with id: {event_id}") from err @@ -622,7 +677,8 @@ def get_event_date(self, event=None): ev_ids = self.get_event_id(event) l_dates = [ u_dt.date_to_str(self.date[np.argwhere(self.event_id == ev_id)[0][0]]) - for ev_id in ev_ids] + for ev_id in ev_ids + ] else: ev_idx = np.argwhere(self.event_id == event)[0][0] l_dates = [u_dt.date_to_str(self.date[ev_idx])] @@ -637,8 +693,9 @@ def calc_year_set(self): key are years, values array with event_ids of that year """ - orig_year = np.array([dt.datetime.fromordinal(date).year - for date in self.date[self.orig]]) + orig_year = np.array( + [dt.datetime.fromordinal(date).year for date in self.date[self.orig]] + ) orig_yearset = {} for year in np.unique(orig_year): orig_yearset[year] = self.event_id[self.orig][orig_year == year] @@ -669,13 +726,19 @@ def set_frequency(self, yearrange=None): per event. If yearrange is not given (None), the year range is derived from self.date """ - if self.frequency_unit not in ['1/year', 'annual', '1/y', '1/a']: - LOGGER.warning("setting the frequency on a hazard object who's frequency unit" + if self.frequency_unit not in ["1/year", "annual", "1/y", "1/a"]: + LOGGER.warning( + "setting the frequency on a hazard object who's frequency unit" "is %s and not %s will most likely lead to unexpected results", - self.frequency_unit, u_const.DEF_FREQ_UNIT) + self.frequency_unit, + u_const.DEF_FREQ_UNIT, + ) if not yearrange: - delta_time = dt.datetime.fromordinal(int(np.max(self.date))).year - \ - dt.datetime.fromordinal(int(np.min(self.date))).year + 1 + delta_time = ( + dt.datetime.fromordinal(int(np.max(self.date))).year + - dt.datetime.fromordinal(int(np.min(self.date))).year + + 1 + ) else: delta_time = max(yearrange) - min(yearrange) + 1 num_orig = self.orig.nonzero()[0].size @@ -723,8 +786,11 @@ def _loc_return_inten(self, return_periods, inten, exc_inten): for cen_idx in range(inten.shape[1]): exc_inten[:, cen_idx] = self._cen_return_inten( - inten_sort[:, cen_idx], freq_sort[:, cen_idx], - self.intensity_thres, return_periods) + inten_sort[:, cen_idx], + freq_sort[:, cen_idx], + self.intensity_thres, + return_periods, + ) def _loc_return_period(self, threshold_intensities, inten): """Compute local return periods for user-specified threshold intensities @@ -757,7 +823,9 @@ def _loc_return_period(self, threshold_intensities, inten): for i, intensity in enumerate(threshold_intensities): # Find the first occurrence where the intensity is less than the sorted intensities - exceedance_index = np.searchsorted(sorted_inten_cen[::-1], intensity, side='left') + exceedance_index = np.searchsorted( + sorted_inten_cen[::-1], intensity, side="left" + ) # Calculate exceedance probability if exceedance_index < len(cum_freq_cen): @@ -786,15 +854,19 @@ def _check_events(self): if np.unique(self.event_id).size != num_ev: raise ValueError("There are events with the same identifier.") - u_check.check_obligatories(self.__dict__, self.vars_oblig, 'Hazard.', - num_ev, num_ev, num_cen) - u_check.check_optionals(self.__dict__, self.vars_opt, 'Hazard.', num_ev) - self.event_name = u_check.array_default(num_ev, self.event_name, - 'Hazard.event_name', list(self.event_id)) - self.date = u_check.array_default(num_ev, self.date, 'Hazard.date', - np.ones(self.event_id.shape, dtype=int)) - self.orig = u_check.array_default(num_ev, self.orig, 'Hazard.orig', - np.zeros(self.event_id.shape, 
dtype=bool)) + u_check.check_obligatories( + self.__dict__, self.vars_oblig, "Hazard.", num_ev, num_ev, num_cen + ) + u_check.check_optionals(self.__dict__, self.vars_opt, "Hazard.", num_ev) + self.event_name = u_check.array_default( + num_ev, self.event_name, "Hazard.event_name", list(self.event_id) + ) + self.date = u_check.array_default( + num_ev, self.date, "Hazard.date", np.ones(self.event_id.shape, dtype=int) + ) + self.orig = u_check.array_default( + num_ev, self.orig, "Hazard.orig", np.zeros(self.event_id.shape, dtype=bool) + ) if len(self._events_set()) != num_ev: raise ValueError("There are events with same date and name.") @@ -831,7 +903,7 @@ def _cen_return_inten(inten, freq, inten_th, return_periods): pol_coef = np.polyfit(np.log(freq_cen), inten_cen, deg=0) inten_fit = np.polyval(pol_coef, np.log(1 / return_periods)) wrong_inten = (return_periods > np.max(1 / freq_cen)) & np.isnan(inten_fit) - inten_fit[wrong_inten] = 0. + inten_fit[wrong_inten] = 0.0 return inten_fit @@ -881,36 +953,46 @@ def append(self, *others): haz._check_events() # check type, unit, and attribute consistency among hazards - haz_types = {haz.haz_type for haz in haz_list if haz.haz_type != ''} + haz_types = {haz.haz_type for haz in haz_list if haz.haz_type != ""} if len(haz_types) > 1: - raise ValueError(f"The given hazards are of different types: {haz_types}. " - "The hazards are incompatible and cannot be concatenated.") + raise ValueError( + f"The given hazards are of different types: {haz_types}. " + "The hazards are incompatible and cannot be concatenated." + ) self.haz_type = haz_types.pop() haz_classes = {type(haz) for haz in haz_list} if len(haz_classes) > 1: - raise TypeError(f"The given hazards are of different classes: {haz_classes}. " - "The hazards are incompatible and cannot be concatenated.") + raise TypeError( + f"The given hazards are of different classes: {haz_classes}. " + "The hazards are incompatible and cannot be concatenated." + ) freq_units = {haz.frequency_unit for haz in haz_list} if len(freq_units) > 1: - raise ValueError(f"The given hazards have different frequency units: {freq_units}. " - "The hazards are incompatible and cannot be concatenated.") + raise ValueError( + f"The given hazards have different frequency units: {freq_units}. " + "The hazards are incompatible and cannot be concatenated." + ) self.frequency_unit = freq_units.pop() - units = {haz.units for haz in haz_list if haz.units != ''} + units = {haz.units for haz in haz_list if haz.units != ""} if len(units) > 1: - raise ValueError(f"The given hazards use different units: {units}. " - "The hazards are incompatible and cannot be concatenated.") + raise ValueError( + f"The given hazards use different units: {units}. " + "The hazards are incompatible and cannot be concatenated." + ) if len(units) == 0: - units = {''} + units = {""} self.units = units.pop() attributes = sorted(set.union(*[set(vars(haz).keys()) for haz in haz_list])) for attr_name in attributes: if not all(hasattr(haz, attr_name) for haz in haz_list_nonempty): - raise ValueError(f"Attribute {attr_name} is not shared by all hazards. " - "The hazards are incompatible and cannot be concatenated.") + raise ValueError( + f"Attribute {attr_name} is not shared by all hazards. " + "The hazards are incompatible and cannot be concatenated." 
+ ) # map individual centroids objects to union centroids = Centroids.union(*[haz.centroids for haz in haz_list]) @@ -924,14 +1006,25 @@ def append(self, *others): attr_val_list = [getattr(haz, attr_name) for haz in haz_list_nonempty] if isinstance(attr_val_list[0], sparse.csr_matrix): # map sparse matrix onto centroids - setattr(self, attr_name, sparse.vstack([ - sparse.csr_matrix( - (matrix.data, cent_idx[matrix.indices], matrix.indptr), - shape=(matrix.shape[0], centroids.size) - ) - for matrix, cent_idx in zip(attr_val_list, hazcent_in_cent_idx_list) - ], format='csr')) - elif isinstance(attr_val_list[0], np.ndarray) and attr_val_list[0].ndim == 1: + setattr( + self, + attr_name, + sparse.vstack( + [ + sparse.csr_matrix( + (matrix.data, cent_idx[matrix.indices], matrix.indptr), + shape=(matrix.shape[0], centroids.size), + ) + for matrix, cent_idx in zip( + attr_val_list, hazcent_in_cent_idx_list + ) + ], + format="csr", + ), + ) + elif ( + isinstance(attr_val_list[0], np.ndarray) and attr_val_list[0].ndim == 1 + ): setattr(self, attr_name, np.hstack(attr_val_list)) elif isinstance(attr_val_list[0], list): setattr(self, attr_name, sum(attr_val_list, [])) @@ -974,13 +1067,16 @@ def concat(cls, haz_list): """ if len(haz_list) == 0: return cls() - haz_concat = haz_list[0].__class__(centroids=Centroids(lat=[], lon=[], - crs=haz_list[0].centroids.crs)) + haz_concat = haz_list[0].__class__( + centroids=Centroids(lat=[], lon=[], crs=haz_list[0].centroids.crs) + ) for attr_name, attr_val in vars(haz_list[0]).items(): # to save memory, only copy simple attributes like # "units" that are not explicitly handled by Hazard.append - if not (isinstance(attr_val, (list, np.ndarray, sparse.csr_matrix)) - or attr_name in ["centroids"]): + if not ( + isinstance(attr_val, (list, np.ndarray, sparse.csr_matrix)) + or attr_name in ["centroids"] + ): setattr(haz_concat, attr_name, copy.deepcopy(attr_val)) haz_concat.append(*haz_list) return haz_concat @@ -1025,7 +1121,6 @@ def change_centroids(self, centroids, threshold=u_coord.NEAREST_NEIGHBOR_THRESHO haz_new_cent = copy.deepcopy(self) haz_new_cent.centroids = centroids - new_cent_idx = u_coord.match_coordinates( self.centroids.coord, centroids.coord, threshold=threshold ) @@ -1046,11 +1141,14 @@ def change_centroids(self, centroids, threshold=u_coord.NEAREST_NEIGHBOR_THRESHO # re-assign attributes intensity and fraction for attr_name in ["intensity", "fraction"]: matrix = getattr(self, attr_name) - setattr(haz_new_cent, attr_name, - sparse.csr_matrix( - (matrix.data, new_cent_idx[matrix.indices], matrix.indptr), - shape=(matrix.shape[0], centroids.size) - )) + setattr( + haz_new_cent, + attr_name, + sparse.csr_matrix( + (matrix.data, new_cent_idx[matrix.indices], matrix.indptr), + shape=(matrix.shape[0], centroids.size), + ), + ) return haz_new_cent @@ -1066,7 +1164,10 @@ def centr_exp_col(self): in an exposures gdf. E.g. "centr_TC" """ - from climada.entity.exposures import INDICATOR_CENTR # pylint: disable=import-outside-toplevel + from climada.entity.exposures import ( + INDICATOR_CENTR, # pylint: disable=import-outside-toplevel + ) + # import outside toplevel is necessary for it not being circular return INDICATOR_CENTR + self.haz_type @@ -1097,10 +1198,12 @@ def get_mdr(self, cent_idx, impf): if impf.calc_mdr(0) == 0: mdr.data = impf.calc_mdr(mdr.data) else: - LOGGER.warning("Impact function id=%d has mdr(0) != 0." + LOGGER.warning( + "Impact function id=%d has mdr(0) != 0." 
"The mean damage ratio must thus be computed for all values of" "hazard intensity including 0 which can be very time consuming.", - impf.id) + impf.id, + ) mdr_array = impf.calc_mdr(mdr.toarray().ravel()).reshape(mdr.shape) mdr = sparse.csr_matrix(mdr_array) mdr_out = mdr[:, indices] diff --git a/climada/hazard/centroids/__init__.py b/climada/hazard/centroids/__init__.py index f746df302..530f47958 100755 --- a/climada/hazard/centroids/__init__.py +++ b/climada/hazard/centroids/__init__.py @@ -18,4 +18,5 @@ init centroids """ + from .centr import * diff --git a/climada/hazard/centroids/centr.py b/climada/hazard/centroids/centr.py index df57fbdc3..c1e8bb68b 100644 --- a/climada/hazard/centroids/centr.py +++ b/climada/hazard/centroids/centr.py @@ -20,43 +20,38 @@ """ import copy -from deprecation import deprecated import logging +import warnings from pathlib import Path from typing import Any, Literal, Union -import warnings -import h5py import cartopy import cartopy.crs as ccrs import cartopy.feature as cfeature import geopandas as gpd +import h5py import matplotlib.pyplot as plt import numpy as np import pandas as pd -from pyproj.crs.crs import CRS import rasterio +from deprecation import deprecated +from pyproj.crs.crs import CRS from shapely.geometry.point import Point -from climada.util.constants import DEF_CRS import climada.util.coordinates as u_coord +from climada.util.constants import DEF_CRS -__all__ = ['Centroids'] +__all__ = ["Centroids"] -PROJ_CEA = CRS.from_user_input({'proj': 'cea'}) +PROJ_CEA = CRS.from_user_input({"proj": "cea"}) LOGGER = logging.getLogger(__name__) -DEF_SHEET_NAME = 'centroids' -EXP_SPECIFIC_COLS = [ - 'value', - 'impf_', - 'centr_', - 'cover', - 'deductible' -] +DEF_SHEET_NAME = "centroids" +EXP_SPECIFIC_COLS = ["value", "impf_", "centr_", "cover", "deductible"] + -class Centroids(): +class Centroids: """Contains vector centroids as a GeoDataFrame Attributes @@ -106,52 +101,52 @@ def __init__( self.gdf = gpd.GeoDataFrame( data={ - 'geometry': gpd.points_from_xy(lon, lat, crs=crs), - 'region_id': region_id, - 'on_land': on_land, + "geometry": gpd.points_from_xy(lon, lat, crs=crs), + "region_id": region_id, + "on_land": on_land, **kwargs, } ) if isinstance(region_id, str): - LOGGER.info('Setting region id to %s level.', region_id) + LOGGER.info("Setting region id to %s level.", region_id) self.set_region_id(level=region_id, overwrite=True) if isinstance(on_land, str): - LOGGER.info('Setting on land from %s source.', on_land) + LOGGER.info("Setting on land from %s source.", on_land) self.set_on_land(source=on_land, overwrite=True) @property def lat(self): - """ Return latitudes """ + """Return latitudes""" return self.gdf.geometry.y.values @property def lon(self): - """ Return longitudes """ + """Return longitudes""" return self.gdf.geometry.x.values @property def geometry(self): - """ Return the geometry """ - return self.gdf['geometry'] + """Return the geometry""" + return self.gdf["geometry"] @property def on_land(self): - """ Get the on_land property """ - if self.gdf['on_land'].isna().all(): + """Get the on_land property""" + if self.gdf["on_land"].isna().all(): return None - return self.gdf['on_land'].values + return self.gdf["on_land"].values @property def region_id(self): - """ Get the assigned region_id """ - if self.gdf['region_id'].isna().all(): + """Get the assigned region_id""" + if self.gdf["region_id"].isna().all(): return None - return self.gdf['region_id'].values + return self.gdf["region_id"].values @property def crs(self): - """ Get the 
crs""" + """Get the crs""" return self.gdf.crs @property @@ -175,7 +170,7 @@ def coord(self): return np.stack([self.lat, self.lon], axis=1) def __eq__(self, other): - """ dunder method for Centroids comparison. + """dunder method for Centroids comparison. returns True if two centroids equal, False otherwise Parameters @@ -214,7 +209,7 @@ def to_default_crs(self, inplace=True): return self.to_crs(DEF_CRS, inplace=inplace) def to_crs(self, crs, inplace=False): - """ Project the current centroids to the desired crs + """Project the current centroids to the desired crs Parameters ---------- @@ -253,15 +248,15 @@ def from_geodataframe(cls, gdf): ------ ValueError """ - if (gdf.geom_type != 'Point').any(): + if (gdf.geom_type != "Point").any(): raise ValueError( - 'The inpute geodataframe contains geometries that are not points.' + "The inpute geodataframe contains geometries that are not points." ) # Don't forget to make a copy!! # This is a bit ugly, but avoids to recompute the geometries # in the init. For large datasets this saves computation time - centroids = cls(lat=[1], lon=[1]) #make "empty" centroids + centroids = cls(lat=[1], lon=[1]) # make "empty" centroids centroids.gdf = gdf.copy(deep=True) if gdf.crs is None: centroids.gdf.set_crs(DEF_CRS, inplace=True) @@ -290,22 +285,23 @@ def from_exposures(cls, exposures): ValueError """ col_names = [ - column for column in exposures.gdf.columns + column + for column in exposures.gdf.columns if not any(pattern in column for pattern in EXP_SPECIFIC_COLS) ] # Legacy behaviour # Exposures can be without geometry column - #TODO: remove once exposures is real geodataframe with geometry. - if 'geometry' in exposures.gdf.columns: + # TODO: remove once exposures is real geodataframe with geometry. + if "geometry" in exposures.gdf.columns: gdf = exposures.gdf[col_names] return cls.from_geodataframe(gdf) - if 'latitude' in exposures.gdf.columns and 'longitude' in exposures.gdf.columns: + if "latitude" in exposures.gdf.columns and "longitude" in exposures.gdf.columns: gdf = exposures.gdf[col_names] return cls( - lat=exposures.gdf['latitude'], - lon=exposures.gdf['longitude'], + lat=exposures.gdf["latitude"], + lon=exposures.gdf["longitude"], crs=exposures.crs, **dict(gdf.items()), ) @@ -337,13 +333,17 @@ def from_pnt_bounds(cls, points_bounds, res, crs=DEF_CRS): ------- Centroids """ - height, width, transform = u_coord.pts_to_raster_meta(points_bounds, (res, -res)) - return cls.from_meta({ - "crs": crs, - "width": width, - "height": height, - "transform": transform, - }) + height, width, transform = u_coord.pts_to_raster_meta( + points_bounds, (res, -res) + ) + return cls.from_meta( + { + "crs": crs, + "width": width, + "height": height, + "transform": transform, + } + ) def append(self, centr): """Append Centroids @@ -435,15 +435,16 @@ def select(self, reg_id=None, extent=None, sel_cen=None): Sub-selection of this object """ sel_cen_bool = sel_cen - if sel_cen is not None and sel_cen.dtype.kind == 'i': + if sel_cen is not None and sel_cen.dtype.kind == "i": # if needed, convert indices to bool sel_cen_bool = np.zeros(self.size, dtype=bool) sel_cen_bool[np.unique(sel_cen)] = True - sel_cen_mask = self.select_mask(sel_cen=sel_cen_bool, reg_id=reg_id, extent=extent) + sel_cen_mask = self.select_mask( + sel_cen=sel_cen_bool, reg_id=reg_id, extent=extent + ) return Centroids.from_geodataframe(self.gdf.iloc[sel_cen_mask]) - def select_mask(self, sel_cen=None, reg_id=None, extent=None): """Create mask of selected centroids @@ -473,10 +474,13 @@ def 
select_mask(self, sel_cen=None, reg_id=None, extent=None): lon_min, lon_max, lat_min, lat_max = extent lon_max += 360 if lon_min > lon_max else 0 lon_normalized = u_coord.lon_normalize( - self.lon.copy(), center=0.5 * (lon_min + lon_max)) + self.lon.copy(), center=0.5 * (lon_min + lon_max) + ) sel_cen &= ( - (lon_normalized >= lon_min) & (lon_normalized <= lon_max) & - (self.lat >= lat_min) & (self.lat <= lat_max) + (lon_normalized >= lon_min) + & (lon_normalized <= lon_max) + & (self.lat >= lat_min) + & (self.lat <= lat_max) ) return sel_cen @@ -500,7 +504,9 @@ def plot(self, *, axis=None, figsize=(9, 13), **kwargs): ax : cartopy.mpl.geoaxes.GeoAxes instance """ if axis == None: - fig, axis = plt.subplots(figsize=figsize, subplot_kw={"projection": ccrs.PlateCarree()}) + fig, axis = plt.subplots( + figsize=figsize, subplot_kw={"projection": ccrs.PlateCarree()} + ) if type(axis) != cartopy.mpl.geoaxes.GeoAxes: raise AttributeError( f"The axis provided is of type: {type(axis)} " @@ -518,7 +524,7 @@ def plot(self, *, axis=None, figsize=(9, 13), **kwargs): self.gdf.plot(ax=axis, transform=ccrs.PlateCarree(), **kwargs) return axis - def set_region_id(self, level='country', overwrite=False): + def set_region_id(self, level="country", overwrite=False): """Set region_id as country ISO numeric code attribute for every pixel or point. Parameters @@ -531,18 +537,19 @@ def set_region_id(self, level='country', overwrite=False): only if region_id is missing (None). Default: False """ if overwrite or self.region_id is None: - LOGGER.debug('Setting region_id %s points.', str(self.size)) - if level == 'country': + LOGGER.debug("Setting region_id %s points.", str(self.size)) + if level == "country": ne_geom = self._ne_crs_geom() - self.gdf['region_id'] = u_coord.get_country_code( - ne_geom.y.values, ne_geom.x.values, + self.gdf["region_id"] = u_coord.get_country_code( + ne_geom.y.values, + ne_geom.x.values, ) else: raise NotImplementedError( - 'The region id can only be assigned for countries so far' + "The region id can only be assigned for countries so far" ) - def set_on_land(self, source='natural_earth', overwrite=False): + def set_on_land(self, source="natural_earth", overwrite=False): """Set on_land attribute for every pixel or point. Parameters @@ -556,15 +563,15 @@ def set_on_land(self, source='natural_earth', overwrite=False): only if on_land is missing (None). Default: False """ if overwrite or self.on_land is None: - LOGGER.debug('Setting on_land %s points.', str(self.lat.size)) - if source=='natural_earth': + LOGGER.debug("Setting on_land %s points.", str(self.lat.size)) + if source == "natural_earth": ne_geom = self._ne_crs_geom() - self.gdf['on_land'] = u_coord.coord_on_land( + self.gdf["on_land"] = u_coord.coord_on_land( ne_geom.y.values, ne_geom.x.values ) else: raise NotImplementedError( - 'The on land variables can only be automatically assigned using natural earth.' + "The on land variables can only be automatically assigned using natural earth." 
) def get_pixel_shapes(self, res=None, **kwargs): @@ -596,8 +603,10 @@ def get_pixel_shapes(self, res=None, **kwargs): return geom.buffer( # resolution=1, cap_style=3: squared buffers # https://shapely.readthedocs.io/en/latest/manual.html#object.buffer - distance=res / 2, resolution=1, cap_style=3, - # reset CRS (see above) + distance=res / 2, + resolution=1, + cap_style=3, + # reset CRS (see above) ).set_crs(self.crs) def get_area_pixel(self, min_resol=1.0e-8): @@ -616,10 +625,10 @@ def get_area_pixel(self, min_resol=1.0e-8): areapixels : np.array Area of each pixel in square meters. """ - LOGGER.debug('Computing pixel area for %d centroids.', self.size) + LOGGER.debug("Computing pixel area for %d centroids.", self.size) xy_pixels = self.get_pixel_shapes(min_resol=min_resol) if PROJ_CEA != xy_pixels.crs: - xy_pixels = xy_pixels.to_crs(crs={'proj': 'cea'}) + xy_pixels = xy_pixels.to_crs(crs={"proj": "cea"}) return xy_pixels.area.values def get_closest_point(self, x_lon, y_lat): @@ -689,7 +698,10 @@ def get_dist_coast(self, signed=False, precomputed=True): ) ne_geom = self._ne_crs_geom() return u_coord.dist_to_coast_nasa( - ne_geom.y.values, ne_geom.x.values, highres=True, signed=signed, + ne_geom.y.values, + ne_geom.x.values, + highres=True, + signed=signed, ) def get_meta(self, resolution=None): @@ -718,10 +730,10 @@ def get_meta(self, resolution=None): (resolution, -resolution), ) meta = { - 'crs': self.crs, - 'height': rows, - 'width': cols, - 'transform': ras_trans, + "crs": self.crs, + "height": rows, + "width": cols, + "transform": ras_trans, } return meta @@ -730,9 +742,19 @@ def get_meta(self, resolution=None): ## @classmethod - def from_raster_file(cls, file_name, src_crs=None, window=None, geometry=None, - dst_crs=None, transform=None, width=None, height=None, - resampling=rasterio.warp.Resampling.nearest, return_meta=False): + def from_raster_file( + cls, + file_name, + src_crs=None, + window=None, + geometry=None, + dst_crs=None, + transform=None, + width=None, + height=None, + resampling=rasterio.warp.Resampling.nearest, + return_meta=False, + ): """Create a new Centroids object from a raster file Select region using window or geometry. Reproject input by providing @@ -770,8 +792,16 @@ def from_raster_file(cls, file_name, src_crs=None, window=None, geometry=None, Raster meta (height, width, transform, crs). """ meta, _ = u_coord.read_raster( - file_name, [1], src_crs, window, geometry, dst_crs, - transform, width, height, resampling, + file_name, + [1], + src_crs, + window, + geometry, + dst_crs, + transform, + width, + height, + resampling, ) centr = cls.from_meta(meta) return (centr, meta) if return_meta else centr @@ -790,7 +820,7 @@ def from_meta(cls, meta): Centroid Centroids initialized for raster described by meta. """ - crs = meta['crs'] + crs = meta["crs"] lat, lon = _meta_to_lat_lon(meta) return cls(lon=lon, lat=lat, crs=crs) @@ -845,11 +875,10 @@ def write_csv(self, file_path): file_path : str, Path absolute or relative file path and name to write to """ - file_path = Path(file_path).with_suffix('.csv') - LOGGER.info('Writing %s', file_path) + file_path = Path(file_path).with_suffix(".csv") + LOGGER.info("Writing %s", file_path) self._centroids_to_dataframe().to_csv(file_path, index=False) - @classmethod def from_excel(cls, file_path, sheet_name=None): """Generate a new centroids object from an excel file with column names in var_names. 
@@ -868,7 +897,7 @@ def from_excel(cls, file_path, sheet_name=None): Centroids with data from the given excel file """ if sheet_name is None: - sheet_name = 'centroids' + sheet_name = "centroids" df = pd.read_excel(file_path, sheet_name) return cls._from_dataframe(df) @@ -880,13 +909,15 @@ def write_excel(self, file_path): file_path : str, Path absolute or relative file path and name to write to """ - file_path = Path(file_path).with_suffix('.xlsx') - LOGGER.info('Writing %s', file_path) + file_path = Path(file_path).with_suffix(".xlsx") + LOGGER.info("Writing %s", file_path) self._centroids_to_dataframe().to_excel( - file_path, sheet_name=DEF_SHEET_NAME, index=False, + file_path, + sheet_name=DEF_SHEET_NAME, + index=False, ) - def write_hdf5(self, file_name, mode='w'): + def write_hdf5(self, file_name, mode="w"): """Write data frame and metadata in hdf5 format Parameters @@ -894,7 +925,7 @@ def write_hdf5(self, file_name, mode='w'): file_name : str (path and) file name to write to. """ - LOGGER.info('Writing %s', file_name) + LOGGER.info("Writing %s", file_name) store = pd.HDFStore(file_name, mode=mode) pandas_df = pd.DataFrame(self.gdf) for col in pandas_df.columns: @@ -905,15 +936,14 @@ def write_hdf5(self, file_name, mode='w'): with warnings.catch_warnings(): warnings.simplefilter("ignore", category=pd.errors.PerformanceWarning) # Write dataframe - store.put('centroids', pandas_df) + store.put("centroids", pandas_df) - store.get_storer('centroids').attrs.metadata = { - 'crs': CRS.from_user_input(self.crs).to_wkt() + store.get_storer("centroids").attrs.metadata = { + "crs": CRS.from_user_input(self.crs).to_wkt() } store.close() - @classmethod def from_hdf5(cls, file_name): """Create a centroids object from a HDF5 file. @@ -935,17 +965,17 @@ def from_hdf5(cls, file_name): if not Path(file_name).is_file(): raise FileNotFoundError(str(file_name)) try: - with pd.HDFStore(file_name, mode='r') as store: - metadata = store.get_storer('centroids').attrs.metadata + with pd.HDFStore(file_name, mode="r") as store: + metadata = store.get_storer("centroids").attrs.metadata # in previous versions of CLIMADA and/or geopandas, # the CRS was stored in '_crs'/'crs' - crs = metadata.get('crs') - gdf = gpd.GeoDataFrame(store['centroids'], crs=crs) + crs = metadata.get("crs") + gdf = gpd.GeoDataFrame(store["centroids"], crs=crs) except TypeError: - with h5py.File(file_name, 'r') as data: - gdf = cls._gdf_from_legacy_hdf5(data.get('centroids')) + with h5py.File(file_name, "r") as data: + gdf = cls._gdf_from_legacy_hdf5(data.get("centroids")) except KeyError: - with h5py.File(file_name, 'r') as data: + with h5py.File(file_name, "r") as data: gdf = cls._gdf_from_legacy_hdf5(data) return cls.from_geodataframe(gdf) @@ -956,37 +986,37 @@ def from_hdf5(cls, file_name): @classmethod def _from_dataframe(cls, df): - if 'crs' in df.columns: - crs = df['crs'].iloc[0] + if "crs" in df.columns: + crs = df["crs"].iloc[0] else: - LOGGER.info("No 'crs' column provided in file, setting CRS to WGS84 default.") + LOGGER.info( + "No 'crs' column provided in file, setting CRS to WGS84 default." 
+ ) crs = DEF_CRS extra_values = { - col: df[col] - for col in df.columns - if col not in ['lat', 'lon', 'crs'] + col: df[col] for col in df.columns if col not in ["lat", "lon", "crs"] } - return cls(lat=df['lat'], lon=df['lon'], **extra_values, crs=crs) + return cls(lat=df["lat"], lon=df["lon"], **extra_values, crs=crs) @staticmethod def _gdf_from_legacy_hdf5(data): crs = DEF_CRS - if data.get('crs'): - crs = u_coord.to_crs_user_input(data.get('crs')[0]) - if data.get('lat') and data.get('lat').size: - latitude = np.array(data.get('lat')) - longitude = np.array(data.get('lon')) - elif data.get('latitude') and data.get('latitude').size: - latitude = np.array(data.get('latitude')) - longitude = np.array(data.get('longitude')) + if data.get("crs"): + crs = u_coord.to_crs_user_input(data.get("crs")[0]) + if data.get("lat") and data.get("lat").size: + latitude = np.array(data.get("lat")) + longitude = np.array(data.get("lon")) + elif data.get("latitude") and data.get("latitude").size: + latitude = np.array(data.get("latitude")) + longitude = np.array(data.get("longitude")) else: - centr_meta = data.get('meta') + centr_meta = data.get("meta") meta = dict() - meta['crs'] = crs + meta["crs"] = crs for key, value in centr_meta.items(): - if key != 'transform': + if key != "transform": meta[key] = value[0] else: meta[key] = rasterio.Affine(*value) @@ -994,9 +1024,9 @@ def _gdf_from_legacy_hdf5(data): extra_values = {} for centr_name in data.keys(): - if centr_name not in ('crs', 'lat', 'lon', 'meta', 'latitude', 'longitude'): + if centr_name not in ("crs", "lat", "lon", "meta", "latitude", "longitude"): values = np.array(data.get(centr_name)) - if latitude.size != 0 and values.size != 0 : + if latitude.size != 0 and values.size != 0: extra_values[centr_name] = values return gpd.GeoDataFrame( @@ -1006,10 +1036,10 @@ def _gdf_from_legacy_hdf5(data): @classmethod def _legacy_from_excel(cls, file_name, var_names): - LOGGER.info('Reading %s', file_name) + LOGGER.info("Reading %s", file_name) try: - df = pd.read_excel(file_name, var_names['sheet_name']) - df = df.rename(columns=var_names['col_name']) + df = pd.read_excel(file_name, var_names["sheet_name"]) + df = df.rename(columns=var_names["col_name"]) except KeyError as err: raise KeyError("Not existing variable: %s" % str(err)) from err return cls._from_dataframe(df) @@ -1023,10 +1053,10 @@ def _centroids_to_dataframe(self): df : DataFrame """ df = pd.DataFrame(self.gdf) - df['lon'] = self.gdf['geometry'].x - df['lat'] = self.gdf['geometry'].y - df['crs'] = CRS.from_user_input(self.crs).to_wkt() - df = df.drop(['geometry'], axis=1) + df["lon"] = self.gdf["geometry"].x + df["lat"] = self.gdf["geometry"].y + df["crs"] = CRS.from_user_input(self.crs).to_wkt() + df = df.drop(["geometry"], axis=1) return df def _ne_crs_geom(self): @@ -1045,57 +1075,77 @@ def _ne_crs_geom(self): ## @classmethod - @deprecated(details="Reading Centroids data from matlab files is not supported anymore." - "This method has been removed with climada 5.0") + @deprecated( + details="Reading Centroids data from matlab files is not supported anymore." + "This method has been removed with climada 5.0" + ) def from_mat(cls, file_name, var_names=None): """Reading Centroids data from matlab files is not supported anymore. 
This method has been removed with climada 5.0""" - raise NotImplementedError("You are suggested to use an old version of climada (<=4.*) and" - " convert the file to hdf5 format.") + raise NotImplementedError( + "You are suggested to use an old version of climada (<=4.*) and" + " convert the file to hdf5 format." + ) @staticmethod @deprecated(details="This method has been removed with climada 5.0") def from_base_grid(land=False, res_as=360, base_file=None): """This method has been removed with climada 5.0""" - raise NotImplementedError("Create the Centroids from a custom base file or from Natural" - " Earth (files are available in Climada, look up ``climada.util" - ".constants.NATEARTH_CENTROIDS`` for their location)") + raise NotImplementedError( + "Create the Centroids from a custom base file or from Natural" + " Earth (files are available in Climada, look up ``climada.util" + ".constants.NATEARTH_CENTROIDS`` for their location)" + ) @classmethod - @deprecated(details="This method will be removed in a future version." - " Simply use the constructor instead.") + @deprecated( + details="This method will be removed in a future version." + " Simply use the constructor instead." + ) def from_lat_lon(cls, lat, lon, crs="EPSG:4326"): """deprecated, use the constructor instead""" return cls(lat=lat, lon=lon, crs=crs) - @deprecated(details="This method is futile and will be removed in a future version." - " `Centroids.get_area_pixel` can be run without initialization.") + @deprecated( + details="This method is futile and will be removed in a future version." + " `Centroids.get_area_pixel` can be run without initialization." + ) def set_area_pixel(self, min_resol=1e-08, scheduler=None): """deprecated, obsolete""" - @deprecated(details="This method is futile and will be removed in a future version." - " `Centroids.get_area_pixel` can be run without initialization.") + @deprecated( + details="This method is futile and will be removed in a future version." + " `Centroids.get_area_pixel` can be run without initialization." + ) def set_area_approx(self, min_resol=1e-08): """deprecated, obsolete""" - @deprecated(details="This method is futile and will be removed in a future version." - " `Centroids.get_dist_coast` can be run without initialization.") + @deprecated( + details="This method is futile and will be removed in a future version." + " `Centroids.get_dist_coast` can be run without initialization." + ) def set_dist_coast(self, signed=False, precomputed=False, scheduler=None): """deprecated, obsolete""" - @deprecated(details="This method has no effect and will be removed in a future version." - " In the current version of climada the geometry points of a `Centroids` object" - " cannot be removed as they are the backbone of the Centroids' GeoDataFrame.") + @deprecated( + details="This method has no effect and will be removed in a future version." + " In the current version of climada the geometry points of a `Centroids` object" + " cannot be removed as they are the backbone of the Centroids' GeoDataFrame." + ) def empty_geometry_points(self): - """"deprecated, has no effect, which may be unexpected: no geometry points will be removed, + """ "deprecated, has no effect, which may be unexpected: no geometry points will be removed, the centroids' GeoDataFrame is built on them! """ - @deprecated(details="This method has no effect and will be removed in a future version.") + @deprecated( + details="This method has no effect and will be removed in a future version." 
+ ) def set_meta_to_lat_lon(self): """deprecated, has no effect""" - @deprecated(details="This method has no effect and will be removed in a future version.") + @deprecated( + details="This method has no effect and will be removed in a future version." + ) def set_lat_lon_to_meta(self, min_resol=1e-08): """deprecated, has no effect""" @@ -1115,5 +1165,7 @@ def _meta_to_lat_lon(meta): longitudes : np.ndarray Longitudinal coordinates of pixel centers. """ - xgrid, ygrid = u_coord.raster_to_meshgrid(meta['transform'], meta['width'], meta['height']) + xgrid, ygrid = u_coord.raster_to_meshgrid( + meta["transform"], meta["width"], meta["height"] + ) return ygrid.ravel(), xgrid.ravel() diff --git a/climada/hazard/centroids/test/test_centr.py b/climada/hazard/centroids/test/test_centr.py index 745e544d5..a41060bae 100644 --- a/climada/hazard/centroids/test/test_centr.py +++ b/climada/hazard/centroids/test/test_centr.py @@ -18,63 +18,66 @@ Test CentroidsVector and CentroidsRaster classes. """ + +import itertools import unittest -from unittest.mock import patch from pathlib import Path +from unittest.mock import patch +import geopandas as gpd import numpy as np import pandas as pd -import geopandas as gpd -import shapely -import itertools import rasterio +import shapely +from cartopy.io import shapereader from pyproj.crs.crs import CRS +from rasterio import Affine from rasterio.windows import Window from shapely.geometry.point import Point -from cartopy.io import shapereader - +import climada.util.coordinates as u_coord from climada import CONFIG +from climada.entity import Exposures from climada.hazard.centroids.centr import Centroids from climada.util.constants import DEF_CRS, HAZ_DEMO_FL -import climada.util.coordinates as u_coord -from climada.entity import Exposures -from rasterio import Affine - DATA_DIR = CONFIG.hazard.test_data.dir() # Note: the coordinates are not directly on the cities, the region id and on land # otherwise do not work correctly. It is only a close point. 
-LATLON = np.array([ - [-21.1736, -175.1883], # Tonga, Nuku'alofa, TON, 776 - [-18.133, 178.433], # Fidji, Suva, FJI, 242 IN WATER IN NATURAL EARTH - [-38.4689, 177.8642], # New-Zealand, Te Karaka, NZL, 554 - [69.6833, 18.95], # Norway, Tromso, NOR, 578 IN WATER IN NATURAL EARTH - [78.84422, 20.82842], # Norway, Svalbard, NOR, 578 - [1, 1], # Ocean, 0 (0,0 is onland in Natural earth for testing reasons) - [-77.85, 166.6778], # Antarctica, McMurdo station, ATA, 010 - [-0.25, -78.5833] # Ecuador, Quito, ECU, 218 -]) - -VEC_LAT = LATLON[:,0] -VEC_LON = LATLON[:,1] +LATLON = np.array( + [ + [-21.1736, -175.1883], # Tonga, Nuku'alofa, TON, 776 + [-18.133, 178.433], # Fidji, Suva, FJI, 242 IN WATER IN NATURAL EARTH + [-38.4689, 177.8642], # New-Zealand, Te Karaka, NZL, 554 + [69.6833, 18.95], # Norway, Tromso, NOR, 578 IN WATER IN NATURAL EARTH + [78.84422, 20.82842], # Norway, Svalbard, NOR, 578 + [1, 1], # Ocean, 0 (0,0 is onland in Natural earth for testing reasons) + [-77.85, 166.6778], # Antarctica, McMurdo station, ATA, 010 + [-0.25, -78.5833], # Ecuador, Quito, ECU, 218 + ] +) + +VEC_LAT = LATLON[:, 0] +VEC_LON = LATLON[:, 1] ON_LAND = np.array([True, False, True, False, True, False, True, True]) REGION_ID = np.array([776, 0, 554, 0, 578, 0, 10, 218]) -TEST_CRS = 'EPSG:4326' -ALT_CRS = 'epsg:32632' # UTM zone 32N (Central Europe, 6-12°E) +TEST_CRS = "EPSG:4326" +ALT_CRS = "epsg:32632" # UTM zone 32N (Central Europe, 6-12°E) + class TestCentroidsData(unittest.TestCase): - """ Test class for initialisation and value based creation of Centroids objects""" + """Test class for initialisation and value based creation of Centroids objects""" + def setUp(self): self.lat = np.array([-10, 0, 10]) self.lon = np.array([-170, -150, -130]) - self.region_id = np.array([1, 2, 3]) + self.region_id = np.array([1, 2, 3]) self.on_land = np.array([True, False, False]) - self.crs = 'epsg:32632' - self.centr = Centroids(lat=VEC_LAT,lon=VEC_LON) + self.crs = "epsg:32632" + self.centr = Centroids(lat=VEC_LAT, lon=VEC_LON) def test_centroids_check_pass(self): """Test vector data in Centroids""" @@ -86,13 +89,15 @@ def test_centroids_check_pass(self): [VEC_LON.min(), VEC_LAT.min(), VEC_LON.max(), VEC_LAT.max()], ) - self.assertIsInstance(centr,Centroids) + self.assertIsInstance(centr, Centroids) self.assertIsInstance(centr.lat, np.ndarray) self.assertIsInstance(centr.lon, np.ndarray) self.assertIsInstance(centr.coord, np.ndarray) self.assertTrue(np.array_equal(centr.lat, VEC_LAT)) self.assertTrue(np.array_equal(centr.lon, VEC_LON)) - self.assertTrue(np.array_equal(centr.coord, np.array([VEC_LAT, VEC_LON]).transpose())) + self.assertTrue( + np.array_equal(centr.coord, np.array([VEC_LAT, VEC_LON]).transpose()) + ) self.assertEqual(centr.size, VEC_LON.size) def test_init_pass(self): @@ -104,32 +109,44 @@ def test_init_pass(self): self.assertTrue(u_coord.equal_crs(self.centr.crs, DEF_CRS)) # Creating Centroids with additional attributes - centroids = Centroids(lat=VEC_LAT, lon=VEC_LON, - region_id=REGION_ID, on_land=ON_LAND) + centroids = Centroids( + lat=VEC_LAT, lon=VEC_LON, region_id=REGION_ID, on_land=ON_LAND + ) # Checking additional attributes np.testing.assert_array_equal(centroids.region_id, REGION_ID) np.testing.assert_array_equal(centroids.on_land, ON_LAND) def test_init_defaults(self): - ''' Checking default values for Centroids''' - centroids = Centroids(lat=VEC_LAT,lon=VEC_LON) + """Checking default values for Centroids""" + centroids = Centroids(lat=VEC_LAT, lon=VEC_LON) # Checking defaults: nothing set 
for region_id, on_land self.assertFalse(centroids.region_id) self.assertFalse(centroids.on_land) # Guarantee a no-default TypeError for lon/lat - with self.assertRaises(TypeError): Centroids() + with self.assertRaises(TypeError): + Centroids() def test_init_properties(self): - """ Guarantee that Centroid objects have at least the properties: """ - properties = ['gdf','lon','lat','geometry', - 'on_land','region_id','crs', - 'shape','size','total_bounds','coord'] - centroids = Centroids(lat=[],lon=[]) - [self.assertTrue(hasattr(centroids,prop)) for prop in properties] + """Guarantee that Centroid objects have at least the properties:""" + properties = [ + "gdf", + "lon", + "lat", + "geometry", + "on_land", + "region_id", + "crs", + "shape", + "size", + "total_bounds", + "coord", + ] + centroids = Centroids(lat=[], lon=[]) + [self.assertTrue(hasattr(centroids, prop)) for prop in properties] def test_init_kwargs(self): - """ Test default crs and kwargs forwarding """ + """Test default crs and kwargs forwarding""" centr = Centroids( lat=VEC_LAT, lon=VEC_LON, @@ -142,32 +159,36 @@ def test_init_kwargs(self): # make sure kwargs are properly forwarded to centroids.gdf np.random.seed(1000) - randommask = np.random.choice([True,False],size=len(VEC_LON)) - centroids = Centroids(lat=VEC_LAT,lon=VEC_LON,masked=randommask,ones=1) - self.assertTrue(hasattr(centroids.gdf,'masked')) - self.assertTrue(hasattr(centroids.gdf,'ones')) - np.testing.assert_array_equal(randommask,centroids.gdf.masked) - self.assertEqual(sum(centroids.gdf.ones),len(VEC_LON)) + randommask = np.random.choice([True, False], size=len(VEC_LON)) + centroids = Centroids(lat=VEC_LAT, lon=VEC_LON, masked=randommask, ones=1) + self.assertTrue(hasattr(centroids.gdf, "masked")) + self.assertTrue(hasattr(centroids.gdf, "ones")) + np.testing.assert_array_equal(randommask, centroids.gdf.masked) + self.assertEqual(sum(centroids.gdf.ones), len(VEC_LON)) def test_from_meta_pass(self): - expected_lon = np.array([-30.0, -20.0, -10.0]*3) - expected_lat = np.repeat([30.0, 20.0, 10.0],3) + expected_lon = np.array([-30.0, -20.0, -10.0] * 3) + expected_lat = np.repeat([30.0, 20.0, 10.0], 3) # Check metadata meta = dict( crs=DEF_CRS, height=3, width=3, transform=Affine( - 10, 0, -35, - 0, -10, 35, + 10, + 0, + -35, + 0, + -10, + 35, ), ) centroids = Centroids.from_meta(meta) # check created object - np.testing.assert_array_equal(centroids.lon,expected_lon) - np.testing.assert_array_equal(centroids.lat,expected_lat) - self.assertEqual(centroids.crs,DEF_CRS) + np.testing.assert_array_equal(centroids.lon, expected_lon) + np.testing.assert_array_equal(centroids.lat, expected_lat) + self.assertEqual(centroids.crs, DEF_CRS) # generally we assume that from_meta does not set region_ids and on_land flags self.assertFalse(centroids.region_id) self.assertFalse(centroids.on_land) @@ -175,13 +196,17 @@ def test_from_meta_pass(self): def test_from_meta(self): """Test from_meta""" meta_ref = { - 'width': 10, - 'height': 8, - 'transform': rasterio.Affine( - 0.6, 0, -0.1, - 0, -0.6, 0.3, + "width": 10, + "height": 8, + "transform": rasterio.Affine( + 0.6, + 0, + -0.1, + 0, + -0.6, + 0.3, ), - 'crs': DEF_CRS, + "crs": DEF_CRS, } lon_ref = np.array([0.2, 0.8, 1.4, 2.0, 2.6, 3.2, 3.8, 4.4, 5.0, 5.6]) @@ -195,22 +220,24 @@ def test_from_meta(self): self.assertEqual(meta_ref["height"], meta["height"]) np.testing.assert_allclose(meta_ref["transform"], meta["transform"]) - centr = Centroids.from_meta( - Centroids(lat=lat_ref, lon=lon_ref).get_meta() - ) + centr = 
Centroids.from_meta(Centroids(lat=lat_ref, lon=lon_ref).get_meta()) np.testing.assert_allclose(lat_ref, centr.lat) np.testing.assert_allclose(lon_ref, centr.lon) # `get_meta` enforces same resolution in x and y, and y-coordinates are decreasing. # For other cases, `from_meta` needs to be checked manually. meta_ref = { - 'width': 4, - 'height': 5, - 'transform': rasterio.Affine( - 0.5, 0, 0.2, - 0, 0.6, -0.7, + "width": 4, + "height": 5, + "transform": rasterio.Affine( + 0.5, + 0, + 0.2, + 0, + 0.6, + -0.7, ), - 'crs': DEF_CRS, + "crs": DEF_CRS, } lon_ref = np.array([0.45, 0.95, 1.45, 1.95]) lat_ref = np.array([-0.4, 0.2, 0.8, 1.4, 2.0]) @@ -220,7 +247,6 @@ def test_from_meta(self): np.testing.assert_allclose(lat_ref, centr.lat) np.testing.assert_allclose(lon_ref, centr.lon) - def test_from_pnt_bounds(self): """Test from_pnt_bounds""" width, height = 26, 51 @@ -230,21 +256,25 @@ def test_from_pnt_bounds(self): self.assertTrue(u_coord.equal_crs(centr.crs, DEF_CRS)) self.assertEqual(centr.size, width * height) np.testing.assert_allclose([5.0, 5.2, 5.0], centr.lon[[0, 1, width]], atol=0.1) - np.testing.assert_allclose([10.0, 10.0, 9.8], centr.lat[[0, 1, width]], atol=0.1) + np.testing.assert_allclose( + [10.0, 10.0, 9.8], centr.lat[[0, 1, width]], atol=0.1 + ) # generally we assume that from_meta does not set region_ids and on_land flags self.assertFalse(centr.region_id) self.assertFalse(centr.on_land) + class TestCentroidsTransformation(unittest.TestCase): - """ Test class for coordinate transformations of Centroid objects + """Test class for coordinate transformations of Centroid objects and modifications using set_ methods""" + def setUp(self): self.lat = np.array([-10, 0, 10]) self.lon = np.array([-170, -150, -130]) - self.region_id = np.array([1, 2, 3]) + self.region_id = np.array([1, 2, 3]) self.on_land = np.array([True, False, False]) - self.crs = 'epsg:32632' - self.centr = Centroids(lat=VEC_LAT,lon=VEC_LON,crs=TEST_CRS) + self.crs = "epsg:32632" + self.centr = Centroids(lat=VEC_LAT, lon=VEC_LON, crs=TEST_CRS) def test_to_default_crs(self): # Creating Centroids with non-default CRS and @@ -255,29 +285,33 @@ def test_to_default_crs(self): # make sure CRS is DEF_CRS after transformation self.assertTrue(u_coord.equal_crs(centroids.crs, DEF_CRS)) # Checking that modification actually took place - [self.assertNotEqual(x-y,0) for x,y in zip(centroids.lon,VEC_LON)] - [self.assertNotEqual(x-y,0) for x,y in zip(centroids.lat,VEC_LAT) if not x == 0] + [self.assertNotEqual(x - y, 0) for x, y in zip(centroids.lon, VEC_LON)] + [ + self.assertNotEqual(x - y, 0) + for x, y in zip(centroids.lat, VEC_LAT) + if not x == 0 + ] def test_to_default_crs_not_inplace(self): centroids = Centroids(lat=VEC_LAT, lon=VEC_LON, crs=ALT_CRS) newcentr = centroids.to_default_crs(inplace=False) # make sure that new object has been created - self.assertIsNot(centroids,newcentr) - self.assertIsInstance(newcentr,Centroids) + self.assertIsNot(centroids, newcentr) + self.assertIsInstance(newcentr, Centroids) ## compare with inplace transformation centroids.to_default_crs() - np.testing.assert_array_equal(centroids.lat,newcentr.lat) - np.testing.assert_array_equal(centroids.lon,newcentr.lon) + np.testing.assert_array_equal(centroids.lat, newcentr.lat) + np.testing.assert_array_equal(centroids.lon, newcentr.lon) def test_to_crs(self): # Creating Centroids with default CRS centroids = Centroids(lat=self.lat, lon=self.lon, crs=DEF_CRS) # Transforming to another CRS - new_crs = 'epsg:3857' + new_crs = "epsg:3857" 
transformed_centroids = centroids.to_crs(new_crs) - self.assertIsNot(centroids,transformed_centroids) + self.assertIsNot(centroids, transformed_centroids) self.assertFalse(centroids == transformed_centroids) # Checking CRS string after transformation @@ -285,22 +319,22 @@ def test_to_crs(self): self.assertTrue(u_coord.equal_crs(centroids.crs, DEF_CRS)) # Checking correctness of transformation - expected_lat = np.array([-1118889.974858, 0., 1118889.9748585]) + expected_lat = np.array([-1118889.974858, 0.0, 1118889.9748585]) expected_lon = np.array([-18924313.434857, -16697923.618991, -14471533.803126]) np.testing.assert_array_almost_equal(transformed_centroids.lat, expected_lat) np.testing.assert_array_almost_equal(transformed_centroids.lon, expected_lon) def test_to_crs_inplace(self): - centroids = Centroids(lat=self.lat,lon=self.lon,crs=DEF_CRS) - new_crs = 'epsg:3857' + centroids = Centroids(lat=self.lat, lon=self.lon, crs=DEF_CRS) + new_crs = "epsg:3857" transformed_centroids = centroids.to_crs(new_crs) # inplace transforming to another CRS - centroids.to_crs(new_crs,inplace=True) + centroids.to_crs(new_crs, inplace=True) self.assertTrue(centroids == transformed_centroids) - expected_lat = np.array([-1118889.974858, 0., 1118889.9748585]) + expected_lat = np.array([-1118889.974858, 0.0, 1118889.9748585]) expected_lon = np.array([-18924313.434857, -16697923.618991, -14471533.803126]) np.testing.assert_array_almost_equal(centroids.lat, expected_lat) np.testing.assert_array_almost_equal(centroids.lon, expected_lon) @@ -321,14 +355,14 @@ def test_set_on_land_pass(self): self.centr.set_on_land() np.testing.assert_array_equal(self.centr.on_land, ON_LAND) - centroids = Centroids(lat=VEC_LAT, lon=VEC_LON, on_land='natural_earth') + centroids = Centroids(lat=VEC_LAT, lon=VEC_LON, on_land="natural_earth") np.testing.assert_array_equal(centroids.on_land, ON_LAND) def test_set_on_land_implementationerror(self): - centroids = Centroids(lat=self.lat,lon=self.lon) + centroids = Centroids(lat=self.lat, lon=self.lon) with self.assertRaises(NotImplementedError): - centroids.set_on_land(source='satellite',overwrite=True) + centroids.set_on_land(source="satellite", overwrite=True) def test_set_on_land_raster(self): """Test set_on_land""" @@ -341,7 +375,7 @@ def test_set_region_id_pass(self): self.centr.set_region_id() np.testing.assert_array_equal(self.centr.region_id, REGION_ID) - centroids = Centroids(lat=VEC_LAT, lon=VEC_LON, region_id='country') + centroids = Centroids(lat=VEC_LAT, lon=VEC_LON, region_id="country") np.testing.assert_array_equal(centroids.region_id, REGION_ID) def test_set_region_id_raster(self): @@ -352,10 +386,10 @@ def test_set_region_id_raster(self): self.assertTrue(np.array_equal(np.unique(centr_ras.region_id), np.array([862]))) def test_set_region_id_implementationerror(self): - centroids = Centroids(lat=self.lat,lon=self.lon) + centroids = Centroids(lat=self.lat, lon=self.lon) with self.assertRaises(NotImplementedError): - centroids.set_region_id(level='continent',overwrite=True) + centroids.set_region_id(level="continent", overwrite=True) def test_set_geometry_points_pass(self): """Test set_geometry_points""" @@ -373,10 +407,10 @@ class TestCentroidsReaderWriter(unittest.TestCase): def test_from_csv_def_crs(self): """Read a centroid csv file correctly and use default CRS.""" # Create temporary csv file containing centroids data - tmpfile = Path('test_write_csv.csv') + tmpfile = Path("test_write_csv.csv") lat = np.array([0, 90, -90, 0, 0]) lon = np.array([0, 0, 0, 180, -180]) - df 
= pd.DataFrame({'lat': lat, 'lon': lon}) + df = pd.DataFrame({"lat": lat, "lon": lon}) df.to_csv(tmpfile, index=False) # Read centroids using from_csv method @@ -392,13 +426,15 @@ def test_from_csv_def_crs(self): def test_from_csv(self): """Read a centroid csv file which contains CRS information.""" - tmpfile = Path('test_write_csv.csv') + tmpfile = Path("test_write_csv.csv") lat = np.array([0, 20048966.1, -20048966, 0, 0]) lon = np.array([0, 0, 0, 20037508.34, -20037508.34]) region_id = np.array([1, 2, 3, 4, 5]) on_land = np.array([True, False, False, True, True]) - df = pd.DataFrame({'lat': lat, 'lon': lon, 'region_id': region_id, 'on_land': on_land}) - df['crs'] = CRS.from_user_input(3857).to_wkt() + df = pd.DataFrame( + {"lat": lat, "lon": lon, "region_id": region_id, "on_land": on_land} + ) + df["crs"] = CRS.from_user_input(3857).to_wkt() df.to_csv(tmpfile, index=False) # Read centroids using from_csv method @@ -407,7 +443,7 @@ def test_from_csv(self): # Test attributes np.testing.assert_array_equal(centroids.lat, lat) np.testing.assert_array_equal(centroids.lon, lon) - self.assertEqual(centroids.crs, 'epsg:3857') + self.assertEqual(centroids.crs, "epsg:3857") np.testing.assert_array_equal(centroids.region_id, region_id) np.testing.assert_array_equal(centroids.on_land, on_land) @@ -417,12 +453,14 @@ def test_from_csv(self): def test_write_read_csv(self): """Write and read a Centroids CSV file correctly.""" # Create Centroids with latitude and longitude arrays - tmpfile = Path('test_write_csv.csv') + tmpfile = Path("test_write_csv.csv") lat = np.array([10.0, 20.0, 30.0]) lon = np.array([-10.0, -20.0, -30.0]) region_id = np.array([1, 2, 3]) on_land = np.array([True, False, False]) - centroids_out = Centroids(lat=lat, lon=lon, region_id=region_id, on_land=on_land) + centroids_out = Centroids( + lat=lat, lon=lon, region_id=region_id, on_land=on_land + ) # Write CSV file from Centroids using write_csv centroids_out.write_csv(tmpfile) @@ -443,11 +481,11 @@ def test_write_read_csv(self): def test_from_excel_def_crs(self): """Read a centroid excel file correctly and use default CRS.""" # Create temporary excel file containing centroids data - tmpfile = Path('test_write_excel.xlsx') + tmpfile = Path("test_write_excel.xlsx") lat = np.array([0, 90, -90, 0, 0]) lon = np.array([0, 0, 0, 180, -180]) - df = pd.DataFrame({'lat': lat, 'lon': lon}) - df.to_excel(tmpfile, sheet_name='centroids', index=False) + df = pd.DataFrame({"lat": lat, "lon": lon}) + df.to_excel(tmpfile, sheet_name="centroids", index=False) # Read centroids using from_excel method centroids = Centroids.from_excel(file_path=tmpfile) @@ -463,14 +501,16 @@ def test_from_excel_def_crs(self): def test_from_excel(self): """Read a centroid excel file correctly which contains CRS information.""" # Create temporary excel file containing centroids data - tmpfile = Path('test_write_excel.xlsx') + tmpfile = Path("test_write_excel.xlsx") lat = np.array([0, 20048966.1, -20048966, 0, 0]) lon = np.array([0, 0, 0, 20037508.34, -20037508.34]) region_id = np.array([1, 2, 3, 4, 5]) on_land = np.array([True, False, False, True, True]) - df = pd.DataFrame({'lat': lat, 'lon': lon, 'region_id': region_id, 'on_land': on_land}) - df['crs'] = CRS.from_user_input(3857).to_wkt() - df.to_excel(tmpfile, sheet_name='centroids', index=False) + df = pd.DataFrame( + {"lat": lat, "lon": lon, "region_id": region_id, "on_land": on_land} + ) + df["crs"] = CRS.from_user_input(3857).to_wkt() + df.to_excel(tmpfile, sheet_name="centroids", index=False) # Read centroids 
using from_excel method centroids = Centroids.from_excel(file_path=tmpfile) @@ -478,7 +518,7 @@ def test_from_excel(self): # test attributes np.testing.assert_array_equal(centroids.lat, lat) np.testing.assert_array_equal(centroids.lon, lon) - self.assertEqual(centroids.crs, 'epsg:3857') + self.assertEqual(centroids.crs, "epsg:3857") np.testing.assert_array_equal(centroids.region_id, region_id) np.testing.assert_array_equal(centroids.on_land, on_land) @@ -488,12 +528,14 @@ def test_from_excel(self): def test_write_read_excel(self): """Write and read a Centroids Excel file correctly.""" # Create Centroids with latitude and longitude arrays - tmpfile = Path('test_write_excel.xlsx') + tmpfile = Path("test_write_excel.xlsx") lat = np.array([10.0, 20.0, 30.0]) lon = np.array([-10.0, -20.0, -30.0]) region_id = np.array([1, 2, 3]) on_land = np.array([True, False, False]) - centroids_out = Centroids(lat=lat, lon=lon, region_id=region_id, on_land=on_land) + centroids_out = Centroids( + lat=lat, lon=lon, region_id=region_id, on_land=on_land + ) # Write Excel file from Centroids using write_csv centroids_out.write_excel(tmpfile) @@ -517,20 +559,27 @@ def test_from_raster_file(self): o_lat, o_lon = (10.42822096697894, -69.33714959699981) res_lat, res_lon = (-0.009000000000000341, 0.009000000000000341) - centr_ras = Centroids.from_raster_file(HAZ_DEMO_FL, window=Window(0, 0, width, height)) + centr_ras = Centroids.from_raster_file( + HAZ_DEMO_FL, window=Window(0, 0, width, height) + ) self.assertTrue(u_coord.equal_crs(centr_ras.crs, DEF_CRS)) self.assertEqual(centr_ras.size, width * height) np.testing.assert_allclose( - [-69.333, -69.324, -69.333], centr_ras.lon[[0, 1, width]], atol=0.001, + [-69.333, -69.324, -69.333], + centr_ras.lon[[0, 1, width]], + atol=0.001, ) np.testing.assert_allclose( - [10.424, 10.424, 10.415], centr_ras.lat[[0, 1, width]], atol=0.001, + [10.424, 10.424, 10.415], + centr_ras.lat[[0, 1, width]], + atol=0.001, ) def test_from_vector_file(self): """Test from_vector_file and values_from_vector_files""" - shp_file = shapereader.natural_earth(resolution='110m', category='cultural', - name='populated_places_simple') + shp_file = shapereader.natural_earth( + resolution="110m", category="cultural", name="populated_places_simple" + ) centr = Centroids.from_vector_file(shp_file, dst_crs=DEF_CRS) self.assertTrue(u_coord.equal_crs(centr.crs, DEF_CRS)) @@ -549,35 +598,40 @@ def test_from_geodataframe(self): lon = np.arange(-50, -40) region_id = np.arange(1, 11) on_land = np.ones(10, dtype=bool) - extra = np.full(10, 'a') - - gdf = gpd.GeoDataFrame({ - 'geometry': gpd.points_from_xy(lon, lat), - 'region_id': region_id, - 'on_land': on_land, - 'extra': extra, - }, crs=crs) + extra = np.full(10, "a") + + gdf = gpd.GeoDataFrame( + { + "geometry": gpd.points_from_xy(lon, lat), + "region_id": region_id, + "on_land": on_land, + "extra": extra, + }, + crs=crs, + ) centroids = Centroids.from_geodataframe(gdf) for name, array in zip( - ['lat', 'lon', 'region_id', 'on_land'], + ["lat", "lon", "region_id", "on_land"], [lat, lon, region_id, on_land], ): np.testing.assert_array_equal(array, getattr(centroids, name)) - self.assertTrue('extra' in centroids.gdf.columns) + self.assertTrue("extra" in centroids.gdf.columns) self.assertTrue(u_coord.equal_crs(centroids.crs, crs)) def test_from_geodataframe_invalid(self): # Creating an invalid GeoDataFrame with geometries that are not points - invalid_geometry_gdf = gpd.GeoDataFrame({ - 'geometry': [ - shapely.Point((2,2)), - shapely.Polygon([(0, 0), (1, 1), (1, 
0), (0, 0)]), - shapely.LineString([(0, 1), (1, 0)]), - ], - }) + invalid_geometry_gdf = gpd.GeoDataFrame( + { + "geometry": [ + shapely.Point((2, 2)), + shapely.Polygon([(0, 0), (1, 1), (1, 0), (0, 0)]), + shapely.LineString([(0, 1), (1, 0)]), + ], + } + ) with self.assertRaises(ValueError): # Trying to create Centroids from invalid GeoDataFrame @@ -594,14 +648,16 @@ def test_from_exposures_with_region_id(self): value = np.array([1, 1, 1]) region_id = np.array([1, 2, 3]) on_land = [False, True, True] - crs = 'epsg:32632' - gdf = gpd.GeoDataFrame({ - 'latitude': lat, - 'longitude': lon, - 'value': value, - 'region_id': region_id, - 'on_land': on_land, - }) + crs = "epsg:32632" + gdf = gpd.GeoDataFrame( + { + "latitude": lat, + "longitude": lon, + "value": value, + "region_id": region_id, + "on_land": on_land, + } + ) exposures = Exposures(gdf, crs=crs) # Extract centroids from exposures @@ -612,7 +668,7 @@ def test_from_exposures_with_region_id(self): np.testing.assert_array_equal(centroids.lon, lon) np.testing.assert_array_equal(centroids.region_id, region_id) np.testing.assert_array_equal(centroids.on_land, on_land) - self.assertFalse(np.isin('value', centroids.gdf.columns)) + self.assertFalse(np.isin("value", centroids.gdf.columns)) self.assertEqual(centroids.crs, crs) def test_from_exposures_without_region_id(self): @@ -627,13 +683,15 @@ def test_from_exposures_without_region_id(self): value = np.array([1, 1, 1]) impf_TC = np.array([1, 2, 3]) centr_TC = np.array([1, 2, 3]) - gdf = gpd.GeoDataFrame({ - 'latitude': lat, - 'longitude': lon, - 'value': value, - 'impf_tc': impf_TC, - 'centr_TC': centr_TC, - }) + gdf = gpd.GeoDataFrame( + { + "latitude": lat, + "longitude": lon, + "value": value, + "impf_tc": impf_TC, + "centr_TC": centr_TC, + } + ) exposures = Exposures(gdf) # Extract centroids from exposures @@ -646,19 +704,18 @@ def test_from_exposures_without_region_id(self): self.assertEqual(centroids.region_id, None) self.assertEqual(centroids.on_land, None) np.testing.assert_equal( - np.isin(['value', 'impf_tc', 'centr_tc'], centroids.gdf.columns), + np.isin(["value", "impf_tc", "centr_tc"], centroids.gdf.columns), False, ) def test_from_exposure_exceptions(self): - gdf = gpd.GeoDataFrame({ - }) + gdf = gpd.GeoDataFrame({}) exposures = Exposures(gdf) with self.assertRaises(ValueError): Centroids.from_exposures(exposures) def test_read_write_hdf5(self): - tmpfile = Path('test_write_hdf5.out.hdf5') + tmpfile = Path("test_write_hdf5.out.hdf5") crs = DEF_CRS centroids_w = Centroids(lat=VEC_LAT, lon=VEC_LON, crs=crs) centroids_w.write_hdf5(tmpfile) @@ -674,8 +731,10 @@ def test_from_hdf5_nonexistent_file(self): with self.assertRaises(FileNotFoundError): Centroids.from_hdf5(file_name) + class TestCentroidsMethods(unittest.TestCase): """Test Centroids methods""" + def setUp(self): self.centr = Centroids(lat=VEC_LAT, lon=VEC_LON, crs=TEST_CRS) @@ -686,7 +745,7 @@ def test_select_pass(self): centr = Centroids(lat=VEC_LAT, lon=VEC_LON, region_id=region_id) fil_centr = centr.select(reg_id=10) - self.assertIsInstance(fil_centr,Centroids) + self.assertIsInstance(fil_centr, Centroids) self.assertEqual(fil_centr.size, 2) self.assertEqual(fil_centr.lat[0], VEC_LAT[2]) self.assertEqual(fil_centr.lat[1], VEC_LAT[4]) @@ -702,7 +761,7 @@ def test_select_extent_pass(self): region_id=np.zeros(5), ) ext_centr = centr.select(extent=[-175, -170, -5, 5]) - self.assertIsInstance(ext_centr,Centroids) + self.assertIsInstance(ext_centr, Centroids) np.testing.assert_array_equal(ext_centr.lon, np.array([-175, 
-170])) np.testing.assert_array_equal(ext_centr.lat, np.array([-3, 0])) @@ -719,7 +778,9 @@ def test_select_extent_pass(self): def test_append_pass(self): """Append points""" centr = self.centr - centr_bis = Centroids(lat=np.array([1, 2, 3]), lon=np.array([4, 5, 6]), crs=DEF_CRS) + centr_bis = Centroids( + lat=np.array([1, 2, 3]), lon=np.array([4, 5, 6]), crs=DEF_CRS + ) with self.assertRaises(ValueError): # Different crs centr_bis.to_crs(ALT_CRS).append(centr) @@ -734,16 +795,20 @@ def test_append_pass(self): self.assertTrue(np.array_equal(centr_bis.lon[3:], centr.lon)) def test_append(self): - lat2,lon2 = np.array([6,7,8,9,10]),np.array([6,7,8,9,10]) - newcentr = Centroids(lat=lat2,lon=lon2) + lat2, lon2 = np.array([6, 7, 8, 9, 10]), np.array([6, 7, 8, 9, 10]) + newcentr = Centroids(lat=lat2, lon=lon2) newcentr.append(self.centr) - self.assertTrue(newcentr.size == len(self.centr.lon)+len(lon2)) - np.testing.assert_array_equal(newcentr.lon,np.concatenate([lon2,self.centr.lon])) - np.testing.assert_array_equal(newcentr.lat,np.concatenate([lat2,self.centr.lat])) + self.assertTrue(newcentr.size == len(self.centr.lon) + len(lon2)) + np.testing.assert_array_equal( + newcentr.lon, np.concatenate([lon2, self.centr.lon]) + ) + np.testing.assert_array_equal( + newcentr.lat, np.concatenate([lat2, self.centr.lat]) + ) def test_append_dif_crs(self): - lat2,lon2 = np.array([0,0,1,2,3,4,5]),np.array([0,0,1,2,3,4,5]) - centr2 = Centroids(lat=lat2,lon=lon2,crs='epsg:3857') + lat2, lon2 = np.array([0, 0, 1, 2, 3, 4, 5]), np.array([0, 0, 1, 2, 3, 4, 5]) + centr2 = Centroids(lat=lat2, lon=lon2, crs="epsg:3857") # appending differing crs is not provided/possible with self.assertRaises(ValueError): @@ -758,26 +823,25 @@ def test_remove_duplicate_pass(self): ) self.assertTrue(centr.gdf.shape[0] == 2 * self.centr.gdf.shape[0]) rem_centr = Centroids.remove_duplicate_points(centr) - self.assertIsInstance(rem_centr,Centroids) + self.assertIsInstance(rem_centr, Centroids) self.assertTrue(self.centr == rem_centr) - def test_remove_duplicates_dif_on_land(self): ### We currently expect that only the geometry of the gdf defines duplicates. ### If one geometry is duplicated with differences in other attributes e.g. on_land ### they get removed nevertheless. 
Only the first occurrence will be part of the new object ### this test is only here to guarantee this behaviour - lat, lon = np.array([0,0,1,2,3,4,5]),np.array([0,0,1,2,3,4,5]) - centr = Centroids(lat=lat,lon=lon,on_land=[True]+[False]*6) + lat, lon = np.array([0, 0, 1, 2, 3, 4, 5]), np.array([0, 0, 1, 2, 3, 4, 5]) + centr = Centroids(lat=lat, lon=lon, on_land=[True] + [False] * 6) centr_subset = centr.remove_duplicate_points() # new object created self.assertFalse(centr == centr_subset) - self.assertIsNot(centr,centr_subset) + self.assertIsNot(centr, centr_subset) # duplicates removed - self.assertTrue(centr_subset.size == len(lat)-1) - self.assertTrue(np.all(centr_subset.shape == (len(lat)-1,len(lon)-1))) - np.testing.assert_array_equal(centr_subset.lon,np.unique(lon)) - np.testing.assert_array_equal(centr_subset.lat,np.unique(lat)) + self.assertTrue(centr_subset.size == len(lat) - 1) + self.assertTrue(np.all(centr_subset.shape == (len(lat) - 1, len(lon) - 1))) + np.testing.assert_array_equal(centr_subset.lon, np.unique(lon)) + np.testing.assert_array_equal(centr_subset.lat, np.unique(lat)) # only first on_land (True) is selected self.assertTrue(centr_subset.on_land[0]) @@ -791,17 +855,17 @@ def test_union(self): cent2 = Centroids(lat=lat2, lon=lon2, on_land=on_land2) lat3, lon3 = np.array([-1, -2]), np.array([1, 2]) - cent3 = Centroids(lat=lat3,lon=lon3) + cent3 = Centroids(lat=lat3, lon=lon3) cent = cent1.union(cent2) - np.testing.assert_array_equal(cent.lat, np.concatenate([lat,lat2])) - np.testing.assert_array_equal(cent.lon, np.concatenate([lon,lon2])) - np.testing.assert_array_equal(cent.on_land, np.concatenate([on_land,on_land2])) + np.testing.assert_array_equal(cent.lat, np.concatenate([lat, lat2])) + np.testing.assert_array_equal(cent.lon, np.concatenate([lon, lon2])) + np.testing.assert_array_equal(cent.on_land, np.concatenate([on_land, on_land2])) cent = cent1.union(cent1, cent2) - np.testing.assert_array_equal(cent.lat, np.concatenate([lat,lat2])) - np.testing.assert_array_equal(cent.lon, np.concatenate([lon,lon2])) - np.testing.assert_array_equal(cent.on_land, np.concatenate([on_land,on_land2])) + np.testing.assert_array_equal(cent.lat, np.concatenate([lat, lat2])) + np.testing.assert_array_equal(cent.lon, np.concatenate([lon, lon2])) + np.testing.assert_array_equal(cent.on_land, np.concatenate([on_land, on_land2])) cent = Centroids.union(cent1) np.testing.assert_array_equal(cent.lat, cent1.lat) @@ -815,9 +879,11 @@ def test_union(self): # if attributes are not part in one of the centroid objects it will be added as None in the union cent = Centroids.union(cent1, cent2, cent3) - np.testing.assert_array_equal(cent.lat, np.concatenate([lat,lat2,lat3])) - np.testing.assert_array_equal(cent.lon, np.concatenate([lon,lon2,lon3])) - np.testing.assert_array_equal(cent.on_land, np.concatenate([on_land,on_land2,[None,None]])) + np.testing.assert_array_equal(cent.lat, np.concatenate([lat, lat2, lat3])) + np.testing.assert_array_equal(cent.lon, np.concatenate([lon, lon2, lon3])) + np.testing.assert_array_equal( + cent.on_land, np.concatenate([on_land, on_land2, [None, None]]) + ) def test_select_pass(self): """Test Centroids.select method""" @@ -873,14 +939,18 @@ def test_get_meta(self): height=3, width=3, transform=Affine( - 10, 0, -35, - 0, -10, 35, + 10, + 0, + -35, + 0, + -10, + 35, ), ) - self.assertEqual(meta['height'], expected_meta['height']) - self.assertEqual(meta['width'], expected_meta['width']) - self.assertTrue(u_coord.equal_crs(meta['crs'], expected_meta['crs'])) - 
self.assertTrue(meta['transform'].almost_equals(expected_meta['transform'])) + self.assertEqual(meta["height"], expected_meta["height"]) + self.assertEqual(meta["width"], expected_meta["width"]) + self.assertTrue(u_coord.equal_crs(meta["crs"], expected_meta["crs"])) + self.assertTrue(meta["transform"].almost_equals(expected_meta["transform"])) def test_get_closest_point(self): """Test get_closest_point""" @@ -896,21 +966,25 @@ def test_get_closest_point(self): """Test get_closest_point""" for y_sign in [1, -1]: meta = { - 'width': 10, - 'height': 20, - 'transform': rasterio.Affine(0.5, 0, 0.1, 0, y_sign * 0.6, y_sign * (-0.3)), - 'crs': DEF_CRS, + "width": 10, + "height": 20, + "transform": rasterio.Affine( + 0.5, 0, 0.1, 0, y_sign * 0.6, y_sign * (-0.3) + ), + "crs": DEF_CRS, } centr_ras = Centroids.from_meta(meta=meta) - test_data = np.array([ - [0.4, 0.1, 0.35, 0.0, 0], - [-0.1, 0.2, 0.35, 0.0, 0], - [2.2, 0.1, 2.35, 0.0, 4], - [1.4, 2.5, 1.35, 2.4, 42], - [5.5, -0.1, 4.85, 0.0, 9], - ]) - test_data[:,[1,3]] *= y_sign + test_data = np.array( + [ + [0.4, 0.1, 0.35, 0.0, 0], + [-0.1, 0.2, 0.35, 0.0, 0], + [2.2, 0.1, 2.35, 0.0, 4], + [1.4, 2.5, 1.35, 2.4, 42], + [5.5, -0.1, 4.85, 0.0, 9], + ] + ) + test_data[:, [1, 3]] *= y_sign for x_in, y_in, x_out, y_out, idx_out in test_data: x, y, idx = centr_ras.get_closest_point(x_in, y_in) self.assertEqual(x, x_out) @@ -919,7 +993,9 @@ def test_get_closest_point(self): self.assertEqual(centr_ras.lon[idx], x) self.assertEqual(centr_ras.lat[idx], y) - centr_ras = Centroids(lat=np.array([0, 0.2, 0.7]), lon=np.array([-0.4, 0.2, 1.1])) + centr_ras = Centroids( + lat=np.array([0, 0.2, 0.7]), lon=np.array([-0.4, 0.2, 1.1]) + ) x, y, idx = centr_ras.get_closest_point(0.1, 0.0) self.assertEqual(x, 0.2) self.assertEqual(y, 0.2) @@ -929,11 +1005,19 @@ def test_dist_coast_pass(self): """Test get_dist_coast""" dist_coast = self.centr.get_dist_coast() # Just checking that the output doesnt change over time. 
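# Usage sketch (editor's illustration of the Centroids API exercised by the tests
# above; coordinates and values are hypothetical, not part of the patch):
import numpy as np
from climada.hazard.centroids.centr import Centroids

cent_a = Centroids(lat=np.array([0, 0, 1]), lon=np.array([0, 0, 1]))
cent_b = Centroids(lat=np.array([2, 3]), lon=np.array([2, 3]))
merged = cent_a.union(cent_b)  # concatenates points; attributes missing in one object become None
deduped = merged.remove_duplicate_points()  # keeps the first occurrence of each duplicated geometry
x, y, idx = deduped.get_closest_point(0.1, 0.1)  # nearest centroid coordinates and its index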
- REF_VALUES = np.array([ - 860.0, 200.0, 25610.0, 1000.0, 4685.0, - 507500.0, 500.0, 150500.0, - ]) - self.assertIsInstance(dist_coast,np.ndarray) + REF_VALUES = np.array( + [ + 860.0, + 200.0, + 25610.0, + 1000.0, + 4685.0, + 507500.0, + 500.0, + 150500.0, + ] + ) + self.assertIsInstance(dist_coast, np.ndarray) np.testing.assert_allclose(dist_coast, REF_VALUES, atol=1.0) def test_dist_coast_pass_raster(self): @@ -947,13 +1031,20 @@ def test_area_pass(self): """Test set_area""" ulx, xres, lrx = 60, 1, 90 uly, yres, lry = 0, 1, 20 - xx, yy = np.meshgrid(np.arange(ulx + xres / 2, lrx, xres), - np.arange(uly + yres / 2, lry, yres)) - vec_data = gpd.GeoDataFrame({ - 'geometry': [Point(xflat, yflat) for xflat, yflat in zip(xx.flatten(), yy.flatten())], - 'lon': xx.flatten(), - 'lat': yy.flatten(), - }, crs={'proj': 'cea'}) + xx, yy = np.meshgrid( + np.arange(ulx + xres / 2, lrx, xres), np.arange(uly + yres / 2, lry, yres) + ) + vec_data = gpd.GeoDataFrame( + { + "geometry": [ + Point(xflat, yflat) + for xflat, yflat in zip(xx.flatten(), yy.flatten()) + ], + "lon": xx.flatten(), + "lat": yy.flatten(), + }, + crs={"proj": "cea"}, + ) centr = Centroids.from_geodataframe(vec_data) area_pixel = centr.get_area_pixel() self.assertTrue(np.allclose(area_pixel, np.ones(centr.size))) @@ -972,10 +1063,16 @@ def test_area_pass_raster(self): ) # Correct result in CEA results in unequal pixel area - test_area = np.array([ - 981010.32497514, 981010.3249724 , 981037.92674855, - 981037.92674582, 981065.50487659, 981065.50487385, - ]) + test_area = np.array( + [ + 981010.32497514, + 981010.3249724, + 981037.92674855, + 981037.92674582, + 981065.50487659, + 981065.50487385, + ] + ) np.testing.assert_allclose(area_pixel, test_area) def test_equal_pass(self): @@ -983,7 +1080,7 @@ def test_equal_pass(self): centr_list = [ Centroids(lat=VEC_LAT, lon=VEC_LON, crs=DEF_CRS), Centroids(lat=VEC_LAT, lon=VEC_LON, crs=ALT_CRS), - Centroids(lat=VEC_LAT + 1, lon=VEC_LON + 1) + Centroids(lat=VEC_LAT + 1, lon=VEC_LON + 1), ] for centr1, centr2 in itertools.combinations(centr_list, 2): self.assertFalse(centr2 == centr1) @@ -992,28 +1089,31 @@ def test_equal_pass(self): self.assertTrue(centr2 == centr2) def test_plot(self): - "Test Centroids.plot()" - centr = Centroids( + "Test Centroids.plot()" + centr = Centroids( lat=np.array([-5, -3, 0, 3, 5]), lon=np.array([-180, -175, -170, 170, 175]), region_id=np.zeros(5), - crs=DEF_CRS + crs=DEF_CRS, ) - centr.plot() + centr.plot() def test_plot_non_def_crs(self): - "Test Centroids.plot() with non-default CRS" - centr = Centroids( - lat = np.array([10.0, 20.0, 30.0]), - lon = np.array([-10.0, -20.0, -30.0]), + "Test Centroids.plot() with non-default CRS" + centr = Centroids( + lat=np.array([10.0, 20.0, 30.0]), + lon=np.array([-10.0, -20.0, -30.0]), region_id=np.zeros(3), - crs='epsg:32632' + crs="epsg:32632", ) - centr.plot() + centr.plot() + # Execute Tests if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestCentroidsData) - TESTS.addTests(unittest.TestLoader().loadTestsFromTestCase(TestCentroidsReaderWriter)) + TESTS.addTests( + unittest.TestLoader().loadTestsFromTestCase(TestCentroidsReaderWriter) + ) TESTS.addTests(unittest.TestLoader().loadTestsFromTestCase(TestCentroidsMethods)) unittest.TextTestRunner(verbosity=2).run(TESTS) diff --git a/climada/hazard/io.py b/climada/hazard/io.py index 5248d4579..4ae036c52 100644 --- a/climada/hazard/io.py +++ b/climada/hazard/io.py @@ -24,63 +24,64 @@ import itertools import logging import pathlib -from typing 
import Union, Optional, Callable, Dict, Any +from typing import Any, Callable, Dict, Optional, Union import h5py import numpy as np import pandas as pd import rasterio import sparse as sp -from scipy import sparse import xarray as xr +from scipy import sparse -from climada.hazard.centroids.centr import Centroids import climada.util.constants as u_const import climada.util.coordinates as u_coord import climada.util.dates_times as u_dt import climada.util.hdf5_handler as u_hdf5 - +from climada.hazard.centroids.centr import Centroids LOGGER = logging.getLogger(__name__) -DEF_VAR_EXCEL = {'sheet_name': {'inten': 'hazard_intensity', - 'freq': 'hazard_frequency' - }, - 'col_name': {'cen_id': 'centroid_id/event_id', - 'even_id': 'event_id', - 'even_dt': 'event_date', - 'even_name': 'event_name', - 'freq': 'frequency', - 'orig': 'orig_event_flag' - }, - 'col_centroids': {'sheet_name': 'centroids', - 'col_name': {'cen_id': 'centroid_id', - 'latitude': 'lat', - 'longitude': 'lon', - } - } - } +DEF_VAR_EXCEL = { + "sheet_name": {"inten": "hazard_intensity", "freq": "hazard_frequency"}, + "col_name": { + "cen_id": "centroid_id/event_id", + "even_id": "event_id", + "even_dt": "event_date", + "even_name": "event_name", + "freq": "frequency", + "orig": "orig_event_flag", + }, + "col_centroids": { + "sheet_name": "centroids", + "col_name": { + "cen_id": "centroid_id", + "latitude": "lat", + "longitude": "lon", + }, + }, +} """Excel variable names""" -DEF_VAR_MAT = {'field_name': 'hazard', - 'var_name': {'per_id': 'peril_ID', - 'even_id': 'event_ID', - 'ev_name': 'name', - 'freq': 'frequency', - 'inten': 'intensity', - 'unit': 'units', - 'frac': 'fraction', - 'comment': 'comment', - 'datenum': 'datenum', - 'orig': 'orig_event_flag' - }, - 'var_cent': {'field_names': ['centroids', 'hazard'], - 'var_name': {'cen_id': 'centroid_ID', - 'lat': 'lat', - 'lon': 'lon' - } - } - } +DEF_VAR_MAT = { + "field_name": "hazard", + "var_name": { + "per_id": "peril_ID", + "even_id": "event_ID", + "ev_name": "name", + "freq": "frequency", + "inten": "intensity", + "unit": "units", + "frac": "fraction", + "comment": "comment", + "datenum": "datenum", + "orig": "orig_event_flag", + }, + "var_cent": { + "field_names": ["centroids", "hazard"], + "var_name": {"cen_id": "centroid_ID", "lat": "lat", "lon": "lon"}, + }, +} """MATLAB variable names""" DEF_COORDS = dict(event="time", longitude="longitude", latitude="latitude") @@ -92,22 +93,38 @@ # pylint: disable=no-member -class HazardIO(): + +class HazardIO: """ Contains all read/write methods of the Hazard class """ def set_raster(self, *args, **kwargs): """This function is deprecated, use Hazard.from_raster.""" - LOGGER.warning("The use of Hazard.set_raster is deprecated." - "Use Hazard.from_raster instead.") + LOGGER.warning( + "The use of Hazard.set_raster is deprecated." + "Use Hazard.from_raster instead." 
+ ) self.__dict__ = self.__class__.from_raster(*args, **kwargs).__dict__ @classmethod - def from_raster(cls, files_intensity, files_fraction=None, attrs=None, - band=None, haz_type=None, pool=None, src_crs=None, window=None, - geometry=None, dst_crs=None, transform=None, width=None, - height=None, resampling=rasterio.warp.Resampling.nearest): + def from_raster( + cls, + files_intensity, + files_fraction=None, + attrs=None, + band=None, + haz_type=None, + pool=None, + src_crs=None, + window=None, + geometry=None, + dst_crs=None, + transform=None, + width=None, + height=None, + resampling=rasterio.warp.Resampling.nearest, + ): """Create Hazard with intensity and fraction values from raster files If raster files are masked, the masked values are set to 0. @@ -163,8 +180,10 @@ def from_raster(cls, files_intensity, files_fraction=None, attrs=None, if not band: band = [1] if files_fraction is not None and len(files_intensity) != len(files_fraction): - raise ValueError('Number of intensity files differs from fraction files:' - f'{len(files_intensity)} != {len(files_fraction)}') + raise ValueError( + "Number of intensity files differs from fraction files:" + f"{len(files_intensity)} != {len(files_fraction)}" + ) # List all parameters for initialization here (missing ones will be default) hazard_kwargs = dict() @@ -172,50 +191,90 @@ def from_raster(cls, files_intensity, files_fraction=None, attrs=None, hazard_kwargs["haz_type"] = haz_type centroids, meta = Centroids.from_raster_file( - files_intensity[0], src_crs=src_crs, window=window, - geometry=geometry, dst_crs=dst_crs, transform=transform, - width=width, height=height, resampling=resampling, return_meta=True, + files_intensity[0], + src_crs=src_crs, + window=window, + geometry=geometry, + dst_crs=dst_crs, + transform=transform, + width=width, + height=height, + resampling=resampling, + return_meta=True, ) if pool: chunksize = max(min(len(files_intensity) // pool.ncpus, 1000), 1) inten_list = pool.map( _values_from_raster_files, - [[f] for f in files_intensity], itertools.repeat(meta), - itertools.repeat(band), itertools.repeat(src_crs), - itertools.repeat(window), itertools.repeat(geometry), - itertools.repeat(dst_crs), itertools.repeat(transform), - itertools.repeat(width), itertools.repeat(height), - itertools.repeat(resampling), chunksize=chunksize) - intensity = sparse.vstack(inten_list, format='csr') + [[f] for f in files_intensity], + itertools.repeat(meta), + itertools.repeat(band), + itertools.repeat(src_crs), + itertools.repeat(window), + itertools.repeat(geometry), + itertools.repeat(dst_crs), + itertools.repeat(transform), + itertools.repeat(width), + itertools.repeat(height), + itertools.repeat(resampling), + chunksize=chunksize, + ) + intensity = sparse.vstack(inten_list, format="csr") if files_fraction is not None: fract_list = pool.map( _values_from_raster_files, - [[f] for f in files_fraction], itertools.repeat(meta), - itertools.repeat(band), itertools.repeat(src_crs), - itertools.repeat(window), itertools.repeat(geometry), - itertools.repeat(dst_crs), itertools.repeat(transform), - itertools.repeat(width), itertools.repeat(height), - itertools.repeat(resampling), chunksize=chunksize) - fraction = sparse.vstack(fract_list, format='csr') + [[f] for f in files_fraction], + itertools.repeat(meta), + itertools.repeat(band), + itertools.repeat(src_crs), + itertools.repeat(window), + itertools.repeat(geometry), + itertools.repeat(dst_crs), + itertools.repeat(transform), + itertools.repeat(width), + itertools.repeat(height), + 
itertools.repeat(resampling), + chunksize=chunksize, + ) + fraction = sparse.vstack(fract_list, format="csr") else: intensity = _values_from_raster_files( - files_intensity, meta=meta, band=band, src_crs=src_crs, window=window, - geometry=geometry, dst_crs=dst_crs, transform=transform, width=width, - height=height, resampling=resampling, + files_intensity, + meta=meta, + band=band, + src_crs=src_crs, + window=window, + geometry=geometry, + dst_crs=dst_crs, + transform=transform, + width=width, + height=height, + resampling=resampling, ) if files_fraction is not None: fraction = _values_from_raster_files( - files_fraction, meta=meta, band=band, src_crs=src_crs, window=window, - geometry=geometry, dst_crs=dst_crs, transform=transform, width=width, - height=height, resampling=resampling) + files_fraction, + meta=meta, + band=band, + src_crs=src_crs, + window=window, + geometry=geometry, + dst_crs=dst_crs, + transform=transform, + width=width, + height=height, + resampling=resampling, + ) if files_fraction is None: fraction = intensity.copy() fraction.data.fill(1) hazard_kwargs.update(cls._attrs_to_kwargs(attrs, num_events=intensity.shape[0])) - return cls(centroids=centroids, intensity=intensity, fraction=fraction, **hazard_kwargs) + return cls( + centroids=centroids, intensity=intensity, fraction=fraction, **hazard_kwargs + ) @classmethod def from_xarray_raster_file( @@ -513,8 +572,10 @@ def from_xarray_raster( # Check data type for better error message if not isinstance(data, xr.Dataset): if isinstance(data, (pathlib.Path, str)): - raise TypeError("Passing a path to this classmethod is not supported. " - "Use Hazard.from_xarray_raster_file instead.") + raise TypeError( + "Passing a path to this classmethod is not supported. " + "Use Hazard.from_xarray_raster_file instead." + ) raise TypeError("This method only supports xarray.Dataset as input data") @@ -592,7 +653,7 @@ def to_csr_matrix(array: xr.DataArray) -> sparse.csr_matrix: sp.COO.from_numpy, array, dask="parallelized", - output_dtypes=[array.dtype] + output_dtypes=[array.dtype], ) sparse_coo = array.compute().data # Load into memory return sparse_coo.tocsr() # Convert sparse.COO to scipy.sparse.csr_matrix @@ -824,8 +885,9 @@ def vshape(array): # Set the Hazard attributes for _, ident in data_ident.iterrows(): - hazard_kwargs[ident["hazard_attr"] - ] = load_from_xarray_or_return_default(**ident) + hazard_kwargs[ident["hazard_attr"]] = load_from_xarray_or_return_default( + **ident + ) # Done! LOGGER.debug("Hazard successfully loaded. 
Number of events: %i", num_events) @@ -854,37 +916,39 @@ def _attrs_to_kwargs(attrs: Dict[str, Any], num_events: int) -> Dict[str, Any]: kwargs = dict() - if 'event_id' in attrs: - kwargs["event_id"] = attrs['event_id'] + if "event_id" in attrs: + kwargs["event_id"] = attrs["event_id"] else: kwargs["event_id"] = np.arange(1, num_events + 1) - if 'frequency' in attrs: - kwargs["frequency"] = attrs['frequency'] + if "frequency" in attrs: + kwargs["frequency"] = attrs["frequency"] else: kwargs["frequency"] = np.ones(kwargs["event_id"].size) - if 'frequency_unit' in attrs: - kwargs["frequency_unit"] = attrs['frequency_unit'] - if 'event_name' in attrs: - kwargs["event_name"] = attrs['event_name'] + if "frequency_unit" in attrs: + kwargs["frequency_unit"] = attrs["frequency_unit"] + if "event_name" in attrs: + kwargs["event_name"] = attrs["event_name"] else: kwargs["event_name"] = list(map(str, kwargs["event_id"])) - if 'date' in attrs: - kwargs["date"] = np.array([attrs['date']]) + if "date" in attrs: + kwargs["date"] = np.array([attrs["date"]]) else: kwargs["date"] = np.ones(kwargs["event_id"].size) - if 'orig' in attrs: - kwargs["orig"] = np.array([attrs['orig']]) + if "orig" in attrs: + kwargs["orig"] = np.array([attrs["orig"]]) else: kwargs["orig"] = np.ones(kwargs["event_id"].size, bool) - if 'unit' in attrs: - kwargs["units"] = attrs['unit'] + if "unit" in attrs: + kwargs["units"] = attrs["unit"] return kwargs def read_excel(self, *args, **kwargs): """This function is deprecated, use Hazard.from_excel.""" - LOGGER.warning("The use of Hazard.read_excel is deprecated." - "Use Hazard.from_excel instead.") + LOGGER.warning( + "The use of Hazard.read_excel is deprecated." + "Use Hazard.from_excel instead." + ) self.__dict__ = self.__class__.from_excel(*args, **kwargs).__dict__ @classmethod @@ -914,20 +978,21 @@ def from_excel(cls, file_name, var_names=None, haz_type=None): # pylint: disable=protected-access if not var_names: var_names = DEF_VAR_EXCEL - LOGGER.info('Reading %s', file_name) + LOGGER.info("Reading %s", file_name) hazard_kwargs = {} if haz_type is not None: hazard_kwargs["haz_type"] = haz_type try: centroids = Centroids._legacy_from_excel( - file_name, var_names=var_names['col_centroids']) + file_name, var_names=var_names["col_centroids"] + ) hazard_kwargs.update(cls._read_att_excel(file_name, var_names, centroids)) except KeyError as var_err: raise KeyError("Variable not in Excel file: " + str(var_err)) from var_err return cls(centroids=centroids, **hazard_kwargs) - def write_raster(self, file_name, variable='intensity', output_resolution=None): + def write_raster(self, file_name, variable="intensity", output_resolution=None): """Write intensity or fraction as GeoTIFF file. Each band is an event. Output raster is always a regular grid (same resolution for lat/lon). @@ -955,9 +1020,9 @@ def write_raster(self, file_name, variable='intensity', output_resolution=None): method to read intensity and fraction raster files. 
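# Usage sketch (editor's illustration of the reader/writer pair above; the file
# names are hypothetical):
from climada.hazard import Hazard

haz = Hazard.from_raster(
    ["gust_event1.tif", "gust_event2.tif"],  # one intensity raster per event
    band=[1],
    haz_type="WS",
    attrs={"unit": "m/s"},  # translated to constructor kwargs by _attrs_to_kwargs
)
# Without files_fraction, fraction is set to 1 wherever intensity is non-zero.
haz.write_raster("gust_footprints.tif", variable="intensity")  # one GeoTIFF band per event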
""" - if variable == 'intensity': + if variable == "intensity": var_to_write = self.intensity - elif variable =='fraction': + elif variable == "fraction": var_to_write = self.fraction else: raise ValueError( @@ -965,30 +1030,31 @@ def write_raster(self, file_name, variable='intensity', output_resolution=None): ) meta = self.centroids.get_meta(resolution=output_resolution) - meta.update(driver='GTiff', dtype=rasterio.float32, count=self.size) + meta.update(driver="GTiff", dtype=rasterio.float32, count=self.size) res = meta["transform"][0] # resolution from lon coordinates - if meta['height'] * meta['width'] == self.centroids.size: + if meta["height"] * meta["width"] == self.centroids.size: # centroids already in raster format u_coord.write_raster(file_name, var_to_write.toarray(), meta) else: geometry = self.centroids.get_pixel_shapes(res=res) - with rasterio.open(file_name, 'w', **meta) as dst: - LOGGER.info('Writing %s', file_name) + with rasterio.open(file_name, "w", **meta) as dst: + LOGGER.info("Writing %s", file_name) for i_ev in range(self.size): raster = rasterio.features.rasterize( ( (geom, value) - for geom, value - in zip(geometry, var_to_write[i_ev].toarray().flatten()) + for geom, value in zip( + geometry, var_to_write[i_ev].toarray().flatten() + ) ), - out_shape=(meta['height'], meta['width']), - transform=meta['transform'], + out_shape=(meta["height"], meta["width"]), + transform=meta["transform"], fill=0, all_touched=True, - dtype=meta['dtype'], + dtype=meta["dtype"], ) - dst.write(raster.astype(meta['dtype']), i_ev + 1) + dst.write(raster.astype(meta["dtype"]), i_ev + 1) def write_hdf5(self, file_name, todense=False): """Write hazard in hdf5 format. @@ -1001,11 +1067,11 @@ def write_hdf5(self, file_name, todense=False): if True write the sparse matrices as hdf5.dataset by converting them to dense format first. This increases readability of the file for other programs. default: False """ - LOGGER.info('Writing %s', file_name) - with h5py.File(file_name, 'w') as hf_data: + LOGGER.info("Writing %s", file_name) + with h5py.File(file_name, "w") as hf_data: str_dt = h5py.special_dtype(vlen=str) - for (var_name, var_val) in self.__dict__.items(): - if var_name == 'centroids': + for var_name, var_val in self.__dict__.items(): + if var_name == "centroids": # Centroids have their own write_hdf5 method, # which is invoked at the end of this method (s.b.) 
continue @@ -1014,18 +1080,24 @@ def write_hdf5(self, file_name, todense=False): hf_data.create_dataset(var_name, data=var_val.toarray()) else: hf_csr = hf_data.create_group(var_name) - hf_csr.create_dataset('data', data=var_val.data) - hf_csr.create_dataset('indices', data=var_val.indices) - hf_csr.create_dataset('indptr', data=var_val.indptr) - hf_csr.attrs['shape'] = var_val.shape + hf_csr.create_dataset("data", data=var_val.data) + hf_csr.create_dataset("indices", data=var_val.indices) + hf_csr.create_dataset("indptr", data=var_val.indptr) + hf_csr.attrs["shape"] = var_val.shape elif isinstance(var_val, str): hf_str = hf_data.create_dataset(var_name, (1,), dtype=str_dt) hf_str[0] = var_val - elif isinstance(var_val, list) and var_val and isinstance(var_val[0], str): - hf_str = hf_data.create_dataset(var_name, (len(var_val),), dtype=str_dt) + elif ( + isinstance(var_val, list) + and var_val + and isinstance(var_val[0], str) + ): + hf_str = hf_data.create_dataset( + var_name, (len(var_val),), dtype=str_dt + ) for i_ev, var_ev in enumerate(var_val): hf_str[i_ev] = var_ev - elif var_val is not None and var_name != 'pool': + elif var_val is not None and var_name != "pool": try: hf_data.create_dataset(var_name, data=var_val) except TypeError: @@ -1034,14 +1106,17 @@ def write_hdf5(self, file_name, todense=False): "type, %s, for which writing to hdf5 " "is not implemented. Reading this H5 file will probably lead to " "%s being set to its default value.", - var_name, var_val.__class__.__name__, var_name + var_name, + var_val.__class__.__name__, + var_name, ) - self.centroids.write_hdf5(file_name, mode='a') + self.centroids.write_hdf5(file_name, mode="a") def read_hdf5(self, *args, **kwargs): """This function is deprecated, use Hazard.from_hdf5.""" - LOGGER.warning("The use of Hazard.read_hdf5 is deprecated." - "Use Hazard.from_hdf5 instead.") + LOGGER.warning( + "The use of Hazard.read_hdf5 is deprecated." "Use Hazard.from_hdf5 instead." + ) self.__dict__ = self.__class__.from_hdf5(*args, **kwargs).__dict__ @classmethod @@ -1059,16 +1134,16 @@ def from_hdf5(cls, file_name): Hazard object from the provided MATLAB file """ - LOGGER.info('Reading %s', file_name) + LOGGER.info("Reading %s", file_name) # NOTE: This is a stretch. We instantiate one empty object to iterate over its # attributes. But then we create a new one with the attributes filled! 
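# Layout sketch (editor's illustration of the HDF5 format produced by write_hdf5
# with todense=False; "hazard.hdf5" is a hypothetical file):
import h5py
from scipy import sparse

with h5py.File("hazard.hdf5", "r") as hf:
    grp = hf["intensity"]  # each sparse matrix is stored as a group of data/indices/indptr
    intensity = sparse.csr_matrix(
        (grp["data"][:], grp["indices"][:], grp["indptr"][:]),
        shape=grp.attrs["shape"],
    )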
haz = cls() hazard_kwargs = dict() - with h5py.File(file_name, 'r') as hf_data: - for (var_name, var_val) in haz.__dict__.items(): + with h5py.File(file_name, "r") as hf_data: + for var_name, var_val in haz.__dict__.items(): if var_name not in hf_data.keys(): continue - if var_name == 'centroids': + if var_name == "centroids": continue if isinstance(var_val, np.ndarray) and var_val.ndim == 1: hazard_kwargs[var_name] = np.array(hf_data.get(var_name)) @@ -1078,14 +1153,22 @@ def from_hdf5(cls, file_name): hazard_kwargs[var_name] = sparse.csr_matrix(hf_csr) else: hazard_kwargs[var_name] = sparse.csr_matrix( - (hf_csr['data'][:], hf_csr['indices'][:], hf_csr['indptr'][:]), - hf_csr.attrs['shape']) + ( + hf_csr["data"][:], + hf_csr["indices"][:], + hf_csr["indptr"][:], + ), + hf_csr.attrs["shape"], + ) elif isinstance(var_val, str): - hazard_kwargs[var_name] = u_hdf5.to_string( - hf_data.get(var_name)[0]) + hazard_kwargs[var_name] = u_hdf5.to_string(hf_data.get(var_name)[0]) elif isinstance(var_val, list): - hazard_kwargs[var_name] = [x for x in map( - u_hdf5.to_string, np.array(hf_data.get(var_name)).tolist())] + hazard_kwargs[var_name] = [ + x + for x in map( + u_hdf5.to_string, np.array(hf_data.get(var_name)).tolist() + ) + ] else: hazard_kwargs[var_name] = hf_data.get(var_name) hazard_kwargs["centroids"] = Centroids.from_hdf5(file_name) @@ -1096,19 +1179,19 @@ def from_hdf5(cls, file_name): def _read_att_mat(data, file_name, var_names, centroids): """Read MATLAB hazard's attributes.""" attrs = dict() - attrs["frequency"] = np.squeeze(data[var_names['var_name']['freq']]) + attrs["frequency"] = np.squeeze(data[var_names["var_name"]["freq"]]) try: attrs["frequency_unit"] = u_hdf5.get_string( - data[var_names['var_name']['freq_unit']]) + data[var_names["var_name"]["freq_unit"]] + ) except KeyError: pass - attrs["orig"] = np.squeeze( - data[var_names['var_name']['orig']]).astype(bool) + attrs["orig"] = np.squeeze(data[var_names["var_name"]["orig"]]).astype(bool) attrs["event_id"] = np.squeeze( - data[var_names['var_name']['even_id']].astype(int, copy=False)) + data[var_names["var_name"]["even_id"]].astype(int, copy=False) + ) try: - attrs["units"] = u_hdf5.get_string( - data[var_names['var_name']['unit']]) + attrs["units"] = u_hdf5.get_string(data[var_names["var_name"]["unit"]]) except KeyError: pass @@ -1116,31 +1199,40 @@ def _read_att_mat(data, file_name, var_names, centroids): n_event = len(attrs["event_id"]) try: attrs["intensity"] = u_hdf5.get_sparse_csr_mat( - data[var_names['var_name']['inten']], (n_event, n_cen)) + data[var_names["var_name"]["inten"]], (n_event, n_cen) + ) except ValueError as err: - raise ValueError('Size missmatch in intensity matrix.') from err + raise ValueError("Size missmatch in intensity matrix.") from err try: attrs["fraction"] = u_hdf5.get_sparse_csr_mat( - data[var_names['var_name']['frac']], (n_event, n_cen)) + data[var_names["var_name"]["frac"]], (n_event, n_cen) + ) except ValueError as err: - raise ValueError('Size missmatch in fraction matrix.') from err + raise ValueError("Size missmatch in fraction matrix.") from err except KeyError: attrs["fraction"] = sparse.csr_matrix( - np.ones(attrs["intensity"].shape, dtype=float)) + np.ones(attrs["intensity"].shape, dtype=float) + ) # Event names: set as event_id if no provided try: attrs["event_name"] = u_hdf5.get_list_str_from_ref( - file_name, data[var_names['var_name']['ev_name']]) + file_name, data[var_names["var_name"]["ev_name"]] + ) except KeyError: attrs["event_name"] = list(attrs["event_id"]) try: - 
datenum = data[var_names['var_name']['datenum']].squeeze() - attrs["date"] = np.array([ - (dt.datetime.fromordinal(int(date)) - + dt.timedelta(days=date % 1) - - dt.timedelta(days=366)).toordinal() - for date in datenum]) + datenum = data[var_names["var_name"]["datenum"]].squeeze() + attrs["date"] = np.array( + [ + ( + dt.datetime.fromordinal(int(date)) + + dt.timedelta(days=date % 1) + - dt.timedelta(days=366) + ).toordinal() + for date in datenum + ] + ) except KeyError: pass @@ -1149,44 +1241,59 @@ def _read_att_mat(data, file_name, var_names, centroids): @staticmethod def _read_att_excel(file_name, var_names, centroids): """Read Excel hazard's attributes.""" - dfr = pd.read_excel(file_name, var_names['sheet_name']['freq']) + dfr = pd.read_excel(file_name, var_names["sheet_name"]["freq"]) num_events = dfr.shape[0] attrs = dict() - attrs["frequency"] = dfr[var_names['col_name']['freq']].values - attrs["orig"] = dfr[var_names['col_name']['orig']].values.astype(bool) - attrs["event_id"] = dfr[var_names['col_name'] - ['even_id']].values.astype(int, copy=False) - attrs["date"] = dfr[var_names['col_name'] - ['even_dt']].values.astype(int, copy=False) - attrs["event_name"] = dfr[var_names['col_name'] - ['even_name']].values.tolist() - - dfr = pd.read_excel(file_name, var_names['sheet_name']['inten']) + attrs["frequency"] = dfr[var_names["col_name"]["freq"]].values + attrs["orig"] = dfr[var_names["col_name"]["orig"]].values.astype(bool) + attrs["event_id"] = dfr[var_names["col_name"]["even_id"]].values.astype( + int, copy=False + ) + attrs["date"] = dfr[var_names["col_name"]["even_dt"]].values.astype( + int, copy=False + ) + attrs["event_name"] = dfr[var_names["col_name"]["even_name"]].values.tolist() + + dfr = pd.read_excel(file_name, var_names["sheet_name"]["inten"]) # number of events (ignore centroid_ID column) # check the number of events is the same as the one in the frequency if dfr.shape[1] - 1 is not num_events: - raise ValueError('Hazard intensity is given for a number of events ' - 'different from the number of defined in its frequency: ' - f'{dfr.shape[1] - 1} != {num_events}') + raise ValueError( + "Hazard intensity is given for a number of events " + "different from the number of defined in its frequency: " + f"{dfr.shape[1] - 1} != {num_events}" + ) # check number of centroids is the same as retrieved before if dfr.shape[0] is not centroids.size: - raise ValueError('Hazard intensity is given for a number of centroids ' - 'different from the number of centroids defined: ' - f'{dfr.shape[0]} != {centroids.size}') + raise ValueError( + "Hazard intensity is given for a number of centroids " + "different from the number of centroids defined: " + f"{dfr.shape[0]} != {centroids.size}" + ) attrs["intensity"] = sparse.csr_matrix( - dfr.values[:, 1:num_events + 1].transpose()) + dfr.values[:, 1 : num_events + 1].transpose() + ) attrs["fraction"] = sparse.csr_matrix( - np.ones(attrs["intensity"].shape, dtype=float)) + np.ones(attrs["intensity"].shape, dtype=float) + ) return attrs def _values_from_raster_files( - file_names, meta, band=None, src_crs=None, window=None, - geometry=None, dst_crs=None, transform=None, width=None, - height=None, resampling=rasterio.warp.Resampling.nearest, + file_names, + meta, + band=None, + src_crs=None, + window=None, + geometry=None, + dst_crs=None, + transform=None, + width=None, + height=None, + resampling=rasterio.warp.Resampling.nearest, ): """Read raster of bands and set 0 values to the masked ones. 
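# Usage sketch (editor's illustration of overriding the Excel variable names
# defined in DEF_VAR_EXCEL; the workbook and sheet names are hypothetical):
import copy
from climada.hazard import Hazard
from climada.hazard.io import DEF_VAR_EXCEL

var_names = copy.deepcopy(DEF_VAR_EXCEL)
var_names["sheet_name"]["inten"] = "wind_intensity"  # non-default intensity sheet
haz = Hazard.from_excel("hazard_ws.xlsx", var_names=var_names, haz_type="WS")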
@@ -1237,14 +1344,24 @@ class method) to allow for parallel computing. values = [] for file_name in file_names: tmp_meta, data = u_coord.read_raster( - file_name, band, src_crs, window, geometry, dst_crs, - transform, width, height, resampling, + file_name, + band, + src_crs, + window, + geometry, + dst_crs, + transform, + width, + height, + resampling, ) - if (tmp_meta['crs'] != meta['crs'] - or tmp_meta['transform'] != meta['transform'] - or tmp_meta['height'] != meta['height'] - or tmp_meta['width'] != meta['width']): - raise ValueError('Raster data is inconsistent with contained raster.') + if ( + tmp_meta["crs"] != meta["crs"] + or tmp_meta["transform"] != meta["transform"] + or tmp_meta["height"] != meta["height"] + or tmp_meta["width"] != meta["width"] + ): + raise ValueError("Raster data is inconsistent with contained raster.") values.append(sparse.csr_matrix(data)) - return sparse.vstack(values, format='csr') + return sparse.vstack(values, format="csr") diff --git a/climada/hazard/isimip_data.py b/climada/hazard/isimip_data.py index f9c28e8e3..5f9f794e1 100644 --- a/climada/hazard/isimip_data.py +++ b/climada/hazard/isimip_data.py @@ -27,11 +27,11 @@ monthly, yearly (e.g. yield) """ - import xarray as xr bbox_world = [-85, 85, -180, 180] + def _read_one_nc(file_name, bbox=None, years=None): """Reads 1 ISIMIP output NETCDF file data within a certain bounding box and time period @@ -56,6 +56,9 @@ def _read_one_nc(file_name, bbox=None, years=None): if not years: return data.sel(lat=slice(bbox[3], bbox[1]), lon=slice(bbox[0], bbox[2])) - time_id = years - int(data['time'].units[12:16]) - return data.sel(lat=slice(bbox[3], bbox[1]), lon=slice(bbox[0], bbox[2]), - time=slice(time_id[0], time_id[1])) + time_id = years - int(data["time"].units[12:16]) + return data.sel( + lat=slice(bbox[3], bbox[1]), + lon=slice(bbox[0], bbox[2]), + time=slice(time_id[0], time_id[1]), + ) diff --git a/climada/hazard/plot.py b/climada/hazard/plot.py index 26d6169a6..f5d02d74e 100644 --- a/climada/hazard/plot.py +++ b/climada/hazard/plot.py @@ -19,22 +19,28 @@ Define Hazard Plotting Methods. """ -import numpy as np import matplotlib.pyplot as plt +import numpy as np import climada.util.plot as u_plot - # pylint: disable=no-member -class HazardPlot(): + +class HazardPlot: """ Contains all plotting methods of the Hazard class """ - def plot_rp_intensity(self, return_periods=(25, 50, 100, 250), - smooth=True, axis=None, figsize=(9, 13), adapt_fontsize=True, - **kwargs): + def plot_rp_intensity( + self, + return_periods=(25, 50, 100, 250), + smooth=True, + axis=None, + figsize=(9, 13), + adapt_fontsize=True, + **kwargs, + ): """Compute and plot hazard exceedance intensity maps for different return periods. Calls local_exceedance_inten. 
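# Usage sketch (editor's illustration; `haz` stands for any Hazard instance):
axis, inten_stats = haz.plot_rp_intensity(return_periods=(25, 50, 100, 250))
# inten_stats has shape (len(return_periods), num_centroids); one map per return
# period is drawn, with the colorbar labelled "Intensity (<units>)".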
@@ -57,17 +63,32 @@ def plot_rp_intensity(self, return_periods=(25, 50, 100, 250), intenstats is return_periods.size x num_centroids """ inten_stats = self.local_exceedance_inten(np.array(return_periods)) - colbar_name = 'Intensity (' + self.units + ')' + colbar_name = "Intensity (" + self.units + ")" title = list() for ret in return_periods: - title.append('Return period: ' + str(ret) + ' years') - axis = u_plot.geo_im_from_array(inten_stats, self.centroids.coord, - colbar_name, title, smooth=smooth, axes=axis, - figsize=figsize, adapt_fontsize=adapt_fontsize, **kwargs) + title.append("Return period: " + str(ret) + " years") + axis = u_plot.geo_im_from_array( + inten_stats, + self.centroids.coord, + colbar_name, + title, + smooth=smooth, + axes=axis, + figsize=figsize, + adapt_fontsize=adapt_fontsize, + **kwargs, + ) return axis, inten_stats - def plot_intensity(self, event=None, centr=None, smooth=True, axis=None, adapt_fontsize=True, - **kwargs): + def plot_intensity( + self, + event=None, + centr=None, + smooth=True, + axis=None, + adapt_fontsize=True, + **kwargs, + ): """Plot intensity values for a selected event or centroid. Parameters @@ -101,13 +122,21 @@ def plot_intensity(self, event=None, centr=None, smooth=True, axis=None, adapt_f ------ ValueError """ - col_label = f'Intensity ({self.units})' + col_label = f"Intensity ({self.units})" crs_epsg, _ = u_plot.get_transformation(self.centroids.geometry.crs) if event is not None: if isinstance(event, str): event = self.get_event_id(event) - return self._event_plot(event, self.intensity, col_label, - smooth, crs_epsg, axis, adapt_fontsize=adapt_fontsize, **kwargs) + return self._event_plot( + event, + self.intensity, + col_label, + smooth, + crs_epsg, + axis, + adapt_fontsize=adapt_fontsize, + **kwargs, + ) if centr is not None: if isinstance(centr, tuple): _, _, centr = self.centroids.get_closest_point(centr[0], centr[1]) @@ -115,8 +144,7 @@ def plot_intensity(self, event=None, centr=None, smooth=True, axis=None, adapt_f raise ValueError("Provide one event id or one centroid id.") - def plot_fraction(self, event=None, centr=None, smooth=True, axis=None, - **kwargs): + def plot_fraction(self, event=None, centr=None, smooth=True, axis=None, **kwargs): """Plot fraction values for a selected event or centroid. Parameters @@ -150,12 +178,13 @@ def plot_fraction(self, event=None, centr=None, smooth=True, axis=None, ------ ValueError """ - col_label = 'Fraction' + col_label = "Fraction" if event is not None: if isinstance(event, str): event = self.get_event_id(event) - return self._event_plot(event, self.fraction, col_label, smooth, axis, - **kwargs) + return self._event_plot( + event, self.fraction, col_label, smooth, axis, **kwargs + ) if centr is not None: if isinstance(centr, tuple): _, _, centr = self.centroids.get_closest_point(centr[0], centr[1]) @@ -163,8 +192,18 @@ def plot_fraction(self, event=None, centr=None, smooth=True, axis=None, raise ValueError("Provide one event id or one centroid id.") - def _event_plot(self, event_id, mat_var, col_name, smooth, crs_espg, axis=None, - figsize=(9, 13), adapt_fontsize=True, **kwargs): + def _event_plot( + self, + event_id, + mat_var, + col_name, + smooth, + crs_espg, + axis=None, + figsize=(9, 13), + adapt_fontsize=True, + **kwargs, + ): """Plot an event of the input matrix. 
Parameters @@ -200,26 +239,39 @@ def _event_plot(self, event_id, mat_var, col_name, smooth, crs_espg, axis=None, try: event_pos = np.where(self.event_id == ev_id)[0][0] except IndexError as err: - raise ValueError(f'Wrong event id: {ev_id}.') from err + raise ValueError(f"Wrong event id: {ev_id}.") from err im_val = mat_var[event_pos, :].toarray().transpose() - title = f'Event ID {self.event_id[event_pos]}: {self.event_name[event_pos]}' + title = ( + f"Event ID {self.event_id[event_pos]}: {self.event_name[event_pos]}" + ) elif ev_id < 0: max_inten = np.asarray(np.sum(mat_var, axis=1)).reshape(-1) event_pos = np.argpartition(max_inten, ev_id)[ev_id:] event_pos = event_pos[np.argsort(max_inten[event_pos])][0] im_val = mat_var[event_pos, :].toarray().transpose() - title = (f'{np.abs(ev_id)}-largest Event. ID {self.event_id[event_pos]}:' - f' {self.event_name[event_pos]}') + title = ( + f"{np.abs(ev_id)}-largest Event. ID {self.event_id[event_pos]}:" + f" {self.event_name[event_pos]}" + ) else: im_val = np.max(mat_var, axis=0).toarray().transpose() - title = f'{self.haz_type} max intensity at each point' + title = f"{self.haz_type} max intensity at each point" array_val.append(im_val) l_title.append(title) - return u_plot.geo_im_from_array(array_val, self.centroids.coord, col_name, - l_title, smooth=smooth, axes=axis, figsize=figsize, - proj=crs_espg, adapt_fontsize=adapt_fontsize, **kwargs) + return u_plot.geo_im_from_array( + array_val, + self.centroids.coord, + col_name, + l_title, + smooth=smooth, + axes=axis, + figsize=figsize, + proj=crs_espg, + adapt_fontsize=adapt_fontsize, + **kwargs, + ) def _centr_plot(self, centr_idx, mat_var, col_name, axis=None, **kwargs): """Plot a centroid of the input matrix. @@ -251,11 +303,11 @@ def _centr_plot(self, centr_idx, mat_var, col_name, axis=None, **kwargs): try: centr_pos = centr_idx except IndexError as err: - raise ValueError(f'Wrong centroid id: {centr_idx}.') from err + raise ValueError(f"Wrong centroid id: {centr_idx}.") from err array_val = mat_var[:, centr_pos].toarray() title = ( - f'Centroid {centr_idx}:' - f' ({np.around(coord[centr_pos, 0], 3)}, {np.around(coord[centr_pos, 1],3)})' + f"Centroid {centr_idx}:" + f" ({np.around(coord[centr_pos, 0], 3)}, {np.around(coord[centr_pos, 1],3)})" ) elif centr_idx < 0: max_inten = np.asarray(np.sum(mat_var, axis=0)).reshape(-1) @@ -264,19 +316,19 @@ def _centr_plot(self, centr_idx, mat_var, col_name, axis=None, **kwargs): array_val = mat_var[:, centr_pos].toarray() title = ( - f'{np.abs(centr_idx)}-largest Centroid. {centr_pos}:' - f' ({np.around(coord[centr_pos, 0], 3)}, {np.around(coord[centr_pos, 1], 3)})' + f"{np.abs(centr_idx)}-largest Centroid. {centr_pos}:" + f" ({np.around(coord[centr_pos, 0], 3)}, {np.around(coord[centr_pos, 1], 3)})" ) else: array_val = np.max(mat_var, axis=1).toarray() - title = f'{self.haz_type} max intensity at each event' + title = f"{self.haz_type} max intensity at each event" if not axis: _, axis = plt.subplots(1) - if 'color' not in kwargs: - kwargs['color'] = 'b' + if "color" not in kwargs: + kwargs["color"] = "b" axis.set_title(title) - axis.set_xlabel('Event number') + axis.set_xlabel("Event number") axis.set_ylabel(str(col_name)) axis.plot(range(len(array_val)), array_val, **kwargs) axis.set_xlim([0, len(array_val)]) diff --git a/climada/hazard/storm_europe.py b/climada/hazard/storm_europe.py index c49630a5a..c4b49e7fc 100644 --- a/climada/hazard/storm_europe.py +++ b/climada/hazard/storm_europe.py @@ -19,7 +19,7 @@ Define StormEurope class. 
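# Usage sketch (editor's illustration of the Hazard plotting entry points
# reformatted above in climada/hazard/plot.py; ids and the event name are
# hypothetical):
haz.plot_intensity(event=0)  # maximum intensity over all events at each centroid
haz.plot_intensity(event="Lothar")  # a single event selected by its name
haz.plot_intensity(centr=42)  # intensity of every event at centroid index 42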
""" -__all__ = ['StormEurope'] +__all__ = ["StormEurope"] import bz2 import datetime as dt @@ -27,29 +27,31 @@ from pathlib import Path from typing import Optional +import matplotlib.pyplot as plt import numpy as np -import xarray as xr import pandas as pd -import matplotlib.pyplot as plt +import xarray as xr from scipy import sparse -from climada.util.config import CONFIG from climada.hazard.base import Hazard from climada.hazard.centroids.centr import Centroids +from climada.util.config import CONFIG +from climada.util.dates_times import ( + date_to_str, + datetime64_to_ordinal, + first_year, + last_year, +) +from climada.util.dwd_icon_loader import ( + delete_icon_grib, + download_icon_centroids_file, + download_icon_grib, +) from climada.util.files_handler import get_file_names -from climada.util.dates_times import (datetime64_to_ordinal, - last_year, - first_year, - date_to_str - ) -from climada.util.dwd_icon_loader import (download_icon_centroids_file, - download_icon_grib, - delete_icon_grib, - ) LOGGER = logging.getLogger(__name__) -HAZ_TYPE = 'WS' +HAZ_TYPE = "WS" """Hazard type acronym for Winter Storm""" N_PROB_EVENTS = 5 * 6 @@ -77,15 +79,17 @@ class StormEurope(Hazard): intensity_thres = 14.7 """Intensity threshold for storage in m/s; same as used by WISC SSI calculations.""" - vars_opt = Hazard.vars_opt.union({'ssi_wisc', 'ssi', 'ssi_full_area'}) + vars_opt = Hazard.vars_opt.union({"ssi_wisc", "ssi", "ssi_full_area"}) """Name of the variables that aren't need to compute the impact.""" - def __init__(self, - units: str = 'm/s', - ssi: Optional[np.ndarray] = None, - ssi_wisc: Optional[np.ndarray] = None, - ssi_full_area: Optional[np.ndarray] = None, - **kwargs): + def __init__( + self, + units: str = "m/s", + ssi: Optional[np.ndarray] = None, + ssi_wisc: Optional[np.ndarray] = None, + ssi_full_area: Optional[np.ndarray] = None, + **kwargs, + ): """Initialize a StormEurope object Parameters @@ -106,22 +110,32 @@ def __init__(self, `StormEurope` object into a smaller region`. Defaults to an empty array. """ - kwargs.setdefault('haz_type', HAZ_TYPE) + kwargs.setdefault("haz_type", HAZ_TYPE) Hazard.__init__(self, units=units, **kwargs) self.ssi = ssi if ssi is not None else np.array([], float) self.ssi_wisc = ssi_wisc if ssi_wisc is not None else np.array([], float) - self.ssi_full_area = ssi_full_area if ssi_full_area is not None else np.array([], float) + self.ssi_full_area = ( + ssi_full_area if ssi_full_area is not None else np.array([], float) + ) def read_footprints(self, *args, **kwargs): """This function is deprecated, use StormEurope.from_footprints instead.""" - LOGGER.warning("The use of StormEurope.read_footprints is deprecated." - "Use StormEurope.from_footprints instead.") + LOGGER.warning( + "The use of StormEurope.read_footprints is deprecated." + "Use StormEurope.from_footprints instead." + ) self.__dict__ = StormEurope.from_footprints(*args, **kwargs).__dict__ @classmethod - def from_footprints(cls, path, ref_raster=None, centroids=None, - files_omit='fp_era20c_1990012515_701_0.nc', combine_threshold=None, - intensity_thres=None): + def from_footprints( + cls, + path, + ref_raster=None, + centroids=None, + files_omit="fp_era20c_1990012515_701_0.nc", + combine_threshold=None, + intensity_thres=None, + ): """Create new StormEurope object from WISC footprints. 
Assumes that all footprints have the same coordinates as the first file listed/first @@ -161,11 +175,13 @@ def from_footprints(cls, path, ref_raster=None, centroids=None, StormEurope object with data from WISC footprints. """ # pylint: disable=protected-access - intensity_thres = cls.intensity_thres if intensity_thres is None else intensity_thres + intensity_thres = ( + cls.intensity_thres if intensity_thres is None else intensity_thres + ) file_names = get_file_names(path) if ref_raster is not None and centroids is not None: - LOGGER.warning('Overriding ref_raster with centroids') + LOGGER.warning("Overriding ref_raster with centroids") if centroids is not None: pass @@ -177,7 +193,7 @@ def from_footprints(cls, path, ref_raster=None, centroids=None, if isinstance(files_omit, str): files_omit = [files_omit] - LOGGER.info('Commencing to iterate over netCDF files.') + LOGGER.info("Commencing to iterate over netCDF files.") file_names = set(file_names) files_to_read = sorted(file_names.difference(files_omit)) @@ -185,24 +201,24 @@ def from_footprints(cls, path, ref_raster=None, centroids=None, if files_to_skip: LOGGER.info("Omitting files %s", files_to_skip) hazard_list = [ - cls._read_one_nc( - file_name, - centroids, - intensity_thres) for file_name in files_to_read] + cls._read_one_nc(file_name, centroids, intensity_thres) + for file_name in files_to_read + ] haz = cls.concat([haz for haz in hazard_list if haz is not None]) # Fix values after concatenation haz.event_id = np.arange(1, len(haz.event_id) + 1) haz.frequency = np.divide( np.ones_like(haz.date), - np.max([(last_year(haz.date) - first_year(haz.date)), 1]) + np.max([(last_year(haz.date) - first_year(haz.date)), 1]), ) if combine_threshold is not None: - LOGGER.info('Combining events with small difference in date.') + LOGGER.info("Combining events with small difference in date.") difference_date = np.diff(haz.date) for event_id_i in haz.event_id[ - np.append(difference_date <= combine_threshold, False)]: + np.append(difference_date <= combine_threshold, False) + ]: event_ids = [event_id_i, event_id_i + 1] haz._combine_events(event_ids) return haz @@ -229,42 +245,58 @@ def _read_one_nc(cls, file_name, centroids, intensity_thres): Hazard instance for one single storm. """ with xr.open_dataset(file_name) as ncdf: - if centroids.size != (ncdf.sizes['latitude'] * ncdf.sizes['longitude']): - LOGGER.warning(('Centroids size doesn\'t match NCDF dimensions. ' - 'Omitting file %s.'), file_name) + if centroids.size != (ncdf.sizes["latitude"] * ncdf.sizes["longitude"]): + LOGGER.warning( + ( + "Centroids size doesn't match NCDF dimensions. " + "Omitting file %s." 
+ ), + file_name, + ) return None # xarray does not penalise repeated assignments, see # http://xarray.pydata.org/en/stable/data-structures.html - stacked = ncdf['max_wind_gust'].stack( - intensity=('latitude', 'longitude', 'time') + stacked = ncdf["max_wind_gust"].stack( + intensity=("latitude", "longitude", "time") ) stacked = stacked.where(stacked > intensity_thres) stacked = stacked.fillna(0) # fill in values from netCDF - ssi_wisc = np.array([float(ncdf.attrs['ssi'])]) + ssi_wisc = np.array([float(ncdf.attrs["ssi"])]) intensity = sparse.csr_matrix(stacked) - new_haz = cls(ssi_wisc=ssi_wisc, - intensity=intensity, - event_name=[ncdf.attrs['storm_name']], - date=np.array([datetime64_to_ordinal(ncdf['time'].data[0])]), - # fill in default values - centroids=centroids, - event_id=np.array([1]), - frequency=np.array([1]), - orig=np.array([True]),) + new_haz = cls( + ssi_wisc=ssi_wisc, + intensity=intensity, + event_name=[ncdf.attrs["storm_name"]], + date=np.array([datetime64_to_ordinal(ncdf["time"].data[0])]), + # fill in default values + centroids=centroids, + event_id=np.array([1]), + frequency=np.array([1]), + orig=np.array([True]), + ) return new_haz def read_cosmoe_file(self, *args, **kwargs): """This function is deprecated, use StormEurope.from_cosmoe_file instead.""" - LOGGER.warning("The use of StormEurope.read_cosmoe_file is deprecated." - "Use StormEurope.from_cosmoe_file instead.") + LOGGER.warning( + "The use of StormEurope.read_cosmoe_file is deprecated." + "Use StormEurope.from_cosmoe_file instead." + ) self.__dict__ = StormEurope.from_cosmoe_file(*args, **kwargs).__dict__ @classmethod - def from_cosmoe_file(cls, fp_file, run_datetime, event_date=None, - model_name='COSMO-2E', description=None, intensity_thres=None): + def from_cosmoe_file( + cls, + fp_file, + run_datetime, + event_date=None, + model_name="COSMO-2E", + description=None, + intensity_thres=None, + ): """Create a new StormEurope object with gust footprint from weather forecast. The funciton is designed for the COSMO ensemble model used by @@ -302,65 +334,80 @@ def from_cosmoe_file(cls, fp_file, run_datetime, event_date=None, haz : StormEurope StormEurope object with data from COSMO ensemble file. """ - intensity_thres = cls.intensity_thres if intensity_thres is None else intensity_thres + intensity_thres = ( + cls.intensity_thres if intensity_thres is None else intensity_thres + ) # read intensity from file with xr.open_dataset(fp_file) as ncdf: - ncdf = ncdf.assign_coords(date=('time',ncdf["time"].dt.floor("D").values)) + ncdf = ncdf.assign_coords(date=("time", ncdf["time"].dt.floor("D").values)) if event_date: try: - stacked = ncdf.sel( - time=event_date.strftime('%Y-%m-%d') - ).groupby('date').max().stack(intensity=('y_1', 'x_1')) + stacked = ( + ncdf.sel(time=event_date.strftime("%Y-%m-%d")) + .groupby("date") + .max() + .stack(intensity=("y_1", "x_1")) + ) except KeyError as ker: - raise ValueError('Extraction of date and coordinates failed. This is most likely ' - 'because the selected event_date ' - f'{event_date.strftime("%Y-%m-%d")} is not contained in the ' - 'weather forecast selected by fp_file {fp_file}. Please adjust ' - f'event_date or fp_file.') from ker + raise ValueError( + "Extraction of date and coordinates failed. This is most likely " + "because the selected event_date " + f'{event_date.strftime("%Y-%m-%d")} is not contained in the ' + "weather forecast selected by fp_file {fp_file}. Please adjust " + f"event_date or fp_file." 
+ ) from ker considered_dates = np.datetime64(event_date) else: - time_covered_step = ncdf['time'].diff('time') - time_covered_day = time_covered_step.groupby('date').sum() + time_covered_step = ncdf["time"].diff("time") + time_covered_day = time_covered_step.groupby("date").sum() # forecast run should cover at least 18 hours of a day - considered_dates_bool = time_covered_day >= np.timedelta64(18,'h') - stacked = ncdf.groupby('date').max()\ - .sel(date=considered_dates_bool)\ - .stack(intensity=('y_1', 'x_1')) - considered_dates = stacked['date'].values - stacked = stacked.stack(date_ensemble=('date', 'epsd_1')) - stacked = stacked.where(stacked['VMAX_10M'] > intensity_thres) + considered_dates_bool = time_covered_day >= np.timedelta64(18, "h") + stacked = ( + ncdf.groupby("date") + .max() + .sel(date=considered_dates_bool) + .stack(intensity=("y_1", "x_1")) + ) + considered_dates = stacked["date"].values + stacked = stacked.stack(date_ensemble=("date", "epsd_1")) + stacked = stacked.where(stacked["VMAX_10M"] > intensity_thres) stacked = stacked.fillna(0) # fill in values from netCDF - intensity = sparse.csr_matrix(stacked['VMAX_10M'].T) - event_id = np.arange(stacked['date_ensemble'].size) + 1 + intensity = sparse.csr_matrix(stacked["VMAX_10M"].T) + event_id = np.arange(stacked["date_ensemble"].size) + 1 date = np.repeat( np.array(datetime64_to_ordinal(considered_dates)), - np.unique(ncdf['epsd_1']).size + np.unique(ncdf["epsd_1"]).size, ) orig = np.full_like(event_id, False) - orig[(stacked['epsd_1'] == 0).values] = True + orig[(stacked["epsd_1"] == 0).values] = True if description is None: - description = (model_name + - ' weather forecast windfield ' + - 'for run startet at ' + - run_datetime.strftime('%Y%m%d%H')) + description = ( + model_name + + " weather forecast windfield " + + "for run startet at " + + run_datetime.strftime("%Y%m%d%H") + ) # Create Hazard haz = cls( intensity=intensity, event_id=event_id, - centroids = cls._centroids_from_nc(fp_file), + centroids=cls._centroids_from_nc(fp_file), # fill in default values orig=orig, date=date, - event_name=[date_i + '_ens' + str(ens_i) - for date_i, ens_i - in zip(date_to_str(date), stacked['epsd_1'].values + 1)], + event_name=[ + date_i + "_ens" + str(ens_i) + for date_i, ens_i in zip( + date_to_str(date), stacked["epsd_1"].values + 1 + ) + ], frequency=np.divide( - np.ones_like(event_id), - np.unique(ncdf['epsd_1']).size), + np.ones_like(event_id), np.unique(ncdf["epsd_1"]).size + ), ) haz.check() @@ -368,14 +415,23 @@ def from_cosmoe_file(cls, fp_file, run_datetime, event_date=None, def read_icon_grib(self, *args, **kwargs): """This function is deprecated, use StormEurope.from_icon_grib instead.""" - LOGGER.warning("The use of StormEurope.read_icon_grib is deprecated." - "Use StormEurope.from_icon_grib instead.") + LOGGER.warning( + "The use of StormEurope.read_icon_grib is deprecated." + "Use StormEurope.from_icon_grib instead." + ) self.__dict__ = StormEurope.from_icon_grib(*args, **kwargs).__dict__ @classmethod - def from_icon_grib(cls, run_datetime, event_date=None, model_name='icon-eu-eps', - description=None, grib_dir=None, delete_raw_data=True, - intensity_thres=None): + def from_icon_grib( + cls, + run_datetime, + event_date=None, + model_name="icon-eu-eps", + description=None, + grib_dir=None, + delete_raw_data=True, + intensity_thres=None, + ): """Create new StormEurope object from DWD icon weather forecast footprints. 
New files are available for 24 hours on @@ -420,14 +476,19 @@ def from_icon_grib(cls, run_datetime, event_date=None, model_name='icon-eu-eps', StormEurope object with data from DWD icon weather forecast footprints. """ # pylint: disable=protected-access - intensity_thres = cls.intensity_thres if intensity_thres is None else intensity_thres + intensity_thres = ( + cls.intensity_thres if intensity_thres is None else intensity_thres + ) if not (run_datetime.hour == 0 or run_datetime.hour == 12): - LOGGER.warning('The event definition is inaccuratly implemented ' - 'for starting times, which are not 00H or 12H.') + LOGGER.warning( + "The event definition is inaccuratly implemented " + "for starting times, which are not 00H or 12H." + ) # download files, if they don't already exist file_names = download_icon_grib( - run_datetime, model_name=model_name, download_dir=grib_dir) + run_datetime, model_name=model_name, download_dir=grib_dir + ) # create centroids nc_centroids_file = download_icon_centroids_file(model_name, grib_dir) @@ -435,79 +496,89 @@ def from_icon_grib(cls, run_datetime, event_date=None, model_name='icon-eu-eps', # read intensity from files for ind_i, file_i in enumerate(file_names): gripfile_path_i = Path(file_i[:-4]) - with open(file_i, 'rb') as source, open(gripfile_path_i, 'wb') as dest: + with open(file_i, "rb") as source, open(gripfile_path_i, "wb") as dest: dest.write(bz2.decompress(source.read())) - with xr.open_dataset(gripfile_path_i, engine='cfgrib') as ds_i: + with xr.open_dataset(gripfile_path_i, engine="cfgrib") as ds_i: if ind_i == 0: stacked = ds_i else: - stacked = xr.concat([stacked,ds_i], 'valid_time') + stacked = xr.concat([stacked, ds_i], "valid_time") # create intensity matrix with max for each full day stacked = stacked.assign_coords( - date=('valid_time', stacked["valid_time"].dt.floor("D").values)) + date=("valid_time", stacked["valid_time"].dt.floor("D").values) + ) if event_date: try: - stacked = stacked.sel( - valid_time=event_date.strftime('%Y-%m-%d')).groupby('date').max() + stacked = ( + stacked.sel(valid_time=event_date.strftime("%Y-%m-%d")) + .groupby("date") + .max() + ) except KeyError as ker: - raise ValueError('Extraction of date and coordinates failed. This is most likely ' - 'because the selected event_date ' - f'{event_date.strftime("%Y-%m-%d")} is not contained in the ' - 'weather forecast selected by run_datetime' - f'{run_datetime.strftime("%Y-%m-%d %H:%M")}. Please adjust ' - 'event_date or run_datetime.') from ker + raise ValueError( + "Extraction of date and coordinates failed. This is most likely " + "because the selected event_date " + f'{event_date.strftime("%Y-%m-%d")} is not contained in the ' + "weather forecast selected by run_datetime" + f'{run_datetime.strftime("%Y-%m-%d %H:%M")}. Please adjust ' + "event_date or run_datetime." 
+ ) from ker considered_dates = np.datetime64(event_date) else: - time_covered_step = stacked['valid_time'].diff('valid_time') - time_covered_day = time_covered_step.groupby('date').sum() + time_covered_step = stacked["valid_time"].diff("valid_time") + time_covered_day = time_covered_step.groupby("date").sum() # forecast run should cover at least 18 hours of a day - considered_dates_bool = time_covered_day >= np.timedelta64(18,'h') - stacked = stacked.groupby('date').max().sel(date=considered_dates_bool) - considered_dates = stacked['date'].values - stacked = stacked.stack(date_ensemble=('date', 'number')) + considered_dates_bool = time_covered_day >= np.timedelta64(18, "h") + stacked = stacked.groupby("date").max().sel(date=considered_dates_bool) + considered_dates = stacked["date"].values + stacked = stacked.stack(date_ensemble=("date", "number")) stacked = stacked.where(stacked > intensity_thres) stacked = stacked.fillna(0) - event_id = np.arange(stacked['date_ensemble'].size) + 1 + event_id = np.arange(stacked["date_ensemble"].size) + 1 date = np.repeat( np.array(datetime64_to_ordinal(considered_dates)), - np.unique(stacked['number']).size + np.unique(stacked["number"]).size, ) orig = np.full_like(event_id, False) - orig[(stacked['number'] == 1).values] = True + orig[(stacked["number"] == 1).values] = True if description is None: - description = ('icon weather forecast windfield for run started at ' + - run_datetime.strftime('%Y%m%d%H')) + description = ( + "icon weather forecast windfield for run started at " + + run_datetime.strftime("%Y%m%d%H") + ) # Create Hazard haz = cls( - intensity=sparse.csr_matrix(stacked['gust'].T), + intensity=sparse.csr_matrix(stacked["gust"].T), centroids=cls._centroids_from_nc(nc_centroids_file), event_id=event_id, date=date, orig=orig, - event_name=[date_i + '_ens' + str(ens_i) - for date_i, ens_i - in zip(date_to_str(date), stacked['number'].values)], + event_name=[ + date_i + "_ens" + str(ens_i) + for date_i, ens_i in zip(date_to_str(date), stacked["number"].values) + ], frequency=np.divide( - np.ones_like(event_id), - np.unique(stacked['number']).size), + np.ones_like(event_id), np.unique(stacked["number"]).size + ), ) haz.check() # delete generated .grib2 and .4cc40.idx files for ind_i, file_i in enumerate(file_names): gripfile_path_i = Path(file_i[:-4]) - idxfile_path_i = next(gripfile_path_i.parent.glob( - str(gripfile_path_i.name) + '.*.idx')) + idxfile_path_i = next( + gripfile_path_i.parent.glob(str(gripfile_path_i.name) + ".*.idx") + ) gripfile_path_i.unlink() idxfile_path_i.unlink() if delete_raw_data: - #delete downloaded .bz2 files + # delete downloaded .bz2 files delete_icon_grib(run_datetime, model_name=model_name, download_dir=grib_dir) return haz @@ -517,40 +588,43 @@ def _centroids_from_nc(file_name): """Construct Centroids from the grid described by 'latitude' and 'longitude' variables in a netCDF file. 
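# Usage sketch (editor's illustration of the two forecast readers above; the
# dates and the file path are hypothetical):
import datetime as dt
from climada.hazard import StormEurope

# ICON-EU-EPS gust footprints; grib files are downloaded from DWD open data if missing
haz_icon = StormEurope.from_icon_grib(
    run_datetime=dt.datetime(2024, 1, 2, 0),  # 00H or 12H runs are expected
    event_date=dt.datetime(2024, 1, 3),
)

# COSMO-2E ensemble footprints from a local NetCDF file
haz_cosmo = StormEurope.from_cosmoe_file(
    "cosmo2e_vmax.nc",
    run_datetime=dt.datetime(2024, 1, 2, 0),
    event_date=dt.datetime(2024, 1, 3),
)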
""" - LOGGER.info('Constructing centroids from %s', file_name) + LOGGER.info("Constructing centroids from %s", file_name) with xr.open_dataset(file_name) as ncdf: create_meshgrid = True - if hasattr(ncdf, 'latitude'): - lats = ncdf['latitude'].data - lons = ncdf['longitude'].data - elif hasattr(ncdf, 'lat'): - lats = ncdf['lat'].data - lons = ncdf['lon'].data - elif hasattr(ncdf, 'lat_1'): - if len(ncdf['lon_1'].shape)>1 & \ - (ncdf['lon_1'].shape == ncdf['lat_1'].shape) \ - : - lats = ncdf['lat_1'].data.flatten() - lons = ncdf['lon_1'].data.flatten() + if hasattr(ncdf, "latitude"): + lats = ncdf["latitude"].data + lons = ncdf["longitude"].data + elif hasattr(ncdf, "lat"): + lats = ncdf["lat"].data + lons = ncdf["lon"].data + elif hasattr(ncdf, "lat_1"): + if len(ncdf["lon_1"].shape) > 1 & ( + ncdf["lon_1"].shape == ncdf["lat_1"].shape + ): + lats = ncdf["lat_1"].data.flatten() + lons = ncdf["lon_1"].data.flatten() create_meshgrid = False else: - lats = ncdf['lat_1'].data - lons = ncdf['lon_1'].data - elif hasattr(ncdf, 'clat'): - lats = ncdf['clat'].data - lons = ncdf['clon'].data - if ncdf['clat'].attrs['units']=='radian': + lats = ncdf["lat_1"].data + lons = ncdf["lon_1"].data + elif hasattr(ncdf, "clat"): + lats = ncdf["clat"].data + lons = ncdf["clon"].data + if ncdf["clat"].attrs["units"] == "radian": lats = np.rad2deg(lats) lons = np.rad2deg(lons) create_meshgrid = False else: - raise AttributeError('netcdf file has no field named latitude or ' - 'other know abrivation for coordinates.') + raise AttributeError( + "netcdf file has no field named latitude or " + "other know abrivation for coordinates." + ) if create_meshgrid: - lats, lons = np.array([np.repeat(lats, len(lons)), - np.tile(lons, len(lats))]) - cent = Centroids(lat=lats, lon=lons, on_land='natural_earth') + lats, lons = np.array( + [np.repeat(lats, len(lons)), np.tile(lons, len(lats))] + ) + cent = Centroids(lat=lats, lon=lons, on_land="natural_earth") return cent @@ -569,34 +643,46 @@ def _combine_events(self, event_ids): select_other_events = np.invert(select_event_ids) intensity_tmp = self.intensity[select_event_ids, :].max(axis=0) self.intensity = self.intensity[select_other_events, :] - self.intensity = sparse.vstack([self.intensity, sparse.csr_matrix(intensity_tmp)]) - self.event_id = np.append(self.event_id[select_other_events], self.event_id.max() + 1) - self.date = np.append(self.date[select_other_events], - np.round(self.date[select_event_ids].mean())) + self.intensity = sparse.vstack( + [self.intensity, sparse.csr_matrix(intensity_tmp)] + ) + self.event_id = np.append( + self.event_id[select_other_events], self.event_id.max() + 1 + ) + self.date = np.append( + self.date[select_other_events], np.round(self.date[select_event_ids].mean()) + ) name_2 = self.event_name.pop(np.where(select_event_ids)[0][1]) name_1 = self.event_name.pop(np.where(select_event_ids)[0][0]) - self.event_name.append(name_1 + '_' + name_2) + self.event_name.append(name_1 + "_" + name_2) fraction_tmp = self.fraction[select_event_ids, :].max(axis=0) self.fraction = self.fraction[select_other_events, :] self.fraction = sparse.vstack([self.fraction, sparse.csr_matrix(fraction_tmp)]) - self.frequency = np.append(self.frequency[select_other_events], - self.frequency[select_event_ids].mean()) - self.orig = np.append(self.orig[select_other_events], - self.orig[select_event_ids].max()) + self.frequency = np.append( + self.frequency[select_other_events], self.frequency[select_event_ids].mean() + ) + self.orig = np.append( + 
self.orig[select_other_events], self.orig[select_event_ids].max() + ) if self.ssi_wisc.size > 0: - self.ssi_wisc = np.append(self.ssi_wisc[select_other_events], - np.nan) + self.ssi_wisc = np.append(self.ssi_wisc[select_other_events], np.nan) if self.ssi.size > 0: - self.ssi = np.append(self.ssi[select_other_events], - np.nan) + self.ssi = np.append(self.ssi[select_other_events], np.nan) if self.ssi_full_area.size > 0: - self.ssi_full_area = np.append(self.ssi_full_area[select_other_events], - np.nan) + self.ssi_full_area = np.append( + self.ssi_full_area[select_other_events], np.nan + ) self.check() - def calc_ssi(self, method='dawkins', intensity=None, on_land=True, - threshold=None, sel_cen=None): + def calc_ssi( + self, + method="dawkins", + intensity=None, + on_land=True, + threshold=None, + sel_cen=None, + ): """Calculate the SSI, method must either be 'dawkins' or 'wisc_gust'. 'dawkins', after Dawkins et al. (2016), @@ -642,8 +728,9 @@ def calc_ssi(self, method='dawkins', intensity=None, on_land=True, intensity = self.intensity if threshold is not None: - assert threshold >= self.intensity_thres, \ - 'threshold cannot be below threshold upon read_footprint' + assert ( + threshold >= self.intensity_thres + ), "threshold cannot be below threshold upon read_footprint" intensity = intensity.multiply(intensity > threshold) else: intensity = intensity.multiply(intensity > self.intensity_thres) @@ -660,14 +747,14 @@ def calc_ssi(self, method='dawkins', intensity=None, on_land=True, ssi = np.zeros(intensity.shape[0]) - if method == 'dawkins': + if method == "dawkins": area_c = area_pixel / 1000 / 1000 * sel_cen for i, inten_i in enumerate(intensity): ssi_i = inten_i.power(3).dot(area_c) # matrix crossproduct (row x column vector) ssi[i] = ssi_i.item(0) - elif method == 'wisc_gust': + elif method == "wisc_gust": for i, inten_i in enumerate(intensity[:, sel_cen]): area = np.sum(area_pixel[inten_i.indices]) / 1000 / 1000 inten_mean = np.mean(inten_i) @@ -700,33 +787,37 @@ def plot_ssi(self, full_area=False): ssi = self.ssi # data wrangling - ssi_freq = pd.DataFrame({ - 'ssi': ssi, - 'freq': self.frequency, - 'orig': self.orig, - }) - ssi_freq = ssi_freq.sort_values('ssi', ascending=False) - ssi_freq['freq_cum'] = np.cumsum(ssi_freq['freq']) - - ssi_hist = ssi_freq.loc[ssi_freq['orig']].copy() - ssi_hist['freq'] = ssi_hist['freq'] * self.orig.size / self.orig.sum() - ssi_hist['freq_cum'] = np.cumsum(ssi_hist['freq']) + ssi_freq = pd.DataFrame( + { + "ssi": ssi, + "freq": self.frequency, + "orig": self.orig, + } + ) + ssi_freq = ssi_freq.sort_values("ssi", ascending=False) + ssi_freq["freq_cum"] = np.cumsum(ssi_freq["freq"]) + + ssi_hist = ssi_freq.loc[ssi_freq["orig"]].copy() + ssi_hist["freq"] = ssi_hist["freq"] * self.orig.size / self.orig.sum() + ssi_hist["freq_cum"] = np.cumsum(ssi_hist["freq"]) # plotting fig, axs = plt.subplots() - axs.plot(ssi_freq['freq_cum'], ssi_freq['ssi'], label='All Events') - axs.scatter(ssi_hist['freq_cum'], ssi_hist['ssi'], - color='red', label='Historic Events') + axs.plot(ssi_freq["freq_cum"], ssi_freq["ssi"], label="All Events") + axs.scatter( + ssi_hist["freq_cum"], ssi_hist["ssi"], color="red", label="Historic Events" + ) axs.legend() - axs.set_xlabel('Exceedance Frequency [1/a]') - axs.set_ylabel('Storm Severity Index') - axs.ticklabel_format(axis='y', style='sci', scilimits=(0, 0)) + axs.set_xlabel("Exceedance Frequency [1/a]") + axs.set_ylabel("Storm Severity Index") + axs.ticklabel_format(axis="y", style="sci", scilimits=(0, 0)) plt.show() return fig, 
axs - def generate_prob_storms(self, reg_id=528, spatial_shift=4, ssi_args=None, - **kwargs): + def generate_prob_storms( + self, reg_id=528, spatial_shift=4, ssi_args=None, **kwargs + ): """Generates a new hazard set with one original and 29 probabilistic storms per historic storm. This represents a partial implementation of the Monte-Carlo method described in section 2.2 of Schwierz et al. @@ -773,10 +864,7 @@ def generate_prob_storms(self, reg_id=528, spatial_shift=4, ssi_args=None, else: # shifting truncates valid centroids sel_cen = np.zeros(self.centroids.shape, bool) - sel_cen[ - spatial_shift:-spatial_shift, - spatial_shift:-spatial_shift - ] = True + sel_cen[spatial_shift:-spatial_shift, spatial_shift:-spatial_shift] = True sel_cen = sel_cen.reshape(self.centroids.size) # init probabilistic array @@ -784,21 +872,17 @@ def generate_prob_storms(self, reg_id=528, spatial_shift=4, ssi_args=None, intensity_prob = sparse.lil_matrix((n_out, np.count_nonzero(sel_cen))) ssi = np.zeros(n_out) - LOGGER.info('Commencing probabilistic calculations') + LOGGER.info("Commencing probabilistic calculations") for index, intensity1d in enumerate(self.intensity): # indices for return matrix start = index * N_PROB_EVENTS end = (index + 1) * N_PROB_EVENTS - intensity_prob[start:end, :], ssi[start:end] =\ - self._hist2prob( - intensity1d, - sel_cen, - spatial_shift, - ssi_args, - **kwargs) + intensity_prob[start:end, :], ssi[start:end] = self._hist2prob( + intensity1d, sel_cen, spatial_shift, ssi_args, **kwargs + ) - LOGGER.info('Generating new StormEurope instance') + LOGGER.info("Generating new StormEurope instance") base = np.repeat((self.event_id * 100), N_PROB_EVENTS) synth_id = np.tile(np.arange(N_PROB_EVENTS), self.size) event_id = base + synth_id @@ -811,20 +895,27 @@ def generate_prob_storms(self, reg_id=528, spatial_shift=4, ssi_args=None, # subsetting centroids centroids=self.centroids.select(sel_cen=sel_cen), # construct new event ids - event_id=event_id, # frequency still based on the historic number of # years - frequency=np.divide(np.repeat(self.frequency, N_PROB_EVENTS), - N_PROB_EVENTS), + frequency=np.divide( + np.repeat(self.frequency, N_PROB_EVENTS), N_PROB_EVENTS + ), orig=(event_id % 100 == 0), ) new_haz.check() return new_haz - def _hist2prob(self, intensity1d, sel_cen, spatial_shift, ssi_args=None, - power=1.15, scale=0.0225): + def _hist2prob( + self, + intensity1d, + sel_cen, + spatial_shift, + ssi_args=None, + power=1.15, + scale=0.0225, + ): """Internal function, intended to be called from generate_prob_storms. Generates six permutations based on one historical storm event, which it then moves around by spatial_shift gridpoints to the east, west, and @@ -881,27 +972,32 @@ def _hist2prob(self, intensity1d, sel_cen, spatial_shift, ssi_args=None, intensity3d_prob[4] = intensity2d + (scale * intensity2d_pwr) # 6. 
minus scaled sqrt and pwr - intensity3d_prob[5] = (intensity2d - - (0.5 * scale * intensity2d_pwr) - - (0.5 * scale * intensity2d_sqrt)) + intensity3d_prob[5] = ( + intensity2d + - (0.5 * scale * intensity2d_pwr) + - (0.5 * scale * intensity2d_sqrt) + ) # spatial shifts # northward - intensity3d_prob[6:12, :-spatial_shift, :] = \ - intensity3d_prob[0:6, spatial_shift:, :] + intensity3d_prob[6:12, :-spatial_shift, :] = intensity3d_prob[ + 0:6, spatial_shift:, : + ] # southward - intensity3d_prob[12:18, spatial_shift:, :] = \ - intensity3d_prob[0:6, :-spatial_shift, :] + intensity3d_prob[12:18, spatial_shift:, :] = intensity3d_prob[ + 0:6, :-spatial_shift, : + ] # eastward - intensity3d_prob[18:24, :, spatial_shift:] = \ - intensity3d_prob[0:6, :, :-spatial_shift] + intensity3d_prob[18:24, :, spatial_shift:] = intensity3d_prob[ + 0:6, :, :-spatial_shift + ] # westward - intensity3d_prob[24:30, :, :-spatial_shift] = \ - intensity3d_prob[0:6, :, spatial_shift:] + intensity3d_prob[24:30, :, :-spatial_shift] = intensity3d_prob[ + 0:6, :, spatial_shift: + ] intensity_out = intensity3d_prob.reshape( - N_PROB_EVENTS, - np.prod(self.centroids.shape) + N_PROB_EVENTS, np.prod(self.centroids.shape) ) ssi = self.calc_ssi(intensity=intensity_out, **ssi_args) @@ -910,11 +1006,13 @@ def _hist2prob(self, intensity1d, sel_cen, spatial_shift, ssi_args=None, # pylint: disable=invalid-name -def generate_WS_forecast_hazard(run_datetime=None, - event_date=None, - haz_model='icon-eu-eps', - haz_raw_storage=None, - save_haz=True): +def generate_WS_forecast_hazard( + run_datetime=None, + event_date=None, + haz_model="icon-eu-eps", + haz_raw_storage=None, + save_haz=True, +): """use the initialization time (run_datetime), the date of the event and specify the forecast model (haz_model) to generate a Hazard from forecast data either by download or through reading from existing file. 
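# --- Illustrative usage sketch (editorial addition, not part of the patch): how the
# function above could be called to build a forecast hazard. The dates are hypothetical
# and the call needs network access (or previously cached files) to obtain the
# ICON-EU-EPS GRIB data.
import datetime as dt

from climada.hazard.storm_europe import generate_WS_forecast_hazard

hazard, haz_model, run_dt, ev_date = generate_WS_forecast_hazard(
    run_datetime=dt.datetime(2024, 1, 1, 0),  # 00 UTC model run (hypothetical date)
    event_date=dt.datetime(2024, 1, 3),       # day for which footprints are extracted
    haz_model="icon-eu-eps",                  # one of the supported model names
    save_haz=False,                           # skip writing the HDF5 cache file
)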
@@ -959,65 +1057,76 @@ def generate_WS_forecast_hazard(run_datetime=None, FORECAST_DIR = CONFIG.hazard.storm_europe.forecast_dir.dir() if run_datetime is None: - run_datetime = dt.datetime.today().replace(hour=0, minute=0, second=0, microsecond=0) + run_datetime = dt.datetime.today().replace( + hour=0, minute=0, second=0, microsecond=0 + ) if event_date is None: - event_date = dt.datetime.today().replace(hour=0, minute=0, second=0, microsecond=0) \ - + dt.timedelta(days=2) - - if haz_model in ['cosmo1e_file', 'cosmo2e_file']: - if haz_model == 'cosmo1e_file': - haz_model='C1E' - full_model_name_temp = 'COSMO-1E' - if haz_model == 'cosmo2e_file': - haz_model='C2E' - full_model_name_temp = 'COSMO-2E' - haz_file_name = (f'{HAZ_TYPE}_{haz_model}_run{run_datetime.strftime("%Y%m%d%H")}' - f'_event{event_date.strftime("%Y%m%d")}.hdf5') + event_date = dt.datetime.today().replace( + hour=0, minute=0, second=0, microsecond=0 + ) + dt.timedelta(days=2) + + if haz_model in ["cosmo1e_file", "cosmo2e_file"]: + if haz_model == "cosmo1e_file": + haz_model = "C1E" + full_model_name_temp = "COSMO-1E" + if haz_model == "cosmo2e_file": + haz_model = "C2E" + full_model_name_temp = "COSMO-2E" + haz_file_name = ( + f'{HAZ_TYPE}_{haz_model}_run{run_datetime.strftime("%Y%m%d%H")}' + f'_event{event_date.strftime("%Y%m%d")}.hdf5' + ) haz_file = FORECAST_DIR / haz_file_name if haz_file.exists(): - LOGGER.info('Loading hazard from %s.', haz_file) + LOGGER.info("Loading hazard from %s.", haz_file) hazard = StormEurope.from_hdf5(haz_file) else: - LOGGER.info('Generating %s hazard.', haz_model) + LOGGER.info("Generating %s hazard.", haz_model) if not haz_raw_storage: haz_raw_storage = FORECAST_DIR / "cosmoe_forecast_{}_vmax.nc" - fp_file = Path(str(haz_raw_storage).format(run_datetime.strftime('%y%m%d%H'))) + fp_file = Path( + str(haz_raw_storage).format(run_datetime.strftime("%y%m%d%H")) + ) hazard = StormEurope.from_cosmoe_file( fp_file, event_date=event_date, run_datetime=run_datetime, - model_name=full_model_name_temp + model_name=full_model_name_temp, ) if save_haz: hazard.write_hdf5(haz_file) - elif haz_model in ['icon-eu-eps', 'icon-d2-eps']: - if haz_model == 'icon-eu-eps': + elif haz_model in ["icon-eu-eps", "icon-d2-eps"]: + if haz_model == "icon-eu-eps": full_model_name_temp = haz_model - haz_model='IEE' - if haz_model == 'icon-d2-eps': + haz_model = "IEE" + if haz_model == "icon-d2-eps": full_model_name_temp = haz_model - haz_model='IDE' - haz_file_name = (f'{HAZ_TYPE}_{haz_model}_run{run_datetime.strftime("%Y%m%d%H")}' - f'_event{event_date.strftime("%Y%m%d")}.hdf5') + haz_model = "IDE" + haz_file_name = ( + f'{HAZ_TYPE}_{haz_model}_run{run_datetime.strftime("%Y%m%d%H")}' + f'_event{event_date.strftime("%Y%m%d")}.hdf5' + ) haz_file = FORECAST_DIR / haz_file_name if haz_file.exists(): - LOGGER.info('Loading hazard from %s.', haz_file) + LOGGER.info("Loading hazard from %s.", haz_file) hazard = StormEurope.from_hdf5(haz_file) else: - LOGGER.info('Generating %s hazard.', haz_model) + LOGGER.info("Generating %s hazard.", haz_model) hazard = StormEurope.from_icon_grib( run_datetime, event_date=event_date, delete_raw_data=False, - model_name=full_model_name_temp + model_name=full_model_name_temp, ) if save_haz: hazard.write_hdf5(haz_file) else: - raise NotImplementedError("specific 'WS' hazard not implemented yet. " + - "Please specify a valid value for haz_model.") + raise NotImplementedError( + "specific 'WS' hazard not implemented yet. " + + "Please specify a valid value for haz_model." 
+ ) # check if hazard is successfully generated for Forecast if not isinstance(hazard, Hazard): - LOGGER.warning('Hazard generation unsuccessful.') + LOGGER.warning("Hazard generation unsuccessful.") return hazard, haz_model, run_datetime, event_date diff --git a/climada/hazard/tc_clim_change.py b/climada/hazard/tc_clim_change.py index c10fcecc0..576cb38bd 100644 --- a/climada/hazard/tc_clim_change.py +++ b/climada/hazard/tc_clim_change.py @@ -36,29 +36,31 @@ Define scaling factors to model the impact of climate change on tropical cyclones. """ -from math import log import logging -import pandas as pd +from math import log + import numpy as np +import pandas as pd LOGGER = logging.getLogger(__name__) -MAP_BASINS_NAMES = {'NA': 0, 'WP': 1, 'EP': 2, 'NI': 3, 'SI': 4, 'SP': 5} +MAP_BASINS_NAMES = {"NA": 0, "WP": 1, "EP": 2, "NI": 3, "SI": 4, "SP": 5} -MAP_VARS_NAMES = {'cat05': 0, 'cat45': 1, 'intensity': 2} +MAP_VARS_NAMES = {"cat05": 0, "cat45": 1, "intensity": 2} -MAP_PERC_NAMES = {'5/10': 0, '25': 1, '50': 2, '75': 3, '90/95': 4} +MAP_PERC_NAMES = {"5/10": 0, "25": 1, "50": 2, "75": 3, "90/95": 4} # it defines the first and last projection years as well as the largest smoothing window -YEAR_WINDOWS_PROPS = {'start': 2000, 'end': 2100, 'smoothing': 5} +YEAR_WINDOWS_PROPS = {"start": 2000, "end": 2100, "smoothing": 5} + def get_knutson_scaling_factor( - variable: str='cat05', - percentile: str='50', - basin: str='NA', - baseline: tuple=(1982, 2022), - yearly_steps: int=5 - ): + variable: str = "cat05", + percentile: str = "50", + basin: str = "NA", + baseline: tuple = (1982, 2022), + yearly_steps: int = 5, +): """ This code combines data in Knutson et al. (2020) and global mean surface temperature (GMST) data (historical and CMIP5 simulated) to produce TC @@ -119,16 +121,20 @@ def get_knutson_scaling_factor( knutson_data = get_knutson_data() - num_of_rcps, gmst_years = gmst_info['gmst_data'].shape + num_of_rcps, gmst_years = gmst_info["gmst_data"].shape - if ((base_start_year <= gmst_info['gmst_start_year']) or - (base_start_year >= gmst_info['gmst_end_year']) or - (base_end_year <= gmst_info['gmst_start_year']) or - (base_end_year >= gmst_info['gmst_end_year'])): + if ( + (base_start_year <= gmst_info["gmst_start_year"]) + or (base_start_year >= gmst_info["gmst_end_year"]) + or (base_end_year <= gmst_info["gmst_start_year"]) + or (base_end_year >= gmst_info["gmst_end_year"]) + ): - raise ValueError("The selected historical baseline falls outside" - f"the GMST data period {gmst_info['gmst_start_year']}" - f"-{gmst_info['gmst_end_year']}") + raise ValueError( + "The selected historical baseline falls outside" + f"the GMST data period {gmst_info['gmst_start_year']}" + f"-{gmst_info['gmst_end_year']}" + ) var_id = MAP_VARS_NAMES[variable] perc_id = MAP_PERC_NAMES[percentile] @@ -139,9 +145,9 @@ def get_knutson_scaling_factor( # 3. calculate the fractional change in the averages # please refer to section 4. Methods of Jewson (2021) for more details. 
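# Minimal numeric sketch of the three steps above (editorial addition, not part of the
# patch): the Knutson change and GMST anomalies below are made-up toy values; the real
# function uses get_knutson_data() and get_gmst_info() instead.
import numpy as np

knutson_value = 10.0                                 # toy value: +10% at 2C warming
gmst = np.array([0.1, 0.2, 0.3, 0.9, 1.0, 1.1])      # toy GMST anomalies [deg C]

beta = 0.5 * np.log(0.01 * knutson_value + 1)        # equation 6 in Jewson (2021)
tc_property = np.exp(beta * gmst)                    # equation 3: dose-response curve
baseline = tc_property[:3].mean()                    # step 2: baseline-period average
prediction = tc_property[3:].mean()                  # average over a future window
change_percent = (prediction - baseline) / baseline * 100   # step 3: fractional change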
- mid_years = np.arange(YEAR_WINDOWS_PROPS['start'], - YEAR_WINDOWS_PROPS['end']+1, - yearly_steps) + mid_years = np.arange( + YEAR_WINDOWS_PROPS["start"], YEAR_WINDOWS_PROPS["end"] + 1, yearly_steps + ) predicted_change = np.ones((mid_years.shape[0], num_of_rcps)) try: @@ -149,29 +155,31 @@ def get_knutson_scaling_factor( knutson_value = knutson_data[var_id, basin_id, perc_id] except KeyError: - LOGGER.warning(f"No scaling factors are defined for basin {basin} therefore" - "no change will be projected for tracks in this basin") - return pd.DataFrame(predicted_change, - index=mid_years, - columns=gmst_info['rcps']) + LOGGER.warning( + f"No scaling factors are defined for basin {basin} therefore" + "no change will be projected for tracks in this basin" + ) + return pd.DataFrame( + predicted_change, index=mid_years, columns=gmst_info["rcps"] + ) - base_start_pos = base_start_year - gmst_info['gmst_start_year'] - base_end_pos = base_end_year - gmst_info['gmst_start_year'] + base_start_pos = base_start_year - gmst_info["gmst_start_year"] + base_end_pos = base_end_year - gmst_info["gmst_start_year"] # Step 1. - beta = 0.5 * log(0.01 * knutson_value + 1) # equation 6 in Jewson (2021) - tc_properties = np.exp(beta * gmst_info['gmst_data']) # equation 3 in Jewson (2021) + beta = 0.5 * log(0.01 * knutson_value + 1) # equation 6 in Jewson (2021) + tc_properties = np.exp(beta * gmst_info["gmst_data"]) # equation 3 in Jewson (2021) # Step 2. - baseline = np.mean(tc_properties[:, base_start_pos:base_end_pos + 1], 1) + baseline = np.mean(tc_properties[:, base_start_pos : base_end_pos + 1], 1) # Step 3. for i, mid_year in enumerate(mid_years): - mid_year_in_gmst_ind = mid_year - gmst_info['gmst_start_year'] + mid_year_in_gmst_ind = mid_year - gmst_info["gmst_start_year"] actual_smoothing = min( - YEAR_WINDOWS_PROPS['smoothing'], + YEAR_WINDOWS_PROPS["smoothing"], gmst_years - mid_year_in_gmst_ind - 1, - mid_year_in_gmst_ind + mid_year_in_gmst_ind, ) fut_start_pos = mid_year_in_gmst_ind - actual_smoothing fut_end_pos = mid_year_in_gmst_ind + actual_smoothing + 1 @@ -179,12 +187,10 @@ def get_knutson_scaling_factor( prediction = np.mean(tc_properties[:, fut_start_pos:fut_end_pos], 1) # assess fractional changes - predicted_change[i] = ((prediction - baseline) / - baseline) * 100 + predicted_change[i] = ((prediction - baseline) / baseline) * 100 + + return pd.DataFrame(predicted_change, index=mid_years, columns=gmst_info["rcps"]) - return pd.DataFrame(predicted_change, - index=mid_years, - columns=gmst_info['rcps']) def get_gmst_info(): """ @@ -207,126 +213,913 @@ def get_gmst_info(): - gmst_data: array with GMST data across RCPs (first dim) and years (second dim) """ - gmst_data = np.array([ - [-0.16,-0.08,-0.1,-0.16,-0.28,-0.32,-0.3,-0.35,-0.16,-0.1, - -0.35,-0.22,-0.27,-0.31,-0.3,-0.22,-0.11,-0.11,-0.26,-0.17, - -0.08,-0.15,-0.28,-0.37, -0.47,-0.26,-0.22,-0.39,-0.43,-0.48, - -0.43,-0.44,-0.36,-0.34,-0.15,-0.14,-0.36,-0.46,-0.29,-0.27, - -0.27,-0.19,-0.28,-0.26,-0.27,-0.22,-0.1,-0.22,-0.2,-0.36, - -0.16,-0.1,-0.16,-0.29,-0.13,-0.2,-0.15,-0.03,-0.01,-0.02, - 0.13,0.19,0.07,0.09,0.2,0.09,-0.07,-0.03,-0.11,-0.11,-0.17, - -0.07,0.01,0.08,-0.13,-0.14,-0.19,0.05,0.06,0.03,-0.02,0.06, - 0.04,0.05,-0.2,-0.11,-0.06,-0.02,-0.08,0.05,0.02,-0.08,0.01, - 0.16,-0.07,-0.01,-0.1,0.18,0.07,0.16,0.26,0.32,0.14,0.31,0.15, - 0.11,0.18,0.32,0.38,0.27,0.45,0.4,0.22,0.23,0.32,0.45,0.33,0.47, - 0.61,0.39,0.39,0.54,0.63,0.62,0.54, 0.68,0.64,0.66,0.54,0.66, - 0.72,0.61,0.64,0.68,0.75,0.9,1.02,0.92,0.85,0.98,0.909014286, - 
0.938814286,0.999714286,1.034314286,1.009714286,1.020014286, - 1.040914286,1.068614286,1.072114286,1.095114286,1.100414286, - 1.099014286,1.118514286,1.133414286,1.135314286,1.168814286, - 1.200414286,1.205414286,1.227214286,1.212614286,1.243014286, - 1.270114286,1.250114286,1.254514286,1.265814286,1.263314286, - 1.294714286,1.289814286,1.314214286,1.322514286,1.315614286, - 1.276314286,1.302414286,1.318414286,1.312014286,1.317914286, - 1.341214286,1.297414286,1.308514286,1.314614286,1.327814286, - 1.335814286,1.331214286,1.318014286,1.289714286,1.334414286, - 1.323914286,1.316614286,1.300214286,1.302414286,1.303114286, - 1.311014286,1.283914286,1.293814286,1.296914286,1.316614286, - 1.306314286,1.290614286,1.288814286,1.272114286,1.264614286, - 1.262514286,1.290514286,1.285114286,1.267214286,1.267414286, - 1.294314286,1.315614286,1.310314286,1.283914286,1.296614286, - 1.281214286,1.301014286,1.300114286,1.303114286,1.286714286, - 1.297514286,1.312114286,1.276714286,1.281414286,1.276414286], - [-0.16,-0.08,-0.1,-0.16,-0.28,-0.32,-0.3,-0.35,-0.16,-0.1, - -0.35, -0.22,-0.27,-0.31,-0.3,-0.22,-0.11,-0.11,-0.26,-0.17, - -0.08,-0.15,-0.28,-0.37,-0.47,-0.26,-0.22,-0.39,-0.43,-0.48, - -0.43,-0.44,-0.36,-0.34,-0.15,-0.14,-0.36,-0.46, -0.29,-0.27, - -0.27,-0.19,-0.28,-0.26,-0.27,-0.22,-0.1,-0.22,-0.2,-0.36, - -0.16,-0.1,-0.16,-0.29,-0.13,-0.2,-0.15,-0.03,-0.01,-0.02,0.13, - 0.19,0.07,0.09,0.2,0.09,-0.07,-0.03,-0.11,-0.11,-0.17,-0.07,0.01, - 0.08,-0.13,-0.14,-0.19,0.05,0.06,0.03,-0.02,0.06,0.04,0.05,-0.2, - -0.11,-0.06,-0.02,-0.08,0.05,0.02,-0.08,0.01,0.16,-0.07,-0.01, - -0.1,0.18,0.07,0.16,0.26,0.32,0.14,0.31,0.15,0.11,0.18,0.32,0.38, - 0.27,0.45,0.4,0.22,0.23,0.32,0.45,0.33,0.47,0.61,0.39,0.39,0.54, - 0.63,0.62,0.54,0.68,0.64,0.66,0.54,0.66,0.72,0.61,0.64,0.68,0.75, - 0.9,1.02,0.92,0.85,0.98,0.903592857,0.949092857,0.955792857, - 0.997892857,1.048392857,1.068092857,1.104792857,1.122192857, - 1.125792857,1.156292857,1.160992857,1.201692857,1.234692857, - 1.255392857,1.274392857,1.283792857,1.319992857,1.369992857, - 1.385592857,1.380892857,1.415092857,1.439892857,1.457092857, - 1.493592857,1.520292857,1.517692857,1.538092857,1.577192857, - 1.575492857,1.620392857,1.657092857,1.673492857,1.669992857, - 1.706292857,1.707892857,1.758592857,1.739492857,1.740192857, - 1.797792857,1.839292857,1.865392857,1.857692857,1.864092857, - 1.881192857,1.907592857,1.918492857,1.933992857,1.929392857, - 1.931192857,1.942492857,1.985592857,1.997392857,2.000992857, - 2.028692857,2.016192857,2.020792857,2.032892857,2.057492857, - 2.092092857,2.106292857,2.117492857,2.123492857,2.121092857, - 2.096892857,2.126892857,2.131292857,2.144892857,2.124092857, - 2.134492857,2.171392857,2.163692857,2.144092857,2.145092857, - 2.128992857,2.129992857,2.169192857,2.186492857,2.181092857, - 2.217592857,2.210492857,2.223692857], - [-0.16,-0.08,-0.1,-0.16,-0.28,-0.32,-0.3,-0.35,-0.16,-0.1, - -0.35,-0.22,-0.27, -0.31,-0.3,-0.22,-0.11,-0.11,-0.26,-0.17, - -0.08,-0.15,-0.28,-0.37,-0.47,-0.26,-0.22,-0.39,-0.43,-0.48, - -0.43,-0.44,-0.36,-0.34,-0.15,-0.14,-0.36,-0.46,-0.29,-0.27, - -0.27,-0.19,-0.28,-0.26,-0.27,-0.22,-0.1,-0.22,-0.2,-0.36, - -0.16,-0.1,-0.16,-0.29,-0.13,-0.2,-0.15,-0.03,-0.01,-0.02,0.13, - 0.19,0.07,0.09,0.2,0.09,-0.07,-0.03,-0.11,-0.11,-0.17,-0.07,0.01, - 0.08,-0.13,-0.14,-0.19,0.05,0.06,0.03,-0.02,0.06,0.04,0.05,-0.2, - -0.11,-0.06,-0.02,-0.08,0.05,0.02,-0.08,0.01,0.16,-0.07,-0.01,-0.1, - 0.18,0.07,0.16,0.26,0.32,0.14,0.31,0.15,0.11,0.18,0.32,0.38,0.27,0.45, - 
0.4,0.22,0.23,0.32,0.45,0.33,0.47,0.61,0.39,0.39,0.54,0.63,0.62,0.54, - 0.68,0.64,0.66,0.54,0.66,0.72,0.61,0.64,0.68,0.75,0.9,1.02,0.92,0.85, - 0.98,0.885114286,0.899814286,0.919314286,0.942414286,0.957814286, - 1.000414286,1.023114286,1.053414286,1.090814286,1.073014286,1.058114286, - 1.117514286,1.123714286,1.123814286,1.177514286,1.190814286,1.187514286, - 1.223514286,1.261714286,1.289014286,1.276414286,1.339114286,1.365714286, - 1.375314286,1.402214286,1.399914286,1.437314286,1.464914286,1.479114286, - 1.505514286,1.509614286,1.539814286,1.558214286,1.595014286,1.637114286, - 1.653414286,1.636714286,1.652214286,1.701014286,1.731114286,1.759214286, - 1.782114286,1.811014286,1.801714286,1.823014286,1.842914286,1.913014286, - 1.943114286,1.977514286,1.982014286,2.007114286,2.066314286,2.079214286, - 2.126014286,2.147314286,2.174914286,2.184414286,2.218514286,2.261514286, - 2.309614286,2.328014286,2.347014286,2.369414286,2.396614286,2.452014286, - 2.473314286,2.486514286,2.497914286,2.518014286,2.561814286,2.613014286, - 2.626814286,2.585914286,2.614614286,2.644714286,2.688414286,2.688514286, - 2.685314286,2.724614286,2.746214286,2.773814286], - [-0.16,-0.08,-0.1,-0.16,-0.28,-0.32,-0.3,-0.35,-0.16,-0.1,-0.35,-0.22, - -0.27,-0.31,-0.3,-0.22,-0.11,-0.11,-0.26,-0.17,-0.08,-0.15,-0.28,-0.37, - -0.47,-0.26,-0.22,-0.39,-0.43,-0.48,-0.43,-0.44,-0.36,-0.34,-0.15,-0.14, - -0.36,-0.46,-0.29,-0.27,-0.27,-0.19,-0.28,-0.26,-0.27,-0.22,-0.1,-0.22, - -0.2,-0.36,-0.16,-0.1,-0.16,-0.29,-0.13,-0.2,-0.15,-0.03,-0.01,-0.02,0.13, - 0.19,0.07,0.09,0.2,0.09,-0.07,-0.03,-0.11,-0.11,-0.17,-0.07,0.01,0.08,-0.13, - -0.14,-0.19,0.05,0.06,0.03,-0.02,0.06,0.04,0.05,-0.2,-0.11,-0.06,-0.02,-0.08, - 0.05,0.02,-0.08,0.01,0.16,-0.07,-0.01,-0.1,0.18,0.07,0.16,0.26,0.32,0.14,0.31, - 0.15,0.11,0.18,0.32,0.38,0.27,0.45,0.4,0.22,0.23,0.32,0.45,0.33,0.47,0.61,0.39, - 0.39,0.54,0.63,0.62,0.54,0.68,0.64,0.66,0.54,0.66,0.72,0.61,0.64, 0.68,0.75,0.9, - 1.02,0.92,0.85,0.98,0.945764286,1.011064286,1.048564286,1.049564286,1.070264286, - 1.126564286,1.195464286,1.215064286,1.246964286,1.272564286,1.262464286, - 1.293464286,1.340864286,1.391164286,1.428764286,1.452564286,1.494164286, - 1.520664286,1.557164286,1.633664286,1.654264286,1.693264286,1.730264286, - 1.795264286,1.824264286,1.823864286,1.880664286,1.952864286,1.991764286, - 1.994764286,2.085764286,2.105764286,2.155064286,2.227464286,2.249964286, - 2.313664286,2.341464286,2.394064286,2.457364286,2.484664286,2.549564286, - 2.605964286,2.656864286,2.707364286,2.742964286,2.789764286,2.847664286, - 2.903564286,2.925064286,2.962864286,3.002664286,3.069264286,3.133364286, - 3.174764286,3.217764286,3.256564286,3.306864286,3.375464286,3.420264286, - 3.476464286,3.493864286,3.552964286,3.592364286,3.630664286,3.672464286, - 3.734364286,3.789764286,3.838164286,3.882264286,3.936064286,3.984064286, - 4.055764286,4.098964286,4.122364286,4.172064286,4.225264286,4.275064286, - 4.339064286,4.375864286,4.408064286,4.477764286] -]) + gmst_data = np.array( + [ + [ + -0.16, + -0.08, + -0.1, + -0.16, + -0.28, + -0.32, + -0.3, + -0.35, + -0.16, + -0.1, + -0.35, + -0.22, + -0.27, + -0.31, + -0.3, + -0.22, + -0.11, + -0.11, + -0.26, + -0.17, + -0.08, + -0.15, + -0.28, + -0.37, + -0.47, + -0.26, + -0.22, + -0.39, + -0.43, + -0.48, + -0.43, + -0.44, + -0.36, + -0.34, + -0.15, + -0.14, + -0.36, + -0.46, + -0.29, + -0.27, + -0.27, + -0.19, + -0.28, + -0.26, + -0.27, + -0.22, + -0.1, + -0.22, + -0.2, + -0.36, + -0.16, + -0.1, + -0.16, + -0.29, + -0.13, + -0.2, + -0.15, + -0.03, + -0.01, + -0.02, + 0.13, + 
0.19, + 0.07, + 0.09, + 0.2, + 0.09, + -0.07, + -0.03, + -0.11, + -0.11, + -0.17, + -0.07, + 0.01, + 0.08, + -0.13, + -0.14, + -0.19, + 0.05, + 0.06, + 0.03, + -0.02, + 0.06, + 0.04, + 0.05, + -0.2, + -0.11, + -0.06, + -0.02, + -0.08, + 0.05, + 0.02, + -0.08, + 0.01, + 0.16, + -0.07, + -0.01, + -0.1, + 0.18, + 0.07, + 0.16, + 0.26, + 0.32, + 0.14, + 0.31, + 0.15, + 0.11, + 0.18, + 0.32, + 0.38, + 0.27, + 0.45, + 0.4, + 0.22, + 0.23, + 0.32, + 0.45, + 0.33, + 0.47, + 0.61, + 0.39, + 0.39, + 0.54, + 0.63, + 0.62, + 0.54, + 0.68, + 0.64, + 0.66, + 0.54, + 0.66, + 0.72, + 0.61, + 0.64, + 0.68, + 0.75, + 0.9, + 1.02, + 0.92, + 0.85, + 0.98, + 0.909014286, + 0.938814286, + 0.999714286, + 1.034314286, + 1.009714286, + 1.020014286, + 1.040914286, + 1.068614286, + 1.072114286, + 1.095114286, + 1.100414286, + 1.099014286, + 1.118514286, + 1.133414286, + 1.135314286, + 1.168814286, + 1.200414286, + 1.205414286, + 1.227214286, + 1.212614286, + 1.243014286, + 1.270114286, + 1.250114286, + 1.254514286, + 1.265814286, + 1.263314286, + 1.294714286, + 1.289814286, + 1.314214286, + 1.322514286, + 1.315614286, + 1.276314286, + 1.302414286, + 1.318414286, + 1.312014286, + 1.317914286, + 1.341214286, + 1.297414286, + 1.308514286, + 1.314614286, + 1.327814286, + 1.335814286, + 1.331214286, + 1.318014286, + 1.289714286, + 1.334414286, + 1.323914286, + 1.316614286, + 1.300214286, + 1.302414286, + 1.303114286, + 1.311014286, + 1.283914286, + 1.293814286, + 1.296914286, + 1.316614286, + 1.306314286, + 1.290614286, + 1.288814286, + 1.272114286, + 1.264614286, + 1.262514286, + 1.290514286, + 1.285114286, + 1.267214286, + 1.267414286, + 1.294314286, + 1.315614286, + 1.310314286, + 1.283914286, + 1.296614286, + 1.281214286, + 1.301014286, + 1.300114286, + 1.303114286, + 1.286714286, + 1.297514286, + 1.312114286, + 1.276714286, + 1.281414286, + 1.276414286, + ], + [ + -0.16, + -0.08, + -0.1, + -0.16, + -0.28, + -0.32, + -0.3, + -0.35, + -0.16, + -0.1, + -0.35, + -0.22, + -0.27, + -0.31, + -0.3, + -0.22, + -0.11, + -0.11, + -0.26, + -0.17, + -0.08, + -0.15, + -0.28, + -0.37, + -0.47, + -0.26, + -0.22, + -0.39, + -0.43, + -0.48, + -0.43, + -0.44, + -0.36, + -0.34, + -0.15, + -0.14, + -0.36, + -0.46, + -0.29, + -0.27, + -0.27, + -0.19, + -0.28, + -0.26, + -0.27, + -0.22, + -0.1, + -0.22, + -0.2, + -0.36, + -0.16, + -0.1, + -0.16, + -0.29, + -0.13, + -0.2, + -0.15, + -0.03, + -0.01, + -0.02, + 0.13, + 0.19, + 0.07, + 0.09, + 0.2, + 0.09, + -0.07, + -0.03, + -0.11, + -0.11, + -0.17, + -0.07, + 0.01, + 0.08, + -0.13, + -0.14, + -0.19, + 0.05, + 0.06, + 0.03, + -0.02, + 0.06, + 0.04, + 0.05, + -0.2, + -0.11, + -0.06, + -0.02, + -0.08, + 0.05, + 0.02, + -0.08, + 0.01, + 0.16, + -0.07, + -0.01, + -0.1, + 0.18, + 0.07, + 0.16, + 0.26, + 0.32, + 0.14, + 0.31, + 0.15, + 0.11, + 0.18, + 0.32, + 0.38, + 0.27, + 0.45, + 0.4, + 0.22, + 0.23, + 0.32, + 0.45, + 0.33, + 0.47, + 0.61, + 0.39, + 0.39, + 0.54, + 0.63, + 0.62, + 0.54, + 0.68, + 0.64, + 0.66, + 0.54, + 0.66, + 0.72, + 0.61, + 0.64, + 0.68, + 0.75, + 0.9, + 1.02, + 0.92, + 0.85, + 0.98, + 0.903592857, + 0.949092857, + 0.955792857, + 0.997892857, + 1.048392857, + 1.068092857, + 1.104792857, + 1.122192857, + 1.125792857, + 1.156292857, + 1.160992857, + 1.201692857, + 1.234692857, + 1.255392857, + 1.274392857, + 1.283792857, + 1.319992857, + 1.369992857, + 1.385592857, + 1.380892857, + 1.415092857, + 1.439892857, + 1.457092857, + 1.493592857, + 1.520292857, + 1.517692857, + 1.538092857, + 1.577192857, + 1.575492857, + 1.620392857, + 1.657092857, + 1.673492857, + 1.669992857, + 
1.706292857, + 1.707892857, + 1.758592857, + 1.739492857, + 1.740192857, + 1.797792857, + 1.839292857, + 1.865392857, + 1.857692857, + 1.864092857, + 1.881192857, + 1.907592857, + 1.918492857, + 1.933992857, + 1.929392857, + 1.931192857, + 1.942492857, + 1.985592857, + 1.997392857, + 2.000992857, + 2.028692857, + 2.016192857, + 2.020792857, + 2.032892857, + 2.057492857, + 2.092092857, + 2.106292857, + 2.117492857, + 2.123492857, + 2.121092857, + 2.096892857, + 2.126892857, + 2.131292857, + 2.144892857, + 2.124092857, + 2.134492857, + 2.171392857, + 2.163692857, + 2.144092857, + 2.145092857, + 2.128992857, + 2.129992857, + 2.169192857, + 2.186492857, + 2.181092857, + 2.217592857, + 2.210492857, + 2.223692857, + ], + [ + -0.16, + -0.08, + -0.1, + -0.16, + -0.28, + -0.32, + -0.3, + -0.35, + -0.16, + -0.1, + -0.35, + -0.22, + -0.27, + -0.31, + -0.3, + -0.22, + -0.11, + -0.11, + -0.26, + -0.17, + -0.08, + -0.15, + -0.28, + -0.37, + -0.47, + -0.26, + -0.22, + -0.39, + -0.43, + -0.48, + -0.43, + -0.44, + -0.36, + -0.34, + -0.15, + -0.14, + -0.36, + -0.46, + -0.29, + -0.27, + -0.27, + -0.19, + -0.28, + -0.26, + -0.27, + -0.22, + -0.1, + -0.22, + -0.2, + -0.36, + -0.16, + -0.1, + -0.16, + -0.29, + -0.13, + -0.2, + -0.15, + -0.03, + -0.01, + -0.02, + 0.13, + 0.19, + 0.07, + 0.09, + 0.2, + 0.09, + -0.07, + -0.03, + -0.11, + -0.11, + -0.17, + -0.07, + 0.01, + 0.08, + -0.13, + -0.14, + -0.19, + 0.05, + 0.06, + 0.03, + -0.02, + 0.06, + 0.04, + 0.05, + -0.2, + -0.11, + -0.06, + -0.02, + -0.08, + 0.05, + 0.02, + -0.08, + 0.01, + 0.16, + -0.07, + -0.01, + -0.1, + 0.18, + 0.07, + 0.16, + 0.26, + 0.32, + 0.14, + 0.31, + 0.15, + 0.11, + 0.18, + 0.32, + 0.38, + 0.27, + 0.45, + 0.4, + 0.22, + 0.23, + 0.32, + 0.45, + 0.33, + 0.47, + 0.61, + 0.39, + 0.39, + 0.54, + 0.63, + 0.62, + 0.54, + 0.68, + 0.64, + 0.66, + 0.54, + 0.66, + 0.72, + 0.61, + 0.64, + 0.68, + 0.75, + 0.9, + 1.02, + 0.92, + 0.85, + 0.98, + 0.885114286, + 0.899814286, + 0.919314286, + 0.942414286, + 0.957814286, + 1.000414286, + 1.023114286, + 1.053414286, + 1.090814286, + 1.073014286, + 1.058114286, + 1.117514286, + 1.123714286, + 1.123814286, + 1.177514286, + 1.190814286, + 1.187514286, + 1.223514286, + 1.261714286, + 1.289014286, + 1.276414286, + 1.339114286, + 1.365714286, + 1.375314286, + 1.402214286, + 1.399914286, + 1.437314286, + 1.464914286, + 1.479114286, + 1.505514286, + 1.509614286, + 1.539814286, + 1.558214286, + 1.595014286, + 1.637114286, + 1.653414286, + 1.636714286, + 1.652214286, + 1.701014286, + 1.731114286, + 1.759214286, + 1.782114286, + 1.811014286, + 1.801714286, + 1.823014286, + 1.842914286, + 1.913014286, + 1.943114286, + 1.977514286, + 1.982014286, + 2.007114286, + 2.066314286, + 2.079214286, + 2.126014286, + 2.147314286, + 2.174914286, + 2.184414286, + 2.218514286, + 2.261514286, + 2.309614286, + 2.328014286, + 2.347014286, + 2.369414286, + 2.396614286, + 2.452014286, + 2.473314286, + 2.486514286, + 2.497914286, + 2.518014286, + 2.561814286, + 2.613014286, + 2.626814286, + 2.585914286, + 2.614614286, + 2.644714286, + 2.688414286, + 2.688514286, + 2.685314286, + 2.724614286, + 2.746214286, + 2.773814286, + ], + [ + -0.16, + -0.08, + -0.1, + -0.16, + -0.28, + -0.32, + -0.3, + -0.35, + -0.16, + -0.1, + -0.35, + -0.22, + -0.27, + -0.31, + -0.3, + -0.22, + -0.11, + -0.11, + -0.26, + -0.17, + -0.08, + -0.15, + -0.28, + -0.37, + -0.47, + -0.26, + -0.22, + -0.39, + -0.43, + -0.48, + -0.43, + -0.44, + -0.36, + -0.34, + -0.15, + -0.14, + -0.36, + -0.46, + -0.29, + -0.27, + -0.27, + -0.19, + -0.28, + -0.26, + -0.27, + -0.22, + -0.1, 
+ -0.22, + -0.2, + -0.36, + -0.16, + -0.1, + -0.16, + -0.29, + -0.13, + -0.2, + -0.15, + -0.03, + -0.01, + -0.02, + 0.13, + 0.19, + 0.07, + 0.09, + 0.2, + 0.09, + -0.07, + -0.03, + -0.11, + -0.11, + -0.17, + -0.07, + 0.01, + 0.08, + -0.13, + -0.14, + -0.19, + 0.05, + 0.06, + 0.03, + -0.02, + 0.06, + 0.04, + 0.05, + -0.2, + -0.11, + -0.06, + -0.02, + -0.08, + 0.05, + 0.02, + -0.08, + 0.01, + 0.16, + -0.07, + -0.01, + -0.1, + 0.18, + 0.07, + 0.16, + 0.26, + 0.32, + 0.14, + 0.31, + 0.15, + 0.11, + 0.18, + 0.32, + 0.38, + 0.27, + 0.45, + 0.4, + 0.22, + 0.23, + 0.32, + 0.45, + 0.33, + 0.47, + 0.61, + 0.39, + 0.39, + 0.54, + 0.63, + 0.62, + 0.54, + 0.68, + 0.64, + 0.66, + 0.54, + 0.66, + 0.72, + 0.61, + 0.64, + 0.68, + 0.75, + 0.9, + 1.02, + 0.92, + 0.85, + 0.98, + 0.945764286, + 1.011064286, + 1.048564286, + 1.049564286, + 1.070264286, + 1.126564286, + 1.195464286, + 1.215064286, + 1.246964286, + 1.272564286, + 1.262464286, + 1.293464286, + 1.340864286, + 1.391164286, + 1.428764286, + 1.452564286, + 1.494164286, + 1.520664286, + 1.557164286, + 1.633664286, + 1.654264286, + 1.693264286, + 1.730264286, + 1.795264286, + 1.824264286, + 1.823864286, + 1.880664286, + 1.952864286, + 1.991764286, + 1.994764286, + 2.085764286, + 2.105764286, + 2.155064286, + 2.227464286, + 2.249964286, + 2.313664286, + 2.341464286, + 2.394064286, + 2.457364286, + 2.484664286, + 2.549564286, + 2.605964286, + 2.656864286, + 2.707364286, + 2.742964286, + 2.789764286, + 2.847664286, + 2.903564286, + 2.925064286, + 2.962864286, + 3.002664286, + 3.069264286, + 3.133364286, + 3.174764286, + 3.217764286, + 3.256564286, + 3.306864286, + 3.375464286, + 3.420264286, + 3.476464286, + 3.493864286, + 3.552964286, + 3.592364286, + 3.630664286, + 3.672464286, + 3.734364286, + 3.789764286, + 3.838164286, + 3.882264286, + 3.936064286, + 3.984064286, + 4.055764286, + 4.098964286, + 4.122364286, + 4.172064286, + 4.225264286, + 4.275064286, + 4.339064286, + 4.375864286, + 4.408064286, + 4.477764286, + ], + ] + ) gmst_info = { - 'rcps' : ['2.6', '4.5', '6.0', '8.5'], - 'gmst_start_year' : 1880, - 'gmst_end_year' : 2100, - 'gmst_data' : gmst_data + "rcps": ["2.6", "4.5", "6.0", "8.5"], + "gmst_start_year": 1880, + "gmst_end_year": 2100, + "gmst_data": gmst_data, } return gmst_info + def get_knutson_data(): """ Retrieve projections data in Knutson et al., (2020): @@ -356,34 +1149,41 @@ def get_knutson_data(): # The knutson_data array has dimension: # 4 (tropical cyclones variables) x 6 (tropical cyclone regions) x 5 (percentiles) - knutson_data = np.array([[ - [-34.49,-24.875,-14.444,3.019,28.737], - [-30.444,-20,-10.27,0.377,17.252], - [-32.075,-18.491,-3.774,11.606,36.682], - [-35.094,-15.115,-4.465,5.785,29.405], - [-32.778,-22.522,-17.297,-8.995,7.241], - [-40.417,-26.321,-18.113,-8.21,4.689]], - [ - [-38.038,-22.264,11.321,38.302,81.874], - [-25.811,-14.34,-4.75,16.146,41.979], - [-24.83,-6.792,22.642,57.297,104.315], - [-30.566,-16.415,5.283,38.491,79.119], - [-23.229,-13.611,4.528,26.645,63.514], - [-42.453,-29.434,-14.467,-0.541,19.061]], - [ - [0.543,1.547,2.943,4.734,6.821], - [1.939,3.205,5.328,6.549,9.306], - [-2.217,0.602,5.472,9.191,10.368], - [-0.973,1.944,4.324,6.15,7.808], - [1.605,3.455,5.405,7.69,10.884], - [-6.318,-0.783,0.938,5.314,12.213]], - [ - [5.848,9.122,15.869,20.352,22.803], - [6.273,12.121,16.486,18.323,23.784], - [6.014,8.108,21.081,29.324,31.838], - [12.703,14.347,17.649,19.182,20.77], - [2.2,11.919,19.73,23.115,26.243], - [-1.299,5.137,7.297,11.091,15.419] - ]]) - - return knutson_data \ No newline at end of 
file + knutson_data = np.array( + [ + [ + [-34.49, -24.875, -14.444, 3.019, 28.737], + [-30.444, -20, -10.27, 0.377, 17.252], + [-32.075, -18.491, -3.774, 11.606, 36.682], + [-35.094, -15.115, -4.465, 5.785, 29.405], + [-32.778, -22.522, -17.297, -8.995, 7.241], + [-40.417, -26.321, -18.113, -8.21, 4.689], + ], + [ + [-38.038, -22.264, 11.321, 38.302, 81.874], + [-25.811, -14.34, -4.75, 16.146, 41.979], + [-24.83, -6.792, 22.642, 57.297, 104.315], + [-30.566, -16.415, 5.283, 38.491, 79.119], + [-23.229, -13.611, 4.528, 26.645, 63.514], + [-42.453, -29.434, -14.467, -0.541, 19.061], + ], + [ + [0.543, 1.547, 2.943, 4.734, 6.821], + [1.939, 3.205, 5.328, 6.549, 9.306], + [-2.217, 0.602, 5.472, 9.191, 10.368], + [-0.973, 1.944, 4.324, 6.15, 7.808], + [1.605, 3.455, 5.405, 7.69, 10.884], + [-6.318, -0.783, 0.938, 5.314, 12.213], + ], + [ + [5.848, 9.122, 15.869, 20.352, 22.803], + [6.273, 12.121, 16.486, 18.323, 23.784], + [6.014, 8.108, 21.081, 29.324, 31.838], + [12.703, 14.347, 17.649, 19.182, 20.77], + [2.2, 11.919, 19.73, 23.115, 26.243], + [-1.299, 5.137, 7.297, 11.091, 15.419], + ], + ] + ) + + return knutson_data diff --git a/climada/hazard/tc_tracks.py b/climada/hazard/tc_tracks.py index 519f93627..3f2fb85b8 100644 --- a/climada/hazard/tc_tracks.py +++ b/climada/hazard/tc_tracks.py @@ -19,47 +19,48 @@ Define TCTracks: IBTracs reader and tracks manager. """ -__all__ = ['CAT_NAMES', 'SAFFIR_SIM_CAT', 'TCTracks', 'set_category'] +__all__ = ["CAT_NAMES", "SAFFIR_SIM_CAT", "TCTracks", "set_category"] # standard libraries import datetime as dt import itertools import logging -from typing import Optional, List import re import shutil import warnings from pathlib import Path +from typing import List, Optional # additional libraries import cartopy.crs as ccrs import cftime import geopandas as gpd -import pathos import matplotlib.cm as cm_mp -from matplotlib.collections import LineCollection -from matplotlib.colors import BoundaryNorm, ListedColormap -from matplotlib.lines import Line2D import matplotlib.pyplot as plt import netCDF4 as nc import numba import numpy as np import pandas as pd +import pathos import scipy.io.matlab as matlab -from shapely.geometry import Point, LineString, MultiLineString import shapely.ops -from sklearn.metrics import DistanceMetric import statsmodels.api as sm import xarray as xr +from matplotlib.collections import LineCollection +from matplotlib.colors import BoundaryNorm, ListedColormap +from matplotlib.lines import Line2D +from shapely.geometry import LineString, MultiLineString, Point +from sklearn.metrics import DistanceMetric -# climada dependencies -from climada.util import ureg +import climada.hazard.tc_tracks_synth import climada.util.coordinates as u_coord -from climada.util.constants import EARTH_RADIUS_KM, SYSTEM_DIR, DEF_CRS -from climada.util.files_handler import get_file_names, download_ftp import climada.util.plot as u_plot from climada.hazard import Centroids -import climada.hazard.tc_tracks_synth + +# climada dependencies +from climada.util import ureg +from climada.util.constants import DEF_CRS, EARTH_RADIUS_KM, SYSTEM_DIR +from climada.util.files_handler import download_ftp, get_file_names LOGGER = logging.getLogger(__name__) @@ -67,37 +68,61 @@ """Saffir-Simpson Hurricane Wind Scale in kn based on NOAA""" CAT_NAMES = { - -1: 'Tropical Depression', - 0: 'Tropical Storm', - 1: 'Hurricane Cat. 1', - 2: 'Hurricane Cat. 2', - 3: 'Hurricane Cat. 3', - 4: 'Hurricane Cat. 4', - 5: 'Hurricane Cat. 
5', + -1: "Tropical Depression", + 0: "Tropical Storm", + 1: "Hurricane Cat. 1", + 2: "Hurricane Cat. 2", + 3: "Hurricane Cat. 3", + 4: "Hurricane Cat. 4", + 5: "Hurricane Cat. 5", } """Saffir-Simpson category names.""" CAT_COLORS = cm_mp.rainbow(np.linspace(0, 1, len(SAFFIR_SIM_CAT))) """Color scale to plot the Saffir-Simpson scale.""" -IBTRACS_URL = ('https://www.ncei.noaa.gov/data/' - 'international-best-track-archive-for-climate-stewardship-ibtracs/' - 'v04r00/access/netcdf') +IBTRACS_URL = ( + "https://www.ncei.noaa.gov/data/" + "international-best-track-archive-for-climate-stewardship-ibtracs/" + "v04r00/access/netcdf" +) """Site of IBTrACS netcdf file containing all tracks v4.0, s. https://www.ncdc.noaa.gov/ibtracs/index.php?name=ib-v4-access""" -IBTRACS_FILE = 'IBTrACS.ALL.v04r00.nc' +IBTRACS_FILE = "IBTrACS.ALL.v04r00.nc" """IBTrACS v4.0 file all""" IBTRACS_AGENCIES = [ - 'usa', 'tokyo', 'newdelhi', 'reunion', 'bom', 'nadi', 'wellington', - 'cma', 'hko', 'ds824', 'td9636', 'td9635', 'neumann', 'mlc', + "usa", + "tokyo", + "newdelhi", + "reunion", + "bom", + "nadi", + "wellington", + "cma", + "hko", + "ds824", + "td9636", + "td9635", + "neumann", + "mlc", ] """Names/IDs of agencies in IBTrACS v4.0""" IBTRACS_USA_AGENCIES = [ - 'atcf', 'cphc', 'hurdat_atl', 'hurdat_epa', 'jtwc_cp', 'jtwc_ep', 'jtwc_io', - 'jtwc_sh', 'jtwc_wp', 'nhc_working_bt', 'tcvightals', 'tcvitals' + "atcf", + "cphc", + "hurdat_atl", + "hurdat_epa", + "jtwc_cp", + "jtwc_ep", + "jtwc_io", + "jtwc_sh", + "jtwc_wp", + "nhc_working_bt", + "tcvightals", + "tcvitals", ] """Names/IDs of agencies in IBTrACS that correspond to 'usa_*' variables""" @@ -110,13 +135,13 @@ "bom": [0.88, 0.0], "nadi": [0.88, 0.0], "wellington": [0.88, 0.0], - 'cma': [0.871, 0.0], - 'hko': [0.9, 0.0], - 'ds824': [1.0, 0.0], - 'td9636': [1.0, 0.0], - 'td9635': [1.0, 0.0], - 'neumann': [0.88, 0.0], - 'mlc': [1.0, 0.0], + "cma": [0.871, 0.0], + "hko": [0.9, 0.0], + "ds824": [1.0, 0.0], + "td9636": [1.0, 0.0], + "td9635": [1.0, 0.0], + "neumann": [0.88, 0.0], + "mlc": [1.0, 0.0], } """Scale and shift used by agencies to convert their internal Dvorak 1-minute sustained winds to the officially reported values that are in IBTrACS. From Table 1 in: @@ -129,20 +154,30 @@ """Default environmental pressure""" BASIN_ENV_PRESSURE = { - '': DEF_ENV_PRESSURE, - 'EP': 1010, 'NA': 1010, 'SA': 1010, - 'NI': 1005, 'SI': 1005, 'WP': 1005, - 'SP': 1004, + "": DEF_ENV_PRESSURE, + "EP": 1010, + "NA": 1010, + "SA": 1010, + "NI": 1005, + "SI": 1005, + "WP": 1005, + "SP": 1004, } """Basin-specific default environmental pressure""" EMANUEL_RMW_CORR_FILES = [ - 'temp_ccsm420thcal.mat', 'temp_ccsm4rcp85_full.mat', - 'temp_gfdl520thcal.mat', 'temp_gfdl5rcp85cal_full.mat', - 'temp_hadgem20thcal.mat', 'temp_hadgemrcp85cal_full.mat', - 'temp_miroc20thcal.mat', 'temp_mirocrcp85cal_full.mat', - 'temp_mpi20thcal.mat', 'temp_mpircp85cal_full.mat', - 'temp_mri20thcal.mat', 'temp_mrircp85cal_full.mat', + "temp_ccsm420thcal.mat", + "temp_ccsm4rcp85_full.mat", + "temp_gfdl520thcal.mat", + "temp_gfdl5rcp85cal_full.mat", + "temp_hadgem20thcal.mat", + "temp_hadgemrcp85cal_full.mat", + "temp_miroc20thcal.mat", + "temp_mirocrcp85cal_full.mat", + "temp_mpi20thcal.mat", + "temp_mpircp85cal_full.mat", + "temp_mri20thcal.mat", + "temp_mrircp85cal_full.mat", ] EMANUEL_RMW_CORR_FACTOR = 2.0 """Kerry Emanuel track files in this list require a correction: The radius of @@ -155,7 +190,8 @@ Bloemendaal et al. (2020): Generation of a global synthetic tropical cyclone hazard dataset using STORM. 
Scientific Data 7(1): 40.""" -class TCTracks(): + +class TCTracks: """Contains tropical cyclone tracks. Attributes @@ -187,9 +223,12 @@ class TCTracks(): system is a disturbance, tropical storm, post-transition extratropical storm etc.) might be included, depending on the data source and on use cases. """ - def __init__(self, - data: Optional[List[xr.Dataset]] = None, - pool: Optional[pathos.multiprocessing.ProcessPool] = None): + + def __init__( + self, + data: Optional[List[xr.Dataset]] = None, + pool: Optional[pathos.multiprocessing.ProcessPool] = None, + ): """Create new (empty) TCTracks instance. Parameters @@ -204,7 +243,7 @@ def __init__(self, self.data = data if data is not None else list() self.pool = pool if pool: - LOGGER.debug('Using %s CPUs.', self.pool.ncpus) + LOGGER.debug("Using %s CPUs.", self.pool.ncpus) def append(self, tracks): """Append tracks to current. @@ -242,12 +281,12 @@ def get_track(self, track_name=None): return self.data for track in self.data: - if track.attrs['name'] == track_name: + if track.attrs["name"] == track_name: return track - if hasattr(track, 'sid') and track.sid == track_name: + if hasattr(track, "sid") and track.sid == track_name: return track - LOGGER.info('No track with name or sid %s found.', track_name) + LOGGER.info("No track with name or sid %s found.", track_name) return [] def subset(self, filterdict): @@ -317,16 +356,28 @@ def tracks_in_exp(self, exposure, buffer=1.0): def read_ibtracs_netcdf(self, *args, **kwargs): """This function is deprecated, use TCTracks.from_ibtracs_netcdf instead.""" - LOGGER.warning("The use of TCTracks.read_ibtracs_netcdf is deprecated. " - "Use TCTracks.from_ibtracs_netcdf instead.") + LOGGER.warning( + "The use of TCTracks.read_ibtracs_netcdf is deprecated. " + "Use TCTracks.from_ibtracs_netcdf instead." + ) self.__dict__ = TCTracks.from_ibtracs_netcdf(*args, **kwargs).__dict__ @classmethod - def from_ibtracs_netcdf(cls, provider=None, rescale_windspeeds=True, storm_id=None, - year_range=None, basin=None, genesis_basin=None, - interpolate_missing=True, estimate_missing=False, correct_pres=False, - discard_single_points=True, additional_variables=None, - file_name='IBTrACS.ALL.v04r00.nc'): + def from_ibtracs_netcdf( + cls, + provider=None, + rescale_windspeeds=True, + storm_id=None, + year_range=None, + basin=None, + genesis_basin=None, + interpolate_missing=True, + estimate_missing=False, + correct_pres=False, + discard_single_points=True, + additional_variables=None, + file_name="IBTrACS.ALL.v04r00.nc", + ): """Create new TCTracks object from IBTrACS databse. When using data from IBTrACS, make sure to be familiar with the scope and limitations of @@ -446,125 +497,165 @@ def from_ibtracs_netcdf(cls, provider=None, rescale_windspeeds=True, storm_id=No TCTracks with data from IBTrACS """ if correct_pres: - LOGGER.warning("`correct_pres` is deprecated. " - "Use `estimate_missing` instead.") + LOGGER.warning( + "`correct_pres` is deprecated. " "Use `estimate_missing` instead." + ) estimate_missing = True if estimate_missing and not rescale_windspeeds: LOGGER.warning( - "Using `estimate_missing` without `rescale_windspeeds` is strongly discouraged!") + "Using `estimate_missing` without `rescale_windspeeds` is strongly discouraged!" 
+ ) ibtracs_path = SYSTEM_DIR.joinpath(file_name) if not ibtracs_path.is_file(): try: - download_ftp(f'{IBTRACS_URL}/{IBTRACS_FILE}', IBTRACS_FILE) + download_ftp(f"{IBTRACS_URL}/{IBTRACS_FILE}", IBTRACS_FILE) shutil.move(IBTRACS_FILE, ibtracs_path) except ValueError as err: raise ValueError( - f'Error while downloading {IBTRACS_URL}. Try to download it manually and ' - f'put the file in {ibtracs_path}') from err + f"Error while downloading {IBTRACS_URL}. Try to download it manually and " + f"put the file in {ibtracs_path}" + ) from err if additional_variables is None: additional_variables = [] with xr.open_dataset(ibtracs_path) as ibtracs_ds: ibtracs_date = ibtracs_ds.attrs["date_created"] - if (np.datetime64('today') - np.datetime64(ibtracs_date)).item().days > 180: - LOGGER.warning("The cached IBTrACS data set dates from %s (older " - "than 180 days). Very likely, a more recent version is available. " - "Consider manually removing the file %s and re-running " - "this function, which will download the most recent version of the " - "IBTrACS data set from the official URL.", ibtracs_date, ibtracs_path) + if (np.datetime64("today") - np.datetime64(ibtracs_date)).item().days > 180: + LOGGER.warning( + "The cached IBTrACS data set dates from %s (older " + "than 180 days). Very likely, a more recent version is available. " + "Consider manually removing the file %s and re-running " + "this function, which will download the most recent version of the " + "IBTrACS data set from the official URL.", + ibtracs_date, + ibtracs_path, + ) match = np.ones(ibtracs_ds.sid.shape[0], dtype=bool) if storm_id is not None: if not isinstance(storm_id, list): storm_id = [storm_id] invalid_mask = np.array( - [re.match(r"[12][0-9]{6}[NS][0-9]{5}", s) is None for s in storm_id]) + [re.match(r"[12][0-9]{6}[NS][0-9]{5}", s) is None for s in storm_id] + ) if invalid_mask.any(): invalid_sids = list(np.array(storm_id)[invalid_mask]) - raise ValueError("The following given IDs are invalid: %s%s" % ( - ", ".join(invalid_sids[:5]), - ", ..." if len(invalid_sids) > 5 else ".")) + raise ValueError( + "The following given IDs are invalid: %s%s" + % ( + ", ".join(invalid_sids[:5]), + ", ..." if len(invalid_sids) > 5 else ".", + ) + ) storm_id = list(np.array(storm_id)[~invalid_mask]) storm_id_encoded = [i.encode() for i in storm_id] non_existing_mask = ~np.isin(storm_id_encoded, ibtracs_ds.sid.values) if np.count_nonzero(non_existing_mask) > 0: non_existing_sids = list(np.array(storm_id)[non_existing_mask]) - raise ValueError("The following given IDs are not in IBTrACS: %s%s" % ( - ", ".join(non_existing_sids[:5]), - ", ..." if len(non_existing_sids) > 5 else ".")) - storm_id_encoded = list(np.array(storm_id_encoded)[~non_existing_mask]) + raise ValueError( + "The following given IDs are not in IBTrACS: %s%s" + % ( + ", ".join(non_existing_sids[:5]), + ", ..." 
if len(non_existing_sids) > 5 else ".", + ) + ) + storm_id_encoded = list( + np.array(storm_id_encoded)[~non_existing_mask] + ) match &= ibtracs_ds.sid.isin(storm_id_encoded) if year_range is not None: years = ibtracs_ds.sid.str.slice(0, 4).astype(int) match &= (years >= year_range[0]) & (years <= year_range[1]) if np.count_nonzero(match) == 0: - LOGGER.info('No tracks in time range (%s, %s).', *year_range) + LOGGER.info("No tracks in time range (%s, %s).", *year_range) if basin is not None: - match &= (ibtracs_ds.basin == basin.encode()).any(dim='date_time') + match &= (ibtracs_ds.basin == basin.encode()).any(dim="date_time") if np.count_nonzero(match) == 0: - LOGGER.info('No tracks in basin %s.', basin) + LOGGER.info("No tracks in basin %s.", basin) if genesis_basin is not None: # Here, we only filter for the basin at *any* eye position. We will filter again later # for the basin of the *first* eye position, but only after restricting to the valid # time steps in the data. - match &= (ibtracs_ds.basin == genesis_basin.encode()).any(dim='date_time') + match &= (ibtracs_ds.basin == genesis_basin.encode()).any( + dim="date_time" + ) if np.count_nonzero(match) == 0: - LOGGER.info('No tracks in genesis basin %s.', genesis_basin) + LOGGER.info("No tracks in genesis basin %s.", genesis_basin) if np.count_nonzero(match) == 0: - LOGGER.info("IBTrACS doesn't contain any tracks matching the specified requirements.") + LOGGER.info( + "IBTrACS doesn't contain any tracks matching the specified requirements." + ) return cls() ibtracs_ds = ibtracs_ds.sel(storm=match) - ibtracs_ds['valid_t'] = ibtracs_ds['time'].notnull() + ibtracs_ds["valid_t"] = ibtracs_ds["time"].notnull() if rescale_windspeeds: for agency in IBTRACS_AGENCIES: scale, shift = IBTRACS_AGENCY_1MIN_WIND_FACTOR[agency] - ibtracs_ds[f'{agency}_wind'] -= shift - ibtracs_ds[f'{agency}_wind'] /= scale + ibtracs_ds[f"{agency}_wind"] -= shift + ibtracs_ds[f"{agency}_wind"] /= scale if provider is None: provider = ["official_3h"] + IBTRACS_AGENCIES elif isinstance(provider, str): provider = [provider] - phys_vars = ['lat', 'lon', 'wind', 'pres', 'rmw', 'poci', 'roci'] + phys_vars = ["lat", "lon", "wind", "pres", "rmw", "poci", "roci"] for tc_var in phys_vars: if "official" in provider or "official_3h" in provider: ibtracs_add_official_variable( - ibtracs_ds, tc_var, add_3h=("official_3h" in provider)) + ibtracs_ds, tc_var, add_3h=("official_3h" in provider) + ) # set up dimension of agency-reported values in order of preference, including the # newly created `official` and `official_3h` data if specified - ag_vars = [f'{ag}_{tc_var}' for ag in provider] - ag_vars = [ag_var for ag_var in ag_vars if ag_var in ibtracs_ds.data_vars.keys()] + ag_vars = [f"{ag}_{tc_var}" for ag in provider] + ag_vars = [ + ag_var + for ag_var in ag_vars + if ag_var in ibtracs_ds.data_vars.keys() + ] if len(ag_vars) == 0: - ag_vars = [f'{provider[0]}_{tc_var}'] - ibtracs_ds[ag_vars[0]] = xr.full_like(ibtracs_ds[f'usa_{tc_var}'], np.nan) - all_vals = ibtracs_ds[ag_vars].to_array(dim='agency') + ag_vars = [f"{provider[0]}_{tc_var}"] + ibtracs_ds[ag_vars[0]] = xr.full_like( + ibtracs_ds[f"usa_{tc_var}"], np.nan + ) + all_vals = ibtracs_ds[ag_vars].to_array(dim="agency") # argmax returns the first True (i.e. 
valid) along the 'agency' dimension - preferred_idx = all_vals.notnull().any(dim="date_time").argmax(dim='agency') + preferred_idx = ( + all_vals.notnull().any(dim="date_time").argmax(dim="agency") + ) ibtracs_ds[tc_var] = all_vals.isel(agency=preferred_idx) - selected_ags = np.array([v[:-len(f'_{tc_var}')].encode() for v in ag_vars]) - ibtracs_ds[f'{tc_var}_agency'] = ('storm', selected_ags[preferred_idx.values]) + selected_ags = np.array( + [v[: -len(f"_{tc_var}")].encode() for v in ag_vars] + ) + ibtracs_ds[f"{tc_var}_agency"] = ( + "storm", + selected_ags[preferred_idx.values], + ) - if tc_var == 'lon': + if tc_var == "lon": # Most IBTrACS longitudes are either normalized to [-180, 180] or to [0, 360], but # some aren't normalized at all, so we have to make sure that the values are okay: lons = ibtracs_ds[tc_var].values.copy() lon_valid_mask = np.isfinite(lons) - lons[lon_valid_mask] = u_coord.lon_normalize(lons[lon_valid_mask], center=0.0) + lons[lon_valid_mask] = u_coord.lon_normalize( + lons[lon_valid_mask], center=0.0 + ) ibtracs_ds[tc_var].values[:] = lons # Make sure that the longitude is always chosen positive if a track crosses the # antimeridian: - crossing_mask = ((ibtracs_ds[tc_var] > 170).any(dim="date_time") - & (ibtracs_ds[tc_var] < -170).any(dim="date_time") - & (ibtracs_ds[tc_var] < 0)).values + crossing_mask = ( + (ibtracs_ds[tc_var] > 170).any(dim="date_time") + & (ibtracs_ds[tc_var] < -170).any(dim="date_time") + & (ibtracs_ds[tc_var] < 0) + ).values ibtracs_ds[tc_var].values[crossing_mask] += 360 if interpolate_missing: @@ -574,68 +665,103 @@ def from_ibtracs_netcdf(cls, provider=None, rescale_windspeeds=True, storm_id=No # don't interpolate if there is only a single record for this variable nonsingular_mask = ( - ibtracs_ds[tc_var].notnull().sum(dim="date_time") > 1).values + ibtracs_ds[tc_var].notnull().sum(dim="date_time") > 1 + ).values if nonsingular_mask.sum() > 0: ibtracs_ds[tc_var].values[nonsingular_mask] = ( - ibtracs_ds[tc_var].sel(storm=nonsingular_mask).interpolate_na( - dim="date_time", method="linear")) - ibtracs_ds = ibtracs_ds[['sid', 'name', 'basin', 'time', 'valid_t'] - + additional_variables + phys_vars - + [f'{v}_agency' for v in phys_vars]] + ibtracs_ds[tc_var] + .sel(storm=nonsingular_mask) + .interpolate_na(dim="date_time", method="linear") + ) + ibtracs_ds = ibtracs_ds[ + ["sid", "name", "basin", "time", "valid_t"] + + additional_variables + + phys_vars + + [f"{v}_agency" for v in phys_vars] + ] if estimate_missing: - ibtracs_ds['pres'][:] = _estimate_pressure( - ibtracs_ds['pres'], ibtracs_ds['lat'], ibtracs_ds['lon'], ibtracs_ds['wind']) - ibtracs_ds['wind'][:] = _estimate_vmax( - ibtracs_ds['wind'], ibtracs_ds['lat'], ibtracs_ds['lon'], ibtracs_ds['pres']) - - ibtracs_ds['valid_t'] &= (ibtracs_ds['lat'].notnull() & ibtracs_ds['lon'].notnull() - & ibtracs_ds['wind'].notnull() & ibtracs_ds['pres'].notnull()) - valid_storms_mask = ibtracs_ds['valid_t'].any(dim="date_time") + ibtracs_ds["pres"][:] = _estimate_pressure( + ibtracs_ds["pres"], + ibtracs_ds["lat"], + ibtracs_ds["lon"], + ibtracs_ds["wind"], + ) + ibtracs_ds["wind"][:] = _estimate_vmax( + ibtracs_ds["wind"], + ibtracs_ds["lat"], + ibtracs_ds["lon"], + ibtracs_ds["pres"], + ) + + ibtracs_ds["valid_t"] &= ( + ibtracs_ds["lat"].notnull() + & ibtracs_ds["lon"].notnull() + & ibtracs_ds["wind"].notnull() + & ibtracs_ds["pres"].notnull() + ) + valid_storms_mask = ibtracs_ds["valid_t"].any(dim="date_time") invalid_storms_idx = np.nonzero(~valid_storms_mask.data)[0] if 
invalid_storms_idx.size > 0: - invalid_sids = list(ibtracs_ds.sid.sel(storm=invalid_storms_idx).astype(str).data) - LOGGER.warning('%d storm events are discarded because no valid wind/pressure values ' - 'have been found: %s%s', len(invalid_sids), ", ".join(invalid_sids[:5]), - ", ..." if len(invalid_sids) > 5 else ".") + invalid_sids = list( + ibtracs_ds.sid.sel(storm=invalid_storms_idx).astype(str).data + ) + LOGGER.warning( + "%d storm events are discarded because no valid wind/pressure values " + "have been found: %s%s", + len(invalid_sids), + ", ".join(invalid_sids[:5]), + ", ..." if len(invalid_sids) > 5 else ".", + ) ibtracs_ds = ibtracs_ds.sel(storm=valid_storms_mask) if discard_single_points: - valid_storms_mask = ibtracs_ds['valid_t'].sum(dim="date_time") > 1 + valid_storms_mask = ibtracs_ds["valid_t"].sum(dim="date_time") > 1 invalid_storms_idx = np.nonzero(~valid_storms_mask.data)[0] if invalid_storms_idx.size > 0: - invalid_sids = list(ibtracs_ds.sid.sel(storm=invalid_storms_idx).astype(str).data) - LOGGER.warning('%d storm events are discarded because only one valid timestep ' - 'has been found: %s%s', len(invalid_sids), - ", ".join(invalid_sids[:5]), - ", ..." if len(invalid_sids) > 5 else ".") + invalid_sids = list( + ibtracs_ds.sid.sel(storm=invalid_storms_idx).astype(str).data + ) + LOGGER.warning( + "%d storm events are discarded because only one valid timestep " + "has been found: %s%s", + len(invalid_sids), + ", ".join(invalid_sids[:5]), + ", ..." if len(invalid_sids) > 5 else ".", + ) ibtracs_ds = ibtracs_ds.sel(storm=valid_storms_mask) - if ibtracs_ds.dims['storm'] == 0: - LOGGER.info('After discarding IBTrACS events without valid values by the selected ' - 'reporting agencies, there are no tracks left that match the specified ' - 'requirements.') + if ibtracs_ds.dims["storm"] == 0: + LOGGER.info( + "After discarding IBTrACS events without valid values by the selected " + "reporting agencies, there are no tracks left that match the specified " + "requirements." 
+ ) return cls() - max_wind = ibtracs_ds['wind'].max(dim="date_time").data.ravel() - category_test = (max_wind[:, None] < np.array(SAFFIR_SIM_CAT)[None]) + max_wind = ibtracs_ds["wind"].max(dim="date_time").data.ravel() + category_test = max_wind[:, None] < np.array(SAFFIR_SIM_CAT)[None] category = np.argmax(category_test, axis=1) - 1 basin_map = {b.encode("utf-8"): v for b, v in BASIN_ENV_PRESSURE.items()} basin_fun = lambda b: basin_map[b] - ibtracs_ds['id_no'] = (ibtracs_ds.sid.str.replace(b'N', b'0') - .str.replace(b'S', b'1') - .astype(float)) + ibtracs_ds["id_no"] = ( + ibtracs_ds.sid.str.replace(b"N", b"0") + .str.replace(b"S", b"1") + .astype(float) + ) last_perc = 0 all_tracks = [] - for i_track, t_msk in enumerate(ibtracs_ds['valid_t'].data): + for i_track, t_msk in enumerate(ibtracs_ds["valid_t"].data): perc = 100 * len(all_tracks) / ibtracs_ds.sid.size if perc - last_perc >= 10: LOGGER.info("Progress: %d%%", perc) last_perc = perc track_ds = ibtracs_ds.sel(storm=i_track, date_time=t_msk) - tr_basin_penv = xr.apply_ufunc(basin_fun, track_ds.basin, vectorize=True) + tr_basin_penv = xr.apply_ufunc( + basin_fun, track_ds.basin, vectorize=True + ) tr_genesis_basin = track_ds.basin.values[0].astype(str).item() # Now that the valid time steps have been selected, we discard this track if it @@ -647,71 +773,90 @@ def from_ibtracs_netcdf(cls, provider=None, rescale_windspeeds=True, storm_id=No # A track that crosses the antimeridian in IBTrACS might be truncated by `t_msk` in # such a way that the remaining part is not crossing the antimeridian: - if (track_ds['lon'].values > 180).all(): - track_ds['lon'] -= 360 + if (track_ds["lon"].values > 180).all(): + track_ds["lon"] -= 360 # set time_step in hours - track_ds['time_step'] = xr.ones_like(track_ds['time'], dtype=float) - if track_ds['time'].size > 1: - track_ds['time_step'].values[1:] = (track_ds['time'].diff(dim="date_time") - / np.timedelta64(1, 'h')) - track_ds['time_step'].values[0] = track_ds['time_step'][1] + track_ds["time_step"] = xr.ones_like(track_ds["time"], dtype=float) + if track_ds["time"].size > 1: + track_ds["time_step"].values[1:] = track_ds["time"].diff( + dim="date_time" + ) / np.timedelta64(1, "h") + track_ds["time_step"].values[0] = track_ds["time_step"][1] with warnings.catch_warnings(): # See https://github.com/pydata/xarray/issues/4167 warnings.simplefilter(action="ignore", category=FutureWarning) - track_ds['rmw'] = track_ds['rmw'] \ - .ffill(dim='date_time', limit=1) \ - .bfill(dim='date_time', limit=1) \ + track_ds["rmw"] = ( + track_ds["rmw"] + .ffill(dim="date_time", limit=1) + .bfill(dim="date_time", limit=1) .fillna(0) - track_ds['roci'] = track_ds['roci'] \ - .ffill(dim='date_time', limit=1) \ - .bfill(dim='date_time', limit=1) \ + ) + track_ds["roci"] = ( + track_ds["roci"] + .ffill(dim="date_time", limit=1) + .bfill(dim="date_time", limit=1) .fillna(0) - track_ds['poci'] = track_ds['poci'] \ - .ffill(dim='date_time', limit=4) \ - .bfill(dim='date_time', limit=4) + ) + track_ds["poci"] = ( + track_ds["poci"] + .ffill(dim="date_time", limit=4) + .bfill(dim="date_time", limit=4) + ) # this is the most time consuming line in the processing: - track_ds['poci'] = track_ds['poci'].fillna(tr_basin_penv) + track_ds["poci"] = track_ds["poci"].fillna(tr_basin_penv) if estimate_missing: - track_ds['rmw'][:] = estimate_rmw(track_ds['rmw'].values, track_ds['pres'].values) - track_ds['roci'][:] = estimate_roci(track_ds['roci'].values, - track_ds['pres'].values) - track_ds['roci'][:] = np.fmax(track_ds['rmw'].values, 
track_ds['roci'].values) + track_ds["rmw"][:] = estimate_rmw( + track_ds["rmw"].values, track_ds["pres"].values + ) + track_ds["roci"][:] = estimate_roci( + track_ds["roci"].values, track_ds["pres"].values + ) + track_ds["roci"][:] = np.fmax( + track_ds["rmw"].values, track_ds["roci"].values + ) # ensure environmental pressure >= central pressure # this is the second most time consuming line in the processing: - track_ds['poci'][:] = np.fmax(track_ds['poci'], track_ds['pres']) + track_ds["poci"][:] = np.fmax(track_ds["poci"], track_ds["pres"]) provider_str = f"ibtracs_{provider[0]}" if len(provider) > 1: provider_str = "ibtracs_mixed:" + ",".join( - "{}({})".format(v, track_ds[f'{v}_agency'].astype(str).item()) - for v in phys_vars) + "{}({})".format(v, track_ds[f"{v}_agency"].astype(str).item()) + for v in phys_vars + ) data_vars = { - 'radius_max_wind': ('time', track_ds['rmw'].data), - 'radius_oci': ('time', track_ds['roci'].data), - 'max_sustained_wind': ('time', track_ds['wind'].data), - 'central_pressure': ('time', track_ds['pres'].data), - 'environmental_pressure': ('time', track_ds['poci'].data), + "radius_max_wind": ("time", track_ds["rmw"].data), + "radius_oci": ("time", track_ds["roci"].data), + "max_sustained_wind": ("time", track_ds["wind"].data), + "central_pressure": ("time", track_ds["pres"].data), + "environmental_pressure": ("time", track_ds["poci"].data), } coords = { - 'time': ('time', track_ds['time'].dt.round('s').data), - 'lat': ('time', track_ds['lat'].data), - 'lon': ('time', track_ds['lon'].data), + "time": ("time", track_ds["time"].dt.round("s").data), + "lat": ("time", track_ds["lat"].data), + "lon": ("time", track_ds["lon"].data), } attrs = { - 'max_sustained_wind_unit': 'kn', - 'central_pressure_unit': 'mb', - 'orig_event_flag': True, - 'data_provider': provider_str, - 'category': category[i_track], + "max_sustained_wind_unit": "kn", + "central_pressure_unit": "mb", + "orig_event_flag": True, + "data_provider": provider_str, + "category": category[i_track], } # automatically assign the remaining variables as attributes or data variables - for varname in ["time_step", "basin", "name", "sid", "id_no"] + additional_variables: + for varname in [ + "time_step", + "basin", + "name", + "sid", + "id_no", + ] + additional_variables: values = track_ds[varname].data if track_ds[varname].dtype.kind == "S": # This converts the `bytes` (dtype "|S*") in IBTrACS to the more common `str` @@ -720,20 +865,24 @@ def from_ibtracs_netcdf(cls, provider=None, rescale_windspeeds=True, storm_id=No if values.ndim == 0: attrs[varname] = values.item() else: - data_vars[varname] = ('time', values) + data_vars[varname] = ("time", values) all_tracks.append(xr.Dataset(data_vars, coords=coords, attrs=attrs)) if last_perc != 100: LOGGER.info("Progress: 100%") if len(all_tracks) == 0: # If all tracks have been discarded in the loop due to the basin filters: - LOGGER.info('There were no tracks left in the specified basin ' - 'after discarding invalid track positions.') + LOGGER.info( + "There were no tracks left in the specified basin " + "after discarding invalid track positions." + ) return cls(all_tracks) def read_processed_ibtracs_csv(self, *args, **kwargs): """This function is deprecated, use TCTracks.from_processed_ibtracs_csv instead.""" - LOGGER.warning("The use of TCTracks.read_processed_ibtracs_csv is deprecated. " - "Use TCTracks.from_processed_ibtracs_csv instead.") + LOGGER.warning( + "The use of TCTracks.read_processed_ibtracs_csv is deprecated. 
" + "Use TCTracks.from_processed_ibtracs_csv instead." + ) self.__dict__ = TCTracks.from_processed_ibtracs_csv(*args, **kwargs).__dict__ @classmethod @@ -754,8 +903,10 @@ def from_processed_ibtracs_csv(cls, file_names): def read_simulations_emanuel(self, *args, **kwargs): """This function is deprecated, use TCTracks.from_simulations_emanuel instead.""" - LOGGER.warning("The use of TCTracks.read_simulations_emanuel is deprecated. " - "Use TCTracks.from_simulations_emanuel instead.") + LOGGER.warning( + "The use of TCTracks.read_simulations_emanuel is deprecated. " + "Use TCTracks.from_simulations_emanuel instead." + ) self.__dict__ = TCTracks.from_simulations_emanuel(*args, **kwargs).__dict__ @classmethod @@ -781,15 +932,22 @@ def from_simulations_emanuel(cls, file_names, hemisphere=None, subset=None): """ data = [] for path in get_file_names(file_names): - data.extend(_read_file_emanuel( - path, hemisphere=hemisphere, subset=subset, - rmw_corr=Path(path).name in EMANUEL_RMW_CORR_FILES)) + data.extend( + _read_file_emanuel( + path, + hemisphere=hemisphere, + subset=subset, + rmw_corr=Path(path).name in EMANUEL_RMW_CORR_FILES, + ) + ) return cls(data) def read_one_gettelman(self, nc_data, i_track): """This function is deprecated, use TCTracks.from_gettelman instead.""" - LOGGER.warning("The use of TCTracks.read_one_gettelman is deprecated. " - "Use TCTracks.from_gettelman instead.") + LOGGER.warning( + "The use of TCTracks.read_one_gettelman is deprecated. " + "Use TCTracks.from_gettelman instead." + ) self.data.append(_read_one_gettelman(nc_data, i_track)) @classmethod @@ -807,13 +965,15 @@ def from_gettelman(cls, path): TCTracks with data from Andrew Gettelman's simulations. """ nc_data = nc.Dataset(path) - nstorms = nc_data.dimensions['storm'].size + nstorms = nc_data.dimensions["storm"].size return cls([_read_one_gettelman(nc_data, i) for i in range(nstorms)]) def read_simulations_chaz(self, *args, **kwargs): """This function is deprecated, use TCTracks.from_simulations_chaz instead.""" - LOGGER.warning("The use of TCTracks.read_simulations_chaz is deprecated. " - "Use TCTracks.from_simulations_chaz instead.") + LOGGER.warning( + "The use of TCTracks.read_simulations_chaz is deprecated. " + "Use TCTracks.from_simulations_chaz instead." 
+ ) self.__dict__ = TCTracks.from_simulations_chaz(*args, **kwargs).__dict__ @classmethod @@ -839,109 +999,155 @@ def from_simulations_chaz(cls, file_names, year_range=None, ensemble_nums=None): """ data = [] for path in get_file_names(file_names): - LOGGER.info('Reading %s.', path) + LOGGER.info("Reading %s.", path) with xr.open_dataset(path) as chaz_ds: - chaz_ds['time'].attrs["units"] = "days since 1950-1-1" - chaz_ds['time'].attrs["missing_value"] = -54786.0 + chaz_ds["time"].attrs["units"] = "days since 1950-1-1" + chaz_ds["time"].attrs["missing_value"] = -54786.0 chaz_ds = xr.decode_cf(chaz_ds) - chaz_ds['id_no'] = chaz_ds['stormID'] * 1000 + chaz_ds['ensembleNum'] - for var in ['time', 'longitude', 'latitude']: - chaz_ds[var] = chaz_ds[var].expand_dims(ensembleNum=chaz_ds['ensembleNum']) + chaz_ds["id_no"] = chaz_ds["stormID"] * 1000 + chaz_ds["ensembleNum"] + for var in ["time", "longitude", "latitude"]: + chaz_ds[var] = chaz_ds[var].expand_dims( + ensembleNum=chaz_ds["ensembleNum"] + ) chaz_ds = chaz_ds.stack(id=("ensembleNum", "stormID")) - years_uniq = chaz_ds['time'].dt.year.data + years_uniq = chaz_ds["time"].dt.year.data years_uniq = np.unique(years_uniq[~np.isnan(years_uniq)]) - LOGGER.info("File contains %s tracks (at most %s nodes each), " - "representing %s years (%d-%d).", - chaz_ds['id_no'].size, chaz_ds['lifelength'].size, - years_uniq.size, years_uniq[0], years_uniq[-1]) + LOGGER.info( + "File contains %s tracks (at most %s nodes each), " + "representing %s years (%d-%d).", + chaz_ds["id_no"].size, + chaz_ds["lifelength"].size, + years_uniq.size, + years_uniq[0], + years_uniq[-1], + ) # filter by year range if given if year_range: - match = ((chaz_ds['time'].dt.year >= year_range[0]) - & (chaz_ds['time'].dt.year <= year_range[1])).sel(lifelength=0) + match = ( + (chaz_ds["time"].dt.year >= year_range[0]) + & (chaz_ds["time"].dt.year <= year_range[1]) + ).sel(lifelength=0) if np.count_nonzero(match) == 0: - LOGGER.info('No tracks in time range (%s, %s).', *year_range) + LOGGER.info("No tracks in time range (%s, %s).", *year_range) continue chaz_ds = chaz_ds.sel(id=match) # filter by ensembleNum if given if ensemble_nums is not None: - match = np.isin(chaz_ds['ensembleNum'].values, ensemble_nums) + match = np.isin(chaz_ds["ensembleNum"].values, ensemble_nums) if np.count_nonzero(match) == 0: - LOGGER.info('No tracks with specified ensemble numbers.') + LOGGER.info("No tracks with specified ensemble numbers.") continue chaz_ds = chaz_ds.sel(id=match) # remove invalid tracks from selection - chaz_ds['valid_t'] = chaz_ds['time'].notnull() & chaz_ds['Mwspd'].notnull() - valid_st = chaz_ds['valid_t'].any(dim="lifelength") + chaz_ds["valid_t"] = ( + chaz_ds["time"].notnull() & chaz_ds["Mwspd"].notnull() + ) + valid_st = chaz_ds["valid_t"].any(dim="lifelength") invalid_st = np.nonzero(~valid_st.data)[0] if invalid_st.size > 0: - LOGGER.info('No valid Mwspd values found for %d out of %d storm tracks.', - invalid_st.size, valid_st.size) + LOGGER.info( + "No valid Mwspd values found for %d out of %d storm tracks.", + invalid_st.size, + valid_st.size, + ) chaz_ds = chaz_ds.sel(id=valid_st) # estimate central pressure from location and max wind - chaz_ds['pres'] = xr.full_like(chaz_ds['Mwspd'], -1, dtype=float) - chaz_ds['pres'][:] = _estimate_pressure( - chaz_ds['pres'], chaz_ds['latitude'], chaz_ds['longitude'], chaz_ds['Mwspd']) + chaz_ds["pres"] = xr.full_like(chaz_ds["Mwspd"], -1, dtype=float) + chaz_ds["pres"][:] = _estimate_pressure( + chaz_ds["pres"], + chaz_ds["latitude"], 
+ chaz_ds["longitude"], + chaz_ds["Mwspd"], + ) # compute time stepsizes - chaz_ds['time_step'] = xr.zeros_like(chaz_ds['time'], dtype=float) - chaz_ds['time_step'][1:, :] = (chaz_ds['time'].diff(dim="lifelength") - / np.timedelta64(1, 'h')) - chaz_ds['time_step'][0, :] = chaz_ds['time_step'][1, :] + chaz_ds["time_step"] = xr.zeros_like(chaz_ds["time"], dtype=float) + chaz_ds["time_step"][1:, :] = chaz_ds["time"].diff( + dim="lifelength" + ) / np.timedelta64(1, "h") + chaz_ds["time_step"][0, :] = chaz_ds["time_step"][1, :] # determine Saffir-Simpson category - max_wind = chaz_ds['Mwspd'].max(dim="lifelength").data.ravel() - category_test = (max_wind[:, None] < np.array(SAFFIR_SIM_CAT)[None]) - chaz_ds['category'] = ("id", np.argmax(category_test, axis=1) - 1) + max_wind = chaz_ds["Mwspd"].max(dim="lifelength").data.ravel() + category_test = max_wind[:, None] < np.array(SAFFIR_SIM_CAT)[None] + chaz_ds["category"] = ("id", np.argmax(category_test, axis=1) - 1) fname = Path(path).name - chaz_ds['time'][:] = chaz_ds['time'].dt.round('s').data - chaz_ds['radius_max_wind'] = xr.full_like(chaz_ds['pres'], np.nan) - chaz_ds['environmental_pressure'] = xr.full_like(chaz_ds['pres'], DEF_ENV_PRESSURE) - chaz_ds["track_name"] = ("id", [f"{fname}-{track_id.item()[1]}-{track_id.item()[0]}" - for track_id in chaz_ds['id']]) + chaz_ds["time"][:] = chaz_ds["time"].dt.round("s").data + chaz_ds["radius_max_wind"] = xr.full_like(chaz_ds["pres"], np.nan) + chaz_ds["environmental_pressure"] = xr.full_like( + chaz_ds["pres"], DEF_ENV_PRESSURE + ) + chaz_ds["track_name"] = ( + "id", + [ + f"{fname}-{track_id.item()[1]}-{track_id.item()[0]}" + for track_id in chaz_ds["id"] + ], + ) # add tracks one by one last_perc = 0 - for cnt, i_track in enumerate(chaz_ds['id_no']): - perc = 100 * cnt / chaz_ds['id_no'].size + for cnt, i_track in enumerate(chaz_ds["id_no"]): + perc = 100 * cnt / chaz_ds["id_no"].size if perc - last_perc >= 10: LOGGER.info("Progress: %d%%", perc) last_perc = perc - track_ds = chaz_ds.sel(id=i_track['id'].item()) - track_ds = track_ds.sel(lifelength=track_ds['valid_t'].data) - data.append(xr.Dataset({ - 'time_step': ('time', track_ds['time_step'].values), - 'max_sustained_wind': ('time', track_ds['Mwspd'].values), - 'central_pressure': ('time', track_ds['pres'].values), - 'radius_max_wind': ('time', track_ds['radius_max_wind'].values), - 'environmental_pressure': ('time', track_ds['environmental_pressure'].values), - 'basin': ('time', np.full(track_ds['time'].size, "GB", dtype=" 0: - LOGGER.info('%d track%s already at the requested temporal resolution.', - n_skip, "s are" if n_skip > 1 else " is") + LOGGER.info( + "%d track%s already at the requested temporal resolution.", + n_skip, + "s are" if n_skip > 1 else " is", + ) - LOGGER.info('Interpolating %d tracks to %sh time steps.', - self.size - n_skip, time_step_h) + LOGGER.info( + "Interpolating %d tracks to %sh time steps.", + self.size - n_skip, + time_step_h, + ) if land_params: extent = self.get_extent() @@ -1122,7 +1366,7 @@ def equal_timestep(self, time_step_h=1, land_params=False, pool=None): self.data, l_time_step_h, itertools.repeat(land_geom, self.size), - chunksize=chunksize + chunksize=chunksize, ) else: last_perc = 0 @@ -1139,10 +1383,12 @@ def equal_timestep(self, time_step_h=1, land_params=False, pool=None): def calc_random_walk(self, **kwargs): """Deprecated. Use `TCTracks.calc_perturbed_trajectories` instead.""" - LOGGER.warning("The use of TCTracks.calc_random_walk is deprecated." 
- "Use TCTracks.calc_perturbed_trajectories instead.") - if kwargs.get('ens_size'): - kwargs['nb_synth_tracks'] = kwargs.pop('ens_size') + LOGGER.warning( + "The use of TCTracks.calc_random_walk is deprecated." + "Use TCTracks.calc_perturbed_trajectories instead." + ) + if kwargs.get("ens_size"): + kwargs["nb_synth_tracks"] = kwargs.pop("ens_size") return self.calc_perturbed_trajectories(**kwargs) def calc_perturbed_trajectories(self, **kwargs): @@ -1167,9 +1413,10 @@ def get_bounds(self, deg_buffer=0.1): bounds : tuple (lon_min, lat_min, lon_max, lat_max) """ bounds = u_coord.latlon_bounds( - np.concatenate([t['lat'].values for t in self.data]), - np.concatenate([t['lon'].values for t in self.data]), - buffer=deg_buffer) + np.concatenate([t["lat"].values for t in self.data]), + np.concatenate([t["lon"].values for t in self.data]), + buffer=deg_buffer, + ) return bounds @property @@ -1217,7 +1464,9 @@ def generate_centroids(self, res_deg, buffer_deg): lon, lat = [ar.ravel() for ar in np.meshgrid(lon, lat)] return Centroids(lat=lat, lon=lon) - def plot(self, axis=None, figsize=(9, 13), legend=True, adapt_fontsize=True, **kwargs): + def plot( + self, axis=None, figsize=(9, 13), legend=True, adapt_fontsize=True, **kwargs + ): """Track over earth. Historical events are blue, probabilistic black. Parameters @@ -1239,13 +1488,13 @@ def plot(self, axis=None, figsize=(9, 13), legend=True, adapt_fontsize=True, **k ------- axis : matplotlib.axes._subplots.AxesSubplot """ - if 'lw' not in kwargs: - kwargs['lw'] = 2 - if 'transform' not in kwargs: - kwargs['transform'] = ccrs.PlateCarree() + if "lw" not in kwargs: + kwargs["lw"] = 2 + if "transform" not in kwargs: + kwargs["transform"] = ccrs.PlateCarree() if not self.size: - LOGGER.info('No tracks to plot') + LOGGER.info("No tracks to plot") return None extent = self.get_extent(deg_buffer=1) @@ -1253,16 +1502,18 @@ def plot(self, axis=None, figsize=(9, 13), legend=True, adapt_fontsize=True, **k if not axis: proj = ccrs.PlateCarree(central_longitude=mid_lon) - _, axis, _ = u_plot.make_map(proj=proj, figsize=figsize, adapt_fontsize=adapt_fontsize) + _, axis, _ = u_plot.make_map( + proj=proj, figsize=figsize, adapt_fontsize=adapt_fontsize + ) else: proj = axis.projection - axis.set_extent(extent, crs=kwargs['transform']) + axis.set_extent(extent, crs=kwargs["transform"]) u_plot.add_shapes(axis) cmap = ListedColormap(colors=CAT_COLORS) norm = BoundaryNorm([0] + SAFFIR_SIM_CAT, len(SAFFIR_SIM_CAT)) for track in self.data: - lonlat = np.stack([track['lon'].values, track['lat'].values], axis=-1) + lonlat = np.stack([track["lon"].values, track["lat"].values], axis=-1) lonlat[:, 0] = u_coord.lon_normalize(lonlat[:, 0], center=mid_lon) segments = np.stack([lonlat[:-1], lonlat[1:]], axis=1) @@ -1278,20 +1529,26 @@ def plot(self, axis=None, figsize=(9, 13), legend=True, adapt_fontsize=True, **k segments[mask, 1, 0] = -180 track_lc = LineCollection( - segments, linestyle='solid' if track.orig_event_flag else ':', - cmap=cmap, norm=norm, **kwargs) - track_lc.set_array(track['max_sustained_wind'].values) + segments, + linestyle="solid" if track.orig_event_flag else ":", + cmap=cmap, + norm=norm, + **kwargs, + ) + track_lc.set_array(track["max_sustained_wind"].values) axis.add_collection(track_lc) if legend: - leg_lines = [Line2D([0], [0], color=CAT_COLORS[i_col], lw=2) - for i_col in range(len(SAFFIR_SIM_CAT))] + leg_lines = [ + Line2D([0], [0], color=CAT_COLORS[i_col], lw=2) + for i_col in range(len(SAFFIR_SIM_CAT)) + ] leg_names = [CAT_NAMES[i_col] for i_col in 
sorted(CAT_NAMES.keys())] if any(not tr.orig_event_flag for tr in self.data): - leg_lines.append(Line2D([0], [0], color='grey', lw=2, ls='solid')) - leg_lines.append(Line2D([0], [0], color='grey', lw=2, ls=':')) - leg_names.append('Historical') - leg_names.append('Synthetic') + leg_lines.append(Line2D([0], [0], color="grey", lw=2, ls="solid")) + leg_lines.append(Line2D([0], [0], color="grey", lw=2, ls=":")) + leg_names.append("Historical") + leg_names.append("Synthetic") axis.legend(leg_lines, leg_names, loc=0) plt.tight_layout() return axis @@ -1304,16 +1561,18 @@ def write_netcdf(self, folder_name): folder_name : str Folder name where to write files. """ - list_path = [Path(folder_name, track.sid + '.nc') for track in self.data] - LOGGER.info('Writting %s files.', self.size) + list_path = [Path(folder_name, track.sid + ".nc") for track in self.data] + LOGGER.info("Writting %s files.", self.size) for track in self.data: - track.attrs['orig_event_flag'] = int(track.orig_event_flag) + track.attrs["orig_event_flag"] = int(track.orig_event_flag) xr.save_mfdataset(self.data, list_path) def read_netcdf(self, *args, **kwargs): """This function is deprecated, use TCTracks.from_netcdf instead.""" - LOGGER.warning("The use of TCTracks.read_netcdf is deprecated. " - "Use TCTracks.from_netcdf instead.") + LOGGER.warning( + "The use of TCTracks.read_netcdf is deprecated. " + "Use TCTracks.from_netcdf instead." + ) self.__dict__ = TCTracks.from_netcdf(*args, **kwargs).__dict__ @classmethod @@ -1338,20 +1597,25 @@ def from_netcdf(cls, folder_name): TCTracks with data from the given directory of NetCDF files. """ file_tr = get_file_names(folder_name) - LOGGER.info('Reading %s files.', len(file_tr)) + LOGGER.info("Reading %s files.", len(file_tr)) data = [] for file in file_tr: - if Path(file).suffix != '.nc': + if Path(file).suffix != ".nc": continue with xr.open_dataset(file) as track: - track.attrs['orig_event_flag'] = bool(track.orig_event_flag) + track.attrs["orig_event_flag"] = bool(track.orig_event_flag) if "basin" in track.attrs: - LOGGER.warning("Track data comes with legacy basin attribute. " - "We assume that the track remains in that basin during its " - "whole life time.") + LOGGER.warning( + "Track data comes with legacy basin attribute. " + "We assume that the track remains in that basin during its " + "whole life time." 
+ ) basin = track.basin - del track.attrs['basin'] - track['basin'] = ("time", np.full(track['time'].size, basin, dtype=" 1 - else Point(lons, lats) - for lons, lats in zip(t_lons, t_lats) - ]) + gdf.geometry = gpd.GeoSeries( + [ + ( + LineString(np.c_[lons, lats]) + if lons.size > 1 + else Point(lons, lats) + ) + for lons, lats in zip(t_lons, t_lats) + ] + ) gdf.crs = DEF_CRS # for splitting, restrict to tracks that come close to the antimeridian - t_split_mask = np.asarray([ - (lon > 170).any() and (lon < -170).any() and lon.size > 1 - for lon in t_lons]) + t_split_mask = np.asarray( + [ + (lon > 170).any() and (lon < -170).any() and lon.size > 1 + for lon in t_lons + ] + ) # note that tracks might be splitted at self-intersections as well: # https://github.com/Toblerity/Shapely/issues/572 antimeridian = LineString([(180, -90), (180, 90)]) - gdf.loc[t_split_mask, "geometry"] = gdf.geometry[t_split_mask] \ - .to_crs({"proj": "longlat", "lon_wrap": 180}) \ - .apply(lambda line: MultiLineString([ - LineString([(x - 360, y) for x, y in segment.coords]) - if any(x > 180 for x, y in segment.coords) else segment - for segment in shapely.ops.split(line, antimeridian).geoms - ])) + gdf.loc[t_split_mask, "geometry"] = ( + gdf.geometry[t_split_mask] + .to_crs({"proj": "longlat", "lon_wrap": 180}) + .apply( + lambda line: MultiLineString( + [ + ( + LineString([(x - 360, y) for x, y in segment.coords]) + if any(x > 180 for x, y in segment.coords) + else segment + ) + for segment in shapely.ops.split(line, antimeridian).geoms + ] + ) + ) + ) else: # LineString only works with more than one lat/lon pair - gdf.geometry = gpd.GeoSeries([ - LineString(np.c_[track['lon'], track['lat']]) if track['lon'].size > 1 - else Point(track['lon'].data, track['lat'].data) - for track in self.data - ]) + gdf.geometry = gpd.GeoSeries( + [ + ( + LineString(np.c_[track["lon"], track["lat"]]) + if track["lon"].size > 1 + else Point(track["lon"].data, track["lat"].data) + ) + for track in self.data + ] + ) gdf.crs = DEF_CRS return gdf @@ -1528,43 +1819,52 @@ def _one_interp_data(track, time_step_h, land_geom=None): """ if time_step_h is None: return track - if track['time'].size < 2: - LOGGER.warning('Track interpolation not done. ' - 'Not enough elements for %s', track.name) + if track["time"].size < 2: + LOGGER.warning( + "Track interpolation not done. 
" "Not enough elements for %s", + track.name, + ) track_int = track else: - method = ['linear', 'quadratic', 'cubic'][min(2, track['time'].size - 2)] + method = ["linear", "quadratic", "cubic"][min(2, track["time"].size - 2)] # handle change of sign in longitude - lon = u_coord.lon_normalize(track['lon'].copy(), center=0) + lon = u_coord.lon_normalize(track["lon"].copy(), center=0) if (lon < -170).any() and (lon > 170).any(): # crosses 180 degrees east/west -> use positive degrees east lon[lon < 0] += 360 - time_step = pd.tseries.frequencies.to_offset(pd.Timedelta(hours=time_step_h)).freqstr - track_int = track.resample(time=time_step, skipna=True)\ - .interpolate('linear') + time_step = pd.tseries.frequencies.to_offset( + pd.Timedelta(hours=time_step_h) + ).freqstr + track_int = track.resample(time=time_step, skipna=True).interpolate( + "linear" + ) for var in track.data_vars: if "time" in track[var].dims and track[var].dtype.kind != "f": track_int[var] = track[var].resample(time=time_step).nearest() - track_int['time_step'][:] = time_step_h + track_int["time_step"][:] = time_step_h lon_int = lon.resample(time=time_step).interpolate(method) lon_int[lon_int > 180] -= 360 - track_int.coords['lon'] = lon_int - track_int.coords['lat'] = track['lat'].resample(time=time_step)\ - .interpolate(method) - track_int.attrs['category'] = set_category( - track_int['max_sustained_wind'].values, - track_int.attrs['max_sustained_wind_unit']) + track_int.coords["lon"] = lon_int + track_int.coords["lat"] = ( + track["lat"].resample(time=time_step).interpolate(method) + ) + track_int.attrs["category"] = set_category( + track_int["max_sustained_wind"].values, + track_int.attrs["max_sustained_wind_unit"], + ) # restrict to time steps within original bounds track_int = track_int.sel( - time=(track['time'][0] <= track_int['time']) & - (track_int['time'] <= track['time'][-1])) + time=(track["time"][0] <= track_int["time"]) + & (track_int["time"] <= track["time"][-1]) + ) if land_geom: track_land_params(track_int, land_geom) return track_int + def _raise_if_legacy_or_unknown_hdf5_format(file_name): """Raise an exception if the HDF5 format of the file is not supported @@ -1610,11 +1910,12 @@ def _raise_if_legacy_or_unknown_hdf5_format(file_name): " supported by CLIMADA. Please store the data again using" " TCTracks.write_hdf5. If you struggle to convert the data, please open an" " issue on GitHub." - ) if is_legacy else ( - f"Unknown HDF5/NetCDF file format: {file_name}" ) + if is_legacy + else (f"Unknown HDF5/NetCDF file format: {file_name}") ) + def _read_one_gettelman(nc_data, i_track): """Read a single track from Andrew Gettelman's NetCDF dataset @@ -1629,36 +1930,45 @@ def _read_one_gettelman(nc_data, i_track): ------- xr.Dataset """ - scale_to_10m = (10. 
/ 60.)**.11 + scale_to_10m = (10.0 / 60.0) ** 0.11 mps2kts = 1.94384 - basin_dict = {0: 'NA - North Atlantic', - 1: 'SA - South Atlantic', - 2: 'WP - West Pacific', - 3: 'EP - East Pacific', - 4: 'SP - South Pacific', - 5: 'NI - North Indian', - 6: 'SI - South Indian', - 7: 'AS - Arabian Sea', - 8: 'BB - Bay of Bengal', - 9: 'EA - Eastern Australia', - 10: 'WA - Western Australia', - 11: 'CP - Central Pacific', - 12: 'CS - Carribbean Sea', - 13: 'GM - Gulf of Mexico', - 14: 'MM - Missing'} - - val_len = nc_data.variables['numObs'][i_track] + basin_dict = { + 0: "NA - North Atlantic", + 1: "SA - South Atlantic", + 2: "WP - West Pacific", + 3: "EP - East Pacific", + 4: "SP - South Pacific", + 5: "NI - North Indian", + 6: "SI - South Indian", + 7: "AS - Arabian Sea", + 8: "BB - Bay of Bengal", + 9: "EA - Eastern Australia", + 10: "WA - Western Australia", + 11: "CP - Central Pacific", + 12: "CS - Carribbean Sea", + 13: "GM - Gulf of Mexico", + 14: "MM - Missing", + } + + val_len = nc_data.variables["numObs"][i_track] sid = str(i_track) - times = nc_data.variables['source_time'][i_track, :][:val_len] + times = nc_data.variables["source_time"][i_track, :][:val_len] datetimes = list() for time in times: try: datetimes.append( dt.datetime.strptime( - str(nc.num2date(time, 'days since {}'.format('1858-11-17'), - calendar='standard')), - '%Y-%m-%d %H:%M:%S')) + str( + nc.num2date( + time, + "days since {}".format("1858-11-17"), + calendar="standard", + ) + ), + "%Y-%m-%d %H:%M:%S", + ) + ) except ValueError: # If wrong t, set t to previous t plus 3 hours if datetimes: @@ -1668,52 +1978,71 @@ def _read_one_gettelman(nc_data, i_track): time = times[pos + 1] - 1 / 24 * 3 datetimes.append( dt.datetime.strptime( - str(nc.num2date(time, 'days since {}'.format('1858-11-17'), - calendar='standard')), - '%Y-%m-%d %H:%M:%S')) + str( + nc.num2date( + time, + "days since {}".format("1858-11-17"), + calendar="standard", + ) + ), + "%Y-%m-%d %H:%M:%S", + ) + ) time_step = [] for i_time, time in enumerate(datetimes[1:], 1): time_step.append((time - datetimes[i_time - 1]).total_seconds() / 3600) time_step.append(time_step[-1]) - basins_numeric = nc_data.variables['basin'][i_track, :val_len] - basins = [basin_dict[b] if b in basin_dict else basin_dict[14] for b in basins_numeric] + basins_numeric = nc_data.variables["basin"][i_track, :val_len] + basins = [ + basin_dict[b] if b in basin_dict else basin_dict[14] for b in basins_numeric + ] - lon = nc_data.variables['lon'][i_track, :][:val_len] + lon = nc_data.variables["lon"][i_track, :][:val_len] lon[lon > 180] = lon[lon > 180] - 360 # change lon format to -180 to 180 - lat = nc_data.variables['lat'][i_track, :][:val_len] - cen_pres = nc_data.variables['pres'][i_track, :][:val_len] - av_prec = nc_data.variables['precavg'][i_track, :][:val_len] - max_prec = nc_data.variables['precmax'][i_track, :][:val_len] + lat = nc_data.variables["lat"][i_track, :][:val_len] + cen_pres = nc_data.variables["pres"][i_track, :][:val_len] + av_prec = nc_data.variables["precavg"][i_track, :][:val_len] + max_prec = nc_data.variables["precmax"][i_track, :][:val_len] # m/s to kn - wind = nc_data.variables['wind'][i_track, :][:val_len] * mps2kts * scale_to_10m + wind = nc_data.variables["wind"][i_track, :][:val_len] * mps2kts * scale_to_10m if not all(wind.data): # if wind is empty wind = np.ones(wind.size) * -999.9 - tr_df = pd.DataFrame({'time': datetimes, 'lat': lat, 'lon': lon, - 'max_sustained_wind': wind, - 'central_pressure': cen_pres, - 'environmental_pressure': np.ones(lat.size) * 
1015., - 'radius_max_wind': np.ones(lat.size) * 65., - 'maximum_precipitation': max_prec, - 'average_precipitation': av_prec, - 'basin': [b[:2] for b in basins], - 'time_step': time_step}) + tr_df = pd.DataFrame( + { + "time": datetimes, + "lat": lat, + "lon": lon, + "max_sustained_wind": wind, + "central_pressure": cen_pres, + "environmental_pressure": np.ones(lat.size) * 1015.0, + "radius_max_wind": np.ones(lat.size) * 65.0, + "maximum_precipitation": max_prec, + "average_precipitation": av_prec, + "basin": [b[:2] for b in basins], + "time_step": time_step, + } + ) # construct xarray - tr_ds = xr.Dataset.from_dataframe(tr_df.set_index('time')) - tr_ds.coords['lat'] = ('time', tr_ds['lat'].values) - tr_ds.coords['lon'] = ('time', tr_ds['lon'].values) - tr_ds['basin'] = tr_ds['basin'].astype('= hem_min) & (lat <= hem_max) | (lat == 0) hem_idx = np.all(hem_mask, axis=1).nonzero()[0] - data_hem = lambda keys: [data_mat[f'{k}store'][hem_idx] for k in keys] + data_hem = lambda keys: [data_mat[f"{k}store"][hem_idx] for k in keys] - lat, lon = data_hem(['lat', 'long']) - months, days, hours = data_hem(['month', 'day', 'hour']) + lat, lon = data_hem(["lat", "long"]) + months, days, hours = data_hem(["month", "day", "hour"]) months, days, hours = [np.int8(ar) for ar in [months, days, hours]] - tc_rmw, tc_maxwind, tc_pressure = data_hem(['rm', 'v', 'p']) - years = data_mat['yearstore'][0, hem_idx] + tc_rmw, tc_maxwind, tc_pressure = data_hem(["rm", "v", "p"]) + years = data_mat["yearstore"][0, hem_idx] ntracks, nnodes = lat.shape - LOGGER.info("Loading %s tracks%s.", ntracks, - f" on {hemisphere} hemisphere" if hemisphere in ['N', 'S'] else "") + LOGGER.info( + "Loading %s tracks%s.", + ntracks, + f" on {hemisphere} hemisphere" if hemisphere in ["N", "S"] else "", + ) # change lon format to -180 to 180 lon[lon > 180] = lon[lon > 180] - 360 @@ -1793,21 +2133,25 @@ def _read_file_emanuel(path, hemisphere=None, rmw_corr=False, subset=None): # deal with change of year year = np.full(valid_idx.size, years[i_track]) - year_change = (np.diff(months[i_track, valid_idx]) < 0) + year_change = np.diff(months[i_track, valid_idx]) < 0 year_change = year_change.nonzero()[0] if year_change.size > 0: - year[year_change[0] + 1:] += 1 + year[year_change[0] + 1 :] += 1 try: - datetimes = map(dt.datetime, year, - months[i_track, valid_idx], - days[i_track, valid_idx], - hours[i_track, valid_idx]) + datetimes = map( + dt.datetime, + year, + months[i_track, valid_idx], + days[i_track, valid_idx], + hours[i_track, valid_idx], + ) datetimes = list(datetimes) except ValueError as err: # dates are known to contain invalid February 30 - date_feb = (months[i_track, valid_idx] == 2) \ - & (days[i_track, valid_idx] > 28) + date_feb = (months[i_track, valid_idx] == 2) & ( + days[i_track, valid_idx] > 28 + ) if np.count_nonzero(date_feb) == 0: # unknown invalid date issue raise err @@ -1817,42 +2161,52 @@ def _read_file_emanuel(path, hemisphere=None, rmw_corr=False, subset=None): year[reference_idx], months[i_track, valid_idx[reference_idx]], days[i_track, valid_idx[reference_idx]], - hours[i_track, valid_idx[reference_idx]],) - datetimes = [reference_date + dt.timedelta(hours=int(step * i)) - for i in range(nnodes)] - datetimes = [cftime.DatetimeProlepticGregorian(d.year, d.month, d.day, d.hour) - for d in datetimes] + hours[i_track, valid_idx[reference_idx]], + ) + datetimes = [ + reference_date + dt.timedelta(hours=int(step * i)) + for i in range(nnodes) + ] + datetimes = [ + cftime.DatetimeProlepticGregorian(d.year, d.month, 
d.day, d.hour) + for d in datetimes + ] max_sustained_wind = tc_maxwind[i_track, valid_idx] - max_sustained_wind_unit = 'kn' + max_sustained_wind_unit = "kn" env_pressure = np.full(nnodes, DEF_ENV_PRESSURE) - category = set_category(max_sustained_wind, - max_sustained_wind_unit, - SAFFIR_SIM_CAT) - tr_ds = xr.Dataset({ - 'time_step': ('time', np.full(nnodes, time_step)), - 'radius_max_wind': ('time', tc_rmw[i_track, valid_idx]), - 'max_sustained_wind': ('time', max_sustained_wind), - 'central_pressure': ('time', tc_pressure[i_track, valid_idx]), - 'environmental_pressure': ('time', env_pressure), - 'basin': ('time', np.full(nnodes, basin, dtype=" 0: # Assume the landfall started between this and the previous point - orig_lf[i_lf][0] = track['lat'][lf_point - 1] + \ - (track['lat'][lf_point] - track['lat'][lf_point - 1]) / 2 - orig_lf[i_lf][1] = track['lon'][lf_point - 1] + \ - (track['lon'][lf_point] - track['lon'][lf_point - 1]) / 2 + orig_lf[i_lf][0] = ( + track["lat"][lf_point - 1] + + (track["lat"][lf_point] - track["lat"][lf_point - 1]) / 2 + ) + orig_lf[i_lf][1] = ( + track["lon"][lf_point - 1] + + (track["lon"][lf_point] - track["lon"][lf_point - 1]) / 2 + ) else: # track starts over land, assume first 'landfall' starts here - orig_lf[i_lf][0] = track['lat'][lf_point] - orig_lf[i_lf][1] = track['lon'][lf_point] - + orig_lf[i_lf][0] = track["lat"][lf_point] + orig_lf[i_lf][1] = track["lon"][lf_point] - dist = DistanceMetric.get_metric('haversine') - nodes1 = np.radians(np.array([track['lat'].values[1:], - track['lon'].values[1:]]).transpose()) - nodes0 = np.radians(np.array([track['lat'].values[:-1], - track['lon'].values[:-1]]).transpose()) + dist = DistanceMetric.get_metric("haversine") + nodes1 = np.radians( + np.array([track["lat"].values[1:], track["lon"].values[1:]]).transpose() + ) + nodes0 = np.radians( + np.array([track["lat"].values[:-1], track["lon"].values[:-1]]).transpose() + ) dist_since_lf[1:] = dist.pairwise(nodes1, nodes0).diagonal() - dist_since_lf[~track['on_land'].values] = 0.0 - nodes1 = np.array([track['lat'].values[sea_land_idx], - track['lon'].values[sea_land_idx]]).transpose() / 180 * np.pi - dist_since_lf[sea_land_idx] = \ - dist.pairwise(nodes1, orig_lf / 180 * np.pi).diagonal() + dist_since_lf[~track["on_land"].values] = 0.0 + nodes1 = ( + np.array( + [track["lat"].values[sea_land_idx], track["lon"].values[sea_land_idx]] + ).transpose() + / 180 + * np.pi + ) + dist_since_lf[sea_land_idx] = dist.pairwise( + nodes1, orig_lf / 180 * np.pi + ).diagonal() for sea_land, land_sea in zip(sea_land_idx, land_sea_idx): - dist_since_lf[sea_land:land_sea] = \ - np.cumsum(dist_since_lf[sea_land:land_sea]) + dist_since_lf[sea_land:land_sea] = np.cumsum(dist_since_lf[sea_land:land_sea]) dist_since_lf *= EARTH_RADIUS_KM - dist_since_lf[~track['on_land'].values] = np.nan + dist_since_lf[~track["on_land"].values] = np.nan return dist_since_lf + def _get_landfall_idx(track, include_starting_landfall=False): """Get the position of the start and end of landfalls for a TC track. @@ -2005,21 +2374,22 @@ def _get_landfall_idx(track, include_starting_landfall=False): ends over land, the last value is set to track.time.size. 
""" # Index in land that comes from previous sea index - sea_land_idx = np.where(np.diff(track['on_land'].astype(int)) == 1)[0] + 1 + sea_land_idx = np.where(np.diff(track["on_land"].astype(int)) == 1)[0] + 1 # Index in sea that comes from previous land index - land_sea_idx = np.where(np.diff(track['on_land'].astype(int)) == -1)[0] + 1 - if track['on_land'][-1]: + land_sea_idx = np.where(np.diff(track["on_land"].astype(int)) == -1)[0] + 1 + if track["on_land"][-1]: # track ends over land: add last track point as the end of that landfall - land_sea_idx = np.append(land_sea_idx, track['time'].size) - if track['on_land'][0]: + land_sea_idx = np.append(land_sea_idx, track["time"].size) + if track["on_land"][0]: # track starts over land: remove first land-to-sea transition (not a landfall)? if include_starting_landfall: sea_land_idx = np.append(0, sea_land_idx) else: land_sea_idx = land_sea_idx[1:] if land_sea_idx.size != sea_land_idx.size: - raise ValueError('Mismatch') - return sea_land_idx,land_sea_idx + raise ValueError("Mismatch") + return sea_land_idx, land_sea_idx + def _estimate_pressure(cen_pres, lat, lon, v_max): """Replace missing pressure values with statistical estimate. @@ -2052,11 +2422,10 @@ def _estimate_pressure(cen_pres, lat, lon, v_max): lat, lon = [np.where(np.isnan(ar), -999, ar) for ar in [lat, lon]] msk = (cen_pres <= 0) & (v_max > 0) & (lat > -999) & (lon > -999) c_const, c_lat, c_lon, c_vmax = 1026.3401, -0.05504, -0.03536, -0.7357 - cen_pres[msk] = c_const + c_lat * lat[msk] \ - + c_lon * lon[msk] \ - + c_vmax * v_max[msk] + cen_pres[msk] = c_const + c_lat * lat[msk] + c_lon * lon[msk] + c_vmax * v_max[msk] return np.where(cen_pres <= 0, np.nan, cen_pres) + def _estimate_vmax(v_max, lat, lon, cen_pres): """Replace missing wind speed values with a statistical estimate. @@ -2088,11 +2457,10 @@ def _estimate_vmax(v_max, lat, lon, cen_pres): lat, lon = [np.where(np.isnan(ar), -999, ar) for ar in [lat, lon]] msk = (v_max <= 0) & (cen_pres > 0) & (lat > -999) & (lon > -999) c_const, c_lat, c_lon, c_pres = 1216.5223, -0.04086, -0.04190, -1.1797 - v_max[msk] = c_const + c_lat * lat[msk] \ - + c_lon * lon[msk] \ - + c_pres * cen_pres[msk] + v_max[msk] = c_const + c_lat * lat[msk] + c_lon * lon[msk] + c_pres * cen_pres[msk] return np.where(v_max <= 0, np.nan, v_max) + def estimate_roci(roci, cen_pres): """Replace missing radius (ROCI) values with statistical estimate. @@ -2124,12 +2492,19 @@ def estimate_roci(roci, cen_pres): roci_l = [210.711487, 215.897110, 198.261520, 159.589508, 90.900116] roci[msk] = 0 for i, pres_l_i in enumerate(pres_l): - slope_0 = 1. / (pres_l_i - pres_l[i - 1]) if i > 0 else 0 - slope_1 = 1. / (pres_l[i + 1] - pres_l_i) if i + 1 < len(pres_l) else 0 - roci[msk] += roci_l[i] * np.fmax(0, (1 - slope_0 * np.fmax(0, pres_l_i - cen_pres[msk]) - - slope_1 * np.fmax(0, cen_pres[msk] - pres_l_i))) + slope_0 = 1.0 / (pres_l_i - pres_l[i - 1]) if i > 0 else 0 + slope_1 = 1.0 / (pres_l[i + 1] - pres_l_i) if i + 1 < len(pres_l) else 0 + roci[msk] += roci_l[i] * np.fmax( + 0, + ( + 1 + - slope_0 * np.fmax(0, pres_l_i - cen_pres[msk]) + - slope_1 * np.fmax(0, cen_pres[msk] - pres_l_i) + ), + ) return np.where(roci <= 0, np.nan, roci) + def estimate_rmw(rmw, cen_pres): """Replace missing radius (RMW) values with statistical estimate. @@ -2159,12 +2534,19 @@ def estimate_rmw(rmw, cen_pres): rmw_l = [14.907318, 15.726927, 25.742142, 56.856522] rmw[msk] = 0 for i, pres_l_i in enumerate(pres_l): - slope_0 = 1. / (pres_l_i - pres_l[i - 1]) if i > 0 else 0 - slope_1 = 1. 
/ (pres_l[i + 1] - pres_l_i) if i + 1 < len(pres_l) else 0 - rmw[msk] += rmw_l[i] * np.fmax(0, (1 - slope_0 * np.fmax(0, pres_l_i - cen_pres[msk]) - - slope_1 * np.fmax(0, cen_pres[msk] - pres_l_i))) + slope_0 = 1.0 / (pres_l_i - pres_l[i - 1]) if i > 0 else 0 + slope_1 = 1.0 / (pres_l[i + 1] - pres_l_i) if i + 1 < len(pres_l) else 0 + rmw[msk] += rmw_l[i] * np.fmax( + 0, + ( + 1 + - slope_0 * np.fmax(0, pres_l_i - cen_pres[msk]) + - slope_1 * np.fmax(0, cen_pres[msk] - pres_l_i) + ), + ) return np.where(rmw <= 0, np.nan, rmw) + def ibtracs_fit_param(explained, explanatory, year_range=(1980, 2019), order=1): """Statistically fit an ibtracs parameter to other ibtracs variables. @@ -2185,8 +2567,8 @@ def ibtracs_fit_param(explained, explanatory, year_range=(1980, 2019), order=1): ------- result : OLSResults """ - wmo_vars = ['wind', 'pres', 'rmw', 'roci', 'poci'] - all_vars = ['lat', 'lon'] + wmo_vars + wmo_vars = ["wind", "pres", "rmw", "roci", "poci"] + all_vars = ["lat", "lon"] + wmo_vars explanatory = list(explanatory) variables = explanatory + [explained] for var in variables: @@ -2194,7 +2576,7 @@ def ibtracs_fit_param(explained, explanatory, year_range=(1980, 2019), order=1): raise KeyError("Unknown ibtracs variable: %s" % var) # load ibtracs dataset - fn_nc = SYSTEM_DIR.joinpath('IBTrACS.ALL.v04r00.nc') + fn_nc = SYSTEM_DIR.joinpath("IBTrACS.ALL.v04r00.nc") with xr.open_dataset(fn_nc) as ibtracs_ds: # choose specified year range years = ibtracs_ds.sid.str.slice(0, 4).astype(int) @@ -2204,8 +2586,8 @@ def ibtracs_fit_param(explained, explanatory, year_range=(1980, 2019), order=1): if "wind" in variables: for agency in IBTRACS_AGENCIES: scale, shift = IBTRACS_AGENCY_1MIN_WIND_FACTOR[agency] - ibtracs_ds[f'{agency}_wind'] -= shift - ibtracs_ds[f'{agency}_wind'] /= scale + ibtracs_ds[f"{agency}_wind"] -= shift + ibtracs_ds[f"{agency}_wind"] /= scale # fill values agency_pref, track_agency_ix = ibtracs_track_agency(ibtracs_ds) @@ -2213,21 +2595,25 @@ def ibtracs_fit_param(explained, explanatory, year_range=(1980, 2019), order=1): if var not in variables: continue # array of values in order of preference - cols = [f'{a}_{var}' for a in agency_pref] + cols = [f"{a}_{var}" for a in agency_pref] cols = [col for col in cols if col in ibtracs_ds.data_vars.keys()] - all_vals = ibtracs_ds[cols].to_array(dim='agency') - preferred_ix = all_vals.notnull().argmax(dim='agency') - if var in ['wind', 'pres']: + all_vals = ibtracs_ds[cols].to_array(dim="agency") + preferred_ix = all_vals.notnull().argmax(dim="agency") + if var in ["wind", "pres"]: # choice: wmo -> wmo_agency/usa_agency -> preferred - ibtracs_ds[var] = ibtracs_ds['wmo_' + var] \ - .fillna(all_vals.isel(agency=track_agency_ix)) \ + ibtracs_ds[var] = ( + ibtracs_ds["wmo_" + var] + .fillna(all_vals.isel(agency=track_agency_ix)) .fillna(all_vals.isel(agency=preferred_ix)) + ) else: ibtracs_ds[var] = all_vals.isel(agency=preferred_ix) - fit_df = pd.DataFrame({var: ibtracs_ds[var].values.ravel() for var in variables}) - fit_df = fit_df.dropna(axis=0, how='any').reset_index(drop=True) - if 'lat' in explanatory: - fit_df['lat'] = fit_df['lat'].abs() + fit_df = pd.DataFrame( + {var: ibtracs_ds[var].values.ravel() for var in variables} + ) + fit_df = fit_df.dropna(axis=0, how="any").reset_index(drop=True) + if "lat" in explanatory: + fit_df["lat"] = fit_df["lat"].abs() # prepare explanatory variables d_explanatory = fit_df[explanatory] @@ -2243,23 +2629,31 @@ def ibtracs_fit_param(explained, explanatory, year_range=(1980, 2019), order=1): # 
piecewise linear with given break points d_explanatory = d_explanatory.drop(labels=[ex], axis=1) for i, max_o_i in enumerate(max_o): - col = f'{ex}{max_o_i}' - slope_0 = 1. / (max_o_i - max_o[i - 1]) if i > 0 else 0 - slope_1 = 1. / (max_o[i + 1] - max_o_i) if i + 1 < len(max_o) else 0 - d_explanatory[col] = np.fmax(0, (1 - slope_0 * np.fmax(0, max_o_i - fit_df[ex]) - - slope_1 * np.fmax(0, fit_df[ex] - max_o_i))) + col = f"{ex}{max_o_i}" + slope_0 = 1.0 / (max_o_i - max_o[i - 1]) if i > 0 else 0 + slope_1 = ( + 1.0 / (max_o[i + 1] - max_o_i) if i + 1 < len(max_o) else 0 + ) + d_explanatory[col] = np.fmax( + 0, + ( + 1 + - slope_0 * np.fmax(0, max_o_i - fit_df[ex]) + - slope_1 * np.fmax(0, fit_df[ex] - max_o_i) + ), + ) elif max_o < 0: d_explanatory = d_explanatory.drop(labels=[ex], axis=1) for order in range(1, abs(max_o) + 1): - d_explanatory[f'{ex}^{-order}'] = fit_df[ex]**(-order) + d_explanatory[f"{ex}^{-order}"] = fit_df[ex] ** (-order) add_const = True else: for order in range(2, max_o + 1): - d_explanatory[f'{ex}^{order}'] = fit_df[ex]**order + d_explanatory[f"{ex}^{order}"] = fit_df[ex] ** order add_const = True d_explained = fit_df[[explained]] if add_const: - d_explanatory['const'] = 1.0 + d_explanatory["const"] = 1.0 # run statistical fit sm_results = sm.OLS(d_explained, d_explanatory).fit() @@ -2270,6 +2664,7 @@ def ibtracs_fit_param(explained, explanatory, year_range=(1980, 2019), order=1): return sm_results + def ibtracs_track_agency(ds_sel): """Get preferred IBTrACS agency for each entry in the dataset. @@ -2286,18 +2681,20 @@ def ibtracs_track_agency(ds_sel): For each entry in `ds_sel`, the agency to use, given as an index into `agency_pref`. """ agency_pref = ["wmo"] + IBTRACS_AGENCIES - agency_map = {a.encode('utf-8'): i for i, a in enumerate(agency_pref)} - agency_map.update({ - a.encode('utf-8'): agency_map[b'usa'] for a in IBTRACS_USA_AGENCIES - }) - agency_map[b''] = agency_map[b'wmo'] + agency_map = {a.encode("utf-8"): i for i, a in enumerate(agency_pref)} + agency_map.update( + {a.encode("utf-8"): agency_map[b"usa"] for a in IBTRACS_USA_AGENCIES} + ) + agency_map[b""] = agency_map[b"wmo"] agency_fun = lambda x: agency_map[x] if "track_agency" not in ds_sel.data_vars.keys(): - ds_sel['track_agency'] = ds_sel['wmo_agency'].where(ds_sel['wmo_agency'] != b'', - ds_sel['usa_agency']) - track_agency_ix = xr.apply_ufunc(agency_fun, ds_sel['track_agency'], vectorize=True) + ds_sel["track_agency"] = ds_sel["wmo_agency"].where( + ds_sel["wmo_agency"] != b"", ds_sel["usa_agency"] + ) + track_agency_ix = xr.apply_ufunc(agency_fun, ds_sel["track_agency"], vectorize=True) return agency_pref, track_agency_ix + def ibtracs_add_official_variable(ibtracs_ds, tc_var, add_3h=False): """Add variables for the officially responsible agencies to an IBTrACS dataset @@ -2318,52 +2715,68 @@ def ibtracs_add_official_variable(ibtracs_ds, tc_var, add_3h=False): """ if "nan_var" not in ibtracs_ds.data_vars.keys(): # add an array full of NaN as a fallback value in the procedure - ibtracs_ds['nan_var'] = xr.full_like(ibtracs_ds['lat'], np.nan) + ibtracs_ds["nan_var"] = xr.full_like(ibtracs_ds["lat"], np.nan) # determine which of the official agencies report this variable at all - available_agencies = [a for a in IBTRACS_AGENCIES - if f'{a}_{tc_var}' in ibtracs_ds.data_vars.keys()] + available_agencies = [ + a for a in IBTRACS_AGENCIES if f"{a}_{tc_var}" in ibtracs_ds.data_vars.keys() + ] # map all non-reporting agency variables to the 'nan_var' (0) agency_map = { - a.encode("utf-8"): 
available_agencies.index(a) + 1 if a in available_agencies else 0 - for a in [''] + IBTRACS_AGENCIES + a.encode("utf-8"): ( + available_agencies.index(a) + 1 if a in available_agencies else 0 + ) + for a in [""] + IBTRACS_AGENCIES } - agency_map.update({ - a.encode('utf-8'): agency_map[b'usa'] for a in IBTRACS_USA_AGENCIES - }) + agency_map.update( + {a.encode("utf-8"): agency_map[b"usa"] for a in IBTRACS_USA_AGENCIES} + ) # read from officially responsible agencies that report this variable, but only # at official reporting times (usually 6-hourly) official_agency_ix = xr.apply_ufunc( - lambda x: agency_map[x], ibtracs_ds['wmo_agency'], vectorize=True) - available_cols = ['nan_var'] + [f'{a}_{tc_var}' for a in available_agencies] - all_vals = ibtracs_ds[available_cols].to_array(dim='agency') - ibtracs_ds[f'official_{tc_var}'] = all_vals.isel(agency=official_agency_ix) + lambda x: agency_map[x], ibtracs_ds["wmo_agency"], vectorize=True + ) + available_cols = ["nan_var"] + [f"{a}_{tc_var}" for a in available_agencies] + all_vals = ibtracs_ds[available_cols].to_array(dim="agency") + ibtracs_ds[f"official_{tc_var}"] = all_vals.isel(agency=official_agency_ix) if add_3h: # create a copy in float for NaN interpolation official_agency_ix_interp = official_agency_ix.astype(np.float16) # extrapolate track agency for tracks with only a single record - mask_singular = ((official_agency_ix_interp > 0).sum(dim="date_time") == 1).values - official_agency_ix_interp.values[mask_singular,:] = \ - official_agency_ix_interp.sel(storm=mask_singular).max(dim="date_time").values[:,None] + mask_singular = ( + (official_agency_ix_interp > 0).sum(dim="date_time") == 1 + ).values + official_agency_ix_interp.values[mask_singular, :] = ( + official_agency_ix_interp.sel(storm=mask_singular) + .max(dim="date_time") + .values[:, None] + ) with warnings.catch_warnings(): # See https://github.com/pydata/xarray/issues/4167 warnings.simplefilter(action="ignore", category=FutureWarning) # interpolate responsible agencies using nearest neighbor interpolation - official_agency_ix_interp.values[official_agency_ix_interp.values == 0.0] = np.nan + official_agency_ix_interp.values[ + official_agency_ix_interp.values == 0.0 + ] = np.nan official_agency_ix_interp = official_agency_ix_interp.interpolate_na( - dim="date_time", method="nearest", fill_value="extrapolate") + dim="date_time", method="nearest", fill_value="extrapolate" + ) # read from officially responsible agencies that report this variable, including # 3-hour time steps if available - official_agency_ix_interp.values[official_agency_ix_interp.isnull().values] = 0.0 - ibtracs_ds[f'official_3h_{tc_var}'] = all_vals.isel( - agency=official_agency_ix_interp.astype(int)) + official_agency_ix_interp.values[official_agency_ix_interp.isnull().values] = ( + 0.0 + ) + ibtracs_ds[f"official_3h_{tc_var}"] = all_vals.isel( + agency=official_agency_ix_interp.astype(int) + ) + def _change_max_wind_unit(wind, unit_orig, unit_dest): """Compute maximum wind speed in unit_dest. @@ -2382,29 +2795,30 @@ def _change_max_wind_unit(wind, unit_orig, unit_dest): maxwind : double Maximum wind speed in specified wind speed units. 
""" - if unit_orig in ('kn', 'kt'): + if unit_orig in ("kn", "kt"): ur_orig = ureg.knot - elif unit_orig == 'mph': + elif unit_orig == "mph": ur_orig = ureg.mile / ureg.hour - elif unit_orig == 'm/s': + elif unit_orig == "m/s": ur_orig = ureg.meter / ureg.second - elif unit_orig == 'km/h': + elif unit_orig == "km/h": ur_orig = ureg.kilometer / ureg.hour else: - raise ValueError('Unit not recognised %s.' % unit_orig) - if unit_dest in ('kn', 'kt'): + raise ValueError("Unit not recognised %s." % unit_orig) + if unit_dest in ("kn", "kt"): ur_dest = ureg.knot - elif unit_dest == 'mph': + elif unit_dest == "mph": ur_dest = ureg.mile / ureg.hour - elif unit_dest == 'm/s': + elif unit_dest == "m/s": ur_dest = ureg.meter / ureg.second - elif unit_dest == 'km/h': + elif unit_dest == "km/h": ur_dest = ureg.kilometer / ureg.hour else: - raise ValueError('Unit not recognised %s.' % unit_dest) + raise ValueError("Unit not recognised %s." % unit_dest) return (np.nanmax(wind) * ur_orig).to(ur_dest).magnitude -def set_category(max_sus_wind, wind_unit='kn', saffir_scale=None): + +def set_category(max_sus_wind, wind_unit="kn", saffir_scale=None): """Add storm category according to Saffir-Simpson hurricane scale. Parameters @@ -2430,14 +2844,15 @@ def set_category(max_sus_wind, wind_unit='kn', saffir_scale=None): """ if saffir_scale is None: saffir_scale = SAFFIR_SIM_CAT - if wind_unit != 'kn': - max_sus_wind = _change_max_wind_unit(max_sus_wind, wind_unit, 'kn') + if wind_unit != "kn": + max_sus_wind = _change_max_wind_unit(max_sus_wind, wind_unit, "kn") max_wind = np.nanmax(max_sus_wind) try: return (np.argwhere(max_wind < saffir_scale) - 1)[0][0] except IndexError: return -1 + def _zlib_from_dataarray(data_var: xr.DataArray) -> bool: """Return true if data_var is of numerical type, return False otherwise diff --git a/climada/hazard/tc_tracks_synth.py b/climada/hazard/tc_tracks_synth.py index 759245010..57be3dc2a 100644 --- a/climada/hazard/tc_tracks_synth.py +++ b/climada/hazard/tc_tracks_synth.py @@ -23,15 +23,16 @@ import itertools import logging import warnings + import matplotlib.cm as cm_mp -from matplotlib.lines import Line2D import matplotlib.pyplot as plt import numba import numpy as np +from matplotlib.lines import Line2D -from climada import CONFIG -import climada.util.coordinates import climada.hazard.tc_tracks +import climada.util.coordinates +from climada import CONFIG LOGGER = logging.getLogger(__name__) @@ -42,7 +43,7 @@ 2: 0.0025968221565522698, 3: 0.002626252944053856, 4: 0.002550639312763181, - 5: 0.003788695795963695 + 5: 0.003788695795963695, } """Global landfall decay parameters for wind speed by TC category. @@ -65,7 +66,8 @@ 2: (1.0468630800617038, 0.004067381088015585), 3: (1.0639055205005432, 0.003708174876364079), 4: (1.0828373148889825, 0.003997492773076179), - 5: (1.1088615145002092, 0.005224331234796362)} + 5: (1.1088615145002092, 0.005224331234796362), +} """Global landfall decay parameters for pressure by TC category. Keys are TC categories with -1='TD', 0='TS', 1='Cat 1', ..., 5='Cat 5'. 
@@ -80,17 +82,20 @@ >>> v_rel, p_rel = _calc_land_decay(tracks.data, land_geom, pool=tracks.pool) """ -def calc_perturbed_trajectories(tracks, - nb_synth_tracks=9, - max_shift_ini=0.75, - max_dspeed_rel=0.3, - max_ddirection=np.pi / 360, - autocorr_dspeed=0.85, - autocorr_ddirection=0.5, - seed=CONFIG.hazard.trop_cyclone.random_seed.int(), - decay=True, - use_global_decay_params=True, - pool=None): + +def calc_perturbed_trajectories( + tracks, + nb_synth_tracks=9, + max_shift_ini=0.75, + max_dspeed_rel=0.3, + max_ddirection=np.pi / 360, + autocorr_dspeed=0.85, + autocorr_ddirection=0.5, + seed=CONFIG.hazard.trop_cyclone.random_seed.int(), + decay=True, + use_global_decay_params=True, + pool=None, +): """ Generate synthetic tracks based on directed random walk. An ensemble of nb_synth_tracks synthetic tracks is computed for every track contained in self. @@ -161,7 +166,7 @@ def calc_perturbed_trajectories(tracks, Pool that will be used for parallel computation when applicable. If not given, the pool attribute of `tracks` will be used. Default: None """ - LOGGER.info('Computing %s synthetic tracks.', nb_synth_tracks * tracks.size) + LOGGER.info("Computing %s synthetic tracks.", nb_synth_tracks * tracks.size) pool = tracks.pool if pool is None else pool @@ -169,10 +174,14 @@ def calc_perturbed_trajectories(tracks, np.random.seed(seed) # ensure tracks have constant time steps - time_step_h = np.unique(np.concatenate([np.unique(x['time_step']) for x in tracks.data])) + time_step_h = np.unique( + np.concatenate([np.unique(x["time_step"]) for x in tracks.data]) + ) if not np.allclose(time_step_h, time_step_h[0]): - raise ValueError('Tracks have different temporal resolution. ' - 'Please ensure constant time steps by applying equal_timestep beforehand') + raise ValueError( + "Tracks have different temporal resolution. 
" + "Please ensure constant time steps by applying equal_timestep beforehand" + ) time_step_h = time_step_h[0] # number of random value per synthetic track: @@ -181,44 +190,77 @@ def calc_perturbed_trajectories(tracks, # hence sum is nb_synth_tracks * (2 + 2*(size-1)) = nb_synth_tracks * 2 * size # https://stats.stackexchange.com/questions/48086/algorithm-to-produce-autocorrelated-uniformly-distributed-number if autocorr_ddirection == 0 and autocorr_dspeed == 0: - random_vec = [np.random.uniform(size=nb_synth_tracks * (2 * track['time'].size)) - for track in tracks.data] + random_vec = [ + np.random.uniform(size=nb_synth_tracks * (2 * track["time"].size)) + for track in tracks.data + ] else: - random_vec = [np.concatenate((np.random.uniform(size=nb_synth_tracks * 2), - _random_uniform_ac(nb_synth_tracks * (track['time'].size - 1), - autocorr_ddirection, time_step_h), - _random_uniform_ac(nb_synth_tracks * (track['time'].size - 1), - autocorr_dspeed, time_step_h))) - if track['time'].size > 1 else np.random.uniform(size=nb_synth_tracks * 2) - for track in tracks.data] + random_vec = [ + ( + np.concatenate( + ( + np.random.uniform(size=nb_synth_tracks * 2), + _random_uniform_ac( + nb_synth_tracks * (track["time"].size - 1), + autocorr_ddirection, + time_step_h, + ), + _random_uniform_ac( + nb_synth_tracks * (track["time"].size - 1), + autocorr_dspeed, + time_step_h, + ), + ) + ) + if track["time"].size > 1 + else np.random.uniform(size=nb_synth_tracks * 2) + ) + for track in tracks.data + ] if pool: chunksize = max(min(tracks.size // pool.ncpus, 1000), 1) - new_ens = pool.map(_one_rnd_walk, tracks.data, - itertools.repeat(nb_synth_tracks, tracks.size), - itertools.repeat(max_shift_ini, tracks.size), - itertools.repeat(max_dspeed_rel, tracks.size), - itertools.repeat(max_ddirection, tracks.size), - random_vec, chunksize=chunksize) + new_ens = pool.map( + _one_rnd_walk, + tracks.data, + itertools.repeat(nb_synth_tracks, tracks.size), + itertools.repeat(max_shift_ini, tracks.size), + itertools.repeat(max_dspeed_rel, tracks.size), + itertools.repeat(max_ddirection, tracks.size), + random_vec, + chunksize=chunksize, + ) else: - new_ens = [_one_rnd_walk(track, nb_synth_tracks, max_shift_ini, - max_dspeed_rel, max_ddirection, rand) - for track, rand in zip(tracks.data, random_vec)] + new_ens = [ + _one_rnd_walk( + track, + nb_synth_tracks, + max_shift_ini, + max_dspeed_rel, + max_ddirection, + rand, + ) + for track, rand in zip(tracks.data, random_vec) + ] cutoff_track_ids_tc = [x[1] for x in new_ens] cutoff_track_ids_tc = sum(cutoff_track_ids_tc, []) cutoff_track_ids_ts = [x[2] for x in new_ens] cutoff_track_ids_ts = sum(cutoff_track_ids_ts, []) if len(cutoff_track_ids_tc) > 0: - LOGGER.info('The following generated synthetic tracks moved beyond ' - 'the range of [-70, 70] degrees latitude. Cut out ' - 'at TC category >1: %s.', - ', '.join(cutoff_track_ids_tc)) + LOGGER.info( + "The following generated synthetic tracks moved beyond " + "the range of [-70, 70] degrees latitude. Cut out " + "at TC category >1: %s.", + ", ".join(cutoff_track_ids_tc), + ) if len(cutoff_track_ids_ts) > 0: - LOGGER.debug('The following generated synthetic tracks moved beyond ' - 'the range of [-70, 70] degrees latitude. Cut out ' - 'at TC category <= 1: %s.', - ', '.join(cutoff_track_ids_ts)) + LOGGER.debug( + "The following generated synthetic tracks moved beyond " + "the range of [-70, 70] degrees latitude. 
Cut out " + "at TC category <= 1: %s.", + ", ".join(cutoff_track_ids_ts), + ) new_ens = [x[0] for x in new_ens] tracks.data = sum(new_ens, []) @@ -228,8 +270,9 @@ def calc_perturbed_trajectories(tracks, extent=extent, resolution=10 ) if use_global_decay_params: - tracks.data = _apply_land_decay(tracks.data, LANDFALL_DECAY_V, - LANDFALL_DECAY_P, land_geom, pool=pool) + tracks.data = _apply_land_decay( + tracks.data, LANDFALL_DECAY_V, LANDFALL_DECAY_P, land_geom, pool=pool + ) else: # fit land decay coefficients based on historical tracks hist_tracks = [track for track in tracks.data if track.orig_event_flag] @@ -237,16 +280,21 @@ def calc_perturbed_trajectories(tracks, try: v_rel, p_rel = _calc_land_decay(hist_tracks, land_geom, pool=pool) tracks.data = _apply_land_decay( - tracks.data, v_rel, p_rel, land_geom, pool=pool) + tracks.data, v_rel, p_rel, land_geom, pool=pool + ) except ValueError as verr: - raise ValueError('Landfall decay could not be applied.') from verr + raise ValueError("Landfall decay could not be applied.") from verr else: - raise ValueError('No historical tracks found. Historical' - ' tracks are needed for land decay calibration' - ' if use_global_decay_params=False.') + raise ValueError( + "No historical tracks found. Historical" + " tracks are needed for land decay calibration" + " if use_global_decay_params=False." + ) -def _one_rnd_walk(track, nb_synth_tracks, max_shift_ini, max_dspeed_rel, max_ddirection, rnd_vec): +def _one_rnd_walk( + track, nb_synth_tracks, max_shift_ini, max_dspeed_rel, max_ddirection, rnd_vec +): """ Apply random walk to one track. @@ -280,10 +328,12 @@ def _one_rnd_walk(track, nb_synth_tracks, max_shift_ini, max_dspeed_rel, max_ddi latitudes with a wind speed up to TC category 1. """ ens_track = list() - n_dat = track['time'].size + n_dat = track["time"].size n_seg = n_dat - 1 - xy_ini = max_shift_ini * (2 * rnd_vec[:2 * nb_synth_tracks].reshape((2, nb_synth_tracks)) - 1) - [dt] = np.unique(track['time_step']) + xy_ini = max_shift_ini * ( + 2 * rnd_vec[: 2 * nb_synth_tracks].reshape((2, nb_synth_tracks)) - 1 + ) + [dt] = np.unique(track["time_step"]) ens_track.append(track) cutoff_track_ids_ts = [] @@ -293,49 +343,58 @@ def _one_rnd_walk(track, nb_synth_tracks, max_shift_ini, max_dspeed_rel, max_ddi # select angular perturbation for that synthetic track i_start_ang = 2 * nb_synth_tracks + i_ens * n_seg - i_end_ang = i_start_ang + track['time'].size - 1 + i_end_ang = i_start_ang + track["time"].size - 1 # scale by maximum perturbation and time step in hour (temporal-resolution independent) - ang_pert = dt * np.degrees(max_ddirection * (2 * rnd_vec[i_start_ang:i_end_ang] - 1)) + ang_pert = dt * np.degrees( + max_ddirection * (2 * rnd_vec[i_start_ang:i_end_ang] - 1) + ) ang_pert_cum = np.cumsum(ang_pert) # select translational speed perturbation for that synthetic track i_start_trans = 2 * nb_synth_tracks + nb_synth_tracks * n_seg + i_ens * n_seg - i_end_trans = i_start_trans + track['time'].size - 1 + i_end_trans = i_start_trans + track["time"].size - 1 # scale by maximum perturbation and time step in hour (temporal-resolution independent) trans_pert = 1 + max_dspeed_rel * (2 * rnd_vec[i_start_trans:i_end_trans] - 1) # get bearings and angular distance for the original track - bearings = _get_bearing_angle(i_track['lon'].values, i_track['lat'].values) - angular_dist = climada.util.coordinates.dist_approx(i_track['lat'].values[:-1, None], - i_track['lon'].values[:-1, None], - i_track['lat'].values[1:, None], - i_track['lon'].values[1:, None], - 
method="geosphere", - units="degree")[:, 0, 0] + bearings = _get_bearing_angle(i_track["lon"].values, i_track["lat"].values) + angular_dist = climada.util.coordinates.dist_approx( + i_track["lat"].values[:-1, None], + i_track["lon"].values[:-1, None], + i_track["lat"].values[1:, None], + i_track["lon"].values[1:, None], + method="geosphere", + units="degree", + )[:, 0, 0] # apply perturbation to lon / lat - new_lon = np.zeros_like(i_track['lon'].values) - new_lat = np.zeros_like(i_track['lat'].values) - new_lon[0] = i_track['lon'].values[0] + xy_ini[0, i_ens] - new_lat[0] = i_track['lat'].values[0] + xy_ini[1, i_ens] - last_idx = i_track['time'].size + new_lon = np.zeros_like(i_track["lon"].values) + new_lat = np.zeros_like(i_track["lat"].values) + new_lon[0] = i_track["lon"].values[0] + xy_ini[0, i_ens] + new_lat[0] = i_track["lat"].values[0] + xy_ini[1, i_ens] + last_idx = i_track["time"].size for i in range(0, len(new_lon) - 1): - new_lon[i + 1], new_lat[i + 1] = \ - _get_destination_points(new_lon[i], new_lat[i], - bearings[i] + ang_pert_cum[i], - trans_pert[i] * angular_dist[i]) + new_lon[i + 1], new_lat[i + 1] = _get_destination_points( + new_lon[i], + new_lat[i], + bearings[i] + ang_pert_cum[i], + trans_pert[i] * angular_dist[i], + ) # if track crosses latitudinal thresholds (+-70°), # keep up to this segment (i+1), set i+2 as last point, # and discard all further points > i+2. - if i+2 < last_idx and (new_lat[i + 1] > 70 or new_lat[i + 1] < -70): + if i + 2 < last_idx and (new_lat[i + 1] > 70 or new_lat[i + 1] < -70): last_idx = i + 2 # end the track here - max_wind_end = i_track['max_sustained_wind'].values[last_idx] - ss_scale_end = climada.hazard.tc_tracks.set_category(max_wind_end, - i_track.attrs['max_sustained_wind_unit']) + max_wind_end = i_track["max_sustained_wind"].values[last_idx] + ss_scale_end = climada.hazard.tc_tracks.set_category( + max_wind_end, i_track.attrs["max_sustained_wind_unit"] + ) # TC category at ending point should not be higher than 1 - cutoff_txt = (f"{i_track.attrs['name']}_gen{i_ens + 1}" - f" ({climada.hazard.tc_tracks.CAT_NAMES[ss_scale_end]})") + cutoff_txt = ( + f"{i_track.attrs['name']}_gen{i_ens + 1}" + f" ({climada.hazard.tc_tracks.CAT_NAMES[ss_scale_end]})" + ) if ss_scale_end > 1: cutoff_track_ids_tc = cutoff_track_ids_tc + [cutoff_txt] else: @@ -344,12 +403,12 @@ def _one_rnd_walk(track, nb_synth_tracks, max_shift_ini, max_dspeed_rel, max_ddi # make sure longitude values are within (-180, 180) climada.util.coordinates.lon_normalize(new_lon, center=0.0) - i_track['lon'].values = new_lon - i_track['lat'].values = new_lat - i_track.attrs['orig_event_flag'] = False - i_track.attrs['name'] = f"{i_track.attrs['name']}_gen{i_ens + 1}" - i_track.attrs['sid'] = f"{i_track.attrs['sid']}_gen{i_ens + 1}" - i_track.attrs['id_no'] = i_track.attrs['id_no'] + (i_ens + 1) / 100 + i_track["lon"].values = new_lon + i_track["lat"].values = new_lat + i_track.attrs["orig_event_flag"] = False + i_track.attrs["name"] = f"{i_track.attrs['name']}_gen{i_ens + 1}" + i_track.attrs["sid"] = f"{i_track.attrs['sid']}_gen{i_ens + 1}" + i_track.attrs["id_no"] = i_track.attrs["id_no"] + (i_ens + 1) / 100 i_track = i_track.isel(time=slice(None, last_idx)) ens_track.append(i_track) @@ -395,8 +454,11 @@ def _random_uniform_ac(n_ts, autocorr, time_step_h): # scale x to have magnitude [0,1] x = (x + np.sqrt(3)) / (2 * np.sqrt(3)) # resample at target time step - x_ts = np.interp(np.arange(start=0, stop=n_ts_hourly_exact, step=time_step_h), - np.arange(n_ts_hourly), x) + x_ts = 
np.interp( + np.arange(start=0, stop=n_ts_hourly_exact, step=time_step_h), + np.arange(n_ts_hourly), + x, + ) return x_ts @@ -423,9 +485,13 @@ def _h_ac(x, y, theta): x_next : float Next value in the series. """ - gamma = np.abs(np.mod(theta, np.pi) - \ - np.floor((np.mod(theta, np.pi) / (np.pi / 2)) + 0.5) * np.pi / 2) - x_next = 2 * np.sqrt(3) * (_f_ac(np.cos(theta) * x + np.sin(theta) * y, gamma) - 1 / 2) + gamma = np.abs( + np.mod(theta, np.pi) + - np.floor((np.mod(theta, np.pi) / (np.pi / 2)) + 0.5) * np.pi / 2 + ) + x_next = ( + 2 * np.sqrt(3) * (_f_ac(np.cos(theta) * x + np.sin(theta) * y, gamma) - 1 / 2) + ) return x_next @@ -456,13 +522,21 @@ def _f_ac(z, theta): if z >= np.sqrt(3) * (c + s): res = 1 elif z > np.sqrt(3) * (c - s): - res = 1 / 12 / np.sin(2 * theta) * \ - (-3 - z ** 2 + 2 * np.sqrt(3) * z * (c + s) + 9 * np.sin(2 * theta)) + res = ( + 1 + / 12 + / np.sin(2 * theta) + * (-3 - z**2 + 2 * np.sqrt(3) * z * (c + s) + 9 * np.sin(2 * theta)) + ) elif z > np.sqrt(3) * (-c + s): res = 1 / 6 * (3 + np.sqrt(3) * z / c) elif z > -np.sqrt(3) * (c + s): - res = 1 / 12 / np.sin(2 * theta) * \ - (z ** 2 + 2 * np.sqrt(3) * z * (c + s) + 3 * (1 + np.sin(2 * theta))) + res = ( + 1 + / 12 + / np.sin(2 * theta) + * (z**2 + 2 * np.sqrt(3) * z * (c + s) + 3 * (1 + np.sin(2 * theta))) + ) else: res = 0 return res @@ -504,9 +578,11 @@ def _get_bearing_angle(lon, lat): # what to do with the points that don't move? # i.e. where lat_2=lat_1 and lon_2=lon_1? The angle does not matter in # that case because angular distance will be 0. - earth_ang_fix = np.arctan2(np.sin(delta_lon) * np.cos(lat_2), - np.cos(lat_1) * np.sin(lat_2) - \ - np.sin(lat_1) * np.cos(lat_2) * np.cos(delta_lon)) + earth_ang_fix = np.arctan2( + np.sin(delta_lon) * np.cos(lat_2), + np.cos(lat_1) * np.sin(lat_2) + - np.sin(lat_1) * np.cos(lat_2) * np.cos(delta_lon), + ) return np.degrees(earth_ang_fix) @@ -536,15 +612,18 @@ def _get_destination_points(lon, lat, bearing, angular_distance): lon, lat = map(np.radians, [lon, lat]) bearing = np.radians(bearing) angular_distance = np.radians(angular_distance) - lat_2 = np.arcsin(np.sin(lat) * np.cos(angular_distance) + np.cos(lat) * \ - np.sin(angular_distance) * np.cos(bearing)) - lon_2 = lon + np.arctan2(np.sin(bearing) * np.sin(angular_distance) * np.cos(lat), - np.cos(angular_distance) - np.sin(lat) * np.sin(lat_2)) + lat_2 = np.arcsin( + np.sin(lat) * np.cos(angular_distance) + + np.cos(lat) * np.sin(angular_distance) * np.cos(bearing) + ) + lon_2 = lon + np.arctan2( + np.sin(bearing) * np.sin(angular_distance) * np.cos(lat), + np.cos(angular_distance) - np.sin(lat) * np.sin(lat_2), + ) return np.degrees(lon_2), np.degrees(lat_2) -def _calc_land_decay(hist_tracks, land_geom, s_rel=True, check_plot=False, - pool=None): +def _calc_land_decay(hist_tracks, land_geom, s_rel=True, check_plot=False, pool=None): """Compute wind and pressure decay coefficients from historical events Decay is calculated for every TC category according to the formulas: @@ -572,13 +651,16 @@ def _calc_land_decay(hist_tracks, land_geom, s_rel=True, check_plot=False, """ if len(hist_tracks) < 100: - LOGGER.warning('For the calibration of the landfall decay ' - 'it is recommended to provide as many historical ' - 'tracks as possible, but only %s historical tracks ' - 'were provided. 
' - 'For a more robust calculation consider using ' - 'a larger number of tracks or set ' - '`use_global_decay_params` to True', len(hist_tracks)) + LOGGER.warning( + "For the calibration of the landfall decay " + "it is recommended to provide as many historical " + "tracks as possible, but only %s historical tracks " + "were provided. " + "For a more robust calculation consider using " + "a larger number of tracks or set " + "`use_global_decay_params` to True", + len(hist_tracks), + ) # Key is Saffir-Simpson scale # values are lists of wind/wind at landfall @@ -590,13 +672,17 @@ def _calc_land_decay(hist_tracks, land_geom, s_rel=True, check_plot=False, x_val = dict() if pool: - dec_val = pool.map(_decay_values, hist_tracks, itertools.repeat(land_geom), - itertools.repeat(s_rel), - chunksize=max(min(len(hist_tracks) // pool.ncpus, 1000), 1)) + dec_val = pool.map( + _decay_values, + hist_tracks, + itertools.repeat(land_geom), + itertools.repeat(s_rel), + chunksize=max(min(len(hist_tracks) // pool.ncpus, 1000), 1), + ) else: dec_val = [_decay_values(track, land_geom, s_rel) for track in hist_tracks] - for (tv_lf, tp_lf, tx_val) in dec_val: + for tv_lf, tp_lf, tx_val in dec_val: for key in tv_lf.keys(): v_lf.setdefault(key, []).extend(tv_lf[key]) p_lf.setdefault(key, ([], [])) @@ -611,8 +697,9 @@ def _calc_land_decay(hist_tracks, land_geom, s_rel=True, check_plot=False, return v_rel, p_rel -def _apply_land_decay(tracks, v_rel, p_rel, land_geom, s_rel=True, - check_plot=False, pool=None): +def _apply_land_decay( + tracks, v_rel, p_rel, land_geom, s_rel=True, check_plot=False, pool=None +): """Compute wind and pressure decay due to landfall in synthetic tracks. Parameters @@ -632,28 +719,36 @@ def _apply_land_decay(tracks, v_rel, p_rel, land_geom, s_rel=True, """ sy_tracks = [track for track in tracks if not track.orig_event_flag] if not sy_tracks: - raise ValueError('No synthetic tracks contained. Synthetic tracks' - ' are needed.') + raise ValueError( + "No synthetic tracks contained. Synthetic tracks" " are needed." 
+ ) if not v_rel or not p_rel: - LOGGER.info('No decay coefficients.') + LOGGER.info("No decay coefficients.") return if check_plot: orig_wind, orig_pres = [], [] for track in sy_tracks: - orig_wind.append(np.copy(track['max_sustained_wind'].values)) - orig_pres.append(np.copy(track['central_pressure'].values)) + orig_wind.append(np.copy(track["max_sustained_wind"].values)) + orig_pres.append(np.copy(track["central_pressure"].values)) if pool: chunksize = max(min(len(tracks) // pool.ncpus, 1000), 1) - tracks = pool.map(_apply_decay_coeffs, tracks, - itertools.repeat(v_rel), itertools.repeat(p_rel), - itertools.repeat(land_geom), itertools.repeat(s_rel), - chunksize=chunksize) + tracks = pool.map( + _apply_decay_coeffs, + tracks, + itertools.repeat(v_rel), + itertools.repeat(p_rel), + itertools.repeat(land_geom), + itertools.repeat(s_rel), + chunksize=chunksize, + ) else: - tracks = [_apply_decay_coeffs(track, v_rel, p_rel, land_geom, s_rel) - for track in tracks] + tracks = [ + _apply_decay_coeffs(track, v_rel, p_rel, land_geom, s_rel) + for track in tracks + ] for track in tracks: if track.orig_event_flag: @@ -696,35 +791,35 @@ def _decay_values(track, land_geom, s_rel): sea_land_idx, land_sea_idx = climada.hazard.tc_tracks._get_landfall_idx(track) if sea_land_idx.size: for sea_land, land_sea in zip(sea_land_idx, land_sea_idx): - v_landfall = track['max_sustained_wind'][sea_land - 1].values - ss_scale = climada.hazard.tc_tracks.set_category(v_landfall, - track.attrs['max_sustained_wind_unit']) + v_landfall = track["max_sustained_wind"][sea_land - 1].values + ss_scale = climada.hazard.tc_tracks.set_category( + v_landfall, track.attrs["max_sustained_wind_unit"] + ) - v_land = track['max_sustained_wind'][sea_land - 1:land_sea].values + v_land = track["max_sustained_wind"][sea_land - 1 : land_sea].values if v_land[0] > 0: v_land = (v_land[1:] / v_land[0]).tolist() else: v_land = v_land[1:].tolist() - p_landfall = float(track['central_pressure'][sea_land - 1].values) - p_land = track['central_pressure'][sea_land - 1:land_sea].values + p_landfall = float(track["central_pressure"][sea_land - 1].values) + p_land = track["central_pressure"][sea_land - 1 : land_sea].values p_land = (p_land[1:] / p_land[0]).tolist() - p_land_s = _calc_decay_ps_value( - track, p_landfall, land_sea - 1, s_rel) + p_land_s = _calc_decay_ps_value(track, p_landfall, land_sea - 1, s_rel) p_land_s = len(p_land) * [p_land_s] if ss_scale not in v_lf: - v_lf[ss_scale] = array.array('f', v_land) - p_lf[ss_scale] = (array.array('f', p_land_s), - array.array('f', p_land)) - x_val[ss_scale] = array.array('f', - track['dist_since_lf'][sea_land:land_sea]) + v_lf[ss_scale] = array.array("f", v_land) + p_lf[ss_scale] = (array.array("f", p_land_s), array.array("f", p_land)) + x_val[ss_scale] = array.array( + "f", track["dist_since_lf"][sea_land:land_sea] + ) else: v_lf[ss_scale].extend(v_land) p_lf[ss_scale][0].extend(p_land_s) p_lf[ss_scale][1].extend(p_land) - x_val[ss_scale].extend(track['dist_since_lf'][sea_land:land_sea]) + x_val[ss_scale].extend(track["dist_since_lf"][sea_land:land_sea]) return v_lf, p_lf, x_val @@ -753,7 +848,7 @@ def _decay_calc_coeff(x_val, v_lf, p_lf): v_rel : dict p_rel : dict """ - warnings.filterwarnings('ignore') + warnings.filterwarnings("ignore") v_rel = dict() p_rel = dict() for ss_scale, val_lf in v_lf.items(): @@ -781,29 +876,36 @@ def _decay_calc_coeff(x_val, v_lf, p_lf): scale_fill = np.array(list(p_rel.keys())) if not scale_fill.size: - LOGGER.info('No historical track with landfall.') + 
LOGGER.info("No historical track with landfall.") return v_rel, p_rel for ss_scale, ss_name in climada.hazard.tc_tracks.CAT_NAMES.items(): if ss_scale not in p_rel: close_scale = scale_fill[np.argmin(np.abs(scale_fill - ss_scale))] close_name = climada.hazard.tc_tracks.CAT_NAMES[close_scale] - LOGGER.debug('No historical track of category %s with landfall. ' - 'Decay parameters from category %s taken.', - ss_name, close_name) + LOGGER.debug( + "No historical track of category %s with landfall. " + "Decay parameters from category %s taken.", + ss_name, + close_name, + ) v_rel[ss_scale] = v_rel[close_scale] p_rel[ss_scale] = p_rel[close_scale] elif v_rel[ss_scale] < 0: - raise ValueError('The calibration of landfall decay for wind speed resulted in' - f' a wind speed increase for TC category {ss_name}.' - ' This behaviour is unphysical. Please use a larger number of tracks' - ' or use global paramaters by setting `use_global_decay_params` to' - ' `True`') + raise ValueError( + "The calibration of landfall decay for wind speed resulted in" + f" a wind speed increase for TC category {ss_name}." + " This behaviour is unphysical. Please use a larger number of tracks" + " or use global paramaters by setting `use_global_decay_params` to" + " `True`" + ) elif p_rel[ss_scale][0] < 0 or p_rel[ss_scale][1] < 0: - raise ValueError('The calibration of landfall decay for central pressure resulted in' - f' a pressure decrease for TC category {ss_name}.' - ' This behaviour is unphysical. Please use a larger number of tracks' - ' or use global paramaters by setting `use_global_decay_params` to' - ' `True`') + raise ValueError( + "The calibration of landfall decay for central pressure resulted in" + f" a pressure decrease for TC category {ss_name}." + " This behaviour is unphysical. 
Please use a larger number of tracks" + " or use global paramaters by setting `use_global_decay_params` to" + " `True`" + ) return v_rel, p_rel @@ -812,28 +914,44 @@ def _check_decay_values_plot(x_val, v_lf, p_lf, v_rel, p_rel): """Generate one graph with wind decay and an other with central pressure decay, true and approximated.""" # One graph per TC category - for track_cat, color in zip(v_lf.keys(), - cm_mp.rainbow(np.linspace(0, 1, len(v_lf)))): + for track_cat, color in zip( + v_lf.keys(), cm_mp.rainbow(np.linspace(0, 1, len(v_lf))) + ): _, axes = plt.subplots(2, 1) x_eval = np.linspace(0, np.max(x_val[track_cat]), 20) - axes[0].set_xlabel('Distance from landfall (km)') - axes[0].set_ylabel('Max sustained wind\nrelative to landfall') - axes[0].set_title(f'Wind, TC cat {climada.hazard.tc_tracks.CAT_NAMES[track_cat]}') - axes[0].plot(x_val[track_cat], v_lf[track_cat], '*', c=color, - label=climada.hazard.tc_tracks.CAT_NAMES[track_cat]) - axes[0].plot(x_eval, _decay_v_function(v_rel[track_cat], x_eval), - '-', c=color) - - axes[1].set_xlabel('Distance from landfall (km)') - axes[1].set_ylabel('Central pressure\nrelative to landfall') - axes[1].set_title(f'Pressure, TC cat {climada.hazard.tc_tracks.CAT_NAMES[track_cat]}') - axes[1].plot(x_val[track_cat], p_lf[track_cat][1], '*', c=color, - label=climada.hazard.tc_tracks.CAT_NAMES[track_cat]) + axes[0].set_xlabel("Distance from landfall (km)") + axes[0].set_ylabel("Max sustained wind\nrelative to landfall") + axes[0].set_title( + f"Wind, TC cat {climada.hazard.tc_tracks.CAT_NAMES[track_cat]}" + ) + axes[0].plot( + x_val[track_cat], + v_lf[track_cat], + "*", + c=color, + label=climada.hazard.tc_tracks.CAT_NAMES[track_cat], + ) + axes[0].plot(x_eval, _decay_v_function(v_rel[track_cat], x_eval), "-", c=color) + + axes[1].set_xlabel("Distance from landfall (km)") + axes[1].set_ylabel("Central pressure\nrelative to landfall") + axes[1].set_title( + f"Pressure, TC cat {climada.hazard.tc_tracks.CAT_NAMES[track_cat]}" + ) + axes[1].plot( + x_val[track_cat], + p_lf[track_cat][1], + "*", + c=color, + label=climada.hazard.tc_tracks.CAT_NAMES[track_cat], + ) axes[1].plot( x_eval, _decay_p_function(p_rel[track_cat][0], p_rel[track_cat][1], x_eval), - '-', c=color) + "-", + c=color, + ) def _apply_decay_coeffs(track, v_rel, p_rel, land_geom, s_rel): @@ -868,51 +986,61 @@ def _apply_decay_coeffs(track, v_rel, p_rel, land_geom, s_rel): sea_land_idx, land_sea_idx = climada.hazard.tc_tracks._get_landfall_idx(track) if not sea_land_idx.size: return track - for idx, (sea_land, land_sea) \ - in enumerate(zip(sea_land_idx, land_sea_idx)): - v_landfall = track['max_sustained_wind'][sea_land - 1].values - p_landfall = float(track['central_pressure'][sea_land - 1].values) - ss_scale = climada.hazard.tc_tracks.set_category(v_landfall, - track.attrs['max_sustained_wind_unit']) + for idx, (sea_land, land_sea) in enumerate(zip(sea_land_idx, land_sea_idx)): + v_landfall = track["max_sustained_wind"][sea_land - 1].values + p_landfall = float(track["central_pressure"][sea_land - 1].values) + ss_scale = climada.hazard.tc_tracks.set_category( + v_landfall, track.attrs["max_sustained_wind_unit"] + ) if land_sea - sea_land == 1: continue S = _calc_decay_ps_value(track, p_landfall, land_sea - 1, s_rel) if S <= 1: # central_pressure at start of landfall > env_pres after landfall: # set central_pressure to environmental pressure during whole lf - track['central_pressure'][sea_land:land_sea] = \ - track['environmental_pressure'][sea_land:land_sea] + 
track["central_pressure"][sea_land:land_sea] = track[ + "environmental_pressure" + ][sea_land:land_sea] else: - p_decay = _decay_p_function(S, p_rel[ss_scale][1], - track['dist_since_lf'][sea_land:land_sea].values) + p_decay = _decay_p_function( + S, p_rel[ss_scale][1], track["dist_since_lf"][sea_land:land_sea].values + ) # dont apply decay if it would decrease central pressure if np.any(p_decay < 1): - LOGGER.info('Landfall decay would decrease pressure for ' - 'track id %s, leading to an intensification ' - 'of the Tropical Cyclone. This behaviour is ' - 'unphysical and therefore landfall decay is not ' - 'applied in this case.', - track.sid) - p_decay[p_decay < 1] = (track['central_pressure'][sea_land:land_sea][p_decay < 1] - / p_landfall) - track['central_pressure'][sea_land:land_sea] = p_landfall * p_decay - - v_decay = _decay_v_function(v_rel[ss_scale], - track['dist_since_lf'][sea_land:land_sea].values) + LOGGER.info( + "Landfall decay would decrease pressure for " + "track id %s, leading to an intensification " + "of the Tropical Cyclone. This behaviour is " + "unphysical and therefore landfall decay is not " + "applied in this case.", + track.sid, + ) + p_decay[p_decay < 1] = ( + track["central_pressure"][sea_land:land_sea][p_decay < 1] + / p_landfall + ) + track["central_pressure"][sea_land:land_sea] = p_landfall * p_decay + + v_decay = _decay_v_function( + v_rel[ss_scale], track["dist_since_lf"][sea_land:land_sea].values + ) # dont apply decay if it would increase wind speeds if np.any(v_decay > 1): # should not happen unless v_rel is negative - LOGGER.info('Landfall decay would increase wind speed for ' - 'track id %s. This behavious in unphysical and ' - 'therefore landfall decay is not applied in this ' - 'case.', - track['sid']) - v_decay[v_decay > 1] = (track['max_sustained_wind'][sea_land:land_sea][v_decay > 1] - / v_landfall) - track['max_sustained_wind'][sea_land:land_sea] = v_landfall * v_decay + LOGGER.info( + "Landfall decay would increase wind speed for " + "track id %s. 
This behavious in unphysical and " + "therefore landfall decay is not applied in this " + "case.", + track["sid"], + ) + v_decay[v_decay > 1] = ( + track["max_sustained_wind"][sea_land:land_sea][v_decay > 1] / v_landfall + ) + track["max_sustained_wind"][sea_land:land_sea] = v_landfall * v_decay # correct values of sea after a landfall (until next landfall, if any) - if land_sea < track['time'].size: + if land_sea < track["time"].size: if idx + 1 < sea_land_idx.size: # if there is a next landfall, correct until last point before # reaching land again @@ -920,25 +1048,34 @@ def _apply_decay_coeffs(track, v_rel, p_rel, land_geom, s_rel): else: # if there is no further landfall, correct until the end of # the track - end_cor = track['time'].size + end_cor = track["time"].size rndn = 0.1 * float(np.abs(np.random.normal(size=1) * 5) + 6) - r_diff = track['central_pressure'][land_sea].values - \ - track['central_pressure'][land_sea - 1].values + rndn - track['central_pressure'][land_sea:end_cor] += - r_diff + r_diff = ( + track["central_pressure"][land_sea].values + - track["central_pressure"][land_sea - 1].values + + rndn + ) + track["central_pressure"][land_sea:end_cor] += -r_diff rndn = rndn * 10 # mean value 10 - r_diff = track['max_sustained_wind'][land_sea].values - \ - track['max_sustained_wind'][land_sea - 1].values - rndn - track['max_sustained_wind'][land_sea:end_cor] += - r_diff + r_diff = ( + track["max_sustained_wind"][land_sea].values + - track["max_sustained_wind"][land_sea - 1].values + - rndn + ) + track["max_sustained_wind"][land_sea:end_cor] += -r_diff # correct limits - warnings.filterwarnings('ignore') - cor_p = track['central_pressure'].values > track['environmental_pressure'].values - track['central_pressure'][cor_p] = track['environmental_pressure'][cor_p] - track['max_sustained_wind'][track['max_sustained_wind'] < 0] = 0 + warnings.filterwarnings("ignore") + cor_p = ( + track["central_pressure"].values > track["environmental_pressure"].values + ) + track["central_pressure"][cor_p] = track["environmental_pressure"][cor_p] + track["max_sustained_wind"][track["max_sustained_wind"] < 0] = 0 - track.attrs['category'] = climada.hazard.tc_tracks.set_category( - track['max_sustained_wind'].values, track.attrs['max_sustained_wind_unit']) + track.attrs["category"] = climada.hazard.tc_tracks.set_category( + track["max_sustained_wind"].values, track.attrs["max_sustained_wind_unit"] + ) return track @@ -947,25 +1084,40 @@ def _check_apply_decay_plot(all_tracks, syn_orig_wind, syn_orig_pres): Plot wind and presure for unchanged historical tracks.""" # Plot synthetic tracks sy_tracks = [track for track in all_tracks if not track.orig_event_flag] - graph_v_b, graph_v_a, graph_p_b, graph_p_a, graph_pd_a, graph_ped_a = \ - _check_apply_decay_syn_plot(sy_tracks, syn_orig_wind, - syn_orig_pres) + graph_v_b, graph_v_a, graph_p_b, graph_p_a, graph_pd_a, graph_ped_a = ( + _check_apply_decay_syn_plot(sy_tracks, syn_orig_wind, syn_orig_pres) + ) # Plot historic tracks hist_tracks = [track for track in all_tracks if track.orig_event_flag] - graph_hv, graph_hp, graph_hpd_a, graph_hped_a = \ - _check_apply_decay_hist_plot(hist_tracks) + graph_hv, graph_hp, graph_hpd_a, graph_hped_a = _check_apply_decay_hist_plot( + hist_tracks + ) # Put legend and fix size scale_thresholds = climada.hazard.tc_tracks.SAFFIR_SIM_CAT - leg_lines = [Line2D([0], [0], color=climada.hazard.tc_tracks.CAT_COLORS[i_col], lw=2) - for i_col in range(len(scale_thresholds))] - leg_lines.append(Line2D([0], [0], color='k', lw=2)) 
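Numerically, the decay applied in _apply_decay_coeffs above behaves as in the following rough sketch. The functional forms are assumed from the _solve_decay_v_function / _solve_decay_p_function inverses in this module (wind: exp(-A*x), pressure: S - (S - 1)*exp(-B*x)); the coefficients are the global category-4 values quoted at the top of this file, used as (S, B) in the same way as the plotting helper _check_decay_values_plot:

    import numpy as np

    # Assumed decay forms, consistent with the _solve_decay_* inverses in this module
    def decay_v(a_coef, dist_km):
        """Max sustained wind relative to its landfall value."""
        return np.exp(-a_coef * dist_km)

    def decay_p(s_coef, b_coef, dist_km):
        """Central pressure relative to its landfall value, tending to s_coef inland."""
        return s_coef - (s_coef - 1.0) * np.exp(-b_coef * dist_km)

    # Global category-4 parameters quoted earlier in this patch
    A_V = 0.002550639312763181
    S_P, B_P = 1.0828373148889825, 0.003997492773076179

    dist_since_lf = np.array([0.0, 100.0, 300.0, 600.0])        # km
    print(decay_v(A_V, dist_since_lf))       # 1.0 at landfall, ~0.22 after 600 km
    print(decay_p(S_P, B_P, dist_since_lf))  # 1.0 at landfall, rising towards ~1.08

In _apply_decay_coeffs these factors multiply the wind and pressure recorded at landfall over track['dist_since_lf'], with the checks above preventing unphysical intensification.
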
- leg_names = [climada.hazard.tc_tracks.CAT_NAMES[i_col] - for i_col in sorted(climada.hazard.tc_tracks.CAT_NAMES.keys())] - leg_names.append('Sea') - all_gr = [graph_v_a, graph_v_b, graph_p_a, graph_p_b, graph_ped_a, - graph_pd_a, graph_hv, graph_hp, graph_hpd_a, graph_hped_a] + leg_lines = [ + Line2D([0], [0], color=climada.hazard.tc_tracks.CAT_COLORS[i_col], lw=2) + for i_col in range(len(scale_thresholds)) + ] + leg_lines.append(Line2D([0], [0], color="k", lw=2)) + leg_names = [ + climada.hazard.tc_tracks.CAT_NAMES[i_col] + for i_col in sorted(climada.hazard.tc_tracks.CAT_NAMES.keys()) + ] + leg_names.append("Sea") + all_gr = [ + graph_v_a, + graph_v_b, + graph_p_a, + graph_p_b, + graph_ped_a, + graph_pd_a, + graph_hv, + graph_hp, + graph_hpd_a, + graph_hped_a, + ] for graph in all_gr: graph.axs[0].legend(leg_lines, leg_names) fig, _ = graph.get_elems() @@ -974,9 +1126,9 @@ def _check_apply_decay_plot(all_tracks, syn_orig_wind, syn_orig_pres): def _calc_decay_ps_value(track, p_landfall, pos, s_rel): if s_rel: - p_land_s = track['environmental_pressure'][pos].values + p_land_s = track["environmental_pressure"][pos].values else: - p_land_s = track['central_pressure'][pos].values + p_land_s = track["central_pressure"][pos].values return float(p_land_s / p_landfall) @@ -1001,78 +1153,99 @@ def _solve_decay_p_function(ps_y, p_y, x_val): return -np.log((ps_y - p_y) / (ps_y - 1.0)) / x_val -def _check_apply_decay_syn_plot(sy_tracks, syn_orig_wind, - syn_orig_pres): +def _check_apply_decay_syn_plot(sy_tracks, syn_orig_wind, syn_orig_pres): """Plot winds and pressures of synthetic tracks before and after correction.""" # pylint: disable=protected-access _, graph_v_b = plt.subplots() - graph_v_b.set_title('Wind before land decay correction') - graph_v_b.set_xlabel('Node number') - graph_v_b.set_ylabel('Max sustained wind (kn)') + graph_v_b.set_title("Wind before land decay correction") + graph_v_b.set_xlabel("Node number") + graph_v_b.set_ylabel("Max sustained wind (kn)") _, graph_v_a = plt.subplots() - graph_v_a.set_title('Wind after land decay correction') - graph_v_a.set_xlabel('Node number') - graph_v_a.set_ylabel('Max sustained wind (kn)') + graph_v_a.set_title("Wind after land decay correction") + graph_v_a.set_xlabel("Node number") + graph_v_a.set_ylabel("Max sustained wind (kn)") _, graph_p_b = plt.subplots() - graph_p_b.set_title('Pressure before land decay correctionn') - graph_p_b.set_xlabel('Node number') - graph_p_b.set_ylabel('Central pressure (mb)') + graph_p_b.set_title("Pressure before land decay correctionn") + graph_p_b.set_xlabel("Node number") + graph_p_b.set_ylabel("Central pressure (mb)") _, graph_p_a = plt.subplots() - graph_p_a.set_title('Pressure after land decay correctionn') - graph_p_a.set_xlabel('Node number') - graph_p_a.set_ylabel('Central pressure (mb)') + graph_p_a.set_title("Pressure after land decay correctionn") + graph_p_a.set_xlabel("Node number") + graph_p_a.set_ylabel("Central pressure (mb)") _, graph_pd_a = plt.subplots() - graph_pd_a.set_title('Relative pressure after land decay correction') - graph_pd_a.set_xlabel('Distance from landfall (km)') - graph_pd_a.set_ylabel('Central pressure relative to landfall') + graph_pd_a.set_title("Relative pressure after land decay correction") + graph_pd_a.set_xlabel("Distance from landfall (km)") + graph_pd_a.set_ylabel("Central pressure relative to landfall") _, graph_ped_a = plt.subplots() graph_ped_a.set_title( - 'Environmental - central pressure after land decay correction') - graph_ped_a.set_xlabel('Distance 
from landfall (km)') - graph_ped_a.set_ylabel('Environmental pressure - Central pressure (mb)') + "Environmental - central pressure after land decay correction" + ) + graph_ped_a.set_xlabel("Distance from landfall (km)") + graph_ped_a.set_ylabel("Environmental pressure - Central pressure (mb)") - for track, orig_wind, orig_pres in \ - zip(sy_tracks, syn_orig_wind, syn_orig_pres): + for track, orig_wind, orig_pres in zip(sy_tracks, syn_orig_wind, syn_orig_pres): sea_land_idx, land_sea_idx = climada.hazard.tc_tracks._get_landfall_idx(track) if sea_land_idx.size: for sea_land, land_sea in zip(sea_land_idx, land_sea_idx): - v_lf = track['max_sustained_wind'][sea_land - 1].values - p_lf = track['central_pressure'][sea_land - 1].values + v_lf = track["max_sustained_wind"][sea_land - 1].values + p_lf = track["central_pressure"][sea_land - 1].values scale_thresholds = climada.hazard.tc_tracks.SAFFIR_SIM_CAT ss_scale_idx = np.where(v_lf < scale_thresholds)[0][0] - on_land = np.arange(track['time'].size)[sea_land:land_sea] - - graph_v_a.plot(on_land, track['max_sustained_wind'][on_land], - 'o', c=climada.hazard.tc_tracks.CAT_COLORS[ss_scale_idx]) - graph_v_b.plot(on_land, orig_wind[on_land], - 'o', c=climada.hazard.tc_tracks.CAT_COLORS[ss_scale_idx]) - graph_p_a.plot(on_land, track['central_pressure'][on_land], - 'o', c=climada.hazard.tc_tracks.CAT_COLORS[ss_scale_idx]) - graph_p_b.plot(on_land, orig_pres[on_land], - 'o', c=climada.hazard.tc_tracks.CAT_COLORS[ss_scale_idx]) - graph_pd_a.plot(track['dist_since_lf'][on_land], - track['central_pressure'][on_land] / p_lf, - 'o', c=climada.hazard.tc_tracks.CAT_COLORS[ss_scale_idx]) - graph_ped_a.plot(track['dist_since_lf'][on_land], - track['environmental_pressure'][on_land] - - track['central_pressure'][on_land], - 'o', c=climada.hazard.tc_tracks.CAT_COLORS[ss_scale_idx]) - - on_sea = np.arange(track['time'].size)[~track['on_land']] - graph_v_a.plot(on_sea, track['max_sustained_wind'][on_sea], - 'o', c='k', markersize=5) - graph_v_b.plot(on_sea, orig_wind[on_sea], - 'o', c='k', markersize=5) - graph_p_a.plot(on_sea, track['central_pressure'][on_sea], - 'o', c='k', markersize=5) - graph_p_b.plot(on_sea, orig_pres[on_sea], - 'o', c='k', markersize=5) + on_land = np.arange(track["time"].size)[sea_land:land_sea] + + graph_v_a.plot( + on_land, + track["max_sustained_wind"][on_land], + "o", + c=climada.hazard.tc_tracks.CAT_COLORS[ss_scale_idx], + ) + graph_v_b.plot( + on_land, + orig_wind[on_land], + "o", + c=climada.hazard.tc_tracks.CAT_COLORS[ss_scale_idx], + ) + graph_p_a.plot( + on_land, + track["central_pressure"][on_land], + "o", + c=climada.hazard.tc_tracks.CAT_COLORS[ss_scale_idx], + ) + graph_p_b.plot( + on_land, + orig_pres[on_land], + "o", + c=climada.hazard.tc_tracks.CAT_COLORS[ss_scale_idx], + ) + graph_pd_a.plot( + track["dist_since_lf"][on_land], + track["central_pressure"][on_land] / p_lf, + "o", + c=climada.hazard.tc_tracks.CAT_COLORS[ss_scale_idx], + ) + graph_ped_a.plot( + track["dist_since_lf"][on_land], + track["environmental_pressure"][on_land] + - track["central_pressure"][on_land], + "o", + c=climada.hazard.tc_tracks.CAT_COLORS[ss_scale_idx], + ) + + on_sea = np.arange(track["time"].size)[~track["on_land"]] + graph_v_a.plot( + on_sea, track["max_sustained_wind"][on_sea], "o", c="k", markersize=5 + ) + graph_v_b.plot(on_sea, orig_wind[on_sea], "o", c="k", markersize=5) + graph_p_a.plot( + on_sea, track["central_pressure"][on_sea], "o", c="k", markersize=5 + ) + graph_p_b.plot(on_sea, orig_pres[on_sea], "o", c="k", markersize=5) return 
graph_v_b, graph_v_a, graph_p_b, graph_p_a, graph_pd_a, graph_ped_a @@ -1081,51 +1254,68 @@ def _check_apply_decay_hist_plot(hist_tracks): """Plot winds and pressures of historical tracks.""" # pylint: disable=protected-access _, graph_hv = plt.subplots() - graph_hv.set_title('Historical wind') - graph_hv.set_xlabel('Node number') - graph_hv.set_ylabel('Max sustained wind (kn)') + graph_hv.set_title("Historical wind") + graph_hv.set_xlabel("Node number") + graph_hv.set_ylabel("Max sustained wind (kn)") _, graph_hp = plt.subplots() - graph_hp.set_title('Historical pressure') - graph_hp.set_xlabel('Node number') - graph_hp.set_ylabel('Central pressure (mb)') + graph_hp.set_title("Historical pressure") + graph_hp.set_xlabel("Node number") + graph_hp.set_ylabel("Central pressure (mb)") _, graph_hpd_a = plt.subplots() - graph_hpd_a.set_title('Historical relative pressure') - graph_hpd_a.set_xlabel('Distance from landfall (km)') - graph_hpd_a.set_ylabel('Central pressure relative to landfall') + graph_hpd_a.set_title("Historical relative pressure") + graph_hpd_a.set_xlabel("Distance from landfall (km)") + graph_hpd_a.set_ylabel("Central pressure relative to landfall") _, graph_hped_a = plt.subplots() - graph_hped_a.set_title('Historical environmental - central pressure') - graph_hped_a.set_xlabel('Distance from landfall (km)') - graph_hped_a.set_ylabel('Environmental pressure - Central pressure (mb)') + graph_hped_a.set_title("Historical environmental - central pressure") + graph_hped_a.set_xlabel("Distance from landfall (km)") + graph_hped_a.set_ylabel("Environmental pressure - Central pressure (mb)") for track in hist_tracks: sea_land_idx, land_sea_idx = climada.hazard.tc_tracks._get_landfall_idx(track) if sea_land_idx.size: for sea_land, land_sea in zip(sea_land_idx, land_sea_idx): scale_thresholds = climada.hazard.tc_tracks.SAFFIR_SIM_CAT - ss_scale_idx = np.where(track['max_sustained_wind'][sea_land - 1].values - < scale_thresholds)[0][0] - on_land = np.arange(track['time'].size)[sea_land:land_sea] - - graph_hv.add_curve(on_land, track['max_sustained_wind'][on_land], - 'o', c=climada.hazard.tc_tracks.CAT_COLORS[ss_scale_idx]) - graph_hp.add_curve(on_land, track['central_pressure'][on_land], - 'o', c=climada.hazard.tc_tracks.CAT_COLORS[ss_scale_idx]) - graph_hpd_a.plot(track['dist_since_lf'][on_land], - track['central_pressure'][on_land] - / track['central_pressure'][sea_land - 1].values, - 'o', c=climada.hazard.tc_tracks.CAT_COLORS[ss_scale_idx]) - graph_hped_a.plot(track['dist_since_lf'][on_land], - track['environmental_pressure'][on_land] - - track['central_pressure'][on_land], - 'o', c=climada.hazard.tc_tracks.CAT_COLORS[ss_scale_idx]) - - on_sea = np.arange(track['time'].size)[~track.on_land] - graph_hp.plot(on_sea, track['central_pressure'][on_sea], - 'o', c='k', markersize=5) - graph_hv.plot(on_sea, track['max_sustained_wind'][on_sea], - 'o', c='k', markersize=5) + ss_scale_idx = np.where( + track["max_sustained_wind"][sea_land - 1].values < scale_thresholds + )[0][0] + on_land = np.arange(track["time"].size)[sea_land:land_sea] + + graph_hv.add_curve( + on_land, + track["max_sustained_wind"][on_land], + "o", + c=climada.hazard.tc_tracks.CAT_COLORS[ss_scale_idx], + ) + graph_hp.add_curve( + on_land, + track["central_pressure"][on_land], + "o", + c=climada.hazard.tc_tracks.CAT_COLORS[ss_scale_idx], + ) + graph_hpd_a.plot( + track["dist_since_lf"][on_land], + track["central_pressure"][on_land] + / track["central_pressure"][sea_land - 1].values, + "o", + 
c=climada.hazard.tc_tracks.CAT_COLORS[ss_scale_idx], + ) + graph_hped_a.plot( + track["dist_since_lf"][on_land], + track["environmental_pressure"][on_land] + - track["central_pressure"][on_land], + "o", + c=climada.hazard.tc_tracks.CAT_COLORS[ss_scale_idx], + ) + + on_sea = np.arange(track["time"].size)[~track.on_land] + graph_hp.plot( + on_sea, track["central_pressure"][on_sea], "o", c="k", markersize=5 + ) + graph_hv.plot( + on_sea, track["max_sustained_wind"][on_sea], "o", c="k", markersize=5 + ) return graph_hv, graph_hp, graph_hpd_a, graph_hped_a diff --git a/climada/hazard/test/__init__.py b/climada/hazard/test/__init__.py index 7bc33d61f..10a572415 100755 --- a/climada/hazard/test/__init__.py +++ b/climada/hazard/test/__init__.py @@ -21,10 +21,10 @@ import shutil -from climada.util.constants import SYSTEM_DIR +from climada.hazard.tc_tracks import IBTRACS_FILE, IBTRACS_URL from climada.util.api_client import Client +from climada.util.constants import SYSTEM_DIR from climada.util.files_handler import download_ftp -from climada.hazard.tc_tracks import IBTRACS_FILE, IBTRACS_URL def download_ibtracs(): @@ -36,11 +36,17 @@ def download_ibtracs(): return # Nothing to do try: - download_ftp(f'{IBTRACS_URL}/{IBTRACS_FILE}', IBTRACS_FILE) + download_ftp(f"{IBTRACS_URL}/{IBTRACS_FILE}", IBTRACS_FILE) shutil.move(IBTRACS_FILE, SYSTEM_DIR) - except ValueError: # plan b: download an old version of that file from the climada api + except ( + ValueError + ): # plan b: download an old version of that file from the climada api client = Client() - dsinfo = client.get_dataset_info(name="IBTrACS", version="v04r00", status="external") - [fileinfo] = [fi for fi in dsinfo.files if fi.file_name == 'IBTrACS.ALL.v04r00.nc'] + dsinfo = client.get_dataset_info( + name="IBTrACS", version="v04r00", status="external" + ) + [fileinfo] = [ + fi for fi in dsinfo.files if fi.file_name == "IBTrACS.ALL.v04r00.nc" + ] client._download_file(local_path=SYSTEM_DIR, fileinfo=fileinfo) diff --git a/climada/hazard/test/test_base.py b/climada/hazard/test/test_base.py index 585832219..cf4c8e99b 100644 --- a/climada/hazard/test/test_base.py +++ b/climada/hazard/test/test_base.py @@ -20,28 +20,26 @@ """ import unittest - from pathlib import Path + import numpy as np -from scipy import sparse from pathos.pools import ProcessPool as Pool +from scipy import sparse +import climada.util.coordinates as u_coord +import climada.util.dates_times as u_dt from climada import CONFIG from climada.hazard.base import Hazard from climada.hazard.centroids.centr import Centroids -import climada.util.dates_times as u_dt -from climada.util.constants import HAZ_TEMPLATE_XLS -import climada.util.coordinates as u_coord - from climada.test import get_test_file +from climada.util.constants import HAZ_TEMPLATE_XLS - -DATA_DIR :Path = CONFIG.hazard.test_data.dir() +DATA_DIR: Path = CONFIG.hazard.test_data.dir() """ Directory for writing (and subsequent reading) of temporary files created during tests. """ -HAZ_TEST_TC :Path = get_test_file('test_tc_florida') +HAZ_TEST_TC: Path = get_test_file("test_tc_florida") """ Hazard test file from Data API: Hurricanes from 1851 to 2011 over Florida with 100 centroids. Fraction is empty. Format: HDF5. 
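The docstring above describes the fixture that most tests in this file load. A small sketch of the typical usage, assuming only the calls that already appear in this patch (Hazard.from_hdf5, check, and the get_test_file helper imported above):

    from climada.hazard.base import Hazard
    from climada.test import get_test_file

    # Florida TC test set described above; fetched from the Data API on first use
    haz = Hazard.from_hdf5(get_test_file("test_tc_florida"))
    haz.check()

    print(haz.intensity.shape)  # (n_events, 100): 100 centroids per the docstring
    print(haz.fraction.nnz)     # expected 0, since "Fraction is empty"
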
@@ -49,30 +47,28 @@ def dummy_hazard(): - fraction = sparse.csr_matrix([[0.02, 0.03, 0.04], - [0.01, 0.01, 0.01], - [0.3, 0.1, 0.0], - [0.3, 0.2, 0.0]]) - intensity = sparse.csr_matrix([[0.2, 0.3, 0.4], - [0.1, 0.1, 0.01], - [4.3, 2.1, 1.0], - [5.3, 0.2, 0.0]]) + fraction = sparse.csr_matrix( + [[0.02, 0.03, 0.04], [0.01, 0.01, 0.01], [0.3, 0.1, 0.0], [0.3, 0.2, 0.0]] + ) + intensity = sparse.csr_matrix( + [[0.2, 0.3, 0.4], [0.1, 0.1, 0.01], [4.3, 2.1, 1.0], [5.3, 0.2, 0.0]] + ) return Hazard( "TC", intensity=intensity, fraction=fraction, - centroids=Centroids( - lat=np.array([1, 3, 5]), lon=np.array([2, 4, 6])), + centroids=Centroids(lat=np.array([1, 3, 5]), lon=np.array([2, 4, 6])), event_id=np.array([1, 2, 3, 4]), - event_name=['ev1', 'ev2', 'ev3', 'ev4'], + event_name=["ev1", "ev2", "ev3", "ev4"], date=np.array([1, 2, 3, 4]), orig=np.array([True, False, False, True]), frequency=np.array([0.1, 0.5, 0.5, 0.2]), - frequency_unit='1/week', - units='m/s', + frequency_unit="1/week", + units="m/s", ) + class TestLoader(unittest.TestCase): """Test loading functions from the Hazard class""" @@ -87,7 +83,7 @@ def setUp(self): "TC", centroids=centroids, event_id=np.array([1, 2, 3]), - event_name=['A', 'B', 'C'], + event_name=["A", "B", "C"], frequency=np.array([1, 2, 3]), # events x centroids intensity=sparse.csr_matrix([[1, 2], [1, 2], [1, 2]]), @@ -107,7 +103,7 @@ def test_init_empty_fraction(self): event_id=self.hazard.event_id, event_name=self.hazard.event_name, frequency=self.hazard.frequency, - intensity=self.hazard.intensity + intensity=self.hazard.intensity, ) hazard.check() np.testing.assert_array_equal(hazard.fraction.shape, hazard.intensity.shape) @@ -119,7 +115,7 @@ def test_check_wrongFreq_fail(self): with self.assertRaises(ValueError) as cm: self.hazard.check() - self.assertIn('Invalid Hazard.frequency size: 3 != 2.', str(cm.exception)) + self.assertIn("Invalid Hazard.frequency size: 3 != 2.", str(cm.exception)) def test_check_wrongInten_fail(self): """Wrong hazard definition""" @@ -139,11 +135,11 @@ def test_check_wrongFrac_fail(self): def test_check_wrongEvName_fail(self): """Wrong hazard definition""" - self.hazard.event_name = ['M'] + self.hazard.event_name = ["M"] with self.assertRaises(ValueError) as cm: self.hazard.check() - self.assertIn('Invalid Hazard.event_name size: 3 != 1.', str(cm.exception)) + self.assertIn("Invalid Hazard.event_name size: 3 != 1.", str(cm.exception)) def test_check_wrongId_fail(self): """Wrong hazard definition""" @@ -151,7 +147,7 @@ def test_check_wrongId_fail(self): with self.assertRaises(ValueError) as cm: self.hazard.check() - self.assertIn('There are events with the same identifier.', str(cm.exception)) + self.assertIn("There are events with the same identifier.", str(cm.exception)) def test_check_wrong_date_fail(self): """Wrong hazard definition""" @@ -159,7 +155,7 @@ def test_check_wrong_date_fail(self): with self.assertRaises(ValueError) as cm: self.hazard.check() - self.assertIn('Invalid Hazard.date size: 3 != 2.', str(cm.exception)) + self.assertIn("Invalid Hazard.date size: 3 != 2.", str(cm.exception)) def test_check_wrong_orig_fail(self): """Wrong hazard definition""" @@ -167,50 +163,47 @@ def test_check_wrong_orig_fail(self): with self.assertRaises(ValueError) as cm: self.hazard.check() - self.assertIn('Invalid Hazard.orig size: 3 != 4.', str(cm.exception)) + self.assertIn("Invalid Hazard.orig size: 3 != 4.", str(cm.exception)) def test_event_name_to_id_pass(self): """Test event_name_to_id function.""" - haz = 
Hazard.from_excel(HAZ_TEMPLATE_XLS, haz_type='TC') - self.assertEqual(haz.get_event_id('event001')[0], 1) - self.assertEqual(haz.get_event_id('event084')[0], 84) + haz = Hazard.from_excel(HAZ_TEMPLATE_XLS, haz_type="TC") + self.assertEqual(haz.get_event_id("event001")[0], 1) + self.assertEqual(haz.get_event_id("event084")[0], 84) def test_event_name_to_id_fail(self): """Test event_name_to_id function.""" - haz = Hazard.from_excel(HAZ_TEMPLATE_XLS, haz_type='TC') + haz = Hazard.from_excel(HAZ_TEMPLATE_XLS, haz_type="TC") with self.assertRaises(ValueError) as cm: - haz.get_event_id('1050') - self.assertIn('No event with name: 1050', str(cm.exception)) + haz.get_event_id("1050") + self.assertIn("No event with name: 1050", str(cm.exception)) def test_event_id_to_name_pass(self): """Test event_id_to_name function.""" - haz = Hazard.from_excel(HAZ_TEMPLATE_XLS, haz_type='TC') - self.assertEqual(haz.get_event_name(2), 'event002') - self.assertEqual(haz.get_event_name(48), 'event048') + haz = Hazard.from_excel(HAZ_TEMPLATE_XLS, haz_type="TC") + self.assertEqual(haz.get_event_name(2), "event002") + self.assertEqual(haz.get_event_name(48), "event048") def test_event_id_to_name_fail(self): """Test event_id_to_name function.""" - haz = Hazard.from_excel(HAZ_TEMPLATE_XLS, haz_type='TC') + haz = Hazard.from_excel(HAZ_TEMPLATE_XLS, haz_type="TC") with self.assertRaises(ValueError) as cm: haz.get_event_name(1050) - self.assertIn('No event with id: 1050', str(cm.exception)) + self.assertIn("No event with id: 1050", str(cm.exception)) def test_get_date_strings_pass(self): haz = Hazard.from_hdf5(HAZ_TEST_TC) - haz.event_name[5] = 'HAZEL' - haz.event_name[10] = 'HAZEL' + haz.event_name[5] = "HAZEL" + haz.event_name[10] = "HAZEL" - self.assertEqual(len(haz.get_event_date('HAZEL')), 2) - self.assertEqual(haz.get_event_date('HAZEL')[0], - u_dt.date_to_str(haz.date[5])) - self.assertEqual(haz.get_event_date('HAZEL')[1], - u_dt.date_to_str(haz.date[10])) + self.assertEqual(len(haz.get_event_date("HAZEL")), 2) + self.assertEqual(haz.get_event_date("HAZEL")[0], u_dt.date_to_str(haz.date[5])) + self.assertEqual(haz.get_event_date("HAZEL")[1], u_dt.date_to_str(haz.date[10])) self.assertEqual(haz.get_event_date(2)[0], u_dt.date_to_str(haz.date[1])) self.assertEqual(len(haz.get_event_date()), haz.date.size) - self.assertEqual(haz.get_event_date()[560], - u_dt.date_to_str(haz.date[560])) + self.assertEqual(haz.get_event_date()[560], u_dt.date_to_str(haz.date[560])) def test_check_matrices(self): """Test the check_matrices method""" @@ -238,13 +231,14 @@ def test_check_matrices(self): self.assertEqual(matrix.nnz, 0) self.assertTrue(matrix.has_canonical_format) + class TestRemoveDupl(unittest.TestCase): """Test remove_duplicates method.""" def test_equal_same(self): """Append the same hazard and remove duplicates, obtain initial hazard.""" - haz1 = Hazard.from_excel(HAZ_TEMPLATE_XLS, haz_type='TC') - haz2 = Hazard.from_excel(HAZ_TEMPLATE_XLS, haz_type='TC') + haz1 = Hazard.from_excel(HAZ_TEMPLATE_XLS, haz_type="TC") + haz2 = Hazard.from_excel(HAZ_TEMPLATE_XLS, haz_type="TC") haz1.append(haz2) haz1.remove_duplicates() haz1.check() @@ -254,8 +248,12 @@ def test_equal_same(self): self.assertEqual(haz1.frequency_unit, haz2.frequency_unit) self.assertTrue(np.array_equal(haz1.date, haz2.date)) self.assertTrue(np.array_equal(haz1.orig, haz2.orig)) - self.assertTrue(np.array_equal(haz1.intensity.toarray(), haz2.intensity.toarray())) - self.assertTrue(np.array_equal(haz1.fraction.toarray(), haz2.fraction.toarray())) + 
self.assertTrue( + np.array_equal(haz1.intensity.toarray(), haz2.intensity.toarray()) + ) + self.assertTrue( + np.array_equal(haz1.fraction.toarray(), haz2.fraction.toarray()) + ) self.assertTrue((haz1.intensity != haz2.intensity).nnz == 0) self.assertTrue((haz1.fraction != haz2.fraction).nnz == 0) self.assertEqual(haz1.units, haz2.units) @@ -267,22 +265,30 @@ def test_same_events_same(self): fraction in new appended centroids.""" haz1 = dummy_hazard() centroids = Centroids(lat=np.array([7, 9, 11]), lon=np.array([8, 10, 12])) - fraction = sparse.csr_matrix([[0.22, 0.32, 0.44], - [0.11, 0.11, 0.11], - [0.32, 0.11, 0.99], - [0.32, 0.22, 0.88]]) - intensity = sparse.csr_matrix([[0.22, 3.33, 6.44], - [1.11, 0.11, 1.11], - [8.33, 4.11, 4.4], - [9.33, 9.22, 1.77]]) + fraction = sparse.csr_matrix( + [ + [0.22, 0.32, 0.44], + [0.11, 0.11, 0.11], + [0.32, 0.11, 0.99], + [0.32, 0.22, 0.88], + ] + ) + intensity = sparse.csr_matrix( + [ + [0.22, 3.33, 6.44], + [1.11, 0.11, 1.11], + [8.33, 4.11, 4.4], + [9.33, 9.22, 1.77], + ] + ) haz2 = Hazard( "TC", centroids=centroids, event_id=haz1.event_id, event_name=haz1.event_name, frequency=haz1.frequency, - frequency_unit = "1/week", - date = haz1.date, + frequency_unit="1/week", + date=haz1.date, fraction=fraction, intensity=intensity, units="m/s", @@ -295,32 +301,38 @@ def test_same_events_same(self): # expected values haz_res = dummy_hazard() haz_res.intensity = sparse.hstack( - [haz_res.intensity, sparse.csr_matrix((haz_res.intensity.shape[0], 3))], format='csr') + [haz_res.intensity, sparse.csr_matrix((haz_res.intensity.shape[0], 3))], + format="csr", + ) haz_res.fraction = sparse.hstack( - [haz_res.fraction, sparse.csr_matrix((haz_res.fraction.shape[0], 3))], format='csr') - self.assertTrue(np.array_equal(haz_res.intensity.toarray(), - haz1.intensity.toarray())) + [haz_res.fraction, sparse.csr_matrix((haz_res.fraction.shape[0], 3))], + format="csr", + ) + self.assertTrue( + np.array_equal(haz_res.intensity.toarray(), haz1.intensity.toarray()) + ) self.assertTrue(sparse.isspmatrix_csr(haz1.intensity)) - self.assertTrue(np.array_equal(haz_res.fraction.toarray(), - haz1.fraction.toarray())) + self.assertTrue( + np.array_equal(haz_res.fraction.toarray(), haz1.fraction.toarray()) + ) self.assertTrue(sparse.isspmatrix_csr(haz1.fraction)) self.assertEqual(haz1.event_name, haz_res.event_name) self.assertTrue(np.array_equal(haz1.date, haz_res.date)) self.assertTrue(np.array_equal(haz1.orig, haz_res.orig)) - self.assertTrue(np.array_equal(haz1.event_id, - haz_res.event_id)) + self.assertTrue(np.array_equal(haz1.event_id, haz_res.event_id)) self.assertTrue(np.array_equal(haz1.frequency, haz_res.frequency)) self.assertEqual(haz1.frequency_unit, haz_res.frequency_unit) self.assertEqual(haz_res.units, haz1.units) self.assertEqual(haz1.haz_type, haz_res.haz_type) + class TestSelect(unittest.TestCase): """Test select method.""" def test_select_event_name(self): """Test select historical events.""" haz = dummy_hazard() - sel_haz = haz.select(event_names=['ev4', 'ev1']) + sel_haz = haz.select(event_names=["ev4", "ev1"]) self.assertTrue(np.array_equal(sel_haz.centroids.coord, haz.centroids.coord)) self.assertEqual(sel_haz.units, haz.units) @@ -329,13 +341,19 @@ def test_select_event_name(self): self.assertTrue(np.array_equal(sel_haz.orig, np.array([True, True]))) self.assertTrue(np.array_equal(sel_haz.frequency, np.array([0.2, 0.1]))) self.assertEqual(sel_haz.frequency_unit, haz.frequency_unit) - self.assertTrue(np.array_equal(sel_haz.fraction.toarray(), - np.array([[0.3, 
0.2, 0.0], - [0.02, 0.03, 0.04]]))) - self.assertTrue(np.array_equal(sel_haz.intensity.toarray(), - np.array([[5.3, 0.2, 0.0], - [0.2, 0.3, 0.4]]))) - self.assertEqual(sel_haz.event_name, ['ev4', 'ev1']) + self.assertTrue( + np.array_equal( + sel_haz.fraction.toarray(), + np.array([[0.3, 0.2, 0.0], [0.02, 0.03, 0.04]]), + ) + ) + self.assertTrue( + np.array_equal( + sel_haz.intensity.toarray(), + np.array([[5.3, 0.2, 0.0], [0.2, 0.3, 0.4]]), + ) + ) + self.assertEqual(sel_haz.event_name, ["ev4", "ev1"]) self.assertIsInstance(sel_haz, Hazard) self.assertIsInstance(sel_haz.intensity, sparse.csr_matrix) self.assertIsInstance(sel_haz.fraction, sparse.csr_matrix) @@ -352,13 +370,19 @@ def test_select_event_id(self): self.assertTrue(np.array_equal(sel_haz.orig, np.array([True, True]))) self.assertTrue(np.array_equal(sel_haz.frequency, np.array([0.2, 0.1]))) self.assertEqual(sel_haz.frequency_unit, haz.frequency_unit) - self.assertTrue(np.array_equal(sel_haz.fraction.toarray(), - np.array([[0.3, 0.2, 0.0], - [0.02, 0.03, 0.04]]))) - self.assertTrue(np.array_equal(sel_haz.intensity.toarray(), - np.array([[5.3, 0.2, 0.0], - [0.2, 0.3, 0.4]]))) - self.assertEqual(sel_haz.event_name, ['ev4', 'ev1']) + self.assertTrue( + np.array_equal( + sel_haz.fraction.toarray(), + np.array([[0.3, 0.2, 0.0], [0.02, 0.03, 0.04]]), + ) + ) + self.assertTrue( + np.array_equal( + sel_haz.intensity.toarray(), + np.array([[5.3, 0.2, 0.0], [0.2, 0.3, 0.4]]), + ) + ) + self.assertEqual(sel_haz.event_name, ["ev4", "ev1"]) self.assertIsInstance(sel_haz, Hazard) self.assertIsInstance(sel_haz.intensity, sparse.csr_matrix) self.assertIsInstance(sel_haz.fraction, sparse.csr_matrix) @@ -375,13 +399,19 @@ def test_select_event_id(self): self.assertTrue(np.array_equal(sel_haz.orig, np.array([True, True]))) self.assertTrue(np.array_equal(sel_haz.frequency, np.array([0.2, 0.1]))) self.assertEqual(sel_haz.frequency_unit, haz.frequency_unit) - self.assertTrue(np.array_equal(sel_haz.fraction.toarray(), - np.array([[0.3, 0.2, 0.0], - [0.02, 0.03, 0.04]]))) - self.assertTrue(np.array_equal(sel_haz.intensity.toarray(), - np.array([[5.3, 0.2, 0.0], - [0.2, 0.3, 0.4]]))) - self.assertEqual(sel_haz.event_name, ['ev4', 'ev1']) + self.assertTrue( + np.array_equal( + sel_haz.fraction.toarray(), + np.array([[0.3, 0.2, 0.0], [0.02, 0.03, 0.04]]), + ) + ) + self.assertTrue( + np.array_equal( + sel_haz.intensity.toarray(), + np.array([[5.3, 0.2, 0.0], [0.2, 0.3, 0.4]]), + ) + ) + self.assertEqual(sel_haz.event_name, ["ev4", "ev1"]) self.assertIsInstance(sel_haz, Hazard) self.assertIsInstance(sel_haz.intensity, sparse.csr_matrix) self.assertIsInstance(sel_haz.fraction, sparse.csr_matrix) @@ -398,11 +428,19 @@ def test_select_orig_pass(self): self.assertTrue(np.array_equal(sel_haz.orig, np.array([True, True]))) self.assertTrue(np.array_equal(sel_haz.frequency, np.array([0.1, 0.2]))) self.assertEqual(sel_haz.frequency_unit, haz.frequency_unit) - self.assertTrue(np.array_equal( - sel_haz.fraction.toarray(), np.array([[0.02, 0.03, 0.04], [0.3, 0.2, 0.0]]))) - self.assertTrue(np.array_equal( - sel_haz.intensity.toarray(), np.array([[0.2, 0.3, 0.4], [5.3, 0.2, 0.0]]))) - self.assertEqual(sel_haz.event_name, ['ev1', 'ev4']) + self.assertTrue( + np.array_equal( + sel_haz.fraction.toarray(), + np.array([[0.02, 0.03, 0.04], [0.3, 0.2, 0.0]]), + ) + ) + self.assertTrue( + np.array_equal( + sel_haz.intensity.toarray(), + np.array([[0.2, 0.3, 0.4], [5.3, 0.2, 0.0]]), + ) + ) + self.assertEqual(sel_haz.event_name, ["ev1", "ev4"]) self.assertIsInstance(sel_haz, 
Hazard) self.assertIsInstance(sel_haz.intensity, sparse.csr_matrix) self.assertIsInstance(sel_haz.fraction, sparse.csr_matrix) @@ -419,11 +457,19 @@ def test_select_syn_pass(self): self.assertTrue(np.array_equal(sel_haz.orig, np.array([False, False]))) self.assertTrue(np.array_equal(sel_haz.frequency, np.array([0.5, 0.5]))) self.assertEqual(sel_haz.frequency_unit, haz.frequency_unit) - self.assertTrue(np.array_equal( - sel_haz.fraction.toarray(), np.array([[0.01, 0.01, 0.01], [0.3, 0.1, 0.0]]))) - self.assertTrue(np.array_equal( - sel_haz.intensity.toarray(), np.array([[0.1, 0.1, 0.01], [4.3, 2.1, 1.0]]))) - self.assertEqual(sel_haz.event_name, ['ev2', 'ev3']) + self.assertTrue( + np.array_equal( + sel_haz.fraction.toarray(), + np.array([[0.01, 0.01, 0.01], [0.3, 0.1, 0.0]]), + ) + ) + self.assertTrue( + np.array_equal( + sel_haz.intensity.toarray(), + np.array([[0.1, 0.1, 0.01], [4.3, 2.1, 1.0]]), + ) + ) + self.assertEqual(sel_haz.event_name, ["ev2", "ev3"]) self.assertIsInstance(sel_haz, Hazard) self.assertIsInstance(sel_haz.intensity, sparse.csr_matrix) self.assertIsInstance(sel_haz.fraction, sparse.csr_matrix) @@ -440,15 +486,19 @@ def test_select_date_pass(self): self.assertTrue(np.array_equal(sel_haz.orig, np.array([False, False, True]))) self.assertTrue(np.array_equal(sel_haz.frequency, np.array([0.5, 0.5, 0.2]))) self.assertEqual(sel_haz.frequency_unit, haz.frequency_unit) - self.assertTrue(np.array_equal( - sel_haz.fraction.toarray(), np.array([[0.01, 0.01, 0.01], - [0.3, 0.1, 0.0], - [0.3, 0.2, 0.0]]))) - self.assertTrue(np.array_equal( - sel_haz.intensity.toarray(), np.array([[0.1, 0.1, 0.01], - [4.3, 2.1, 1.0], - [5.3, 0.2, 0.0]]))) - self.assertEqual(sel_haz.event_name, ['ev2', 'ev3', 'ev4']) + self.assertTrue( + np.array_equal( + sel_haz.fraction.toarray(), + np.array([[0.01, 0.01, 0.01], [0.3, 0.1, 0.0], [0.3, 0.2, 0.0]]), + ) + ) + self.assertTrue( + np.array_equal( + sel_haz.intensity.toarray(), + np.array([[0.1, 0.1, 0.01], [4.3, 2.1, 1.0], [5.3, 0.2, 0.0]]), + ) + ) + self.assertEqual(sel_haz.event_name, ["ev2", "ev3", "ev4"]) self.assertIsInstance(sel_haz, Hazard) self.assertIsInstance(sel_haz.intensity, sparse.csr_matrix) self.assertIsInstance(sel_haz.fraction, sparse.csr_matrix) @@ -456,7 +506,7 @@ def test_select_date_pass(self): def test_select_date_str_pass(self): """Test select historical events.""" haz = dummy_hazard() - sel_haz = haz.select(date=('0001-01-02', '0001-01-03')) + sel_haz = haz.select(date=("0001-01-02", "0001-01-03")) self.assertTrue(np.array_equal(sel_haz.centroids.coord, haz.centroids.coord)) self.assertEqual(sel_haz.units, haz.units) @@ -465,11 +515,19 @@ def test_select_date_str_pass(self): self.assertTrue(np.array_equal(sel_haz.orig, np.array([False, False]))) self.assertTrue(np.array_equal(sel_haz.frequency, np.array([0.5, 0.5]))) self.assertEqual(sel_haz.frequency_unit, haz.frequency_unit) - self.assertTrue(np.array_equal( - sel_haz.fraction.toarray(), np.array([[0.01, 0.01, 0.01], [0.3, 0.1, 0.0]]))) - self.assertTrue(np.array_equal( - sel_haz.intensity.toarray(), np.array([[0.1, 0.1, 0.01], [4.3, 2.1, 1.0]]))) - self.assertEqual(sel_haz.event_name, ['ev2', 'ev3']) + self.assertTrue( + np.array_equal( + sel_haz.fraction.toarray(), + np.array([[0.01, 0.01, 0.01], [0.3, 0.1, 0.0]]), + ) + ) + self.assertTrue( + np.array_equal( + sel_haz.intensity.toarray(), + np.array([[0.1, 0.1, 0.01], [4.3, 2.1, 1.0]]), + ) + ) + self.assertEqual(sel_haz.event_name, ["ev2", "ev3"]) self.assertIsInstance(sel_haz, Hazard) 
self.assertIsInstance(sel_haz.intensity, sparse.csr_matrix) self.assertIsInstance(sel_haz.fraction, sparse.csr_matrix) @@ -486,11 +544,19 @@ def test_select_date_and_orig_pass(self): self.assertTrue(np.array_equal(sel_haz.orig, np.array([False, False]))) self.assertTrue(np.array_equal(sel_haz.frequency, np.array([0.5, 0.5]))) self.assertEqual(sel_haz.frequency_unit, haz.frequency_unit) - self.assertTrue(np.array_equal( - sel_haz.fraction.toarray(), np.array([[0.01, 0.01, 0.01], [0.3, 0.1, 0.0]]))) - self.assertTrue(np.array_equal( - sel_haz.intensity.toarray(), np.array([[0.1, 0.1, 0.01], [4.3, 2.1, 1.0]]))) - self.assertEqual(sel_haz.event_name, ['ev2', 'ev3']) + self.assertTrue( + np.array_equal( + sel_haz.fraction.toarray(), + np.array([[0.01, 0.01, 0.01], [0.3, 0.1, 0.0]]), + ) + ) + self.assertTrue( + np.array_equal( + sel_haz.intensity.toarray(), + np.array([[0.1, 0.1, 0.01], [4.3, 2.1, 1.0]]), + ) + ) + self.assertEqual(sel_haz.event_name, ["ev2", "ev3"]) self.assertIsInstance(sel_haz, Hazard) self.assertIsInstance(sel_haz.intensity, sparse.csr_matrix) self.assertIsInstance(sel_haz.fraction, sparse.csr_matrix) @@ -506,7 +572,7 @@ def test_select_date_invalid_pass(self): haz = dummy_hazard() # lists and numpy arrays should work just like tuples - sel_haz = haz.select(date=['0001-01-02', '0001-01-03']) + sel_haz = haz.select(date=["0001-01-02", "0001-01-03"]) self.assertTrue(np.array_equal(sel_haz.date, np.array([2, 3]))) sel_haz = haz.select(date=np.array([2, 4])) self.assertTrue(np.array_equal(sel_haz.date, np.array([2, 3, 4]))) @@ -526,20 +592,25 @@ def test_select_date_invalid_pass(self): def test_select_reg_id_pass(self): """Test select region of centroids.""" haz = dummy_hazard() - haz.centroids.gdf['region_id'] = np.array([5, 7, 9]) + haz.centroids.gdf["region_id"] = np.array([5, 7, 9]) sel_haz = haz.select(date=(2, 4), orig=False, reg_id=9) - self.assertTrue(np.array_equal(sel_haz.centroids.coord.squeeze(), - haz.centroids.coord[2, :])) + self.assertTrue( + np.array_equal(sel_haz.centroids.coord.squeeze(), haz.centroids.coord[2, :]) + ) self.assertEqual(sel_haz.units, haz.units) self.assertTrue(np.array_equal(sel_haz.event_id, np.array([2, 3]))) self.assertTrue(np.array_equal(sel_haz.date, np.array([2, 3]))) self.assertTrue(np.array_equal(sel_haz.orig, np.array([False, False]))) self.assertTrue(np.array_equal(sel_haz.frequency, np.array([0.5, 0.5]))) self.assertEqual(sel_haz.frequency_unit, haz.frequency_unit) - self.assertTrue(np.array_equal(sel_haz.fraction.toarray(), np.array([[0.01], [0.0]]))) - self.assertTrue(np.array_equal(sel_haz.intensity.toarray(), np.array([[0.01], [1.0]]))) - self.assertEqual(sel_haz.event_name, ['ev2', 'ev3']) + self.assertTrue( + np.array_equal(sel_haz.fraction.toarray(), np.array([[0.01], [0.0]])) + ) + self.assertTrue( + np.array_equal(sel_haz.intensity.toarray(), np.array([[0.01], [1.0]])) + ) + self.assertEqual(sel_haz.event_name, ["ev2", "ev3"]) self.assertIsInstance(sel_haz, Hazard) self.assertIsInstance(sel_haz.intensity, sparse.csr_matrix) self.assertIsInstance(sel_haz.fraction, sparse.csr_matrix) @@ -547,74 +618,89 @@ def test_select_reg_id_pass(self): def test_select_tight_pass(self): """Test select tight box around hazard""" - #intensity select + # intensity select haz = dummy_hazard() haz.intensity[:, -1] = 0.0 sel_haz = haz.select_tight() - self.assertTrue(np.array_equal(sel_haz.centroids.coord.squeeze(), - haz.centroids.coord[:-1, :])) + self.assertTrue( + np.array_equal( + sel_haz.centroids.coord.squeeze(), haz.centroids.coord[:-1, 
:] + ) + ) self.assertEqual(sel_haz.units, haz.units) self.assertTrue(np.array_equal(sel_haz.event_id, haz.event_id)) self.assertTrue(np.array_equal(sel_haz.date, haz.date)) self.assertTrue(np.array_equal(sel_haz.orig, haz.orig)) self.assertTrue(np.array_equal(sel_haz.frequency, haz.frequency)) self.assertEqual(sel_haz.frequency_unit, haz.frequency_unit) - self.assertTrue(np.array_equal(sel_haz.fraction.toarray(), - haz.fraction[:,:-1].toarray())) - self.assertTrue(np.array_equal(sel_haz.intensity.toarray(), - haz.intensity[:,:-1].toarray())) + self.assertTrue( + np.array_equal(sel_haz.fraction.toarray(), haz.fraction[:, :-1].toarray()) + ) + self.assertTrue( + np.array_equal(sel_haz.intensity.toarray(), haz.intensity[:, :-1].toarray()) + ) self.assertEqual(sel_haz.event_name, haz.event_name) self.assertIsInstance(sel_haz, Hazard) self.assertIsInstance(sel_haz.intensity, sparse.csr_matrix) self.assertIsInstance(sel_haz.fraction, sparse.csr_matrix) - #fraction select + # fraction select haz = dummy_hazard() haz.fraction[:, -1] = 0.0 - sel_haz = haz.select_tight(val='fraction') + sel_haz = haz.select_tight(val="fraction") - self.assertTrue(np.array_equal(sel_haz.centroids.coord.squeeze(), - haz.centroids.coord[:-1, :])) + self.assertTrue( + np.array_equal( + sel_haz.centroids.coord.squeeze(), haz.centroids.coord[:-1, :] + ) + ) self.assertEqual(sel_haz.units, haz.units) self.assertTrue(np.array_equal(sel_haz.event_id, haz.event_id)) self.assertTrue(np.array_equal(sel_haz.date, haz.date)) self.assertTrue(np.array_equal(sel_haz.orig, haz.orig)) self.assertTrue(np.array_equal(sel_haz.frequency, haz.frequency)) self.assertEqual(sel_haz.frequency_unit, haz.frequency_unit) - self.assertTrue(np.array_equal(sel_haz.fraction.toarray(), - haz.fraction[:,:-1].toarray())) - self.assertTrue(np.array_equal(sel_haz.intensity.toarray(), - haz.intensity[:,:-1].toarray())) + self.assertTrue( + np.array_equal(sel_haz.fraction.toarray(), haz.fraction[:, :-1].toarray()) + ) + self.assertTrue( + np.array_equal(sel_haz.intensity.toarray(), haz.intensity[:, :-1].toarray()) + ) self.assertEqual(sel_haz.event_name, haz.event_name) self.assertIsInstance(sel_haz, Hazard) self.assertIsInstance(sel_haz.intensity, sparse.csr_matrix) self.assertIsInstance(sel_haz.fraction, sparse.csr_matrix) - haz = dummy_hazard() haz.intensity[:, -1] = 0.0 # small buffer: zero field is discarded sel_haz = haz.select_tight(buffer=0.1) - self.assertTrue(np.array_equal(sel_haz.centroids.coord.squeeze(), - haz.centroids.coord[:-1, :])) + self.assertTrue( + np.array_equal( + sel_haz.centroids.coord.squeeze(), haz.centroids.coord[:-1, :] + ) + ) # large buffer: zero field is retained sel_haz = haz.select_tight(buffer=10) - self.assertTrue(np.array_equal(sel_haz.centroids.coord.squeeze(), - haz.centroids.coord)) + self.assertTrue( + np.array_equal(sel_haz.centroids.coord.squeeze(), haz.centroids.coord) + ) self.assertEqual(sel_haz.units, haz.units) self.assertTrue(np.array_equal(sel_haz.event_id, haz.event_id)) self.assertTrue(np.array_equal(sel_haz.date, haz.date)) self.assertTrue(np.array_equal(sel_haz.orig, haz.orig)) self.assertTrue(np.array_equal(sel_haz.frequency, haz.frequency)) self.assertEqual(sel_haz.frequency_unit, haz.frequency_unit) - self.assertTrue(np.array_equal(sel_haz.fraction.toarray(), - haz.fraction.toarray())) - self.assertTrue(np.array_equal(sel_haz.intensity.toarray(), - haz.intensity.toarray())) + self.assertTrue( + np.array_equal(sel_haz.fraction.toarray(), haz.fraction.toarray()) + ) + self.assertTrue( + 
np.array_equal(sel_haz.intensity.toarray(), haz.intensity.toarray()) + ) self.assertEqual(sel_haz.event_name, haz.event_name) self.assertIsInstance(sel_haz, Hazard) self.assertIsInstance(sel_haz.intensity, sparse.csr_matrix) @@ -629,7 +715,7 @@ def test_select_new_fraction_zero(self): with self.assertRaisesRegex( RuntimeError, "Your selection created a Hazard object where the fraction matrix is zero " - "everywhere" + "everywhere", ): hazard.select(event_id=[3, 4], reg_id=[2]) @@ -641,14 +727,16 @@ def test_select_new_fraction_zero(self): selection = hazard.select(event_id=[3, 4], reg_id=[2]) np.testing.assert_array_equal(selection.fraction.toarray(), [[0], [0]]) + class TestAppend(unittest.TestCase): """Test append method.""" def test_append_empty_fill(self): """Append an empty. Obtain initial hazard.""" + def _check_hazard(hazard): # expected values - haz1_orig = Hazard.from_excel(HAZ_TEMPLATE_XLS, haz_type='TC') + haz1_orig = Hazard.from_excel(HAZ_TEMPLATE_XLS, haz_type="TC") self.assertEqual(hazard.event_name, haz1_orig.event_name) self.assertTrue(np.array_equal(hazard.event_id, haz1_orig.event_id)) self.assertTrue(np.array_equal(hazard.date, haz1_orig.date)) @@ -660,16 +748,16 @@ def _check_hazard(hazard): self.assertEqual(hazard.units, haz1_orig.units) self.assertEqual(hazard.haz_type, haz1_orig.haz_type) - haz1 = Hazard.from_excel(HAZ_TEMPLATE_XLS, haz_type='TC') - haz2 = Hazard('TC') - haz2.centroids.geometry.crs = 'epsg:4326' + haz1 = Hazard.from_excel(HAZ_TEMPLATE_XLS, haz_type="TC") + haz2 = Hazard("TC") + haz2.centroids.geometry.crs = "epsg:4326" haz1.append(haz2) haz1.check() _check_hazard(haz1) - haz1 = Hazard.from_excel(HAZ_TEMPLATE_XLS, haz_type='TC') - haz2 = Hazard('TC') - haz2.centroids.geometry.crs = 'epsg:4326' + haz1 = Hazard.from_excel(HAZ_TEMPLATE_XLS, haz_type="TC") + haz2 = Hazard("TC") + haz2.centroids.geometry.crs = "epsg:4326" haz2.append(haz1) haz2.check() _check_hazard(haz2) @@ -677,24 +765,23 @@ def _check_hazard(hazard): def test_same_centroids_extend(self): """Append hazard with same centroids, different events.""" haz1 = dummy_hazard() - fraction = sparse.csr_matrix([[0.2, 0.3, 0.4], - [0.1, 0.1, 0.1], - [0.3, 0.1, 0.9], - [0.3, 0.2, 0.8]]) - intensity = sparse.csr_matrix([[0.2, 3.3, 6.4], - [1.1, 0.1, 1.01], - [8.3, 4.1, 4.0], - [9.3, 9.2, 1.7]]) - haz2 = Hazard('TC', - centroids=haz1.centroids, - event_id=np.array([5, 6, 7, 8]), - event_name=['ev5', 'ev6', 'ev7', 'ev8'], - frequency=np.array([0.9, 0.75, 0.75, 0.22]), - frequency_unit='1/week', - units="m/s", - fraction=fraction, - intensity=intensity, - ) + fraction = sparse.csr_matrix( + [[0.2, 0.3, 0.4], [0.1, 0.1, 0.1], [0.3, 0.1, 0.9], [0.3, 0.2, 0.8]] + ) + intensity = sparse.csr_matrix( + [[0.2, 3.3, 6.4], [1.1, 0.1, 1.01], [8.3, 4.1, 4.0], [9.3, 9.2, 1.7]] + ) + haz2 = Hazard( + "TC", + centroids=haz1.centroids, + event_id=np.array([5, 6, 7, 8]), + event_name=["ev5", "ev6", "ev7", "ev8"], + frequency=np.array([0.9, 0.75, 0.75, 0.22]), + frequency_unit="1/week", + units="m/s", + fraction=fraction, + intensity=intensity, + ) haz1.append(haz2) haz1.check() @@ -714,11 +801,17 @@ def test_same_centroids_extend(self): for i_ev in range(haz1.event_id.size): self.assertTrue(any((haz1.intensity[i_ev].toarray() == exp_inten).all(1))) self.assertTrue(any((haz1.fraction[i_ev].toarray() == exp_frac).all(1))) - self.assertTrue(haz1.event_name[i_ev] in haz1_orig.event_name + haz2.event_name) + self.assertTrue( + haz1.event_name[i_ev] in haz1_orig.event_name + haz2.event_name + ) self.assertTrue(haz1.date[i_ev] 
in np.append(haz1_orig.date, haz2.date)) self.assertTrue(haz1.orig[i_ev] in np.append(haz1_orig.orig, haz2.orig)) - self.assertTrue(haz1.event_id[i_ev] in np.append(haz1_orig.event_id, haz2.event_id)) - self.assertTrue(haz1.frequency[i_ev] in np.append(haz1_orig.frequency, haz2.frequency)) + self.assertTrue( + haz1.event_id[i_ev] in np.append(haz1_orig.event_id, haz2.event_id) + ) + self.assertTrue( + haz1.frequency[i_ev] in np.append(haz1_orig.frequency, haz2.frequency) + ) self.assertEqual(haz1.centroids.size, 3) self.assertTrue(np.array_equal(haz1.centroids.coord, haz2.centroids.coord)) @@ -728,7 +821,7 @@ def test_incompatible_type_fail(self): """Raise error when append two incompatible hazards.""" haz1 = dummy_hazard() haz2 = dummy_hazard() - haz2.haz_type = 'WS' + haz2.haz_type = "WS" with self.assertRaises(ValueError) as cm: haz1.append(haz2) @@ -736,7 +829,7 @@ def test_incompatible_units_fail(self): """Raise error when append two incompatible hazards.""" haz1 = dummy_hazard() haz2 = dummy_hazard() - haz2.units = 'km/h' + haz2.units = "km/h" with self.assertRaises(ValueError) as cm: haz1.append(haz2) @@ -744,7 +837,7 @@ def test_incompatible_freq_units_fail(self): """Raise error when append two incompatible hazards.""" haz1 = dummy_hazard() haz2 = dummy_hazard() - haz2.frequency_unit = '1/month' + haz2.frequency_unit = "1/month" with self.assertRaises(ValueError) as cm: haz1.append(haz2) @@ -752,26 +845,25 @@ def test_all_different_extend(self): """Append totally different hazard.""" haz1 = dummy_hazard() - fraction = sparse.csr_matrix([[0.2, 0.3, 0.4], - [0.1, 0.1, 0.1], - [0.3, 0.1, 0.9], - [0.3, 0.2, 0.8]]) - intensity = sparse.csr_matrix([[0.2, 3.3, 6.4], - [1.1, 0.1, 1.01], - [8.3, 4.1, 4.0], - [9.3, 9.2, 1.7]]) - haz2 = Hazard('TC', - date=np.ones((4,)), - orig=np.ones((4,)), - centroids=Centroids( - lat=np.array([7, 9, 11]), lon=np.array([8, 10, 12])), - event_id=np.array([5, 6, 7, 8]), - event_name=['ev5', 'ev6', 'ev7', 'ev8'], - frequency=np.array([0.9, 0.75, 0.75, 0.22]), - frequency_unit='1/week', - units='m/s', - intensity=intensity, - fraction=fraction) + fraction = sparse.csr_matrix( + [[0.2, 0.3, 0.4], [0.1, 0.1, 0.1], [0.3, 0.1, 0.9], [0.3, 0.2, 0.8]] + ) + intensity = sparse.csr_matrix( + [[0.2, 3.3, 6.4], [1.1, 0.1, 1.01], [8.3, 4.1, 4.0], [9.3, 9.2, 1.7]] + ) + haz2 = Hazard( + "TC", + date=np.ones((4,)), + orig=np.ones((4,)), + centroids=Centroids(lat=np.array([7, 9, 11]), lon=np.array([8, 10, 12])), + event_id=np.array([5, 6, 7, 8]), + event_name=["ev5", "ev6", "ev7", "ev8"], + frequency=np.array([0.9, 0.75, 0.75, 0.22]), + frequency_unit="1/week", + units="m/s", + intensity=intensity, + fraction=fraction, + ) haz1.append(haz2) haz1.check() @@ -790,11 +882,17 @@ def test_all_different_extend(self): for i_ev in range(haz1.event_id.size): self.assertTrue(any((haz1.intensity[i_ev].toarray() == exp_inten).all(1))) self.assertTrue(any((haz1.fraction[i_ev].toarray() == exp_frac).all(1))) - self.assertTrue(haz1.event_name[i_ev] in haz1_orig.event_name + haz2.event_name) + self.assertTrue( + haz1.event_name[i_ev] in haz1_orig.event_name + haz2.event_name + ) self.assertTrue(haz1.date[i_ev] in np.append(haz1_orig.date, haz2.date)) self.assertTrue(haz1.orig[i_ev] in np.append(haz1_orig.orig, haz2.orig)) - self.assertTrue(haz1.event_id[i_ev] in np.append(haz1_orig.event_id, haz2.event_id)) - self.assertTrue(haz1.frequency[i_ev] in np.append(haz1_orig.frequency, haz2.frequency)) + self.assertTrue( + haz1.event_id[i_ev] in np.append(haz1_orig.event_id, haz2.event_id) + ) + 
self.assertTrue( + haz1.frequency[i_ev] in np.append(haz1_orig.frequency, haz2.frequency) + ) self.assertEqual(haz1.centroids.size, 6) self.assertEqual(haz1_orig.units, haz1.units) @@ -805,25 +903,34 @@ def test_same_events_append(self): """Append hazard with same events (and diff centroids). Events are appended with all new centroids columns.""" haz1 = dummy_hazard() - fraction = sparse.csr_matrix([[0.22, 0.32, 0.44], - [0.11, 0.11, 0.11], - [0.32, 0.11, 0.99], - [0.32, 0.22, 0.88]]) - intensity = sparse.csr_matrix([[0.22, 3.33, 6.44], - [1.11, 0.11, 1.11], - [8.33, 4.11, 4.4], - [9.33, 9.22, 1.77]]) - haz2 = Hazard('TC', - centroids=Centroids( - lat=np.array([7, 9, 11]), lon=np.array([8, 10, 12])), - event_id=haz1.event_id, - event_name=haz1.event_name.copy(), - frequency=haz1.frequency, - frequency_unit=haz1.frequency_unit, - date=haz1.date, - units='m/s', - fraction=fraction, - intensity=intensity) + fraction = sparse.csr_matrix( + [ + [0.22, 0.32, 0.44], + [0.11, 0.11, 0.11], + [0.32, 0.11, 0.99], + [0.32, 0.22, 0.88], + ] + ) + intensity = sparse.csr_matrix( + [ + [0.22, 3.33, 6.44], + [1.11, 0.11, 1.11], + [8.33, 4.11, 4.4], + [9.33, 9.22, 1.77], + ] + ) + haz2 = Hazard( + "TC", + centroids=Centroids(lat=np.array([7, 9, 11]), lon=np.array([8, 10, 12])), + event_id=haz1.event_id, + event_name=haz1.event_name.copy(), + frequency=haz1.frequency, + frequency_unit=haz1.frequency_unit, + date=haz1.date, + units="m/s", + fraction=fraction, + intensity=intensity, + ) haz1.append(haz2) @@ -837,21 +944,19 @@ def test_same_events_append(self): res_frac[0:4, 0:3] = haz1_ori.fraction.toarray() res_frac[4:, 3:] = haz2.fraction.toarray() - self.assertTrue(np.array_equal(res_inten, - haz1.intensity.toarray())) + self.assertTrue(np.array_equal(res_inten, haz1.intensity.toarray())) self.assertTrue(sparse.isspmatrix_csr(haz1.intensity)) - self.assertTrue(np.array_equal(res_frac, - haz1.fraction.toarray())) + self.assertTrue(np.array_equal(res_frac, haz1.fraction.toarray())) self.assertTrue(sparse.isspmatrix_csr(haz1.fraction)) - self.assertEqual(haz1.event_name, - haz1_ori.event_name + haz2.event_name) - self.assertTrue(np.array_equal(haz1.date, - np.append(haz1_ori.date, haz2.date))) - self.assertTrue(np.array_equal(haz1.orig, - np.append(haz1_ori.orig, haz2.orig))) + self.assertEqual(haz1.event_name, haz1_ori.event_name + haz2.event_name) + self.assertTrue(np.array_equal(haz1.date, np.append(haz1_ori.date, haz2.date))) + self.assertTrue(np.array_equal(haz1.orig, np.append(haz1_ori.orig, haz2.orig))) self.assertTrue(np.array_equal(haz1.event_id, np.arange(1, 9))) - self.assertTrue(np.array_equal(haz1.frequency, - np.append(haz1_ori.frequency, haz2.frequency))) + self.assertTrue( + np.array_equal( + haz1.frequency, np.append(haz1_ori.frequency, haz2.frequency) + ) + ) self.assertEqual(haz1_ori.frequency_unit, haz1.frequency_unit) self.assertEqual(haz1_ori.units, haz1.units) @@ -860,40 +965,42 @@ def test_same_events_append(self): def test_concat_pass(self): """Test concatenate function.""" - haz_1 = Hazard("TC", - centroids=Centroids( - lat=np.array([1, 3, 5]), lon=np.array([2, 4, 6]), - crs="epsg:4326"), - event_id=np.array([1]), - event_name=['ev1'], - date=np.array([1]), - orig=np.array([True]), - frequency=np.array([1.0]), - frequency_unit='1/week', - fraction=sparse.csr_matrix([[0.02, 0.03, 0.04]]), - intensity=sparse.csr_matrix([[0.2, 0.3, 0.4]]), - units='m/s') - - haz_2 = Hazard("TC", - centroids=Centroids( - lat=np.array([1, 3, 5]), lon=np.array([2, 4, 6]), - crs="epsg:4326"), - 
event_id=np.array([1]), - event_name=['ev2'], - date=np.array([2]), - orig=np.array([False]), - frequency=np.array([1.0]), - frequency_unit='1/week', - fraction=sparse.csr_matrix([[1.02, 1.03, 1.04]]), - intensity=sparse.csr_matrix([[1.2, 1.3, 1.4]]), - units='m/s') + haz_1 = Hazard( + "TC", + centroids=Centroids( + lat=np.array([1, 3, 5]), lon=np.array([2, 4, 6]), crs="epsg:4326" + ), + event_id=np.array([1]), + event_name=["ev1"], + date=np.array([1]), + orig=np.array([True]), + frequency=np.array([1.0]), + frequency_unit="1/week", + fraction=sparse.csr_matrix([[0.02, 0.03, 0.04]]), + intensity=sparse.csr_matrix([[0.2, 0.3, 0.4]]), + units="m/s", + ) + + haz_2 = Hazard( + "TC", + centroids=Centroids( + lat=np.array([1, 3, 5]), lon=np.array([2, 4, 6]), crs="epsg:4326" + ), + event_id=np.array([1]), + event_name=["ev2"], + date=np.array([2]), + orig=np.array([False]), + frequency=np.array([1.0]), + frequency_unit="1/week", + fraction=sparse.csr_matrix([[1.02, 1.03, 1.04]]), + intensity=sparse.csr_matrix([[1.2, 1.3, 1.4]]), + units="m/s", + ) haz = Hazard.concat([haz_1, haz_2]) - hres_frac = sparse.csr_matrix([[0.02, 0.03, 0.04], - [1.02, 1.03, 1.04]]) - hres_inten = sparse.csr_matrix([[0.2, 0.3, 0.4], - [1.2, 1.3, 1.4]]) + hres_frac = sparse.csr_matrix([[0.02, 0.03, 0.04], [1.02, 1.03, 1.04]]) + hres_inten = sparse.csr_matrix([[0.2, 0.3, 0.4], [1.2, 1.3, 1.4]]) self.assertIsInstance(haz, Hazard) self.assertTrue(sparse.isspmatrix_csr(haz.intensity)) @@ -906,7 +1013,7 @@ def test_concat_pass(self): self.assertTrue(np.array_equal(haz.orig, np.array([True, False]))) self.assertTrue(np.array_equal(haz.date, np.array([1, 2]))) self.assertTrue(np.array_equal(haz.event_id, np.array([1, 2]))) - self.assertEqual(haz.event_name, ['ev1', 'ev2']) + self.assertEqual(haz.event_name, ["ev1", "ev2"]) self.assertTrue(np.array_equal(haz.centroids.coord, haz_1.centroids.coord)) self.assertTrue(np.array_equal(haz.centroids.coord, haz_2.centroids.coord)) self.assertEqual(haz.centroids.crs, haz_1.centroids.crs) @@ -914,38 +1021,37 @@ def test_concat_pass(self): def test_append_new_var_pass(self): """New variable appears if hazard to append is empty.""" haz = dummy_hazard() - haz.frequency_unit = haz.get_default('frequency_unit') + haz.frequency_unit = haz.get_default("frequency_unit") haz.new_var = np.ones(haz.size) - app_haz = Hazard('TC') + app_haz = Hazard("TC") app_haz.append(haz) - self.assertIn('new_var', app_haz.__dict__) + self.assertIn("new_var", app_haz.__dict__) def test_append_raise_type_error(self): """Raise error if hazards of different class""" - haz1 = Hazard('TC', units='m/s') + haz1 = Hazard("TC", units="m/s") from climada.hazard import TropCyclone + haz2 = TropCyclone() with self.assertRaises(TypeError): haz1.append(haz2) def test_concat_raise_value_error(self): """Raise error if hazards with different units, type or crs""" - haz1 = Hazard('TC', units='m/s', - centroids=Centroids(lat=[],lon=[], crs="epsg:4326")) - haz3 = Hazard('EQ') - with self.assertRaisesRegex(ValueError, - "different types"): + haz1 = Hazard( + "TC", units="m/s", centroids=Centroids(lat=[], lon=[], crs="epsg:4326") + ) + haz3 = Hazard("EQ") + with self.assertRaisesRegex(ValueError, "different types"): Hazard.concat([haz1, haz3]) - haz4 = Hazard('TC', units='cm') - with self.assertRaisesRegex(ValueError, - "different units"): + haz4 = Hazard("TC", units="cm") + with self.assertRaisesRegex(ValueError, "different units"): Hazard.concat([haz1, haz4]) - - haz5 = Hazard('TC', centroids=Centroids(lat=[],lon=[], crs="epsg:7777")) 
- with self.assertRaisesRegex(ValueError, - "different CRS"): + + haz5 = Hazard("TC", centroids=Centroids(lat=[], lon=[], crs="epsg:7777")) + with self.assertRaisesRegex(ValueError, "different CRS"): Hazard.concat([haz1, haz5]) def test_change_centroids(self): @@ -954,17 +1060,19 @@ def test_change_centroids(self): on_land = np.array([True, True]) cent1 = Centroids(lat=lat, lon=lon, on_land=on_land) - haz_1 = Hazard('TC', - centroids=cent1, - event_id=np.array([1]), - event_name=['ev1'], - date=np.array([1]), - orig=np.array([True]), - frequency=np.array([1.0]), - frequency_unit='1/week', - fraction=sparse.csr_matrix([[0.02, 0.03]]), - intensity=sparse.csr_matrix([[0.2, 0.3]]), - units='m/s',) + haz_1 = Hazard( + "TC", + centroids=cent1, + event_id=np.array([1]), + event_name=["ev1"], + date=np.array([1]), + orig=np.array([True]), + frequency=np.array([1.0]), + frequency_unit="1/week", + fraction=sparse.csr_matrix([[0.02, 0.03]]), + intensity=sparse.csr_matrix([[0.2, 0.3]]), + units="m/s", + ) lat2, lon2 = np.array([0, 1, 3]), np.array([0, -1, 3]) on_land2 = np.array([True, True, False]) @@ -972,12 +1080,14 @@ def test_change_centroids(self): haz_2 = haz_1.change_centroids(cent2) - self.assertTrue(np.array_equal(haz_2.intensity.toarray(), - np.array([[0.2, 0.3, 0.]]))) - self.assertTrue(np.array_equal(haz_2.fraction.toarray(), - np.array([[0.02, 0.03, 0.]]))) + self.assertTrue( + np.array_equal(haz_2.intensity.toarray(), np.array([[0.2, 0.3, 0.0]])) + ) + self.assertTrue( + np.array_equal(haz_2.fraction.toarray(), np.array([[0.02, 0.03, 0.0]])) + ) self.assertTrue(np.array_equal(haz_2.event_id, np.array([1]))) - self.assertTrue(np.array_equal(haz_2.event_name, ['ev1'])) + self.assertTrue(np.array_equal(haz_2.event_name, ["ev1"])) self.assertTrue(np.array_equal(haz_2.orig, [True])) """Test error for projection""" @@ -987,8 +1097,9 @@ def test_change_centroids(self): with self.assertRaises(ValueError) as cm: haz_1.change_centroids(cent3, threshold=100) - self.assertIn('two hazard centroids are mapped to the same centroids', str(cm.exception)) - + self.assertIn( + "two hazard centroids are mapped to the same centroids", str(cm.exception) + ) def test_change_centroids_raster(self): """Set new centroids for hazard""" @@ -996,30 +1107,33 @@ def test_change_centroids_raster(self): on_land = np.array([True, True]) cent1 = Centroids(lat=lat, lon=lon, on_land=on_land) - haz_1 = Hazard('TC', - centroids=cent1, - event_id=np.array([1]), - event_name=['ev1'], - date=np.array([1]), - orig=np.array([True]), - frequency=np.array([1.0]), - frequency_unit='1/week', - fraction=sparse.csr_matrix([[0.02, 0.03]]), - intensity=sparse.csr_matrix([[0.2, 0.3]]), - units='m/s',) - + haz_1 = Hazard( + "TC", + centroids=cent1, + event_id=np.array([1]), + event_name=["ev1"], + date=np.array([1]), + orig=np.array([True]), + frequency=np.array([1.0]), + frequency_unit="1/week", + fraction=sparse.csr_matrix([[0.02, 0.03]]), + intensity=sparse.csr_matrix([[0.2, 0.3]]), + units="m/s", + ) """Test with raster centroids""" cent4 = Centroids.from_pnt_bounds(points_bounds=(-1, 0, 0, 1), res=1) haz_4 = haz_1.change_centroids(cent4) - self.assertTrue(np.array_equal(haz_4.intensity.toarray(), - np.array([[0.3, 0.0, 0.0, 0.2]]))) - self.assertTrue(np.array_equal(haz_4.fraction.toarray(), - np.array([[0.03, 0.0, 0.0, 0.02]]))) + self.assertTrue( + np.array_equal(haz_4.intensity.toarray(), np.array([[0.3, 0.0, 0.0, 0.2]])) + ) + self.assertTrue( + np.array_equal(haz_4.fraction.toarray(), np.array([[0.03, 0.0, 0.0, 0.02]])) + ) 
self.assertTrue(np.array_equal(haz_4.event_id, np.array([1]))) - self.assertTrue(np.array_equal(haz_4.event_name, ['ev1'])) + self.assertTrue(np.array_equal(haz_4.event_name, ["ev1"])) self.assertTrue(np.array_equal(haz_4.orig, [True])) @@ -1048,24 +1162,17 @@ def test_ref_all_pass(self): self.assertAlmostEqual(inten_stats[1][66], 70.608592953031405) self.assertAlmostEqual(inten_stats[3][33], 88.510983305123631) self.assertAlmostEqual(inten_stats[2][99], 79.717518054203623) - + def test_local_return_period(self): """Compare local return periods against reference.""" haz = dummy_hazard() - haz.intensity = sparse.csr_matrix([ - [1., 5., 1.], - [2., 2., 0.] - ]) - haz.frequency = np.full(4, 1.) - threshold_intensities = np.array([1., 2., 3.]) + haz.intensity = sparse.csr_matrix([[1.0, 5.0, 1.0], [2.0, 2.0, 0.0]]) + haz.frequency = np.full(4, 1.0) + threshold_intensities = np.array([1.0, 2.0, 3.0]) return_stats, _, _ = haz.local_return_period(threshold_intensities) np.testing.assert_allclose( return_stats[return_stats.columns[1:]].values.T, - np.array([ - [0.5, 0.5, 1.], - [1., 0.5, np.nan], - [np.nan, 1., np.nan] - ]) + np.array([[0.5, 0.5, 1.0], [1.0, 0.5, np.nan], [np.nan, 1.0, np.nan]]), ) @@ -1077,26 +1184,75 @@ def test_ref_pass(self): haz = Hazard.from_hdf5(HAZ_TEST_TC) orig_year_set = haz.calc_year_set() - self.assertTrue(np.array_equal(np.array(list(orig_year_set.keys())), - np.arange(1851, 2012))) - self.assertTrue(np.array_equal(orig_year_set[1851], - np.array([1, 11, 21, 31]))) - self.assertTrue(np.array_equal(orig_year_set[1958], - np.array([8421, 8431, 8441, 8451, 8461, 8471, 8481, - 8491, 8501, 8511]))) - self.assertTrue(np.array_equal(orig_year_set[1986], - np.array([11101, 11111, 11121, 11131, 11141, 11151]))) - self.assertTrue(np.array_equal(orig_year_set[1997], - np.array([12221, 12231, 12241, 12251, 12261, 12271, - 12281, 12291]))) - self.assertTrue(np.array_equal(orig_year_set[2006], - np.array([13571, 13581, 13591, 13601, 13611, 13621, - 13631, 13641, 13651, 13661]))) - self.assertTrue(np.array_equal(orig_year_set[2010], - np.array([14071, 14081, 14091, 14101, 14111, 14121, - 14131, 14141, 14151, 14161, 14171, 14181, - 14191, 14201, 14211, 14221, 14231, 14241, - 14251]))) + self.assertTrue( + np.array_equal(np.array(list(orig_year_set.keys())), np.arange(1851, 2012)) + ) + self.assertTrue(np.array_equal(orig_year_set[1851], np.array([1, 11, 21, 31]))) + self.assertTrue( + np.array_equal( + orig_year_set[1958], + np.array([8421, 8431, 8441, 8451, 8461, 8471, 8481, 8491, 8501, 8511]), + ) + ) + self.assertTrue( + np.array_equal( + orig_year_set[1986], + np.array([11101, 11111, 11121, 11131, 11141, 11151]), + ) + ) + self.assertTrue( + np.array_equal( + orig_year_set[1997], + np.array([12221, 12231, 12241, 12251, 12261, 12271, 12281, 12291]), + ) + ) + self.assertTrue( + np.array_equal( + orig_year_set[2006], + np.array( + [ + 13571, + 13581, + 13591, + 13601, + 13611, + 13621, + 13631, + 13641, + 13651, + 13661, + ] + ), + ) + ) + self.assertTrue( + np.array_equal( + orig_year_set[2010], + np.array( + [ + 14071, + 14081, + 14091, + 14101, + 14111, + 14121, + 14131, + 14141, + 14151, + 14161, + 14171, + 14181, + 14191, + 14201, + 14211, + 14221, + 14231, + 14241, + 14251, + ] + ), + ) + ) class TestCentroids(unittest.TestCase): @@ -1104,72 +1260,88 @@ class TestCentroids(unittest.TestCase): def test_reproject_vector_pass(self): """Test reproject_vector""" - haz_fl = Hazard('FL', - event_id=np.array([1]), - date=np.array([1]), - frequency=np.array([1]), - orig=np.array([1]), 
- event_name=['1'], - intensity=sparse.csr_matrix(np.array([0.5, 0.2, 0.1])), - fraction=sparse.csr_matrix(np.array([0.5, 0.2, 0.1]) / 2), - centroids=Centroids( - lat=np.array([1, 2, 3]), lon=np.array([1, 2, 3])),) + haz_fl = Hazard( + "FL", + event_id=np.array([1]), + date=np.array([1]), + frequency=np.array([1]), + orig=np.array([1]), + event_name=["1"], + intensity=sparse.csr_matrix(np.array([0.5, 0.2, 0.1])), + fraction=sparse.csr_matrix(np.array([0.5, 0.2, 0.1]) / 2), + centroids=Centroids(lat=np.array([1, 2, 3]), lon=np.array([1, 2, 3])), + ) haz_fl.check() - haz_fl.reproject_vector(dst_crs='epsg:2202') - self.assertTrue(np.allclose(haz_fl.centroids.lat, - np.array([331585.4099637291, 696803.88, 1098649.44]))) - self.assertTrue(np.allclose(haz_fl.centroids.lon, - np.array([11625664.37925186, 11939560.43, 12244857.13]))) - self.assertTrue(u_coord.equal_crs(haz_fl.centroids.crs, 'epsg:2202')) - self.assertTrue(np.allclose(haz_fl.intensity.toarray(), np.array([0.5, 0.2, 0.1]))) - self.assertTrue(np.allclose(haz_fl.fraction.toarray(), np.array([0.5, 0.2, 0.1]) / 2)) + haz_fl.reproject_vector(dst_crs="epsg:2202") + self.assertTrue( + np.allclose( + haz_fl.centroids.lat, + np.array([331585.4099637291, 696803.88, 1098649.44]), + ) + ) + self.assertTrue( + np.allclose( + haz_fl.centroids.lon, + np.array([11625664.37925186, 11939560.43, 12244857.13]), + ) + ) + self.assertTrue(u_coord.equal_crs(haz_fl.centroids.crs, "epsg:2202")) + self.assertTrue( + np.allclose(haz_fl.intensity.toarray(), np.array([0.5, 0.2, 0.1])) + ) + self.assertTrue( + np.allclose(haz_fl.fraction.toarray(), np.array([0.5, 0.2, 0.1]) / 2) + ) def dummy_step_impf(haz): from climada.entity import ImpactFunc + intensity = (0, 1, haz.intensity.max()) impf = ImpactFunc.from_step_impf(intensity, haz_type=haz.haz_type) return impf + class TestImpactFuncs(unittest.TestCase): """Test methods mainly for computing impacts""" + def test_haz_type(self): """Test haz_type property""" haz = dummy_hazard() - self.assertEqual(haz.haz_type, 'TC') - haz.haz_type = 'random' - self.assertEqual(haz.haz_type, 'random') + self.assertEqual(haz.haz_type, "TC") + haz.haz_type = "random" + self.assertEqual(haz.haz_type, "random") def test_cent_exp_col(self): """Test return of centroid exposures column""" haz = dummy_hazard() - self.assertEqual(haz.centr_exp_col, 'centr_TC') - haz.haz_type = 'random' - self.assertEqual(haz.centr_exp_col, 'centr_random') + self.assertEqual(haz.centr_exp_col, "centr_TC") + haz.haz_type = "random" + self.assertEqual(haz.centr_exp_col, "centr_random") haz = Hazard() - self.assertEqual(haz.centr_exp_col, 'centr_') + self.assertEqual(haz.centr_exp_col, "centr_") def test_get_mdr(self): haz = dummy_hazard() impf = dummy_step_impf(haz) - #single index + # single index for idx in range(3): cent_idx = np.array([idx]) mdr = haz.get_mdr(cent_idx, impf) true_mdr = np.digitize(haz.intensity[:, idx].toarray(), [0, 1]) - 1 np.testing.assert_array_almost_equal(mdr.toarray(), true_mdr) - #repeated index + # repeated index cent_idx = np.array([0, 0, 1]) mdr = haz.get_mdr(cent_idx, impf) true_mdr = np.digitize(haz.intensity[:, cent_idx].toarray(), [0, 1]) - 1 np.testing.assert_array_almost_equal(mdr.toarray(), true_mdr) - #mdr is not zero at 0 + # mdr is not zero at 0 impf.mdd += 1 - #single index + # single index for idx in range(3): cent_idx = np.array([idx]) mdr = haz.get_mdr(cent_idx, impf) @@ -1178,7 +1350,7 @@ def test_get_mdr(self): # #case with zeros everywhere cent_idx = np.array([0, 0, 1]) - impf.mdd=np.array([0,0,0,1]) + 
impf.mdd = np.array([0, 0, 0, 1]) # how many non-zeros values are expected num_nz_values = 5 mdr = haz.get_mdr(cent_idx, impf) @@ -1194,16 +1366,16 @@ def test_get_paa(self): true_paa = np.ones(haz.intensity[:, idx].shape) np.testing.assert_array_almost_equal(paa.toarray(), true_paa) - #repeated index + # repeated index idx = [0, 0] cent_idx = np.array(idx) paa = haz.get_paa(cent_idx, impf) true_paa = np.ones(haz.intensity[:, idx].shape) np.testing.assert_array_almost_equal(paa.toarray(), true_paa) - #paa is not zero at 0 + # paa is not zero at 0 impf.paa += 1 - #repeated index + # repeated index idx = [0, 0, 1] cent_idx = np.array(idx) paa = haz.get_paa(cent_idx, impf) @@ -1213,27 +1385,27 @@ def test_get_paa(self): def test_get_fraction(self): haz = dummy_hazard() - #standard index + # standard index idx = [0, 1] cent_idx = np.array(idx) frac = haz._get_fraction(cent_idx) true_frac = haz.fraction[:, idx] np.testing.assert_array_equal(frac.toarray(), true_frac.toarray()) - #repeated index + # repeated index idx = [0, 0] cent_idx = np.array(idx) frac = haz._get_fraction(cent_idx) true_frac = haz.fraction[:, idx] np.testing.assert_array_equal(frac.toarray(), true_frac.toarray()) - #index is None + # index is None cent_idx = None frac = haz._get_fraction(cent_idx) true_frac = haz.fraction np.testing.assert_array_equal(frac.toarray(), true_frac.toarray()) - #test empty fraction + # test empty fraction haz.fraction = sparse.csr_matrix(haz.fraction.shape) frac = haz._get_fraction() self.assertIsNone(frac) diff --git a/climada/hazard/test/test_io.py b/climada/hazard/test/test_io.py index be9e2829a..63e35291f 100644 --- a/climada/hazard/test/test_io.py +++ b/climada/hazard/test/test_io.py @@ -18,20 +18,21 @@ Test Hazard base class. """ -import unittest -from unittest.mock import patch + import datetime as dt +import unittest from pathlib import Path from tempfile import TemporaryDirectory +from unittest.mock import patch -from pyproj import CRS import numpy as np -from scipy.sparse import csr_matrix import xarray as xr +from pyproj import CRS +from scipy.sparse import csr_matrix from climada.hazard.base import Hazard -from climada.util.constants import DEF_FREQ_UNIT, HAZ_TEMPLATE_XLS, HAZ_DEMO_FL, DEF_CRS from climada.hazard.test.test_base import DATA_DIR, dummy_hazard +from climada.util.constants import DEF_CRS, DEF_FREQ_UNIT, HAZ_DEMO_FL, HAZ_TEMPLATE_XLS class TestReadDefaultNetCDF(unittest.TestCase): @@ -605,13 +606,13 @@ def test_hazard_pass(self): """Read an hazard excel file correctly.""" # Read demo excel file - hazard = Hazard.from_excel(HAZ_TEMPLATE_XLS, haz_type='TC') + hazard = Hazard.from_excel(HAZ_TEMPLATE_XLS, haz_type="TC") # Check results n_events = 100 n_centroids = 45 - self.assertEqual(hazard.units, '') + self.assertEqual(hazard.units, "") self.assertEqual(hazard.centroids.coord.shape, (n_centroids, 2)) self.assertEqual(hazard.centroids.coord[0][0], -25.95) @@ -620,7 +621,7 @@ def test_hazard_pass(self): self.assertEqual(hazard.centroids.coord[n_centroids - 1][1], 33.88) self.assertEqual(len(hazard.event_name), 100) - self.assertEqual(hazard.event_name[12], 'event013') + self.assertEqual(hazard.event_name[12], "event013") self.assertEqual(hazard.event_id.dtype, int) self.assertEqual(hazard.event_id.shape, (n_events,)) @@ -632,9 +633,9 @@ def test_hazard_pass(self): self.assertEqual(hazard.date[0], 675874) self.assertEqual(hazard.date[n_events - 1], 676329) - self.assertEqual(hazard.event_name[0], 'event001') - self.assertEqual(hazard.event_name[50], 'event051') - 
self.assertEqual(hazard.event_name[-1], 'event100') + self.assertEqual(hazard.event_name[0], "event001") + self.assertEqual(hazard.event_name[50], "event051") + self.assertEqual(hazard.event_name[-1], "event100") self.assertEqual(hazard.frequency.dtype, float) self.assertEqual(hazard.frequency.shape, (n_events,)) @@ -654,7 +655,7 @@ def test_hazard_pass(self): self.assertTrue(np.all(hazard.orig)) - self.assertEqual(hazard.haz_type, 'TC') + self.assertEqual(hazard.haz_type, "TC") class TestHDF5(unittest.TestCase): @@ -662,7 +663,7 @@ class TestHDF5(unittest.TestCase): def test_write_read_unsupported_type(self): """Check if the write command correctly handles unsupported types""" - file_name = str(DATA_DIR.joinpath('test_unsupported.h5')) + file_name = str(DATA_DIR.joinpath("test_unsupported.h5")) # Define an unsupported type class CustomID: @@ -680,13 +681,17 @@ class CustomID: # Load the file again and compare to previous instance hazard_read = Hazard.from_hdf5(file_name) self.assertTrue(np.array_equal(hazard.date, hazard_read.date)) - self.assertTrue(np.array_equal(hazard_read.event_id, np.array([]))) # Empty array + self.assertTrue( + np.array_equal(hazard_read.event_id, np.array([])) + ) # Empty array # Execute Tests if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestReadDefaultNetCDF) - TESTS.addTests(unittest.TestLoader().loadTestsFromTestCase(TestReadDimsCoordsNetCDF)) + TESTS.addTests( + unittest.TestLoader().loadTestsFromTestCase(TestReadDimsCoordsNetCDF) + ) TESTS.addTests(unittest.TestLoader().loadTestsFromTestCase(TestReaderExcel)) TESTS.addTests(unittest.TestLoader().loadTestsFromTestCase(TestHDF5)) unittest.TextTestRunner(verbosity=2).run(TESTS) diff --git a/climada/hazard/test/test_storm_europe.py b/climada/hazard/test/test_storm_europe.py index f919cbaa4..6e4fff3b2 100644 --- a/climada/hazard/test/test_storm_europe.py +++ b/climada/hazard/test/test_storm_europe.py @@ -20,19 +20,20 @@ """ import copy -import unittest import datetime as dt +import unittest + import numpy as np from scipy import sparse from climada import CONFIG -from climada.hazard.storm_europe import StormEurope, generate_WS_forecast_hazard from climada.hazard.centroids.centr import Centroids +from climada.hazard.storm_europe import StormEurope, generate_WS_forecast_hazard from climada.util.constants import WS_DEMO_NC - DATA_DIR = CONFIG.hazard.test_data.dir() + class TestReader(unittest.TestCase): """Test loading functions from the StormEurope class""" @@ -47,15 +48,15 @@ def test_read_with_ref(self): """Test from_footprints while passing in a reference raster.""" storms = StormEurope.from_footprints(WS_DEMO_NC, ref_raster=WS_DEMO_NC[1]) - self.assertEqual(storms.haz_type, 'WS') - self.assertEqual(storms.units, 'm/s') + self.assertEqual(storms.haz_type, "WS") + self.assertEqual(storms.units, "m/s") self.assertEqual(storms.event_id.size, 2) self.assertEqual(storms.date.size, 2) self.assertEqual(dt.datetime.fromordinal(storms.date[0]).year, 1999) self.assertEqual(dt.datetime.fromordinal(storms.date[0]).month, 12) self.assertEqual(dt.datetime.fromordinal(storms.date[0]).day, 26) self.assertEqual(storms.event_id[0], 1) - self.assertEqual(storms.event_name[0], 'Lothar') + self.assertEqual(storms.event_name[0], "Lothar") self.assertTrue(isinstance(storms.intensity, sparse.csr_matrix)) self.assertTrue(isinstance(storms.fraction, sparse.csr_matrix)) self.assertEqual(storms.intensity.shape, (2, 9944)) @@ -64,40 +65,29 @@ def test_read_with_ref(self): def test_read_with_cent(self): 
"""Test from_footprints while passing in a Centroids object""" test_centroids = Centroids.from_excel( - file_path=DATA_DIR.joinpath('fp_centroids-test.xls'), - sheet_name='fp_centroids-test' - ) + file_path=DATA_DIR.joinpath("fp_centroids-test.xls"), + sheet_name="fp_centroids-test", + ) storms = StormEurope.from_footprints(WS_DEMO_NC, centroids=test_centroids) self.assertEqual(storms.intensity.shape, (2, 9944)) - self.assertEqual( - np.count_nonzero( - ~np.isnan(storms.centroids.region_id) - ), - 6401 - ) + self.assertEqual(np.count_nonzero(~np.isnan(storms.centroids.region_id)), 6401) def test_set_ssi(self): """Test set_ssi with both dawkins and wisc_gust methodology.""" storms = StormEurope.from_footprints(WS_DEMO_NC) - storms.set_ssi(method='dawkins') - ssi_dawg = np.asarray([1.44573572e+09, 6.16173724e+08]) - self.assertTrue( - np.allclose(storms.ssi, ssi_dawg) - ) + storms.set_ssi(method="dawkins") + ssi_dawg = np.asarray([1.44573572e09, 6.16173724e08]) + self.assertTrue(np.allclose(storms.ssi, ssi_dawg)) - storms.set_ssi(method='wisc_gust') - ssi_gusty = np.asarray([1.42124571e+09, 5.86870673e+08]) - self.assertTrue( - np.allclose(storms.ssi, ssi_gusty) - ) + storms.set_ssi(method="wisc_gust") + ssi_gusty = np.asarray([1.42124571e09, 5.86870673e08]) + self.assertTrue(np.allclose(storms.ssi, ssi_gusty)) storms.set_ssi(threshold=20, on_land=False) - ssi_special = np.asarray([2.96582030e+09, 1.23980294e+09]) - self.assertTrue( - np.allclose(storms.ssi, ssi_special) - ) + ssi_special = np.asarray([2.96582030e09, 1.23980294e09]) + self.assertTrue(np.allclose(storms.ssi, ssi_special)) def test_generate_prob_storms(self): """Test the probabilistic storm generator; calls _hist2prob as well as @@ -107,59 +97,58 @@ def test_generate_prob_storms(self): self.assertEqual( np.count_nonzero(storms.centroids.region_id), - 6402 + 6402, # here, we don't rasterise; we check if the centroids lie in a # polygon. 
that is to say, it's not the majority of a raster pixel, # but the centroid's location that is decisive ) self.assertEqual(storms_prob.size, 60) self.assertTrue(np.allclose((1 / storms_prob.frequency).astype(int), 330)) - self.assertAlmostEqual(storms.frequency.sum(), - storms_prob.frequency.sum()) + self.assertAlmostEqual(storms.frequency.sum(), storms_prob.frequency.sum()) self.assertEqual(np.count_nonzero(storms_prob.orig), 2) self.assertEqual(storms_prob.centroids.size, 3054) - self.assertIsInstance(storms_prob.intensity, - sparse.csr_matrix) + self.assertIsInstance(storms_prob.intensity, sparse.csr_matrix) def test_cosmoe_read(self): """test reading from cosmo-e netcdf""" haz = StormEurope.from_cosmoe_file( - DATA_DIR.joinpath('storm_europe_cosmoe_forecast_vmax_testfile.nc'), - run_datetime=dt.datetime(2018,1,1), - event_date=dt.datetime(2018,1,3)) - self.assertEqual(haz.haz_type, 'WS') - self.assertEqual(haz.units, 'm/s') + DATA_DIR.joinpath("storm_europe_cosmoe_forecast_vmax_testfile.nc"), + run_datetime=dt.datetime(2018, 1, 1), + event_date=dt.datetime(2018, 1, 3), + ) + self.assertEqual(haz.haz_type, "WS") + self.assertEqual(haz.units, "m/s") self.assertEqual(haz.event_id.size, 21) self.assertEqual(haz.date.size, 21) self.assertEqual(dt.datetime.fromordinal(haz.date[0]).year, 2018) self.assertEqual(dt.datetime.fromordinal(haz.date[0]).month, 1) self.assertEqual(dt.datetime.fromordinal(haz.date[0]).day, 3) self.assertEqual(haz.event_id[-1], 21) - self.assertEqual(haz.event_name[-1], '2018-01-03_ens21') - self.assertIsInstance(haz.intensity, - sparse.csr_matrix) - self.assertIsInstance(haz.fraction, - sparse.csr_matrix) + self.assertEqual(haz.event_name[-1], "2018-01-03_ens21") + self.assertIsInstance(haz.intensity, sparse.csr_matrix) + self.assertIsInstance(haz.fraction, sparse.csr_matrix) self.assertEqual(haz.intensity.shape, (21, 25)) - self.assertAlmostEqual(haz.intensity.max(), 36.426735,places=3) + self.assertAlmostEqual(haz.intensity.max(), 36.426735, places=3) self.assertEqual(haz.fraction.shape, (21, 25)) def test_generate_forecast(self): - """ testing generating a forecast """ + """testing generating a forecast""" hazard, haz_model, run_datetime, event_date = generate_WS_forecast_hazard( - run_datetime=dt.datetime(2018,1,1), - event_date=dt.datetime(2018,1,3), - haz_model='cosmo2e_file', - haz_raw_storage=DATA_DIR.joinpath('storm_europe_cosmoe_forecast' + - '_vmax_testfile.nc'), + run_datetime=dt.datetime(2018, 1, 1), + event_date=dt.datetime(2018, 1, 3), + haz_model="cosmo2e_file", + haz_raw_storage=DATA_DIR.joinpath( + "storm_europe_cosmoe_forecast" + "_vmax_testfile.nc" + ), save_haz=False, - ) + ) self.assertEqual(run_datetime.year, 2018) self.assertEqual(run_datetime.month, 1) self.assertEqual(run_datetime.day, 1) self.assertEqual(event_date.day, 3) - self.assertEqual(hazard.event_name[-1], '2018-01-03_ens21') - self.assertEqual(haz_model, 'C2E') + self.assertEqual(hazard.event_name[-1], "2018-01-03_ens21") + self.assertEqual(haz_model, "C2E") + # Execute Tests if __name__ == "__main__": diff --git a/climada/hazard/test/test_tc_cc.py b/climada/hazard/test/test_tc_cc.py index 4014ac2cb..0ec281510 100644 --- a/climada/hazard/test/test_tc_cc.py +++ b/climada/hazard/test/test_tc_cc.py @@ -19,85 +19,102 @@ Test tc_clim_change module """ -import unittest - import unittest from math import log -import pandas as pd + import numpy as np +import pandas as pd + import climada.hazard.tc_clim_change as tc_cc + class TestKnutson(unittest.TestCase): def 
test_get_knutson_scaling_calculations(self): - basin = 'NA' - variable = 'cat05' - percentile = '5/10' + basin = "NA" + variable = "cat05" + percentile = "5/10" base_start, base_end = 1950, 2018 yearly_steps = 5 target_predicted_changes = tc_cc.get_knutson_scaling_factor( - percentile=percentile, - variable=variable, - basin=basin, - baseline=(base_start, base_end), - yearly_steps=yearly_steps + percentile=percentile, + variable=variable, + basin=basin, + baseline=(base_start, base_end), + yearly_steps=yearly_steps, ) ## Test computations of future changes # Load data gmst_info = tc_cc.get_gmst_info() - var_id, basin_id, perc_id = (tc_cc.MAP_VARS_NAMES[variable], - tc_cc.MAP_BASINS_NAMES[basin], - tc_cc.MAP_PERC_NAMES[percentile]) + var_id, basin_id, perc_id = ( + tc_cc.MAP_VARS_NAMES[variable], + tc_cc.MAP_BASINS_NAMES[basin], + tc_cc.MAP_PERC_NAMES[percentile], + ) knutson_data = tc_cc.get_knutson_data() knutson_value = knutson_data[var_id, basin_id, perc_id] - - start_ind = base_start - gmst_info['gmst_start_year'] - end_ind = base_end - gmst_info['gmst_start_year'] + + start_ind = base_start - gmst_info["gmst_start_year"] + end_ind = base_end - gmst_info["gmst_start_year"] # Apply model beta = 0.5 * log(0.01 * knutson_value + 1) - tc_properties = np.exp(beta * gmst_info['gmst_data']) + tc_properties = np.exp(beta * gmst_info["gmst_data"]) # Assess baseline value - baseline = np.mean(tc_properties[:, start_ind:end_ind + 1], 1) + baseline = np.mean(tc_properties[:, start_ind : end_ind + 1], 1) # Assess future value and test predicted change from baseline is # the same as given by function smoothing = 5 for target_year in [2030, 2050, 2070, 2090]: - target_year_ind = target_year - gmst_info['gmst_start_year'] + target_year_ind = target_year - gmst_info["gmst_start_year"] ind1 = target_year_ind - smoothing ind2 = target_year_ind + smoothing + 1 prediction = np.mean(tc_properties[:, ind1:ind2], 1) calculated_predicted_change = ((prediction - baseline) / baseline) * 100 - np.testing.assert_array_almost_equal(target_predicted_changes.loc[target_year, '2.6'], - calculated_predicted_change[0]) - np.testing.assert_array_almost_equal(target_predicted_changes.loc[target_year, '4.5'], - calculated_predicted_change[1]) - np.testing.assert_array_almost_equal(target_predicted_changes.loc[target_year, '6.0'], - calculated_predicted_change[2]) - np.testing.assert_array_almost_equal(target_predicted_changes.loc[target_year, '8.5'], - calculated_predicted_change[3]) + np.testing.assert_array_almost_equal( + target_predicted_changes.loc[target_year, "2.6"], + calculated_predicted_change[0], + ) + np.testing.assert_array_almost_equal( + target_predicted_changes.loc[target_year, "4.5"], + calculated_predicted_change[1], + ) + np.testing.assert_array_almost_equal( + target_predicted_changes.loc[target_year, "6.0"], + calculated_predicted_change[2], + ) + np.testing.assert_array_almost_equal( + target_predicted_changes.loc[target_year, "8.5"], + calculated_predicted_change[3], + ) def test_get_knutson_scaling_structure(self): """Test get_knutson_criterion function.""" - + yearly_steps = 8 - target_predicted_changes = tc_cc.get_knutson_scaling_factor(yearly_steps=yearly_steps) + target_predicted_changes = tc_cc.get_knutson_scaling_factor( + yearly_steps=yearly_steps + ) - np.testing.assert_equal(target_predicted_changes.columns, np.array(['2.6', '4.5', '6.0', '8.5'])) + np.testing.assert_equal( + target_predicted_changes.columns, np.array(["2.6", "4.5", "6.0", "8.5"]) + ) - simulated_years = 
np.arange(tc_cc.YEAR_WINDOWS_PROPS['start'], - tc_cc.YEAR_WINDOWS_PROPS['end']+1, - yearly_steps) + simulated_years = np.arange( + tc_cc.YEAR_WINDOWS_PROPS["start"], + tc_cc.YEAR_WINDOWS_PROPS["end"] + 1, + yearly_steps, + ) np.testing.assert_equal(target_predicted_changes.index, simulated_years) def test_get_knutson_scaling_valid_inputs(self): @@ -114,7 +131,7 @@ def test_get_knutson_scaling_invalid_baseline_end_year(self): tc_cc.get_knutson_scaling_factor(baseline=(1982, 2110)) def test_get_knutson_scaling_no_scaling_factors_for_unknonw_basin(self): - df = tc_cc.get_knutson_scaling_factor(basin='ZZZZZ') + df = tc_cc.get_knutson_scaling_factor(basin="ZZZZZ") self.assertIsInstance(df, pd.DataFrame) np.testing.assert_equal(df.values, np.ones_like(df.values)) @@ -122,30 +139,35 @@ def test_get_gmst(self): """Test get_gmst_info function.""" gmst_info = tc_cc.get_gmst_info() - self.assertAlmostEqual(gmst_info['gmst_start_year'], 1880) - self.assertAlmostEqual(gmst_info['gmst_end_year'], 2100) - self.assertAlmostEqual(len(gmst_info['rcps']), 4) + self.assertAlmostEqual(gmst_info["gmst_start_year"], 1880) + self.assertAlmostEqual(gmst_info["gmst_end_year"], 2100) + self.assertAlmostEqual(len(gmst_info["rcps"]), 4) - self.assertAlmostEqual(gmst_info['gmst_data'].shape, - (len(gmst_info['rcps']), - gmst_info['gmst_end_year']-gmst_info['gmst_start_year']+1)) - self.assertAlmostEqual(gmst_info['gmst_data'][0,0], -0.16) - self.assertAlmostEqual(gmst_info['gmst_data'][0,-1], 1.27641, 4) - self.assertAlmostEqual(gmst_info['gmst_data'][-1,0], -0.16) - self.assertAlmostEqual(gmst_info['gmst_data'][-1,-1], 4.477764, 4) + self.assertAlmostEqual( + gmst_info["gmst_data"].shape, + ( + len(gmst_info["rcps"]), + gmst_info["gmst_end_year"] - gmst_info["gmst_start_year"] + 1, + ), + ) + self.assertAlmostEqual(gmst_info["gmst_data"][0, 0], -0.16) + self.assertAlmostEqual(gmst_info["gmst_data"][0, -1], 1.27641, 4) + self.assertAlmostEqual(gmst_info["gmst_data"][-1, 0], -0.16) + self.assertAlmostEqual(gmst_info["gmst_data"][-1, -1], 4.477764, 4) def test_get_knutson_data_pass(self): """Test get_knutson_data function.""" data_knutson = tc_cc.get_knutson_data() - self.assertAlmostEqual(data_knutson.shape, (4,6,5)) - self.assertAlmostEqual(data_knutson[0,0,0], -34.49) - self.assertAlmostEqual(data_knutson[-1,-1,-1], 15.419) - self.assertAlmostEqual(data_knutson[0,-1,-1], 4.689) - self.assertAlmostEqual(data_knutson[-1,0,0], 5.848) - self.assertAlmostEqual(data_knutson[-1,0,-1], 22.803) - self.assertAlmostEqual(data_knutson[2,3,2], 4.324) + self.assertAlmostEqual(data_knutson.shape, (4, 6, 5)) + self.assertAlmostEqual(data_knutson[0, 0, 0], -34.49) + self.assertAlmostEqual(data_knutson[-1, -1, -1], 15.419) + self.assertAlmostEqual(data_knutson[0, -1, -1], 4.689) + self.assertAlmostEqual(data_knutson[-1, 0, 0], 5.848) + self.assertAlmostEqual(data_knutson[-1, 0, -1], 22.803) + self.assertAlmostEqual(data_knutson[2, 3, 2], 4.324) + if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestKnutson) diff --git a/climada/hazard/test/test_tc_tracks.py b/climada/hazard/test/test_tc_tracks.py index f5c64e22b..df60bc83e 100644 --- a/climada/hazard/test/test_tc_tracks.py +++ b/climada/hazard/test/test_tc_tracks.py @@ -19,35 +19,34 @@ Test tc_tracks module. 
""" -from datetime import datetime as dt import unittest +from datetime import datetime as dt -import xarray as xr +import geopandas as gpd import numpy as np import pandas as pd -import geopandas as gpd -from shapely.geometry import Point, LineString, MultiLineString +import xarray as xr +from shapely.geometry import LineString, MultiLineString, Point import climada.hazard.tc_tracks as tc -from climada import CONFIG -from climada.util import ureg -from climada.util.constants import TC_ANDREW_FL import climada.util.coordinates as u_coord +from climada import CONFIG from climada.entity import Exposures from climada.hazard.test import download_ibtracs - +from climada.util import ureg +from climada.util.constants import TC_ANDREW_FL DATA_DIR = CONFIG.hazard.test_data.dir() TEST_TRACK = DATA_DIR.joinpath("trac_brb_test.csv") TEST_TRACK_SHORT = DATA_DIR.joinpath("trac_short_test.csv") -TEST_RAW_TRACK = DATA_DIR.joinpath('Storm.2016075S11087.ibtracs_all.v03r10.csv') -TEST_TRACK_GETTELMAN = DATA_DIR.joinpath('gettelman_test_tracks.nc') -TEST_TRACK_EMANUEL = DATA_DIR.joinpath('emanuel_test_tracks.mat') -TEST_TRACK_EMANUEL_CORR = DATA_DIR.joinpath('temp_mpircp85cal_full.mat') -TEST_TRACK_CHAZ = DATA_DIR.joinpath('chaz_test_tracks.nc') -TEST_TRACK_STORM = DATA_DIR.joinpath('storm_test_tracks.txt') -TEST_TRACKS_ANTIMERIDIAN = DATA_DIR.joinpath('tracks-antimeridian') -TEST_TRACKS_LEGACY_HDF5 = DATA_DIR.joinpath('tctracks_hdf5_legacy.nc') +TEST_RAW_TRACK = DATA_DIR.joinpath("Storm.2016075S11087.ibtracs_all.v03r10.csv") +TEST_TRACK_GETTELMAN = DATA_DIR.joinpath("gettelman_test_tracks.nc") +TEST_TRACK_EMANUEL = DATA_DIR.joinpath("emanuel_test_tracks.mat") +TEST_TRACK_EMANUEL_CORR = DATA_DIR.joinpath("temp_mpircp85cal_full.mat") +TEST_TRACK_CHAZ = DATA_DIR.joinpath("chaz_test_tracks.nc") +TEST_TRACK_STORM = DATA_DIR.joinpath("storm_test_tracks.txt") +TEST_TRACKS_ANTIMERIDIAN = DATA_DIR.joinpath("tracks-antimeridian") +TEST_TRACKS_LEGACY_HDF5 = DATA_DIR.joinpath("tctracks_hdf5_legacy.nc") class TestIbtracs(unittest.TestCase): @@ -60,170 +59,195 @@ def setUpClass(cls): def test_raw_ibtracs_empty_pass(self): """Test reading empty TC from IBTrACS files""" tc_track = tc.TCTracks.from_ibtracs_netcdf( - provider='usa', storm_id='1988234N13299') + provider="usa", storm_id="1988234N13299" + ) self.assertEqual(tc_track.size, 0) self.assertEqual(tc_track.get_track(), []) def test_raw_ibtracs_invalid_pass(self): """Test reading invalid/non-existing TC from IBTrACS files""" with self.assertRaises(ValueError) as cm: - tc_track = tc.TCTracks.from_ibtracs_netcdf(storm_id='INVALID') + tc_track = tc.TCTracks.from_ibtracs_netcdf(storm_id="INVALID") self.assertIn("IDs are invalid", str(cm.exception)) self.assertIn("INVALID", str(cm.exception)) with self.assertRaises(ValueError) as cm: - tc_track = tc.TCTracks.from_ibtracs_netcdf(storm_id='1988234N13298') + tc_track = tc.TCTracks.from_ibtracs_netcdf(storm_id="1988234N13298") self.assertIn("IDs are not in IBTrACS", str(cm.exception)) self.assertIn("1988234N13298", str(cm.exception)) def test_penv_rmax_penv_pass(self): """from_ibtracs_netcdf""" - tc_track = tc.TCTracks.from_ibtracs_netcdf(provider='usa', storm_id='1992230N11325') + tc_track = tc.TCTracks.from_ibtracs_netcdf( + provider="usa", storm_id="1992230N11325" + ) penv_ref = np.ones(97) * 1010 penv_ref[26:36] = [1011, 1012, 1013, 1014, 1015, 1014, 1014, 1014, 1014, 1012] - self.assertTrue(np.allclose( - tc_track.get_track()['environmental_pressure'].values, penv_ref)) - self.assertTrue(np.allclose( - 
tc_track.get_track()['radius_max_wind'].values, np.zeros(97))) + self.assertTrue( + np.allclose(tc_track.get_track()["environmental_pressure"].values, penv_ref) + ) + self.assertTrue( + np.allclose(tc_track.get_track()["radius_max_wind"].values, np.zeros(97)) + ) def test_ibtracs_raw_pass(self): """Read a tropical cyclone.""" # read without specified provider or estimation of missing values - tc_track = tc.TCTracks.from_ibtracs_netcdf(storm_id='2017242N16333') + tc_track = tc.TCTracks.from_ibtracs_netcdf(storm_id="2017242N16333") track_ds = tc_track.get_track() self.assertEqual(len(tc_track.data), 1) - self.assertEqual(track_ds['time'].dt.year.values[0], 2017) - self.assertEqual(track_ds['time'].dt.month.values[0], 8) - self.assertEqual(track_ds['time'].dt.day.values[0], 30) - self.assertEqual(track_ds['time'].dt.hour.values[0], 0) - self.assertAlmostEqual(track_ds['lat'].values[0], 16.1, places=5) - self.assertAlmostEqual(track_ds['lon'].values[0], -26.9, places=5) - self.assertAlmostEqual(track_ds['max_sustained_wind'].values[0], 30) - self.assertAlmostEqual(track_ds['central_pressure'].values[0], 1008) - self.assertAlmostEqual(track_ds['environmental_pressure'].values[0], 1012) - self.assertAlmostEqual(track_ds['radius_max_wind'].values[0], 60) - self.assertEqual(track_ds['time'].size, 123) - - self.assertAlmostEqual(track_ds['lat'].values[-1], 36.8, places=5) - self.assertAlmostEqual(track_ds['lon'].values[-1], -90.1, places=4) - self.assertAlmostEqual(track_ds['central_pressure'].values[-1], 1005) - self.assertAlmostEqual(track_ds['max_sustained_wind'].values[-1], 15) - self.assertAlmostEqual(track_ds['environmental_pressure'].values[-1], 1008) - self.assertAlmostEqual(track_ds['radius_max_wind'].values[-1], 60) - - self.assertFalse(np.isnan(track_ds['radius_max_wind'].values).any()) - self.assertFalse(np.isnan(track_ds['environmental_pressure'].values).any()) - self.assertFalse(np.isnan(track_ds['max_sustained_wind'].values).any()) - self.assertFalse(np.isnan(track_ds['central_pressure'].values).any()) - self.assertFalse(np.isnan(track_ds['lat'].values).any()) - self.assertFalse(np.isnan(track_ds['lon'].values).any()) - - np.testing.assert_array_equal(track_ds['basin'], 'NA') - self.assertEqual(track_ds.attrs['max_sustained_wind_unit'], 'kn') - self.assertEqual(track_ds.attrs['central_pressure_unit'], 'mb') - self.assertEqual(track_ds.attrs['sid'], '2017242N16333') - self.assertEqual(track_ds.attrs['name'], 'IRMA') - self.assertEqual(track_ds.attrs['orig_event_flag'], True) - self.assertEqual(track_ds.attrs['data_provider'], - 'ibtracs_mixed:lat(official_3h),lon(official_3h),wind(official_3h),' - 'pres(official_3h),rmw(official_3h),poci(official_3h),roci(official_3h)') - self.assertEqual(track_ds.attrs['category'], 5) + self.assertEqual(track_ds["time"].dt.year.values[0], 2017) + self.assertEqual(track_ds["time"].dt.month.values[0], 8) + self.assertEqual(track_ds["time"].dt.day.values[0], 30) + self.assertEqual(track_ds["time"].dt.hour.values[0], 0) + self.assertAlmostEqual(track_ds["lat"].values[0], 16.1, places=5) + self.assertAlmostEqual(track_ds["lon"].values[0], -26.9, places=5) + self.assertAlmostEqual(track_ds["max_sustained_wind"].values[0], 30) + self.assertAlmostEqual(track_ds["central_pressure"].values[0], 1008) + self.assertAlmostEqual(track_ds["environmental_pressure"].values[0], 1012) + self.assertAlmostEqual(track_ds["radius_max_wind"].values[0], 60) + self.assertEqual(track_ds["time"].size, 123) + + self.assertAlmostEqual(track_ds["lat"].values[-1], 36.8, places=5) + 
self.assertAlmostEqual(track_ds["lon"].values[-1], -90.1, places=4) + self.assertAlmostEqual(track_ds["central_pressure"].values[-1], 1005) + self.assertAlmostEqual(track_ds["max_sustained_wind"].values[-1], 15) + self.assertAlmostEqual(track_ds["environmental_pressure"].values[-1], 1008) + self.assertAlmostEqual(track_ds["radius_max_wind"].values[-1], 60) + + self.assertFalse(np.isnan(track_ds["radius_max_wind"].values).any()) + self.assertFalse(np.isnan(track_ds["environmental_pressure"].values).any()) + self.assertFalse(np.isnan(track_ds["max_sustained_wind"].values).any()) + self.assertFalse(np.isnan(track_ds["central_pressure"].values).any()) + self.assertFalse(np.isnan(track_ds["lat"].values).any()) + self.assertFalse(np.isnan(track_ds["lon"].values).any()) + + np.testing.assert_array_equal(track_ds["basin"], "NA") + self.assertEqual(track_ds.attrs["max_sustained_wind_unit"], "kn") + self.assertEqual(track_ds.attrs["central_pressure_unit"], "mb") + self.assertEqual(track_ds.attrs["sid"], "2017242N16333") + self.assertEqual(track_ds.attrs["name"], "IRMA") + self.assertEqual(track_ds.attrs["orig_event_flag"], True) + self.assertEqual( + track_ds.attrs["data_provider"], + "ibtracs_mixed:lat(official_3h),lon(official_3h),wind(official_3h)," + "pres(official_3h),rmw(official_3h),poci(official_3h),roci(official_3h)", + ) + self.assertEqual(track_ds.attrs["category"], 5) def test_ibtracs_with_provider(self): """Read a tropical cyclone with and without explicit provider.""" - storm_id = '2012152N12130' - tc_track = tc.TCTracks.from_ibtracs_netcdf(storm_id=storm_id, provider='usa') + storm_id = "2012152N12130" + tc_track = tc.TCTracks.from_ibtracs_netcdf(storm_id=storm_id, provider="usa") track_ds = tc_track.get_track() - self.assertEqual(track_ds['time'].size, 51) - self.assertEqual(track_ds.attrs['data_provider'], 'ibtracs_usa') - self.assertAlmostEqual(track_ds['lat'].values[50], 34.3, places=5) - self.assertAlmostEqual(track_ds['central_pressure'].values[50], 989, places=5) - self.assertAlmostEqual(track_ds['radius_max_wind'].values[46], 20, places=5) + self.assertEqual(track_ds["time"].size, 51) + self.assertEqual(track_ds.attrs["data_provider"], "ibtracs_usa") + self.assertAlmostEqual(track_ds["lat"].values[50], 34.3, places=5) + self.assertAlmostEqual(track_ds["central_pressure"].values[50], 989, places=5) + self.assertAlmostEqual(track_ds["radius_max_wind"].values[46], 20, places=5) tc_track = tc.TCTracks.from_ibtracs_netcdf(storm_id=storm_id) track_ds = tc_track.get_track() - self.assertEqual(track_ds['time'].size, 35) - self.assertEqual(track_ds.attrs['data_provider'], - 'ibtracs_mixed:lat(official_3h),lon(official_3h),wind(official_3h),' - 'pres(official_3h),rmw(usa),poci(usa),roci(usa)') - self.assertAlmostEqual(track_ds['lat'].values[-1], 31.40, places=5) - self.assertAlmostEqual(track_ds['central_pressure'].values[-1], 980, places=5) + self.assertEqual(track_ds["time"].size, 35) + self.assertEqual( + track_ds.attrs["data_provider"], + "ibtracs_mixed:lat(official_3h),lon(official_3h),wind(official_3h)," + "pres(official_3h),rmw(usa),poci(usa),roci(usa)", + ) + self.assertAlmostEqual(track_ds["lat"].values[-1], 31.40, places=5) + self.assertAlmostEqual(track_ds["central_pressure"].values[-1], 980, places=5) def test_ibtracs_antimeridian(self): """Read a track that crosses the antimeridian and make sure that lon is consistent""" - storm_id = '2013224N12220' + storm_id = "2013224N12220" # the officially responsible agencies 'usa' and 'tokyo' use different signs in lon, but we # have 
to `estimate_missing` because both have gaps in reported values - tc_track = tc.TCTracks.from_ibtracs_netcdf(storm_id=storm_id, provider=['official_3h'], - estimate_missing=True) + tc_track = tc.TCTracks.from_ibtracs_netcdf( + storm_id=storm_id, provider=["official_3h"], estimate_missing=True + ) track_ds = tc_track.get_track() - np.testing.assert_array_less(0, track_ds['lon']) + np.testing.assert_array_less(0, track_ds["lon"]) def test_ibtracs_estimate_missing(self): """Read a tropical cyclone and estimate missing values.""" - storm_id = '2012152N12130' + storm_id = "2012152N12130" - tc_track = tc.TCTracks.from_ibtracs_netcdf(storm_id=storm_id, estimate_missing=True) + tc_track = tc.TCTracks.from_ibtracs_netcdf( + storm_id=storm_id, estimate_missing=True + ) track_ds = tc_track.get_track() # less time steps are discarded, leading to a larger total size - self.assertEqual(track_ds['time'].size, 99) - self.assertEqual(track_ds.attrs['data_provider'], - 'ibtracs_mixed:lat(official_3h),lon(official_3h),wind(official_3h),' - 'pres(official_3h),rmw(usa),poci(usa),roci(usa)') - self.assertAlmostEqual(track_ds['lat'].values[44], 33.30, places=5) - self.assertAlmostEqual(track_ds['central_pressure'].values[44], 976, places=5) - self.assertAlmostEqual(track_ds['central_pressure'].values[42], 980, places=5) + self.assertEqual(track_ds["time"].size, 99) + self.assertEqual( + track_ds.attrs["data_provider"], + "ibtracs_mixed:lat(official_3h),lon(official_3h),wind(official_3h)," + "pres(official_3h),rmw(usa),poci(usa),roci(usa)", + ) + self.assertAlmostEqual(track_ds["lat"].values[44], 33.30, places=5) + self.assertAlmostEqual(track_ds["central_pressure"].values[44], 976, places=5) + self.assertAlmostEqual(track_ds["central_pressure"].values[42], 980, places=5) # the wind speed at position 44 is missing in the original data - self.assertAlmostEqual(track_ds['max_sustained_wind'].values[44], 58, places=0) - self.assertAlmostEqual(track_ds['radius_oci'].values[40], 160, places=0) + self.assertAlmostEqual(track_ds["max_sustained_wind"].values[44], 58, places=0) + self.assertAlmostEqual(track_ds["radius_oci"].values[40], 160, places=0) # after position 42, ROCI is missing in the original data - self.assertAlmostEqual(track_ds['radius_oci'].values[42], 200, places=-1) - self.assertAlmostEqual(track_ds['radius_oci'].values[85], 165, places=-1) - self.assertAlmostEqual(track_ds['radius_oci'].values[95], 155, places=-1) + self.assertAlmostEqual(track_ds["radius_oci"].values[42], 200, places=-1) + self.assertAlmostEqual(track_ds["radius_oci"].values[85], 165, places=-1) + self.assertAlmostEqual(track_ds["radius_oci"].values[95], 155, places=-1) def test_ibtracs_official(self): """Read a tropical cyclone, only officially reported values.""" - storm_id = '2012152N12130' + storm_id = "2012152N12130" tc_track = tc.TCTracks.from_ibtracs_netcdf( - storm_id=storm_id, interpolate_missing=False, provider='official') + storm_id=storm_id, interpolate_missing=False, provider="official" + ) track_ds = tc_track.get_track() - self.assertEqual(track_ds['time'].size, 21) - self.assertEqual(track_ds.attrs['data_provider'], 'ibtracs_official') - self.assertAlmostEqual(track_ds['lon'].values[19], 137.6, places=4) - self.assertAlmostEqual(track_ds['central_pressure'].values[19], 980, places=5) - np.testing.assert_array_equal(track_ds['radius_max_wind'].values, 0) + self.assertEqual(track_ds["time"].size, 21) + self.assertEqual(track_ds.attrs["data_provider"], "ibtracs_official") + self.assertAlmostEqual(track_ds["lon"].values[19], 
137.6, places=4) + self.assertAlmostEqual(track_ds["central_pressure"].values[19], 980, places=5) + np.testing.assert_array_equal(track_ds["radius_max_wind"].values, 0) def test_ibtracs_scale_wind(self): """Read a tropical cyclone and scale wind speed according to agency.""" - storm_id = '2012152N12130' + storm_id = "2012152N12130" - tc_track = tc.TCTracks.from_ibtracs_netcdf(storm_id=storm_id, rescale_windspeeds=True) + tc_track = tc.TCTracks.from_ibtracs_netcdf( + storm_id=storm_id, rescale_windspeeds=True + ) track_ds = tc_track.get_track() - self.assertAlmostEqual(track_ds['max_sustained_wind'].values[34], (55 - 23.3) / 0.6, places=5) + self.assertAlmostEqual( + track_ds["max_sustained_wind"].values[34], (55 - 23.3) / 0.6, places=5 + ) - tc_track = tc.TCTracks.from_ibtracs_netcdf(storm_id=storm_id, rescale_windspeeds=False) + tc_track = tc.TCTracks.from_ibtracs_netcdf( + storm_id=storm_id, rescale_windspeeds=False + ) track_ds = tc_track.get_track() - self.assertAlmostEqual(track_ds['max_sustained_wind'].values[34], 55, places=5) + self.assertAlmostEqual(track_ds["max_sustained_wind"].values[34], 55, places=5) def test_ibtracs_interpolate_missing(self): """Read a tropical cyclone with and without interpolating missing values.""" - storm_id = '2010066S19050' + storm_id = "2010066S19050" - tc_track = tc.TCTracks.from_ibtracs_netcdf(storm_id=storm_id, interpolate_missing=False) + tc_track = tc.TCTracks.from_ibtracs_netcdf( + storm_id=storm_id, interpolate_missing=False + ) track_ds = tc_track.get_track() self.assertEqual(track_ds.time.size, 50) - self.assertAlmostEqual(track_ds['central_pressure'].values[30], 992, places=5) - self.assertAlmostEqual(track_ds['central_pressure'].values[31], 1006, places=5) + self.assertAlmostEqual(track_ds["central_pressure"].values[30], 992, places=5) + self.assertAlmostEqual(track_ds["central_pressure"].values[31], 1006, places=5) - tc_track = tc.TCTracks.from_ibtracs_netcdf(storm_id=storm_id, interpolate_missing=True) + tc_track = tc.TCTracks.from_ibtracs_netcdf( + storm_id=storm_id, interpolate_missing=True + ) track_ds = tc_track.get_track() - self.assertEqual(track_ds['time'].size, 65) - self.assertAlmostEqual(track_ds['central_pressure'].values[30], 992, places=5) - self.assertAlmostEqual(track_ds['central_pressure'].values[38], 999, places=5) - self.assertAlmostEqual(track_ds['central_pressure'].values[46], 1006, places=5) + self.assertEqual(track_ds["time"].size, 65) + self.assertAlmostEqual(track_ds["central_pressure"].values[30], 992, places=5) + self.assertAlmostEqual(track_ds["central_pressure"].values[38], 999, places=5) + self.assertAlmostEqual(track_ds["central_pressure"].values[46], 1006, places=5) def test_ibtracs_range(self): """Read several TCs.""" @@ -231,34 +255,47 @@ def test_ibtracs_range(self): self.assertEqual(tc_track.size, 0) tc_track = tc.TCTracks.from_ibtracs_netcdf( - provider='usa', storm_id=None, year_range=(1915, 1916), basin='WP') + provider="usa", storm_id=None, year_range=(1915, 1916), basin="WP" + ) self.assertEqual(tc_track.size, 0) tc_track = tc.TCTracks.from_ibtracs_netcdf( - provider='usa', year_range=(1993, 1994), basin='EP', estimate_missing=False) + provider="usa", year_range=(1993, 1994), basin="EP", estimate_missing=False + ) self.assertEqual(tc_track.size, 33) tc_track = tc.TCTracks.from_ibtracs_netcdf( - provider='usa', year_range=(1993, 1994), basin='EP', estimate_missing=True) + provider="usa", year_range=(1993, 1994), basin="EP", estimate_missing=True + ) self.assertEqual(tc_track.size, 45) def 
test_ibtracs_correct_pass(self): """Check estimate_missing option""" tc_try = tc.TCTracks.from_ibtracs_netcdf( - provider='usa', storm_id='1982267N25289', estimate_missing=True) - self.assertAlmostEqual(tc_try.data[0]['central_pressure'].values[0], 1013, places=0) - self.assertAlmostEqual(tc_try.data[0]['central_pressure'].values[5], 1008, places=0) - self.assertAlmostEqual(tc_try.data[0]['central_pressure'].values[-1], 1012, places=0) + provider="usa", storm_id="1982267N25289", estimate_missing=True + ) + self.assertAlmostEqual( + tc_try.data[0]["central_pressure"].values[0], 1013, places=0 + ) + self.assertAlmostEqual( + tc_try.data[0]["central_pressure"].values[5], 1008, places=0 + ) + self.assertAlmostEqual( + tc_try.data[0]["central_pressure"].values[-1], 1012, places=0 + ) def test_ibtracs_discard_single_points(self): """Check discard_single_points option""" passed = False for year in range(1863, 1981): tc_track_singlept = tc.TCTracks.from_ibtracs_netcdf( - provider='usa', year_range=(year,year), discard_single_points=False) - n_singlepts = np.sum([x['time'].size == 1 for x in tc_track_singlept.data]) + provider="usa", year_range=(year, year), discard_single_points=False + ) + n_singlepts = np.sum([x["time"].size == 1 for x in tc_track_singlept.data]) if n_singlepts > 0: - tc_track = tc.TCTracks.from_ibtracs_netcdf(provider='usa', year_range=(year,year)) + tc_track = tc.TCTracks.from_ibtracs_netcdf( + provider="usa", year_range=(year, year) + ) if tc_track.size == tc_track_singlept.size - n_singlepts: passed = True break @@ -270,14 +307,40 @@ def test_ibtracs_additional_variables(self): # agency-specific and that are not already considered by other parts of # `from_ibtracs_netcdf`: addtl_vars = [ - 'numobs', 'season', 'number', 'subbasin', 'name', 'source_usa', 'source_jma', - 'source_cma', 'source_hko', 'source_new', 'source_reu', 'source_bom', 'source_nad', - 'source_wel', 'source_td5', 'source_td6', 'source_ds8', 'source_neu', 'source_mlc', - 'iso_time', 'nature', 'wmo_wind', 'wmo_pres', 'wmo_agency', 'track_type', - 'main_track_sid', 'dist2land', 'landfall', 'iflag', 'storm_speed', 'storm_dir', + "numobs", + "season", + "number", + "subbasin", + "name", + "source_usa", + "source_jma", + "source_cma", + "source_hko", + "source_new", + "source_reu", + "source_bom", + "source_nad", + "source_wel", + "source_td5", + "source_td6", + "source_ds8", + "source_neu", + "source_mlc", + "iso_time", + "nature", + "wmo_wind", + "wmo_pres", + "wmo_agency", + "track_type", + "main_track_sid", + "dist2land", + "landfall", + "iflag", + "storm_speed", + "storm_dir", ] tc_track = tc.TCTracks.from_ibtracs_netcdf( - storm_id='2017242N16333', + storm_id="2017242N16333", additional_variables=addtl_vars, ) track_ds = tc_track.get_track() @@ -295,6 +358,7 @@ def test_ibtracs_additional_variables(self): self.assertEqual(track_ds["storm_speed"].values[5], 11.0) self.assertEqual(track_ds["storm_speed"].values[-1], 8.0) + class TestIO(unittest.TestCase): """Test reading of tracks from files of different formats""" @@ -307,7 +371,9 @@ def test_netcdf_io(self): path = DATA_DIR.joinpath("tc_tracks_nc") path.mkdir(exist_ok=True) tc_track = tc.TCTracks.from_ibtracs_netcdf( - provider='usa', storm_id='1988234N13299', estimate_missing=True, + provider="usa", + storm_id="1988234N13299", + estimate_missing=True, additional_variables=["numobs", "storm_speed", "nature"], ) tc_track.write_netcdf(path) @@ -328,14 +394,15 @@ def test_read_legacy_netcdf(self): anti_track = tc.TCTracks.from_netcdf(TEST_TRACKS_ANTIMERIDIAN) 
for tr in anti_track.data: - self.assertEqual(tr['basin'].shape, tr['time'].shape) - np.testing.assert_array_equal(tr['basin'], "SP") + self.assertEqual(tr["basin"].shape, tr["time"].shape) + np.testing.assert_array_equal(tr["basin"], "SP") def test_hdf5_io(self): """Test writing and reading hdf5 TCTracks instances""" path = DATA_DIR.joinpath("tc_tracks.h5") tc_track = tc.TCTracks.from_ibtracs_netcdf( - provider='usa', year_range=(1993, 1994), basin='EP', estimate_missing=True) + provider="usa", year_range=(1993, 1994), basin="EP", estimate_missing=True + ) tc_track.write_hdf5(path) tc_read = tc.TCTracks.from_hdf5(path) path.unlink() @@ -360,72 +427,82 @@ def test_hdf5_io(self): def test_from_processed_ibtracs_csv(self): tc_track = tc.TCTracks.from_processed_ibtracs_csv(TEST_TRACK) - self.assertEqual(tc_track.data[0]['time'].size, 38) - self.assertEqual(tc_track.data[0]['lon'][11], -39.60) - self.assertEqual(tc_track.data[0]['lat'][23], 14.10) - self.assertEqual(tc_track.data[0]['time_step'][7], 6) - self.assertEqual(np.max(tc_track.data[0]['radius_max_wind']), 0) - self.assertEqual(np.min(tc_track.data[0]['radius_max_wind']), 0) - self.assertEqual(tc_track.data[0]['max_sustained_wind'][21], 55) - self.assertAlmostEqual(tc_track.data[0]['central_pressure'].values[29], 976, places=0) - self.assertEqual(np.max(tc_track.data[0]['environmental_pressure']), 1010) - self.assertEqual(np.min(tc_track.data[0]['environmental_pressure']), 1010) - self.assertEqual(tc_track.data[0]['time'].dt.year[13], 1951) - self.assertEqual(tc_track.data[0]['time'].dt.month[26], 9) - self.assertEqual(tc_track.data[0]['time'].dt.day[7], 29) - self.assertEqual(tc_track.data[0].attrs['max_sustained_wind_unit'], 'kn') - self.assertEqual(tc_track.data[0].attrs['central_pressure_unit'], 'mb') - self.assertEqual(tc_track.data[0].attrs['orig_event_flag'], 1) - self.assertEqual(tc_track.data[0].attrs['name'], '1951239N12334') - self.assertEqual(tc_track.data[0].attrs['sid'], '1951239N12334') - self.assertEqual(tc_track.data[0].attrs['id_no'], 1951239012334) - self.assertEqual(tc_track.data[0].attrs['data_provider'], 'hurdat_atl') - np.testing.assert_array_equal(tc_track.data[0]['basin'], 'NA') - self.assertEqual(tc_track.data[0].attrs['id_no'], 1951239012334) - self.assertEqual(tc_track.data[0].attrs['category'], 1) + self.assertEqual(tc_track.data[0]["time"].size, 38) + self.assertEqual(tc_track.data[0]["lon"][11], -39.60) + self.assertEqual(tc_track.data[0]["lat"][23], 14.10) + self.assertEqual(tc_track.data[0]["time_step"][7], 6) + self.assertEqual(np.max(tc_track.data[0]["radius_max_wind"]), 0) + self.assertEqual(np.min(tc_track.data[0]["radius_max_wind"]), 0) + self.assertEqual(tc_track.data[0]["max_sustained_wind"][21], 55) + self.assertAlmostEqual( + tc_track.data[0]["central_pressure"].values[29], 976, places=0 + ) + self.assertEqual(np.max(tc_track.data[0]["environmental_pressure"]), 1010) + self.assertEqual(np.min(tc_track.data[0]["environmental_pressure"]), 1010) + self.assertEqual(tc_track.data[0]["time"].dt.year[13], 1951) + self.assertEqual(tc_track.data[0]["time"].dt.month[26], 9) + self.assertEqual(tc_track.data[0]["time"].dt.day[7], 29) + self.assertEqual(tc_track.data[0].attrs["max_sustained_wind_unit"], "kn") + self.assertEqual(tc_track.data[0].attrs["central_pressure_unit"], "mb") + self.assertEqual(tc_track.data[0].attrs["orig_event_flag"], 1) + self.assertEqual(tc_track.data[0].attrs["name"], "1951239N12334") + self.assertEqual(tc_track.data[0].attrs["sid"], "1951239N12334") + 
self.assertEqual(tc_track.data[0].attrs["id_no"], 1951239012334) + self.assertEqual(tc_track.data[0].attrs["data_provider"], "hurdat_atl") + np.testing.assert_array_equal(tc_track.data[0]["basin"], "NA") + self.assertEqual(tc_track.data[0].attrs["id_no"], 1951239012334) + self.assertEqual(tc_track.data[0].attrs["category"], 1) def test_from_simulations_emanuel(self): - tc_track = tc.TCTracks.from_simulations_emanuel(TEST_TRACK_EMANUEL, hemisphere='N') + tc_track = tc.TCTracks.from_simulations_emanuel( + TEST_TRACK_EMANUEL, hemisphere="N" + ) self.assertEqual(len(tc_track.data), 4) - self.assertEqual(tc_track.data[0]['time'].size, 93) - self.assertEqual(tc_track.data[0]['lon'][11], -115.57) - self.assertEqual(tc_track.data[0]['lat'][23], 10.758) - self.assertEqual(tc_track.data[0]['time_step'][7], 2.0) - self.assertEqual(tc_track.data[0]['time_step'].dtype, float) - self.assertAlmostEqual(tc_track.data[0]['radius_max_wind'][15], 44.27645788336934) - self.assertEqual(tc_track.data[0]['max_sustained_wind'][21], 27.1) - self.assertEqual(tc_track.data[0]['central_pressure'][29], 995.31) - self.assertTrue(np.all(tc_track.data[0]['environmental_pressure'] == 1010)) - self.assertTrue(np.all(tc_track.data[0]['time'].dt.year == 1950)) - self.assertEqual(tc_track.data[0]['time'].dt.month[26], 10) - self.assertEqual(tc_track.data[0]['time'].dt.day[7], 26) - self.assertEqual(tc_track.data[0].attrs['max_sustained_wind_unit'], 'kn') - self.assertEqual(tc_track.data[0].attrs['central_pressure_unit'], 'mb') - self.assertEqual(tc_track.data[0].attrs['sid'], '1') - self.assertEqual(tc_track.data[0].attrs['name'], '1') - self.assertEqual(tc_track.data[0]['basin'].dtype, ' 0)) def test_category_pass(self): """Test category computation.""" max_sus_wind = np.array([25, 30, 35, 40, 45, 45, 45, 45, 35, 25]) - max_sus_wind_unit = 'kn' + max_sus_wind_unit = "kn" cat = tc.set_category(max_sus_wind, max_sus_wind_unit) self.assertEqual(0, cat) max_sus_wind = np.array([25, 25, 25, 30, 30, 30, 30, 30, 25, 25, 20]) - max_sus_wind_unit = 'kn' + max_sus_wind_unit = "kn" cat = tc.set_category(max_sus_wind, max_sus_wind_unit) self.assertEqual(-1, cat) - max_sus_wind = np.array([80, 90, 100, 115, 120, 125, 130, - 120, 110, 80, 75, 80, 65]) - max_sus_wind_unit = 'kn' + max_sus_wind = np.array( + [80, 90, 100, 115, 120, 125, 130, 120, 110, 80, 75, 80, 65] + ) + max_sus_wind_unit = "kn" cat = tc.set_category(max_sus_wind, max_sus_wind_unit) self.assertEqual(4, cat) - max_sus_wind = np.array([ - 28.769475, 34.52337, 40.277265, 46.03116, 51.785055, 51.785055, - 51.785055, 51.785055, 40.277265, 28.769475 - ]) - max_sus_wind_unit = 'mph' + max_sus_wind = np.array( + [ + 28.769475, + 34.52337, + 40.277265, + 46.03116, + 51.785055, + 51.785055, + 51.785055, + 51.785055, + 40.277265, + 28.769475, + ] + ) + max_sus_wind_unit = "mph" cat = tc.set_category(max_sus_wind, max_sus_wind_unit) self.assertEqual(0, cat) - max_sus_wind = np.array([ - 12.86111437, 12.86111437, 12.86111437, 15.43333724, 15.43333724, - 15.43333724, 15.43333724, 15.43333724, 12.86111437, 12.86111437, - 10.2888915 - ]) - max_sus_wind_unit = 'm/s' + max_sus_wind = np.array( + [ + 12.86111437, + 12.86111437, + 12.86111437, + 15.43333724, + 15.43333724, + 15.43333724, + 15.43333724, + 15.43333724, + 12.86111437, + 12.86111437, + 10.2888915, + ] + ) + max_sus_wind_unit = "m/s" cat = tc.set_category(max_sus_wind, max_sus_wind_unit) self.assertEqual(-1, cat) - max_sus_wind = np.array([ - 148.16, 166.68, 185.2, 212.98, 222.24, 231.5, 240.76, 222.24, - 203.72, 148.16, 138.9, 
148.16, 120.38 - ]) - max_sus_wind_unit = 'km/h' + max_sus_wind = np.array( + [ + 148.16, + 166.68, + 185.2, + 212.98, + 222.24, + 231.5, + 240.76, + 222.24, + 203.72, + 148.16, + 138.9, + 148.16, + 120.38, + ] + ) + max_sus_wind_unit = "km/h" cat = tc.set_category(max_sus_wind, max_sus_wind_unit) self.assertEqual(4, cat) @@ -891,9 +1189,13 @@ def test_estimate_rmw_pass(self): tc_track = tc.TCTracks.from_processed_ibtracs_csv(TEST_TRACK) tc_track.equal_timestep() - rad_max_wind = tc.estimate_rmw( - tc_track.data[0]['radius_max_wind'].values, - tc_track.data[0]['central_pressure'].values) * NM_TO_KM + rad_max_wind = ( + tc.estimate_rmw( + tc_track.data[0]["radius_max_wind"].values, + tc_track.data[0]["central_pressure"].values, + ) + * NM_TO_KM + ) self.assertAlmostEqual(rad_max_wind[0], 87, places=0) self.assertAlmostEqual(rad_max_wind[10], 87, places=0) @@ -910,19 +1212,19 @@ def test_tracks_in_exp_pass(self): """Check if tracks in exp are filtered correctly""" # Load two tracks from ibtracks - storms = {'in': '2000233N12316', 'out': '2000160N21267'} + storms = {"in": "2000233N12316", "out": "2000160N21267"} tc_track = tc.TCTracks.from_ibtracs_netcdf(storm_id=list(storms.values())) # Define exposure from geopandas - world = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres')) + world = gpd.read_file(gpd.datasets.get_path("naturalearth_lowres")) exp_world = Exposures(world) - exp = Exposures(exp_world.gdf[exp_world.gdf['name']=='Cuba']) + exp = Exposures(exp_world.gdf[exp_world.gdf["name"] == "Cuba"]) # Compute tracks in exp tracks_in_exp = tc_track.tracks_in_exp(exp, buffer=1.0) - self.assertTrue(tracks_in_exp.get_track(storms['in'])) - self.assertFalse(tracks_in_exp.get_track(storms['out'])) + self.assertTrue(tracks_in_exp.get_track(storms["in"])) + self.assertFalse(tracks_in_exp.get_track(storms["out"])) def test_get_landfall_idx(self): """Test identification of landfalls""" @@ -930,35 +1232,45 @@ def test_get_landfall_idx(self): datetimes = list() for h in range(0, 24, 3): datetimes.append(dt(2000, 1, 1, h)) - tr_ds.coords['time'] = ('time', datetimes) + tr_ds.coords["time"] = ("time", datetimes) # no landfall - tr_ds['on_land'] = np.repeat(np.array([False]), 8) + tr_ds["on_land"] = np.repeat(np.array([False]), 8) sea_land_idx, land_sea_idx = tc._get_landfall_idx(tr_ds) - self.assertEqual([len(sea_land_idx), len(land_sea_idx)], [0,0]) + self.assertEqual([len(sea_land_idx), len(land_sea_idx)], [0, 0]) # single landfall - tr_ds['on_land'] = np.array([False, False, True, True, True, False, False, False]) + tr_ds["on_land"] = np.array( + [False, False, True, True, True, False, False, False] + ) sea_land_idx, land_sea_idx = tc._get_landfall_idx(tr_ds) - self.assertEqual([len(sea_land_idx), len(land_sea_idx)], [1,1]) + self.assertEqual([len(sea_land_idx), len(land_sea_idx)], [1, 1]) self.assertEqual([sea_land_idx, land_sea_idx], [2, 5]) # single landfall from starting point - tr_ds['on_land'] = np.array([True, True, True, True, True, False, False, False]) + tr_ds["on_land"] = np.array([True, True, True, True, True, False, False, False]) sea_land_idx, land_sea_idx = tc._get_landfall_idx(tr_ds) - self.assertEqual([len(sea_land_idx), len(land_sea_idx)], [0,0]) - sea_land_idx, land_sea_idx = tc._get_landfall_idx(tr_ds, include_starting_landfall=True) + self.assertEqual([len(sea_land_idx), len(land_sea_idx)], [0, 0]) + sea_land_idx, land_sea_idx = tc._get_landfall_idx( + tr_ds, include_starting_landfall=True + ) self.assertEqual([sea_land_idx, land_sea_idx], [0, 5]) # two landfalls - 
tr_ds['on_land'] = np.array([False, True, True, False, False, False, True, True]) + tr_ds["on_land"] = np.array( + [False, True, True, False, False, False, True, True] + ) sea_land_idx, land_sea_idx = tc._get_landfall_idx(tr_ds) - self.assertEqual([len(sea_land_idx), len(land_sea_idx)], [2,2]) - self.assertEqual(sea_land_idx.tolist(), [1,6]) - self.assertEqual(land_sea_idx.tolist(), [3,8]) + self.assertEqual([len(sea_land_idx), len(land_sea_idx)], [2, 2]) + self.assertEqual(sea_land_idx.tolist(), [1, 6]) + self.assertEqual(land_sea_idx.tolist(), [3, 8]) # two landfalls, starting on land - tr_ds['on_land'] = np.array([True, True, False, False, True, True, False, False]) + tr_ds["on_land"] = np.array( + [True, True, False, False, True, True, False, False] + ) sea_land_idx, land_sea_idx = tc._get_landfall_idx(tr_ds) self.assertEqual([sea_land_idx, land_sea_idx], [4, 6]) - sea_land_idx, land_sea_idx = tc._get_landfall_idx(tr_ds, include_starting_landfall=True) - self.assertEqual(sea_land_idx.tolist(), [0,4]) - self.assertEqual(land_sea_idx.tolist(), [2,6]) + sea_land_idx, land_sea_idx = tc._get_landfall_idx( + tr_ds, include_starting_landfall=True + ) + self.assertEqual(sea_land_idx.tolist(), [0, 4]) + self.assertEqual(land_sea_idx.tolist(), [2, 6]) def test_track_land_params(self): """Test identification of points on land and distance since landfall""" @@ -969,24 +1281,22 @@ def test_track_land_params(self): lon_shift = np.array([-360, 0, 360]) # ensure both points are considered on land as is np.testing.assert_array_equal( - u_coord.coord_on_land(lat = lat_test, lon = lon_test), - on_land + u_coord.coord_on_land(lat=lat_test, lon=lon_test), on_land ) # independently on shifts by 360 degrees in longitude np.testing.assert_array_equal( - u_coord.coord_on_land(lat = lat_test, lon = lon_test + lon_shift), - on_land + u_coord.coord_on_land(lat=lat_test, lon=lon_test + lon_shift), on_land ) np.testing.assert_array_equal( - u_coord.coord_on_land(lat = lat_test, lon = lon_test - lon_shift), - on_land + u_coord.coord_on_land(lat=lat_test, lon=lon_test - lon_shift), on_land ) # also when longitude is within correct range np.testing.assert_array_equal( - u_coord.coord_on_land(lat = lat_test, lon = u_coord.lon_normalize(lon_test)), - on_land + u_coord.coord_on_land(lat=lat_test, lon=u_coord.lon_normalize(lon_test)), + on_land, ) + # Execute Tests if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestFuncs) diff --git a/climada/hazard/test/test_tc_tracks_synth.py b/climada/hazard/test/test_tc_tracks_synth.py index 1b2cca2c6..f0b5c0b44 100644 --- a/climada/hazard/test/test_tc_tracks_synth.py +++ b/climada/hazard/test/test_tc_tracks_synth.py @@ -29,17 +29,16 @@ import climada.hazard.tc_tracks as tc import climada.hazard.tc_tracks_synth as tc_synth import climada.util.coordinates -from climada.util.constants import TC_ANDREW_FL from climada.hazard.test import download_ibtracs +from climada.util.constants import TC_ANDREW_FL - -DATA_DIR = Path(__file__).parent.joinpath('data') +DATA_DIR = Path(__file__).parent.joinpath("data") TEST_TRACK = DATA_DIR.joinpath("trac_brb_test.csv") TEST_TRACK_SHORT = DATA_DIR.joinpath("trac_short_test.csv") -TEST_TRACK_DECAY_END_OCEAN = DATA_DIR.joinpath('1997018S11059_gen3.nc') -TEST_TRACK_DECAY_END_OCEAN_HIST = DATA_DIR.joinpath('1997018S11059.nc') -TEST_TRACK_DECAY_PENV_GT_PCEN = DATA_DIR.joinpath('1988021S12080_gen2.nc') -TEST_TRACK_DECAY_PENV_GT_PCEN_HIST = DATA_DIR.joinpath('1988021S12080.nc') +TEST_TRACK_DECAY_END_OCEAN = 
DATA_DIR.joinpath("1997018S11059_gen3.nc") +TEST_TRACK_DECAY_END_OCEAN_HIST = DATA_DIR.joinpath("1997018S11059.nc") +TEST_TRACK_DECAY_PENV_GT_PCEN = DATA_DIR.joinpath("1988021S12080_gen2.nc") +TEST_TRACK_DECAY_PENV_GT_PCEN_HIST = DATA_DIR.joinpath("1988021S12080.nc") class TestDecay(unittest.TestCase): @@ -56,17 +55,29 @@ def test_apply_decay_no_landfall_pass(self): extent=extent, resolution=10 ) tc.track_land_params(tc_track.data[0], land_geom) - tc_track.data[0]['orig_event_flag'] = False + tc_track.data[0]["orig_event_flag"] = False tc_ref = tc_track.data[0].copy() tc_synth._apply_land_decay(tc_track.data, dict(), dict(), land_geom) - self.assertTrue(np.allclose(tc_track.data[0]['max_sustained_wind'].values, - tc_ref['max_sustained_wind'].values)) - self.assertTrue(np.allclose(tc_track.data[0]['central_pressure'].values, - tc_ref['central_pressure'].values)) - self.assertTrue(np.allclose(tc_track.data[0]['environmental_pressure'].values, - tc_ref['environmental_pressure'].values)) - self.assertTrue(np.all(np.isnan(tc_track.data[0]['dist_since_lf'].values))) + self.assertTrue( + np.allclose( + tc_track.data[0]["max_sustained_wind"].values, + tc_ref["max_sustained_wind"].values, + ) + ) + self.assertTrue( + np.allclose( + tc_track.data[0]["central_pressure"].values, + tc_ref["central_pressure"].values, + ) + ) + self.assertTrue( + np.allclose( + tc_track.data[0]["environmental_pressure"].values, + tc_ref["environmental_pressure"].values, + ) + ) + self.assertTrue(np.all(np.isnan(tc_track.data[0]["dist_since_lf"].values))) def test_apply_decay_pass(self): """Test _apply_land_decay against MATLAB reference.""" @@ -77,7 +88,7 @@ def test_apply_decay_pass(self): 1: 0.0038950967656296597, 2: 0.0038950967656296597, 3: 0.0038950967656296597, - 5: 0.0038950967656296597 + 5: 0.0038950967656296597, } p_rel = { @@ -87,62 +98,141 @@ def test_apply_decay_pass(self): 1: (1.0499941, 0.007978940084158488), 2: (1.0499941, 0.007978940084158488), 3: (1.0499941, 0.007978940084158488), - 5: (1.0499941, 0.007978940084158488) + 5: (1.0499941, 0.007978940084158488), } tc_track = tc.TCTracks.from_processed_ibtracs_csv(TC_ANDREW_FL) - tc_track.data[0]['orig_event_flag'] = False + tc_track.data[0]["orig_event_flag"] = False extent = tc_track.get_extent() land_geom = climada.util.coordinates.get_land_geometry( extent=extent, resolution=10 ) tc.track_land_params(tc_track.data[0], land_geom) - tc_synth._apply_land_decay(tc_track.data, v_rel, p_rel, land_geom, - s_rel=True, check_plot=False) - - p_ref = np.array([ - 1.010000000000000, 1.009000000000000, 1.008000000000000, - 1.006000000000000, 1.003000000000000, 1.002000000000000, - 1.001000000000000, 1.000000000000000, 1.000000000000000, - 1.001000000000000, 1.002000000000000, 1.005000000000000, - 1.007000000000000, 1.010000000000000, 1.010000000000000, - 1.010000000000000, 1.010000000000000, 1.010000000000000, - 1.010000000000000, 1.007000000000000, 1.004000000000000, - 1.000000000000000, 0.994000000000000, 0.981000000000000, - 0.969000000000000, 0.961000000000000, 0.947000000000000, - 0.933000000000000, 0.922000000000000, 0.930000000000000, - 0.937000000000000, 0.951000000000000, 0.947000000000000, - 0.943000000000000, 0.948000000000000, 0.946000000000000, - 0.941000000000000, 0.937000000000000, 0.955000000000000, - 0.9741457117, 0.99244068917, 1.00086729492, 1.00545853355, - 1.00818354609, 1.00941850023, 1.00986192053, 1.00998400565 - ]) * 1e3 - - self.assertTrue(np.allclose(p_ref, tc_track.data[0]['central_pressure'].values)) - - v_ref = np.array([ - 
0.250000000000000, 0.300000000000000, 0.300000000000000, - 0.350000000000000, 0.350000000000000, 0.400000000000000, - 0.450000000000000, 0.450000000000000, 0.450000000000000, - 0.450000000000000, 0.450000000000000, 0.450000000000000, - 0.450000000000000, 0.400000000000000, 0.400000000000000, - 0.400000000000000, 0.400000000000000, 0.450000000000000, - 0.450000000000000, 0.500000000000000, 0.500000000000000, - 0.550000000000000, 0.650000000000000, 0.800000000000000, - 0.950000000000000, 1.100000000000000, 1.300000000000000, - 1.450000000000000, 1.500000000000000, 1.250000000000000, - 1.300000000000000, 1.150000000000000, 1.150000000000000, - 1.150000000000000, 1.150000000000000, 1.200000000000000, - 1.250000000000000, 1.250000000000000, 1.200000000000000, - 0.9737967353, 0.687255951, 0.4994850556, 0.3551480462, 0.2270548036, - 0.1302099557, 0.0645385918, 0.0225325851 - ]) * 1e2 - - self.assertTrue(np.allclose(v_ref, tc_track.data[0]['max_sustained_wind'].values)) - - cat_ref = tc.set_category(tc_track.data[0]['max_sustained_wind'].values, - tc_track.data[0].attrs['max_sustained_wind_unit']) - self.assertEqual(cat_ref, tc_track.data[0].attrs['category']) + tc_synth._apply_land_decay( + tc_track.data, v_rel, p_rel, land_geom, s_rel=True, check_plot=False + ) + + p_ref = ( + np.array( + [ + 1.010000000000000, + 1.009000000000000, + 1.008000000000000, + 1.006000000000000, + 1.003000000000000, + 1.002000000000000, + 1.001000000000000, + 1.000000000000000, + 1.000000000000000, + 1.001000000000000, + 1.002000000000000, + 1.005000000000000, + 1.007000000000000, + 1.010000000000000, + 1.010000000000000, + 1.010000000000000, + 1.010000000000000, + 1.010000000000000, + 1.010000000000000, + 1.007000000000000, + 1.004000000000000, + 1.000000000000000, + 0.994000000000000, + 0.981000000000000, + 0.969000000000000, + 0.961000000000000, + 0.947000000000000, + 0.933000000000000, + 0.922000000000000, + 0.930000000000000, + 0.937000000000000, + 0.951000000000000, + 0.947000000000000, + 0.943000000000000, + 0.948000000000000, + 0.946000000000000, + 0.941000000000000, + 0.937000000000000, + 0.955000000000000, + 0.9741457117, + 0.99244068917, + 1.00086729492, + 1.00545853355, + 1.00818354609, + 1.00941850023, + 1.00986192053, + 1.00998400565, + ] + ) + * 1e3 + ) + + self.assertTrue(np.allclose(p_ref, tc_track.data[0]["central_pressure"].values)) + + v_ref = ( + np.array( + [ + 0.250000000000000, + 0.300000000000000, + 0.300000000000000, + 0.350000000000000, + 0.350000000000000, + 0.400000000000000, + 0.450000000000000, + 0.450000000000000, + 0.450000000000000, + 0.450000000000000, + 0.450000000000000, + 0.450000000000000, + 0.450000000000000, + 0.400000000000000, + 0.400000000000000, + 0.400000000000000, + 0.400000000000000, + 0.450000000000000, + 0.450000000000000, + 0.500000000000000, + 0.500000000000000, + 0.550000000000000, + 0.650000000000000, + 0.800000000000000, + 0.950000000000000, + 1.100000000000000, + 1.300000000000000, + 1.450000000000000, + 1.500000000000000, + 1.250000000000000, + 1.300000000000000, + 1.150000000000000, + 1.150000000000000, + 1.150000000000000, + 1.150000000000000, + 1.200000000000000, + 1.250000000000000, + 1.250000000000000, + 1.200000000000000, + 0.9737967353, + 0.687255951, + 0.4994850556, + 0.3551480462, + 0.2270548036, + 0.1302099557, + 0.0645385918, + 0.0225325851, + ] + ) + * 1e2 + ) + + self.assertTrue( + np.allclose(v_ref, tc_track.data[0]["max_sustained_wind"].values) + ) + + cat_ref = tc.set_category( + tc_track.data[0]["max_sustained_wind"].values, + 
tc_track.data[0].attrs["max_sustained_wind_unit"], + ) + self.assertEqual(cat_ref, tc_track.data[0].attrs["category"]) def test_func_decay_p_pass(self): """Test decay function for pressure with its inverse.""" @@ -152,7 +242,9 @@ def test_func_decay_p_pass(self): res = tc_synth._decay_p_function(s_coef, b_coef, x_val) b_coef_res = tc_synth._solve_decay_p_function(s_coef, res, x_val) - self.assertTrue(np.allclose(b_coef_res[1:], np.ones((x_val.size - 1,)) * b_coef)) + self.assertTrue( + np.allclose(b_coef_res[1:], np.ones((x_val.size - 1,)) * b_coef) + ) self.assertTrue(np.isnan(b_coef_res[0])) def test_func_decay_v_pass(self): @@ -162,38 +254,46 @@ def test_func_decay_v_pass(self): res = tc_synth._decay_v_function(a_coef, x_val) a_coef_res = tc_synth._solve_decay_v_function(res, x_val) - self.assertTrue(np.allclose(a_coef_res[1:], np.ones((x_val.size - 1,)) * a_coef)) + self.assertTrue( + np.allclose(a_coef_res[1:], np.ones((x_val.size - 1,)) * a_coef) + ) self.assertTrue(np.isnan(a_coef_res[0])) def test_decay_ps_value(self): """Test the calculation of S in pressure decay.""" on_land_idx = 5 tr_ds = xr.Dataset() - tr_ds.coords['time'] = ('time', np.arange(10)) - tr_ds['central_pressure'] = ('time', np.arange(10, 20)) - tr_ds['environmental_pressure'] = ('time', np.arange(20, 30)) - tr_ds['on_land'] = ('time', np.zeros((10,)).astype(bool)) + tr_ds.coords["time"] = ("time", np.arange(10)) + tr_ds["central_pressure"] = ("time", np.arange(10, 20)) + tr_ds["environmental_pressure"] = ("time", np.arange(20, 30)) + tr_ds["on_land"] = ("time", np.zeros((10,)).astype(bool)) tr_ds.on_land[on_land_idx] = True p_landfall = 100 res = tc_synth._calc_decay_ps_value(tr_ds, p_landfall, on_land_idx, s_rel=True) - self.assertEqual(res, float(tr_ds['environmental_pressure'][on_land_idx] / p_landfall)) + self.assertEqual( + res, float(tr_ds["environmental_pressure"][on_land_idx] / p_landfall) + ) res = tc_synth._calc_decay_ps_value(tr_ds, p_landfall, on_land_idx, s_rel=False) - self.assertEqual(res, float(tr_ds['central_pressure'][on_land_idx] / p_landfall)) + self.assertEqual( + res, float(tr_ds["central_pressure"][on_land_idx] / p_landfall) + ) def test_calc_decay_no_landfall_pass(self): """Test _calc_land_decay with no historical tracks with landfall""" tc_track = tc.TCTracks.from_processed_ibtracs_csv(TEST_TRACK_SHORT) - expected_warning = 'only %s historical tracks were provided. ' % len(tc_track.data) + expected_warning = "only %s historical tracks were provided. 
" % len( + tc_track.data + ) extent = tc_track.get_extent() land_geom = climada.util.coordinates.get_land_geometry( extent=extent, resolution=10 ) tc.track_land_params(tc_track.data[0], land_geom) - with self.assertLogs('climada.hazard.tc_tracks_synth', level='INFO') as cm: + with self.assertLogs("climada.hazard.tc_tracks_synth", level="INFO") as cm: tc_synth._calc_land_decay(tc_track.data, land_geom) self.assertIn(expected_warning, cm.output[0]) - self.assertIn('No historical track with landfall.', cm.output[1]) + self.assertIn("No historical track with landfall.", cm.output[1]) def test_calc_land_decay_pass(self): """Test _calc_land_decay with environmental pressure function.""" @@ -232,56 +332,98 @@ def test_decay_values_andrew_pass(self): s_cell_2 = 8 * [1.047120451927185] s_cell = s_cell_1 + s_cell_2 p_vs_lf_time_relative = [ - 1.0149413020277482, 1.018848167539267, 1.037696335078534, - 1.0418848167539267, 1.043979057591623, 1.0450261780104713, - 1.0460732984293193, 1.0471204188481675, 1.0471204188481675 + 1.0149413020277482, + 1.018848167539267, + 1.037696335078534, + 1.0418848167539267, + 1.043979057591623, + 1.0450261780104713, + 1.0460732984293193, + 1.0471204188481675, + 1.0471204188481675, ] self.assertEqual(list(p_lf.keys()), [ss_category]) - self.assertEqual(p_lf[ss_category][0], array.array('f', s_cell)) - self.assertEqual(p_lf[ss_category][1], array.array('f', p_vs_lf_time_relative)) + self.assertEqual(p_lf[ss_category][0], array.array("f", s_cell)) + self.assertEqual(p_lf[ss_category][1], array.array("f", p_vs_lf_time_relative)) v_vs_lf_time_relative = [ - 0.8846153846153846, 0.6666666666666666, 0.4166666666666667, - 0.2916666666666667, 0.250000000000000, 0.250000000000000, - 0.20833333333333334, 0.16666666666666666, 0.16666666666666666 + 0.8846153846153846, + 0.6666666666666666, + 0.4166666666666667, + 0.2916666666666667, + 0.250000000000000, + 0.250000000000000, + 0.20833333333333334, + 0.16666666666666666, + 0.16666666666666666, ] self.assertEqual(list(v_lf.keys()), [ss_category]) - self.assertEqual(v_lf[ss_category], array.array('f', v_vs_lf_time_relative)) - - x_val_ref = np.array([ - 95.9512939453125, 53.624916076660156, 143.09530639648438, - 225.0262908935547, 312.5832824707031, 427.43109130859375, - 570.1857299804688, 750.3827514648438, 1020.5431518554688 - ]) + self.assertEqual(v_lf[ss_category], array.array("f", v_vs_lf_time_relative)) + + x_val_ref = np.array( + [ + 95.9512939453125, + 53.624916076660156, + 143.09530639648438, + 225.0262908935547, + 312.5832824707031, + 427.43109130859375, + 570.1857299804688, + 750.3827514648438, + 1020.5431518554688, + ] + ) self.assertEqual(list(x_val.keys()), [ss_category]) self.assertTrue(np.allclose(x_val[ss_category], x_val_ref)) def test_decay_calc_coeff(self): """Test _decay_calc_coeff against MATLAB""" x_val = { - 4: np.array([ - 53.57314960249573, 142.97903059281566, 224.76733726289183, - 312.14621544207563, 426.6757021862584, 568.9358305779094, - 748.3713215157885, 1016.9904230811956 - ]) + 4: np.array( + [ + 53.57314960249573, + 142.97903059281566, + 224.76733726289183, + 312.14621544207563, + 426.6757021862584, + 568.9358305779094, + 748.3713215157885, + 1016.9904230811956, + ] + ) } v_lf = { - 4: np.array([ - 0.6666666666666666, 0.4166666666666667, 0.2916666666666667, - 0.250000000000000, 0.250000000000000, 0.20833333333333334, - 0.16666666666666666, 0.16666666666666666 - ]) + 4: np.array( + [ + 0.6666666666666666, + 0.4166666666666667, + 0.2916666666666667, + 0.250000000000000, + 0.250000000000000, + 
0.20833333333333334, + 0.16666666666666666, + 0.16666666666666666, + ] + ) } p_lf = { - 4: (8 * [1.0471204188481675], - np.array([ - 1.018848167539267, 1.037696335078534, 1.0418848167539267, - 1.043979057591623, 1.0450261780104713, 1.0460732984293193, - 1.0471204188481675, 1.0471204188481675 - ]) + 4: ( + 8 * [1.0471204188481675], + np.array( + [ + 1.018848167539267, + 1.037696335078534, + 1.0418848167539267, + 1.043979057591623, + 1.0450261780104713, + 1.0460732984293193, + 1.0471204188481675, + 1.0471204188481675, + ] + ), ) } @@ -298,41 +440,141 @@ def test_decay_calc_coeff(self): def test_wrong_decay_pass(self): """Test decay not implemented when coefficient < 1""" - track = tc.TCTracks.from_ibtracs_netcdf(provider='usa', storm_id='1975178N28281') + track = tc.TCTracks.from_ibtracs_netcdf( + provider="usa", storm_id="1975178N28281" + ) track_gen = track.data[0] - track_gen['lat'] = np.array([ - 28.20340431, 28.7915261, 29.38642458, 29.97836984, 30.56844404, - 31.16265292, 31.74820301, 32.34449825, 32.92261894, 33.47430891, - 34.01492525, 34.56789399, 35.08810845, 35.55965893, 35.94835174, - 36.29355848, 36.45379561, 36.32473812, 36.07552209, 35.92224784, - 35.84144186, 35.78298537, 35.86090718, 36.02440372, 36.37555559, - 37.06207765, 37.73197352, 37.97524273, 38.05560287, 38.21901208, - 38.31486156, 38.30813367, 38.28481808, 38.28410366, 38.25894812, - 38.20583372, 38.22741099, 38.39970022, 38.68367797, 39.08329904, - 39.41434629, 39.424984, 39.31327716, 39.30336335, 39.31714429, - 39.27031932, 39.30848775, 39.48759833, 39.73326595, 39.96187967, - 40.26954226, 40.76882202, 41.40398607, 41.93809726, 42.60395785, - 43.57074792, 44.63816143, 45.61450458, 46.68528511, 47.89209365, - 49.15580502 - ]) - track_gen['lon'] = np.array([ - -79.20514075, -79.25243311, -79.28393082, -79.32324646, - -79.36668585, -79.41495519, -79.45198688, -79.40580325, - -79.34965443, -79.36938122, -79.30294825, -79.06809546, - -78.70281969, -78.29418936, -77.82170609, -77.30034709, - -76.79004969, -76.37038827, -75.98641014, -75.58383356, - -75.18310414, -74.7974524, -74.3797645, -73.86393572, -73.37910948, - -73.01059003, -72.77051313, -72.68011328, -72.66864779, - -72.62579773, -72.56307717, -72.46607618, -72.35871353, - -72.31120649, -72.15537583, -71.75577051, -71.25287498, - -70.75527907, -70.34788946, -70.17518421, -70.04446577, - -69.76582749, -69.44372386, -69.15881376, -68.84351922, - -68.47890287, -68.04184565, -67.53541437, -66.94008642, - -66.25596075, -65.53496635, -64.83491802, -64.12962685, - -63.54118808, -62.72934383, -61.34915091, -59.72580755, - -58.24404252, -56.71972992, -55.0809336, -53.31524758 - ]) + track_gen["lat"] = np.array( + [ + 28.20340431, + 28.7915261, + 29.38642458, + 29.97836984, + 30.56844404, + 31.16265292, + 31.74820301, + 32.34449825, + 32.92261894, + 33.47430891, + 34.01492525, + 34.56789399, + 35.08810845, + 35.55965893, + 35.94835174, + 36.29355848, + 36.45379561, + 36.32473812, + 36.07552209, + 35.92224784, + 35.84144186, + 35.78298537, + 35.86090718, + 36.02440372, + 36.37555559, + 37.06207765, + 37.73197352, + 37.97524273, + 38.05560287, + 38.21901208, + 38.31486156, + 38.30813367, + 38.28481808, + 38.28410366, + 38.25894812, + 38.20583372, + 38.22741099, + 38.39970022, + 38.68367797, + 39.08329904, + 39.41434629, + 39.424984, + 39.31327716, + 39.30336335, + 39.31714429, + 39.27031932, + 39.30848775, + 39.48759833, + 39.73326595, + 39.96187967, + 40.26954226, + 40.76882202, + 41.40398607, + 41.93809726, + 42.60395785, + 43.57074792, + 44.63816143, + 45.61450458, + 
46.68528511, + 47.89209365, + 49.15580502, + ] + ) + track_gen["lon"] = np.array( + [ + -79.20514075, + -79.25243311, + -79.28393082, + -79.32324646, + -79.36668585, + -79.41495519, + -79.45198688, + -79.40580325, + -79.34965443, + -79.36938122, + -79.30294825, + -79.06809546, + -78.70281969, + -78.29418936, + -77.82170609, + -77.30034709, + -76.79004969, + -76.37038827, + -75.98641014, + -75.58383356, + -75.18310414, + -74.7974524, + -74.3797645, + -73.86393572, + -73.37910948, + -73.01059003, + -72.77051313, + -72.68011328, + -72.66864779, + -72.62579773, + -72.56307717, + -72.46607618, + -72.35871353, + -72.31120649, + -72.15537583, + -71.75577051, + -71.25287498, + -70.75527907, + -70.34788946, + -70.17518421, + -70.04446577, + -69.76582749, + -69.44372386, + -69.15881376, + -68.84351922, + -68.47890287, + -68.04184565, + -67.53541437, + -66.94008642, + -66.25596075, + -65.53496635, + -64.83491802, + -64.12962685, + -63.54118808, + -62.72934383, + -61.34915091, + -59.72580755, + -58.24404252, + -56.71972992, + -55.0809336, + -53.31524758, + ] + ) v_rel = { 1: 0.002249541544102336, @@ -352,22 +594,26 @@ def test_wrong_decay_pass(self): 5: (1.0894914184297835, 0.004315034379018768), 4: (1.0714354641894077, 0.002783787561718677), } - track_gen.attrs['orig_event_flag'] = False + track_gen.attrs["orig_event_flag"] = False - cp_ref = np.array([1012., 1012.]) + cp_ref = np.array([1012.0, 1012.0]) single_track = tc.TCTracks([track_gen]) extent = single_track.get_extent() land_geom = climada.util.coordinates.get_land_geometry( extent=extent, resolution=10 ) - track_res = tc_synth._apply_decay_coeffs(track_gen, v_rel, p_rel, land_geom, True) - self.assertTrue(np.array_equal(cp_ref, track_res['central_pressure'][9:11])) + track_res = tc_synth._apply_decay_coeffs( + track_gen, v_rel, p_rel, land_geom, True + ) + self.assertTrue(np.array_equal(cp_ref, track_res["central_pressure"][9:11])) def test_decay_end_ocean(self): """Test decay is applied after landfall if the track ends over the ocean""" # this track was generated without applying landfall decay # (i.e. 
with decay=False in tc_synth.calc_perturbed_trajectories) - tracks_synth_nodecay_example = tc.TCTracks.from_netcdf(TEST_TRACK_DECAY_END_OCEAN) + tracks_synth_nodecay_example = tc.TCTracks.from_netcdf( + TEST_TRACK_DECAY_END_OCEAN + ) # apply landfall decay extent = tracks_synth_nodecay_example.get_extent() @@ -378,7 +624,8 @@ def test_decay_end_ocean(self): tracks_synth_nodecay_example.data, tc_synth.LANDFALL_DECAY_V, tc_synth.LANDFALL_DECAY_P, - land_geom) + land_geom, + ) track = tracks_synth_nodecay_example.data[0] # read its corresponding historical track @@ -390,34 +637,53 @@ def test_decay_end_ocean(self): lf_idx = tc._get_landfall_idx(track) last_lf_idx = lf_idx[-1][1] # only suitable if track ends over the ocean - self.assertTrue(last_lf_idx < track['time'].size-2, - 'This test should be re-written, data not suitable') + self.assertTrue( + last_lf_idx < track["time"].size - 2, + "This test should be re-written, data not suitable", + ) # check pressure and wind values - p_hist_end = track_hist['central_pressure'].values[last_lf_idx:] - p_synth_end = track['central_pressure'].values[last_lf_idx:] + p_hist_end = track_hist["central_pressure"].values[last_lf_idx:] + p_synth_end = track["central_pressure"].values[last_lf_idx:] self.assertTrue(np.all(p_synth_end > p_hist_end)) - v_hist_end = track_hist['max_sustained_wind'].values[last_lf_idx:] - v_synth_end = track['max_sustained_wind'].values[last_lf_idx:] + v_hist_end = track_hist["max_sustained_wind"].values[last_lf_idx:] + v_synth_end = track["max_sustained_wind"].values[last_lf_idx:] self.assertTrue(np.all(v_synth_end < v_hist_end)) # Part 2: is landfall applied in all landfalls? - p_hist_lf = np.concatenate([track_hist['central_pressure'].values[lfs:lfe] - for lfs,lfe in zip(*lf_idx)]) - p_synth_lf = np.concatenate([track['central_pressure'].values[lfs:lfe] - for lfs,lfe in zip(*lf_idx)]) - v_hist_lf = np.concatenate([track_hist['max_sustained_wind'].values[lfs:lfe] - for lfs,lfe in zip(*lf_idx)]) - v_synth_lf = np.concatenate([track['max_sustained_wind'].values[lfs:lfe] - for lfs,lfe in zip(*lf_idx)]) + p_hist_lf = np.concatenate( + [ + track_hist["central_pressure"].values[lfs:lfe] + for lfs, lfe in zip(*lf_idx) + ] + ) + p_synth_lf = np.concatenate( + [track["central_pressure"].values[lfs:lfe] for lfs, lfe in zip(*lf_idx)] + ) + v_hist_lf = np.concatenate( + [ + track_hist["max_sustained_wind"].values[lfs:lfe] + for lfs, lfe in zip(*lf_idx) + ] + ) + v_synth_lf = np.concatenate( + [track["max_sustained_wind"].values[lfs:lfe] for lfs, lfe in zip(*lf_idx)] + ) self.assertTrue(np.all(p_synth_lf > p_hist_lf)) self.assertTrue(np.all(v_synth_lf < v_hist_lf)) - self.assertTrue(np.all(track['central_pressure'].values <= track['environmental_pressure'].values)) + self.assertTrue( + np.all( + track["central_pressure"].values + <= track["environmental_pressure"].values + ) + ) def test_decay_penv_gt_pcen(self): """Test decay is applied if penv at end of landfall < pcen just before landfall""" # this track was generated without applying landfall decay # (i.e. 
with decay=False in tc_synth.calc_perturbed_trajectories) - tracks_synth_nodecay_example = tc.TCTracks.from_netcdf(TEST_TRACK_DECAY_PENV_GT_PCEN) + tracks_synth_nodecay_example = tc.TCTracks.from_netcdf( + TEST_TRACK_DECAY_PENV_GT_PCEN + ) # apply landfall decay extent = tracks_synth_nodecay_example.get_extent() @@ -428,7 +694,8 @@ def test_decay_penv_gt_pcen(self): tracks_synth_nodecay_example.data, tc_synth.LANDFALL_DECAY_V, tc_synth.LANDFALL_DECAY_P, - land_geom) + land_geom, + ) track = tracks_synth_nodecay_example.data[0] # read its corresponding historical track @@ -441,41 +708,50 @@ def test_decay_penv_gt_pcen(self): start_lf_idx, end_lf_idx = lf_idx[0][0], lf_idx[1][0] # check pressure and wind values - p_hist_end = track_hist['central_pressure'].values[end_lf_idx:] - p_synth_end = track['central_pressure'].values[end_lf_idx:] + p_hist_end = track_hist["central_pressure"].values[end_lf_idx:] + p_synth_end = track["central_pressure"].values[end_lf_idx:] self.assertTrue(np.all(p_synth_end > p_hist_end)) - v_hist_end = track_hist['max_sustained_wind'].values[end_lf_idx:] - v_synth_end = track['max_sustained_wind'].values[end_lf_idx:] + v_hist_end = track_hist["max_sustained_wind"].values[end_lf_idx:] + v_synth_end = track["max_sustained_wind"].values[end_lf_idx:] self.assertTrue(np.all(v_synth_end < v_hist_end)) # Part 2: is landfall applied in all landfalls? # central pressure - p_hist_lf = track_hist['central_pressure'].values[start_lf_idx:end_lf_idx] - p_synth_lf = track['central_pressure'].values[start_lf_idx:end_lf_idx] + p_hist_lf = track_hist["central_pressure"].values[start_lf_idx:end_lf_idx] + p_synth_lf = track["central_pressure"].values[start_lf_idx:end_lf_idx] # central pressure should be higher in synth than hist; unless it was set to p_env - self.assertTrue(np.all( - np.logical_or(p_synth_lf > p_hist_lf, - p_synth_lf == track['environmental_pressure'].values[start_lf_idx:end_lf_idx]) - )) + self.assertTrue( + np.all( + np.logical_or( + p_synth_lf > p_hist_lf, + p_synth_lf + == track["environmental_pressure"].values[start_lf_idx:end_lf_idx], + ) + ) + ) # but for this track is should be higher towards the end self.assertTrue(np.any(p_synth_lf > p_hist_lf)) self.assertTrue(np.all(p_synth_lf >= p_hist_lf)) # wind speed - v_hist_lf = track_hist['max_sustained_wind'].values[start_lf_idx:end_lf_idx] - v_synth_lf = track['max_sustained_wind'].values[start_lf_idx:end_lf_idx] + v_hist_lf = track_hist["max_sustained_wind"].values[start_lf_idx:end_lf_idx] + v_synth_lf = track["max_sustained_wind"].values[start_lf_idx:end_lf_idx] # wind should decrease over time for that landfall - v_before_lf = track_hist['max_sustained_wind'].values[start_lf_idx-1] + v_before_lf = track_hist["max_sustained_wind"].values[start_lf_idx - 1] self.assertTrue(np.all(v_synth_lf[1:] < v_before_lf)) # and wind speed should be lower in synth than hist at the end of and after this landfall - self.assertTrue(np.all( - track['max_sustained_wind'].values[end_lf_idx:] < track_hist['max_sustained_wind'].values[end_lf_idx:] - )) + self.assertTrue( + np.all( + track["max_sustained_wind"].values[end_lf_idx:] + < track_hist["max_sustained_wind"].values[end_lf_idx:] + ) + ) # finally, central minus env pressure cannot increase during this landfall - p_env_lf = track['central_pressure'].values[start_lf_idx:end_lf_idx] + p_env_lf = track["central_pressure"].values[start_lf_idx:end_lf_idx] self.assertTrue(np.all(np.diff(p_env_lf - p_synth_lf) <= 0)) + class TestSynth(unittest.TestCase): @classmethod @@ -484,69 +760,80 @@ 
def setUpClass(cls): def test_angle_funs_pass(self): """Test functions used by random walk code""" - self.assertAlmostEqual(tc_synth._get_bearing_angle(np.array([15, 20]), - np.array([0, 0]))[0], 90.0) - self.assertAlmostEqual(tc_synth._get_bearing_angle(np.array([20, 20]), - np.array([0, 5]))[0], 0.0) - self.assertAlmostEqual(tc_synth._get_bearing_angle(np.array([0, 0.00001]), - np.array([0, 0.00001]))[0], 45) + self.assertAlmostEqual( + tc_synth._get_bearing_angle(np.array([15, 20]), np.array([0, 0]))[0], 90.0 + ) + self.assertAlmostEqual( + tc_synth._get_bearing_angle(np.array([20, 20]), np.array([0, 5]))[0], 0.0 + ) + self.assertAlmostEqual( + tc_synth._get_bearing_angle(np.array([0, 0.00001]), np.array([0, 0.00001]))[ + 0 + ], + 45, + ) pt_north = tc_synth._get_destination_points(0, 0, 0, 1) self.assertAlmostEqual(pt_north[0], 0.0) self.assertAlmostEqual(pt_north[1], 1.0) pt_west = tc_synth._get_destination_points(0, 0, -90, 3) self.assertAlmostEqual(pt_west[0], -3.0) self.assertAlmostEqual(pt_west[1], 0.0) - pt_test = tc_synth._get_destination_points(8.523224, 47.371102, - 151.14161003, 52.80812463) + pt_test = tc_synth._get_destination_points( + 8.523224, 47.371102, 151.14161003, 52.80812463 + ) self.assertAlmostEqual(pt_test[0], 31.144113) self.assertAlmostEqual(pt_test[1], -1.590347) def test_random_no_landfall_pass(self): """Test calc_perturbed_trajectories with decay and no historical tracks with landfall""" tc_track = tc.TCTracks.from_processed_ibtracs_csv(TEST_TRACK_SHORT) - expected_warning = 'only %s historical tracks were provided. ' % len(tc_track.data) - with self.assertLogs('climada.hazard.tc_tracks_synth', level='INFO') as cm: + expected_warning = "only %s historical tracks were provided. " % len( + tc_track.data + ) + with self.assertLogs("climada.hazard.tc_tracks_synth", level="INFO") as cm: tc_track.calc_perturbed_trajectories(use_global_decay_params=False) self.assertIn(expected_warning, cm.output[1]) - self.assertIn('No historical track with landfall.', cm.output[2]) + self.assertIn("No historical track with landfall.", cm.output[2]) def test_random_walk_ref_pass(self): """Test against MATLAB reference.""" tc_track = tc.TCTracks.from_processed_ibtracs_csv(TEST_TRACK_SHORT) nb_synth_tracks = 2 - tc_track.calc_perturbed_trajectories(nb_synth_tracks=nb_synth_tracks, seed=25, decay=False) + tc_track.calc_perturbed_trajectories( + nb_synth_tracks=nb_synth_tracks, seed=25, decay=False + ) self.assertEqual(len(tc_track.data), nb_synth_tracks + 1) - self.assertFalse(tc_track.data[1].attrs['orig_event_flag']) - self.assertEqual(tc_track.data[1].attrs['name'], '1951239N12334_gen1') - self.assertEqual(tc_track.data[1].attrs['id_no'], 1.951239012334010e+12) - self.assertAlmostEqual(tc_track.data[1]['lon'][0].values, -25.0448138) - self.assertAlmostEqual(tc_track.data[1]['lon'][1].values, -25.74439739) - self.assertAlmostEqual(tc_track.data[1]['lon'][2].values, -26.54491644) - self.assertAlmostEqual(tc_track.data[1]['lon'][3].values, -27.73156829) - self.assertAlmostEqual(tc_track.data[1]['lon'][4].values, -28.63175987) - self.assertAlmostEqual(tc_track.data[1]['lon'][8].values, -34.05293373) - - self.assertAlmostEqual(tc_track.data[1]['lat'][0].values, 11.96825841) - self.assertAlmostEqual(tc_track.data[1]['lat'][4].values, 11.86769405) - self.assertAlmostEqual(tc_track.data[1]['lat'][5].values, 11.84378139) - self.assertAlmostEqual(tc_track.data[1]['lat'][6].values, 11.85957282) - self.assertAlmostEqual(tc_track.data[1]['lat'][7].values, 11.84555291) - 
self.assertAlmostEqual(tc_track.data[1]['lat'][8].values, 11.8065998) - - self.assertFalse(tc_track.data[2].attrs['orig_event_flag']) - self.assertEqual(tc_track.data[2].attrs['name'], '1951239N12334_gen2') - self.assertAlmostEqual(tc_track.data[2].attrs['id_no'], 1.951239012334020e+12) - self.assertAlmostEqual(tc_track.data[2]['lon'][0].values, -25.47658461) - self.assertAlmostEqual(tc_track.data[2]['lon'][3].values, -28.08465841) - self.assertAlmostEqual(tc_track.data[2]['lon'][4].values, -28.85901852) - self.assertAlmostEqual(tc_track.data[2]['lon'][8].values, -33.62144837) - - self.assertAlmostEqual(tc_track.data[2]['lat'][0].values, 11.82886685) - self.assertAlmostEqual(tc_track.data[2]['lat'][6].values, 11.71068012) - self.assertAlmostEqual(tc_track.data[2]['lat'][7].values, 11.69832976) - self.assertAlmostEqual(tc_track.data[2]['lat'][8].values, 11.64145734) + self.assertFalse(tc_track.data[1].attrs["orig_event_flag"]) + self.assertEqual(tc_track.data[1].attrs["name"], "1951239N12334_gen1") + self.assertEqual(tc_track.data[1].attrs["id_no"], 1.951239012334010e12) + self.assertAlmostEqual(tc_track.data[1]["lon"][0].values, -25.0448138) + self.assertAlmostEqual(tc_track.data[1]["lon"][1].values, -25.74439739) + self.assertAlmostEqual(tc_track.data[1]["lon"][2].values, -26.54491644) + self.assertAlmostEqual(tc_track.data[1]["lon"][3].values, -27.73156829) + self.assertAlmostEqual(tc_track.data[1]["lon"][4].values, -28.63175987) + self.assertAlmostEqual(tc_track.data[1]["lon"][8].values, -34.05293373) + + self.assertAlmostEqual(tc_track.data[1]["lat"][0].values, 11.96825841) + self.assertAlmostEqual(tc_track.data[1]["lat"][4].values, 11.86769405) + self.assertAlmostEqual(tc_track.data[1]["lat"][5].values, 11.84378139) + self.assertAlmostEqual(tc_track.data[1]["lat"][6].values, 11.85957282) + self.assertAlmostEqual(tc_track.data[1]["lat"][7].values, 11.84555291) + self.assertAlmostEqual(tc_track.data[1]["lat"][8].values, 11.8065998) + + self.assertFalse(tc_track.data[2].attrs["orig_event_flag"]) + self.assertEqual(tc_track.data[2].attrs["name"], "1951239N12334_gen2") + self.assertAlmostEqual(tc_track.data[2].attrs["id_no"], 1.951239012334020e12) + self.assertAlmostEqual(tc_track.data[2]["lon"][0].values, -25.47658461) + self.assertAlmostEqual(tc_track.data[2]["lon"][3].values, -28.08465841) + self.assertAlmostEqual(tc_track.data[2]["lon"][4].values, -28.85901852) + self.assertAlmostEqual(tc_track.data[2]["lon"][8].values, -33.62144837) + + self.assertAlmostEqual(tc_track.data[2]["lat"][0].values, 11.82886685) + self.assertAlmostEqual(tc_track.data[2]["lat"][6].values, 11.71068012) + self.assertAlmostEqual(tc_track.data[2]["lat"][7].values, 11.69832976) + self.assertAlmostEqual(tc_track.data[2]["lat"][8].values, 11.64145734) def test_random_walk_decay_pass(self): """Test land decay is called from calc_perturbed_trajectories.""" @@ -554,63 +841,99 @@ def test_random_walk_decay_pass(self): tc_track = tc.TCTracks.from_processed_ibtracs_csv(TC_ANDREW_FL) nb_synth_tracks = 2 # should work if using global parameters - with self.assertLogs('climada.hazard.tc_tracks_synth', level='DEBUG') as cm0: - tc_track.calc_perturbed_trajectories(nb_synth_tracks=nb_synth_tracks, seed=25, decay=True, - use_global_decay_params=True) + with self.assertLogs("climada.hazard.tc_tracks_synth", level="DEBUG") as cm0: + tc_track.calc_perturbed_trajectories( + nb_synth_tracks=nb_synth_tracks, + seed=25, + decay=True, + use_global_decay_params=True, + ) self.assertEqual(len(cm0), 2) self.assertEqual(tc_track.size, 3) # 
but alert the user otherwise tc_track = tc.TCTracks.from_processed_ibtracs_csv(TC_ANDREW_FL) - with self.assertLogs('climada.hazard.tc_tracks_synth', level='DEBUG') as cm: - tc_track.calc_perturbed_trajectories(nb_synth_tracks=nb_synth_tracks, seed=25, decay=True, - use_global_decay_params=False) - self.assertIn('No historical track of category Tropical Depression ' - 'with landfall.', cm.output[2]) - self.assertIn('Decay parameters from category Hurricane Cat. 4 taken.', - cm.output[3]) - self.assertIn('No historical track of category Hurricane Cat. 1 with ' - 'landfall.', cm.output[4]) - self.assertIn('Decay parameters from category Hurricane Cat. 4 taken.', - cm.output[5]) - self.assertIn('No historical track of category Hurricane Cat. 3 with ' - 'landfall. Decay parameters from category Hurricane Cat. ' - '4 taken.', cm.output[6]) - self.assertIn('No historical track of category Hurricane Cat. 5 with ' - 'landfall.', cm.output[7]) + with self.assertLogs("climada.hazard.tc_tracks_synth", level="DEBUG") as cm: + tc_track.calc_perturbed_trajectories( + nb_synth_tracks=nb_synth_tracks, + seed=25, + decay=True, + use_global_decay_params=False, + ) + self.assertIn( + "No historical track of category Tropical Depression " "with landfall.", + cm.output[2], + ) + self.assertIn( + "Decay parameters from category Hurricane Cat. 4 taken.", cm.output[3] + ) + self.assertIn( + "No historical track of category Hurricane Cat. 1 with " "landfall.", + cm.output[4], + ) + self.assertIn( + "Decay parameters from category Hurricane Cat. 4 taken.", cm.output[5] + ) + self.assertIn( + "No historical track of category Hurricane Cat. 3 with " + "landfall. Decay parameters from category Hurricane Cat. " + "4 taken.", + cm.output[6], + ) + self.assertIn( + "No historical track of category Hurricane Cat. 
5 with " "landfall.", + cm.output[7], + ) def test_random_walk_identical_pass(self): """Test 0 perturbation leads to identical tracks.""" tc_track = tc.TCTracks.from_processed_ibtracs_csv(TC_ANDREW_FL) nb_synth_tracks = 2 - tc_track.calc_perturbed_trajectories(nb_synth_tracks=nb_synth_tracks, - max_shift_ini=0, max_dspeed_rel=0, max_ddirection=0, decay=False) + tc_track.calc_perturbed_trajectories( + nb_synth_tracks=nb_synth_tracks, + max_shift_ini=0, + max_dspeed_rel=0, + max_ddirection=0, + decay=False, + ) orig_track = tc_track.data[0] for syn_track in tc_track.data[1:]: - np.testing.assert_allclose(orig_track['lon'].values, syn_track['lon'].values, atol=1e-4) - np.testing.assert_allclose(orig_track['lat'].values, syn_track['lat'].values, atol=1e-4) - for varname in ["time", "time_step", "radius_max_wind", "max_sustained_wind", - "central_pressure", "environmental_pressure"]: - np.testing.assert_array_equal(orig_track[varname].values, - syn_track[varname].values) + np.testing.assert_allclose( + orig_track["lon"].values, syn_track["lon"].values, atol=1e-4 + ) + np.testing.assert_allclose( + orig_track["lat"].values, syn_track["lat"].values, atol=1e-4 + ) + for varname in [ + "time", + "time_step", + "radius_max_wind", + "max_sustained_wind", + "central_pressure", + "environmental_pressure", + ]: + np.testing.assert_array_equal( + orig_track[varname].values, syn_track[varname].values + ) def test_random_walk_single_point(self): found = False for year in range(1951, 1981): - tc_track = tc.TCTracks.from_ibtracs_netcdf(provider='usa', - year_range=(year,year), - discard_single_points=False) - singlept = np.where([x['time'].size == 1 for x in tc_track.data])[0] + tc_track = tc.TCTracks.from_ibtracs_netcdf( + provider="usa", year_range=(year, year), discard_single_points=False + ) + singlept = np.where([x["time"].size == 1 for x in tc_track.data])[0] found = len(singlept) > 0 if found: # found a case with a single-point track, keep max three tracks for efficiency - tc_track.data = tc_track.data[max(0, singlept[0]-1):singlept[0]+2] + tc_track.data = tc_track.data[max(0, singlept[0] - 1) : singlept[0] + 2] n_tr = tc_track.size tc_track.equal_timestep() tc_track.calc_perturbed_trajectories(nb_synth_tracks=2) - self.assertEqual((2+1)*n_tr, tc_track.size) + self.assertEqual((2 + 1) * n_tr, tc_track.size) break self.assertTrue(found) + # Execute Tests if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestDecay) diff --git a/climada/hazard/test/test_trop_cyclone.py b/climada/hazard/test/test_trop_cyclone.py index 9141778ae..9996becc3 100644 --- a/climada/hazard/test/test_trop_cyclone.py +++ b/climada/hazard/test/test_trop_cyclone.py @@ -20,30 +20,30 @@ """ import datetime as dt +import unittest from pathlib import Path from tempfile import TemporaryDirectory -import unittest import numpy as np from scipy import sparse import climada.hazard.test as hazard_test -from climada.util import ureg -from climada.test import get_test_file -from climada.hazard.tc_tracks import TCTracks -from climada.hazard.tc_clim_change import get_knutson_scaling_factor -from climada.hazard.trop_cyclone.trop_cyclone import ( - TropCyclone, ) from climada.hazard.centroids.centr import Centroids +from climada.hazard.tc_clim_change import get_knutson_scaling_factor +from climada.hazard.tc_tracks import TCTracks from climada.hazard.test import download_ibtracs +from climada.hazard.trop_cyclone.trop_cyclone import TropCyclone +from climada.test import get_test_file +from climada.util import ureg - 
-DATA_DIR = Path(hazard_test.__file__).parent.joinpath('data') +DATA_DIR = Path(hazard_test.__file__).parent.joinpath("data") TEST_TRACK = DATA_DIR.joinpath("trac_brb_test.csv") TEST_TRACK_SHORT = DATA_DIR.joinpath("trac_short_test.csv") -CENTR_TEST_BRB = Centroids.from_hdf5(get_test_file('centr_test_brb', file_format='hdf5')) +CENTR_TEST_BRB = Centroids.from_hdf5( + get_test_file("centr_test_brb", file_format="hdf5") +) class TestReader(unittest.TestCase): @@ -62,11 +62,23 @@ def test_memory_limit(self): # This should not affect the results. In practice, chunking is not applied due to limited # memory, but due to very high spatial/temporal resolution of the centroids/tracks. We # simulate this situation by artificially reducing the available memory. - tc_haz = TropCyclone.from_tracks(tc_track, centroids=CENTR_TEST_BRB, max_memory_gb=0.001) - intensity_idx = [0, 1, 2, 3, 80, 100, 120, 200, 220, 250, 260, 295] + tc_haz = TropCyclone.from_tracks( + tc_track, centroids=CENTR_TEST_BRB, max_memory_gb=0.001 + ) + intensity_idx = [0, 1, 2, 3, 80, 100, 120, 200, 220, 250, 260, 295] intensity_values = [ - 22.74903, 23.784691, 24.82255, 22.67403, 27.218706, 30.593959, - 18.980878, 24.540069, 27.826407, 26.846293, 0., 34.568898, + 22.74903, + 23.784691, + 24.82255, + 22.67403, + 27.218706, + 30.593959, + 18.980878, + 24.540069, + 27.826407, + 26.846293, + 0.0, + 34.568898, ] np.testing.assert_array_almost_equal( @@ -76,16 +88,36 @@ def test_memory_limit(self): def test_set_one_pass(self): """Test _tc_from_track function.""" - intensity_idx = [0, 1, 2, 3, 80, 100, 120, 200, 220, 250, 260, 295] + intensity_idx = [0, 1, 2, 3, 80, 100, 120, 200, 220, 250, 260, 295] intensity_values = { "geosphere": [ - 22.74927, 23.78498, 24.822908, 22.674202, 27.220042, 30.602122, - 18.981022, 24.540138, 27.830925, 26.8489, 0., 34.572391, + 22.74927, + 23.78498, + 24.822908, + 22.674202, + 27.220042, + 30.602122, + 18.981022, + 24.540138, + 27.830925, + 26.8489, + 0.0, + 34.572391, ], "equirect": [ - 22.74903, 23.784691, 24.82255, 22.67403, 27.218706, 30.593959, - 18.980878, 24.540069, 27.826407, 26.846293, 0., 34.568898, - ] + 22.74903, + 23.784691, + 24.82255, + 22.67403, + 27.218706, + 30.593959, + 18.980878, + 24.540069, + 27.826407, + 26.846293, + 0.0, + 34.568898, + ], } # the values for the two metrics should agree up to first digit at least for i, val in enumerate(intensity_values["geosphere"]): @@ -96,11 +128,16 @@ def test_set_one_pass(self): tc_track.data = tc_track.data[:1] for metric in ["equirect", "geosphere"]: - tc_haz = TropCyclone.from_tracks(tc_track, centroids=CENTR_TEST_BRB, model='H08', - store_windfields=True, metric=metric) + tc_haz = TropCyclone.from_tracks( + tc_track, + centroids=CENTR_TEST_BRB, + model="H08", + store_windfields=True, + metric=metric, + ) - self.assertEqual(tc_haz.haz_type, 'TC') - self.assertEqual(tc_haz.units, 'm/s') + self.assertEqual(tc_haz.haz_type, "TC") + self.assertEqual(tc_haz.units, "m/s") self.assertEqual(tc_haz.centroids.size, 296) self.assertEqual(tc_haz.event_id.size, 1) self.assertEqual(tc_haz.date.size, 1) @@ -108,7 +145,7 @@ def test_set_one_pass(self): self.assertEqual(dt.datetime.fromordinal(tc_haz.date[0]).month, 8) self.assertEqual(dt.datetime.fromordinal(tc_haz.date[0]).day, 27) self.assertEqual(tc_haz.event_id[0], 1) - self.assertEqual(tc_haz.event_name, ['1951239N12334']) + self.assertEqual(tc_haz.event_name, ["1951239N12334"]) self.assertTrue(np.array_equal(tc_haz.frequency, np.array([1]))) self.assertTrue(isinstance(tc_haz.fraction, 
sparse.csr_matrix)) self.assertEqual(tc_haz.fraction.shape, (1, 296)) @@ -119,7 +156,9 @@ def test_set_one_pass(self): self.assertEqual(np.nonzero(tc_haz.intensity)[0].size, 255) np.testing.assert_array_almost_equal( - tc_haz.intensity[0, intensity_idx].toarray()[0], intensity_values[metric]) + tc_haz.intensity[0, intensity_idx].toarray()[0], + intensity_values[metric], + ) for idx, val in zip(intensity_idx, intensity_values[metric]): if val == 0: self.assertEqual(tc_haz.intensity[0, idx], 0) @@ -128,7 +167,7 @@ def test_set_one_pass(self): windfields = windfields.reshape(windfields.shape[0], -1, 2) windfield_norms = np.linalg.norm(windfields, axis=-1).max(axis=0) intensity = tc_haz.intensity.toarray()[0, :] - msk = (intensity > 0) + msk = intensity > 0 np.testing.assert_array_equal(windfield_norms[msk], intensity[msk]) def test_cross_antimeridian(self): @@ -152,38 +191,136 @@ def test_cross_antimeridian(self): def test_windfield_models(self): """Test _tc_from_track function with different wind field models.""" - intensity_idx = [0, 1, 2, 3, 80, 100, 120, 200, 220, 250, 260, 295] + intensity_idx = [0, 1, 2, 3, 80, 100, 120, 200, 220, 250, 260, 295] intensity_values = [ - ("H08", None, [ - 22.74903, 23.784691, 24.82255, 22.67403, 27.218706, 30.593959, - 18.980878, 24.540069, 27.826407, 26.846293, 0., 34.568898, - ]), - ("H10", None, [ - 24.745521, 25.596484, 26.475329, 24.690914, 28.650107, 31.584395, - 21.723546, 26.140293, 28.94964, 28.051915, 18.49378, 35.312152, - ]), - # The following model configurations use recorded wind speeds, while the above use - # pressure values only. That's why some of the values are so different. - ("H10", dict(vmax_from_cen=False, rho_air_const=1.2), [ - 23.702232, 24.327615, 24.947161, 23.589233, 26.616085, 29.389295, - 21.338178, 24.257067, 26.472543, 25.662313, 18.535842, 31.886041, - ]), - ("H10", dict(vmax_from_cen=False, rho_air_const=None), [ - 24.244162, 24.835561, 25.432454, 24.139294, 27.127457, 29.719196, - 21.910658, 24.692637, 26.783575, 25.971516, 19.005555, 31.904048, - ]), - ("H10", dict(vmax_from_cen=False, rho_air_const=None, vmax_in_brackets=True), [ - 23.592924, 24.208169, 24.817104, 23.483053, 26.468975, 29.221715, - 21.260867, 24.150879, 26.34288 , 25.543635, 18.487385, 31.904048 - ]), - ("H1980", None, [ - 21.376807, 21.957217, 22.569568, 21.284351, 24.254226, 26.971303, - 19.220149, 21.984516, 24.196388, 23.449116, 0, 31.550207, - ]), - ("ER11", None, [ - 23.565332, 24.931413, 26.360758, 23.490333, 29.601171, 34.522795, - 18.996389, 26.102109, 30.780737, 29.498453, 0, 38.368805, - ]), + ( + "H08", + None, + [ + 22.74903, + 23.784691, + 24.82255, + 22.67403, + 27.218706, + 30.593959, + 18.980878, + 24.540069, + 27.826407, + 26.846293, + 0.0, + 34.568898, + ], + ), + ( + "H10", + None, + [ + 24.745521, + 25.596484, + 26.475329, + 24.690914, + 28.650107, + 31.584395, + 21.723546, + 26.140293, + 28.94964, + 28.051915, + 18.49378, + 35.312152, + ], + ), + # The following model configurations use recorded wind speeds, while the above use + # pressure values only. That's why some of the values are so different. 
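As the comment above notes, `H08` derives the wind field from the pressure values only, whereas the `H10` configurations with `vmax_from_cen=False` use the recorded maximum winds, which is why the reference values differ noticeably. A minimal sketch of selecting a model through `TropCyclone.from_tracks` (input files are placeholders; the keyword arguments are the ones exercised by this test):

```python
from climada.hazard.centroids.centr import Centroids
from climada.hazard.tc_tracks import TCTracks
from climada.hazard.trop_cyclone.trop_cyclone import TropCyclone

tracks = TCTracks.from_processed_ibtracs_csv("my_track.csv")  # hypothetical input file
centroids = Centroids.from_hdf5("my_centroids.hdf5")          # hypothetical input file

# Default Holland (2008) model, driven by central pressure only
tc_h08 = TropCyclone.from_tracks(tracks, centroids=centroids, model="H08")

# Holland (2010) variant that uses the recorded maximum sustained winds
tc_h10 = TropCyclone.from_tracks(
    tracks,
    centroids=centroids,
    model="H10",
    model_kwargs=dict(vmax_from_cen=False, rho_air_const=None),
)
```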
+ ( + "H10", + dict(vmax_from_cen=False, rho_air_const=1.2), + [ + 23.702232, + 24.327615, + 24.947161, + 23.589233, + 26.616085, + 29.389295, + 21.338178, + 24.257067, + 26.472543, + 25.662313, + 18.535842, + 31.886041, + ], + ), + ( + "H10", + dict(vmax_from_cen=False, rho_air_const=None), + [ + 24.244162, + 24.835561, + 25.432454, + 24.139294, + 27.127457, + 29.719196, + 21.910658, + 24.692637, + 26.783575, + 25.971516, + 19.005555, + 31.904048, + ], + ), + ( + "H10", + dict(vmax_from_cen=False, rho_air_const=None, vmax_in_brackets=True), + [ + 23.592924, + 24.208169, + 24.817104, + 23.483053, + 26.468975, + 29.221715, + 21.260867, + 24.150879, + 26.34288, + 25.543635, + 18.487385, + 31.904048, + ], + ), + ( + "H1980", + None, + [ + 21.376807, + 21.957217, + 22.569568, + 21.284351, + 24.254226, + 26.971303, + 19.220149, + 21.984516, + 24.196388, + 23.449116, + 0, + 31.550207, + ], + ), + ( + "ER11", + None, + [ + 23.565332, + 24.931413, + 26.360758, + 23.490333, + 29.601171, + 34.522795, + 18.996389, + 26.102109, + 30.780737, + 29.498453, + 0, + 38.368805, + ], + ), ] tc_track = TCTracks.from_processed_ibtracs_csv(TEST_TRACK) @@ -192,10 +329,14 @@ def test_windfield_models(self): for model, model_kwargs, inten_ref in intensity_values: tc_haz = TropCyclone.from_tracks( - tc_track, centroids=CENTR_TEST_BRB, model=model, model_kwargs=model_kwargs, + tc_track, + centroids=CENTR_TEST_BRB, + model=model, + model_kwargs=model_kwargs, ) np.testing.assert_array_almost_equal( - tc_haz.intensity[0, intensity_idx].toarray()[0], inten_ref, + tc_haz.intensity[0, intensity_idx].toarray()[0], + inten_ref, ) for idx, val in zip(intensity_idx, inten_ref): if val == 0: @@ -205,18 +346,38 @@ def test_windfield_models_different_windunits(self): """ Test _tc_from_track function should calculate the same results or raise ValueError with different windspeed units. 
- """ - intensity_idx = [0, 1, 2, 3, 80, 100, 120, 200, 220, 250, 260, 295] + """ + intensity_idx = [0, 1, 2, 3, 80, 100, 120, 200, 220, 250, 260, 295] intensity_values = { # Holland 1980 and Emanuel & Rotunno 2011 use recorded wind speeds, that is why checking them for different # windspeed units is so important: "H1980": [ - 21.376807, 21.957217, 22.569568, 21.284351, 24.254226, 26.971303, - 19.220149, 21.984516, 24.196388, 23.449116, 0, 31.550207, + 21.376807, + 21.957217, + 22.569568, + 21.284351, + 24.254226, + 26.971303, + 19.220149, + 21.984516, + 24.196388, + 23.449116, + 0, + 31.550207, ], "ER11": [ - 23.565332, 24.931413, 26.360758, 23.490333, 29.601171, 34.522795, - 18.996389, 26.102109, 30.780737, 29.498453, 0, 38.368805, + 23.565332, + 24.931413, + 26.360758, + 23.490333, + 29.601171, + 34.522795, + 18.996389, + 26.102109, + 30.780737, + 29.498453, + 0, + 38.368805, ], } @@ -225,27 +386,31 @@ def test_windfield_models_different_windunits(self): tc_track.data = tc_track.data[:1] tc_track_kmph = TCTracks(data=[ds.copy(deep=True) for ds in tc_track.data]) - tc_track_kmph.data[0]['max_sustained_wind'] *= ( + tc_track_kmph.data[0]["max_sustained_wind"] *= ( (1.0 * ureg.knot).to(ureg.km / ureg.hour).magnitude ) - tc_track_kmph.data[0].attrs['max_sustained_wind_unit'] = 'km/h' + tc_track_kmph.data[0].attrs["max_sustained_wind_unit"] = "km/h" tc_track_mps = TCTracks(data=[ds.copy(deep=True) for ds in tc_track.data]) - tc_track_mps.data[0]['max_sustained_wind'] *= ( + tc_track_mps.data[0]["max_sustained_wind"] *= ( (1.0 * ureg.knot).to(ureg.meter / ureg.second).magnitude ) - tc_track_mps.data[0].attrs['max_sustained_wind_unit'] = 'm/s' + tc_track_mps.data[0].attrs["max_sustained_wind_unit"] = "m/s" for model in ["H1980", "ER11"]: for tc_track_i in [tc_track_kmph, tc_track_mps]: - tc_haz = TropCyclone.from_tracks(tc_track_i, centroids=CENTR_TEST_BRB, model=model) + tc_haz = TropCyclone.from_tracks( + tc_track_i, centroids=CENTR_TEST_BRB, model=model + ) np.testing.assert_array_almost_equal( - tc_haz.intensity[0, intensity_idx].toarray()[0], intensity_values[model]) + tc_haz.intensity[0, intensity_idx].toarray()[0], + intensity_values[model], + ) for idx, val in zip(intensity_idx, intensity_values[model]): if val == 0: self.assertEqual(tc_haz.intensity[0, idx], 0) - tc_track.data[0].attrs['max_sustained_wind_unit'] = 'elbows/fortnight' + tc_track.data[0].attrs["max_sustained_wind_unit"] = "elbows/fortnight" with self.assertRaises(ValueError): TropCyclone.from_tracks(tc_track, centroids=CENTR_TEST_BRB, model=model) @@ -255,14 +420,14 @@ def test_set_one_file_pass(self): tc_haz = TropCyclone.from_tracks(tc_track, centroids=CENTR_TEST_BRB) tc_haz.check() - self.assertEqual(tc_haz.haz_type, 'TC') - self.assertEqual(tc_haz.units, 'm/s') + self.assertEqual(tc_haz.haz_type, "TC") + self.assertEqual(tc_haz.units, "m/s") self.assertEqual(tc_haz.centroids.size, 296) self.assertEqual(tc_haz.event_id.size, 1) self.assertEqual(tc_haz.event_id[0], 1) - self.assertEqual(tc_haz.event_name, ['1951239N12334']) + self.assertEqual(tc_haz.event_name, ["1951239N12334"]) self.assertEqual(tc_haz.category, tc_track.data[0].category) - self.assertEqual(tc_haz.basin[0], 'NA') + self.assertEqual(tc_haz.basin[0], "NA") self.assertIsInstance(tc_haz.basin, list) self.assertIsInstance(tc_haz.category, np.ndarray) self.assertTrue(np.array_equal(tc_haz.frequency, np.array([1]))) @@ -276,17 +441,19 @@ def test_set_one_file_pass(self): def test_two_files_pass(self): """Test from_tracks with two ibtracs.""" - tc_track = 
TCTracks.from_processed_ibtracs_csv([TEST_TRACK_SHORT, TEST_TRACK_SHORT]) + tc_track = TCTracks.from_processed_ibtracs_csv( + [TEST_TRACK_SHORT, TEST_TRACK_SHORT] + ) tc_haz = TropCyclone.from_tracks(tc_track, centroids=CENTR_TEST_BRB) tc_haz.remove_duplicates() tc_haz.check() - self.assertEqual(tc_haz.haz_type, 'TC') - self.assertEqual(tc_haz.units, 'm/s') + self.assertEqual(tc_haz.haz_type, "TC") + self.assertEqual(tc_haz.units, "m/s") self.assertEqual(tc_haz.centroids.size, 296) self.assertEqual(tc_haz.event_id.size, 1) self.assertEqual(tc_haz.event_id[0], 1) - self.assertEqual(tc_haz.event_name, ['1951239N12334']) + self.assertEqual(tc_haz.event_name, ["1951239N12334"]) self.assertTrue(np.array_equal(tc_haz.frequency, np.array([1]))) self.assertTrue(np.array_equal(tc_haz.orig, np.array([True]))) self.assertTrue(isinstance(tc_haz.intensity, sparse.csr_matrix)) @@ -310,11 +477,11 @@ def create_tc(self): self.tc = TropCyclone( intensity=sparse.csr_matrix(intensity), - basin=['NA', 'NA', 'NA', 'WP'], + basin=["NA", "NA", "NA", "WP"], category=np.array([2, 0, 4, 1]), event_id=np.arange(intensity.shape[0]), - frequency=np.repeat(1./intensity.shape[0], intensity.shape[0]), - date=np.array([723795, 728395, 738395, 724395]) + frequency=np.repeat(1.0 / intensity.shape[0], intensity.shape[0]), + date=np.array([723795, 728395, 738395, 724395]), ) def test_apply_climate_scenario_knu_calculations(self): @@ -324,51 +491,58 @@ def test_apply_climate_scenario_knu_calculations(self): self.create_tc() cat05_sel = np.repeat(True, self.tc.category.shape[0]) - cat03_sel = np.array([cat in [0,1,2,3] for cat in self.tc.category]) - cat45_sel = np.array([cat in [4,5] for cat in self.tc.category]) + cat03_sel = np.array([cat in [0, 1, 2, 3] for cat in self.tc.category]) + cat45_sel = np.array([cat in [4, 5] for cat in self.tc.category]) ## Retrieve scaling factors for cat 4 to 5 and 0 to 5 - percentile = '50' + percentile = "50" target_year = 2035 - rcp = '8.5' + rcp = "8.5" - future_tc = self.tc.apply_climate_scenario_knu(percentile=percentile, - scenario=rcp, - target_year=target_year) + future_tc = self.tc.apply_climate_scenario_knu( + percentile=percentile, scenario=rcp, target_year=target_year + ) for basin in np.unique(self.tc.basin): - basin_sel = np.array(self.tc.basin)==basin + basin_sel = np.array(self.tc.basin) == basin scaling_05, scaling_45 = [ - get_knutson_scaling_factor(percentile=percentile, - variable=variable, - basin=basin).loc[target_year, rcp] - for variable in ['cat05', 'cat45'] - ] + get_knutson_scaling_factor( + percentile=percentile, variable=variable, basin=basin + ).loc[target_year, rcp] + for variable in ["cat05", "cat45"] + ] ## Calulate scaling factors for cat 0 to 3 - freq_weighted_scaling_05 = scaling_05 * np.sum(self.tc.frequency[cat05_sel & basin_sel]) - freq_weighted_scaling_45 = scaling_45 * np.sum(self.tc.frequency[cat45_sel & basin_sel]) + freq_weighted_scaling_05 = scaling_05 * np.sum( + self.tc.frequency[cat05_sel & basin_sel] + ) + freq_weighted_scaling_45 = scaling_45 * np.sum( + self.tc.frequency[cat45_sel & basin_sel] + ) freq_sum_03 = np.sum(self.tc.frequency[cat03_sel & basin_sel]) - scaling_03 = (freq_weighted_scaling_05 - freq_weighted_scaling_45) / freq_sum_03 + scaling_03 = ( + freq_weighted_scaling_05 - freq_weighted_scaling_45 + ) / freq_sum_03 ## Check that frequencies obtained by function are the same as those obtained by scaling ## historic frequencies with retrieved scaling factors np.testing.assert_array_equal( - self.tc.frequency[cat03_sel & basin_sel] 
* (1 + scaling_03/100), - future_tc.frequency[cat03_sel & basin_sel] - ) + self.tc.frequency[cat03_sel & basin_sel] * (1 + scaling_03 / 100), + future_tc.frequency[cat03_sel & basin_sel], + ) np.testing.assert_array_equal( - self.tc.frequency[cat45_sel & basin_sel] * (1 + scaling_45/100), - future_tc.frequency[cat45_sel & basin_sel] - ) + self.tc.frequency[cat45_sel & basin_sel] * (1 + scaling_45 / 100), + future_tc.frequency[cat45_sel & basin_sel], + ) def test_apply_climate_scenario_knu_target_year_out_of_range(self): self.create_tc() with self.assertRaises(KeyError): self.tc.apply_climate_scenario_knu(target_year=2200) + class TestDumpReloadCycle(unittest.TestCase): def setUp(self): """Create a TropCyclone object and a temporary directory""" diff --git a/climada/hazard/test/test_trop_cyclone_windfields.py b/climada/hazard/test/test_trop_cyclone_windfields.py index f91ac075a..418e52867 100644 --- a/climada/hazard/test/test_trop_cyclone_windfields.py +++ b/climada/hazard/test/test_trop_cyclone_windfields.py @@ -5,9 +5,21 @@ from climada.hazard import TCTracks from climada.hazard.test.test_trop_cyclone import TEST_TRACK, TEST_TRACK_SHORT -from climada.hazard.trop_cyclone.trop_cyclone_windfields import get_close_centroids, MBAR_TO_PA, _B_holland_1980, H_TO_S, \ - _bs_holland_2008, _v_max_s_holland_2008, KM_TO_M, _x_holland_2010, _stat_holland_2010, _stat_holland_1980, \ - _stat_er_2011, tctrack_to_si, _vtrans +from climada.hazard.trop_cyclone.trop_cyclone_windfields import ( + H_TO_S, + KM_TO_M, + MBAR_TO_PA, + _B_holland_1980, + _bs_holland_2008, + _stat_er_2011, + _stat_holland_1980, + _stat_holland_2010, + _v_max_s_holland_2008, + _vtrans, + _x_holland_2010, + get_close_centroids, + tctrack_to_si, +) from climada.util import ureg @@ -16,89 +28,129 @@ class TestWindfieldHelpers(unittest.TestCase): def test_get_close_centroids_pass(self): """Test get_close_centroids function.""" - si_track = xr.Dataset({ - "lat": ("time", np.array([0, -0.5, 0])), - "lon": ("time", np.array([0.9, 2, 3.2])), - }, attrs={"mid_lon": 0.0}) - centroids = np.array([ - [0, -0.2], [0, 0.9], [-1.1, 1.2], [1, 2.1], [0, 4.3], [0.6, 3.8], [0.9, 4.1], - ]) - centroids_close, mask_close, mask_close_alongtrack = ( - get_close_centroids(si_track, centroids, 112.0) + si_track = xr.Dataset( + { + "lat": ("time", np.array([0, -0.5, 0])), + "lon": ("time", np.array([0.9, 2, 3.2])), + }, + attrs={"mid_lon": 0.0}, + ) + centroids = np.array( + [ + [0, -0.2], + [0, 0.9], + [-1.1, 1.2], + [1, 2.1], + [0, 4.3], + [0.6, 3.8], + [0.9, 4.1], + ] + ) + centroids_close, mask_close, mask_close_alongtrack = get_close_centroids( + si_track, centroids, 112.0 ) self.assertEqual(centroids_close.shape[0], mask_close.sum()) self.assertEqual(mask_close_alongtrack.shape[0], si_track.sizes["time"]) self.assertEqual(mask_close_alongtrack.shape[1], centroids_close.shape[0]) np.testing.assert_equal(mask_close_alongtrack.any(axis=0), True) - np.testing.assert_equal(mask_close, np.array( - [False, True, True, False, False, True, False] - )) - np.testing.assert_equal(mask_close_alongtrack, np.array([ - [True, False, False], - [False, True, False], - [False, False, True], - ])) + np.testing.assert_equal( + mask_close, np.array([False, True, True, False, False, True, False]) + ) + np.testing.assert_equal( + mask_close_alongtrack, + np.array( + [ + [True, False, False], + [False, True, False], + [False, False, True], + ] + ), + ) np.testing.assert_equal(centroids_close, centroids[mask_close]) # example where antimeridian is crossed - si_track = 
xr.Dataset({ - "lat": ("time", np.linspace(-10, 10, 11)), - "lon": ("time", np.linspace(170, 200, 11)), - }, attrs={"mid_lon": 180.0}) + si_track = xr.Dataset( + { + "lat": ("time", np.linspace(-10, 10, 11)), + "lon": ("time", np.linspace(170, 200, 11)), + }, + attrs={"mid_lon": 180.0}, + ) centroids = np.array([[-11, 169], [-7, 176], [4, -170], [10, 170], [-10, -160]]) - centroids_close, mask_close, mask_close_alongtrack = ( - get_close_centroids(si_track, centroids, 600.0) + centroids_close, mask_close, mask_close_alongtrack = get_close_centroids( + si_track, centroids, 600.0 ) self.assertEqual(centroids_close.shape[0], mask_close.sum()) self.assertEqual(mask_close_alongtrack.shape[0], si_track.sizes["time"]) self.assertEqual(mask_close_alongtrack.shape[1], centroids_close.shape[0]) np.testing.assert_equal(mask_close_alongtrack.any(axis=0), True) np.testing.assert_equal(mask_close, np.array([True, True, True, False, False])) - np.testing.assert_equal(centroids_close, np.array([ - # the longitudinal coordinate of the third centroid is normalized - [-11, 169], [-7, 176], [4, 190], - ])) + np.testing.assert_equal( + centroids_close, + np.array( + [ + # the longitudinal coordinate of the third centroid is normalized + [-11, 169], + [-7, 176], + [4, 190], + ] + ), + ) def test_B_holland_1980_pass(self): """Test _B_holland_1980 function.""" - si_track = xr.Dataset({ - "pdelta": ("time", MBAR_TO_PA * np.array([15, 30])), - "vgrad": ("time", [35, 40]), - "rho_air": ("time", [1.15, 1.15]) - }) + si_track = xr.Dataset( + { + "pdelta": ("time", MBAR_TO_PA * np.array([15, 30])), + "vgrad": ("time", [35, 40]), + "rho_air": ("time", [1.15, 1.15]), + } + ) _B_holland_1980(si_track) np.testing.assert_array_almost_equal(si_track["hol_b"], [2.5, 1.667213]) - si_track = xr.Dataset({ - "pdelta": ("time", MBAR_TO_PA * np.array([4.74, 15, 30, 40])), - "vmax": ("time", [np.nan, 22.5, 25.4, 42.5]), - "rho_air": ("time", [1.2, 1.2, 1.2, 1.2]) - }) + si_track = xr.Dataset( + { + "pdelta": ("time", MBAR_TO_PA * np.array([4.74, 15, 30, 40])), + "vmax": ("time", [np.nan, 22.5, 25.4, 42.5]), + "rho_air": ("time", [1.2, 1.2, 1.2, 1.2]), + } + ) _B_holland_1980(si_track, gradient_to_surface_winds=0.9) - np.testing.assert_allclose(si_track["hol_b"], [np.nan, 1.101, 0.810, 1.473], atol=1e-3) + np.testing.assert_allclose( + si_track["hol_b"], [np.nan, 1.101, 0.810, 1.473], atol=1e-3 + ) def test_bs_holland_2008_pass(self): """Test _bs_holland_2008 function. 
Compare to MATLAB reference.""" - si_track = xr.Dataset({ - "tstep": ("time", H_TO_S * np.array([1.0, 1.0, 1.0])), - "lat": ("time", [12.299999504631234, 12.299999504631343, 12.299999279463769]), - "pdelta": ("time", MBAR_TO_PA * np.array([4.74, 4.73, 4.73])), - "cen": ("time", MBAR_TO_PA * np.array([1005.2585, 1005.2633, 1005.2682])), - "vtrans_norm": ("time", [np.nan, 5.241999541820597, 5.123882725120426]), - }) - _bs_holland_2008(si_track) - np.testing.assert_allclose( - si_track["hol_b"], [np.nan, 1.27, 1.27], atol=1e-2 + si_track = xr.Dataset( + { + "tstep": ("time", H_TO_S * np.array([1.0, 1.0, 1.0])), + "lat": ( + "time", + [12.299999504631234, 12.299999504631343, 12.299999279463769], + ), + "pdelta": ("time", MBAR_TO_PA * np.array([4.74, 4.73, 4.73])), + "cen": ( + "time", + MBAR_TO_PA * np.array([1005.2585, 1005.2633, 1005.2682]), + ), + "vtrans_norm": ("time", [np.nan, 5.241999541820597, 5.123882725120426]), + } ) + _bs_holland_2008(si_track) + np.testing.assert_allclose(si_track["hol_b"], [np.nan, 1.27, 1.27], atol=1e-2) def test_v_max_s_holland_2008_pass(self): """Test _v_max_s_holland_2008 function.""" # Numbers analogous to test_B_holland_1980_pass - si_track = xr.Dataset({ - "pdelta": ("time", MBAR_TO_PA * np.array([15, 30])), - "hol_b": ("time", [2.5, 1.67]), - "rho_air": ("time", [1.15, 1.15]), - }) + si_track = xr.Dataset( + { + "pdelta": ("time", MBAR_TO_PA * np.array([15, 30])), + "hol_b": ("time", [2.5, 1.67]), + "rho_air": ("time", [1.15, 1.15]), + } + ) _v_max_s_holland_2008(si_track) np.testing.assert_array_almost_equal(si_track["vmax"], [34.635341, 40.033421]) @@ -113,77 +165,110 @@ def test_holland_2010_pass(self): # peripheral wind speeds. # # The "hol_b" parameter tunes the meaning of a "comparably" large or small RMW. - si_track = xr.Dataset({ - # four test cases: - # - low vmax, moderate RMW: x decreases moderately - # - large hol_b: x decreases sharply - # - very low vmax: x decreases so much, it needs to be clipped at 0 - # - large vmax, large RMW: x increases - "rad": ("time", KM_TO_M * np.array([75, 75, 75, 90])), - "vmax": ("time", [35.0, 35.0, 16.0, 90.0]), - "hol_b": ("time", [1.75, 2.5, 1.9, 1.6]), - }) - d_centr = KM_TO_M * np.array([ - # first column is for locations within the storm eye - # second column is for locations at or close to the radius of max wind - # third column is for locations outside the storm eye - # fourth column is for locations exactly at the peripheral radius - # fifth column is for locations outside the peripheral radius - [0., 75, 220, 300, 490], - [30, 74, 170, 300, 501], - [21, 76, 230, 300, 431], - [32, 91, 270, 300, 452], - ], dtype=float) - close_centr = np.array([ - # note that we set one of these to "False" for testing - [True, True, True, True, True], - [True, True, True, True, False], - [True, True, True, True, True], - [True, True, True, True, True], - ], dtype=bool) + si_track = xr.Dataset( + { + # four test cases: + # - low vmax, moderate RMW: x decreases moderately + # - large hol_b: x decreases sharply + # - very low vmax: x decreases so much, it needs to be clipped at 0 + # - large vmax, large RMW: x increases + "rad": ("time", KM_TO_M * np.array([75, 75, 75, 90])), + "vmax": ("time", [35.0, 35.0, 16.0, 90.0]), + "hol_b": ("time", [1.75, 2.5, 1.9, 1.6]), + } + ) + d_centr = KM_TO_M * np.array( + [ + # first column is for locations within the storm eye + # second column is for locations at or close to the radius of max wind + # third column is for locations outside the storm eye + # fourth column is for locations 
exactly at the peripheral radius + # fifth column is for locations outside the peripheral radius + [0.0, 75, 220, 300, 490], + [30, 74, 170, 300, 501], + [21, 76, 230, 300, 431], + [32, 91, 270, 300, 452], + ], + dtype=float, + ) + close_centr = np.array( + [ + # note that we set one of these to "False" for testing + [True, True, True, True, True], + [True, True, True, True, False], + [True, True, True, True, True], + [True, True, True, True, True], + ], + dtype=bool, + ) hol_x = _x_holland_2010(si_track, d_centr, close_centr) - np.testing.assert_array_almost_equal(hol_x, [ - [0.5, 0.500000, 0.485077, 0.476844, 0.457291], - [0.5, 0.500000, 0.410997, 0.400000, 0.000000], - [0.5, 0.497620, 0.400000, 0.400000, 0.400000], - [0.5, 0.505022, 1.403952, 1.554611, 2.317948], - ]) + np.testing.assert_array_almost_equal( + hol_x, + [ + [0.5, 0.500000, 0.485077, 0.476844, 0.457291], + [0.5, 0.500000, 0.410997, 0.400000, 0.000000], + [0.5, 0.497620, 0.400000, 0.400000, 0.400000], + [0.5, 0.505022, 1.403952, 1.554611, 2.317948], + ], + ) v_ang_norm = _stat_holland_2010(si_track, d_centr, close_centr, hol_x) - np.testing.assert_allclose(v_ang_norm, [ - # first column: converge to 0 when approaching storm eye - # second column: vmax at RMW - # fourth column: peripheral speed (17 m/s) at peripheral radius (unless x is clipped!) - [ 0.000000, 35.000000, 21.181497, 17.000000, 12.1034610], - [ 1.296480, 34.990037, 21.593755, 12.891313, 0.0000000], - [ 0.321952, 15.997500, 9.712006, 8.087240, 6.2289690], - [24.823469, 89.992938, 24.381965, 17.000000, 1.9292020], - ], atol=1e-6) + np.testing.assert_allclose( + v_ang_norm, + [ + # first column: converge to 0 when approaching storm eye + # second column: vmax at RMW + # fourth column: peripheral speed (17 m/s) at peripheral radius (unless x is clipped!) + [0.000000, 35.000000, 21.181497, 17.000000, 12.1034610], + [1.296480, 34.990037, 21.593755, 12.891313, 0.0000000], + [0.321952, 15.997500, 9.712006, 8.087240, 6.2289690], + [24.823469, 89.992938, 24.381965, 17.000000, 1.9292020], + ], + atol=1e-6, + ) def test_stat_holland_1980(self): """Test _stat_holland_1980 function. 
Compare to MATLAB reference.""" - d_centr = KM_TO_M * np.array([ - [299.4501244109841, 291.0737897183741, 292.5441003235722, 40.665454622610511], - [293.6067129546862, 1000.0, 298.2652319413182, 70.0], - ]) - si_track = xr.Dataset({ - "rad": ("time", KM_TO_M * np.array([40.665454622610511, 75.547902916671745])), - "hol_b": ("time", [1.486076257880692, 1.265551666104679]), - "pdelta": ("time", MBAR_TO_PA * np.array([39.12, 4.73])), - "lat": ("time", [-14.089110370469488, 12.299999279463769]), - "cp": ("time", [3.54921922e-05, 3.10598285e-05]), - "rho_air": ("time", [1.15, 1.15]), - }) - mask = np.array([[True, True, True, True], [True, False, True, True]], dtype=bool) + d_centr = KM_TO_M * np.array( + [ + [ + 299.4501244109841, + 291.0737897183741, + 292.5441003235722, + 40.665454622610511, + ], + [293.6067129546862, 1000.0, 298.2652319413182, 70.0], + ] + ) + si_track = xr.Dataset( + { + "rad": ( + "time", + KM_TO_M * np.array([40.665454622610511, 75.547902916671745]), + ), + "hol_b": ("time", [1.486076257880692, 1.265551666104679]), + "pdelta": ("time", MBAR_TO_PA * np.array([39.12, 4.73])), + "lat": ("time", [-14.089110370469488, 12.299999279463769]), + "cp": ("time", [3.54921922e-05, 3.10598285e-05]), + "rho_air": ("time", [1.15, 1.15]), + } + ) + mask = np.array( + [[True, True, True, True], [True, False, True, True]], dtype=bool + ) v_ang_norm = _stat_holland_1980(si_track, d_centr, mask) np.testing.assert_allclose( - v_ang_norm, [[11.28, 11.68, 11.61, 42.41], [5.38, 0, 5.28, 12.76]], atol=1e-2, + v_ang_norm, + [[11.28, 11.68, 11.61, 42.41], [5.38, 0, 5.28, 12.76]], + atol=1e-2, ) # without Coriolis force, values are higher, esp. far away from the center: v_ang_norm = _stat_holland_1980(si_track, d_centr, mask, cyclostrophic=True) np.testing.assert_allclose( - v_ang_norm, [[15.72, 16.04, 15.98, 43.13], [8.84, 0, 8.76, 13.81]], atol=1e-2, + v_ang_norm, + [[15.72, 16.04, 15.98, 43.13], [8.84, 0, 8.76, 13.81]], + atol=1e-2, ) d_centr = np.array([[], []]) @@ -194,18 +279,28 @@ def test_stat_holland_1980(self): def test_er_2011_pass(self): """Test Emanuel and Rotunno 2011 wind field model.""" # test at centroids within and outside of radius of max wind - d_centr = KM_TO_M * np.array([[35, 70, 75, 220], [30, 150, 1000, 300]], dtype=float) - si_track = xr.Dataset({ - "rad": ("time", KM_TO_M * np.array([75.0, 40.0])), - "vmax": ("time", [35.0, 40.0]), - "lat": ("time", [20.0, 27.0]), - "cp": ("time", [4.98665369e-05, 6.61918149e-05]), - }) - mask = np.array([[True, True, True, True], [True, False, True, True]], dtype=bool) + d_centr = KM_TO_M * np.array( + [[35, 70, 75, 220], [30, 150, 1000, 300]], dtype=float + ) + si_track = xr.Dataset( + { + "rad": ("time", KM_TO_M * np.array([75.0, 40.0])), + "vmax": ("time", [35.0, 40.0]), + "lat": ("time", [20.0, 27.0]), + "cp": ("time", [4.98665369e-05, 6.61918149e-05]), + } + ) + mask = np.array( + [[True, True, True, True], [True, False, True, True]], dtype=bool + ) v_ang_norm = _stat_er_2011(si_track, d_centr, mask) - np.testing.assert_array_almost_equal(v_ang_norm, - [[28.258025, 36.782418, 36.869995, 22.521237], - [39.670883, 0, 3.300626, 10.827206]]) + np.testing.assert_array_almost_equal( + v_ang_norm, + [ + [28.258025, 36.782418, 36.869995, 22.521237], + [39.670883, 0, 3.300626, 10.827206], + ], + ) def test_vtrans_pass(self): """Test _vtrans function. 
Compare to MATLAB reference.""" @@ -223,21 +318,23 @@ def test_vtrans_pass(self): self.assertAlmostEqual(si_track["vtrans_norm"].values[1] * to_kn, 10.191466246) def testtctrack_to_si(self): - """ Test tctrack_to_si should create the same vmax output independent of the input unit """ + """Test tctrack_to_si should create the same vmax output independent of the input unit""" tc_track = TCTracks.from_processed_ibtracs_csv(TEST_TRACK_SHORT).data[0] tc_track_kmph = tc_track.copy(deep=True) - tc_track_kmph['max_sustained_wind'] *= ( + tc_track_kmph["max_sustained_wind"] *= ( (1.0 * ureg.knot).to(ureg.km / ureg.hour).magnitude ) - tc_track_kmph.attrs['max_sustained_wind_unit'] = 'km/h' + tc_track_kmph.attrs["max_sustained_wind_unit"] = "km/h" si_track = tctrack_to_si(tc_track) si_track_from_kmph = tctrack_to_si(tc_track_kmph) - np.testing.assert_array_almost_equal(si_track["vmax"], si_track_from_kmph["vmax"]) + np.testing.assert_array_almost_equal( + si_track["vmax"], si_track_from_kmph["vmax"] + ) - tc_track.attrs['max_sustained_wind_unit'] = 'elbows/fortnight' + tc_track.attrs["max_sustained_wind_unit"] = "elbows/fortnight" with self.assertRaises(ValueError): tctrack_to_si(tc_track) diff --git a/climada/hazard/trop_cyclone/__init__.py b/climada/hazard/trop_cyclone/__init__.py index 452bf4364..d7f4d6c6e 100644 --- a/climada/hazard/trop_cyclone/__init__.py +++ b/climada/hazard/trop_cyclone/__init__.py @@ -1,5 +1,21 @@ from climada.hazard.trop_cyclone.trop_cyclone import * -from climada.hazard.trop_cyclone.trop_cyclone_windfields import compute_windfields_sparse, compute_angular_windspeeds, tctrack_to_si, \ - get_close_centroids, KN_TO_MS, KM_TO_M, KM_TO_M, H_TO_S, NM_TO_KM, KMH_TO_MS, MBAR_TO_PA, \ - DEF_MAX_DIST_EYE_KM, DEF_INTENSITY_THRES, DEF_MAX_MEMORY_GB, MODEL_VANG, DEF_RHO_AIR, DEF_GRADIENT_TO_SURFACE_WINDS, \ - T_ICE_K, V_ANG_EARTH +from climada.hazard.trop_cyclone.trop_cyclone_windfields import ( + DEF_GRADIENT_TO_SURFACE_WINDS, + DEF_INTENSITY_THRES, + DEF_MAX_DIST_EYE_KM, + DEF_MAX_MEMORY_GB, + DEF_RHO_AIR, + H_TO_S, + KM_TO_M, + KMH_TO_MS, + KN_TO_MS, + MBAR_TO_PA, + MODEL_VANG, + NM_TO_KM, + T_ICE_K, + V_ANG_EARTH, + compute_angular_windspeeds, + compute_windfields_sparse, + get_close_centroids, + tctrack_to_si, +) diff --git a/climada/hazard/trop_cyclone/trop_cyclone.py b/climada/hazard/trop_cyclone/trop_cyclone.py index 6dacb1b7d..ae01332ca 100644 --- a/climada/hazard/trop_cyclone/trop_cyclone.py +++ b/climada/hazard/trop_cyclone/trop_cyclone.py @@ -19,36 +19,40 @@ Define TC wind hazard (TropCyclone class). 
""" -__all__ = ['TropCyclone'] +__all__ = ["TropCyclone"] import copy import datetime as dt import itertools import logging import time -from typing import Optional, Tuple, List +from typing import List, Optional, Tuple -import numpy as np -from scipy import sparse import matplotlib.animation as animation -from tqdm import tqdm +import numpy as np import pathos.pools import xarray as xr +from scipy import sparse +from tqdm import tqdm -from climada.hazard.base import Hazard -from climada.hazard.tc_tracks import TCTracks -from climada.hazard.tc_clim_change import get_knutson_scaling_factor -from climada.hazard.centroids.centr import Centroids import climada.util.constants as u_const import climada.util.coordinates as u_coord import climada.util.plot as u_plot +from climada.hazard.base import Hazard +from climada.hazard.centroids.centr import Centroids +from climada.hazard.tc_clim_change import get_knutson_scaling_factor +from climada.hazard.tc_tracks import TCTracks -from .trop_cyclone_windfields import DEF_MAX_DIST_EYE_KM, DEF_INTENSITY_THRES, \ - DEF_MAX_MEMORY_GB, compute_windfields_sparse +from .trop_cyclone_windfields import ( + DEF_INTENSITY_THRES, + DEF_MAX_DIST_EYE_KM, + DEF_MAX_MEMORY_GB, + compute_windfields_sparse, +) LOGGER = logging.getLogger(__name__) -HAZ_TYPE = 'TC' +HAZ_TYPE = "TC" """Hazard type acronym for Tropical Cyclone""" @@ -83,10 +87,11 @@ class TropCyclone(Hazard): matrix of shape (npositions, ncentroids * 2) that can be reshaped to a full ndarray of shape (npositions, ncentroids, 2). """ + intensity_thres = DEF_INTENSITY_THRES """intensity threshold for storage in m/s""" - vars_opt = Hazard.vars_opt.union({'category'}) + vars_opt = Hazard.vars_opt.union({"category"}) """Name of the variables that are not needed to compute the impact.""" def __init__( @@ -125,7 +130,7 @@ def __init__( **kwargs : Hazard properties, optional All other keyword arguments are passed to the Hazard constructor. """ - kwargs.setdefault('haz_type', HAZ_TYPE) + kwargs.setdefault("haz_type", HAZ_TYPE) Hazard.__init__(self, **kwargs) self.category = category if category is not None else np.array([], int) self.basin = basin if basin is not None else [] @@ -133,13 +138,15 @@ def __init__( def set_from_tracks(self, *args, **kwargs): """This function is deprecated, use TropCyclone.from_tracks instead.""" - LOGGER.warning("The use of TropCyclone.set_from_tracks is deprecated." - "Use TropCyclone.from_tracks instead.") + LOGGER.warning( + "The use of TropCyclone.set_from_tracks is deprecated." + "Use TropCyclone.from_tracks instead." 
+ ) if "intensity_thres" not in kwargs: # some users modify the threshold attribute before calling `set_from_tracks` kwargs["intensity_thres"] = self.intensity_thres - if self.pool is not None and 'pool' not in kwargs: - kwargs['pool'] = self.pool + if self.pool is not None and "pool" not in kwargs: + kwargs["pool"] = self.pool self.__dict__ = TropCyclone.from_tracks(*args, **kwargs).__dict__ @classmethod @@ -148,7 +155,7 @@ def from_tracks( tracks: TCTracks, centroids: Centroids, pool: Optional[pathos.pools.ProcessPool] = None, - model: str = 'H08', + model: str = "H08", model_kwargs: Optional[dict] = None, ignore_distance_to_coast: bool = False, store_windfields: bool = False, @@ -287,10 +294,10 @@ def from_tracks( [idx_centr_filter] = (np.abs(centroids.lat) <= max_latitude).nonzero() else: # Select centroids which are inside max_dist_inland_km and lat <= max_latitude - if 'dist_coast' not in centroids.gdf.columns: + if "dist_coast" not in centroids.gdf.columns: dist_coast = centroids.get_dist_coast() else: - dist_coast = centroids.gdf['dist_coast'].values + dist_coast = centroids.gdf["dist_coast"].values [idx_centr_filter] = ( (dist_coast <= max_dist_inland_km * 1000) & (np.abs(centroids.lat) <= max_latitude) @@ -303,7 +310,9 @@ def from_tracks( ) # Restrict to coastal centroids within reach of any of the tracks - t_lon_min, t_lat_min, t_lon_max, t_lat_max = tracks.get_bounds(deg_buffer=max_dist_eye_deg) + t_lon_min, t_lat_min, t_lon_max, t_lat_max = tracks.get_bounds( + deg_buffer=max_dist_eye_deg + ) t_mid_lon = 0.5 * (t_lon_min + t_lon_max) filtered_centroids = centroids.coord[idx_centr_filter] u_coord.lon_normalize(filtered_centroids[:, 1], center=t_mid_lon) @@ -328,7 +337,9 @@ def from_tracks( ) LOGGER.info( - 'Mapping %d tracks to %d coastal centroids.', num_tracks, idx_centr_filter.size, + "Mapping %d tracks to %d coastal centroids.", + num_tracks, + idx_centr_filter.size, ) if pool: chunksize = max(min(num_tracks // pool.ncpus, 1000), 1) @@ -356,20 +367,20 @@ def from_tracks( if last_perc < 100: LOGGER.info("Progress: 100%") - LOGGER.debug('Concatenate events.') + LOGGER.debug("Concatenate events.") haz = cls.concat(tc_haz_list) haz.pool = pool haz.intensity_thres = intensity_thres - LOGGER.debug('Compute frequency.') + LOGGER.debug("Compute frequency.") haz.frequency_from_tracks(tracks.data) return haz def apply_climate_scenario_knu( self, - percentile: str='50', - scenario: str='4.5', - target_year: int=2050, - **kwargs + percentile: str = "50", + scenario: str = "4.5", + target_year: int = 2050, + **kwargs, ): """ From current TC hazard instance, return new hazard set with future events @@ -421,9 +432,11 @@ def apply_climate_scenario_knu( """ if self.category.size == 0: - LOGGER.warning("Tropical cyclone categories are missing and" - "no effect of climate change can be modelled." - "The original event set is returned") + LOGGER.warning( + "Tropical cyclone categories are missing and" + "no effect of climate change can be modelled." 
+ "The original event set is returned" + ) return self tc_cc = copy.deepcopy(self) @@ -436,40 +449,48 @@ def apply_climate_scenario_knu( for basin in np.unique(tc_cc.basin): scale_year_rcp_05, scale_year_rcp_45 = [ - get_knutson_scaling_factor( - percentile=percentile, - variable=variable, - basin=basin, - baseline=(np.min(years), np.max(years)), - **kwargs - ).loc[target_year, scenario] - for variable in ['cat05', 'cat45'] - ] + get_knutson_scaling_factor( + percentile=percentile, + variable=variable, + basin=basin, + baseline=(np.min(years), np.max(years)), + **kwargs, + ).loc[target_year, scenario] + for variable in ["cat05", "cat45"] + ] bas_sel = np.array(tc_cc.basin) == basin - cat_05_freqs_change = scale_year_rcp_05 * np.sum(tc_cc.frequency[sel_cat05 & bas_sel]) - cat_45_freqs_change = scale_year_rcp_45 * np.sum(tc_cc.frequency[sel_cat45 & bas_sel]) + cat_05_freqs_change = scale_year_rcp_05 * np.sum( + tc_cc.frequency[sel_cat05 & bas_sel] + ) + cat_45_freqs_change = scale_year_rcp_45 * np.sum( + tc_cc.frequency[sel_cat45 & bas_sel] + ) cat_03_freqs = np.sum(tc_cc.frequency[sel_cat03 & bas_sel]) - scale_year_rcp_03 = (cat_05_freqs_change-cat_45_freqs_change) / cat_03_freqs + scale_year_rcp_03 = ( + cat_05_freqs_change - cat_45_freqs_change + ) / cat_03_freqs - tc_cc.frequency[sel_cat03 & bas_sel] *= 1 + scale_year_rcp_03/100 - tc_cc.frequency[sel_cat45 & bas_sel] *= 1 + scale_year_rcp_45/100 + tc_cc.frequency[sel_cat03 & bas_sel] *= 1 + scale_year_rcp_03 / 100 + tc_cc.frequency[sel_cat45 & bas_sel] *= 1 + scale_year_rcp_45 / 100 if any(tc_cc.frequency) < 0: raise ValueError( " The application of the climate scenario leads to " " negative frequencies. One solution - if appropriate -" " could be to use a less extreme percentile." - ) + ) return tc_cc def set_climate_scenario_knu(self, *args, **kwargs): """This function is deprecated, use TropCyclone.apply_climate_scenario_knu instead.""" - LOGGER.warning("The use of TropCyclone.set_climate_scenario_knu is deprecated." - "Use TropCyclone.apply_climate_scenario_knu instead.") + LOGGER.warning( + "The use of TropCyclone.set_climate_scenario_knu is deprecated." + "Use TropCyclone.apply_climate_scenario_knu instead." 
+ ) return self.apply_climate_scenario_knu(*args, **kwargs) @classmethod @@ -482,7 +503,7 @@ def video_intensity( writer: animation = animation.PillowWriter(bitrate=500), figsize: Tuple[float, float] = (9, 13), adapt_fontsize: bool = True, - **kwargs + **kwargs, ): """ Generate video of TC wind fields node by node and returns its @@ -520,55 +541,64 @@ def video_intensity( # initialization track = tracks.get_track(track_name) if not track: - raise ValueError(f'{track_name} not found in track data.') + raise ValueError(f"{track_name} not found in track data.") idx_plt = np.argwhere( - (track['lon'].values < centroids.total_bounds[2] + 1) - & (centroids.total_bounds[0] - 1 < track['lon'].values) - & (track['lat'].values < centroids.total_bounds[3] + 1) - & (centroids.total_bounds[1] - 1 < track['lat'].values) + (track["lon"].values < centroids.total_bounds[2] + 1) + & (centroids.total_bounds[0] - 1 < track["lon"].values) + & (track["lat"].values < centroids.total_bounds[3] + 1) + & (centroids.total_bounds[1] - 1 < track["lat"].values) ).reshape(-1) tc_list = [] - tr_coord = {'lat': [], 'lon': []} + tr_coord = {"lat": [], "lon": []} for node in range(idx_plt.size - 2): tr_piece = track.sel( - time=slice(track['time'].values[idx_plt[node]], - track['time'].values[idx_plt[node + 2]])) - tr_piece.attrs['n_nodes'] = 2 # plot only one node + time=slice( + track["time"].values[idx_plt[node]], + track["time"].values[idx_plt[node + 2]], + ) + ) + tr_piece.attrs["n_nodes"] = 2 # plot only one node tr_sel = TCTracks() tr_sel.append(tr_piece) - tr_coord['lat'].append(tr_sel.data[0]['lat'].values[:-1]) - tr_coord['lon'].append(tr_sel.data[0]['lon'].values[:-1]) + tr_coord["lat"].append(tr_sel.data[0]["lat"].values[:-1]) + tr_coord["lon"].append(tr_sel.data[0]["lon"].values[:-1]) tc_tmp = cls.from_tracks(tr_sel, centroids=centroids) tc_tmp.event_name = [ - track['name'] + ' ' + time.strftime( + track["name"] + + " " + + time.strftime( "%d %h %Y %H:%M", - time.gmtime(tr_sel.data[0]['time'][1].values.astype(int) - / 1000000000) + time.gmtime( + tr_sel.data[0]["time"][1].values.astype(int) / 1000000000 + ), ) ] tc_list.append(tc_tmp) - if 'cmap' not in kwargs: - kwargs['cmap'] = 'Greys' - if 'vmin' not in kwargs: - kwargs['vmin'] = np.array([tc_.intensity.min() for tc_ in tc_list]).min() - if 'vmax' not in kwargs: - kwargs['vmax'] = np.array([tc_.intensity.max() for tc_ in tc_list]).max() + if "cmap" not in kwargs: + kwargs["cmap"] = "Greys" + if "vmin" not in kwargs: + kwargs["vmin"] = np.array([tc_.intensity.min() for tc_ in tc_list]).min() + if "vmax" not in kwargs: + kwargs["vmax"] = np.array([tc_.intensity.max() for tc_ in tc_list]).max() def run(node): tc_list[node].plot_intensity(1, axis=axis, **kwargs) - axis.plot(tr_coord['lon'][node], tr_coord['lat'][node], 'k') + axis.plot(tr_coord["lon"][node], tr_coord["lat"][node], "k") axis.set_title(tc_list[node].event_name[0]) pbar.update() if file_name: - LOGGER.info('Generating video %s', file_name) - fig, axis, _fontsize = u_plot.make_map(figsize=figsize, adapt_fontsize=adapt_fontsize) + LOGGER.info("Generating video %s", file_name) + fig, axis, _fontsize = u_plot.make_map( + figsize=figsize, adapt_fontsize=adapt_fontsize + ) pbar = tqdm(total=idx_plt.size - 2) - ani = animation.FuncAnimation(fig, run, frames=idx_plt.size - 2, - interval=500, blit=False) + ani = animation.FuncAnimation( + fig, run, frames=idx_plt.size - 2, interval=500, blit=False + ) fig.tight_layout() ani.save(file_name, writer=writer) pbar.close() @@ -584,8 +614,8 @@ def 
frequency_from_tracks(self, tracks: List): """ if not tracks: return - year_max = np.amax([t['time'].dt.year.values.max() for t in tracks]) - year_min = np.amin([t['time'].dt.year.values.min() for t in tracks]) + year_max = np.amax([t["time"].dt.year.values.max() for t in tracks]) + year_min = np.amin([t["time"].dt.year.values.min() for t in tracks]) year_delta = year_max - year_min + 1 num_orig = np.count_nonzero(self.orig) ens_size = (self.event_id.size / num_orig) if num_orig > 0 else 1 @@ -597,7 +627,7 @@ def from_single_track( track: xr.Dataset, centroids: Centroids, idx_centr_filter: np.ndarray, - model: str = 'H08', + model: str = "H08", model_kwargs: Optional[dict] = None, store_windfields: bool = False, metric: str = "equirect", @@ -665,31 +695,36 @@ def from_single_track( new_haz.intensity = intensity_sparse if store_windfields: new_haz.windfields = [windfields_sparse] - new_haz.units = 'm/s' + new_haz.units = "m/s" new_haz.centroids = centroids new_haz.event_id = np.array([1]) new_haz.frequency = np.array([1]) - new_haz.event_name = [track.attrs['sid']] + new_haz.event_name = [track.attrs["sid"]] new_haz.fraction = sparse.csr_matrix(new_haz.intensity.shape) # store first day of track as date - new_haz.date = np.array([ - dt.datetime(track['time'].dt.year.values[0], - track['time'].dt.month.values[0], - track['time'].dt.day.values[0]).toordinal() - ]) - new_haz.orig = np.array([track.attrs['orig_event_flag']]) - new_haz.category = np.array([track.attrs['category']]) + new_haz.date = np.array( + [ + dt.datetime( + track["time"].dt.year.values[0], + track["time"].dt.month.values[0], + track["time"].dt.day.values[0], + ).toordinal() + ] + ) + new_haz.orig = np.array([track.attrs["orig_event_flag"]]) + new_haz.category = np.array([track.attrs["category"]]) # users that pickle TCTracks objects might still have data with the legacy basin attribute, # so we have to deal with it here - new_haz.basin = [track['basin'] if isinstance(track['basin'], str) - else str(track['basin'].values[0])] + new_haz.basin = [ + ( + track["basin"] + if isinstance(track["basin"], str) + else str(track["basin"].values[0]) + ) + ] return new_haz - def _apply_knutson_criterion( - self, - chg_int_freq: List, - scaling_rcp_year: float - ): + def _apply_knutson_criterion(self, chg_int_freq: List, scaling_rcp_year: float): """ Apply changes to intensities and cumulative frequencies. 
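# Illustrative sketch of the per-basin frequency rescaling done in
# apply_climate_scenario_knu above, reduced to plain NumPy with made-up numbers.
# scale_05 / scale_45 stand in for the Knutson scaling factors (in percent) obtained
# for 'cat05' and 'cat45'; the toy frequencies and categories are hypothetical and
# not taken from any CLIMADA dataset.
import numpy as np

freq = np.array([0.30, 0.20, 0.05])   # per-event frequencies in one basin (toy values)
category = np.array([1, 3, 4])        # Saffir-Simpson categories of those events
scale_05, scale_45 = 10.0, 25.0       # assumed percentage changes for cat 0-5 / cat 4-5

sel_cat45 = category >= 4
sel_cat03 = (category >= 0) & (category <= 3)

# implied change (percent times frequency) for cat 0-5 and cat 4-5 storms;
# here every event is cat 0-5, so the cat 0-5 sum is simply freq.sum()
chg_05 = scale_05 * freq.sum()
chg_45 = scale_45 * freq[sel_cat45].sum()
# the cat 0-3 scaling is whatever change remains after removing the cat 4-5 share
scale_03 = (chg_05 - chg_45) / freq[sel_cat03].sum()

freq[sel_cat03] *= 1 + scale_03 / 100
freq[sel_cat45] *= 1 + scale_45 / 100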
@@ -715,41 +750,42 @@ def _apply_knutson_criterion( bas_sel = np.array(tc_cc.basin) == basin # Apply intensity change - inten_chg = [chg - for chg in chg_int_freq - if (chg['variable'] == 'intensity' and - chg['basin'] == basin) - ] + inten_chg = [ + chg + for chg in chg_int_freq + if (chg["variable"] == "intensity" and chg["basin"] == basin) + ] for chg in inten_chg: - sel_cat_chg = np.isin(tc_cc.category, chg['category']) & bas_sel - inten_scaling = 1 + (chg['change'] - 1) * scaling_rcp_year + sel_cat_chg = np.isin(tc_cc.category, chg["category"]) & bas_sel + inten_scaling = 1 + (chg["change"] - 1) * scaling_rcp_year tc_cc.intensity = sparse.diags( np.where(sel_cat_chg, inten_scaling, 1) - ).dot(tc_cc.intensity) + ).dot(tc_cc.intensity) # Apply frequency change - freq_chg = [chg - for chg in chg_int_freq - if (chg['variable'] == 'frequency' and - chg['basin'] == basin) - ] - freq_chg.sort(reverse=False, key=lambda x: len(x['category'])) + freq_chg = [ + chg + for chg in chg_int_freq + if (chg["variable"] == "frequency" and chg["basin"] == basin) + ] + freq_chg.sort(reverse=False, key=lambda x: len(x["category"])) # Scale frequencies by category cat_larger_list = [] for chg in freq_chg: - cat_chg_list = [cat - for cat in chg['category'] - if cat not in cat_larger_list - ] + cat_chg_list = [ + cat for cat in chg["category"] if cat not in cat_larger_list + ] sel_cat_chg = np.isin(tc_cc.category, cat_chg_list) & bas_sel if sel_cat_chg.any(): - freq_scaling = 1 + (chg['change'] - 1) * scaling_rcp_year + freq_scaling = 1 + (chg["change"] - 1) * scaling_rcp_year tc_cc.frequency[sel_cat_chg] *= freq_scaling cat_larger_list += cat_chg_list if (tc_cc.frequency < 0).any(): - raise ValueError("The application of the given climate scenario" - "resulted in at least one negative frequency.") + raise ValueError( + "The application of the given climate scenario" + "resulted in at least one negative frequency." + ) return tc_cc diff --git a/climada/hazard/trop_cyclone/trop_cyclone_windfields.py b/climada/hazard/trop_cyclone/trop_cyclone_windfields.py index e82c0b11e..eba194bc9 100644 --- a/climada/hazard/trop_cyclone/trop_cyclone_windfields.py +++ b/climada/hazard/trop_cyclone/trop_cyclone_windfields.py @@ -20,7 +20,7 @@ """ import logging -from typing import Optional, Union, Tuple +from typing import Optional, Tuple, Union import numpy as np import xarray as xr @@ -28,7 +28,9 @@ from climada.hazard import Centroids from climada.hazard.tc_tracks import estimate_rmw -from climada.util import ureg, coordinates as u_coord, constants as u_const +from climada.util import constants as u_const +from climada.util import coordinates as u_coord +from climada.util import ureg LOGGER = logging.getLogger(__name__) @@ -49,7 +51,7 @@ DEF_MAX_MEMORY_GB = 8 """Default value of the memory limit (in GB) for windfield computations (in each thread).""" -MODEL_VANG = {'H08': 0, 'H1980': 1, 'H10': 2, 'ER11': 3} +MODEL_VANG = {"H08": 0, "H1980": 1, "H10": 2, "ER11": 3} """Enumerate different symmetric wind field models.""" DEF_RHO_AIR = 1.15 @@ -72,6 +74,7 @@ V_ANG_EARTH = 7.29e-5 """Earth angular velocity (in radians per second)""" + def _vgrad(si_track, gradient_to_surface_winds): """Gradient wind speeds (in m/s) without translational influence at each track node @@ -86,7 +89,8 @@ def _vgrad(si_track, gradient_to_surface_winds): The gradient-to-surface wind reduction factor to use. 
""" si_track["vgrad"] = ( - np.fmax(0, si_track["vmax"] - si_track["vtrans_norm"]) / gradient_to_surface_winds + np.fmax(0, si_track["vmax"] - si_track["vtrans_norm"]) + / gradient_to_surface_winds ) @@ -124,10 +128,10 @@ def compute_angular_windspeeds( """ model_kwargs = {} if model_kwargs is None else model_kwargs compute_funs = { - MODEL_VANG['H1980']: _compute_angular_windspeeds_h1980, - MODEL_VANG['H08']: _compute_angular_windspeeds_h08, - MODEL_VANG['H10']: _compute_angular_windspeeds_h10, - MODEL_VANG['ER11']: _stat_er_2011, + MODEL_VANG["H1980"]: _compute_angular_windspeeds_h1980, + MODEL_VANG["H08"]: _compute_angular_windspeeds_h08, + MODEL_VANG["H10"]: _compute_angular_windspeeds_h10, + MODEL_VANG["ER11"]: _stat_er_2011, } if model not in compute_funs: raise NotImplementedError(f"The specified wind model is not supported: {model}") @@ -182,7 +186,9 @@ def _compute_angular_windspeeds_h1980( _vgrad(si_track, gradient_to_surface_winds) _rho_air(si_track, rho_air_const) _B_holland_1980(si_track) - result = _stat_holland_1980(si_track, d_centr, close_centr_msk, cyclostrophic=cyclostrophic) + result = _stat_holland_1980( + si_track, d_centr, close_centr_msk, cyclostrophic=cyclostrophic + ) result *= gradient_to_surface_winds return result @@ -226,7 +232,9 @@ def _compute_angular_windspeeds_h08( """ _rho_air(si_track, rho_air_const) _bs_holland_2008(si_track, gradient_to_surface_winds=gradient_to_surface_winds) - return _stat_holland_1980(si_track, d_centr, close_centr_msk, cyclostrophic=cyclostrophic) + return _stat_holland_1980( + si_track, d_centr, close_centr_msk, cyclostrophic=cyclostrophic + ) def _compute_angular_windspeeds_h10( @@ -279,18 +287,25 @@ def _compute_angular_windspeeds_h10( """ if not cyclostrophic: LOGGER.debug( - 'The function _compute_angular_windspeeds_h10 was called with parameter ' + "The function _compute_angular_windspeeds_h10 was called with parameter " '"cyclostrophic" equal to false. Please be aware that this setting is ignored as the' - ' Holland et al. 2010 model is always cyclostrophic.') + " Holland et al. 2010 model is always cyclostrophic." + ) _rho_air(si_track, rho_air_const) if vmax_from_cen: _bs_holland_2008(si_track, gradient_to_surface_winds=gradient_to_surface_winds) _v_max_s_holland_2008(si_track) else: _B_holland_1980(si_track, gradient_to_surface_winds=gradient_to_surface_winds) - hol_x = _x_holland_2010(si_track, d_centr, close_centr_msk, vmax_in_brackets=vmax_in_brackets) + hol_x = _x_holland_2010( + si_track, d_centr, close_centr_msk, vmax_in_brackets=vmax_in_brackets + ) return _stat_holland_2010( - si_track, d_centr, close_centr_msk, hol_x, vmax_in_brackets=vmax_in_brackets, + si_track, + d_centr, + close_centr_msk, + hol_x, + vmax_in_brackets=vmax_in_brackets, ) @@ -334,7 +349,7 @@ def _rho_air(si_track: xr.Dataset, const: Optional[float]): r_dry_air = 286.9 # density of air (in kg/m³); when checking the units, note that J/Pa = m³ - si_track["rho_air"] = pres_eyewall / (r_dry_air * temp_vs) + si_track["rho_air"] = pres_eyewall / (r_dry_air * temp_vs) def _bs_holland_2008( @@ -394,12 +409,17 @@ def _bs_holland_2008( # and time steps are in hours instead of seconds, but translational wind speed is still # expected to be in m/s. pdelta = si_track["pdelta"] / MBAR_TO_PA - hol_xx = 0.6 * (1. 
- pdelta / 215) + hol_xx = 0.6 * (1.0 - pdelta / 215) si_track["hol_b"] = ( - -4.4e-5 * pdelta**2 + 0.01 * pdelta - + 0.03 * (si_track["cen"] - prev_cen) / si_track["tstep"] * (H_TO_S / MBAR_TO_PA) + -4.4e-5 * pdelta**2 + + 0.01 * pdelta + + 0.03 + * (si_track["cen"] - prev_cen) + / si_track["tstep"] + * (H_TO_S / MBAR_TO_PA) - 0.014 * abs(si_track["lat"]) - + 0.15 * si_track["vtrans_norm"]**hol_xx + 1.0 + + 0.15 * si_track["vtrans_norm"] ** hol_xx + + 1.0 ) clip_interval = _b_holland_clip_interval(gradient_to_surface_winds) si_track["hol_b"] = np.clip(si_track["hol_b"], *clip_interval) @@ -472,7 +492,7 @@ def _B_holland_1980( # pylint: disable=invalid-name windvar = "vgrad" if gradient_to_surface_winds is None else "vmax" si_track["hol_b"] = ( - si_track[windvar]**2 * np.exp(1) * si_track["rho_air"] / si_track["pdelta"] + si_track[windvar] ** 2 * np.exp(1) * si_track["rho_air"] / si_track["pdelta"] ) clip_interval = _b_holland_clip_interval(gradient_to_surface_winds) @@ -572,7 +592,7 @@ def _x_holland_2010( # compute peripheral exponent from second measurement # (equation (6) from Holland et al. 2010 solved for x) - r_max_norm = (r_max / r_n)**hol_b + r_max_norm = (r_max / r_n) ** hol_b if vmax_in_brackets: x_n = np.log(v_n) / np.log(v_max_s**2 * r_max_norm * np.exp(1 - r_max_norm)) @@ -586,7 +606,9 @@ def _x_holland_2010( # linearly interpolate between max exponent and peripheral exponent x_max = 0.5 - hol_x[mask_centr_close] = x_max + np.fmax(0, d_centr - r_max) * (x_n - x_max) / (r_n - r_max) + hol_x[mask_centr_close] = x_max + np.fmax(0, d_centr - r_max) * (x_n - x_max) / ( + r_n - r_max + ) # Truncate to prevent wind speed from increasing again towards the peripheral radius (which is # unphysical). A value of 0.4 has been found to be reasonable by manual testing of thresholds. @@ -656,11 +678,15 @@ def _stat_holland_2010( ] ] - r_max_norm = (r_max / np.fmax(1, d_centr))**hol_b + r_max_norm = (r_max / np.fmax(1, d_centr)) ** hol_b if vmax_in_brackets: - v_ang[mask_centr_close] = (v_max_s**2 * r_max_norm * np.exp(1 - r_max_norm))**hol_x + v_ang[mask_centr_close] = ( + v_max_s**2 * r_max_norm * np.exp(1 - r_max_norm) + ) ** hol_x else: - v_ang[mask_centr_close] = v_max_s * (r_max_norm * np.exp(1 - r_max_norm))**hol_x + v_ang[mask_centr_close] = ( + v_max_s * (r_max_norm * np.exp(1 - r_max_norm)) ** hol_x + ) return v_ang @@ -668,7 +694,7 @@ def _stat_holland_1980( si_track: xr.Dataset, d_centr: np.ndarray, mask_centr_close: np.ndarray, - cyclostrophic: bool = False + cyclostrophic: bool = False, ) -> np.ndarray: """Symmetric and static wind fields (in m/s) according to Holland 1980. 
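# Minimal, self-contained sketch of the symmetric Holland (1980) gradient wind profile
# that _stat_holland_1980 evaluates on the track/centroid arrays below; the function name
# and the toy parameter values are local to this sketch and not part of the CLIMADA API.
import numpy as np

def holland_1980_profile(d_centr, r_max, hol_b, pdelta, rho_air, coriolis_p,
                         cyclostrophic=False):
    """Angular wind speed (m/s) at distances d_centr (in m) from the eye."""
    r_coriolis = 0.0 if cyclostrophic else 0.5 * d_centr * coriolis_p
    r_max_norm = (r_max / np.fmax(1, d_centr)) ** hol_b
    sqrt_term = hol_b / rho_air * r_max_norm * pdelta * np.exp(-r_max_norm) + r_coriolis**2
    return np.sqrt(np.fmax(0, sqrt_term)) - r_coriolis

# toy example: 40 km radius of maximum wind, 40 hPa pressure deficit, hypothetical values
r = np.array([20e3, 40e3, 80e3, 200e3])   # distances from the eye in m
v = holland_1980_profile(r, r_max=40e3, hol_b=1.5, pdelta=4000.0,
                         rho_air=1.15, coriolis_p=5e-5)
# wind speed peaks near r_max (~43 m/s here) and decays both inward and outward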
@@ -725,8 +751,10 @@ def _stat_holland_1980( if not cyclostrophic: r_coriolis = 0.5 * d_centr * coriolis_p - r_max_norm = (r_max / np.fmax(1, d_centr))**hol_b - sqrt_term = hol_b / rho_air * r_max_norm * pdelta * np.exp(-r_max_norm) + r_coriolis**2 + r_max_norm = (r_max / np.fmax(1, d_centr)) ** hol_b + sqrt_term = ( + hol_b / rho_air * r_max_norm * pdelta * np.exp(-r_max_norm) + r_coriolis**2 + ) v_ang[mask_centr_close] = np.sqrt(np.fmax(0, sqrt_term)) - r_coriolis return v_ang @@ -793,7 +821,7 @@ def _stat_er_2011( momentum_max += 0.5 * coriolis_p * r_max**2 # rescale the momentum using formula (36) in Emanuel and Rotunno 2011 with Ck == Cd - r_max_norm = (d_centr / r_max)**2 + r_max_norm = (d_centr / r_max) ** 2 momentum = momentum_max * 2 * r_max_norm / (1 + r_max_norm) # extract the velocity from the rescaled momentum through division by r @@ -832,9 +860,16 @@ def _vtrans(si_track: xr.Dataset, metric: str = "equirect"): si_track["component"] = ("component", ["v", "u"]) t_lat, t_lon = si_track["lat"].values, si_track["lon"].values - norm, vec = u_coord.dist_approx(t_lat[:-1, None], t_lon[:-1, None], - t_lat[1:, None], t_lon[1:, None], - log=True, normalize=False, method=metric, units="m") + norm, vec = u_coord.dist_approx( + t_lat[:-1, None], + t_lon[:-1, None], + t_lat[1:, None], + t_lon[1:, None], + log=True, + normalize=False, + method=metric, + units="m", + ) si_track["vtrans"].values[1:, :] = vec[:, 0, 0] / si_track["tstep"].values[1:, None] si_track["vtrans_norm"].values[1:] = norm[:, 0, 0] / si_track["tstep"].values[1:] @@ -860,11 +895,12 @@ def _coriolis_parameter(lat: np.ndarray) -> np.ndarray: """ return 2 * V_ANG_EARTH * np.sin(np.radians(np.abs(lat))) + def compute_windfields_sparse( track: xr.Dataset, centroids: Centroids, idx_centr_filter: np.ndarray, - model: str = 'H08', + model: str = "H08", model_kwargs: Optional[dict] = None, store_windfields: bool = False, metric: str = "equirect", @@ -921,7 +957,7 @@ def compute_windfields_sparse( try: mod_id = MODEL_VANG[model] except KeyError as err: - raise ValueError(f'Model not implemented: {model}.') from err + raise ValueError(f"Model not implemented: {model}.") from err ncentroids = centroids.coord.shape[0] npositions = track.sizes["time"] @@ -931,7 +967,8 @@ def compute_windfields_sparse( # initialise arrays for the assumption that no centroids are within reach windfields_sparse = ( sparse.csr_matrix(([], ([], [])), shape=windfields_shape) - if store_windfields else None + if store_windfields + else None ) intensity_sparse = sparse.csr_matrix(([], ([], [])), shape=intensity_shape) @@ -948,7 +985,10 @@ def compute_windfields_sparse( # returned by `get_close_centroids` are normalized to be consistent with the coordinates in # `si_track`. 
centroids_close, mask_centr, mask_centr_alongtrack = get_close_centroids( - si_track, centroids.coord[idx_centr_filter], max_dist_eye_km, metric=metric, + si_track, + centroids.coord[idx_centr_filter], + max_dist_eye_km, + metric=metric, ) idx_centr_filter = idx_centr_filter[mask_centr] n_centr_close = centroids_close.shape[0] @@ -992,8 +1032,8 @@ def compute_windfields_sparse( intensity = np.linalg.norm(windfields, axis=-1).max(axis=0) intensity[intensity < intensity_thres] = 0 intensity_sparse = sparse.csr_matrix( - (intensity, idx_centr_filter, [0, intensity.size]), - shape=intensity_shape) + (intensity, idx_centr_filter, [0, intensity.size]), shape=intensity_shape + ) intensity_sparse.eliminate_zeros() windfields_sparse = None @@ -1004,8 +1044,9 @@ def compute_windfields_sparse( indices[:, :, 1] = 2 * idx_centr_filter[None] + 1 indices = indices.ravel() indptr = np.arange(npositions + 1) * n_centr_filter * 2 - windfields_sparse = sparse.csr_matrix((windfields.ravel(), indices, indptr), - shape=windfields_shape) + windfields_sparse = sparse.csr_matrix( + (windfields.ravel(), indices, indptr), shape=windfields_shape + ) windfields_sparse.eliminate_zeros() return intensity_sparse, windfields_sparse @@ -1060,8 +1101,10 @@ def _compute_windfields_sparse_chunked( for prev_chunk_end, chunk_end in zip(split_pos[:-1], split_pos[1:]): chunk_start = max(0, prev_chunk_end - 1) inten, win = compute_windfields_sparse( - track.isel(time=slice(chunk_start, chunk_end)), *args, - max_memory_gb=max_memory_gb, **kwargs, + track.isel(time=slice(chunk_start, chunk_end)), + *args, + max_memory_gb=max_memory_gb, + **kwargs, ) intensity.append(inten) windfields.append(win) @@ -1128,9 +1171,15 @@ def _compute_windfields( # compute distances (in m) and vectors to all centroids [d_centr], [v_centr_normed] = u_coord.dist_approx( - si_track["lat"].values[None], si_track["lon"].values[None], - centroids[None, :, 0], centroids[None, :, 1], - log=True, normalize=False, method=metric, units="m") + si_track["lat"].values[None], + si_track["lon"].values[None], + centroids[None, :, 0], + centroids[None, :, 1], + log=True, + normalize=False, + method=metric, + units="m", + ) # exclude centroids that are too far from or too close to the eye mask_centr_close = (d_centr <= max_dist_eye_km * KM_TO_M) & (d_centr > 1) @@ -1149,7 +1198,12 @@ def _compute_windfields( # derive (absolute) angular velocity from parametric wind profile v_ang_norm = compute_angular_windspeeds( - si_track, d_centr, mask_centr_close, model, model_kwargs=model_kwargs, cyclostrophic=False, + si_track, + d_centr, + mask_centr_close, + model, + model_kwargs=model_kwargs, + cyclostrophic=False, ) # Influence of translational speed decreases with distance from eye. @@ -1163,20 +1217,25 @@ def _compute_windfields( t_rad_bc = np.broadcast_to(si_track["rad"].values[:, None], d_centr.shape) v_trans_corr = np.zeros_like(d_centr) v_trans_corr[mask_centr_close] = np.fmin( - 1, t_rad_bc[mask_centr_close] / d_centr[mask_centr_close]) + 1, t_rad_bc[mask_centr_close] / d_centr[mask_centr_close] + ) - if model in [MODEL_VANG['H08'], MODEL_VANG['H10']]: + if model in [MODEL_VANG["H08"], MODEL_VANG["H10"]]: # In these models, v_ang_norm already contains vtrans_norm, so subtract it first, before # converting to vectors and then adding (vectorial) vtrans again. 
Make sure to apply the # "absorbing factor" in both steps: - vtrans_norm_bc = np.broadcast_to(si_track["vtrans_norm"].values[:, None], d_centr.shape) + vtrans_norm_bc = np.broadcast_to( + si_track["vtrans_norm"].values[:, None], d_centr.shape + ) v_ang_norm[mask_centr_close] -= ( - vtrans_norm_bc[mask_centr_close] * v_trans_corr[mask_centr_close] + vtrans_norm_bc[mask_centr_close] * v_trans_corr[mask_centr_close] ) # vectorial angular velocity windfields = ( - si_track.attrs["latsign"] * np.array([1.0, -1.0])[..., :] * v_centr_normed[:, :, ::-1] + si_track.attrs["latsign"] + * np.array([1.0, -1.0])[..., :] + * v_centr_normed[:, :, ::-1] ) windfields[mask_centr_close] *= v_ang_norm[mask_centr_close, None] @@ -1243,7 +1302,7 @@ def tctrack_to_si( except Exception as ex: raise ValueError( f"The {long_name}_unit '{unit}' in the provided track is not supported." - ) from ex + ) from ex si_track[var_name] = track[long_name] * conv_factor # normalize longitudinal coordinates @@ -1257,14 +1316,15 @@ def tctrack_to_si( # extrapolate radius of max wind from pressure if not given si_track["rad"] = track["radius_max_wind"].copy() si_track["rad"].values[:] = estimate_rmw( - si_track["rad"].values, si_track["cen"].values / MBAR_TO_PA, + si_track["rad"].values, + si_track["cen"].values / MBAR_TO_PA, ) si_track["rad"] *= NM_TO_KM * KM_TO_M - hemisphere = 'N' + hemisphere = "N" if np.count_nonzero(si_track["lat"] < 0) > np.count_nonzero(si_track["lat"] > 0): - hemisphere = 'S' - si_track.attrs["latsign"] = 1.0 if hemisphere == 'N' else -1.0 + hemisphere = "S" + si_track.attrs["latsign"] = 1.0 if hemisphere == "N" else -1.0 # add translational speed of track at every node (in m/s) _vtrans(si_track, metric=metric) @@ -1333,9 +1393,8 @@ def get_close_centroids( # centroids that are considered by a factor larger than 30). buffer_lat = buffer_km / u_const.ONE_LAT_KM buffer_lon = buffer_km / ( - u_const.ONE_LAT_KM * np.cos(np.radians( - np.fmin(89.999, np.abs(centr_lat) + buffer_lat) - )) + u_const.ONE_LAT_KM + * np.cos(np.radians(np.fmin(89.999, np.abs(centr_lat) + buffer_lat))) ) [idx_close] = ( (t_lat.min() - centr_lat <= buffer_lat) @@ -1348,15 +1407,20 @@ def get_close_centroids( # Restrict to bounding boxes of each track position. buffer_lat = buffer_km / u_const.ONE_LAT_KM - buffer_lon = buffer_km / (u_const.ONE_LAT_KM * np.cos(np.radians( - np.fmin(89.999, np.abs(t_lat[:, None]) + buffer_lat) - ))) + buffer_lon = buffer_km / ( + u_const.ONE_LAT_KM + * np.cos(np.radians(np.fmin(89.999, np.abs(t_lat[:, None]) + buffer_lat))) + ) [idx_close_sub] = ( - (t_lat[:, None] - buffer_lat <= centr_lat[None]) - & (t_lat[:, None] + buffer_lat >= centr_lat[None]) - & (t_lon[:, None] - buffer_lon <= centr_lon[None]) - & (t_lon[:, None] + buffer_lon >= centr_lon[None]) - ).any(axis=0).nonzero() + ( + (t_lat[:, None] - buffer_lat <= centr_lat[None]) + & (t_lat[:, None] + buffer_lat >= centr_lat[None]) + & (t_lon[:, None] - buffer_lon <= centr_lon[None]) + & (t_lon[:, None] + buffer_lon >= centr_lon[None]) + ) + .any(axis=0) + .nonzero() + ) idx_close = idx_close[idx_close_sub] centr_lat = centr_lat[idx_close_sub] centr_lon = centr_lon[idx_close_sub] @@ -1369,16 +1433,27 @@ def get_close_centroids( # FAITH. With a chunk size of 10, this figure is down to 240 MB. The final along-track mask # will require 1.0 GB of memory. 
chunk_size = 10 - chunks = np.split(np.arange(npositions), np.arange(chunk_size, npositions, chunk_size)) - mask_centr_alongtrack = np.concatenate([ - ( - u_coord.dist_approx( - t_lat[None, chunk], t_lon[None, chunk], - centr_lat[None], centr_lon[None], - normalize=False, method=metric, units="km", - )[0] <= buffer_km - ) for chunk in chunks - ], axis=0) + chunks = np.split( + np.arange(npositions), np.arange(chunk_size, npositions, chunk_size) + ) + mask_centr_alongtrack = np.concatenate( + [ + ( + u_coord.dist_approx( + t_lat[None, chunk], + t_lon[None, chunk], + centr_lat[None], + centr_lon[None], + normalize=False, + method=metric, + units="km", + )[0] + <= buffer_km + ) + for chunk in chunks + ], + axis=0, + ) [idx_close_sub] = mask_centr_alongtrack.any(axis=0).nonzero() idx_close = idx_close[idx_close_sub] centr_lat = centr_lat[idx_close_sub] diff --git a/climada/test/__init__.py b/climada/test/__init__.py index 34ef38092..83aa75857 100755 --- a/climada/test/__init__.py +++ b/climada/test/__init__.py @@ -19,8 +19,8 @@ init test """ -from climada.util.api_client import Client from climada._version import __version__ as climada_version +from climada.util.api_client import Client def get_test_file(ds_name, file_format=None): @@ -46,13 +46,25 @@ def get_test_file(ds_name, file_format=None): # get the dataset with the highest version below (or equal to) the current climada version # in this way a test dataset can be updated without breaking tests on former versions # just make sure that the new dataset has a higher version than any previous version - test_ds = [ds for ds in sorted( - client.list_dataset_infos(name=ds_name, status='test_dataset', version='ANY'), - key=lambda ds: ds.version - ) if ds.version.strip('v') <= climada_version.strip('v')][-1] + test_ds = [ + ds + for ds in sorted( + client.list_dataset_infos( + name=ds_name, status="test_dataset", version="ANY" + ), + key=lambda ds: ds.version, + ) + if ds.version.strip("v") <= climada_version.strip("v") + ][-1] _, files = client.download_dataset(test_ds) - [test_file] = [fil for fil in files if fil.name in [ - dsf.file_name - for dsf in test_ds.files - if file_format is None or dsf.file_format == file_format]] + [test_file] = [ + fil + for fil in files + if fil.name + in [ + dsf.file_name + for dsf in test_ds.files + if file_format is None or dsf.file_format == file_format + ] + ] return test_file diff --git a/climada/test/test_api_client.py b/climada/test/test_api_client.py index 3b60a016e..6bd86ed4f 100644 --- a/climada/test/test_api_client.py +++ b/climada/test/test_api_client.py @@ -18,9 +18,10 @@ Test save module. 
""" -from pathlib import Path + import tempfile import unittest +from pathlib import Path import numpy as np @@ -36,30 +37,40 @@ class TestClient(unittest.TestCase): def test_data_type(self): """""" lpdt = Client().get_data_type_info("tropical_cyclone") - self.assertEqual(lpdt.data_type, 'tropical_cyclone') - self.assertEqual(lpdt.data_type_group, 'hazard') - self.assertTrue('res_arcsec' in [p['property'] for p in lpdt.properties if p['mandatory']]) - self.assertTrue('ref_year' in [p['property'] for p in lpdt.properties if not p['mandatory']]) + self.assertEqual(lpdt.data_type, "tropical_cyclone") + self.assertEqual(lpdt.data_type_group, "hazard") + self.assertTrue( + "res_arcsec" in [p["property"] for p in lpdt.properties if p["mandatory"]] + ) + self.assertTrue( + "ref_year" in [p["property"] for p in lpdt.properties if not p["mandatory"]] + ) def test_data_types(self): """""" exdts = Client().list_data_type_infos("exposures") - self.assertTrue('litpop' in [exdt.data_type for exdt in exdts]) + self.assertTrue("litpop" in [exdt.data_type for exdt in exdts]) def test_datasets(self): """""" - datasets = Client().list_dataset_infos(status=None, name='FAOSTAT_data_producer_prices') + datasets = Client().list_dataset_infos( + status=None, name="FAOSTAT_data_producer_prices" + ) self.assertEqual(len(datasets), 1) def test_dataset(self): """""" client = Client() - dataset = client.get_dataset_info(name='FAOSTAT_data_producer_prices', status='test_dataset') - self.assertEqual(dataset.version, 'v1') + dataset = client.get_dataset_info( + name="FAOSTAT_data_producer_prices", status="test_dataset" + ) + self.assertEqual(dataset.version, "v1") self.assertEqual(len(dataset.files), 1) self.assertEqual(dataset.files[0].file_size, 26481) - self.assertEqual(dataset.data_type, DataTypeShortInfo('crop_production', 'exposures')) + self.assertEqual( + dataset.data_type, DataTypeShortInfo("crop_production", "exposures") + ) dataset2 = client.get_dataset_info_by_uuid(dataset.uuid) self.assertEqual(dataset, dataset2) @@ -68,49 +79,64 @@ def test_search_for_property_not_set(self): """""" client = Client() - nocountry = client.list_dataset_infos(data_type="earthquake", - properties={'country_name': None})[0] - self.assertNotIn('country_name', nocountry.properties) - self.assertIn('spatial_coverage', nocountry.properties) + nocountry = client.list_dataset_infos( + data_type="earthquake", properties={"country_name": None} + )[0] + self.assertNotIn("country_name", nocountry.properties) + self.assertIn("spatial_coverage", nocountry.properties) def test_dataset_offline(self): """""" client = Client() client.online = False - with self.assertLogs('climada.util.api_client', level='WARNING') as cm: - dataset = client.get_dataset_info(name='FAOSTAT_data_producer_prices', - status='test_dataset') - self.assertIn("there is no internet connection but the client has stored ", cm.output[0]) + with self.assertLogs("climada.util.api_client", level="WARNING") as cm: + dataset = client.get_dataset_info( + name="FAOSTAT_data_producer_prices", status="test_dataset" + ) + self.assertIn( + "there is no internet connection but the client has stored ", cm.output[0] + ) - self.assertEqual(dataset.version, 'v1') + self.assertEqual(dataset.version, "v1") self.assertEqual(len(dataset.files), 1) self.assertEqual(dataset.files[0].file_size, 26481) - self.assertEqual(dataset.data_type, DataTypeShortInfo('crop_production', 'exposures')) + self.assertEqual( + dataset.data_type, DataTypeShortInfo("crop_production", "exposures") + ) with 
self.assertRaises(AssertionError) as ar: - with self.assertLogs('climada.util.api_client', level='WARNING') as cm: + with self.assertLogs("climada.util.api_client", level="WARNING") as cm: dataset2 = Client().get_dataset_info_by_uuid(dataset.uuid) self.assertIn("no logs of level WARNING or higher triggered", str(ar.exception)) self.assertEqual(dataset, dataset2) - with self.assertLogs('climada.util.api_client', level='WARNING') as cm: + with self.assertLogs("climada.util.api_client", level="WARNING") as cm: dataset2 = client.get_dataset_info_by_uuid(dataset.uuid) - self.assertIn("there is no internet connection but the client has stored ", cm.output[0]) + self.assertIn( + "there is no internet connection but the client has stored ", cm.output[0] + ) self.assertEqual(dataset, dataset2) def test_download_file(self): """""" client = Client() client.MAX_WAITING_PERIOD = 0.1 - dataset = client.get_dataset_info(name='FAOSTAT_data_producer_prices', - status='test_dataset') + dataset = client.get_dataset_info( + name="FAOSTAT_data_producer_prices", status="test_dataset" + ) # test failure def fail(x, y): raise Download.Failed("on purpose") - self.assertRaises(Download.Failed, - client._download_file, DATA_DIR, dataset.files[0], check=fail) + + self.assertRaises( + Download.Failed, + client._download_file, + DATA_DIR, + dataset.files[0], + check=fail, + ) self.assertFalse(DATA_DIR.joinpath(dataset.files[0].file_name).is_file()) # test success @@ -126,7 +152,9 @@ def test_download_dataset(self): client = Client() client.MAX_WAITING_PERIOD = 0.1 - dataset = client.get_dataset_info(name='test_write_raster', status='test_dataset') + dataset = client.get_dataset_info( + name="test_write_raster", status="test_dataset" + ) download_dir, downloads = client.download_dataset(dataset, target_dir=DATA_DIR) self.assertEqual(download_dir.name, dataset.version) self.assertEqual(download_dir.parent.name, dataset.name) @@ -142,94 +170,136 @@ def test_download_dataset(self): def test_get_exposures(self): client = Client() - exposures = client.get_exposures(exposures_type='litpop', - properties={'country_iso3alpha': 'AUT', - 'fin_mode': 'pop', 'exponents': '(0,1)'}, - version='v1', - dump_dir=DATA_DIR) + exposures = client.get_exposures( + exposures_type="litpop", + properties={ + "country_iso3alpha": "AUT", + "fin_mode": "pop", + "exponents": "(0,1)", + }, + version="v1", + dump_dir=DATA_DIR, + ) self.assertEqual(len(exposures.gdf), 5782) - self.assertEqual(np.unique(exposures.gdf['region_id']), 40) - self.assertEqual(exposures.description, - "LitPop Exposure for ['AUT'] at 150 as, year: 2018, financial mode: pop, exp: [0, 1], admin1_calc: False") + self.assertEqual(np.unique(exposures.gdf["region_id"]), 40) + self.assertEqual( + exposures.description, + "LitPop Exposure for ['AUT'] at 150 as, year: 2018, financial mode: pop, exp: [0, 1], admin1_calc: False", + ) def test_get_exposures_fails(self): client = Client() with self.assertRaises(ValueError) as cm: - client.get_exposures(exposures_type='river_flood', - properties={'country_iso3alpha': 'AUT', - 'fin_mode': 'pop', 'exponents': '(0,1)'}, - dump_dir=DATA_DIR) - self.assertIn('Valid exposures types are a subset of CLIMADA exposures types. Currently', - str(cm.exception)) + client.get_exposures( + exposures_type="river_flood", + properties={ + "country_iso3alpha": "AUT", + "fin_mode": "pop", + "exponents": "(0,1)", + }, + dump_dir=DATA_DIR, + ) + self.assertIn( + "Valid exposures types are a subset of CLIMADA exposures types. 
Currently", + str(cm.exception), + ) with self.assertRaises(Client.AmbiguousResult) as cm: - client.get_exposures(exposures_type='litpop', - properties={'country_iso3alpha': 'AUT'}, - dump_dir=DATA_DIR) - self.assertIn('there are 3 datasets meeting the requirements', - str(cm.exception)) + client.get_exposures( + exposures_type="litpop", + properties={"country_iso3alpha": "AUT"}, + dump_dir=DATA_DIR, + ) + self.assertIn( + "there are 3 datasets meeting the requirements", str(cm.exception) + ) def test_get_hazard(self): client = Client() - hazard = client.get_hazard(hazard_type='river_flood', - properties={'country_name': 'Austria', - 'year_range': '2010_2030', 'climate_scenario': 'rcp26'}, - version='v1', - dump_dir=DATA_DIR) + hazard = client.get_hazard( + hazard_type="river_flood", + properties={ + "country_name": "Austria", + "year_range": "2010_2030", + "climate_scenario": "rcp26", + }, + version="v1", + dump_dir=DATA_DIR, + ) self.assertEqual(np.shape(hazard.intensity), (480, 5784)) self.assertEqual(np.unique(hazard.centroids.region_id), 40) self.assertEqual(np.unique(hazard.date).size, 20) - self.assertEqual(hazard.haz_type, 'RF') + self.assertEqual(hazard.haz_type, "RF") def test_get_hazard_fails(self): client = Client() with self.assertRaises(ValueError) as cm: - client.get_hazard(hazard_type='litpop', - properties={'country_name': 'Austria', - 'year_range': '2010_2030', 'climate_scenario': 'rcp26'}, - dump_dir=DATA_DIR) - self.assertIn('Valid hazard types are a subset of CLIMADA hazard types. Currently', - str(cm.exception)) + client.get_hazard( + hazard_type="litpop", + properties={ + "country_name": "Austria", + "year_range": "2010_2030", + "climate_scenario": "rcp26", + }, + dump_dir=DATA_DIR, + ) + self.assertIn( + "Valid hazard types are a subset of CLIMADA hazard types. Currently", + str(cm.exception), + ) with self.assertRaises(Client.AmbiguousResult) as cm: - client.get_hazard(hazard_type='river_flood', - properties={'country_name': ['Switzerland', 'Austria'], - 'year_range': '2010_2030', 'climate_scenario': ['rcp26', 'rcp85']}, - dump_dir=DATA_DIR) - self.assertIn('there are 4 datasets meeting the requirements:', str(cm.exception)) + client.get_hazard( + hazard_type="river_flood", + properties={ + "country_name": ["Switzerland", "Austria"], + "year_range": "2010_2030", + "climate_scenario": ["rcp26", "rcp85"], + }, + dump_dir=DATA_DIR, + ) + self.assertIn( + "there are 4 datasets meeting the requirements:", str(cm.exception) + ) def test_get_litpop(self): client = Client() - litpop = client.get_litpop(country='LUX', version='v1', dump_dir=DATA_DIR) + litpop = client.get_litpop(country="LUX", version="v1", dump_dir=DATA_DIR) self.assertEqual(len(litpop.gdf), 188) - self.assertEqual(np.unique(litpop.gdf['region_id']), 442) - self.assertEqual(litpop.description, - "LitPop Exposure for ['LUX'] at 150 as, year: 2018, financial mode: pc, exp: [1, 1], admin1_calc: False") + self.assertEqual(np.unique(litpop.gdf["region_id"]), 442) + self.assertEqual( + litpop.description, + "LitPop Exposure for ['LUX'] at 150 as, year: 2018, financial mode: pc, exp: [1, 1], admin1_calc: False", + ) def test_get_litpop_fail(self): client = Client() with self.assertRaises(ValueError) as cm: - client.get_litpop(['AUT', 'CHE']) - self.assertIn(" can only query single countries. Download the data for multiple countries individually and concatenate ", - str(cm.exception)) + client.get_litpop(["AUT", "CHE"]) + self.assertIn( + " can only query single countries. 
Download the data for multiple countries individually and concatenate ", + str(cm.exception), + ) def test_get_centroids_plot(self): client = Client() - client.get_centroids(country='COM').plot() + client.get_centroids(country="COM").plot() def test_get_dataset_file(self): client = Client() with tempfile.TemporaryDirectory() as temp_dir: single_file = client.get_dataset_file( - name='test_imp_mat', status='test_dataset', # get_dataset_info arguments - target_dir=Path(temp_dir), organize_path=False, # download_dataset arguments + name="test_imp_mat", + status="test_dataset", # get_dataset_info arguments + target_dir=Path(temp_dir), + organize_path=False, # download_dataset arguments ) self.assertTrue(single_file.is_file()) self.assertEqual(list(Path(temp_dir).iterdir()), [single_file]) def test_multi_filter(self): client = Client() - testds = client.list_dataset_infos(data_type='storm_europe') + testds = client.list_dataset_infos(data_type="storm_europe") # assert no systemic loss in filtering still = client._filter_datasets(testds, dict()) @@ -237,62 +307,101 @@ def test_multi_filter(self): self.assertEqual(o, r) # assert filter is effective - p = 'country_name' - a, b = 'Germany', 'Netherlands' - less = client._filter_datasets(testds, {p:[a, b]}) + p = "country_name" + a, b = "Germany", "Netherlands" + less = client._filter_datasets(testds, {p: [a, b]}) self.assertLess(len(less), len(testds)) - only = client._filter_datasets(testds, {p:[a]}) + only = client._filter_datasets(testds, {p: [a]}) self.assertLess(len(only), len(less)) self.assertLess(0, len(only)) def test_multiplicity_split(self): - properties = { - 'country_name': ['x', 'y', 'z'], - 'b': '1' - } + properties = {"country_name": ["x", "y", "z"], "b": "1"} # assert split matches expectations straight, multi = Client._divide_straight_from_multi(properties) - self.assertEqual(straight, {'b': '1'}) - self.assertEqual(multi, {'country_name': ['x', 'y', 'z']}) + self.assertEqual(straight, {"b": "1"}) + self.assertEqual(multi, {"country_name": ["x", "y", "z"]}) def test_purge_cache(self): client = Client() - active_ds = client.get_dataset_info(data_type="litpop", name="LitPop_150arcsec_ABW", version="v3") - outdated_ds = client.get_dataset_info(data_type="litpop", name="LitPop_150arcsec_ABW", version="v1") - test_ds = client.get_dataset_info(data_type="storm_europe", name="test_storm_europe_icon_2021012800", version="v1", status="test_dataset") - expired_ds = client.get_dataset_info(data_type="tropical_cyclone", name="rename_files2", version="v1", status="expired") + active_ds = client.get_dataset_info( + data_type="litpop", name="LitPop_150arcsec_ABW", version="v3" + ) + outdated_ds = client.get_dataset_info( + data_type="litpop", name="LitPop_150arcsec_ABW", version="v1" + ) + test_ds = client.get_dataset_info( + data_type="storm_europe", + name="test_storm_europe_icon_2021012800", + version="v1", + status="test_dataset", + ) + expired_ds = client.get_dataset_info( + data_type="tropical_cyclone", + name="rename_files2", + version="v1", + status="expired", + ) with tempfile.TemporaryDirectory() as temp_dir: for ds in [active_ds, outdated_ds, test_ds, expired_ds]: client.download_dataset(dataset=ds, target_dir=Path(temp_dir)) self.assertEqual( # outdated dataset present 1, - len(list(Path(temp_dir).joinpath('exposures/litpop/LitPop_150arcsec_ABW/v1').iterdir())) + len( + list( + Path(temp_dir) + .joinpath("exposures/litpop/LitPop_150arcsec_ABW/v1") + .iterdir() + ) + ), ) self.assertEqual( # expired data set present 1, - 
len(list(Path(temp_dir).joinpath('hazard/tropical_cyclone/rename_files2/v1').iterdir())) + len( + list( + Path(temp_dir) + .joinpath("hazard/tropical_cyclone/rename_files2/v1") + .iterdir() + ) + ), ) client.purge_cache(target_dir=temp_dir) self.assertFalse( # outdated data set removed - Path(temp_dir).joinpath('exposures/litpop/LitPop_150arcsec_ABW/v1').is_dir() + Path(temp_dir) + .joinpath("exposures/litpop/LitPop_150arcsec_ABW/v1") + .is_dir() ) self.assertFalse( # expired data set removed - Path(temp_dir).joinpath('hazard/tropical_cyclone/rename_files2/v1').is_dir() + Path(temp_dir) + .joinpath("hazard/tropical_cyclone/rename_files2/v1") + .is_dir() ) self.assertEqual( # test files are still there 3, - len(list(Path(temp_dir).joinpath('hazard/storm_europe/test_storm_europe_icon_2021012800/v1').iterdir())) + len( + list( + Path(temp_dir) + .joinpath( + "hazard/storm_europe/test_storm_europe_icon_2021012800/v1" + ) + .iterdir() + ) + ), ) client.purge_cache(target_dir=temp_dir, keep_testfiles=False) self.assertTrue( # uptodate active dataset file still there - Path(temp_dir).joinpath('exposures/litpop/LitPop_150arcsec_ABW/v3/LitPop_150arcsec_ABW.hdf5').exists() + Path(temp_dir) + .joinpath( + "exposures/litpop/LitPop_150arcsec_ABW/v3/LitPop_150arcsec_ABW.hdf5" + ) + .exists() ) self.assertFalse( # test data removed, empty directories removed - Path(temp_dir).joinpath('hazard/').exists() + Path(temp_dir).joinpath("hazard/").exists() ) diff --git a/climada/test/test_calibration.py b/climada/test/test_calibration.py index 72dcca3a4..5b83b3a3f 100644 --- a/climada/test/test_calibration.py +++ b/climada/test/test_calibration.py @@ -18,21 +18,22 @@ Test Calibration class. """ + import unittest from pathlib import Path + import pandas as pd +import climada.hazard.test as hazard_test from climada import CONFIG -from climada.entity.entity_def import Entity -from climada.hazard.base import Hazard from climada.engine import ImpactCalc from climada.engine.calibration_opt import calib_instance -from climada.util.constants import ENT_DEMO_TODAY -import climada.hazard.test as hazard_test +from climada.entity.entity_def import Entity +from climada.hazard.base import Hazard from climada.test import get_test_file +from climada.util.constants import ENT_DEMO_TODAY - -HAZ_TEST_TC = get_test_file('test_tc_florida', file_format='hdf5') +HAZ_TEST_TC = get_test_file("test_tc_florida", file_format="hdf5") DATA_FOLDER = CONFIG.test_data.dir() @@ -42,7 +43,7 @@ class TestCalib(unittest.TestCase): def test_calib_instance(self): """Test save calib instance""" - # Read default entity values + # Read default entity values ent = Entity.from_excel(ENT_DEMO_TODAY) ent.check() @@ -50,27 +51,30 @@ def test_calib_instance(self): hazard = Hazard.from_hdf5(HAZ_TEST_TC) # get impact function from set - imp_func = ent.impact_funcs.get_func(hazard.haz_type, - ent.exposures.gdf['impf_TC'].median()) + imp_func = ent.impact_funcs.get_func( + hazard.haz_type, ent.exposures.gdf["impf_TC"].median() + ) # Assign centroids to exposures ent.exposures.assign_centroids(hazard) # create input frame - df_in = pd.DataFrame.from_dict({'v_threshold': [25.7], - 'other_param': [2], - 'hazard': [HAZ_TEST_TC]}) - df_in_yearly = pd.DataFrame.from_dict({'v_threshold': [25.7], - 'other_param': [2], - 'hazard': [HAZ_TEST_TC]}) + df_in = pd.DataFrame.from_dict( + {"v_threshold": [25.7], "other_param": [2], "hazard": [HAZ_TEST_TC]} + ) + df_in_yearly = pd.DataFrame.from_dict( + {"v_threshold": [25.7], "other_param": [2], "hazard": [HAZ_TEST_TC]} + ) # 
Compute the impact over the whole exposures df_out = calib_instance(hazard, ent.exposures, imp_func, df_in) - df_out_yearly = calib_instance(hazard, ent.exposures, imp_func, - df_in_yearly, - yearly_impact=True) + df_out_yearly = calib_instance( + hazard, ent.exposures, imp_func, df_in_yearly, yearly_impact=True + ) # calc Impact as comparison - impact = ImpactCalc(ent.exposures, ent.impact_funcs, hazard).impact(assign_centroids=False) + impact = ImpactCalc(ent.exposures, ent.impact_funcs, hazard).impact( + assign_centroids=False + ) IYS = impact.impact_per_year(all_years=True) # do the tests @@ -78,17 +82,16 @@ def test_calib_instance(self): self.assertTrue(isinstance(df_out_yearly, pd.DataFrame)) self.assertEqual(df_out.shape[0], hazard.event_id.size) self.assertEqual(df_out_yearly.shape[0], 161) - self.assertTrue(all(df_out['event_id'] == - hazard.event_id)) - self.assertTrue(all(df_out[df_in.columns[0]].isin( - df_in[df_in.columns[0]]))) - self.assertTrue(all(df_out_yearly[df_in.columns[1]].isin( - df_in[df_in.columns[1]]))) - self.assertTrue(all(df_out_yearly[df_in.columns[2]].isin( - df_in[df_in.columns[2]]))) - self.assertTrue(all(df_out['impact_CLIMADA'].values == - impact.at_event)) - self.assertTrue(all(df_out_yearly['impact_CLIMADA'].values == [*IYS.values()])) + self.assertTrue(all(df_out["event_id"] == hazard.event_id)) + self.assertTrue(all(df_out[df_in.columns[0]].isin(df_in[df_in.columns[0]]))) + self.assertTrue( + all(df_out_yearly[df_in.columns[1]].isin(df_in[df_in.columns[1]])) + ) + self.assertTrue( + all(df_out_yearly[df_in.columns[2]].isin(df_in[df_in.columns[2]])) + ) + self.assertTrue(all(df_out["impact_CLIMADA"].values == impact.at_event)) + self.assertTrue(all(df_out_yearly["impact_CLIMADA"].values == [*IYS.values()])) # Execute Tests diff --git a/climada/test/test_engine.py b/climada/test/test_engine.py index ab078b29f..ce9ee2445 100644 --- a/climada/test/test_engine.py +++ b/climada/test/test_engine.py @@ -19,27 +19,27 @@ """ -import unittest -import numpy as np import copy import time +import unittest + +import numpy as np import scipy as sp +from tables.exceptions import HDF5ExtError +from climada import CONFIG from climada.engine import impact_data as im_d -from climada.engine.unsequa import InputVar, CalcCostBenefit -from climada.entity.entity_def import Entity +from climada.engine.unsequa import CalcCostBenefit, InputVar from climada.entity import Exposures, ImpactFunc, ImpactFuncSet +from climada.entity.entity_def import Entity from climada.hazard import Hazard -from climada import CONFIG from climada.util.constants import ( + ENT_DEMO_FUTURE, + ENT_DEMO_TODAY, EXP_DEMO_H5, HAZ_DEMO_H5, - ENT_DEMO_TODAY, - ENT_DEMO_FUTURE, ) -from tables.exceptions import HDF5ExtError - DATA_DIR = CONFIG.engine.test_data.dir() EMDAT_TEST_CSV = DATA_DIR.joinpath("emdat_testdata_BGD_USA_1970-2017.csv") @@ -66,7 +66,7 @@ def exp_dem(x_exp=1, exp=None): except HDF5ExtError: time.sleep(0.1) exp_tmp = exp.copy(deep=True) - exp_tmp.gdf['value'] *= x_exp + exp_tmp.gdf["value"] *= x_exp return exp_tmp @@ -152,8 +152,8 @@ def test_emdat_damage_yearlysum(self): ) self.assertEqual(36, df.size) - self.assertAlmostEqual(df['impact'].max(), 15150000000.0) - self.assertAlmostEqual(df['impact_scaled'].min(), 10939000.0) + self.assertAlmostEqual(df["impact"].max(), 15150000000.0) + self.assertAlmostEqual(df["impact_scaled"].min(), 10939000.0) self.assertEqual(df["year"][5], 2017) self.assertEqual(df["reference_year"].max(), 2000) self.assertIn("USA", list(df["ISO"])) diff --git 
a/climada/test/test_hazard.py b/climada/test/test_hazard.py index 6ae8dbfb4..0be423bcb 100644 --- a/climada/test/test_hazard.py +++ b/climada/test/test_hazard.py @@ -19,10 +19,11 @@ Test Hazard base class. """ -import unittest -import numpy as np import datetime as dt +import unittest from pathlib import Path + +import numpy as np from scipy import sparse from climada import CONFIG @@ -30,44 +31,50 @@ from climada.hazard.base import Hazard from climada.hazard.centroids import Centroids from climada.hazard.storm_europe import StormEurope -from climada.util.constants import (HAZ_DEMO_FL, WS_DEMO_NC, DEF_CRS) -from climada.util.api_client import Client -from climada.util import coordinates as u_coord from climada.test import get_test_file +from climada.util import coordinates as u_coord +from climada.util.api_client import Client +from climada.util.constants import DEF_CRS, HAZ_DEMO_FL, WS_DEMO_NC DATA_DIR = CONFIG.test_data.dir() -HAZ_TEST_TC :Path = get_test_file('test_tc_florida', file_format='hdf5') +HAZ_TEST_TC: Path = get_test_file("test_tc_florida", file_format="hdf5") """ Hazard test file from Data API: Hurricanes from 1851 to 2011 over Florida with 100 centroids. Fraction is empty. Format: HDF5. """ + class TestCentroids(unittest.TestCase): """Test centroids functionalities""" def test_read_write_raster_pass(self): """Test write_raster: Hazard from raster data""" haz_fl = Hazard.from_raster([HAZ_DEMO_FL]) - haz_fl.haz_type = 'FL' + haz_fl.haz_type = "FL" self.assertEqual(haz_fl.intensity.shape, (1, 1032226)) self.assertEqual(haz_fl.intensity.min(), -9999.0) self.assertAlmostEqual(haz_fl.intensity.max(), 4.662774085998535) - haz_fl.write_raster(DATA_DIR.joinpath('test_write_hazard.tif'), variable='intensity') + haz_fl.write_raster( + DATA_DIR.joinpath("test_write_hazard.tif"), variable="intensity" + ) - haz_read = Hazard.from_raster([DATA_DIR.joinpath('test_write_hazard.tif')]) - haz_fl.haz_type = 'FL' - self.assertTrue(np.allclose(haz_fl.intensity.toarray(), haz_read.intensity.toarray())) + haz_read = Hazard.from_raster([DATA_DIR.joinpath("test_write_hazard.tif")]) + haz_fl.haz_type = "FL" + self.assertTrue( + np.allclose(haz_fl.intensity.toarray(), haz_read.intensity.toarray()) + ) self.assertEqual(np.unique(np.array(haz_fl.fraction.toarray())).size, 2) - DATA_DIR.joinpath('test_write_hazard.tif').unlink() + DATA_DIR.joinpath("test_write_hazard.tif").unlink() def test_read_raster_pool_pass(self): """Test from_raster constructor with pool""" from pathos.pools import ProcessPool as Pool + pool = Pool() - haz_fl = Hazard.from_raster([HAZ_DEMO_FL], haz_type='FL', pool=pool) + haz_fl = Hazard.from_raster([HAZ_DEMO_FL], haz_type="FL", pool=pool) haz_fl.check() self.assertEqual(haz_fl.intensity.shape, (1, 1032226)) @@ -79,118 +86,87 @@ def test_read_raster_pool_pass(self): def test_read_write_vector_pass(self): """Test write_raster: Rasterize intensity from vector data""" haz_fl = Hazard( - 'FL', + "FL", event_id=np.array([1]), date=np.array([1]), frequency=np.array([1]), orig=np.array([1]), - event_name=['1'], + event_name=["1"], intensity=sparse.csr_matrix(np.array([0.11, 0.22, 0.33, 0.31])), - fraction=sparse.csr_matrix(np.array([0, 1, 2, 3]) ), + fraction=sparse.csr_matrix(np.array([0, 1, 2, 3])), centroids=Centroids( lon=np.array([1, 2, 3, 3]), lat=np.array([1, 2, 3, 1]), crs=DEF_CRS - ) + ), ) - haz_fl.write_raster(DATA_DIR.joinpath('test_write_hazard.tif'), variable='intensity') + haz_fl.write_raster( + DATA_DIR.joinpath("test_write_hazard.tif"), variable="intensity" + ) - haz_read 
= Hazard.from_raster([DATA_DIR.joinpath('test_write_hazard.tif')], haz_type='FL') + haz_read = Hazard.from_raster( + [DATA_DIR.joinpath("test_write_hazard.tif")], haz_type="FL" + ) self.assertEqual(haz_read.intensity.shape, (1, 9)) - output_raster = np.array([ - [1, 3], [2, 3], [3, 3], - [1, 2], [2, 2], [3, 2], - [1, 1], [2, 1], [3, 1] - ]) - output_instensity = np.array([ - 0, 0, 0.33, - 0, 0.22, 0, - 0.11, 0, 0.31 - ]) - - np.testing.assert_array_equal( - haz_read.centroids.lon, - output_raster[:, 0] - ) - np.testing.assert_array_equal( - haz_read.centroids.lat, - output_raster[:, 1] + output_raster = np.array( + [[1, 3], [2, 3], [3, 3], [1, 2], [2, 2], [3, 2], [1, 1], [2, 1], [3, 1]] ) + output_instensity = np.array([0, 0, 0.33, 0, 0.22, 0, 0.11, 0, 0.31]) + + np.testing.assert_array_equal(haz_read.centroids.lon, output_raster[:, 0]) + np.testing.assert_array_equal(haz_read.centroids.lat, output_raster[:, 1]) np.testing.assert_array_almost_equal( - haz_read.intensity.toarray().flatten(), - output_instensity + haz_read.intensity.toarray().flatten(), output_instensity ) - DATA_DIR.joinpath('test_write_hazard.tif').unlink() + DATA_DIR.joinpath("test_write_hazard.tif").unlink() def test_read_write_vector_fraction_pass(self): """Test write_raster: Rasterize fraction from vector data""" haz_fl = Hazard( - 'FL', + "FL", event_id=np.array([1]), date=np.array([1]), frequency=np.array([1]), orig=np.array([1]), - event_name=['1'], + event_name=["1"], intensity=sparse.csr_matrix(np.array([-0.11, -0.22, -0.33, -0.31])), fraction=sparse.csr_matrix(np.array([0.11, 0.22, 0.33, 0.31])), centroids=Centroids( lon=np.array([1, 2, 3, 3]), lat=np.array([1, 2, 3, 1]), crs=DEF_CRS - ) + ), ) - intensity_file = DATA_DIR.joinpath('test_write_hazard_intensity.tif') - fraction_file = DATA_DIR.joinpath('test_write_hazard_fraction.tif') + intensity_file = DATA_DIR.joinpath("test_write_hazard_intensity.tif") + fraction_file = DATA_DIR.joinpath("test_write_hazard_fraction.tif") - haz_fl.write_raster(fraction_file, variable='fraction') - haz_fl.write_raster(intensity_file, variable='intensity') + haz_fl.write_raster(fraction_file, variable="fraction") + haz_fl.write_raster(intensity_file, variable="intensity") - haz_read = Hazard.from_raster( - [intensity_file], [fraction_file], haz_type='FL' - ) + haz_read = Hazard.from_raster([intensity_file], [fraction_file], haz_type="FL") self.assertEqual(haz_read.fraction.shape, (1, 9)) self.assertEqual(haz_read.intensity.shape, (1, 9)) - - output_raster = np.array([ - [1, 3], [2, 3], [3, 3], - [1, 2], [2, 2], [3, 2], - [1, 1], [2, 1], [3, 1] - ]) - output_fraction = np.array([ - 0, 0, 0.33, - 0, 0.22, 0, - 0.11, 0, 0.31 - ]) - - output_intensity = np.array([ - 0, 0, -0.33, - 0, -0.22, 0, - -0.11, 0, -0.31 - ]) - - np.testing.assert_array_equal( - haz_read.centroids.lon, - output_raster[:, 0] - ) - np.testing.assert_array_equal( - haz_read.centroids.lat, - output_raster[:, 1] + output_raster = np.array( + [[1, 3], [2, 3], [3, 3], [1, 2], [2, 2], [3, 2], [1, 1], [2, 1], [3, 1]] ) + output_fraction = np.array([0, 0, 0.33, 0, 0.22, 0, 0.11, 0, 0.31]) + + output_intensity = np.array([0, 0, -0.33, 0, -0.22, 0, -0.11, 0, -0.31]) + + np.testing.assert_array_equal(haz_read.centroids.lon, output_raster[:, 0]) + np.testing.assert_array_equal(haz_read.centroids.lat, output_raster[:, 1]) np.testing.assert_array_almost_equal( - haz_read.fraction.toarray().flatten(), - output_fraction + haz_read.fraction.toarray().flatten(), output_fraction ) np.testing.assert_array_almost_equal( - 
haz_read.intensity.toarray().flatten(), - output_intensity + haz_read.intensity.toarray().flatten(), output_intensity ) DATA_DIR.joinpath(intensity_file).unlink() DATA_DIR.joinpath(fraction_file).unlink() - class TestStormEurope(unittest.TestCase): """Test methods to create StormEurope object""" @@ -215,9 +191,7 @@ def _test_first(haz): self.assertEqual(haz.frequency[0], 1.0) # Load first entry - storms = StormEurope.from_footprints( - WS_DEMO_NC[0] - ) + storms = StormEurope.from_footprints(WS_DEMO_NC[0]) _test_first(storms) # Omit the second file, should be the same result @@ -299,8 +273,8 @@ def test_ibtracs_with_basin(self): year_range=(1995, 1995), basin="SP", estimate_missing=True ) self.assertEqual(tc_track.size, 6) - self.assertEqual(tc_track.data[0]['basin'][0], "SP") - self.assertEqual(tc_track.data[5]['basin'][0], "SI") + self.assertEqual(tc_track.data[0]["basin"][0], "SP") + self.assertEqual(tc_track.data[5]["basin"][0], "SI") # genesis in NI tc_track = tc.TCTracks.from_ibtracs_netcdf( @@ -308,7 +282,7 @@ def test_ibtracs_with_basin(self): ) self.assertEqual(tc_track.size, 5) for tr in tc_track.data: - self.assertEqual(tr['basin'][0], "NI") + self.assertEqual(tr["basin"][0], "NI") # genesis in EP, but crosses WP at some point tc_track = tc.TCTracks.from_ibtracs_netcdf( @@ -316,8 +290,8 @@ def test_ibtracs_with_basin(self): ) self.assertEqual(tc_track.size, 3) for tr in tc_track.data: - self.assertEqual(tr['basin'][0], "EP") - self.assertIn("WP", tr['basin']) + self.assertEqual(tr["basin"][0], "EP") + self.assertIn("WP", tr["basin"]) def test_cutoff_tracks(self): tc_track = tc.TCTracks.from_ibtracs_netcdf(storm_id="1986226N30276") diff --git a/climada/test/test_litpop_integr.py b/climada/test/test_litpop_integr.py index 3a963d0df..0390a4538 100644 --- a/climada/test/test_litpop_integr.py +++ b/climada/test/test_litpop_integr.py @@ -18,207 +18,280 @@ Tests on LitPop exposures. 
""" + import unittest + import numpy as np from shapely.geometry import Polygon -from climada.entity.exposures.litpop import litpop as lp -from climada.entity.exposures.litpop import gpw_population -from climada.util.finance import world_bank_wealth_account, gdp, income_group import climada.util.coordinates as u_coord -from climada.util.constants import SYSTEM_DIR from climada import CONFIG +from climada.entity.exposures.litpop import gpw_population +from climada.entity.exposures.litpop import litpop as lp +from climada.util.constants import SYSTEM_DIR +from climada.util.finance import gdp, income_group, world_bank_wealth_account + +bounds = (8.41, 47.2, 8.70, 47.45) # (min_lon, max_lon, min_lat, max_lat) +shape = Polygon( + [ + (bounds[0], bounds[3]), + (bounds[2], bounds[3]), + (bounds[2], bounds[1]), + (bounds[0], bounds[1]), + ] +) -bounds = (8.41, 47.2, 8.70, 47.45) # (min_lon, max_lon, min_lat, max_lat) -shape = Polygon([ - (bounds[0], bounds[3]), - (bounds[2], bounds[3]), - (bounds[2], bounds[1]), - (bounds[0], bounds[1]) - ]) class TestLitPopExposure(unittest.TestCase): """Test LitPop exposure data model:""" def test_netherlands150_pass(self): """Test from_countries for Netherlands at 150 arcsec, first shape is empty""" - ent = lp.LitPop.from_countries('Netherlands', res_arcsec=150, reference_year=2016) + ent = lp.LitPop.from_countries( + "Netherlands", res_arcsec=150, reference_year=2016 + ) self.assertEqual(ent.gdf.shape[0], 2829) def test_BLM150_pass(self): """Test from_countries for BLM at 150 arcsec, 2 data points""" - ent = lp.LitPop.from_countries('BLM', res_arcsec=150, reference_year=2016) + ent = lp.LitPop.from_countries("BLM", res_arcsec=150, reference_year=2016) self.assertEqual(ent.gdf.shape[0], 2) def test_Monaco150_pass(self): """Test from_countries for Moncao at 150 arcsec, 1 data point""" - ent = lp.LitPop.from_countries('Monaco', res_arcsec=150, reference_year=2016) + ent = lp.LitPop.from_countries("Monaco", res_arcsec=150, reference_year=2016) self.assertEqual(ent.gdf.shape[0], 1) def test_switzerland300_pass(self): """Create LitPop entity for Switzerland on 300 arcsec:""" - country_name = ['CHE'] + country_name = ["CHE"] resolution = 300 - fin_mode = 'income_group' - with self.assertLogs('climada.entity.exposures.litpop', level='INFO') as cm: - ent = lp.LitPop.from_countries(country_name, res_arcsec=resolution, fin_mode=fin_mode, - reference_year=2016) - - self.assertIn('LitPop: Init Exposure for country: CHE', cm.output[0]) - self.assertEqual(ent.gdf['region_id'].min(), 756) - self.assertEqual(ent.gdf['region_id'].max(), 756) + fin_mode = "income_group" + with self.assertLogs("climada.entity.exposures.litpop", level="INFO") as cm: + ent = lp.LitPop.from_countries( + country_name, + res_arcsec=resolution, + fin_mode=fin_mode, + reference_year=2016, + ) + + self.assertIn("LitPop: Init Exposure for country: CHE", cm.output[0]) + self.assertEqual(ent.gdf["region_id"].min(), 756) + self.assertEqual(ent.gdf["region_id"].max(), 756) # confirm that the total value is equal to GDP * (income_group+1): - self.assertAlmostEqual(ent.gdf['value'].sum()/gdp('CHE', 2016)[1], - (income_group('CHE', 2016)[1] + 1)) - self.assertIn("LitPop Exposure for ['CHE'] at 300 as, year: 2016", ent.description) - self.assertIn('income_group', ent.description) - self.assertIn('1, 1', ent.description) - self.assertTrue(u_coord.equal_crs(ent.crs, 'epsg:4326')) - self.assertEqual(ent.meta['width'], 54) - self.assertEqual(ent.meta['height'], 23) - self.assertTrue(u_coord.equal_crs(ent.meta['crs'], 
'epsg:4326')) - self.assertAlmostEqual(ent.meta['transform'][0], 0.08333333333333333) - self.assertAlmostEqual(ent.meta['transform'][1], 0) - self.assertAlmostEqual(ent.meta['transform'][2], 5.9166666666666) - self.assertAlmostEqual(ent.meta['transform'][3], 0) - self.assertAlmostEqual(ent.meta['transform'][4], -0.08333333333333333) - self.assertAlmostEqual(ent.meta['transform'][5], 47.75) + self.assertAlmostEqual( + ent.gdf["value"].sum() / gdp("CHE", 2016)[1], + (income_group("CHE", 2016)[1] + 1), + ) + self.assertIn( + "LitPop Exposure for ['CHE'] at 300 as, year: 2016", ent.description + ) + self.assertIn("income_group", ent.description) + self.assertIn("1, 1", ent.description) + self.assertTrue(u_coord.equal_crs(ent.crs, "epsg:4326")) + self.assertEqual(ent.meta["width"], 54) + self.assertEqual(ent.meta["height"], 23) + self.assertTrue(u_coord.equal_crs(ent.meta["crs"], "epsg:4326")) + self.assertAlmostEqual(ent.meta["transform"][0], 0.08333333333333333) + self.assertAlmostEqual(ent.meta["transform"][1], 0) + self.assertAlmostEqual(ent.meta["transform"][2], 5.9166666666666) + self.assertAlmostEqual(ent.meta["transform"][3], 0) + self.assertAlmostEqual(ent.meta["transform"][4], -0.08333333333333333) + self.assertAlmostEqual(ent.meta["transform"][5], 47.75) def test_switzerland30normPop_pass(self): """Create LitPop entity for Switzerland on 30 arcsec:""" - country_name = ['CHE'] + country_name = ["CHE"] resolution = 30 exp = [0, 1] - fin_mode = 'norm' - with self.assertLogs('climada.entity.exposures.litpop', level='INFO') as cm: - ent = lp.LitPop.from_countries(country_name, res_arcsec=resolution, exponents=exp, - fin_mode=fin_mode, reference_year=2015) + fin_mode = "norm" + with self.assertLogs("climada.entity.exposures.litpop", level="INFO") as cm: + ent = lp.LitPop.from_countries( + country_name, + res_arcsec=resolution, + exponents=exp, + fin_mode=fin_mode, + reference_year=2015, + ) # print(cm) - self.assertIn('LitPop: Init Exposure for country: CHE', cm.output[0]) - self.assertEqual(ent.gdf['region_id'].min(), 756) - self.assertEqual(ent.gdf['region_id'].max(), 756) - self.assertEqual(ent.gdf['value'].sum(), 1.0) + self.assertIn("LitPop: Init Exposure for country: CHE", cm.output[0]) + self.assertEqual(ent.gdf["region_id"].min(), 756) + self.assertEqual(ent.gdf["region_id"].max(), 756) + self.assertEqual(ent.gdf["value"].sum(), 1.0) self.assertEqual(ent.ref_year, 2015) def test_suriname30_nfw_pass(self): """Create LitPop entity for Suriname for non-finanical wealth in 2016:""" - country_name = ['SUR'] - fin_mode = 'nfw' - ent = lp.LitPop.from_countries(country_name, reference_year=2016, fin_mode=fin_mode) - - self.assertEqual(ent.gdf['region_id'].min(), 740) - self.assertEqual(ent.gdf['region_id'].max(), 740) + country_name = ["SUR"] + fin_mode = "nfw" + ent = lp.LitPop.from_countries( + country_name, reference_year=2016, fin_mode=fin_mode + ) + + self.assertEqual(ent.gdf["region_id"].min(), 740) + self.assertEqual(ent.gdf["region_id"].max(), 740) self.assertEqual(ent.ref_year, 2016) def test_switzerland300_admin1_pc2016_pass(self): """Create LitPop entity for Switzerland 2016 with admin1 and produced capital:""" - country_name = ['CHE'] - fin_mode = 'pc' + country_name = ["CHE"] + fin_mode = "pc" resolution = 300 ref_year = 2016 adm1 = True - comparison_total_val = world_bank_wealth_account(country_name[0], ref_year, no_land=1)[1] - ent = lp.LitPop.from_countries(country_name, res_arcsec=resolution, - reference_year=ref_year, fin_mode=fin_mode, - admin1_calc=adm1) - - 
self.assertAlmostEqual(np.around(ent.gdf['value'].sum()*1e-9, 0), - np.around(comparison_total_val*1e-9, 0), places=0) - self.assertEqual(ent.value_unit, 'USD') + comparison_total_val = world_bank_wealth_account( + country_name[0], ref_year, no_land=1 + )[1] + ent = lp.LitPop.from_countries( + country_name, + res_arcsec=resolution, + reference_year=ref_year, + fin_mode=fin_mode, + admin1_calc=adm1, + ) + + self.assertAlmostEqual( + np.around(ent.gdf["value"].sum() * 1e-9, 0), + np.around(comparison_total_val * 1e-9, 0), + places=0, + ) + self.assertEqual(ent.value_unit, "USD") def test_from_shape_zurich_pass(self): """test initiating LitPop for custom shape (square around Zurich City) Distributing an imaginary total value of 1000 USD""" - total_value=1000 - ent = lp.LitPop.from_shape(shape, total_value, res_arcsec=30, reference_year=2016) - self.assertEqual(ent.gdf['value'].sum(), 1000.0) - self.assertEqual(ent.gdf['value'].min(), 0.0) - self.assertEqual(ent.gdf['region_id'].min(), 756) - self.assertEqual(ent.gdf['region_id'].max(), 756) - self.assertAlmostEqual(ent.gdf['latitude'].min(), 47.20416666666661) + total_value = 1000 + ent = lp.LitPop.from_shape( + shape, total_value, res_arcsec=30, reference_year=2016 + ) + self.assertEqual(ent.gdf["value"].sum(), 1000.0) + self.assertEqual(ent.gdf["value"].min(), 0.0) + self.assertEqual(ent.gdf["region_id"].min(), 756) + self.assertEqual(ent.gdf["region_id"].max(), 756) + self.assertAlmostEqual(ent.gdf["latitude"].min(), 47.20416666666661) # index and coord. of largest value: - self.assertEqual(ent.gdf.loc[ent.gdf['value'] == ent.gdf['value'].max()].index[0], 482) - self.assertAlmostEqual(ent.gdf.loc[ent.gdf['value'] == ent.gdf['value'].max()]['latitude'].values[0], 47.34583333333325) - self.assertAlmostEqual(ent.gdf.loc[ent.gdf['value'] == ent.gdf['value'].max()]['longitude'].values[0], 8.529166666666658) + self.assertEqual( + ent.gdf.loc[ent.gdf["value"] == ent.gdf["value"].max()].index[0], 482 + ) + self.assertAlmostEqual( + ent.gdf.loc[ent.gdf["value"] == ent.gdf["value"].max()]["latitude"].values[ + 0 + ], + 47.34583333333325, + ) + self.assertAlmostEqual( + ent.gdf.loc[ent.gdf["value"] == ent.gdf["value"].max()]["longitude"].values[ + 0 + ], + 8.529166666666658, + ) def test_from_shape_and_countries_zurich_pass(self): """test initiating LitPop for custom shape (square around Zurich City) with from_shape_and_countries()""" ent = lp.LitPop.from_shape_and_countries( - shape, 'Switzerland', res_arcsec=30, reference_year=2016) - self.assertEqual(ent.gdf['value'].min(), 0.0) - self.assertEqual(ent.gdf['region_id'].min(), 756) - self.assertEqual(ent.gdf['region_id'].max(), 756) - self.assertAlmostEqual(ent.gdf['latitude'].min(), 47.20416666666661) + shape, "Switzerland", res_arcsec=30, reference_year=2016 + ) + self.assertEqual(ent.gdf["value"].min(), 0.0) + self.assertEqual(ent.gdf["region_id"].min(), 756) + self.assertEqual(ent.gdf["region_id"].max(), 756) + self.assertAlmostEqual(ent.gdf["latitude"].min(), 47.20416666666661) # coord of largest value: - self.assertEqual(ent.gdf.loc[ent.gdf['value'] == ent.gdf['value'].max()].index[0], 434) - self.assertAlmostEqual(ent.gdf.loc[ent.gdf['value'] == ent.gdf['value'].max()]['latitude'].values[0], 47.34583333333325) - self.assertAlmostEqual(ent.gdf.loc[ent.gdf['value'] == ent.gdf['value'].max()]['longitude'].values[0], 8.529166666666658) + self.assertEqual( + ent.gdf.loc[ent.gdf["value"] == ent.gdf["value"].max()].index[0], 434 + ) + self.assertAlmostEqual( + ent.gdf.loc[ent.gdf["value"] == 
ent.gdf["value"].max()]["latitude"].values[ + 0 + ], + 47.34583333333325, + ) + self.assertAlmostEqual( + ent.gdf.loc[ent.gdf["value"] == ent.gdf["value"].max()]["longitude"].values[ + 0 + ], + 8.529166666666658, + ) def test_Liechtenstein_15_lit_pass(self): """Create Nightlights entity for Liechtenstein 2016:""" - country_name = 'Liechtenstein' + country_name = "Liechtenstein" ref_year = 2016 ent = lp.LitPop.from_nightlight_intensity(country_name, reference_year=ref_year) - self.assertEqual(ent.gdf['value'].sum(), 36469.0) - self.assertEqual(ent.gdf['region_id'][1], 438) - self.assertEqual(ent.value_unit, '') - self.assertAlmostEqual(ent.gdf['latitude'].max(), 47.260416666666664) - self.assertAlmostEqual(ent.meta['transform'][4], -15/3600) + self.assertEqual(ent.gdf["value"].sum(), 36469.0) + self.assertEqual(ent.gdf["region_id"][1], 438) + self.assertEqual(ent.value_unit, "") + self.assertAlmostEqual(ent.gdf["latitude"].max(), 47.260416666666664) + self.assertAlmostEqual(ent.meta["transform"][4], -15 / 3600) def test_Liechtenstein_30_pop_pass(self): """Create population count entity for Liechtenstein 2015:""" - country_name = 'Liechtenstein' + country_name = "Liechtenstein" ref_year = 2015 ent = lp.LitPop.from_population(country_name, reference_year=ref_year) - self.assertEqual(ent.gdf['value'].sum(), 30068.970703125) - self.assertEqual(ent.gdf['region_id'][1], 438) - self.assertEqual(ent.value_unit, 'people') - self.assertAlmostEqual(ent.gdf['latitude'].max(), 47.2541666666666) - self.assertAlmostEqual(ent.meta['transform'][0], 30/3600) + self.assertEqual(ent.gdf["value"].sum(), 30068.970703125) + self.assertEqual(ent.gdf["region_id"][1], 438) + self.assertEqual(ent.value_unit, "people") + self.assertAlmostEqual(ent.gdf["latitude"].max(), 47.2541666666666) + self.assertAlmostEqual(ent.meta["transform"][0], 30 / 3600) def test_from_nightlight_intensity(self): - """ Test raises, logger and if methods from_countries and from_shape are - are used.""" + """Test raises, logger and if methods from_countries and from_shape are + are used.""" with self.assertRaises(ValueError) as cm: lp.LitPop.from_nightlight_intensity() - self.assertEqual('Either `countries` or `shape` required. Aborting.', str(cm.exception)) + self.assertEqual( + "Either `countries` or `shape` required. Aborting.", str(cm.exception) + ) with self.assertRaises(ValueError) as cm: - lp.LitPop.from_nightlight_intensity(countries = 'Liechtenstein', shape = shape) - self.assertEqual('Not allowed to set both `countries` and `shape`. Aborting.', str(cm.exception)) - - exp = lp.LitPop.from_nightlight_intensity(countries = 'Liechtenstein') - self.assertEqual(exp.fin_mode, 'none') - - exp = lp.LitPop.from_nightlight_intensity(shape = shape) - self.assertEqual(exp.value_unit, '') - - with self.assertLogs('climada.entity.exposures.litpop.litpop', level = 'WARNING') as cm: - lp.LitPop.from_nightlight_intensity(shape = shape) - self.assertIn('Note: set_nightlight_intensity sets values to raw nightlight intensity,', cm.output[0]) + lp.LitPop.from_nightlight_intensity(countries="Liechtenstein", shape=shape) + self.assertEqual( + "Not allowed to set both `countries` and `shape`. 
Aborting.", + str(cm.exception), + ) + + exp = lp.LitPop.from_nightlight_intensity(countries="Liechtenstein") + self.assertEqual(exp.fin_mode, "none") + + exp = lp.LitPop.from_nightlight_intensity(shape=shape) + self.assertEqual(exp.value_unit, "") + + with self.assertLogs( + "climada.entity.exposures.litpop.litpop", level="WARNING" + ) as cm: + lp.LitPop.from_nightlight_intensity(shape=shape) + self.assertIn( + "Note: set_nightlight_intensity sets values to raw nightlight intensity,", + cm.output[0], + ) def test_from_population(self): - """ Test raises, logger and if methods from_countries and from_shape are - are used.""" + """Test raises, logger and if methods from_countries and from_shape are + are used.""" with self.assertRaises(ValueError) as cm: lp.LitPop.from_population() - self.assertEqual('Either `countries` or `shape` required. Aborting.', str(cm.exception)) + self.assertEqual( + "Either `countries` or `shape` required. Aborting.", str(cm.exception) + ) - exp = lp.LitPop.from_population(countries = 'Liechtenstein') - self.assertEqual(exp.fin_mode, 'pop') + exp = lp.LitPop.from_population(countries="Liechtenstein") + self.assertEqual(exp.fin_mode, "pop") - exp = lp.LitPop.from_population(shape = shape) - self.assertEqual(exp.value_unit, 'people') + exp = lp.LitPop.from_population(shape=shape) + self.assertEqual(exp.value_unit, "people") with self.assertRaises(ValueError) as cm: - lp.LitPop.from_population(countries = 'Liechtenstein', shape = shape) - self.assertEqual('Not allowed to set both `countries` and `shape`. Aborting.', str(cm.exception)) + lp.LitPop.from_population(countries="Liechtenstein", shape=shape) + self.assertEqual( + "Not allowed to set both `countries` and `shape`. Aborting.", + str(cm.exception), + ) class TestAdmin1(unittest.TestCase): @@ -228,12 +301,22 @@ def test_from_countries_calc_admin1_pass(self): """test method from_countries with admin1_calc=True for Switzerland""" country = "Switzerland" resolution = 90 - fin_mode = 'gdp' - - ent = lp.LitPop.from_countries(country, res_arcsec=resolution, fin_mode=fin_mode, - reference_year=2016, admin1_calc=True) - ent_adm0 = lp.LitPop.from_countries(country, res_arcsec=resolution, fin_mode=fin_mode, - reference_year=2016, admin1_calc=False) + fin_mode = "gdp" + + ent = lp.LitPop.from_countries( + country, + res_arcsec=resolution, + fin_mode=fin_mode, + reference_year=2016, + admin1_calc=True, + ) + ent_adm0 = lp.LitPop.from_countries( + country, + res_arcsec=resolution, + fin_mode=fin_mode, + reference_year=2016, + admin1_calc=False, + ) # shape must be same as with admin1_calc = False, otherwise there # is a problem with handling of the admin1 shapes: self.assertEqual(ent.gdf.shape[0], 7800) @@ -242,41 +325,52 @@ def test_from_countries_calc_admin1_pass(self): def test_calc_admin1(self): """test function _calc_admin1_one_country for Switzerland.""" resolution = 300 - country = 'CHE' - ent = lp._calc_admin1_one_country(country, resolution, (2,1), 'pc', None, - 2016, lp.GPW_VERSION, SYSTEM_DIR) + country = "CHE" + ent = lp._calc_admin1_one_country( + country, resolution, (2, 1), "pc", None, 2016, lp.GPW_VERSION, SYSTEM_DIR + ) self.assertEqual(ent.gdf.shape[0], 699) - self.assertEqual(ent.gdf['region_id'][88], 756) - self.assertAlmostEqual(ent.gdf['latitude'].max(), 47.708333333333336) + self.assertEqual(ent.gdf["region_id"][88], 756) + self.assertAlmostEqual(ent.gdf["latitude"].max(), 47.708333333333336) # shape must be same as with admin1_calc = False, otherwise there # is a problem with handling of the admin1 
shapes: - ent_adm0 = lp.LitPop.from_countries(country, res_arcsec=resolution, fin_mode='pc', - reference_year=2016, admin1_calc=False) + ent_adm0 = lp.LitPop.from_countries( + country, + res_arcsec=resolution, + fin_mode="pc", + reference_year=2016, + admin1_calc=False, + ) self.assertEqual(ent.gdf.shape[0], ent_adm0.gdf.shape[0]) def test_brandenburg(self): """test functions from_shape_and_countries and from_shape for admin1 shape of Brandenburg""" reslution_arcsec = 120 - country = 'DEU' - state_name = 'Brandenburg' + country = "DEU" + state_name = "Brandenburg" # get the shape of Brandenburg: admin1_info, admin1_shapes = u_coord.get_admin1_info(country) admin1_info = admin1_info[country] admin1_shapes = admin1_shapes[country] - admin1_names = [record['name'] for record in admin1_info] + admin1_names = [record["name"] for record in admin1_info] print(admin1_names) for idx, name in enumerate(admin1_names): - if admin1_names[idx]==state_name: + if admin1_names[idx] == state_name: break # init LitPop for Brandenburg exp_bra2 = lp.LitPop.from_shape_and_countries( - admin1_shapes[idx], country, res_arcsec=reslution_arcsec, reference_year=2016) + admin1_shapes[idx], + country, + res_arcsec=reslution_arcsec, + reference_year=2016, + ) exp_bra = lp.LitPop.from_shape( - admin1_shapes[idx], 1000, res_arcsec=reslution_arcsec, reference_year=2016) - self.assertAlmostEqual(exp_bra.gdf['value'].sum(), 1000) + admin1_shapes[idx], 1000, res_arcsec=reslution_arcsec, reference_year=2016 + ) + self.assertAlmostEqual(exp_bra.gdf["value"].sum(), 1000) # compare number of data points: self.assertEqual(exp_bra.gdf.shape[0], exp_bra2.gdf.shape[0]) self.assertEqual(exp_bra.gdf.shape[0], 3566) @@ -284,6 +378,7 @@ def test_brandenburg(self): self.assertEqual(len(exp_bra.gdf.geometry.unique()), len(exp_bra.gdf.geometry)) self.assertEqual(len(exp_bra.gdf.geometry.unique()), 3566) + class TestGPWPopulation(unittest.TestCase): """Test gpw_population submodule""" @@ -292,28 +387,30 @@ def test_get_gpw_file_path_pass(self): gpw_version = CONFIG.exposures.litpop.gpw_population.gpw_version.int() try: path = gpw_population.get_gpw_file_path(gpw_version, 2020, verbose=False) - self.assertIn('gpw_v4_population', str(path)) + self.assertIn("gpw_v4_population", str(path)) except FileExistsError as err: - self.assertIn('lease download', err.args[0]) - self.skipTest('GPW input data for GPW v4.%i not found.' %(gpw_version)) + self.assertIn("lease download", err.args[0]) + self.skipTest("GPW input data for GPW v4.%i not found." 
% (gpw_version)) def test_load_gpw_pop_shape_pass(self): """test method gpw_population.load_gpw_pop_shape""" gpw_version = CONFIG.exposures.litpop.gpw_population.gpw_version.int() try: - data, meta, glb_transform = \ - gpw_population.load_gpw_pop_shape(shape, 2020, gpw_version, verbose=False) + data, meta, glb_transform = gpw_population.load_gpw_pop_shape( + shape, 2020, gpw_version, verbose=False + ) self.assertEqual(data.shape, (31, 36)) - self.assertAlmostEqual(meta['transform'][0], 0.00833333333333333) - self.assertAlmostEqual(meta['transform'][0], glb_transform[0]) - self.assertEqual(meta['driver'], 'GTiff') - self.assertEqual(meta['height'], data.shape[0]) - self.assertEqual(meta['width'], data.shape[1]) + self.assertAlmostEqual(meta["transform"][0], 0.00833333333333333) + self.assertAlmostEqual(meta["transform"][0], glb_transform[0]) + self.assertEqual(meta["driver"], "GTiff") + self.assertEqual(meta["height"], data.shape[0]) + self.assertEqual(meta["width"], data.shape[1]) self.assertIsInstance(data, np.ndarray) self.assertEqual(len(data.shape), 2) except FileExistsError as err: - self.assertIn('lease download', err.args[0]) - self.skipTest('GPW input data for GPW v4.%i not found.' %(gpw_version)) + self.assertIn("lease download", err.args[0]) + self.skipTest("GPW input data for GPW v4.%i not found." % (gpw_version)) + # Execute Tests if __name__ == "__main__": diff --git a/climada/test/test_nightlight.py b/climada/test/test_nightlight.py index fff3cc633..cb463fcd7 100644 --- a/climada/test/test_nightlight.py +++ b/climada/test/test_nightlight.py @@ -21,85 +21,113 @@ import gzip import io -from pathlib import Path import tarfile -from tempfile import TemporaryDirectory import unittest +from pathlib import Path +from tempfile import TemporaryDirectory import affine import numpy as np import scipy.sparse as sparse +from osgeo import gdal from PIL import Image from shapely.geometry import Polygon -from osgeo import gdal from climada.entity.exposures.litpop import nightlight -from climada.util.constants import (SYSTEM_DIR, CONFIG) -from climada.util import (files_handler, ureg) +from climada.util import files_handler, ureg +from climada.util.constants import CONFIG, SYSTEM_DIR BM_FILENAMES = nightlight.BM_FILENAMES NOAA_RESOLUTION_DEG = (30 * ureg.arc_second).to(ureg.deg).magnitude + def init_test_shape(): """provide a rectangular shape""" bounds = (14.18, 35.78, 14.58, 36.09) # (min_lon, max_lon, min_lat, max_lat) - return bounds, Polygon([ - (bounds[0], bounds[3]), - (bounds[2], bounds[3]), - (bounds[2], bounds[1]), - (bounds[0], bounds[1]) - ]) + return bounds, Polygon( + [ + (bounds[0], bounds[3]), + (bounds[2], bounds[3]), + (bounds[2], bounds[1]), + (bounds[0], bounds[1]), + ] + ) + class TestNightlight(unittest.TestCase): """Test litpop.nightlight""" def test_load_nasa_nl_shape_single_tile(self): - """ Test that the function returns a np.ndarray containing - the cropped .tif image values. Test that - just one layer is returned. """ + """Test that the function returns a np.ndarray containing + the cropped .tif image values. 
Test that + just one layer is returned.""" # Initialization - path = Path(SYSTEM_DIR, 'BlackMarble_2016_C1_geo_gray.tif') + path = Path(SYSTEM_DIR, "BlackMarble_2016_C1_geo_gray.tif") _, shape = init_test_shape() # Test cropped output - out_image, meta = nightlight.load_nasa_nl_shape_single_tile(geometry = shape, path = path) + out_image, meta = nightlight.load_nasa_nl_shape_single_tile( + geometry=shape, path=path + ) self.assertIsInstance(out_image, np.ndarray) self.assertEqual(len(out_image.shape), 2) # Test meta ouput - self.assertEqual(meta['height'],out_image.shape[0]) - self.assertEqual(meta['width'],out_image.shape[1]) - self.assertEqual(meta['driver'], 'GTiff') - self.assertEqual(meta['transform'], affine.Affine(0.004166666666666667, 0.0, - 14.179166666666667, 0.0, -0.004166666666666667, 36.09166666666667)) + self.assertEqual(meta["height"], out_image.shape[0]) + self.assertEqual(meta["width"], out_image.shape[1]) + self.assertEqual(meta["driver"], "GTiff") + self.assertEqual( + meta["transform"], + affine.Affine( + 0.004166666666666667, + 0.0, + 14.179166666666667, + 0.0, + -0.004166666666666667, + 36.09166666666667, + ), + ) # Test raises with self.assertRaises(IndexError) as cm: - nightlight.load_nasa_nl_shape_single_tile(geometry = shape, path = path, layer = 4) - self.assertEqual("BlackMarble_2016_C1_geo_gray.tif has only 3 layers," - " layer 4 can't be accessed.", str(cm.exception)) + nightlight.load_nasa_nl_shape_single_tile( + geometry=shape, path=path, layer=4 + ) + self.assertEqual( + "BlackMarble_2016_C1_geo_gray.tif has only 3 layers," + " layer 4 can't be accessed.", + str(cm.exception), + ) # Test logger - with self.assertLogs('climada.entity.exposures.litpop.nightlight', level='DEBUG') as cm: - nightlight.load_nasa_nl_shape_single_tile(geometry = shape, path = path) - self.assertIn('Read cropped BlackMarble_2016_C1_geo_gray.tif as np.ndarray.', cm.output[0]) + with self.assertLogs( + "climada.entity.exposures.litpop.nightlight", level="DEBUG" + ) as cm: + nightlight.load_nasa_nl_shape_single_tile(geometry=shape, path=path) + self.assertIn( + "Read cropped BlackMarble_2016_C1_geo_gray.tif as np.ndarray.", cm.output[0] + ) def test_read_bm_files(self): - """" Test that read_bm_files function read NASA BlackMarble GeoTiff and output - an array and a gdal DataSet.""" + """ " Test that read_bm_files function read NASA BlackMarble GeoTiff and output + an array and a gdal DataSet.""" # Download 'BlackMarble_2016_A1_geo_gray.tif' in the temporary directory and create a path temp_dir = TemporaryDirectory() urls = CONFIG.exposures.litpop.nightlights.nasa_sites.list() - url = str(urls[0]) + 'BlackMarble_2016_A1_geo_gray.tif' - files_handler.download_file(url = url, download_dir = temp_dir.name) - filename = 'BlackMarble_2016_A1_geo_gray.tif' + url = str(urls[0]) + "BlackMarble_2016_A1_geo_gray.tif" + files_handler.download_file(url=url, download_dir=temp_dir.name) + filename = "BlackMarble_2016_A1_geo_gray.tif" # Test logger - with self.assertLogs('climada.entity.exposures.litpop.nightlight', level='DEBUG') as cm: - arr1, curr_file = nightlight.read_bm_file(bm_path=temp_dir.name, filename=filename) - self.assertIn('Importing' + temp_dir.name, cm.output[0]) + with self.assertLogs( + "climada.entity.exposures.litpop.nightlight", level="DEBUG" + ) as cm: + arr1, curr_file = nightlight.read_bm_file( + bm_path=temp_dir.name, filename=filename + ) + self.assertIn("Importing" + temp_dir.name, cm.output[0]) # Check outputs are a np.array and a gdal DataSet and band 1 is selected 
self.assertIsInstance(arr1, np.ndarray) @@ -111,64 +139,94 @@ def test_read_bm_files(self): # Check that the right exception is raised with self.assertRaises(FileNotFoundError) as cm: - nightlight.read_bm_file(bm_path='/Wrong/path/file.tif', filename='file.tif') - self.assertEqual('Invalid path: check that the path to BlackMarble file is correct.', - str(cm.exception)) + nightlight.read_bm_file(bm_path="/Wrong/path/file.tif", filename="file.tif") + self.assertEqual( + "Invalid path: check that the path to BlackMarble file is correct.", + str(cm.exception), + ) temp_dir.cleanup() def test_download_nl_files(self): - """ Test that BlackMarble GeoTiff files are downloaded. """ + """Test that BlackMarble GeoTiff files are downloaded.""" # Test Raises temp_dir = TemporaryDirectory() with self.assertRaises(ValueError) as cm: - nightlight.download_nl_files(req_files=np.ones(5), - files_exist=np.zeros(4), - dwnl_path=temp_dir.name) - self.assertEqual('The given arguments are invalid. req_files and ' - 'files_exist must both be as long as there are files to download ' - '(8).', str(cm.exception)) + nightlight.download_nl_files( + req_files=np.ones(5), files_exist=np.zeros(4), dwnl_path=temp_dir.name + ) + self.assertEqual( + "The given arguments are invalid. req_files and " + "files_exist must both be as long as there are files to download " + "(8).", + str(cm.exception), + ) with self.assertRaises(ValueError) as cm: - nightlight.download_nl_files(dwnl_path='not a folder') - self.assertEqual('The folder not a folder does not exist. Operation aborted.', - str(cm.exception)) + nightlight.download_nl_files(dwnl_path="not a folder") + self.assertEqual( + "The folder not a folder does not exist. Operation aborted.", + str(cm.exception), + ) # Test logger - with self.assertLogs('climada.entity.exposures.litpop.nightlight', level='DEBUG') as cm: - dwl_path = nightlight.download_nl_files(req_files=np.ones(len(BM_FILENAMES),), - files_exist=np.ones(len(BM_FILENAMES),), - dwnl_path=temp_dir.name, year=2016) - self.assertIn('All required files already exist. No downloads necessary.', cm.output[0]) + with self.assertLogs( + "climada.entity.exposures.litpop.nightlight", level="DEBUG" + ) as cm: + dwl_path = nightlight.download_nl_files( + req_files=np.ones( + len(BM_FILENAMES), + ), + files_exist=np.ones( + len(BM_FILENAMES), + ), + dwnl_path=temp_dir.name, + year=2016, + ) + self.assertIn( + "All required files already exist. 
No downloads necessary.", + cm.output[0], + ) # Test download - with self.assertLogs('climada.entity.exposures.litpop.nightlight', level='DEBUG') as cm: - dwl_path = nightlight.download_nl_files(req_files=np.array([1, 0, 0, 0, 0, 0, 0, 0]), - files_exist=np.array([0, 1, 1, 1, 1, 1, 1, 1]), - dwnl_path=temp_dir.name) - self.assertIn('Attempting to download file from ' - 'https://eoimages.gsfc.nasa.gov/images/imagerecords/' - '144000/144897/BlackMarble_2016_A1_geo_gray.tif', cm.output[0]) + with self.assertLogs( + "climada.entity.exposures.litpop.nightlight", level="DEBUG" + ) as cm: + dwl_path = nightlight.download_nl_files( + req_files=np.array([1, 0, 0, 0, 0, 0, 0, 0]), + files_exist=np.array([0, 1, 1, 1, 1, 1, 1, 1]), + dwnl_path=temp_dir.name, + ) + self.assertIn( + "Attempting to download file from " + "https://eoimages.gsfc.nasa.gov/images/imagerecords/" + "144000/144897/BlackMarble_2016_A1_geo_gray.tif", + cm.output[0], + ) # Test if dwl_path has been returned self.assertEqual(temp_dir.name, dwl_path) temp_dir.cleanup() def test_unzip_tif_to_py(self): - """ Test that .gz files are unzipped and read as a sparse matrix, - file_name is correct and logger message recorded. """ + """Test that .gz files are unzipped and read as a sparse matrix, + file_name is correct and logger message recorded.""" - path_file_tif_gz = str(SYSTEM_DIR.joinpath('F182013.v4c_web.stable_lights.avg_vis.tif.gz')) - with self.assertLogs('climada.entity.exposures.litpop.nightlight', level='INFO') as cm: + path_file_tif_gz = str( + SYSTEM_DIR.joinpath("F182013.v4c_web.stable_lights.avg_vis.tif.gz") + ) + with self.assertLogs( + "climada.entity.exposures.litpop.nightlight", level="INFO" + ) as cm: file_name, night = nightlight.unzip_tif_to_py(path_file_tif_gz) - self.assertIn(f'Unzipping file {path_file_tif_gz}', cm.output[0]) - self.assertEqual(str(file_name), 'F182013.v4c_web.stable_lights.avg_vis.tif') + self.assertIn(f"Unzipping file {path_file_tif_gz}", cm.output[0]) + self.assertEqual(str(file_name), "F182013.v4c_web.stable_lights.avg_vis.tif") self.assertIsInstance(night, sparse._csr.csr_matrix) - SYSTEM_DIR.joinpath('F182013.v4c_web.stable_lights.avg_vis.p').unlink() + SYSTEM_DIR.joinpath("F182013.v4c_web.stable_lights.avg_vis.p").unlink() def test_load_nightlight_noaa(self): - """ Test that data is not downloaded if a .tif.gz file is present - in SYSTEM_DIR. 
""" + """Test that data is not downloaded if a .tif.gz file is present + in SYSTEM_DIR.""" # initialization - sat_name = 'E99' + sat_name = "E99" year = 2013 pattern = f"{sat_name}{year}.v4c_web.stable_lights.avg_vis" gzfile = f"{pattern}.tif.gz" @@ -183,12 +241,14 @@ def test_load_nightlight_noaa(self): with io.BytesIO() as mem: pilim.save(mem, "tiff") # compressed image to a gzip file - with gzip.GzipFile(SYSTEM_DIR.joinpath(gzfile), 'wb') as f: + with gzip.GzipFile(SYSTEM_DIR.joinpath(gzfile), "wb") as f: f.write(mem.getvalue()) try: # with arguments - night, coord_nl, fn_light = nightlight.load_nightlight_noaa(ref_year=year, sat_name=sat_name) + night, coord_nl, fn_light = nightlight.load_nightlight_noaa( + ref_year=year, sat_name=sat_name + ) self.assertIsInstance(night, sparse._csr.csr_matrix) self.assertIn(tiffile, str(fn_light)) @@ -196,94 +256,125 @@ def test_load_nightlight_noaa(self): night, coord_nl, fn_light = nightlight.load_nightlight_noaa() self.assertIsInstance(night, sparse._csr.csr_matrix) self.assertIn(pfile, str(fn_light)) - self.assertTrue(np.array_equal(np.array([[-65, NOAA_RESOLUTION_DEG], - [-180, NOAA_RESOLUTION_DEG]]),coord_nl)) + self.assertTrue( + np.array_equal( + np.array([[-65, NOAA_RESOLUTION_DEG], [-180, NOAA_RESOLUTION_DEG]]), + coord_nl, + ) + ) # test raises from wrong input agruments with self.assertRaises(ValueError) as cm: night, coord_nl, fn_light = nightlight.load_nightlight_noaa( - ref_year=2050, sat_name='F150') - self.assertEqual('Nightlight intensities for year 2050 and satellite F150 do not exist.', - str(cm.exception)) + ref_year=2050, sat_name="F150" + ) + self.assertEqual( + "Nightlight intensities for year 2050 and satellite F150 do not exist.", + str(cm.exception), + ) finally: # clean up SYSTEM_DIR.joinpath(pfile).unlink(missing_ok=True) SYSTEM_DIR.joinpath(gzfile).unlink(missing_ok=True) def test_untar_noaa_stable_nighlight(self): - """ Testing that input .tar file is moved into SYSTEM_DIR, - tif.gz file is extracted from .tar file and moved into SYSTEM_DIR, - exception are raised when no .tif.gz file is present in the tar file, - and the logger message is recorded if more then one .tif.gz is present in - .tar file. 
""" + """Testing that input .tar file is moved into SYSTEM_DIR, + tif.gz file is extracted from .tar file and moved into SYSTEM_DIR, + exception are raised when no .tif.gz file is present in the tar file, + and the logger message is recorded if more then one .tif.gz is present in + .tar file.""" # Create path to .tif.gz and .csv files already existing in SYSTEM_DIR - path_tif_gz_1 = Path(SYSTEM_DIR, 'F182013.v4c_web.stable_lights.avg_vis.tif.gz') - path_csv = Path(SYSTEM_DIR, 'GDP_TWN_IMF_WEO_data.csv') - path_tar = Path(SYSTEM_DIR, 'sample.tar') + path_tif_gz_1 = Path(SYSTEM_DIR, "F182013.v4c_web.stable_lights.avg_vis.tif.gz") + path_csv = Path(SYSTEM_DIR, "GDP_TWN_IMF_WEO_data.csv") + path_tar = Path(SYSTEM_DIR, "sample.tar") # Create .tar file and add .tif.gz and .csv - file_tar = tarfile.open(path_tar, "w") #create the tar file - file_tar.add(name = path_tif_gz_1, recursive = False, arcname = 'F182013.v4c_web.stable_lights.avg_vis.tif.gz') + file_tar = tarfile.open(path_tar, "w") # create the tar file + file_tar.add( + name=path_tif_gz_1, + recursive=False, + arcname="F182013.v4c_web.stable_lights.avg_vis.tif.gz", + ) file_tar.close() # Test that the files has been moved path_to_test = nightlight.untar_noaa_stable_nightlight(path_tar) self.assertTrue(path_to_test.exists()) - self.assertTrue(path_tar .exists()) + self.assertTrue(path_tar.exists()) path_tar.unlink() # Put no .tif.gz file in .tar file and check raises - path_tar = Path(SYSTEM_DIR, 'sample.tar') - file_tar = tarfile.open(path_tar, "w") #create the tar file - file_tar.add(name = path_csv, recursive = False, arcname ='GDP_TWN_IMF_WEO_data.csv' ) + path_tar = Path(SYSTEM_DIR, "sample.tar") + file_tar = tarfile.open(path_tar, "w") # create the tar file + file_tar.add(name=path_csv, recursive=False, arcname="GDP_TWN_IMF_WEO_data.csv") file_tar.close() with self.assertRaises(ValueError) as cm: nightlight.untar_noaa_stable_nightlight(path_tar) - self.assertEqual('No stable light intensities for selected year and satellite ' - f'in file {path_tar}',str(cm.exception)) + self.assertEqual( + "No stable light intensities for selected year and satellite " + f"in file {path_tar}", + str(cm.exception), + ) path_tar.unlink() # Test logger with having two .tif.gz file in .tar file - file_tar = tarfile.open(path_tar, "w") #create the tar file - file_tar.add(name = path_tif_gz_1, recursive = False, arcname = 'F182013.v4c_web.stable_lights.avg_vis.tif.gz' ) - file_tar.add(name = path_tif_gz_1, recursive = False, arcname = 'F182013.v4c_web.stable_lights.avg_vis.tif.gz' ) + file_tar = tarfile.open(path_tar, "w") # create the tar file + file_tar.add( + name=path_tif_gz_1, + recursive=False, + arcname="F182013.v4c_web.stable_lights.avg_vis.tif.gz", + ) + file_tar.add( + name=path_tif_gz_1, + recursive=False, + arcname="F182013.v4c_web.stable_lights.avg_vis.tif.gz", + ) file_tar.close() - with self.assertLogs('climada.entity.exposures.litpop.nightlight', level = 'WARNING') as cm: + with self.assertLogs( + "climada.entity.exposures.litpop.nightlight", level="WARNING" + ) as cm: nightlight.untar_noaa_stable_nightlight(path_tar) - self.assertIn('found more than one potential intensity file in', cm.output[0]) + self.assertIn("found more than one potential intensity file in", cm.output[0]) path_tar.unlink() def test_check_nl_local_file_exists(self): - """ Test that an array with the correct number of already existing files - is produced, the LOGGER messages logged and the ValueError raised. 
""" + """Test that an array with the correct number of already existing files + is produced, the LOGGER messages logged and the ValueError raised.""" # check logger messages by giving a to short req_file - with self.assertLogs('climada.entity.exposures.litpop.nightlight', level='WARNING') as cm: - nightlight.check_nl_local_file_exists(required_files = np.array([0, 0, 1, 1])) - self.assertIn('The parameter \'required_files\' was too short and is ignored', - cm.output[0]) + with self.assertLogs( + "climada.entity.exposures.litpop.nightlight", level="WARNING" + ) as cm: + nightlight.check_nl_local_file_exists(required_files=np.array([0, 0, 1, 1])) + self.assertIn( + "The parameter 'required_files' was too short and is ignored", cm.output[0] + ) # check logger message: not all files are available - with self.assertLogs('climada.entity.exposures.litpop.nightlight', level='DEBUG') as cm: + with self.assertLogs( + "climada.entity.exposures.litpop.nightlight", level="DEBUG" + ) as cm: nightlight.check_nl_local_file_exists() - self.assertIn('Not all satellite files available. Found ', cm.output[0]) - self.assertIn(f' out of 8 required files in {Path(SYSTEM_DIR)}', cm.output[0]) + self.assertIn("Not all satellite files available. Found ", cm.output[0]) + self.assertIn(f" out of 8 required files in {Path(SYSTEM_DIR)}", cm.output[0]) # check logger message: no files found in checkpath - check_path = Path('climada/entity/exposures') - with self.assertLogs('climada.entity.exposures.litpop.nightlight', level='INFO') as cm: + check_path = Path("climada/entity/exposures") + with self.assertLogs( + "climada.entity.exposures.litpop.nightlight", level="INFO" + ) as cm: # using a random path where no files are stored nightlight.check_nl_local_file_exists(check_path=check_path) - self.assertIn(f'No satellite files found locally in {check_path}', - cm.output[0]) + self.assertIn(f"No satellite files found locally in {check_path}", cm.output[0]) # test raises with wrong path - check_path = Path('/random/wrong/path') + check_path = Path("/random/wrong/path") with self.assertRaises(ValueError) as cm: nightlight.check_nl_local_file_exists(check_path=check_path) - self.assertEqual(f'The given path does not exist: {check_path}', - str(cm.exception)) + self.assertEqual( + f"The given path does not exist: {check_path}", str(cm.exception) + ) # test that files_exist is correct files_exist = nightlight.check_nl_local_file_exists() @@ -295,16 +386,18 @@ def test_check_files_exist(self): # If invalid directory is supplied it has to fail try: nightlight.check_nl_local_file_exists( - np.ones(np.count_nonzero(BM_FILENAMES)), 'Invalid/path')[0] - raise Exception("if the path is not valid, check_nl_local_file_exists should fail") + np.ones(np.count_nonzero(BM_FILENAMES)), "Invalid/path" + )[0] + raise Exception( + "if the path is not valid, check_nl_local_file_exists should fail" + ) except ValueError: pass files_exist = nightlight.check_nl_local_file_exists( - np.ones(np.count_nonzero(BM_FILENAMES)), SYSTEM_DIR) - self.assertTrue( - files_exist.sum() > 0, - f'{files_exist} {BM_FILENAMES}' + np.ones(np.count_nonzero(BM_FILENAMES)), SYSTEM_DIR ) + self.assertTrue(files_exist.sum() > 0, f"{files_exist} {BM_FILENAMES}") + # Execute Tests if __name__ == "__main__": diff --git a/climada/test/test_plot.py b/climada/test/test_plot.py index dcfb608f9..082f38e1b 100644 --- a/climada/test/test_plot.py +++ b/climada/test/test_plot.py @@ -18,36 +18,45 @@ test plots """ + import copy import unittest import urllib +from pathlib import Path 
-import numpy as np +import contextily as ctx import matplotlib.pyplot as plt +import numpy as np import pandas as pd -import contextily as ctx -from pathlib import Path -from climada.engine.unsequa import UncOutput -from climada.engine import ImpactCalc, ImpactFreqCurve, CostBenefit -from climada.entity import (Entity, ImpactFuncSet, Exposures, DiscRates, ImpfTropCyclone, Measure, - MeasureSet) -from climada.hazard import Hazard, Centroids -from climada.util.constants import ENT_DEMO_TODAY, TEST_UNC_OUTPUT_COSTBEN, HAZ_DEMO_FL -from climada.util.api_client import Client +from climada.engine import CostBenefit, ImpactCalc, ImpactFreqCurve +from climada.engine.unsequa import UncOutput +from climada.entity import ( + DiscRates, + Entity, + Exposures, + ImpactFuncSet, + ImpfTropCyclone, + Measure, + MeasureSet, +) +from climada.hazard import Centroids, Hazard from climada.test import get_test_file +from climada.util.api_client import Client +from climada.util.constants import ENT_DEMO_TODAY, HAZ_DEMO_FL, TEST_UNC_OUTPUT_COSTBEN - -test_unc_output_costben = Client().get_dataset_file(name=TEST_UNC_OUTPUT_COSTBEN, status='test_dataset') - +test_unc_output_costben = Client().get_dataset_file( + name=TEST_UNC_OUTPUT_COSTBEN, status="test_dataset" +) -HAZ_TEST_TC :Path = get_test_file('test_tc_florida') +HAZ_TEST_TC: Path = get_test_file("test_tc_florida") """ Hazard test file from Data API: Hurricanes from 1851 to 2011 over Florida with 100 centroids. Fraction is empty. Format: HDF5. """ + class TestPlotter(unittest.TestCase): """Test plot functions.""" @@ -62,31 +71,31 @@ def test_hazard_intensity_pass(self): hazard.event_name[3898] = "NNN_1190604_gen8" hazard.event_name[5488] = "NNN_1192804_gen8" myax = hazard.plot_intensity(event=36) - self.assertIn('Event ID 36: NNN_1185106_gen5', myax.get_title()) + self.assertIn("Event ID 36: NNN_1185106_gen5", myax.get_title()) myax = hazard.plot_intensity(event=-1) - self.assertIn('1-largest Event. ID 3899: NNN_1190604_gen8', myax.get_title()) + self.assertIn("1-largest Event. ID 3899: NNN_1190604_gen8", myax.get_title()) myax = hazard.plot_intensity(event=-4) - self.assertIn('4-largest Event. ID 5489: NNN_1192804_gen8', myax.get_title()) + self.assertIn("4-largest Event. ID 5489: NNN_1192804_gen8", myax.get_title()) myax = hazard.plot_intensity(event=0) - self.assertIn('TC max intensity at each point', myax.get_title()) + self.assertIn("TC max intensity at each point", myax.get_title()) myax = hazard.plot_intensity(centr=59) - self.assertIn('Centroid 59: (30.0, -79.0)', myax.get_title()) + self.assertIn("Centroid 59: (30.0, -79.0)", myax.get_title()) myax = hazard.plot_intensity(centr=-1) - self.assertIn('1-largest Centroid. 99: (30.0, -75.0)', myax.get_title()) + self.assertIn("1-largest Centroid. 99: (30.0, -75.0)", myax.get_title()) myax = hazard.plot_intensity(centr=-4) - self.assertIn('4-largest Centroid. 69: (30.0, -78.0)', myax.get_title()) + self.assertIn("4-largest Centroid. 
69: (30.0, -78.0)", myax.get_title()) myax = hazard.plot_intensity(centr=0) - self.assertIn('TC max intensity at each event', myax.get_title()) + self.assertIn("TC max intensity at each event", myax.get_title()) - myax = hazard.plot_intensity(event='NNN_1192804_gen8') - self.assertIn('NNN_1192804_gen8', myax.get_title()) + myax = hazard.plot_intensity(event="NNN_1192804_gen8") + self.assertIn("NNN_1192804_gen8", myax.get_title()) def test_hazard_fraction_pass(self): """Generate all possible plots of the hazard fraction.""" @@ -94,30 +103,30 @@ def test_hazard_fraction_pass(self): hazard.event_name = [""] * hazard.event_id.size hazard.event_name[0] = "NNN_1185106_gen5" myax = hazard.plot_fraction(event=1) - self.assertIn('Event ID 1: NNN_1185106_gen5', myax.get_title()) + self.assertIn("Event ID 1: NNN_1185106_gen5", myax.get_title()) myax = hazard.plot_fraction(centr=1) - self.assertIn('Centroid 1: (10.424, -69.324)', myax.get_title()) + self.assertIn("Centroid 1: (10.424, -69.324)", myax.get_title()) def test_hazard_rp_intensity(self): - """"Plot exceedance intensity maps for different return periods""" + """ "Plot exceedance intensity maps for different return periods""" hazard = Hazard.from_hdf5(HAZ_TEST_TC) (axis1, axis2), _ = hazard.plot_rp_intensity([25, 50]) - self.assertEqual('Return period: 25 years', axis1.get_title()) - self.assertEqual('Return period: 50 years', axis2.get_title()) + self.assertEqual("Return period: 25 years", axis1.get_title()) + self.assertEqual("Return period: 50 years", axis2.get_title()) def test_exposures_value_pass(self): """Plot exposures values.""" myexp = pd.read_excel(ENT_DEMO_TODAY) myexp = Exposures(myexp) myexp.check() - myexp.description = 'demo_today' + myexp.description = "demo_today" myax = myexp.plot_hexbin() - self.assertEqual('demo_today', myax.get_title()) + self.assertEqual("demo_today", myax.get_title()) myexp.description = None myax = myexp.plot_hexbin() - self.assertEqual('', myax.get_title()) + self.assertEqual("", myax.get_title()) myexp.plot_scatter() myexp.plot_basemap() @@ -129,9 +138,8 @@ def test_impact_funcs_pass(self): myfuncs = ImpactFuncSet.from_excel(ENT_DEMO_TODAY) myax = myfuncs.plot() self.assertEqual(2, len(myax)) - self.assertIn('TC 1: Tropical cyclone default', - myax[0].title.get_text()) - self.assertIn('TC 3: TC Building code', myax[1].title.get_text()) + self.assertIn("TC 1: Tropical cyclone default", myax[0].title.get_text()) + self.assertIn("TC 3: TC Building code", myax[1].title.get_text()) def test_impact_pass(self): """Plot impact exceedence frequency curves.""" @@ -142,21 +150,21 @@ def test_impact_pass(self): myimp = ImpactCalc(myent.exposures, myent.impact_funcs, myhaz).impact() ifc = myimp.calc_freq_curve() myax = ifc.plot() - self.assertIn('Exceedance frequency curve', myax.get_title()) + self.assertIn("Exceedance frequency curve", myax.get_title()) ifc2 = ImpactFreqCurve( return_per=ifc.return_per, impact=1.5e11 * np.ones(ifc.return_per.size), - label='prove' + label="prove", ) ifc2.plot(axis=myax) def test_ctx_osm_pass(self): """Test basemap function using osm images""" myexp = Exposures() - myexp.gdf['latitude'] = np.array([30, 40, 50]) - myexp.gdf['longitude'] = np.array([0, 0, 0]) - myexp.gdf['value'] = np.array([1, 1, 1]) + myexp.gdf["latitude"] = np.array([30, 40, 50]) + myexp.gdf["longitude"] = np.array([0, 0, 0]) + myexp.gdf["value"] = np.array([1, 1, 1]) myexp.check() myexp.plot_basemap(url=ctx.providers.OpenStreetMap.Mapnik) @@ -171,36 +179,44 @@ def test_disc_rates(self): disc.plot() def 
test_cost_benefit(self): - """ Test plot functions of cost benefit""" + """Test plot functions of cost benefit""" # Load hazard from the data API client = Client() future_year = 2080 - haz_present = client.get_hazard('tropical_cyclone', - properties={'country_name': 'Haiti', - 'climate_scenario': 'historical', - 'nb_synth_tracks':'10'}) - haz_future = client.get_hazard('tropical_cyclone', - properties={'country_name': 'Haiti', - 'climate_scenario': 'rcp60', - 'ref_year': str(future_year), - 'nb_synth_tracks':'10'}) + haz_present = client.get_hazard( + "tropical_cyclone", + properties={ + "country_name": "Haiti", + "climate_scenario": "historical", + "nb_synth_tracks": "10", + }, + ) + haz_future = client.get_hazard( + "tropical_cyclone", + properties={ + "country_name": "Haiti", + "climate_scenario": "rcp60", + "ref_year": str(future_year), + "nb_synth_tracks": "10", + }, + ) # Create an exposure - exp_present = client.get_litpop(country='Haiti') + exp_present = client.get_litpop(country="Haiti") exp_future = copy.deepcopy(exp_present) exp_future.ref_year = future_year n_years = exp_future.ref_year - exp_present.ref_year + 1 - growth = 1.02 ** n_years - exp_future.gdf['value'] = exp_future.gdf['value'] * growth + growth = 1.02**n_years + exp_future.gdf["value"] = exp_future.gdf["value"] * growth # Create an impact function impf_tc = ImpfTropCyclone.from_emanuel_usa() impf_set = ImpactFuncSet([impf_tc]) # Create adaptation measures meas_1 = Measure( - haz_type='TC', - name='Measure A', + haz_type="TC", + name="Measure A", color_rgb=np.array([0.8, 0.1, 0.1]), cost=5000000000, hazard_inten_imp=(1, -5), @@ -208,8 +224,8 @@ def test_cost_benefit(self): ) meas_2 = Measure( - haz_type='TC', - name='Measure B', + haz_type="TC", + name="Measure B", color_rgb=np.array([0.1, 0.1, 0.8]), cost=220000000, paa_impact=(1, -0.10), @@ -221,25 +237,41 @@ def test_cost_benefit(self): annual_discount_zero = np.zeros(n_years) discount_zero = DiscRates(year_range, annual_discount_zero) # Wrap the entity together - entity_present = Entity(exposures=exp_present, disc_rates=discount_zero, - impact_func_set=impf_set, measure_set=meas_set) - entity_future = Entity(exposures=exp_future, disc_rates=discount_zero, - impact_func_set=impf_set, measure_set=meas_set) + entity_present = Entity( + exposures=exp_present, + disc_rates=discount_zero, + impact_func_set=impf_set, + measure_set=meas_set, + ) + entity_future = Entity( + exposures=exp_future, + disc_rates=discount_zero, + impact_func_set=impf_set, + measure_set=meas_set, + ) # Create a cost benefit object costben = CostBenefit() - costben.calc(haz_present, entity_present, haz_future=haz_future, - ent_future=entity_future, future_year=future_year, - imp_time_depen=1, save_imp=True) + costben.calc( + haz_present, + entity_present, + haz_future=haz_future, + ent_future=entity_future, + future_year=future_year, + imp_time_depen=1, + save_imp=True, + ) # Call the plotting functions costben.plot_cost_benefit() costben.plot_event_view((25, 50, 100, 250)) costben.plot_waterfall_accumulated(haz_present, entity_present, entity_future) - ax = costben.plot_waterfall(haz_present, entity_present, - haz_future, entity_future) - costben.plot_arrow_averted(axis = ax, in_meas_names=['Measure A', 'Measure B'], - accumulate=True) - CostBenefit._plot_list_cost_ben(cb_list = [costben]) + ax = costben.plot_waterfall( + haz_present, entity_present, haz_future, entity_future + ) + costben.plot_arrow_averted( + axis=ax, in_meas_names=["Measure A", "Measure B"], accumulate=True + ) + 
CostBenefit._plot_list_cost_ben(cb_list=[costben]) def test_plot_unc_cb(self): """Test all cost benefit plots""" @@ -255,10 +287,11 @@ def test_plot_unc_cb(self): plt_sens = unc_output.plot_sensitivity() self.assertIsNotNone(plt_sens) plt.close() - plt_sens_2 = unc_output.plot_sensitivity_second_order(salib_si='S1') + plt_sens_2 = unc_output.plot_sensitivity_second_order(salib_si="S1") self.assertIsNotNone(plt_sens_2) plt.close() + # Execute Tests if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestPlotter) diff --git a/climada/test/test_util.py b/climada/test/test_util.py index f7edaa9c5..d6ccdbf7e 100644 --- a/climada/test/test_util.py +++ b/climada/test/test_util.py @@ -24,14 +24,13 @@ import numpy as np -from climada.entity import Exposures import climada.util.lines_polys_handler as u_lp from climada.engine import Impact - +from climada.entity import Exposures from climada.util.test.test_lines_polys_handler import ( - HAZ, EXP_POLY, GDF_POLY, + HAZ, IMPF_SET, check_impact, ) diff --git a/climada/test/test_util_calibrate.py b/climada/test/test_util_calibrate.py index 5432f62cd..8fc6929ff 100644 --- a/climada/test/test_util_calibrate.py +++ b/climada/test/test_util_calibrate.py @@ -20,25 +20,23 @@ import unittest -import pandas as pd import numpy as np import numpy.testing as npt +import pandas as pd +from matplotlib.axes import Axes from scipy.optimize import NonlinearConstraint from sklearn.metrics import mean_squared_error -from matplotlib.axes import Axes - -from climada.entity import ImpactFuncSet, ImpactFunc +from climada.entity import ImpactFunc, ImpactFuncSet from climada.util.calibrate import ( - Input, - ScipyMinimizeOptimizer, BayesianOptimizer, - OutputEvaluator, - BayesianOptimizerOutputEvaluator, BayesianOptimizerController, + BayesianOptimizerOutputEvaluator, + Input, + OutputEvaluator, + ScipyMinimizeOptimizer, ) - -from climada.util.calibrate.test.test_base import hazard, exposure +from climada.util.calibrate.test.test_base import exposure, hazard class TestScipyMinimizeOptimizer(unittest.TestCase): diff --git a/climada/util/__init__.py b/climada/util/__init__.py index 6a497f64d..a4cf8a450 100755 --- a/climada/util/__init__.py +++ b/climada/util/__init__.py @@ -18,7 +18,9 @@ init util """ + import logging + from pint import UnitRegistry from .config import * @@ -28,6 +30,7 @@ ureg = UnitRegistry() + class log_level: """Context manager that sets all loggers with names starting with name_prefix (default is "") to a given specified level. 
@@ -50,7 +53,7 @@ def __init__(self, level, name_prefix=""): name: (logger, logger.level) for name, logger in logging.root.manager.loggerDict.items() if isinstance(logger, logging.Logger) and name.startswith(name_prefix) - } + } if name_prefix == "": self.loggers[""] = (logging.getLogger(), logging.getLogger().level) diff --git a/climada/util/api_client.py b/climada/util/api_client.py index c6afca7a0..3857cf0d8 100644 --- a/climada/util/api_client.py +++ b/climada/util/api_client.py @@ -18,24 +18,25 @@ Data API client """ -from dataclasses import dataclass -from datetime import datetime + import hashlib import json import logging +import time +from dataclasses import dataclass +from datetime import datetime from os.path import commonprefix from pathlib import Path from urllib.parse import quote, unquote, urlsplit, urlunsplit -import time import pandas as pd -from peewee import CharField, DateTimeField, IntegrityError, Model, SqliteDatabase -import requests import pycountry +import requests +from peewee import CharField, DateTimeField, IntegrityError, Model, SqliteDatabase from climada import CONFIG from climada.entity import Exposures -from climada.hazard import Hazard, Centroids +from climada.hazard import Centroids, Hazard from climada.util.constants import SYSTEM_DIR LOGGER = logging.getLogger(__name__) diff --git a/climada/util/calibrate/__init__.py b/climada/util/calibrate/__init__.py index 2e947ee04..53d753aab 100644 --- a/climada/util/calibrate/__init__.py +++ b/climada/util/calibrate/__init__.py @@ -24,6 +24,6 @@ BayesianOptimizerController, BayesianOptimizerOutput, BayesianOptimizerOutputEvaluator, - select_best + select_best, ) from .scipy_optimizer import ScipyMinimizeOptimizer diff --git a/climada/util/calibrate/base.py b/climada/util/calibrate/base.py index d61644dc7..4e3fc21f6 100644 --- a/climada/util/calibrate/base.py +++ b/climada/util/calibrate/base.py @@ -19,20 +19,20 @@ """ from abc import ABC, abstractmethod -from dataclasses import dataclass, field, InitVar -from typing import Callable, Mapping, Optional, Tuple, Union, Any, Dict +from dataclasses import InitVar, dataclass, field from numbers import Number from pathlib import Path +from typing import Any, Callable, Dict, Mapping, Optional, Tuple, Union -import pandas as pd +import h5py import numpy as np -from scipy.optimize import Bounds, LinearConstraint, NonlinearConstraint +import pandas as pd import seaborn as sns -import h5py +from scipy.optimize import Bounds, LinearConstraint, NonlinearConstraint -from climada.hazard import Hazard -from climada.entity import Exposures, ImpactFuncSet from climada.engine import Impact, ImpactCalc +from climada.entity import Exposures, ImpactFuncSet +from climada.hazard import Hazard ConstraintType = Union[LinearConstraint, NonlinearConstraint, Mapping] @@ -187,7 +187,7 @@ class Output: params: Mapping[str, Number] target: Number - def to_hdf5(self, filepath: Union[Path, str], mode:str = "x"): + def to_hdf5(self, filepath: Union[Path, str], mode: str = "x"): """Write the output into an H5 file This stores the data as attributes because we only store single numbers, not @@ -219,6 +219,7 @@ def from_hdf5(cls, filepath: Union[Path, str]): params = dict(file["base"]["params"].attrs.items()) return cls(params=params, target=target) + @dataclass class OutputEvaluator: """Evaluate the output of a calibration task diff --git a/climada/util/calibrate/bayesian_optimizer.py b/climada/util/calibrate/bayesian_optimizer.py index 98fe302c0..b34688050 100644 --- 
a/climada/util/calibrate/bayesian_optimizer.py +++ b/climada/util/calibrate/bayesian_optimizer.py @@ -18,26 +18,25 @@ Calibration with Bayesian Optimization """ -from dataclasses import dataclass, InitVar, field -from typing import Mapping, Optional, Any, Union, List, Tuple -from numbers import Number -from itertools import combinations, repeat -from collections import deque, namedtuple import logging +from collections import deque, namedtuple +from dataclasses import InitVar, dataclass, field +from itertools import combinations, repeat +from numbers import Number from pathlib import Path +from typing import Any, List, Mapping, Optional, Tuple, Union -import pandas as pd -import numpy as np import matplotlib as mpl -import matplotlib.pyplot as plt import matplotlib.axes as maxes import matplotlib.patches as mpatches +import matplotlib.pyplot as plt import matplotlib.ticker as mticker -from bayes_opt import BayesianOptimization, Events, UtilityFunction, ScreenLogger +import numpy as np +import pandas as pd +from bayes_opt import BayesianOptimization, Events, ScreenLogger, UtilityFunction from bayes_opt.target_space import TargetSpace -from .base import Input, Output, Optimizer, OutputEvaluator - +from .base import Input, Optimizer, Output, OutputEvaluator LOGGER = logging.getLogger(__name__) diff --git a/climada/util/calibrate/scipy_optimizer.py b/climada/util/calibrate/scipy_optimizer.py index 12d46b661..2962d8fd7 100644 --- a/climada/util/calibrate/scipy_optimizer.py +++ b/climada/util/calibrate/scipy_optimizer.py @@ -19,12 +19,12 @@ """ from dataclasses import dataclass -from typing import Mapping, Any, Dict, List +from typing import Any, Dict, List, Mapping import numpy as np -from scipy.optimize import minimize, OptimizeResult +from scipy.optimize import OptimizeResult, minimize -from .base import Output, Optimizer +from .base import Optimizer, Output @dataclass diff --git a/climada/util/calibrate/test/test_base.py b/climada/util/calibrate/test/test_base.py index f7b5fb69f..e2ef72bab 100644 --- a/climada/util/calibrate/test/test_base.py +++ b/climada/util/calibrate/test/test_base.py @@ -19,19 +19,18 @@ """ import unittest -from unittest.mock import patch, create_autospec, MagicMock -from tempfile import TemporaryDirectory from pathlib import Path +from tempfile import TemporaryDirectory +from unittest.mock import MagicMock, create_autospec, patch import numpy as np import numpy.testing as npt import pandas as pd from scipy.sparse import csr_matrix -from climada.entity import Exposures, ImpactFunc, ImpactFuncSet -from climada.hazard import Hazard, Centroids from climada.engine import ImpactCalc - +from climada.entity import Exposures, ImpactFunc, ImpactFuncSet +from climada.hazard import Centroids, Hazard from climada.util.calibrate import Input, OutputEvaluator from climada.util.calibrate.base import Optimizer, Output @@ -222,6 +221,7 @@ def test_cycle(self): self.assertEqual(output.target, output_2.target) self.assertDictEqual(output.params, output_2.params) + class TestOutputEvaluator(unittest.TestCase): """Test the output evaluator""" diff --git a/climada/util/calibrate/test/test_bayesian_optimizer.py b/climada/util/calibrate/test/test_bayesian_optimizer.py index 71af9b354..e80960fda 100644 --- a/climada/util/calibrate/test/test_bayesian_optimizer.py +++ b/climada/util/calibrate/test/test_bayesian_optimizer.py @@ -19,25 +19,25 @@ """ import unittest -from unittest.mock import patch, MagicMock -from tempfile import TemporaryDirectory from pathlib import Path +from tempfile import 
TemporaryDirectory +from unittest.mock import MagicMock, patch import numpy as np import numpy.testing as npt import pandas as pd from bayes_opt import BayesianOptimization, Events -from scipy.optimize import NonlinearConstraint from matplotlib.axes import Axes +from scipy.optimize import NonlinearConstraint -from climada.util.calibrate import Input, BayesianOptimizer, BayesianOptimizerController +from climada.util.calibrate import BayesianOptimizer, BayesianOptimizerController, Input from climada.util.calibrate.bayesian_optimizer import ( + BayesianOptimizerOutput, Improvement, StopEarly, - BayesianOptimizerOutput, ) -from .test_base import hazard, exposure +from .test_base import exposure, hazard def input(): diff --git a/climada/util/calibrate/test/test_scipy_optimizer.py b/climada/util/calibrate/test/test_scipy_optimizer.py index 01b04ea5f..06bf4e595 100644 --- a/climada/util/calibrate/test/test_scipy_optimizer.py +++ b/climada/util/calibrate/test/test_scipy_optimizer.py @@ -19,8 +19,8 @@ """ import unittest -from unittest.mock import patch, MagicMock, call -from typing import Optional, List +from typing import List, Optional +from unittest.mock import MagicMock, call, patch import numpy as np import numpy.testing as npt @@ -29,7 +29,7 @@ from climada.util.calibrate import Input, ScipyMinimizeOptimizer -from .test_base import hazard, exposure +from .test_base import exposure, hazard class TestScipyMinimizeOptimizer(unittest.TestCase): diff --git a/climada/util/checker.py b/climada/util/checker.py index 17e9fa76d..0d17b6036 100644 --- a/climada/util/checker.py +++ b/climada/util/checker.py @@ -20,14 +20,15 @@ """ __all__ = [ - 'size', - 'shape', - 'array_optional', - 'array_default', - 'prune_csr_matrix', + "size", + "shape", + "array_optional", + "array_default", + "prune_csr_matrix", ] import logging + import numpy as np import scipy.sparse as sparse @@ -58,12 +59,16 @@ def check_obligatories(var_dict, var_obl, name_prefix, n_size, n_row, n_col): """ for var_name, var_val in var_dict.items(): if var_name in var_obl: - if (isinstance(var_val, np.ndarray) and var_val.ndim == 1) \ - or isinstance(var_val, list): + if (isinstance(var_val, np.ndarray) and var_val.ndim == 1) or isinstance( + var_val, list + ): size(n_size, var_val, name_prefix + var_name) - elif (isinstance(var_val, np.ndarray) and var_val.ndim == 2): + elif isinstance(var_val, np.ndarray) and var_val.ndim == 2: shape(n_row, n_col, var_val, name_prefix + var_name) - elif isinstance(var_val, (np.ndarray, sparse.csr_matrix)) and var_val.ndim == 2: + elif ( + isinstance(var_val, (np.ndarray, sparse.csr_matrix)) + and var_val.ndim == 2 + ): shape(n_row, n_col, var_val, name_prefix + var_name) @@ -107,9 +112,13 @@ def size(exp_len, var, var_name): try: if isinstance(exp_len, int): if exp_len != len(var): - raise ValueError(f"Invalid {var_name} size: {str(exp_len)} != {len(var)}.") + raise ValueError( + f"Invalid {var_name} size: {str(exp_len)} != {len(var)}." + ) elif len(var) not in exp_len: - raise ValueError(f"Invalid {var_name} size: {len(var)} not in {str(exp_len)}.") + raise ValueError( + f"Invalid {var_name} size: {len(var)} not in {str(exp_len)}." + ) except TypeError as err: raise ValueError(f"{var_name} has wrong size.") from err @@ -123,9 +132,13 @@ def shape(exp_row, exp_col, var, var_name): """ try: if exp_row != var.shape[0]: - raise ValueError(f"Invalid {var_name} row size: {exp_row} != {var.shape[0]}.") + raise ValueError( + f"Invalid {var_name} row size: {exp_row} != {var.shape[0]}." 
+ ) if exp_col != var.shape[1]: - raise ValueError(f"Invalid {var_name} column size: {exp_col} != {var.shape[1]}.") + raise ValueError( + f"Invalid {var_name} column size: {exp_col} != {var.shape[1]}." + ) except TypeError as err: raise ValueError("%s has wrong dimensions." % var_name) from err @@ -182,6 +195,7 @@ def array_default(exp_len, var, var_name, def_val): size(exp_len, var, var_name) return res + def prune_csr_matrix(matrix: sparse.csr_matrix): """Ensure that the matrix is in the "canonical format". diff --git a/climada/util/config.py b/climada/util/config.py index 7e607f7d4..17975f09a 100644 --- a/climada/util/config.py +++ b/climada/util/config.py @@ -20,17 +20,17 @@ """ __all__ = [ - 'CONFIG', + "CONFIG", ] -import sys -import re import json import logging +import re +import sys from pathlib import Path -class Config(): +class Config: """Convenience Class. A Config object is a slow JSON object like nested dictonary who's values can be accessed by their names right away. E.g.: `a.b.c.str()` instead of `a['b']['c']` """ @@ -47,10 +47,14 @@ def __getattribute__(self, __name): try: return super().__getattribute__(__name) except AttributeError: - conf_files = [Path(_find_in_parents(conf_dir, CONFIG_NAME)) - if _find_in_parents(conf_dir, CONFIG_NAME) - else conf_dir / CONFIG_NAME - for conf_dir in CONFIG_DIRS[::-1]] + conf_files = [ + ( + Path(_find_in_parents(conf_dir, CONFIG_NAME)) + if _find_in_parents(conf_dir, CONFIG_NAME) + else conf_dir / CONFIG_NAME + ) + for conf_dir in CONFIG_DIRS[::-1] + ] raise AttributeError( # pylint: disable=raise-missing-from f"there is no '{__name}' configured for '{super().__getattribute__('_name')}'." f" check your config files: {conf_files}" @@ -58,19 +62,35 @@ def __getattribute__(self, __name): def __str__(self): # pylint: disable=bare-except,multiple-statements,too-complex - try: return self.str() - except: pass - try: return str(self.int()) - except: pass - try: return str(self.float()) - except: pass - try: return str(self.bool()) - except: pass - try: return str(self.list()) - except: pass - return '{{{}}}'.format(", ".join([ - f'{k}: {v}' for (k, v) in self.__dict__.items() if not k in {'_name', '_root'} - ])) + try: + return self.str() + except: + pass + try: + return str(self.int()) + except: + pass + try: + return str(self.float()) + except: + pass + try: + return str(self.bool()) + except: + pass + try: + return str(self.list()) + except: + pass + return "{{{}}}".format( + ", ".join( + [ + f"{k}: {v}" + for (k, v) in self.__dict__.items() + if not k in {"_name", "_root"} + ] + ) + ) def __repr__(self): return self.__str__() @@ -111,15 +131,18 @@ def str(self, index=None): Exception if it is not a string """ + def feval(root, cstr): def expand(dct, lst): if len(lst) == 1: return dct.__getattribute__(lst[0]).str() return expand(dct.__getattribute__(lst[0]), lst[1:]) + def msub(match): - cpath = match.group(1).split('.') + cpath = match.group(1).split(".") return expand(root, cpath) - return re.sub(r'{([\w\.]+)}', msub, cstr) + + return re.sub(r"{([\w\.]+)}", msub, cstr) if index is None: if self._val.__class__ is str: @@ -261,7 +284,7 @@ def dir(self, index=None, create=True): @classmethod def _expand_source_dir(cls, path): parts = path.parts - if parts[0] == '...': + if parts[0] == "...": return Path(cls.SOURCE_DIR, *parts[1:]) return Path(*parts) @@ -271,9 +294,13 @@ def _objectify_dict(cls, name, dct, root): obj = Config(name=name, root=root) for key, val in dct.items(): if val.__class__ is dict: - obj.__setattr__(key, 
cls._objectify_dict(name=key, dct=val, root=obj._root)) + obj.__setattr__( + key, cls._objectify_dict(name=key, dct=val, root=obj._root) + ) elif val.__class__ is list: - obj.__setattr__(key, cls._objectify_list(name=key, lst=val, root=obj._root)) + obj.__setattr__( + key, cls._objectify_list(name=key, lst=val, root=obj._root) + ) else: obj.__setattr__(key, Config(name=key, val=val, root=obj._root)) return obj @@ -303,7 +330,7 @@ def from_dict(cls, dct): Config contaning the same data as the input parameter `dct` """ - return cls._objectify_dict('climada.CONFIG', dct, root=None) + return cls._objectify_dict("climada.CONFIG", dct, root=None) def _supersede(nested, addendum): @@ -328,15 +355,12 @@ def _find_in_parents(directory, filename): def _fetch_conf(directories, config_name): - superseding_configs = [ - _find_in_parents(path, config_name) - for path in directories - ] + superseding_configs = [_find_in_parents(path, config_name) for path in directories] conf_dct = dict() for conf_path in superseding_configs: if conf_path is None: continue - with open(conf_path, encoding='utf-8') as conf: + with open(conf_path, encoding="utf-8") as conf: dct = json.load(conf) conf_dct = _supersede(conf_dct, dct) @@ -344,11 +368,11 @@ def _fetch_conf(directories, config_name): SOURCE_DIR = Path(__file__).absolute().parent.parent.parent -CONFIG_NAME = 'climada.conf' +CONFIG_NAME = "climada.conf" CONFIG_DIRS = [ - Path(SOURCE_DIR, 'climada', 'conf'), # default config from the climada repository - Path(Path.home(), 'climada', 'conf'), # ~/climada/conf directory - Path(Path.home(), '.config'), # ~/.config directory + Path(SOURCE_DIR, "climada", "conf"), # default config from the climada repository + Path(Path.home(), "climada", "conf"), # ~/climada/conf directory + Path(Path.home(), ".config"), # ~/.config directory Path.cwd(), # current working directory ] @@ -358,10 +382,11 @@ def _fetch_conf(directories, config_name): # set climada style logging if CONFIG.logging.managed.bool(): - LOGGER = logging.getLogger('climada') + LOGGER = logging.getLogger("climada") LOGGER.propagate = False FORMATTER = logging.Formatter( - "%(asctime)s - %(name)s - %(levelname)s - %(message)s") + "%(asctime)s - %(name)s - %(levelname)s - %(message)s" + ) CONSOLE = logging.StreamHandler(stream=sys.stdout) CONSOLE.setFormatter(FORMATTER) LOGGER.addHandler(CONSOLE) diff --git a/climada/util/constants.py b/climada/util/constants.py index 85569f5fc..a4e595aaa 100644 --- a/climada/util/constants.py +++ b/climada/util/constants.py @@ -19,33 +19,37 @@ Define constants. 
""" -__all__ = ['SYSTEM_DIR', - 'DEMO_DIR', - 'ENT_DEMO_TODAY', - 'ENT_DEMO_FUTURE', - 'HAZ_DEMO_MAT', - 'HAZ_DEMO_FL', - 'ENT_TEMPLATE_XLS', - 'HAZ_TEMPLATE_XLS', - 'ONE_LAT_KM', - 'EARTH_RADIUS_KM', - 'GLB_CENTROIDS_MAT', - 'GLB_CENTROIDS_NC', - 'ISIMIP_GPWV3_NATID_150AS', - 'NATEARTH_CENTROIDS', - 'RIVER_FLOOD_REGIONS_CSV', - 'TC_ANDREW_FL', - 'HAZ_DEMO_H5', - 'EXP_DEMO_H5', - 'WS_DEMO_NC', - 'TEST_UNC_OUTPUT_IMPACT', - 'TEST_UNC_OUTPUT_COSTBEN'] +__all__ = [ + "SYSTEM_DIR", + "DEMO_DIR", + "ENT_DEMO_TODAY", + "ENT_DEMO_FUTURE", + "HAZ_DEMO_MAT", + "HAZ_DEMO_FL", + "ENT_TEMPLATE_XLS", + "HAZ_TEMPLATE_XLS", + "ONE_LAT_KM", + "EARTH_RADIUS_KM", + "GLB_CENTROIDS_MAT", + "GLB_CENTROIDS_NC", + "ISIMIP_GPWV3_NATID_150AS", + "NATEARTH_CENTROIDS", + "RIVER_FLOOD_REGIONS_CSV", + "TC_ANDREW_FL", + "HAZ_DEMO_H5", + "EXP_DEMO_H5", + "WS_DEMO_NC", + "TEST_UNC_OUTPUT_IMPACT", + "TEST_UNC_OUTPUT_COSTBEN", +] + +import matplotlib as mpl # pylint: disable=unused-import # without importing numpy ahead of fiona the debugger may run into an error import numpy from fiona.crs import from_epsg -import matplotlib as mpl + from .config import CONFIG SYSTEM_DIR = CONFIG.local_data.system.dir(create=False) @@ -54,7 +58,7 @@ DEMO_DIR = CONFIG.local_data.demo.dir(create=False) """Folder containing the data used for tutorials""" -ISIMIP_GPWV3_NATID_150AS = SYSTEM_DIR.joinpath('NatID_grid_0150as.nc') +ISIMIP_GPWV3_NATID_150AS = SYSTEM_DIR.joinpath("NatID_grid_0150as.nc") """ Compressed version of National Identifier Grid in 150 arc-seconds from ISIMIP project, based on GPWv3. Location in ISIMIP repository: @@ -70,12 +74,12 @@ GLB_CENTROIDS_NC = ISIMIP_GPWV3_NATID_150AS """For backwards compatibility, it remains available under its old name.""" -GLB_CENTROIDS_MAT = SYSTEM_DIR.joinpath('GLB_NatID_grid_0360as_adv_2.mat') +GLB_CENTROIDS_MAT = SYSTEM_DIR.joinpath("GLB_NatID_grid_0360as_adv_2.mat") """Global centroids""" NATEARTH_CENTROIDS = { - 150: SYSTEM_DIR.joinpath('NatEarth_Centroids_150as.hdf5'), - 360: SYSTEM_DIR.joinpath('NatEarth_Centroids_360as.hdf5'), + 150: SYSTEM_DIR.joinpath("NatEarth_Centroids_150as.hdf5"), + 360: SYSTEM_DIR.joinpath("NatEarth_Centroids_360as.hdf5"), } """ Global centroids at XXX arc-seconds resolution, @@ -83,30 +87,32 @@ coast from NASA. """ -ENT_TEMPLATE_XLS = SYSTEM_DIR.joinpath('entity_template.xlsx') +ENT_TEMPLATE_XLS = SYSTEM_DIR.joinpath("entity_template.xlsx") """Entity template in xls format.""" -HAZ_TEMPLATE_XLS = SYSTEM_DIR.joinpath('hazard_template.xlsx') +HAZ_TEMPLATE_XLS = SYSTEM_DIR.joinpath("hazard_template.xlsx") """Hazard template in xls format.""" -RIVER_FLOOD_REGIONS_CSV = SYSTEM_DIR.joinpath('NatRegIDs.csv') +RIVER_FLOOD_REGIONS_CSV = SYSTEM_DIR.joinpath("NatRegIDs.csv") """Look-up table for river flood module""" -HAZ_DEMO_FL = DEMO_DIR.joinpath('SC22000_VE__M1.grd.gz') +HAZ_DEMO_FL = DEMO_DIR.joinpath("SC22000_VE__M1.grd.gz") """Raster file of flood over Venezuela. Model from GAR2015""" -HAZ_DEMO_MAT = DEMO_DIR.joinpath('atl_prob_nonames.mat') +HAZ_DEMO_MAT = DEMO_DIR.joinpath("atl_prob_nonames.mat") """ Hazard demo from climada in MATLAB: hurricanes from 1851 to 2011 over Florida with 100 centroids. """ -HAZ_DEMO_H5 = DEMO_DIR.joinpath('tc_fl_1990_2004.h5') +HAZ_DEMO_H5 = DEMO_DIR.joinpath("tc_fl_1990_2004.h5") """ Hazard demo in hdf5 format: IBTrACS from 1990 to 2004 over Florida with 2500 centroids. 
""" -WS_DEMO_NC = [DEMO_DIR.joinpath('fp_lothar_crop-test.nc'), - DEMO_DIR.joinpath('fp_xynthia_crop-test.nc')] +WS_DEMO_NC = [ + DEMO_DIR.joinpath("fp_lothar_crop-test.nc"), + DEMO_DIR.joinpath("fp_xynthia_crop-test.nc"), +] """ Winter storm in Europe files. These test files have been generated using the netCDF kitchen sink: @@ -115,46 +121,256 @@ """ -ENT_DEMO_TODAY = DEMO_DIR.joinpath('demo_today.xlsx') +ENT_DEMO_TODAY = DEMO_DIR.joinpath("demo_today.xlsx") """Entity demo present in xslx format.""" -ENT_DEMO_FUTURE = DEMO_DIR.joinpath('demo_future_TEST.xlsx') +ENT_DEMO_FUTURE = DEMO_DIR.joinpath("demo_future_TEST.xlsx") """Entity demo future in xslx format.""" -EXP_DEMO_H5 = DEMO_DIR.joinpath('exp_demo_today.h5') +EXP_DEMO_H5 = DEMO_DIR.joinpath("exp_demo_today.h5") """Exposures over Florida""" -TC_ANDREW_FL = DEMO_DIR.joinpath('ibtracs_global_intp-None_1992230N11325.csv') +TC_ANDREW_FL = DEMO_DIR.joinpath("ibtracs_global_intp-None_1992230N11325.csv") """Tropical cyclone Andrew in Florida""" -TEST_UNC_OUTPUT_IMPACT = 'test_unc_output_impact' +TEST_UNC_OUTPUT_IMPACT = "test_unc_output_impact" """Demo uncertainty impact output""" -TEST_UNC_OUTPUT_COSTBEN = 'test_unc_output_costben' +TEST_UNC_OUTPUT_COSTBEN = "test_unc_output_costben" """Demo uncertainty costben output""" ISIMIP_NATID_TO_ISO = [ - '', 'ABW', 'AFG', 'AGO', 'AIA', 'ALB', 'AND', 'ANT', 'ARE', 'ARG', 'ARM', - 'ASM', 'ATG', 'AUS', 'AUT', 'AZE', 'BDI', 'BEL', 'BEN', 'BFA', 'BGD', 'BGR', - 'BHR', 'BHS', 'BIH', 'BLR', 'BLZ', 'BMU', 'BOL', 'BRA', 'BRB', 'BRN', 'BTN', - 'BWA', 'CAF', 'CAN', 'CHE', 'CHL', 'CHN', 'CIV', 'CMR', 'COD', 'COG', 'COK', - 'COL', 'COM', 'CPV', 'CRI', 'CUB', 'CYM', 'CYP', 'CZE', 'DEU', 'DJI', 'DMA', - 'DNK', 'DOM', 'DZA', 'ECU', 'EGY', 'ERI', 'ESP', 'EST', 'ETH', 'FIN', 'FJI', - 'FLK', 'FRA', 'FRO', 'FSM', 'GAB', 'GBR', 'GEO', 'GGY', 'GHA', 'GIB', 'GIN', - 'GLP', 'GMB', 'GNB', 'GNQ', 'GRC', 'GRD', 'GTM', 'GUF', 'GUM', 'GUY', 'HKG', - 'HND', 'HRV', 'HTI', 'HUN', 'IDN', 'IMN', 'IND', 'IRL', 'IRN', 'IRQ', 'ISL', - 'ISR', 'ITA', 'JAM', 'JEY', 'JOR', 'JPN', 'KAZ', 'KEN', 'KGZ', 'KHM', 'KIR', - 'KNA', 'KOR', 'KWT', 'LAO', 'LBN', 'LBR', 'LBY', 'LCA', 'LIE', 'LKA', 'LSO', - 'LTU', 'LUX', 'LVA', 'MAC', 'MAR', 'MCO', 'MDA', 'MDG', 'MDV', 'MEX', 'MHL', - 'MKD', 'MLI', 'MLT', 'MMR', 'MNG', 'MNP', 'MOZ', 'MRT', 'MSR', 'MTQ', 'MUS', - 'MWI', 'MYS', 'MYT', 'NAM', 'NCL', 'NER', 'NFK', 'NGA', 'NIC', 'NIU', 'NLD', - 'NOR', 'NPL', 'NRU', 'NZL', 'OMN', 'PAK', 'PAN', 'PCN', 'PER', 'PHL', 'PLW', - 'PNG', 'POL', 'PRI', 'PRK', 'PRT', 'PRY', 'PSE', 'PYF', 'QAT', 'REU', 'ROU', - 'RUS', 'RWA', 'SAU', 'SCG', 'SDN', 'SEN', 'SGP', 'SHN', 'SJM', 'SLB', 'SLE', - 'SLV', 'SMR', 'SOM', 'SPM', 'STP', 'SUR', 'SVK', 'SVN', 'SWE', 'SWZ', 'SYC', - 'SYR', 'TCA', 'TCD', 'TGO', 'THA', 'TJK', 'TKL', 'TKM', 'TLS', 'TON', 'TTO', - 'TUN', 'TUR', 'TUV', 'TWN', 'TZA', 'UGA', 'UKR', 'URY', 'USA', 'UZB', 'VCT', - 'VEN', 'VGB', 'VIR', 'VNM', 'VUT', 'WLF', 'WSM', 'YEM', 'ZAF', 'ZMB', 'ZWE', + "", + "ABW", + "AFG", + "AGO", + "AIA", + "ALB", + "AND", + "ANT", + "ARE", + "ARG", + "ARM", + "ASM", + "ATG", + "AUS", + "AUT", + "AZE", + "BDI", + "BEL", + "BEN", + "BFA", + "BGD", + "BGR", + "BHR", + "BHS", + "BIH", + "BLR", + "BLZ", + "BMU", + "BOL", + "BRA", + "BRB", + "BRN", + "BTN", + "BWA", + "CAF", + "CAN", + "CHE", + "CHL", + "CHN", + "CIV", + "CMR", + "COD", + "COG", + "COK", + "COL", + "COM", + "CPV", + "CRI", + "CUB", + "CYM", + "CYP", + "CZE", + "DEU", + "DJI", + "DMA", + "DNK", + "DOM", + "DZA", + "ECU", + "EGY", + "ERI", + "ESP", + "EST", + "ETH", + "FIN", 
+ "FJI", + "FLK", + "FRA", + "FRO", + "FSM", + "GAB", + "GBR", + "GEO", + "GGY", + "GHA", + "GIB", + "GIN", + "GLP", + "GMB", + "GNB", + "GNQ", + "GRC", + "GRD", + "GTM", + "GUF", + "GUM", + "GUY", + "HKG", + "HND", + "HRV", + "HTI", + "HUN", + "IDN", + "IMN", + "IND", + "IRL", + "IRN", + "IRQ", + "ISL", + "ISR", + "ITA", + "JAM", + "JEY", + "JOR", + "JPN", + "KAZ", + "KEN", + "KGZ", + "KHM", + "KIR", + "KNA", + "KOR", + "KWT", + "LAO", + "LBN", + "LBR", + "LBY", + "LCA", + "LIE", + "LKA", + "LSO", + "LTU", + "LUX", + "LVA", + "MAC", + "MAR", + "MCO", + "MDA", + "MDG", + "MDV", + "MEX", + "MHL", + "MKD", + "MLI", + "MLT", + "MMR", + "MNG", + "MNP", + "MOZ", + "MRT", + "MSR", + "MTQ", + "MUS", + "MWI", + "MYS", + "MYT", + "NAM", + "NCL", + "NER", + "NFK", + "NGA", + "NIC", + "NIU", + "NLD", + "NOR", + "NPL", + "NRU", + "NZL", + "OMN", + "PAK", + "PAN", + "PCN", + "PER", + "PHL", + "PLW", + "PNG", + "POL", + "PRI", + "PRK", + "PRT", + "PRY", + "PSE", + "PYF", + "QAT", + "REU", + "ROU", + "RUS", + "RWA", + "SAU", + "SCG", + "SDN", + "SEN", + "SGP", + "SHN", + "SJM", + "SLB", + "SLE", + "SLV", + "SMR", + "SOM", + "SPM", + "STP", + "SUR", + "SVK", + "SVN", + "SWE", + "SWZ", + "SYC", + "SYR", + "TCA", + "TCD", + "TGO", + "THA", + "TJK", + "TKL", + "TKM", + "TLS", + "TON", + "TTO", + "TUN", + "TUR", + "TUV", + "TWN", + "TZA", + "UGA", + "UKR", + "URY", + "USA", + "UZB", + "VCT", + "VEN", + "VGB", + "VIR", + "VNM", + "VUT", + "WLF", + "WSM", + "YEM", + "ZAF", + "ZMB", + "ZWE", ] """ISO 3166 alpha-3 codes of countries used in ISIMIP_GPWV3_NATID_150AS""" @@ -179,7 +395,9 @@ dict(name="Serranilla Bank", alpha_2="XR", alpha_3="XXR", numeric="913"), dict(name="Siachen Glacier", alpha_2="XH", alpha_3="XXH", numeric="914"), dict(name="Somaliland", alpha_2="XM", alpha_3="XXM", numeric="915"), - dict(name="Southern Patagonian Ice Field", alpha_2="XN", alpha_3="XXN", numeric="918"), + dict( + name="Southern Patagonian Ice Field", alpha_2="XN", alpha_3="XXN", numeric="918" + ), dict(name="Spratly Is.", alpha_2="XP", alpha_3="XXP", numeric="916"), dict(name="USNB Guantanamo Bay", alpha_2="XG", alpha_3="XXG", numeric="917"), ] @@ -199,513 +417,517 @@ DEF_EPSG = 4326 """Default EPSG code""" -DEF_CRS = f'EPSG:{DEF_EPSG}' +DEF_CRS = f"EPSG:{DEF_EPSG}" """Default coordinate reference system WGS 84, str, for pyproj and rasterio CRS.from_string()""" DEF_CRS_FIONA = from_epsg(DEF_EPSG) """Default coordinate reference system WGS 84, dict, for fiona interface""" -cm_data1 = [[0.00000000, 0.00000000, 0.00000000], - [0.00032031, 0.00020876, 0.00015576], - [0.00115213, 0.00071222, 0.00050933], - [0.00246632, 0.00145292, 0.00099932], - [0.00426111, 0.00240248, 0.00159470], - [0.00654129, 0.00354149, 0.00227479], - [0.00931453, 0.00485497, 0.00302435], - [0.01259008, 0.00633067, 0.00383153], - [0.01637810, 0.00795809, 0.00468676], - [0.02068947, 0.00972796, 0.00558214], - [0.02553552, 0.01163194, 0.00651101], - [0.03092793, 0.01366243, 0.00746771], - [0.03687870, 0.01581232, 0.00844736], - [0.04329108, 0.01807499, 0.00944575], - [0.04970018, 0.02044415, 0.01045917], - [0.05607744, 0.02291381, 0.01148441], - [0.06242826, 0.02547822, 0.01251862], - [0.06875727, 0.02813185, 0.01355932], - [0.07506844, 0.03086930, 0.01460431], - [0.08136524, 0.03368535, 0.01565167], - [0.08765071, 0.03657489, 0.01669973], - [0.09392754, 0.03953289, 0.01774700], - [0.10019812, 0.04248851, 0.01879222], - [0.10646459, 0.04536893, 0.01983431], - [0.11272888, 0.04818555, 0.02087234], - [0.11899272, 0.05094021, 0.02190555], - [0.12525770, 0.05363453, 
0.02293331], - [0.13152527, 0.05626994, 0.02395516], - [0.13779673, 0.05884770, 0.02497073], - [0.14407332, 0.06136894, 0.02597979], - [0.15035614, 0.06383462, 0.02698225], - [0.15664624, 0.06624561, 0.02797810], - [0.16294457, 0.06860266, 0.02896747], - [0.16925203, 0.07090640, 0.02995057], - [0.17556946, 0.07315739, 0.03092776], - [0.18189762, 0.07535608, 0.03189947], - [0.18823726, 0.07750287, 0.03286623], - [0.19458905, 0.07959805, 0.03382870], - [0.20095364, 0.08164185, 0.03478764], - [0.20733163, 0.08363445, 0.03574389], - [0.21372359, 0.08557593, 0.03669841], - [0.22013006, 0.08746634, 0.03765228], - [0.22655154, 0.08930565, 0.03860667], - [0.23298852, 0.09109380, 0.03956286], - [0.23944144, 0.09283065, 0.04052097], - [0.24591073, 0.09451600, 0.04146142], - [0.25239679, 0.09614964, 0.04239527], - [0.25890000, 0.09773126, 0.04332440], - [0.26542072, 0.09926052, 0.04425071], - [0.27195929, 0.10073705, 0.04517610], - [0.27851612, 0.10216029, 0.04610242], - [0.28509144, 0.10352983, 0.04703172], - [0.29168551, 0.10484515, 0.04796603], - [0.29829858, 0.10610566, 0.04890741], - [0.30493089, 0.10731073, 0.04985793], - [0.31158270, 0.10845962, 0.05081968], - [0.31825437, 0.10955144, 0.05179469], - [0.32494588, 0.11058558, 0.05278533], - [0.33165741, 0.11156121, 0.05379388], - [0.33838918, 0.11247734, 0.05482253], - [0.34514146, 0.11333282, 0.05587349], - [0.35191413, 0.11412692, 0.05694939], - [0.35870733, 0.11485850, 0.05805261], - [0.36552140, 0.11552606, 0.05918537], - [0.37235602, 0.11612887, 0.06035055], - [0.37921149, 0.11666531, 0.06155047], - [0.38608774, 0.11713411, 0.06278785], - [0.39298465, 0.11753398, 0.06406542], - [0.39990243, 0.11786308, 0.06538571], - [0.40684070, 0.11812026, 0.06675174], - [0.41379968, 0.11830340, 0.06816610], - [0.42077900, 0.11841110, 0.06963182], - [0.42777857, 0.11844140, 0.07115178], - [0.43479835, 0.11839213, 0.07272887], - [0.44183779, 0.11826176, 0.07436631], - [0.44889692, 0.11804763, 0.07606698], - [0.45597537, 0.11774759, 0.07783407], - [0.46307262, 0.11735955, 0.07967086], - [0.47018828, 0.11688094, 0.08158056], - [0.47732206, 0.11630887, 0.08356643], - [0.48447342, 0.11564059, 0.08563184], - [0.49164167, 0.11487339, 0.08778027], - [0.49882616, 0.11400421, 0.09001524], - [0.50602619, 0.11302981, 0.09234030], - [0.51324096, 0.11194681, 0.09475911], - [0.52046957, 0.11075165, 0.09727541], - [0.52771103, 0.10944063, 0.09989300], - [0.53496423, 0.10800987, 0.10261578], - [0.54222828, 0.10645458, 0.10544773], - [0.54950158, 0.10477099, 0.10839295], - [0.55678265, 0.10295467, 0.11145561], - [0.56407005, 0.10100050, 0.11463998], - [0.57136221, 0.09890294, 0.11795046], - [0.57865683, 0.09665778, 0.12139144], - [0.58595251, 0.09425758, 0.12496762], - [0.59324637, 0.09169820, 0.12868351], - [0.60053647, 0.08897198, 0.13254399], - [0.60781996, 0.08607290, 0.13655381], - [0.61509391, 0.08299424, 0.14071783], - [0.62235528, 0.07972847, 0.14504098], - [0.62960086, 0.07626735, 0.14952833], - [0.63682690, 0.07260321, 0.15418475], - [0.64402945, 0.06872768, 0.15901515], - [0.65120429, 0.06463189, 0.16402435], - [0.65834703, 0.06030595, 0.16921717], - [0.66545273, 0.05574060, 0.17459807], - [0.67251615, 0.05092618, 0.18017123], - [0.67953179, 0.04585268, 0.18594053], - [0.68649408, 0.04050791, 0.19190990], - [0.69339656, 0.03501827, 0.19808181], - [0.70023310, 0.02974032, 0.20445918], - [0.70699677, 0.02473108, 0.21104325], - [0.71368081, 0.02004735, 0.21783521], - [0.72027805, 0.01575128, 0.22483488], - [0.72678121, 0.01190847, 0.23204104], - [0.73318299, 
0.00858729, 0.23945145], - [0.73947609, 0.00585900, 0.24706262], - [0.74565328, 0.00379723, 0.25486974], - [0.75170751, 0.00247734, 0.26286660], - [0.75763201, 0.00197573, 0.27104565], - [0.76342035, 0.00236912, 0.27939796], - [0.76906659, 0.00373375, 0.28791328], - [0.77456531, 0.00614457, 0.29658016], - [0.77991170, 0.00967453, 0.30538600], - [0.78510166, 0.01439382, 0.31431727], - [0.79013176, 0.02036922, 0.32335963], - [0.79499936, 0.02766356, 0.33249813], - [0.79970258, 0.03633527, 0.34171740], - [0.80424028, 0.04610137, 0.35100187], - [0.80861206, 0.05593074, 0.36033595], - [0.81281824, 0.06575513, 0.36970423], - [0.81685977, 0.07556701, 0.37909164], - [0.82073820, 0.08536045, 0.38848361], - [0.82445563, 0.09513050, 0.39786621], - [0.82801462, 0.10487292, 0.40722623], - [0.83141814, 0.11458394, 0.41655122], - [0.83466964, 0.12426002, 0.42582926], - [0.83777258, 0.13389850, 0.43505012], - [0.84073089, 0.14349659, 0.44420371], - [0.84354864, 0.15305194, 0.45328109], - [0.84622995, 0.16256264, 0.46227431], - [0.84877908, 0.17202698, 0.47117623], - [0.85120054, 0.18144313, 0.47998013], - [0.85349849, 0.19081025, 0.48868085], - [0.85567734, 0.20012720, 0.49727347], - [0.85774150, 0.20939307, 0.50575378], - [0.85969539, 0.21860703, 0.51411817], - [0.86154321, 0.22776871, 0.52236389], - [0.86328918, 0.23687774, 0.53048865], - [0.86493759, 0.24593368, 0.53849050], - [0.86649243, 0.25493655, 0.54636825], - [0.86795766, 0.26388635, 0.55412108], - [0.86933714, 0.27278325, 0.56174857], - [0.87063488, 0.28162708, 0.56925039], - [0.87185473, 0.29041795, 0.57662667], - [0.87299987, 0.29915672, 0.58387836], - [0.87407470, 0.30784267, 0.59100548], - [0.87508176, 0.31647731, 0.59800984], - [0.87602545, 0.32505984, 0.60489185], - [0.87690829, 0.33359164, 0.61165350], - [0.87773379, 0.34207284, 0.61829617], - [0.87850545, 0.35050356, 0.62482133], - [0.87922592, 0.35888478, 0.63123109], - [0.87989827, 0.36721697, 0.63752735], - [0.88052548, 0.37550059, 0.64371209], - [0.88111058, 0.38373605, 0.64978738], - [0.88165635, 0.39192396, 0.65575540], - [0.88216538, 0.40006502, 0.66161845], - [0.88264034, 0.40815983, 0.66737883], - [0.88308383, 0.41620898, 0.67303885], - [0.88349837, 0.42421311, 0.67860087], - [0.88388658, 0.43217272, 0.68406723], - [0.88425089, 0.44008842, 0.68944031], - [0.88459352, 0.44796098, 0.69472256], - [0.88491674, 0.45579107, 0.69991638], - [0.88522277, 0.46357936, 0.70502418], - [0.88551386, 0.47132645, 0.71004831], - [0.88579260, 0.47903263, 0.71499109], - [0.88606054, 0.48669904, 0.71985498], - [0.88631967, 0.49432634, 0.72464230], - [0.88657273, 0.50191463, 0.72935531], - [0.88682100, 0.50946512, 0.73399636], - [0.88706656, 0.51697833, 0.73856771], - [0.88731166, 0.52445464, 0.74307157], - [0.88755748, 0.53189523, 0.74751019], - [0.88780677, 0.53930002, 0.75188571], - [0.88806029, 0.54667042, 0.75620029], - [0.88832077, 0.55400637, 0.76045604], - [0.88858898, 0.56130917, 0.76465503], - [0.88886751, 0.56857881, 0.76879932], - [0.88915723, 0.57581648, 0.77289087], - [0.88946027, 0.58302245, 0.77693169], - [0.88977801, 0.59019749, 0.78092369], - [0.89011184, 0.59734231, 0.78486874], - [0.89046385, 0.60445719, 0.78876876], - [0.89083498, 0.61154309, 0.79262552], - [0.89122688, 0.61860051, 0.79644080], - [0.89164127, 0.62562987, 0.80021639], - [0.89207922, 0.63263202, 0.80395396], - [0.89254218, 0.63960749, 0.80765517], - [0.89303193, 0.64655664, 0.81132175], - [0.89354946, 0.65348027, 0.81495521], - [0.89409613, 0.66037894, 0.81855714], - [0.89467341, 0.66725312, 0.82212908], - 
[0.89528268, 0.67410333, 0.82567258], - [0.89592507, 0.68093022, 0.82918904], - [0.89660188, 0.68773430, 0.83267991], - [0.89731440, 0.69451609, 0.83614660], - [0.89806405, 0.70127602, 0.83959053], - [0.89885189, 0.70801470, 0.84301299], - [0.89967918, 0.71473262, 0.84641529], - [0.90054714, 0.72143026, 0.84979872], - [0.90145701, 0.72810810, 0.85316454], - [0.90241007, 0.73476657, 0.85651399], - [0.90340743, 0.74140617, 0.85984825], - [0.90445031, 0.74802735, 0.86316849], - [0.90553992, 0.75463054, 0.86647585], - [0.90667746, 0.76121615, 0.86977146], - [0.90786415, 0.76778459, 0.87305641], - [0.90910120, 0.77433626, 0.87633178], - [0.91038981, 0.78087154, 0.87959861], - [0.91173124, 0.78739078, 0.88285793], - [0.91312673, 0.79389433, 0.88611074], - [0.91457758, 0.80038249, 0.88935803], - [0.91608500, 0.80685562, 0.89260074], - [0.91765039, 0.81331396, 0.89583983], - [0.91927511, 0.81975775, 0.89907623], - [0.92096059, 0.82618722, 0.90231088], - [0.92270830, 0.83260254, 0.90554466], - [0.92451964, 0.83900395, 0.90877841], - [0.92639632, 0.84539150, 0.91201305], - [0.92834008, 0.85176524, 0.91524947], - [0.93035272, 0.85812518, 0.91848857], - [0.93243609, 0.86447132, 0.92173117], - [0.93459223, 0.87080356, 0.92497815], - [0.93682359, 0.87712161, 0.92823055], - [0.93913266, 0.88342515, 0.93148937], - [0.94152187, 0.88971391, 0.93475546], - [0.94399458, 0.89598719, 0.93803021], - [0.94655427, 0.90224421, 0.94131502], - [0.94920436, 0.90848425, 0.94461125], - ] - -cm_data2 = [[0.00000000, 0.00000000, 0.00000000], - [0.00028691, 0.00020835, 0.00028279], - [0.00102421, 0.00070903, 0.00101021], - [0.00218033, 0.00144242, 0.00214845], - [0.00375280, 0.00237790, 0.00368891], - [0.00574727, 0.00349371, 0.00562841], - [0.00817359, 0.00477242, 0.00796563], - [0.01104432, 0.00619914, 0.01069976], - [0.01437378, 0.00776073, 0.01382970], - [0.01817764, 0.00944524, 0.01735364], - [0.02247277, 0.01124162, 0.02126897], - [0.02727694, 0.01313949, 0.02557207], - [0.03260869, 0.01512908, 0.03025819], - [0.03848721, 0.01720107, 0.03532137], - [0.04472223, 0.01934661, 0.04074862], - [0.05095008, 0.02155723, 0.04620189], - [0.05718085, 0.02382484, 0.05156892], - [0.06341877, 0.02614168, 0.05685075], - [0.06966727, 0.02850036, 0.06204782], - [0.07592916, 0.03089381, 0.06716019], - [0.08220666, 0.03331529, 0.07218757], - [0.08850155, 0.03575837, 0.07712945], - [0.09481532, 0.03821687, 0.08198520], - [0.10114895, 0.04068063, 0.08675399], - [0.10750319, 0.04306161, 0.09143498], - [0.11387855, 0.04536332, 0.09602729], - [0.12027537, 0.04758808, 0.10053004], - [0.12669388, 0.04973801, 0.10494242], - [0.13313410, 0.05181515, 0.10926361], - [0.13959587, 0.05382147, 0.11349284], - [0.14607903, 0.05575879, 0.11762946], - [0.15258333, 0.05762879, 0.12167284], - [0.15910850, 0.05943303, 0.12562246], - [0.16565413, 0.06117310, 0.12947786], - [0.17221981, 0.06285040, 0.13323866], - [0.17880518, 0.06446624, 0.13690456], - [0.18540980, 0.06602187, 0.14047531], - [0.19203321, 0.06751848, 0.14395075], - [0.19867499, 0.06895715, 0.14733079], - [0.20533472, 0.07033887, 0.15061537], - [0.21201197, 0.07166460, 0.15380450], - [0.21870632, 0.07293518, 0.15689824], - [0.22541736, 0.07415142, 0.15989669], - [0.23214472, 0.07531401, 0.16279996], - [0.23888802, 0.07642364, 0.16560823], - [0.24564687, 0.07748088, 0.16832171], - [0.25242097, 0.07848626, 0.17094058], - [0.25920996, 0.07944023, 0.17346508], - [0.26601352, 0.08034324, 0.17589547], - [0.27283134, 0.08119562, 0.17823199], - [0.27966317, 0.08199764, 0.18047489], - [0.28650868, 
0.08274959, 0.18262446], - [0.29336760, 0.08345167, 0.18468096], - [0.30023971, 0.08410396, 0.18664460], - [0.30712474, 0.08470663, 0.18851568], - [0.31402240, 0.08525975, 0.19029445], - [0.32093251, 0.08576327, 0.19198110], - [0.32785482, 0.08621717, 0.19357587], - [0.33478905, 0.08662148, 0.19507899], - [0.34173503, 0.08697601, 0.19649062], - [0.34869254, 0.08728060, 0.19781092], - [0.35566125, 0.08753522, 0.19904011], - [0.36264104, 0.08773953, 0.20017823], - [0.36963165, 0.08789334, 0.20122542], - [0.37663272, 0.08799656, 0.20218186], - [0.38364424, 0.08804859, 0.20304740], - [0.39066574, 0.08804944, 0.20382227], - [0.39769703, 0.08799872, 0.20450641], - [0.40473792, 0.08789596, 0.20509971], - [0.41178790, 0.08774121, 0.20560237], - [0.41884704, 0.08753353, 0.20601388], - [0.42591463, 0.08727325, 0.20633459], - [0.43299069, 0.08695948, 0.20656394], - [0.44007455, 0.08659242, 0.20670212], - [0.44716616, 0.08617128, 0.20674851], - [0.45426479, 0.08569637, 0.20670331], - [0.46137042, 0.08516677, 0.20656566], - [0.46848219, 0.08458313, 0.20633582], - [0.47560004, 0.08394454, 0.20601280], - [0.48272316, 0.08325159, 0.20559662], - [0.48985104, 0.08250434, 0.20508677], - [0.49698340, 0.08170242, 0.20448225], - [0.50411927, 0.08084690, 0.20378304], - [0.51125803, 0.07993830, 0.20298844], - [0.51839929, 0.07897664, 0.20209721], - [0.52554202, 0.07796358, 0.20110904], - [0.53268538, 0.07690049, 0.20002312], - [0.53982852, 0.07578902, 0.19883855], - [0.54697049, 0.07463129, 0.19755431], - [0.55411028, 0.07342990, 0.19616934], - [0.56124678, 0.07218810, 0.19468248], - [0.56837880, 0.07090985, 0.19309253], - [0.57550502, 0.06959997, 0.19139818], - [0.58262400, 0.06826431, 0.18959809], - [0.58973418, 0.06690989, 0.18769083], - [0.59683382, 0.06554515, 0.18567490], - [0.60392106, 0.06418012, 0.18354875], - [0.61099403, 0.06282598, 0.18131023], - [0.61805061, 0.06149625, 0.17895730], - [0.62508803, 0.06020822, 0.17648890], - [0.63210426, 0.05897851, 0.17390136], - [0.63909578, 0.05783082, 0.17119418], - [0.64606007, 0.05678752, 0.16836327], - [0.65299326, 0.05587785, 0.16540731], - [0.65989160, 0.05513269, 0.16232365], - [0.66675096, 0.05458598, 0.15910942], - [0.67356680, 0.05427454, 0.15576179], - [0.68033403, 0.05423761, 0.15227799], - [0.68704706, 0.05451589, 0.14865546], - [0.69369969, 0.05515040, 0.14489185], - [0.70028509, 0.05618108, 0.14098519], - [0.70679624, 0.05764355, 0.13693176], - [0.71322465, 0.05957213, 0.13273203], - [0.71956187, 0.06199294, 0.12838347], - [0.72579832, 0.06492701, 0.12388673], - [0.73192387, 0.06838759, 0.11924309], - [0.73792785, 0.07238015, 0.11445523], - [0.74379911, 0.07690258, 0.10952793], - [0.74952631, 0.08194530, 0.10446780], - [0.75509807, 0.08749192, 0.09928513], - [0.76050344, 0.09351949, 0.09399345], - [0.76573234, 0.09999923, 0.08860931], - [0.77077595, 0.10689714, 0.08315390], - [0.77562724, 0.11417469, 0.07765262], - [0.78028137, 0.12178994, 0.07213493], - [0.78473594, 0.12969861, 0.06663478], - [0.78899120, 0.13785534, 0.06119075], - [0.79304987, 0.14621526, 0.05584590], - [0.79691698, 0.15473527, 0.05064835], - [0.80059949, 0.16337512, 0.04565234], - [0.80410578, 0.17209842, 0.04091877], - [0.80744502, 0.18087354, 0.03656330], - [0.81062721, 0.18967261, 0.03284897], - [0.81366202, 0.19847328, 0.02978095], - [0.81655911, 0.20725703, 0.02735425], - [0.81932773, 0.21600901, 0.02556368], - [0.82197656, 0.22471783, 0.02440445], - [0.82451354, 0.23337504, 0.02387282], - [0.82694588, 0.24197470, 0.02396658], - [0.82928000, 0.25051291, 0.02468537], - 
[0.83152234, 0.25898625, 0.02603161], - [0.83367755, 0.26739445, 0.02800850], - [0.83575119, 0.27573587, 0.03062270], - [0.83774693, 0.28401176, 0.03388176], - [0.83966871, 0.29222281, 0.03779577], - [0.84152000, 0.30037020, 0.04231855], - [0.84330390, 0.30845547, 0.04718171], - [0.84502314, 0.31648042, 0.05232334], - [0.84668012, 0.32444703, 0.05769850], - [0.84827700, 0.33235739, 0.06327080], - [0.84981598, 0.34021329, 0.06901096], - [0.85129899, 0.34801660, 0.07489554], - [0.85272715, 0.35576999, 0.08090629], - [0.85410285, 0.36347441, 0.08702799], - [0.85542653, 0.37113285, 0.09324952], - [0.85670046, 0.37874607, 0.09956104], - [0.85792511, 0.38631664, 0.10595570], - [0.85910167, 0.39384615, 0.11242769], - [0.86023184, 0.40133560, 0.11897200], - [0.86131603, 0.40878710, 0.12558544], - [0.86235527, 0.41620202, 0.13226519], - [0.86335049, 0.42358173, 0.13900904], - [0.86430261, 0.43092748, 0.14581530], - [0.86521249, 0.43824051, 0.15268270], - [0.86608094, 0.44552198, 0.15961030], - [0.86690878, 0.45277298, 0.16659744], - [0.86769678, 0.45999455, 0.17364368], - [0.86844571, 0.46718767, 0.18074877], - [0.86915633, 0.47435325, 0.18791261], - [0.86982940, 0.48149217, 0.19513520], - [0.87046566, 0.48860521, 0.20241667], - [0.87106589, 0.49569313, 0.20975721], - [0.87163086, 0.50275663, 0.21715708], - [0.87216162, 0.50979614, 0.22461634], - [0.87265881, 0.51681240, 0.23213553], - [0.87312317, 0.52380600, 0.23971510], - [0.87355555, 0.53077744, 0.24735548], - [0.87395712, 0.53772697, 0.25505684], - [0.87432861, 0.54465512, 0.26281981], - [0.87467085, 0.55156232, 0.27064498], - [0.87498503, 0.55844876, 0.27853263], - [0.87527217, 0.56531471, 0.28648326], - [0.87553313, 0.57216055, 0.29449756], - [0.87576930, 0.57898630, 0.30257577], - [0.87598171, 0.58579221, 0.31071851], - [0.87617147, 0.59257844, 0.31892638], - [0.87634020, 0.59934489, 0.32719953], - [0.87648888, 0.60609181, 0.33553878], - [0.87661914, 0.61281908, 0.34394439], - [0.87673240, 0.61952670, 0.35241687], - [0.87683016, 0.62621463, 0.36095669], - [0.87691421, 0.63288268, 0.36956410], - [0.87698607, 0.63953083, 0.37823972], - [0.87704779, 0.64615877, 0.38698363], - [0.87710104, 0.65276640, 0.39579639], - [0.87714801, 0.65935338, 0.40467811], - [0.87719069, 0.66591948, 0.41362916], - [0.87723137, 0.67246435, 0.42264965], - [0.87727233, 0.67898764, 0.43173978], - [0.87731605, 0.68548896, 0.44089961], - [0.87736509, 0.69196788, 0.45012917], - [0.87742214, 0.69842394, 0.45942844], - [0.87749005, 0.70485663, 0.46879727], - [0.87757175, 0.71126545, 0.47823549], - [0.87767038, 0.71764981, 0.48774277], - [0.87778914, 0.72400915, 0.49731878], - [0.87793145, 0.73034282, 0.50696296], - [0.87810081, 0.73665020, 0.51667477], - [0.87830092, 0.74293060, 0.52645341], - [0.87853556, 0.74918334, 0.53629808], - [0.87880873, 0.75540769, 0.54620771], - [0.87912449, 0.76160293, 0.55618122], - [0.87948712, 0.76776830, 0.56621720], - [0.87990092, 0.77390307, 0.57631429], - [0.88037047, 0.78000643, 0.58647070], - [0.88090027, 0.78607767, 0.59668473], - [0.88149514, 0.79211598, 0.60695418], - [0.88215974, 0.79812065, 0.61727700], - [0.88289909, 0.80409090, 0.62765056], - [0.88371798, 0.81002606, 0.63807240], - [0.88462153, 0.81592540, 0.64853946], - [0.88561459, 0.82178829, 0.65904886], - [0.88670229, 0.82761408, 0.66959711], - [0.88788952, 0.83340224, 0.68018083], - [0.88918122, 0.83915225, 0.69079625], - [0.89058234, 0.84486362, 0.70143930], - [0.89209744, 0.85053601, 0.71210615], - [0.89373153, 0.85616903, 0.72279183], - [0.89548875, 0.86176252, 
0.73349245], - [0.89737373, 0.86731625, 0.74420272], - [0.89939058, 0.87283016, 0.75491787], - [0.90154313, 0.87830429, 0.76563309], - [0.90383561, 0.88373862, 0.77634217], - [0.90627132, 0.88913338, 0.78704028], - [0.90885368, 0.89448881, 0.79772179], - [0.91158625, 0.89980515, 0.80838000], - [0.91447204, 0.90508277, 0.81900898], - [0.91751403, 0.91032207, 0.82960244], - [0.92071527, 0.91552347, 0.84015333], - [0.92407894, 0.92068737, 0.85065379], - [0.92760832, 0.92581419, 0.86109531], - [0.93130674, 0.93090430, 0.87146916], - [0.93517804, 0.93595804, 0.88176475], - [0.93922654, 0.94097572, 0.89196965], - [0.94345707, 0.94595767, 0.90206897], - [0.94787482, 0.95090438, 0.91204440], - ] - -CMAP_EXPOSURES = mpl.colors.LinearSegmentedColormap.from_list('cmr.sunburst', cm_data1, N=256).\ - reversed() +cm_data1 = [ + [0.00000000, 0.00000000, 0.00000000], + [0.00032031, 0.00020876, 0.00015576], + [0.00115213, 0.00071222, 0.00050933], + [0.00246632, 0.00145292, 0.00099932], + [0.00426111, 0.00240248, 0.00159470], + [0.00654129, 0.00354149, 0.00227479], + [0.00931453, 0.00485497, 0.00302435], + [0.01259008, 0.00633067, 0.00383153], + [0.01637810, 0.00795809, 0.00468676], + [0.02068947, 0.00972796, 0.00558214], + [0.02553552, 0.01163194, 0.00651101], + [0.03092793, 0.01366243, 0.00746771], + [0.03687870, 0.01581232, 0.00844736], + [0.04329108, 0.01807499, 0.00944575], + [0.04970018, 0.02044415, 0.01045917], + [0.05607744, 0.02291381, 0.01148441], + [0.06242826, 0.02547822, 0.01251862], + [0.06875727, 0.02813185, 0.01355932], + [0.07506844, 0.03086930, 0.01460431], + [0.08136524, 0.03368535, 0.01565167], + [0.08765071, 0.03657489, 0.01669973], + [0.09392754, 0.03953289, 0.01774700], + [0.10019812, 0.04248851, 0.01879222], + [0.10646459, 0.04536893, 0.01983431], + [0.11272888, 0.04818555, 0.02087234], + [0.11899272, 0.05094021, 0.02190555], + [0.12525770, 0.05363453, 0.02293331], + [0.13152527, 0.05626994, 0.02395516], + [0.13779673, 0.05884770, 0.02497073], + [0.14407332, 0.06136894, 0.02597979], + [0.15035614, 0.06383462, 0.02698225], + [0.15664624, 0.06624561, 0.02797810], + [0.16294457, 0.06860266, 0.02896747], + [0.16925203, 0.07090640, 0.02995057], + [0.17556946, 0.07315739, 0.03092776], + [0.18189762, 0.07535608, 0.03189947], + [0.18823726, 0.07750287, 0.03286623], + [0.19458905, 0.07959805, 0.03382870], + [0.20095364, 0.08164185, 0.03478764], + [0.20733163, 0.08363445, 0.03574389], + [0.21372359, 0.08557593, 0.03669841], + [0.22013006, 0.08746634, 0.03765228], + [0.22655154, 0.08930565, 0.03860667], + [0.23298852, 0.09109380, 0.03956286], + [0.23944144, 0.09283065, 0.04052097], + [0.24591073, 0.09451600, 0.04146142], + [0.25239679, 0.09614964, 0.04239527], + [0.25890000, 0.09773126, 0.04332440], + [0.26542072, 0.09926052, 0.04425071], + [0.27195929, 0.10073705, 0.04517610], + [0.27851612, 0.10216029, 0.04610242], + [0.28509144, 0.10352983, 0.04703172], + [0.29168551, 0.10484515, 0.04796603], + [0.29829858, 0.10610566, 0.04890741], + [0.30493089, 0.10731073, 0.04985793], + [0.31158270, 0.10845962, 0.05081968], + [0.31825437, 0.10955144, 0.05179469], + [0.32494588, 0.11058558, 0.05278533], + [0.33165741, 0.11156121, 0.05379388], + [0.33838918, 0.11247734, 0.05482253], + [0.34514146, 0.11333282, 0.05587349], + [0.35191413, 0.11412692, 0.05694939], + [0.35870733, 0.11485850, 0.05805261], + [0.36552140, 0.11552606, 0.05918537], + [0.37235602, 0.11612887, 0.06035055], + [0.37921149, 0.11666531, 0.06155047], + [0.38608774, 0.11713411, 0.06278785], + [0.39298465, 0.11753398, 0.06406542], + 
[0.39990243, 0.11786308, 0.06538571], + [0.40684070, 0.11812026, 0.06675174], + [0.41379968, 0.11830340, 0.06816610], + [0.42077900, 0.11841110, 0.06963182], + [0.42777857, 0.11844140, 0.07115178], + [0.43479835, 0.11839213, 0.07272887], + [0.44183779, 0.11826176, 0.07436631], + [0.44889692, 0.11804763, 0.07606698], + [0.45597537, 0.11774759, 0.07783407], + [0.46307262, 0.11735955, 0.07967086], + [0.47018828, 0.11688094, 0.08158056], + [0.47732206, 0.11630887, 0.08356643], + [0.48447342, 0.11564059, 0.08563184], + [0.49164167, 0.11487339, 0.08778027], + [0.49882616, 0.11400421, 0.09001524], + [0.50602619, 0.11302981, 0.09234030], + [0.51324096, 0.11194681, 0.09475911], + [0.52046957, 0.11075165, 0.09727541], + [0.52771103, 0.10944063, 0.09989300], + [0.53496423, 0.10800987, 0.10261578], + [0.54222828, 0.10645458, 0.10544773], + [0.54950158, 0.10477099, 0.10839295], + [0.55678265, 0.10295467, 0.11145561], + [0.56407005, 0.10100050, 0.11463998], + [0.57136221, 0.09890294, 0.11795046], + [0.57865683, 0.09665778, 0.12139144], + [0.58595251, 0.09425758, 0.12496762], + [0.59324637, 0.09169820, 0.12868351], + [0.60053647, 0.08897198, 0.13254399], + [0.60781996, 0.08607290, 0.13655381], + [0.61509391, 0.08299424, 0.14071783], + [0.62235528, 0.07972847, 0.14504098], + [0.62960086, 0.07626735, 0.14952833], + [0.63682690, 0.07260321, 0.15418475], + [0.64402945, 0.06872768, 0.15901515], + [0.65120429, 0.06463189, 0.16402435], + [0.65834703, 0.06030595, 0.16921717], + [0.66545273, 0.05574060, 0.17459807], + [0.67251615, 0.05092618, 0.18017123], + [0.67953179, 0.04585268, 0.18594053], + [0.68649408, 0.04050791, 0.19190990], + [0.69339656, 0.03501827, 0.19808181], + [0.70023310, 0.02974032, 0.20445918], + [0.70699677, 0.02473108, 0.21104325], + [0.71368081, 0.02004735, 0.21783521], + [0.72027805, 0.01575128, 0.22483488], + [0.72678121, 0.01190847, 0.23204104], + [0.73318299, 0.00858729, 0.23945145], + [0.73947609, 0.00585900, 0.24706262], + [0.74565328, 0.00379723, 0.25486974], + [0.75170751, 0.00247734, 0.26286660], + [0.75763201, 0.00197573, 0.27104565], + [0.76342035, 0.00236912, 0.27939796], + [0.76906659, 0.00373375, 0.28791328], + [0.77456531, 0.00614457, 0.29658016], + [0.77991170, 0.00967453, 0.30538600], + [0.78510166, 0.01439382, 0.31431727], + [0.79013176, 0.02036922, 0.32335963], + [0.79499936, 0.02766356, 0.33249813], + [0.79970258, 0.03633527, 0.34171740], + [0.80424028, 0.04610137, 0.35100187], + [0.80861206, 0.05593074, 0.36033595], + [0.81281824, 0.06575513, 0.36970423], + [0.81685977, 0.07556701, 0.37909164], + [0.82073820, 0.08536045, 0.38848361], + [0.82445563, 0.09513050, 0.39786621], + [0.82801462, 0.10487292, 0.40722623], + [0.83141814, 0.11458394, 0.41655122], + [0.83466964, 0.12426002, 0.42582926], + [0.83777258, 0.13389850, 0.43505012], + [0.84073089, 0.14349659, 0.44420371], + [0.84354864, 0.15305194, 0.45328109], + [0.84622995, 0.16256264, 0.46227431], + [0.84877908, 0.17202698, 0.47117623], + [0.85120054, 0.18144313, 0.47998013], + [0.85349849, 0.19081025, 0.48868085], + [0.85567734, 0.20012720, 0.49727347], + [0.85774150, 0.20939307, 0.50575378], + [0.85969539, 0.21860703, 0.51411817], + [0.86154321, 0.22776871, 0.52236389], + [0.86328918, 0.23687774, 0.53048865], + [0.86493759, 0.24593368, 0.53849050], + [0.86649243, 0.25493655, 0.54636825], + [0.86795766, 0.26388635, 0.55412108], + [0.86933714, 0.27278325, 0.56174857], + [0.87063488, 0.28162708, 0.56925039], + [0.87185473, 0.29041795, 0.57662667], + [0.87299987, 0.29915672, 0.58387836], + [0.87407470, 0.30784267, 
0.59100548], + [0.87508176, 0.31647731, 0.59800984], + [0.87602545, 0.32505984, 0.60489185], + [0.87690829, 0.33359164, 0.61165350], + [0.87773379, 0.34207284, 0.61829617], + [0.87850545, 0.35050356, 0.62482133], + [0.87922592, 0.35888478, 0.63123109], + [0.87989827, 0.36721697, 0.63752735], + [0.88052548, 0.37550059, 0.64371209], + [0.88111058, 0.38373605, 0.64978738], + [0.88165635, 0.39192396, 0.65575540], + [0.88216538, 0.40006502, 0.66161845], + [0.88264034, 0.40815983, 0.66737883], + [0.88308383, 0.41620898, 0.67303885], + [0.88349837, 0.42421311, 0.67860087], + [0.88388658, 0.43217272, 0.68406723], + [0.88425089, 0.44008842, 0.68944031], + [0.88459352, 0.44796098, 0.69472256], + [0.88491674, 0.45579107, 0.69991638], + [0.88522277, 0.46357936, 0.70502418], + [0.88551386, 0.47132645, 0.71004831], + [0.88579260, 0.47903263, 0.71499109], + [0.88606054, 0.48669904, 0.71985498], + [0.88631967, 0.49432634, 0.72464230], + [0.88657273, 0.50191463, 0.72935531], + [0.88682100, 0.50946512, 0.73399636], + [0.88706656, 0.51697833, 0.73856771], + [0.88731166, 0.52445464, 0.74307157], + [0.88755748, 0.53189523, 0.74751019], + [0.88780677, 0.53930002, 0.75188571], + [0.88806029, 0.54667042, 0.75620029], + [0.88832077, 0.55400637, 0.76045604], + [0.88858898, 0.56130917, 0.76465503], + [0.88886751, 0.56857881, 0.76879932], + [0.88915723, 0.57581648, 0.77289087], + [0.88946027, 0.58302245, 0.77693169], + [0.88977801, 0.59019749, 0.78092369], + [0.89011184, 0.59734231, 0.78486874], + [0.89046385, 0.60445719, 0.78876876], + [0.89083498, 0.61154309, 0.79262552], + [0.89122688, 0.61860051, 0.79644080], + [0.89164127, 0.62562987, 0.80021639], + [0.89207922, 0.63263202, 0.80395396], + [0.89254218, 0.63960749, 0.80765517], + [0.89303193, 0.64655664, 0.81132175], + [0.89354946, 0.65348027, 0.81495521], + [0.89409613, 0.66037894, 0.81855714], + [0.89467341, 0.66725312, 0.82212908], + [0.89528268, 0.67410333, 0.82567258], + [0.89592507, 0.68093022, 0.82918904], + [0.89660188, 0.68773430, 0.83267991], + [0.89731440, 0.69451609, 0.83614660], + [0.89806405, 0.70127602, 0.83959053], + [0.89885189, 0.70801470, 0.84301299], + [0.89967918, 0.71473262, 0.84641529], + [0.90054714, 0.72143026, 0.84979872], + [0.90145701, 0.72810810, 0.85316454], + [0.90241007, 0.73476657, 0.85651399], + [0.90340743, 0.74140617, 0.85984825], + [0.90445031, 0.74802735, 0.86316849], + [0.90553992, 0.75463054, 0.86647585], + [0.90667746, 0.76121615, 0.86977146], + [0.90786415, 0.76778459, 0.87305641], + [0.90910120, 0.77433626, 0.87633178], + [0.91038981, 0.78087154, 0.87959861], + [0.91173124, 0.78739078, 0.88285793], + [0.91312673, 0.79389433, 0.88611074], + [0.91457758, 0.80038249, 0.88935803], + [0.91608500, 0.80685562, 0.89260074], + [0.91765039, 0.81331396, 0.89583983], + [0.91927511, 0.81975775, 0.89907623], + [0.92096059, 0.82618722, 0.90231088], + [0.92270830, 0.83260254, 0.90554466], + [0.92451964, 0.83900395, 0.90877841], + [0.92639632, 0.84539150, 0.91201305], + [0.92834008, 0.85176524, 0.91524947], + [0.93035272, 0.85812518, 0.91848857], + [0.93243609, 0.86447132, 0.92173117], + [0.93459223, 0.87080356, 0.92497815], + [0.93682359, 0.87712161, 0.92823055], + [0.93913266, 0.88342515, 0.93148937], + [0.94152187, 0.88971391, 0.93475546], + [0.94399458, 0.89598719, 0.93803021], + [0.94655427, 0.90224421, 0.94131502], + [0.94920436, 0.90848425, 0.94461125], +] + +cm_data2 = [ + [0.00000000, 0.00000000, 0.00000000], + [0.00028691, 0.00020835, 0.00028279], + [0.00102421, 0.00070903, 0.00101021], + [0.00218033, 0.00144242, 0.00214845], + 
[0.00375280, 0.00237790, 0.00368891], + [0.00574727, 0.00349371, 0.00562841], + [0.00817359, 0.00477242, 0.00796563], + [0.01104432, 0.00619914, 0.01069976], + [0.01437378, 0.00776073, 0.01382970], + [0.01817764, 0.00944524, 0.01735364], + [0.02247277, 0.01124162, 0.02126897], + [0.02727694, 0.01313949, 0.02557207], + [0.03260869, 0.01512908, 0.03025819], + [0.03848721, 0.01720107, 0.03532137], + [0.04472223, 0.01934661, 0.04074862], + [0.05095008, 0.02155723, 0.04620189], + [0.05718085, 0.02382484, 0.05156892], + [0.06341877, 0.02614168, 0.05685075], + [0.06966727, 0.02850036, 0.06204782], + [0.07592916, 0.03089381, 0.06716019], + [0.08220666, 0.03331529, 0.07218757], + [0.08850155, 0.03575837, 0.07712945], + [0.09481532, 0.03821687, 0.08198520], + [0.10114895, 0.04068063, 0.08675399], + [0.10750319, 0.04306161, 0.09143498], + [0.11387855, 0.04536332, 0.09602729], + [0.12027537, 0.04758808, 0.10053004], + [0.12669388, 0.04973801, 0.10494242], + [0.13313410, 0.05181515, 0.10926361], + [0.13959587, 0.05382147, 0.11349284], + [0.14607903, 0.05575879, 0.11762946], + [0.15258333, 0.05762879, 0.12167284], + [0.15910850, 0.05943303, 0.12562246], + [0.16565413, 0.06117310, 0.12947786], + [0.17221981, 0.06285040, 0.13323866], + [0.17880518, 0.06446624, 0.13690456], + [0.18540980, 0.06602187, 0.14047531], + [0.19203321, 0.06751848, 0.14395075], + [0.19867499, 0.06895715, 0.14733079], + [0.20533472, 0.07033887, 0.15061537], + [0.21201197, 0.07166460, 0.15380450], + [0.21870632, 0.07293518, 0.15689824], + [0.22541736, 0.07415142, 0.15989669], + [0.23214472, 0.07531401, 0.16279996], + [0.23888802, 0.07642364, 0.16560823], + [0.24564687, 0.07748088, 0.16832171], + [0.25242097, 0.07848626, 0.17094058], + [0.25920996, 0.07944023, 0.17346508], + [0.26601352, 0.08034324, 0.17589547], + [0.27283134, 0.08119562, 0.17823199], + [0.27966317, 0.08199764, 0.18047489], + [0.28650868, 0.08274959, 0.18262446], + [0.29336760, 0.08345167, 0.18468096], + [0.30023971, 0.08410396, 0.18664460], + [0.30712474, 0.08470663, 0.18851568], + [0.31402240, 0.08525975, 0.19029445], + [0.32093251, 0.08576327, 0.19198110], + [0.32785482, 0.08621717, 0.19357587], + [0.33478905, 0.08662148, 0.19507899], + [0.34173503, 0.08697601, 0.19649062], + [0.34869254, 0.08728060, 0.19781092], + [0.35566125, 0.08753522, 0.19904011], + [0.36264104, 0.08773953, 0.20017823], + [0.36963165, 0.08789334, 0.20122542], + [0.37663272, 0.08799656, 0.20218186], + [0.38364424, 0.08804859, 0.20304740], + [0.39066574, 0.08804944, 0.20382227], + [0.39769703, 0.08799872, 0.20450641], + [0.40473792, 0.08789596, 0.20509971], + [0.41178790, 0.08774121, 0.20560237], + [0.41884704, 0.08753353, 0.20601388], + [0.42591463, 0.08727325, 0.20633459], + [0.43299069, 0.08695948, 0.20656394], + [0.44007455, 0.08659242, 0.20670212], + [0.44716616, 0.08617128, 0.20674851], + [0.45426479, 0.08569637, 0.20670331], + [0.46137042, 0.08516677, 0.20656566], + [0.46848219, 0.08458313, 0.20633582], + [0.47560004, 0.08394454, 0.20601280], + [0.48272316, 0.08325159, 0.20559662], + [0.48985104, 0.08250434, 0.20508677], + [0.49698340, 0.08170242, 0.20448225], + [0.50411927, 0.08084690, 0.20378304], + [0.51125803, 0.07993830, 0.20298844], + [0.51839929, 0.07897664, 0.20209721], + [0.52554202, 0.07796358, 0.20110904], + [0.53268538, 0.07690049, 0.20002312], + [0.53982852, 0.07578902, 0.19883855], + [0.54697049, 0.07463129, 0.19755431], + [0.55411028, 0.07342990, 0.19616934], + [0.56124678, 0.07218810, 0.19468248], + [0.56837880, 0.07090985, 0.19309253], + [0.57550502, 0.06959997, 
0.19139818], + [0.58262400, 0.06826431, 0.18959809], + [0.58973418, 0.06690989, 0.18769083], + [0.59683382, 0.06554515, 0.18567490], + [0.60392106, 0.06418012, 0.18354875], + [0.61099403, 0.06282598, 0.18131023], + [0.61805061, 0.06149625, 0.17895730], + [0.62508803, 0.06020822, 0.17648890], + [0.63210426, 0.05897851, 0.17390136], + [0.63909578, 0.05783082, 0.17119418], + [0.64606007, 0.05678752, 0.16836327], + [0.65299326, 0.05587785, 0.16540731], + [0.65989160, 0.05513269, 0.16232365], + [0.66675096, 0.05458598, 0.15910942], + [0.67356680, 0.05427454, 0.15576179], + [0.68033403, 0.05423761, 0.15227799], + [0.68704706, 0.05451589, 0.14865546], + [0.69369969, 0.05515040, 0.14489185], + [0.70028509, 0.05618108, 0.14098519], + [0.70679624, 0.05764355, 0.13693176], + [0.71322465, 0.05957213, 0.13273203], + [0.71956187, 0.06199294, 0.12838347], + [0.72579832, 0.06492701, 0.12388673], + [0.73192387, 0.06838759, 0.11924309], + [0.73792785, 0.07238015, 0.11445523], + [0.74379911, 0.07690258, 0.10952793], + [0.74952631, 0.08194530, 0.10446780], + [0.75509807, 0.08749192, 0.09928513], + [0.76050344, 0.09351949, 0.09399345], + [0.76573234, 0.09999923, 0.08860931], + [0.77077595, 0.10689714, 0.08315390], + [0.77562724, 0.11417469, 0.07765262], + [0.78028137, 0.12178994, 0.07213493], + [0.78473594, 0.12969861, 0.06663478], + [0.78899120, 0.13785534, 0.06119075], + [0.79304987, 0.14621526, 0.05584590], + [0.79691698, 0.15473527, 0.05064835], + [0.80059949, 0.16337512, 0.04565234], + [0.80410578, 0.17209842, 0.04091877], + [0.80744502, 0.18087354, 0.03656330], + [0.81062721, 0.18967261, 0.03284897], + [0.81366202, 0.19847328, 0.02978095], + [0.81655911, 0.20725703, 0.02735425], + [0.81932773, 0.21600901, 0.02556368], + [0.82197656, 0.22471783, 0.02440445], + [0.82451354, 0.23337504, 0.02387282], + [0.82694588, 0.24197470, 0.02396658], + [0.82928000, 0.25051291, 0.02468537], + [0.83152234, 0.25898625, 0.02603161], + [0.83367755, 0.26739445, 0.02800850], + [0.83575119, 0.27573587, 0.03062270], + [0.83774693, 0.28401176, 0.03388176], + [0.83966871, 0.29222281, 0.03779577], + [0.84152000, 0.30037020, 0.04231855], + [0.84330390, 0.30845547, 0.04718171], + [0.84502314, 0.31648042, 0.05232334], + [0.84668012, 0.32444703, 0.05769850], + [0.84827700, 0.33235739, 0.06327080], + [0.84981598, 0.34021329, 0.06901096], + [0.85129899, 0.34801660, 0.07489554], + [0.85272715, 0.35576999, 0.08090629], + [0.85410285, 0.36347441, 0.08702799], + [0.85542653, 0.37113285, 0.09324952], + [0.85670046, 0.37874607, 0.09956104], + [0.85792511, 0.38631664, 0.10595570], + [0.85910167, 0.39384615, 0.11242769], + [0.86023184, 0.40133560, 0.11897200], + [0.86131603, 0.40878710, 0.12558544], + [0.86235527, 0.41620202, 0.13226519], + [0.86335049, 0.42358173, 0.13900904], + [0.86430261, 0.43092748, 0.14581530], + [0.86521249, 0.43824051, 0.15268270], + [0.86608094, 0.44552198, 0.15961030], + [0.86690878, 0.45277298, 0.16659744], + [0.86769678, 0.45999455, 0.17364368], + [0.86844571, 0.46718767, 0.18074877], + [0.86915633, 0.47435325, 0.18791261], + [0.86982940, 0.48149217, 0.19513520], + [0.87046566, 0.48860521, 0.20241667], + [0.87106589, 0.49569313, 0.20975721], + [0.87163086, 0.50275663, 0.21715708], + [0.87216162, 0.50979614, 0.22461634], + [0.87265881, 0.51681240, 0.23213553], + [0.87312317, 0.52380600, 0.23971510], + [0.87355555, 0.53077744, 0.24735548], + [0.87395712, 0.53772697, 0.25505684], + [0.87432861, 0.54465512, 0.26281981], + [0.87467085, 0.55156232, 0.27064498], + [0.87498503, 0.55844876, 0.27853263], + [0.87527217, 
0.56531471, 0.28648326], + [0.87553313, 0.57216055, 0.29449756], + [0.87576930, 0.57898630, 0.30257577], + [0.87598171, 0.58579221, 0.31071851], + [0.87617147, 0.59257844, 0.31892638], + [0.87634020, 0.59934489, 0.32719953], + [0.87648888, 0.60609181, 0.33553878], + [0.87661914, 0.61281908, 0.34394439], + [0.87673240, 0.61952670, 0.35241687], + [0.87683016, 0.62621463, 0.36095669], + [0.87691421, 0.63288268, 0.36956410], + [0.87698607, 0.63953083, 0.37823972], + [0.87704779, 0.64615877, 0.38698363], + [0.87710104, 0.65276640, 0.39579639], + [0.87714801, 0.65935338, 0.40467811], + [0.87719069, 0.66591948, 0.41362916], + [0.87723137, 0.67246435, 0.42264965], + [0.87727233, 0.67898764, 0.43173978], + [0.87731605, 0.68548896, 0.44089961], + [0.87736509, 0.69196788, 0.45012917], + [0.87742214, 0.69842394, 0.45942844], + [0.87749005, 0.70485663, 0.46879727], + [0.87757175, 0.71126545, 0.47823549], + [0.87767038, 0.71764981, 0.48774277], + [0.87778914, 0.72400915, 0.49731878], + [0.87793145, 0.73034282, 0.50696296], + [0.87810081, 0.73665020, 0.51667477], + [0.87830092, 0.74293060, 0.52645341], + [0.87853556, 0.74918334, 0.53629808], + [0.87880873, 0.75540769, 0.54620771], + [0.87912449, 0.76160293, 0.55618122], + [0.87948712, 0.76776830, 0.56621720], + [0.87990092, 0.77390307, 0.57631429], + [0.88037047, 0.78000643, 0.58647070], + [0.88090027, 0.78607767, 0.59668473], + [0.88149514, 0.79211598, 0.60695418], + [0.88215974, 0.79812065, 0.61727700], + [0.88289909, 0.80409090, 0.62765056], + [0.88371798, 0.81002606, 0.63807240], + [0.88462153, 0.81592540, 0.64853946], + [0.88561459, 0.82178829, 0.65904886], + [0.88670229, 0.82761408, 0.66959711], + [0.88788952, 0.83340224, 0.68018083], + [0.88918122, 0.83915225, 0.69079625], + [0.89058234, 0.84486362, 0.70143930], + [0.89209744, 0.85053601, 0.71210615], + [0.89373153, 0.85616903, 0.72279183], + [0.89548875, 0.86176252, 0.73349245], + [0.89737373, 0.86731625, 0.74420272], + [0.89939058, 0.87283016, 0.75491787], + [0.90154313, 0.87830429, 0.76563309], + [0.90383561, 0.88373862, 0.77634217], + [0.90627132, 0.88913338, 0.78704028], + [0.90885368, 0.89448881, 0.79772179], + [0.91158625, 0.89980515, 0.80838000], + [0.91447204, 0.90508277, 0.81900898], + [0.91751403, 0.91032207, 0.82960244], + [0.92071527, 0.91552347, 0.84015333], + [0.92407894, 0.92068737, 0.85065379], + [0.92760832, 0.92581419, 0.86109531], + [0.93130674, 0.93090430, 0.87146916], + [0.93517804, 0.93595804, 0.88176475], + [0.93922654, 0.94097572, 0.89196965], + [0.94345707, 0.94595767, 0.90206897], + [0.94787482, 0.95090438, 0.91204440], +] + +CMAP_EXPOSURES = mpl.colors.LinearSegmentedColormap.from_list( + "cmr.sunburst", cm_data1, N=256 +).reversed() """Default sequential colormaps, taken from https://cmasher.readthedocs.io/index.html""" -CMAP_EXPOSURES.set_under('lightgray') +CMAP_EXPOSURES.set_under("lightgray") -CMAP_IMPACT = mpl.colors.LinearSegmentedColormap.from_list('cmr.flamingo', cm_data2, N=256).\ - reversed() +CMAP_IMPACT = mpl.colors.LinearSegmentedColormap.from_list( + "cmr.flamingo", cm_data2, N=256 +).reversed() """Default sequential colormaps, taken from https://cmasher.readthedocs.io/index.html""" -CMAP_IMPACT.set_under('lightgray') +CMAP_IMPACT.set_under("lightgray") -CMAP_RASTER = 'viridis' +CMAP_RASTER = "viridis" -CMAP_CAT = 'Dark2' +CMAP_CAT = "Dark2" diff --git a/climada/util/coordinates.py b/climada/util/coordinates.py index b26c1f8d0..e160965b1 100644 --- a/climada/util/coordinates.py +++ b/climada/util/coordinates.py @@ -23,13 +23,12 @@ import copy import 
logging import math -from multiprocessing import cpu_count -from pathlib import Path import re import warnings import zipfile +from multiprocessing import cpu_count +from pathlib import Path -from cartopy.io import shapereader import dask.dataframe as dd import geopandas as gpd import numba @@ -41,23 +40,29 @@ import rasterio.features import rasterio.mask import rasterio.warp -import scipy.spatial import scipy.interpolate -from shapely.geometry import Polygon, MultiPolygon, Point, box +import scipy.spatial import shapely.ops import shapely.vectorized import shapely.wkt +from cartopy.io import shapereader +from shapely.geometry import MultiPolygon, Point, Polygon, box from sklearn.neighbors import BallTree +import climada.util.hdf5_handler as u_hdf5 from climada.util.config import CONFIG -from climada.util.constants import (DEF_CRS, EARTH_RADIUS_KM, SYSTEM_DIR, ONE_LAT_KM, - NATEARTH_CENTROIDS, - ISIMIP_GPWV3_NATID_150AS, - ISIMIP_NATID_TO_ISO, - NONISO_REGIONS, - RIVER_FLOOD_REGIONS_CSV) +from climada.util.constants import ( + DEF_CRS, + EARTH_RADIUS_KM, + ISIMIP_GPWV3_NATID_150AS, + ISIMIP_NATID_TO_ISO, + NATEARTH_CENTROIDS, + NONISO_REGIONS, + ONE_LAT_KM, + RIVER_FLOOD_REGIONS_CSV, + SYSTEM_DIR, +) from climada.util.files_handler import download_file -import climada.util.hdf5_handler as u_hdf5 pd.options.mode.chained_assignment = None @@ -79,6 +84,7 @@ """Distance threshold in km for coordinate assignment. Nearest neighbors with greater distances are not considered.""" + def latlon_to_geosph_vector(lat, lon, rad=False, basis=False): """Convert lat/lon coodinates to radial vectors (on geosphere) @@ -110,15 +116,23 @@ def latlon_to_geosph_vector(lat, lon, rad=False, basis=False): sin_lon, cos_lon = np.sin(rad_lon), np.cos(rad_lon) vecn = np.stack((sin_lat * cos_lon, sin_lat * sin_lon, cos_lat), axis=-1) if basis: - vbasis = np.stack(( - cos_lat * cos_lon, cos_lat * sin_lon, -sin_lat, - -sin_lon, cos_lon, np.zeros_like(cos_lat), - ), axis=-1).reshape(lat.shape + (2, 3)) + vbasis = np.stack( + ( + cos_lat * cos_lon, + cos_lat * sin_lon, + -sin_lat, + -sin_lon, + cos_lon, + np.zeros_like(cos_lat), + ), + axis=-1, + ).reshape(lat.shape + (2, 3)) return vecn, vbasis return vecn + def lon_normalize(lon, center=0.0): - """ Normalizes degrees such that always -180 < lon - center <= 180 + """Normalizes degrees such that always -180 < lon - center <= 180 The input data is modified in place! @@ -149,6 +163,7 @@ def lon_normalize(lon, center=0.0): lon[lon <= bounds[0]] += 360 return lon + def lon_bounds(lon, buffer=0.0): """Bounds of a set of degree values, respecting the periodicity in longitude @@ -265,11 +280,17 @@ def toggle_extent_bounds(bounds_or_extent): extent_or_bounds : tuple (a, c, b, d) Bounding box of the given points in "extent" (or "bounds") convention. """ - return (bounds_or_extent[0], bounds_or_extent[2], bounds_or_extent[1], bounds_or_extent[3]) + return ( + bounds_or_extent[0], + bounds_or_extent[2], + bounds_or_extent[1], + bounds_or_extent[3], + ) -def dist_approx(lat1, lon1, lat2, lon2, log=False, normalize=True, - method="equirect", units='km'): +def dist_approx( + lat1, lon1, lat2, lon2, log=False, normalize=True, method="equirect", units="km" +): """Compute approximation of geodistance in specified units Several batches of points can be processed at once for improved performance. 
The distances of @@ -334,15 +355,19 @@ def dist_approx(lat1, lon1, lat2, lon2, log=False, normalize=True, elif units == "degree": unit_factor = 1 else: - raise KeyError('Unknown distance unit: %s' % units) + raise KeyError("Unknown distance unit: %s" % units) if method == "equirect": if normalize: - mid_lon = 0.5 * sum(lon_bounds(np.concatenate([lon1.ravel(), lon2.ravel()]))) + mid_lon = 0.5 * sum( + lon_bounds(np.concatenate([lon1.ravel(), lon2.ravel()])) + ) lon_normalize(lon1, center=mid_lon) lon_normalize(lon2, center=mid_lon) - vtan = np.stack([lat2[:, None, :] - lat1[:, :, None], - lon2[:, None, :] - lon1[:, :, None]], axis=-1) + vtan = np.stack( + [lat2[:, None, :] - lat1[:, :, None], lon2[:, None, :] - lon1[:, :, None]], + axis=-1, + ) fact1 = np.heaviside(vtan[..., 1] - 180, 0) fact2 = np.heaviside(-vtan[..., 1] - 180, 0) vtan[..., 1] -= (fact1 - fact2) * 360 @@ -355,14 +380,16 @@ def dist_approx(lat1, lon1, lat2, lon2, log=False, normalize=True, dlat = 0.5 * (lat2[:, None, :] - lat1[:, :, None]) dlon = 0.5 * (lon2[:, None, :] - lon1[:, :, None]) # haversine formula: - hav = np.sin(dlat)**2 \ - + np.cos(lat1[:, :, None]) * np.cos(lat2[:, None, :]) * np.sin(dlon)**2 + hav = ( + np.sin(dlat) ** 2 + + np.cos(lat1[:, :, None]) * np.cos(lat2[:, None, :]) * np.sin(dlon) ** 2 + ) dist = np.degrees(2 * np.arcsin(np.sqrt(hav))) * unit_factor if log: vec1, vbasis = latlon_to_geosph_vector(lat1, lon1, rad=True, basis=True) vec2 = latlon_to_geosph_vector(lat2, lon2, rad=True) vtan = vec2[:, None, :] - (1 - 2 * hav[..., None]) * vec1[:, :, None] - vtan = np.einsum('nkli,nkji->nklj', vtan, vbasis) + vtan = np.einsum("nkli,nkji->nklj", vtan, vbasis) # faster version of `vtan_norm = np.linalg.norm(vtan, axis=-1)` vtan_norm = np.sqrt(np.einsum("...l,...l->...", vtan, vtan)) # for consistency, set dist to 0 if vtan is 0 @@ -372,6 +399,7 @@ def dist_approx(lat1, lon1, lat2, lon2, log=False, normalize=True, raise KeyError("Unknown distance approximation method: %s" % method) return (dist, vtan) if log else dist + def compute_geodesic_lengths(gdf): """Calculate the great circle (geodesic / spherical) lengths along any (complicated) line geometry object, based on the pyproj.Geod implementation. @@ -403,7 +431,7 @@ def compute_geodesic_lengths(gdf): return gdf_tmp.apply(lambda row: geod.geometry_length(row.geometry), axis=1) -def get_gridcellarea(lat, resolution=0.5, unit='ha'): +def get_gridcellarea(lat, resolution=0.5, unit="ha"): """The area covered by a grid cell is calculated depending on the latitude * 1 degree = ONE_LAT_KM (111.12km at the equator) @@ -421,15 +449,16 @@ def get_gridcellarea(lat, resolution=0.5, unit='ha'): unit of the output area (default: ha, alternatives: m2, km2) """ - if unit == 'm2': - area = (ONE_LAT_KM * resolution)**2 * np.cos(np.deg2rad(lat)) * 1000000 - elif unit == 'km2': - area = (ONE_LAT_KM * resolution)**2 * np.cos(np.deg2rad(lat)) + if unit == "m2": + area = (ONE_LAT_KM * resolution) ** 2 * np.cos(np.deg2rad(lat)) * 1000000 + elif unit == "km2": + area = (ONE_LAT_KM * resolution) ** 2 * np.cos(np.deg2rad(lat)) else: - area = (ONE_LAT_KM * resolution)**2 * np.cos(np.deg2rad(lat))*100 + area = (ONE_LAT_KM * resolution) ** 2 * np.cos(np.deg2rad(lat)) * 100 return area + def grid_is_regular(coord): """Return True if grid is regular. If True, returns height and width. 
@@ -453,11 +482,16 @@ def grid_is_regular(coord): _, count_lon = np.unique(coord[:, 1], return_counts=True) uni_lat_size = np.unique(count_lat).size uni_lon_size = np.unique(count_lon).size - if uni_lat_size == uni_lon_size and uni_lat_size == 1 \ - and count_lat[0] > 1 and count_lon[0] > 1: + if ( + uni_lat_size == uni_lon_size + and uni_lat_size == 1 + and count_lat[0] > 1 + and count_lon[0] > 1 + ): regular = True return regular, count_lat[0], count_lon[0] + def convert_wgs_to_utm(lon, lat): """Get EPSG code of UTM projection for input point in EPSG 4326 @@ -476,6 +510,7 @@ def convert_wgs_to_utm(lon, lat): epsg_utm_base = 32601 + (0 if lat >= 0 else 100) return epsg_utm_base + (math.floor((lon + 180) / 6) % 60) + def dist_to_coast(coord_lat, lon=None, highres=False, signed=False): """Read interpolated (signed) distance to coast (in m) from NASA data @@ -512,19 +547,26 @@ def dist_to_coast(coord_lat, lon=None, highres=False, signed=False): if lon is None: if isinstance(coord_lat, (gpd.GeoDataFrame, gpd.GeoSeries)): if not equal_crs(coord_lat.crs, DEF_CRS): - raise ValueError('Input CRS is not %s' % str(DEF_CRS)) - geom = coord_lat if isinstance(coord_lat, gpd.GeoSeries) else coord_lat["geometry"] + raise ValueError("Input CRS is not %s" % str(DEF_CRS)) + geom = ( + coord_lat + if isinstance(coord_lat, gpd.GeoSeries) + else coord_lat["geometry"] + ) lon, lat = geom.x.values, geom.y.values elif isinstance(coord_lat, np.ndarray) and coord_lat.shape[1] == 2: lat, lon = coord_lat[:, 0], coord_lat[:, 1] else: - raise ValueError('Missing longitude values.') + raise ValueError("Missing longitude values.") else: lat, lon = [np.asarray(v).reshape(-1) for v in [coord_lat, lon]] if lat.size != lon.size: - raise ValueError(f'Mismatching input coordinates size: {lat.size} != {lon.size}') + raise ValueError( + f"Mismatching input coordinates size: {lat.size} != {lon.size}" + ) return dist_to_coast_nasa(lat, lon, highres=highres, signed=signed) + def _get_dist_to_coast_nasa_tif(): """Get the path to the NASA raster file for distance to coast. If the file (300 MB) is missing it will be automatically downloaded. @@ -541,11 +583,12 @@ def _get_dist_to_coast_nasa_tif(): if not path.is_file(): url = CONFIG.util.coordinates.dist_to_coast_nasa_url.str() path_dwn = download_file(url, download_dir=SYSTEM_DIR) - zip_ref = zipfile.ZipFile(path_dwn, 'r') + zip_ref = zipfile.ZipFile(path_dwn, "r") zip_ref.extractall(SYSTEM_DIR) zip_ref.close() return path + def dist_to_coast_nasa(lat, lon, highres=False, signed=False): """Read interpolated (signed) distance to coast (in m) from NASA data @@ -572,16 +615,22 @@ def dist_to_coast_nasa(lat, lon, highres=False, signed=False): lat, lon = [np.asarray(ar).ravel() for ar in [lat, lon]] lon = lon_normalize(lon.copy()) intermediate_res = None if highres else 0.1 - west_msk = (lon < 0) + west_msk = lon < 0 dist = np.zeros_like(lat) for msk in [west_msk, ~west_msk]: if np.count_nonzero(msk) > 0: dist[msk] = read_raster_sample( - path, lat[msk], lon[msk], intermediate_res=intermediate_res, fill_value=0) + path, + lat[msk], + lon[msk], + intermediate_res=intermediate_res, + fill_value=0, + ) if not signed: dist = np.abs(dist) return 1000 * dist + def get_land_geometry(country_names=None, extent=None, resolution=10): """Get union of the specified (or all) countries or the points inside the extent. 
@@ -606,6 +655,7 @@ def get_land_geometry(country_names=None, extent=None, resolution=10): geom = MultiPolygon([geom]) return geom + def coord_on_land(lat, lon, land_geom=None): """Check if points are on land. @@ -624,8 +674,9 @@ def coord_on_land(lat, lon, land_geom=None): Entries are True if corresponding coordinate is on land and False otherwise. """ if lat.size != lon.size: - raise ValueError('Wrong size input coordinates: %s != %s.' - % (lat.size, lon.size)) + raise ValueError( + "Wrong size input coordinates: %s != %s." % (lat.size, lon.size) + ) if lat.size == 0: return np.empty((0,), dtype=bool) delta_deg = 1 @@ -639,8 +690,8 @@ def coord_on_land(lat, lon, land_geom=None): bounds = latlon_bounds(lat, lons, buffer=delta_deg) # load land geometry with appropriate same extent land_geom = get_land_geometry( - extent=toggle_extent_bounds(bounds), - resolution=10) + extent=toggle_extent_bounds(bounds), resolution=10 + ) elif not land_geom.is_empty: # ensure lon values are within extent of provided land_geom land_bounds = land_geom.bounds @@ -651,6 +702,7 @@ def coord_on_land(lat, lon, land_geom=None): return shapely.vectorized.contains(land_geom, lons, lat) + def nat_earth_resolution(resolution): """Check if resolution is available in Natural Earth. Build string. @@ -670,10 +722,13 @@ def nat_earth_resolution(resolution): """ avail_res = [10, 50, 110] if resolution not in avail_res: - raise ValueError('Natural Earth does not accept resolution %s m.' % resolution) - return str(resolution) + 'm' + raise ValueError("Natural Earth does not accept resolution %s m." % resolution) + return str(resolution) + "m" + -def get_country_geometries(country_names=None, extent=None, resolution=10, center_crs=True): +def get_country_geometries( + country_names=None, extent=None, resolution=10, center_crs=True +): """Natural Earth country boundaries within given extent If no arguments are given, simply returns the whole natural earth dataset. @@ -708,19 +763,19 @@ def get_country_geometries(country_names=None, extent=None, resolution=10, cente within the specified extent. 
""" resolution = nat_earth_resolution(resolution) - shp_file = shapereader.natural_earth(resolution=resolution, - category='cultural', - name='admin_0_countries') - nat_earth = gpd.read_file(shp_file, encoding='UTF-8') + shp_file = shapereader.natural_earth( + resolution=resolution, category="cultural", name="admin_0_countries" + ) + nat_earth = gpd.read_file(shp_file, encoding="UTF-8") if not nat_earth.crs: nat_earth.crs = NE_CRS # fill gaps in nat_earth - gap_mask = (nat_earth['ISO_A3'] == '-99') - nat_earth.loc[gap_mask, 'ISO_A3'] = nat_earth.loc[gap_mask, 'ADM0_A3'] + gap_mask = nat_earth["ISO_A3"] == "-99" + nat_earth.loc[gap_mask, "ISO_A3"] = nat_earth.loc[gap_mask, "ADM0_A3"] - gap_mask = (nat_earth['ISO_N3'] == '-99') + gap_mask = nat_earth["ISO_N3"] == "-99" for idx, country in nat_earth[gap_mask].iterrows(): nat_earth.loc[idx, "ISO_N3"] = f"{natearth_country_to_int(country):03d}" @@ -729,7 +784,7 @@ def get_country_geometries(country_names=None, extent=None, resolution=10, cente if isinstance(country_names, str): country_names = [country_names] country_mask = np.isin( - nat_earth[['ISO_A3', 'WB_A3', 'ADM0_A3']].values, + nat_earth[["ISO_A3", "WB_A3", "ADM0_A3"]].values, country_names, ).any(axis=1) out = out[country_mask] @@ -759,21 +814,26 @@ def get_country_geometries(country_names=None, extent=None, resolution=10, cente [box(*toggle_extent_bounds(e)) for e in [extent_left, extent_right]] ) bbox = gpd.GeoSeries(bbox, crs=DEF_CRS) - bbox = gpd.GeoDataFrame({'geometry': bbox}, crs=DEF_CRS) + bbox = gpd.GeoDataFrame({"geometry": bbox}, crs=DEF_CRS) out = gpd.overlay(out, bbox, how="intersection") if ~lon_normalized and center_crs: lon_mid = 0.5 * (extent[0] + extent[1]) # reset the CRS attribute after rewrapping (we don't really change the CRS) - out = ( - out - .to_crs({"proj": "longlat", "lon_wrap": lon_mid}) - .set_crs(DEF_CRS, allow_override=True) + out = out.to_crs({"proj": "longlat", "lon_wrap": lon_mid}).set_crs( + DEF_CRS, allow_override=True ) return out -def get_region_gridpoints(countries=None, regions=None, resolution=150, - iso=True, rect=False, basemap="natearth"): + +def get_region_gridpoints( + countries=None, + regions=None, + resolution=150, + iso=True, + rect=False, + basemap="natearth", +): """Get coordinates of gridpoints in specified countries or regions Parameters @@ -809,31 +869,38 @@ def get_region_gridpoints(countries=None, regions=None, resolution=150, if basemap == "natearth": base_file = NATEARTH_CENTROIDS[resolution] hdf5_f = u_hdf5.read(base_file) - meta = hdf5_f['meta'] - grid_shape = (meta['height'][0], meta['width'][0]) - transform = rasterio.Affine(*meta['transform']) - region_id = hdf5_f['region_id'].reshape(grid_shape) + meta = hdf5_f["meta"] + grid_shape = (meta["height"][0], meta["width"][0]) + transform = rasterio.Affine(*meta["transform"]) + region_id = hdf5_f["region_id"].reshape(grid_shape) lon, lat = raster_to_meshgrid(transform, grid_shape[1], grid_shape[0]) elif basemap == "isimip": hdf5_f = u_hdf5.read(ISIMIP_GPWV3_NATID_150AS) - dim_lon, dim_lat = hdf5_f['lon'], hdf5_f['lat'] + dim_lon, dim_lat = hdf5_f["lon"], hdf5_f["lat"] bounds = dim_lon.min(), dim_lat.min(), dim_lon.max(), dim_lat.max() orig_res = get_resolution(dim_lon, dim_lat) _, _, transform = pts_to_raster_meta(bounds, orig_res) grid_shape = (dim_lat.size, dim_lon.size) - region_id = hdf5_f['NatIdGrid'].reshape(grid_shape).astype(int) + region_id = hdf5_f["NatIdGrid"].reshape(grid_shape).astype(int) region_id[region_id < 0] = 0 - natid2iso_numeric = 
np.array(country_natid2iso(list(range(231)), "numeric"), dtype=int) + natid2iso_numeric = np.array( + country_natid2iso(list(range(231)), "numeric"), dtype=int + ) region_id = natid2iso_numeric[region_id] lon, lat = np.meshgrid(dim_lon, dim_lat) else: raise ValueError(f"Unknown basemap: {basemap}") - if basemap == "natearth" and resolution not in [150, 360] \ - or basemap == "isimip" and resolution != 150: + if ( + basemap == "natearth" + and resolution not in [150, 360] + or basemap == "isimip" + and resolution != 150 + ): resolution /= 3600 region_id, transform = refine_raster_data( - region_id, transform, resolution, method='nearest', fill_value=0) + region_id, transform, resolution, method="nearest", fill_value=0 + ) grid_shape = region_id.shape lon, lat = raster_to_meshgrid(transform, grid_shape[1], grid_shape[0]) @@ -858,11 +925,13 @@ def get_region_gridpoints(countries=None, regions=None, resolution=150, lat, lon = [ar.ravel() for ar in [lat, lon]] return lat, lon + def assign_grid_points(*args, **kwargs): """This function has been renamed, use ``match_grid_points`` instead.""" LOGGER.warning("This function has been renamed, use match_grid_points instead.") return match_grid_points(*args, **kwargs) + def match_grid_points(x, y, grid_width, grid_height, grid_transform): """To each coordinate in `x` and `y`, assign the closest centroid in the given raster grid @@ -900,13 +969,20 @@ def match_grid_points(x, y, grid_width, grid_height, grid_transform): assigned[(y_i < 0) | (y_i >= grid_height)] = -1 return assigned + def assign_coordinates(*args, **kwargs): """This function has been renamed, use ``match_coordinates`` instead.""" LOGGER.warning("This function has been renamed, use match_coordinates instead.") return match_coordinates(*args, **kwargs) -def match_coordinates(coords, coords_to_assign, distance="euclidean", - threshold=NEAREST_NEIGHBOR_THRESHOLD, **kwargs): + +def match_coordinates( + coords, + coords_to_assign, + distance="euclidean", + threshold=NEAREST_NEIGHBOR_THRESHOLD, + **kwargs, +): """To each coordinate in `coords`, assign a matching coordinate in `coords_to_assign` If there is no exact match for some entry, an attempt is made to assign the geographically @@ -969,40 +1045,54 @@ def match_coordinates(coords, coords_to_assign, distance="euclidean", } if distance not in nearest_neighbor_funcs: raise ValueError( - f'Coordinate assignment with "{distance}" distance is not supported.') + f'Coordinate assignment with "{distance}" distance is not supported.' + ) - coords = coords.astype('float64') - coords_to_assign = coords_to_assign.astype('float64') + coords = coords.astype("float64") + coords_to_assign = coords_to_assign.astype("float64") if np.array_equal(coords, coords_to_assign): assigned_idx = np.arange(coords.shape[0]) else: - LOGGER.info("No exact centroid match found. Reprojecting coordinates " - "to nearest neighbor closer than the threshold = %s", - threshold) + LOGGER.info( + "No exact centroid match found. 
Reprojecting coordinates " + "to nearest neighbor closer than the threshold = %s", + threshold, + ) # pairs of floats can be sorted (lexicographically) in NumPy - coords_view = coords.view(dtype='float64,float64').reshape(-1) - coords_to_assign_view = coords_to_assign.view(dtype='float64,float64').reshape(-1) + coords_view = coords.view(dtype="float64,float64").reshape(-1) + coords_to_assign_view = coords_to_assign.view(dtype="float64,float64").reshape( + -1 + ) # assign each hazard coordsinate to an element in coords using searchsorted coords_sorter = np.argsort(coords_view) - sort_assign_idx = np.fmin(coords_sorter.size - 1, np.searchsorted( - coords_view, coords_to_assign_view, side="left", sorter=coords_sorter)) + sort_assign_idx = np.fmin( + coords_sorter.size - 1, + np.searchsorted( + coords_view, coords_to_assign_view, side="left", sorter=coords_sorter + ), + ) sort_assign_idx = coords_sorter[sort_assign_idx] # determine which of the assignements match exactly - exact_assign_idx = (coords_view[sort_assign_idx] == coords_to_assign_view).nonzero()[0] + exact_assign_idx = ( + coords_view[sort_assign_idx] == coords_to_assign_view + ).nonzero()[0] assigned_idx = np.full_like(coords_sorter, -1) assigned_idx[sort_assign_idx[exact_assign_idx]] = exact_assign_idx # assign remaining coordinates to their geographically nearest neighbor if threshold > 0 and exact_assign_idx.size != coords_view.size: - not_assigned_idx_mask = (assigned_idx == -1) + not_assigned_idx_mask = assigned_idx == -1 assigned_idx[not_assigned_idx_mask] = nearest_neighbor_funcs[distance]( - coords_to_assign, coords[not_assigned_idx_mask], threshold, **kwargs) + coords_to_assign, coords[not_assigned_idx_mask], threshold, **kwargs + ) return assigned_idx -def match_centroids(coord_gdf, centroids, distance='euclidean', - threshold=NEAREST_NEIGHBOR_THRESHOLD): + +def match_centroids( + coord_gdf, centroids, distance="euclidean", threshold=NEAREST_NEIGHBOR_THRESHOLD +): """Assign to each gdf coordinate point its closest centroids's coordinate. If distances > threshold in points' distances, -1 is returned. If centroids are in a raster and coordinate point is outside of it ``-1`` is assigned @@ -1049,18 +1139,21 @@ def match_centroids(coord_gdf, centroids, distance='euclidean', try: if not equal_crs(coord_gdf.crs, centroids.crs): - raise ValueError('Set hazard and GeoDataFrame to same CRS first!') + raise ValueError("Set hazard and GeoDataFrame to same CRS first!") except AttributeError: # If the coord_gdf has no crs defined (or no valid geometry column), # no error is raised and it is assumed that the user set the crs correctly pass assigned = match_coordinates( - np.stack([coord_gdf['latitude'].values, coord_gdf['longitude'].values], axis=1), - centroids.coord, distance=distance, threshold=threshold, + np.stack([coord_gdf["latitude"].values, coord_gdf["longitude"].values], axis=1), + centroids.coord, + distance=distance, + threshold=threshold, ) return assigned + @numba.njit def _dist_sqr_approx(lats1, lons1, cos_lats1, lats2, lons2): """Compute squared equirectangular approximation distance. 
Values need @@ -1069,7 +1162,10 @@ def _dist_sqr_approx(lats1, lons1, cos_lats1, lats2, lons2): d_lat = lats1 - lats2 return d_lon * d_lon * cos_lats1 * cos_lats1 + d_lat * d_lat -def _nearest_neighbor_approx(centroids, coordinates, threshold, check_antimeridian=True): + +def _nearest_neighbor_approx( + centroids, coordinates, threshold, check_antimeridian=True +): """Compute the nearest centroid for each coordinate using the euclidean distance d = ((dlon)cos(lat))^2+(dlat)^2. For distant points (e.g. more than 100km apart) use the haversine distance. @@ -1099,16 +1195,19 @@ def _nearest_neighbor_approx(centroids, coordinates, threshold, check_antimeridi # Compute only for the unique coordinates. Copy the results for the # not unique coordinates - _, idx, inv = np.unique(coordinates, axis=0, return_index=True, - return_inverse=True) + _, idx, inv = np.unique(coordinates, axis=0, return_index=True, return_inverse=True) # Compute cos(lat) for all centroids centr_cos_lat = np.cos(np.radians(centroids[:, 0])) assigned = np.zeros(coordinates.shape[0], int) num_warn = 0 for icoord, iidx in enumerate(idx): - dist = _dist_sqr_approx(centroids[:, 0], centroids[:, 1], - centr_cos_lat, coordinates[iidx, 0], - coordinates[iidx, 1]) + dist = _dist_sqr_approx( + centroids[:, 0], + centroids[:, 1], + centr_cos_lat, + coordinates[iidx, 0], + coordinates[iidx, 1], + ) min_idx = dist.argmin() # Raise a warning if the minimum distance is greater than the # threshold and set an unvalid index -1 @@ -1120,15 +1219,20 @@ def _nearest_neighbor_approx(centroids, coordinates, threshold, check_antimeridi assigned[inv == icoord] = min_idx if num_warn: - LOGGER.warning('Distance to closest centroid is greater than %s' - 'km for %s coordinates.', threshold, num_warn) + LOGGER.warning( + "Distance to closest centroid is greater than %s" "km for %s coordinates.", + threshold, + num_warn, + ) if check_antimeridian: assigned = _nearest_neighbor_antimeridian( - centroids, coordinates, threshold, assigned) + centroids, coordinates, threshold, assigned + ) return assigned + def _nearest_neighbor_haversine(centroids, coordinates, threshold): """Compute the neareast centroid for each coordinate using a Ball tree with haversine distance. 
@@ -1150,15 +1254,18 @@ def _nearest_neighbor_haversine(centroids, coordinates, threshold): with as many rows as coordinates containing the centroids indexes """ # Construct tree from centroids - tree = BallTree(np.radians(centroids), metric='haversine') + tree = BallTree(np.radians(centroids), metric="haversine") # Select unique exposures coordinates - _, idx, inv = np.unique(coordinates, axis=0, return_index=True, - return_inverse=True) + _, idx, inv = np.unique(coordinates, axis=0, return_index=True, return_inverse=True) # query the k closest points of the n_points using dual tree - dist, assigned = tree.query(np.radians(coordinates[idx]), k=1, - return_distance=True, dualtree=True, - breadth_first=False) + dist, assigned = tree.query( + np.radians(coordinates[idx]), + k=1, + return_distance=True, + dualtree=True, + breadth_first=False, + ) # `BallTree.query` returns a row for each entry, even if k=1 (number of nearest neighbors) dist = dist[:, 0] @@ -1168,15 +1275,20 @@ def _nearest_neighbor_haversine(centroids, coordinates, threshold): # threshold and set an unvalid index -1 num_warn = np.sum(dist * EARTH_RADIUS_KM > threshold) if num_warn: - LOGGER.warning('Distance to closest centroid is greater than %s' - 'km for %s coordinates.', threshold, num_warn) + LOGGER.warning( + "Distance to closest centroid is greater than %s" "km for %s coordinates.", + threshold, + num_warn, + ) assigned[dist * EARTH_RADIUS_KM > threshold] = -1 # Copy result to all exposures and return value return assigned[inv] -def _nearest_neighbor_euclidean(centroids, coordinates, threshold, check_antimeridian=True): +def _nearest_neighbor_euclidean( + centroids, coordinates, threshold, check_antimeridian=True +): """Compute the neareast centroid for each coordinate using a k-d tree. 
Parameters @@ -1204,8 +1316,7 @@ def _nearest_neighbor_euclidean(centroids, coordinates, threshold, check_antimer # Construct tree from centroids tree = scipy.spatial.KDTree(np.radians(centroids)) # Select unique exposures coordinates - _, idx, inv = np.unique(coordinates, axis=0, return_index=True, - return_inverse=True) + _, idx, inv = np.unique(coordinates, axis=0, return_index=True, return_inverse=True) # query the k closest points of the n_points using dual tree dist, assigned = tree.query(np.radians(coordinates[idx]), k=1, p=2, workers=-1) @@ -1214,17 +1325,22 @@ def _nearest_neighbor_euclidean(centroids, coordinates, threshold, check_antimer # threshold and set an unvalid index -1 num_warn = np.sum(dist * EARTH_RADIUS_KM > threshold) if num_warn: - LOGGER.warning('Distance to closest centroid is greater than %s' - 'km for %s coordinates.', threshold, num_warn) + LOGGER.warning( + "Distance to closest centroid is greater than %s" "km for %s coordinates.", + threshold, + num_warn, + ) assigned[dist * EARTH_RADIUS_KM > threshold] = -1 if check_antimeridian: assigned = _nearest_neighbor_antimeridian( - centroids, coordinates[idx], threshold, assigned) + centroids, coordinates[idx], threshold, assigned + ) # Copy result to all exposures and return value return assigned[inv] + def _nearest_neighbor_antimeridian(centroids, coordinates, threshold, assigned): """Recompute nearest neighbors close to the anti-meridian with the Haversine distance @@ -1250,26 +1366,31 @@ def _nearest_neighbor_antimeridian(centroids, coordinates, threshold, assigned): lon_min = min(centroids[:, 1].min(), coordinates[:, 1].min()) lon_max = max(centroids[:, 1].max(), coordinates[:, 1].max()) if lon_max - lon_min > 360: - raise ValueError("Longitudinal coordinates need to be normalized" - "to a common 360 degree range") + raise ValueError( + "Longitudinal coordinates need to be normalized" + "to a common 360 degree range" + ) mid_lon = 0.5 * (lon_max + lon_min) antimeridian = mid_lon + 180 thres_deg = np.degrees(threshold / EARTH_RADIUS_KM) coord_strip_bool = coordinates[:, 1] + antimeridian < 1.5 * thres_deg - coord_strip_bool |= coordinates[:, 1] - antimeridian > -1.5 * thres_deg + coord_strip_bool |= coordinates[:, 1] - antimeridian > -1.5 * thres_deg if np.any(coord_strip_bool): coord_strip = coordinates[coord_strip_bool] cent_strip_bool = centroids[:, 1] + antimeridian < 2.5 * thres_deg - cent_strip_bool |= centroids[:, 1] - antimeridian > -2.5 * thres_deg + cent_strip_bool |= centroids[:, 1] - antimeridian > -2.5 * thres_deg if np.any(cent_strip_bool): cent_strip = centroids[cent_strip_bool] - strip_assigned = _nearest_neighbor_haversine(cent_strip, coord_strip, threshold) + strip_assigned = _nearest_neighbor_haversine( + cent_strip, coord_strip, threshold + ) new_coords = cent_strip_bool.nonzero()[0][strip_assigned] new_coords[strip_assigned == -1] = -1 assigned[coord_strip_bool] = new_coords return assigned + def region2isos(regions): """Convert region names to ISO 3166 alpha-3 codes of countries @@ -1287,12 +1408,13 @@ def region2isos(regions): reg_info = pd.read_csv(RIVER_FLOOD_REGIONS_CSV) isos = [] for region in regions: - region_msk = (reg_info['Reg_name'] == region) + region_msk = reg_info["Reg_name"] == region if not any(region_msk): - raise KeyError('Unknown region name: %s' % region) - isos += list(reg_info['ISO'][region_msk].values) + raise KeyError("Unknown region name: %s" % region) + isos += list(reg_info["ISO"][region_msk].values) return list(set(isos)) + def country_to_iso(countries, 
representation="alpha3", fillvalue=None): """Determine ISO 3166 representation of countries @@ -1345,24 +1467,32 @@ def country_to_iso(countries, representation="alpha3", fillvalue=None): try: match = pycountry.historic_countries.lookup(country) except LookupError: - match = next(filter(lambda c: country in c.values(), NONISO_REGIONS), None) + match = next( + filter(lambda c: country in c.values(), NONISO_REGIONS), None + ) if match is not None: match = pycountry.db.Data(**match) elif fillvalue is not None: match = pycountry.db.Data(**{representation: fillvalue}) else: - raise LookupError(f'Unknown country identifier: {country}') from None + raise LookupError( + f"Unknown country identifier: {country}" + ) from None iso = getattr(match, representation) if representation == "numeric": iso = int(iso) iso_list.append(iso) return iso_list[0] if return_single else iso_list + def country_iso_alpha2numeric(iso_alpha): """Deprecated: Use `country_to_iso` with `representation="numeric"` instead""" - LOGGER.warning("country_iso_alpha2numeric is deprecated, use country_to_iso instead.") + LOGGER.warning( + "country_iso_alpha2numeric is deprecated, use country_to_iso instead." + ) return country_to_iso(iso_alpha, "numeric") + def country_natid2iso(natids, representation="alpha3"): """Convert internal NatIDs to ISO 3166-1 alpha-3 codes @@ -1386,12 +1516,13 @@ def country_natid2iso(natids, representation="alpha3"): iso_list = [] for natid in natids: if natid < 0 or natid >= len(ISIMIP_NATID_TO_ISO): - raise LookupError('Unknown country NatID: %s' % natid) + raise LookupError("Unknown country NatID: %s" % natid) iso_list.append(ISIMIP_NATID_TO_ISO[natid]) if representation != "alpha3": iso_list = country_to_iso(iso_list, representation) return iso_list[0] if return_str else iso_list + def country_iso2natid(isos): """Convert ISO 3166-1 alpha-3 codes to internal NatIDs @@ -1412,9 +1543,10 @@ def country_iso2natid(isos): try: natids.append(ISIMIP_NATID_TO_ISO.index(iso)) except ValueError as ver: - raise LookupError(f'Unknown country ISO: {iso}') from ver + raise LookupError(f"Unknown country ISO: {iso}") from ver return natids[0] if return_int else natids + def natearth_country_to_int(country): """Integer representation (ISO 3166, if possible) of Natural Earth GeoPandas country row @@ -1428,10 +1560,11 @@ def natearth_country_to_int(country): iso_numeric : int Integer representation of given country. """ - if country.ISO_N3 != '-99': + if country.ISO_N3 != "-99": return int(country.ISO_N3) return country_to_iso(str(country.NAME), representation="numeric") + def get_country_code(lat, lon, gridded=False): """Provide numeric (ISO 3166) code for every point. 
@@ -1455,40 +1588,47 @@ def get_country_code(lat, lon, gridded=False): lat, lon = [np.asarray(ar).ravel() for ar in [lat, lon]] if lat.size == 0: return np.empty((0,), dtype=int) - LOGGER.info('Setting region_id %s points.', str(lat.size)) + LOGGER.info("Setting region_id %s points.", str(lat.size)) if gridded: base_file = u_hdf5.read(NATEARTH_CENTROIDS[150]) - meta, region_id = base_file['meta'], base_file['region_id'] - transform = rasterio.Affine(*meta['transform']) - region_id = region_id.reshape(meta['height'][0], meta['width'][0]) - region_id = interp_raster_data(region_id, lat, lon, transform, - method='nearest', fill_value=0) + meta, region_id = base_file["meta"], base_file["region_id"] + transform = rasterio.Affine(*meta["transform"]) + region_id = region_id.reshape(meta["height"][0], meta["width"][0]) + region_id = interp_raster_data( + region_id, lat, lon, transform, method="nearest", fill_value=0 + ) region_id = region_id.astype(int) else: (lon_min, lat_min, lon_max, lat_max) = latlon_bounds(lat, lon, 0.001) countries = get_country_geometries( - extent=(lon_min, lon_max, lat_min, lat_max), center_crs=False) + extent=(lon_min, lon_max, lat_min, lat_max), center_crs=False + ) with warnings.catch_warnings(): # in order to suppress the following # UserWarning: Geometry is in a geographic CRS. Results from 'area' are likely # incorrect. Use 'GeoSeries.to_crs()' to re-project geometries to a projected CRS # before this operation. - warnings.simplefilter('ignore', UserWarning) - countries['area'] = countries.geometry.area - countries = countries.sort_values(by=['area'], ascending=False) + warnings.simplefilter("ignore", UserWarning) + countries["area"] = countries.geometry.area + countries = countries.sort_values(by=["area"], ascending=False) region_id = np.full((lon.size,), -1, dtype=int) total_land = countries.geometry.unary_union - ocean_mask = (region_id.all() if total_land is None - else ~shapely.vectorized.contains(total_land, lon, lat)) + ocean_mask = ( + region_id.all() + if total_land is None + else ~shapely.vectorized.contains(total_land, lon, lat) + ) region_id[ocean_mask] = 0 for country in countries.itertuples(): unset = (region_id == -1).nonzero()[0] - select = shapely.vectorized.contains(country.geometry, - lon[unset], lat[unset]) + select = shapely.vectorized.contains( + country.geometry, lon[unset], lat[unset] + ) region_id[unset[select]] = natearth_country_to_int(country) region_id[region_id == -1] = 0 return region_id + def get_admin1_info(country_names): """Provide Natural Earth registry info and shape files for admin1 regions @@ -1506,6 +1646,7 @@ def get_admin1_info(country_names): admin1_shapes : dict Shape according to Natural Earth. 
""" + def _ensure_utf8(val): # Without the `*.cpg` file present, the shape reader wrongly assumes latin-1 encoding: # https://github.com/SciTools/cartopy/issues/1282 @@ -1513,7 +1654,7 @@ def _ensure_utf8(val): # As a workaround, we encode and decode again, unless this fails which means # that the `*.cpg` is present and the encoding is correct: try: - return val.encode('latin-1').decode('utf-8') + return val.encode("latin-1").decode("utf-8") except (AttributeError, UnicodeDecodeError, UnicodeEncodeError): return val @@ -1522,29 +1663,30 @@ def _ensure_utf8(val): if not isinstance(country_names, list): LOGGER.error("country_names needs to be of type list, str, int or float") raise TypeError("Invalid type for input parameter 'country_names'") - admin1_file = shapereader.natural_earth(resolution='10m', - category='cultural', - name='admin_1_states_provinces') + admin1_file = shapereader.natural_earth( + resolution="10m", category="cultural", name="admin_1_states_provinces" + ) admin1_recs = shapereader.Reader(admin1_file) admin1_info = dict() admin1_shapes = dict() for country in country_names: if isinstance(country, (int, float)): # transform numerric code to str - country = f'{int(country):03d}' + country = f"{int(country):03d}" # get alpha-3 code according to ISO 3166 country = pycountry.countries.lookup(country).alpha_3 admin1_info[country] = list() admin1_shapes[country] = list() for rec in admin1_recs.records(): - if rec.attributes['adm0_a3'] == country: + if rec.attributes["adm0_a3"] == country: rec_attributes = {k: _ensure_utf8(v) for k, v in rec.attributes.items()} admin1_info[country].append(rec_attributes) admin1_shapes[country].append(rec.geometry) if len(admin1_info[country]) == 0: - raise LookupError(f'natural_earth records are empty for country {country}') + raise LookupError(f"natural_earth records are empty for country {country}") return admin1_info, admin1_shapes + def get_admin1_geometries(countries): """ return geometries, names and codes of admin 1 regions in given countries @@ -1575,26 +1717,31 @@ def get_admin1_geometries(countries): """ # init empty GeoDataFrame: gdf = gpd.GeoDataFrame( - columns = ("admin1_name", "iso_3166_2", "geometry", "iso_3n", "iso_3a")) + columns=("admin1_name", "iso_3166_2", "geometry", "iso_3n", "iso_3a") + ) # extract admin 1 infos and shapes for each country: admin1_info, admin1_shapes = get_admin1_info(countries) for country in admin1_info: # fill admin 1 region names and codes to GDF for single country: gdf_tmp = gpd.GeoDataFrame(columns=gdf.columns) - gdf_tmp['admin1_name'] = [record['name'] for record in admin1_info[country]] - gdf_tmp['iso_3166_2'] = [record['iso_3166_2'] for record in admin1_info[country]] + gdf_tmp["admin1_name"] = [record["name"] for record in admin1_info[country]] + gdf_tmp["iso_3166_2"] = [ + record["iso_3166_2"] for record in admin1_info[country] + ] # With this initiation of GeoSeries in a list comprehension, # the ability of geopandas to convert shapereader.Shape to (Multi)Polygon is exploited: - geoseries = gpd.GeoSeries([gpd.GeoSeries(shape).values[0] - for shape in admin1_shapes[country]]) + geoseries = gpd.GeoSeries( + [gpd.GeoSeries(shape).values[0] for shape in admin1_shapes[country]] + ) gdf_tmp.geometry = list(geoseries) # fill columns with country identifiers (admin 0): - gdf_tmp['iso_3n'] = pycountry.countries.lookup(country).numeric - gdf_tmp['iso_3a'] = country + gdf_tmp["iso_3n"] = pycountry.countries.lookup(country).numeric + gdf_tmp["iso_3a"] = country gdf = pd.concat([gdf, gdf_tmp], 
ignore_index=True) return gdf + def get_resolution_1d(coords, min_resol=1.0e-8): """Compute resolution of scalar grid @@ -1668,6 +1815,7 @@ def pts_to_raster_meta(points_bounds, res): ras_trans = rasterio.Affine.translation(*origin) * rasterio.Affine.scale(*res) return int(nsteps[1]), int(nsteps[0]), ras_trans + def raster_to_meshgrid(transform, width, height): """Get coordinates of grid points in raster @@ -1690,8 +1838,9 @@ def raster_to_meshgrid(transform, width, height): xres, _, xmin, _, yres, ymin = transform[:6] xmax = xmin + width * xres ymax = ymin + height * yres - return np.meshgrid(np.arange(xmin + xres / 2, xmax, xres), - np.arange(ymin + yres / 2, ymax, yres)) + return np.meshgrid( + np.arange(xmin + xres / 2, xmax, xres), np.arange(ymin + yres / 2, ymax, yres) + ) def to_crs_user_input(crs_obj): @@ -1717,15 +1866,18 @@ def to_crs_user_input(crs_obj): ValueError if type(crs_obj) has the wrong type """ + def _is_deprecated_init_crs(crs_dict): - return (isinstance(crs_dict, dict) - and "init" in crs_dict - and all(k in ["init", "no_defs"] for k in crs_dict.keys()) - and crs_dict.get("no_defs", True) is True) + return ( + isinstance(crs_dict, dict) + and "init" in crs_dict + and all(k in ["init", "no_defs"] for k in crs_dict.keys()) + and crs_dict.get("no_defs", True) is True + ) if isinstance(crs_obj, (dict, int)): if _is_deprecated_init_crs(crs_obj): - return crs_obj['init'] + return crs_obj["init"] return crs_obj crs_string = crs_obj.decode() if isinstance(crs_obj, bytes) else crs_obj @@ -1733,10 +1885,10 @@ def _is_deprecated_init_crs(crs_dict): if not isinstance(crs_string, str): raise ValueError(f"crs has unhandled data set type: {type(crs_string)}") - if crs_string[0] == '{': + if crs_string[0] == "{": crs_dict = ast.literal_eval(crs_string) if _is_deprecated_init_crs(crs_dict): - return crs_dict['init'] + return crs_dict["init"] return crs_dict return crs_string @@ -1759,10 +1911,21 @@ def equal_crs(crs_one, crs_two): """ if crs_one is None: return crs_two is None - return rasterio.crs.CRS.from_user_input(crs_one) == rasterio.crs.CRS.from_user_input(crs_two) - -def _read_raster_reproject(src, src_crs, dst_meta, band=None, geometry=None, dst_crs=None, - transform=None, resampling="nearest"): + return rasterio.crs.CRS.from_user_input( + crs_one + ) == rasterio.crs.CRS.from_user_input(crs_two) + + +def _read_raster_reproject( + src, + src_crs, + dst_meta, + band=None, + geometry=None, + dst_crs=None, + transform=None, + resampling="nearest", +): """Helper function for `read_raster`.""" if isinstance(resampling, str): resampling = getattr(rasterio.warp.Resampling, resampling) @@ -1772,19 +1935,22 @@ def _read_raster_reproject(src, src_crs, dst_meta, band=None, geometry=None, dst dst_crs = src_crs if not transform: transform, width, height = rasterio.warp.calculate_default_transform( - src_crs, dst_crs, src.width, src.height, *src.bounds) + src_crs, dst_crs, src.width, src.height, *src.bounds + ) else: transform, width, height = transform - dst_meta.update({ - 'crs': dst_crs, - 'transform': transform, - 'width': width, - 'height': height, - }) + dst_meta.update( + { + "crs": dst_crs, + "transform": transform, + "width": width, + "height": height, + } + ) kwargs = {} - if src.meta['nodata']: - kwargs['src_nodata'] = src.meta['nodata'] - kwargs['dst_nodata'] = src.meta['nodata'] + if src.meta["nodata"]: + kwargs["src_nodata"] = src.meta["nodata"] + kwargs["dst_nodata"] = src.meta["nodata"] intensity = np.zeros((len(band), height, width)) for idx_band, i_band in enumerate(band): 
@@ -1796,40 +1962,46 @@ def _read_raster_reproject(src, src_crs, dst_meta, band=None, geometry=None, dst dst_transform=transform, dst_crs=dst_crs, resampling=resampling, - **kwargs) + **kwargs, + ) - if dst_meta['nodata'] and np.isnan(dst_meta['nodata']): + if dst_meta["nodata"] and np.isnan(dst_meta["nodata"]): nodata_mask = np.isnan(intensity[idx_band, :]) else: - nodata_mask = (intensity[idx_band, :] == dst_meta['nodata']) + nodata_mask = intensity[idx_band, :] == dst_meta["nodata"] intensity[idx_band, :][nodata_mask] = 0 if geometry: - intensity = intensity.astype('float32') + intensity = intensity.astype("float32") # update driver to GTiff as netcdf does not work reliably - dst_meta.update(driver='GTiff') + dst_meta.update(driver="GTiff") with rasterio.MemoryFile() as memfile: with memfile.open(**dst_meta) as dst: dst.write(intensity) with memfile.open() as dst: - inten, mask_trans = rasterio.mask.mask(dst, geometry, crop=True, indexes=band) - dst_meta.update({ - "height": inten.shape[1], - "width": inten.shape[2], - "transform": mask_trans, - }) + inten, mask_trans = rasterio.mask.mask( + dst, geometry, crop=True, indexes=band + ) + dst_meta.update( + { + "height": inten.shape[1], + "width": inten.shape[2], + "transform": mask_trans, + } + ) intensity = inten[range(len(band)), :] - intensity = intensity.astype('float64') + intensity = intensity.astype("float64") # reset nodata values again as driver Gtiff resets them again - if dst_meta['nodata'] and np.isnan(dst_meta['nodata']): + if dst_meta["nodata"] and np.isnan(dst_meta["nodata"]): intensity[np.isnan(intensity)] = 0 else: - intensity[intensity == dst_meta['nodata']] = 0 + intensity[intensity == dst_meta["nodata"]] = 0 return intensity + def _add_gdal_vsi_prefix(path): """Add one of GDAL's virtual file system prefixes if applicable @@ -1863,8 +2035,19 @@ def _add_gdal_vsi_prefix(path): path = f"/vsi{supported_suffixes[suffix]}/{path}" return str(path) -def read_raster(file_name, band=None, src_crs=None, window=None, geometry=None, - dst_crs=None, transform=None, width=None, height=None, resampling="nearest"): + +def read_raster( + file_name, + band=None, + src_crs=None, + window=None, + geometry=None, + dst_crs=None, + transform=None, + width=None, + height=None, + resampling="nearest", +): """Read raster of bands and set 0-values to the masked ones. 
Parameters @@ -1900,29 +2083,38 @@ def read_raster(file_name, band=None, src_crs=None, window=None, geometry=None, """ if not band: band = [1] - LOGGER.info('Reading %s', file_name) + LOGGER.info("Reading %s", file_name) with rasterio.Env(): - with rasterio.open(_add_gdal_vsi_prefix(file_name), 'r') as src: + with rasterio.open(_add_gdal_vsi_prefix(file_name), "r") as src: dst_meta = src.meta.copy() if dst_crs or transform: - LOGGER.debug('Reprojecting ...') + LOGGER.debug("Reprojecting ...") src_crs = src.crs if src_crs is None else src_crs if not src_crs: src_crs = rasterio.crs.CRS.from_user_input(DEF_CRS) transform = (transform, width, height) if transform else None - inten = _read_raster_reproject(src, src_crs, dst_meta, band=band, - geometry=geometry, dst_crs=dst_crs, - transform=transform, resampling=resampling) + inten = _read_raster_reproject( + src, + src_crs, + dst_meta, + band=band, + geometry=geometry, + dst_crs=dst_crs, + transform=transform, + resampling=resampling, + ) else: if geometry: - inten, trans = rasterio.mask.mask(src, geometry, crop=True, indexes=band) - if dst_meta['nodata'] and np.isnan(dst_meta['nodata']): + inten, trans = rasterio.mask.mask( + src, geometry, crop=True, indexes=band + ) + if dst_meta["nodata"] and np.isnan(dst_meta["nodata"]): inten[np.isnan(inten)] = 0 else: - inten[inten == dst_meta['nodata']] = 0 + inten[inten == dst_meta["nodata"]] = 0 else: masked_array = src.read(band, window=window, masked=True) @@ -1932,24 +2124,34 @@ def read_raster(file_name, band=None, src_crs=None, window=None, geometry=None, if window: trans = rasterio.windows.transform(window, src.transform) else: - trans = dst_meta['transform'] + trans = dst_meta["transform"] - dst_meta.update({ - "height": inten.shape[1], - "width": inten.shape[2], - "transform": trans, - }) + dst_meta.update( + { + "height": inten.shape[1], + "width": inten.shape[2], + "transform": trans, + } + ) - if not dst_meta['crs']: - dst_meta['crs'] = rasterio.crs.CRS.from_user_input(DEF_CRS) + if not dst_meta["crs"]: + dst_meta["crs"] = rasterio.crs.CRS.from_user_input(DEF_CRS) intensity = inten[range(len(band)), :] - dst_shape = (len(band), dst_meta['height'] * dst_meta['width']) + dst_shape = (len(band), dst_meta["height"] * dst_meta["width"]) return dst_meta, intensity.reshape(dst_shape) -def read_raster_bounds(path, bounds, res=None, bands=None, resampling="nearest", - global_origin=None, pad_cells=1.0): + +def read_raster_bounds( + path, + bounds, + res=None, + bands=None, + resampling="nearest", + global_origin=None, + pad_cells=1.0, +): """Read raster file within given bounds at given resolution By default, not only the grid cells of the destination raster whose cell centers fall within @@ -1996,7 +2198,7 @@ def read_raster_bounds(path, bounds, res=None, bands=None, resampling="nearest", resampling = getattr(rasterio.warp.Resampling, resampling) if not bands: bands = [1] - with rasterio.open(_add_gdal_vsi_prefix(path), 'r') as src: + with rasterio.open(_add_gdal_vsi_prefix(path), "r") as src: if res: if not isinstance(res, tuple): res = (res, res) @@ -2005,8 +2207,12 @@ def read_raster_bounds(path, bounds, res=None, bands=None, resampling="nearest", res = (np.abs(res[0]), np.abs(res[1])) # make sure that the extent of pixel centers covers the specified region - bounds = (bounds[0] - pad_cells * res[0], bounds[1] - pad_cells * res[1], - bounds[2] + pad_cells * res[0], bounds[3] + pad_cells * res[1]) + bounds = ( + bounds[0] - pad_cells * res[0], + bounds[1] - pad_cells * res[1], + bounds[2] + 
pad_cells * res[0], + bounds[3] + pad_cells * res[1], + ) if src.crs is not None and src.crs.to_epsg() == 4326: # We consider WGS84 (EPSG:4326) as a special case just because it's so common. @@ -2015,9 +2221,13 @@ def read_raster_bounds(path, bounds, res=None, bands=None, resampling="nearest", if global_origin is None: global_origin = (src.transform[2], src.transform[5]) - res_signed = (np.sign(src.transform[0]) * res[0], np.sign(src.transform[4]) * res[1]) + res_signed = ( + np.sign(src.transform[0]) * res[0], + np.sign(src.transform[4]) * res[1], + ) global_transform = rasterio.transform.from_origin( - *global_origin, res_signed[0], -res_signed[1]) + *global_origin, res_signed[0], -res_signed[1] + ) transform, shape = subraster_from_bounds(global_transform, bounds) data = np.zeros((len(bands),) + shape, dtype=src.dtypes[0]) @@ -2030,9 +2240,11 @@ def read_raster_bounds(path, bounds, res=None, bands=None, resampling="nearest", src_crs=src.crs, dst_transform=transform, dst_crs=crs, - resampling=resampling) + resampling=resampling, + ) return data, transform + def _raster_gradient(data, transform, latlon_to_m=False): """Compute the gradient of raster data using finite differences @@ -2059,7 +2271,7 @@ def _raster_gradient(data, transform, latlon_to_m=False): Affine transformation defining the output raster. """ xres, _, _, _, yres = transform[:5] - gradient_transform = rasterio.Affine.translation(0.5 * xres, 0.5 * yres) * transform + gradient_transform = rasterio.Affine.translation(0.5 * xres, 0.5 * yres) * transform if latlon_to_m: height, width = [s - 1 for s in data.shape] @@ -2075,6 +2287,7 @@ def _raster_gradient(data, transform, latlon_to_m=False): return gradient_data, gradient_transform + def _prepare_raster_sample(path, lat, lon, intermediate_res, fill_value): """Helper function for the sampling of points from a raster file. @@ -2105,16 +2318,18 @@ def _prepare_raster_sample(path, lat, lon, intermediate_res, fill_value): crs : CRS The CRS of the raster file. """ - LOGGER.info('Sampling from %s', path) + LOGGER.info("Sampling from %s", path) with rasterio.open(_add_gdal_vsi_prefix(path), "r") as src: if intermediate_res is None: intermediate_res = (np.abs(src.transform[0]), np.abs(src.transform[4])) - meta_nodata = src.meta['nodata'] + meta_nodata = src.meta["nodata"] crs = src.crs bounds = (lon.min(), lat.min(), lon.max(), lat.max()) - data, transform = read_raster_bounds(path, bounds, res=intermediate_res, pad_cells=2) + data, transform = read_raster_bounds( + path, bounds, res=intermediate_res, pad_cells=2 + ) data = data[0, :, :] if fill_value is not None: @@ -2125,7 +2340,10 @@ def _prepare_raster_sample(path, lat, lon, intermediate_res, fill_value): return data, transform, fill_value, crs -def read_raster_sample(path, lat, lon, intermediate_res=None, method='linear', fill_value=None): + +def read_raster_sample( + path, lat, lon, intermediate_res=None, method="linear", fill_value=None +): """Read point samples from raster file. 
Parameters @@ -2153,13 +2371,17 @@ def read_raster_sample(path, lat, lon, intermediate_res=None, method='linear', f return np.zeros_like(lat) data, transform, fill_value, _ = _prepare_raster_sample( - path, lat, lon, intermediate_res, fill_value) + path, lat, lon, intermediate_res, fill_value + ) return interp_raster_data( - data, lat, lon, transform, method=method, fill_value=fill_value) + data, lat, lon, transform, method=method, fill_value=fill_value + ) + -def read_raster_sample_with_gradients(path, lat, lon, intermediate_res=None, - method=('linear', 'nearest'), fill_value=None): +def read_raster_sample_with_gradients( + path, lat, lon, intermediate_res=None, method=("linear", "nearest"), fill_value=None +): """Read point samples with computed gradients from raster file. For convenience, and because this is the most common use case, the step sizes in the gradient @@ -2205,19 +2427,25 @@ def read_raster_sample_with_gradients(path, lat, lon, intermediate_res=None, method = (method, method) data, transform, fill_value, crs = _prepare_raster_sample( - path, lat, lon, intermediate_res, fill_value) + path, lat, lon, intermediate_res, fill_value + ) interp_data = interp_raster_data( - data, lat, lon, transform, method=method[0], fill_value=fill_value) + data, lat, lon, transform, method=method[0], fill_value=fill_value + ) is_latlon = crs is not None and crs.to_epsg() == 4326 grad_data, grad_transform = _raster_gradient(data, transform, latlon_to_m=is_latlon) interp_grad = interp_raster_data( - grad_data, lat, lon, grad_transform, method=method[1], fill_value=fill_value) + grad_data, lat, lon, grad_transform, method=method[1], fill_value=fill_value + ) return interp_data, interp_grad -def interp_raster_data(data, interp_y, interp_x, transform, method='linear', fill_value=0): + +def interp_raster_data( + data, interp_y, interp_x, transform, method="linear", fill_value=0 +): """Interpolate raster data, given as array and affine transform Parameters @@ -2247,7 +2475,9 @@ def interp_raster_data(data, interp_y, interp_x, transform, method='linear', fil xres, _, xmin, _, yres, ymin = transform[:6] xmax = xmin + data.shape[1] * xres ymax = ymin + data.shape[0] * yres - data = np.pad(data, [(1, 1) if i < 2 else (0, 0) for i in range(data.ndim)], mode='edge') + data = np.pad( + data, [(1, 1) if i < 2 else (0, 0) for i in range(data.ndim)], mode="edge" + ) if yres < 0: yres = -yres @@ -2262,10 +2492,17 @@ def interp_raster_data(data, interp_y, interp_x, transform, method='linear', fil data = np.array(data, dtype=np.float64) data[np.isnan(data)] = fill_value - return scipy.interpolate.interpn((y_dim, x_dim), data, np.vstack([interp_y, interp_x]).T, - method=method, bounds_error=False, fill_value=fill_value) + return scipy.interpolate.interpn( + (y_dim, x_dim), + data, + np.vstack([interp_y, interp_x]).T, + method=method, + bounds_error=False, + fill_value=fill_value, + ) + -def refine_raster_data(data, transform, res, method='linear', fill_value=0): +def refine_raster_data(data, transform, res, method="linear", fill_value=0): """Refine raster data, given as array and affine transform Parameters @@ -2297,11 +2534,13 @@ def refine_raster_data(data, transform, res, method='linear', fill_value=0): new_shape = (new_dimy.size, new_dimx.size) new_x, new_y = [ar.ravel() for ar in np.meshgrid(new_dimx, new_dimy)] new_transform = rasterio.Affine(res[0], 0, xmin, 0, res[1], ymin) - new_data = interp_raster_data(data, new_y, new_x, transform, method=method, - fill_value=fill_value) + new_data = interp_raster_data( 
+ data, new_y, new_x, transform, method=method, fill_value=fill_value + ) new_data = new_data.reshape(new_shape) return new_data, new_transform + def read_vector(file_name, field_name, dst_crs=None): """Read vector file format supported by fiona. @@ -2325,7 +2564,7 @@ def read_vector(file_name, field_name, dst_crs=None): value : np.array Values associated to each shape. """ - LOGGER.info('Reading %s', file_name) + LOGGER.info("Reading %s", file_name) data_frame = gpd.read_file(file_name) if not data_frame.crs: data_frame.crs = DEF_CRS @@ -2339,6 +2578,7 @@ def read_vector(file_name, field_name, dst_crs=None): value[i_inten, :] = data_frame[inten].values return lat, lon, geometry, value + def write_raster(file_name, data_matrix, meta, dtype=np.float32): """Write raster in GeoTiff format. @@ -2355,20 +2595,22 @@ def write_raster(file_name, data_matrix, meta, dtype=np.float32): dtype : numpy dtype, optional A numpy dtype. Default: np.float32 """ - LOGGER.info('Writting %s', file_name) - if data_matrix.shape != (meta['height'], meta['width']): + LOGGER.info("Writting %s", file_name) + if data_matrix.shape != (meta["height"], meta["width"]): # every row is an event (from hazard intensity or fraction) == band - shape = (data_matrix.shape[0], meta['height'], meta['width']) + shape = (data_matrix.shape[0], meta["height"], meta["width"]) else: - shape = (1, meta['height'], meta['width']) + shape = (1, meta["height"], meta["width"]) dst_meta = copy.deepcopy(meta) - dst_meta.update(driver='GTiff', dtype=dtype, count=shape[0]) + dst_meta.update(driver="GTiff", dtype=dtype, count=shape[0]) data_matrix = np.asarray(data_matrix, dtype=dtype).reshape(shape) - with rasterio.open(file_name, 'w', **dst_meta) as dst: + with rasterio.open(file_name, "w", **dst_meta) as dst: dst.write(data_matrix, indexes=np.arange(1, shape[0] + 1)) -def points_to_raster(points_df, val_names=None, res=0.0, raster_res=0.0, crs=DEF_CRS, - scheduler=None): + +def points_to_raster( + points_df, val_names=None, res=0.0, raster_res=0.0, crs=DEF_CRS, scheduler=None +): """Compute raster (as data and transform) from GeoDataFrame. Parameters @@ -2399,52 +2641,65 @@ def points_to_raster(points_df, val_names=None, res=0.0, raster_res=0.0, crs=DEF Dictionary with 'crs', 'height', 'width' and 'transform' attributes. 
""" if not val_names: - val_names = ['value'] + val_names = ["value"] if not res: - res = np.abs(get_resolution(points_df['latitude'].values, - points_df['longitude'].values)).min() + res = np.abs( + get_resolution(points_df["latitude"].values, points_df["longitude"].values) + ).min() if not raster_res: raster_res = res def apply_box(df_exp): - fun = lambda r: Point(r['longitude'], r['latitude']).buffer(res / 2).envelope + fun = lambda r: Point(r["longitude"], r["latitude"]).buffer(res / 2).envelope return df_exp.apply(fun, axis=1) - LOGGER.info('Raster from resolution %s to %s.', res, raster_res) + LOGGER.info("Raster from resolution %s to %s.", res, raster_res) df_poly = gpd.GeoDataFrame(points_df[val_names]) if not scheduler: - df_poly['_-geometry-prov'] = apply_box(points_df) + df_poly["_-geometry-prov"] = apply_box(points_df) else: - ddata = dd.from_pandas(points_df[['latitude', 'longitude']], - npartitions=cpu_count()) - df_poly['_-geometry-prov'] = ddata.map_partitions( - apply_box).compute(scheduler=scheduler) + ddata = dd.from_pandas( + points_df[["latitude", "longitude"]], npartitions=cpu_count() + ) + df_poly["_-geometry-prov"] = ddata.map_partitions(apply_box).compute( + scheduler=scheduler + ) # depending on the dask/pandas version setting `meta=Polygon` in map_partitions # would just raise a warning and returns a string, so we have to convert explicitly - if isinstance(df_poly.loc[0, '_-geometry-prov'], str): # fails for empty `points_df` - df_poly['_-geometry-prov'] = shapely.wkt.loads(df_poly['_-geometry-prov']) - - df_poly.set_geometry('_-geometry-prov', - crs=crs if crs else points_df.crs if points_df.crs else DEF_CRS, - inplace=True, - drop=True) + if isinstance( + df_poly.loc[0, "_-geometry-prov"], str + ): # fails for empty `points_df` + df_poly["_-geometry-prov"] = shapely.wkt.loads(df_poly["_-geometry-prov"]) + + df_poly.set_geometry( + "_-geometry-prov", + crs=crs if crs else points_df.crs if points_df.crs else DEF_CRS, + inplace=True, + drop=True, + ) # renormalize longitude if necessary if equal_crs(df_poly.crs, DEF_CRS): - xmin, ymin, xmax, ymax = latlon_bounds(points_df['latitude'].values, - points_df['longitude'].values) + xmin, ymin, xmax, ymax = latlon_bounds( + points_df["latitude"].values, points_df["longitude"].values + ) x_mid = 0.5 * (xmin + xmax) # we don't really change the CRS when rewrapping, so we reset the CRS attribute afterwards - df_poly = df_poly \ - .to_crs({"proj": "longlat", "lon_wrap": x_mid}) \ - .set_crs(DEF_CRS, allow_override=True) + df_poly = df_poly.to_crs({"proj": "longlat", "lon_wrap": x_mid}).set_crs( + DEF_CRS, allow_override=True + ) else: - xmin, ymin, xmax, ymax = (points_df['longitude'].min(), points_df['latitude'].min(), - points_df['longitude'].max(), points_df['latitude'].max()) + xmin, ymin, xmax, ymax = ( + points_df["longitude"].min(), + points_df["latitude"].min(), + points_df["longitude"].max(), + points_df["latitude"].max(), + ) # construct raster - rows, cols, ras_trans = pts_to_raster_meta((xmin, ymin, xmax, ymax), - (raster_res, -raster_res)) + rows, cols, ras_trans = pts_to_raster_meta( + (xmin, ymin, xmax, ymax), (raster_res, -raster_res) + ) raster_out = np.zeros((len(val_names), rows, cols)) # TODO: parallel rasterize @@ -2455,16 +2710,18 @@ def apply_box(df_exp): transform=ras_trans, fill=0, all_touched=True, - dtype=rasterio.float32) + dtype=rasterio.float32, + ) meta = { - 'crs': df_poly.crs, - 'height': rows, - 'width': cols, - 'transform': ras_trans, + "crs": df_poly.crs, + "height": rows, + "width": cols, 
+ "transform": ras_trans, } return raster_out, meta + def subraster_from_bounds(transform, bounds): """Compute a subraster definition from a given reference transform and bounds. @@ -2494,16 +2751,30 @@ def subraster_from_bounds(transform, bounds): # align the window bounds to the raster by rounding col_min, col_max = np.round(window.col_off), np.round(window.col_off + window.width) - row_min, row_max = np.round(window.row_off), np.round(window.row_off + window.height) - window = rasterio.windows.Window(col_min, row_min, col_max - col_min, row_max - row_min) + row_min, row_max = np.round(window.row_off), np.round( + window.row_off + window.height + ) + window = rasterio.windows.Window( + col_min, row_min, col_max - col_min, row_max - row_min + ) dst_transform = rasterio.windows.transform(window, transform) dst_shape = (int(window.height), int(window.width)) return dst_transform, dst_shape -def align_raster_data(source, src_crs, src_transform, dst_crs=None, dst_resolution=None, - dst_bounds=None, global_origin=(-180, 90), resampling="nearest", - conserve=None, **kwargs): + +def align_raster_data( + source, + src_crs, + src_transform, + dst_crs=None, + dst_resolution=None, + dst_bounds=None, + global_origin=(-180, 90), + resampling="nearest", + conserve=None, + **kwargs, +): """Reproject 2D np.ndarray to be aligned to a reference grid. This function ensures that reprojected data with the same dst_resolution and global_origins are @@ -2577,14 +2848,16 @@ def align_raster_data(source, src_crs, src_transform, dst_crs=None, dst_resoluti destination = np.zeros(dst_shape, dtype=source.dtype) try: - rasterio.warp.reproject(source=source, - destination=destination, - src_transform=src_transform, - src_crs=src_crs, - dst_transform=dst_transform, - dst_crs=dst_crs, - resampling=resampling, - **kwargs) + rasterio.warp.reproject( + source=source, + destination=destination, + src_transform=src_transform, + src_crs=src_crs, + dst_transform=dst_transform, + dst_crs=dst_crs, + resampling=resampling, + **kwargs, + ) except Exception as raster_exc: # rasterio doesn't expose all of their error classes # in particular: rasterio._err.CPLE_AppDefinedError @@ -2592,16 +2865,17 @@ def align_raster_data(source, src_crs, src_transform, dst_crs=None, dst_resoluti # e.g. in litpop._get_litpop_single_polygon raise ValueError(raster_exc) from raster_exc - if conserve == 'mean': + if conserve == "mean": destination *= source.mean() / destination.mean() - elif conserve == 'sum': + elif conserve == "sum": destination *= source.sum() / destination.sum() - elif conserve == 'norm': + elif conserve == "norm": destination *= 1.0 / destination.sum() elif conserve is not None: raise ValueError(f"Invalid value for conserve: {conserve}") return destination, dst_transform + def mask_raster_with_geometry(raster, transform, shapes, nodata=None, **kwargs): """ Change values in `raster` that are outside of given `shapes` to `nodata`. @@ -2633,7 +2907,7 @@ def mask_raster_with_geometry(raster, transform, shapes, nodata=None, **kwargs): """ with rasterio.io.MemoryFile() as memfile: with memfile.open( - driver='GTiff', + driver="GTiff", height=raster.shape[0], width=raster.shape[1], count=1, @@ -2645,6 +2919,7 @@ def mask_raster_with_geometry(raster, transform, shapes, nodata=None, **kwargs): output, _ = rasterio.mask.mask(dataset, shapes, nodata=nodata, **kwargs) return output.squeeze(0) + def set_df_geometry_points(df_val, scheduler=None, crs=None): """Set given geometry to given dataframe using dask if scheduler. 
@@ -2660,16 +2935,22 @@ def set_df_geometry_points(df_val, scheduler=None, crs=None): crs : object (anything readable by pyproj4.CRS.from_user_input), optional Coordinate Reference System, if omitted or None: df_val.geometry.crs """ - LOGGER.info('Setting geometry points.') + LOGGER.info("Setting geometry points.") if scheduler is not None: - warnings.warn("This function does not use dask features anymore. The parameter has no" - " effect and will be removed in a future version.", DeprecationWarning) + warnings.warn( + "This function does not use dask features anymore. The parameter has no" + " effect and will be removed in a future version.", + DeprecationWarning, + ) # keep the original crs if any - crs = df_val['crs'] if crs is None else crs # crs might now still be None + crs = df_val["crs"] if crs is None else crs # crs might now still be None - df_val.set_geometry(gpd.points_from_xy(df_val['longitude'], df_val['latitude']), - inplace=True, crs=crs) + df_val.set_geometry( + gpd.points_from_xy(df_val["longitude"], df_val["latitude"]), + inplace=True, + crs=crs, + ) def fao_code_def(): @@ -2685,8 +2966,8 @@ def fao_code_def(): # FAO_FILE2: contains FAO country codes and correstponding ISO3 Code # (http://www.fao.org/faostat/en/#definitions) fao_file = pd.read_csv(SYSTEM_DIR.joinpath("FAOSTAT_data_country_codes.csv")) - fao_code = getattr(fao_file, 'Country Code').values - fao_iso = (getattr(fao_file, 'ISO3 Code').values).tolist() + fao_code = getattr(fao_file, "Country Code").values + fao_iso = (getattr(fao_file, "ISO3 Code").values).tolist() # create a list of ISO3 codes and corresponding fao country codes iso_list = list() @@ -2698,6 +2979,7 @@ def fao_code_def(): return iso_list, faocode_list + def country_faocode2iso(input_fao): """Convert FAO country code to ISO numeric-3 codes. @@ -2724,6 +3006,7 @@ def country_faocode2iso(input_fao): return output_iso + def country_iso2faocode(input_iso): """Convert ISO numeric-3 codes to FAO country code. diff --git a/climada/util/dates_times.py b/climada/util/dates_times.py index 31de094a9..07882bf11 100644 --- a/climada/util/dates_times.py +++ b/climada/util/dates_times.py @@ -18,13 +18,16 @@ Define functions to handle dates and times in climada """ -import logging + import datetime as dt +import logging + import numpy as np import pandas as pd LOGGER = logging.getLogger(__name__) + def date_to_str(date): """Compute date string in ISO format from input datetime ordinal int. Parameters @@ -55,15 +58,16 @@ def str_to_date(date): int """ if isinstance(date, str): - year, mounth, day = (int(val) for val in date.split('-')) + year, mounth, day = (int(val) for val in date.split("-")) return dt.date(year, mounth, day).toordinal() all_date = [] for i_date in date: - year, mounth, day = (int(val) for val in i_date.split('-')) + year, mounth, day = (int(val) for val in i_date.split("-")) all_date.append(dt.date(year, mounth, day).toordinal()) return all_date + def datetime64_to_ordinal(datetime): """Converts from a numpy datetime64 object to an ordinal date. See https://stackoverflow.com/a/21916253 for the horrible details. 
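Illustration only: the two date helpers above are inverses of each other; a minimal round-trip sketch, assuming the module is imported as `climada.util.dates_times` as in the file path above:

    import datetime as dt
    from climada.util.dates_times import date_to_str, str_to_date

    ordinal = str_to_date("2024-09-12")            # ISO string -> proleptic Gregorian ordinal
    assert ordinal == dt.date(2024, 9, 12).toordinal()
    assert date_to_str(ordinal) == "2024-09-12"    # and back to the ISO string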
@@ -81,6 +85,7 @@ def datetime64_to_ordinal(datetime): return [pd.to_datetime(i_dt.tolist()).toordinal() for i_dt in datetime] + def last_year(ordinal_vector): """Extract first year from ordinal date @@ -95,6 +100,7 @@ def last_year(ordinal_vector): """ return dt.date.fromordinal(np.max(ordinal_vector)).year + def first_year(ordinal_vector): """Extract first year from ordinal date diff --git a/climada/util/dwd_icon_loader.py b/climada/util/dwd_icon_loader.py index 8878f63e7..ae03712c8 100644 --- a/climada/util/dwd_icon_loader.py +++ b/climada/util/dwd_icon_loader.py @@ -23,16 +23,17 @@ """ __all__ = [ - 'download_icon_grib', - 'delete_icon_grib', - 'download_icon_centroids_file', + "download_icon_grib", + "delete_icon_grib", + "download_icon_centroids_file", ] -import logging -from pathlib import Path import bz2 import datetime as dt +import logging +from pathlib import Path + import numpy as np from climada.util.config import CONFIG @@ -41,12 +42,13 @@ LOGGER = logging.getLogger(__name__) - -def download_icon_grib(run_datetime, - model_name='icon-eu-eps', - parameter_name='vmax_10m', - max_lead_time=None, - download_dir=None): +def download_icon_grib( + run_datetime, + model_name="icon-eu-eps", + parameter_name="vmax_10m", + max_lead_time=None, + download_dir=None, +): """download the gribfiles of a weather forecast run for a certain weather parameter from opendata.dwd.de/weather/nwp/. @@ -76,16 +78,21 @@ def download_icon_grib(run_datetime, the input parameters """ - LOGGER.info('Downloading icon grib files of model %s for parameter %s with starting date %s.', - model_name, parameter_name, run_datetime.strftime('%Y%m%d%H')) - - url, file_name, lead_times = _create_icon_grib_name(run_datetime, - model_name, - parameter_name, - max_lead_time) - download_path = CONFIG.local_data.save_dir.dir() if download_dir is None else Path(download_dir) - - #download all files + LOGGER.info( + "Downloading icon grib files of model %s for parameter %s with starting date %s.", + model_name, + parameter_name, + run_datetime.strftime("%Y%m%d%H"), + ) + + url, file_name, lead_times = _create_icon_grib_name( + run_datetime, model_name, parameter_name, max_lead_time + ) + download_path = ( + CONFIG.local_data.save_dir.dir() if download_dir is None else Path(download_dir) + ) + + # download all files file_names = [] for lead_i in lead_times: file_name_i = file_name.format(lead_i=lead_i) @@ -94,29 +101,32 @@ def download_icon_grib(run_datetime, # download file if it does not exist already if not bz2_pathfile_i.exists(): try: - download_file(url + file_name_i, - download_dir=download_dir) + download_file(url + file_name_i, download_dir=download_dir) except Exception as err: err_msg = "" - if run_datetime > (dt.datetime.utcnow()-dt.timedelta(hours=6)): - err_msg += (f'Forecast file {file_name_i} might not yet be available ' - f'on {url}. Wait a few hours. ') - elif run_datetime < (dt.datetime.utcnow() - -dt.timedelta(hours=24)): - err_msg += (f'Forecast file {file_name_i} might no longer be available ' - f'on {url}. Files are only openly available for 24 hours. ') + if run_datetime > (dt.datetime.utcnow() - dt.timedelta(hours=6)): + err_msg += ( + f"Forecast file {file_name_i} might not yet be available " + f"on {url}. Wait a few hours. " + ) + elif run_datetime < (dt.datetime.utcnow() - dt.timedelta(hours=24)): + err_msg += ( + f"Forecast file {file_name_i} might no longer be available " + f"on {url}. Files are only openly available for 24 hours. 
" + ) err_msg += f"Error while downloading {url + file_name_i}: " raise type(err)(err_msg + str(err)) from err file_names.append(str(bz2_pathfile_i)) return file_names - -def delete_icon_grib(run_datetime, - model_name='icon-eu-eps', - parameter_name='vmax_10m', - max_lead_time=None, - download_dir=None): +def delete_icon_grib( + run_datetime, + model_name="icon-eu-eps", + parameter_name="vmax_10m", + max_lead_time=None, + download_dir=None, +): """delete the downloaded gribfiles of a weather forecast run for a certain weather parameter from opendata.dwd.de/weather/nwp/. @@ -139,26 +149,30 @@ def delete_icon_grib(run_datetime, are stored at the moment """ - _, file_name, lead_times = _create_icon_grib_name(run_datetime, - model_name, - parameter_name, - max_lead_time) - download_path = CONFIG.local_data.save_dir.dir() if download_dir is None else Path(download_dir) - #delete all files + _, file_name, lead_times = _create_icon_grib_name( + run_datetime, model_name, parameter_name, max_lead_time + ) + download_path = ( + CONFIG.local_data.save_dir.dir() if download_dir is None else Path(download_dir) + ) + # delete all files for lead_i in lead_times: file_name_i = file_name.format(lead_i=lead_i) full_path_name_i = download_path.absolute().joinpath(file_name_i) if full_path_name_i.exists(): full_path_name_i.unlink() else: - LOGGER.warning('File %s does not exist and could not be deleted.', - full_path_name_i) + LOGGER.warning( + "File %s does not exist and could not be deleted.", full_path_name_i + ) -def _create_icon_grib_name(run_datetime, - model_name='icon-eu-eps', - parameter_name='vmax_10m', - max_lead_time=None): +def _create_icon_grib_name( + run_datetime, + model_name="icon-eu-eps", + parameter_name="vmax_10m", + max_lead_time=None, +): """create all parameters to download or delete gribfiles of a weather forecast run for a certain weather parameter from opendata.dwd.de/weather/nwp/. 
@@ -189,70 +203,76 @@ def _create_icon_grib_name(run_datetime, in hours, which are available for download """ # define defaults of the url for each model and parameter combination - if (model_name == 'icon-eu-eps') & (parameter_name == 'vmax_10m'): - file_extension = '_europe_icosahedral_single-level_' - #this string completes the filename on the server - file_extension_2 = '' #this string completes the filename on the server - max_lead_time_default = 120 # maximum available data - lead_times = np.concatenate((np.arange(1, 49), - np.arange(51, 73, 3), - np.arange(78, 121, 6) - )) - elif (model_name == 'icon-d2-eps') & (parameter_name == 'vmax_10m'): - file_extension = '_germany_icosahedral_single-level_' - #this string completes the filename on the server - file_extension_2 = '_2d' #this string completes the filename on the server - max_lead_time_default = 48 # maximum available data - lead_times = np.concatenate((np.arange(1, 49), - )) - elif model_name == 'test': - file_extension = '_storm_europe_icon_' #this string completes the filename on the server - file_extension_2 = '' #this string completes the filename on the server - max_lead_time_default = 2 # maximum available data - lead_times = np.concatenate((np.arange(1, 49), - np.arange(51, 73, 3), - np.arange(78, 121, 6) - )) + if (model_name == "icon-eu-eps") & (parameter_name == "vmax_10m"): + file_extension = "_europe_icosahedral_single-level_" + # this string completes the filename on the server + file_extension_2 = "" # this string completes the filename on the server + max_lead_time_default = 120 # maximum available data + lead_times = np.concatenate( + (np.arange(1, 49), np.arange(51, 73, 3), np.arange(78, 121, 6)) + ) + elif (model_name == "icon-d2-eps") & (parameter_name == "vmax_10m"): + file_extension = "_germany_icosahedral_single-level_" + # this string completes the filename on the server + file_extension_2 = "_2d" # this string completes the filename on the server + max_lead_time_default = 48 # maximum available data + lead_times = np.concatenate((np.arange(1, 49),)) + elif model_name == "test": + file_extension = ( + "_storm_europe_icon_" # this string completes the filename on the server + ) + file_extension_2 = "" # this string completes the filename on the server + max_lead_time_default = 2 # maximum available data + lead_times = np.concatenate( + (np.arange(1, 49), np.arange(51, 73, 3), np.arange(78, 121, 6)) + ) else: - raise ValueError(f'Download for model {model_name} and parameter {parameter_name} ' - 'is not yet implemented. ' - 'Please define the default values in the code first.') + raise ValueError( + f"Download for model {model_name} and parameter {parameter_name} " + "is not yet implemented. " + "Please define the default values in the code first." 
+ ) # create the url for download - url = ('https://opendata.dwd.de/weather/nwp/' + - model_name + - '/grib/' + - run_datetime.strftime('%H') + - '/' + - parameter_name + - '/') - file_name = (model_name + - file_extension + - run_datetime.strftime('%Y%m%d%H') + - '_' + - '{lead_i:03}' + - file_extension_2 + - '_' + - parameter_name + - '.grib2.bz2') - + url = ( + "https://opendata.dwd.de/weather/nwp/" + + model_name + + "/grib/" + + run_datetime.strftime("%H") + + "/" + + parameter_name + + "/" + ) + file_name = ( + model_name + + file_extension + + run_datetime.strftime("%Y%m%d%H") + + "_" + + "{lead_i:03}" + + file_extension_2 + + "_" + + parameter_name + + ".grib2.bz2" + ) # define the leadtimes - if not max_lead_time: + if not max_lead_time: max_lead_time = max_lead_time_default elif max_lead_time > max_lead_time_default: - LOGGER.warning('Parameter max_lead_time %s is bigger than maximum ' - 'available files. max_lead_time is adjusted to %s.', - max_lead_time, max_lead_time_default) + LOGGER.warning( + "Parameter max_lead_time %s is bigger than maximum " + "available files. max_lead_time is adjusted to %s.", + max_lead_time, + max_lead_time_default, + ) max_lead_time = max_lead_time_default - lead_times = lead_times[lead_times<=max_lead_time] + lead_times = lead_times[lead_times <= max_lead_time] return url, file_name, lead_times -def download_icon_centroids_file(model_name='icon-eu-eps', - download_dir = None): - """ create centroids based on netcdf files provided by dwd, links +def download_icon_centroids_file(model_name="icon-eu-eps", download_dir=None): + """create centroids based on netcdf files provided by dwd, links found here: https://www.dwd.de/DE/leistungen/opendata/neuigkeiten/opendata_dez2018_02.html https://www.dwd.de/DE/leistungen/opendata/neuigkeiten/opendata_aug2020_01.html @@ -274,32 +294,35 @@ def download_icon_centroids_file(model_name='icon-eu-eps', """ # define url and filename - url = 'https://opendata.dwd.de/weather/lib/cdo/' - if model_name == 'icon-eu-eps': - file_name = 'icon_grid_0028_R02B07_N02.nc.bz2' - elif model_name == 'icon-eu': - file_name = 'icon_grid_0024_R02B06_G.nc.bz2' - elif model_name in ('icon-d2-eps', 'icon-d2'): - file_name = 'icon_grid_0047_R19B07_L.nc.bz2' - elif model_name == 'test': - file_name = 'test_storm_europe_icon_grid.nc.bz2' + url = "https://opendata.dwd.de/weather/lib/cdo/" + if model_name == "icon-eu-eps": + file_name = "icon_grid_0028_R02B07_N02.nc.bz2" + elif model_name == "icon-eu": + file_name = "icon_grid_0024_R02B06_G.nc.bz2" + elif model_name in ("icon-d2-eps", "icon-d2"): + file_name = "icon_grid_0047_R19B07_L.nc.bz2" + elif model_name == "test": + file_name = "test_storm_europe_icon_grid.nc.bz2" else: - raise ValueError(f'Creation of centroids for the icon model {model_name} ' - 'is not yet implemented. Please define ' - 'the default values in the code first.') - download_path = CONFIG.local_data.save_dir.dir() if download_dir is None else Path(download_dir) + raise ValueError( + f"Creation of centroids for the icon model {model_name} " + "is not yet implemented. Please define " + "the default values in the code first." 
+ ) + download_path = ( + CONFIG.local_data.save_dir.dir() if download_dir is None else Path(download_dir) + ) bz2_pathfile = download_path.absolute().joinpath(file_name) - nc_pathfile = bz2_pathfile.with_suffix('') + nc_pathfile = bz2_pathfile.with_suffix("") # download and unzip file if not nc_pathfile.exists(): if not bz2_pathfile.exists(): try: - download_file(url + file_name, - download_dir=download_path) + download_file(url + file_name, download_dir=download_path) except ValueError as err: - raise ValueError(f'Error while downloading {url + file_name}.') from err - with open(bz2_pathfile, 'rb') as source, open(nc_pathfile, 'wb') as dest: + raise ValueError(f"Error while downloading {url + file_name}.") from err + with open(bz2_pathfile, "rb") as source, open(nc_pathfile, "wb") as dest: dest.write(bz2.decompress(source.read())) bz2_pathfile.unlink() diff --git a/climada/util/earth_engine.py b/climada/util/earth_engine.py index fdc136d4d..2a35755e5 100644 --- a/climada/util/earth_engine.py +++ b/climada/util/earth_engine.py @@ -48,7 +48,7 @@ def obtain_image_landsat_composite(landsat_collection, time_range, area): Returns ------- image_composite : ee.image.Image - """ + """ collection = ee.ImageCollection(landsat_collection) # Filter by time range and location @@ -57,6 +57,7 @@ def obtain_image_landsat_composite(landsat_collection, time_range, area): image_composite = ee.Algorithms.Landsat.simpleComposite(image_area, 75, 3) return image_composite + def obtain_image_median(collection, time_range, area): """Selection of median from a collection of images in the Earth Engine library See also: https://developers.google.com/earth-engine/reducers_image_collection @@ -73,7 +74,7 @@ def obtain_image_median(collection, time_range, area): Returns ------- image_median : ee.image.Image - """ + """ collection = ee.ImageCollection(collection) # Filter by time range and location @@ -82,6 +83,7 @@ def obtain_image_median(collection, time_range, area): image_median = image_area.median() return image_median + def obtain_image_sentinel(sentinel_collection, time_range, area): """Selection of median, cloud-free image from a collection of images in the Sentinel 2 dataset See also: https://developers.google.com/earth-engine/datasets/catalog/COPERNICUS_S2 @@ -98,20 +100,25 @@ def obtain_image_sentinel(sentinel_collection, time_range, area): Returns ------- sentinel_median : ee.image.Image - """ -# First, method to remove cloud from the image + """ + + # First, method to remove cloud from the image def maskclouds(image): - band_qa = image.select('QA60') + band_qa = image.select("QA60") cloud_mask = ee.Number(2).pow(10).int() cirrus_mask = ee.Number(2).pow(11).int() - mask = band_qa.bitwiseAnd(cloud_mask).eq(0) and (band_qa.bitwiseAnd(cirrus_mask).eq(0)) + mask = band_qa.bitwiseAnd(cloud_mask).eq(0) and ( + band_qa.bitwiseAnd(cirrus_mask).eq(0) + ) return image.updateMask(mask).divide(10000) - sentinel_filtered = (ee.ImageCollection(sentinel_collection). - filterBounds(area). - filterDate(time_range[0], time_range[1]). - filter(ee.Filter.lt('CLOUDY_PIXEL_PERCENTAGE', 20)). 
- map(maskclouds)) + sentinel_filtered = ( + ee.ImageCollection(sentinel_collection) + .filterBounds(area) + .filterDate(time_range[0], time_range[1]) + .filter(ee.Filter.lt("CLOUDY_PIXEL_PERCENTAGE", 20)) + .map(maskclouds) + ) sentinel_median = sentinel_filtered.median() return sentinel_median @@ -139,6 +146,7 @@ def get_region(geom): region = geom return region + def get_url(name, image, scale, region): """It will open and download automatically a zip folder containing Geotiff data of 'image'. If additional parameters are needed, see also: @@ -158,12 +166,8 @@ def get_url(name, image, scale, region): Returns ------- path : str - """ - path = image.getDownloadURL({ - 'name': (name), - 'scale': scale, - 'region': (region) - }) + """ + path = image.getDownloadURL({"name": (name), "scale": scale, "region": (region)}) webbrowser.open_new_tab(path) return path diff --git a/climada/util/files_handler.py b/climada/util/files_handler.py index 03b49a0a5..7f45762e7 100644 --- a/climada/util/files_handler.py +++ b/climada/util/files_handler.py @@ -20,8 +20,8 @@ """ __all__ = [ - 'to_list', - 'get_file_names', + "to_list", + "get_file_names", ] import glob @@ -40,6 +40,7 @@ class DownloadProgressBar(tqdm): """Class to use progress bar during dowloading""" + def update_to(self, blocks=1, bsize=1, tsize=None): """Update progress bar @@ -77,10 +78,12 @@ def download_file(url, download_dir=None, overwrite=True): str the full path to the eventually downloaded file """ - file_name = url.split('/')[-1] - if file_name.strip() == '': + file_name = url.split("/")[-1] + if file_name.strip() == "": raise ValueError(f"cannot download {url} as a file") - download_path = CONFIG.local_data.save_dir.dir() if download_dir is None else Path(download_dir) + download_path = ( + CONFIG.local_data.save_dir.dir() if download_dir is None else Path(download_dir) + ) file_path = download_path.absolute().joinpath(file_name) if file_path.exists(): if not file_path.is_file() or not overwrite: @@ -89,20 +92,25 @@ def download_file(url, download_dir=None, overwrite=True): try: req_file = requests.get(url, stream=True) except IOError as ioe: - raise type(ioe)('Check URL and internet connection: ' + str(ioe)) from ioe + raise type(ioe)("Check URL and internet connection: " + str(ioe)) from ioe if req_file.status_code < 200 or req_file.status_code > 299: - raise ValueError(f'Error loading page {url}\n' - f' Status: {req_file.status_code}\n' - f' Content: {req_file.content}') + raise ValueError( + f"Error loading page {url}\n" + f" Status: {req_file.status_code}\n" + f" Content: {req_file.content}" + ) - total_size = int(req_file.headers.get('content-length', 0)) + total_size = int(req_file.headers.get("content-length", 0)) block_size = 1024 - LOGGER.info('Downloading %s to file %s', url, file_path) - with file_path.open('wb') as file: - for data in tqdm(req_file.iter_content(block_size), - total=math.ceil(total_size // block_size), - unit='KB', unit_scale=True): + LOGGER.info("Downloading %s to file %s", url, file_path) + with file_path.open("wb") as file: + for data in tqdm( + req_file.iter_content(block_size), + total=math.ceil(total_size // block_size), + unit="KB", + unit_scale=True, + ): file.write(data) return str(file_path) @@ -122,10 +130,11 @@ def download_ftp(url, file_name): ------ ValueError """ - LOGGER.info('Downloading file %s', file_name) + LOGGER.info("Downloading file %s", file_name) try: - with DownloadProgressBar(unit='B', unit_scale=True, miniters=1, - desc=url.split('/')[-1]) as prog_bar: + with 
DownloadProgressBar( + unit="B", unit_scale=True, miniters=1, desc=url.split("/")[-1] + ) as prog_bar: urllib.request.urlretrieve(url, file_name, reporthook=prog_bar.update_to) except Exception as exc: raise ValueError( @@ -156,7 +165,7 @@ def to_list(num_exp, values, val_name): return values if len(values) == 1: return num_exp * [values[0]] - raise ValueError(f'Provide one or {num_exp} {val_name}.') + raise ValueError(f"Provide one or {num_exp} {val_name}.") def get_file_names(file_name): @@ -208,5 +217,5 @@ def get_extension(file_name): str, str """ file_path = Path(file_name) - cuts = file_path.name.split('.') + cuts = file_path.name.split(".") return str(file_path.parent.joinpath(cuts[0])), "".join(file_path.suffixes) diff --git a/climada/util/finance.py b/climada/util/finance.py index f12b5e000..58be8b227 100644 --- a/climada/util/finance.py +++ b/climada/util/finance.py @@ -18,65 +18,70 @@ Finance functionalities. """ -__all__ = ['net_present_value', 'income_group', 'gdp'] -import shutil +__all__ = ["net_present_value", "income_group", "gdp"] + import logging +import shutil import warnings import zipfile - from pathlib import Path -import requests import numpy as np import pandas as pd -from pandas_datareader import wb +import requests from cartopy.io import shapereader +from pandas_datareader import wb -from climada.util.files_handler import download_file from climada.util.constants import SYSTEM_DIR - +from climada.util.files_handler import download_file LOGGER = logging.getLogger(__name__) -WORLD_BANK_WEALTH_ACC = \ -"https://databank.worldbank.org/data/download/Wealth-Accounts_CSV.zip" +WORLD_BANK_WEALTH_ACC = ( + "https://databank.worldbank.org/data/download/Wealth-Accounts_CSV.zip" +) """Wealth historical data (1995, 2000, 2005, 2010, 2014) from World Bank (ZIP). 
https://datacatalog.worldbank.org/dataset/wealth-accounting Includes variable Produced Capital (NW.PCA.TO)""" FILE_WORLD_BANK_WEALTH_ACC = "Wealth-AccountsData.csv" -WORLD_BANK_INC_GRP = \ -"http://databank.worldbank.org/data/download/site-content/OGHIST.xls" +WORLD_BANK_INC_GRP = ( + "http://databank.worldbank.org/data/download/site-content/OGHIST.xls" +) """Income group historical data from World bank.""" -INCOME_GRP_WB_TABLE = {'L': 1, # low income - 'LM': 2, # lower middle income - 'UM': 3, # upper middle income - 'H': 4, # high income - '..': np.nan # no data - } +INCOME_GRP_WB_TABLE = { + "L": 1, # low income + "LM": 2, # lower middle income + "UM": 3, # upper middle income + "H": 4, # high income + "..": np.nan, # no data +} """Meaning of values of world banks' historical table on income groups.""" -INCOME_GRP_NE_TABLE = {5: 1, # Low income - 4: 2, # Lower middle income - 3: 3, # Upper middle income - 2: 4, # High income: nonOECD - 1: 4 # High income: OECD - } +INCOME_GRP_NE_TABLE = { + 5: 1, # Low income + 4: 2, # Lower middle income + 3: 3, # Upper middle income + 2: 4, # High income: nonOECD + 1: 4, # High income: OECD +} """Meaning of values of natural earth's income groups.""" -FILE_GWP_WEALTH2GDP_FACTORS = 'WEALTH2GDP_factors_CRI_2016.csv' +FILE_GWP_WEALTH2GDP_FACTORS = "WEALTH2GDP_factors_CRI_2016.csv" """File with wealth-to-GDP factors from the Credit Suisse's Global Wealth Report 2017 (household wealth)""" -def _nat_earth_shp(resolution='10m', category='cultural', - name='admin_0_countries'): - shp_file = shapereader.natural_earth(resolution=resolution, - category=category, name=name) + +def _nat_earth_shp(resolution="10m", category="cultural", name="admin_0_countries"): + shp_file = shapereader.natural_earth( + resolution=resolution, category=category, name=name + ) return shapereader.Reader(shp_file) + def net_present_value(years, disc_rates, val_years): """Compute net present value. @@ -94,7 +99,9 @@ def net_present_value(years, disc_rates, val_years): float """ if years.size != disc_rates.size or years.size != val_years.size: - raise ValueError(f'Wrong input sizes {years.size}, {disc_rates.size}, {val_years.size}.') + raise ValueError( + f"Wrong input sizes {years.size}, {disc_rates.size}, {val_years.size}." + ) npv = val_years[-1] for val, disc in zip(val_years[-2::-1], disc_rates[-2::-1]): @@ -102,6 +109,7 @@ def net_present_value(years, disc_rates, val_years): return npv + def income_group(cntry_iso, ref_year, shp_file=None): """Get country's income group from World Bank's data at a given year, or closest year value. If no data, get the natural earth's approximation. @@ -118,15 +126,17 @@ def income_group(cntry_iso, ref_year, shp_file=None): if not provided. """ try: - close_year, close_val = world_bank(cntry_iso, ref_year, 'INC_GRP') + close_year, close_val = world_bank(cntry_iso, ref_year, "INC_GRP") except (KeyError, IndexError): # take value from natural earth repository - close_year, close_val = nat_earth_adm0(cntry_iso, 'INCOME_GRP', - shp_file=shp_file) + close_year, close_val = nat_earth_adm0( + cntry_iso, "INCOME_GRP", shp_file=shp_file + ) - LOGGER.info('Income group %s %s: %s.', cntry_iso, close_year, close_val) + LOGGER.info("Income group %s %s: %s.", cntry_iso, close_year, close_val) return close_year, close_val + def gdp(cntry_iso, ref_year, shp_file=None, per_capita=False): """Get country's (current value) GDP from World Bank's data at a given year, or closest year value. If no data, get the natural earth's approximation. 
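Illustration only: a short worked example for the `net_present_value` helper shown above; values are discounted back to the first year, so the first year's value enters undiscounted:

    import numpy as np
    from climada.util.finance import net_present_value

    years = np.arange(2020, 2023)      # 2020, 2021, 2022
    disc_rates = np.full(3, 0.02)      # flat 2% discount rate
    val_years = np.full(3, 100.0)      # value of 100 in each year
    npv = net_present_value(years, disc_rates, val_years)
    # npv == 100 + 100 / 1.02 + 100 / 1.02**2, i.e. roughly 294.16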
@@ -148,24 +158,29 @@ def gdp(cntry_iso, ref_year, shp_file=None, per_capita=False): ------- float """ - if cntry_iso == 'TWN': - LOGGER.warning('GDP data for TWN is not provided by World Bank. \ - Instead, IMF data is returned here.') + if cntry_iso == "TWN": + LOGGER.warning( + "GDP data for TWN is not provided by World Bank. \ + Instead, IMF data is returned here." + ) close_year, close_val = _gdp_twn(ref_year, per_capita=per_capita) return close_year, close_val try: if per_capita: - close_year, close_val = world_bank(cntry_iso, ref_year, 'NY.GDP.PCAP.CD') + close_year, close_val = world_bank(cntry_iso, ref_year, "NY.GDP.PCAP.CD") else: - close_year, close_val = world_bank(cntry_iso, ref_year, 'NY.GDP.MKTP.CD') + close_year, close_val = world_bank(cntry_iso, ref_year, "NY.GDP.MKTP.CD") except (ValueError, IndexError, requests.exceptions.ConnectionError) as err: if isinstance(err, requests.exceptions.ConnectionError): - LOGGER.warning('Internet connection failed while retrieving GDPs.') - close_year, close_val = nat_earth_adm0(cntry_iso, 'GDP_MD', 'GDP_YEAR', shp_file) + LOGGER.warning("Internet connection failed while retrieving GDPs.") + close_year, close_val = nat_earth_adm0( + cntry_iso, "GDP_MD", "GDP_YEAR", shp_file + ) LOGGER.info("GDP {} {:d}: {:.3e}.".format(cntry_iso, close_year, close_val)) return close_year, close_val + def world_bank(cntry_iso, ref_year, info_ind): """Get country's GDP from World Bank's data at a given year, or closest year value. If no data, get the natural earth's approximation. @@ -188,38 +203,47 @@ def world_bank(cntry_iso, ref_year, info_ind): ------ IOError, KeyError, IndexError """ - if info_ind != 'INC_GRP': + if info_ind != "INC_GRP": with warnings.catch_warnings(): warnings.simplefilter("ignore") - cntry_gdp = wb.download(indicator=info_ind, country=cntry_iso, start=1960, end=2030) - years = np.array([int(year) for year in cntry_gdp.index.get_level_values('year')]) + cntry_gdp = wb.download( + indicator=info_ind, country=cntry_iso, start=1960, end=2030 + ) + years = np.array( + [int(year) for year in cntry_gdp.index.get_level_values("year")] + ) sort_years = np.abs(years - ref_year).argsort() close_val = cntry_gdp.iloc[sort_years].dropna() close_year = int(close_val.iloc[0].name[1]) close_val = float(close_val.iloc[0].values) else: # income group level - fn_ig = SYSTEM_DIR.joinpath('OGHIST.xls') + fn_ig = SYSTEM_DIR.joinpath("OGHIST.xls") dfr_wb = pd.DataFrame() try: if not fn_ig.is_file(): file_down = download_file(WORLD_BANK_INC_GRP) shutil.move(file_down, fn_ig) - dfr_wb = pd.read_excel(fn_ig, 'Country Analytical History', skiprows=5) - dfr_wb = dfr_wb.drop(dfr_wb.index[0:5]).set_index('Unnamed: 0') - dfr_wb = dfr_wb.replace(INCOME_GRP_WB_TABLE.keys(), - INCOME_GRP_WB_TABLE.values()) + dfr_wb = pd.read_excel(fn_ig, "Country Analytical History", skiprows=5) + dfr_wb = dfr_wb.drop(dfr_wb.index[0:5]).set_index("Unnamed: 0") + dfr_wb = dfr_wb.replace( + INCOME_GRP_WB_TABLE.keys(), INCOME_GRP_WB_TABLE.values() + ) except (IOError, requests.exceptions.ConnectionError) as err: - raise type(err)('Internet connection failed while downloading ' - 'historical income groups: ' + str(err)) from err + raise type(err)( + "Internet connection failed while downloading " + "historical income groups: " + str(err) + ) from err cntry_dfr = dfr_wb.loc[cntry_iso] - close_val = cntry_dfr.iloc[np.abs( - np.array(cntry_dfr.index[1:]) - ref_year).argsort() + 1].dropna() + close_val = cntry_dfr.iloc[ + np.abs(np.array(cntry_dfr.index[1:]) - ref_year).argsort() + 1 + 
].dropna() close_year = close_val.index[0] close_val = int(close_val.iloc[0]) return close_year, close_val + def nat_earth_adm0(cntry_iso, info_name, year_name=None, shp_file=None): """Get country's parameter from natural earth's admin0 shape file. @@ -246,12 +270,12 @@ def nat_earth_adm0(cntry_iso, info_name, year_name=None, shp_file=None): ValueError """ if not shp_file: - shp_file = _nat_earth_shp('10m', 'cultural', 'admin_0_countries') + shp_file = _nat_earth_shp("10m", "cultural", "admin_0_countries") close_val = 0 close_year = 0 for info in shp_file.records(): - if info.attributes['ADM0_A3'] == cntry_iso: + if info.attributes["ADM0_A3"] == cntry_iso: close_val = info.attributes[info_name] if year_name: close_year = int(info.attributes[year_name]) @@ -261,15 +285,17 @@ def nat_earth_adm0(cntry_iso, info_name, year_name=None, shp_file=None): raise ValueError("No GDP for country %s found." % cntry_iso) # the variable name changed in Natural Earth v5.0.0 - if info_name in ['GDP_MD', 'GDP_MD_EST']: + if info_name in ["GDP_MD", "GDP_MD_EST"]: close_val *= 1e6 - elif info_name == 'INCOME_GRP': + elif info_name == "INCOME_GRP": close_val = INCOME_GRP_NE_TABLE.get(int(close_val[0])) return close_year, close_val -def wealth2gdp(cntry_iso, non_financial=True, ref_year=2016, - file_name=FILE_GWP_WEALTH2GDP_FACTORS): + +def wealth2gdp( + cntry_iso, non_financial=True, ref_year=2016, file_name=FILE_GWP_WEALTH2GDP_FACTORS +): """Get country's wealth-to-GDP factor from the Credit Suisse's Global Wealth Report 2017 (household wealth). Missing value: returns NaN. @@ -289,32 +315,39 @@ def wealth2gdp(cntry_iso, non_financial=True, ref_year=2016, float """ fname = SYSTEM_DIR.joinpath(file_name) - factors_all_countries = pd.read_csv(fname, sep=',', index_col=None, - header=0, encoding='ISO-8859-1') + factors_all_countries = pd.read_csv( + fname, sep=",", index_col=None, header=0, encoding="ISO-8859-1" + ) if ref_year != 2016: - LOGGER.warning('Reference year for the factor to convert GDP to ' - 'wealth was set to 2016 because other years have not ' - 'been implemented yet.') + LOGGER.warning( + "Reference year for the factor to convert GDP to " + "wealth was set to 2016 because other years have not " + "been implemented yet." + ) ref_year = 2016 if non_financial: try: val = factors_all_countries[ - factors_all_countries['country_iso3'] == cntry_iso]['NFW-to-GDP-ratio'].values[0] + factors_all_countries["country_iso3"] == cntry_iso + ]["NFW-to-GDP-ratio"].values[0] except (AttributeError, KeyError, IndexError): - LOGGER.warning('No data for country, using mean factor.') + LOGGER.warning("No data for country, using mean factor.") val = factors_all_countries["NFW-to-GDP-ratio"].mean() else: try: val = factors_all_countries[ - factors_all_countries['country_iso3'] == cntry_iso]['TW-to-GDP-ratio'].values[0] + factors_all_countries["country_iso3"] == cntry_iso + ]["TW-to-GDP-ratio"].values[0] except (AttributeError, KeyError, IndexError): - LOGGER.warning('No data for country, using mean factor.') + LOGGER.warning("No data for country, using mean factor.") val = factors_all_countries["TW-to-GDP-ratio"].mean() val = np.around(val, 5) return ref_year, val -def world_bank_wealth_account(cntry_iso, ref_year, variable_name="NW.PCA.TO", - no_land=True): + +def world_bank_wealth_account( + cntry_iso, ref_year, variable_name="NW.PCA.TO", no_land=True +): """ Download and unzip wealth accounting historical data (1995, 2000, 2005, 2010, 2014) from World Bank (https://datacatalog.worldbank.org/dataset/wealth-accounting). 
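Illustration only: the factor returned by `wealth2gdp` can be combined with `gdp` to approximate a country's non-financial wealth, which is also the fallback used by `world_bank_wealth_account` (whose diff continues below) when no wealth-accounting data is available; note that `gdp` may need an internet connection:

    from climada.util.finance import gdp, wealth2gdp

    gdp_year, gdp_val = gdp("CHE", 2016)    # closest available year and value
    _, factor = wealth2gdp("CHE", non_financial=True, ref_year=2016)
    non_financial_wealth = factor * gdp_val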
@@ -357,28 +390,36 @@ def world_bank_wealth_account(cntry_iso, ref_year, variable_name="NW.PCA.TO", try: data_file = SYSTEM_DIR.joinpath(FILE_WORLD_BANK_WEALTH_ACC) if not data_file.is_file(): - data_file = SYSTEM_DIR.joinpath('Wealth-Accounts_CSV', FILE_WORLD_BANK_WEALTH_ACC) + data_file = SYSTEM_DIR.joinpath( + "Wealth-Accounts_CSV", FILE_WORLD_BANK_WEALTH_ACC + ) if not data_file.is_file(): - if not SYSTEM_DIR.joinpath('Wealth-Accounts_CSV').is_dir(): - SYSTEM_DIR.joinpath('Wealth-Accounts_CSV').mkdir() + if not SYSTEM_DIR.joinpath("Wealth-Accounts_CSV").is_dir(): + SYSTEM_DIR.joinpath("Wealth-Accounts_CSV").mkdir() file_down = download_file(WORLD_BANK_WEALTH_ACC) - zip_ref = zipfile.ZipFile(file_down, 'r') - zip_ref.extractall(SYSTEM_DIR.joinpath('Wealth-Accounts_CSV')) + zip_ref = zipfile.ZipFile(file_down, "r") + zip_ref.extractall(SYSTEM_DIR.joinpath("Wealth-Accounts_CSV")) zip_ref.close() Path(file_down).unlink() - LOGGER.debug('Download and unzip complete. Unzipping %s', str(data_file)) + LOGGER.debug("Download and unzip complete. Unzipping %s", str(data_file)) - data_wealth = pd.read_csv(data_file, sep=',', index_col=None, header=0) + data_wealth = pd.read_csv(data_file, sep=",", index_col=None, header=0) except Exception as err: raise type(err)( - 'Downloading World Bank Wealth Accounting Data failed: ' + str(err)) from err + "Downloading World Bank Wealth Accounting Data failed: " + str(err) + ) from err - data_wealth = data_wealth[data_wealth['Country Code'].str.contains(cntry_iso) - & data_wealth['Indicator Code'].str.contains(variable_name) - ].loc[:, '1995':'2014'] + data_wealth = data_wealth[ + data_wealth["Country Code"].str.contains(cntry_iso) + & data_wealth["Indicator Code"].str.contains(variable_name) + ].loc[:, "1995":"2014"] years = list(map(int, list(data_wealth))) - if data_wealth.size == 0 and 'NW.PCA.TO' in variable_name: # if country is not found in data - LOGGER.warning('No data available for country. Using non-financial wealth instead') + if ( + data_wealth.size == 0 and "NW.PCA.TO" in variable_name + ): # if country is not found in data + LOGGER.warning( + "No data available for country. Using non-financial wealth instead" + ) gdp_year, gdp_val = gdp(cntry_iso, ref_year) fac = wealth2gdp(cntry_iso)[1] return gdp_year, np.around((fac * gdp_val), 1), 0 @@ -396,11 +437,12 @@ def world_bank_wealth_account(cntry_iso, ref_year, variable_name="NW.PCA.TO", gdp_year, gdp_val = gdp(cntry_iso, ref_year) result = data_wealth.values[0, -1] * gdp_val / gdp0_val ref_year = gdp_year - if 'NW.PCA.' in variable_name and no_land: + if "NW.PCA." 
in variable_name and no_land: # remove value of built-up land from produced capital result = result / 1.24 return ref_year, np.around(result, 1), 1 + def _gdp_twn(ref_year, per_capita=False): """returns GDP for TWN (Republic of China / Taiwan Province of China) based on a CSV sheet downloaded from the @@ -424,23 +466,26 @@ def _gdp_twn(ref_year, per_capita=False): ------- float """ - fname = 'GDP_TWN_IMF_WEO_data.csv' + fname = "GDP_TWN_IMF_WEO_data.csv" if not SYSTEM_DIR.joinpath(fname).is_file(): - raise FileNotFoundError(f'File {fname} not found in SYSTEM_DIR') + raise FileNotFoundError(f"File {fname} not found in SYSTEM_DIR") if per_capita: - var_name = 'Gross domestic product per capita, current prices' + var_name = "Gross domestic product per capita, current prices" else: - var_name = 'Gross domestic product, current prices' + var_name = "Gross domestic product, current prices" if ref_year < 1980: close_year = 1980 elif ref_year > 2024: close_year = 2024 else: close_year = ref_year - data = pd.read_csv(SYSTEM_DIR.joinpath('GDP_TWN_IMF_WEO_data.csv'), - index_col=None, header=0) - close_val = data.loc[data['Subject Descriptor'] == var_name, str(close_year)].values[0] - close_val = float(close_val.replace(',', '')) + data = pd.read_csv( + SYSTEM_DIR.joinpath("GDP_TWN_IMF_WEO_data.csv"), index_col=None, header=0 + ) + close_val = data.loc[ + data["Subject Descriptor"] == var_name, str(close_year) + ].values[0] + close_val = float(close_val.replace(",", "")) if not per_capita: close_val = close_val * 1e9 return close_year, close_val diff --git a/climada/util/hdf5_handler.py b/climada/util/hdf5_handler.py index 08d0a4970..8408972bd 100644 --- a/climada/util/hdf5_handler.py +++ b/climada/util/hdf5_handler.py @@ -19,44 +19,47 @@ Functionalities to handle HDF5 files. Used for MATLAB files as well. """ -__all__ = ['read', - 'get_string', - 'get_str_from_ref', - 'get_list_str_from_ref', - 'get_sparse_csr_mat' - ] +__all__ = [ + "read", + "get_string", + "get_str_from_ref", + "get_list_str_from_ref", + "get_sparse_csr_mat", +] -from scipy import sparse -import numpy as np import h5py +import numpy as np +from scipy import sparse + def read(file_name, with_refs=False): """Load a hdf5 data structure from a file. - Parameters - ---------- - file_name : - file to load - with_refs : - enable loading of the references. Default is unset, - since it increments the execution time considerably. - - Returns - ------- - contents : - dictionary structure containing all the variables. - - Examples - -------- - >>> # Contents contains the Matlab data in a dictionary. - >>> contents = read("/pathto/dummy.mat") - >>> # Contents contains the Matlab data and its reference in a dictionary. - >>> contents = read("/pathto/dummy.mat", True) - - Raises - ------ - Exception while reading - """ + Parameters + ---------- + file_name : + file to load + with_refs : + enable loading of the references. Default is unset, + since it increments the execution time considerably. + + Returns + ------- + contents : + dictionary structure containing all the variables. + + Examples + -------- + >>> # Contents contains the Matlab data in a dictionary. + >>> contents = read("/pathto/dummy.mat") + >>> # Contents contains the Matlab data and its reference in a dictionary. 
+ >>> contents = read("/pathto/dummy.mat", True) + + Raises + ------ + Exception while reading + """ + def get_group(group): """Recursive function to get variables from a group.""" contents = {} @@ -70,81 +73,86 @@ def get_group(group): # other objects such as links are ignored return contents - with h5py.File(file_name, 'r') as file: + with h5py.File(file_name, "r") as file: return get_group(file) + def get_string(array): """Form string from input array of unisgned integers. - Parameters - ---------- - array : - array of integers + Parameters + ---------- + array : + array of integers - Returns - ------- - string + Returns + ------- + string """ - return ''.join(chr(int(c)) for c in array) + return "".join(chr(int(c)) for c in array) + def get_str_from_ref(file_name, var): """Form string from a reference HDF5 variable of the given file. - Parameters - ---------- - file_name : - matlab file name - var : - HDF5 reference variable + Parameters + ---------- + file_name : + matlab file name + var : + HDF5 reference variable - Returns - ------- - string + Returns + ------- + string """ - with h5py.File(file_name, 'r') as file: + with h5py.File(file_name, "r") as file: return get_string(file[var]) + def get_list_str_from_ref(file_name, var): """Form list of strings from a reference HDF5 variable of the given file. - Parameters - ---------- - file_name : - matlab file name - var : - array of HDF5 reference variable + Parameters + ---------- + file_name : + matlab file name + var : + array of HDF5 reference variable - Returns - ------- - string + Returns + ------- + string """ name_list = [] - with h5py.File(file_name, 'r') as file: + with h5py.File(file_name, "r") as file: for name in var: name_list.append(get_string(file[name[0]][:]).strip()) return name_list + def get_sparse_csr_mat(mat_dict, shape): """Form sparse matrix from input hdf5 sparse matrix data type. - Parameters - ---------- - mat_dict : - dictionary containing the sparse matrix information. - shape : - tuple describing output matrix shape. + Parameters + ---------- + mat_dict : + dictionary containing the sparse matrix information. + shape : + tuple describing output matrix shape. 
- Returns - ------- - sparse csr matrix + Returns + ------- + sparse csr matrix """ # Check if input has all the necessary data of a sparse matrix - if ('data' not in mat_dict) or ('ir' not in mat_dict) or \ - ('jc' not in mat_dict): - raise ValueError('Input data is not a sparse matrix.') + if ("data" not in mat_dict) or ("ir" not in mat_dict) or ("jc" not in mat_dict): + raise ValueError("Input data is not a sparse matrix.") + + return sparse.csc_matrix( + (mat_dict["data"], mat_dict["ir"], mat_dict["jc"]), shape + ).tocsr() - return sparse.csc_matrix((mat_dict['data'], mat_dict['ir'], - mat_dict['jc']), shape).tocsr() def to_string(str_or_bytes): """converts a bytes object into a string if necessary diff --git a/climada/util/interpolation.py b/climada/util/interpolation.py index c2e514797..4a230f9af 100644 --- a/climada/util/interpolation.py +++ b/climada/util/interpolation.py @@ -19,7 +19,6 @@ Define interpolation and extrapolation functions for calculating (local) exceedance frequencies and return periods """ - import logging import numpy as np @@ -29,17 +28,18 @@ LOGGER = logging.getLogger(__name__) + def interpolate_ev( - x_test, - x_train, - y_train, - logx = False, - logy = False, - x_threshold = None, - y_threshold = None, - extrapolation = False, - y_asymptotic = np.nan - ): + x_test, + x_train, + y_train, + logx=False, + logy=False, + x_threshold=None, + y_threshold=None, + extrapolation=False, + y_asymptotic=np.nan, +): """ Util function to interpolate (and extrapolate) training data (x_train, y_train) to new points x_test with several options (log scale, thresholds) @@ -80,38 +80,35 @@ def interpolate_ev( x_test, x_train, y_train, logx, logy, x_threshold, y_threshold ) - # handle case of small training data sizes + # handle case of small training data sizes if x_train.size < 2: - LOGGER.warning('Data is being extrapolated.') + LOGGER.warning("Data is being extrapolated.") return _interpolate_small_input(x_test, x_train, y_train, logy, y_asymptotic) # calculate fill values if extrapolation: - fill_value = 'extrapolate' + fill_value = "extrapolate" if np.min(x_test) < np.min(x_train) or np.max(x_test) > np.max(x_train): - LOGGER.warning('Data is being extrapolated.') + LOGGER.warning("Data is being extrapolated.") else: if not all(sorted(x_train) == x_train): - raise ValueError('x_train array must be sorted in ascending order.') - fill_value = (y_train[0], np.log10(y_asymptotic) if logy else y_asymptotic) + raise ValueError("x_train array must be sorted in ascending order.") + fill_value = (y_train[0], np.log10(y_asymptotic) if logy else y_asymptotic) interpolation = interpolate.interp1d( - x_train, y_train, fill_value=fill_value, bounds_error=False) + x_train, y_train, fill_value=fill_value, bounds_error=False + ) y_test = interpolation(x_test) # adapt output scale if logy: - y_test = np.power(10., y_test) + y_test = np.power(10.0, y_test) return y_test + def stepfunction_ev( - x_test, - x_train, - y_train, - x_threshold = None, - y_threshold = None, - y_asymptotic = np.nan - ): + x_test, x_train, y_train, x_threshold=None, y_threshold=None, y_asymptotic=np.nan +): """ Util function to interpolate and extrapolate training data (x_train, y_train) to new points x_test using a step function @@ -148,43 +145,42 @@ def stepfunction_ev( # find indices of x_test if sorted into x_train if not all(sorted(x_train) == x_train): - raise ValueError('Input array x_train must be sorted in ascending order.') + raise ValueError("Input array x_train must be sorted in ascending order.") indx = 
np.searchsorted(x_train, x_test) - y_test = y_train[indx.clip(max = len(x_train) - 1)] + y_test = y_train[indx.clip(max=len(x_train) - 1)] y_test[indx == len(x_train)] = y_asymptotic return y_test + def _preprocess_interpolation_data( - x_test, - x_train, - y_train, - logx, - logy, - x_threshold, - y_threshold - ): + x_test, x_train, y_train, logx, logy, x_threshold, y_threshold +): """ helper function to preprocess interpolation training and test data by filtering data below thresholds and converting to log scale if required """ if x_train.shape != y_train.shape: - raise ValueError(f'Incompatible shapes of input data, x_train {x_train.shape} ' - f'and y_train {y_train.shape}. Should be the same') + raise ValueError( + f"Incompatible shapes of input data, x_train {x_train.shape} " + f"and y_train {y_train.shape}. Should be the same" + ) # transform input to float arrays - x_test, x_train, y_train = (np.array(x_test).astype(float), - np.array(x_train).astype(float), - np.array(y_train).astype(float)) + x_test, x_train, y_train = ( + np.array(x_test).astype(float), + np.array(x_train).astype(float), + np.array(y_train).astype(float), + ) # cut x and y above threshold - if x_threshold or x_threshold==0: + if x_threshold or x_threshold == 0: x_th = np.asarray(x_train > x_threshold).squeeze() x_train = x_train[x_th] y_train = y_train[x_th] - if y_threshold or y_threshold==0: + if y_threshold or y_threshold == 0: y_th = np.asarray(y_train > y_threshold).squeeze() x_train = x_train[y_th] y_train = y_train[y_th] @@ -197,6 +193,7 @@ def _preprocess_interpolation_data( return (x_test, x_train, y_train) + def _interpolate_small_input(x_test, x_train, y_train, logy, y_asymptotic): """ helper function to handle if interpolation data is small (empty or one point) @@ -207,7 +204,7 @@ def _interpolate_small_input(x_test, x_train, y_train, logy, y_asymptotic): # reconvert logarithmic y_train to original y_train if logy: - y_train = np.power(10., y_train) + y_train = np.power(10.0, y_train) # if only one (x_train, y_train), return stepfunction with # y_train if x_test < x_train and y_asymtotic if x_test > x_train @@ -215,6 +212,7 @@ def _interpolate_small_input(x_test, x_train, y_train, logy, y_asymptotic): y_test[np.squeeze(x_test) > np.squeeze(x_train)] = y_asymptotic return y_test + def group_frequency(frequency, value, n_sig_dig=2): """ Util function to aggregate (add) frequencies for equal values @@ -240,15 +238,18 @@ def group_frequency(frequency, value, n_sig_dig=2): return ([], []) if len(value) != len(np.unique(sig_dig_list(value, n_sig_dig=n_sig_dig))): - #check ordering of value + # check ordering of value if not all(sorted(value) == value): - raise ValueError('Value array must be sorted in ascending order.') + raise ValueError("Value array must be sorted in ascending order.") # add frequency for equal value value, start_indices = np.unique( - sig_dig_list(value, n_sig_dig=n_sig_dig), return_index=True) + sig_dig_list(value, n_sig_dig=n_sig_dig), return_index=True + ) start_indices = np.insert(start_indices, len(value), len(frequency)) - frequency = np.array([ - sum(frequency[start_indices[i]:start_indices[i+1]]) - for i in range(len(value)) - ]) + frequency = np.array( + [ + sum(frequency[start_indices[i] : start_indices[i + 1]]) + for i in range(len(value)) + ] + ) return frequency, value diff --git a/climada/util/lines_polys_handler.py b/climada/util/lines_polys_handler.py index 22d27793e..244658b18 100755 --- a/climada/util/lines_polys_handler.py +++ b/climada/util/lines_polys_handler.py @@ 
-15,8 +15,9 @@ with CLIMADA. If not, see . """ -import logging + import copy +import logging from enum import Enum import cartopy.crs as ccrs @@ -41,8 +42,9 @@ class DisaggMethod(Enum): DIV : the geometry's distributed to equal parts over all its interpolated points FIX : the geometry's value is replicated over all its interpolated points """ - DIV = 'div' - FIX = 'fix' + + DIV = "div" + FIX = "fix" class AggMethod(Enum): @@ -51,12 +53,20 @@ class AggMethod(Enum): SUM : the impact is summed over all points in the polygon/line """ - SUM = 'sum' + + SUM = "sum" def calc_geom_impact( - exp, impf_set, haz, res, to_meters=False, disagg_met=DisaggMethod.DIV, - disagg_val=None, agg_met=AggMethod.SUM): + exp, + impf_set, + haz, + res, + to_meters=False, + disagg_met=DisaggMethod.DIV, + disagg_val=None, + agg_met=AggMethod.SUM, +): """ Compute impact for exposure with (multi-)polygons and/or (multi-)lines. Lat/Lon values in exp.gdf are ignored, only exp.gdf.geometry is considered. @@ -116,10 +126,12 @@ def calc_geom_impact( # disaggregate exposure exp_pnt = exp_geom_to_pnt( - exp=exp, res=res, - to_meters=to_meters, disagg_met=disagg_met, - disagg_val=disagg_val - ) + exp=exp, + res=res, + to_meters=to_meters, + disagg_met=disagg_met, + disagg_val=disagg_val, + ) exp_pnt.assign_centroids(haz) # compute point impact @@ -174,14 +186,16 @@ def impact_pnt_agg(impact_pnt, exp_pnt_gdf, agg_met): # add exposure representation points as coordinates repr_pnts = gpd.GeoSeries( - exp_pnt_gdf['geometry_orig'][:,0].apply( - lambda x: x.representative_point())) + exp_pnt_gdf["geometry_orig"][:, 0].apply(lambda x: x.representative_point()) + ) impact_agg.coord_exp = np.array([repr_pnts.y, repr_pnts.x]).transpose() # Add original geometries for plotting - impact_agg.geom_exp = exp_pnt_gdf.xs(0, level=1)\ - .set_geometry('geometry_orig')\ - .geometry.rename('geometry') + impact_agg.geom_exp = ( + exp_pnt_gdf.xs(0, level=1) + .set_geometry("geometry_orig") + .geometry.rename("geometry") + ) return impact_agg @@ -221,18 +235,24 @@ def _aggregate_impact_mat(imp_pnt, gdf_pnt, agg_met): mask = np.ones(len(col_geom)) else: raise NotImplementedError( - f'The available aggregation methods are {AggMethod._member_names_}') # pylint: disable=no-member, protected-access + f"The available aggregation methods are {AggMethod._member_names_}" + ) # pylint: disable=no-member, protected-access csr_mask = sp.sparse.csr_matrix( - (mask, (row_pnt, col_geom)), - shape=(len(row_pnt), len(np.unique(col_geom))) - ) + (mask, (row_pnt, col_geom)), shape=(len(row_pnt), len(np.unique(col_geom))) + ) return imp_pnt.imp_mat.dot(csr_mask) def calc_grid_impact( - exp, impf_set, haz, grid, disagg_met=DisaggMethod.DIV, disagg_val=None, - agg_met=AggMethod.SUM): + exp, + impf_set, + haz, + grid, + disagg_met=DisaggMethod.DIV, + disagg_val=None, + agg_met=AggMethod.SUM, +): """ Compute impact for exposure with (multi-)polygons and/or (multi-)lines. Lat/Lon values in exp.gdf are ignored, only exp.gdf.geometry is considered. 
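Illustration only: a hedged usage sketch of `calc_geom_impact` as reformatted above; `exp` (an Exposures with polygon or line geometries), `impf_set` and `haz` are assumed to be prepared elsewhere and are not constructed here:

    from climada.util.lines_polys_handler import (
        AggMethod,
        DisaggMethod,
        calc_geom_impact,
    )

    # exp, impf_set, haz: Exposures / ImpactFuncSet / Hazard prepared elsewhere
    imp = calc_geom_impact(
        exp, impf_set, haz,
        res=0.01,                     # interpolation resolution (degrees, since to_meters=False)
        to_meters=False,
        disagg_met=DisaggMethod.DIV,  # spread each geometry's value over its points
        disagg_val=None,
        agg_met=AggMethod.SUM,        # sum point impacts back to one value per geometry
    )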
@@ -288,13 +308,14 @@ def calc_grid_impact( # disaggregate exposure exp_pnt = exp_geom_to_grid( - exp=exp, grid= grid, disagg_met=disagg_met, - disagg_val=disagg_val - ) + exp=exp, grid=grid, disagg_met=disagg_met, disagg_val=disagg_val + ) exp_pnt.assign_centroids(haz) # compute point impact - impact_pnt = ImpactCalc(exp_pnt, impf_set, haz).impact(save_mat=True, assign_centroids=False) + impact_pnt = ImpactCalc(exp_pnt, impf_set, haz).impact( + save_mat=True, assign_centroids=False + ) # re-aggregate impact to original exposure geometry impact_agg = impact_pnt_agg(impact_pnt, exp_pnt.gdf, agg_met) @@ -324,22 +345,22 @@ def plot_eai_exp_geom(imp_geom, centered=False, figsize=(9, 13), **kwargs): matplotlib axes instance """ - kwargs['figsize'] = figsize - if 'legend_kwds' not in kwargs: - kwargs['legend_kwds'] = { - 'label': f"Impact [{imp_geom.unit}]", - 'orientation': "horizontal" - } - if 'legend' not in kwargs: - kwargs['legend'] = True + kwargs["figsize"] = figsize + if "legend_kwds" not in kwargs: + kwargs["legend_kwds"] = { + "label": f"Impact [{imp_geom.unit}]", + "orientation": "horizontal", + } + if "legend" not in kwargs: + kwargs["legend"] = True gdf_plot = gpd.GeoDataFrame(imp_geom.geom_exp) - gdf_plot['impact'] = imp_geom.eai_exp + gdf_plot["impact"] = imp_geom.eai_exp if centered: # pylint: disable=abstract-class-instantiated - xmin, xmax = u_coord.lon_bounds(imp_geom.coord_exp[:,1]) + xmin, xmax = u_coord.lon_bounds(imp_geom.coord_exp[:, 1]) proj_plot = ccrs.PlateCarree(central_longitude=0.5 * (xmin + xmax)) gdf_plot = gdf_plot.to_crs(proj_plot) - return gdf_plot.plot(column = 'impact', **kwargs) + return gdf_plot.plot(column="impact", **kwargs) def exp_geom_to_pnt(exp, res, to_meters, disagg_met, disagg_val): @@ -385,11 +406,13 @@ def exp_geom_to_pnt(exp, res, to_meters, disagg_met, disagg_val): if disagg_val is not None: exp = exp.copy() - exp.gdf['value'] = disagg_val + exp.gdf["value"] = disagg_val - if ((disagg_val is None) and ('value' not in exp.gdf.columns)): - raise ValueError('There is no value column in the exposure gdf to'+ - ' disaggregate from. Please set disagg_val explicitly.') + if (disagg_val is None) and ("value" not in exp.gdf.columns): + raise ValueError( + "There is no value column in the exposure gdf to" + + " disaggregate from. Please set disagg_val explicitly." + ) gdf_pnt = gdf_to_pnts(exp.gdf, res, to_meters) @@ -445,11 +468,13 @@ def exp_geom_to_grid(exp, grid, disagg_met, disagg_val): if disagg_val is not None: exp = exp.copy() - exp.gdf['value'] = disagg_val + exp.gdf["value"] = disagg_val - if ((disagg_val is None) and ('value' not in exp.gdf.columns)): - raise ValueError('There is no value column in the exposure gdf to'+ - ' disaggregate from. Please set disagg_val explicitly.') + if (disagg_val is None) and ("value" not in exp.gdf.columns): + raise ValueError( + "There is no value column in the exposure gdf to" + + " disaggregate from. Please set disagg_val explicitly." 
+ ) gdf_pnt = gdf_to_grid(exp.gdf, grid) @@ -479,13 +504,13 @@ def _pnt_line_poly_mask(gdf): ------- pnt_mask, line_mask, poly_mask : """ - pnt_mask = gdf.geometry.apply(lambda x: isinstance(x, shgeom.Point)) + pnt_mask = gdf.geometry.apply(lambda x: isinstance(x, shgeom.Point)) - line_mask = gdf.geometry.apply(lambda x: isinstance(x, shgeom.LineString)) - line_mask |= gdf.geometry.apply(lambda x: isinstance(x, shgeom.MultiLineString)) + line_mask = gdf.geometry.apply(lambda x: isinstance(x, shgeom.LineString)) + line_mask |= gdf.geometry.apply(lambda x: isinstance(x, shgeom.MultiLineString)) - poly_mask = gdf.geometry.apply(lambda x: isinstance(x, shgeom.Polygon)) - poly_mask |= gdf.geometry.apply(lambda x: isinstance(x, shgeom.MultiPolygon)) + poly_mask = gdf.geometry.apply(lambda x: isinstance(x, shgeom.Polygon)) + poly_mask |= gdf.geometry.apply(lambda x: isinstance(x, shgeom.MultiPolygon)) return pnt_mask, line_mask, poly_mask @@ -525,23 +550,18 @@ def gdf_to_pnts(gdf, res, to_meters): gdf_pnt = gpd.GeoDataFrame([]) if pnt_mask.any(): gdf_pnt_only = gdf[pnt_mask] - gdf_pnt_only['geometry_orig'] = gdf_pnt_only['geometry'].copy() + gdf_pnt_only["geometry_orig"] = gdf_pnt_only["geometry"].copy() index = gdf_pnt_only.index.values gdf_pnt_only.index = pd.MultiIndex.from_arrays([index, np.zeros(len(index))]) - gdf_pnt = gpd.GeoDataFrame(pd.concat([ - gdf_pnt, - gdf_pnt_only - ])) + gdf_pnt = gpd.GeoDataFrame(pd.concat([gdf_pnt, gdf_pnt_only])) if line_mask.any(): - gdf_pnt = gpd.GeoDataFrame(pd.concat([ - gdf_pnt, - _line_to_pnts(gdf[line_mask], res, to_meters) - ])) + gdf_pnt = gpd.GeoDataFrame( + pd.concat([gdf_pnt, _line_to_pnts(gdf[line_mask], res, to_meters)]) + ) if poly_mask.any(): - gdf_pnt = gpd.GeoDataFrame(pd.concat([ - gdf_pnt, - _poly_to_pnts(gdf[poly_mask], res, to_meters) - ])) + gdf_pnt = gpd.GeoDataFrame( + pd.concat([gdf_pnt, _poly_to_pnts(gdf[poly_mask], res, to_meters)]) + ) return gdf_pnt @@ -583,10 +603,12 @@ def gdf_to_grid(gdf, grid): # Concatenating an empty dataframe with an index together with # a dataframe with a multi-index breaks the multi-index - if (line_mask.any() or pnt_mask.any()): - raise AttributeError("The dataframe contains lines and/or polygons." - "Currently only polygon dataframes can be " - "disaggregated onto a fixed grid.") + if line_mask.any() or pnt_mask.any(): + raise AttributeError( + "The dataframe contains lines and/or polygons." + "Currently only polygon dataframes can be " + "disaggregated onto a fixed grid." 
+ ) if poly_mask.any(): return _poly_to_grid(gdf[poly_mask], grid) @@ -615,10 +637,10 @@ def _disagg_values_div(gdf_pnts): gdf_disagg = gdf_pnts.copy(deep=False) group = gdf_pnts.groupby(axis=0, level=0) - vals = group['value'].mean() / group['value'].count() + vals = group["value"].mean() / group["value"].count() vals = vals.reindex(gdf_pnts.index, level=0) - gdf_disagg['value'] = vals + gdf_disagg["value"] = vals return gdf_disagg @@ -652,20 +674,23 @@ def _poly_to_pnts(gdf, res, to_meters): return gdf # Needed because gdf.explode(index_parts=True) requires numeric index - idx = gdf.index.to_list() #To restore the naming of the index + idx = gdf.index.to_list() # To restore the naming of the index gdf_points = gdf.copy().reset_index(drop=True) # Check if we need to reproject if to_meters and not gdf.geometry.crs.is_projected: - gdf_points['geometry_pnt'] = gdf_points.apply( - lambda row: _interp_one_poly_m(row.geometry, res, gdf.crs), axis=1) + gdf_points["geometry_pnt"] = gdf_points.apply( + lambda row: _interp_one_poly_m(row.geometry, res, gdf.crs), axis=1 + ) else: - gdf_points['geometry_pnt'] = gdf_points.apply( - lambda row: _interp_one_poly(row.geometry, res), axis=1) + gdf_points["geometry_pnt"] = gdf_points.apply( + lambda row: _interp_one_poly(row.geometry, res), axis=1 + ) gdf_points = _swap_geom_cols( - gdf_points, geom_to='geometry_orig', new_geom='geometry_pnt') + gdf_points, geom_to="geometry_orig", new_geom="geometry_pnt" + ) gdf_points = gdf_points.explode(index_parts=True) gdf_points.index = gdf_points.index.set_levels(idx, level=0) @@ -699,16 +724,18 @@ def _poly_to_grid(gdf, grid): return gdf # Needed because gdf.explode(index_parts=True) requires numeric index - idx = gdf.index.to_list() #To restore the naming of the index + idx = gdf.index.to_list() # To restore the naming of the index gdf_points = gdf.copy().reset_index(drop=True) x_grid, y_grid = grid - gdf_points['geometry_pnt'] = gdf_points.apply( - lambda row: _interp_one_poly_grid(row.geometry, x_grid, y_grid), axis=1) + gdf_points["geometry_pnt"] = gdf_points.apply( + lambda row: _interp_one_poly_grid(row.geometry, x_grid, y_grid), axis=1 + ) gdf_points = _swap_geom_cols( - gdf_points, geom_to='geometry_orig', new_geom='geometry_pnt') + gdf_points, geom_to="geometry_orig", new_geom="geometry_pnt" + ) gdf_points = gdf_points.explode(index_parts=True) gdf_points.index = gdf_points.index.set_levels(idx, level=0) @@ -743,7 +770,7 @@ def _interp_one_poly_grid(poly, x_grid, y_grid): if sum(in_geom.flatten()) > 1: return shgeom.MultiPoint(list(zip(x_grid[in_geom], y_grid[in_geom]))) - LOGGER.warning('Polygon smaller than resolution. Setting a representative point.') + LOGGER.warning("Polygon smaller than resolution. Setting a representative point.") return shgeom.MultiPoint([poly.representative_point()]) @@ -775,7 +802,7 @@ def _interp_one_poly(poly, res): if sum(in_geom.flatten()) > 1: return shgeom.MultiPoint(list(zip(x_grid[in_geom], y_grid[in_geom]))) - LOGGER.warning('Polygon smaller than resolution. Setting a representative point.') + LOGGER.warning("Polygon smaller than resolution. 
Setting a representative point.") return shgeom.MultiPoint([poly.representative_point()]) @@ -812,10 +839,11 @@ def _interp_one_poly_m(poly, res, orig_crs): in_geom = sh.vectorized.contains(poly_m, x_grid, y_grid) if sum(in_geom.flatten()) > 1: x_poly, y_poly = reproject_grid( - x_grid[in_geom], y_grid[in_geom], m_crs, orig_crs) + x_grid[in_geom], y_grid[in_geom], m_crs, orig_crs + ) return shgeom.MultiPoint(list(zip(x_poly, y_poly))) - LOGGER.warning('Polygon smaller than resolution. Setting a representative point.') + LOGGER.warning("Polygon smaller than resolution. Setting a representative point.") return shgeom.MultiPoint([poly.representative_point()]) @@ -835,9 +863,9 @@ def _get_pyproj_trafo(orig_crs, dest_crs): """ Get pyproj projection from orig_crs to dest_crs """ - return pyproj.Transformer.from_proj(pyproj.Proj(orig_crs), - pyproj.Proj(dest_crs), - always_xy=True) + return pyproj.Transformer.from_proj( + pyproj.Proj(orig_crs), pyproj.Proj(dest_crs), always_xy=True + ) def reproject_grid(x_grid, y_grid, orig_crs, dest_crs): @@ -889,7 +917,6 @@ def reproject_poly(poly, orig_crs, dest_crs): def _line_to_pnts(gdf_lines, res, to_meters): - """ Convert a GeoDataFrame with LineString geometries to Point geometries, where Points are placed at a specified distance @@ -922,7 +949,7 @@ def _line_to_pnts(gdf_lines, res, to_meters): return gdf_lines # Needed because gdf.explode(index_parts=True) requires numeric index - idx = gdf_lines.index.to_list() #To restore the naming of the index + idx = gdf_lines.index.to_list() # To restore the naming of the index gdf_points = gdf_lines.copy().reset_index(drop=True) if to_meters: @@ -931,31 +958,28 @@ def _line_to_pnts(gdf_lines, res, to_meters): line_lengths = gdf_lines.length # Add warning if lines are too short w.r.t. resolution - failing_res_check_count = len(line_lengths[line_lengths > 10*res]) + failing_res_check_count = len(line_lengths[line_lengths > 10 * res]) if failing_res_check_count > 0: LOGGER.warning( "%d lines with a length < 10*resolution were found. " "Each of these lines is disaggregate to one point. " "Reaggregatint values will thus likely lead to overestimattion. " "Consider chosing a smaller resolution or filter out the short lines. ", - failing_res_check_count - ) - - line_fractions = [ - _line_fraction(length, res) - for length in line_lengths - ] - - gdf_points['geometry_pnt'] = [ - shgeom.MultiPoint([ - line.interpolate(dist, normalized=True) - for dist in fractions - ]) + failing_res_check_count, + ) + + line_fractions = [_line_fraction(length, res) for length in line_lengths] + + gdf_points["geometry_pnt"] = [ + shgeom.MultiPoint( + [line.interpolate(dist, normalized=True) for dist in fractions] + ) for line, fractions in zip(gdf_points.geometry, line_fractions) - ] + ] gdf_points = _swap_geom_cols( - gdf_points, geom_to='geometry_orig', new_geom='geometry_pnt') + gdf_points, geom_to="geometry_orig", new_geom="geometry_pnt" + ) gdf_points = gdf_points.explode(index_parts=True) gdf_points.index = gdf_points.index.set_levels(idx, level=0) @@ -985,6 +1009,7 @@ def _line_fraction(length, res): start = eff_res / 2 return np.arange(start, 1, eff_res) + def _pnts_per_line(length, res): """Calculate number of points fitting along a line, given a certain resolution (spacing) res between points. 
@@ -1021,9 +1046,9 @@ def _swap_geom_cols(gdf, geom_to, new_geom): gdf_swap : gpd.GeoDataFrame Copy of gdf with the new geometry column """ - gdf_swap = gdf.rename(columns = {'geometry': geom_to}) - gdf_swap.rename(columns = {new_geom: 'geometry'}, inplace=True) - gdf_swap.set_geometry('geometry', inplace=True, crs=gdf.crs) + gdf_swap = gdf.rename(columns={"geometry": geom_to}) + gdf_swap.rename(columns={new_geom: "geometry"}, inplace=True) + gdf_swap.set_geometry("geometry", inplace=True, crs=gdf.crs) return gdf_swap diff --git a/climada/util/plot.py b/climada/util/plot.py index e404da2de..92c97ad36 100644 --- a/climada/util/plot.py +++ b/climada/util/plot.py @@ -18,37 +18,39 @@ Define auxiliary functions for plots. """ + # pylint: disable=abstract-class-instantiated -__all__ = ['geo_bin_from_array', - 'geo_im_from_array', - 'make_map', - 'add_shapes', - 'add_populated_places', - 'add_cntry_names' - ] +__all__ = [ + "geo_bin_from_array", + "geo_im_from_array", + "make_map", + "add_shapes", + "add_populated_places", + "add_cntry_names", +] import logging -from textwrap import wrap import warnings +from textwrap import wrap -from scipy.interpolate import griddata -import numpy as np -import matplotlib.pyplot as plt +import cartopy.crs as ccrs +import geopandas as gpd import matplotlib as mpl +import matplotlib.pyplot as plt +import numpy as np +import requests +from cartopy.io import shapereader +from cartopy.mpl.gridliner import LATITUDE_FORMATTER, LONGITUDE_FORMATTER from matplotlib import colormaps as cm from mpl_toolkits.axes_grid1 import make_axes_locatable -from shapely.geometry import box -import cartopy.crs as ccrs -from cartopy.io import shapereader -from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER from rasterio.crs import CRS -import requests -import geopandas as gpd +from scipy.interpolate import griddata +from shapely.geometry import box -from climada.util.constants import CMAP_EXPOSURES, CMAP_CAT, CMAP_RASTER -from climada.util.files_handler import to_list import climada.util.coordinates as u_coord +from climada.util.constants import CMAP_CAT, CMAP_EXPOSURES, CMAP_RASTER +from climada.util.files_handler import to_list LOGGER = logging.getLogger(__name__) @@ -62,10 +64,21 @@ """Maximum number of bins in geo_bin_from_array""" -def geo_bin_from_array(array_sub, geo_coord, var_name, title, - pop_name=True, buffer=BUFFER, extend='neither', - proj=ccrs.PlateCarree(), shapes=True, axes=None, - figsize=(9, 13), adapt_fontsize=True, **kwargs): +def geo_bin_from_array( + array_sub, + geo_coord, + var_name, + title, + pop_name=True, + buffer=BUFFER, + extend="neither", + proj=ccrs.PlateCarree(), + shapes=True, + axes=None, + figsize=(9, 13), + adapt_fontsize=True, + **kwargs, +): """Plot array values binned over input coordinates. 
Parameters @@ -114,16 +127,39 @@ def geo_bin_from_array(array_sub, geo_coord, var_name, title, ValueError: Input array size missmatch """ - return _plot_scattered_data("hexbin", array_sub, geo_coord, var_name, title, - pop_name=pop_name, buffer=buffer, extend=extend, - proj=proj, shapes=shapes, axes=axes, - figsize=figsize, adapt_fontsize=adapt_fontsize, **kwargs) + return _plot_scattered_data( + "hexbin", + array_sub, + geo_coord, + var_name, + title, + pop_name=pop_name, + buffer=buffer, + extend=extend, + proj=proj, + shapes=shapes, + axes=axes, + figsize=figsize, + adapt_fontsize=adapt_fontsize, + **kwargs, + ) -def geo_scatter_from_array(array_sub, geo_coord, var_name, title, - pop_name=False, buffer=BUFFER, extend='neither', - proj=ccrs.PlateCarree(), shapes=True, axes=None, - figsize=(9, 13), adapt_fontsize=True, **kwargs): +def geo_scatter_from_array( + array_sub, + geo_coord, + var_name, + title, + pop_name=False, + buffer=BUFFER, + extend="neither", + proj=ccrs.PlateCarree(), + shapes=True, + axes=None, + figsize=(9, 13), + adapt_fontsize=True, + **kwargs, +): """Plot array values at input coordinates. Parameters @@ -171,38 +207,65 @@ def geo_scatter_from_array(array_sub, geo_coord, var_name, title, ValueError: Input array size missmatch """ - return _plot_scattered_data("scatter", array_sub, geo_coord, var_name, title, - pop_name=pop_name, buffer=buffer, extend=extend, - proj=proj, shapes=shapes, axes=axes, - figsize=figsize, adapt_fontsize=adapt_fontsize, **kwargs) + return _plot_scattered_data( + "scatter", + array_sub, + geo_coord, + var_name, + title, + pop_name=pop_name, + buffer=buffer, + extend=extend, + proj=proj, + shapes=shapes, + axes=axes, + figsize=figsize, + adapt_fontsize=adapt_fontsize, + **kwargs, + ) -def _plot_scattered_data(method, array_sub, geo_coord, var_name, title, - pop_name=False, buffer=BUFFER, extend='neither', - proj=ccrs.PlateCarree(), shapes=True, axes=None, - figsize=(9, 13), adapt_fontsize=True, **kwargs): +def _plot_scattered_data( + method, + array_sub, + geo_coord, + var_name, + title, + pop_name=False, + buffer=BUFFER, + extend="neither", + proj=ccrs.PlateCarree(), + shapes=True, + axes=None, + figsize=(9, 13), + adapt_fontsize=True, + **kwargs, +): """Function for internal use in `geo_scatter_from_array` (when called with method="scatter") and `geo_bin_from_array` (when called with method="hexbin"). 
See the docstrings of the respective functions for more information on the parameters.""" # Generate array of values used in each subplot num_im, list_arr = _get_collection_arrays(array_sub) - list_tit = to_list(num_im, title, 'title') - list_name = to_list(num_im, var_name, 'var_name') - list_coord = to_list(num_im, geo_coord, 'geo_coord') + list_tit = to_list(num_im, title, "title") + list_name = to_list(num_im, var_name, "var_name") + list_coord = to_list(num_im, geo_coord, "geo_coord") - if 'cmap' not in kwargs: - kwargs['cmap'] = CMAP_EXPOSURES + if "cmap" not in kwargs: + kwargs["cmap"] = CMAP_EXPOSURES if axes is None: proj_plot = proj if isinstance(proj, ccrs.PlateCarree): # for PlateCarree, center plot around data's central lon # without overwriting the data's original projection info - xmin, xmax = u_coord.lon_bounds(np.concatenate([c[:, 1] for c in list_coord])) + xmin, xmax = u_coord.lon_bounds( + np.concatenate([c[:, 1] for c in list_coord]) + ) proj_plot = ccrs.PlateCarree(central_longitude=0.5 * (xmin + xmax)) - _, axes, fontsize = make_map(num_im, proj=proj_plot, figsize=figsize, - adapt_fontsize=adapt_fontsize) + _, axes, fontsize = make_map( + num_im, proj=proj_plot, figsize=figsize, adapt_fontsize=adapt_fontsize + ) else: fontsize = None axes_iter = axes @@ -210,17 +273,24 @@ def _plot_scattered_data(method, array_sub, geo_coord, var_name, title, axes_iter = np.array([[axes]]) # Generate each subplot - for array_im, axis, tit, name, coord in \ - zip(list_arr, axes_iter.flatten(), list_tit, list_name, list_coord): + for array_im, axis, tit, name, coord in zip( + list_arr, axes_iter.flatten(), list_tit, list_name, list_coord + ): if coord.shape[0] != array_im.size: - raise ValueError(f"Size mismatch in input array: {coord.shape[0]} != {array_im.size}.") + raise ValueError( + f"Size mismatch in input array: {coord.shape[0]} != {array_im.size}." 
+ ) # Binned image with coastlines if isinstance(proj, ccrs.PlateCarree): - xmin, ymin, xmax, ymax = u_coord.latlon_bounds(coord[:, 0], coord[:, 1], buffer=buffer) + xmin, ymin, xmax, ymax = u_coord.latlon_bounds( + coord[:, 0], coord[:, 1], buffer=buffer + ) extent = (xmin, xmax, ymin, ymax) else: - extent = _get_borders(coord, buffer=buffer, proj_limits=proj.x_limits + proj.y_limits) + extent = _get_borders( + coord, buffer=buffer, proj_limits=proj.x_limits + proj.y_limits + ) axis.set_extent((extent), proj) if shapes: @@ -229,18 +299,21 @@ def _plot_scattered_data(method, array_sub, geo_coord, var_name, title, add_populated_places(axis, extent, proj, fontsize) if method == "hexbin": - if 'gridsize' not in kwargs: - kwargs['gridsize'] = min(int(array_im.size / 2), MAX_BINS) - mappable = axis.hexbin(coord[:, 1], coord[:, 0], C=array_im, - transform=proj, **kwargs) + if "gridsize" not in kwargs: + kwargs["gridsize"] = min(int(array_im.size / 2), MAX_BINS) + mappable = axis.hexbin( + coord[:, 1], coord[:, 0], C=array_im, transform=proj, **kwargs + ) else: - mappable = axis.scatter(coord[:, 1], coord[:, 0], c=array_im, - transform=proj, **kwargs) + mappable = axis.scatter( + coord[:, 1], coord[:, 0], c=array_im, transform=proj, **kwargs + ) # Create colorbar in this axis cbax = make_axes_locatable(axis).append_axes( - 'right', size="6.5%", pad=0.1, axes_class=plt.Axes) - cbar = plt.colorbar(mappable, cax=cbax, orientation='vertical', extend=extend) + "right", size="6.5%", pad=0.1, axes_class=plt.Axes + ) + cbar = plt.colorbar(mappable, cax=cbax, orientation="vertical", extend=extend) cbar.set_label(name) axis.set_title("\n".join(wrap(tit))) if fontsize: @@ -252,9 +325,19 @@ def _plot_scattered_data(method, array_sub, geo_coord, var_name, title, return axes -def geo_im_from_array(array_sub, coord, var_name, title, - proj=None, smooth=True, shapes=True, axes=None, figsize=(9, 13), adapt_fontsize=True, - **kwargs): +def geo_im_from_array( + array_sub, + coord, + var_name, + title, + proj=None, + smooth=True, + shapes=True, + axes=None, + figsize=(9, 13), + adapt_fontsize=True, + **kwargs, +): """Image(s) plot defined in array(s) over input coordinates. 
Parameters @@ -298,10 +381,9 @@ def geo_im_from_array(array_sub, coord, var_name, title, # Generate array of values used in each subplot num_im, list_arr = _get_collection_arrays(array_sub) - list_tit = to_list(num_im, title, 'title') - list_name = to_list(num_im, var_name, 'var_name') - list_coord = to_list(num_im, coord, 'geo_coord') - + list_tit = to_list(num_im, title, "title") + list_name = to_list(num_im, var_name, "var_name") + list_coord = to_list(num_im, coord, "geo_coord") is_reg, height, width = u_coord.grid_is_regular(coord) extent = _get_borders(coord, proj_limits=(-360, 360, -90, 90)) @@ -313,20 +395,23 @@ def geo_im_from_array(array_sub, coord, var_name, title, if "norm" in kwargs: min_value = kwargs["norm"].vmin else: - kwargs['vmin'] = kwargs.get("vmin", np.nanmin(array_sub)) - min_value = kwargs['vmin'] - kwargs['vmax'] = kwargs.get("vmax", np.nanmax(array_sub)) - min_value = min_value/2 if min_value > 0 else min_value-1 + kwargs["vmin"] = kwargs.get("vmin", np.nanmin(array_sub)) + min_value = kwargs["vmin"] + kwargs["vmax"] = kwargs.get("vmax", np.nanmax(array_sub)) + min_value = min_value / 2 if min_value > 0 else min_value - 1 if axes is None: proj_plot = proj if isinstance(proj, ccrs.PlateCarree): # for PlateCarree, center plot around data's central lon # without overwriting the data's original projection info - xmin, xmax = u_coord.lon_bounds(np.concatenate([c[:, 1] for c in list_coord])) + xmin, xmax = u_coord.lon_bounds( + np.concatenate([c[:, 1] for c in list_coord]) + ) proj_plot = ccrs.PlateCarree(central_longitude=0.5 * (xmin + xmax)) - _, axes, fontsize = make_map(num_im, proj=proj_plot, figsize=figsize, - adapt_fontsize=adapt_fontsize) + _, axes, fontsize = make_map( + num_im, proj=proj_plot, figsize=figsize, adapt_fontsize=adapt_fontsize + ) else: fontsize = None axes_iter = axes @@ -339,19 +424,25 @@ def geo_im_from_array(array_sub, coord, var_name, title, cmap.set_under("white", alpha=0) # For values below vmin # Generate each subplot - for array_im, axis, tit, name in zip(list_arr, axes_iter.flatten(), list_tit, list_name): + for array_im, axis, tit, name in zip( + list_arr, axes_iter.flatten(), list_tit, list_name + ): if coord.shape[0] != array_im.size: - raise ValueError(f"Size mismatch in input array: {coord.shape[0]} != {array_im.size}.") + raise ValueError( + f"Size mismatch in input array: {coord.shape[0]} != {array_im.size}." 
+ ) if smooth or not is_reg: # Create regular grid where to interpolate the array grid_x, grid_y = np.mgrid[ - extent[0]: extent[1]: complex(0, RESOLUTION), - extent[2]: extent[3]: complex(0, RESOLUTION)] + extent[0] : extent[1] : complex(0, RESOLUTION), + extent[2] : extent[3] : complex(0, RESOLUTION), + ] grid_im = griddata( (coord[:, 1], coord[:, 0]), array_im, (grid_x, grid_y), - fill_value=min_value) + fill_value=min_value, + ) else: grid_x = coord[:, 1].reshape((width, height)).transpose() grid_y = coord[:, 0].reshape((width, height)).transpose() @@ -360,24 +451,26 @@ def geo_im_from_array(array_sub, coord, var_name, title, grid_y = np.flip(grid_y) grid_im = np.flip(grid_im, 1) grid_im = np.resize(grid_im, (height, width, 1)) - axis.set_extent((extent[0] - mid_lon, extent[1] - mid_lon, - extent[2], extent[3]), crs=proj) + axis.set_extent( + (extent[0] - mid_lon, extent[1] - mid_lon, extent[2], extent[3]), crs=proj + ) # Add coastline to axis if shapes: add_shapes(axis) # Create colormesh, colorbar and labels in axis - cbax = make_axes_locatable(axis).append_axes('right', size="6.5%", - pad=0.1, axes_class=plt.Axes) + cbax = make_axes_locatable(axis).append_axes( + "right", size="6.5%", pad=0.1, axes_class=plt.Axes + ) img = axis.pcolormesh( grid_x - mid_lon, grid_y, np.squeeze(grid_im), transform=proj, cmap=cmap, - **kwargs + **kwargs, ) - cbar = plt.colorbar(img, cax=cbax, orientation='vertical') + cbar = plt.colorbar(img, cax=cbax, orientation="vertical") cbar.set_label(name) axis.set_title("\n".join(wrap(tit))) if fontsize: @@ -390,8 +483,9 @@ def geo_im_from_array(array_sub, coord, var_name, title, return axes -def geo_scatter_categorical(array_sub, geo_coord, var_name, title, - cat_name=None, adapt_fontsize=True, **kwargs): +def geo_scatter_categorical( + array_sub, geo_coord, var_name, title, cat_name=None, adapt_fontsize=True, **kwargs +): """ Map plots for categorical data defined in array(s) over input coordinates. The categories must be a finite set of unique values @@ -444,19 +538,33 @@ def geo_scatter_categorical(array_sub, geo_coord, var_name, title, # convert sorted categories to numeric array [0, 1, ...] 
array_sub = np.array(array_sub) - array_sub_unique, array_sub_cat = np.unique(array_sub, return_inverse=True) #flattens array + array_sub_unique, array_sub_cat = np.unique( + array_sub, return_inverse=True + ) # flattens array array_sub_cat = array_sub_cat.reshape(array_sub.shape) array_sub_n = array_sub_unique.size - if 'cmap' in kwargs: + if "cmap" in kwargs: # optional user defined colormap (can be continuous) - cmap_arg = kwargs['cmap'] + cmap_arg = kwargs["cmap"] if isinstance(cmap_arg, str): cmap_name = cmap_arg # for qualitative colormaps taking the first few colors is preferable # over jumping equal distances - if cmap_name in ['Pastel1', 'Pastel2', 'Paired', 'Accent', 'Dark2', - 'Set1', 'Set2', 'Set3', 'tab10', 'tab20', 'tab20b', 'tab20c']: + if cmap_name in [ + "Pastel1", + "Pastel2", + "Paired", + "Accent", + "Dark2", + "Set1", + "Set2", + "Set3", + "tab10", + "tab20", + "tab20b", + "tab20c", + ]: cmap = mpl.colors.ListedColormap( cm.get_cmap(cmap_name).colors[:array_sub_n] ) @@ -464,38 +572,45 @@ def geo_scatter_categorical(array_sub, geo_coord, var_name, title, cmap = cm.get_cmap(cmap_arg).resampled(array_sub_n) elif isinstance(cmap_arg, mpl.colors.ListedColormap): # If a user brings their own colormap it's probably qualitative - cmap_name = 'defined by the user' - cmap = mpl.colors.ListedColormap( - cmap_arg.colors[:array_sub_n] - ) + cmap_name = "defined by the user" + cmap = mpl.colors.ListedColormap(cmap_arg.colors[:array_sub_n]) else: - raise TypeError("if cmap is given it must be either a str or a ListedColormap") + raise TypeError( + "if cmap is given it must be either a str or a ListedColormap" + ) else: # default qualitative colormap cmap_name = CMAP_CAT - cmap = mpl.colors.ListedColormap( - cm.get_cmap(cmap_name).colors[:array_sub_n] - ) + cmap = mpl.colors.ListedColormap(cm.get_cmap(cmap_name).colors[:array_sub_n]) if array_sub_n > cmap.N: - LOGGER.warning("More than %d categories cannot be plotted accurately " - "using the colormap %s. Please specify " - "a different qualitative colormap using the `cmap` " - "attribute. For Matplotlib's built-in colormaps, see " - "https://matplotlib.org/stable/tutorials/colors/colormaps.html", - cmap.N, cmap_name) + LOGGER.warning( + "More than %d categories cannot be plotted accurately " + "using the colormap %s. Please specify " + "a different qualitative colormap using the `cmap` " + "attribute. 
For Matplotlib's built-in colormaps, see " + "https://matplotlib.org/stable/tutorials/colors/colormaps.html", + cmap.N, + cmap_name, + ) # define the discrete colormap kwargs - kwargs['cmap'] = cmap - kwargs['vmin'] = -0.5 - kwargs['vmax'] = array_sub_n - 0.5 + kwargs["cmap"] = cmap + kwargs["vmin"] = -0.5 + kwargs["vmax"] = array_sub_n - 0.5 # #create the axes axes = _plot_scattered_data( - "scatter", array_sub_cat, geo_coord, var_name, title, - adapt_fontsize=adapt_fontsize, **kwargs) + "scatter", + array_sub_cat, + geo_coord, + var_name, + title, + adapt_fontsize=adapt_fontsize, + **kwargs, + ) - #add colorbar labels + # add colorbar labels if cat_name is None: cat_name = array_sub_unique.astype(str) if not isinstance(cat_name, dict): @@ -540,8 +655,9 @@ def make_map(num_sub=1, figsize=(9, 13), proj=ccrs.PlateCarree(), adapt_fontsize else: num_row, num_col = num_sub - fig, axis_sub = plt.subplots(num_row, num_col, figsize=figsize, - subplot_kw=dict(projection=proj)) + fig, axis_sub = plt.subplots( + num_row, num_col, figsize=figsize, subplot_kw=dict(projection=proj) + ) axes_iter = axis_sub if not isinstance(axis_sub, np.ndarray): axes_iter = np.array([[axis_sub]]) @@ -553,11 +669,11 @@ def make_map(num_sub=1, figsize=(9, 13), proj=ccrs.PlateCarree(), adapt_fontsize grid.xformatter = LONGITUDE_FORMATTER grid.yformatter = LATITUDE_FORMATTER if adapt_fontsize: - fontsize = axis.bbox.width/35 + fontsize = axis.bbox.width / 35 if fontsize < 10: fontsize = 10 - grid.xlabel_style = {'size': fontsize} - grid.ylabel_style = {'size': fontsize} + grid.xlabel_style = {"size": fontsize} + grid.ylabel_style = {"size": fontsize} else: fontsize = None except TypeError: @@ -572,6 +688,7 @@ def make_map(num_sub=1, figsize=(9, 13), proj=ccrs.PlateCarree(), adapt_fontsize return fig, axis_sub, fontsize + def add_shapes(axis): """ Overlay Earth's countries coastlines to matplotlib.pyplot axis. @@ -584,12 +701,15 @@ def add_shapes(axis): Geographical projection. The default is PlateCarree. """ - shp_file = shapereader.natural_earth(resolution='10m', category='cultural', - name='admin_0_countries') + shp_file = shapereader.natural_earth( + resolution="10m", category="cultural", name="admin_0_countries" + ) shp = shapereader.Reader(shp_file) for geometry in shp.geometries(): - axis.add_geometries([geometry], crs=ccrs.PlateCarree(), facecolor='none', - edgecolor='dimgray') + axis.add_geometries( + [geometry], crs=ccrs.PlateCarree(), facecolor="none", edgecolor="dimgray" + ) + def _ensure_utf8(val): # Without the `*.cpg` file present, the shape reader wrongly assumes latin-1 encoding: @@ -598,10 +718,11 @@ def _ensure_utf8(val): # As a workaround, we encode and decode again, unless this fails which means # that the `*.cpg` is present and the encoding is correct: try: - return val.encode('latin-1').decode('utf-8') + return val.encode("latin-1").decode("utf-8") except (AttributeError, UnicodeDecodeError, UnicodeEncodeError): return val + def add_populated_places(axis, extent, proj=ccrs.PlateCarree(), fontsize=None): """ Add city names. @@ -620,21 +741,37 @@ def add_populated_places(axis, extent, proj=ccrs.PlateCarree(), fontsize=None): are used. 
""" - shp_file = shapereader.natural_earth(resolution='50m', category='cultural', - name='populated_places_simple') + shp_file = shapereader.natural_earth( + resolution="50m", category="cultural", name="populated_places_simple" + ) shp = shapereader.Reader(shp_file) ext_pts = list(box(*u_coord.toggle_extent_bounds(extent)).exterior.coords) - ext_trans = [ccrs.PlateCarree().transform_point(pts[0], pts[1], proj) - for pts in ext_pts] + ext_trans = [ + ccrs.PlateCarree().transform_point(pts[0], pts[1], proj) for pts in ext_pts + ] for rec, point in zip(shp.records(), shp.geometries()): if ext_trans[2][0] < point.x <= ext_trans[0][0]: if ext_trans[0][1] < point.y <= ext_trans[1][1]: - axis.plot(point.x, point.y, color='navy', marker='o', - transform=ccrs.PlateCarree(), markerfacecolor='None') - axis.text(point.x, point.y, _ensure_utf8(rec.attributes['name']), - horizontalalignment='right', verticalalignment='bottom', - transform=ccrs.PlateCarree(), color='navy', fontsize=fontsize) + axis.plot( + point.x, + point.y, + color="navy", + marker="o", + transform=ccrs.PlateCarree(), + markerfacecolor="None", + ) + axis.text( + point.x, + point.y, + _ensure_utf8(rec.attributes["name"]), + horizontalalignment="right", + verticalalignment="bottom", + transform=ccrs.PlateCarree(), + color="navy", + fontsize=fontsize, + ) + def add_cntry_names(axis, extent, proj=ccrs.PlateCarree(), fontsize=None): """ @@ -653,21 +790,31 @@ def add_cntry_names(axis, extent, proj=ccrs.PlateCarree(), fontsize=None): Size of the fonts. If set to None, the default matplotlib settings are used. """ - shp_file = shapereader.natural_earth(resolution='10m', category='cultural', - name='admin_0_countries') + shp_file = shapereader.natural_earth( + resolution="10m", category="cultural", name="admin_0_countries" + ) shp = shapereader.Reader(shp_file) ext_pts = list(box(*u_coord.toggle_extent_bounds(extent)).exterior.coords) - ext_trans = [ccrs.PlateCarree().transform_point(pts[0], pts[1], proj) - for pts in ext_pts] + ext_trans = [ + ccrs.PlateCarree().transform_point(pts[0], pts[1], proj) for pts in ext_pts + ] for rec, point in zip(shp.records(), shp.geometries()): point_x = point.centroid.xy[0][0] point_y = point.centroid.xy[1][0] if ext_trans[2][0] < point_x <= ext_trans[0][0]: if ext_trans[0][1] < point_y <= ext_trans[1][1]: - axis.text(point_x, point_y, _ensure_utf8(rec.attributes['NAME']), - horizontalalignment='center', verticalalignment='center', - transform=ccrs.PlateCarree(), fontsize=fontsize, color='navy') + axis.text( + point_x, + point_y, + _ensure_utf8(rec.attributes["NAME"]), + horizontalalignment="center", + verticalalignment="center", + transform=ccrs.PlateCarree(), + fontsize=fontsize, + color="navy", + ) + def _get_collection_arrays(array_sub): """ @@ -698,6 +845,7 @@ def _get_collection_arrays(array_sub): return num_im, list_arr + def _get_row_col_size(num_sub): """ Compute number of rows and columns of subplots in figure. @@ -724,6 +872,7 @@ def _get_row_col_size(num_sub): num_row = int(num_sub / 2) + num_sub % 2 return num_row, num_col + def _get_borders(geo_coord, buffer=0, proj_limits=(-180, 180, -90, 90)): """ Get min and max longitude and min and max latitude (in this order). @@ -748,6 +897,7 @@ def _get_borders(geo_coord, buffer=0, proj_limits=(-180, 180, -90, 90)): max_lat = min(np.max(geo_coord[:, 0]) + buffer, proj_limits[3]) return [min_lon, max_lon, min_lat, max_lat] + def get_transformation(crs_in): """ Get projection and its units to use in cartopy transforamtions from current crs. 
@@ -774,11 +924,12 @@ def get_transformation(crs_in): crs = ccrs.epsg(epsg) except ValueError: LOGGER.warning( - "Error parsing coordinate system '%s'. Using projection PlateCarree in plot.", crs_in + "Error parsing coordinate system '%s'. Using projection PlateCarree in plot.", + crs_in, ) crs = ccrs.PlateCarree() except requests.exceptions.ConnectionError: - LOGGER.warning('No internet connection. Using projection PlateCarree in plot.') + LOGGER.warning("No internet connection. Using projection PlateCarree in plot.") crs = ccrs.PlateCarree() # units @@ -788,23 +939,33 @@ def get_transformation(crs_in): # we may safely ignore it. warnings.simplefilter(action="ignore", category=UserWarning) try: - units = (crs.proj4_params.get('units') - # As of cartopy 0.20 the proj4_params attribute is {} for CRS from an EPSG number - # (see issue raised https://github.com/SciTools/cartopy/issues/1974 - # and longterm discussion on https://github.com/SciTools/cartopy/issues/813). - # In these cases the units can be fetched through the method `to_dict`. - or crs.to_dict().get('units', '°')) + units = ( + crs.proj4_params.get("units") + # As of cartopy 0.20 the proj4_params attribute is {} for CRS from an EPSG number + # (see issue raised https://github.com/SciTools/cartopy/issues/1974 + # and longterm discussion on https://github.com/SciTools/cartopy/issues/813). + # In these cases the units can be fetched through the method `to_dict`. + or crs.to_dict().get("units", "°") + ) except AttributeError: # This happens in setups with cartopy<0.20, where `to_dict` is not defined. # Officially, we require cartopy>=0.20, but there are still users around that # can't upgrade due to https://github.com/SciTools/iris/issues/4468 - units = '°' + units = "°" return crs, units -def multibar_plot(ax, data, colors=None, total_width=0.8, single_width=1, - legend=True, ticklabels=None, invert_axis=False): +def multibar_plot( + ax, + data, + colors=None, + total_width=0.8, + single_width=1, + legend=True, + ticklabels=None, + invert_axis=False, +): """ Draws a bar plot with multiple bars per data point. 
https://stackoverflow.com/questions/14270391/python-matplotlib-multiple-bars @@ -854,7 +1015,7 @@ def multibar_plot(ax, data, colors=None, total_width=0.8, single_width=1, # Check if colors where provided, otherwhise use the default color cycle if colors is None: - colors = plt.rcParams['axes.prop_cycle'].by_key()['color'] + colors = plt.rcParams["axes.prop_cycle"].by_key()["color"] # Number of bars per group n_bars = len(data) @@ -873,11 +1034,19 @@ def multibar_plot(ax, data, colors=None, total_width=0.8, single_width=1, # Draw a bar for every value of that type for x, y in enumerate(values): if invert_axis: - lbar = ax.barh(x + x_offset, width=y, height=bar_width * single_width, - color=colors[i % len(colors)]) + lbar = ax.barh( + x + x_offset, + width=y, + height=bar_width * single_width, + color=colors[i % len(colors)], + ) else: - lbar = ax.bar(x + x_offset, y, width=bar_width * single_width, - color=colors[i % len(colors)]) + lbar = ax.bar( + x + x_offset, + y, + width=bar_width * single_width, + color=colors[i % len(colors)], + ) # Add a handle to the last drawn bar, which we'll need for the legend bars.append(lbar[0]) @@ -892,17 +1061,18 @@ def multibar_plot(ax, data, colors=None, total_width=0.8, single_width=1, if legend: ax.legend(bars, data.keys()) + def plot_from_gdf( - gdf: gpd.GeoDataFrame, - colorbar_name: str = None, - title_subplots: callable = None, - smooth=True, - axis=None, - figsize=(9, 13), - adapt_fontsize=True, - **kwargs + gdf: gpd.GeoDataFrame, + colorbar_name: str = None, + title_subplots: callable = None, + smooth=True, + axis=None, + figsize=(9, 13), + adapt_fontsize=True, + **kwargs, ): - """Plot several subplots from different columns of a GeoDataFrame, e.g., for + """Plot several subplots from different columns of a GeoDataFrame, e.g., for plotting local return periods or local exceedance intensities. Parameters @@ -933,47 +1103,56 @@ def plot_from_gdf( # check if inputs are correct types if not isinstance(gdf, gpd.GeoDataFrame): raise ValueError("gdf is not a GeoDataFrame") - gdf_values = gdf.drop(columns='geometry').values.T + gdf_values = gdf.drop(columns="geometry").values.T # read meta data for fig and axis labels if not isinstance(colorbar_name, str): print("Unknown colorbar name. Colorbar label will be missing.") - colorbar_name = '' + colorbar_name = "" if not callable(title_subplots): - print("Unknown subplot-title-generation function. Subplot titles will be column names.") + print( + "Unknown subplot-title-generation function. Subplot titles will be column names." 
+ ) title_subplots = lambda cols: [f"{col}" for col in cols] # use log colorbar for return periods and impact if ( - colorbar_name.strip().startswith(('Return Period', 'Impact')) and - 'norm' not in kwargs.keys() and + colorbar_name.strip().startswith(("Return Period", "Impact")) + and "norm" not in kwargs.keys() + and # check if there are no zeros values in gdf - not np.any(gdf_values == 0) and + not np.any(gdf_values == 0) + and # check if value range too small for logarithmic colorscale (np.log10(np.nanmax(gdf_values)) - np.log10(np.nanmin(gdf_values))) > 2 ): kwargs.update( - {'norm': mpl.colors.LogNorm( - vmin=np.nanmin(gdf_values), vmax=np.nanmax(gdf_values) + { + "norm": mpl.colors.LogNorm( + vmin=np.nanmin(gdf_values), vmax=np.nanmax(gdf_values) ), - 'vmin': None, 'vmax': None} + "vmin": None, + "vmax": None, + } ) # use inverted color bar for return periods - if (colorbar_name.strip().startswith('Return Period') and - 'cmap' not in kwargs.keys()): - kwargs.update({'cmap': 'viridis_r'}) + if ( + colorbar_name.strip().startswith("Return Period") + and "cmap" not in kwargs.keys() + ): + kwargs.update({"cmap": "viridis_r"}) axis = geo_im_from_array( gdf_values, - gdf.geometry.get_coordinates().values[:,::-1], + gdf.geometry.get_coordinates().values[:, ::-1], colorbar_name, - title_subplots(np.delete(gdf.columns, np.where(gdf.columns == 'geometry'))), + title_subplots(np.delete(gdf.columns, np.where(gdf.columns == "geometry"))), smooth=smooth, axes=axis, figsize=figsize, adapt_fontsize=adapt_fontsize, - **kwargs + **kwargs, ) return axis diff --git a/climada/util/save.py b/climada/util/save.py index 5d871b6f7..a1a74dde3 100644 --- a/climada/util/save.py +++ b/climada/util/save.py @@ -19,12 +19,11 @@ define save functionalities """ -__all__ = ['save', - 'load'] +__all__ = ["save", "load"] -from pathlib import Path -import pickle import logging +import pickle +from pathlib import Path from climada.util.config import CONFIG @@ -42,21 +41,26 @@ def save(out_file_name, var): var : object variable to save in pickle format """ - out_file = Path(out_file_name) if Path(out_file_name).is_absolute() \ + out_file = ( + Path(out_file_name) + if Path(out_file_name).is_absolute() else CONFIG.local_data.save_dir.dir().joinpath(out_file_name) + ) target_dir = out_file.parent try: # Generate folder if it doesn't exists if not target_dir.is_dir(): target_dir.mkdir() - LOGGER.info('Created folder %s.', target_dir) - with out_file.open('wb') as flh: + LOGGER.info("Created folder %s.", target_dir) + with out_file.open("wb") as flh: pickle.dump(var, flh, pickle.HIGHEST_PROTOCOL) - LOGGER.info('Written file %s', out_file) + LOGGER.info("Written file %s", out_file) except FileNotFoundError as err: - raise FileNotFoundError(f'Folder {target_dir} not found: ' + str(err)) from err + raise FileNotFoundError(f"Folder {target_dir} not found: " + str(err)) from err except OSError as ose: - raise ValueError('Data is probably too big. Try splitting it: ' + str(ose)) from ose + raise ValueError( + "Data is probably too big. 
Try splitting it: " + str(ose) + ) from ose def load(in_file_name): @@ -72,8 +76,11 @@ def load(in_file_name): ------- object """ - in_file = Path(in_file_name) if Path(in_file_name).is_absolute() \ + in_file = ( + Path(in_file_name) + if Path(in_file_name).is_absolute() else CONFIG.local_data.save_dir.dir().joinpath(in_file_name) - with in_file.open('rb') as flh: + ) + with in_file.open("rb") as flh: data = pickle.load(flh) return data diff --git a/climada/util/scalebar_plot.py b/climada/util/scalebar_plot.py index 11d16b9d0..e30f515a6 100644 --- a/climada/util/scalebar_plot.py +++ b/climada/util/scalebar_plot.py @@ -20,9 +20,10 @@ https://stackoverflow.com/questions/32333870/how-can-i-show-a-km-ruler-on-a-cartopy-matplotlib-plot/50674451#50674451 """ -import numpy as np import cartopy.crs as ccrs import cartopy.geodesic as cgeo +import numpy as np + def _axes_to_lonlat(ax, coords): """(lon, lat) from axes coordinates.""" @@ -32,6 +33,7 @@ def _axes_to_lonlat(ax, coords): return lonlat + def _upper_bound(start, direction, distance, dist_func): """A point farther than distance from start, in the given direction. @@ -97,8 +99,10 @@ def _distance_along_line(start, end, distance, dist_func, tol): """ initial_distance = dist_func(start, end) if initial_distance < distance: - raise ValueError(f"End is closer to start ({initial_distance}) than " - f"given distance ({distance}).") + raise ValueError( + f"End is closer to start ({initial_distance}) than " + f"given distance ({distance})." + ) if tol <= 0: raise ValueError(f"Tolerance is not positive: {tol}") @@ -159,10 +163,23 @@ def dist_func(a_axes, b_axes): return _distance_along_line(start, end, distance, dist_func, tol) -def scale_bar(ax, location, length, metres_per_unit=1000, unit_name='km', - tol=0.01, angle=0, color='black', linewidth=3, text_offset=0.005, - ha='center', va='bottom', plot_kwargs=None, text_kwargs=None, - **kwargs): +def scale_bar( + ax, + location, + length, + metres_per_unit=1000, + unit_name="km", + tol=0.01, + angle=0, + color="black", + linewidth=3, + text_offset=0.005, + ha="center", + va="bottom", + plot_kwargs=None, + text_kwargs=None, + **kwargs, +): """Add a scale bar to CartoPy axes. For angles between 0 and 90 the text and line may be plotted at @@ -209,10 +226,15 @@ def scale_bar(ax, location, length, metres_per_unit=1000, unit_name='km', if text_kwargs is None: text_kwargs = {} - plot_kwargs = {'linewidth': linewidth, 'color': color, **plot_kwargs, - **kwargs} - text_kwargs = {'ha': ha, 'va': va, 'rotation': angle, 'color': color, - **text_kwargs, **kwargs} + plot_kwargs = {"linewidth": linewidth, "color": color, **plot_kwargs, **kwargs} + text_kwargs = { + "ha": ha, + "va": va, + "rotation": angle, + "color": color, + **text_kwargs, + **kwargs, + } # Convert all units and types. location = np.asarray(location) # For vector addition. @@ -220,8 +242,7 @@ def scale_bar(ax, location, length, metres_per_unit=1000, unit_name='km', angle_rad = angle * np.pi / 180 # End-point of bar. - end = _point_along_line(ax, location, length_metres, angle=angle_rad, - tol=tol) + end = _point_along_line(ax, location, length_metres, angle=angle_rad, tol=tol) # Coordinates are currently in axes coordinates, so use transAxes to # put into data coordinates. *zip(a, b) produces a list of x-coords, @@ -234,5 +255,10 @@ def scale_bar(ax, location, length, metres_per_unit=1000, unit_name='km', text_location = midpoint + offset # 'rotation' keyword argument is in text_kwargs. 
- ax.text(*text_location, f"{length} {unit_name}", rotation_mode='anchor', - transform=ax.transAxes, **text_kwargs) + ax.text( + *text_location, + f"{length} {unit_name}", + rotation_mode="anchor", + transform=ax.transAxes, + **text_kwargs, + ) diff --git a/climada/util/select.py b/climada/util/select.py index 4b62a1b34..aaee63890 100755 --- a/climada/util/select.py +++ b/climada/util/select.py @@ -19,13 +19,13 @@ module containing functions to support various select methods. """ - - import logging + import numpy as np LOGGER = logging.getLogger(__name__) + def get_attributes_with_matching_dimension(obj, dims): """ Get the attributes of an object that have len(dims) number diff --git a/climada/util/test/test__init__.py b/climada/util/test/test__init__.py index cef6e8e48..aebd1b017 100755 --- a/climada/util/test/test__init__.py +++ b/climada/util/test/test__init__.py @@ -18,28 +18,31 @@ Test config module. """ -import unittest + import logging +import unittest from climada.util import log_level + class TestUtilInit(unittest.TestCase): """Test util __init__ methods""" def test_log_level_pass(self): """Test log level context manager passes""" - #Check loggers are set to level - with self.assertLogs('climada', level='INFO') as cm: - with log_level('WARNING'): - logging.getLogger('climada').info('info') - logging.getLogger('climada').error('error') - self.assertEqual(cm.output, ['ERROR:climada:error']) - #Check if only climada loggers level change - with self.assertLogs('matplotlib', level='DEBUG') as cm: - with log_level('ERROR', name_prefix='climada'): - logging.getLogger('climada').info('info') - logging.getLogger('matplotlib').debug('debug') - self.assertEqual(cm.output, ['DEBUG:matplotlib:debug']) + # Check loggers are set to level + with self.assertLogs("climada", level="INFO") as cm: + with log_level("WARNING"): + logging.getLogger("climada").info("info") + logging.getLogger("climada").error("error") + self.assertEqual(cm.output, ["ERROR:climada:error"]) + # Check if only climada loggers level change + with self.assertLogs("matplotlib", level="DEBUG") as cm: + with log_level("ERROR", name_prefix="climada"): + logging.getLogger("climada").info("info") + logging.getLogger("matplotlib").debug("debug") + self.assertEqual(cm.output, ["DEBUG:matplotlib:debug"]) + # Execute Tests if __name__ == "__main__": diff --git a/climada/util/test/test_checker.py b/climada/util/test/test_checker.py index 391191a7e..493f394e4 100644 --- a/climada/util/test/test_checker.py +++ b/climada/util/test/test_checker.py @@ -20,15 +20,17 @@ """ import unittest + import numpy as np import scipy.sparse as sparse import climada.util.checker as u_check + class DummyClass(object): - vars_oblig = {'id', 'array', 'sparse_arr'} - vars_opt = {'list', 'array_opt'} + vars_oblig = {"id", "array", "sparse_arr"} + vars_opt = {"list", "array_opt"} def __init__(self): self.id = np.arange(25) @@ -36,7 +38,8 @@ def __init__(self): self.array_opt = np.arange(25) self.list = np.arange(25).tolist() self.sparse_arr = sparse.csr_matrix(np.zeros((25, 2))) - self.name = 'name class' + self.name = "name class" + class TestChecks(unittest.TestCase): """Test loading funcions from the Hazard class""" @@ -44,48 +47,76 @@ class TestChecks(unittest.TestCase): def test_check_obligatories_pass(self): """Correct DummyClass definition""" dummy = DummyClass() - u_check.check_obligatories(dummy.__dict__, dummy.vars_oblig, "DummyClass.", - dummy.id.size, dummy.id.size, 2) + u_check.check_obligatories( + dummy.__dict__, + dummy.vars_oblig, + 
"DummyClass.", + dummy.id.size, + dummy.id.size, + 2, + ) def test_check_obligatories_fail(self): """Wrong DummyClass definition""" dummy = DummyClass() dummy.array = np.arange(3) with self.assertRaises(ValueError) as cm: - u_check.check_obligatories(dummy.__dict__, dummy.vars_oblig, "DummyClass.", - dummy.id.size, dummy.id.size, 2) - self.assertIn('Invalid DummyClass.array size: 25 != 3.', str(cm.exception)) + u_check.check_obligatories( + dummy.__dict__, + dummy.vars_oblig, + "DummyClass.", + dummy.id.size, + dummy.id.size, + 2, + ) + self.assertIn("Invalid DummyClass.array size: 25 != 3.", str(cm.exception)) dummy = DummyClass() dummy.sparse_arr = sparse.csr_matrix(np.zeros((25, 1))) with self.assertRaises(ValueError) as cm: - u_check.check_obligatories(dummy.__dict__, dummy.vars_oblig, "DummyClass.", - dummy.id.size, dummy.id.size, 2) - self.assertIn('Invalid DummyClass.sparse_arr column size: 2 != 1.', str(cm.exception)) + u_check.check_obligatories( + dummy.__dict__, + dummy.vars_oblig, + "DummyClass.", + dummy.id.size, + dummy.id.size, + 2, + ) + self.assertIn( + "Invalid DummyClass.sparse_arr column size: 2 != 1.", str(cm.exception) + ) def test_check_optionals_pass(self): """Correct DummyClass definition""" dummy = DummyClass() - u_check.check_optionals(dummy.__dict__, dummy.vars_opt, "DummyClass.", dummy.id.size) + u_check.check_optionals( + dummy.__dict__, dummy.vars_opt, "DummyClass.", dummy.id.size + ) def test_check_optionals_fail(self): """Correct DummyClass definition""" dummy = DummyClass() dummy.array_opt = np.arange(3) with self.assertRaises(ValueError) as cm: - u_check.check_optionals(dummy.__dict__, dummy.vars_opt, "DummyClass.", dummy.id.size) - self.assertIn('Invalid DummyClass.array_opt size: 25 != 3.', str(cm.exception)) + u_check.check_optionals( + dummy.__dict__, dummy.vars_opt, "DummyClass.", dummy.id.size + ) + self.assertIn("Invalid DummyClass.array_opt size: 25 != 3.", str(cm.exception)) dummy.array_opt = np.array([], int) - with self.assertLogs('climada.util.checker', level='DEBUG') as cm: - u_check.check_optionals(dummy.__dict__, dummy.vars_opt, "DummyClass.", dummy.id.size) - self.assertIn('DummyClass.array_opt not set.', cm.output[0]) + with self.assertLogs("climada.util.checker", level="DEBUG") as cm: + u_check.check_optionals( + dummy.__dict__, dummy.vars_opt, "DummyClass.", dummy.id.size + ) + self.assertIn("DummyClass.array_opt not set.", cm.output[0]) dummy = DummyClass() dummy.list = np.arange(3).tolist() with self.assertRaises(ValueError) as cm: - u_check.check_optionals(dummy.__dict__, dummy.vars_opt, "DummyClass.", dummy.id.size) - self.assertIn('Invalid DummyClass.list size: 25 != 3.', str(cm.exception)) + u_check.check_optionals( + dummy.__dict__, dummy.vars_opt, "DummyClass.", dummy.id.size + ) + self.assertIn("Invalid DummyClass.list size: 25 != 3.", str(cm.exception)) def test_prune_csr_matrix(self): """Check that csr matrices are brought into canonical format""" @@ -109,6 +140,7 @@ def test_prune_csr_matrix(self): np.testing.assert_array_equal(matrix.data, [3]) self.assertEqual(matrix.nnz, 1) + # Execute Tests if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestChecks) diff --git a/climada/util/test/test_config.py b/climada/util/test/test_config.py index 58bdce0e6..6579927f2 100644 --- a/climada/util/test/test_config.py +++ b/climada/util/test/test_config.py @@ -18,9 +18,11 @@ Test config module. 
""" + import unittest -from climada.util.config import Config, CONFIG +from climada.util.config import CONFIG, Config + class TestConfig(unittest.TestCase): """Test Config methods""" @@ -33,11 +35,12 @@ def tearDown(self): def test_from_dict(self): """Check the creation and use of a Config object.""" - dct = {'a': 4., - 'b': [0, 1., '2', {'c': 'c'}, [[11, 12], [21, 22]]]} + dct = {"a": 4.0, "b": [0, 1.0, "2", {"c": "c"}, [[11, 12], [21, 22]]]} conf = Config.from_dict(dct) self.assertEqual(conf.a.float(), 4.0) - self.assertEqual(str(conf), '{a: 4.0, b: [0, 1.0, 2, {c: c}, [[11, 12], [21, 22]]]}') + self.assertEqual( + str(conf), "{a: 4.0, b: [0, 1.0, 2, {c: c}, [[11, 12], [21, 22]]]}" + ) try: conf.a.int() self.fail("this should have failed with `, not int`") @@ -45,9 +48,9 @@ def test_from_dict(self): pass self.assertEqual(conf.b.get(0).int(), 0) self.assertEqual(conf.b.int(0), 0) - self.assertEqual(conf.b.float(1), 1.) - self.assertEqual(conf.b.str(2), '2') - self.assertEqual(conf.b.get(3).c.str(), 'c') + self.assertEqual(conf.b.float(1), 1.0) + self.assertEqual(conf.b.str(2), "2") + self.assertEqual(conf.b.get(3).c.str(), "c") self.assertEqual(conf.b.get(4, 1, 0).int(), 21) self.assertEqual(conf.b.get(4, 1).int(1), 22) self.assertEqual(conf.b.get(4).list(0)[1].int(), 12) @@ -56,20 +59,28 @@ def test_from_dict(self): def test_substitute(self): global CONFIG """Check the substitution of references.""" - dct = {'a': 'https://{b.c}/{b.d}.{b.e}', 'b': {'c': 'host', 'd': 'page', 'e': 'domain'}} + dct = { + "a": "https://{b.c}/{b.d}.{b.e}", + "b": {"c": "host", "d": "page", "e": "domain"}, + } conf = Config.from_dict(dct) self.assertEqual(conf.a._root, conf._root) - self.assertEqual(conf.a.str(), 'https://host/page.domain') + self.assertEqual(conf.a.str(), "https://host/page.domain") def test_missing(self): with self.assertRaises(AttributeError) as ve: CONFIG.hazard.fire_fly.population.str() - self.assertIn("there is no 'fire_fly' configured for 'hazard'", str(ve.exception)) + self.assertIn( + "there is no 'fire_fly' configured for 'hazard'", str(ve.exception) + ) self.assertIn("check your config files: [", str(ve.exception)) with self.assertRaises(AttributeError) as ve: CONFIG.some_module.str() - self.assertIn("there is no 'some_module' configured for 'climada.CONFIG'", str(ve.exception)) + self.assertIn( + "there is no 'some_module' configured for 'climada.CONFIG'", + str(ve.exception), + ) # Execute Tests diff --git a/climada/util/test/test_coordinates.py b/climada/util/test/test_coordinates.py index aea4d5e6b..50d5a8073 100644 --- a/climada/util/test/test_coordinates.py +++ b/climada/util/test/test_coordinates.py @@ -22,80 +22,84 @@ import unittest from pathlib import Path -from cartopy.io import shapereader -import pandas as pd import geopandas as gpd import numpy as np -from pyproj.crs import CRS as PCRS +import pandas as pd +import rasterio.transform import shapely -from shapely.geometry import box -from rasterio.windows import Window -from rasterio.warp import Resampling +from cartopy.io import shapereader +from pyproj.crs import CRS as PCRS from rasterio import Affine from rasterio.crs import CRS as RCRS -import rasterio.transform +from rasterio.warp import Resampling +from rasterio.windows import Window +from shapely.geometry import box +import climada.util.coordinates as u_coord from climada import CONFIG -from climada.util.constants import HAZ_DEMO_FL, DEF_CRS, ONE_LAT_KM, DEMO_DIR from climada.hazard.base import Centroids -import climada.util.coordinates as u_coord +from 
climada.util.constants import DEF_CRS, DEMO_DIR, HAZ_DEMO_FL, ONE_LAT_KM DATA_DIR = CONFIG.util.test_data.dir() + + def def_input_values(): """Default input coordinates and centroids values""" # Load exposures coordinates from demo entity file - exposures = np.array([ - [26.933899, -80.128799], - [26.957203, -80.098284], - [26.783846, -80.748947], - [26.645524, -80.550704], - [26.897796, -80.596929], - [26.925359, -80.220966], - [26.914768, -80.07466], - [26.853491, -80.190281], - [26.845099, -80.083904], - [26.82651, -80.213493], - [26.842772, -80.0591], - [26.825905, -80.630096], - [26.80465, -80.075301], - [26.788649, -80.069885], - [26.704277, -80.656841], - [26.71005, -80.190085], - [26.755412, -80.08955], - [26.678449, -80.041179], - [26.725649, -80.1324], - [26.720599, -80.091746], - [26.71255, -80.068579], - [26.6649, -80.090698], - [26.664699, -80.1254], - [26.663149, -80.151401], - [26.66875, -80.058749], - [26.638517, -80.283371], - [26.59309, -80.206901], - [26.617449, -80.090649], - [26.620079, -80.055001], - [26.596795, -80.128711], - [26.577049, -80.076435], - [26.524585, -80.080105], - [26.524158, -80.06398], - [26.523737, -80.178973], - [26.520284, -80.110519], - [26.547349, -80.057701], - [26.463399, -80.064251], - [26.45905, -80.07875], - [26.45558, -80.139247], - [26.453699, -80.104316], - [26.449999, -80.188545], - [26.397299, -80.21902], - [26.4084, -80.092391], - [26.40875, -80.1575], - [26.379113, -80.102028], - [26.3809, -80.16885], - [26.349068, -80.116401], - [26.346349, -80.08385], - [26.348015, -80.241305], - [26.347957, -80.158855] - ]) + exposures = np.array( + [ + [26.933899, -80.128799], + [26.957203, -80.098284], + [26.783846, -80.748947], + [26.645524, -80.550704], + [26.897796, -80.596929], + [26.925359, -80.220966], + [26.914768, -80.07466], + [26.853491, -80.190281], + [26.845099, -80.083904], + [26.82651, -80.213493], + [26.842772, -80.0591], + [26.825905, -80.630096], + [26.80465, -80.075301], + [26.788649, -80.069885], + [26.704277, -80.656841], + [26.71005, -80.190085], + [26.755412, -80.08955], + [26.678449, -80.041179], + [26.725649, -80.1324], + [26.720599, -80.091746], + [26.71255, -80.068579], + [26.6649, -80.090698], + [26.664699, -80.1254], + [26.663149, -80.151401], + [26.66875, -80.058749], + [26.638517, -80.283371], + [26.59309, -80.206901], + [26.617449, -80.090649], + [26.620079, -80.055001], + [26.596795, -80.128711], + [26.577049, -80.076435], + [26.524585, -80.080105], + [26.524158, -80.06398], + [26.523737, -80.178973], + [26.520284, -80.110519], + [26.547349, -80.057701], + [26.463399, -80.064251], + [26.45905, -80.07875], + [26.45558, -80.139247], + [26.453699, -80.104316], + [26.449999, -80.188545], + [26.397299, -80.21902], + [26.4084, -80.092391], + [26.40875, -80.1575], + [26.379113, -80.102028], + [26.3809, -80.16885], + [26.349068, -80.116401], + [26.346349, -80.08385], + [26.348015, -80.241305], + [26.347957, -80.158855], + ] + ) # Define centroids centroids = np.zeros((100, 2)) @@ -109,20 +113,122 @@ def def_input_values(): return exposures, centroids + def def_ref(): """Default output reference""" - return np.array([46, 46, 36, 36, 36, 46, 46, 46, 46, 46, 46, - 36, 46, 46, 36, 46, 46, 46, 46, 46, 46, 46, - 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, - 46, 46, 46, 45, 45, 45, 45, 45, 45, 45, 45, - 45, 45, 45, 45, 45, 45]) + return np.array( + [ + 46, + 46, + 36, + 36, + 36, + 46, + 46, + 46, + 46, + 46, + 46, + 36, + 46, + 46, + 36, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 
46, + 46, + 46, + 46, + 46, + 46, + 45, + 45, + 45, + 45, + 45, + 45, + 45, + 45, + 45, + 45, + 45, + 45, + 45, + 45, + ] + ) + def def_ref_50(): """Default output reference for maximum distance threshold 50km""" - return np.array([46, 46, 36, -1, 36, 46, 46, 46, 46, 46, 46, 36, 46, 46, - 36, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, - 46, 46, 46, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 45, - 45, 45, 45, 45, 45, 45, 45, 45]) + return np.array( + [ + 46, + 46, + 36, + -1, + 36, + 46, + 46, + 46, + 46, + 46, + 46, + 36, + 46, + 46, + 36, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + 45, + 45, + 45, + 45, + 45, + 45, + 45, + 45, + 45, + ] + ) + class TestDistance(unittest.TestCase): """Test distance functions.""" @@ -136,89 +242,106 @@ def test_dist_sqr_approx_pass(self): lons2 = 56 self.assertAlmostEqual( 7709.827814738594, - np.sqrt(u_coord._dist_sqr_approx(lats1, lons1, cos_lats1, lats2, lons2)) * ONE_LAT_KM) + np.sqrt(u_coord._dist_sqr_approx(lats1, lons1, cos_lats1, lats2, lons2)) + * ONE_LAT_KM, + ) def test_geodesic_length_geog(self): """Test compute_geodesic_lengths for geographic input crs""" - LINE_PATH = DEMO_DIR.joinpath('nl_rails.gpkg') - gdf_rails = gpd.read_file(LINE_PATH).to_crs('epsg:4326') + LINE_PATH = DEMO_DIR.joinpath("nl_rails.gpkg") + gdf_rails = gpd.read_file(LINE_PATH).to_crs("epsg:4326") lengths_geom = u_coord.compute_geodesic_lengths(gdf_rails) self.assertEqual(len(lengths_geom), len(gdf_rails)) self.assertTrue( np.all( - (abs(lengths_geom - gdf_rails['distance'])/lengths_geom < 0.1) | - (lengths_geom - gdf_rails['distance'] < 10) - ) + (abs(lengths_geom - gdf_rails["distance"]) / lengths_geom < 0.1) + | (lengths_geom - gdf_rails["distance"] < 10) ) + ) def test_geodesic_length_proj(self): """Test compute_geodesic_lengths for projected input crs""" - LINE_PATH = DEMO_DIR.joinpath('nl_rails.gpkg') - gdf_rails = gpd.read_file(LINE_PATH).to_crs('epsg:4326') - gdf_rails_proj = gpd.read_file(LINE_PATH).to_crs('epsg:4326').to_crs('EPSG:28992') + LINE_PATH = DEMO_DIR.joinpath("nl_rails.gpkg") + gdf_rails = gpd.read_file(LINE_PATH).to_crs("epsg:4326") + gdf_rails_proj = ( + gpd.read_file(LINE_PATH).to_crs("epsg:4326").to_crs("EPSG:28992") + ) lengths_geom = u_coord.compute_geodesic_lengths(gdf_rails) lengths_proj = u_coord.compute_geodesic_lengths(gdf_rails_proj) - for len_proj, len_geom in zip(lengths_proj,lengths_geom): + for len_proj, len_geom in zip(lengths_proj, lengths_geom): self.assertAlmostEqual(len_proj, len_geom, 1) self.assertTrue( np.all( - (abs(lengths_proj - gdf_rails_proj['distance'])/lengths_proj < 0.1) | - (lengths_proj - gdf_rails_proj['distance'] < 10) - ) + (abs(lengths_proj - gdf_rails_proj["distance"]) / lengths_proj < 0.1) + | (lengths_proj - gdf_rails_proj["distance"] < 10) ) + ) + def data_arrays_resampling_demo(): """init demo data arrays (2d) and meta data for resampling""" data_arrays = [ # demo pop: - np.array([[0, 1, 2], [3, 4, 5]], dtype='float32'), - np.array([[0, 1, 2], [3, 4, 5]], dtype='float32'), + np.array([[0, 1, 2], [3, 4, 5]], dtype="float32"), + np.array([[0, 1, 2], [3, 4, 5]], dtype="float32"), # demo nightlight: - np.array([[2, 10, 0, 0, 0, 0], - [10, 2, 10, 0, 0, 0], - [0, 0, 0, 0, 1, 1], - [1, 0, 0, 0, 1, 1]], dtype='float32'), + np.array( + [ + [2, 10, 0, 0, 0, 0], + [10, 2, 10, 0, 0, 0], + [0, 0, 0, 0, 1, 1], + [1, 0, 0, 0, 1, 1], + ], + dtype="float32", + ), ] - meta_list = [{'driver': 'GTiff', - 'dtype': 
'float32', - 'nodata': -3.4028230607370965e+38, - 'width': 3, - 'height': 2, - 'count': 1, - 'crs': RCRS.from_epsg(4326), - 'transform': Affine(1, 0.0, -10, 0.0, -1, 40), - }, - {'driver': 'GTiff', - 'dtype': 'float32', - 'nodata': -3.4028230607370965e+38, - 'width': 3, - 'height': 2, - 'count': 1, - 'crs': RCRS.from_epsg(4326), - # shifted by 1 degree latitude to the north: - 'transform': Affine(1, 0.0, -10, 0.0, -1, 41), - }, - {'driver': 'GTiff', - 'dtype': 'float32', - 'nodata': None, - 'width': 6, - 'height': 4, - 'count': 1, - 'crs': RCRS.from_epsg(4326), - # higher resolution: - 'transform': Affine(.5, 0.0, -10, 0.0, -.5, 40), - }] + meta_list = [ + { + "driver": "GTiff", + "dtype": "float32", + "nodata": -3.4028230607370965e38, + "width": 3, + "height": 2, + "count": 1, + "crs": RCRS.from_epsg(4326), + "transform": Affine(1, 0.0, -10, 0.0, -1, 40), + }, + { + "driver": "GTiff", + "dtype": "float32", + "nodata": -3.4028230607370965e38, + "width": 3, + "height": 2, + "count": 1, + "crs": RCRS.from_epsg(4326), + # shifted by 1 degree latitude to the north: + "transform": Affine(1, 0.0, -10, 0.0, -1, 41), + }, + { + "driver": "GTiff", + "dtype": "float32", + "nodata": None, + "width": 6, + "height": 4, + "count": 1, + "crs": RCRS.from_epsg(4326), + # higher resolution: + "transform": Affine(0.5, 0.0, -10, 0.0, -0.5, 40), + }, + ] return data_arrays, meta_list + class TestFunc(unittest.TestCase): """Test auxiliary functions""" + def test_lon_normalize(self): """Test the longitude normalization function""" data = np.array([-180, 20.1, -30, 190, -350]) @@ -287,14 +410,16 @@ def test_geosph_vector(self): def test_dist_approx_pass(self): """Test approximate distance functions""" - data = np.array([ - # lat1, lon1, lat2, lon2, dist_equirect, dist_geosphere - [45.5, -32.1, 14, 56, 7702.88906574, 8750.64119051], - [45.5, 147.8, 14, -124, 7709.82781473, 8758.34146833], - [45.5, 507.9, 14, -124, 7702.88906574, 8750.64119051], - [45.5, -212.2, 14, -124, 7709.82781473, 8758.34146833], - [-3, -130.1, 4, -30.5, 11079.7217421, 11087.0352544], - ]) + data = np.array( + [ + # lat1, lon1, lat2, lon2, dist_equirect, dist_geosphere + [45.5, -32.1, 14, 56, 7702.88906574, 8750.64119051], + [45.5, 147.8, 14, -124, 7709.82781473, 8758.34146833], + [45.5, 507.9, 14, -124, 7702.88906574, 8750.64119051], + [45.5, -212.2, 14, -124, 7709.82781473, 8758.34146833], + [-3, -130.1, 4, -30.5, 11079.7217421, 11087.0352544], + ] + ) # conversion factors from reference data (in km, see above) to other units factors_km_to_x = { "m": 1e3, @@ -302,28 +427,52 @@ def test_dist_approx_pass(self): "degree": 1.0 / u_coord.ONE_LAT_KM, "km": 1.0, } - compute_dist = np.stack([ - u_coord.dist_approx(data[:, None, 0], data[:, None, 1], - data[:, None, 2], data[:, None, 3], - method="equirect")[:, 0, 0], - u_coord.dist_approx(data[:, None, 0], data[:, None, 1], - data[:, None, 2], data[:, None, 3], - method="geosphere")[:, 0, 0], - ], axis=-1) + compute_dist = np.stack( + [ + u_coord.dist_approx( + data[:, None, 0], + data[:, None, 1], + data[:, None, 2], + data[:, None, 3], + method="equirect", + )[:, 0, 0], + u_coord.dist_approx( + data[:, None, 0], + data[:, None, 1], + data[:, None, 2], + data[:, None, 3], + method="geosphere", + )[:, 0, 0], + ], + axis=-1, + ) self.assertEqual(compute_dist.shape[0], data.shape[0]) for d, cd in zip(data[:, 4:], compute_dist): self.assertAlmostEqual(d[0], cd[0]) self.assertAlmostEqual(d[1], cd[1]) for units, factor in factors_km_to_x.items(): - compute_dist = np.stack([ - u_coord.dist_approx(data[:, 
None, 0], data[:, None, 1], - data[:, None, 2], data[:, None, 3], - method="equirect", units=units)[:, 0, 0], - u_coord.dist_approx(data[:, None, 0], data[:, None, 1], - data[:, None, 2], data[:, None, 3], - method="geosphere", units=units)[:, 0, 0], - ], axis=-1) + compute_dist = np.stack( + [ + u_coord.dist_approx( + data[:, None, 0], + data[:, None, 1], + data[:, None, 2], + data[:, None, 3], + method="equirect", + units=units, + )[:, 0, 0], + u_coord.dist_approx( + data[:, None, 0], + data[:, None, 1], + data[:, None, 2], + data[:, None, 3], + method="geosphere", + units=units, + )[:, 0, 0], + ], + axis=-1, + ) self.assertEqual(compute_dist.shape[0], data.shape[0]) places = 4 if units == "m" else 7 for d, cd in zip(data[:, 4:], compute_dist): @@ -332,13 +481,15 @@ def test_dist_approx_pass(self): def test_dist_approx_log_pass(self): """Test log-functionality of approximate distance functions""" - data = np.array([ - # lat1, lon1, lat2, lon2, dist_equirect, dist_geosphere - [0, 0, 0, 1, 111.12, 111.12], - [-13, 179, 5, -179, 2011.84774049, 2012.30698122], - [24., 85., 23.99999967, 85., 3.666960e-5, 3.666960e-5], - [24., 85., 24., 85., 0, 0], - ]) + data = np.array( + [ + # lat1, lon1, lat2, lon2, dist_equirect, dist_geosphere + [0, 0, 0, 1, 111.12, 111.12], + [-13, 179, 5, -179, 2011.84774049, 2012.30698122], + [24.0, 85.0, 23.99999967, 85.0, 3.666960e-5, 3.666960e-5], + [24.0, 85.0, 24.0, 85.0, 0, 0], + ] + ) # conversion factors from reference data (in km, see above) to other units factors_km_to_x = { "m": 1e3, @@ -348,9 +499,15 @@ def test_dist_approx_log_pass(self): } for i, method in enumerate(["equirect", "geosphere"]): for units, factor in factors_km_to_x.items(): - dist, vec = u_coord.dist_approx(data[:, None, 0], data[:, None, 1], - data[:, None, 2], data[:, None, 3], - log=True, method=method, units=units) + dist, vec = u_coord.dist_approx( + data[:, None, 0], + data[:, None, 1], + data[:, None, 2], + data[:, None, 3], + log=True, + method=method, + units=units, + ) dist, vec = dist[:, 0, 0], vec[:, 0, 0] np.testing.assert_allclose(np.linalg.norm(vec, axis=-1), dist) np.testing.assert_allclose(dist, data[:, 4 + i] * factor) @@ -369,20 +526,22 @@ def test_dist_approx_batch_pass(self): # The distance of each of 4 points (lat1, lon1) to each of 3 points (lat2, lon2) is # computed for each of 2 batches (first dimension) of data. 
- test_data = np.array([ - [ - [7702.88906574, 7967.66578334, 4613.1634431], - [19389.5254652, 2006.65638992, 11079.7217421], - [7960.66983129, 7709.82781473, 14632.55958021], - [7967.66578334, 7702.88906574, 14639.95139706], - ], + test_data = np.array( [ - [14632.55958021, 7709.82781473, 7960.66983129], - [14639.95139706, 7702.88906574, 7967.66578334], - [4613.1634431, 7967.66578334, 7702.88906574], - [11079.7217421, 2006.65638992, 19389.5254652], - ], - ]) + [ + [7702.88906574, 7967.66578334, 4613.1634431], + [19389.5254652, 2006.65638992, 11079.7217421], + [7960.66983129, 7709.82781473, 14632.55958021], + [7967.66578334, 7702.88906574, 14639.95139706], + ], + [ + [14632.55958021, 7709.82781473, 7960.66983129], + [14639.95139706, 7702.88906574, 7967.66578334], + [4613.1634431, 7967.66578334, 7702.88906574], + [11079.7217421, 2006.65638992, 19389.5254652], + ], + ] + ) dist = u_coord.dist_approx(lat1, lon1, lat2, lon2) np.testing.assert_array_almost_equal(dist, test_data) @@ -402,15 +561,18 @@ def test_get_gridcellarea(self): self.assertAlmostEqual(area[1], 180352.82386516) self.assertEqual(lat.shape, area.shape) - area2 = u_coord.get_gridcellarea(lat, resolution, unit='km2') + area2 = u_coord.get_gridcellarea(lat, resolution, unit="km2") self.assertAlmostEqual(area2[0], 1781.5973363005) self.assertTrue(area2[0] <= 2500) def test_read_vector_pass(self): """Test one columns data""" - shp_file = shapereader.natural_earth(resolution='110m', category='cultural', - name='populated_places_simple') - lat, lon, geometry, intensity = u_coord.read_vector(shp_file, ['pop_min', 'pop_max']) + shp_file = shapereader.natural_earth( + resolution="110m", category="cultural", name="populated_places_simple" + ) + lat, lon, geometry, intensity = u_coord.read_vector( + shp_file, ["pop_min", "pop_max"] + ) self.assertTrue(u_coord.equal_crs(geometry.crs, u_coord.NE_EPSG)) self.assertEqual(geometry.size, lat.size) @@ -429,8 +591,8 @@ def test_read_vector_pass(self): def test_compare_crs(self): """Compare two crs""" - crs_one = 'epsg:4326' - crs_two = {'init': 'epsg:4326', 'no_defs': True} + crs_one = "epsg:4326" + crs_two = {"init": "epsg:4326", "no_defs": True} self.assertTrue(u_coord.equal_crs(crs_one, crs_two)) def test_set_df_geometry_points_pass(self): @@ -440,12 +602,12 @@ def test_set_df_geometry_points_pass(self): climada.test.test_multi_processing.TestCoordinates.test_set_df_geometry_points_scheduled_pass """ df_val = gpd.GeoDataFrame() - df_val['latitude'] = np.ones(10) * 40.0 - df_val['longitude'] = np.ones(10) * 0.50 + df_val["latitude"] = np.ones(10) * 40.0 + df_val["longitude"] = np.ones(10) * 0.50 - u_coord.set_df_geometry_points(df_val, crs='epsg:2202') + u_coord.set_df_geometry_points(df_val, crs="epsg:2202") np.testing.assert_allclose(df_val.geometry.x.values, np.ones(10) * 0.5) - np.testing.assert_allclose(df_val.geometry.y.values, np.ones(10) * 40.) + np.testing.assert_allclose(df_val.geometry.y.values, np.ones(10) * 40.0) def test_convert_wgs_to_utm_pass(self): """Test convert_wgs_to_utm""" @@ -466,9 +628,14 @@ def test_to_crs_user_input(self): self.assertEqual(rcrs, RCRS.from_user_input(u_coord.to_crs_user_input(DEF_CRS))) # can they be understood from the provider? 
- for arg in ['epsg:4326', b'epsg:4326', DEF_CRS, 4326, - {'init': 'epsg:4326', 'no_defs': True}, - b'{"init": "epsg:4326", "no_defs": True}']: + for arg in [ + "epsg:4326", + b"epsg:4326", + DEF_CRS, + 4326, + {"init": "epsg:4326", "no_defs": True}, + b'{"init": "epsg:4326", "no_defs": True}', + ]: self.assertEqual(pcrs, PCRS.from_user_input(u_coord.to_crs_user_input(arg))) self.assertEqual(rcrs, RCRS.from_user_input(u_coord.to_crs_user_input(arg))) @@ -477,27 +644,42 @@ def test_to_crs_user_input(self): with self.assertRaises(ValueError): u_coord.to_crs_user_input(arg) with self.assertRaises(SyntaxError): - u_coord.to_crs_user_input('{init: epsg:4326, no_defs: True}') + u_coord.to_crs_user_input("{init: epsg:4326, no_defs: True}") def test_country_to_iso(self): name_list = [ - '', 'United States', 'Argentina', 'Japan', 'Australia', 'Norway', 'Madagascar'] - al2_list = ['', 'US', 'AR', 'JP', 'AU', 'NO', 'MG'] - al3_list = ['', 'USA', 'ARG', 'JPN', 'AUS', 'NOR', 'MDG'] + "", + "United States", + "Argentina", + "Japan", + "Australia", + "Norway", + "Madagascar", + ] + al2_list = ["", "US", "AR", "JP", "AU", "NO", "MG"] + al3_list = ["", "USA", "ARG", "JPN", "AUS", "NOR", "MDG"] num_list = [0, 840, 32, 392, 36, 578, 450] natid_list = [0, 217, 9, 104, 13, 154, 128] # examples from docstring: self.assertEqual(u_coord.country_to_iso(840), "USA") - self.assertEqual(u_coord.country_to_iso("United States", representation="alpha2"), "US") - self.assertEqual(u_coord.country_to_iso(["United States of America", "SU"], "numeric"), - [840, 810]) - self.assertEqual(u_coord.country_to_iso(["XK", "Dhekelia"], "numeric"), [983, 907]) + self.assertEqual( + u_coord.country_to_iso("United States", representation="alpha2"), "US" + ) + self.assertEqual( + u_coord.country_to_iso(["United States of America", "SU"], "numeric"), + [840, 810], + ) + self.assertEqual( + u_coord.country_to_iso(["XK", "Dhekelia"], "numeric"), [983, 907] + ) # test cases: iso_lists = [name_list, al2_list, al3_list, num_list] for l1 in iso_lists: - for l2, representation in zip(iso_lists, ["name", "alpha2", "alpha3", "numeric"]): + for l2, representation in zip( + iso_lists, ["name", "alpha2", "alpha3", "numeric"] + ): self.assertEqual(u_coord.country_to_iso(l1, representation), l2) # deprecated API `country_iso_alpha2numeric` @@ -544,61 +726,120 @@ def test_match_grid_points(self): def test_match_centroids(self): """Test match_centroids function.""" - #Test 1: Raster data + # Test 1: Raster data meta = { - 'count': 1, 'crs': DEF_CRS, - 'width': 20, 'height': 10, - 'transform': rasterio.Affine(1.5, 0.0, -20, 0.0, -1.4, 8) + "count": 1, + "crs": DEF_CRS, + "width": 20, + "height": 10, + "transform": rasterio.Affine(1.5, 0.0, -20, 0.0, -1.4, 8), } centroids = Centroids.from_meta(meta=meta) - df = pd.DataFrame({ - 'longitude': np.array([ - -20.1, -20.0, -19.8, -19.0, -18.6, -18.4, - -19.0, -19.0, -19.0, -19.0, - -20.1, 0.0, 10.1, 10.1, 10.1, 0.0, -20.2, -20.3, - -6.4, 9.8, 0.0, - ]), - 'latitude': np.array([ - 7.3, 7.3, 7.3, 7.3, 7.3, 7.3, - 8.1, 7.9, 6.7, 6.5, - 8.1, 8.2, 8.3, 0.0, -6.1, -6.2, -6.3, 0.0, - -1.9, -1.7, 0.0, - ]), - }) + df = pd.DataFrame( + { + "longitude": np.array( + [ + -20.1, + -20.0, + -19.8, + -19.0, + -18.6, + -18.4, + -19.0, + -19.0, + -19.0, + -19.0, + -20.1, + 0.0, + 10.1, + 10.1, + 10.1, + 0.0, + -20.2, + -20.3, + -6.4, + 9.8, + 0.0, + ] + ), + "latitude": np.array( + [ + 7.3, + 7.3, + 7.3, + 7.3, + 7.3, + 7.3, + 8.1, + 7.9, + 6.7, + 6.5, + 8.1, + 8.2, + 8.3, + 0.0, + -6.1, + -6.2, + -6.3, + 0.0, + -1.9, + -1.7, + 
0.0, + ] + ), + } + ) gdf = gpd.GeoDataFrame( df, - geometry=gpd.points_from_xy(df['longitude'], df['latitude']), + geometry=gpd.points_from_xy(df["longitude"], df["latitude"]), crs=DEF_CRS, ) assigned = u_coord.match_centroids(gdf, centroids) expected_result = [ # constant y-value, varying x-value - 0, 0, 0, 0, 0, 1, + 0, + 0, + 0, + 0, + 0, + 1, # constant x-value, varying y-value - 0, 0, 0, 20, + 0, + 0, + 0, + 20, # out of bounds: topleft, top, topright, right, bottomright, bottom, bottomleft, left - -1, -1, -1, -1, -1, -1, -1, -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, # some explicit points within the raster - 149, 139, 113, + 149, + 139, + 113, ] - np.testing.assert_array_equal(assigned,expected_result) + np.testing.assert_array_equal(assigned, expected_result) # Test 2: Vector data (copied from test_match_coordinates) # note that the coordinates are in lat/lon - gdf_coords = np.array([(0.2, 2), (0, 0), (0, 2), (2.1, 3), (1, 1), (-1, 1), (0, 179.9)]) - df = pd.DataFrame({ - 'longitude': gdf_coords[:, 1], - 'latitude': gdf_coords[:, 0] - }) - gdf = gpd.GeoDataFrame(df,geometry=gpd.points_from_xy(df['longitude'], df['latitude']), - crs=DEF_CRS) + gdf_coords = np.array( + [(0.2, 2), (0, 0), (0, 2), (2.1, 3), (1, 1), (-1, 1), (0, 179.9)] + ) + df = pd.DataFrame({"longitude": gdf_coords[:, 1], "latitude": gdf_coords[:, 0]}) + gdf = gpd.GeoDataFrame( + df, + geometry=gpd.points_from_xy(df["longitude"], df["latitude"]), + crs=DEF_CRS, + ) coords_to_assign = np.array([(2.1, 3), (0, 0), (0, 2), (0.9, 1.0), (0, -179.9)]) centroids = Centroids( - lat=coords_to_assign[:, 0], - lon=coords_to_assign[:, 1], - crs=DEF_CRS + lat=coords_to_assign[:, 0], lon=coords_to_assign[:, 1], crs=DEF_CRS ) centroids_empty = Centroids(lat=np.array([]), lon=np.array([])) @@ -612,34 +853,35 @@ def test_match_centroids(self): for distance in ["euclidean", "haversine", "approx"]: for thresh, result in expected_results: assigned = u_coord.match_centroids( - gdf, centroids, distance=distance, threshold=thresh) + gdf, centroids, distance=distance, threshold=thresh + ) np.testing.assert_array_equal(assigned, result) - #test empty centroids + # test empty centroids result = [-1, -1, -1, -1, -1, -1, -1] assigned_idx = u_coord.match_centroids( - gdf, centroids_empty, distance=distance, threshold=thresh) + gdf, centroids_empty, distance=distance, threshold=thresh + ) np.testing.assert_array_equal(assigned_idx, result) # Test 3: non matching crs - df = pd.DataFrame({ - 'longitude': [10, 20, 30], - 'latitude': [50, 60, 70] - }) - gdf = gpd.GeoDataFrame(df,geometry=gpd.points_from_xy(df['longitude'], df['latitude']), - crs = 'EPSG:4326') + df = pd.DataFrame({"longitude": [10, 20, 30], "latitude": [50, 60, 70]}) + gdf = gpd.GeoDataFrame( + df, + geometry=gpd.points_from_xy(df["longitude"], df["latitude"]), + crs="EPSG:4326", + ) coords_to_assign = np.array([(2.1, 3), (0, 0), (0, 2), (0.9, 1.0), (0, -179.9)]) centroids = Centroids( - lat=[1100000,1200000], - lon=[2500000,2600000], - crs='EPSG:2056' + lat=[1100000, 1200000], lon=[2500000, 2600000], crs="EPSG:2056" ) with self.assertRaises(ValueError) as cm: u_coord.match_centroids(gdf, centroids) - self.assertIn('Set hazard and GeoDataFrame to same CRS first!', - str(cm.exception)) + self.assertIn( + "Set hazard and GeoDataFrame to same CRS first!", str(cm.exception) + ) def test_dist_sqr_approx_pass(self): """Test approximate distance helper function.""" @@ -650,70 +892,78 @@ def test_dist_sqr_approx_pass(self): lons2 = 56 self.assertAlmostEqual( 7709.827814738594, - 
np.sqrt(u_coord._dist_sqr_approx(lats1, lons1, cos_lats1, lats2, lons2)) * ONE_LAT_KM) + np.sqrt(u_coord._dist_sqr_approx(lats1, lons1, cos_lats1, lats2, lons2)) + * ONE_LAT_KM, + ) def test_wrong_distance_fail(self): """Check exception is thrown when wrong distance is given""" with self.assertRaises(ValueError) as cm: - u_coord.match_coordinates(np.ones((10, 2)), np.ones((7, 2)), distance='distance') - self.assertIn('Coordinate assignment with "distance" distance is not supported.', - str(cm.exception)) + u_coord.match_coordinates( + np.ones((10, 2)), np.ones((7, 2)), distance="distance" + ) + self.assertIn( + 'Coordinate assignment with "distance" distance is not supported.', + str(cm.exception), + ) def data_input_values(self): """Default input coordinates and centroids values""" # Load exposures coordinates from demo entity file - exposures = np.array([ - [26.933899, -80.128799], - [26.957203, -80.098284], - [26.783846, -80.748947], - [26.645524, -80.550704], - [26.897796, -80.596929], - [26.925359, -80.220966], - [26.914768, -80.07466], - [26.853491, -80.190281], - [26.845099, -80.083904], - [26.82651, -80.213493], - [26.842772, -80.0591], - [26.825905, -80.630096], - [26.80465, -80.075301], - [26.788649, -80.069885], - [26.704277, -80.656841], - [26.71005, -80.190085], - [26.755412, -80.08955], - [26.678449, -80.041179], - [26.725649, -80.1324], - [26.720599, -80.091746], - [26.71255, -80.068579], - [26.6649, -80.090698], - [26.664699, -80.1254], - [26.663149, -80.151401], - [26.66875, -80.058749], - [26.638517, -80.283371], - [26.59309, -80.206901], - [26.617449, -80.090649], - [26.620079, -80.055001], - [26.596795, -80.128711], - [26.577049, -80.076435], - [26.524585, -80.080105], - [26.524158, -80.06398], - [26.523737, -80.178973], - [26.520284, -80.110519], - [26.547349, -80.057701], - [26.463399, -80.064251], - [26.45905, -80.07875], - [26.45558, -80.139247], - [26.453699, -80.104316], - [26.449999, -80.188545], - [26.397299, -80.21902], - [26.4084, -80.092391], - [26.40875, -80.1575], - [26.379113, -80.102028], - [26.3809, -80.16885], - [26.349068, -80.116401], - [26.346349, -80.08385], - [26.348015, -80.241305], - [26.347957, -80.158855] - ]) + exposures = np.array( + [ + [26.933899, -80.128799], + [26.957203, -80.098284], + [26.783846, -80.748947], + [26.645524, -80.550704], + [26.897796, -80.596929], + [26.925359, -80.220966], + [26.914768, -80.07466], + [26.853491, -80.190281], + [26.845099, -80.083904], + [26.82651, -80.213493], + [26.842772, -80.0591], + [26.825905, -80.630096], + [26.80465, -80.075301], + [26.788649, -80.069885], + [26.704277, -80.656841], + [26.71005, -80.190085], + [26.755412, -80.08955], + [26.678449, -80.041179], + [26.725649, -80.1324], + [26.720599, -80.091746], + [26.71255, -80.068579], + [26.6649, -80.090698], + [26.664699, -80.1254], + [26.663149, -80.151401], + [26.66875, -80.058749], + [26.638517, -80.283371], + [26.59309, -80.206901], + [26.617449, -80.090649], + [26.620079, -80.055001], + [26.596795, -80.128711], + [26.577049, -80.076435], + [26.524585, -80.080105], + [26.524158, -80.06398], + [26.523737, -80.178973], + [26.520284, -80.110519], + [26.547349, -80.057701], + [26.463399, -80.064251], + [26.45905, -80.07875], + [26.45558, -80.139247], + [26.453699, -80.104316], + [26.449999, -80.188545], + [26.397299, -80.21902], + [26.4084, -80.092391], + [26.40875, -80.1575], + [26.379113, -80.102028], + [26.3809, -80.16885], + [26.349068, -80.116401], + [26.346349, -80.08385], + [26.348015, -80.241305], + [26.347957, -80.158855], + ] + ) # 
Define centroids centroids = np.zeros((100, 2)) @@ -729,29 +979,123 @@ def data_input_values(self): def data_ref(self): """Default output reference""" - return np.array([46, 46, 36, 36, 36, 46, 46, 46, 46, 46, 46, - 36, 46, 46, 36, 46, 46, 46, 46, 46, 46, 46, - 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, - 46, 46, 46, 45, 45, 45, 45, 45, 45, 45, 45, - 45, 45, 45, 45, 45, 45]) + return np.array( + [ + 46, + 46, + 36, + 36, + 36, + 46, + 46, + 46, + 46, + 46, + 46, + 36, + 46, + 46, + 36, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 45, + 45, + 45, + 45, + 45, + 45, + 45, + 45, + 45, + 45, + 45, + 45, + 45, + 45, + ] + ) def data_ref_40(self): """Default output reference for maximum distance threshold 40km""" - return np.array([46, 46, 36, -1, -1, 46, 46, 46, 46, 46, 46, -1, 46, 46, - -1, 46, 46, 46, 46, 46, 46, 46, 46, -1, 46, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, 45, -1, -1]) + return np.array( + [ + 46, + 46, + 36, + -1, + -1, + 46, + 46, + 46, + 46, + 46, + 46, + -1, + 46, + 46, + -1, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + -1, + 46, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + 45, + -1, + -1, + ] + ) def data_antimeridian_values(self): """Default input coordinates and centroids value crossing antimerdian""" - exposures = np.array([ - [0, -179.99], - [0, 179.99], - [5, -179.09], - [-5, 179.09], - [0, 130], - [0, -130] - ]) + exposures = np.array( + [[0, -179.99], [0, 179.99], [5, -179.09], [-5, 179.09], [0, 130], [0, -130]] + ) # Define centroids centroids = np.zeros((100, 2)) @@ -759,7 +1103,7 @@ def data_antimeridian_values(self): for ilon in range(10): for ilat in range(10): centroids[inext][0] = -5 + ilat - if ilat -5 <= 0: + if ilat - 5 <= 0: centroids[inext][1] = 170 + ilon + 1 else: centroids[inext][1] = -170 - ilon @@ -793,9 +1137,10 @@ def normal_warning(self, dist): # Interpolate with lower threshold to raise warnings threshold = 40 - with self.assertLogs('climada.util.coordinates', level='INFO') as cm: + with self.assertLogs("climada.util.coordinates", level="INFO") as cm: neighbors = u_coord.match_coordinates( - exposures, centroids, distance=dist, threshold=threshold) + exposures, centroids, distance=dist, threshold=threshold + ) self.assertIn("Distance to closest centroid", cm.output[1]) ref_neighbors = self.data_ref_40() @@ -825,101 +1170,97 @@ def antimeridian_warning(self, dist): # Interpolate with lower threshold to raise warnings threshold = 100 - with self.assertLogs('climada.util.coordinates', level='INFO') as cm: + with self.assertLogs("climada.util.coordinates", level="INFO") as cm: neighbors = u_coord.match_coordinates( - exposures, centroids, distance=dist, threshold=threshold) + exposures, centroids, distance=dist, threshold=threshold + ) self.assertIn("Distance to closest centroid", cm.output[1]) np.testing.assert_array_equal(neighbors, self.data_ref_antimeridian()) def test_approx_normal_pass(self): """Call normal_pass test for approxiamte distance""" - self.normal_pass('approx') + self.normal_pass("approx") def test_approx_normal_warning(self): """Call normal_warning test for approxiamte distance""" - self.normal_warning('approx') + self.normal_warning("approx") def test_approx_repeat_coord_pass(self): """Call repeat_coord_pass test for approxiamte distance""" - self.repeat_coord_pass('approx') + 
self.repeat_coord_pass("approx") def test_approx_antimeridian_warning(self): """Call normal_warning test for approximate distance""" - self.antimeridian_warning('approx') + self.antimeridian_warning("approx") def test_haver_normal_pass(self): """Call normal_pass test for haversine distance""" - self.normal_pass('haversine') + self.normal_pass("haversine") def test_haver_normal_warning(self): """Call normal_warning test for haversine distance""" - self.normal_warning('haversine') + self.normal_warning("haversine") def test_haver_repeat_coord_pass(self): """Call repeat_coord_pass test for haversine distance""" - self.repeat_coord_pass('haversine') + self.repeat_coord_pass("haversine") def test_haver_antimeridian_warning(self): """Call normal_warning test for haversine distance""" - self.antimeridian_warning('haversine') + self.antimeridian_warning("haversine") def test_euc_normal_pass(self): """Call normal_pass test for euclidean distance""" - self.normal_pass('euclidean') + self.normal_pass("euclidean") def test_euc_normal_warning(self): """Call normal_warning test for euclidean distance""" - self.normal_warning('euclidean') + self.normal_warning("euclidean") def test_euc_repeat_coord_pass(self): """Call repeat_coord_pass test for euclidean distance""" - self.repeat_coord_pass('euclidean') + self.repeat_coord_pass("euclidean") def test_euc_antimeridian_warning(self): """Call normal_warning test for euclidean distance""" - self.antimeridian_warning('euclidean') + self.antimeridian_warning("euclidean") def test_diff_outcomes(self): """Different NN interpolation outcomes""" threshold = 100000 # Define centroids - lons = np.arange(-160, 180+1, 20) - lats = np.arange(-60, 60+1, 20) + lons = np.arange(-160, 180 + 1, 20) + lats = np.arange(-60, 60 + 1, 20) lats, lons = [arr.ravel() for arr in np.meshgrid(lats, lons)] centroids = np.transpose([lats, lons]).copy() # `copy()` makes it F-contiguous # Define exposures - exposures = np.array([ - [49.9, 9], - [49.5, 9], - [0, -175] - ]) + exposures = np.array([[49.9, 9], [49.5, 9], [0, -175]]) # Neighbors ref_neighbors = [ [62, 62, 3], [62, 61, 122], [61, 61, 3], - ] + ] - dist_list = ['approx', 'haversine', 'euclidean'] - kwargs_list = [ - {'check_antimeridian':False}, - {}, - {'check_antimeridian':False} - ] + dist_list = ["approx", "haversine", "euclidean"] + kwargs_list = [{"check_antimeridian": False}, {}, {"check_antimeridian": False}] for dist, ref, kwargs in zip(dist_list, ref_neighbors, kwargs_list): neighbors = u_coord.match_coordinates( - exposures, centroids, distance=dist, threshold=threshold, **kwargs) + exposures, centroids, distance=dist, threshold=threshold, **kwargs + ) np.testing.assert_array_equal(neighbors, ref) def test_match_coordinates(self): """Test match_coordinates function""" # note that the coordinates are in lat/lon - coords = np.array([(0.2, 2), (0, 0), (0, 2), (2.1, 3), (1, 1), (-1, 1), (0, 179.9)]) + coords = np.array( + [(0.2, 2), (0, 0), (0, 2), (2.1, 3), (1, 1), (-1, 1), (0, 179.9)] + ) coords_to_assign = np.array([(2.1, 3), (0, 0), (0, 2), (0.9, 1.0), (0, -179.9)]) expected_results = [ # test with different thresholds (in km) @@ -935,30 +1276,36 @@ def test_match_coordinates(self): coords_to_assign_typed = coords_to_assign.astype(test_dtype) for thresh, result in expected_results: assigned_idx = u_coord.match_coordinates( - coords_typed, coords_to_assign_typed, - distance=distance, threshold=thresh) + coords_typed, + coords_to_assign_typed, + distance=distance, + threshold=thresh, + ) 
np.testing.assert_array_equal(assigned_idx, result) - #test empty coords_to_assign + # test empty coords_to_assign coords_to_assign_empty = np.array([]) result = [-1, -1, -1, -1, -1, -1, -1] assigned_idx = u_coord.match_coordinates( - coords, coords_to_assign_empty, distance=distance, threshold=thresh) + coords, coords_to_assign_empty, distance=distance, threshold=thresh + ) np.testing.assert_array_equal(assigned_idx, result) - #test empty coords + # test empty coords coords_empty = np.array([]) result = np.array([]) assigned_idx = u_coord.match_coordinates( - coords_empty, coords_to_assign, distance=distance, threshold=thresh) + coords_empty, coords_to_assign, distance=distance, threshold=thresh + ) np.testing.assert_array_equal(assigned_idx, result) + class TestGetGeodata(unittest.TestCase): def test_nat_earth_resolution_pass(self): """Correct resolution.""" - self.assertEqual(u_coord.nat_earth_resolution(10), '10m') - self.assertEqual(u_coord.nat_earth_resolution(50), '50m') - self.assertEqual(u_coord.nat_earth_resolution(110), '110m') + self.assertEqual(u_coord.nat_earth_resolution(10), "10m") + self.assertEqual(u_coord.nat_earth_resolution(50), "50m") + self.assertEqual(u_coord.nat_earth_resolution(110), "110m") def test_nat_earth_resolution_fail(self): """Wrong resolution.""" @@ -971,33 +1318,44 @@ def test_nat_earth_resolution_fail(self): def test_get_land_geometry_country_pass(self): """get_land_geometry with selected countries.""" - iso_countries = ['DEU', 'VNM'] + iso_countries = ["DEU", "VNM"] res = u_coord.get_land_geometry(country_names=iso_countries, resolution=10) self.assertIsInstance(res, shapely.geometry.multipolygon.MultiPolygon) - for res, ref in zip(res.bounds, (5.85248986800, 8.56557851800, - 109.47242272200, 55.065334377000)): + for res, ref in zip( + res.bounds, (5.85248986800, 8.56557851800, 109.47242272200, 55.065334377000) + ): self.assertAlmostEqual(res, ref) - iso_countries = ['ESP'] + iso_countries = ["ESP"] res = u_coord.get_land_geometry(country_names=iso_countries, resolution=10) self.assertIsInstance(res, shapely.geometry.multipolygon.MultiPolygon) - for res, ref in zip(res.bounds, (-18.16722571499986, 27.642238674000, - 4.337087436000, 43.793443101)): + for res, ref in zip( + res.bounds, + (-18.16722571499986, 27.642238674000, 4.337087436000, 43.793443101), + ): self.assertAlmostEqual(res, ref) - iso_countries = ['FRA'] + iso_countries = ["FRA"] res = u_coord.get_land_geometry(country_names=iso_countries, resolution=10) self.assertIsInstance(res, shapely.geometry.multipolygon.MultiPolygon) - for res, ref in zip(res.bounds, (-61.79784094999991, -21.37078215899993, - 55.854502800000034, 51.08754088371883)): + for res, ref in zip( + res.bounds, + ( + -61.79784094999991, + -21.37078215899993, + 55.854502800000034, + 51.08754088371883, + ), + ): self.assertAlmostEqual(res, ref) def test_get_land_geometry_extent_pass(self): """get_land_geometry with selected countries.""" lat = np.array([28.203216, 28.555994, 28.860875]) lon = np.array([-16.567489, -18.554130, -9.532476]) - res = u_coord.get_land_geometry(extent=(np.min(lon), np.max(lon), - np.min(lat), np.max(lat)), resolution=10) + res = u_coord.get_land_geometry( + extent=(np.min(lon), np.max(lon), np.min(lat), np.max(lat)), resolution=10 + ) self.assertIsInstance(res, shapely.geometry.multipolygon.MultiPolygon) self.assertAlmostEqual(res.bounds[0], -18.002186653) self.assertAlmostEqual(res.bounds[1], lat[0]) @@ -1012,7 +1370,9 @@ def test_get_land_geometry_all_pass(self): def test_on_land_pass(self): 
"""check point on land with 1:50.000.000 resolution.""" - rows, cols, trans = u_coord.pts_to_raster_meta((-179.5, -60, 179.5, 60), (1, -1)) + rows, cols, trans = u_coord.pts_to_raster_meta( + (-179.5, -60, 179.5, 60), (1, -1) + ) xgrid, ygrid = u_coord.raster_to_meshgrid(trans, cols, rows) lat = np.concatenate([[28.203216, 28.555994, 28.860875], ygrid.ravel()]) lon = np.concatenate([[-16.567489, -18.554130, -9.532476], xgrid.ravel()]) @@ -1022,18 +1382,22 @@ def test_on_land_pass(self): def test_dist_to_coast_nasa(self): """Test point in coast and point not in coast""" - points = np.array([ - # Caribbean Sea: - [13.208333333333329, -59.625000000000014], - # South America: - [-12.497529, -58.849505], - # Very close to coast of Somalia: - [1.96475615, 45.23249055], - ]) + points = np.array( + [ + # Caribbean Sea: + [13.208333333333329, -59.625000000000014], + # South America: + [-12.497529, -58.849505], + # Very close to coast of Somalia: + [1.96475615, 45.23249055], + ] + ) dists = [-3000, -1393549.5, 48.77] dists_lowres = [729.1666667, 1393670.6973145, 945.73129294] # Warning: This will download more than 300 MB of data if not already present! - result = u_coord.dist_to_coast_nasa(points[:, 0], points[:, 1], highres=True, signed=True) + result = u_coord.dist_to_coast_nasa( + points[:, 0], points[:, 1], highres=True, signed=True + ) result_lowres = u_coord.dist_to_coast_nasa(points[:, 0], points[:, 1]) np.testing.assert_array_almost_equal(dists, result) np.testing.assert_array_almost_equal(dists_lowres, result_lowres) @@ -1042,49 +1406,46 @@ def test_get_country_geometries_country_pass(self): """get_country_geometries with selected countries. issues with the natural earth data should be caught by test_get_land_geometry_* since it's very similar""" - iso_countries = ['NLD', 'VNM'] + iso_countries = ["NLD", "VNM"] res = u_coord.get_country_geometries(iso_countries, resolution=110) self.assertIsInstance(res, gpd.geodataframe.GeoDataFrame) self.assertEqual(res.shape[0], 2) def test_get_country_geometries_country_norway_pass(self): """test correct numeric ISO3 for country Norway""" - iso_countries = 'NOR' + iso_countries = "NOR" extent = [10, 11, 55, 60] res1 = u_coord.get_country_geometries(iso_countries) res2 = u_coord.get_country_geometries(extent=extent) - self.assertEqual(res1.ISO_N3.values[0], '578') - self.assertIn('578', res2.ISO_N3.values) - self.assertIn('NOR', res2.ISO_A3.values) - self.assertIn('Denmark', res2.NAME.values) - self.assertIn('Norway', res2.NAME.values) - self.assertNotIn('Sweden', res2.NAME.values) + self.assertEqual(res1.ISO_N3.values[0], "578") + self.assertIn("578", res2.ISO_N3.values) + self.assertIn("NOR", res2.ISO_A3.values) + self.assertIn("Denmark", res2.NAME.values) + self.assertIn("Norway", res2.NAME.values) + self.assertNotIn("Sweden", res2.NAME.values) def test_get_country_geometries_extent_pass(self): """get_country_geometries by selecting by extent""" lat = np.array([28.203216, 28.555994, 28.860875]) lon = np.array([-16.567489, -18.554130, -9.532476]) - res = u_coord.get_country_geometries(extent=( - np.min(lon), np.max(lon), - np.min(lat), np.max(lat) - )) + res = u_coord.get_country_geometries( + extent=(np.min(lon), np.max(lon), np.min(lat), np.max(lat)) + ) self.assertIsInstance(res, gpd.geodataframe.GeoDataFrame) + self.assertTrue(np.allclose(res.bounds.iloc[1, 1], lat[0])) self.assertTrue( - np.allclose(res.bounds.iloc[1, 1], lat[0]) + np.allclose(res.bounds.iloc[0, 0], -11.800084333105298) + or np.allclose(res.bounds.iloc[1, 0], -11.800084333105298) 
) self.assertTrue( - np.allclose(res.bounds.iloc[0, 0], -11.800084333105298) or - np.allclose(res.bounds.iloc[1, 0], -11.800084333105298) + np.allclose(res.bounds.iloc[0, 2], np.max(lon)) + or np.allclose(res.bounds.iloc[1, 2], np.max(lon)) ) self.assertTrue( - np.allclose(res.bounds.iloc[0, 2], np.max(lon)) or - np.allclose(res.bounds.iloc[1, 2], np.max(lon)) - ) - self.assertTrue( - np.allclose(res.bounds.iloc[0, 3], np.max(lat)) or - np.allclose(res.bounds.iloc[1, 3], np.max(lat)) + np.allclose(res.bounds.iloc[0, 3], np.max(lat)) + or np.allclose(res.bounds.iloc[1, 3], np.max(lat)) ) def test_get_country_geometries_all_pass(self): @@ -1097,23 +1458,45 @@ def test_get_country_geometries_all_pass(self): def test_get_country_geometries_fail(self): """get_country_geometries with offensive parameters""" with self.assertRaises(ValueError) as cm: - u_coord.get_country_geometries(extent=(-20,350,0,0)) - self.assertIn("longitude extent range is greater than 360: -20 to 350", - str(cm.exception)) + u_coord.get_country_geometries(extent=(-20, 350, 0, 0)) + self.assertIn( + "longitude extent range is greater than 360: -20 to 350", str(cm.exception) + ) with self.assertRaises(ValueError) as cm: - u_coord.get_country_geometries(extent=(350,-20,0,0)) - self.assertIn("longitude extent at the left (350) is larger " - "than longitude extent at the right (-20)", - str(cm.exception)) + u_coord.get_country_geometries(extent=(350, -20, 0, 0)) + self.assertIn( + "longitude extent at the left (350) is larger " + "than longitude extent at the right (-20)", + str(cm.exception), + ) def test_country_code_pass(self): """Test set_region_id""" - lon = np.array([-59.6250000000000, -59.6250000000000, -59.6250000000000, - -59.5416666666667, -59.5416666666667, -59.4583333333333, - -60.2083333333333, -60.2083333333333]) - lat = np.array([13.125, 13.20833333, 13.29166667, 13.125, 13.20833333, - 13.125, 12.625, 12.70833333]) + lon = np.array( + [ + -59.6250000000000, + -59.6250000000000, + -59.6250000000000, + -59.5416666666667, + -59.5416666666667, + -59.4583333333333, + -60.2083333333333, + -60.2083333333333, + ] + ) + lat = np.array( + [ + 13.125, + 13.20833333, + 13.29166667, + 13.125, + 13.20833333, + 13.125, + 12.625, + 12.70833333, + ] + ) for gridded in [True, False]: region_id = u_coord.get_country_code(lat, lon, gridded=gridded) region_id_OSLO = u_coord.get_country_code(59.91, 10.75, gridded=gridded) @@ -1125,50 +1508,63 @@ def test_country_code_pass(self): def test_all_points_on_sea(self): """Test country codes for unassignable coordinates (i.e., on sea)""" - lon = [-24.1 , -24.32634711, -24.55751498, -24.79698392] - lat = [87.3 , 87.23261237, 87.14440587, 87.04121094] + lon = [-24.1, -24.32634711, -24.55751498, -24.79698392] + lat = [87.3, 87.23261237, 87.14440587, 87.04121094] for gridded in [True, False]: country_codes = u_coord.get_country_code(lat, lon, gridded=gridded) self.assertTrue(np.all(country_codes == np.array([0, 0, 0, 0]))) def test_get_admin1_info_pass(self): """test get_admin1_info()""" - country_names = ['CHE', 'Indonesia', '840', 51] - admin1_info, admin1_shapes = u_coord.get_admin1_info(country_names=country_names) + country_names = ["CHE", "Indonesia", "840", 51] + admin1_info, admin1_shapes = u_coord.get_admin1_info( + country_names=country_names + ) self.assertEqual(len(admin1_info), 4) - self.assertListEqual(list(admin1_info.keys()), ['CHE', 'IDN', 'USA', 'ARM']) - self.assertEqual(len(admin1_info['CHE']), len(admin1_shapes['CHE'])) - self.assertEqual(len(admin1_info['CHE']), 26) - 
self.assertEqual(len(admin1_shapes['IDN']), 33) - self.assertEqual(len(admin1_info['USA']), 51) + self.assertListEqual(list(admin1_info.keys()), ["CHE", "IDN", "USA", "ARM"]) + self.assertEqual(len(admin1_info["CHE"]), len(admin1_shapes["CHE"])) + self.assertEqual(len(admin1_info["CHE"]), 26) + self.assertEqual(len(admin1_shapes["IDN"]), 33) + self.assertEqual(len(admin1_info["USA"]), 51) # depending on the version of Natural Earth, this is Washington or Idaho: - self.assertIn(admin1_info['USA'][1]['iso_3166_2'], ['US-WA', 'US-ID']) + self.assertIn(admin1_info["USA"][1]["iso_3166_2"], ["US-WA", "US-ID"]) def test_get_admin1_geometries_pass(self): """test get_admin1_geometries""" - countries = ['CHE', 'Indonesia', '840', 51] + countries = ["CHE", "Indonesia", "840", 51] gdf = u_coord.get_admin1_geometries(countries=countries) self.assertIsInstance(gdf, gpd.GeoDataFrame) - self.assertEqual(len(gdf.iso_3a.unique()), 4) # 4 countries - self.assertEqual(gdf.loc[gdf.iso_3a=='CHE'].shape[0], 26) # 26 cantons in CHE - self.assertEqual(gdf.shape[0], 121) # 121 admin 1 regions in the 4 countries - self.assertIn('ARM', gdf['iso_3a'].values) # Armenia (region_id 051) - self.assertIn('756', gdf['iso_3n'].values) # Switzerland (region_id 756) - self.assertIn('CH-AI', gdf['iso_3166_2'].values) # canton in CHE - self.assertIn('Sulawesi Tengah', gdf['admin1_name'].values) # region in Indonesia - self.assertIsInstance(gdf.loc[gdf['iso_3166_2'] == 'CH-AI'].geometry.values[0], - shapely.geometry.MultiPolygon) - self.assertIsInstance(gdf.loc[gdf.admin1_name == 'Sulawesi Tengah'].geometry.values[0], - shapely.geometry.MultiPolygon) - self.assertIsInstance(gdf.loc[gdf.admin1_name == 'Valais'].geometry.values[0], - shapely.geometry.Polygon) + self.assertEqual(len(gdf.iso_3a.unique()), 4) # 4 countries + self.assertEqual(gdf.loc[gdf.iso_3a == "CHE"].shape[0], 26) # 26 cantons in CHE + self.assertEqual(gdf.shape[0], 121) # 121 admin 1 regions in the 4 countries + self.assertIn("ARM", gdf["iso_3a"].values) # Armenia (region_id 051) + self.assertIn("756", gdf["iso_3n"].values) # Switzerland (region_id 756) + self.assertIn("CH-AI", gdf["iso_3166_2"].values) # canton in CHE + self.assertIn( + "Sulawesi Tengah", gdf["admin1_name"].values + ) # region in Indonesia + self.assertIsInstance( + gdf.loc[gdf["iso_3166_2"] == "CH-AI"].geometry.values[0], + shapely.geometry.MultiPolygon, + ) + self.assertIsInstance( + gdf.loc[gdf.admin1_name == "Sulawesi Tengah"].geometry.values[0], + shapely.geometry.MultiPolygon, + ) + self.assertIsInstance( + gdf.loc[gdf.admin1_name == "Valais"].geometry.values[0], + shapely.geometry.Polygon, + ) def test_get_admin1_geometries_fail(self): """test get_admin1_geometries wrong input""" # non existing country: self.assertRaises(LookupError, u_coord.get_admin1_geometries, ["FantasyLand"]) # wrong variable type for 'countries', e.g. 
Polygon: - self.assertRaises(TypeError, u_coord.get_admin1_geometries, shapely.geometry.Polygon()) + self.assertRaises( + TypeError, u_coord.get_admin1_geometries, shapely.geometry.Polygon() + ) + class TestRasterMeta(unittest.TestCase): def test_is_regular_pass(self): @@ -1203,20 +1599,26 @@ def test_is_regular_pass(self): self.assertEqual(hei, 2) self.assertEqual(wid, 2) - grid_x, grid_y = np.mgrid[10: 100: complex(0, 5), - 0: 10: complex(0, 5)] - grid_x = grid_x.reshape(-1,) - grid_y = grid_y.reshape(-1,) + grid_x, grid_y = np.mgrid[10 : 100 : complex(0, 5), 0 : 10 : complex(0, 5)] + grid_x = grid_x.reshape( + -1, + ) + grid_y = grid_y.reshape( + -1, + ) coord = np.array([grid_x, grid_y]).transpose() reg, hei, wid = u_coord.grid_is_regular(coord) self.assertTrue(reg) self.assertEqual(hei, 5) self.assertEqual(wid, 5) - grid_x, grid_y = np.mgrid[10: 100: complex(0, 4), - 0: 10: complex(0, 5)] - grid_x = grid_x.reshape(-1,) - grid_y = grid_y.reshape(-1,) + grid_x, grid_y = np.mgrid[10 : 100 : complex(0, 4), 0 : 10 : complex(0, 5)] + grid_x = grid_x.reshape( + -1, + ) + grid_y = grid_y.reshape( + -1, + ) coord = np.array([grid_x, grid_y]).transpose() reg, hei, wid = u_coord.grid_is_regular(coord) self.assertTrue(reg) @@ -1225,14 +1627,38 @@ def test_is_regular_pass(self): def test_get_resolution_pass(self): """Test _get_resolution method""" - lat = np.array([13.125, 13.20833333, 13.29166667, 13.125, - 13.20833333, 13.125, 12.625, 12.70833333, - 12.79166667, 12.875, 12.95833333, 13.04166667]) - lon = np.array([ - -59.6250000000000, -59.6250000000000, -59.6250000000000, -59.5416666666667, - -59.5416666666667, -59.4583333333333, -60.2083333333333, -60.2083333333333, - -60.2083333333333, -60.2083333333333, -60.2083333333333, -60.2083333333333 - ]) + lat = np.array( + [ + 13.125, + 13.20833333, + 13.29166667, + 13.125, + 13.20833333, + 13.125, + 12.625, + 12.70833333, + 12.79166667, + 12.875, + 12.95833333, + 13.04166667, + ] + ) + lon = np.array( + [ + -59.6250000000000, + -59.6250000000000, + -59.6250000000000, + -59.5416666666667, + -59.5416666666667, + -59.4583333333333, + -60.2083333333333, + -60.2083333333333, + -60.2083333333333, + -60.2083333333333, + -60.2083333333333, + -60.2083333333333, + ] + ) res_lat, res_lon = u_coord.get_resolution(lat, lon) self.assertAlmostEqual(res_lat, 0.0833333333333) self.assertAlmostEqual(res_lon, 0.0833333333333) @@ -1276,179 +1702,212 @@ def test_points_to_raster_pass(self): df_val = gpd.GeoDataFrame() x, y = np.meshgrid(np.linspace(0, 2, 5), np.linspace(40, 50, 10)) - df_val['latitude'] = y.flatten() - df_val['longitude'] = x.flatten() - df_val['value'] = np.ones(len(df_val)) * 10 - crs = 'epsg:2202' - _raster, meta = u_coord.points_to_raster(df_val, val_names=['value'], crs=crs, - scheduler=scheduler) - self.assertFalse(hasattr(df_val, "crs")) # points_to_raster must not modify df_val - self.assertTrue(u_coord.equal_crs(meta['crs'], crs)) - self.assertAlmostEqual(meta['transform'][0], 0.5) - self.assertAlmostEqual(meta['transform'][1], 0) - self.assertAlmostEqual(meta['transform'][2], -0.25) - self.assertAlmostEqual(meta['transform'][3], 0) - self.assertAlmostEqual(meta['transform'][4], -0.5) - self.assertAlmostEqual(meta['transform'][5], 50.25) - self.assertEqual(meta['height'], 21) - self.assertEqual(meta['width'], 5) + df_val["latitude"] = y.flatten() + df_val["longitude"] = x.flatten() + df_val["value"] = np.ones(len(df_val)) * 10 + crs = "epsg:2202" + _raster, meta = u_coord.points_to_raster( + df_val, val_names=["value"], crs=crs, 
scheduler=scheduler + ) + self.assertFalse( + hasattr(df_val, "crs") + ) # points_to_raster must not modify df_val + self.assertTrue(u_coord.equal_crs(meta["crs"], crs)) + self.assertAlmostEqual(meta["transform"][0], 0.5) + self.assertAlmostEqual(meta["transform"][1], 0) + self.assertAlmostEqual(meta["transform"][2], -0.25) + self.assertAlmostEqual(meta["transform"][3], 0) + self.assertAlmostEqual(meta["transform"][4], -0.5) + self.assertAlmostEqual(meta["transform"][5], 50.25) + self.assertEqual(meta["height"], 21) + self.assertEqual(meta["width"], 5) # test for values crossing antimeridian df_val = gpd.GeoDataFrame() - df_val['latitude'] = [1, 0, 1, 0] - df_val['longitude'] = [178, -179.0, 181, -180] - df_val['value'] = np.arange(4) + df_val["latitude"] = [1, 0, 1, 0] + df_val["longitude"] = [178, -179.0, 181, -180] + df_val["value"] = np.arange(4) r_data, meta = u_coord.points_to_raster( - df_val, val_names=['value'], res=0.5, raster_res=1.0, scheduler=scheduler) - self.assertTrue(u_coord.equal_crs(meta['crs'], DEF_CRS)) - self.assertAlmostEqual(meta['transform'][0], 1.0) - self.assertAlmostEqual(meta['transform'][1], 0) - self.assertAlmostEqual(meta['transform'][2], 177.5) - self.assertAlmostEqual(meta['transform'][3], 0) - self.assertAlmostEqual(meta['transform'][4], -1.0) - self.assertAlmostEqual(meta['transform'][5], 1.5) - self.assertEqual(meta['height'], 2) - self.assertEqual(meta['width'], 4) + df_val, + val_names=["value"], + res=0.5, + raster_res=1.0, + scheduler=scheduler, + ) + self.assertTrue(u_coord.equal_crs(meta["crs"], DEF_CRS)) + self.assertAlmostEqual(meta["transform"][0], 1.0) + self.assertAlmostEqual(meta["transform"][1], 0) + self.assertAlmostEqual(meta["transform"][2], 177.5) + self.assertAlmostEqual(meta["transform"][3], 0) + self.assertAlmostEqual(meta["transform"][4], -1.0) + self.assertAlmostEqual(meta["transform"][5], 1.5) + self.assertEqual(meta["height"], 2) + self.assertEqual(meta["width"], 4) np.testing.assert_array_equal(r_data[0], [[0, 0, 0, 2], [0, 0, 3, 1]]) + class TestRasterIO(unittest.TestCase): def test_write_raster_pass(self): """Test write_raster function.""" test_file = Path(DATA_DIR, "test_write_raster.tif") data = np.arange(24).reshape(6, 4).astype(np.float32) meta = { - 'transform': Affine(0.1, 0, 0, 0, 1, 0), - 'width': data.shape[1], - 'height': data.shape[0], - 'crs': 'epsg:2202', - 'compress': 'deflate', + "transform": Affine(0.1, 0, 0, 0, 1, 0), + "width": data.shape[1], + "height": data.shape[0], + "crs": "epsg:2202", + "compress": "deflate", } u_coord.write_raster(test_file, data, meta) read_meta, read_data = u_coord.read_raster(test_file) - self.assertEqual(read_meta['transform'], meta['transform']) - self.assertEqual(read_meta['width'], meta['width']) - self.assertEqual(read_meta['height'], meta['height']) - self.assertTrue(u_coord.equal_crs(read_meta['crs'], meta['crs'])) + self.assertEqual(read_meta["transform"], meta["transform"]) + self.assertEqual(read_meta["width"], meta["width"]) + self.assertEqual(read_meta["height"], meta["height"]) + self.assertTrue(u_coord.equal_crs(read_meta["crs"], meta["crs"])) self.assertEqual(read_data.shape, (1, np.prod(data.shape))) np.testing.assert_array_equal(read_data, data.reshape(read_data.shape)) def test_window_raster_pass(self): """Test window""" - meta, inten_ras = u_coord.read_raster(HAZ_DEMO_FL, window=Window(10, 20, 50.1, 60)) - self.assertAlmostEqual(meta['crs'], DEF_CRS) - self.assertAlmostEqual(meta['transform'].c, -69.2471495969998) - self.assertAlmostEqual(meta['transform'].a, 
0.009000000000000341) - self.assertAlmostEqual(meta['transform'].b, 0.0) - self.assertAlmostEqual(meta['transform'].f, 10.248220966978932) - self.assertAlmostEqual(meta['transform'].d, 0.0) - self.assertAlmostEqual(meta['transform'].e, -0.009000000000000341) - self.assertEqual(meta['height'], 60) - self.assertEqual(meta['width'], 50) + meta, inten_ras = u_coord.read_raster( + HAZ_DEMO_FL, window=Window(10, 20, 50.1, 60) + ) + self.assertAlmostEqual(meta["crs"], DEF_CRS) + self.assertAlmostEqual(meta["transform"].c, -69.2471495969998) + self.assertAlmostEqual(meta["transform"].a, 0.009000000000000341) + self.assertAlmostEqual(meta["transform"].b, 0.0) + self.assertAlmostEqual(meta["transform"].f, 10.248220966978932) + self.assertAlmostEqual(meta["transform"].d, 0.0) + self.assertAlmostEqual(meta["transform"].e, -0.009000000000000341) + self.assertEqual(meta["height"], 60) + self.assertEqual(meta["width"], 50) self.assertEqual(inten_ras.shape, (1, 60 * 50)) self.assertAlmostEqual(inten_ras.reshape((60, 50))[25, 12], 0.056825936) def test_poly_raster_pass(self): """Test geometry""" - poly = box(-69.2471495969998, 9.708220966978912, -68.79714959699979, 10.248220966978932) + poly = box( + -69.2471495969998, 9.708220966978912, -68.79714959699979, 10.248220966978932 + ) meta, inten_ras = u_coord.read_raster(HAZ_DEMO_FL, geometry=[poly]) - self.assertAlmostEqual(meta['crs'], DEF_CRS) - self.assertAlmostEqual(meta['transform'].c, -69.2471495969998) - self.assertAlmostEqual(meta['transform'].a, 0.009000000000000341) - self.assertAlmostEqual(meta['transform'].b, 0.0) - self.assertAlmostEqual(meta['transform'].f, 10.248220966978932) - self.assertAlmostEqual(meta['transform'].d, 0.0) - self.assertAlmostEqual(meta['transform'].e, -0.009000000000000341) - self.assertEqual(meta['height'], 60) - self.assertEqual(meta['width'], 50) + self.assertAlmostEqual(meta["crs"], DEF_CRS) + self.assertAlmostEqual(meta["transform"].c, -69.2471495969998) + self.assertAlmostEqual(meta["transform"].a, 0.009000000000000341) + self.assertAlmostEqual(meta["transform"].b, 0.0) + self.assertAlmostEqual(meta["transform"].f, 10.248220966978932) + self.assertAlmostEqual(meta["transform"].d, 0.0) + self.assertAlmostEqual(meta["transform"].e, -0.009000000000000341) + self.assertEqual(meta["height"], 60) + self.assertEqual(meta["width"], 50) self.assertEqual(inten_ras.shape, (1, 60 * 50)) def test_crs_raster_pass(self): """Test change projection""" meta, inten_ras = u_coord.read_raster( - HAZ_DEMO_FL, dst_crs='epsg:2202', resampling=Resampling.nearest) - self.assertAlmostEqual(meta['crs'], 'epsg:2202') - self.assertAlmostEqual(meta['transform'].c, 462486.8490210658) - self.assertAlmostEqual(meta['transform'].a, 998.576177833903) - self.assertAlmostEqual(meta['transform'].b, 0.0) - self.assertAlmostEqual(meta['transform'].f, 1164831.4772731226) - self.assertAlmostEqual(meta['transform'].d, 0.0) - self.assertAlmostEqual(meta['transform'].e, -998.576177833903) - self.assertEqual(meta['height'], 1081) - self.assertEqual(meta['width'], 968) + HAZ_DEMO_FL, dst_crs="epsg:2202", resampling=Resampling.nearest + ) + self.assertAlmostEqual(meta["crs"], "epsg:2202") + self.assertAlmostEqual(meta["transform"].c, 462486.8490210658) + self.assertAlmostEqual(meta["transform"].a, 998.576177833903) + self.assertAlmostEqual(meta["transform"].b, 0.0) + self.assertAlmostEqual(meta["transform"].f, 1164831.4772731226) + self.assertAlmostEqual(meta["transform"].d, 0.0) + self.assertAlmostEqual(meta["transform"].e, -998.576177833903) + 
self.assertEqual(meta["height"], 1081) + self.assertEqual(meta["width"], 968) self.assertEqual(inten_ras.shape, (1, 1081 * 968)) # TODO: NOT RESAMPLING WELL in this case!? self.assertAlmostEqual(inten_ras.reshape((1081, 968))[45, 22], 0) def test_crs_and_geometry_raster_pass(self): """Test change projection and crop to geometry""" - ply = shapely.geometry.Polygon([ - (478080.8562247154, 1105419.13439131), - (478087.5912452241, 1116475.583523723), - (500000, 1116468.876713805), - (500000, 1105412.49126517), - (478080.8562247154, 1105419.13439131) - ]) + ply = shapely.geometry.Polygon( + [ + (478080.8562247154, 1105419.13439131), + (478087.5912452241, 1116475.583523723), + (500000, 1116468.876713805), + (500000, 1105412.49126517), + (478080.8562247154, 1105419.13439131), + ] + ) meta, inten_ras = u_coord.read_raster( - HAZ_DEMO_FL, dst_crs='epsg:2202', geometry=[ply], - resampling=Resampling.nearest) - self.assertAlmostEqual(meta['crs'], 'epsg:2202') - self.assertEqual(meta['height'], 12) - self.assertEqual(meta['width'], 23) + HAZ_DEMO_FL, + dst_crs="epsg:2202", + geometry=[ply], + resampling=Resampling.nearest, + ) + self.assertAlmostEqual(meta["crs"], "epsg:2202") + self.assertEqual(meta["height"], 12) + self.assertEqual(meta["width"], 23) self.assertEqual(inten_ras.shape, (1, 12 * 23)) # TODO: NOT RESAMPLING WELL in this case!? self.assertAlmostEqual(inten_ras.reshape((12, 23))[11, 12], 0.10063865780830383) def test_transform_raster_pass(self): - transform = Affine(0.009000000000000341, 0.0, -69.33714959699981, - 0.0, -0.009000000000000341, 10.42822096697894) + transform = Affine( + 0.009000000000000341, + 0.0, + -69.33714959699981, + 0.0, + -0.009000000000000341, + 10.42822096697894, + ) meta, inten_ras = u_coord.read_raster( - HAZ_DEMO_FL, transform=transform, height=500, width=501) + HAZ_DEMO_FL, transform=transform, height=500, width=501 + ) - left = meta['transform'].xoff - top = meta['transform'].yoff - bottom = top + meta['transform'][4] * meta['height'] - right = left + meta['transform'][0] * meta['width'] + left = meta["transform"].xoff + top = meta["transform"].yoff + bottom = top + meta["transform"][4] * meta["height"] + right = left + meta["transform"][0] * meta["width"] self.assertAlmostEqual(left, -69.33714959699981) self.assertAlmostEqual(bottom, 5.928220966978939) self.assertAlmostEqual(right, -64.82814959699981) self.assertAlmostEqual(top, 10.42822096697894) - self.assertEqual(meta['width'], 501) - self.assertEqual(meta['height'], 500) - self.assertTrue(u_coord.equal_crs(meta['crs'].to_epsg(), 4326)) + self.assertEqual(meta["width"], 501) + self.assertEqual(meta["height"], 500) + self.assertTrue(u_coord.equal_crs(meta["crs"].to_epsg(), 4326)) self.assertEqual(inten_ras.shape, (1, 500 * 501)) - meta, inten_all = u_coord.read_raster(HAZ_DEMO_FL, window=Window(0, 0, 501, 500)) + meta, inten_all = u_coord.read_raster( + HAZ_DEMO_FL, window=Window(0, 0, 501, 500) + ) self.assertTrue(np.array_equal(inten_all, inten_ras)) def test_sample_raster(self): """Test sampling points from raster file""" val_1, val_2, fill_value = 0.056825936, 0.10389626, -999 - i_j_vals = np.array([ - [44, 21, 0], - [44, 22, 0], - [44, 23, 0], - [45, 21, 0], - [45, 22, val_1], - [45, 23, val_2], - [46, 21, 0], - [46, 22, 0], - [46, 23, 0], - [45, 22.2, 0.8 * val_1 + 0.2 * val_2], - [45.3, 21.4, 0.7 * 0.4 * val_1], - [-20, 0, fill_value], - ]) + i_j_vals = np.array( + [ + [44, 21, 0], + [44, 22, 0], + [44, 23, 0], + [45, 21, 0], + [45, 22, val_1], + [45, 23, val_2], + [46, 21, 0], + [46, 22, 0], + [46, 
23, 0], + [45, 22.2, 0.8 * val_1 + 0.2 * val_2], + [45.3, 21.4, 0.7 * 0.4 * val_1], + [-20, 0, fill_value], + ] + ) res = 0.009000000000000341 lat = 10.42822096697894 - res / 2 - i_j_vals[:, 0] * res lon = -69.33714959699981 + res / 2 + i_j_vals[:, 1] * res - values = u_coord.read_raster_sample(HAZ_DEMO_FL, lat, lon, fill_value=fill_value) + values = u_coord.read_raster_sample( + HAZ_DEMO_FL, lat, lon, fill_value=fill_value + ) self.assertEqual(values.size, lat.size) for i, val in enumerate(i_j_vals[:, 2]): self.assertAlmostEqual(values[i], val) # with explicit intermediate resolution values = u_coord.read_raster_sample( - HAZ_DEMO_FL, lat, lon, fill_value=fill_value, intermediate_res=res) + HAZ_DEMO_FL, lat, lon, fill_value=fill_value, intermediate_res=res + ) self.assertEqual(values.size, lat.size) for i, val in enumerate(i_j_vals[:, 2]): self.assertAlmostEqual(values[i], val) @@ -1469,10 +1928,13 @@ def test_sample_raster(self): self.assertEqual(z_both.size, lat_both.size) self.assertEqual(z_both_neg.size, lat_both.size) - np.testing.assert_array_almost_equal(z_left, z_both[:z_left.size], ) - np.testing.assert_array_almost_equal(z_right, z_both[-z_right.size:]) - np.testing.assert_array_almost_equal(z_left, z_both_neg[:z_left.size]) - np.testing.assert_array_almost_equal(z_right, z_both_neg[-z_right.size:]) + np.testing.assert_array_almost_equal( + z_left, + z_both[: z_left.size], + ) + np.testing.assert_array_almost_equal(z_right, z_both[-z_right.size :]) + np.testing.assert_array_almost_equal(z_left, z_both_neg[: z_left.size]) + np.testing.assert_array_almost_equal(z_right, z_both_neg[-z_right.size :]) def test_sample_raster_gradient(self): """Test sampling gradients from a raster file""" @@ -1500,10 +1962,12 @@ def test_sample_raster_gradient(self): def test_refine_raster(self): """Test refinement of given raster data""" - data = np.array([ - [0.25, 0.75], - [0.5, 1], - ]) + data = np.array( + [ + [0.25, 0.75], + [0.5, 1], + ] + ) transform = Affine(0.5, 0, 0, 0, 0.5, 0) new_res = 0.1 new_data, new_transform = u_coord.refine_raster_data(data, transform, new_res) @@ -1524,7 +1988,8 @@ def test_bounded_refined_raster(self): res = 0.004 global_origin = (-180, 90) z, transform = u_coord.read_raster_bounds( - HAZ_DEMO_FL, bounds, res=res, global_origin=global_origin) + HAZ_DEMO_FL, bounds, res=res, global_origin=global_origin + ) # the first dimension corresponds to the raster bands: self.assertEqual(z.shape[0], 1) @@ -1548,13 +2013,17 @@ def test_bounded_refined_raster(self): # check along x-axis self.assertLessEqual(transform[2] + 0.5 * transform[0], bounds[0]) self.assertGreater(transform[2] + 1.5 * transform[0], bounds[0]) - self.assertGreaterEqual(transform[2] + (z.shape[1] - 0.5) * transform[0], bounds[2]) + self.assertGreaterEqual( + transform[2] + (z.shape[1] - 0.5) * transform[0], bounds[2] + ) self.assertLess(transform[2] + (z.shape[1] - 1.5) * transform[0], bounds[2]) # check along y-axis (note that the orientation is reversed) self.assertGreaterEqual(transform[5] + 0.5 * transform[4], bounds[3]) self.assertLess(transform[5] + 1.5 * transform[4], bounds[3]) - self.assertLessEqual(transform[5] + (z.shape[0] - 0.5) * transform[4], bounds[1]) + self.assertLessEqual( + transform[5] + (z.shape[0] - 0.5) * transform[4], bounds[1] + ) self.assertGreater(transform[5] + (z.shape[0] - 1.5) * transform[4], bounds[1]) # trigger downloading of dist-to-coast dataset (if not already present) @@ -1562,27 +2031,39 @@ def test_bounded_refined_raster(self): # make sure the buffering doesn't go 
beyond ±90 degrees latitude: z, transform = u_coord.read_raster_bounds( - path, (0, -90, 10, -80), res=1.0, global_origin=(-180, 90)) + path, (0, -90, 10, -80), res=1.0, global_origin=(-180, 90) + ) self.assertEqual(z.shape, (1, 11, 12)) self.assertEqual(transform[5], -79.0) z, transform = u_coord.read_raster_bounds( - path, (0, 80, 10, 90), res=1.0, global_origin=(-180, 90)) + path, (0, 80, 10, 90), res=1.0, global_origin=(-180, 90) + ) self.assertEqual(z.shape, (1, 11, 12)) self.assertEqual(transform[5], 90.0) # make sure crossing the antimeridian works fine: z_right, transform = u_coord.read_raster_bounds( - path, (-175, 0, -170, 10), res=1.0, global_origin=(-180, 90)) + path, (-175, 0, -170, 10), res=1.0, global_origin=(-180, 90) + ) z_left, transform = u_coord.read_raster_bounds( - path, (170, 0, 175, 10), res=1.0, global_origin=(-180, 90)) + path, (170, 0, 175, 10), res=1.0, global_origin=(-180, 90) + ) z_both, transform = u_coord.read_raster_bounds( - path, (170, 0, 190, 10), res=1.0, global_origin=(-180, 90)) + path, (170, 0, 190, 10), res=1.0, global_origin=(-180, 90) + ) z_both_neg, transform = u_coord.read_raster_bounds( - path, (-190, 0, -170, 10), res=1.0, global_origin=(-180, 90)) - np.testing.assert_array_equal(z_left[0,:,:], z_both[0,:,:z_left.shape[2]]) - np.testing.assert_array_equal(z_right[0,:,:], z_both[0,:,-z_right.shape[2]:]) - np.testing.assert_array_equal(z_left[0,:,:], z_both_neg[0,:,:z_left.shape[2]]) - np.testing.assert_array_equal(z_right[0,:,:], z_both_neg[0,:,-z_right.shape[2]:]) + path, (-190, 0, -170, 10), res=1.0, global_origin=(-180, 90) + ) + np.testing.assert_array_equal(z_left[0, :, :], z_both[0, :, : z_left.shape[2]]) + np.testing.assert_array_equal( + z_right[0, :, :], z_both[0, :, -z_right.shape[2] :] + ) + np.testing.assert_array_equal( + z_left[0, :, :], z_both_neg[0, :, : z_left.shape[2]] + ) + np.testing.assert_array_equal( + z_right[0, :, :], z_both_neg[0, :, -z_right.shape[2] :] + ) def test_subraster_from_bounds(self): """test subraster_from_bounds function""" @@ -1601,24 +2082,31 @@ def test_subraster_from_bounds(self): # test for more complicated input data: _, meta_list = data_arrays_resampling_demo() i = 2 - dst_resolution = (1., .2) + dst_resolution = (1.0, 0.2) bounds = rasterio.transform.array_bounds( - meta_list[i]['height'], meta_list[i]['width'], meta_list[i]['transform']) + meta_list[i]["height"], meta_list[i]["width"], meta_list[i]["transform"] + ) transform = rasterio.transform.from_origin(-180, 90, *dst_resolution) dst_transform, dst_shape = u_coord.subraster_from_bounds(transform, bounds) - self.assertEqual(dst_shape, (meta_list[0]['height'] / dst_resolution[1], - meta_list[0]['width'] / dst_resolution[0])) + self.assertEqual( + dst_shape, + ( + meta_list[0]["height"] / dst_resolution[1], + meta_list[0]["width"] / dst_resolution[0], + ), + ) self.assertEqual(dst_resolution, (dst_transform[0], -dst_transform[4])) - self.assertEqual(meta_list[i]['transform'][1], dst_transform[1]) - self.assertEqual(meta_list[i]['transform'][2], dst_transform[2]) - self.assertEqual(meta_list[i]['transform'][3], dst_transform[3]) - self.assertEqual(meta_list[i]['transform'][5], dst_transform[5]) + self.assertEqual(meta_list[i]["transform"][1], dst_transform[1]) + self.assertEqual(meta_list[i]["transform"][2], dst_transform[2]) + self.assertEqual(meta_list[i]["transform"][3], dst_transform[3]) + self.assertEqual(meta_list[i]["transform"][5], dst_transform[5]) # test for odd resolution change: i = 0 - dst_resolution = (.15, .15) + dst_resolution = 
(0.15, 0.15) bounds = rasterio.transform.array_bounds( - meta_list[i]['height'], meta_list[i]['width'], meta_list[i]['transform']) + meta_list[i]["height"], meta_list[i]["width"], meta_list[i]["transform"] + ) transform = rasterio.transform.from_origin(-180, 90, *dst_resolution) dst_transform, dst_shape = u_coord.subraster_from_bounds(transform, bounds) self.assertEqual(dst_shape, (14, 20)) @@ -1633,18 +2121,26 @@ def test_align_raster_data_shift(self): i = 0 # dst j = 1 # src - dst_resolution=meta_list[i]['transform'][0] + dst_resolution = meta_list[i]["transform"][0] dst_bounds = rasterio.transform.array_bounds( - meta_list[i]['height'], meta_list[i]['width'], meta_list[i]['transform']) + meta_list[i]["height"], meta_list[i]["width"], meta_list[i]["transform"] + ) data_out, dst_transform = u_coord.align_raster_data( - data_in[j], meta_list[j]['crs'], meta_list[j]['transform'], - dst_crs=meta_list[i]['crs'], dst_resolution=dst_resolution, dst_bounds=dst_bounds, - resampling='bilinear') + data_in[j], + meta_list[j]["crs"], + meta_list[j]["transform"], + dst_crs=meta_list[i]["crs"], + dst_resolution=dst_resolution, + dst_bounds=dst_bounds, + resampling="bilinear", + ) # test northward shift of box: - np.testing.assert_array_equal(data_in[1][1,:], data_out[0,:]) - np.testing.assert_array_equal(np.array([0., 0., 0.], dtype='float32'), data_out[1,:]) - self.assertEqual(meta_list[i]['transform'][5], dst_transform[5]) + np.testing.assert_array_equal(data_in[1][1, :], data_out[0, :]) + np.testing.assert_array_equal( + np.array([0.0, 0.0, 0.0], dtype="float32"), data_out[1, :] + ) + self.assertEqual(meta_list[i]["transform"][5], dst_transform[5]) def test_align_raster_data_downsampling(self): """test function align_raster_data for downsampling""" @@ -1652,17 +2148,25 @@ def test_align_raster_data_downsampling(self): i = 0 # dst j = 2 # src - dst_resolution=meta_list[i]['transform'][0] + dst_resolution = meta_list[i]["transform"][0] dst_bounds = rasterio.transform.array_bounds( - meta_list[i]['height'], meta_list[i]['width'], meta_list[i]['transform']) + meta_list[i]["height"], meta_list[i]["width"], meta_list[i]["transform"] + ) data_out, dst_transform = u_coord.align_raster_data( - data_in[j], meta_list[j]['crs'], meta_list[j]['transform'], - dst_crs=meta_list[i]['crs'], dst_resolution=dst_resolution, dst_bounds=dst_bounds, - resampling='bilinear') + data_in[j], + meta_list[j]["crs"], + meta_list[j]["transform"], + dst_crs=meta_list[i]["crs"], + dst_resolution=dst_resolution, + dst_bounds=dst_bounds, + resampling="bilinear", + ) # test downsampled data: - reference_array = np.array([[5.0204080, 2.2678570, 0.12244898], - [1.1224489, 0.6785714, 0.73469390]], dtype='float32') + reference_array = np.array( + [[5.0204080, 2.2678570, 0.12244898], [1.1224489, 0.6785714, 0.73469390]], + dtype="float32", + ) np.testing.assert_array_almost_equal_nulp(reference_array, data_out) self.assertEqual(dst_resolution, dst_transform[0]) @@ -1672,24 +2176,37 @@ def test_align_raster_data_downsample_conserve(self): data_in, meta_list = data_arrays_resampling_demo() i = 0 # dst - dst_resolution=meta_list[i]['transform'][0] + dst_resolution = meta_list[i]["transform"][0] dst_bounds = rasterio.transform.array_bounds( - meta_list[i]['height'], meta_list[i]['width'], meta_list[i]['transform']) + meta_list[i]["height"], meta_list[i]["width"], meta_list[i]["transform"] + ) # test conserve sum: - for j, data in enumerate(data_in): # src + for j, data in enumerate(data_in): # src data_out, _ = u_coord.align_raster_data( - 
data_in[j], meta_list[j]['crs'], meta_list[j]['transform'], - dst_crs=meta_list[i]['crs'], dst_resolution=dst_resolution, dst_bounds=dst_bounds, - resampling='bilinear', conserve='sum') + data_in[j], + meta_list[j]["crs"], + meta_list[j]["transform"], + dst_crs=meta_list[i]["crs"], + dst_resolution=dst_resolution, + dst_bounds=dst_bounds, + resampling="bilinear", + conserve="sum", + ) self.assertAlmostEqual(data_in[j].sum(), data_out.sum(), places=4) # test conserve mean: for j, data in enumerate(data_in): data_out, _ = u_coord.align_raster_data( - data_in[j], meta_list[j]['crs'], meta_list[j]['transform'], - dst_crs=meta_list[i]['crs'], dst_resolution=dst_resolution, dst_bounds=dst_bounds, - resampling='bilinear', conserve='mean') + data_in[j], + meta_list[j]["crs"], + meta_list[j]["transform"], + dst_crs=meta_list[i]["crs"], + dst_resolution=dst_resolution, + dst_bounds=dst_bounds, + resampling="bilinear", + conserve="mean", + ) self.assertAlmostEqual(data_in[j].mean(), data_out.mean(), places=4) def test_align_raster_data_upsample(self): @@ -1698,25 +2215,38 @@ def test_align_raster_data_upsample(self): data_out = list() i = 2 # dst - dst_resolution = meta_list[i]['transform'][0] + dst_resolution = meta_list[i]["transform"][0] dst_bounds = rasterio.transform.array_bounds( - meta_list[i]['height'], meta_list[i]['width'], meta_list[i]['transform']) - for j in [0,1,2]: - data_out.append(u_coord.align_raster_data( - data_in[j], meta_list[j]['crs'], meta_list[j]['transform'], - dst_crs=meta_list[i]['crs'], dst_resolution=dst_resolution, - dst_bounds=dst_bounds, resampling='bilinear')[0]) + meta_list[i]["height"], meta_list[i]["width"], meta_list[i]["transform"] + ) + for j in [0, 1, 2]: + data_out.append( + u_coord.align_raster_data( + data_in[j], + meta_list[j]["crs"], + meta_list[j]["transform"], + dst_crs=meta_list[i]["crs"], + dst_resolution=dst_resolution, + dst_bounds=dst_bounds, + resampling="bilinear", + )[0] + ) # test reference data unchanged: np.testing.assert_array_equal(data_in[2], data_out[2]) # test northward shift: - np.testing.assert_array_equal(data_out[0][2,:], data_out[1][0,:]) - np.testing.assert_array_equal(data_out[0][3,:], data_out[1][1,:]) + np.testing.assert_array_equal(data_out[0][2, :], data_out[1][0, :]) + np.testing.assert_array_equal(data_out[0][3, :], data_out[1][1, :]) # test upsampled data: - reference_array = np.array([[0.00, 0.25, 0.75, 1.25, 1.75, 2.00], - [0.75, 1.00, 1.50, 2.00, 2.50, 2.75], - [2.25, 2.50, 3.00, 3.50, 4.00, 4.25], - [3.00, 3.25, 3.75, 4.25, 4.75, 5.00]], dtype='float32') + reference_array = np.array( + [ + [0.00, 0.25, 0.75, 1.25, 1.75, 2.00], + [0.75, 1.00, 1.50, 2.00, 2.50, 2.75], + [2.25, 2.50, 3.00, 3.50, 4.00, 4.25], + [3.00, 3.25, 3.75, 4.25, 4.75, 5.00], + ], + dtype="float32", + ) np.testing.assert_array_equal(reference_array, data_out[0]) def test_align_raster_data_odd_downsample(self): @@ -1727,15 +2257,22 @@ def test_align_raster_data_odd_downsample(self): dst_resolution = 1.7 dst_bounds = rasterio.transform.array_bounds( - meta_list[i]['height'], meta_list[i]['width'], meta_list[i]['transform']) + meta_list[i]["height"], meta_list[i]["width"], meta_list[i]["transform"] + ) data_out, dst_transform = u_coord.align_raster_data( - data_in[j], meta_list[j]['crs'], meta_list[j]['transform'], - dst_crs=meta_list[i]['crs'], dst_resolution=dst_resolution, dst_bounds=dst_bounds, - resampling='bilinear') + data_in[j], + meta_list[j]["crs"], + meta_list[j]["transform"], + dst_crs=meta_list[i]["crs"], + dst_resolution=dst_resolution, + 
dst_bounds=dst_bounds, + resampling="bilinear", + ) self.assertEqual(dst_resolution, dst_transform[0]) - reference_array = np.array([[0.425, 1.7631578], - [3.425, 4.763158 ]], dtype='float32') + reference_array = np.array( + [[0.425, 1.7631578], [3.425, 4.763158]], dtype="float32" + ) np.testing.assert_array_equal(reference_array, data_out) def test_mask_raster_with_geometry(self): @@ -1743,14 +2280,18 @@ def test_mask_raster_with_geometry(self): raster = np.ones((4, 3), dtype=np.float32) transform = rasterio.transform.Affine(1, 0, 5, 0, -1, -10) shapes = [shapely.geometry.box(6.1, -12.9, 6.9, -11.1)] - expected = np.array([ - [0, 0, 0], - [0, 1, 0], - [0, 1, 0], - [0, 0, 0], - ], dtype=np.float32) + expected = np.array( + [ + [0, 0, 0], + [0, 1, 0], + [0, 1, 0], + [0, 0, 0], + ], + dtype=np.float32, + ) np.testing.assert_array_equal( - u_coord.mask_raster_with_geometry(raster, transform, shapes), expected) + u_coord.mask_raster_with_geometry(raster, transform, shapes), expected + ) # Execute Tests diff --git a/climada/util/test/test_dates_times.py b/climada/util/test/test_dates_times.py index 2e1d019c4..3cc9d25bc 100644 --- a/climada/util/test/test_dates_times.py +++ b/climada/util/test/test_dates_times.py @@ -18,22 +18,28 @@ Test of dates_times module """ + import datetime as dt import unittest + import numpy as np import climada.util.dates_times as u_dt + class TestDateString(unittest.TestCase): """Test date functions""" + def test_date_to_str_pass(self): """Test _date_to_str function""" ordinal_date = dt.datetime.toordinal(dt.datetime(2018, 4, 6)) - self.assertEqual('2018-04-06', u_dt.date_to_str(ordinal_date)) + self.assertEqual("2018-04-06", u_dt.date_to_str(ordinal_date)) - ordinal_date = [dt.datetime.toordinal(dt.datetime(2018, 4, 6)), - dt.datetime.toordinal(dt.datetime(2019, 1, 1))] - self.assertEqual(['2018-04-06', '2019-01-01'], u_dt.date_to_str(ordinal_date)) + ordinal_date = [ + dt.datetime.toordinal(dt.datetime(2018, 4, 6)), + dt.datetime.toordinal(dt.datetime(2019, 1, 1)), + ] + self.assertEqual(["2018-04-06", "2019-01-01"], u_dt.date_to_str(ordinal_date)) def test_str_to_date_pass(self): """Test _date_to_str function""" @@ -43,36 +49,45 @@ def test_str_to_date_pass(self): date = [640000, 730000] self.assertEqual(u_dt.str_to_date(u_dt.date_to_str(date)), date) + class TestDateNumpy(unittest.TestCase): """Test date functions for numpy datetime64 type""" + def test_datetime64_to_ordinal(self): """Test _datetime64_to_ordinal""" - date = np.datetime64('1999-12-26T06:00:00.000000000') + date = np.datetime64("1999-12-26T06:00:00.000000000") ordinal = u_dt.datetime64_to_ordinal(date) - self.assertEqual(u_dt.date_to_str(ordinal), '1999-12-26') + self.assertEqual(u_dt.date_to_str(ordinal), "1999-12-26") - date = [np.datetime64('1999-12-26T06:00:00.000000000'), - np.datetime64('2000-12-26T06:00:00.000000000')] + date = [ + np.datetime64("1999-12-26T06:00:00.000000000"), + np.datetime64("2000-12-26T06:00:00.000000000"), + ] ordinal = u_dt.datetime64_to_ordinal(date) - self.assertEqual(u_dt.date_to_str(ordinal[0]), '1999-12-26') - self.assertEqual(u_dt.date_to_str(ordinal[1]), '2000-12-26') + self.assertEqual(u_dt.date_to_str(ordinal[0]), "1999-12-26") + self.assertEqual(u_dt.date_to_str(ordinal[1]), "2000-12-26") def test_last_year_pass(self): """Test last_year""" - ordinal_date = [dt.datetime.toordinal(dt.datetime(2018, 4, 6)), - dt.datetime.toordinal(dt.datetime(1918, 4, 6)), - dt.datetime.toordinal(dt.datetime(2019, 1, 1))] + ordinal_date = [ + 
dt.datetime.toordinal(dt.datetime(2018, 4, 6)), + dt.datetime.toordinal(dt.datetime(1918, 4, 6)), + dt.datetime.toordinal(dt.datetime(2019, 1, 1)), + ] self.assertEqual(u_dt.last_year(ordinal_date), 2019) self.assertEqual(u_dt.last_year(np.array(ordinal_date)), 2019) def test_first_year_pass(self): """Test last_year""" - ordinal_date = [dt.datetime.toordinal(dt.datetime(2018, 4, 6)), - dt.datetime.toordinal(dt.datetime(1918, 4, 6)), - dt.datetime.toordinal(dt.datetime(2019, 1, 1))] + ordinal_date = [ + dt.datetime.toordinal(dt.datetime(2018, 4, 6)), + dt.datetime.toordinal(dt.datetime(1918, 4, 6)), + dt.datetime.toordinal(dt.datetime(2019, 1, 1)), + ] self.assertEqual(u_dt.first_year(ordinal_date), 1918) self.assertEqual(u_dt.first_year(np.array(ordinal_date)), 1918) + # Execute Tests if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestDateString) diff --git a/climada/util/test/test_dwd_icon.py b/climada/util/test/test_dwd_icon.py index 477f54a32..db1161189 100644 --- a/climada/util/test/test_dwd_icon.py +++ b/climada/util/test/test_dwd_icon.py @@ -19,65 +19,82 @@ Test files_handler module. """ +import datetime as dt import unittest from pathlib import Path -import datetime as dt -import numpy as np -from climada.util.dwd_icon_loader import (download_icon_grib, - delete_icon_grib, - _create_icon_grib_name, - download_icon_centroids_file) +import numpy as np from climada.util.constants import SYSTEM_DIR +from climada.util.dwd_icon_loader import ( + _create_icon_grib_name, + delete_icon_grib, + download_icon_centroids_file, + download_icon_grib, +) class TestCreateIconName(unittest.TestCase): """Test _create_icon_grib_name function""" + def test_icon_name(self): """Correct strings created""" url, file_name, lead_times = _create_icon_grib_name( dt.datetime(2021, 2, 2), max_lead_time=56, - ) - self.assertEqual(url, ('https://opendata.dwd.de/weather/nwp/'+ - 'icon-eu-eps/grib/00/vmax_10m/') - ) - self.assertEqual(file_name, - ('icon-eu-eps_europe_icosahedral_single-level_'+ - '2021020200_{lead_i:03}_vmax_10m.grib2.bz2') - ) - np.testing.assert_array_equal(lead_times, - np.concatenate([np.arange(1,49), - [51,54,]]) - ) + ) + self.assertEqual( + url, + ("https://opendata.dwd.de/weather/nwp/" + "icon-eu-eps/grib/00/vmax_10m/"), + ) + self.assertEqual( + file_name, + ( + "icon-eu-eps_europe_icosahedral_single-level_" + + "2021020200_{lead_i:03}_vmax_10m.grib2.bz2" + ), + ) + np.testing.assert_array_equal( + lead_times, + np.concatenate( + [ + np.arange(1, 49), + [ + 51, + 54, + ], + ] + ), + ) def test_leadtime_warning(self): """Adjustment for wrong leadtime""" url, file_name, lead_times = _create_icon_grib_name( dt.datetime(2021, 2, 2), max_lead_time=240, - ) - self.assertEqual(lead_times.max(),120) + ) + self.assertEqual(lead_times.max(), 120) class TestDownloadIcon(unittest.TestCase): """Test download_icon_grib function""" + def test_download_icon(self): """Value Error if date to old""" try: with self.assertRaises(ValueError): - download_icon_grib(dt.datetime(2020,1,1)) + download_icon_grib(dt.datetime(2020, 1, 1)) except IOError: pass class TestDownloadIconCentroids(unittest.TestCase): """Test download_icon_centroids_file function""" + def test_download_icon(self): """Value Error if model unknown""" with self.assertRaises(ValueError): - download_icon_centroids_file(model_name='icon') + download_icon_centroids_file(model_name="icon") class TestDeleteIcon(unittest.TestCase): @@ -86,27 +103,27 @@ class TestDeleteIcon(unittest.TestCase): def 
test_file_not_exist_warning(self): """test warning if file does not exist""" - with self.assertLogs('climada.util.dwd_icon_loader', 'WARNING') as cm: - delete_icon_grib(dt.datetime(1908, 2, 2), - max_lead_time=1, - ) + with self.assertLogs("climada.util.dwd_icon_loader", "WARNING") as cm: + delete_icon_grib( + dt.datetime(1908, 2, 2), + max_lead_time=1, + ) self.assertEqual(len(cm.output), 1) - self.assertIn('does not exist and could not be deleted', cm.output[0]) + self.assertIn("does not exist and could not be deleted", cm.output[0]) def test_rm_file(self): """test if file is removed""" url, file_name, lead_times = _create_icon_grib_name( - dt.datetime(1908, 2, 2), - max_lead_time=1, - ) + dt.datetime(1908, 2, 2), + max_lead_time=1, + ) file_name_i = SYSTEM_DIR.absolute().joinpath( file_name.format(lead_i=lead_times[0]) - ) + ) Path(file_name_i).touch() - delete_icon_grib(dt.datetime(1908, 2, 2), - max_lead_time=1, - download_dir=SYSTEM_DIR - ) + delete_icon_grib( + dt.datetime(1908, 2, 2), max_lead_time=1, download_dir=SYSTEM_DIR + ) self.assertFalse(Path(file_name_i).exists()) @@ -114,6 +131,8 @@ def test_rm_file(self): if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestCreateIconName) TESTS.addTests(unittest.TestLoader().loadTestsFromTestCase(TestDownloadIcon)) - TESTS.addTests(unittest.TestLoader().loadTestsFromTestCase(TestDownloadIconCentroids)) + TESTS.addTests( + unittest.TestLoader().loadTestsFromTestCase(TestDownloadIconCentroids) + ) TESTS.addTests(unittest.TestLoader().loadTestsFromTestCase(TestDeleteIcon)) unittest.TextTestRunner(verbosity=2).run(TESTS) diff --git a/climada/util/test/test_files.py b/climada/util/test/test_files.py index f7df2fbc4..75b3b8cb2 100644 --- a/climada/util/test/test_files.py +++ b/climada/util/test/test_files.py @@ -22,16 +22,21 @@ import unittest from pathlib import Path -from climada.util.files_handler import to_list, get_file_names, download_file, \ -get_extension -from climada.util.constants import DEMO_DIR, GLB_CENTROIDS_MAT, ENT_TEMPLATE_XLS +from climada.util.constants import DEMO_DIR, ENT_TEMPLATE_XLS, GLB_CENTROIDS_MAT +from climada.util.files_handler import ( + download_file, + get_extension, + get_file_names, + to_list, +) class TestDownloadUrl(unittest.TestCase): """Test download_file function""" + def test_wrong_url_fail(self): """Error raised when wrong url.""" - url = 'https://ngdc.noaa.gov/eog/data/web_data/v4composites/F172012.v4.tar' + url = "https://ngdc.noaa.gov/eog/data/web_data/v4composites/F172012.v4.tar" try: with self.assertRaises(ValueError): download_file(url) @@ -41,11 +46,12 @@ def test_wrong_url_fail(self): class TestToStrList(unittest.TestCase): """Test to_list function""" + def test_identity_pass(self): """Returns the same list if its length is correct.""" num_exp = 3 - values = ['hi', 'ho', 'ha'] - val_name = 'values' + values = ["hi", "ho", "ha"] + val_name = "values" out = to_list(num_exp, values, val_name) self.assertEqual(values, out) @@ -53,20 +59,20 @@ def test_one_to_list(self): """When input is a string or list with one element, it returns a list with the expected number of elments repeated""" num_exp = 3 - values = 'hi' - val_name = 'values' + values = "hi" + val_name = "values" out = to_list(num_exp, values, val_name) - self.assertEqual(['hi', 'hi', 'hi'], out) + self.assertEqual(["hi", "hi", "hi"], out) - values = ['ha'] + values = ["ha"] out = to_list(num_exp, values, val_name) - self.assertEqual(['ha', 'ha', 'ha'], out) + self.assertEqual(["ha", "ha", "ha"], out) def 
test_list_wrong_length_fail(self): """When input is list of neither expected size nor one, fail.""" num_exp = 3 - values = ['1', '2'] - val_name = 'values' + values = ["1", "2"] + val_name = "values" with self.assertRaises(ValueError) as cm: to_list(num_exp, values, val_name) @@ -75,7 +81,8 @@ def test_list_wrong_length_fail(self): class TestGetFileNames(unittest.TestCase): """Test get_file_names function. Only works with actually existing - files and directories.""" + files and directories.""" + def test_one_file_copy(self): """If input is one file name, return a list with this file name""" file_name = GLB_CENTROIDS_MAT @@ -105,21 +112,23 @@ def test_wrong_argument(self): get_file_names(str(empty_dir)) self.assertIn("no files", str(ve.exception)) - no_file = 'this is not a file' + no_file = "this is not a file" with self.assertRaises(ValueError) as ve: get_file_names(no_file) self.assertIn("cannot find", str(ve.exception)) def test_globbing(self): """If input is a glob pattern, return a list of matching visible - files; omit folders. + files; omit folders. """ file_name = DEMO_DIR - out = get_file_names(f'{file_name}/*') + out = get_file_names(f"{file_name}/*") - tmp_files = [str(f) - for f in Path(file_name).iterdir() - if f.is_file() and not f.name.startswith('.')] + tmp_files = [ + str(f) + for f in Path(file_name).iterdir() + if f.is_file() and not f.name.startswith(".") + ] self.assertListEqual(sorted(tmp_files), sorted(out)) @@ -129,26 +138,40 @@ class TestExtension(unittest.TestCase): def test_get_extension_no_pass(self): """Test no extension""" - file_name = '/Users/aznarsig/Documents/Python/climada_python/data/demo/SC22000_VE__M1' - self.assertEqual('', get_extension(file_name)[1]) + file_name = ( + "/Users/aznarsig/Documents/Python/climada_python/data/demo/SC22000_VE__M1" + ) + self.assertEqual("", get_extension(file_name)[1]) self.assertEqual(str(Path(file_name)), get_extension(file_name)[0]) def test_get_extension_one_pass(self): """Test not compressed""" - file_name = '/Users/aznarsig/Documents/Python/climada_python/data/demo/SC22000_VE__M1.grd' - self.assertEqual('.grd', get_extension(file_name)[1]) + file_name = "/Users/aznarsig/Documents/Python/climada_python/data/demo/SC22000_VE__M1.grd" + self.assertEqual(".grd", get_extension(file_name)[1]) self.assertEqual( - str(Path('/Users/aznarsig/Documents/Python/climada_python/data/demo/SC22000_VE__M1')), - get_extension(file_name)[0]) + str( + Path( + "/Users/aznarsig/Documents/Python/climada_python/data/demo/SC22000_VE__M1" + ) + ), + get_extension(file_name)[0], + ) def test_get_extension_two_pass(self): """Test compressed""" - file_name = '/Users/aznarsig/Documents/Python/climada_python' \ - '/data/demo/SC22000_VE__M1.grd.gz' - self.assertEqual('.grd.gz', get_extension(file_name)[1]) + file_name = ( + "/Users/aznarsig/Documents/Python/climada_python" + "/data/demo/SC22000_VE__M1.grd.gz" + ) + self.assertEqual(".grd.gz", get_extension(file_name)[1]) self.assertEqual( - str(Path('/Users/aznarsig/Documents/Python/climada_python/data/demo/SC22000_VE__M1')), - get_extension(file_name)[0]) + str( + Path( + "/Users/aznarsig/Documents/Python/climada_python/data/demo/SC22000_VE__M1" + ) + ), + get_extension(file_name)[0], + ) # Execute Tests diff --git a/climada/util/test/test_finance.py b/climada/util/test/test_finance.py index 50edf9a15..69c8ddd23 100644 --- a/climada/util/test/test_finance.py +++ b/climada/util/test/test_finance.py @@ -18,19 +18,32 @@ Test of finance module """ + import unittest + import numpy as np from cartopy.io 
import shapereader -from climada.util.finance import net_present_value, gdp, income_group, \ -nat_earth_adm0, world_bank, wealth2gdp, world_bank_wealth_account, _gdp_twn - -SHP_FN = shapereader.natural_earth(resolution='10m', category='cultural', - name='admin_0_countries') +from climada.util.finance import ( + _gdp_twn, + gdp, + income_group, + nat_earth_adm0, + net_present_value, + wealth2gdp, + world_bank, + world_bank_wealth_account, +) + +SHP_FN = shapereader.natural_earth( + resolution="10m", category="cultural", name="admin_0_countries" +) SHP_FILE = shapereader.Reader(SHP_FN) + class TestNetpresValue(unittest.TestCase): """Test date functions""" + def test_net_pres_val_pass(self): """Test net_present_value against MATLAB reference""" years = np.arange(2018, 2041) @@ -38,15 +51,16 @@ def test_net_pres_val_pass(self): val_years = np.ones(years.size) * 6.512201157564418e9 res = net_present_value(years, disc_rates, val_years) - self.assertEqual(1.215049630691397e+11, res) + self.assertEqual(1.215049630691397e11, res) + class TestWBData(unittest.TestCase): """Test World Bank data""" + def test_ne_income_grp_aia_pass(self): """Test nat_earth_adm0 function Anguilla.""" ref_year = 2012 - ne_year, ne_val = nat_earth_adm0('AIA', 'INCOME_GRP', - shp_file=SHP_FILE) + ne_year, ne_val = nat_earth_adm0("AIA", "INCOME_GRP", shp_file=SHP_FILE) ref_year = 0 ref_val = 3 @@ -56,7 +70,7 @@ def test_ne_income_grp_aia_pass(self): def test_wb_income_grp_sxm_pass(self): """Test world_bank function Sint Maarten.""" ref_year = 2012 - wb_year, wb_val = world_bank('SXM', ref_year, 'INC_GRP') + wb_year, wb_val = world_bank("SXM", ref_year, "INC_GRP") ref_year = 2012 ref_val = 4 @@ -66,23 +80,22 @@ def test_wb_income_grp_sxm_pass(self): def test_income_grp_sxm_1999_pass(self): """Test income_group function Sint Maarten.""" ref_year = 1999 - with self.assertLogs('climada.util.finance', level='INFO') as cm: - ig_year, ig_val = income_group('SXM', ref_year, SHP_FILE) + with self.assertLogs("climada.util.finance", level="INFO") as cm: + ig_year, ig_val = income_group("SXM", ref_year, SHP_FILE) ref_year = 2010 ref_val = 4 - self.assertIn('Income group SXM 2010: 4.', cm.output[0]) + self.assertIn("Income group SXM 2010: 4.", cm.output[0]) self.assertEqual(ig_year, ref_year) self.assertEqual(ig_val, ref_val) def test_ne_gdp_aia_2012_pass(self): """Test nat_earth_adm0 function Anguilla.""" ref_year = 2012 - ne_year, ne_val = nat_earth_adm0('AIA', 'GDP_MD', - 'GDP_YEAR', SHP_FILE) + ne_year, ne_val = nat_earth_adm0("AIA", "GDP_MD", "GDP_YEAR", SHP_FILE) ref_year = 2009 - ref_val = 1.75e+08 + ref_val = 1.75e08 self.assertEqual(ne_year, ref_year) self.assertEqual(ne_val, ref_val) @@ -91,22 +104,22 @@ def test_gdp_sxm_2010_pass(self): # If World Bank input data changes, make sure to set ref_year to a year where # no data is available so that the next available data point has to be selected. 
ref_year = 2010 - with self.assertLogs('climada.util.finance', level='INFO') as cm: - gdp_year, gdp_val = gdp('SXM', ref_year) + with self.assertLogs("climada.util.finance", level="INFO") as cm: + gdp_year, gdp_val = gdp("SXM", ref_year) - ref_val = 936089385.47486 # reference GDP value - ref_year = 2011 # nearest year with data available (might change) + ref_val = 936089385.47486 # reference GDP value + ref_year = 2011 # nearest year with data available (might change) # GDP and years with data available might change if worldbank input # data changes, check magnitude and adjust ref_val and/or ref_year # if test fails: - self.assertIn('GDP SXM %i: %1.3e' % (ref_year, ref_val), cm.output[0]) + self.assertIn("GDP SXM %i: %1.3e" % (ref_year, ref_val), cm.output[0]) self.assertEqual(gdp_year, ref_year) self.assertAlmostEqual(gdp_val, ref_val, places=0) def test_gdp_twn_2012_pass(self): """Test gdp function TWN.""" ref_year = 2014 - gdp_year, gdp_val = gdp('TWN', ref_year) + gdp_year, gdp_val = gdp("TWN", ref_year) _, gdp_val_direct = _gdp_twn(ref_year) ref_val = 530515000000.0 ref_year = 2014 @@ -114,22 +127,23 @@ def test_gdp_twn_2012_pass(self): self.assertEqual(gdp_val, ref_val) self.assertEqual(gdp_val_direct, ref_val) - def test_wb_esp_1950_pass(self): """Test world_bank function Sint Maarten.""" ref_year = 1950 - wb_year, wb_val = world_bank('ESP', ref_year, 'NY.GDP.MKTP.CD') + wb_year, wb_val = world_bank("ESP", ref_year, "NY.GDP.MKTP.CD") ref_year = 1960 ref_val = 12433394725.2159 self.assertEqual(wb_year, ref_year) self.assertAlmostEqual(wb_val, ref_val) + class TestWealth2GDP(unittest.TestCase): """Test Wealth to GDP factor extraction""" + def test_nfw_SUR_pass(self): """Test non-financial wealth-to-gdp factor with Suriname.""" - w2g_year, w2g_val = wealth2gdp('SUR') + w2g_year, w2g_val = wealth2gdp("SUR") ref_year = 2016 ref_val = 0.73656 @@ -138,7 +152,7 @@ def test_nfw_SUR_pass(self): def test_nfw_BEL_pass(self): """Test total wealth-to-gdp factor with Belgium.""" - w2g_year, w2g_val = wealth2gdp('BEL', False) + w2g_year, w2g_val = wealth2gdp("BEL", False) ref_year = 2016 ref_val = 4.88758 @@ -147,21 +161,27 @@ def test_nfw_BEL_pass(self): def test_nfw_LBY_pass(self): """Test missing factor with Libya.""" - _, w2g_val = wealth2gdp('LBY') + _, w2g_val = wealth2gdp("LBY") self.assertTrue(np.isnan(w2g_val)) + class TestWBWealthAccount(unittest.TestCase): """Test Wealth Indicator extraction from World Bank provided CSV""" + def test_pca_DEU_2010_pass(self): """Test Processed Capital value Germany 2010.""" ref_year = 2010 - cntry_iso = 'DEU' + cntry_iso = "DEU" wb_year, wb_val, q = world_bank_wealth_account(cntry_iso, ref_year, no_land=0) - wb_year_noland, wb_val_noland, q = world_bank_wealth_account(cntry_iso, ref_year, - no_land=1) - ref_val = [17675048450284.9, 19767982562092.2] # second value as updated by worldbank on - # October 27 2021 + wb_year_noland, wb_val_noland, q = world_bank_wealth_account( + cntry_iso, ref_year, no_land=1 + ) + ref_val = [ + 17675048450284.9, + 19767982562092.2, + ] # second value as updated by worldbank on + # October 27 2021 ref_val_noland = [14254071330874.9, 15941921421042.1] # dito self.assertEqual(wb_year, ref_year) self.assertEqual(q, 1) @@ -172,42 +192,49 @@ def test_pca_DEU_2010_pass(self): def test_pca_CHE_2008_pass(self): """Test Prcoessed Capital per capita Switzerland 2008 (interp.).""" ref_year = 2008 - cntry_iso = 'CHE' - var_name = 'NW.PCA.PC' - wb_year, wb_val, _ = world_bank_wealth_account(cntry_iso, ref_year, - variable_name=var_name, 
no_land=0) - ref_val = [328398.7, # values sporadically updated by worldbank - 369081.0] # <- October 27 2021 + cntry_iso = "CHE" + var_name = "NW.PCA.PC" + wb_year, wb_val, _ = world_bank_wealth_account( + cntry_iso, ref_year, variable_name=var_name, no_land=0 + ) + ref_val = [ + 328398.7, # values sporadically updated by worldbank + 369081.0, + ] # <- October 27 2021 self.assertEqual(wb_year, ref_year) self.assertIn(wb_val, ref_val) def test_tow_IND_1985_pass(self): """Test Total Wealth value India 1985 (outside year range).""" ref_year = 1985 - cntry_iso = 'IND' - var_name = 'NW.TOW.TO' - wb_year, wb_val, _ = world_bank_wealth_account(cntry_iso, ref_year, - variable_name=var_name) - ref_val = [5415188681934.5, # values sporadically updated by worldbank - 5861193808779.6, # <- October 27 2021 - 5861186556152.8, # <- June 29 2023 - 5861186367245.2, # <- December 20 2023 - ] + cntry_iso = "IND" + var_name = "NW.TOW.TO" + wb_year, wb_val, _ = world_bank_wealth_account( + cntry_iso, ref_year, variable_name=var_name + ) + ref_val = [ + 5415188681934.5, # values sporadically updated by worldbank + 5861193808779.6, # <- October 27 2021 + 5861186556152.8, # <- June 29 2023 + 5861186367245.2, # <- December 20 2023 + ] self.assertEqual(wb_year, ref_year) self.assertIn(wb_val, ref_val) def test_pca_CUB_2015_pass(self): """Test Processed Capital value Cuba 2015 (missing value).""" ref_year = 2015 - cntry_iso = 'CUB' + cntry_iso = "CUB" wb_year, wb_val, q = world_bank_wealth_account(cntry_iso, ref_year, no_land=1) - ref_val = [108675762920.0, # values sporadically updated by worldbank - 108675513472.0, # <- Dezember 20 2023 - ] + ref_val = [ + 108675762920.0, # values sporadically updated by worldbank + 108675513472.0, # <- Dezember 20 2023 + ] self.assertEqual(q, 0) self.assertEqual(wb_year, ref_year) self.assertIn(wb_val, ref_val) + # Execute Tests if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestNetpresValue) diff --git a/climada/util/test/test_hdf5.py b/climada/util/test/test_hdf5.py index 32ad7a7bc..ae85003e7 100644 --- a/climada/util/test/test_hdf5.py +++ b/climada/util/test/test_hdf5.py @@ -20,11 +20,13 @@ """ import unittest -import numpy as np + import h5py +import numpy as np -from climada.util.constants import HAZ_DEMO_MAT import climada.util.hdf5_handler as u_hdf5 +from climada.util.constants import HAZ_DEMO_MAT + class TestFunc(unittest.TestCase): """Test the auxiliary functions used to retrieve variables from HDF5""" @@ -36,21 +38,23 @@ def test_get_string_pass(self): contents = u_hdf5.read(HAZ_DEMO_MAT) # Convert several strings - str_date = u_hdf5.get_string(contents['hazard']['date']) - str_comment = u_hdf5.get_string(contents['hazard']['comment']) - str_wf = u_hdf5.get_string(contents['hazard']['windfield_comment']) - str_fn = u_hdf5.get_string(contents['hazard']['filename']) + str_date = u_hdf5.get_string(contents["hazard"]["date"]) + str_comment = u_hdf5.get_string(contents["hazard"]["comment"]) + str_wf = u_hdf5.get_string(contents["hazard"]["windfield_comment"]) + str_fn = u_hdf5.get_string(contents["hazard"]["filename"]) # Check results - self.assertEqual('14-Nov-2017 10:09:05', str_date) + self.assertEqual("14-Nov-2017 10:09:05", str_date) + self.assertEqual( + "TC hazard event set, generated 14-Nov-2017 10:09:05", str_comment + ) self.assertEqual( - 'TC hazard event set, generated 14-Nov-2017 10:09:05', - str_comment) + "generating 14450 windfields took 0.25 min " + "(0.0010 sec/event)", str_wf + ) self.assertEqual( - 'generating 14450 
windfields took 0.25 min ' + - '(0.0010 sec/event)', str_wf) - self.assertEqual('/Users/aznarsig/Documents/MATLAB/climada_data/' + - 'hazards/atl_prob.mat', str_fn) + "/Users/aznarsig/Documents/MATLAB/climada_data/" + "hazards/atl_prob.mat", + str_fn, + ) def test_get_sparse_mat_pass(self): """Check contents of imported sparse matrix, using the function \ @@ -60,10 +64,11 @@ def test_get_sparse_mat_pass(self): contents = u_hdf5.read(HAZ_DEMO_MAT) # get matrix size - mat_shape = (len(contents['hazard']['event_ID']), - len(contents['hazard']['centroid_ID'])) - spr_mat = u_hdf5.get_sparse_csr_mat( - contents['hazard']['intensity'], mat_shape) + mat_shape = ( + len(contents["hazard"]["event_ID"]), + len(contents["hazard"]["centroid_ID"]), + ) + spr_mat = u_hdf5.get_sparse_csr_mat(contents["hazard"]["intensity"], mat_shape) self.assertEqual(mat_shape[0], spr_mat.shape[0]) self.assertEqual(mat_shape[1], spr_mat.shape[1]) @@ -79,19 +84,20 @@ def test_get_sparse_mat_pass(self): def test_get_str_from_ref(self): """Check import string from a HDF5 object reference""" - with h5py.File(HAZ_DEMO_MAT, 'r') as file: - var = file['hazard']['name'][0][0] + with h5py.File(HAZ_DEMO_MAT, "r") as file: + var = file["hazard"]["name"][0][0] res = u_hdf5.get_str_from_ref(HAZ_DEMO_MAT, var) - self.assertEqual('NNN_1185101', res) + self.assertEqual("NNN_1185101", res) def test_get_list_str_from_ref(self): """Check import string from a HDF5 object reference""" - with h5py.File(HAZ_DEMO_MAT, 'r') as file: - var = file['hazard']['name'] + with h5py.File(HAZ_DEMO_MAT, "r") as file: + var = file["hazard"]["name"] var_list = u_hdf5.get_list_str_from_ref(HAZ_DEMO_MAT, var) - self.assertEqual('NNN_1185101', var_list[0]) - self.assertEqual('NNN_1185101_gen1', var_list[1]) - self.assertEqual('NNN_1185101_gen2', var_list[2]) + self.assertEqual("NNN_1185101", var_list[0]) + self.assertEqual("NNN_1185101_gen1", var_list[1]) + self.assertEqual("NNN_1185101_gen2", var_list[2]) + class TestReader(unittest.TestCase): """Test HDF5 reader""" @@ -104,50 +110,51 @@ def test_hazard_pass(self): # Check read contents self.assertEqual(1, len(contents)) - self.assertTrue('hazard' in contents.keys()) - self.assertEqual(False, '#refs#' in contents.keys()) - - hazard = contents['hazard'] - self.assertTrue('reference_year' in hazard.keys()) - self.assertTrue('lon' in hazard.keys()) - self.assertTrue('lat' in hazard.keys()) - self.assertTrue('centroid_ID' in hazard.keys()) - self.assertTrue('orig_years' in hazard.keys()) - self.assertTrue('orig_event_count' in hazard.keys()) - self.assertTrue('event_count' in hazard.keys()) - self.assertTrue('event_ID' in hazard.keys()) - self.assertTrue('category' in hazard.keys()) - self.assertTrue('orig_event_flag' in hazard.keys()) - self.assertTrue('yyyy' in hazard.keys()) - self.assertTrue('mm' in hazard.keys()) - self.assertTrue('dd' in hazard.keys()) - self.assertTrue('datenum' in hazard.keys()) - self.assertTrue('scenario' in hazard.keys()) - self.assertTrue('intensity' in hazard.keys()) - self.assertTrue('name' in hazard.keys()) - self.assertTrue('frequency' in hazard.keys()) - self.assertTrue('matrix_density' in hazard.keys()) - self.assertTrue('windfield_comment' in hazard.keys()) - self.assertTrue('peril_ID' in hazard.keys()) - self.assertTrue('filename' in hazard.keys()) - self.assertTrue('comment' in hazard.keys()) - self.assertTrue('date' in hazard.keys()) - self.assertTrue('units' in hazard.keys()) - self.assertTrue('orig_yearset' in hazard.keys()) - self.assertTrue('fraction' in hazard.keys()) + 
self.assertTrue("hazard" in contents.keys()) + self.assertEqual(False, "#refs#" in contents.keys()) + + hazard = contents["hazard"] + self.assertTrue("reference_year" in hazard.keys()) + self.assertTrue("lon" in hazard.keys()) + self.assertTrue("lat" in hazard.keys()) + self.assertTrue("centroid_ID" in hazard.keys()) + self.assertTrue("orig_years" in hazard.keys()) + self.assertTrue("orig_event_count" in hazard.keys()) + self.assertTrue("event_count" in hazard.keys()) + self.assertTrue("event_ID" in hazard.keys()) + self.assertTrue("category" in hazard.keys()) + self.assertTrue("orig_event_flag" in hazard.keys()) + self.assertTrue("yyyy" in hazard.keys()) + self.assertTrue("mm" in hazard.keys()) + self.assertTrue("dd" in hazard.keys()) + self.assertTrue("datenum" in hazard.keys()) + self.assertTrue("scenario" in hazard.keys()) + self.assertTrue("intensity" in hazard.keys()) + self.assertTrue("name" in hazard.keys()) + self.assertTrue("frequency" in hazard.keys()) + self.assertTrue("matrix_density" in hazard.keys()) + self.assertTrue("windfield_comment" in hazard.keys()) + self.assertTrue("peril_ID" in hazard.keys()) + self.assertTrue("filename" in hazard.keys()) + self.assertTrue("comment" in hazard.keys()) + self.assertTrue("date" in hazard.keys()) + self.assertTrue("units" in hazard.keys()) + self.assertTrue("orig_yearset" in hazard.keys()) + self.assertTrue("fraction" in hazard.keys()) self.assertEqual(27, len(hazard.keys())) # Check some random values - mat_shape = (len(contents['hazard']['event_ID']), - len(contents['hazard']['centroid_ID'])) - sp_mat = u_hdf5.get_sparse_csr_mat(hazard['intensity'], mat_shape) + mat_shape = ( + len(contents["hazard"]["event_ID"]), + len(contents["hazard"]["centroid_ID"]), + ) + sp_mat = u_hdf5.get_sparse_csr_mat(hazard["intensity"], mat_shape) - self.assertTrue(np.array_equal(np.array([[84], [67]]), - hazard['peril_ID'])) + self.assertTrue(np.array_equal(np.array([[84], [67]]), hazard["peril_ID"])) self.assertEqual(34.537289477809473, sp_mat[2862, 97]) - self.assertEqual(-80, hazard['lon'][46]) - self.assertEqual(28, hazard['lat'][87]) - self.assertEqual(2016, hazard['reference_year']) + self.assertEqual(-80, hazard["lon"][46]) + self.assertEqual(28, hazard["lat"][87]) + self.assertEqual(2016, hazard["reference_year"]) def test_with_refs_pass(self): """Allow to load references of the matlab file""" @@ -158,8 +165,9 @@ def test_with_refs_pass(self): # Check read contents self.assertEqual(2, len(contents)) - self.assertTrue('hazard' in contents.keys()) - self.assertTrue('#refs#' in contents.keys()) + self.assertTrue("hazard" in contents.keys()) + self.assertTrue("#refs#" in contents.keys()) + # Execute Tests if __name__ == "__main__": diff --git a/climada/util/test/test_interpolation.py b/climada/util/test/test_interpolation.py index 1c780fcce..8000ace9e 100644 --- a/climada/util/test/test_interpolation.py +++ b/climada/util/test/test_interpolation.py @@ -20,9 +20,10 @@ """ import unittest + import numpy as np -from climada.util.interpolation import interpolate_ev, stepfunction_ev, group_frequency +from climada.util.interpolation import group_frequency, interpolate_ev, stepfunction_ev class TestFitMethods(unittest.TestCase): @@ -30,153 +31,141 @@ class TestFitMethods(unittest.TestCase): def test_interpolate_ev_linear_interp(self): """Test linear interpolation""" - x_train = np.array([1., 3., 5.]) - y_train = np.array([8., 4., 2.]) - x_test = np.array([0., 3., 4., 6.]) + x_train = np.array([1.0, 3.0, 5.0]) + y_train = np.array([8.0, 4.0, 2.0]) + x_test = 
np.array([0.0, 3.0, 4.0, 6.0]) np.testing.assert_allclose( - interpolate_ev(x_test, x_train, y_train), - np.array([8., 4., 3., np.nan]) + interpolate_ev(x_test, x_train, y_train), np.array([8.0, 4.0, 3.0, np.nan]) ) np.testing.assert_allclose( - interpolate_ev(x_test, x_train, y_train, y_asymptotic = 0), - np.array([8., 4., 3., 0.]) + interpolate_ev(x_test, x_train, y_train, y_asymptotic=0), + np.array([8.0, 4.0, 3.0, 0.0]), ) def test_interpolate_ev_threshold_parameters(self): """Test input threshold parameters""" - x_train = np.array([0., 3., 6.]) - y_train = np.array([4., 1., 4.]) - x_test = np.array([-1., 3., 4.]) + x_train = np.array([0.0, 3.0, 6.0]) + y_train = np.array([4.0, 1.0, 4.0]) + x_test = np.array([-1.0, 3.0, 4.0]) np.testing.assert_allclose( - interpolate_ev(x_test, x_train, y_train), - np.array([4., 1., 2.]) + interpolate_ev(x_test, x_train, y_train), np.array([4.0, 1.0, 2.0]) ) np.testing.assert_allclose( - interpolate_ev(x_test, x_train, y_train, x_threshold=1.), - np.array([1., 1., 2.]) + interpolate_ev(x_test, x_train, y_train, x_threshold=1.0), + np.array([1.0, 1.0, 2.0]), ) np.testing.assert_allclose( - interpolate_ev(x_test, x_train, y_train, y_threshold=2.), - np.array([4., 4., 4.]) + interpolate_ev(x_test, x_train, y_train, y_threshold=2.0), + np.array([4.0, 4.0, 4.0]), ) - + def test_interpolate_ev_scale_parameters(self): """Test log scale parameters""" x_train = np.array([1e1, 1e3]) - y_train = np.array([1., 3.]) + y_train = np.array([1.0, 3.0]) x_test = np.array([1e0, 1e2]) np.testing.assert_allclose( interpolate_ev(x_test, x_train, y_train, logx=True, extrapolation=True), - np.array([0., 2.]) + np.array([0.0, 2.0]), ) np.testing.assert_allclose( - interpolate_ev(x_test, x_train, y_train, logx=True), - np.array([1., 2.]) + interpolate_ev(x_test, x_train, y_train, logx=True), np.array([1.0, 2.0]) ) - x_train = np.array([1., 3.]) + x_train = np.array([1.0, 3.0]) y_train = np.array([1e1, 1e3]) - x_test = np.array([0., 2.]) + x_test = np.array([0.0, 2.0]) np.testing.assert_allclose( interpolate_ev(x_test, x_train, y_train, logy=True, extrapolation=True), - np.array([1e0, 1e2]) + np.array([1e0, 1e2]), ) x_train = np.array([1e1, 1e3]) y_train = np.array([1e1, 1e5]) x_test = np.array([1e0, 1e2]) np.testing.assert_allclose( - interpolate_ev(x_test, x_train, y_train, logx=True, logy=True, extrapolation=True), - np.array([1e-1, 1e3]) + interpolate_ev( + x_test, x_train, y_train, logx=True, logy=True, extrapolation=True + ), + np.array([1e-1, 1e3]), ) def test_interpolate_ev_degenerate_input(self): """Test interp to constant zeros""" - x_train = np.array([1., 3., 5.]) - x_test = np.array([0., 2., 4.]) + x_train = np.array([1.0, 3.0, 5.0]) + x_test = np.array([0.0, 2.0, 4.0]) y_train = np.zeros(3) np.testing.assert_allclose( - interpolate_ev(x_test, x_train, y_train), - np.array([0., 0., 0.]) + interpolate_ev(x_test, x_train, y_train), np.array([0.0, 0.0, 0.0]) ) def test_interpolate_ev_small_input(self): """Test small input""" - x_train = np.array([1.]) - y_train = np.array([2.]) - x_test = np.array([0., 1., 2.]) + x_train = np.array([1.0]) + y_train = np.array([2.0]) + x_test = np.array([0.0, 1.0, 2.0]) np.testing.assert_allclose( - interpolate_ev(x_test, x_train, y_train), - np.array([2., 2., np.nan]) + interpolate_ev(x_test, x_train, y_train), np.array([2.0, 2.0, np.nan]) ) np.testing.assert_allclose( interpolate_ev(x_test, x_train, y_train, y_asymptotic=0), - np.array([2., 2., 0.]) + np.array([2.0, 2.0, 0.0]), ) x_train = np.array([]) y_train = np.array([]) - x_test = 
np.array([0., 1., 2.]) + x_test = np.array([0.0, 1.0, 2.0]) np.testing.assert_allclose( - interpolate_ev(x_test, x_train, y_train), - np.full(3, np.nan) + interpolate_ev(x_test, x_train, y_train), np.full(3, np.nan) ) np.testing.assert_allclose( - interpolate_ev(x_test, x_train, y_train, y_asymptotic=0), - np.zeros(3) + interpolate_ev(x_test, x_train, y_train, y_asymptotic=0), np.zeros(3) ) def test_stepfunction_ev(self): """Test stepfunction method""" - x_train = np.array([1., 3., 5.]) - y_train = np.array([8., 4., 2.]) - x_test = np.array([0., 3., 4., 6.]) + x_train = np.array([1.0, 3.0, 5.0]) + y_train = np.array([8.0, 4.0, 2.0]) + x_test = np.array([0.0, 3.0, 4.0, 6.0]) np.testing.assert_allclose( - stepfunction_ev(x_test, x_train, y_train), - np.array([8., 4., 2., np.nan]) + stepfunction_ev(x_test, x_train, y_train), np.array([8.0, 4.0, 2.0, np.nan]) ) np.testing.assert_allclose( - stepfunction_ev(x_test, x_train, y_train, y_asymptotic=0.), - np.array([8., 4., 2., 0.]) + stepfunction_ev(x_test, x_train, y_train, y_asymptotic=0.0), + np.array([8.0, 4.0, 2.0, 0.0]), ) def test_stepfunction_ev_small_input(self): """Test small input""" - x_train = np.array([1.]) - y_train = np.array([2.]) - x_test = np.array([0., 1., 2.]) + x_train = np.array([1.0]) + y_train = np.array([2.0]) + x_test = np.array([0.0, 1.0, 2.0]) np.testing.assert_allclose( - stepfunction_ev(x_test, x_train, y_train), - np.array([2., 2., np.nan]) + stepfunction_ev(x_test, x_train, y_train), np.array([2.0, 2.0, np.nan]) ) np.testing.assert_allclose( stepfunction_ev(x_test, x_train, y_train, y_asymptotic=0), - np.array([2., 2., 0.]) + np.array([2.0, 2.0, 0.0]), ) x_train = np.array([]) y_train = np.array([]) - x_test = np.array([0., 1., 2.]) + x_test = np.array([0.0, 1.0, 2.0]) np.testing.assert_allclose( - stepfunction_ev(x_test, x_train, y_train), - np.full(3, np.nan) + stepfunction_ev(x_test, x_train, y_train), np.full(3, np.nan) ) np.testing.assert_allclose( - stepfunction_ev(x_test, x_train, y_train, y_asymptotic=0), - np.zeros(3) + stepfunction_ev(x_test, x_train, y_train, y_asymptotic=0), np.zeros(3) ) - + def test_frequency_group(self): """Test frequency grouping method""" frequency = np.ones(6) - intensity = np.array([1., 1., 1., 2., 3., 3]) - np.testing.assert_allclose( - group_frequency(frequency, intensity), - ([3, 1, 2], [1, 2, 3]) - ) + intensity = np.array([1.0, 1.0, 1.0, 2.0, 3.0, 3]) np.testing.assert_allclose( - group_frequency([], []), - ([], []) + group_frequency(frequency, intensity), ([3, 1, 2], [1, 2, 3]) ) + np.testing.assert_allclose(group_frequency([], []), ([], [])) with self.assertRaises(ValueError): group_frequency(frequency, intensity[::-1]) + # Execute Tests if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestFitMethods) diff --git a/climada/util/test/test_lines_polys_handler.py b/climada/util/test/test_lines_polys_handler.py index b9275d548..8800d6d06 100644 --- a/climada/util/test/test_lines_polys_handler.py +++ b/climada/util/test/test_lines_polys_handler.py @@ -19,39 +19,39 @@ Test of lines_polys_handler """ +import copy import unittest -from unittest.mock import patch, DEFAULT +from unittest.mock import DEFAULT, patch -import numpy as np import geopandas as gpd +import numpy as np import pandas as pd -import copy - -from shapely.geometry import Point -from shapely.geometry import LineString +from shapely.geometry import LineString, Point -from climada.entity import Exposures -import climada.util.lines_polys_handler as u_lp import climada.util.coordinates as 
u_coord -from climada.util.api_client import Client +import climada.util.lines_polys_handler as u_lp from climada.engine import Impact, ImpactCalc +from climada.entity import Exposures from climada.entity.impact_funcs import ImpactFuncSet from climada.entity.impact_funcs.storm_europe import ImpfStormEurope +from climada.util.api_client import Client -#TODO: add tests for the private methods +# TODO: add tests for the private methods # Load gdfs and hazard and impact functions for tests -HAZ = Client().get_hazard('storm_europe', name='test_haz_WS_nl', status='test_dataset') +HAZ = Client().get_hazard("storm_europe", name="test_haz_WS_nl", status="test_dataset") -EXP_POLY = Client().get_exposures('base', name='test_polygon_exp', status='test_dataset') -EXP_POLY.gdf['impf_WS'] = 2 +EXP_POLY = Client().get_exposures( + "base", name="test_polygon_exp", status="test_dataset" +) +EXP_POLY.gdf["impf_WS"] = 2 GDF_POLY = EXP_POLY.gdf -EXP_LINE = Client().get_exposures('base', name='test_line_exp', status='test_dataset') +EXP_LINE = Client().get_exposures("base", name="test_line_exp", status="test_dataset") GDF_LINE = EXP_LINE.gdf -EXP_POINT = Client().get_exposures('base', name='test_point_exp', status='test_dataset') +EXP_POINT = Client().get_exposures("base", name="test_point_exp", status="test_dataset") GDF_POINT = EXP_POINT.gdf IMPF = ImpfStormEurope.from_welker() @@ -59,7 +59,7 @@ IMPF2.id = 2 IMPF_SET = ImpactFuncSet([IMPF, IMPF2]) -COL_CHANGING = ['value', 'latitude', 'longitude', 'geometry', 'geometry_orig'] +COL_CHANGING = ["value", "latitude", "longitude", "geometry", "geometry_orig"] def check_unchanged_geom_gdf(self, gdf_geom, gdf_pnt): @@ -68,22 +68,26 @@ def check_unchanged_geom_gdf(self, gdf_geom, gdf_pnt): sub_gdf_pnt = gdf_pnt.xs(n, level=1) rows_sel = sub_gdf_pnt.index.to_numpy() sub_gdf = gdf_geom.loc[rows_sel] - self.assertTrue(np.alltrue(sub_gdf.geometry.geom_equals(sub_gdf_pnt.geometry_orig))) + self.assertTrue( + np.alltrue(sub_gdf.geometry.geom_equals(sub_gdf_pnt.geometry_orig)) + ) for col in gdf_pnt.columns: if col not in COL_CHANGING: np.testing.assert_allclose(gdf_pnt[col].unique(), gdf_geom[col].unique()) + def check_impact(self, imp, haz, exp, aai_agg, eai_exp): """Test properties of imapcts""" self.assertEqual(len(haz.event_id), len(imp.at_event)) self.assertIsInstance(imp, Impact) - self.assertTrue(hasattr(imp, 'geom_exp')) - self.assertTrue(hasattr(imp, 'coord_exp')) - self.assertTrue(np.all(imp.geom_exp.sort_index()==exp.gdf.geometry.sort_index())) + self.assertTrue(hasattr(imp, "geom_exp")) + self.assertTrue(hasattr(imp, "coord_exp")) + self.assertTrue(np.all(imp.geom_exp.sort_index() == exp.gdf.geometry.sort_index())) self.assertEqual(len(imp.coord_exp), len(exp.gdf)) self.assertAlmostEqual(imp.aai_agg, aai_agg, 3) np.testing.assert_allclose(imp.eai_exp, eai_exp, rtol=1e-5) + class TestExposureGeomToPnt(unittest.TestCase): """Test Exposures to points functions""" @@ -95,73 +99,174 @@ def check_unchanged_exp(self, exp_geom, exp_pnt): def test_point_exposure_from_polygons(self): """Test disaggregation of polygons to points""" - #test low res - one point per poly + # test low res - one point per poly exp_pnt = u_lp.exp_geom_to_pnt( - EXP_POLY, res=1, to_meters=False, - disagg_met=u_lp.DisaggMethod.FIX, disagg_val=None - ) - np.testing.assert_array_equal(exp_pnt.gdf['value'], EXP_POLY.gdf['value']) + EXP_POLY, + res=1, + to_meters=False, + disagg_met=u_lp.DisaggMethod.FIX, + disagg_val=None, + ) + np.testing.assert_array_equal(exp_pnt.gdf["value"], 
EXP_POLY.gdf["value"]) self.check_unchanged_exp(EXP_POLY, exp_pnt) - #to_meters=False, DIV + # to_meters=False, DIV exp_pnt = u_lp.exp_geom_to_pnt( - EXP_POLY, res=0.5, to_meters=False, - disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None - ) + EXP_POLY, + res=0.5, + to_meters=False, + disagg_met=u_lp.DisaggMethod.DIV, + disagg_val=None, + ) self.check_unchanged_exp(EXP_POLY, exp_pnt) - val_avg = np.array([ - 4.93449000e+10, 4.22202000e+10, 6.49988000e+10, 1.04223900e+11, - 1.04223900e+11, 5.85881000e+10, 1.11822300e+11, 8.54188667e+10, - 8.54188667e+10, 8.54188667e+10, 1.43895450e+11, 1.43895450e+11, - 1.16221500e+11, 3.70562500e+11, 1.35359600e+11, 3.83689000e+10 - ]) - np.testing.assert_allclose(exp_pnt.gdf['value'], val_avg) - lat = np.array([ - 53.15019278, 52.90814037, 52.48232657, 52.23482697, 52.23482697, - 51.26574748, 51.30438894, 51.71676713, 51.71676713, 51.71676713, - 52.13772724, 52.13772724, 52.61538869, 53.10328543, 52.54974468, - 52.11286591 - ]) - np.testing.assert_allclose(exp_pnt.gdf['latitude'], lat) - - #to_meters=TRUE, FIX, dissag_val + val_avg = np.array( + [ + 4.93449000e10, + 4.22202000e10, + 6.49988000e10, + 1.04223900e11, + 1.04223900e11, + 5.85881000e10, + 1.11822300e11, + 8.54188667e10, + 8.54188667e10, + 8.54188667e10, + 1.43895450e11, + 1.43895450e11, + 1.16221500e11, + 3.70562500e11, + 1.35359600e11, + 3.83689000e10, + ] + ) + np.testing.assert_allclose(exp_pnt.gdf["value"], val_avg) + lat = np.array( + [ + 53.15019278, + 52.90814037, + 52.48232657, + 52.23482697, + 52.23482697, + 51.26574748, + 51.30438894, + 51.71676713, + 51.71676713, + 51.71676713, + 52.13772724, + 52.13772724, + 52.61538869, + 53.10328543, + 52.54974468, + 52.11286591, + ] + ) + np.testing.assert_allclose(exp_pnt.gdf["latitude"], lat) + + # to_meters=TRUE, FIX, dissag_val res = 20000 exp_pnt = u_lp.exp_geom_to_pnt( - EXP_POLY, res=res, to_meters=True, - disagg_met=u_lp.DisaggMethod.FIX, disagg_val=res**2 - ) + EXP_POLY, + res=res, + to_meters=True, + disagg_met=u_lp.DisaggMethod.FIX, + disagg_val=res**2, + ) self.check_unchanged_exp(EXP_POLY, exp_pnt) val = res**2 - self.assertEqual(np.unique(exp_pnt.gdf['value'])[0], val) - lat = np.array([ - 53.13923671, 53.13923671, 53.13923671, 53.13923671, 53.43921725, - 53.43921725, 52.90782155, 52.90782155, 52.90782155, 52.90782155, - 52.90782155, 52.40180033, 52.40180033, 52.40180033, 52.40180033, - 52.40180033, 52.69674738, 52.69674738, 52.02540815, 52.02540815, - 52.02540815, 52.02540815, 52.02540815, 52.02540815, 52.31787025, - 52.31787025, 51.31813586, 51.31813586, 51.31813586, 51.49256036, - 51.49256036, 51.49256036, 51.49256036, 51.50407349, 51.50407349, - 51.50407349, 51.50407349, 51.50407349, 51.50407349, 51.50407349, - 51.50407349, 51.50407349, 51.79318374, 51.79318374, 51.79318374, - 51.92768703, 51.92768703, 51.92768703, 51.92768703, 51.92768703, - 51.92768703, 51.92768703, 52.46150801, 52.46150801, 52.46150801, - 52.75685438, 52.75685438, 52.75685438, 52.75685438, 53.05419711, - 53.08688006, 53.08688006, 53.08688006, 53.08688006, 53.08688006, - 53.38649582, 53.38649582, 53.38649582, 52.55795685, 52.55795685, - 52.55795685, 52.55795685, 52.23308448, 52.23308448 - ]) - np.testing.assert_allclose(exp_pnt.gdf['latitude'], lat) - - #projected crs, to_meters=TRUE, FIX, dissag_val + self.assertEqual(np.unique(exp_pnt.gdf["value"])[0], val) + lat = np.array( + [ + 53.13923671, + 53.13923671, + 53.13923671, + 53.13923671, + 53.43921725, + 53.43921725, + 52.90782155, + 52.90782155, + 52.90782155, + 52.90782155, + 52.90782155, + 52.40180033, + 
52.40180033, + 52.40180033, + 52.40180033, + 52.40180033, + 52.69674738, + 52.69674738, + 52.02540815, + 52.02540815, + 52.02540815, + 52.02540815, + 52.02540815, + 52.02540815, + 52.31787025, + 52.31787025, + 51.31813586, + 51.31813586, + 51.31813586, + 51.49256036, + 51.49256036, + 51.49256036, + 51.49256036, + 51.50407349, + 51.50407349, + 51.50407349, + 51.50407349, + 51.50407349, + 51.50407349, + 51.50407349, + 51.50407349, + 51.50407349, + 51.79318374, + 51.79318374, + 51.79318374, + 51.92768703, + 51.92768703, + 51.92768703, + 51.92768703, + 51.92768703, + 51.92768703, + 51.92768703, + 52.46150801, + 52.46150801, + 52.46150801, + 52.75685438, + 52.75685438, + 52.75685438, + 52.75685438, + 53.05419711, + 53.08688006, + 53.08688006, + 53.08688006, + 53.08688006, + 53.08688006, + 53.38649582, + 53.38649582, + 53.38649582, + 52.55795685, + 52.55795685, + 52.55795685, + 52.55795685, + 52.23308448, + 52.23308448, + ] + ) + np.testing.assert_allclose(exp_pnt.gdf["latitude"], lat) + + # projected crs, to_meters=TRUE, FIX, dissag_val res = 20000 EXP_POLY_PROJ = Exposures(GDF_POLY.to_crs(epsg=28992)) exp_pnt = u_lp.exp_geom_to_pnt( - EXP_POLY_PROJ, res=res, to_meters=True, - disagg_met=u_lp.DisaggMethod.FIX, disagg_val=res**2 - ) + EXP_POLY_PROJ, + res=res, + to_meters=True, + disagg_met=u_lp.DisaggMethod.FIX, + disagg_val=res**2, + ) self.check_unchanged_exp(EXP_POLY_PROJ, exp_pnt) val = res**2 - self.assertEqual(np.unique(exp_pnt.gdf['value'])[0], val) + self.assertEqual(np.unique(exp_pnt.gdf["value"])[0], val) self.assertEqual(exp_pnt.gdf.crs, EXP_POLY_PROJ.gdf.crs) @patch.multiple( @@ -210,143 +315,257 @@ def test_point_exposure_from_polygons_on_grid(self): """Test disaggregation of polygons to points on grid""" exp_poly = EXP_POLY.copy() res = 0.1 - exp_poly.set_gdf(exp_poly.gdf[exp_poly.gdf['population']<400000]) + exp_poly.set_gdf(exp_poly.gdf[exp_poly.gdf["population"] < 400000]) height, width, trafo = u_coord.pts_to_raster_meta( exp_poly.gdf.geometry.bounds, (res, res) - ) + ) x_grid, y_grid = u_coord.raster_to_meshgrid(trafo, width, height) - #to_meters=False, DIV + # to_meters=False, DIV exp_pnt = u_lp.exp_geom_to_pnt( - exp_poly, res=0.1, to_meters=False, - disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None - ) + exp_poly, + res=0.1, + to_meters=False, + disagg_met=u_lp.DisaggMethod.DIV, + disagg_val=None, + ) exp_pnt_grid = u_lp.exp_geom_to_grid( - exp_poly, (x_grid, y_grid), - disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None - ) + exp_poly, + (x_grid, y_grid), + disagg_met=u_lp.DisaggMethod.DIV, + disagg_val=None, + ) self.check_unchanged_exp(exp_poly, exp_pnt_grid) - for col in ['value', 'latitude', 'longitude']: + for col in ["value", "latitude", "longitude"]: np.testing.assert_allclose(exp_pnt.gdf[col], exp_pnt_grid.gdf[col]) - x_grid = np.append(x_grid, x_grid+10) - y_grid = np.append(y_grid, y_grid+10) - #to_meters=False, DIV + x_grid = np.append(x_grid, x_grid + 10) + y_grid = np.append(y_grid, y_grid + 10) + # to_meters=False, DIV exp_pnt = u_lp.exp_geom_to_pnt( - exp_poly, res=0.1, to_meters=False, - disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None - ) + exp_poly, + res=0.1, + to_meters=False, + disagg_met=u_lp.DisaggMethod.DIV, + disagg_val=None, + ) exp_pnt_grid = u_lp.exp_geom_to_grid( - exp_poly, (x_grid, y_grid), - disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None - ) + exp_poly, + (x_grid, y_grid), + disagg_met=u_lp.DisaggMethod.DIV, + disagg_val=None, + ) self.check_unchanged_exp(exp_poly, exp_pnt_grid) - for col in ['value', 'latitude', 'longitude']: + for col 
in ["value", "latitude", "longitude"]: np.testing.assert_allclose(exp_pnt.gdf[col], exp_pnt_grid.gdf[col]) - def test_point_exposure_from_lines(self): """Test disaggregation of lines to points""" - #to_meters=False, FIX + # to_meters=False, FIX exp_pnt = u_lp.exp_geom_to_pnt( - EXP_LINE, res=1, to_meters=False, - disagg_met=u_lp.DisaggMethod.FIX, disagg_val=None - ) - np.testing.assert_array_equal(exp_pnt.gdf['value'][:,0], EXP_LINE.gdf['value']) + EXP_LINE, + res=1, + to_meters=False, + disagg_met=u_lp.DisaggMethod.FIX, + disagg_val=None, + ) + np.testing.assert_array_equal(exp_pnt.gdf["value"][:, 0], EXP_LINE.gdf["value"]) self.check_unchanged_exp(EXP_LINE, exp_pnt) - #to_meters=False, DIV + # to_meters=False, DIV exp_pnt = u_lp.exp_geom_to_pnt( - EXP_LINE, res=1, to_meters=False, - disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None - ) - np.testing.assert_array_equal(exp_pnt.gdf['value'][:,0], EXP_LINE.gdf['value']) + EXP_LINE, + res=1, + to_meters=False, + disagg_met=u_lp.DisaggMethod.DIV, + disagg_val=None, + ) + np.testing.assert_array_equal(exp_pnt.gdf["value"][:, 0], EXP_LINE.gdf["value"]) self.check_unchanged_exp(EXP_LINE, exp_pnt) - #to_meters=TRUE, FIX, dissag_val + # to_meters=TRUE, FIX, dissag_val res = 20000 exp_pnt = u_lp.exp_geom_to_pnt( - EXP_LINE, res=res, to_meters=True, - disagg_met=u_lp.DisaggMethod.FIX, disagg_val=res**2 - ) + EXP_LINE, + res=res, + to_meters=True, + disagg_met=u_lp.DisaggMethod.FIX, + disagg_val=res**2, + ) self.check_unchanged_exp(EXP_LINE, exp_pnt) val = res**2 - self.assertEqual(np.unique(exp_pnt.gdf['value'])[0], val) - lat = np.array([ - 50.83944191, 50.94706532, 51.85008694, 51.7524172 , 52.07732906, - 50.889641 , 51.90287148, 51.53858598, 52.30223675, 53.15931081, - 51.61111058, 52.05191342, 52.3893 , 52.14520761, 52.47715845, - 52.68641293, 52.11355 , 51.90503849, 52.49610201, 51.8418 , - 51.93188219, 51.10694216, 52.48596301, 50.87543042, 51.0801347 , - 50.82145186, 50.81341953, 51.07235498, 50.9105503 - ]) - np.testing.assert_allclose(exp_pnt.gdf['latitude'], lat) + self.assertEqual(np.unique(exp_pnt.gdf["value"])[0], val) + lat = np.array( + [ + 50.83944191, + 50.94706532, + 51.85008694, + 51.7524172, + 52.07732906, + 50.889641, + 51.90287148, + 51.53858598, + 52.30223675, + 53.15931081, + 51.61111058, + 52.05191342, + 52.3893, + 52.14520761, + 52.47715845, + 52.68641293, + 52.11355, + 51.90503849, + 52.49610201, + 51.8418, + 51.93188219, + 51.10694216, + 52.48596301, + 50.87543042, + 51.0801347, + 50.82145186, + 50.81341953, + 51.07235498, + 50.9105503, + ] + ) + np.testing.assert_allclose(exp_pnt.gdf["latitude"], lat) + class TestGeomImpactCalcs(unittest.TestCase): """Test main functions on impact calculation and impact aggregation""" def test_calc_geom_impact_lines(self): - """ test calc_geom_impact() with lines""" + """test calc_geom_impact() with lines""" # line exposures only - exp_line_novals = Exposures(GDF_LINE.drop(columns='value')) + exp_line_novals = Exposures(GDF_LINE.drop(columns="value")) imp1 = u_lp.calc_geom_impact( - EXP_LINE, IMPF_SET, HAZ, - res=0.05, to_meters=False, disagg_met=u_lp.DisaggMethod.DIV, - disagg_val=None, agg_met=u_lp.AggMethod.SUM - ) + EXP_LINE, + IMPF_SET, + HAZ, + res=0.05, + to_meters=False, + disagg_met=u_lp.DisaggMethod.DIV, + disagg_val=None, + agg_met=u_lp.AggMethod.SUM, + ) aai_agg1 = 2.114365936716238 - eai_exp1 = np.array([ - 8.58546479e-02, 4.57753040e-02, 1.07081794e-01, 1.27160538e-02, - 8.60984331e-02, 1.57751547e-01, 2.32808488e-02, 2.95520878e-02, - 4.06902083e-03, 2.27553509e-01, 
5.29133033e-03, 2.72705887e-03, - 8.48207692e-03, 2.95633263e-02, 4.88225543e-01, 1.33011693e-03, - 1.03018186e-01, 7.72573773e-02, 5.48322256e-03, 1.61239410e-02, - 1.13181160e-01, 8.32840521e-02, 2.99243546e-01, 4.88901364e-02, - 1.71930351e-02, 2.49435540e-02, 2.96121155e-05, 1.03654148e-02 - ]) + eai_exp1 = np.array( + [ + 8.58546479e-02, + 4.57753040e-02, + 1.07081794e-01, + 1.27160538e-02, + 8.60984331e-02, + 1.57751547e-01, + 2.32808488e-02, + 2.95520878e-02, + 4.06902083e-03, + 2.27553509e-01, + 5.29133033e-03, + 2.72705887e-03, + 8.48207692e-03, + 2.95633263e-02, + 4.88225543e-01, + 1.33011693e-03, + 1.03018186e-01, + 7.72573773e-02, + 5.48322256e-03, + 1.61239410e-02, + 1.13181160e-01, + 8.32840521e-02, + 2.99243546e-01, + 4.88901364e-02, + 1.71930351e-02, + 2.49435540e-02, + 2.96121155e-05, + 1.03654148e-02, + ] + ) check_impact(self, imp1, HAZ, EXP_LINE, aai_agg1, eai_exp1) - imp2 = u_lp.calc_geom_impact( - EXP_LINE, IMPF_SET, HAZ, - res=300, to_meters=True, disagg_met=u_lp.DisaggMethod.DIV, - disagg_val=None, agg_met=u_lp.AggMethod.SUM - ) + EXP_LINE, + IMPF_SET, + HAZ, + res=300, + to_meters=True, + disagg_met=u_lp.DisaggMethod.DIV, + disagg_val=None, + agg_met=u_lp.AggMethod.SUM, + ) np.testing.assert_allclose(imp2.eai_exp, imp1.eai_exp, rtol=0.2) imp3 = u_lp.calc_geom_impact( - exp_line_novals, IMPF_SET, HAZ, - res=300, to_meters=True, disagg_met=u_lp.DisaggMethod.FIX, - disagg_val=5000, agg_met=u_lp.AggMethod.SUM - ) + exp_line_novals, + IMPF_SET, + HAZ, + res=300, + to_meters=True, + disagg_met=u_lp.DisaggMethod.FIX, + disagg_val=5000, + agg_met=u_lp.AggMethod.SUM, + ) aai_agg3 = 2.626753478142696 - eai_exp3 = np.array([ - 0.10307851, 0.05544964, 0.12810739, 0.01736701, 0.1092617 , - 0.19785227, 0.02959709, 0.03617366, 0.00464554, 0.27378204, - 0.00670862, 0.00329956, 0.01030654, 0.03324303, 0.61571791, - 0.00215879, 0.12245651, 0.10379203, 0.00536503, 0.01881487, - 0.14592603, 0.12312706, 0.35965216, 0.05581585, 0.01968975, - 0.02843223, 0.00241899, 0.01451368 - ]) + eai_exp3 = np.array( + [ + 0.10307851, + 0.05544964, + 0.12810739, + 0.01736701, + 0.1092617, + 0.19785227, + 0.02959709, + 0.03617366, + 0.00464554, + 0.27378204, + 0.00670862, + 0.00329956, + 0.01030654, + 0.03324303, + 0.61571791, + 0.00215879, + 0.12245651, + 0.10379203, + 0.00536503, + 0.01881487, + 0.14592603, + 0.12312706, + 0.35965216, + 0.05581585, + 0.01968975, + 0.02843223, + 0.00241899, + 0.01451368, + ] + ) check_impact(self, imp3, HAZ, exp_line_novals, aai_agg3, eai_exp3) imp4 = u_lp.calc_geom_impact( - EXP_LINE, IMPF_SET, HAZ, - res=300, to_meters=True, disagg_met=u_lp.DisaggMethod.FIX, - disagg_val=5000, agg_met=u_lp.AggMethod.SUM - ) + EXP_LINE, + IMPF_SET, + HAZ, + res=300, + to_meters=True, + disagg_met=u_lp.DisaggMethod.FIX, + disagg_val=5000, + agg_met=u_lp.AggMethod.SUM, + ) np.testing.assert_array_equal(imp3.eai_exp, imp4.eai_exp) - def test_calc_geom_impact_points(self): - """ test calc_geom_impact() with points""" + """test calc_geom_impact() with points""" imp1 = u_lp.calc_geom_impact( - EXP_POINT, IMPF_SET, HAZ, - res=0.05, to_meters=False, disagg_met=u_lp.DisaggMethod.DIV, - disagg_val=None, agg_met=u_lp.AggMethod.SUM - ) - aai_agg1 = 0.0470814 + EXP_POINT, + IMPF_SET, + HAZ, + res=0.05, + to_meters=False, + disagg_met=u_lp.DisaggMethod.DIV, + disagg_val=None, + agg_met=u_lp.AggMethod.SUM, + ) + aai_agg1 = 0.0470814 exp = EXP_POINT.copy() exp.set_lat_lon() @@ -354,66 +573,163 @@ def test_calc_geom_impact_points(self): check_impact(self, imp1, HAZ, EXP_POINT, aai_agg1, 
imp11.eai_exp) imp2 = u_lp.calc_geom_impact( - EXP_POINT, IMPF_SET, HAZ, - res=500, to_meters=True, disagg_met=u_lp.DisaggMethod.FIX, - disagg_val=1.0, agg_met=u_lp.AggMethod.SUM - ) + EXP_POINT, + IMPF_SET, + HAZ, + res=500, + to_meters=True, + disagg_met=u_lp.DisaggMethod.FIX, + disagg_val=1.0, + agg_met=u_lp.AggMethod.SUM, + ) - exp.gdf['value'] = 1.0 + exp.gdf["value"] = 1.0 imp22 = ImpactCalc(exp, IMPF_SET, HAZ).impact() aai_agg2 = 6.5454249333e-06 check_impact(self, imp2, HAZ, EXP_POINT, aai_agg2, imp22.eai_exp) def test_calc_geom_impact_mixed(self): - """ test calc_geom_impact() with a mixed exp (points, lines and polygons) """ + """test calc_geom_impact() with a mixed exp (points, lines and polygons)""" # mixed exposures gdf_mix = pd.concat([GDF_LINE, GDF_POLY, GDF_POINT]).reset_index(drop=True) exp_mix = Exposures(gdf_mix) imp1 = u_lp.calc_geom_impact( - exp_mix, IMPF_SET, HAZ, - res=0.05, to_meters=False, disagg_met=u_lp.DisaggMethod.DIV, - disagg_val=None, agg_met=u_lp.AggMethod.SUM - ) + exp_mix, + IMPF_SET, + HAZ, + res=0.05, + to_meters=False, + disagg_met=u_lp.DisaggMethod.DIV, + disagg_val=None, + agg_met=u_lp.AggMethod.SUM, + ) aai_agg1 = 2354303.3196003754 - eai_exp1 = np.array([ - 1.73069928e-04, 8.80741357e-04, 1.77657635e-01, 1.06413744e-02, - 1.15405492e-02, 3.40097761e-02, 8.91658032e-03, 4.19735141e-02, - 1.27160538e-02, 2.43849980e-01, 2.32808488e-02, 5.47043065e-03, - 5.44984095e-03, 5.80779958e-03, 1.06361040e-01, 4.67335812e-02, - 9.93703142e-02, 8.48207692e-03, 2.95633263e-02, 1.30223646e-01, - 3.84600393e-01, 2.05709279e-02, 1.39919480e-01, 1.61239410e-02, - 4.46991386e-02, 1.30045513e-02, 1.30045513e-02, 6.91177788e-04, - 1.61063727e+04, 1.07420484e+04, 1.44746070e+04, 7.18796281e+04, - 2.58806206e+04, 2.01316315e+05, 1.76071458e+05, 3.92482129e+05, - 2.90364327e+05, 9.05399356e+05, 1.94728210e+05, 5.11729689e+04, - 2.84224294e+02, 2.45938137e+02, 1.90644327e+02, 1.73925079e+02, - 1.76091839e+02, 4.43054173e+02, 4.41378151e+02, 4.74316805e+02, - 4.83873464e+02, 2.59001795e+02, 2.48200400e+02, 2.62995792e+02 - ]) + eai_exp1 = np.array( + [ + 1.73069928e-04, + 8.80741357e-04, + 1.77657635e-01, + 1.06413744e-02, + 1.15405492e-02, + 3.40097761e-02, + 8.91658032e-03, + 4.19735141e-02, + 1.27160538e-02, + 2.43849980e-01, + 2.32808488e-02, + 5.47043065e-03, + 5.44984095e-03, + 5.80779958e-03, + 1.06361040e-01, + 4.67335812e-02, + 9.93703142e-02, + 8.48207692e-03, + 2.95633263e-02, + 1.30223646e-01, + 3.84600393e-01, + 2.05709279e-02, + 1.39919480e-01, + 1.61239410e-02, + 4.46991386e-02, + 1.30045513e-02, + 1.30045513e-02, + 6.91177788e-04, + 1.61063727e04, + 1.07420484e04, + 1.44746070e04, + 7.18796281e04, + 2.58806206e04, + 2.01316315e05, + 1.76071458e05, + 3.92482129e05, + 2.90364327e05, + 9.05399356e05, + 1.94728210e05, + 5.11729689e04, + 2.84224294e02, + 2.45938137e02, + 1.90644327e02, + 1.73925079e02, + 1.76091839e02, + 4.43054173e02, + 4.41378151e02, + 4.74316805e02, + 4.83873464e02, + 2.59001795e02, + 2.48200400e02, + 2.62995792e02, + ] + ) check_impact(self, imp1, HAZ, exp_mix, aai_agg1, eai_exp1) imp2 = u_lp.calc_geom_impact( - exp_mix, IMPF_SET, HAZ, - res=5000, to_meters=True, disagg_met=u_lp.DisaggMethod.FIX, - disagg_val=None, agg_met=u_lp.AggMethod.SUM - ) + exp_mix, + IMPF_SET, + HAZ, + res=5000, + to_meters=True, + disagg_met=u_lp.DisaggMethod.FIX, + disagg_val=None, + agg_met=u_lp.AggMethod.SUM, + ) aai_agg2 = 321653479.4607434 - eai_exp2 = np.array([ - 1.73069928e-04, 8.80741357e-04, 2.17736979e-01, 6.48243461e-02, - 2.67262620e-02, 3.55078893e-01, 
8.14081011e-02, 4.36578022e-01, - 1.02605091e-01, 3.45121722e-01, 1.62144669e-01, 1.45008544e-01, - 2.32808488e-02, 2.73521532e-02, 9.51399554e-02, 2.25921717e-01, - 6.90427531e-01, 5.29133033e-03, 2.72705887e-03, 8.48207692e-03, - 2.10403881e+00, 1.33011693e-03, 3.14644100e-01, 7.72573773e-02, - 5.48322256e-03, 1.61239410e-02, 2.68194832e-01, 7.80273077e-02, - 1.48411299e+06, 1.09137411e+06, 1.62477251e+06, 1.43455724e+07, - 2.94783633e+06, 1.06950486e+07, 3.17592949e+07, 4.58152749e+07, - 3.94173129e+07, 1.48016265e+08, 1.87811203e+07, 5.41509882e+06, - 1.24792652e+04, 1.20008305e+04, 1.43296472e+04, 3.15280802e+04, - 3.32644558e+04, 3.19325625e+04, 3.11256252e+04, 3.20372742e+04, - 1.67623417e+04, 1.64528393e+04, 1.47050883e+04, 1.37721978e+04 - ]) + eai_exp2 = np.array( + [ + 1.73069928e-04, + 8.80741357e-04, + 2.17736979e-01, + 6.48243461e-02, + 2.67262620e-02, + 3.55078893e-01, + 8.14081011e-02, + 4.36578022e-01, + 1.02605091e-01, + 3.45121722e-01, + 1.62144669e-01, + 1.45008544e-01, + 2.32808488e-02, + 2.73521532e-02, + 9.51399554e-02, + 2.25921717e-01, + 6.90427531e-01, + 5.29133033e-03, + 2.72705887e-03, + 8.48207692e-03, + 2.10403881e00, + 1.33011693e-03, + 3.14644100e-01, + 7.72573773e-02, + 5.48322256e-03, + 1.61239410e-02, + 2.68194832e-01, + 7.80273077e-02, + 1.48411299e06, + 1.09137411e06, + 1.62477251e06, + 1.43455724e07, + 2.94783633e06, + 1.06950486e07, + 3.17592949e07, + 4.58152749e07, + 3.94173129e07, + 1.48016265e08, + 1.87811203e07, + 5.41509882e06, + 1.24792652e04, + 1.20008305e04, + 1.43296472e04, + 3.15280802e04, + 3.32644558e04, + 3.19325625e04, + 3.11256252e04, + 3.20372742e04, + 1.67623417e04, + 1.64528393e04, + 1.47050883e04, + 1.37721978e04, + ] + ) check_impact(self, imp2, HAZ, exp_mix, aai_agg2, eai_exp2) # Check non-default impact function id @@ -421,26 +737,72 @@ def test_calc_geom_impact_mixed(self): impfdouble.mdd *= 2 impf_set = ImpactFuncSet([IMPF, impfdouble]) imp3 = u_lp.calc_geom_impact( - exp_mix, impf_set, HAZ, - res=0.05, to_meters=False, disagg_met=u_lp.DisaggMethod.DIV, - disagg_val=None, agg_met=u_lp.AggMethod.SUM - ) + exp_mix, + impf_set, + HAZ, + res=0.05, + to_meters=False, + disagg_met=u_lp.DisaggMethod.DIV, + disagg_val=None, + agg_met=u_lp.AggMethod.SUM, + ) aai_agg3 = 4708604.47775332 - eai_exp3 = np.array([ - 1.73069928e-04, 8.80741357e-04, 1.77657635e-01, 1.06413744e-02, - 1.15405492e-02, 3.40097761e-02, 8.91658032e-03, 4.19735141e-02, - 1.27160538e-02, 2.43849980e-01, 2.32808488e-02, 5.47043065e-03, - 5.44984095e-03, 5.80779958e-03, 1.06361040e-01, 4.67335812e-02, - 9.93703142e-02, 8.48207692e-03, 2.95633263e-02, 1.30223646e-01, - 3.84600393e-01, 2.05709279e-02, 1.39919480e-01, 1.61239410e-02, - 4.46991386e-02, 1.30045513e-02, 1.30045513e-02, 6.91177788e-04, - 3.22122197e+04, 2.14840968e+04, 2.89492139e+04, 1.43759256e+05, - 5.17612411e+04, 4.02632630e+05, 3.52142916e+05, 7.84964258e+05, - 5.80728653e+05, 1.81079871e+06, 3.89456421e+05, 1.02345938e+05, - 5.68448588e+02, 4.91876274e+02, 3.81288655e+02, 3.47850159e+02, - 3.52183678e+02, 8.86108346e+02, 8.82756302e+02, 9.48633609e+02, - 9.67746928e+02, 5.18003590e+02, 4.96400801e+02, 5.25991584e+02 - ]) + eai_exp3 = np.array( + [ + 1.73069928e-04, + 8.80741357e-04, + 1.77657635e-01, + 1.06413744e-02, + 1.15405492e-02, + 3.40097761e-02, + 8.91658032e-03, + 4.19735141e-02, + 1.27160538e-02, + 2.43849980e-01, + 2.32808488e-02, + 5.47043065e-03, + 5.44984095e-03, + 5.80779958e-03, + 1.06361040e-01, + 4.67335812e-02, + 9.93703142e-02, + 8.48207692e-03, + 2.95633263e-02, + 1.30223646e-01, + 
3.84600393e-01, + 2.05709279e-02, + 1.39919480e-01, + 1.61239410e-02, + 4.46991386e-02, + 1.30045513e-02, + 1.30045513e-02, + 6.91177788e-04, + 3.22122197e04, + 2.14840968e04, + 2.89492139e04, + 1.43759256e05, + 5.17612411e04, + 4.02632630e05, + 3.52142916e05, + 7.84964258e05, + 5.80728653e05, + 1.81079871e06, + 3.89456421e05, + 1.02345938e05, + 5.68448588e02, + 4.91876274e02, + 3.81288655e02, + 3.47850159e02, + 3.52183678e02, + 8.86108346e02, + 8.82756302e02, + 9.48633609e02, + 9.67746928e02, + 5.18003590e02, + 4.96400801e02, + 5.25991584e02, + ] + ) check_impact(self, imp3, HAZ, exp_mix, aai_agg3, eai_exp3) def test_impact_pnt_agg(self): @@ -449,59 +811,117 @@ def test_impact_pnt_agg(self): exp_mix = Exposures(gdf_mix) exp_pnt = u_lp.exp_geom_to_pnt( - exp_mix, res=1, to_meters=False, disagg_met=u_lp.DisaggMethod.DIV, - disagg_val=None - ) + exp_mix, + res=1, + to_meters=False, + disagg_met=u_lp.DisaggMethod.DIV, + disagg_val=None, + ) imp_pnt = ImpactCalc(exp_pnt, IMPF_SET, HAZ).impact(save_mat=True) imp_agg = u_lp.impact_pnt_agg(imp_pnt, exp_pnt.gdf, u_lp.AggMethod.SUM) aai_agg = 1282901.0114188215 - eai_exp = np.array([ - 0.00000000e+00, 1.73069928e-04, 3.71172778e-04, 5.09568579e-04, - 8.43340681e-04, 3.47906751e-03, 3.00385618e-03, 5.62430455e-03, - 9.07998787e-03, 1.30641275e-02, 6.18365411e-03, 4.74934473e-03, - 8.34810476e-02, 5.07280880e-02, 1.02690634e-01, 1.27160538e-02, - 8.60984331e-02, 1.62144669e-01, 2.32808488e-02, 2.90389979e-02, - 4.06902083e-03, 2.33667906e-01, 5.29133033e-03, 2.72705887e-03, - 8.48207692e-03, 2.95633263e-02, 4.01271600e-01, 1.33011693e-03, - 9.94596852e-02, 7.72573773e-02, 5.48322256e-03, 1.61239410e-02, - 4.14706673e-03, 8.32840521e-02, 2.87509619e-01, 4.88901364e-02, - 1.71930351e-02, 2.49435540e-02, 2.96121155e-05, 1.03654148e-02, - 8.36178802e+03, 7.30704698e+03, 1.20628926e+04, 3.54061498e+04, - 1.23524320e+04, 7.78074661e+04, 1.28292995e+05, 2.31231953e+05, - 1.31911226e+05, 5.37897306e+05, 8.37016948e+04, 1.65661030e+04 - ]) + eai_exp = np.array( + [ + 0.00000000e00, + 1.73069928e-04, + 3.71172778e-04, + 5.09568579e-04, + 8.43340681e-04, + 3.47906751e-03, + 3.00385618e-03, + 5.62430455e-03, + 9.07998787e-03, + 1.30641275e-02, + 6.18365411e-03, + 4.74934473e-03, + 8.34810476e-02, + 5.07280880e-02, + 1.02690634e-01, + 1.27160538e-02, + 8.60984331e-02, + 1.62144669e-01, + 2.32808488e-02, + 2.90389979e-02, + 4.06902083e-03, + 2.33667906e-01, + 5.29133033e-03, + 2.72705887e-03, + 8.48207692e-03, + 2.95633263e-02, + 4.01271600e-01, + 1.33011693e-03, + 9.94596852e-02, + 7.72573773e-02, + 5.48322256e-03, + 1.61239410e-02, + 4.14706673e-03, + 8.32840521e-02, + 2.87509619e-01, + 4.88901364e-02, + 1.71930351e-02, + 2.49435540e-02, + 2.96121155e-05, + 1.03654148e-02, + 8.36178802e03, + 7.30704698e03, + 1.20628926e04, + 3.54061498e04, + 1.23524320e04, + 7.78074661e04, + 1.28292995e05, + 2.31231953e05, + 1.31911226e05, + 5.37897306e05, + 8.37016948e04, + 1.65661030e04, + ] + ) check_impact(self, imp_agg, HAZ, exp_mix, aai_agg, eai_exp) def test_calc_grid_impact_polys(self): """Test impact on grid for polygons""" import climada.util.coordinates as u_coord + res = 0.1 (_, _, xmax, ymax) = EXP_POLY.gdf.geometry.bounds.max() (xmin, ymin, _, _) = EXP_POLY.gdf.geometry.bounds.min() bounds = (xmin, ymin, xmax, ymax) - height, width, trafo = u_coord.pts_to_raster_meta( - bounds, (res, res) - ) + height, width, trafo = u_coord.pts_to_raster_meta(bounds, (res, res)) x_grid, y_grid = u_coord.raster_to_meshgrid(trafo, width, height) imp_g = u_lp.calc_grid_impact( - 
exp=EXP_POLY, impf_set=IMPF_SET, haz=HAZ, - grid=(x_grid, y_grid), disagg_met=u_lp.DisaggMethod.DIV, - disagg_val=None, agg_met=u_lp.AggMethod.SUM - ) + exp=EXP_POLY, + impf_set=IMPF_SET, + haz=HAZ, + grid=(x_grid, y_grid), + disagg_met=u_lp.DisaggMethod.DIV, + disagg_val=None, + agg_met=u_lp.AggMethod.SUM, + ) aai_agg = 2319608.54202 - eai_exp = np.array([ - 17230.22051525, 10974.85453081, 14423.77523209, 77906.29609785, - 22490.08925927, 147937.83580832, 132329.78961234, 375082.82348148, - 514527.07490518, 460185.19291995, 265875.77587879, 280644.81378238 - ]) + eai_exp = np.array( + [ + 17230.22051525, + 10974.85453081, + 14423.77523209, + 77906.29609785, + 22490.08925927, + 147937.83580832, + 132329.78961234, + 375082.82348148, + 514527.07490518, + 460185.19291995, + 265875.77587879, + 280644.81378238, + ] + ) check_impact(self, imp_g, HAZ, EXP_POLY, aai_agg, eai_exp) - def test_aggregate_impact_mat(self): """Private method""" pass + class TestGdfGeomToPnt(unittest.TestCase): """Test Geodataframes to points and vice-versa functions""" @@ -510,84 +930,138 @@ def test_gdf_line_to_pnt(self): gdf_pnt = u_lp._line_to_pnts(GDF_LINE, 1, False) check_unchanged_geom_gdf(self, GDF_LINE, gdf_pnt) np.testing.assert_array_equal( - np.unique(GDF_LINE['value']), np.unique(gdf_pnt['value']) - ) + np.unique(GDF_LINE["value"]), np.unique(gdf_pnt["value"]) + ) gdf_pnt = u_lp._line_to_pnts(GDF_LINE, 1000, True) check_unchanged_geom_gdf(self, GDF_LINE, gdf_pnt) np.testing.assert_array_equal( - np.unique(GDF_LINE['value']), np.unique(gdf_pnt['value']) - ) + np.unique(GDF_LINE["value"]), np.unique(gdf_pnt["value"]) + ) gdf_pnt_d = u_lp._line_to_pnts(GDF_LINE.iloc[0:1], 0.01, False) np.testing.assert_allclose( gdf_pnt_d.geometry.x.values, - np.array([ - 6.092507, 6.092895, 6.088363, 6.083726, 6.079199, 6.074582, - 6.068896, 6.061939, 6.061839 - ]) - ) + np.array( + [ + 6.092507, + 6.092895, + 6.088363, + 6.083726, + 6.079199, + 6.074582, + 6.068896, + 6.061939, + 6.061839, + ] + ), + ) np.testing.assert_allclose( gdf_pnt_d.geometry.y.values, - np.array([ - 50.876242, 50.866888, 50.857725, 50.84861 , 50.839442, 50.830321, - 50.82186 , 50.814366, 50.80475 - ]) - ) + np.array( + [ + 50.876242, + 50.866888, + 50.857725, + 50.84861, + 50.839442, + 50.830321, + 50.82186, + 50.814366, + 50.80475, + ] + ), + ) - #disaggregation in degrees and approximately same value in meters + # disaggregation in degrees and approximately same value in meters gdf_pnt_m = u_lp._line_to_pnts(GDF_LINE.iloc[0:1], 1000, True) np.testing.assert_allclose( - gdf_pnt_m.geometry.x, - gdf_pnt_d.geometry.x, rtol=1e-2) + gdf_pnt_m.geometry.x, gdf_pnt_d.geometry.x, rtol=1e-2 + ) np.testing.assert_allclose( - gdf_pnt_m.geometry.y, - gdf_pnt_d.geometry.y,rtol=1e-2) + gdf_pnt_m.geometry.y, gdf_pnt_d.geometry.y, rtol=1e-2 + ) def test_gdf_poly_to_pnts(self): """Test polygon to points disaggregation""" gdf_pnt = u_lp._poly_to_pnts(GDF_POLY, 1, False) check_unchanged_geom_gdf(self, GDF_POLY, gdf_pnt) np.testing.assert_array_equal( - np.unique(GDF_POLY['value']), np.unique(gdf_pnt['value']) - ) + np.unique(GDF_POLY["value"]), np.unique(gdf_pnt["value"]) + ) gdf_pnt = u_lp._poly_to_pnts(GDF_POLY, 5000, True) check_unchanged_geom_gdf(self, GDF_POLY, gdf_pnt) np.testing.assert_array_equal( - np.unique(GDF_POLY['value']), np.unique(gdf_pnt['value']) - ) + np.unique(GDF_POLY["value"]), np.unique(gdf_pnt["value"]) + ) gdf_pnt_d = u_lp._poly_to_pnts(GDF_POLY.iloc[0:1], 0.2, False) np.testing.assert_allclose( gdf_pnt_d.geometry.x.values, - np.array([ - 
6.9690605, 7.1690605, 6.3690605, 6.5690605, 6.7690605, 6.9690605, - 7.1690605, 6.5690605, 6.7690605 - ]) - ) + np.array( + [ + 6.9690605, + 7.1690605, + 6.3690605, + 6.5690605, + 6.7690605, + 6.9690605, + 7.1690605, + 6.5690605, + 6.7690605, + ] + ), + ) np.testing.assert_allclose( gdf_pnt_d.geometry.y.values, - np.array([ - 53.04131655, 53.04131655, 53.24131655, 53.24131655, 53.24131655, - 53.24131655, 53.24131655, 53.44131655, 53.44131655 - ]) - ) + np.array( + [ + 53.04131655, + 53.04131655, + 53.24131655, + 53.24131655, + 53.24131655, + 53.24131655, + 53.24131655, + 53.44131655, + 53.44131655, + ] + ), + ) gdf_pnt_m = u_lp._poly_to_pnts(GDF_POLY.iloc[0:1], 15000, True) np.testing.assert_allclose( gdf_pnt_m.geometry.x.values, - np.array([ - 6.84279696, 6.97754426, 7.11229155, 6.30380779, 6.43855509, - 6.57330238, 6.70804967, 6.84279696, 6.97754426 - ]) - ) + np.array( + [ + 6.84279696, + 6.97754426, + 7.11229155, + 6.30380779, + 6.43855509, + 6.57330238, + 6.70804967, + 6.84279696, + 6.97754426, + ] + ), + ) np.testing.assert_allclose( gdf_pnt_m.geometry.y.values, - np.array([ - 53.0645655 , 53.0645655 , 53.0645655 , 53.28896623, 53.28896623, - 53.28896623, 53.28896623, 53.28896623, 53.28896623 - ]) - ) + np.array( + [ + 53.0645655, + 53.0645655, + 53.0645655, + 53.28896623, + 53.28896623, + 53.28896623, + 53.28896623, + 53.28896623, + 53.28896623, + ] + ), + ) def test_pnts_per_line(self): """Test number of points per line for give resolution""" @@ -603,8 +1077,8 @@ def test_line_fractions(self): 2: np.array([0.5]), 0.8: np.array([0.5]), 0.6: np.array([0.25, 0.75]), - 0.4: np.array([0.25, 0.75]) - } + 0.4: np.array([0.25, 0.75]), + } for res, fraction in res_fractions.items(): np.testing.assert_allclose(u_lp._line_fraction(length, res), fraction) @@ -613,26 +1087,31 @@ def test_line_fractions(self): 2: np.array([0.5]), 0.8: np.array([0.25, 0.75]), 0.6: np.array([0.166667, 0.5, 0.833333]), - 0.4: np.array([0.1, 0.3, 0.5, 0.7, 0.9]) - } + 0.4: np.array([0.1, 0.3, 0.5, 0.7, 0.9]), + } for res, fraction in res_fractions.items(): - np.testing.assert_allclose(u_lp._line_fraction(length, res), fraction, rtol=1e-04 ) + np.testing.assert_allclose( + u_lp._line_fraction(length, res), fraction, rtol=1e-04 + ) def test_resolution_warning(self): lines = [ LineString([[0, 0], [0, 2]]), LineString([[0, 0], [0, 12]]), - LineString([[0, 0], [0, 20]]) - ] + LineString([[0, 0], [0, 20]]), + ] gdf_lines = gpd.GeoDataFrame(geometry=lines) - with self.assertLogs('climada.util.lines_polys_handler', level='WARNING') as ctx: + with self.assertLogs( + "climada.util.lines_polys_handler", level="WARNING" + ) as ctx: u_lp._line_to_pnts(gdf_lines, 1, False) - self.assertEqual(ctx.records[0].message, + self.assertEqual( + ctx.records[0].message, f"{2} lines with a length < 10*resolution were found. " "Each of these lines is disaggregate to one point. " "Reaggregatint values will thus likely lead to overestimattion. " - "Consider chosing a smaller resolution or filter out the short lines. ") - + "Consider chosing a smaller resolution or filter out the short lines. 
", + ) def test_gdf_to_grid(self): """""" @@ -658,36 +1137,35 @@ def test_pnt_line_poly_mask(self): """""" pnt, lines, poly = u_lp._pnt_line_poly_mask(GDF_POLY) self.assertTrue(np.all(poly)) - self.assertTrue(np.all(lines==False)) - self.assertTrue(np.all(pnt==False)) + self.assertTrue(np.all(lines == False)) + self.assertTrue(np.all(pnt == False)) pnt, lines, poly = u_lp._pnt_line_poly_mask(GDF_LINE) - self.assertTrue(np.all(poly==False)) + self.assertTrue(np.all(poly == False)) self.assertTrue(np.all(lines)) - self.assertTrue(np.all(pnt==False)) + self.assertTrue(np.all(pnt == False)) pnt, lines, poly = u_lp._pnt_line_poly_mask(GDF_POINT) - self.assertTrue(np.all(poly==False)) - self.assertTrue(np.all(lines==False)) + self.assertTrue(np.all(poly == False)) + self.assertTrue(np.all(lines == False)) self.assertTrue(np.all(pnt)) - def test_get_equalarea_proj(self): """Test pass get locally cylindrical equalarea projection""" poly = EXP_POLY.gdf.geometry[0] proj = u_lp._get_equalarea_proj(poly) - self.assertEqual(proj, '+proj=cea +lat_0=53.150193 +lon_0=6.881223 +units=m') + self.assertEqual(proj, "+proj=cea +lat_0=53.150193 +lon_0=6.881223 +units=m") def test_get_pyproj_trafo(self): """""" - dest_crs = '+proj=cea +lat_0=52.112866 +lon_0=5.150162 +units=m' + dest_crs = "+proj=cea +lat_0=52.112866 +lon_0=5.150162 +units=m" orig_crs = EXP_POLY.gdf.crs trafo = u_lp._get_pyproj_trafo(orig_crs, dest_crs) self.assertEqual( trafo.definition, - 'proj=pipeline step proj=unitconvert xy_in=deg' + - ' xy_out=rad step proj=cea lat_0=52.112866 lon_0=5.150162 units=m' - ) + "proj=pipeline step proj=unitconvert xy_in=deg" + + " xy_out=rad step proj=cea lat_0=52.112866 lon_0=5.150162 units=m", + ) def test_reproject_grid(self): """""" @@ -698,10 +1176,10 @@ def test_reproject_poly(self): pass def test_swap_geom_cols(self): - """Test swap of geometry columns """ + """Test swap of geometry columns""" gdf_orig = GDF_POLY.copy() - gdf_orig['new_geom'] = gdf_orig.geometry - swap_gdf = u_lp._swap_geom_cols(gdf_orig, 'old_geom', 'new_geom') + gdf_orig["new_geom"] = gdf_orig.geometry + swap_gdf = u_lp._swap_geom_cols(gdf_orig, "old_geom", "new_geom") self.assertTrue(np.alltrue(swap_gdf.geometry.geom_equals(gdf_orig.new_geom))) diff --git a/climada/util/test/test_plot.py b/climada/util/test/test_plot.py index 9588e3821..351010afb 100644 --- a/climada/util/test/test_plot.py +++ b/climada/util/test/test_plot.py @@ -20,87 +20,107 @@ """ import unittest + import cartopy -import numpy as np -import matplotlib.pyplot as plt -from matplotlib import colormaps as cm import cartopy.crs as ccrs import geopandas as gpd +import matplotlib.pyplot as plt +import numpy as np +from matplotlib import colormaps as cm from shapely import Point import climada.util.plot as u_plot + class TestFuncs(unittest.TestCase): def test_get_transform_4326_pass(self): """Check _get_transformation for 4326 epsg.""" - res, unit = u_plot.get_transformation('epsg:4326') + res, unit = u_plot.get_transformation("epsg:4326") self.assertIsInstance(res, cartopy.crs.PlateCarree) - self.assertEqual(unit, '°') + self.assertEqual(unit, "°") def test_get_transform_3395_pass(self): """Check that assigned attribute is correctly set.""" - res, unit = u_plot.get_transformation('epsg:3395') + res, unit = u_plot.get_transformation("epsg:3395") self.assertIsInstance(res, cartopy.crs.Mercator) - self.assertEqual(unit, 'm') + self.assertEqual(unit, "m") def test_get_transform_3035_pass(self): """Check that assigned attribute is correctly set.""" - res, unit = 
u_plot.get_transformation('epsg:3035') + res, unit = u_plot.get_transformation("epsg:3035") self.assertIsInstance(res, cartopy.crs.Projection) self.assertEqual(res.epsg_code, 3035) - self.assertEqual(unit, 'm') + self.assertEqual(unit, "m") + class TestPlots(unittest.TestCase): def test_geo_scatter_categorical(self): """Plots ones with geo_scatteR_categorical""" # test default with one plot - values = np.array([1, 2.0, 1, 'a']) + values = np.array([1, 2.0, 1, "a"]) coord = np.array([[26, 0], [26, 1], [28, 0], [29, 1]]) - u_plot.geo_scatter_categorical(values, coord, 'value', 'test plot', - pop_name=True) + u_plot.geo_scatter_categorical( + values, coord, "value", "test plot", pop_name=True + ) plt.close() - #test multiple plots with non default kwargs - values = np.array([[1, 2.0, 1, 'a'], [0, 0, 0, 0]]) + # test multiple plots with non default kwargs + values = np.array([[1, 2.0, 1, "a"], [0, 0, 0, 0]]) coord = np.array([[26, 0], [26, 1], [28, 0], [29, 1]]) - u_plot.geo_scatter_categorical(values, coord, 'value', 'test plot', - cat_name={0: 'zero', - 1: 'int', - 2.0: 'float', - 'a': 'string'}, - pop_name=False, cmap=cm.get_cmap('Set1')) + u_plot.geo_scatter_categorical( + values, + coord, + "value", + "test plot", + cat_name={0: "zero", 1: "int", 2.0: "float", "a": "string"}, + pop_name=False, + cmap=cm.get_cmap("Set1"), + ) plt.close() - #test colormap warning - values = np.array([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], - [12, 13, 14, 15]]) + # test colormap warning + values = np.array( + [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]] + ) coord = np.array([[26, 0], [26, 4], [28, 0], [29, 1]]) - u_plot.geo_scatter_categorical(values, coord, 'value', 'test plot', - pop_name=False, cmap='viridis') + u_plot.geo_scatter_categorical( + values, coord, "value", "test plot", pop_name=False, cmap="viridis" + ) plt.close() - #test colormap warning with 256 colors - values = np.array([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], - [12, 13, 14, 15]]) + # test colormap warning with 256 colors + values = np.array( + [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]] + ) coord = np.array([[26, 0], [26, 1], [28, 0], [29, 1]]) - u_plot.geo_scatter_categorical(values, coord, 'value', 'test plot', - pop_name=False, cmap='tab20c') + u_plot.geo_scatter_categorical( + values, coord, "value", "test plot", pop_name=False, cmap="tab20c" + ) plt.close() def test_geo_scatter_from_array(self): values = np.array([1, 2.0, 1, 1]) coord = np.array([[-17, 178], [-10, 180], [-27, 175], [-16, 186]]) - var_name = 'test' - title = 'test' + var_name = "test" + title = "test" projection = ccrs.PlateCarree() - cmap = 'viridis' - ax = u_plot.geo_scatter_from_array(values, coord, var_name, title, - pop_name=True, extend='neither', - shapes=True, axes=None, proj=projection, - figsize=(9, 13), cmap=cmap) + cmap = "viridis" + ax = u_plot.geo_scatter_from_array( + values, + coord, + var_name, + title, + pop_name=True, + extend="neither", + shapes=True, + axes=None, + proj=projection, + figsize=(9, 13), + cmap=cmap, + ) self.assertEqual(var_name, ax.get_title()) colorbar = next(x.colorbar for x in ax.collections if x.colorbar) self.assertAlmostEqual(np.max(values), colorbar.vmax) @@ -111,14 +131,23 @@ def test_geo_scatter_from_array(self): def test_geo_bin_from_array(self): values = np.array([1, 2.0, 5, 1]) coord = np.array([[-10, 17], [-30, 20], [5, 75], [-16, 20]]) - var_name = 'test' - title = 'test' + var_name = "test" + title = "test" projection = ccrs.PlateCarree() - cmap = 'viridis' - ax = 
u_plot.geo_bin_from_array(values, coord, var_name, title, - pop_name=True, extend='neither', - shapes=True, axes=None, proj=projection, - figsize=(9, 13), cmap=cmap) + cmap = "viridis" + ax = u_plot.geo_bin_from_array( + values, + coord, + var_name, + title, + pop_name=True, + extend="neither", + shapes=True, + axes=None, + proj=projection, + figsize=(9, 13), + cmap=cmap, + ) self.assertEqual(var_name, ax.get_title()) colorbar = next(x.colorbar for x in ax.collections if x.colorbar) self.assertAlmostEqual(np.max(values), colorbar.vmax) @@ -129,12 +158,21 @@ def test_geo_bin_from_array(self): def test_geo_im_from_array(self): values = np.array([1, 2.0, 5, np.nan]) coord = np.array([[-17, 178], [-10, 180], [-27, 175], [-16, 186]]) - var_name = 'test' - title = 'test' + var_name = "test" + title = "test" projection = ccrs.PlateCarree() - cmap = 'viridis' - ax = u_plot.geo_im_from_array(values, coord, var_name, title, - proj=projection, smooth=True, axes=None, figsize=(9, 13), cmap=cmap) + cmap = "viridis" + ax = u_plot.geo_im_from_array( + values, + coord, + var_name, + title, + proj=projection, + smooth=True, + axes=None, + figsize=(9, 13), + cmap=cmap, + ) self.assertEqual(var_name, ax.get_title()) colorbar = next(x.colorbar for x in ax.collections if x.colorbar) self.assertAlmostEqual(np.nanmax(values), colorbar.vmax) @@ -143,8 +181,17 @@ def test_geo_im_from_array(self): plt.close() projection = ccrs.AzimuthalEquidistant() - ax = u_plot.geo_im_from_array(values, coord, var_name, title, - proj=projection, smooth=True, axes=None, figsize=(9, 13), cmap=cmap) + ax = u_plot.geo_im_from_array( + values, + coord, + var_name, + title, + proj=projection, + smooth=True, + axes=None, + figsize=(9, 13), + cmap=cmap, + ) self.assertEqual(var_name, ax.get_title()) colorbar = next(x.colorbar for x in ax.collections if x.colorbar) self.assertAlmostEqual(np.nanmax(values), colorbar.vmax) @@ -155,37 +202,50 @@ def test_geo_im_from_array(self): def test_plot_from_gdf_no_log(self): """test plot_from_gdf() with linear color bar (because there is a 0 in data)""" return_periods = gpd.GeoDataFrame( - data = ((2., 5.), (0., 6.), (None, 2.), (1., 1000.)), - columns = ('10.0', '20.0') + data=((2.0, 5.0), (0.0, 6.0), (None, 2.0), (1.0, 1000.0)), + columns=("10.0", "20.0"), + ) + return_periods["geometry"] = ( + Point(45.0, 26.0), + Point(46.0, 26.0), + Point(45.0, 27.0), + Point(46.0, 27.0), ) - return_periods['geometry'] = (Point(45., 26.), Point(46., 26.), Point(45., 27.), Point(46., 27.)) - colorbar_name = 'Return Periods (Years)' - title_subplots = lambda cols: [f'Threshold Intensity: {col} m/s' for col in cols] + colorbar_name = "Return Periods (Years)" + title_subplots = lambda cols: [ + f"Threshold Intensity: {col} m/s" for col in cols + ] (axis1, axis2) = u_plot.plot_from_gdf( - return_periods, - colorbar_name=colorbar_name, - title_subplots=title_subplots) - self.assertEqual('Threshold Intensity: 10.0 m/s', axis1.get_title()) - self.assertEqual('Threshold Intensity: 20.0 m/s', axis2.get_title()) + return_periods, colorbar_name=colorbar_name, title_subplots=title_subplots + ) + self.assertEqual("Threshold Intensity: 10.0 m/s", axis1.get_title()) + self.assertEqual("Threshold Intensity: 20.0 m/s", axis2.get_title()) plt.close() def test_plot_from_gdf_log(self): """test plot_from_gdf() with log color bar)""" return_periods = gpd.GeoDataFrame( - data = ((2., 5.), (3., 6.), (None, 2.), (1., 1000.)), - columns = ('10.0', '20.0') + data=((2.0, 5.0), (3.0, 6.0), (None, 2.0), (1.0, 1000.0)), + columns=("10.0", 
"20.0"), ) - return_periods['geometry'] = (Point(45., 26.), Point(46., 26.), Point(45., 27.), Point(46., 27.)) - colorbar_name = 'Return Periods (Years)' - title_subplots = lambda cols: [f'Threshold Intensity: {col} m/s' for col in cols] + return_periods["geometry"] = ( + Point(45.0, 26.0), + Point(46.0, 26.0), + Point(45.0, 27.0), + Point(46.0, 27.0), + ) + colorbar_name = "Return Periods (Years)" + title_subplots = lambda cols: [ + f"Threshold Intensity: {col} m/s" for col in cols + ] (axis1, axis2) = u_plot.plot_from_gdf( - return_periods, - colorbar_name=colorbar_name, - title_subplots=title_subplots) - self.assertEqual('Threshold Intensity: 10.0 m/s', axis1.get_title()) - self.assertEqual('Threshold Intensity: 20.0 m/s', axis2.get_title()) + return_periods, colorbar_name=colorbar_name, title_subplots=title_subplots + ) + self.assertEqual("Threshold Intensity: 10.0 m/s", axis1.get_title()) + self.assertEqual("Threshold Intensity: 20.0 m/s", axis2.get_title()) plt.close() + # Execute Tests if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestFuncs) diff --git a/climada/util/test/test_save.py b/climada/util/test/test_save.py index 24ea298d1..6a43e58dd 100644 --- a/climada/util/test/test_save.py +++ b/climada/util/test/test_save.py @@ -18,15 +18,17 @@ Test save module. """ + import unittest from pathlib import Path from climada import CONFIG -from climada.util.save import save, load +from climada.util.save import load, save DATA_DIR = CONFIG.util.test_data.str() IN_CONFIG = CONFIG.local_data.save_dir.str() + class TestSave(unittest.TestCase): """Test save function""" @@ -38,22 +40,22 @@ def tearDown(self): def test_entity_in_save_dir(self): """Returns the same list if its length is correct.""" - file_name = 'save_test.pkl' - ent = {'value': [1, 2, 3]} - with self.assertLogs('climada.util.save', level='INFO') as cm: + file_name = "save_test.pkl" + ent = {"value": [1, 2, 3]} + with self.assertLogs("climada.util.save", level="INFO") as cm: save(file_name, ent) self.assertTrue(CONFIG.local_data.save_dir.dir().joinpath(file_name).is_file()) - self.assertTrue((file_name in cm.output[0]) or - (file_name in cm.output[1])) + self.assertTrue((file_name in cm.output[0]) or (file_name in cm.output[1])) def test_load_pass(self): """Load previously saved variable""" - file_name = 'save_test.pkl' - ent = {'value': [1, 2, 3]} + file_name = "save_test.pkl" + ent = {"value": [1, 2, 3]} save(file_name, ent) res = load(file_name) - self.assertTrue('value' in res) - self.assertTrue(res['value'] == ent['value']) + self.assertTrue("value" in res) + self.assertTrue(res["value"] == ent["value"]) + # Execute Tests if __name__ == "__main__": diff --git a/climada/util/test/test_select.py b/climada/util/test/test_select.py index 7a7097735..184a7b0c9 100755 --- a/climada/util/test/test_select.py +++ b/climada/util/test/test_select.py @@ -19,22 +19,24 @@ Test select module. 
""" - import unittest + import numpy as np from climada.util.select import get_attributes_with_matching_dimension -class Dummy(): + +class Dummy: def __init__(self): self.oneD3 = [1, 2, 3] self.oneD4 = [1, 2, 3, 4] self.twoD2 = [[1, 2, 3], [1, 2, 3, 4]] self.twoD3 = [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]] - self.twoD4 = [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3 ,4]] + self.twoD4 = [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]] self.twonp = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) + class TestGetAttributesDimension_pass(unittest.TestCase): """Test get_attributes_with_matching_dimension function""" @@ -43,18 +45,17 @@ def test_select_pass(self): dummy = Dummy() list_attrs = get_attributes_with_matching_dimension(dummy, [3]) - self.assertTrue(np.array_equal(list_attrs, ['oneD3', 'twoD3', 'twonp'])) + self.assertTrue(np.array_equal(list_attrs, ["oneD3", "twoD3", "twonp"])) list_attrs = get_attributes_with_matching_dimension(dummy, [4, 4]) - self.assertTrue(np.array_equal(list_attrs, ['twoD4'])) + self.assertTrue(np.array_equal(list_attrs, ["twoD4"])) list_attrs = get_attributes_with_matching_dimension(dummy, [3, 4]) - self.assertTrue(np.array_equal(list_attrs, ['twoD3', 'twonp'])) + self.assertTrue(np.array_equal(list_attrs, ["twoD3", "twonp"])) list_attrs = get_attributes_with_matching_dimension(dummy, [5]) self.assertTrue(np.array_equal(list_attrs, [])) - # Execute Tests if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestGetAttributesDimension_pass) diff --git a/climada/util/test/test_value_representation.py b/climada/util/test/test_value_representation.py index 61cf6a932..dfd662e5b 100644 --- a/climada/util/test/test_value_representation.py +++ b/climada/util/test/test_value_representation.py @@ -19,12 +19,18 @@ Test of util.math module """ - -from climada.util.value_representation import sig_dig, sig_dig_list, ABBREV -from climada.util.value_representation import value_to_monetary_unit, safe_divide +import math import unittest + import numpy as np -import math + +from climada.util.value_representation import ( + ABBREV, + safe_divide, + sig_dig, + sig_dig_list, + value_to_monetary_unit, +) class TestDigits(unittest.TestCase): @@ -37,9 +43,7 @@ def test_sig_dig_pass(self): nbs_out = [1.23, 12300, -12300, -12.3] for nb_in, nb_out in zip(nbs_in, nbs_out): self.assertEqual(sig_dig(nb_in, n_sig_dig), nb_out) - self.assertTrue( - np.array_equal(sig_dig_list(nbs_in, n_sig_dig), nbs_out) - ) + self.assertTrue(np.array_equal(sig_dig_list(nbs_in, n_sig_dig), nbs_out)) def test_sig_dig_fail(self): """Test sig_dig function""" @@ -48,16 +52,21 @@ def test_sig_dig_fail(self): nbs_out = [1.23, 12300, -12300, -12.3] for nb_in, nb_out in zip(nbs_in, nbs_out): self.assertNotEqual(sig_dig(nb_in, n_sig_dig_wrong), nb_out) - self.assertFalse( - np.array_equal(sig_dig_list(nbs_in, n_sig_dig_wrong), nbs_out) - ) + self.assertFalse(np.array_equal(sig_dig_list(nbs_in, n_sig_dig_wrong), nbs_out)) def test_value_to_monetary_unit_pass(self): """Test money_unit function""" nbs_in = [-1e10, -1e6, -1e2, 0, 1e3, 1e7, 1e11] nbs_out = [-10, -1, -100, 0, 1, 10, 100] - names_out = [ABBREV[1e9], ABBREV[1e6], ABBREV[1], ABBREV[1], - ABBREV[1e3], ABBREV[1e6], ABBREV[1e9]] + names_out = [ + ABBREV[1e9], + ABBREV[1e6], + ABBREV[1], + ABBREV[1], + ABBREV[1e3], + ABBREV[1e6], + ABBREV[1e9], + ] for j, (nb_in, nb_out) in enumerate(zip(nbs_in, nbs_out)): money, names = value_to_monetary_unit(nb_in) self.assertEqual(money[0], nb_out) @@ -67,8 +76,15 @@ def 
test_value_to_monetary_unit_0inf_pass(self): """Test money_unit function""" nbs_in = [-math.inf, 0, 1e-10, 1e-5, math.inf] nbs_out = [-math.inf, 0, 1e-10, 1e-5, math.inf] - names_out = [ABBREV[1], ABBREV[1], ABBREV[1], ABBREV[1], - ABBREV[1], ABBREV[1], ABBREV[1]] + names_out = [ + ABBREV[1], + ABBREV[1], + ABBREV[1], + ABBREV[1], + ABBREV[1], + ABBREV[1], + ABBREV[1], + ] for j, (nb_in, nb_out) in enumerate(zip(nbs_in, nbs_out)): money, names = value_to_monetary_unit(nb_in) self.assertEqual(money[0], nb_out) @@ -79,16 +95,29 @@ def test_value_to_monetary_unit_nan_pass(self): nb_in = math.nan money, name = value_to_monetary_unit(nb_in) self.assertTrue(math.isnan(money[0])) - self.assertEqual(name, '') - + self.assertEqual(name, "") def test_value_to_monetary_unit_sigdig_pass(self): """Test money_unit function with significant digits""" - nbs_in = [-1e10*1.2345, -1e6*1.2345, -1e2*1.2345, 0, 1e3*1.2345, - 1e7*1.2345, 1e11*1.2345] + nbs_in = [ + -1e10 * 1.2345, + -1e6 * 1.2345, + -1e2 * 1.2345, + 0, + 1e3 * 1.2345, + 1e7 * 1.2345, + 1e11 * 1.2345, + ] nbs_out = [-12.3, -1.23, -123, 0, 1.23, 12.3, 123] - names_out = [ABBREV[1e9], ABBREV[1e6], ABBREV[1], ABBREV[1], - ABBREV[1e3], ABBREV[1e6], ABBREV[1e9]] + names_out = [ + ABBREV[1e9], + ABBREV[1e6], + ABBREV[1], + ABBREV[1], + ABBREV[1e3], + ABBREV[1e6], + ABBREV[1e9], + ] for j, (nb_in, nb_out) in enumerate(zip(nbs_in, nbs_out)): money, names = value_to_monetary_unit(nb_in, n_sig_dig=3) self.assertEqual(money[0], nb_out) @@ -96,13 +125,17 @@ def test_value_to_monetary_unit_sigdig_pass(self): def test_value_to_monetary_unit_list_pass(self): """Test money_unit function with list of numbers""" - nbs_in = [-1e10*1.2345, -1e9*1.2345] + nbs_in = [-1e10 * 1.2345, -1e9 * 1.2345] nbs_out = [-12.3, -1.23] name_out = ABBREV[1e9] money, name = value_to_monetary_unit(nbs_in, n_sig_dig=3) self.assertTrue(np.array_equal(money, nbs_out)) self.assertEqual(name, name_out) - nbs_in = [1e4*1.2345, 1e3*1.2345, 1e2*1.2345,] + nbs_in = [ + 1e4 * 1.2345, + 1e3 * 1.2345, + 1e2 * 1.2345, + ] nbs_out = [12.3, 1.23, 0.123] name_out = ABBREV[1e3] money, name = value_to_monetary_unit(nbs_in, n_sig_dig=3) @@ -111,13 +144,13 @@ def test_value_to_monetary_unit_list_pass(self): def test_value_to_monetary_unit_list_0inf_pass(self): """Test money_unit function with list of numbers""" - nbs_in = [-1e10*1.2345, -1e9*1.2345, 0, math.inf] + nbs_in = [-1e10 * 1.2345, -1e9 * 1.2345, 0, math.inf] nbs_out = [-12.3, -1.23, 0, math.inf] name_out = ABBREV[1e9] money, name = value_to_monetary_unit(nbs_in, n_sig_dig=3) self.assertTrue(np.array_equal(money, nbs_out)) self.assertEqual(name, name_out) - nbs_in = [1e4*1.2345, 1e3*1.2345, 1e2*1.2345, 0, math.inf] + nbs_in = [1e4 * 1.2345, 1e3 * 1.2345, 1e2 * 1.2345, 0, math.inf] nbs_out = [12.3, 1.23, 0.123, 0, math.inf] name_out = ABBREV[1e3] money, name = value_to_monetary_unit(nbs_in, n_sig_dig=3) @@ -126,7 +159,7 @@ def test_value_to_monetary_unit_list_0inf_pass(self): def test_value_to_monetary_unit_list_nan_pass(self): """Test money_unit function with list of numbers""" - nbs_in = [-1e10*1.2345, -1e9*1.2345, math.nan] + nbs_in = [-1e10 * 1.2345, -1e9 * 1.2345, math.nan] nbs_out = [-12.3, -1.23, math.nan] name_out = ABBREV[1e9] money, name = value_to_monetary_unit(nbs_in, n_sig_dig=3) @@ -134,8 +167,9 @@ def test_value_to_monetary_unit_list_nan_pass(self): self.assertTrue(np.array_equal(money[:-1], nbs_out[:-1])) self.assertEqual(name, name_out) + class TestSafeDivide(unittest.TestCase): - + def test_scalar_division(self): 
self.assertEqual(safe_divide(10, 2), 5) self.assertEqual(safe_divide(-10, 5), -2) @@ -145,34 +179,47 @@ def test_scalar_division_by_zero(self): self.assertEqual(safe_divide(1, 0, replace_with=0), 0) def test_array_division(self): - np.testing.assert_array_equal(safe_divide(np.array([10, 20, 30]), np.array([2, 5, 10])), np.array([5, 4, 3])) + np.testing.assert_array_equal( + safe_divide(np.array([10, 20, 30]), np.array([2, 5, 10])), + np.array([5, 4, 3]), + ) def test_array_division_by_zero(self): - np.testing.assert_array_equal(safe_divide(np.array([1, 0, 3]), np.array([0, 0, 1])), np.array([np.nan, np.nan, 3])) - np.testing.assert_array_equal(safe_divide(np.array([1, 0, 3]), np.array([0, 0, 1]), replace_with=0), np.array([0, 0, 3])) + np.testing.assert_array_equal( + safe_divide(np.array([1, 0, 3]), np.array([0, 0, 1])), + np.array([np.nan, np.nan, 3]), + ) + np.testing.assert_array_equal( + safe_divide(np.array([1, 0, 3]), np.array([0, 0, 1]), replace_with=0), + np.array([0, 0, 3]), + ) def test_list_division_by_zero(self): list_num = [10, 0, 30] list_denom = [2, 0, 10] - expected_result = [5.0, np.nan, 3.0] - np.testing.assert_array_almost_equal(safe_divide(list_num, list_denom), expected_result) + expected_result = [5.0, np.nan, 3.0] + np.testing.assert_array_almost_equal( + safe_divide(list_num, list_denom), expected_result + ) def test_list_division(self): list_num = [10, 20, 30] list_denom = [2, 5, 10] expected_result = [5.0, 4.0, 3.0] - np.testing.assert_array_almost_equal(safe_divide(list_num, list_denom), expected_result) + np.testing.assert_array_almost_equal( + safe_divide(list_num, list_denom), expected_result + ) def test_nan_handling(self): self.assertTrue(np.isnan(safe_divide(np.nan, 1))) self.assertTrue(np.isnan(safe_divide(1, np.nan))) self.assertEqual(safe_divide(np.nan, 1, replace_with=0), 0) self.assertEqual(safe_divide(1, np.nan, replace_with=0), 0) - + def test_nan_handling_in_arrays(self): np.testing.assert_array_equal( safe_divide(np.array([1, np.nan, 3]), np.array([3, 2, 0])), - np.array([1/3, np.nan, np.nan]) + np.array([1 / 3, np.nan, np.nan]), ) def test_nan_handling_in_scalars(self): @@ -181,6 +228,7 @@ def test_nan_handling_in_scalars(self): self.assertEqual(safe_divide(np.nan, 1, replace_with=0), 0) self.assertEqual(safe_divide(1, np.nan, replace_with=0), 0) + # Execute Tests if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestDigits) diff --git a/climada/util/test/test_yearsets.py b/climada/util/test/test_yearsets.py index 2adb5e0e6..b5fae036d 100755 --- a/climada/util/test/test_yearsets.py +++ b/climada/util/test/test_yearsets.py @@ -19,42 +19,56 @@ Test of dates_times module """ +import collections import unittest + import numpy as np -import collections +import climada.util.dates_times as u_dt import climada.util.yearsets as yearsets from climada.engine import Impact -import climada.util.dates_times as u_dt - IMP = Impact() -IMP.at_event = np.arange(10,110,10) -IMP.frequency = np.array(np.ones(10)*0.2) - -SAMPLING_VECT = [np.array([0]), np.array([4]), np.array([1]), np.array([2, 5, 7, 9, 6]), - np.array([8]), np.array([3]), np.array([2, 6]), np.array([1]), - np.array([3,5]), np.array([])] +IMP.at_event = np.arange(10, 110, 10) +IMP.frequency = np.array(np.ones(10) * 0.2) + +SAMPLING_VECT = [ + np.array([0]), + np.array([4]), + np.array([1]), + np.array([2, 5, 7, 9, 6]), + np.array([8]), + np.array([3]), + np.array([2, 6]), + np.array([1]), + np.array([3, 5]), + np.array([]), +] YEAR_LIST = list(range(2000, 2010)) + class 
TestYearSets(unittest.TestCase): """Test yearset functions""" def test_impact_yearset(self): """Test computing a yearly impact (yimp) for a given list of years (YEAR_LIST) from an impact (IMP) and a sampling vector (SAMPLING_VECT)""" - yimp, sampling_vect = yearsets.impact_yearset(IMP, YEAR_LIST, correction_fac=False) + yimp, sampling_vect = yearsets.impact_yearset( + IMP, YEAR_LIST, correction_fac=False + ) self.assertAlmostEqual(len(sampling_vect), len(YEAR_LIST)) def test_impact_yearset_sampling_vect(self): """Test computing a yearly impact (yimp) for a given list of years (YEAR_LIST) from an impact (IMP) and a sampling vector (SAMPLING_VECT)""" - yimp = yearsets.impact_yearset_from_sampling_vect(IMP, YEAR_LIST, SAMPLING_VECT, False) + yimp = yearsets.impact_yearset_from_sampling_vect( + IMP, YEAR_LIST, SAMPLING_VECT, False + ) self.assertAlmostEqual(yimp.at_event[3], 340) - self.assertEqual(u_dt.date_to_str(yimp.date)[0], '2000-01-01') + self.assertEqual(u_dt.date_to_str(yimp.date)[0], "2000-01-01") self.assertAlmostEqual(np.sum(yimp.at_event), 770) def test_sample_from_poisson(self): @@ -71,14 +85,29 @@ def test_sample_from_poisson(self): def test_sample_events(self): """Test the sampling of 34 events out of a pool of 20 events.""" - events_per_year = np.array([0, 2, 2, 2, 1, 2, 3, 2, 2, 0, 2, 1, 2, 2, 2, 3, 5, 0, 1, 0]) - frequencies = np.array(np.ones(20)*0.2) + events_per_year = np.array( + [0, 2, 2, 2, 1, 2, 3, 2, 2, 0, 2, 1, 2, 2, 2, 3, 5, 0, 1, 0] + ) + frequencies = np.array(np.ones(20) * 0.2) sampling_vect = yearsets.sample_events(events_per_year, frequencies) self.assertEqual(len(sampling_vect), len(events_per_year)) - self.assertEqual(len(np.concatenate(sampling_vect).ravel()), np.sum(events_per_year)) - self.assertEqual(len(np.unique(list(collections.Counter(np.concatenate(sampling_vect).ravel()).values()))), 2) + self.assertEqual( + len(np.concatenate(sampling_vect).ravel()), np.sum(events_per_year) + ) + self.assertEqual( + len( + np.unique( + list( + collections.Counter( + np.concatenate(sampling_vect).ravel() + ).values() + ) + ) + ), + 2, + ) def test_computing_imp_per_year(self): """Test the calculation of impacts per year from a given sampling dictionary.""" @@ -93,6 +122,7 @@ def test_correction_fac(self): self.assertAlmostEqual(correction_factor, 1.42857143) + # Execute Tests if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestYearSets) diff --git a/climada/util/value_representation.py b/climada/util/value_representation.py index 9af0d258e..2590efa13 100755 --- a/climada/util/value_representation.py +++ b/climada/util/value_representation.py @@ -21,11 +21,11 @@ @author: ckropf """ +import decimal import logging import math -import decimal -import numpy as np +import numpy as np LOGGER = logging.getLogger(__name__) diff --git a/climada/util/yearsets.py b/climada/util/yearsets.py index 0e30102f8..1e1582719 100755 --- a/climada/util/yearsets.py +++ b/climada/util/yearsets.py @@ -15,6 +15,7 @@ import copy import logging + import numpy as np from numpy.random import default_rng @@ -22,6 +23,7 @@ LOGGER = logging.getLogger(__name__) + def impact_yearset(imp, sampled_years, lam=None, correction_fac=True, seed=None): """Create a yearset of impacts (yimp) containing a probabilistic impact for each year in the sampled_years list by sampling events from the impact received as input with a @@ -58,37 +60,43 @@ def impact_yearset(imp, sampled_years, lam=None, correction_fac=True, seed=None) sub-array per sampled_year, which contains the event_ids 
of the events used to calculate the annual impacts. Can be used to re-create the exact same yimp. - """ + """ n_sampled_years = len(sampled_years) - #create sampling vector + # create sampling vector if not lam: lam = np.sum(imp.frequency) events_per_year = sample_from_poisson(n_sampled_years, lam, seed=seed) sampling_vect = sample_events(events_per_year, imp.frequency, seed=seed) - #compute impact per sampled_year + # compute impact per sampled_year imp_per_year = compute_imp_per_year(imp, sampling_vect) - #copy imp object as basis for the yimp object + # copy imp object as basis for the yimp object yimp = copy.deepcopy(imp) - #save imp_per_year in yimp - if correction_fac: #adjust for sampling error + # save imp_per_year in yimp + if correction_fac: # adjust for sampling error yimp.at_event = imp_per_year / calculate_correction_fac(imp_per_year, imp) else: yimp.at_event = imp_per_year - #save calculations in yimp - yimp.event_id = np.arange(1, n_sampled_years+1) - yimp.date = u_dt.str_to_date([str(date) + '-01-01' for date in sampled_years]) - yimp.frequency = np.ones(n_sampled_years)*sum(len(row) for row in sampling_vect - )/n_sampled_years + # save calculations in yimp + yimp.event_id = np.arange(1, n_sampled_years + 1) + yimp.date = u_dt.str_to_date([str(date) + "-01-01" for date in sampled_years]) + yimp.frequency = ( + np.ones(n_sampled_years) + * sum(len(row) for row in sampling_vect) + / n_sampled_years + ) return yimp, sampling_vect -def impact_yearset_from_sampling_vect(imp, sampled_years, sampling_vect, correction_fac=True): + +def impact_yearset_from_sampling_vect( + imp, sampled_years, sampling_vect, correction_fac=True +): """Create a yearset of impacts (yimp) containing a probabilistic impact for each year in the sampled_years list by sampling events from the impact received as input following the sampling vector provided. 
@@ -124,23 +132,25 @@ def impact_yearset_from_sampling_vect(imp, sampled_years, sampling_vect, correct """ - #compute impact per sampled_year + # compute impact per sampled_year imp_per_year = compute_imp_per_year(imp, sampling_vect) - #copy imp object as basis for the yimp object + # copy imp object as basis for the yimp object yimp = copy.deepcopy(imp) - - if correction_fac: #adjust for sampling error + if correction_fac: # adjust for sampling error imp_per_year = imp_per_year / calculate_correction_fac(imp_per_year, imp) - #save calculations in yimp + # save calculations in yimp yimp.at_event = imp_per_year n_sampled_years = len(sampled_years) - yimp.event_id = np.arange(1, n_sampled_years+1) - yimp.date = u_dt.str_to_date([str(date) + '-01-01' for date in sampled_years]) - yimp.frequency = np.ones(n_sampled_years)*sum(len(row) for row in sampling_vect - )/n_sampled_years + yimp.event_id = np.arange(1, n_sampled_years + 1) + yimp.date = u_dt.str_to_date([str(date) + "-01-01" for date in sampled_years]) + yimp.frequency = ( + np.ones(n_sampled_years) + * sum(len(row) for row in sampling_vect) + / n_sampled_years + ) return yimp @@ -165,7 +175,7 @@ def sample_from_poisson(n_sampled_years, lam, seed=None): """ if seed is not None: np.random.seed(seed) - return np.round(np.random.poisson(lam=lam, size=n_sampled_years)).astype('int') + return np.round(np.random.poisson(lam=lam, size=n_sampled_years)).astype("int") def sample_events(events_per_year, freqs_orig, seed=None): @@ -199,40 +209,45 @@ def sample_events(events_per_year, freqs_orig, seed=None): freqs = freqs_orig indices = indices_orig - #sample events for each sampled year + # sample events for each sampled year for amount_events in events_per_year: - #if there are not enough input events, choice with no replace will fail + # if there are not enough input events, choice with no replace will fail if amount_events > len(freqs_orig): - raise ValueError(f"cannot sample {amount_events} distinct events for a single year" - f" when there are only {len(freqs_orig)} input events") - - #add the original indices and frequencies to the pool if there are less events - #in the pool than needed to fill the year one is sampling for - #or if the pool is empty (not covered in case amount_events is 0) + raise ValueError( + f"cannot sample {amount_events} distinct events for a single year" + f" when there are only {len(freqs_orig)} input events" + ) + + # add the original indices and frequencies to the pool if there are less events + # in the pool than needed to fill the year one is sampling for + # or if the pool is empty (not covered in case amount_events is 0) if len(np.unique(indices)) < amount_events or len(indices) == 0: indices = np.append(indices, indices_orig) freqs = np.append(freqs, freqs_orig) - #ensure that each event only occurs once per sampled year + # ensure that each event only occurs once per sampled year unique_events = np.unique(indices, return_index=True)[0] - probab_dis = freqs[np.unique(indices, return_index=True)[1]]/( - np.sum(freqs[np.unique(indices, return_index=True)[1]])) + probab_dis = freqs[np.unique(indices, return_index=True)[1]] / ( + np.sum(freqs[np.unique(indices, return_index=True)[1]]) + ) - #sample events + # sample events rng = default_rng(seed) - selected_events = rng.choice(unique_events, size=amount_events, replace=False, - p=probab_dis).astype('int') + selected_events = rng.choice( + unique_events, size=amount_events, replace=False, p=probab_dis + ).astype("int") - #determine used events to remove them from 
sampling pool + # determine used events to remove them from sampling pool idx_to_remove = [np.where(indices == event)[0][0] for event in selected_events] indices = np.delete(indices, idx_to_remove) freqs = np.delete(freqs, idx_to_remove) - #save sampled events in sampling vector + # save sampled events in sampling vector sampling_vect.append(selected_events) return sampling_vect + def compute_imp_per_year(imp, sampling_vect): """Sample annual impacts from the given event_impacts according to the sampling dictionary @@ -251,11 +266,13 @@ def compute_imp_per_year(imp, sampling_vect): Sampled impact per year (length = sampled_years) """ - imp_per_year = [np.sum(imp.at_event[list(sampled_events)]) for sampled_events in - sampling_vect] + imp_per_year = [ + np.sum(imp.at_event[list(sampled_events)]) for sampled_events in sampling_vect + ] return np.array(imp_per_year) + def calculate_correction_fac(imp_per_year, imp): """Calculate a correction factor that can be used to scale the yimp in such a way that the expected annual impact (eai) of the yimp amounts to the eai @@ -274,10 +291,10 @@ def calculate_correction_fac(imp_per_year, imp): The correction factor is calculated as imp_eai/yimp_eai """ - yimp_eai = np.sum(imp_per_year)/len(imp_per_year) - imp_eai = np.sum(imp.frequency*imp.at_event) - correction_factor = imp_eai/yimp_eai - LOGGER.info("The correction factor amounts to %s", (correction_factor-1)*100) + yimp_eai = np.sum(imp_per_year) / len(imp_per_year) + imp_eai = np.sum(imp.frequency * imp.at_event) + correction_factor = imp_eai / yimp_eai + LOGGER.info("The correction factor amounts to %s", (correction_factor - 1) * 100) # if correction_factor > 0.1: # tex = raw_input("Do you want to exclude small events?") From 1b78bdff986107bd2e29d7dac5e6ab1240920b27 Mon Sep 17 00:00:00 2001 From: emanuel-schmid Date: Sun, 20 Oct 2024 16:29:50 +0200 Subject: [PATCH 06/12] import Impact from climada.engine.impact in order to avoid circular imports --- climada/engine/cost_benefit.py | 2 +- climada/engine/impact_calc.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/climada/engine/cost_benefit.py b/climada/engine/cost_benefit.py index ef3e1ec3a..99387aab6 100644 --- a/climada/engine/cost_benefit.py +++ b/climada/engine/cost_benefit.py @@ -31,7 +31,7 @@ from matplotlib.patches import FancyArrowPatch, Rectangle from tabulate import tabulate -from climada.engine import Impact, ImpactFreqCurve +from climada.engine.impact import Impact, ImpactFreqCurve from climada.engine.impact_calc import ImpactCalc LOGGER = logging.getLogger(__name__) diff --git a/climada/engine/impact_calc.py b/climada/engine/impact_calc.py index cf7da1b30..713cda324 100644 --- a/climada/engine/impact_calc.py +++ b/climada/engine/impact_calc.py @@ -28,7 +28,7 @@ from scipy import sparse from climada import CONFIG -from climada.engine import Impact +from climada.engine.impact import Impact LOGGER = logging.getLogger(__name__) From 87cb6fd43d76383f641211aecba0b37630c4e44c Mon Sep 17 00:00:00 2001 From: emanuel-schmid Date: Sun, 20 Oct 2024 16:50:51 +0200 Subject: [PATCH 07/12] avoid circular imports --- climada/engine/unsequa/calc_base.py | 2 +- climada/engine/unsequa/calc_cost_benefit.py | 5 +++-- climada/engine/unsequa/calc_delta_climate.py | 4 +++- climada/engine/unsequa/calc_impact.py | 4 +++- 4 files changed, 10 insertions(+), 5 deletions(-) diff --git a/climada/engine/unsequa/calc_base.py b/climada/engine/unsequa/calc_base.py index fd7f73f89..4ec8e55b0 100644 --- a/climada/engine/unsequa/calc_base.py +++ 
b/climada/engine/unsequa/calc_base.py @@ -27,7 +27,7 @@ import numpy as np import pandas as pd -from climada.engine.unsequa import UncOutput +from climada.engine.unsequa.unc_output import UncOutput from climada.util.value_representation import sig_dig as u_sig_dig LOGGER = logging.getLogger(__name__) diff --git a/climada/engine/unsequa/calc_cost_benefit.py b/climada/engine/unsequa/calc_cost_benefit.py index 2078eaf89..b42e76da1 100644 --- a/climada/engine/unsequa/calc_cost_benefit.py +++ b/climada/engine/unsequa/calc_cost_benefit.py @@ -26,17 +26,18 @@ import time from typing import Optional, Union -import numpy as np import pandas as pd import pathos.multiprocessing as mp from climada.engine.cost_benefit import CostBenefit -from climada.engine.unsequa import Calc, InputVar, UncCostBenefitOutput from climada.engine.unsequa.calc_base import ( + Calc, _multiprocess_chunksize, _sample_parallel_iterator, _transpose_chunked_data, ) +from climada.engine.unsequa.input_var import InputVar +from climada.engine.unsequa.unc_output import UncCostBenefitOutput from climada.entity import Entity from climada.hazard import Hazard from climada.util import log_level diff --git a/climada/engine/unsequa/calc_delta_climate.py b/climada/engine/unsequa/calc_delta_climate.py index 1c56c3fba..0ec1fb3af 100644 --- a/climada/engine/unsequa/calc_delta_climate.py +++ b/climada/engine/unsequa/calc_delta_climate.py @@ -31,12 +31,14 @@ import pathos.multiprocessing as mp from climada.engine import ImpactCalc -from climada.engine.unsequa import Calc, InputVar, UncImpactOutput from climada.engine.unsequa.calc_base import ( + Calc, _multiprocess_chunksize, _sample_parallel_iterator, _transpose_chunked_data, ) +from climada.engine.unsequa.input_var import InputVar +from climada.engine.unsequa.unc_output import UncImpactOutput from climada.entity import Exposures, ImpactFuncSet from climada.hazard import Hazard from climada.util import log_level diff --git a/climada/engine/unsequa/calc_impact.py b/climada/engine/unsequa/calc_impact.py index 6b6a8773c..061b3e3a2 100644 --- a/climada/engine/unsequa/calc_impact.py +++ b/climada/engine/unsequa/calc_impact.py @@ -31,12 +31,14 @@ import pathos.multiprocessing as mp from climada.engine import ImpactCalc -from climada.engine.unsequa import Calc, InputVar, UncImpactOutput from climada.engine.unsequa.calc_base import ( + Calc, _multiprocess_chunksize, _sample_parallel_iterator, _transpose_chunked_data, ) +from climada.engine.unsequa.input_var import InputVar +from climada.engine.unsequa.unc_output import UncImpactOutput from climada.entity import Exposures, ImpactFuncSet from climada.hazard import Hazard from climada.util import log_level From 5143b99f6d9500f28275e77ab4cb4a39887da4e4 Mon Sep 17 00:00:00 2001 From: emanuel-schmid Date: Sun, 20 Oct 2024 18:02:53 +0200 Subject: [PATCH 08/12] pre-commit run --all-files --- .github/ISSUE_TEMPLATE/bug_report.md | 4 +- .github/pull_request_template.md | 4 +- .github/scripts/make_release.py | 4 +- .github/scripts/prepare_release.py | 55 ++- .github/scripts/setup_devbranch.py | 22 +- MANIFEST.in | 2 +- climada.conf | 2 +- .../data/demo/demo_emdat_impact_data_2020.csv | 2 +- climada/data/system/GDP_TWN_IMF_WEO_data.csv | 2 +- .../system/WEALTH2GDP_factors_CRI_2016.csv | 2 +- .../data/emdat_testdata_BGD_USA_1970-2017.csv | 8 +- .../data/emdat_testdata_fake_2007-2011.csv | 2 +- climada/hazard/test/data/trac_short_test.csv | 2 +- doc/Makefile | 2 +- doc/climada/climada.engine.rst | 1 - doc/climada/climada.entity.disc_rates.rst | 1 - 
.../climada.entity.exposures.litpop.rst | 1 - doc/climada/climada.entity.exposures.rst | 1 - doc/climada/climada.entity.impact_funcs.rst | 1 - doc/climada/climada.entity.measures.rst | 1 - doc/climada/climada.hazard.centroids.rst | 1 - doc/climada/climada.hazard.rst | 1 - doc/climada/climada.hazard.trop_cyclone.rst | 1 - doc/climada/climada.rst | 1 - doc/climada/climada.util.rst | 1 - doc/conf.py | 135 +++--- doc/guide/Guide_Configuration.ipynb | 27 +- doc/guide/Guide_Exception_Logging.ipynb | 13 +- doc/guide/Guide_Py_Performance.ipynb | 2 + doc/guide/Guide_PythonDos-n-Donts.ipynb | 14 +- doc/guide/Guide_Testing.ipynb | 11 +- ...ontinuous_integration_GitHub_actions.ipynb | 16 +- doc/index.rst | 2 +- doc/misc/AUTHORS.md | 2 +- doc/misc/CHANGELOG.md | 2 +- doc/misc/CONTRIBUTING.md | 2 +- doc/tutorial/0_intro_python.ipynb | 205 +++++---- doc/tutorial/1_main_climada.ipynb | 76 ++-- doc/tutorial/climada_engine_CostBenefit.ipynb | 173 +++++--- doc/tutorial/climada_engine_Forecast.ipynb | 143 +++--- doc/tutorial/climada_engine_Impact.ipynb | 168 ++++--- doc/tutorial/climada_engine_impact_data.ipynb | 112 +++-- doc/tutorial/climada_engine_unsequa.ipynb | 416 +++++++++++------- .../climada_engine_unsequa_helper.ipynb | 282 +++++++----- doc/tutorial/climada_entity_DiscRates.ipynb | 19 +- doc/tutorial/climada_entity_Exposures.ipynb | 124 +++--- ...mada_entity_Exposures_polygons_lines.ipynb | 267 +++++++---- .../climada_entity_ImpactFuncSet.ipynb | 18 +- doc/tutorial/climada_entity_LitPop.ipynb | 139 +++--- doc/tutorial/climada_entity_MeasureSet.ipynb | 67 +-- doc/tutorial/climada_hazard_Hazard.ipynb | 379 +++++++++++----- doc/tutorial/climada_hazard_StormEurope.ipynb | 25 +- doc/tutorial/climada_hazard_TropCyclone.ipynb | 93 ++-- doc/tutorial/climada_util_api_client.ipynb | 83 +++- doc/tutorial/climada_util_earth_engine.ipynb | 161 +++---- doc/tutorial/climada_util_yearsets.ipynb | 28 +- .../applications/eca_san_salvador/README.txt | 2 +- .../San_Salvador_Adaptacion.ipynb | 113 +++-- .../San_Salvador_Adaptation.ipynb | 116 +++-- .../San_Salvador_Parametric.ipynb | 52 ++- .../eca_san_salvador/San_Salvador_Risk.ipynb | 91 ++-- .../eca_san_salvador/functions_ss.py | 249 +++++++---- script/jenkins/set_config.py | 8 +- script/jenkins/test_data_api.py | 67 +-- script/jenkins/test_notebooks.py | 138 +++--- 65 files changed, 2548 insertions(+), 1616 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 27760ea62..8c086f8b9 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -12,7 +12,7 @@ A clear and concise description of what the bug is. **To Reproduce** Steps to reproduce the behavior/error: -1. +1. Code example: ```python @@ -29,7 +29,7 @@ If applicable, add screenshots to help explain your problem. **System Information (please complete the following information):** - Operating system and version: [e.g. Ubuntu 22.04, macOS 14.3.1, Windows 10] - - Python version: [e.g. 3.10] + - Python version: [e.g. 
3.10] (to obtain this information execute > import sys >print(sys.version)) **Additional context** diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index ee5328299..b1e66a575 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -1,6 +1,6 @@ Changes proposed in this PR: -- -- +- +- This PR fixes # diff --git a/.github/scripts/make_release.py b/.github/scripts/make_release.py index 5c6260d4d..cdba6755c 100644 --- a/.github/scripts/make_release.py +++ b/.github/scripts/make_release.py @@ -13,9 +13,9 @@ def get_version() -> str: """Return the current version number, based on the _version.py file.""" [version_file] = glob.glob("climada*/_version.py") - with open(version_file, 'r', encoding="UTF-8") as vfp: + with open(version_file, "r", encoding="UTF-8") as vfp: content = vfp.read() - regex = r'^__version__\s*=\s*[\'\"](.*)[\'\"]\s*$' + regex = r"^__version__\s*=\s*[\'\"](.*)[\'\"]\s*$" mtch = re.match(regex, content) return mtch.group(1) diff --git a/.github/scripts/prepare_release.py b/.github/scripts/prepare_release.py index bce483b6f..eb0dd4c2b 100644 --- a/.github/scripts/prepare_release.py +++ b/.github/scripts/prepare_release.py @@ -5,7 +5,7 @@ - update version numbers in _version.py and setup.py - purge the "Unreleased" section of CHANGELOG.md and rename it to the new version number -- copy the README.md file to doc/misc/README.md, +- copy the README.md file to doc/misc/README.md, but without the badges as they interfere with the sphinx doc builder All changes are immediately commited to the repository. @@ -38,28 +38,28 @@ def bump_version_number(version_number: str, level: str) -> str: """Return a copy of `version_number` with one level number incremented.""" major, minor, patch = version_number.split(".") if level == "major": - major = str(int(major)+1) + major = str(int(major) + 1) minor = "0" patch = "0" elif level == "minor": - minor = str(int(minor)+1) + minor = str(int(minor) + 1) patch = "0" elif level == "patch": - patch = str(int(patch)+1) + patch = str(int(patch) + 1) else: raise ValueError(f"level should be 'major', 'minor' or 'patch', not {level}") return ".".join([major, minor, patch]) def update_readme(_nvn): - """align doc/misc/README.md with ./README.md but remove the non-markdown header lines from """ - with open("README.md", 'r', encoding="UTF-8") as rmin: - lines = [line for line in rmin.readlines() if not line.startswith('[![')] + """align doc/misc/README.md with ./README.md but remove the non-markdown header lines from""" + with open("README.md", "r", encoding="UTF-8") as rmin: + lines = [line for line in rmin.readlines() if not line.startswith("[![")] while not lines[0].strip(): lines = lines[1:] - with open("doc/misc/README.md", 'w', encoding="UTF-8") as rmout: + with open("doc/misc/README.md", "w", encoding="UTF-8") as rmout: rmout.writelines(lines) - return GitFile('doc/misc/README.md') + return GitFile("doc/misc/README.md") def update_changelog(nvn): @@ -70,16 +70,16 @@ def update_changelog(nvn): release = [] section_name = None section = [] - with open("CHANGELOG.md", 'r', encoding="UTF-8") as changelog: + with open("CHANGELOG.md", "r", encoding="UTF-8") as changelog: for line in changelog.readlines(): - if line.startswith('#'): - if line.startswith('### '): + if line.startswith("#"): + if line.startswith("### "): if section: release.append((section_name, section)) section_name = line[4:].strip() section = [] - #print("tag:", section_name) - elif line.startswith('## '): + # print("tag:", 
section_name) + elif line.startswith("## "): if section: release.append((section_name, section)) if release: @@ -88,7 +88,7 @@ def update_changelog(nvn): release = [] section_name = None section = [] - #print("release:", release_name) + # print("release:", release_name) else: section.append(line) if section: @@ -96,7 +96,7 @@ def update_changelog(nvn): if release: releases.append((release_name, release)) - with open("CHANGELOG.md", 'w', encoding="UTF-8") as changelog: + with open("CHANGELOG.md", "w", encoding="UTF-8") as changelog: changelog.write("# Changelog\n\n") for release_name, release in releases: if release_name: @@ -107,7 +107,11 @@ def update_changelog(nvn): if any(ln.strip() for ln in section): if section_name: changelog.write(f"### {section_name}\n") - lines = [ln.strip() for ln in section if "code freeze date: " not in ln.lower()] + lines = [ + ln.strip() + for ln in section + if "code freeze date: " not in ln.lower() + ] if not section_name and release_name.lower() == nvn: print("setting date") for i, line in enumerate(lines): @@ -116,26 +120,26 @@ def update_changelog(nvn): lines[i] = f"Release date: {today}" changelog.write(re.sub("\n+$", "\n", "\n".join(lines))) changelog.write("\n") - return GitFile('CHANGELOG.md') + return GitFile("CHANGELOG.md") def update_version(nvn): """Update the _version.py file""" [file_with_version] = glob.glob("climada*/_version.py") - regex = r'(^__version__\s*=\s*[\'\"]).*([\'\"]\s*$)' + regex = r"(^__version__\s*=\s*[\'\"]).*([\'\"]\s*$)" return update_file(file_with_version, regex, nvn) def update_setup(new_version_number): """Update the setup.py file""" file_with_version = "setup.py" - regex = r'(^\s+version\s*=\s*[\'\"]).*([\'\"]\s*,\s*$)' + regex = r"(^\s+version\s*=\s*[\'\"]).*([\'\"]\s*,\s*$)" return update_file(file_with_version, regex, new_version_number) def update_file(file_with_version, regex, new_version_number): """Replace the version number(s) in a file, based on a rgular expression.""" - with open(file_with_version, 'r', encoding="UTF-8") as curf: + with open(file_with_version, "r", encoding="UTF-8") as curf: lines = curf.readlines() successfully_updated = False for i, line in enumerate(lines): @@ -145,14 +149,15 @@ def update_file(file_with_version, regex, new_version_number): successfully_updated = True if not successfully_updated: raise RuntimeError(f"cannot determine version of {file_with_version}") - with open(file_with_version, 'w', encoding="UTF-8") as newf: + with open(file_with_version, "w", encoding="UTF-8") as newf: for line in lines: newf.write(line) return GitFile(file_with_version) -class GitFile(): +class GitFile: """Helper class for `git add`.""" + def __init__(self, path): self.path = path @@ -166,8 +171,9 @@ def gitadd(self): ).stdout.decode("utf8") -class Git(): +class Git: """Helper class for `git commit`.""" + def __init__(self): _gitname = subprocess.run( ["git", "config", "--global", "user.name", "'climada'"], @@ -228,6 +234,7 @@ def prepare_new_release(level): if __name__ == "__main__": from sys import argv + try: LEVEL = argv[1] except IndexError: diff --git a/.github/scripts/setup_devbranch.py b/.github/scripts/setup_devbranch.py index 001390fa0..36c9e6c78 100644 --- a/.github/scripts/setup_devbranch.py +++ b/.github/scripts/setup_devbranch.py @@ -33,14 +33,15 @@ def get_last_version() -> str: def update_changelog(): """Insert a vanilla "Unreleased" section on top.""" - with open("CHANGELOG.md", 'r', encoding="UTF-8") as changelog: + with open("CHANGELOG.md", "r", encoding="UTF-8") as changelog: lines 
= changelog.readlines() if "## Unreleased" in lines: return - with open("CHANGELOG.md", 'w', encoding="UTF-8") as changelog: - changelog.write("""# Changelog + with open("CHANGELOG.md", "w", encoding="UTF-8") as changelog: + changelog.write( + """# Changelog ## Unreleased @@ -62,27 +63,28 @@ def update_changelog(): ### Removed -""") +""" + ) changelog.writelines(lines[2:]) def update_version(nvn): """Update the _version.py file""" [file_with_version] = glob.glob("climada*/_version.py") - regex = r'(^__version__\s*=\s*[\'\"]).*([\'\"]\s*$)' + regex = r"(^__version__\s*=\s*[\'\"]).*([\'\"]\s*$)" return update_file(file_with_version, regex, nvn) def update_setup(new_version_number): """Update the setup.py file""" file_with_version = "setup.py" - regex = r'(^\s+version\s*=\s*[\'\"]).*([\'\"]\s*,\s*$)' + regex = r"(^\s+version\s*=\s*[\'\"]).*([\'\"]\s*,\s*$)" return update_file(file_with_version, regex, new_version_number) def update_file(file_with_version, regex, new_version_number): """Replace the version number(s) in a file, based on a rgular expression.""" - with open(file_with_version, 'r', encoding="UTF-8") as curf: + with open(file_with_version, "r", encoding="UTF-8") as curf: lines = curf.readlines() successfully_updated = False for i, line in enumerate(lines): @@ -92,7 +94,7 @@ def update_file(file_with_version, regex, new_version_number): successfully_updated = True if not successfully_updated: raise RuntimeError(f"cannot determine version of {file_with_version}") - with open(file_with_version, 'w', encoding="UTF-8") as newf: + with open(file_with_version, "w", encoding="UTF-8") as newf: for line in lines: newf.write(line) @@ -100,10 +102,10 @@ def update_file(file_with_version, regex, new_version_number): def setup_devbranch(): """Adjust files after a release was published, i.e., apply the canonical deviations from main in develop. - + Just changes files, all `git` commands are in the setup_devbranch.sh file. 
""" - main_version = get_last_version().strip('v') + main_version = get_last_version().strip("v") semver = main_version.split(".") semver[-1] = f"{int(semver[-1]) + 1}-dev" dev_version = ".".join(semver) diff --git a/MANIFEST.in b/MANIFEST.in index 2c9965a94..fff806f53 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -4,4 +4,4 @@ graft climada/*/test/data graft climada/test/data graft data global-exclude .* -global-exclude *.py[co] \ No newline at end of file +global-exclude *.py[co] diff --git a/climada.conf b/climada.conf index 3d07e07ca..367928405 100644 --- a/climada.conf +++ b/climada.conf @@ -27,4 +27,4 @@ "supported_exposures_types": ["litpop", "crop_production", "base"] }, "log_level": "INFO" -} \ No newline at end of file +} diff --git a/climada/data/demo/demo_emdat_impact_data_2020.csv b/climada/data/demo/demo_emdat_impact_data_2020.csv index 55c72eaf4..3cf4f5c85 100644 --- a/climada/data/demo/demo_emdat_impact_data_2020.csv +++ b/climada/data/demo/demo_emdat_impact_data_2020.csv @@ -1073,4 +1073,4 @@ Dis No,Year,Seq,Disaster Group,Disaster Subgroup,Disaster Type,Disaster Subtype, 2020-0132-TON,2020,0132,Natural,Meteorological,Storm,Tropical cyclone,,Cyclone 'Harold',--,Tonga,TON,Polynesia,Oceania,"Tongatapu, 'Eua",,,,,,,,,Kph,,,,,2020,4,6,2020,4,9,,,1289,,1289,,,111000, 2020-0015-TUV,2020,0015,Natural,Meteorological,Storm,Tropical cyclone,,Cyclone 'Tino',Affected,Tuvalu,TUV,Polynesia,Oceania,,,,,,,Yes,,,Kph,,,,,2020,1,18,2020,1,18,,,,,,,,, 2020-0219-USA,2020,0219,Natural,Meteorological,Storm,Tropical cyclone,,Tropical storm 'Cristobal',Affected,United States of America (the),USA,Northern America,Americas,"errebonne, Plaquemines, Lafourche Parishes (Louisiana)",,,,,,Yes,,80,Kph,,,,,2020,6,7,2020,6,7,,,,,,,,, -2020-0132-VUT,2020,0132,Natural,Meteorological,Storm,Tropical cyclone,,Cyclone 'Harold',--,Vanuatu,VUT,Melanesia,Oceania,Pentecost and Espiritu Santo,,,,,,,,,Kph,,,,,2020,4,6,2020,4,9,4,,,,,,,, \ No newline at end of file +2020-0132-VUT,2020,0132,Natural,Meteorological,Storm,Tropical cyclone,,Cyclone 'Harold',--,Vanuatu,VUT,Melanesia,Oceania,Pentecost and Espiritu Santo,,,,,,,,,Kph,,,,,2020,4,6,2020,4,9,4,,,,,,,, diff --git a/climada/data/system/GDP_TWN_IMF_WEO_data.csv b/climada/data/system/GDP_TWN_IMF_WEO_data.csv index e0acd9898..e39f4cb62 100644 --- a/climada/data/system/GDP_TWN_IMF_WEO_data.csv +++ b/climada/data/system/GDP_TWN_IMF_WEO_data.csv @@ -3,4 +3,4 @@ TWN,Taiwan Province of China,"Gross domestic product, current prices",U.S. dolla TWN,Taiwan Province of China,"Gross domestic product, deflator",Index,,"See notes for: Gross domestic product, constant prices (National currency) Gross domestic product, current prices (National currency).",69.946,77.417,79.33,81.444,82.495,82.523,86.575,86.605,86.657,88.892,93.472,96.725,99.824,103.299,105.065,107.554,110.062,112.506,116.182,113.911,112.88,112.189,111.733,110.174,109.894,108.209,107.095,106.638,103.869,104.003,102.405,100,100.543,102.019,103.749,107.128,108.085,106.84,105.834,106.337,106.484,107.149,108.054,109.026,109.951,2018 TWN,Taiwan Province of China,"Gross domestic product per capita, current prices",U.S. 
dollars,Units,"See notes for: Gross domestic product, current prices (National currency) Population (Persons).","2,367.600","2,692.406","2,675.823","2,882.402","3,203.468","3,295.112","4,010.111","5,325.216","6,337.499","7,577.046","8,178.152","9,092.297","10,725.702","11,266.123","12,108.752","13,076.007","13,597.248","13,968.097","12,787.258","13,768.274","14,876.879","13,408.383","13,715.525","14,094.370","15,360.724","16,503.313","16,984.540","17,780.925","18,102.946","16,959.775","19,261.667","20,911.643","21,269.614","21,887.992","22,638.917","22,373.564","22,572.702","24,389.677","25,007.747","24,827.898","25,525.806","26,861.070","28,324.425","29,870.221","31,483.799",2018 ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, -"International Monetary Fund, World Economic Outlook Database, October 2019",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, \ No newline at end of file +"International Monetary Fund, World Economic Outlook Database, October 2019",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, diff --git a/climada/data/system/WEALTH2GDP_factors_CRI_2016.csv b/climada/data/system/WEALTH2GDP_factors_CRI_2016.csv index f63f0453a..8e8bb97c9 100644 --- a/climada/data/system/WEALTH2GDP_factors_CRI_2016.csv +++ b/climada/data/system/WEALTH2GDP_factors_CRI_2016.csv @@ -169,4 +169,4 @@ Venezuela,VEN,0.29407,0.35328 Vietnam,VNM,1.23241,1.66724 Yemen,YEM,1.18584,1.76063 Zambia,ZMB,0.10663,0.32193 -Zimbabwe,ZWE,0.20161,1.65566 \ No newline at end of file +Zimbabwe,ZWE,0.20161,1.65566 diff --git a/climada/engine/test/data/emdat_testdata_BGD_USA_1970-2017.csv b/climada/engine/test/data/emdat_testdata_BGD_USA_1970-2017.csv index 5ca0ec256..00748e54a 100644 --- a/climada/engine/test/data/emdat_testdata_BGD_USA_1970-2017.csv +++ b/climada/engine/test/data/emdat_testdata_BGD_USA_1970-2017.csv @@ -691,7 +691,7 @@ Start date,End date,Country,ISO,Location,Latitude,Longitude,Magnitude value,Magn 02.05.02,08.05.02,United States of America (the),USA,"Pike district (Kentucky province), Virginia province",,,,Km2,Flood,Riverine flood,--,--,9,1000,13000,0,,2002-0266 05.05.02,05.05.02,United States of America (the),USA,"Happy town (Randall, Swisher districts, Texas province)",,,,Kph,Storm,Convective storm,--,--,2,183,0,0,,2002-0283 21.04.02,21.04.02,United States of America (the),USA,"Wayne, Jefferson districts (Illinois province)",,,,Kph,Storm,Convective storm,--,--,1,12,4000,0,,2002-0287 -27.04.02,03.05.02,United States of America (the),USA,"Breckinridge, Meade, Crittenden, Webster, Hopkins, Ohio, Hardin, Edmonson districts (Kentucky province), Bollinger, Howell districts (Missouri province), Charles, Calvert, Dorchester, Wicomico, Cecil districts (Maryland province), +27.04.02,03.05.02,United States of America (the),USA,"Breckinridge, Meade, Crittenden, Webster, Hopkins, Ohio, Hardin, Edmonson districts (Kentucky province), Bollinger, Howell districts (Missouri province), Charles, Calvert, Dorchester, Wicomico, Cecil districts (Maryland province), Illinois (Clay,Union, Johnson,Pope, Moultrie, Saline, Bond), Gordon district (Georgia province), Atchison district (Kansas province), Erie, Allegany districts (New York province), Stark district (Ohio province), Indiana, Mercer, Venango, Butler, Armstrong, Columbia, Lebanon, Allegheny districts (Pennsylvania province), Rutherford, Lake, Henry, Carter districts (Tennessee province), Virginia (Shenandoah, Greensville, Bedford, (Campbell, Nottoway, Prince George), Marshall district (West Virginia province), Pontotoc, Chickasaw districts (Mississippi 
province), Perry district (Indiana province)",,,290,Kph,Storm,Convective storm,--,--,10,100,2200000,2000500,,2002-0310 /04/2002,/04/2002,United States of America (the),USA,Arizona province,,,145,Km2,Wildfire,"Land fire (Brush, Bush, Pasture)",--,--,0,0,0,0,,2002-0312 @@ -858,7 +858,7 @@ Virginia (Shenandoah, Greensville, Bedford, (Campbell, Nottoway, Prince George), 26.06.07,06.07.07,United States of America (the),USA,"Wichita Falls area (Wichita district, Texas province), Georgetown areas (Williamson district, Texas province), Burnet, Marble Falls, Granite Shoals areas (Burnet district, Texas province), Granbury area (Hood district, Texas province), Lampasas, Parker, Eastland districts (Texas province), Miami, Commerce areas (Ottawa district, Oklahoma province), Shawnee, Tecumseh, Maud areas (Pottawatomie district, Oklahoma province), Oklahoma city (Oklahoma district, Oklahoma province), Waurika area (Jefferson district, Oklahoma province), Bartlesville, Dewey areas (Washington district, Oklahoma province), Love, Lincoln districts (Oklahoma province), Coffeyville area (Montgomery district, Kansas province), Osawatomie area (Miami district, Kansas province), Allen, Labette, Neosho, Wilson, Woodson districts (Kansas province), Rockville, Papinville areas (Bates district, Missouri province), Vernon district (Missouri province)",32.84,-97.17,507800,Km2,Flood,Riverine flood,--,Rain,8,5000,0,0,,2007-0244 19.06.07,20.06.07,United States of America (the),USA,New York province,42.23,-74.95,6500,Km2,Flood,Flash flood,Rain,--,4,120,0,0,,2007-0251 17.06.07,22.06.07,United States of America (the),USA,"North Texas, Oklahoma provinces",33.45,-97.3,34750,Km2,Flood,Riverine flood,--,--,10,750,28000,0,,2007-0254 -21.07.07,03.08.07,Bangladesh,BGD,"Goalanda village (Goalandaghat area, Rajbari district, Dhaka province), Aricha port (Shibalaya area, Manikganj district, Dhaka province), Bhagyakul village (Sreenagar area, Munshiganj district, Dhaka province), +21.07.07,03.08.07,Bangladesh,BGD,"Goalanda village (Goalandaghat area, Rajbari district, Dhaka province), Aricha port (Shibalaya area, Manikganj district, Dhaka province), Bhagyakul village (Sreenagar area, Munshiganj district, Dhaka province), Bandarban, Feni, Comilla districts (Chittagong province), Sirajganj district (Rajshahi province), Rangpur province",23.92,91.23,7000,Km2,Flood,Riverine flood,"Slide (land, mud, snow, rock)",--,1110,13771380,100000,0,,2007-0311 24.06.07,02.07.07,United States of America (the),USA,"Alpine, Amador, Calaveras, El Dorado, Mono, Placer, Tuolumne districts (California province)",,,,Km2,Wildfire,Forest fire,--,--,0,768,0,150000,,2007-0351 @@ -980,7 +980,7 @@ Bandarban, Feni, Comilla districts (Chittagong province), Sirajganj district (Ra 22.01.12,23.01.12,United States of America (the),USA,"Jefferson, Chilton districts (Alabama province)",,,240,Kph,Storm,Convective storm,--,--,2,100,175000,200000,,2012-0010 28.02.12,29.02.12,United States of America (the),USA,"Nebraska, Kansas, Missouri, Illinois, Indiana, Kentucky provinces",,,270,Kph,Storm,Convective storm,--,--,14,200,500000,450000,,2012-0055 02.03.12,04.03.12,United States of America (the),USA,"Alabama, Tennessee, Illinois, Kentucky, Indiana, Ohio, Georgia, Florida, Mississippi, North Carolina, Virginia provinces",,,112,Kph,Storm,Convective storm,Flood,Hail,41,0,5000000,2500000,,2012-0060 -06.04.12,06.04.12,Bangladesh,BGD,"Panchagarh, Rangpur, Nilphamari districts (Rangpur province), Noakhali, Comilla districts (Chittagong province), Narsingdi, Jamalpur, Faridpur, 
Shariatpur districts (Dhaka province), Jessore, Satkhira, Khulna, Chuadanga districts (Khulna province), Rajshahi district (Rajshahi province), Sylhet district (Sylhet province), +06.04.12,06.04.12,Bangladesh,BGD,"Panchagarh, Rangpur, Nilphamari districts (Rangpur province), Noakhali, Comilla districts (Chittagong province), Narsingdi, Jamalpur, Faridpur, Shariatpur districts (Dhaka province), Jessore, Satkhira, Khulna, Chuadanga districts (Khulna province), Rajshahi district (Rajshahi province), Sylhet district (Sylhet province), Bhola district (Barisal province)",,,56,Kph,Storm,Convective storm,Hail,--,25,55121,0,0,,2012-0082 02.04.12,03.04.12,United States of America (the),USA,"Dallas, Tarrant districts (Texas province)",,,,Kph,Storm,Convective storm,--,--,0,3300,1550000,800000,,2012-0122 14.04.12,15.04.12,United States of America (the),USA,"Oklahoma, Kansas, Iowa, Nebraska, South Dakota, Minnesota provinces",,,,Kph,Storm,Convective storm,--,--,6,297,1800000,910000,,2012-0156 @@ -1165,4 +1165,4 @@ Wilkes, Ashe )",,,140,Kph,Storm,Tropical cyclone,--,--,0,60,250000,0,Tropical de 03.11.17,12.12.17,Bangladesh,BGD,Cox’s Bazar ,,,,Vaccinated,Epidemic,Bacterial disease,--,--,15,789,0,0,Diphteria,2017-0556 06.03.17,09.03.17,United States of America (the),USA,"Missouri (Oak Grove in Jackson County, Clay and Clinton (Trimble, Plattsburg, Lathrop) counties), Iowa (Centerville in Appanoose county, Muscatine), Minnesota (Sherburne, Freeborn counties, Lake Ann in Carver county), Kansas (Wabaunsee, Pottawatomie and Butler counties), Wisconsin, Arkansas, Oklahoma, Illinois, Mississipi, Michigan, New-York, Pennsylvania, Massachussets, Ohio, Nebraska, Indiana",,,130,Kph,Storm,Convective storm,Hail,--,2,615,2200000,2000000,,2017-0563 25.03.17,28.03.17,United States of America (the),USA,"Texas (Justin in Denton, Collin, Rockwall, Lubbock counties, Seymour in Baylor, Dallas – Fort Worth metro area, Houston metro area), Oklahoma (El Reno in Canadian, Oklahoma city metro region, Caddo in Bryan, Cleveland South and East), Kansas (south), Kentucky, Tennessee, Mississippi, Alabama, Georgia, Indianapolis (Marion-IN)",,,175,Kph,Storm,Convective storm,Hail,Flood,1,0,2700000,2000000,,2017-0564 -/03/2017,/09/2017,United States of America (the),USA,"Upper Midwest, Northern Rockies and parts of the West",,,,Km2,Drought,Drought,--,--,0,0,2500000,1900000,,2017-9550 \ No newline at end of file +/03/2017,/09/2017,United States of America (the),USA,"Upper Midwest, Northern Rockies and parts of the West",,,,Km2,Drought,Drought,--,--,0,0,2500000,1900000,,2017-9550 diff --git a/climada/engine/test/data/emdat_testdata_fake_2007-2011.csv b/climada/engine/test/data/emdat_testdata_fake_2007-2011.csv index 6826050a4..3d6242746 100644 --- a/climada/engine/test/data/emdat_testdata_fake_2007-2011.csv +++ b/climada/engine/test/data/emdat_testdata_fake_2007-2011.csv @@ -4,4 +4,4 @@ Start date,End date,Country,ISO,Location,Latitude,Longitude,Magnitude value,Magn 15.01.09,26.01.09,Switzerland,CHE,Zurich,47.37,8.54,1,Km2,Flood,Riverine flood,--,--,0,0,2000,0,FakeFlood3,2009-0001 15.01.10,27.01.10,Switzerland,CHE,Zurich,47.37,8.54,1,Km2,Flood,Riverine flood,--,--,0,0,2000,0,FakeFlood4,2010-0001 15.01.11,28.01.11,Switzerland,CHE,Zurich,47.37,8.54,1,Km2,Flood,Riverine flood,--,--,0,0,2000,0,FakeFlood5,2011-0001 -15.01.11,28.01.11,Germany,DEU,Konstanz,22,22,2,Km2,Flood,Riverine flood,--,--,0,0,1000,0,FakeFlood5,2011-0001 \ No newline at end of file +15.01.11,28.01.11,Germany,DEU,Konstanz,22,22,2,Km2,Flood,Riverine 
flood,--,--,0,0,1000,0,FakeFlood5,2011-0001 diff --git a/climada/hazard/test/data/trac_short_test.csv b/climada/hazard/test/data/trac_short_test.csv index 79defb690..bacbd8c99 100644 --- a/climada/hazard/test/data/trac_short_test.csv +++ b/climada/hazard/test/data/trac_short_test.csv @@ -7,4 +7,4 @@ cgps_lat,cgps_lon,data_provider,gen_basin,ibtracsID,isotime,model,msize,ngps_lat 12.3,-31,hurdat_atl,NA,1951239N12334,1951082812,H08,101,12.3,-32.3,1,-999,1010,-999,0.1,0,6,25 12.3,-32.3,hurdat_atl,NA,1951239N12334,1951082818,H08,101,12.3,-33.6,1,-999,1010,-999,0.1,0,6,25 12.3,-33.6,hurdat_atl,NA,1951239N12334,1951082900,H08,101,12.3,-34.9,1,-999,1010,-999,0.1,0,6,25 -12.3,-34.9,hurdat_atl,NA,1951239N12334,1951082906,H08,101,12.3,-36.3,1,-999,1010,-999,0.1,0,6,25 \ No newline at end of file +12.3,-34.9,hurdat_atl,NA,1951239N12334,1951082906,H08,101,12.3,-36.3,1,-999,1010,-999,0.1,0,6,25 diff --git a/doc/Makefile b/doc/Makefile index 0a8a51eba..41c2d07bf 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -9,7 +9,7 @@ PAPER = # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -d _build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) ./ +ALLSPHINXOPTS = -d _build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) ./ .PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest diff --git a/doc/climada/climada.engine.rst b/doc/climada/climada.engine.rst index 91274418f..f21024fde 100644 --- a/doc/climada/climada.engine.rst +++ b/doc/climada/climada.engine.rst @@ -52,4 +52,3 @@ climada\.engine\.impact\_data module :members: :undoc-members: :show-inheritance: - diff --git a/doc/climada/climada.entity.disc_rates.rst b/doc/climada/climada.entity.disc_rates.rst index bc17051c6..4089561f0 100644 --- a/doc/climada/climada.entity.disc_rates.rst +++ b/doc/climada/climada.entity.disc_rates.rst @@ -8,4 +8,3 @@ climada\.entity\.disc\_rates\.base module :members: :undoc-members: :show-inheritance: - diff --git a/doc/climada/climada.entity.exposures.litpop.rst b/doc/climada/climada.entity.exposures.litpop.rst index 9e65391b0..62e233a06 100644 --- a/doc/climada/climada.entity.exposures.litpop.rst +++ b/doc/climada/climada.entity.exposures.litpop.rst @@ -24,4 +24,3 @@ climada\.entity\.exposures\.litpop\.nightlight module :members: :undoc-members: :show-inheritance: - diff --git a/doc/climada/climada.entity.exposures.rst b/doc/climada/climada.entity.exposures.rst index 30f175d10..952af75e8 100644 --- a/doc/climada/climada.entity.exposures.rst +++ b/doc/climada/climada.entity.exposures.rst @@ -12,4 +12,3 @@ climada\.entity\.exposures\.base module :members: :undoc-members: :show-inheritance: - diff --git a/doc/climada/climada.entity.impact_funcs.rst b/doc/climada/climada.entity.impact_funcs.rst index 91f88ff77..90ad9441b 100644 --- a/doc/climada/climada.entity.impact_funcs.rst +++ b/doc/climada/climada.entity.impact_funcs.rst @@ -32,4 +32,3 @@ climada\.entity\.impact\_funcs\.trop\_cyclone module :members: :undoc-members: :show-inheritance: - diff --git a/doc/climada/climada.entity.measures.rst b/doc/climada/climada.entity.measures.rst index a7d16c650..8e63a2082 100644 --- a/doc/climada/climada.entity.measures.rst +++ b/doc/climada/climada.entity.measures.rst @@ -16,4 +16,3 @@ climada\.entity\.measures\.measure\_set module :members: :undoc-members: :show-inheritance: - diff --git a/doc/climada/climada.hazard.centroids.rst b/doc/climada/climada.hazard.centroids.rst index 8038d406e..7a9c65a90 100644 --- 
a/doc/climada/climada.hazard.centroids.rst +++ b/doc/climada/climada.hazard.centroids.rst @@ -8,4 +8,3 @@ climada\.hazard\.centroids\.centr module :members: :undoc-members: :show-inheritance: - diff --git a/doc/climada/climada.hazard.rst b/doc/climada/climada.hazard.rst index 8e4767ae6..3b3bef00b 100644 --- a/doc/climada/climada.hazard.rst +++ b/doc/climada/climada.hazard.rst @@ -69,4 +69,3 @@ climada\.hazard\.tc\_tracks\_synth module :members: :undoc-members: :show-inheritance: - diff --git a/doc/climada/climada.hazard.trop_cyclone.rst b/doc/climada/climada.hazard.trop_cyclone.rst index c703126ec..caafdcd93 100644 --- a/doc/climada/climada.hazard.trop_cyclone.rst +++ b/doc/climada/climada.hazard.trop_cyclone.rst @@ -16,4 +16,3 @@ climada\.hazard\.trop\_cyclone\.trop\_cyclone\_windfields module :members: :undoc-members: :show-inheritance: - diff --git a/doc/climada/climada.rst b/doc/climada/climada.rst index e248812bc..557532912 100644 --- a/doc/climada/climada.rst +++ b/doc/climada/climada.rst @@ -8,4 +8,3 @@ Software documentation per package climada.entity climada.hazard climada.util - diff --git a/doc/climada/climada.util.rst b/doc/climada/climada.util.rst index 820fd43f7..98df93aec 100644 --- a/doc/climada/climada.util.rst +++ b/doc/climada/climada.util.rst @@ -152,4 +152,3 @@ climada\.util\.yearsets module :members: :undoc-members: :show-inheritance: - diff --git a/doc/conf.py b/doc/conf.py index 02e19ecc0..b4ef1dc69 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -18,49 +18,52 @@ # is relative to the documentation root, use os.path.abspath to make it # absolute, like shown here. # sys.path.append(os.path.abspath('sphinxext')) -sys.path.insert(0, os.path.abspath('../')) +sys.path.insert(0, os.path.abspath("../")) # set version from climada import _version + __version__ = _version.__version__ # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['matplotlib.sphinxext.plot_directive', - 'IPython.sphinxext.ipython_directive', - 'IPython.sphinxext.ipython_console_highlighting', - 'sphinx.ext.mathjax', - 'sphinx.ext.autodoc', - 'sphinx.ext.doctest', - 'sphinx.ext.inheritance_diagram', - 'sphinx.ext.viewcode', - 'sphinx.ext.napoleon', - 'sphinx.ext.ifconfig', - 'myst_nb', - 'sphinx_markdown_tables', - 'readthedocs_ext.readthedocs',] +extensions = [ + "matplotlib.sphinxext.plot_directive", + "IPython.sphinxext.ipython_directive", + "IPython.sphinxext.ipython_console_highlighting", + "sphinx.ext.mathjax", + "sphinx.ext.autodoc", + "sphinx.ext.doctest", + "sphinx.ext.inheritance_diagram", + "sphinx.ext.viewcode", + "sphinx.ext.napoleon", + "sphinx.ext.ifconfig", + "myst_nb", + "sphinx_markdown_tables", + "readthedocs_ext.readthedocs", +] # read the docs version used for links -if 'dev' in __version__: - read_docs_url = 'en/latest/' +if "dev" in __version__: + read_docs_url = "en/latest/" else: - read_docs_url = 'en/v{}/'.format(__version__) + read_docs_url = "en/v{}/".format(__version__) # Add any paths that contain templates here, relative to this directory. templates_path = [] # The encoding of source files. -#source_encoding = 'utf-8' +# source_encoding = 'utf-8' # The master toctree document. -master_doc = 'index' +master_doc = "index" # General information about the project. 
-project = 'CLIMADA' -copyright = '2017, ETH Zurich' -author = 'CLIMADA contributors' +project = "CLIMADA" +copyright = "2017, ETH Zurich" +author = "CLIMADA contributors" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -73,45 +76,45 @@ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. -language = 'en' +language = "en" # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: -#today = '' +# today = '' # Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' +# today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. -#unused_docs = [] +# unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. # exclude_trees = [] # The reST default role (used for this markup: `text`) to use for all documents. -#default_role = None +# default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True +# add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). -#add_module_names = True +# add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. -#show_authors = False +# show_authors = False # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = "sphinx" # A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] +# modindex_common_prefix = [] # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path -exclude_patterns = ['_build', 'test', 'Thumbs.db', '.DS_Store'] +exclude_patterns = ["_build", "test", "Thumbs.db", ".DS_Store"] # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True @@ -125,17 +128,17 @@ # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. -#html_theme_options = {} +# html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = [] +# html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". -#html_title = None +# html_title = None # A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None +# html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. @@ -149,45 +152,45 @@ # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +html_static_path = ["_static"] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' +# html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. 
-#html_use_smartypants = True +# html_use_smartypants = True # Custom sidebar templates, maps document names to template names. -#html_sidebars = {} +# html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. -#html_additional_pages = {} +# html_additional_pages = {} # If false, no module index is generated. -#html_use_modindex = True +# html_use_modindex = True # If false, no index is generated. -#html_use_index = True +# html_use_index = True # If true, the index is split into individual pages for each letter. -#html_split_index = False +# html_split_index = False # If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True +# html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. -#html_use_opensearch = '' +# html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = '' +# html_file_suffix = '' # Output file base name for HTML help builder. -htmlhelp_basename = 'climadadoc' +htmlhelp_basename = "climadadoc" # -- Options for LaTeX output -------------------------------------------------- @@ -195,47 +198,55 @@ latex_engine = "xelatex" # The paper size ('letter' or 'a4'). -#latex_paper_size = 'letter' +# latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). -#latex_font_size = '10pt' +# latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ - (master_doc, 'climada.tex', u'CLIMADA documentation', - u'CLIMADA contributors', 'manual'), + ( + master_doc, + "climada.tex", + "CLIMADA documentation", + "CLIMADA contributors", + "manual", + ), ] # The name of an image file (relative to this directory) to place at the top of # the title page. -#latex_logo = None +# latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. -#latex_use_parts = False +# latex_use_parts = False # Additional stuff for the LaTeX preamble. -#latex_preamble = '' +# latex_preamble = '' # Documents to append as an appendix to all manuals. -#latex_appendices = [] +# latex_appendices = [] # If false, no module index is generated. 
-#latex_use_modindex = True +# latex_use_modindex = True + # ----------------------------------------------------------------------------- # show __init__ documentation def skip(app, what, name, obj, skip, options): - if (name == "__init__"): + if name == "__init__": return False return skip + # remove docstrings modules def remove_module_docstring(app, what, name, obj, options, lines): if what == "module": del lines[:] + autodoc_member_order = "bysource" # --- MYST Parser settings ---- @@ -260,13 +271,15 @@ def remove_module_docstring(app, what, name, obj, options, lines): # --- + def setup(app): app.connect("autodoc-skip-member", skip) app.connect("autodoc-process-docstring", remove_module_docstring) # Pass to the app if we are building this on ReadTheDocs - on_rtd = True if (os.environ.get('READTHEDOCS') == 'True') else False - app.add_config_value('readthedocs', on_rtd, 'env') + on_rtd = True if (os.environ.get("READTHEDOCS") == "True") else False + app.add_config_value("readthedocs", on_rtd, "env") + # improve parameters description napoleon_use_param = False diff --git a/doc/guide/Guide_Configuration.ipynb b/doc/guide/Guide_Configuration.ipynb index 50ffc35f2..69056eba6 100644 --- a/doc/guide/Guide_Configuration.ipynb +++ b/doc/guide/Guide_Configuration.ipynb @@ -54,9 +54,9 @@ ], "source": [ "# suboptimal\n", - "my_dict = {'x': 4}\n", - "if my_dict['x'] > 3:\n", - " msg = 'well, arh, ...'\n", + "my_dict = {\"x\": 4}\n", + "if my_dict[\"x\"] > 3:\n", + " msg = \"well, arh, ...\"\n", "msg" ] }, @@ -78,10 +78,10 @@ ], "source": [ "# good\n", - "X = 'x'\n", + "X = \"x\"\n", "my_dict = {X: 4}\n", "if my_dict[X] > 3:\n", - " msg = 'yeah!'\n", + " msg = \"yeah!\"\n", "msg" ] }, @@ -103,7 +103,7 @@ ], "source": [ "# possibly overdoing it\n", - "X = 'x'\n", + "X = \"x\"\n", "Y = \"this doesn't mean that every string must be a constant\"\n", "my_dict = {X: 4}\n", "if my_dict[X] > 3:\n", @@ -139,13 +139,16 @@ ], "source": [ "import pandas as pd\n", - "X = 'x'\n", - "df = pd.DataFrame({'x':[1,2,3], 'y':[4,5,6]})\n", + "\n", + "X = \"x\"\n", + "df = pd.DataFrame({\"x\": [1, 2, 3], \"y\": [4, 5, 6]})\n", "try:\n", " df.X\n", "except:\n", - " from sys import stderr; stderr.write(\"this does not work\\n\")\n", - "df[X] # this does work but it's less pretty\n", + " from sys import stderr\n", + "\n", + " stderr.write(\"this does not work\\n\")\n", + "df[X] # this does work but it's less pretty\n", "df.x" ] }, @@ -357,7 +360,9 @@ "try:\n", " CONFIG.hazard.trop_cyclone.random_seed.str()\n", "except Exception as e:\n", - " from sys import stderr; stderr.write(f\"cannot convert random_seed to str: {e}\\n\")" + " from sys import stderr\n", + "\n", + " stderr.write(f\"cannot convert random_seed to str: {e}\\n\")" ] }, { diff --git a/doc/guide/Guide_Exception_Logging.ipynb b/doc/guide/Guide_Exception_Logging.ipynb index 55341f434..b4f776aa9 100644 --- a/doc/guide/Guide_Exception_Logging.ipynb +++ b/doc/guide/Guide_Exception_Logging.ipynb @@ -44,7 +44,7 @@ "metadata": {}, "outputs": [], "source": [ - "#Bad (1)\n", + "# Bad (1)\n", "x = 1\n", "try:\n", " l = len(events)\n", @@ -60,7 +60,7 @@ "metadata": {}, "outputs": [], "source": [ - "#Still bad (2)\n", + "# Still bad (2)\n", "try:\n", " l = len(events)\n", " if l < 1:\n", @@ -75,7 +75,7 @@ "metadata": {}, "outputs": [], "source": [ - "#Better, but still unsufficient (3)\n", + "# Better, but still unsufficient (3)\n", "try:\n", " l = len(events)\n", " if l < 1:\n", @@ -90,7 +90,7 @@ "metadata": {}, "outputs": [], "source": [ - "#Even better (4)\n", + "# Even 
better (4)\n", "try:\n", " l = len(events)\n", "except TypeError:\n", @@ -105,13 +105,13 @@ "metadata": {}, "outputs": [], "source": [ - "#Even better (5)\n", + "# Even better (5)\n", "try:\n", " l = len(events)\n", "except TypeError as tper:\n", " raise TypeError(\"The provided variable events is not a list\") from tper\n", "if l < 1:\n", - " raise ValueError(\"To compute an impact there must be at least one event.\")\n" + " raise ValueError(\"To compute an impact there must be at least one event.\")" ] }, { @@ -172,6 +172,7 @@ "source": [ "import logging\n", "from climada.util.config import LOGGER\n", + "\n", "LOGGER.setLevel(logging.ERROR)" ] }, diff --git a/doc/guide/Guide_Py_Performance.ipynb b/doc/guide/Guide_Py_Performance.ipynb index bb3cf209f..21f81313d 100644 --- a/doc/guide/Guide_Py_Performance.ipynb +++ b/doc/guide/Guide_Py_Performance.ipynb @@ -188,6 +188,7 @@ ], "source": [ "import numpy as np\n", + "\n", "%timeit np.sum(list_of_numbers)" ] }, @@ -947,6 +948,7 @@ "source": [ "from numba import njit\n", "\n", + "\n", "@njit\n", "def sum_array(arr):\n", " result = 0.0\n", diff --git a/doc/guide/Guide_PythonDos-n-Donts.ipynb b/doc/guide/Guide_PythonDos-n-Donts.ipynb index 85295356a..222ffd0ab 100644 --- a/doc/guide/Guide_PythonDos-n-Donts.ipynb +++ b/doc/guide/Guide_PythonDos-n-Donts.ipynb @@ -147,14 +147,12 @@ "outputs": [], "source": [ "# Vertically aligned with opening delimiter.\n", - "foo = long_function_name(var_one, var_two,\n", - " var_three, var_four)\n", + "foo = long_function_name(var_one, var_two, var_three, var_four)\n", + "\n", "\n", "# Hanging indentation (4 additonal spaces)\n", - "def very_very_long_function_name(\n", - " var_one, var_two, var_three,\n", - " var_four):\n", - " print(var_one)\n" + "def very_very_long_function_name(var_one, var_two, var_three, var_four):\n", + " print(var_one)" ] }, { @@ -303,6 +301,8 @@ " return math.sqrt(x)\n", " else:\n", " return None\n", + "\n", + "\n", "# Wrong\n", "def foo(x):\n", " if x >= 0:\n", @@ -601,7 +601,7 @@ "source": [ "@uppercase_decorator\n", "def say_hi():\n", - " return 'hello there'" + " return \"hello there\"" ] }, { diff --git a/doc/guide/Guide_Testing.ipynb b/doc/guide/Guide_Testing.ipynb index f1876080c..319d8ada5 100644 --- a/doc/guide/Guide_Testing.ipynb +++ b/doc/guide/Guide_Testing.ipynb @@ -209,7 +209,9 @@ "source": [ "from climada.test import get_test_file\n", "\n", - "my_test_file = get_test_file(ds_name='my-test-file', file_format='hdf5') # returns a pathlib.Path object" + "my_test_file = get_test_file(\n", + " ds_name=\"my-test-file\", file_format=\"hdf5\"\n", + ") # returns a pathlib.Path object" ] }, { @@ -240,11 +242,16 @@ "outputs": [], "source": [ "import climada\n", + "\n", + "\n", "def x(download_file=climada.util.files_handler.download_file):\n", - " filepath = download_file('http://real_data.ch')\n", + " filepath = download_file(\"http://real_data.ch\")\n", " return Path(filepath).stat().st_size\n", "\n", + "\n", "import unittest\n", + "\n", + "\n", "class TestX(unittest.TestCase):\n", " def download_file_dummy(url):\n", " return \"phony_data.ch\"\n", diff --git a/doc/guide/Guide_continuous_integration_GitHub_actions.ipynb b/doc/guide/Guide_continuous_integration_GitHub_actions.ipynb index d9b1d9053..f800f8eda 100644 --- a/doc/guide/Guide_continuous_integration_GitHub_actions.ipynb +++ b/doc/guide/Guide_continuous_integration_GitHub_actions.ipynb @@ -95,20 +95,23 @@ } ], "source": [ - "def x(b:bool):\n", + "def x(b: bool):\n", " if b:\n", - " print('been here')\n", + " print(\"been 
here\")\n", " return 4\n", " else:\n", - " print('been there')\n", + " print(\"been there\")\n", " return 0\n", "\n", - "def y(b:bool):\n", - " print('been everywhere')\n", - " return 1/x(b)\n", + "\n", + "def y(b: bool):\n", + " print(\"been everywhere\")\n", + " return 1 / x(b)\n", "\n", "\n", "import unittest\n", + "\n", + "\n", "class TestXY(unittest.TestCase):\n", " def test_x(self):\n", " self.assertEqual(x(True), 4)\n", @@ -117,6 +120,7 @@ " def test_y(self):\n", " self.assertEqual(y(True), 0.25)\n", "\n", + "\n", "unittest.TextTestRunner().run(unittest.TestLoader().loadTestsFromTestCase(TestXY));" ] }, diff --git a/doc/index.rst b/doc/index.rst index 732290eee..4ad14dd78 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -69,7 +69,7 @@ Jump right in: .. toctree:: :caption: API Reference :hidden: - + Python Modules diff --git a/doc/misc/AUTHORS.md b/doc/misc/AUTHORS.md index 2d2e8405f..561ed5cd3 120000 --- a/doc/misc/AUTHORS.md +++ b/doc/misc/AUTHORS.md @@ -1 +1 @@ -../../AUTHORS.md \ No newline at end of file +../../AUTHORS.md diff --git a/doc/misc/CHANGELOG.md b/doc/misc/CHANGELOG.md index 699cc9e7b..03cb73106 120000 --- a/doc/misc/CHANGELOG.md +++ b/doc/misc/CHANGELOG.md @@ -1 +1 @@ -../../CHANGELOG.md \ No newline at end of file +../../CHANGELOG.md diff --git a/doc/misc/CONTRIBUTING.md b/doc/misc/CONTRIBUTING.md index f939e75f2..bcac999a8 120000 --- a/doc/misc/CONTRIBUTING.md +++ b/doc/misc/CONTRIBUTING.md @@ -1 +1 @@ -../../CONTRIBUTING.md \ No newline at end of file +../../CONTRIBUTING.md diff --git a/doc/tutorial/0_intro_python.ipynb b/doc/tutorial/0_intro_python.ipynb index 43df82d5b..831898602 100644 --- a/doc/tutorial/0_intro_python.ipynb +++ b/doc/tutorial/0_intro_python.ipynb @@ -27,15 +27,15 @@ "metadata": {}, "outputs": [], "source": [ - "print('Addition: 2 + 2 =', 2 + 2)\n", - "print('Substraction: 50 - 5*6 =', 50 - 5*6)\n", - "print('Use of parenthesis: (50 - 5*6) / 4 =', (50 - 5*6) / 4)\n", - "print('Classic division returns a float: 17 / 3 =', 17 / 3)\n", - "print('Floor division discards the fractional part: 17 // 3 =', 17 // 3)\n", - "print('The % operator returns the remainder of the division: 17 % 3 =', 17 % 3)\n", - "print('Result * divisor + remainder: 5 * 3 + 2 =', 5 * 3 + 2)\n", - "print('5 squared: 5 ** 2 =', 5 ** 2)\n", - "print('2 to the power of 7: 2 ** 7 =', 2 ** 7)" + "print(\"Addition: 2 + 2 =\", 2 + 2)\n", + "print(\"Substraction: 50 - 5*6 =\", 50 - 5 * 6)\n", + "print(\"Use of parenthesis: (50 - 5*6) / 4 =\", (50 - 5 * 6) / 4)\n", + "print(\"Classic division returns a float: 17 / 3 =\", 17 / 3)\n", + "print(\"Floor division discards the fractional part: 17 // 3 =\", 17 // 3)\n", + "print(\"The % operator returns the remainder of the division: 17 % 3 =\", 17 % 3)\n", + "print(\"Result * divisor + remainder: 5 * 3 + 2 =\", 5 * 3 + 2)\n", + "print(\"5 squared: 5 ** 2 =\", 5**2)\n", + "print(\"2 to the power of 7: 2 ** 7 =\", 2**7)" ] }, { @@ -72,11 +72,11 @@ "metadata": {}, "outputs": [], "source": [ - "print('spam eggs') # single quotes\n", - "print('doesn\\'t') # use \\' to escape the single quote...\n", - "print(\"doesn't\") # ...or use double quotes instead\n", + "print(\"spam eggs\") # single quotes\n", + "print(\"doesn't\") # use \\' to escape the single quote...\n", + "print(\"doesn't\") # ...or use double quotes instead\n", + "print('\"Yes,\" he said.')\n", "print('\"Yes,\" he said.')\n", - "print(\"\\\"Yes,\\\" he said.\")\n", "print('\"Isn\\'t,\" she said.')" ] }, @@ -96,13 +96,13 @@ "metadata": {}, "outputs": [], "source": [ - "word = 
'Python'\n", - "print('word = ', word)\n", - "print('Character in position 0: word[0] =', word[0])\n", - "print('Character in position 5: word[5] =', word[5])\n", - "print('Last character: word[-1] =', word[-1])\n", - "print('Second-last character: word[-2] =', word[-2])\n", - "print('word[-6] =', word[-6])" + "word = \"Python\"\n", + "print(\"word = \", word)\n", + "print(\"Character in position 0: word[0] =\", word[0])\n", + "print(\"Character in position 5: word[5] =\", word[5])\n", + "print(\"Last character: word[-1] =\", word[-1])\n", + "print(\"Second-last character: word[-2] =\", word[-2])\n", + "print(\"word[-6] =\", word[-6])" ] }, { @@ -118,8 +118,8 @@ "metadata": {}, "outputs": [], "source": [ - "print('Characters from position 0 (included) to 2 (excluded): word[0:2] =', word[0:2])\n", - "print('Characters from position 2 (included) to 5 (excluded): word[2:5] =', word[2:5])" + "print(\"Characters from position 0 (included) to 2 (excluded): word[0:2] =\", word[0:2])\n", + "print(\"Characters from position 2 (included) to 5 (excluded): word[2:5] =\", word[2:5])" ] }, { @@ -145,11 +145,11 @@ "outputs": [], "source": [ "squares = [1, 4, 9, 16, 25]\n", - "print('squares: ', squares)\n", - "print('Indexing returns the item: squares[0]:', squares[0])\n", - "print('squares[-1]:', squares[-1])\n", - "print('Slicing returns a new list: squares[-3:]:', squares[-3:])\n", - "print('squares[:]:', squares[:])" + "print(\"squares: \", squares)\n", + "print(\"Indexing returns the item: squares[0]:\", squares[0])\n", + "print(\"squares[-1]:\", squares[-1])\n", + "print(\"Slicing returns a new list: squares[-3:]:\", squares[-3:])\n", + "print(\"squares[:]:\", squares[:])" ] }, { @@ -184,7 +184,7 @@ "cubes = [1, 8, 27, 65, 125] # something's wrong here\n", "cubes[3] = 64 # replace the wrong value\n", "cubes.append(216) # add the cube of 6\n", - "cubes.append(7 ** 3) # and the cube of 7\n", + "cubes.append(7**3) # and the cube of 7\n", "cubes" ] }, @@ -197,8 +197,8 @@ "# Note: execution of this cell will fail\n", "\n", "# Try to modify a character of a string\n", - "word = 'Python'\n", - "word[0] = 'p'" + "word = \"Python\"\n", + "word[0] = \"p\"" ] }, { @@ -262,7 +262,7 @@ "metadata": {}, "outputs": [], "source": [ - "t = 12345, 54321, 'hello!'\n", + "t = 12345, 54321, \"hello!\"\n", "t[0]" ] }, @@ -322,8 +322,8 @@ "metadata": {}, "outputs": [], "source": [ - "t = 12345, 54321, 'hello!' 
# tuple packing\n", - "x, y, z = t # tuple unpacking\n", + "t = 12345, 54321, \"hello!\" # tuple packing\n", + "x, y, z = t # tuple unpacking\n", "x, y, z" ] }, @@ -344,8 +344,8 @@ "metadata": {}, "outputs": [], "source": [ - "basket = {'apple', 'orange', 'apple', 'pear', 'orange', 'banana'}\n", - "basket # show that duplicates have been removed" + "basket = {\"apple\", \"orange\", \"apple\", \"pear\", \"orange\", \"banana\"}\n", + "basket # show that duplicates have been removed" ] }, { @@ -354,7 +354,7 @@ "metadata": {}, "outputs": [], "source": [ - "'orange' in basket # fast membership testing" + "\"orange\" in basket # fast membership testing" ] }, { @@ -363,7 +363,7 @@ "metadata": {}, "outputs": [], "source": [ - "'crabgrass' in basket" + "\"crabgrass\" in basket" ] }, { @@ -373,9 +373,9 @@ "outputs": [], "source": [ "# Demonstrate set operations on unique letters from two words\n", - "a = set('abracadabra')\n", - "b = set('alacazam')\n", - "a # unique letters in a" + "a = set(\"abracadabra\")\n", + "b = set(\"alacazam\")\n", + "a # unique letters in a" ] }, { @@ -384,7 +384,7 @@ "metadata": {}, "outputs": [], "source": [ - "a - b # letters in a but not in b" + "a - b # letters in a but not in b" ] }, { @@ -393,7 +393,7 @@ "metadata": {}, "outputs": [], "source": [ - "a | b # letters in a or b or both" + "a | b # letters in a or b or both" ] }, { @@ -402,7 +402,7 @@ "metadata": {}, "outputs": [], "source": [ - "a & b # letters in both a and b" + "a & b # letters in both a and b" ] }, { @@ -411,7 +411,7 @@ "metadata": {}, "outputs": [], "source": [ - "a ^ b # letters in a or b but not both" + "a ^ b # letters in a or b but not both" ] }, { @@ -440,7 +440,7 @@ "metadata": {}, "outputs": [], "source": [ - "# Define a new set and try some set methods (freestyle)\n" + "# Define a new set and try some set methods (freestyle)" ] }, { @@ -465,8 +465,8 @@ "metadata": {}, "outputs": [], "source": [ - "tel = {'jack': 4098, 'sape': 4139}\n", - "tel['guido'] = 4127\n", + "tel = {\"jack\": 4098, \"sape\": 4139}\n", + "tel[\"guido\"] = 4127\n", "tel" ] }, @@ -476,7 +476,7 @@ "metadata": {}, "outputs": [], "source": [ - "tel['jack']" + "tel[\"jack\"]" ] }, { @@ -485,7 +485,7 @@ "metadata": {}, "outputs": [], "source": [ - "del tel['sape']" + "del tel[\"sape\"]" ] }, { @@ -494,7 +494,7 @@ "metadata": {}, "outputs": [], "source": [ - "tel['irv'] = 4127\n", + "tel[\"irv\"] = 4127\n", "tel" ] }, @@ -522,7 +522,7 @@ "metadata": {}, "outputs": [], "source": [ - "'guido' in tel" + "\"guido\" in tel" ] }, { @@ -531,7 +531,7 @@ "metadata": {}, "outputs": [], "source": [ - "'jack' not in tel" + "\"jack\" not in tel" ] }, { @@ -554,13 +554,13 @@ "metadata": {}, "outputs": [], "source": [ - "def fib(n): # write Fibonacci series up to n\n", - " \"\"\"Print a Fibonacci series up to n.\"\"\"\n", - " a, b = 0, 1 # two assignments in one line\n", - " while a < n:\n", - " print(a, end=' ')\n", - " a, b = b, a+b # two assignments in one line\n", - " print()" + "def fib(n): # write Fibonacci series up to n\n", + " \"\"\"Print a Fibonacci series up to n.\"\"\"\n", + " a, b = 0, 1 # two assignments in one line\n", + " while a < n:\n", + " print(a, end=\" \")\n", + " a, b = b, a + b # two assignments in one line\n", + " print()" ] }, { @@ -587,7 +587,7 @@ "outputs": [], "source": [ "print(fib)\n", - "print(type(fib)) # function type\n", + "print(type(fib)) # function type\n", "f = fib\n", "f(100)" ] @@ -608,15 +608,16 @@ "def dummy(x):\n", " x += x\n", "\n", + "\n", "xx = 5\n", - "print('xx before function call: ', xx)\n", 
+ "print(\"xx before function call: \", xx)\n", "dummy(xx)\n", - "print('xx after function call: ', xx)\n", + "print(\"xx after function call: \", xx)\n", "\n", "yy = [5]\n", - "print('yy before function call: ', yy)\n", + "print(\"yy before function call: \", yy)\n", "dummy(yy)\n", - "print('yy after function call: ', yy)" + "print(\"yy after function call: \", yy)" ] }, { @@ -634,16 +635,16 @@ "metadata": {}, "outputs": [], "source": [ - "def ask_ok(prompt, retries=4, reminder='Please try again!'):\n", + "def ask_ok(prompt, retries=4, reminder=\"Please try again!\"):\n", " while True:\n", " ok = input(prompt)\n", - " if ok in ('y', 'ye', 'yes'):\n", + " if ok in (\"y\", \"ye\", \"yes\"):\n", " return True\n", - " if ok in ('n', 'no', 'nop', 'nope'):\n", + " if ok in (\"n\", \"no\", \"nop\", \"nope\"):\n", " return False\n", " retries = retries - 1\n", " if retries < 0:\n", - " raise ValueError('invalid user response')\n", + " raise ValueError(\"invalid user response\")\n", " print(reminder)" ] }, @@ -653,10 +654,10 @@ "metadata": {}, "outputs": [], "source": [ - "#This function can be called in several ways:\n", + "# This function can be called in several ways:\n", "\n", - "#giving only the mandatory argument:\n", - "ask_ok('Do you really want to quit?')\n" + "# giving only the mandatory argument:\n", + "ask_ok(\"Do you really want to quit?\")" ] }, { @@ -666,7 +667,7 @@ "outputs": [], "source": [ "# giving one of the optional arguments:\n", - "ask_ok('OK to overwrite the file?', 2)\n" + "ask_ok(\"OK to overwrite the file?\", 2)" ] }, { @@ -676,7 +677,7 @@ "outputs": [], "source": [ "# or even giving all arguments:\n", - "ask_ok('OK to overwrite the file?', 2, 'Come on, only yes or no!')" + "ask_ok(\"OK to overwrite the file?\", 2, \"Come on, only yes or no!\")" ] }, { @@ -692,7 +693,7 @@ "metadata": {}, "outputs": [], "source": [ - "ask_ok('OK to overwrite the file?', reminder='Come on, only yes or no!')" + "ask_ok(\"OK to overwrite the file?\", reminder=\"Come on, only yes or no!\")" ] }, { @@ -710,9 +711,11 @@ "source": [ "def test(x=None):\n", " if x is None:\n", - " print('no x here')\n", + " print(\"no x here\")\n", " else:\n", " print(x)\n", + "\n", + "\n", "test()" ] }, @@ -736,15 +739,15 @@ "metadata": {}, "outputs": [], "source": [ - "class Dog: # same as \"class Dog(object)\"\n", + "class Dog: # same as \"class Dog(object)\"\n", "\n", - " kind = 'canine' # class variable shared by all instances\n", + " kind = \"canine\" # class variable shared by all instances\n", "\n", - " def __init__(self, name): # initialization method\n", - " self.name = name # instance variable unique to each instance\n", - " self.tricks = [] # creates a new empty list for each dog\n", + " def __init__(self, name): # initialization method\n", + " self.name = name # instance variable unique to each instance\n", + " self.tricks = [] # creates a new empty list for each dog\n", "\n", - " def add_trick(self, trick): # class method\n", + " def add_trick(self, trick): # class method\n", " self.tricks.append(trick)" ] }, @@ -761,7 +764,9 @@ "metadata": {}, "outputs": [], "source": [ - "d = Dog('Fido') # creates a new instance of the class and assigns this object to the local variable d\n", + "d = Dog(\n", + " \"Fido\"\n", + ") # creates a new instance of the class and assigns this object to the local variable d\n", "d.name" ] }, @@ -771,9 +776,11 @@ "metadata": {}, "outputs": [], "source": [ - "e = Dog('Buddy') # creates a new instance of the class and assigns this object to the local variable e\n", - 
"d.add_trick('roll over')\n", - "e.add_trick('play dead')" + "e = Dog(\n", + " \"Buddy\"\n", + ") # creates a new instance of the class and assigns this object to the local variable e\n", + "d.add_trick(\"roll over\")\n", + "e.add_trick(\"play dead\")" ] }, { @@ -782,7 +789,7 @@ "metadata": {}, "outputs": [], "source": [ - "d.tricks # unique to d" + "d.tricks # unique to d" ] }, { @@ -791,7 +798,7 @@ "metadata": {}, "outputs": [], "source": [ - "e.tricks # unique to e" + "e.tricks # unique to e" ] }, { @@ -800,7 +807,7 @@ "metadata": {}, "outputs": [], "source": [ - "d.kind # shared by all dogs" + "d.kind # shared by all dogs" ] }, { @@ -809,7 +816,7 @@ "metadata": {}, "outputs": [], "source": [ - "e.kind # shared by all dogs" + "e.kind # shared by all dogs" ] }, { @@ -831,19 +838,22 @@ "metadata": {}, "outputs": [], "source": [ - "class Animal: # base class\n", + "class Animal: # base class\n", "\n", " def __init__(self, kind):\n", " self.kind = kind\n", " self.tricks = []\n", "\n", - " def add_trick(self, trick): # class method\n", + " def add_trick(self, trick): # class method\n", " self.tricks.append(trick)\n", "\n", - "class Dog(Animal): # derived class\n", "\n", - " def __init__(self): # override of __init__ base method\n", - " super(Dog, self).__init__('canine') # call Animal __init__ method with input string" + "class Dog(Animal): # derived class\n", + "\n", + " def __init__(self): # override of __init__ base method\n", + " super(Dog, self).__init__(\n", + " \"canine\"\n", + " ) # call Animal __init__ method with input string" ] }, { @@ -852,9 +862,9 @@ "metadata": {}, "outputs": [], "source": [ - "fido = Dog() # fido is automatically an animal of kind 'canine'\n", + "fido = Dog() # fido is automatically an animal of kind 'canine'\n", "print(fido.kind)\n", - "fido.add_trick('play dead') # Dog class can use Animal class\n", + "fido.add_trick(\"play dead\") # Dog class can use Animal class\n", "print(fido.tricks)" ] }, @@ -893,7 +903,8 @@ " for item in iterable:\n", " self.items_list.append(item)\n", "\n", - " __update = update # private copy of original update() method\n", + " __update = update # private copy of original update() method\n", + "\n", "\n", "class MappingSubclass(Mapping):\n", "\n", diff --git a/doc/tutorial/1_main_climada.ipynb b/doc/tutorial/1_main_climada.ipynb index 730d5e5ed..36ce87bb2 100644 --- a/doc/tutorial/1_main_climada.ipynb +++ b/doc/tutorial/1_main_climada.ipynb @@ -182,10 +182,13 @@ "source": [ "import numpy as np\n", "from climada.hazard import TCTracks\n", - "import warnings # To hide the warnings\n", - "warnings.filterwarnings('ignore')\n", + "import warnings # To hide the warnings\n", "\n", - "tracks = TCTracks.from_ibtracs_netcdf(provider='usa', basin='NA') # Here we download the full dataset for the analysis\n", + "warnings.filterwarnings(\"ignore\")\n", + "\n", + "tracks = TCTracks.from_ibtracs_netcdf(\n", + " provider=\"usa\", basin=\"NA\"\n", + ") # Here we download the full dataset for the analysis\n", "# afterwards (e.g. return period), but you can also use \"year_range\" to adjust the range of the dataset to be downloaded.\n", "# While doing that, you need to make sure that the year 2017 is included if you want to run the blocks with the codes\n", "# subsetting a specific tropic cyclone, which happened in 2017. (Of course, you can also change the subsetting codes.)" @@ -220,8 +223,10 @@ ], "source": [ "# plotting tracks can be very time consuming, depending on the number of tracks. 
So we choose only a few here, by limiting the time range to one year\n", - "tracks_2017 = TCTracks.from_ibtracs_netcdf(provider='usa', basin='NA', year_range = (2017, 2017))\n", - "tracks_2017 .plot(); # This may take a very long time" + "tracks_2017 = TCTracks.from_ibtracs_netcdf(\n", + " provider=\"usa\", basin=\"NA\", year_range=(2017, 2017)\n", + ")\n", + "tracks_2017.plot(); # This may take a very long time" ] }, { @@ -368,7 +373,9 @@ } ], "source": [ - "tracks.subset({\"sid\": \"2017260N12310\"}).plot(); # This is how we subset a TCTracks object" + "tracks.subset(\n", + " {\"sid\": \"2017260N12310\"}\n", + ").plot(); # This is how we subset a TCTracks object" ] }, { @@ -397,7 +404,7 @@ } ], "source": [ - "haz.plot_intensity(event='2017260N12310');" + "haz.plot_intensity(event=\"2017260N12310\");" ] }, { @@ -433,7 +440,7 @@ } ], "source": [ - "haz.plot_rp_intensity(return_periods=(5,10,20,40));" + "haz.plot_rp_intensity(return_periods=(5, 10, 20, 40));" ] }, { @@ -553,8 +560,10 @@ "source": [ "from climada.entity.exposures import LitPop\n", "\n", - "exp_litpop = LitPop.from_countries('Puerto Rico', res_arcsec = 120) # We'll go lower resolution than default to keep it simple\n", - "exp_litpop.set_geometry_points() # Set geodataframe geometries from lat lon data\n", + "exp_litpop = LitPop.from_countries(\n", + " \"Puerto Rico\", res_arcsec=120\n", + ") # We'll go lower resolution than default to keep it simple\n", + "exp_litpop.set_geometry_points() # Set geodataframe geometries from lat lon data\n", "\n", "exp_litpop.plot_hexbin(pop_name=True, linewidth=4, buffer=0.1);" ] @@ -647,7 +656,7 @@ } ], "source": [ - "exp_litpop.gdf['impf_TC'] = 1" + "exp_litpop.gdf[\"impf_TC\"] = 1" ] }, { @@ -688,8 +697,8 @@ "from climada.entity import Measure, MeasureSet\n", "\n", "meas_mangrove = Measure(\n", - " name='Mangrove',\n", - " haz_type='TC',\n", + " name=\"Mangrove\",\n", + " haz_type=\"TC\",\n", " color_rgb=np.array([0.2, 0.2, 0.7]),\n", " cost=500000000,\n", " mdd_impact=(1, 0),\n", @@ -762,11 +771,13 @@ } ], "source": [ - "mangrove_exp, mangrove_imp_fun_set, mangrove_haz = meas_mangrove.apply(exp_litpop, imp_fun_set, haz)\n", + "mangrove_exp, mangrove_imp_fun_set, mangrove_haz = meas_mangrove.apply(\n", + " exp_litpop, imp_fun_set, haz\n", + ")\n", "axes1 = imp_fun_set.plot()\n", - "axes1.set_title('TC: Emanuel (2011) impact function')\n", + "axes1.set_title(\"TC: Emanuel (2011) impact function\")\n", "axes2 = mangrove_imp_fun_set.plot()\n", - "axes2.set_title('TC: Modified impact function')" + "axes2.set_title(\"TC: Modified impact function\")" ] }, { @@ -792,8 +803,8 @@ ], "source": [ "meas_buildings = Measure(\n", - " name='Building code',\n", - " haz_type='TC',\n", + " name=\"Building code\",\n", + " haz_type=\"TC\",\n", " color_rgb=np.array([0.2, 0.7, 0.5]),\n", " cost=100000000,\n", " hazard_freq_cutoff=0.1,\n", @@ -802,7 +813,9 @@ "meas_set.append(meas_buildings)\n", "meas_set.check()\n", "\n", - "buildings_exp, buildings_imp_fun_set, buildings_haz = meas_buildings.apply(exp_litpop, imp_fun_set, haz)" + "buildings_exp, buildings_imp_fun_set, buildings_haz = meas_buildings.apply(\n", + " exp_litpop, imp_fun_set, haz\n", + ")" ] }, { @@ -861,7 +874,7 @@ } ], "source": [ - "haz.plot_rp_intensity(return_periods=(5, 20));\n", + "haz.plot_rp_intensity(return_periods=(5, 20))\n", "buildings_haz.plot_rp_intensity(return_periods=(5, 20));" ] }, @@ -906,8 +919,8 @@ "source": [ "from climada.entity import DiscRates\n", "\n", - "years=np.arange(1950, 2101)\n", - "rates=np.ones(years.size) * 
0.02\n", + "years = np.arange(1950, 2101)\n", + "rates = np.ones(years.size) * 0.02\n", "disc = DiscRates(years=years, rates=rates)\n", "disc.check()\n", "disc.plot()" @@ -941,7 +954,7 @@ " exposures=exp_litpop,\n", " disc_rates=disc,\n", " impact_func_set=imp_fun_set,\n", - " measure_set=meas_set\n", + " measure_set=meas_set,\n", ")" ] }, @@ -1030,10 +1043,10 @@ } ], "source": [ - "freq_curve = imp.calc_freq_curve() # impact exceedance frequency curve\n", - "freq_curve.plot();\n", + "freq_curve = imp.calc_freq_curve() # impact exceedance frequency curve\n", + "freq_curve.plot()\n", "\n", - "print('Expected average annual impact: {:.3e} USD'.format(imp.aai_agg))" + "print(\"Expected average annual impact: {:.3e} USD\".format(imp.aai_agg))" ] }, { @@ -1071,7 +1084,7 @@ } ], "source": [ - "imp.plot_basemap_eai_exposure(buffer=0.1); # average annual impact at each exposure" + "imp.plot_basemap_eai_exposure(buffer=0.1); # average annual impact at each exposure" ] }, { @@ -1186,9 +1199,12 @@ "from climada.engine import CostBenefit\n", "\n", "cost_ben = CostBenefit()\n", - "cost_ben.calc(haz, ent, future_year=2040) # prints costs and benefits\n", - "cost_ben.plot_cost_benefit(); # plot cost benefit ratio and averted damage of every exposure\n", - "cost_ben.plot_event_view(return_per=(10, 20, 40)); # plot averted damage of each measure for every return period" + "cost_ben.calc(haz, ent, future_year=2040) # prints costs and benefits\n", + "cost_ben.plot_cost_benefit()\n", + "# plot cost benefit ratio and averted damage of every exposure\n", + "cost_ben.plot_event_view(\n", + " return_per=(10, 20, 40)\n", + "); # plot averted damage of each measure for every return period" ] }, { diff --git a/doc/tutorial/climada_engine_CostBenefit.ipynb b/doc/tutorial/climada_engine_CostBenefit.ipynb index 514bceb9e..de98c7926 100644 --- a/doc/tutorial/climada_engine_CostBenefit.ipynb +++ b/doc/tutorial/climada_engine_CostBenefit.ipynb @@ -257,15 +257,23 @@ "\n", "client = Client()\n", "future_year = 2080\n", - "haz_present = client.get_hazard('tropical_cyclone',\n", - " properties={'country_name': 'Haiti',\n", - " 'climate_scenario': 'historical',\n", - " 'nb_synth_tracks':'10'})\n", - "haz_future = client.get_hazard('tropical_cyclone',\n", - " properties={'country_name': 'Haiti',\n", - " 'climate_scenario': 'rcp60',\n", - " 'ref_year': str(future_year),\n", - " 'nb_synth_tracks':'10'})\n" + "haz_present = client.get_hazard(\n", + " \"tropical_cyclone\",\n", + " properties={\n", + " \"country_name\": \"Haiti\",\n", + " \"climate_scenario\": \"historical\",\n", + " \"nb_synth_tracks\": \"10\",\n", + " },\n", + ")\n", + "haz_future = client.get_hazard(\n", + " \"tropical_cyclone\",\n", + " properties={\n", + " \"country_name\": \"Haiti\",\n", + " \"climate_scenario\": \"rcp60\",\n", + " \"ref_year\": str(future_year),\n", + " \"nb_synth_tracks\": \"10\",\n", + " },\n", + ")" ] }, { @@ -366,7 +374,7 @@ } ], "source": [ - "exp_present = client.get_litpop(country='Haiti')" + "exp_present = client.get_litpop(country=\"Haiti\")" ] }, { @@ -388,8 +396,8 @@ "exp_future.ref_year = future_year\n", "n_years = exp_future.ref_year - exp_present.ref_year + 1\n", "growth_rate = 1.02\n", - "growth = growth_rate ** n_years\n", - "exp_future.gdf['value'] = exp_future.gdf['value'] * growth" + "growth = growth_rate**n_years\n", + "exp_future.gdf[\"value\"] = exp_future.gdf[\"value\"] * growth" ] }, { @@ -517,8 +525,8 @@ "source": [ "# This would be done automatically in Impact calculations\n", "# but it's better to do it explicitly 
before the calculation\n", - "exp_present.assign_centroids(haz_present, distance='approx')\n", - "exp_future.assign_centroids(haz_future, distance='approx')" + "exp_present.assign_centroids(haz_present, distance=\"approx\")\n", + "exp_future.assign_centroids(haz_future, distance=\"approx\")" ] }, { @@ -592,9 +600,9 @@ "# This is more out of politeness, since if there's only one impact function\n", "# and one `impf_` column, CLIMADA can figure it out\n", "exp_present.gdf.rename(columns={\"impf_\": \"impf_TC\"}, inplace=True)\n", - "exp_present.gdf['impf_TC'] = 1\n", + "exp_present.gdf[\"impf_TC\"] = 1\n", "exp_future.gdf.rename(columns={\"impf_\": \"impf_TC\"}, inplace=True)\n", - "exp_future.gdf['impf_TC'] = 1" + "exp_future.gdf[\"impf_TC\"] = 1" ] }, { @@ -619,20 +627,20 @@ "from climada.entity.measures import Measure, MeasureSet\n", "\n", "meas_1 = Measure(\n", - " haz_type='TC',\n", - " name='Measure A',\n", + " haz_type=\"TC\",\n", + " name=\"Measure A\",\n", " color_rgb=np.array([0.8, 0.1, 0.1]),\n", " cost=5000000000,\n", - " hazard_inten_imp=(1, -5), # Decrease wind speeds by 5 m/s\n", + " hazard_inten_imp=(1, -5), # Decrease wind speeds by 5 m/s\n", " risk_transf_cover=0,\n", ")\n", "\n", "meas_2 = Measure(\n", - " haz_type='TC',\n", - " name='Measure B',\n", + " haz_type=\"TC\",\n", + " name=\"Measure B\",\n", " color_rgb=np.array([0.1, 0.1, 0.8]),\n", " cost=220000000,\n", - " paa_impact=(1, -0.10), # 10% fewer assets affected\n", + " paa_impact=(1, -0.10), # 10% fewer assets affected\n", ")\n", "\n", "# gather all measures\n", @@ -684,10 +692,18 @@ "source": [ "from climada.entity import Entity\n", "\n", - "entity_present = Entity(exposures=exp_present, disc_rates=discount_zero,\n", - " impact_func_set=impf_set, measure_set=meas_set)\n", - "entity_future = Entity(exposures=exp_future, disc_rates=discount_zero,\n", - " impact_func_set=impf_set, measure_set=meas_set)" + "entity_present = Entity(\n", + " exposures=exp_present,\n", + " disc_rates=discount_zero,\n", + " impact_func_set=impf_set,\n", + " measure_set=meas_set,\n", + ")\n", + "entity_future = Entity(\n", + " exposures=exp_future,\n", + " disc_rates=discount_zero,\n", + " impact_func_set=impf_set,\n", + " measure_set=meas_set,\n", + ")" ] }, { @@ -737,8 +753,16 @@ "from climada.engine.cost_benefit import risk_aai_agg\n", "\n", "costben_measures_only = CostBenefit()\n", - "costben_measures_only.calc(haz_present, entity_present, haz_future=None, ent_future=None,\n", - " future_year=future_year, risk_func=risk_aai_agg, imp_time_depen=None, save_imp=True)" + "costben_measures_only.calc(\n", + " haz_present,\n", + " entity_present,\n", + " haz_future=None,\n", + " ent_future=None,\n", + " future_year=future_year,\n", + " risk_func=risk_aai_agg,\n", + " imp_time_depen=None,\n", + " save_imp=True,\n", + ")" ] }, { @@ -783,10 +807,12 @@ } ], "source": [ - "combined_costben = costben_measures_only.combine_measures(['Measure A', 'Measure B'],\n", - " 'Combined measures',\n", - " new_color=np.array([0.1, 0.8, 0.8]),\n", - " disc_rates=discount_zero)" + "combined_costben = costben_measures_only.combine_measures(\n", + " [\"Measure A\", \"Measure B\"],\n", + " \"Combined measures\",\n", + " new_color=np.array([0.1, 0.8, 0.8]),\n", + " disc_rates=discount_zero,\n", + ")" ] }, { @@ -877,8 +903,16 @@ ], "source": [ "costben = CostBenefit()\n", - "costben.calc(haz_present, entity_present, haz_future=haz_future, ent_future=entity_future,\n", - " future_year=future_year, risk_func=risk_aai_agg, imp_time_depen=1, save_imp=True)" + 
"costben.calc(\n", + " haz_present,\n", + " entity_present,\n", + " haz_future=haz_future,\n", + " ent_future=entity_future,\n", + " future_year=future_year,\n", + " risk_func=risk_aai_agg,\n", + " imp_time_depen=1,\n", + " save_imp=True,\n", + ")" ] }, { @@ -939,8 +973,10 @@ "source": [ "# define this as a function because we'll use it again later\n", "def waterfall():\n", - " return costben.plot_waterfall(haz_present, entity_present, haz_future, entity_future,\n", - " risk_func=risk_aai_agg)\n", + " return costben.plot_waterfall(\n", + " haz_present, entity_present, haz_future, entity_future, risk_func=risk_aai_agg\n", + " )\n", + "\n", "\n", "ax = waterfall()" ] @@ -992,8 +1028,15 @@ } ], "source": [ - "costben.plot_arrow_averted(axis = waterfall(), in_meas_names=['Measure A', 'Measure B'], accumulate=True, combine=False,\n", - " risk_func=risk_aai_agg, disc_rates=None, imp_time_depen=1)" + "costben.plot_arrow_averted(\n", + " axis=waterfall(),\n", + " in_meas_names=[\"Measure A\", \"Measure B\"],\n", + " accumulate=True,\n", + " combine=False,\n", + " risk_func=risk_aai_agg,\n", + " disc_rates=None,\n", + " imp_time_depen=1,\n", + ")" ] }, { @@ -1025,10 +1068,18 @@ }, "outputs": [], "source": [ - "entity_present_disc = Entity(exposures=exp_present, disc_rates=discount_stern,\n", - " impact_func_set=impf_set, measure_set=meas_set)\n", - "entity_future_disc = Entity(exposures=exp_future, disc_rates=discount_stern,\n", - " impact_func_set=impf_set, measure_set=meas_set)" + "entity_present_disc = Entity(\n", + " exposures=exp_present,\n", + " disc_rates=discount_stern,\n", + " impact_func_set=impf_set,\n", + " measure_set=meas_set,\n", + ")\n", + "entity_future_disc = Entity(\n", + " exposures=exp_future,\n", + " disc_rates=discount_stern,\n", + " impact_func_set=impf_set,\n", + " measure_set=meas_set,\n", + ")" ] }, { @@ -1083,9 +1134,17 @@ ], "source": [ "costben_disc = CostBenefit()\n", - "costben_disc.calc(haz_present, entity_present_disc, haz_future=haz_future, ent_future=entity_future_disc,\n", - " future_year=future_year, risk_func=risk_aai_agg, imp_time_depen=1, save_imp=True)\n", - "print(costben_disc.imp_meas_future['no measure']['impact'].imp_mat.shape)" + "costben_disc.calc(\n", + " haz_present,\n", + " entity_present_disc,\n", + " haz_future=haz_future,\n", + " ent_future=entity_future_disc,\n", + " future_year=future_year,\n", + " risk_func=risk_aai_agg,\n", + " imp_time_depen=1,\n", + " save_imp=True,\n", + ")\n", + "print(costben_disc.imp_meas_future[\"no measure\"][\"impact\"].imp_mat.shape)" ] }, { @@ -1194,18 +1253,22 @@ } ], "source": [ - "combined_costben_disc = costben_disc.combine_measures(['Measure A', 'Measure B'],\n", - " 'Combined measures',\n", - " new_color=np.array([0.1, 0.8, 0.8]),\n", - " disc_rates=discount_stern)\n", - "efc_present = costben_disc.imp_meas_present['no measure']['efc']\n", - "efc_future = costben_disc.imp_meas_future['no measure']['efc']\n", - "efc_combined_measures = combined_costben_disc.imp_meas_future['Combined measures']['efc']\n", + "combined_costben_disc = costben_disc.combine_measures(\n", + " [\"Measure A\", \"Measure B\"],\n", + " \"Combined measures\",\n", + " new_color=np.array([0.1, 0.8, 0.8]),\n", + " disc_rates=discount_stern,\n", + ")\n", + "efc_present = costben_disc.imp_meas_present[\"no measure\"][\"efc\"]\n", + "efc_future = costben_disc.imp_meas_future[\"no measure\"][\"efc\"]\n", + "efc_combined_measures = combined_costben_disc.imp_meas_future[\"Combined measures\"][\n", + " \"efc\"\n", + "]\n", "\n", "ax = 
plt.subplot(1, 1, 1)\n", - "efc_present.plot(axis=ax, color='blue', label='Present')\n", - "efc_future.plot(axis=ax, color='orange', label='Future, unadapted')\n", - "efc_combined_measures.plot(axis=ax, color='green', label='Future, adapted')\n", + "efc_present.plot(axis=ax, color=\"blue\", label=\"Present\")\n", + "efc_future.plot(axis=ax, color=\"orange\", label=\"Future, unadapted\")\n", + "efc_combined_measures.plot(axis=ax, color=\"green\", label=\"Future, adapted\")\n", "leg = ax.legend()" ] }, diff --git a/doc/tutorial/climada_engine_Forecast.ipynb b/doc/tutorial/climada_engine_Forecast.ipynb index 74cbd00f8..29c9a5930 100644 --- a/doc/tutorial/climada_engine_Forecast.ipynb +++ b/doc/tutorial/climada_engine_Forecast.ipynb @@ -42,12 +42,12 @@ "metadata": {}, "outputs": [], "source": [ - "#generate hazard\n", + "# generate hazard\n", "hazard, haz_model, run_datetime, event_date = generate_WS_forecast_hazard()\n", "# generate hazard with forecasts from past dates (works only if the files have already been downloaded)\n", "# hazard, haz_model, run_datetime, event_date = generate_WS_forecast_hazard(\n", "# run_datetime=datetime(2022,5,17),\n", - "# event_date=datetime(2022,5,19)) " + "# event_date=datetime(2022,5,19))" ] }, { @@ -56,7 +56,7 @@ "metadata": {}, "outputs": [], "source": [ - "#generate vulnerability\n", + "# generate vulnerability\n", "impact_function = ImpfStormEurope.from_welker()\n", "impact_function_set = ImpactFuncSet([impact_function])" ] @@ -67,12 +67,12 @@ "metadata": {}, "outputs": [], "source": [ - "#generate exposure and save to file\n", - "filename_exp = CONFIG.local_data.save_dir.dir() / ('exp_litpop_Switzerland.hdf5')\n", + "# generate exposure and save to file\n", + "filename_exp = CONFIG.local_data.save_dir.dir() / (\"exp_litpop_Switzerland.hdf5\")\n", "if filename_exp.exists():\n", " exposure = LitPop.from_hdf5(filename_exp)\n", "else:\n", - " exposure = LitPop.from_countries('Switzerland', reference_year=2020)\n", + " exposure = LitPop.from_countries(\"Switzerland\", reference_year=2020)\n", " exposure.write_hdf5(filename_exp)" ] }, @@ -82,7 +82,7 @@ "metadata": {}, "outputs": [], "source": [ - "#create and calculate Forecast\n", + "# create and calculate Forecast\n", "CH_WS_forecast = Forecast({run_datetime: hazard}, exposure, impact_function_set)\n", "CH_WS_forecast.calc()" ] @@ -106,7 +106,7 @@ } ], "source": [ - "CH_WS_forecast.plot_imp_map(save_fig=False,close_fig=False,proj=ccrs.epsg(2056));" + "CH_WS_forecast.plot_imp_map(save_fig=False, close_fig=False, proj=ccrs.epsg(2056));" ] }, { @@ -135,7 +135,7 @@ } ], "source": [ - "CH_WS_forecast.plot_hist(save_fig=False,close_fig=False);" + "CH_WS_forecast.plot_hist(save_fig=False, close_fig=False);" ] }, { @@ -164,7 +164,9 @@ } ], "source": [ - "CH_WS_forecast.plot_exceedence_prob(threshold=5000, save_fig=False, close_fig=False,proj=ccrs.epsg(2056));" + "CH_WS_forecast.plot_exceedence_prob(\n", + " threshold=5000, save_fig=False, close_fig=False, proj=ccrs.epsg(2056)\n", + ");" ] }, { @@ -198,31 +200,30 @@ "from climada.util.config import CONFIG\n", "\n", "\n", - "#create a file containing the polygons of Swiss cantons using natural earth\n", - "cantons_file = CONFIG.local_data.save_dir.dir() / 'cantons.shp'\n", - "adm1_shape_file = shapereader.natural_earth(resolution='10m',\n", - " category='cultural',\n", - " name='admin_1_states_provinces')\n", + "# create a file containing the polygons of Swiss cantons using natural earth\n", + "cantons_file = CONFIG.local_data.save_dir.dir() / \"cantons.shp\"\n", + 
"adm1_shape_file = shapereader.natural_earth(\n", + " resolution=\"10m\", category=\"cultural\", name=\"admin_1_states_provinces\"\n", + ")\n", "if not cantons_file.exists():\n", - " with fiona.open(adm1_shape_file, 'r') as source:\n", - " with fiona.open(\n", - " cantons_file, 'w',\n", - " **source.meta) as sink:\n", + " with fiona.open(adm1_shape_file, \"r\") as source:\n", + " with fiona.open(cantons_file, \"w\", **source.meta) as sink:\n", "\n", " for f in source:\n", - " if f['properties']['adm0_a3'] == 'CHE':\n", + " if f[\"properties\"][\"adm0_a3\"] == \"CHE\":\n", " sink.write(f)\n", - "CH_WS_forecast.plot_warn_map(str(cantons_file),\n", - " decision_level = 'polygon',\n", - " thresholds=[100000,500000,\n", - " 1000000,5000000],\n", - " probability_aggregation='mean',\n", - " area_aggregation='sum',\n", - " title=\"Building damage warning\",\n", - " explain_text=\"warn level based on aggregated damages\",\n", - " save_fig=False,\n", - " close_fig=False,\n", - " proj=ccrs.epsg(2056));" + "CH_WS_forecast.plot_warn_map(\n", + " str(cantons_file),\n", + " decision_level=\"polygon\",\n", + " thresholds=[100000, 500000, 1000000, 5000000],\n", + " probability_aggregation=\"mean\",\n", + " area_aggregation=\"sum\",\n", + " title=\"Building damage warning\",\n", + " explain_text=\"warn level based on aggregated damages\",\n", + " save_fig=False,\n", + " close_fig=False,\n", + " proj=ccrs.epsg(2056),\n", + ");" ] }, { @@ -255,43 +256,43 @@ "\n", "### generate exposure\n", "# find out which hazard coord to consider\n", - "CHE_borders = u_plot._get_borders(np.stack([exposure.gdf['latitude'].values,\n", - " exposure.gdf['longitude'].values],\n", - " axis=1)\n", - " )\n", - "centroid_selection = np.logical_and(np.logical_and(hazard.centroids.lat >= CHE_borders[2],\n", - " hazard.centroids.lat <= CHE_borders[3]),\n", - " np.logical_and(hazard.centroids.lon >= CHE_borders[0],\n", - " hazard.centroids.lon <= CHE_borders[1])\n", - " )\n", + "CHE_borders = u_plot._get_borders(\n", + " np.stack(\n", + " [exposure.gdf[\"latitude\"].values, exposure.gdf[\"longitude\"].values], axis=1\n", + " )\n", + ")\n", + "centroid_selection = np.logical_and(\n", + " np.logical_and(\n", + " hazard.centroids.lat >= CHE_borders[2], hazard.centroids.lat <= CHE_borders[3]\n", + " ),\n", + " np.logical_and(\n", + " hazard.centroids.lon >= CHE_borders[0], hazard.centroids.lon <= CHE_borders[1]\n", + " ),\n", + ")\n", "# Fill DataFrame with values for a \"neutral\" exposure (value = 1)\n", "\n", "exp_df = DataFrame()\n", - "exp_df['value'] = np.ones_like(hazard.centroids.lat[centroid_selection]) # provide value\n", - "exp_df['latitude'] = hazard.centroids.lat[centroid_selection]\n", - "exp_df['longitude'] = hazard.centroids.lon[centroid_selection]\n", - "exp_df['impf_WS'] = np.ones_like(hazard.centroids.lat[centroid_selection], int)\n", + "exp_df[\"value\"] = np.ones_like(\n", + " hazard.centroids.lat[centroid_selection]\n", + ") # provide value\n", + "exp_df[\"latitude\"] = hazard.centroids.lat[centroid_selection]\n", + "exp_df[\"longitude\"] = hazard.centroids.lon[centroid_selection]\n", + "exp_df[\"impf_WS\"] = np.ones_like(hazard.centroids.lat[centroid_selection], int)\n", "# Generate Exposures\n", "exp = Exposures(exp_df)\n", "exp.check()\n", - "exp.value_unit = 'warn_level'\n", + "exp.value_unit = \"warn_level\"\n", "\n", "### generate impact functions\n", "## impact functions for hazard based warnings\n", - "haz_type = 'WS'\n", + "haz_type = \"WS\"\n", "idx = 1\n", - "name = 'warn_level_low_elevation'\n", - 
"intensity_unit = 'm/s'\n", - "intensity = np.array([0.0, 19.439, \n", - " 19.44, 24.999, \n", - " 25.0, 30.549, \n", - " 30.55, 38.879, \n", - " 38.88, 100.0])\n", - "mdd = np.array([1.0, 1.0, \n", - " 2.0, 2.0, \n", - " 3.0, 3.0, \n", - " 4.0, 4.0, \n", - " 5.0, 5.0])\n", + "name = \"warn_level_low_elevation\"\n", + "intensity_unit = \"m/s\"\n", + "intensity = np.array(\n", + " [0.0, 19.439, 19.44, 24.999, 25.0, 30.549, 30.55, 38.879, 38.88, 100.0]\n", + ")\n", + "mdd = np.array([1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0, 5.0, 5.0])\n", "paa = np.ones_like(mdd)\n", "imp_fun_low = ImpactFunc(haz_type, idx, intensity, mdd, paa, intensity_unit, name)\n", "imp_fun_low.check()\n", @@ -305,7 +306,7 @@ "metadata": {}, "outputs": [], "source": [ - "#create and calculate Forecast\n", + "# create and calculate Forecast\n", "warn_forecast = Forecast({run_datetime: hazard}, exp, impf_set)\n", "warn_forecast.calc()" ] @@ -336,16 +337,18 @@ } ], "source": [ - "warn_forecast.plot_warn_map(cantons_file,\n", - " thresholds=[2,3,4,5],\n", - " decision_level = 'exposure_point',\n", - " probability_aggregation=0.5,\n", - " area_aggregation=0.5,\n", - " title=\"DWD ICON METEOROLOGICAL WARNING\",\n", - " explain_text=\"warn level based on wind gust thresholds\",\n", - " save_fig=False,\n", - " close_fig=False,\n", - " proj=ccrs.epsg(2056));" + "warn_forecast.plot_warn_map(\n", + " cantons_file,\n", + " thresholds=[2, 3, 4, 5],\n", + " decision_level=\"exposure_point\",\n", + " probability_aggregation=0.5,\n", + " area_aggregation=0.5,\n", + " title=\"DWD ICON METEOROLOGICAL WARNING\",\n", + " explain_text=\"warn level based on wind gust thresholds\",\n", + " save_fig=False,\n", + " close_fig=False,\n", + " proj=ccrs.epsg(2056),\n", + ");" ] }, { @@ -390,4 +393,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} \ No newline at end of file +} diff --git a/doc/tutorial/climada_engine_Impact.ipynb b/doc/tutorial/climada_engine_Impact.ipynb index bbe55afd6..b6ea21cd8 100644 --- a/doc/tutorial/climada_engine_Impact.ipynb +++ b/doc/tutorial/climada_engine_Impact.ipynb @@ -329,7 +329,9 @@ "from climada.entity import LitPop\n", "\n", "# Cuba with resolution 10km and financial_mode = income group.\n", - "exp_lp = LitPop.from_countries(countries=['CUB'], res_arcsec=300, fin_mode='income_group')\n", + "exp_lp = LitPop.from_countries(\n", + " countries=[\"CUB\"], res_arcsec=300, fin_mode=\"income_group\"\n", + ")\n", "exp_lp.check()" ] }, @@ -492,7 +494,7 @@ "# not needed for impact calculations\n", "# visualize the define exposure\n", "exp_lp.plot_raster()\n", - "print('\\n Raster properties exposures:', exp_lp.meta)" + "print(\"\\n Raster properties exposures:\", exp_lp.meta)" ] }, { @@ -540,13 +542,17 @@ "from climada.hazard import TCTracks, TropCyclone, Centroids\n", "\n", "# Load histrocial tropical cyclone tracks from ibtracs over the North Atlantic basin between 2010-2012\n", - "ibtracks_na = TCTracks.from_ibtracs_netcdf(provider='usa', basin='NA', year_range=(2010, 2012), correct_pres=True)\n", - "print('num tracks hist:', ibtracks_na.size)\n", + "ibtracks_na = TCTracks.from_ibtracs_netcdf(\n", + " provider=\"usa\", basin=\"NA\", year_range=(2010, 2012), correct_pres=True\n", + ")\n", + "print(\"num tracks hist:\", ibtracks_na.size)\n", "\n", - "ibtracks_na.equal_timestep(0.5) # Interpolation to make the track smooth and to allow applying calc_perturbed_trajectories\n", + "ibtracks_na.equal_timestep(\n", + " 0.5\n", + ") # Interpolation to make the track smooth and to allow applying calc_perturbed_trajectories\n", "# Add 
randomly generated tracks using the calc_perturbed_trajectories method (1 per historical track)\n", "ibtracks_na.calc_perturbed_trajectories(nb_synth_tracks=1)\n", - "print('num tracks hist+syn:', ibtracks_na.size)" + "print(\"num tracks hist+syn:\", ibtracks_na.size)" ] }, { @@ -620,8 +626,8 @@ "outputs": [], "source": [ "# Define the centroids from the exposures position\n", - "lat = exp_lp.gdf['latitude'].values\n", - "lon = exp_lp.gdf['longitude'].values\n", + "lat = exp_lp.gdf[\"latitude\"].values\n", + "lon = exp_lp.gdf[\"longitude\"].values\n", "centrs = Centroids.from_lat_lon(lat, lon)\n", "centrs.check()" ] @@ -702,6 +708,7 @@ "outputs": [], "source": [ "from climada.entity import ImpactFuncSet, ImpfTropCyclone\n", + "\n", "# impact function TC\n", "impf_tc = ImpfTropCyclone.from_emanuel_usa()\n", "\n", @@ -865,7 +872,7 @@ "source": [ "# Exposures: rename column and assign id\n", "exp_lp.gdf.rename(columns={\"impf_\": \"impf_\" + haz_type}, inplace=True)\n", - "exp_lp.gdf['impf_' + haz_type] = haz_id\n", + "exp_lp.gdf[\"impf_\" + haz_type] = haz_id\n", "exp_lp.check()\n", "exp_lp.gdf.head()" ] @@ -910,7 +917,10 @@ "source": [ "# Compute impact\n", "from climada.engine import ImpactCalc\n", - "imp = ImpactCalc(exp_lp, impf_set, tc).impact(save_mat=False) # Do not save the results geographically resolved (only aggregate values)" + "\n", + "imp = ImpactCalc(exp_lp, impf_set, tc).impact(\n", + " save_mat=False\n", + ") # Do not save the results geographically resolved (only aggregate values)" ] }, { @@ -1215,25 +1225,27 @@ "from datetime import datetime, date\n", "import pandas as pd\n", "\n", - "#set a harvest date\n", - "harvest_DOY=290 #17 October\n", + "# set a harvest date\n", + "harvest_DOY = 290 # 17 October\n", "\n", - "#loop over all events an check if they happened before or after harvest\n", - "event_ids_post_harvest=[]\n", - "event_ids_pre_harvest=[]\n", + "# loop over all events an check if they happened before or after harvest\n", + "event_ids_post_harvest = []\n", + "event_ids_pre_harvest = []\n", "for event_id in tc.event_id:\n", - " event_date = tc.date[np.where(tc.event_id==event_id)[0][0]]\n", - " day_of_year = event_date - date(datetime.fromordinal(event_date).year, 1, 1).toordinal() + 1\n", + " event_date = tc.date[np.where(tc.event_id == event_id)[0][0]]\n", + " day_of_year = (\n", + " event_date - date(datetime.fromordinal(event_date).year, 1, 1).toordinal() + 1\n", + " )\n", "\n", - " if day_of_year > harvest_DOY:\n", - " event_ids_post_harvest.append(event_id)\n", - " else:\n", - " event_ids_pre_harvest.append(event_id)\n", + " if day_of_year > harvest_DOY:\n", + " event_ids_post_harvest.append(event_id)\n", + " else:\n", + " event_ids_pre_harvest.append(event_id)\n", "\n", - "tc_post_harvest=tc.select(event_id=event_ids_post_harvest)\n", - "tc_pre_harvest=tc.select(event_id=event_ids_pre_harvest)\n", - "#print('pre-harvest:', tc_pre_harvest.event_name)\n", - "#print('post-harvest:', tc_post_harvest.event_name)" + "tc_post_harvest = tc.select(event_id=event_ids_post_harvest)\n", + "tc_pre_harvest = tc.select(event_id=event_ids_pre_harvest)\n", + "# print('pre-harvest:', tc_pre_harvest.event_name)\n", + "# print('post-harvest:', tc_post_harvest.event_name)" ] }, { @@ -1285,18 +1297,19 @@ ], "source": [ "from climada.engine import Impact\n", + "\n", "# impact function TC\n", "impf_tc = ImpfTropCyclone.from_emanuel_usa()\n", "# impact function TC after harvest is by factor 0.5 smaller\n", "impf_tc_posth = ImpfTropCyclone.from_emanuel_usa()\n", - 
"impf_tc_posth.mdd = impf_tc.mdd*0.1\n", + "impf_tc_posth.mdd = impf_tc.mdd * 0.1\n", "# add the impact function to an Impact function set\n", "impf_set = ImpactFuncSet([impf_tc])\n", "impf_set_posth = ImpactFuncSet([impf_tc_posth])\n", "impf_set.check()\n", "impf_set_posth.check()\n", "\n", - "#plot\n", + "# plot\n", "impf_set.plot()\n", "impf_set_posth.plot()\n", "\n", @@ -1360,16 +1373,17 @@ ], "source": [ "# Concatenate impacts again\n", - "imp_tot = Impact.concat([imp_preh,imp_posth])\n", + "imp_tot = Impact.concat([imp_preh, imp_posth])\n", "\n", - "#plot result\n", + "# plot result\n", "import matplotlib.pyplot as plt\n", - "ax=imp_preh.plot_hexbin_eai_exposure(gridsize=100,adapt_fontsize=False)\n", - "ax.set_title('Expected annual impact: Pre-Harvest')\n", - "ax=imp_posth.plot_hexbin_eai_exposure(gridsize=100,adapt_fontsize=False)\n", - "ax.set_title('Expected annual impact: Post-Harvest')\n", - "ax=imp_tot.plot_hexbin_eai_exposure(gridsize=100,adapt_fontsize=False)\n", - "ax.set_title('Expected annual impact: Total')\n" + "\n", + "ax = imp_preh.plot_hexbin_eai_exposure(gridsize=100, adapt_fontsize=False)\n", + "ax.set_title(\"Expected annual impact: Pre-Harvest\")\n", + "ax = imp_posth.plot_hexbin_eai_exposure(gridsize=100, adapt_fontsize=False)\n", + "ax.set_title(\"Expected annual impact: Post-Harvest\")\n", + "ax = imp_tot.plot_hexbin_eai_exposure(gridsize=100, adapt_fontsize=False)\n", + "ax.set_title(\"Expected annual impact: Total\")" ] }, { @@ -1459,22 +1473,34 @@ "from climada.engine import ImpactCalc\n", "\n", "# Set Exposures in points\n", - "exp_pnt = Exposures(crs='epsg:4326') #set coordinate system\n", - "exp_pnt.gdf['latitude'] = np.array([21.899326, 21.960728, 22.220574, 22.298390, 21.787977, 21.787977, 21.981732])\n", - "exp_pnt.gdf['longitude'] = np.array([88.307422, 88.565362, 88.378337, 87.806356, 88.348835, 88.348835, 89.246521])\n", - "exp_pnt.gdf['value'] = np.array([1.0e5, 1.2e5, 1.1e5, 1.1e5, 2.0e5, 2.5e5, 0.5e5])\n", + "exp_pnt = Exposures(crs=\"epsg:4326\") # set coordinate system\n", + "exp_pnt.gdf[\"latitude\"] = np.array(\n", + " [21.899326, 21.960728, 22.220574, 22.298390, 21.787977, 21.787977, 21.981732]\n", + ")\n", + "exp_pnt.gdf[\"longitude\"] = np.array(\n", + " [88.307422, 88.565362, 88.378337, 87.806356, 88.348835, 88.348835, 89.246521]\n", + ")\n", + "exp_pnt.gdf[\"value\"] = np.array([1.0e5, 1.2e5, 1.1e5, 1.1e5, 2.0e5, 2.5e5, 0.5e5])\n", "exp_pnt.check()\n", "exp_pnt.plot_scatter(buffer=0.05)\n", "\n", "# Set Hazard in Exposures points\n", "# set centroids from exposures coordinates\n", - "centr_pnt = Centroids.from_lat_lon(exp_pnt.gdf['latitude'].values, exp_pnt.gdf['longitude'].values, exp_pnt.crs)\n", + "centr_pnt = Centroids.from_lat_lon(\n", + " exp_pnt.gdf[\"latitude\"].values, exp_pnt.gdf[\"longitude\"].values, exp_pnt.crs\n", + ")\n", "# compute Hazard in that centroids\n", - "tr_pnt = TCTracks.from_ibtracs_netcdf(storm_id='2007314N10093')\n", + "tr_pnt = TCTracks.from_ibtracs_netcdf(storm_id=\"2007314N10093\")\n", "tc_pnt = TropCyclone.from_tracks(tr_pnt, centroids=centr_pnt)\n", "tc_pnt.check()\n", - "ax_pnt = tc_pnt.centroids.plot(c=np.array(tc_pnt.intensity[0,:].todense()).squeeze()) # plot intensity per point\n", - "ax_pnt.get_figure().colorbar(ax_pnt.collections[0], fraction=0.0175, pad=0.02).set_label('Intensity (m/s)') # add colorbar\n", + "ax_pnt = tc_pnt.centroids.plot(\n", + " c=np.array(tc_pnt.intensity[0, :].todense()).squeeze()\n", + ") # plot intensity per point\n", + "ax_pnt.get_figure().colorbar(\n", + " 
ax_pnt.collections[0], fraction=0.0175, pad=0.02\n", + ").set_label(\n", + " \"Intensity (m/s)\"\n", + ") # add colorbar\n", "\n", "# Set impact function\n", "impf_tc = ImpfTropCyclone.from_emanuel_usa()\n", @@ -1486,13 +1512,16 @@ "[haz_id] = impf_set.get_ids()[haz_type]\n", "# Exposures: rename column and assign id\n", "exp_lp.gdf.rename(columns={\"impf_\": \"impf_\" + haz_type}, inplace=True)\n", - "exp_lp.gdf['impf_' + haz_type] = haz_id\n", + "exp_lp.gdf[\"impf_\" + haz_type] = haz_id\n", "exp_lp.gdf.head()\n", "\n", "# Compute Impact\n", "imp_pnt = ImpactCalc(exp_pnt, impf_pnt, tc_pnt).impact()\n", "# nearest neighbor of exposures to centroids gives identity\n", - "print('Nearest neighbor hazard.centroids indexes for each exposure:', exp_pnt.gdf['centr_TC'].values)\n", + "print(\n", + " \"Nearest neighbor hazard.centroids indexes for each exposure:\",\n", + " exp_pnt.gdf[\"centr_TC\"].values,\n", + ")\n", "imp_pnt.plot_scatter_eai_exposure(ignore_zero=False, buffer=0.05);" ] }, @@ -1680,24 +1709,32 @@ "from climada.util.constants import HAZ_DEMO_FL\n", "\n", "# Exposures belonging to a raster (the raser information is contained in the meta attribute)\n", - "exp_ras = LitPop.from_countries(countries=['VEN'], res_arcsec=300, fin_mode='income_group')\n", + "exp_ras = LitPop.from_countries(\n", + " countries=[\"VEN\"], res_arcsec=300, fin_mode=\"income_group\"\n", + ")\n", "exp_ras.gdf.reset_index()\n", "exp_ras.check()\n", "exp_ras.plot_raster()\n", - "print('\\n Raster properties exposures:', exp_ras.meta)\n", + "print(\"\\n Raster properties exposures:\", exp_ras.meta)\n", "\n", "# Initialize hazard object with haz_type = 'FL' (for Flood)\n", - "hazard_type='FL'\n", + "hazard_type = \"FL\"\n", "# Load a previously generated (either with CLIMADA or other means) hazard\n", "# from file (HAZ_DEMO_FL) and resample the hazard raster to the exposures' ones\n", "# Hint: check how other resampling methods affect to final impact\n", - "haz_ras = Hazard.from_raster([HAZ_DEMO_FL], haz_type=hazard_type, dst_crs=exp_ras.meta['crs'], transform=exp_ras.meta['transform'],\n", - " width=exp_ras.meta['width'], height=exp_ras.meta['height'],\n", - " resampling=Resampling.nearest)\n", - "haz_ras.intensity[haz_ras.intensity==-9999] = 0 # correct no data values\n", + "haz_ras = Hazard.from_raster(\n", + " [HAZ_DEMO_FL],\n", + " haz_type=hazard_type,\n", + " dst_crs=exp_ras.meta[\"crs\"],\n", + " transform=exp_ras.meta[\"transform\"],\n", + " width=exp_ras.meta[\"width\"],\n", + " height=exp_ras.meta[\"height\"],\n", + " resampling=Resampling.nearest,\n", + ")\n", + "haz_ras.intensity[haz_ras.intensity == -9999] = 0 # correct no data values\n", "haz_ras.check()\n", "haz_ras.plot_intensity(1)\n", - "print('Raster properties centroids:', haz_ras.centroids.meta)\n", + "print(\"Raster properties centroids:\", haz_ras.centroids.meta)\n", "\n", "# Set dummy impact function\n", "intensity = np.linspace(0, 10, 100)\n", @@ -1710,13 +1747,16 @@ "\n", "# Exposures: rename column and assign id\n", "exp_lp.gdf.rename(columns={\"impf_\": \"impf_\" + hazard_type}, inplace=True)\n", - "exp_lp.gdf['impf_' + haz_type] = haz_id\n", + "exp_lp.gdf[\"impf_\" + haz_type] = haz_id\n", "exp_lp.gdf.head()\n", "\n", "# Compute impact\n", "imp_ras = ImpactCalc(exp_ras, impf_ras, haz_ras).impact(save_mat=False)\n", "# nearest neighbor of exposures to centroids is not identity because litpop does not contain data outside the country polygon\n", - "print('\\n Nearest neighbor hazard.centroids indexes for each exposure:', 
exp_ras.gdf['centr_FL'].values)\n", + "print(\n", + " \"\\n Nearest neighbor hazard.centroids indexes for each exposure:\",\n", + " exp_ras.gdf[\"centr_FL\"].values,\n", + ")\n", "imp_ras.plot_raster_eai_exposure();" ] }, @@ -1957,7 +1997,7 @@ "from climada_petals.entity import BlackMarble\n", "\n", "exp_video = BlackMarble()\n", - "exp_video.set_countries(['Cuba'], 2016, res_km=2.5)\n", + "exp_video.set_countries([\"Cuba\"], 2016, res_km=2.5)\n", "exp_video.check()\n", "\n", "# impact function\n", @@ -1967,19 +2007,23 @@ "\n", "# compute sequence of hazards using TropCyclone video_intensity method\n", "exp_sea = add_sea(exp_video, (100, 5))\n", - "centr_video = Centroids.from_lat_lon(exp_sea.gdf['latitude'].values, exp_sea.gdf['longitude'].values)\n", + "centr_video = Centroids.from_lat_lon(\n", + " exp_sea.gdf[\"latitude\"].values, exp_sea.gdf[\"longitude\"].values\n", + ")\n", "centr_video.check()\n", "\n", - "track_name = '2017242N16333'\n", - "tr_irma = TCTracks.from_ibtracs_netcdf(provider='usa', storm_id=track_name) # IRMA 2017\n", + "track_name = \"2017242N16333\"\n", + "tr_irma = TCTracks.from_ibtracs_netcdf(provider=\"usa\", storm_id=track_name) # IRMA 2017\n", "\n", "tc_video = TropCyclone()\n", - "tc_list, _ = tc_video.video_intensity(track_name, tr_irma, centr_video) # empty file name to not to write the video\n", + "tc_list, _ = tc_video.video_intensity(\n", + " track_name, tr_irma, centr_video\n", + ") # empty file name to not to write the video\n", "\n", "# generate video of impacts\n", - "file_name='./results/irma_imp_fl.gif'\n", + "file_name = \"./results/irma_imp_fl.gif\"\n", "imp_video = Impact()\n", - "imp_list = imp_video.video_direct_impact(exp_video, impfs_video, tc_list, file_name)\n" + "imp_list = imp_video.video_direct_impact(exp_video, impfs_video, tc_list, file_name)" ] } ], diff --git a/doc/tutorial/climada_engine_impact_data.ipynb b/doc/tutorial/climada_engine_impact_data.ipynb index 443a6f414..40ead3d80 100644 --- a/doc/tutorial/climada_engine_impact_data.ipynb +++ b/doc/tutorial/climada_engine_impact_data.ipynb @@ -46,11 +46,15 @@ "from matplotlib import pyplot as plt\n", "\n", "from climada.util.constants import DEMO_DIR\n", - "from climada.engine.impact_data import emdat_countries_by_hazard, \\\n", - " emdat_impact_yearlysum, emdat_to_impact, clean_emdat_df\n", + "from climada.engine.impact_data import (\n", + " emdat_countries_by_hazard,\n", + " emdat_impact_yearlysum,\n", + " emdat_to_impact,\n", + " clean_emdat_df,\n", + ")\n", "\n", "# set path to CSV file downloaded from https://public.emdat.be :\n", - "emdat_file_path = DEMO_DIR.joinpath('demo_emdat_impact_data_2020.csv')" + "emdat_file_path = DEMO_DIR.joinpath(\"demo_emdat_impact_data_2020.csv\")" ] }, { @@ -129,8 +133,12 @@ "source": [ "\"\"\"Create DataFrame df with EM-DAT entries of tropical cyclones in Thailand and Viet Nam in the years 2005 and 2006\"\"\"\n", "\n", - "df = clean_emdat_df(emdat_file_path, countries=['THA', 'Viet Nam'], hazard=['TC'], \\\n", - " year_range=[2005, 2006])\n", + "df = clean_emdat_df(\n", + " emdat_file_path,\n", + " countries=[\"THA\", \"Viet Nam\"],\n", + " hazard=[\"TC\"],\n", + " year_range=[2005, 2006],\n", + ")\n", "print(df)" ] }, @@ -160,7 +168,9 @@ "source": [ "\"\"\"emdat_countries_by_hazard: get lists of countries impacted by tropical cyclones from 2010 to 2019\"\"\"\n", "\n", - "iso3_codes, country_names = emdat_countries_by_hazard(emdat_file_path, hazard='TC', year_range=(2010, 2019))\n", + "iso3_codes, country_names = emdat_countries_by_hazard(\n", + " 
emdat_file_path, hazard=\"TC\", year_range=(2010, 2019)\n", + ")\n", "\n", "print(country_names)\n", "\n", @@ -214,11 +224,18 @@ "source": [ "\"\"\"Global TC damages 2000 to 2009\"\"\"\n", "\n", - "impact_emdat, countries = emdat_to_impact(emdat_file_path, 'TC', year_range=(2000,2009))\n", - "\n", - "print('Number of TC events in EM-DAT 2000 to 2009 globally: %i' %(impact_emdat.event_id.size))\n", - "print('Global annual average monetary damage (AAI) from TCs as reported in EM-DAT 2000 to 2009: USD billion %2.2f' \\\n", - " %(impact_emdat.aai_agg/1e9))\n" + "impact_emdat, countries = emdat_to_impact(\n", + " emdat_file_path, \"TC\", year_range=(2000, 2009)\n", + ")\n", + "\n", + "print(\n", + " \"Number of TC events in EM-DAT 2000 to 2009 globally: %i\"\n", + " % (impact_emdat.event_id.size)\n", + ")\n", + "print(\n", + " \"Global annual average monetary damage (AAI) from TCs as reported in EM-DAT 2000 to 2009: USD billion %2.2f\"\n", + " % (impact_emdat.aai_agg / 1e9)\n", + ")" ] }, { @@ -267,26 +284,34 @@ "\"\"\"Total people affected by TCs in the Philippines in 2013:\"\"\"\n", "\n", "# People affected\n", - "impact_emdat_PHL, countries = emdat_to_impact(emdat_file_path, 'TC', countries='PHL', \\\n", - " year_range=(2013,2013), imp_str=\"Total Affected\")\n", - "\n", - "print('Number of TC events in EM-DAT in the Philipppines, 2013: %i' \\\n", - " %(impact_emdat_PHL.event_id.size))\n", - "print('\\nPeople affected by TC events in the Philippines in 2013 (per event):')\n", + "impact_emdat_PHL, countries = emdat_to_impact(\n", + " emdat_file_path,\n", + " \"TC\",\n", + " countries=\"PHL\",\n", + " year_range=(2013, 2013),\n", + " imp_str=\"Total Affected\",\n", + ")\n", + "\n", + "print(\n", + " \"Number of TC events in EM-DAT in the Philipppines, 2013: %i\"\n", + " % (impact_emdat_PHL.event_id.size)\n", + ")\n", + "print(\"\\nPeople affected by TC events in the Philippines in 2013 (per event):\")\n", "print(impact_emdat_PHL.at_event)\n", - "print('\\nPeople affected by TC events in the Philippines in 2013 (total):')\n", + "print(\"\\nPeople affected by TC events in the Philippines in 2013 (total):\")\n", "print(int(impact_emdat_PHL.aai_agg))\n", "\n", "# Comparison to monetary damages:\n", - "impact_emdat_PHL_USD, _ = emdat_to_impact(emdat_file_path, 'TC', countries='PHL', \\\n", - " year_range=(2013,2013))\n", + "impact_emdat_PHL_USD, _ = emdat_to_impact(\n", + " emdat_file_path, \"TC\", countries=\"PHL\", year_range=(2013, 2013)\n", + ")\n", "\n", "ax = plt.scatter(impact_emdat_PHL_USD.at_event, impact_emdat_PHL.at_event)\n", - "plt.title('Typhoon impacts in the Philippines, 2013')\n", - "plt.xlabel('Total Damage [USD]')\n", - "plt.ylabel('People Affected');\n", - "#plt.xscale('log')\n", - "#plt.yscale('log')" + "plt.title(\"Typhoon impacts in the Philippines, 2013\")\n", + "plt.xlabel(\"Total Damage [USD]\")\n", + "plt.ylabel(\"People Affected\");\n", + "# plt.xscale('log')\n", + "# plt.yscale('log')" ] }, { @@ -352,23 +377,40 @@ "source": [ "\"\"\"Yearly TC damages in the USA, normalized and current\"\"\"\n", "\n", - "yearly_damage_normalized_to_2019 = emdat_impact_yearlysum(emdat_file_path, countries='USA', \\\n", - " hazard='Tropical cyclone', year_range=None, \\\n", - " reference_year=2019)\n", + "yearly_damage_normalized_to_2019 = emdat_impact_yearlysum(\n", + " emdat_file_path,\n", + " countries=\"USA\",\n", + " hazard=\"Tropical cyclone\",\n", + " year_range=None,\n", + " reference_year=2019,\n", + ")\n", "\n", - "yearly_damage_current = emdat_impact_yearlysum(emdat_file_path, 
countries=['USA'], hazard='TC',)\n", + "yearly_damage_current = emdat_impact_yearlysum(\n", + " emdat_file_path,\n", + " countries=[\"USA\"],\n", + " hazard=\"TC\",\n", + ")\n", "\n", "import matplotlib.pyplot as plt\n", "\n", "fig, axis = plt.subplots(1, 1)\n", - "axis.plot(yearly_damage_current.year, yearly_damage_current.impact, 'b', label='USD current value')\n", - "axis.plot(yearly_damage_normalized_to_2019.year, yearly_damage_normalized_to_2019.impact_scaled, \\\n", - " 'r--', label='USD normalized to 2019')\n", + "axis.plot(\n", + " yearly_damage_current.year,\n", + " yearly_damage_current.impact,\n", + " \"b\",\n", + " label=\"USD current value\",\n", + ")\n", + "axis.plot(\n", + " yearly_damage_normalized_to_2019.year,\n", + " yearly_damage_normalized_to_2019.impact_scaled,\n", + " \"r--\",\n", + " label=\"USD normalized to 2019\",\n", + ")\n", "plt.legend()\n", - "axis.set_title('TC damage reported in EM-DAT in the USA')\n", + "axis.set_title(\"TC damage reported in EM-DAT in the USA\")\n", "axis.set_xticks([2000, 2004, 2008, 2012, 2016])\n", - "axis.set_xlabel('year')\n", - "axis.set_ylabel('Total Damage [USD]');\n" + "axis.set_xlabel(\"year\")\n", + "axis.set_ylabel(\"Total Damage [USD]\");" ] } ], diff --git a/doc/tutorial/climada_engine_unsequa.ipynb b/doc/tutorial/climada_engine_unsequa.ipynb index 08558632e..a7f6fabd6 100644 --- a/doc/tutorial/climada_engine_unsequa.ipynb +++ b/doc/tutorial/climada_engine_unsequa.ipynb @@ -154,11 +154,13 @@ "outputs": [], "source": [ "import warnings\n", - "warnings.filterwarnings('ignore') #Ignore warnings for making the tutorial's pdf.\n", "\n", - "#Define the base exposure\n", + "warnings.filterwarnings(\"ignore\") # Ignore warnings for making the tutorial's pdf.\n", + "\n", + "# Define the base exposure\n", "from climada.util.constants import EXP_DEMO_H5\n", "from climada.entity import Exposures\n", + "\n", "exp_base = Exposures.from_hdf5(EXP_DEMO_H5)" ] }, @@ -177,7 +179,7 @@ "# Here x_exp is the input uncertainty parameter and exp_func the inputvar.func.\n", "def exp_func(x_exp, exp_base=exp_base):\n", " exp = exp_base.copy()\n", - " exp.gdf['value'] *= x_exp\n", + " exp.gdf[\"value\"] *= x_exp\n", " return exp" ] }, @@ -197,8 +199,9 @@ "from climada.engine.unsequa import InputVar\n", "import scipy as sp\n", "\n", - "exp_distr = {\"x_exp\": sp.stats.uniform(0.9, 0.2),\n", - " }\n", + "exp_distr = {\n", + " \"x_exp\": sp.stats.uniform(0.9, 0.2),\n", + "}\n", "exp_iv = InputVar(exp_func, exp_distr)" ] }, @@ -249,8 +252,10 @@ ], "source": [ "# Evaluate for a given value of the uncertainty parameters\n", - "exp095 = exp_iv.func(x_exp = 0.95)\n", - "print(f\"Base value is {exp_base.gdf['value'].sum()}, and the value for x_exp=0.95 is {exp095.gdf['value'].sum()}\")" + "exp095 = exp_iv.func(x_exp=0.95)\n", + "print(\n", + " f\"Base value is {exp_base.gdf['value'].sum()}, and the value for x_exp=0.95 is {exp095.gdf['value'].sum()}\"\n", + ")" ] }, { @@ -315,11 +320,12 @@ "m_min, m_max = (1, 2)\n", "n_min, n_max = (1, 2)\n", "\n", + "\n", "# Define the function\n", "# Note that this here works, but might be slow because the method LitPop is called everytime the the function\n", "# is evaluated, and LitPop is relatively slow.\n", "def litpop_cat(m, n):\n", - " exp = Litpop.from_countries('CHE', res_arcsec=150, exponent=[m, n])\n", + " exp = Litpop.from_countries(\"CHE\", res_arcsec=150, exponent=[m, n])\n", " return exp" ] }, @@ -341,9 +347,10 @@ "litpop_dict = {}\n", "for m in range(m_min, m_max + 1):\n", " for n in range(n_min, n_max + 
1):\n", - " exp_mn = LitPop.from_countries('CHE', res_arcsec=150, exponents=[m, n]);\n", + " exp_mn = LitPop.from_countries(\"CHE\", res_arcsec=150, exponents=[m, n])\n", " litpop_dict[(m, n)] = exp_mn\n", "\n", + "\n", "def litpop_cat(m, n, litpop_dict=litpop_dict):\n", " return litpop_dict[(m, n)]" ] @@ -360,16 +367,18 @@ }, "outputs": [], "source": [ - "#Define the distribution dictionnary\n", + "# Define the distribution dictionnary\n", "import scipy as sp\n", "from climada.engine.unsequa import InputVar\n", "\n", "distr_dict = {\n", - " 'm': sp.stats.randint(low=m_min, high=m_max+1),\n", - " 'n': sp.stats.randint(low=n_min, high=n_max+1)\n", - " }\n", + " \"m\": sp.stats.randint(low=m_min, high=m_max + 1),\n", + " \"n\": sp.stats.randint(low=n_min, high=n_max + 1),\n", + "}\n", "\n", - "cat_iv = InputVar(litpop_cat, distr_dict) # One can use either of the above definitions of litpop_cat" + "cat_iv = InputVar(\n", + " litpop_cat, distr_dict\n", + ") # One can use either of the above definitions of litpop_cat" ] }, { @@ -578,8 +587,9 @@ "# Requires internet connection\n", "from climada.util.constants import TEST_UNC_OUTPUT_IMPACT\n", "from climada.util.api_client import Client\n", + "\n", "apiclient = Client()\n", - "ds = apiclient.get_dataset_info(name=TEST_UNC_OUTPUT_IMPACT, status='test_dataset')\n", + "ds = apiclient.get_dataset_info(name=TEST_UNC_OUTPUT_IMPACT, status=\"test_dataset\")\n", "_target_dir, [filename] = apiclient.download_dataset(ds)" ] }, @@ -597,6 +607,7 @@ "source": [ "# If you produced your own data, you do not need the API. Just replace 'filename' with the path to your file.\n", "from climada.engine.unsequa import UncOutput\n", + "\n", "unc_imp = UncOutput.from_hdf5(filename)" ] }, @@ -623,7 +634,7 @@ } ], "source": [ - "unc_imp.plot_uncertainty(metric_list=['aai_agg'], figsize=(12,5));" + "unc_imp.plot_uncertainty(metric_list=[\"aai_agg\"], figsize=(12, 5));" ] }, { @@ -642,8 +653,9 @@ "# Requires internet connection\n", "from climada.util.constants import TEST_UNC_OUTPUT_COSTBEN\n", "from climada.util.api_client import Client\n", + "\n", "apiclient = Client()\n", - "ds = apiclient.get_dataset_info(name=TEST_UNC_OUTPUT_COSTBEN, status='test_dataset')\n", + "ds = apiclient.get_dataset_info(name=TEST_UNC_OUTPUT_COSTBEN, status=\"test_dataset\")\n", "_target_dir, [filename] = apiclient.download_dataset(ds)" ] }, @@ -661,6 +673,7 @@ "source": [ "# If you produced your own data, you do not need the API. 
Just replace 'filename' with the path to your file.\n", "from climada.engine.unsequa import UncOutput\n", + "\n", "unc_cb = UncOutput.from_hdf5(filename)" ] }, @@ -955,25 +968,27 @@ }, "outputs": [], "source": [ - "#Define the input variable functions\n", + "# Define the input variable functions\n", "import numpy as np\n", "\n", "from climada.entity import ImpactFunc, ImpactFuncSet, Exposures\n", "from climada.util.constants import EXP_DEMO_H5, HAZ_DEMO_H5\n", "from climada.hazard import Hazard\n", "\n", + "\n", "def impf_func(G=1, v_half=84.7, vmin=25.7, k=3, _id=1):\n", "\n", " def xhi(v, v_half, vmin):\n", " return max([(v - vmin), 0]) / (v_half - vmin)\n", "\n", " def sigmoid_func(v, G, v_half, vmin, k):\n", - " return G * xhi(v, v_half, vmin)**k / (1 + xhi(v, v_half, vmin)**k)\n", + " return G * xhi(v, v_half, vmin) ** k / (1 + xhi(v, v_half, vmin) ** k)\n", "\n", - " #In-function imports needed only for parallel computing on Windows\n", + " # In-function imports needed only for parallel computing on Windows\n", " import numpy as np\n", " from climada.entity import ImpactFunc, ImpactFuncSet\n", - " intensity_unit = 'm/s'\n", + "\n", + " intensity_unit = \"m/s\"\n", " intensity = np.linspace(0, 150, num=100)\n", " mdd = np.repeat(1, len(intensity))\n", " paa = np.array([sigmoid_func(v, G, v_half, vmin, k) for v in intensity])\n", @@ -982,16 +997,22 @@ " impf_set = ImpactFuncSet([imp_fun])\n", " return impf_set\n", "\n", + "\n", "haz = Hazard.from_hdf5(HAZ_DEMO_H5)\n", "exp_base = Exposures.from_hdf5(EXP_DEMO_H5)\n", - "#It is a good idea to assign the centroids to the base exposures in order to avoid repeating this\n", + "# It is a good idea to assign the centroids to the base exposures in order to avoid repeating this\n", "# potentially costly operation for each sample.\n", "exp_base.assign_centroids(haz)\n", + "\n", + "\n", "def exp_base_func(x_exp, exp_base):\n", " exp = exp_base.copy()\n", - " exp.gdf['value'] *= x_exp\n", + " exp.gdf[\"value\"] *= x_exp\n", " return exp\n", + "\n", + "\n", "from functools import partial\n", + "\n", "exp_func = partial(exp_base_func, exp_base=exp_base)" ] }, @@ -1018,7 +1039,7 @@ ], "source": [ "# Visualization of the parametrized impact function\n", - "impf_func(G=0.8, v_half=80, vmin=30,k=5).plot();" + "impf_func(G=0.8, v_half=80, vmin=30, k=5).plot();" ] }, { @@ -1032,13 +1053,15 @@ }, "outputs": [], "source": [ - "#Define the InputVars\n", + "# Define the InputVars\n", "\n", "import scipy as sp\n", "from climada.engine.unsequa import InputVar\n", "\n", - "exp_distr = {\"x_exp\": sp.stats.beta(10, 1.1)} #This is not really a reasonable distribution but is used\n", - " #here to show that you can use any scipy distribution.\n", + "exp_distr = {\n", + " \"x_exp\": sp.stats.beta(10, 1.1)\n", + "} # This is not really a reasonable distribution but is used\n", + "# here to show that you can use any scipy distribution.\n", "\n", "exp_iv = InputVar(exp_func, exp_distr)\n", "\n", @@ -1046,8 +1069,8 @@ " \"G\": sp.stats.truncnorm(0.5, 1.5),\n", " \"v_half\": sp.stats.uniform(35, 65),\n", " \"vmin\": sp.stats.uniform(0, 15),\n", - " \"k\": sp.stats.uniform(1, 4)\n", - " }\n", + " \"k\": sp.stats.uniform(1, 4),\n", + "}\n", "impf_iv = InputVar(impf_func, impf_distr)" ] }, @@ -1074,8 +1097,9 @@ ], "source": [ "import matplotlib.pyplot as plt\n", - "ax = exp_iv.plot(figsize=(6,4));\n", - "plt.yticks(fontsize=16);\n", + "\n", + "ax = exp_iv.plot(figsize=(6, 4))\n", + "plt.yticks(fontsize=16)\n", "plt.xticks(fontsize=16);" ] }, @@ -1215,7 +1239,7 @@ } ], 
"source": [ - "output_imp = calc_imp.make_sample(N=2**7, sampling_kwargs={'skip_values': 2**8})\n", + "output_imp = calc_imp.make_sample(N=2**7, sampling_kwargs={\"skip_values\": 2**8})\n", "output_imp.get_samples_df().tail()" ] }, @@ -1248,7 +1272,7 @@ } ], "source": [ - "output_imp.plot_sample(figsize=(15,8));" + "output_imp.plot_sample(figsize=(15, 8));" ] }, { @@ -1269,7 +1293,7 @@ }, "outputs": [], "source": [ - "output_imp = calc_imp.uncertainty(output_imp, rp = [50, 100, 250])" + "output_imp = calc_imp.uncertainty(output_imp, rp=[50, 100, 250])" ] }, { @@ -1306,7 +1330,7 @@ } ], "source": [ - "#All the computed uncertainty metrics attribute\n", + "# All the computed uncertainty metrics attribute\n", "output_imp.uncertainty_metrics" ] }, @@ -1384,8 +1408,8 @@ } ], "source": [ - "#One uncertainty dataframe\n", - "output_imp.get_unc_df('aai_agg').tail()" + "# One uncertainty dataframe\n", + "output_imp.get_unc_df(\"aai_agg\").tail()" ] }, { @@ -1519,7 +1543,7 @@ } ], "source": [ - "output_imp.plot_uncertainty(figsize=(12,12));" + "output_imp.plot_uncertainty(figsize=(12, 12));" ] }, { @@ -1552,7 +1576,7 @@ ], "source": [ "# Specific plot for the return period distributions\n", - "output_imp.plot_rp_uncertainty(figsize=(14.3,8));" + "output_imp.plot_rp_uncertainty(figsize=(14.3, 8));" ] }, { @@ -1704,7 +1728,7 @@ } ], "source": [ - "output_imp.get_sens_df('aai_agg').tail()" + "output_imp.get_sens_df(\"aai_agg\").tail()" ] }, { @@ -1824,7 +1848,7 @@ } ], "source": [ - "output_imp.get_sensitivity('S1')" + "output_imp.get_sensitivity(\"S1\")" ] }, { @@ -1918,7 +1942,7 @@ } ], "source": [ - "output_imp.get_largest_si(salib_si='S1')" + "output_imp.get_largest_si(salib_si=\"S1\")" ] }, { @@ -1953,7 +1977,7 @@ ], "source": [ "# Default for 'sobol' is to plot 'S1' sensitivity index.\n", - "output_imp.plot_sensitivity(figsize=(12,8));" + "output_imp.plot_sensitivity(figsize=(12, 8));" ] }, { @@ -1985,7 +2009,7 @@ } ], "source": [ - "output_imp.plot_sensitivity(salib_si = 'ST', figsize=(12,8));" + "output_imp.plot_sensitivity(salib_si=\"ST\", figsize=(12, 8));" ] }, { @@ -2017,7 +2041,7 @@ } ], "source": [ - "output_imp.plot_sensitivity_second_order(figsize=(12,8));" + "output_imp.plot_sensitivity_second_order(figsize=(12, 8));" ] }, { @@ -2050,7 +2074,7 @@ "from climada.engine.unsequa import CalcImpact\n", "\n", "calc_imp2 = CalcImpact(exp_iv, impf_iv, haz)\n", - "output_imp2 = calc_imp2.make_sample(N=1000, sampling_method='latin')" + "output_imp2 = calc_imp2.make_sample(N=1000, sampling_method=\"latin\")" ] }, { @@ -2075,7 +2099,7 @@ } ], "source": [ - "output_imp2.plot_sample(figsize=(15,8));" + "output_imp2.plot_sample(figsize=(15, 8));" ] }, { @@ -2104,13 +2128,15 @@ "import time\n", "\n", "calc_imp2 = CalcImpact(exp_iv, impf_iv, haz)\n", - "output_imp2 = calc_imp2.make_sample(N=1000, sampling_method='latin')\n", + "output_imp2 = calc_imp2.make_sample(N=1000, sampling_method=\"latin\")\n", "\n", "start = time.time()\n", - "output_imp2 = calc_imp2.uncertainty(output_imp2, rp = [50, 100, 250], calc_eai_exp=True, calc_at_event=True, processes=4)\n", + "output_imp2 = calc_imp2.uncertainty(\n", + " output_imp2, rp=[50, 100, 250], calc_eai_exp=True, calc_at_event=True, processes=4\n", + ")\n", "end = time.time()\n", - "time_passed = end-start\n", - "print(f'Time passed with pool: {time_passed}')" + "time_passed = end - start\n", + "print(f\"Time passed with pool: {time_passed}\")" ] }, { @@ -2148,13 +2174,15 @@ ], "source": [ "calc_imp2 = CalcImpact(exp_iv, impf_iv, haz)\n", - "output_imp2 = 
calc_imp2.make_sample(N=1000, sampling_method='latin')\n", + "output_imp2 = calc_imp2.make_sample(N=1000, sampling_method=\"latin\")\n", "\n", "start2 = time.time()\n", - "output_imp2 = calc_imp2.uncertainty(output_imp2, rp = [50, 100, 250], calc_eai_exp=True, calc_at_event=True)\n", + "output_imp2 = calc_imp2.uncertainty(\n", + " output_imp2, rp=[50, 100, 250], calc_eai_exp=True, calc_at_event=True\n", + ")\n", "end2 = time.time()\n", - "time_passed_nopool = end2-start2\n", - "print(f'Time passed without pool: {time_passed_nopool}')" + "time_passed_nopool = end2 - start2\n", + "print(f\"Time passed without pool: {time_passed_nopool}\")" ] }, { @@ -2170,10 +2198,11 @@ "source": [ "# Add the original value of the impacts (without uncertainty) to the uncertainty plot\n", "from climada.engine import ImpactCalc\n", + "\n", "imp = ImpactCalc(exp_base, impf_func(), haz).impact(assign_centroids=False)\n", "aai_agg_o = imp.aai_agg\n", "freq_curve_o = imp.calc_freq_curve([50, 100, 250]).impact\n", - "orig_list = [aai_agg_o] + list(freq_curve_o) +[1]" + "orig_list = [aai_agg_o] + list(freq_curve_o) + [1]" ] }, { @@ -2201,7 +2230,12 @@ "source": [ "# plot the aai_agg and freq_curve uncertainty only\n", "# use logarithmic x-scale\n", - "output_imp2.plot_uncertainty(metric_list=['aai_agg', 'freq_curve'], orig_list=orig_list, log=True, figsize=(12,8));" + "output_imp2.plot_uncertainty(\n", + " metric_list=[\"aai_agg\", \"freq_curve\"],\n", + " orig_list=orig_list,\n", + " log=True,\n", + " figsize=(12, 8),\n", + ");" ] }, { @@ -2217,7 +2251,9 @@ "source": [ "# Use the method 'rbd_fast' which is recommend in pair with 'latin'. In addition, change one of the kwargs\n", "# (M=15) of the salib sampling method.\n", - "output_imp2 = calc_imp2.sensitivity(output_imp2, sensitivity_method='rbd_fast', sensitivity_kwargs = {'M': 15})" + "output_imp2 = calc_imp2.sensitivity(\n", + " output_imp2, sensitivity_method=\"rbd_fast\", sensitivity_kwargs={\"M\": 15}\n", + ")" ] }, { @@ -2345,7 +2381,7 @@ } ], "source": [ - "output_imp2.get_largest_si(salib_si='S1', metric_list=['eai_exp']).tail()" + "output_imp2.get_largest_si(salib_si=\"S1\", metric_list=[\"eai_exp\"]).tail()" ] }, { @@ -2401,16 +2437,17 @@ "from climada.util.constants import EXP_DEMO_H5, HAZ_DEMO_H5\n", "from climada.hazard import Centroids, TCTracks, Hazard, TropCyclone\n", "\n", + "\n", "def impf_func(G=1, v_half=84.7, vmin=25.7, k=3, _id=1):\n", "\n", " def xhi(v, v_half, vmin):\n", " return max([(v - vmin), 0]) / (v_half - vmin)\n", "\n", " def sigmoid_func(v, G, v_half, vmin, k):\n", - " return G * xhi(v, v_half, vmin)**k / (1 + xhi(v, v_half, vmin)**k)\n", + " return G * xhi(v, v_half, vmin) ** k / (1 + xhi(v, v_half, vmin) ** k)\n", "\n", - " #In-function imports needed only for parallel computing on Windows\n", - " intensity_unit = 'm/s'\n", + " # In-function imports needed only for parallel computing on Windows\n", + " intensity_unit = \"m/s\"\n", " intensity = np.linspace(0, 150, num=100)\n", " mdd = np.repeat(1, len(intensity))\n", " paa = np.array([sigmoid_func(v, G, v_half, vmin, k) for v in intensity])\n", @@ -2446,7 +2483,7 @@ "# pack future hazard sets into dictionary - we want to sample from this dictionary later\n", "haz_fut_list = [haz_26, haz_45, haz_60, haz_85]\n", "tc_haz_fut_dict = {}\n", - "for r, rcp in enumerate(['26', '45', '60', '85']):\n", + "for r, rcp in enumerate([\"26\", \"45\", \"60\", \"85\"]):\n", " tc_haz_fut_dict[rcp] = haz_fut_list[r]" ] }, @@ -2457,14 +2494,19 @@ "outputs": [], "source": [ "exp_base = 
Exposures.from_hdf5(EXP_DEMO_H5)\n", - "#It is a good idea to assign the centroids to the base exposures in order to avoid repeating this\n", + "# It is a good idea to assign the centroids to the base exposures in order to avoid repeating this\n", "# potentially costly operation for each sample.\n", "exp_base.assign_centroids(haz)\n", + "\n", + "\n", "def exp_base_func(x_exp, exp_base):\n", " exp = exp_base.copy()\n", - " exp.gdf['value'] *= x_exp\n", + " exp.gdf[\"value\"] *= x_exp\n", " return exp\n", + "\n", + "\n", "from functools import partial\n", + "\n", "exp_func = partial(exp_base_func, exp_base=exp_base)" ] }, @@ -2477,8 +2519,10 @@ "import scipy as sp\n", "from climada.engine.unsequa import InputVar\n", "\n", - "exp_distr = {\"x_exp\": sp.stats.beta(10, 1.1)} #This is not really a reasonable distribution but is used\n", - " #here to show that you can use any scipy distribution.\n", + "exp_distr = {\n", + " \"x_exp\": sp.stats.beta(10, 1.1)\n", + "} # This is not really a reasonable distribution but is used\n", + "# here to show that you can use any scipy distribution.\n", "\n", "exp_iv = InputVar(exp_func, exp_distr)\n", "\n", @@ -2486,8 +2530,8 @@ " \"G\": sp.stats.truncnorm(0.5, 1.5),\n", " \"v_half\": sp.stats.uniform(35, 65),\n", " \"vmin\": sp.stats.uniform(0, 15),\n", - " \"k\": sp.stats.uniform(1, 4)\n", - " }\n", + " \"k\": sp.stats.uniform(1, 4),\n", + "}\n", "impf_iv = InputVar(impf_func, impf_distr)" ] }, @@ -2504,16 +2548,15 @@ "metadata": {}, "outputs": [], "source": [ - "rcp_key = {0: '26',\n", - " 1: '45',\n", - " 2: '60',\n", - " 3: '85'}\n", + "rcp_key = {0: \"26\", 1: \"45\", 2: \"60\", 3: \"85\"}\n", + "\n", "\n", "# future\n", "def haz_fut_func(rcp_scenario):\n", " haz_fut = tc_haz_fut_dict[rcp_key[rcp_scenario]]\n", " return haz_fut\n", "\n", + "\n", "haz_fut_distr = {\"rcp_scenario\": sp.stats.randint(0, 4)}\n", "\n", "haz_fut_iv = InputVar(haz_fut_func, haz_fut_distr)" @@ -2573,8 +2616,8 @@ ], "source": [ "from climada.engine.unsequa import CalcDeltaImpact\n", - "calc_imp = CalcDeltaImpact(exp_iv, impf_iv, haz,\n", - " exp_iv, impf_iv, haz_fut_iv)" + "\n", + "calc_imp = CalcDeltaImpact(exp_iv, impf_iv, haz, exp_iv, impf_iv, haz_fut_iv)" ] }, { @@ -2639,6 +2682,7 @@ ], "source": [ "from climada.engine.unsequa import UncOutput\n", + "\n", "output_imp.plot_uncertainty(calc_delta=True)" ] }, @@ -2687,6 +2731,7 @@ ], "source": [ "from climada.engine.unsequa import UncOutput\n", + "\n", "output_imp.plot_rp_uncertainty(calc_delta=True)" ] }, @@ -2790,39 +2835,50 @@ "from climada.entity import Entity\n", "from climada.hazard import Hazard\n", "\n", + "\n", "# Entity today has an uncertainty in the total asset value\n", "def ent_today_func(x_ent):\n", - " #In-function imports needed only for parallel computing on Windows\n", + " # In-function imports needed only for parallel computing on Windows\n", " from climada.entity import Entity\n", " from climada.util.constants import ENT_DEMO_TODAY\n", + "\n", " entity = Entity.from_excel(ENT_DEMO_TODAY)\n", " entity.exposures.ref_year = 2018\n", - " entity.exposures.gdf['value'] *= x_ent\n", + " entity.exposures.gdf[\"value\"] *= x_ent\n", " return entity\n", "\n", + "\n", "# Entity in the future has a +- 10% uncertainty in the cost of all the adapatation measures\n", "def ent_fut_func(m_fut_cost):\n", - " #In-function imports needed only for parallel computing on Windows\n", + " # In-function imports needed only for parallel computing on Windows\n", " from climada.entity import Entity\n", " from climada.util.constants 
import ENT_DEMO_FUTURE\n", + "\n", " entity = Entity.from_excel(ENT_DEMO_FUTURE)\n", " entity.exposures.ref_year = 2040\n", - " for meas in entity.measures.get_measure('TC'):\n", + " for meas in entity.measures.get_measure(\"TC\"):\n", " meas.cost *= m_fut_cost\n", " return entity\n", "\n", + "\n", "haz_base = Hazard.from_hdf5(HAZ_DEMO_H5)\n", + "\n", + "\n", "# The hazard intensity in the future is also uncertainty by a multiplicative factor\n", "def haz_fut(x_haz_fut, haz_base):\n", - " #In-function imports needed only for parallel computing on Windows\n", + " # In-function imports needed only for parallel computing on Windows\n", " import copy\n", " from climada.hazard import Hazard\n", " from climada.util.constants import HAZ_DEMO_H5\n", + "\n", " haz = copy.deepcopy(haz_base)\n", " haz.intensity = haz.intensity.multiply(x_haz_fut)\n", " return haz\n", + "\n", + "\n", "from functools import partial\n", - "haz_fut_func = partial(haz_fut, haz_base=haz_base)\n" + "\n", + "haz_fut_func = partial(haz_fut, haz_base=haz_base)" ] }, { @@ -2853,10 +2909,12 @@ } ], "source": [ - "costs_1 = [meas.cost for meas in ent_fut_func(1).measures.get_measure('TC')]\n", - "costs_05 = [meas.cost for meas in ent_fut_func(0.5).measures.get_measure('TC')]\n", - "print(f\"\\nThe cost for m_fut_cost=1 are {costs_1}\\n\"\n", - " f\"The cost for m_fut_cost=0.5 are {costs_05}\");" + "costs_1 = [meas.cost for meas in ent_fut_func(1).measures.get_measure(\"TC\")]\n", + "costs_05 = [meas.cost for meas in ent_fut_func(0.5).measures.get_measure(\"TC\")]\n", + "print(\n", + " f\"\\nThe cost for m_fut_cost=1 are {costs_1}\\n\"\n", + " f\"The cost for m_fut_cost=0.5 are {costs_05}\"\n", + ");" ] }, { @@ -2882,14 +2940,15 @@ "\n", "haz_today = haz_base\n", "\n", - "haz_fut_distr = {\"x_haz_fut\": sp.stats.uniform(1, 3),\n", - " }\n", + "haz_fut_distr = {\n", + " \"x_haz_fut\": sp.stats.uniform(1, 3),\n", + "}\n", "haz_fut_iv = InputVar(haz_fut_func, haz_fut_distr)\n", "\n", - "ent_today_distr = {\"x_ent\": sp.stats.uniform(0.7, 1)}\n", + "ent_today_distr = {\"x_ent\": sp.stats.uniform(0.7, 1)}\n", "ent_today_iv = InputVar(ent_today_func, ent_today_distr)\n", "\n", - "ent_fut_distr = {\"m_fut_cost\": sp.stats.norm(1, 0.1)}\n", + "ent_fut_distr = {\"m_fut_cost\": sp.stats.norm(1, 0.1)}\n", "ent_fut_iv = InputVar(ent_fut_func, ent_fut_distr)" ] }, @@ -3042,8 +3101,12 @@ "source": [ "from climada.engine.unsequa import CalcCostBenefit\n", "\n", - "unc_cb = CalcCostBenefit(haz_input_var=haz_today, ent_input_var=ent_today_iv,\n", - " haz_fut_input_var=haz_fut_iv, ent_fut_input_var=ent_fut_iv)" + "unc_cb = CalcCostBenefit(\n", + " haz_input_var=haz_today,\n", + " ent_input_var=ent_today_iv,\n", + " haz_fut_input_var=haz_fut_iv,\n", + " ent_fut_input_var=ent_fut_iv,\n", + ")" ] }, { @@ -3132,7 +3195,7 @@ } ], "source": [ - "output_cb= unc_cb.make_sample(N=10, sampling_kwargs={'calc_second_order':False})\n", + "output_cb = unc_cb.make_sample(N=10, sampling_kwargs={\"calc_second_order\": False})\n", "output_cb.get_samples_df().tail()" ] }, @@ -4626,12 +4689,11 @@ } ], "source": [ - "\n", - "#without pool\n", + "# without pool\n", "output_cb = unc_cb.uncertainty(output_cb)\n", "\n", - "#with pool\n", - "output_cb = unc_cb.uncertainty(output_cb, processes=4)\n" + "# with pool\n", + "output_cb = unc_cb.uncertainty(output_cb, processes=4)" ] }, { @@ -4667,7 +4729,7 @@ } ], "source": [ - "#Top level metrics keys\n", + "# Top level metrics keys\n", "macro_metrics = output_cb.uncertainty_metrics\n", "macro_metrics" ] @@ -4803,7 +4865,7 @@ ], 
"source": [ "# The benefits and cost_ben_ratio are available for each measure\n", - "output_cb.get_uncertainty(metric_list=['benefit', 'cost_ben_ratio']).tail()" + "output_cb.get_uncertainty(metric_list=[\"benefit\", \"cost_ben_ratio\"]).tail()" ] }, { @@ -5073,7 +5135,7 @@ "source": [ "# The impact_meas_present and impact_meas_future provide values of the cost_meas, risk_transf, risk,\n", "# and cost_ins for each measure\n", - "output_cb.get_uncertainty(metric_list=['imp_meas_present']).tail()" + "output_cb.get_uncertainty(metric_list=[\"imp_meas_present\"]).tail()" ] }, { @@ -5106,7 +5168,7 @@ ], "source": [ "# tot_climate_risk and benefit\n", - "output_cb.plot_uncertainty(metric_list=['benefit'], figsize=(12,8));" + "output_cb.plot_uncertainty(metric_list=[\"benefit\"], figsize=(12, 8));" ] }, { @@ -5127,7 +5189,9 @@ }, "outputs": [], "source": [ - "output_cb = unc_cb.sensitivity(output_cb, sensitivity_kwargs={'calc_second_order':False})" + "output_cb = unc_cb.sensitivity(\n", + " output_cb, sensitivity_kwargs={\"calc_second_order\": False}\n", + ")" ] }, { @@ -5161,8 +5225,10 @@ } ], "source": [ - "#plot only certain metrics\n", - "axes = output_cb.plot_sensitivity(metric_list=['cost_ben_ratio','tot_climate_risk','benefit'], figsize=(12,8));" + "# plot only certain metrics\n", + "axes = output_cb.plot_sensitivity(\n", + " metric_list=[\"cost_ben_ratio\", \"tot_climate_risk\", \"benefit\"], figsize=(12, 8)\n", + ");" ] }, { @@ -5216,6 +5282,7 @@ "outputs": [], "source": [ "from climada.util.api_client import Client\n", + "\n", "client = Client()" ] }, @@ -5231,9 +5298,9 @@ "\n", "def get_ws(iso):\n", " properties = {\n", - " 'country_iso3alpha': iso,\n", + " \"country_iso3alpha\": iso,\n", " }\n", - " return client.get_hazard('storm_europe', properties=properties)\n" + " return client.get_hazard(\"storm_europe\", properties=properties)" ] }, { @@ -5242,12 +5309,12 @@ "metadata": {}, "outputs": [], "source": [ - "#Define list of exposures and/or of hazard files\n", + "# Define list of exposures and/or of hazard files\n", "\n", - "exp_list = [get_litpop(iso) for iso in ['CHE', 'DEU', 'ITA']]\n", - "haz_list = [get_ws(iso) for iso in ['CHE', 'DEU', 'ITA']]\n", + "exp_list = [get_litpop(iso) for iso in [\"CHE\", \"DEU\", \"ITA\"]]\n", + "haz_list = [get_ws(iso) for iso in [\"CHE\", \"DEU\", \"ITA\"]]\n", "for exp, haz in zip(exp_list, haz_list):\n", - " exp.gdf['impf_WS'] = 1\n", + " exp.gdf[\"impf_WS\"] = 1\n", " exp.assign_centroids(haz)" ] }, @@ -5257,7 +5324,7 @@ "metadata": {}, "outputs": [], "source": [ - "#Define the input variable\n", + "# Define the input variable\n", "from climada.entity import ImpactFuncSet, Exposures\n", "from climada.entity.impact_funcs.storm_europe import ImpfStormEurope\n", "from climada.hazard import Hazard\n", @@ -5265,31 +5332,40 @@ "import scipy as sp\n", "import copy\n", "\n", + "\n", "def exp_func(cnt, x_exp, exp_list=exp_list):\n", " exp = exp_list[int(cnt)].copy()\n", - " exp.gdf['value'] *= x_exp\n", + " exp.gdf[\"value\"] *= x_exp\n", " return exp\n", "\n", - "exp_distr = {\"x_exp\": sp.stats.uniform(0.9, 0.2),\n", - " \"cnt\": sp.stats.randint(low=0, high=len(exp_list)) #use the same parameter name accross input variables\n", - " }\n", + "\n", + "exp_distr = {\n", + " \"x_exp\": sp.stats.uniform(0.9, 0.2),\n", + " \"cnt\": sp.stats.randint(\n", + " low=0, high=len(exp_list)\n", + " ), # use the same parameter name accross input variables\n", + "}\n", "exp_iv = InputVar(exp_func, exp_distr)\n", "\n", "\n", "def haz_func(cnt, i_haz, 
haz_list=haz_list):\n", - " haz = copy.deepcopy(haz_list[int(cnt)]) #use the same parameter name accross input variables\n", + " haz = copy.deepcopy(\n", + " haz_list[int(cnt)]\n", + " ) # use the same parameter name accross input variables\n", " haz.intensity *= i_haz\n", " return haz\n", "\n", - "haz_distr = {\"i_haz\": sp.stats.norm(1, 0.2),\n", - " \"cnt\": sp.stats.randint(low=0, high=len(haz_list))\n", - " }\n", + "\n", + "haz_distr = {\n", + " \"i_haz\": sp.stats.norm(1, 0.2),\n", + " \"cnt\": sp.stats.randint(low=0, high=len(haz_list)),\n", + "}\n", "haz_iv = InputVar(haz_func, haz_distr)\n", "\n", "impf = ImpfStormEurope.from_schwierz()\n", "impf_set = ImpactFuncSet()\n", "impf_set.append(impf)\n", - "impf_iv = InputVar.impfset([impf_set], bounds_mdd = [0.9, 1.1])" + "impf_iv = InputVar.impfset([impf_set], bounds_mdd=[0.9, 1.1])" ] }, { @@ -5321,7 +5397,7 @@ "metadata": {}, "outputs": [], "source": [ - "output_imp = calc_imp.make_sample(N=2**2, sampling_kwargs={'skip_values': 2**3})\n" + "output_imp = calc_imp.make_sample(N=2**2, sampling_kwargs={\"skip_values\": 2**3})" ] }, { @@ -5457,6 +5533,7 @@ "outputs": [], "source": [ "from climada.util.api_client import Client\n", + "\n", "client = Client()" ] }, @@ -5468,21 +5545,26 @@ "source": [ "def get_litpop_path(iso):\n", " properties = {\n", - " 'country_iso3alpha': iso,\n", - " 'res_arcsec': '150',\n", - " 'exponents': '(1,1)',\n", - " 'fin_mode': 'pc'\n", + " \"country_iso3alpha\": iso,\n", + " \"res_arcsec\": \"150\",\n", + " \"exponents\": \"(1,1)\",\n", + " \"fin_mode\": \"pc\",\n", " }\n", - " litpop_datasets = client.list_dataset_infos(data_type='litpop', properties=properties)\n", + " litpop_datasets = client.list_dataset_infos(\n", + " data_type=\"litpop\", properties=properties\n", + " )\n", " ds = litpop_datasets[0]\n", " download_dir, ds_files = client.download_dataset(ds)\n", " return ds_files[0]\n", "\n", + "\n", "def get_ws_path(iso):\n", " properties = {\n", - " 'country_iso3alpha': iso,\n", + " \"country_iso3alpha\": iso,\n", " }\n", - " hazard_datasets = client.list_dataset_infos(data_type='storm_europe', properties=properties)\n", + " hazard_datasets = client.list_dataset_infos(\n", + " data_type=\"storm_europe\", properties=properties\n", + " )\n", " ds = hazard_datasets[0]\n", " download_dir, ds_files = client.download_dataset(ds)\n", " return ds_files[0]" @@ -5494,10 +5576,10 @@ "metadata": {}, "outputs": [], "source": [ - "#Define list of exposures and/or of hazard files\n", + "# Define list of exposures and/or of hazard files\n", "\n", - "f_exp_list = [get_litpop_path(iso) for iso in ['CHE', 'DEU', 'ITA']]\n", - "f_haz_list = [get_ws_path(iso) for iso in ['CHE', 'DEU', 'ITA']]" + "f_exp_list = [get_litpop_path(iso) for iso in [\"CHE\", \"DEU\", \"ITA\"]]\n", + "f_haz_list = [get_ws_path(iso) for iso in [\"CHE\", \"DEU\", \"ITA\"]]" ] }, { @@ -5506,40 +5588,43 @@ "metadata": {}, "outputs": [], "source": [ - "#Define the input variable for the loading files\n", - "#The trick is to not reload a file if it is already in memory. This is done using a global variable.\n", + "# Define the input variable for the loading files\n", + "# The trick is to not reload a file if it is already in memory. 
This is done using a global variable.\n", "from climada.entity import ImpactFunc, ImpactFuncSet, Exposures\n", "from climada.hazard import Hazard\n", "from climada.engine.unsequa import InputVar\n", "import scipy as sp\n", "import copy\n", "\n", + "\n", "def exp_func(f_exp, x_exp, filename_list=f_exp_list):\n", " filename = filename_list[int(f_exp)]\n", " global exp_base\n", - " if 'exp_base' in globals():\n", + " if \"exp_base\" in globals():\n", " if isinstance(exp_base, Exposures):\n", - " if exp_base.gdf['filename'] != str(filename):\n", + " if exp_base.gdf[\"filename\"] != str(filename):\n", " exp_base = Exposures.from_hdf5(filename)\n", - " exp_base.gdf['filename'] = str(filename)\n", + " exp_base.gdf[\"filename\"] = str(filename)\n", " else:\n", " exp_base = Exposures.from_hdf5(filename)\n", - " exp_base.gdf['filename'] = str(filename)\n", + " exp_base.gdf[\"filename\"] = str(filename)\n", "\n", " exp = exp_base.copy()\n", - " exp.gdf['value'] *= x_exp\n", + " exp.gdf[\"value\"] *= x_exp\n", " return exp\n", "\n", - "exp_distr = {\"x_exp\": sp.stats.uniform(0.9, 0.2),\n", - " \"f_exp\": sp.stats.randint(low=0, high=len(f_exp_list))\n", - " }\n", + "\n", + "exp_distr = {\n", + " \"x_exp\": sp.stats.uniform(0.9, 0.2),\n", + " \"f_exp\": sp.stats.randint(low=0, high=len(f_exp_list)),\n", + "}\n", "exp_iv = InputVar(exp_func, exp_distr)\n", "\n", "\n", "def haz_func(f_haz, i_haz, filename_list=f_haz_list):\n", " filename = filename_list[int(f_haz)]\n", " global haz_base\n", - " if 'haz_base' in globals():\n", + " if \"haz_base\" in globals():\n", " if isinstance(haz_base, Hazard):\n", " if haz_base.filename != str(filename):\n", " haz_base = Hazard.from_hdf5(filename)\n", @@ -5552,9 +5637,11 @@ " haz.intensity *= i_haz\n", " return haz\n", "\n", - "haz_distr = {\"i_haz\": sp.stats.norm(1, 0.2),\n", - " \"f_haz\": sp.stats.randint(low=0, high=len(f_haz_list))\n", - " }\n", + "\n", + "haz_distr = {\n", + " \"i_haz\": sp.stats.norm(1, 0.2),\n", + " \"f_haz\": sp.stats.randint(low=0, high=len(f_haz_list)),\n", + "}\n", "haz_iv = InputVar(haz_func, haz_distr)\n", "\n", "\n", @@ -5564,29 +5651,33 @@ " return max([(v - vmin), 0]) / (v_half - vmin)\n", "\n", " def sigmoid_func(v, G, v_half, vmin, k):\n", - " return G * xhi(v, v_half, vmin)**k / (1 + xhi(v, v_half, vmin)**k)\n", + " return G * xhi(v, v_half, vmin) ** k / (1 + xhi(v, v_half, vmin) ** k)\n", "\n", - " #In-function imports needed only for parallel computing on Windows\n", + " # In-function imports needed only for parallel computing on Windows\n", " import numpy as np\n", " from climada.entity import ImpactFunc, ImpactFuncSet\n", + "\n", " imp_fun = ImpactFunc()\n", - " imp_fun.haz_type = 'WS'\n", + " imp_fun.haz_type = \"WS\"\n", " imp_fun.id = _id\n", - " imp_fun.intensity_unit = 'm/s'\n", + " imp_fun.intensity_unit = \"m/s\"\n", " imp_fun.intensity = np.linspace(0, 150, num=100)\n", " imp_fun.mdd = np.repeat(1, len(imp_fun.intensity))\n", - " imp_fun.paa = np.array([sigmoid_func(v, G, v_half, vmin, k) for v in imp_fun.intensity])\n", + " imp_fun.paa = np.array(\n", + " [sigmoid_func(v, G, v_half, vmin, k) for v in imp_fun.intensity]\n", + " )\n", " imp_fun.check()\n", " impf_set = ImpactFuncSet()\n", " impf_set.append(imp_fun)\n", " return impf_set\n", "\n", + "\n", "impf_distr = {\n", " \"G\": sp.stats.truncnorm(0.5, 1.5),\n", " \"v_half\": sp.stats.uniform(35, 65),\n", " \"vmin\": sp.stats.uniform(0, 15),\n", - " \"k\": sp.stats.uniform(1, 4)\n", - " }\n", + " \"k\": sp.stats.uniform(1, 4),\n", + "}\n", "impf_iv = 
InputVar(impf_func, impf_distr)" ] }, @@ -5615,8 +5706,8 @@ "outputs": [], "source": [ "# Ordering of the samples by hazard first and exposures second\n", - "output_imp = calc_imp.make_sample(N=2**2, sampling_kwargs={'skip_values': 2**3})\n", - "output_imp.order_samples(by=['f_haz', 'f_exp'])" + "output_imp = calc_imp.make_sample(N=2**2, sampling_kwargs={\"skip_values\": 2**3})\n", + "output_imp.order_samples(by=[\"f_haz\", \"f_exp\"])" ] }, { @@ -5633,8 +5724,9 @@ "outputs": [], "source": [ "import matplotlib.pyplot as plt\n", - "e = output_imp.samples_df['f_exp'].values\n", - "h = output_imp.samples_df['f_haz'].values" + "\n", + "e = output_imp.samples_df[\"f_exp\"].values\n", + "h = output_imp.samples_df[\"f_haz\"].values" ] }, { @@ -5650,12 +5742,12 @@ "metadata": {}, "outputs": [], "source": [ - "plt.plot(e, label='exposures');\n", - "plt.plot(h, label='hazards');\n", - "plt.xlabel('samples');\n", - "plt.ylabel('file number');\n", - "plt.title('Order of exposures and hazards files in samples');\n", - "plt.legend(loc='upper right');" + "plt.plot(e, label=\"exposures\")\n", + "plt.plot(h, label=\"hazards\")\n", + "plt.xlabel(\"samples\")\n", + "plt.ylabel(\"file number\")\n", + "plt.title(\"Order of exposures and hazards files in samples\")\n", + "plt.legend(loc=\"upper right\");" ] }, { @@ -5727,4 +5819,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} \ No newline at end of file +} diff --git a/doc/tutorial/climada_engine_unsequa_helper.ipynb b/doc/tutorial/climada_engine_unsequa_helper.ipynb index 831f5f4bd..adad22323 100644 --- a/doc/tutorial/climada_engine_unsequa_helper.ipynb +++ b/doc/tutorial/climada_engine_unsequa_helper.ipynb @@ -37,7 +37,8 @@ "outputs": [], "source": [ "import warnings\n", - "warnings.filterwarnings('ignore') #Ignore warnings for making the tutorial's pdf." + "\n", + "warnings.filterwarnings(\"ignore\") # Ignore warnings for making the tutorial's pdf." 
] }, { @@ -101,9 +102,10 @@ } ], "source": [ - "#Define the base exposure\n", + "# Define the base exposure\n", "from climada.util.constants import EXP_DEMO_H5\n", "from climada.entity import Exposures\n", + "\n", "exp_base = Exposures.from_hdf5(EXP_DEMO_H5)" ] }, @@ -120,8 +122,9 @@ "outputs": [], "source": [ "from climada.engine.unsequa import InputVar\n", - "bounds_totval = [0.9, 1.1] #+- 10% noise on the total exposures value\n", - "bounds_noise = [0.9, 1.2] #-10% - +20% noise each exposures point\n", + "\n", + "bounds_totval = [0.9, 1.1] # +- 10% noise on the total exposures value\n", + "bounds_noise = [0.9, 1.2] # -10% - +20% noise each exposures point\n", "exp_iv = InputVar.exp([exp_base], bounds_totval, bounds_noise)" ] }, @@ -148,10 +151,10 @@ } ], "source": [ - "#The difference in total value between the base exposure and the average input uncertainty exposure\n", - "#due to the random noise on each exposures point (the average change in the total value is 1.0).\n", + "# The difference in total value between the base exposure and the average input uncertainty exposure\n", + "# due to the random noise on each exposures point (the average change in the total value is 1.0).\n", "avg_exp = exp_iv.evaluate()\n", - "(sum(avg_exp.gdf['value']) - sum(exp_base.gdf['value'])) / sum(exp_base.gdf['value'])" + "(sum(avg_exp.gdf[\"value\"]) - sum(exp_base.gdf[\"value\"])) / sum(exp_base.gdf[\"value\"])" ] }, { @@ -177,8 +180,8 @@ } ], "source": [ - "#The values for EN are seeds for the random number generator for the noise sampling and\n", - "#thus are uniformly sampled numbers between (0, 2**32-1)\n", + "# The values for EN are seeds for the random number generator for the noise sampling and\n", + "# thus are uniformly sampled numbers between (0, 2**32-1)\n", "exp_iv.plot();" ] }, @@ -208,19 +211,23 @@ }, "outputs": [], "source": [ - "#Define a generic method to make litpop instances with different exponent pairs.\n", + "# Define a generic method to make litpop instances with different exponent pairs.\n", "from climada.entity import LitPop\n", - "def generate_litpop_base(impf_id, value_unit, haz, assign_centr_kwargs,\n", - " choice_mn, **litpop_kwargs):\n", - " #In-function imports needed only for parallel computing on Windows\n", + "\n", + "\n", + "def generate_litpop_base(\n", + " impf_id, value_unit, haz, assign_centr_kwargs, choice_mn, **litpop_kwargs\n", + "):\n", + " # In-function imports needed only for parallel computing on Windows\n", " from climada.entity import LitPop\n", + "\n", " litpop_base = []\n", " for [m, n] in choice_mn:\n", - " print('\\n Computing litpop for m=%d, n=%d \\n' %(m, n))\n", - " litpop_kwargs['exponents'] = (m, n)\n", + " print(\"\\n Computing litpop for m=%d, n=%d \\n\" % (m, n))\n", + " litpop_kwargs[\"exponents\"] = (m, n)\n", " exp = LitPop.from_countries(**litpop_kwargs)\n", - " exp.gdf['impf_' + haz.haz_type] = impf_id\n", - " exp.gdf.drop('impf_', axis=1, inplace=True)\n", + " exp.gdf[\"impf_\" + haz.haz_type] = impf_id\n", + " exp.gdf.drop(\"impf_\", axis=1, inplace=True)\n", " if value_unit is not None:\n", " exp.value_unit = value_unit\n", " exp.assign_centroids(haz, **assign_centr_kwargs)\n", @@ -248,22 +255,23 @@ } ], "source": [ - "#Define the parameters of the LitPop instances\n", + "# Define the parameters of the LitPop instances\n", "tot_pop = 11.317e6\n", "impf_id = 1\n", - "value_unit = 'people'\n", + "value_unit = \"people\"\n", "litpop_kwargs = {\n", - " 'countries' : ['CUB'],\n", - " 'res_arcsec' : 150,\n", - " 'reference_year' : 2020,\n", 
- " 'fin_mode' : 'norm',\n", - " 'total_values' : [tot_pop]\n", + " \"countries\": [\"CUB\"],\n", + " \"res_arcsec\": 150,\n", + " \"reference_year\": 2020,\n", + " \"fin_mode\": \"norm\",\n", + " \"total_values\": [tot_pop],\n", "}\n", - "assign_centr_kwargs={}\n", + "assign_centr_kwargs = {}\n", "\n", "# The hazard is needed to assign centroids\n", "from climada.util.constants import HAZ_DEMO_H5\n", "from climada.hazard import Hazard\n", + "\n", "haz = Hazard.from_hdf5(HAZ_DEMO_H5)" ] }, @@ -460,11 +468,13 @@ } ], "source": [ - "#Generate the LitPop list\n", + "# Generate the LitPop list\n", "\n", - "choice_mn = [[0, 0.5], [0, 1], [0, 2]] #Choice of exponents m,n\n", + "choice_mn = [[0, 0.5], [0, 1], [0, 2]] # Choice of exponents m,n\n", "\n", - "litpop_list = generate_litpop_base(impf_id, value_unit, haz, assign_centr_kwargs, choice_mn, **litpop_kwargs)\n" + "litpop_list = generate_litpop_base(\n", + " impf_id, value_unit, haz, assign_centr_kwargs, choice_mn, **litpop_kwargs\n", + ")" ] }, { @@ -480,9 +490,9 @@ "outputs": [], "source": [ "from climada.engine.unsequa import InputVar\n", - "bounds_totval = [0.9, 1.1] #+- 10% noise on the total exposures value\n", - "litpop_iv = InputVar.exp(exp_list = litpop_list,\n", - " bounds_totval=bounds_totval)" + "\n", + "bounds_totval = [0.9, 1.1] # +- 10% noise on the total exposures value\n", + "litpop_iv = InputVar.exp(exp_list=litpop_list, bounds_totval=bounds_totval)" ] }, { @@ -848,8 +858,8 @@ } ], "source": [ - "#The values for EN are seeds for the random number generator for the noise sampling and\n", - "#thus are uniformly sampled numbers between (0, 2**32-1)\n", + "# The values for EN are seeds for the random number generator for the noise sampling and\n", + "# thus are uniformly sampled numbers between (0, 2**32-1)\n", "litpop_iv.plot();" ] }, @@ -912,9 +922,10 @@ } ], "source": [ - "#Define the base exposure\n", + "# Define the base exposure\n", "from climada.util.constants import HAZ_DEMO_H5\n", "from climada.hazard import Hazard\n", + "\n", "haz_base = Hazard.from_hdf5(HAZ_DEMO_H5)" ] }, @@ -931,10 +942,13 @@ "outputs": [], "source": [ "from climada.engine.unsequa import InputVar\n", - "bounds_freq = [0.9, 1.1] #+- 10% noise on the frequency of all events\n", - "bounds_int = None #No uncertainty on the intensity\n", + "\n", + "bounds_freq = [0.9, 1.1] # +- 10% noise on the frequency of all events\n", + "bounds_int = None # No uncertainty on the intensity\n", "n_ev = None\n", - "haz_iv = InputVar.haz([haz_base], n_ev=n_ev, bounds_freq=bounds_freq, bounds_int=bounds_int)" + "haz_iv = InputVar.haz(\n", + " [haz_base], n_ev=n_ev, bounds_freq=bounds_freq, bounds_int=bounds_int\n", + ")" ] }, { @@ -960,8 +974,8 @@ } ], "source": [ - "#The difference in frequency for HF=1.1 is indeed 10%.\n", - "haz_high_freq = haz_iv.evaluate(HE=n_ev, HI=None, HF = 1.1)\n", + "# The difference in frequency for HF=1.1 is indeed 10%.\n", + "haz_high_freq = haz_iv.evaluate(HE=n_ev, HI=None, HF=1.1)\n", "(sum(haz_high_freq.frequency) - sum(haz_base.frequency)) / sum(haz_base.frequency)" ] }, @@ -977,12 +991,18 @@ }, "outputs": [], "source": [ - "bounds_freq = [0.9, 1.1] #+- 10% noise on the frequency of all events\n", - "bounds_int = None #No uncertainty on the intensity\n", - "bounds_frac = [0.7, 1.1] #noise on the fraction of all events\n", - "n_ev = round(0.8 * haz_base.size) #sub-sample with re-draw events to obtain hazards with n=0.8*tot_number_events\n", + "bounds_freq = [0.9, 1.1] # +- 10% noise on the frequency of all events\n", + "bounds_int = None # 
No uncertainty on the intensity\n", + "bounds_frac = [0.7, 1.1] # noise on the fraction of all events\n", + "n_ev = round(\n", + " 0.8 * haz_base.size\n", + ") # sub-sample with re-draw events to obtain hazards with n=0.8*tot_number_events\n", "haz_iv = InputVar.haz(\n", - " [haz_base], n_ev=n_ev, bounds_freq=bounds_freq, bounds_int=bounds_int, bounds_frac=bounds_frac\n", + " [haz_base],\n", + " n_ev=n_ev,\n", + " bounds_freq=bounds_freq,\n", + " bounds_int=bounds_int,\n", + " bounds_frac=bounds_frac,\n", ")" ] }, @@ -1007,9 +1027,12 @@ "outputs": [], "source": [ "import numpy as np\n", - "HE = 2618981871 #The random seed (number between 0 and 2**32)\n", - "rng = np.random.RandomState(int(HE)) #Initialize a random state with the seed\n", - "chosen_ev = list(rng.choice(haz_base.event_name, int(n_ev))) #Obtain the corresponding events" + "\n", + "HE = 2618981871 # The random seed (number between 0 and 2**32)\n", + "rng = np.random.RandomState(int(HE)) # Initialize a random state with the seed\n", + "chosen_ev = list(\n", + " rng.choice(haz_base.event_name, int(n_ev))\n", + ") # Obtain the corresponding events" ] }, { @@ -1035,7 +1058,7 @@ } ], "source": [ - "#The first event is\n", + "# The first event is\n", "chosen_ev[0]" ] }, @@ -1062,8 +1085,8 @@ } ], "source": [ - "#The values for HE are seeds for the random number generator for the noise sampling and\n", - "#thus are uniformly sampled numbers between (0, 2**32-1)\n", + "# The values for HE are seeds for the random number generator for the noise sampling and\n", + "# thus are uniformly sampled numbers between (0, 2**32-1)\n", "haz_iv.plot();" ] }, @@ -1098,9 +1121,9 @@ } ], "source": [ - "#The number of events per sample is equal to n_ev\n", - "haz_sub = haz_iv.evaluate(HE=928165924, HI=None, HF = 1.1, HA=None)\n", - "#The number for HE is irrelevant, as all samples have the same n_Ev\n", + "# The number of events per sample is equal to n_ev\n", + "haz_sub = haz_iv.evaluate(HE=928165924, HI=None, HF=1.1, HA=None)\n", + "# The number for HE is irrelevant, as all samples have the same n_Ev\n", "haz_sub.size - n_ev" ] }, @@ -1149,6 +1172,7 @@ "outputs": [], "source": [ "from climada.entity import ImpactFuncSet, ImpfTropCyclone\n", + "\n", "impf = ImpfTropCyclone.from_emanuel_usa()\n", "impf_set_base = ImpactFuncSet([impf])" ] @@ -1174,14 +1198,17 @@ "outputs": [], "source": [ "from climada.engine.unsequa import InputVar\n", - "bounds_impfi = [-10, 10] #-10 m/s ; +10m/s uncertainty on the intensity\n", - "bounds_mdd = [0.7, 1.1] #-30% - +10% uncertainty on the mdd\n", - "bounds_paa = None #No uncertainty in the paa\n", - "impf_iv = InputVar.impfset(impf_set_list=[impf_set_base],\n", - " bounds_impfi=bounds_impfi,\n", - " bounds_mdd=bounds_mdd,\n", - " bounds_paa=bounds_paa,\n", - " haz_id_dict={'TC': [1]})" + "\n", + "bounds_impfi = [-10, 10] # -10 m/s ; +10m/s uncertainty on the intensity\n", + "bounds_mdd = [0.7, 1.1] # -30% - +10% uncertainty on the mdd\n", + "bounds_paa = None # No uncertainty in the paa\n", + "impf_iv = InputVar.impfset(\n", + " impf_set_list=[impf_set_base],\n", + " bounds_impfi=bounds_impfi,\n", + " bounds_mdd=bounds_mdd,\n", + " bounds_paa=bounds_paa,\n", + " haz_id_dict={\"TC\": [1]},\n", + ")" ] }, { @@ -1207,11 +1234,11 @@ } ], "source": [ - "#Plot the impact function for 50 random samples (note for the expert, these are not global)\n", + "# Plot the impact function for 50 random samples (note for the expert, these are not global)\n", "n = 50\n", "ax = impf_iv.evaluate().plot()\n", - "inten = 
impf_iv.distr_dict['IFi'].rvs(size=n)\n", - "mdd = impf_iv.distr_dict['MDD'].rvs(size=n)\n", + "inten = impf_iv.distr_dict[\"IFi\"].rvs(size=n)\n", + "mdd = impf_iv.distr_dict[\"MDD\"].rvs(size=n)\n", "for i, m in zip(inten, mdd):\n", " impf_iv.evaluate(IFi=i, MDD=m).plot(axis=ax)\n", "ax.get_legend().remove()" @@ -1286,6 +1313,7 @@ "source": [ "from climada.entity import Entity\n", "from climada.util.constants import ENT_DEMO_TODAY\n", + "\n", "ent = Entity.from_excel(ENT_DEMO_TODAY)\n", "ent.exposures.ref_year = 2018\n", "ent.check()" @@ -1304,11 +1332,12 @@ "outputs": [], "source": [ "from climada.engine.unsequa import InputVar\n", + "\n", "ent_iv = InputVar.ent(\n", - " impf_set_list = [ent.impact_funcs],\n", - " disc_rate = ent.disc_rates,\n", - " exp_list = [ent.exposures],\n", - " meas_set = ent.measures,\n", + " impf_set_list=[ent.impact_funcs],\n", + " disc_rate=ent.disc_rates,\n", + " exp_list=[ent.exposures],\n", + " meas_set=ent.measures,\n", " bounds_disc=[0, 0.08],\n", " bounds_cost=[0.5, 1.5],\n", " bounds_totval=[0.9, 1.1],\n", @@ -1316,8 +1345,8 @@ " bounds_mdd=[0.9, 1.05],\n", " bounds_paa=None,\n", " bounds_impfi=[-2, 5],\n", - " haz_id_dict={'TC': [1]}\n", - " )" + " haz_id_dict={\"TC\": [1]},\n", + ")" ] }, { @@ -1367,19 +1396,23 @@ }, "outputs": [], "source": [ - "#Define a generic method to make litpop instances with different exponent pairs.\n", + "# Define a generic method to make litpop instances with different exponent pairs.\n", "from climada.entity import LitPop\n", - "def generate_litpop_base(impf_id, value_unit, haz, assign_centr_kwargs,\n", - " choice_mn, **litpop_kwargs):\n", - " #In-function imports needed only for parallel computing on Windows\n", + "\n", + "\n", + "def generate_litpop_base(\n", + " impf_id, value_unit, haz, assign_centr_kwargs, choice_mn, **litpop_kwargs\n", + "):\n", + " # In-function imports needed only for parallel computing on Windows\n", " from climada.entity import LitPop\n", + "\n", " litpop_base = []\n", " for [m, n] in choice_mn:\n", - " print('\\n Computing litpop for m=%d, n=%d \\n' %(m, n))\n", - " litpop_kwargs['exponents'] = (m, n)\n", + " print(\"\\n Computing litpop for m=%d, n=%d \\n\" % (m, n))\n", + " litpop_kwargs[\"exponents\"] = (m, n)\n", " exp = LitPop.from_countries(**litpop_kwargs)\n", - " exp.gdf['impf_' + haz.haz_type] = impf_id\n", - " exp.gdf.drop('impf_', axis=1, inplace=True)\n", + " exp.gdf[\"impf_\" + haz.haz_type] = impf_id\n", + " exp.gdf.drop(\"impf_\", axis=1, inplace=True)\n", " if value_unit is not None:\n", " exp.value_unit = value_unit\n", " exp.assign_centroids(haz, **assign_centr_kwargs)\n", @@ -1407,19 +1440,20 @@ } ], "source": [ - "#Define the parameters of the LitPop instances\n", + "# Define the parameters of the LitPop instances\n", "impf_id = 1\n", "value_unit = None\n", "litpop_kwargs = {\n", - " 'countries' : ['CUB'],\n", - " 'res_arcsec' : 300,\n", - " 'reference_year' : 2020,\n", + " \"countries\": [\"CUB\"],\n", + " \"res_arcsec\": 300,\n", + " \"reference_year\": 2020,\n", "}\n", - "assign_centr_kwargs={}\n", + "assign_centr_kwargs = {}\n", "\n", "# The hazard is needed to assign centroids\n", "from climada.util.constants import HAZ_DEMO_H5\n", "from climada.hazard import Hazard\n", + "\n", "haz = Hazard.from_hdf5(HAZ_DEMO_H5)" ] }, @@ -1661,11 +1695,13 @@ } ], "source": [ - "#Generate the LitPop list\n", + "# Generate the LitPop list\n", "\n", - "choice_mn = [[1, 0.5], [0.5, 1], [1, 1]] #Choice of exponents m,n\n", + "choice_mn = [[1, 0.5], [0.5, 1], [1, 1]] # Choice of exponents 
m,n\n", "\n", - "litpop_list = generate_litpop_base(impf_id, value_unit, haz, assign_centr_kwargs, choice_mn, **litpop_kwargs)\n" + "litpop_list = generate_litpop_base(\n", + " impf_id, value_unit, haz, assign_centr_kwargs, choice_mn, **litpop_kwargs\n", + ")" ] }, { @@ -1693,6 +1729,7 @@ "source": [ "from climada.entity import Entity\n", "from climada.util.constants import ENT_DEMO_TODAY\n", + "\n", "ent = Entity.from_excel(ENT_DEMO_TODAY)\n", "ent.exposures.ref_year = 2020\n", "ent.check()" @@ -1711,11 +1748,12 @@ "outputs": [], "source": [ "from climada.engine.unsequa import InputVar\n", + "\n", "ent_iv = InputVar.ent(\n", - " impf_set_list = [ent.impact_funcs],\n", - " disc_rate = ent.disc_rates,\n", - " exp_list = litpop_list,\n", - " meas_set = ent.measures,\n", + " impf_set_list=[ent.impact_funcs],\n", + " disc_rate=ent.disc_rates,\n", + " exp_list=litpop_list,\n", + " meas_set=ent.measures,\n", " bounds_disc=[0, 0.08],\n", " bounds_cost=[0.5, 1.5],\n", " bounds_totval=[0.9, 1.1],\n", @@ -1723,8 +1761,8 @@ " bounds_mdd=[0.9, 1.05],\n", " bounds_paa=None,\n", " bounds_impfi=[-2, 5],\n", - " haz_id_dict={'TC': [1]}\n", - " )" + " haz_id_dict={\"TC\": [1]},\n", + ")" ] }, { @@ -1847,16 +1885,16 @@ "outputs": [], "source": [ "entfut_iv = InputVar.entfut(\n", - " impf_set_list = [ent_fut.impact_funcs],\n", - " exp_list = [ent_fut.exposures],\n", - " meas_set = ent_fut.measures,\n", + " impf_set_list=[ent_fut.impact_funcs],\n", + " exp_list=[ent_fut.exposures],\n", + " meas_set=ent_fut.measures,\n", " bounds_cost=[0.6, 1.2],\n", " bounds_eg=[0.8, 1.5],\n", " bounds_noise=None,\n", " bounds_mdd=[0.7, 0.9],\n", " bounds_paa=[1.3, 2],\n", - " haz_id_dict={'TC': [1]}\n", - " )" + " haz_id_dict={\"TC\": [1]},\n", + ")" ] }, { @@ -1879,19 +1917,23 @@ }, "outputs": [], "source": [ - "#Define a generic method to make litpop instances with different exponent pairs.\n", + "# Define a generic method to make litpop instances with different exponent pairs.\n", "from climada.entity import LitPop\n", - "def generate_litpop_base(impf_id, value_unit, haz, assign_centr_kwargs,\n", - " choice_mn, **litpop_kwargs):\n", - " #In-function imports needed only for parallel computing on Windows\n", + "\n", + "\n", + "def generate_litpop_base(\n", + " impf_id, value_unit, haz, assign_centr_kwargs, choice_mn, **litpop_kwargs\n", + "):\n", + " # In-function imports needed only for parallel computing on Windows\n", " from climada.entity import LitPop\n", + "\n", " litpop_base = []\n", " for [m, n] in choice_mn:\n", - " print('\\n Computing litpop for m=%d, n=%d \\n' %(m, n))\n", - " litpop_kwargs['exponents'] = (m, n)\n", + " print(\"\\n Computing litpop for m=%d, n=%d \\n\" % (m, n))\n", + " litpop_kwargs[\"exponents\"] = (m, n)\n", " exp = LitPop.from_countries(**litpop_kwargs)\n", - " exp.gdf['impf_' + haz.haz_type] = impf_id\n", - " exp.gdf.drop('impf_', axis=1, inplace=True)\n", + " exp.gdf[\"impf_\" + haz.haz_type] = impf_id\n", + " exp.gdf.drop(\"impf_\", axis=1, inplace=True)\n", " if value_unit is not None:\n", " exp.value_unit = value_unit\n", " exp.assign_centroids(haz, **assign_centr_kwargs)\n", @@ -1919,19 +1961,20 @@ } ], "source": [ - "#Define the parameters of the LitPop instances\n", + "# Define the parameters of the LitPop instances\n", "impf_id = 1\n", "value_unit = None\n", "litpop_kwargs = {\n", - " 'countries' : ['CUB'],\n", - " 'res_arcsec' : 300,\n", - " 'reference_year' : 2040,\n", + " \"countries\": [\"CUB\"],\n", + " \"res_arcsec\": 300,\n", + " \"reference_year\": 2040,\n", "}\n", - 
"assign_centr_kwargs={}\n", + "assign_centr_kwargs = {}\n", "\n", "# The hazard is needed to assign centroids\n", "from climada.util.constants import HAZ_DEMO_H5\n", "from climada.hazard import Hazard\n", + "\n", "haz = Hazard.from_hdf5(HAZ_DEMO_H5)" ] }, @@ -2306,11 +2349,13 @@ } ], "source": [ - "#Generate the LitPop list\n", + "# Generate the LitPop list\n", "\n", - "choice_mn = [[1, 0.5], [0.5, 1], [1, 1]] #Choice of exponents m,n\n", + "choice_mn = [[1, 0.5], [0.5, 1], [1, 1]] # Choice of exponents m,n\n", "\n", - "litpop_list = generate_litpop_base(impf_id, value_unit, haz, assign_centr_kwargs, choice_mn, **litpop_kwargs)\n" + "litpop_list = generate_litpop_base(\n", + " impf_id, value_unit, haz, assign_centr_kwargs, choice_mn, **litpop_kwargs\n", + ")" ] }, { @@ -2358,17 +2403,18 @@ "outputs": [], "source": [ "from climada.engine.unsequa import InputVar\n", + "\n", "entfut_iv = InputVar.entfut(\n", - " impf_set_list = [ent_fut.impact_funcs],\n", - " exp_list = litpop_list,\n", - " meas_set = ent_fut.measures,\n", + " impf_set_list=[ent_fut.impact_funcs],\n", + " exp_list=litpop_list,\n", + " meas_set=ent_fut.measures,\n", " bounds_cost=[0.6, 1.2],\n", " bounds_eg=[0.8, 1.5],\n", " bounds_noise=None,\n", " bounds_mdd=[0.7, 0.9],\n", " bounds_paa=[1.3, 2],\n", - " haz_id_dict={'TC': [1]}\n", - " )" + " haz_id_dict={\"TC\": [1]},\n", + ")" ] } ], diff --git a/doc/tutorial/climada_entity_DiscRates.ipynb b/doc/tutorial/climada_entity_DiscRates.ipynb index acb33de01..375e2167f 100644 --- a/doc/tutorial/climada_entity_DiscRates.ipynb +++ b/doc/tutorial/climada_entity_DiscRates.ipynb @@ -74,11 +74,11 @@ "# Compute net present value between present year and future year.\n", "ini_year = 2019\n", "end_year = 2050\n", - "val_years = np.zeros(end_year-ini_year+1)\n", - "val_years[0] = 100000000 # initial investment\n", - "val_years[10:] = 75000 # maintenance from 10th year\n", + "val_years = np.zeros(end_year - ini_year + 1)\n", + "val_years[0] = 100000000 # initial investment\n", + "val_years[10:] = 75000 # maintenance from 10th year\n", "npv = disc.net_present_value(ini_year, end_year, val_years)\n", - "print('net present value: {:.5e}'.format(npv))" + "print(\"net present value: {:.5e}\".format(npv))" ] }, { @@ -135,8 +135,8 @@ "from climada.util import ENT_TEMPLATE_XLS\n", "\n", "# Fill DataFrame from Excel file\n", - "file_name = ENT_TEMPLATE_XLS # provide absolute path of the excel file\n", - "print('Read file:', ENT_TEMPLATE_XLS)\n", + "file_name = ENT_TEMPLATE_XLS # provide absolute path of the excel file\n", + "print(\"Read file:\", ENT_TEMPLATE_XLS)\n", "disc = DiscRates.from_excel(file_name)\n", "disc.plot();" ] @@ -170,11 +170,11 @@ "from climada.util import ENT_TEMPLATE_XLS\n", "\n", "# Fill DataFrame from Excel file\n", - "file_name = ENT_TEMPLATE_XLS # provide absolute path of the excel file\n", + "file_name = ENT_TEMPLATE_XLS # provide absolute path of the excel file\n", "disc = DiscRates.from_excel(file_name)\n", "\n", "# write file\n", - "disc.write_excel('results/tutorial_disc.xlsx')" + "disc.write_excel(\"results/tutorial_disc.xlsx\")" ] }, { @@ -192,8 +192,9 @@ "outputs": [], "source": [ "from climada.util.save import save\n", + "\n", "# this generates a results folder in the current path and stores the output there\n", - "save('tutorial_disc.p', disc)" + "save(\"tutorial_disc.p\", disc)" ] } ], diff --git a/doc/tutorial/climada_entity_Exposures.ipynb b/doc/tutorial/climada_entity_Exposures.ipynb index b5db1520e..d46903e8f 100644 --- 
a/doc/tutorial/climada_entity_Exposures.ipynb +++ b/doc/tutorial/climada_entity_Exposures.ipynb @@ -112,13 +112,15 @@ "# Fill a pandas DataFrame with the 3 mandatory variables (latitude, longitude, value) for a number of assets (10'000).\n", "# We will do this with random dummy data for purely illustrative reasons:\n", "exp_df = DataFrame()\n", - "n_exp = 100*100\n", + "n_exp = 100 * 100\n", "# provide value\n", - "exp_df['value'] = np.arange(n_exp)\n", + "exp_df[\"value\"] = np.arange(n_exp)\n", "# provide latitude and longitude\n", - "lat, lon = np.mgrid[15 : 35 : complex(0, np.sqrt(n_exp)), 20 : 40 : complex(0, np.sqrt(n_exp))]\n", - "exp_df['latitude'] = lat.flatten()\n", - "exp_df['longitude'] = lon.flatten()" + "lat, lon = np.mgrid[\n", + " 15 : 35 : complex(0, np.sqrt(n_exp)), 20 : 40 : complex(0, np.sqrt(n_exp))\n", + "]\n", + "exp_df[\"latitude\"] = lat.flatten()\n", + "exp_df[\"longitude\"] = lon.flatten()" ] }, { @@ -131,7 +133,7 @@ "# In this case, we only specify the IDs for tropical cyclone (TC); here, each exposure entry will be treated with\n", "# the same impact function: the one that has ID '1':\n", "# Of course, this will only be relevant at later steps during impact calculations.\n", - "exp_df['impf_TC'] = np.ones(n_exp, int)" + "exp_df[\"impf_TC\"] = np.ones(n_exp, int)" ] }, { @@ -156,8 +158,8 @@ ], "source": [ "# Let's have a look at the pandas DataFrame\n", - "print('exp_df is a DataFrame:', str(type(exp_df)))\n", - "print('exp_df looks like:')\n", + "print(\"exp_df is a DataFrame:\", str(type(exp_df)))\n", + "print(\"exp_df looks like:\")\n", "print(exp_df.head())" ] }, @@ -195,12 +197,12 @@ "# Generate Exposures from the pandas DataFrame. This step converts the DataFrame into\n", "# a CLIMADA Exposures instance!\n", "exp = Exposures(exp_df)\n", - "print('exp has the type:', str(type(exp)))\n", - "print('and contains a GeoDataFrame exp.gdf:', str(type(exp.gdf)))\n", + "print(\"exp has the type:\", str(type(exp)))\n", + "print(\"and contains a GeoDataFrame exp.gdf:\", str(type(exp.gdf)))\n", "\n", "# set geometry attribute (shapely Points) from GeoDataFrame from latitude and longitude\n", "exp.set_geometry_points()\n", - "print('\\n' + 'check method logs:')\n", + "print(\"\\n\" + \"check method logs:\")\n", "\n", "# always apply the check() method in the end. 
It puts metadata that has not been assigned,\n", "# and points out missing mandatory data\n", @@ -243,7 +245,7 @@ ], "source": [ "# let's have a look at the Exposures instance we created!\n", - "print('\\n' + 'exp looks like:')\n", + "print(\"\\n\" + \"exp looks like:\")\n", "print(exp)" ] }, @@ -292,9 +294,9 @@ "from climada.entity import Exposures\n", "\n", "# Read spatial info from an external file into GeoDataFrame\n", - "world = gpd.read_file(gpd.datasets.get_path('naturalearth_cities'))\n", - "print('World is a GeoDataFrame:', str(type(world)))\n", - "print('World looks like:')\n", + "world = gpd.read_file(gpd.datasets.get_path(\"naturalearth_cities\"))\n", + "print(\"World is a GeoDataFrame:\", str(type(world)))\n", + "print(\"World looks like:\")\n", "print(world.head())" ] }, @@ -317,9 +319,9 @@ "# Generate Exposures: value, latitude and longitude for each exposure entry.\n", "# Convert GeoDataFrame into Exposure instance\n", "exp_gpd = Exposures(world)\n", - "print('\\n' + 'exp_gpd is an Exposures:', str(type(exp_gpd)))\n", + "print(\"\\n\" + \"exp_gpd is an Exposures:\", str(type(exp_gpd)))\n", "# add random values to entries\n", - "exp_gpd.gdf['value'] = np.arange(world.shape[0])\n", + "exp_gpd.gdf[\"value\"] = np.arange(world.shape[0])\n", "# set latitude and longitude attributes from geometry\n", "exp_gpd.set_lat_lon()" ] @@ -348,8 +350,8 @@ "# In this case, we only specify the IDs for tropical cyclone (TC); here, each exposure entry will be treated with\n", "# the same impact function: the one that has ID '1':\n", "# Of course, this will only be relevant at later steps during impact calculations.\n", - "exp_gpd.gdf['impf_TC'] = np.ones(world.shape[0], int)\n", - "print('\\n' + 'check method logs:')\n", + "exp_gpd.gdf[\"impf_TC\"] = np.ones(world.shape[0], int)\n", + "print(\"\\n\" + \"check method logs:\")\n", "\n", "# as always, run check method to assign meta-data and check for missing mandatory variables.\n", "exp_gpd.check()" @@ -414,7 +416,7 @@ ], "source": [ "# let's have a look at the Exposures instance we created!\n", - "print('\\n' + '\\x1b[1;03;30;30m' + 'exp_gpd looks like:' + '\\x1b[0m')\n", + "print(\"\\n\" + \"\\x1b[1;03;30;30m\" + \"exp_gpd looks like:\" + \"\\x1b[0m\")\n", "print(exp_gpd)" ] }, @@ -536,7 +538,7 @@ "sel_exp = exp_gpd.copy() # to keep the original exp_gpd Exposures data\n", "sel_exp.gdf = sel_exp.gdf.cx[:, -5:5]\n", "\n", - "print('\\n' + 'sel_exp contains a subset of the original data')\n", + "print(\"\\n\" + \"sel_exp contains a subset of the original data\")\n", "sel_exp.gdf.head()" ] }, @@ -659,13 +661,14 @@ "source": [ "# Example 2: extract data in a polygon\n", "from shapely.geometry import Polygon\n", + "\n", "sel_polygon = exp_gpd.copy() # to keep the original exp_gpd Exposures data\n", "\n", "poly = Polygon([(0, -10), (0, 10), (10, 5)])\n", "sel_polygon.gdf = sel_polygon.gdf[sel_polygon.gdf.intersects(poly)]\n", "\n", "# Let's have a look. 
Again, the sub-selection is a GeoDataFrame!\n", - "print('\\n' + 'sel_exp contains a subset of the original data')\n", + "print(\"\\n\" + \"sel_exp contains a subset of the original data\")\n", "sel_polygon.gdf" ] }, @@ -799,8 +802,10 @@ "# Example 3: change coordinate reference system\n", "# use help to see more options: help(sel_exp.to_crs)\n", "sel_polygon.to_crs(epsg=3395, inplace=True)\n", - "print('\\n' + 'the crs has changed to ' +str(sel_polygon.crs))\n", - "print('the values for latitude and longitude are now according to the new coordinate system: ')\n", + "print(\"\\n\" + \"the crs has changed to \" + str(sel_polygon.crs))\n", + "print(\n", + " \"the values for latitude and longitude are now according to the new coordinate system: \"\n", + ")\n", "sel_polygon.gdf" ] }, @@ -922,8 +927,8 @@ "exp_all = Exposures.concat([sel_polygon, sel_exp.to_crs(epsg=3395)])\n", "\n", "# the output is of type Exposures\n", - "print('exp_all type and number of rows:', type(exp_all), exp_all.gdf.shape[0])\n", - "print('number of unique rows:', exp_all.gdf.drop_duplicates().shape[0])\n", + "print(\"exp_all type and number of rows:\", type(exp_all), exp_all.gdf.shape[0])\n", + "print(\"number of unique rows:\", exp_all.gdf.drop_duplicates().shape[0])\n", "\n", "# NaNs will appear in the missing values\n", "exp_all.gdf.head()" @@ -1103,8 +1108,8 @@ "exp_templ = pd.read_excel(file_name)\n", "\n", "# Let's have a look at the data:\n", - "print('exp_templ is a DataFrame:', str(type(exp_templ)))\n", - "print('exp_templ looks like:')\n", + "print(\"exp_templ is a DataFrame:\", str(type(exp_templ)))\n", + "print(\"exp_templ looks like:\")\n", "exp_templ.head()" ] }, @@ -1145,14 +1150,14 @@ "source": [ "# Generate an Exposures instance from the dataframe.\n", "exp_templ = Exposures(exp_templ)\n", - "print('\\n' + 'exp_templ is now an Exposures:', str(type(exp_templ)))\n", + "print(\"\\n\" + \"exp_templ is now an Exposures:\", str(type(exp_templ)))\n", "\n", "# set geometry attribute (shapely Points) from GeoDataFrame from latitude and longitude\n", - "print('\\n' + 'set_geometry logs:')\n", + "print(\"\\n\" + \"set_geometry logs:\")\n", "exp_templ.set_geometry_points()\n", "# as always, run check method to include metadata and check for missing mandatory parameters\n", "\n", - "print('\\n' + 'check exp_templ:')\n", + "print(\"\\n\" + \"check exp_templ:\")\n", "exp_templ.check()" ] }, @@ -1314,7 +1319,7 @@ ], "source": [ "# Let's have a look at our Exposures instance!\n", - "print('\\n' + 'exp_templ.gdf looks like:')\n", + "print(\"\\n\" + \"exp_templ.gdf looks like:\")\n", "exp_templ.gdf.head()" ] }, @@ -1347,7 +1352,7 @@ "\n", "# We take an example with a dummy raster file (HAZ_DEMO_FL), running the method set_from_raster directly loads the\n", "# necessary info from the file into an Exposures instance.\n", - "exp_raster = Exposures.from_raster(HAZ_DEMO_FL, window= Window(10, 20, 50, 60))\n", + "exp_raster = Exposures.from_raster(HAZ_DEMO_FL, window=Window(10, 20, 50, 60))\n", "# There are several keyword argument options that come with the set_from_raster method (such as\n", "# specifying a window, if not the entire file should be read, or a bounding box. Check them out." 
] @@ -1376,7 +1381,7 @@ "source": [ "# As always, run the check method, such that metadata can be assigned and checked for missing mandatory parameters.\n", "exp_raster.check()\n", - "print('Meta:', exp_raster.meta)" + "print(\"Meta:\", exp_raster.meta)" ] }, { @@ -1475,7 +1480,7 @@ ], "source": [ "# Let's have a look at the Exposures instance!\n", - "print('\\n' + 'exp_raster looks like:')\n", + "print(\"\\n\" + \"exp_raster looks like:\")\n", "exp_raster.gdf.head()" ] }, @@ -1567,7 +1572,7 @@ ], "source": [ "# Example 1: plot_hexbin method\n", - "print('Plotting exp_df.')\n", + "print(\"Plotting exp_df.\")\n", "axs = exp.plot_hexbin();\n", "\n", "# further methods to check out:\n", @@ -1606,7 +1611,7 @@ "source": [ "# Example 2: plot_scatter method\n", "\n", - "exp_gpd.to_crs('epsg:3035', inplace=True)\n", + "exp_gpd.to_crs(\"epsg:3035\", inplace=True)\n", "exp_gpd.plot_scatter(pop_name=False);" ] }, @@ -1637,9 +1642,19 @@ ], "source": [ "# Example 3: plot_raster method\n", - "from climada.util.plot import add_cntry_names # use climada's plotting utilities\n", - "ax = exp.plot_raster(); # plot with same resolution as data\n", - "add_cntry_names(ax, [exp.gdf['longitude'].min(), exp.gdf['longitude'].max(), exp.gdf['latitude'].min(), exp.gdf['latitude'].max()])\n", + "from climada.util.plot import add_cntry_names # use climada's plotting utilities\n", + "\n", + "ax = exp.plot_raster()\n", + "# plot with same resolution as data\n", + "add_cntry_names(\n", + " ax,\n", + " [\n", + " exp.gdf[\"longitude\"].min(),\n", + " exp.gdf[\"longitude\"].max(),\n", + " exp.gdf[\"latitude\"].min(),\n", + " exp.gdf[\"latitude\"].max(),\n", + " ],\n", + ")\n", "\n", "# use keyword argument save_tiff='filepath.tiff' to save the corresponding raster in tiff format\n", "# use keyword argument raster_res='desired number' to change resolution of the raster." @@ -1674,11 +1689,16 @@ "source": [ "# Example 4: plot_basemap method\n", "import contextily as ctx\n", + "\n", "# select the background image from the available ctx.providers\n", - "ax = exp_templ.plot_basemap(buffer=30000, cmap='brg'); # using Positron from CartoDB\n", - "ax = exp_templ.plot_basemap(buffer=30000, cmap='brg',\n", - " url=ctx.providers.OpenStreetMap.Mapnik, # Using OpenStreetmap,\n", - " zoom=9); # select the zoom level of the map, affects the font size of labelled objects" + "ax = exp_templ.plot_basemap(buffer=30000, cmap=\"brg\")\n", + "# using Positron from CartoDB\n", + "ax = exp_templ.plot_basemap(\n", + " buffer=30000,\n", + " cmap=\"brg\",\n", + " url=ctx.providers.OpenStreetMap.Mapnik, # Using OpenStreetmap,\n", + " zoom=9,\n", + "); # select the zoom level of the map, affects the font size of labelled objects" ] }, { @@ -1718,7 +1738,7 @@ ], "source": [ "# other visualization types\n", - "exp_templ.gdf.hist(column='value');" + "exp_templ.gdf.hist(column=\"value\");" ] }, { @@ -1737,12 +1757,15 @@ "metadata": {}, "outputs": [], "source": [ - "import fiona; fiona.supported_drivers\n", + "import fiona\n", + "\n", + "fiona.supported_drivers\n", "from climada import CONFIG\n", + "\n", "results = CONFIG.local_data.save_dir.dir()\n", "\n", "# DataFrame save to csv format. 
geometry writen as string, metadata not saved!\n", - "exp_templ.gdf.to_csv(results.joinpath('exp_templ.csv'), sep='\\t')" + "exp_templ.gdf.to_csv(results.joinpath(\"exp_templ.csv\"), sep=\"\\t\")" ] }, { @@ -1752,7 +1775,7 @@ "outputs": [], "source": [ "# write as hdf5 file\n", - "exp_templ.write_hdf5(results.joinpath('exp_temp.h5'))" + "exp_templ.write_hdf5(results.joinpath(\"exp_temp.h5\"))" ] }, { @@ -1771,8 +1794,9 @@ "source": [ "# save in pickle format\n", "from climada.util.save import save\n", + "\n", "# this generates a results folder in the current path and stores the output there\n", - "save('exp_templ.pkl.p', exp_templ) # creates results folder and stores there" + "save(\"exp_templ.pkl.p\", exp_templ) # creates results folder and stores there" ] }, { @@ -1814,7 +1838,7 @@ "source": [ "# set_geometry_points is expensive for big exposures\n", "# for small amount of data, the execution time might be even greater when using dask\n", - "exp.gdf.drop(columns=['geometry'], inplace=True)\n", + "exp.gdf.drop(columns=[\"geometry\"], inplace=True)\n", "print(exp.gdf.head())\n", "%time exp.set_geometry_points(scheduler='processes')\n", "print(exp.gdf.head())" diff --git a/doc/tutorial/climada_entity_Exposures_polygons_lines.ipynb b/doc/tutorial/climada_entity_Exposures_polygons_lines.ipynb index 22c5827f2..904d00f4d 100644 --- a/doc/tutorial/climada_entity_Exposures_polygons_lines.ipynb +++ b/doc/tutorial/climada_entity_Exposures_polygons_lines.ipynb @@ -58,11 +58,13 @@ "from climada.entity.impact_funcs.storm_europe import ImpfStormEurope\n", "from climada.entity import Exposures\n", "\n", - "HAZ = Client().get_hazard('storm_europe', name='test_haz_WS_nl', status='test_dataset');\n", + "HAZ = Client().get_hazard(\"storm_europe\", name=\"test_haz_WS_nl\", status=\"test_dataset\")\n", "\n", - "EXP_POLY = Client().get_exposures('base', name='test_polygon_exp', status='test_dataset');\n", - "EXP_LINE = Client().get_exposures('base', name='test_line_exp', status='test_dataset');\n", - "EXP_POINT = Client().get_exposures('base', name='test_point_exp', status='test_dataset');\n", + "EXP_POLY = Client().get_exposures(\n", + " \"base\", name=\"test_polygon_exp\", status=\"test_dataset\"\n", + ")\n", + "EXP_LINE = Client().get_exposures(\"base\", name=\"test_line_exp\", status=\"test_dataset\")\n", + "EXP_POINT = Client().get_exposures(\"base\", name=\"test_point_exp\", status=\"test_dataset\")\n", "\n", "EXP_MIX = Exposures.concat([EXP_POLY, EXP_LINE, EXP_POINT])\n", "\n", @@ -109,15 +111,20 @@ } ], "source": [ - "#disaggregate in the same CRS as the exposures are defined (here degrees), resolution 1degree\n", - "#divide values on points\n", - "#aggregate by summing\n", + "# disaggregate in the same CRS as the exposures are defined (here degrees), resolution 1degree\n", + "# divide values on points\n", + "# aggregate by summing\n", "\n", "impact = u_lp.calc_geom_impact(\n", - " exp=EXP_MIX, impf_set=IMPF_SET, haz=HAZ,\n", - " res=0.2, to_meters=False, disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None,\n", - " agg_met=u_lp.AggMethod.SUM\n", - " )" + " exp=EXP_MIX,\n", + " impf_set=IMPF_SET,\n", + " haz=HAZ,\n", + " res=0.2,\n", + " to_meters=False,\n", + " disagg_met=u_lp.DisaggMethod.DIV,\n", + " disagg_val=None,\n", + " agg_met=u_lp.AggMethod.SUM,\n", + ")" ] }, { @@ -170,15 +177,20 @@ } ], "source": [ - "#disaggregate in meters\n", - "#same value for each point, fixed to 1 (allows to get percentages of affected surface/distance)\n", - "#aggregate by summing\n", + "# disaggregate in meters\n", + 
"# same value for each point, fixed to 1 (allows to get percentages of affected surface/distance)\n", + "# aggregate by summing\n", "\n", "impact = u_lp.calc_geom_impact(\n", - " exp=EXP_MIX, impf_set=IMPF_SET, haz=HAZ,\n", - " res=1000, to_meters=True, disagg_met=u_lp.DisaggMethod.FIX, disagg_val=1.0,\n", - " agg_met=u_lp.AggMethod.SUM\n", - " );" + " exp=EXP_MIX,\n", + " impf_set=IMPF_SET,\n", + " haz=HAZ,\n", + " res=1000,\n", + " to_meters=True,\n", + " disagg_met=u_lp.DisaggMethod.FIX,\n", + " disagg_val=1.0,\n", + " agg_met=u_lp.AggMethod.SUM,\n", + ");" ] }, { @@ -206,7 +218,10 @@ ], "source": [ "import matplotlib.pyplot as plt\n", - "ax = u_lp.plot_eai_exp_geom(impact, legend_kwds={'label': 'percentage', 'orientation': 'horizontal'})" + "\n", + "ax = u_lp.plot_eai_exp_geom(\n", + " impact, legend_kwds={\"label\": \"percentage\", \"orientation\": \"horizontal\"}\n", + ")" ] }, { @@ -282,36 +297,60 @@ " from climada_petals.entity.exposures.black_marble import country_iso_geom\n", "\n", " # open the file containing the Netherlands admin-1 polygons\n", - " shp_file = shapereader.natural_earth(resolution='10m',\n", - " category='cultural',\n", - " name='admin_0_countries')\n", + " shp_file = shapereader.natural_earth(\n", + " resolution=\"10m\", category=\"cultural\", name=\"admin_0_countries\"\n", + " )\n", " shp_file = shapereader.Reader(shp_file)\n", "\n", " # extract the NL polygons\n", - " prov_names = {'Netherlands': ['Groningen', 'Drenthe',\n", - " 'Overijssel', 'Gelderland',\n", - " 'Limburg', 'Zeeland',\n", - " 'Noord-Brabant', 'Zuid-Holland',\n", - " 'Noord-Holland', 'Friesland',\n", - " 'Flevoland', 'Utrecht']\n", - " }\n", - " polygon_Netherlands, polygons_prov_NL = country_iso_geom(prov_names,\n", - " shp_file)\n", - " prov_geom_NL = {prov: geom for prov, geom in zip(list(prov_names.values())[0], list(polygons_prov_NL.values())[0])}\n", + " prov_names = {\n", + " \"Netherlands\": [\n", + " \"Groningen\",\n", + " \"Drenthe\",\n", + " \"Overijssel\",\n", + " \"Gelderland\",\n", + " \"Limburg\",\n", + " \"Zeeland\",\n", + " \"Noord-Brabant\",\n", + " \"Zuid-Holland\",\n", + " \"Noord-Holland\",\n", + " \"Friesland\",\n", + " \"Flevoland\",\n", + " \"Utrecht\",\n", + " ]\n", + " }\n", + " polygon_Netherlands, polygons_prov_NL = country_iso_geom(prov_names, shp_file)\n", + " prov_geom_NL = {\n", + " prov: geom\n", + " for prov, geom in zip(\n", + " list(prov_names.values())[0], list(polygons_prov_NL.values())[0]\n", + " )\n", + " }\n", "\n", " # assign a value to each admin-1 area (assumption 100'000 USD per inhabitant)\n", - " population_prov_NL = {'Drenthe':493449, 'Flevoland':422202,\n", - " 'Friesland':649988, 'Gelderland':2084478,\n", - " 'Groningen':585881, 'Limburg':1118223,\n", - " 'Noord-Brabant':2562566, 'Noord-Holland':2877909,\n", - " 'Overijssel':1162215, 'Zuid-Holland':3705625,\n", - " 'Utrecht':1353596, 'Zeeland':383689}\n", - " value_prov_NL = {n: 100000 * population_prov_NL[n] for n in population_prov_NL.keys()}\n", + " population_prov_NL = {\n", + " \"Drenthe\": 493449,\n", + " \"Flevoland\": 422202,\n", + " \"Friesland\": 649988,\n", + " \"Gelderland\": 2084478,\n", + " \"Groningen\": 585881,\n", + " \"Limburg\": 1118223,\n", + " \"Noord-Brabant\": 2562566,\n", + " \"Noord-Holland\": 2877909,\n", + " \"Overijssel\": 1162215,\n", + " \"Zuid-Holland\": 3705625,\n", + " \"Utrecht\": 1353596,\n", + " \"Zeeland\": 383689,\n", + " }\n", + " value_prov_NL = {\n", + " n: 100000 * population_prov_NL[n] for n in population_prov_NL.keys()\n", + " }\n", "\n", " # combine 
into GeoDataFrame and add a coordinate reference system to it:\n", - " df1 = pd.DataFrame.from_dict(population_prov_NL, orient='index', columns=['population']).join(\n", - " pd.DataFrame.from_dict(value_prov_NL, orient='index', columns=['value']))\n", - " df1['geometry'] = [prov_geom_NL[prov] for prov in df1.index]\n", + " df1 = pd.DataFrame.from_dict(\n", + " population_prov_NL, orient=\"index\", columns=[\"population\"]\n", + " ).join(pd.DataFrame.from_dict(value_prov_NL, orient=\"index\", columns=[\"value\"]))\n", + " df1[\"geometry\"] = [prov_geom_NL[prov] for prov in df1.index]\n", " gdf_polys = gpd.GeoDataFrame(df1)\n", " gdf_polys = gdf_polys.set_crs(epsg=4326)\n", " return gdf_polys" @@ -417,7 +456,7 @@ ], "source": [ "exp_nl_poly = Exposures(gdf_poly())\n", - "exp_nl_poly.gdf['impf_WS'] = 1\n", + "exp_nl_poly.gdf[\"impf_WS\"] = 1\n", "exp_nl_poly.gdf.head()" ] }, @@ -456,7 +495,7 @@ ], "source": [ "# take a look\n", - "exp_nl_poly.gdf.plot('value', legend=True, cmap='OrRd')" + "exp_nl_poly.gdf.plot(\"value\", legend=True, cmap=\"OrRd\")" ] }, { @@ -557,9 +596,13 @@ ], "source": [ "imp_deg = u_lp.calc_geom_impact(\n", - " exp=exp_nl_poly, impf_set=impf_set, haz=storms,\n", - " res=0.005, disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None,\n", - " agg_met=u_lp.AggMethod.SUM\n", + " exp=exp_nl_poly,\n", + " impf_set=impf_set,\n", + " haz=storms,\n", + " res=0.005,\n", + " disagg_met=u_lp.DisaggMethod.DIV,\n", + " disagg_val=None,\n", + " agg_met=u_lp.AggMethod.SUM,\n", ")" ] }, @@ -621,9 +664,14 @@ ], "source": [ "imp_m = u_lp.calc_geom_impact(\n", - " exp=exp_nl_poly, impf_set=impf_set, haz=storms,\n", - " res=500, to_meters=True, disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None,\n", - " agg_met=u_lp.AggMethod.SUM\n", + " exp=exp_nl_poly,\n", + " impf_set=impf_set,\n", + " haz=storms,\n", + " res=500,\n", + " to_meters=True,\n", + " disagg_met=u_lp.DisaggMethod.DIV,\n", + " disagg_val=None,\n", + " agg_met=u_lp.AggMethod.SUM,\n", ")" ] }, @@ -711,15 +759,14 @@ }, "outputs": [], "source": [ - "#regular grid from exposures bounds\n", + "# regular grid from exposures bounds\n", "import climada.util.coordinates as u_coord\n", + "\n", "res = 0.1\n", "(_, _, xmax, ymax) = exp_nl_poly.gdf.geometry.bounds.max()\n", "(xmin, ymin, _, _) = exp_nl_poly.gdf.geometry.bounds.min()\n", "bounds = (xmin, ymin, xmax, ymax)\n", - "height, width, trafo = u_coord.pts_to_raster_meta(\n", - " bounds, (res, res)\n", - " )\n", + "height, width, trafo = u_coord.pts_to_raster_meta(bounds, (res, res))\n", "x_grid, y_grid = u_coord.raster_to_meshgrid(trafo, width, height)" ] }, @@ -747,9 +794,13 @@ ], "source": [ "imp_g = u_lp.calc_grid_impact(\n", - " exp=exp_nl_poly, impf_set=impf_set, haz=storms,\n", - " grid=(x_grid, y_grid), disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None,\n", - " agg_met=u_lp.AggMethod.SUM\n", + " exp=exp_nl_poly,\n", + " impf_set=impf_set,\n", + " haz=storms,\n", + " grid=(x_grid, y_grid),\n", + " disagg_met=u_lp.DisaggMethod.DIV,\n", + " disagg_val=None,\n", + " agg_met=u_lp.AggMethod.SUM,\n", ")" ] }, @@ -931,8 +982,11 @@ "source": [ "# Disaggregate exposure to 10'000 metre grid, each point gets average value within polygon.\n", "exp_pnt = u_lp.exp_geom_to_pnt(\n", - " exp_nl_poly, res=10000, to_meters=True,\n", - " disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None\n", + " exp_nl_poly,\n", + " res=10000,\n", + " to_meters=True,\n", + " disagg_met=u_lp.DisaggMethod.DIV,\n", + " disagg_val=None,\n", ")\n", "exp_pnt.gdf.head()" ] @@ -1073,8 +1127,12 @@ "source": [ "# Disaggregate 
exposure to 0.1° grid, no value disaggregation specified --> replicate initial value\n", "exp_pnt2 = u_lp.exp_geom_to_pnt(\n", - " exp_nl_poly, res=0.1, to_meters=False,\n", - " disagg_met=u_lp.DisaggMethod.FIX, disagg_val=None)\n", + " exp_nl_poly,\n", + " res=0.1,\n", + " to_meters=False,\n", + " disagg_met=u_lp.DisaggMethod.FIX,\n", + " disagg_val=None,\n", + ")\n", "exp_pnt2.gdf.head()" ] }, @@ -1214,8 +1272,12 @@ "# Disaggregate exposure to 1'000 metre grid, each point gets value corresponding to\n", "# its representative area (1'000^2).\n", "exp_pnt3 = u_lp.exp_geom_to_pnt(\n", - " exp_nl_poly, res=1000, to_meters=True,\n", - " disagg_met=u_lp.DisaggMethod.FIX, disagg_val=10e6)\n", + " exp_nl_poly,\n", + " res=1000,\n", + " to_meters=True,\n", + " disagg_met=u_lp.DisaggMethod.FIX,\n", + " disagg_val=10e6,\n", + ")\n", "exp_pnt3.gdf.head()" ] }, @@ -1355,8 +1417,12 @@ "# Disaggregate exposure to 1'000 metre grid, each point gets value corresponding to 1\n", "# After dissagregation, each point has a value equal to the percentage of area of the polygon\n", "exp_pnt4 = u_lp.exp_geom_to_pnt(\n", - " exp_nl_poly, res=1000, to_meters=True,\n", - " disagg_met=u_lp.DisaggMethod.DIV, disagg_val=1)\n", + " exp_nl_poly,\n", + " res=1000,\n", + " to_meters=True,\n", + " disagg_met=u_lp.DisaggMethod.DIV,\n", + " disagg_val=1,\n", + ")\n", "exp_pnt4.gdf.tail()" ] }, @@ -1494,19 +1560,18 @@ ], "source": [ "# disaggregate on pre-defined grid\n", - "#regular grid from exposures bounds\n", + "# regular grid from exposures bounds\n", "import climada.util.coordinates as u_coord\n", + "\n", "res = 0.1\n", "(_, _, xmax, ymax) = exp_nl_poly.gdf.geometry.bounds.max()\n", "(xmin, ymin, _, _) = exp_nl_poly.gdf.geometry.bounds.min()\n", "bounds = (xmin, ymin, xmax, ymax)\n", - "height, width, trafo = u_coord.pts_to_raster_meta(\n", - " bounds, (res, res)\n", - " )\n", + "height, width, trafo = u_coord.pts_to_raster_meta(bounds, (res, res))\n", "x_grid, y_grid = u_coord.raster_to_meshgrid(trafo, width, height)\n", "exp_pnt5 = u_lp.exp_geom_to_grid(\n", - " exp_nl_poly, grid=(x_grid, y_grid),\n", - " disagg_met=u_lp.DisaggMethod.DIV, disagg_val=1)\n", + " exp_nl_poly, grid=(x_grid, y_grid), disagg_met=u_lp.DisaggMethod.DIV, disagg_val=1\n", + ")\n", "exp_pnt5.gdf.tail()" ] }, @@ -1589,7 +1654,7 @@ ], "source": [ "# Plot point-impacts and aggregated impacts\n", - "imp_pnt.plot_hexbin_eai_exposure();\n", + "imp_pnt.plot_hexbin_eai_exposure()\n", "u_lp.plot_eai_exp_geom(imp_geom);" ] }, @@ -1727,7 +1792,7 @@ "outputs": [], "source": [ "def gdf_lines():\n", - " gdf_lines = gpd.read_file(Path(DEMO_DIR,'nl_rails.gpkg'))\n", + " gdf_lines = gpd.read_file(Path(DEMO_DIR, \"nl_rails.gpkg\"))\n", " gdf_lines = gdf_lines.to_crs(epsg=4326)\n", " return gdf_lines" ] @@ -1832,8 +1897,8 @@ ], "source": [ "exp_nl_lines = Exposures(gdf_lines())\n", - "exp_nl_lines.gdf['impf_WS'] = 1\n", - "exp_nl_lines.gdf['value'] = 1\n", + "exp_nl_lines.gdf[\"impf_WS\"] = 1\n", + "exp_nl_lines.gdf[\"value\"] = 1\n", "exp_nl_lines.gdf.head()" ] }, @@ -1861,7 +1926,7 @@ } ], "source": [ - "exp_nl_lines.gdf.plot('value', cmap='inferno');" + "exp_nl_lines.gdf.plot(\"value\", cmap=\"inferno\");" ] }, { @@ -1911,9 +1976,13 @@ ], "source": [ "imp_deg = u_lp.calc_geom_impact(\n", - " exp=exp_nl_lines, impf_set=impf_set, haz=storms,\n", - " res=0.005, disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None,\n", - " agg_met=u_lp.AggMethod.SUM\n", + " exp=exp_nl_lines,\n", + " impf_set=impf_set,\n", + " haz=storms,\n", + " res=0.005,\n", + " 
disagg_met=u_lp.DisaggMethod.DIV,\n", + " disagg_val=None,\n", + " agg_met=u_lp.AggMethod.SUM,\n", ")" ] }, @@ -1975,9 +2044,14 @@ ], "source": [ "imp_m = u_lp.calc_geom_impact(\n", - " exp=exp_nl_lines, impf_set=impf_set, haz=storms,\n", - " res=500, to_meters=True, disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None,\n", - " agg_met=u_lp.AggMethod.SUM\n", + " exp=exp_nl_lines,\n", + " impf_set=impf_set,\n", + " haz=storms,\n", + " res=500,\n", + " to_meters=True,\n", + " disagg_met=u_lp.DisaggMethod.DIV,\n", + " disagg_val=None,\n", + " agg_met=u_lp.AggMethod.SUM,\n", ")" ] }, @@ -2028,8 +2102,11 @@ ], "source": [ "import numpy as np\n", + "\n", "diff = np.max((imp_deg.eai_exp - imp_m.eai_exp) / imp_deg.eai_exp)\n", - "print(f\"The largest relative different between degrees and meters impact in this example is {diff}\")" + "print(\n", + " f\"The largest relative different between degrees and meters impact in this example is {diff}\"\n", + ")" ] }, { @@ -2184,7 +2261,11 @@ "source": [ "# 0.1° distance between points, average value disaggregation\n", "exp_pnt = u_lp.exp_geom_to_pnt(\n", - " exp_nl_lines, res=0.1, to_meters=False, disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None\n", + " exp_nl_lines,\n", + " res=0.1,\n", + " to_meters=False,\n", + " disagg_met=u_lp.DisaggMethod.DIV,\n", + " disagg_val=None,\n", ")\n", "exp_pnt.gdf.head()" ] @@ -2317,7 +2398,11 @@ "source": [ "# 1000m distance between points, no value disaggregation\n", "exp_pnt2 = u_lp.exp_geom_to_pnt(\n", - " exp_nl_lines, res=1000, to_meters=True, disagg_met=u_lp.DisaggMethod.FIX, disagg_val=None\n", + " exp_nl_lines,\n", + " res=1000,\n", + " to_meters=True,\n", + " disagg_met=u_lp.DisaggMethod.FIX,\n", + " disagg_val=None,\n", ")\n", "exp_pnt2.gdf.head()" ] @@ -2450,7 +2535,11 @@ "source": [ "# 1000m distance between points, equal value disaggregation\n", "exp_pnt3 = u_lp.exp_geom_to_pnt(\n", - " exp_nl_lines, res=1000, to_meters=True, disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None\n", + " exp_nl_lines,\n", + " res=1000,\n", + " to_meters=True,\n", + " disagg_met=u_lp.DisaggMethod.DIV,\n", + " disagg_val=None,\n", ")\n", "exp_pnt3.gdf.head()" ] @@ -2583,7 +2672,11 @@ "source": [ "# 1000m distance between points, disaggregation of value according to representative distance\n", "exp_pnt4 = u_lp.exp_geom_to_pnt(\n", - " exp_nl_lines, res=1000, to_meters=True, disagg_met=u_lp.DisaggMethod.FIX, disagg_val=1000\n", + " exp_nl_lines,\n", + " res=1000,\n", + " to_meters=True,\n", + " disagg_met=u_lp.DisaggMethod.FIX,\n", + " disagg_val=1000,\n", ")\n", "exp_pnt4.gdf.head()" ] diff --git a/doc/tutorial/climada_entity_ImpactFuncSet.ipynb b/doc/tutorial/climada_entity_ImpactFuncSet.ipynb index 2702aa60f..6df482925 100644 --- a/doc/tutorial/climada_entity_ImpactFuncSet.ipynb +++ b/doc/tutorial/climada_entity_ImpactFuncSet.ipynb @@ -113,7 +113,7 @@ ")\n", "\n", "# check if the all the attributes are set correctly\n", - "imp_fun.check()\n" + "imp_fun.check()" ] }, { @@ -131,7 +131,7 @@ ], "source": [ "# Calculate the mdr at hazard intensity 18.7 m/s\n", - "print('Mean damage ratio at intensity 18.7 m/s: ', imp_fun.calc_mdr(18.7))" + "print(\"Mean damage ratio at intensity 18.7 m/s: \", imp_fun.calc_mdr(18.7))" ] }, { @@ -282,7 +282,7 @@ "imp_fun_3.check()\n", "\n", "# add the 2 impact functions into ImpactFuncSet\n", - "imp_fun_set = ImpactFuncSet([imp_fun_1, imp_fun_3])\n" + "imp_fun_set = ImpactFuncSet([imp_fun_1, imp_fun_3])" ] }, { @@ -345,7 +345,7 @@ ], "source": [ "# extract the TC impact function with id 1\n", - "impf_tc_1 = 
imp_fun_set.get_func('TC', 1)\n", + "impf_tc_1 = imp_fun_set.get_func(\"TC\", 1)\n", "# plot the impact function\n", "impf_tc_1.plot();" ] @@ -404,7 +404,7 @@ ], "source": [ "# removing the TC impact function with id 3\n", - "imp_fun_set.remove_func('TC', 3)\n", + "imp_fun_set.remove_func(\"TC\", 3)\n", "# plot all the remaining impact functions in imp_fun_set\n", "imp_fun_set.plot();" ] @@ -464,7 +464,7 @@ "# plot all the impact functions from the ImpactFuncSet\n", "imp_set_xlsx.plot()\n", "# adjust the plots\n", - "plt.subplots_adjust(right=1., top=4., hspace=0.4, wspace=0.4)" + "plt.subplots_adjust(right=1.0, top=4.0, hspace=0.4, wspace=0.4)" ] }, { @@ -483,7 +483,7 @@ "outputs": [], "source": [ "# write imp_set_xlsx into an excel file\n", - "imp_set_xlsx.write_excel('tutorial_impf_set.xlsx')" + "imp_set_xlsx.write_excel(\"tutorial_impf_set.xlsx\")" ] }, { @@ -512,7 +512,7 @@ "from climada.util.save import save\n", "\n", "# this generates a results folder in the current path and stores the output there\n", - "save('tutorial_impf_set.p', imp_set_xlsx)" + "save(\"tutorial_impf_set.p\", imp_set_xlsx)" ] }, { @@ -563,7 +563,7 @@ "# plot all the impact functions\n", "imp_fun_set_TC.plot()\n", "# adjust the plots\n", - "plt.subplots_adjust(right=1., top=4., hspace=0.4, wspace=0.4)" + "plt.subplots_adjust(right=1.0, top=4.0, hspace=0.4, wspace=0.4)" ] } ], diff --git a/doc/tutorial/climada_entity_LitPop.ipynb b/doc/tutorial/climada_entity_LitPop.ipynb index 8625fe394..56c2d065a 100644 --- a/doc/tutorial/climada_entity_LitPop.ipynb +++ b/doc/tutorial/climada_entity_LitPop.ipynb @@ -155,15 +155,19 @@ "source": [ "# Initiate a default LitPop exposure entity for Switzerland and Liechtenstein (ISO3-Codes 'CHE' and 'LIE'):\n", "try:\n", - " exp = LitPop.from_countries(['CHE', 'Liechtenstein']) # you can provide either single countries or a list of countries\n", + " exp = LitPop.from_countries(\n", + " [\"CHE\", \"Liechtenstein\"]\n", + " ) # you can provide either single countries or a list of countries\n", "except FileExistsError as err:\n", - " print(\"Reason for error: The GPW population data has not been downloaded, c.f. section 'Input data' above.\")\n", + " print(\n", + " \"Reason for error: The GPW population data has not been downloaded, c.f. 
section 'Input data' above.\"\n", + " )\n", " raise err\n", - "exp.plot_scatter();\n", + "exp.plot_scatter()\n", "\n", "# Note that `exp.gdf['region_id']` is a number identifying each country:\n", - "print('\\n Region IDs (`region_id`) in this exposure:')\n", - "print(exp.gdf['region_id'].unique())" + "print(\"\\n Region IDs (`region_id`) in this exposure:\")\n", + "print(exp.gdf[\"region_id\"].unique())" ] }, { @@ -240,9 +244,12 @@ ], "source": [ "# Initiate a LitPop exposure entity for Costa Rica with varied resolution, fin_mode, and exponents:\n", - "exp = LitPop.from_countries('Costa Rica', fin_mode='income_group', res_arcsec=120, exponents=(1,1)) # change the parameters and see what happens...\n", + "exp = LitPop.from_countries(\n", + " \"Costa Rica\", fin_mode=\"income_group\", res_arcsec=120, exponents=(1, 1)\n", + ") # change the parameters and see what happens...\n", "# exp = LitPop.from_countries('Costa Rica', fin_mode='gdp', res_arcsec=90, exponents=(3,0)) # example of variation\n", - "exp.plot_raster(); # note the log scale of the colorbar\n", + "exp.plot_raster()\n", + "# note the log scale of the colorbar\n", "exp.plot_scatter();" ] }, @@ -312,12 +319,16 @@ "source": [ "# You may want to check if you have downloaded dataset Gridded Population of the World (GPW), v4: Population Count, v4.11\n", "# (2000 and 2020) first\n", - "pop_2000 = LitPop.from_countries('CHE', fin_mode='pop', res_arcsec=300, exponents=(0,1), reference_year=2000)\n", + "pop_2000 = LitPop.from_countries(\n", + " \"CHE\", fin_mode=\"pop\", res_arcsec=300, exponents=(0, 1), reference_year=2000\n", + ")\n", "# Alternatively, we ca use `from_population`:\n", - "pop_2021 = LitPop.from_population(countries='Switzerland', res_arcsec=300, reference_year=2021)\n", + "pop_2021 = LitPop.from_population(\n", + " countries=\"Switzerland\", res_arcsec=300, reference_year=2021\n", + ")\n", "# Since no population data for 2021 is available, the closest data point, 2020, is used (see LOGGER.warning)\n", - "pop_2000.plot_scatter();\n", - "pop_2021.plot_scatter();\n", + "pop_2000.plot_scatter()\n", + "pop_2021.plot_scatter()\n", "\"\"\"Note the difference in total values on the color bar.\"\"\"" ] }, @@ -398,16 +409,18 @@ } ], "source": [ - "res = 30 # If you don't get an output after a very long time with country = \"MEX\", try with res = 100\n", - "country = 'JAM' # Try different countries, i.e. 'JAM', 'CHE', 'RWA', 'MEX'\n", - "markersize = 4 # for plotting\n", - "buffer_deg=.04\n", - "\n", - "exp_nightlights = LitPop.from_nightlight_intensity(countries=country, res_arcsec=res) # nightlight intensity\n", - "exp_nightlights.plot_hexbin(linewidth=markersize, buffer=buffer_deg);\n", + "res = 30 # If you don't get an output after a very long time with country = \"MEX\", try with res = 100\n", + "country = \"JAM\" # Try different countries, i.e. 
'JAM', 'CHE', 'RWA', 'MEX'\n", + "markersize = 4 # for plotting\n", + "buffer_deg = 0.04\n", + "\n", + "exp_nightlights = LitPop.from_nightlight_intensity(\n", + " countries=country, res_arcsec=res\n", + ") # nightlight intensity\n", + "exp_nightlights.plot_hexbin(linewidth=markersize, buffer=buffer_deg)\n", "# Compare to the population map:\n", "exp_population = LitPop().from_population(countries=country, res_arcsec=res)\n", - "exp_population.plot_hexbin(linewidth=markersize, buffer=buffer_deg);\n", + "exp_population.plot_hexbin(linewidth=markersize, buffer=buffer_deg)\n", "# Compare to default LitPop exposures:\n", "exp = LitPop.from_countries(countries=country, res_arcsec=res)\n", "exp.plot_hexbin(linewidth=markersize, buffer=buffer_deg);" @@ -495,29 +508,31 @@ "import climada.util.coordinates as u_coord\n", "import climada.entity.exposures.litpop as lp\n", "\n", - "country_iso3a = 'USA'\n", - "state_name = 'Florida'\n", + "country_iso3a = \"USA\"\n", + "state_name = \"Florida\"\n", "reslution_arcsec = 600\n", "\"\"\"First, we need to get the shape of Florida:\"\"\"\n", "admin1_info, admin1_shapes = u_coord.get_admin1_info(country_iso3a)\n", "admin1_info = admin1_info[country_iso3a]\n", "admin1_shapes = admin1_shapes[country_iso3a]\n", - "admin1_names = [record['name'] for record in admin1_info]\n", + "admin1_names = [record[\"name\"] for record in admin1_info]\n", "print(admin1_names)\n", "for idx, name in enumerate(admin1_names):\n", - " if admin1_names[idx]==state_name:\n", + " if admin1_names[idx] == state_name:\n", " break\n", - "print('Florida index: ' + str(idx))\n", + "print(\"Florida index: \" + str(idx))\n", "\n", "\"\"\"Secondly, we estimate the `total_value`\"\"\"\n", "# `total_value` required user input for `from_shape`, here we assume 5% of total value of the whole USA:\n", - "total_value = 0.05 * lp._get_total_value_per_country(country_iso3a, 'pc', 2020)\n", + "total_value = 0.05 * lp._get_total_value_per_country(country_iso3a, \"pc\", 2020)\n", "\n", "\"\"\"Then, we can initiate the exposures for Florida:\"\"\"\n", "start = time.process_time()\n", - "exp = LitPop.from_shape(admin1_shapes[idx], total_value, res_arcsec=600, reference_year=2020)\n", - "print(f'\\n Runtime `from_shape` : {time.process_time() - start:1.2f} sec.\\n')\n", - "exp.plot_scatter(vmin=100, buffer=.5);\n" + "exp = LitPop.from_shape(\n", + " admin1_shapes[idx], total_value, res_arcsec=600, reference_year=2020\n", + ")\n", + "print(f\"\\n Runtime `from_shape` : {time.process_time() - start:1.2f} sec.\\n\")\n", + "exp.plot_scatter(vmin=100, buffer=0.5);" ] }, { @@ -561,9 +576,13 @@ "# `from_shape_and_countries` does not require `total_value`, but is slower to compute than `from_shape`,\n", "# because first, the exposure for the whole USA is initiated:\n", "start = time.process_time()\n", - "exp = LitPop.from_shape_and_countries(admin1_shapes[idx], country_iso3a, res_arcsec=600, reference_year=2020)\n", - "print(f'\\n Runtime `from_shape_and_countries` : {time.process_time() - start:1.2f} sec.\\n')\n", - "exp.plot_scatter(vmin=100, buffer=.5);\n", + "exp = LitPop.from_shape_and_countries(\n", + " admin1_shapes[idx], country_iso3a, res_arcsec=600, reference_year=2020\n", + ")\n", + "print(\n", + " f\"\\n Runtime `from_shape_and_countries` : {time.process_time() - start:1.2f} sec.\\n\"\n", + ")\n", + "exp.plot_scatter(vmin=100, buffer=0.5)\n", "\"\"\"Note the differences in computational speed and total value between the two approaches\"\"\"" ] }, @@ -655,31 +674,36 @@ "from shapely.geometry import 
Polygon\n", "\n", "\"\"\"initiate LitPop exposures for a geographical box around the city of Zurich:\"\"\"\n", - "bounds = (8.41, 47.25, 8.70, 47.47) # (min_lon, max_lon, min_lat, max_lat)\n", - "total_value=1000 # required user input for `from_shape`, here we just assume USD 1000 of total value\n", - "shape = Polygon([\n", - " (bounds[0], bounds[3]),\n", - " (bounds[2], bounds[3]),\n", - " (bounds[2], bounds[1]),\n", - " (bounds[0], bounds[1])\n", - " ])\n", + "bounds = (8.41, 47.25, 8.70, 47.47) # (min_lon, max_lon, min_lat, max_lat)\n", + "total_value = 1000 # required user input for `from_shape`, here we just assume USD 1000 of total value\n", + "shape = Polygon(\n", + " [\n", + " (bounds[0], bounds[3]),\n", + " (bounds[2], bounds[3]),\n", + " (bounds[2], bounds[1]),\n", + " (bounds[0], bounds[1]),\n", + " ]\n", + ")\n", "import time\n", + "\n", "start = time.process_time()\n", "exp = LitPop.from_shape(shape, total_value)\n", - "print(f'\\n Runtime `from_shape` : {time.process_time() - start:1.2f} sec.\\n')\n", - "exp.plot_scatter();\n", + "print(f\"\\n Runtime `from_shape` : {time.process_time() - start:1.2f} sec.\\n\")\n", + "exp.plot_scatter()\n", "# `from_shape_and_countries` does not require `total_value`, but is slower to compute:\n", "start = time.process_time()\n", - "exp = LitPop.from_shape_and_countries(shape, 'Switzerland')\n", - "print(f'\\n Runtime `from_shape_and_countries` : {time.process_time() - start:1.2f} sec.\\n')\n", - "exp.plot_scatter();\n", + "exp = LitPop.from_shape_and_countries(shape, \"Switzerland\")\n", + "print(\n", + " f\"\\n Runtime `from_shape_and_countries` : {time.process_time() - start:1.2f} sec.\\n\"\n", + ")\n", + "exp.plot_scatter()\n", "\"\"\"Note the difference in total value between the two exposure sets!\"\"\"\n", "\n", "\"\"\"For comparison, initiate population exposure for a geographical box around the city of Zurich:\"\"\"\n", "start = time.process_time()\n", "exp_pop = LitPop.from_population(shape=shape)\n", - "print(f'\\n Runtime `from_population` : {time.process_time() - start:1.2f} sec.\\n')\n", - "exp_pop.plot_scatter();\n", + "print(f\"\\n Runtime `from_population` : {time.process_time() - start:1.2f} sec.\\n\")\n", + "exp_pop.plot_scatter()\n", "\n", "\"\"\"Population exposure for a custom shape can be initiated directly via `set_population` without providing `total_value`\"\"\"" ] @@ -727,14 +751,18 @@ "source": [ "# Initiate GDP-Entity for Switzerland, with and without admin1_calc:\n", "\n", - "ent_adm0 = LitPop.from_countries('CHE', res_arcsec=120, fin_mode='gdp', admin1_calc=False)\n", + "ent_adm0 = LitPop.from_countries(\n", + " \"CHE\", res_arcsec=120, fin_mode=\"gdp\", admin1_calc=False\n", + ")\n", "ent_adm0.set_geometry_points()\n", "\n", - "ent_adm1 = LitPop.from_countries('CHE', res_arcsec=120, fin_mode='gdp', admin1_calc=True)\n", + "ent_adm1 = LitPop.from_countries(\n", + " \"CHE\", res_arcsec=120, fin_mode=\"gdp\", admin1_calc=True\n", + ")\n", "\n", "ent_adm0.check()\n", "ent_adm1.check()\n", - "print('Done.')" + "print(\"Done.\")" ] }, { @@ -788,14 +816,15 @@ "source": [ "# Plotting:\n", "from matplotlib import colors\n", - "norm=colors.LogNorm(vmin=1e5, vmax=1e9) # setting range for the log-normal scale\n", + "\n", + "norm = colors.LogNorm(vmin=1e5, vmax=1e9) # setting range for the log-normal scale\n", "markersize = 5\n", - "ent_adm0.plot_hexbin(buffer=.3, norm=norm, linewidth=markersize);\n", - "ent_adm1.plot_hexbin(buffer=.3, norm=norm, linewidth=markersize);\n", + "ent_adm0.plot_hexbin(buffer=0.3, norm=norm, 
linewidth=markersize)\n", + "ent_adm1.plot_hexbin(buffer=0.3, norm=norm, linewidth=markersize)\n", "\n", - "print('admin-0: First figure')\n", - "print('admin-1: Second figure')\n", - "'''Do you spot the small differences in Graubünden (eastern Switzerland)?'''" + "print(\"admin-0: First figure\")\n", + "print(\"admin-1: Second figure\")\n", + "\"\"\"Do you spot the small differences in Graubünden (eastern Switzerland)?\"\"\"" ] } ], diff --git a/doc/tutorial/climada_entity_MeasureSet.ipynb b/doc/tutorial/climada_entity_MeasureSet.ipynb index e1b93a103..812198362 100644 --- a/doc/tutorial/climada_entity_MeasureSet.ipynb +++ b/doc/tutorial/climada_entity_MeasureSet.ipynb @@ -127,28 +127,28 @@ "\n", "# define measure\n", "meas = Measure(\n", - " name='Mangrove',\n", - " haz_type='TC',\n", + " name=\"Mangrove\",\n", + " haz_type=\"TC\",\n", " color_rgb=np.array([1, 1, 1]),\n", " cost=500000000,\n", " mdd_impact=(1, 0),\n", " paa_impact=(1, -0.15),\n", - " hazard_inten_imp=(1, -10), # reduces intensity by 10\n", + " hazard_inten_imp=(1, -10), # reduces intensity by 10\n", ")\n", "\n", "# impact functions\n", "impf_tc = ImpfTropCyclone.from_emanuel_usa()\n", "impf_all = ImpactFuncSet([impf_tc])\n", - "impf_all.plot();\n", + "impf_all.plot()\n", "\n", "# dummy Hazard and Exposures\n", - "haz = Hazard('TC') # this measure does not change hazard\n", - "exp = Exposures() # this measure does not change exposures\n", + "haz = Hazard(\"TC\") # this measure does not change hazard\n", + "exp = Exposures() # this measure does not change exposures\n", "\n", "# new impact functions\n", "new_exp, new_impfs, new_haz = meas.apply(exp, impf_all, haz)\n", - "axes = new_impfs.plot();\n", - "axes.set_title('TC: Modified impact function')" + "axes = new_impfs.plot()\n", + "axes.set_title(\"TC: Modified impact function\")" ] }, { @@ -228,8 +228,8 @@ "\n", "# define measure\n", "meas = Measure(\n", - " name='Mangrove',\n", - " haz_type='TC',\n", + " name=\"Mangrove\",\n", + " haz_type=\"TC\",\n", " color_rgb=np.array([1, 1, 1]),\n", " cost=500000000,\n", " hazard_freq_cutoff=0.0255,\n", @@ -250,14 +250,16 @@ "# new hazard\n", "new_exp, new_impfs, new_haz = meas.apply(exp, impf_all, haz)\n", "# if you look at the maximum intensity per centroid: new_haz does not contain the event with smaller impact (the most frequent)\n", - "haz.plot_intensity(0);\n", - "new_haz.plot_intensity(0);\n", + "haz.plot_intensity(0)\n", + "new_haz.plot_intensity(0)\n", "# you might also compute the exceedance frequency curve of both hazard\n", "imp = ImpactCalc(exp, impf_all, haz).impact()\n", - "ax = imp.calc_freq_curve().plot(label='original');\n", + "ax = imp.calc_freq_curve().plot(label=\"original\")\n", "\n", "new_imp = ImpactCalc(new_exp, new_impfs, new_haz).impact()\n", - "new_imp.calc_freq_curve().plot(axis=ax, label='measure'); # the damages for events with return periods > 1/0.0255 ~ 40 are 0" + "new_imp.calc_freq_curve().plot(\n", + " axis=ax, label=\"measure\"\n", + "); # the damages for events with return periods > 1/0.0255 ~ 40 are 0" ] }, { @@ -361,12 +363,12 @@ "\n", "# define measure\n", "meas = Measure(\n", - " name='Building code',\n", - " haz_type='TC',\n", + " name=\"Building code\",\n", + " haz_type=\"TC\",\n", " color_rgb=np.array([1, 1, 1]),\n", " cost=500000000,\n", " hazard_freq_cutoff=0.00455,\n", - " exp_region_id=[1], # apply measure to points close to exposures with region_id=1\n", + " exp_region_id=[1], # apply measure to points close to exposures with region_id=1\n", ")\n", "\n", "# impact functions\n", @@ 
-379,7 +381,7 @@ "\n", "# Exposures\n", "exp = Exposures.from_hdf5(EXP_DEMO_H5)\n", - "#exp['region_id'] = np.ones(exp.shape[0])\n", + "# exp['region_id'] = np.ones(exp.shape[0])\n", "exp.check()\n", "# all exposures have region_id=1\n", "exp.plot_hexbin(buffer=1.0)\n", @@ -449,8 +451,8 @@ "\n", "# define measure\n", "meas = Measure(\n", - " name='Insurance',\n", - " haz_type='TC',\n", + " name=\"Insurance\",\n", + " haz_type=\"TC\",\n", " color_rgb=np.array([1, 1, 1]),\n", " cost=500000000,\n", " risk_transf_attach=5.0e8,\n", @@ -471,12 +473,12 @@ "\n", "# impact before\n", "imp = ImpactCalc(exp, impf_all, haz).impact()\n", - "ax = imp.calc_freq_curve().plot(label='original');\n", + "ax = imp.calc_freq_curve().plot(label=\"original\")\n", "\n", "# impact after. risk_transf will be added to the cost of the measure\n", "imp_new, risk_transf = meas.calc_impact(exp, impf_all, haz)\n", - "imp_new.calc_freq_curve().plot(axis=ax, label='measure');\n", - "print('risk_transfer {:.3}'.format(risk_transf.aai_agg))" + "imp_new.calc_freq_curve().plot(axis=ax, label=\"measure\")\n", + "print(\"risk_transfer {:.3}\".format(risk_transf.aai_agg))" ] }, { @@ -515,8 +517,8 @@ "from climada.entity.measures import Measure, MeasureSet\n", "\n", "meas_1 = Measure(\n", - " haz_type='TC',\n", - " name='Mangrove',\n", + " haz_type=\"TC\",\n", + " name=\"Mangrove\",\n", " color_rgb=np.array([1, 1, 1]),\n", " cost=500000000,\n", " mdd_impact=(1, 2),\n", @@ -526,8 +528,8 @@ ")\n", "\n", "meas_2 = Measure(\n", - " haz_type='TC',\n", - " name='Sandbags',\n", + " haz_type=\"TC\",\n", + " name=\"Sandbags\",\n", " color_rgb=np.array([1, 1, 1]),\n", " cost=22000000,\n", " mdd_impact=(1, 2),\n", @@ -543,7 +545,7 @@ "meas_set.check()\n", "\n", "# select one measure\n", - "meas_sel = meas_set.get_measure(name='Sandbags')\n", + "meas_sel = meas_set.get_measure(name=\"Sandbags\")\n", "print(meas_sel[0].name, meas_sel[0].cost)" ] }, @@ -582,7 +584,7 @@ "from climada.util import ENT_TEMPLATE_XLS\n", "\n", "# Fill DataFrame from Excel file\n", - "file_name = ENT_TEMPLATE_XLS # provide absolute path of the excel file\n", + "file_name = ENT_TEMPLATE_XLS # provide absolute path of the excel file\n", "meas_set = MeasureSet.from_excel(file_name)\n", "meas_set" ] @@ -611,11 +613,11 @@ "from climada.util import ENT_TEMPLATE_XLS\n", "\n", "# Fill DataFrame from Excel file\n", - "file_name = ENT_TEMPLATE_XLS # provide absolute path of the excel file\n", + "file_name = ENT_TEMPLATE_XLS # provide absolute path of the excel file\n", "meas_set = MeasureSet.from_excel(file_name)\n", "\n", "# write file\n", - "meas_set.write_excel('results/tutorial_meas_set.xlsx')" + "meas_set.write_excel(\"results/tutorial_meas_set.xlsx\")" ] }, { @@ -638,8 +640,9 @@ "outputs": [], "source": [ "from climada.util.save import save\n", + "\n", "# this generates a results folder in the current path and stores the output there\n", - "save('tutorial_meas_set.p', meas_set)" + "save(\"tutorial_meas_set.p\", meas_set)" ] } ], diff --git a/doc/tutorial/climada_hazard_Hazard.ipynb b/doc/tutorial/climada_hazard_Hazard.ipynb index 94dd517dc..19cc98a0f 100644 --- a/doc/tutorial/climada_hazard_Hazard.ipynb +++ b/doc/tutorial/climada_hazard_Hazard.ipynb @@ -95,27 +95,33 @@ "import numpy as np\n", "from climada.hazard import Hazard\n", "from climada.util.constants import HAZ_DEMO_FL\n", + "\n", "# to hide the warnings\n", "import warnings\n", - "warnings.filterwarnings('ignore')\n", "\n", - "# read intensity from raster file HAZ_DEMO_FL and set frequency for the contained 
event\n", - "haz_ven = Hazard.from_raster([HAZ_DEMO_FL], attrs={'frequency':np.ones(1)/2}, haz_type='FL')\n", + "warnings.filterwarnings(\"ignore\")\n", + "\n", + "# read intensity from raster file HAZ_DEMO_FL and set frequency for the contained event\n", + "haz_ven = Hazard.from_raster(\n", + " [HAZ_DEMO_FL], attrs={\"frequency\": np.ones(1) / 2}, haz_type=\"FL\"\n", + ")\n", "haz_ven.check()\n", "\n", "# The masked values of the raster are set to 0\n", "# Sometimes the raster file does not contain all the information, as in this case the mask value -9999\n", "# We mask it manuall and plot it using plot_intensity()\n", - "haz_ven.intensity[haz_ven.intensity==-9999] = 0\n", - "haz_ven.plot_intensity(1, smooth=False) # if smooth=True (default value) is used, the computation time might increase\n", + "haz_ven.intensity[haz_ven.intensity == -9999] = 0\n", + "haz_ven.plot_intensity(\n", + " 1, smooth=False\n", + ") # if smooth=True (default value) is used, the computation time might increase\n", "\n", "# per default the following attributes have been set\n", - "print('event_id: ', haz_ven.event_id)\n", - "print('event_name: ', haz_ven.event_name)\n", - "print('date: ', haz_ven.date)\n", - "print('frequency: ', haz_ven.frequency)\n", - "print('orig: ', haz_ven.orig)\n", - "print('min, max fraction: ', haz_ven.fraction.min(), haz_ven.fraction.max())" + "print(\"event_id: \", haz_ven.event_id)\n", + "print(\"event_name: \", haz_ven.event_name)\n", + "print(\"date: \", haz_ven.date)\n", + "print(\"frequency: \", haz_ven.frequency)\n", + "print(\"orig: \", haz_ven.orig)\n", + "print(\"min, max fraction: \", haz_ven.fraction.min(), haz_ven.fraction.max())" ] }, { @@ -135,10 +141,7 @@ "metadata": {}, "outputs": [], "source": [ - "# Put your code here\n", - "\n", - "\n", - "\n" + "# Put your code here" ] }, { @@ -212,30 +215,42 @@ "# Solution:\n", "\n", "# 1. The CRS can be reprojected using dst_crs option\n", - "haz = Hazard.from_raster([HAZ_DEMO_FL], dst_crs='epsg:2201', haz_type='FL')\n", + "haz = Hazard.from_raster([HAZ_DEMO_FL], dst_crs=\"epsg:2201\", haz_type=\"FL\")\n", "haz.check()\n", - "print('\\n Solution 1:')\n", - "print('centroids CRS:', haz.centroids.crs)\n", - "print('raster info:', haz.centroids.get_meta())\n", + "print(\"\\n Solution 1:\")\n", + "print(\"centroids CRS:\", haz.centroids.crs)\n", + "print(\"raster info:\", haz.centroids.get_meta())\n", "\n", "# 2. Transformations of the coordinates can be set using the transform option and Affine\n", "from rasterio import Affine\n", - "haz = Hazard.from_raster([HAZ_DEMO_FL], haz_type='FL',\n", - " transform=Affine(0.009000000000000341, 0.0, -69.33714959699981, \\\n", - " 0.0, -0.009000000000000341, 10.42822096697894),\n", - " height=500, width=501)\n", + "\n", + "haz = Hazard.from_raster(\n", + " [HAZ_DEMO_FL],\n", + " haz_type=\"FL\",\n", + " transform=Affine(\n", + " 0.009000000000000341,\n", + " 0.0,\n", + " -69.33714959699981,\n", + " 0.0,\n", + " -0.009000000000000341,\n", + " 10.42822096697894,\n", + " ),\n", + " height=500,\n", + " width=501,\n", + ")\n", "haz.check()\n", - "print('\\n Solution 2:')\n", - "print('raster info:', haz.centroids.get_meta())\n", - "print('intensity size:', haz.intensity.shape)\n", + "print(\"\\n Solution 2:\")\n", + "print(\"raster info:\", haz.centroids.get_meta())\n", + "print(\"intensity size:\", haz.intensity.shape)\n", "\n", "# 3. 
A partial part of the raster can be loaded using the window or geometry\n", "from rasterio.windows import Window\n", - "haz = Hazard.from_raster([HAZ_DEMO_FL], haz_type='FL', window=Window(10, 10, 20, 30))\n", + "\n", + "haz = Hazard.from_raster([HAZ_DEMO_FL], haz_type=\"FL\", window=Window(10, 10, 20, 30))\n", "haz.check()\n", - "print('\\n Solution 3:')\n", - "print('raster info:', haz.centroids.get_meta())\n", - "print('intensity size:', haz.intensity.shape)" + "print(\"\\n Solution 3:\")\n", + "print(\"raster info:\", haz.centroids.get_meta())\n", + "print(\"intensity size:\", haz.intensity.shape)" ] }, { @@ -266,10 +281,13 @@ ], "source": [ "from climada.hazard import Hazard, Centroids\n", - "from climada.util import HAZ_DEMO_H5 # CLIMADA's Python file\n", + "from climada.util import HAZ_DEMO_H5 # CLIMADA's Python file\n", + "\n", "# Hazard needs to know the acronym of the hazard type to be constructed!!! Use 'NA' if not known.\n", - "haz_tc_fl = Hazard.from_hdf5(HAZ_DEMO_H5) # Historic tropical cyclones in Florida from 1990 to 2004\n", - "haz_tc_fl.check() # Use always the check() method to see if the hazard has been loaded correctly" + "haz_tc_fl = Hazard.from_hdf5(\n", + " HAZ_DEMO_H5\n", + ") # Historic tropical cyclones in Florida from 1990 to 2004\n", + "haz_tc_fl.check() # Use always the check() method to see if the hazard has been loaded correctly" ] }, { @@ -298,50 +316,152 @@ } ], "source": [ - "# setting points\n", + "# setting points\n", "import numpy as np\n", "from scipy import sparse\n", "\n", - "lat = np.array([26.933899, 26.957203, 26.783846, 26.645524, 26.897796, 26.925359, \\\n", - " 26.914768, 26.853491, 26.845099, 26.82651 , 26.842772, 26.825905, \\\n", - " 26.80465 , 26.788649, 26.704277, 26.71005 , 26.755412, 26.678449, \\\n", - " 26.725649, 26.720599, 26.71255 , 26.6649 , 26.664699, 26.663149, \\\n", - " 26.66875 , 26.638517, 26.59309 , 26.617449, 26.620079, 26.596795, \\\n", - " 26.577049, 26.524585, 26.524158, 26.523737, 26.520284, 26.547349, \\\n", - " 26.463399, 26.45905 , 26.45558 , 26.453699, 26.449999, 26.397299, \\\n", - " 26.4084 , 26.40875 , 26.379113, 26.3809 , 26.349068, 26.346349, \\\n", - " 26.348015, 26.347957])\n", - "\n", - "lon = np.array([-80.128799, -80.098284, -80.748947, -80.550704, -80.596929, \\\n", - " -80.220966, -80.07466 , -80.190281, -80.083904, -80.213493, \\\n", - " -80.0591 , -80.630096, -80.075301, -80.069885, -80.656841, \\\n", - " -80.190085, -80.08955 , -80.041179, -80.1324 , -80.091746, \\\n", - " -80.068579, -80.090698, -80.1254 , -80.151401, -80.058749, \\\n", - " -80.283371, -80.206901, -80.090649, -80.055001, -80.128711, \\\n", - " -80.076435, -80.080105, -80.06398 , -80.178973, -80.110519, \\\n", - " -80.057701, -80.064251, -80.07875 , -80.139247, -80.104316, \\\n", - " -80.188545, -80.21902 , -80.092391, -80.1575 , -80.102028, \\\n", - " -80.16885 , -80.116401, -80.08385 , -80.241305, -80.158855])\n", - "\n", - "n_cen = lon.size # number of centroids\n", - "n_ev = 10 # number of events\n", + "lat = np.array(\n", + " [\n", + " 26.933899,\n", + " 26.957203,\n", + " 26.783846,\n", + " 26.645524,\n", + " 26.897796,\n", + " 26.925359,\n", + " 26.914768,\n", + " 26.853491,\n", + " 26.845099,\n", + " 26.82651,\n", + " 26.842772,\n", + " 26.825905,\n", + " 26.80465,\n", + " 26.788649,\n", + " 26.704277,\n", + " 26.71005,\n", + " 26.755412,\n", + " 26.678449,\n", + " 26.725649,\n", + " 26.720599,\n", + " 26.71255,\n", + " 26.6649,\n", + " 26.664699,\n", + " 26.663149,\n", + " 26.66875,\n", + " 26.638517,\n", + " 26.59309,\n", + 
" 26.617449,\n", + " 26.620079,\n", + " 26.596795,\n", + " 26.577049,\n", + " 26.524585,\n", + " 26.524158,\n", + " 26.523737,\n", + " 26.520284,\n", + " 26.547349,\n", + " 26.463399,\n", + " 26.45905,\n", + " 26.45558,\n", + " 26.453699,\n", + " 26.449999,\n", + " 26.397299,\n", + " 26.4084,\n", + " 26.40875,\n", + " 26.379113,\n", + " 26.3809,\n", + " 26.349068,\n", + " 26.346349,\n", + " 26.348015,\n", + " 26.347957,\n", + " ]\n", + ")\n", + "\n", + "lon = np.array(\n", + " [\n", + " -80.128799,\n", + " -80.098284,\n", + " -80.748947,\n", + " -80.550704,\n", + " -80.596929,\n", + " -80.220966,\n", + " -80.07466,\n", + " -80.190281,\n", + " -80.083904,\n", + " -80.213493,\n", + " -80.0591,\n", + " -80.630096,\n", + " -80.075301,\n", + " -80.069885,\n", + " -80.656841,\n", + " -80.190085,\n", + " -80.08955,\n", + " -80.041179,\n", + " -80.1324,\n", + " -80.091746,\n", + " -80.068579,\n", + " -80.090698,\n", + " -80.1254,\n", + " -80.151401,\n", + " -80.058749,\n", + " -80.283371,\n", + " -80.206901,\n", + " -80.090649,\n", + " -80.055001,\n", + " -80.128711,\n", + " -80.076435,\n", + " -80.080105,\n", + " -80.06398,\n", + " -80.178973,\n", + " -80.110519,\n", + " -80.057701,\n", + " -80.064251,\n", + " -80.07875,\n", + " -80.139247,\n", + " -80.104316,\n", + " -80.188545,\n", + " -80.21902,\n", + " -80.092391,\n", + " -80.1575,\n", + " -80.102028,\n", + " -80.16885,\n", + " -80.116401,\n", + " -80.08385,\n", + " -80.241305,\n", + " -80.158855,\n", + " ]\n", + ")\n", + "\n", + "n_cen = lon.size # number of centroids\n", + "n_ev = 10 # number of events\n", "\n", "intensity = sparse.csr_matrix(np.random.random((n_ev, n_cen)))\n", "fraction = intensity.copy()\n", "fraction.data.fill(1)\n", "\n", - "haz = Hazard(haz_type='TC',\n", - " intensity=intensity,\n", - " fraction=fraction,\n", - " centroids=Centroids(lat=lat, lon=lon), # default crs used\n", - " units='m',\n", - " event_id=np.arange(n_ev, dtype=int),\n", - " event_name=['ev_12', 'ev_21', 'Maria', 'ev_35',\n", - " 'Irma', 'ev_16', 'ev_15', 'Edgar', 'ev_1', 'ev_9'],\n", - " date=np.array([721166, 734447, 734447, 734447, 721167,\n", - " 721166, 721167, 721200, 721166, 721166]),\n", - " orig=np.zeros(n_ev, bool),\n", - " frequency=np.ones(n_ev)/n_ev,)\n", + "haz = Hazard(\n", + " haz_type=\"TC\",\n", + " intensity=intensity,\n", + " fraction=fraction,\n", + " centroids=Centroids(lat=lat, lon=lon), # default crs used\n", + " units=\"m\",\n", + " event_id=np.arange(n_ev, dtype=int),\n", + " event_name=[\n", + " \"ev_12\",\n", + " \"ev_21\",\n", + " \"Maria\",\n", + " \"ev_35\",\n", + " \"Irma\",\n", + " \"ev_16\",\n", + " \"ev_15\",\n", + " \"Edgar\",\n", + " \"ev_1\",\n", + " \"ev_9\",\n", + " ],\n", + " date=np.array(\n", + " [721166, 734447, 734447, 734447, 721167, 721166, 721167, 721200, 721166, 721166]\n", + " ),\n", + " orig=np.zeros(n_ev, bool),\n", + " frequency=np.ones(n_ev) / n_ev,\n", + ")\n", "\n", "haz.check()\n", "haz.centroids.plot();" @@ -363,10 +483,17 @@ "# using from_pnt_bounds\n", "\n", "# bounds\n", - "left, bottom, right, top = -72, -3.0, -52.0, 22 # the bounds refer to the bounds of the center of the pixel\n", + "left, bottom, right, top = (\n", + " -72,\n", + " -3.0,\n", + " -52.0,\n", + " 22,\n", + ") # the bounds refer to the bounds of the center of the pixel\n", "# resolution\n", "res = 0.5\n", - "centroids = Centroids.from_pnt_bounds((left, bottom, right, top), res) # default crs used" + "centroids = Centroids.from_pnt_bounds(\n", + " (left, bottom, right, top), res\n", + ") # default crs used" ] }, { @@ -393,26 
+520,24 @@ "\n", "# raster info:\n", "# border upper left corner (of the pixel, not of the center of the pixel)\n", - "max_lat = top + res/2\n", - "min_lon = left - res/2\n", + "max_lat = top + res / 2\n", + "min_lon = left - res / 2\n", "# resolution in lat and lon\n", - "d_lat = -res # negative because starting in upper corner\n", - "d_lon = res # same step as d_lat\n", + "d_lat = -res # negative because starting in upper corner\n", + "d_lon = res # same step as d_lat\n", "# number of points\n", "n_lat, n_lon = centroids.shape\n", "\n", "# meta: raster specification\n", "meta = {\n", - " 'dtype': 'float32',\n", - " 'width': n_lon,\n", - " 'height': n_lat,\n", - " 'crs': DEF_CRS,\n", - " 'transform': rasterio.Affine(\n", - " a=d_lon, b=0.0, c=min_lon,\n", - " d=0.0, e=d_lat, f=max_lat),\n", + " \"dtype\": \"float32\",\n", + " \"width\": n_lon,\n", + " \"height\": n_lat,\n", + " \"crs\": DEF_CRS,\n", + " \"transform\": rasterio.Affine(a=d_lon, b=0.0, c=min_lon, d=0.0, e=d_lat, f=max_lat),\n", "}\n", "\n", - "centroids_from_meta = Centroids.from_meta(meta) # default crs used\n", + "centroids_from_meta = Centroids.from_meta(meta) # default crs used\n", "\n", "centroids_from_meta == centroids" ] @@ -446,27 +571,40 @@ "import numpy as np\n", "from scipy import sparse\n", "\n", - "n_ev = 10 # number of events\n", + "n_ev = 10 # number of events\n", "\n", "intensity = sparse.csr_matrix(np.random.random((n_ev, centroids.size)))\n", "fraction = intensity.copy()\n", "fraction.data.fill(1)\n", "\n", - "haz = Hazard('TC',\n", - " centroids=centroids,\n", - " intensity=intensity,\n", - " fraction=fraction,\n", - " units='m',\n", - " event_id=np.arange(n_ev, dtype=int),\n", - " event_name=['ev_12', 'ev_21', 'Maria', 'ev_35',\n", - " 'Irma', 'ev_16', 'ev_15', 'Edgar', 'ev_1', 'ev_9'],\n", - " date=np.array([721166, 734447, 734447, 734447, 721167,\n", - " 721166, 721167, 721200, 721166, 721166]),\n", - " orig=np.zeros(n_ev, bool),\n", - " frequency=np.ones(n_ev)/n_ev,)\n", + "haz = Hazard(\n", + " \"TC\",\n", + " centroids=centroids,\n", + " intensity=intensity,\n", + " fraction=fraction,\n", + " units=\"m\",\n", + " event_id=np.arange(n_ev, dtype=int),\n", + " event_name=[\n", + " \"ev_12\",\n", + " \"ev_21\",\n", + " \"Maria\",\n", + " \"ev_35\",\n", + " \"Irma\",\n", + " \"ev_16\",\n", + " \"ev_15\",\n", + " \"Edgar\",\n", + " \"ev_1\",\n", + " \"ev_9\",\n", + " ],\n", + " date=np.array(\n", + " [721166, 734447, 734447, 734447, 721167, 721166, 721167, 721200, 721166, 721166]\n", + " ),\n", + " orig=np.zeros(n_ev, bool),\n", + " frequency=np.ones(n_ev) / n_ev,\n", + ")\n", "\n", "haz.check()\n", - "print('Check centroids borders:', haz.centroids.total_bounds)\n", + "print(\"Check centroids borders:\", haz.centroids.total_bounds)\n", "haz.centroids.plot();" ] }, @@ -512,8 +650,7 @@ "metadata": {}, "outputs": [], "source": [ - "# Put your code here:\n", - "\n" + "# Put your code here:" ] }, { @@ -522,7 +659,7 @@ "metadata": {}, "outputs": [], "source": [ - "#help(hist_tc.centroids) # If you want to run it, do it after you execute the next block" + "# help(hist_tc.centroids) # If you want to run it, do it after you execute the next block" ] }, { @@ -548,26 +685,26 @@ "# SOLUTION:\n", "\n", "# 1.How many synthetic events are contained?\n", - "print('Number of total events:', haz_tc_fl.size)\n", - "print('Number of synthetic events:', np.logical_not(haz_tc_fl.orig).astype(int).sum())\n", + "print(\"Number of total events:\", haz_tc_fl.size)\n", + "print(\"Number of synthetic events:\", 
np.logical_not(haz_tc_fl.orig).astype(int).sum())\n", "\n", "# 2. Generate a hazard with historical hurricanes ocurring between 1995 and 2001.\n", - "hist_tc = haz_tc_fl.select(date=('1995-01-01', '2001-12-31'), orig=True)\n", - "print('Number of historical events between 1995 and 2001:', hist_tc.size)\n", + "hist_tc = haz_tc_fl.select(date=(\"1995-01-01\", \"2001-12-31\"), orig=True)\n", + "print(\"Number of historical events between 1995 and 2001:\", hist_tc.size)\n", "\n", "# 3. How many historical hurricanes occured in 1999? Which was the year with most hurricanes between 1995 and 2001?\n", - "ev_per_year = hist_tc.calc_year_set() # events ids per year\n", - "print('Number of events in 1999:', ev_per_year[1999].size)\n", + "ev_per_year = hist_tc.calc_year_set() # events ids per year\n", + "print(\"Number of events in 1999:\", ev_per_year[1999].size)\n", "max_year = 1995\n", "max_ev = ev_per_year[1995].size\n", "for year, ev in ev_per_year.items():\n", " if ev.size > max_ev:\n", " max_year = year\n", - "print('Year with most hurricanes between 1995 and 2001:', max_year)\n", + "print(\"Year with most hurricanes between 1995 and 2001:\", max_year)\n", "\n", - "# 4. What is the number of centroids with distance to coast smaller than 1km?\n", + "# 4. What is the number of centroids with distance to coast smaller than 1km?\n", "num_cen_coast = np.argwhere(hist_tc.centroids.get_dist_coast() < 1000).size\n", - "print('Number of centroids close to coast: ', num_cen_coast)" + "print(\"Number of centroids close to coast: \", num_cen_coast)" ] }, { @@ -745,14 +882,16 @@ ], "source": [ "# 1. intensities of the largest event (defined as greater sum of intensities):\n", - "# all events:\n", - "haz_tc_fl.plot_intensity(event=-1) # largest historical event: 1992230N11325 hurricane ANDREW\n", + "# all events:\n", + "haz_tc_fl.plot_intensity(\n", + " event=-1\n", + ") # largest historical event: 1992230N11325 hurricane ANDREW\n", "\n", "# 2. maximum intensities at each centroid:\n", "haz_tc_fl.plot_intensity(event=0)\n", "\n", "# 3. intensities of hurricane 1998295N12284:\n", - "haz_tc_fl.plot_intensity(event='1998295N12284', cmap='BuGn') # setting color map\n", + "haz_tc_fl.plot_intensity(event=\"1998295N12284\", cmap=\"BuGn\") # setting color map\n", "\n", "# 4. tropical cyclone intensities maps for the return periods [10, 50, 75, 100]\n", "_, res = haz_tc_fl.plot_rp_intensity([10, 50, 75, 100])\n", @@ -760,6 +899,7 @@ "# 5. tropical cyclone return period maps for the threshold intensities [30, 40]\n", "return_periods, label, column_label = haz_tc_fl.local_return_period([30, 40])\n", "from climada.util.plot import plot_from_gdf\n", + "\n", "plot_from_gdf(return_periods, colorbar_name=label, title_subplots=column_label)\n", "\n", "# 6. 
intensities of all the events in centroid with id 50\n", @@ -791,9 +931,9 @@ "import matplotlib.pyplot as plt\n", "\n", "fig, ax1, fontsize = make_map(1) # map\n", - "ax2 = fig.add_subplot(2, 1, 2) # add regular axes\n", - "haz_tc_fl.plot_intensity(axis=ax1, event=0) # plot original resolution\n", - "ax1.plot(-80, 26, 'or', mfc='none', markersize=12)\n", + "ax2 = fig.add_subplot(2, 1, 2) # add regular axes\n", + "haz_tc_fl.plot_intensity(axis=ax1, event=0) # plot original resolution\n", + "ax1.plot(-80, 26, \"or\", mfc=\"none\", markersize=12)\n", "haz_tc_fl.plot_intensity(axis=ax2, centr=(26, -80))\n", "fig.subplots_adjust(hspace=6.5)" ] @@ -830,9 +970,9 @@ ], "source": [ "# If you see an error message, try to create a depository named results in the repository tutorial.\n", - "haz_tc_fl.write_hdf5('results/haz_tc_fl.h5')\n", + "haz_tc_fl.write_hdf5(\"results/haz_tc_fl.h5\")\n", "\n", - "haz = Hazard.from_hdf5('results/haz_tc_fl.h5')\n", + "haz = Hazard.from_hdf5(\"results/haz_tc_fl.h5\")\n", "haz.check()" ] }, @@ -857,7 +997,7 @@ } ], "source": [ - "haz_ven.write_raster('results/haz_ven.tif') # each event is a band of the tif file" + "haz_ven.write_raster(\"results/haz_ven.tif\") # each event is a band of the tif file" ] }, { @@ -882,8 +1022,9 @@ ], "source": [ "from climada.util.save import save\n", + "\n", "# this generates a results folder in the current path and stores the output there\n", - "save('tutorial_haz_tc_fl.p', haz_tc_fl)" + "save(\"tutorial_haz_tc_fl.p\", haz_tc_fl)" ] } ], diff --git a/doc/tutorial/climada_hazard_StormEurope.ipynb b/doc/tutorial/climada_hazard_StormEurope.ipynb index 3c0ba6865..7772d6057 100644 --- a/doc/tutorial/climada_hazard_StormEurope.ipynb +++ b/doc/tutorial/climada_hazard_StormEurope.ipynb @@ -21,7 +21,8 @@ "source": [ "%matplotlib inline\n", "import matplotlib.pyplot as plt\n", - "plt.rcParams['figure.figsize'] = [15, 10]" + "\n", + "plt.rcParams[\"figure.figsize\"] = [15, 10]" ] }, { @@ -107,7 +108,7 @@ } ], "source": [ - "storm_instance?" + "?storm_instance" ] }, { @@ -144,12 +145,12 @@ "outputs": [], "source": [ "storm_instance.set_ssi(\n", - " method = 'wind_gust',\n", - " intensity = storm_instance.intensity,\n", + " method=\"wind_gust\",\n", + " intensity=storm_instance.intensity,\n", " # the above is just a more explicit way of passing the default\n", - " on_land = True,\n", - " threshold = 25,\n", - " sel_cen = None\n", + " on_land=True,\n", + " threshold=25,\n", + " sel_cen=None,\n", " # None is default. 
sel_cen could be used to subset centroids\n", ")" ] @@ -244,16 +245,16 @@ "outputs": [], "source": [ "ssi_args = {\n", - " 'on_land': True,\n", - " 'threshold': 25,\n", + " \"on_land\": True,\n", + " \"threshold\": 25,\n", "}\n", "\n", "storm_prob_xtreme = storm_instance.generate_prob_storms(\n", - " reg_id=[56, 528], # BEL and NLD\n", + " reg_id=[56, 528], # BEL and NLD\n", " spatial_shift=2,\n", " ssi_args=ssi_args,\n", " power=1.5,\n", - " scale=0.3\n", + " scale=0.3,\n", ")" ] }, @@ -306,7 +307,7 @@ } ], "source": [ - "storm_prob_xtreme.plot_ssi(full_area=True);\n", + "storm_prob_xtreme.plot_ssi(full_area=True)\n", "storm_prob.plot_ssi(full_area=True);" ] } diff --git a/doc/tutorial/climada_hazard_TropCyclone.ipynb b/doc/tutorial/climada_hazard_TropCyclone.ipynb index 79b63981a..480d5c0b4 100644 --- a/doc/tutorial/climada_hazard_TropCyclone.ipynb +++ b/doc/tutorial/climada_hazard_TropCyclone.ipynb @@ -142,26 +142,35 @@ "%matplotlib inline\n", "from climada.hazard import TCTracks\n", "\n", - "tr_irma = TCTracks.from_ibtracs_netcdf(provider='usa', storm_id='2017242N16333') # IRMA 2017\n", - "ax = tr_irma.plot();\n", - "ax.set_title('IRMA') # set title\n", + "tr_irma = TCTracks.from_ibtracs_netcdf(\n", + " provider=\"usa\", storm_id=\"2017242N16333\"\n", + ") # IRMA 2017\n", + "ax = tr_irma.plot()\n", + "ax.set_title(\"IRMA\") # set title\n", "\n", "# other ibtracs selection options\n", "from climada.hazard import TCTracks\n", + "\n", "# years 1993 and 1994 in basin EP.\n", "# correct_pres ignores tracks with not enough data. For statistics (frequency of events), these should be considered as well\n", - "sel_ibtracs = TCTracks.from_ibtracs_netcdf(provider='usa', year_range=(1993, 1994), basin='EP', correct_pres=False)\n", - "print('Number of tracks:', sel_ibtracs.size)\n", - "ax = sel_ibtracs.plot();\n", - "ax.get_legend()._loc = 2 # correct legend location\n", - "ax.set_title('1993-1994, EP') # set title\n", + "sel_ibtracs = TCTracks.from_ibtracs_netcdf(\n", + " provider=\"usa\", year_range=(1993, 1994), basin=\"EP\", correct_pres=False\n", + ")\n", + "print(\"Number of tracks:\", sel_ibtracs.size)\n", + "ax = sel_ibtracs.plot()\n", + "ax.get_legend()._loc = 2 # correct legend location\n", + "ax.set_title(\"1993-1994, EP\") # set title\n", "\n", - "track1 = TCTracks.from_ibtracs_netcdf(provider='usa', storm_id='2007314N10093') # SIDR 2007\n", - "track2 = TCTracks.from_ibtracs_netcdf(provider='usa', storm_id='2016138N10081') # ROANU 2016\n", - "track1.append(track2.data) # put both tracks together\n", - "ax = track1.plot();\n", - "ax.get_legend()._loc = 2 # correct legend location\n", - "ax.set_title('SIDR and ROANU'); # set title" + "track1 = TCTracks.from_ibtracs_netcdf(\n", + " provider=\"usa\", storm_id=\"2007314N10093\"\n", + ") # SIDR 2007\n", + "track2 = TCTracks.from_ibtracs_netcdf(\n", + " provider=\"usa\", storm_id=\"2016138N10081\"\n", + ") # ROANU 2016\n", + "track1.append(track2.data) # put both tracks together\n", + "ax = track1.plot()\n", + "ax.get_legend()._loc = 2 # correct legend location\n", + "ax.set_title(\"SIDR and ROANU\"); # set title" ] }, { @@ -781,7 +790,7 @@ } ], "source": [ - "tr_irma.get_track('2017242N16333')" + "tr_irma.get_track(\"2017242N16333\")" ] }, { @@ -1675,7 +1684,7 @@ } ], "source": [ - "tr_irma.data[-1] # last synthetic track. notice the value of orig_event_flag and name" + "tr_irma.data[-1] # last synthetic track. 
notice the value of orig_event_flag and name" ] }, { @@ -1701,10 +1710,7 @@ }, "outputs": [], "source": [ - "# Put your code here\n", - "\n", - "\n", - "\n" + "# Put your code here" ] }, { @@ -1737,20 +1743,23 @@ "source": [ "# SOLUTION:\n", "import numpy as np\n", + "\n", "# select the track\n", - "tc_syn = tr_irma.get_track('2017242N16333_gen1')\n", + "tc_syn = tr_irma.get_track(\"2017242N16333_gen1\")\n", "\n", "# 1. Which is the time frequency of the data?\n", "# The values of a DataArray are numpy.arrays.\n", "# The nummpy.ediff1d computes the different between elements in an array\n", "diff_time_ns = np.ediff1d(tc_syn[\"time\"])\n", - "diff_time_h = diff_time_ns.astype(int)/1000/1000/1000/60/60\n", - "print('Mean time frequency in hours:', diff_time_h.mean())\n", - "print('Std time frequency in hours:', diff_time_h.std())\n", + "diff_time_h = diff_time_ns.astype(int) / 1000 / 1000 / 1000 / 60 / 60\n", + "print(\"Mean time frequency in hours:\", diff_time_h.mean())\n", + "print(\"Std time frequency in hours:\", diff_time_h.std())\n", "print()\n", "\n", "# 2. Compute the maximum sustained wind for each day.\n", - "print('Daily max sustained wind:', tc_syn[\"max_sustained_wind\"].groupby('time.day').max())" + "print(\n", + " \"Daily max sustained wind:\", tc_syn[\"max_sustained_wind\"].groupby(\"time.day\").max()\n", + ")" ] }, { @@ -1887,15 +1896,16 @@ "min_lat, max_lat, min_lon, max_lon = 16.99375, 21.95625, -72.48125, -61.66875\n", "cent = Centroids.from_pnt_bounds((min_lon, min_lat, max_lon, max_lat), res=0.12)\n", "cent.check()\n", - "cent.plot();\n", + "cent.plot()\n", "\n", "# construct tropical cyclones\n", "tc_irma = TropCyclone.from_tracks(tr_irma, centroids=cent)\n", "# tc_irma = TropCyclone.from_tracks(tr_irma) # try without given centroids. 
It might take too much space of your memory\n", "# and then the kernel will be killed: So, don't use this function without given centroids!\n", "tc_irma.check()\n", - "tc_irma.plot_intensity('2017242N16333'); # IRMA\n", - "tc_irma.plot_intensity('2017242N16333_gen2'); # IRMA's synthetic track 2" + "tc_irma.plot_intensity(\"2017242N16333\")\n", + "# IRMA\n", + "tc_irma.plot_intensity(\"2017242N16333_gen2\"); # IRMA's synthetic track 2" ] }, { @@ -1944,13 +1954,18 @@ "source": [ "# an Irma event-like in 2055 under RCP 4.5:\n", "tc_irma = TropCyclone.from_tracks(tr_irma, centroids=cent)\n", - "tc_irma_cc = tc_irma.apply_climate_scenario_knu(target_year=2055, scenario='4.5')\n", + "tc_irma_cc = tc_irma.apply_climate_scenario_knu(target_year=2055, scenario=\"4.5\")\n", "\n", "rel_freq_incr = np.round(\n", - " (np.mean(tc_irma_cc.frequency) - np.mean(tc_irma.frequency)\n", - " ) / np.mean(tc_irma.frequency)*100, 0)\n", + " (np.mean(tc_irma_cc.frequency) - np.mean(tc_irma.frequency))\n", + " / np.mean(tc_irma.frequency)\n", + " * 100,\n", + " 0,\n", + ")\n", "\n", - "print(f\"\\nA TC like Irma would undergo a frequency increase of about {rel_freq_incr} % in 2055 under RCP 45\")" + "print(\n", + " f\"\\nA TC like Irma would undergo a frequency increase of about {rel_freq_incr} % in 2055 under RCP 45\"\n", + ")" ] }, { @@ -2067,9 +2082,9 @@ "\n", "from climada.hazard import Centroids, TropCyclone, TCTracks\n", "\n", - "track_name = '2017242N16333' #'2016273N13300' #'1992230N11325'\n", + "track_name = \"2017242N16333\" #'2016273N13300' #'1992230N11325'\n", "\n", - "tr_irma = TCTracks.from_ibtracs_netcdf(provider='usa', storm_id='2017242N16333')\n", + "tr_irma = TCTracks.from_ibtracs_netcdf(provider=\"usa\", storm_id=\"2017242N16333\")\n", "\n", "lon_min, lat_min, lon_max, lat_max = -83.5, 24.4, -79.8, 29.6\n", "centr_video = Centroids.from_pnt_bounds((lon_min, lat_min, lon_max, lat_max), 0.04)\n", @@ -2077,7 +2092,9 @@ "\n", "tc_video = TropCyclone()\n", "\n", - "tc_list, tr_coord = tc_video.video_intensity(track_name, tr_irma, centr_video, file_name='results/irma_tc_fl.gif')" + "tc_list, tr_coord = tc_video.video_intensity(\n", + " track_name, tr_irma, centr_video, file_name=\"results/irma_tc_fl.gif\"\n", + ")" ] }, { @@ -2156,9 +2173,11 @@ "from matplotlib import animation\n", "from matplotlib.pyplot import rcParams\n", "\n", - "rcParams['animation.ffmpeg_path'] = shutil.which('ffmpeg')\n", + "rcParams[\"animation.ffmpeg_path\"] = shutil.which(\"ffmpeg\")\n", "writer = animation.FFMpegWriter(bitrate=500)\n", - "tc_list, tr_coord = tc_video.video_intensity(track_name, tr_irma, centr_video, file_name='results/irma_tc_fl.mp4', writer=writer)" + "tc_list, tr_coord = tc_video.video_intensity(\n", + " track_name, tr_irma, centr_video, file_name=\"results/irma_tc_fl.mp4\", writer=writer\n", + ")" ] }, { diff --git a/doc/tutorial/climada_util_api_client.ipynb b/doc/tutorial/climada_util_api_client.ipynb index 580e0b08d..215f8b6d0 100644 --- a/doc/tutorial/climada_util_api_client.ipynb +++ b/doc/tutorial/climada_util_api_client.ipynb @@ -28,6 +28,7 @@ "outputs": [], "source": [ "from climada.util.api_client import Client\n", + "\n", "client = Client()" ] }, @@ -150,10 +151,11 @@ ], "source": [ "import pandas as pd\n", + "\n", "data_types = client.list_data_type_infos()\n", "\n", "dtf = pd.DataFrame(data_types)\n", - "dtf.sort_values(['data_type_group', 'data_type'])" + "dtf.sort_values([\"data_type_group\", \"data_type\"])" ] }, { @@ -170,7 +172,7 @@ "metadata": {}, "outputs": [], "source": [ - 
"litpop_dataset_infos = client.list_dataset_infos(data_type='litpop')" + "litpop_dataset_infos = client.list_dataset_infos(data_type=\"litpop\")" ] }, { @@ -233,7 +235,9 @@ "source": [ "# as datasets are usually available per country, chosing a country or global dataset reduces the options\n", "# here we want to see which datasets are available for litpop globally:\n", - "client.get_property_values(litpop_dataset_infos, known_property_values = {'spatial_coverage':'global'})" + "client.get_property_values(\n", + " litpop_dataset_infos, known_property_values={\"spatial_coverage\": \"global\"}\n", + ")" ] }, { @@ -259,8 +263,10 @@ } ], "source": [ - "#and here for Switzerland:\n", - "client.get_property_values(litpop_dataset_infos, known_property_values = {'country_name':'Switzerland'})" + "# and here for Switzerland:\n", + "client.get_property_values(\n", + " litpop_dataset_infos, known_property_values={\"country_name\": \"Switzerland\"}\n", + ")" ] }, { @@ -313,8 +319,10 @@ } ], "source": [ - "tc_dataset_infos = client.list_dataset_infos(data_type='tropical_cyclone')\n", - "client.get_property_values(tc_dataset_infos, known_property_values = {'country_name':'Haiti'})" + "tc_dataset_infos = client.list_dataset_infos(data_type=\"tropical_cyclone\")\n", + "client.get_property_values(\n", + " tc_dataset_infos, known_property_values={\"country_name\": \"Haiti\"}\n", + ")" ] }, { @@ -347,7 +355,15 @@ ], "source": [ "client = Client()\n", - "tc_haiti = client.get_hazard('tropical_cyclone', properties={'country_name': 'Haiti', 'climate_scenario': 'rcp45', 'ref_year':'2040', 'nb_synth_tracks':'10'})\n", + "tc_haiti = client.get_hazard(\n", + " \"tropical_cyclone\",\n", + " properties={\n", + " \"country_name\": \"Haiti\",\n", + " \"climate_scenario\": \"rcp45\",\n", + " \"ref_year\": \"2040\",\n", + " \"nb_synth_tracks\": \"10\",\n", + " },\n", + ")\n", "tc_haiti.plot_intensity(0);" ] }, @@ -365,7 +381,9 @@ "metadata": {}, "outputs": [], "source": [ - "litpop_default = client.get_property_values(litpop_dataset_infos, known_property_values = {'fin_mode':'pc', 'exponents':'(1,1)'})" + "litpop_default = client.get_property_values(\n", + " litpop_dataset_infos, known_property_values={\"fin_mode\": \"pc\", \"exponents\": \"(1,1)\"}\n", + ")" ] }, { @@ -385,7 +403,7 @@ } ], "source": [ - "litpop = client.get_litpop(country='Haiti')" + "litpop = client.get_litpop(country=\"Haiti\")" ] }, { @@ -446,6 +464,7 @@ "outputs": [], "source": [ "from climada.engine import ImpactCalc\n", + "\n", "impact = ImpactCalc(litpop, imp_fun_set, tc_haiti).impact()" ] }, @@ -476,7 +495,7 @@ } ], "source": [ - "crop_dataset_infos = client.list_dataset_infos(data_type='crop_production')\n", + "crop_dataset_infos = client.list_dataset_infos(data_type=\"crop_production\")\n", "\n", "client.get_property_values(crop_dataset_infos)" ] @@ -487,7 +506,10 @@ "metadata": {}, "outputs": [], "source": [ - "rice_exposure = client.get_exposures(exposures_type='crop_production', properties = {'crop':'ric', 'unit': 'USD','irrigation_status': 'noirr'})" + "rice_exposure = client.get_exposures(\n", + " exposures_type=\"crop_production\",\n", + " properties={\"crop\": \"ric\", \"unit\": \"USD\", \"irrigation_status\": \"noirr\"},\n", + ")" ] }, { @@ -584,7 +606,7 @@ } ], "source": [ - "centroids_nopoles = client.get_centroids(extent=[-180,180,-60,50])\n", + "centroids_nopoles = client.get_centroids(extent=[-180, 180, -60, 50])\n", "centroids_nopoles.plot()" ] }, @@ -612,7 +634,7 @@ } ], "source": [ - "centroids_hti = 
client.get_centroids(country='HTI')" + "centroids_hti = client.get_centroids(country=\"HTI\")" ] }, { @@ -667,7 +689,7 @@ } ], "source": [ - "Client?" + "?Client" ] }, { @@ -741,7 +763,7 @@ } ], "source": [ - "client.get_dataset_info_by_uuid('b1c76120-4e60-4d8f-99c0-7e1e7b7860ec')" + "client.get_dataset_info_by_uuid(\"b1c76120-4e60-4d8f-99c0-7e1e7b7860ec\")" ] }, { @@ -810,7 +832,8 @@ ], "source": [ "from climada.util.api_client import DatasetInfo\n", - "DatasetInfo?" + "\n", + "?DatasetInfo" ] }, { @@ -849,7 +872,8 @@ ], "source": [ "from climada.util.api_client import FileInfo\n", - "FileInfo?" + "\n", + "?FileInfo" ] }, { @@ -890,7 +914,7 @@ } ], "source": [ - "client.into_datasets_df?" + "?client.into_datasets_df" ] }, { @@ -1059,8 +1083,12 @@ ], "source": [ "from climada.util.api_client import Client\n", + "\n", "client = Client()\n", - "litpop_datasets = client.list_dataset_infos(data_type='litpop', properties={'country_name': 'South Georgia and the South Sandwich Islands'})\n", + "litpop_datasets = client.list_dataset_infos(\n", + " data_type=\"litpop\",\n", + " properties={\"country_name\": \"South Georgia and the South Sandwich Islands\"},\n", + ")\n", "litpop_df = client.into_datasets_df(litpop_datasets)\n", "litpop_df" ] @@ -1127,7 +1155,7 @@ } ], "source": [ - "client.download_dataset?" + "?client.download_dataset" ] }, { @@ -1161,7 +1189,9 @@ ], "source": [ "# Let's have a look at an example for downloading a litpop dataset first\n", - "ds = litpop_datasets[0] # litpop_datasets is a list and download_dataset expects a single object as argument.\n", + "ds = litpop_datasets[\n", + " 0\n", + "] # litpop_datasets is a list and download_dataset expects a single object as argument.\n", "download_dir, ds_files = client.download_dataset(ds)\n", "ds_files[0], ds_files[0].is_file()" ] @@ -1214,9 +1244,14 @@ ], "source": [ "from climada.util.api_client import Client\n", + "\n", "Client().get_dataset_file(\n", - " data_type='litpop',\n", - " properties={'country_name': 'South Georgia and the South Sandwich Islands', 'fin_mode': 'pop'})" + " data_type=\"litpop\",\n", + " properties={\n", + " \"country_name\": \"South Georgia and the South Sandwich Islands\",\n", + " \"fin_mode\": \"pop\",\n", + " },\n", + ")" ] }, { diff --git a/doc/tutorial/climada_util_earth_engine.ipynb b/doc/tutorial/climada_util_earth_engine.ipynb index d6ca785ce..10811ce4d 100644 --- a/doc/tutorial/climada_util_earth_engine.ipynb +++ b/doc/tutorial/climada_util_earth_engine.ipynb @@ -53,8 +53,9 @@ "import webbrowser\n", "\n", "import ee\n", + "\n", "ee.Initialize()\n", - "image = ee.Image('srtm90_v4')\n", + "image = ee.Image(\"srtm90_v4\")\n", "print(image.getInfo())" ] }, @@ -75,10 +76,11 @@ "outputs": [], "source": [ "# Access a specific image\n", - "image = ee.Image('LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140318'); #Landsat 8 image, with Top of Atmosphere processing, on 2014/03/18 \n", + "image = ee.Image(\"LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140318\")\n", + "# Landsat 8 image, with Top of Atmosphere processing, on 2014/03/18\n", "\n", "# Access a collection\n", - "collection = 'LANDSAT/LE07/C01/T1' #Landsat 7 raw images collection" + "collection = \"LANDSAT/LE07/C01/T1\" # Landsat 7 raw images collection" ] }, { @@ -109,32 +111,38 @@ } ], "source": [ - "#Landsat_composite in Dresden area\n", - "area_dresden = list([(13.6, 50.96), (13.9, 50.96), (13.9, 51.12), (13.6, 51.12), (13.6, 50.96)])\n", - "area_dresden = ee.Geometry.Polygon(area_dresden) \n", - "time_range_dresden = ['2002-07-28', '2002-08-05']\n", - 
"\n", - "collection_dresden = ('LANDSAT/LE07/C01/T1')\n", + "# Landsat_composite in Dresden area\n", + "area_dresden = list(\n", + " [(13.6, 50.96), (13.9, 50.96), (13.9, 51.12), (13.6, 51.12), (13.6, 50.96)]\n", + ")\n", + "area_dresden = ee.Geometry.Polygon(area_dresden)\n", + "time_range_dresden = [\"2002-07-28\", \"2002-08-05\"]\n", + "\n", + "collection_dresden = \"LANDSAT/LE07/C01/T1\"\n", "print(type(area_dresden))\n", "\n", - "#Population density in Switzerland\n", - "list_swiss = list([(6.72, 47.88),(6.72, 46.55),(9.72, 46.55),(9.72, 47.88),(6.72, 47.88)]) \n", - "area_swiss = ee.Geometry.Polygon(list_swiss) \n", - "time_range_swiss=['2002-01-01', '2005-12-30']\n", + "# Population density in Switzerland\n", + "list_swiss = list(\n", + " [(6.72, 47.88), (6.72, 46.55), (9.72, 46.55), (9.72, 47.88), (6.72, 47.88)]\n", + ")\n", + "area_swiss = ee.Geometry.Polygon(list_swiss)\n", + "time_range_swiss = [\"2002-01-01\", \"2005-12-30\"]\n", "\n", - "collection_swiss = ee.ImageCollection('CIESIN/GPWv4/population-density')\n", + "collection_swiss = ee.ImageCollection(\"CIESIN/GPWv4/population-density\")\n", "print(type(collection_swiss))\n", "\n", - "#Sentinel 2 cloud-free image in Zürich\n", - "collection_zurich = ('COPERNICUS/S2')\n", - "list_zurich = list([(8.53, 47.355),(8.55, 47.355),(8.55, 47.376),(8.53, 47.376),(8.53, 47.355)]) \n", - "area_zurich = ee.Geometry.Polygon(list_swiss) \n", - "time_range_zurich = ['2018-05-01', '2018-07-30']\n", + "# Sentinel 2 cloud-free image in Zürich\n", + "collection_zurich = \"COPERNICUS/S2\"\n", + "list_zurich = list(\n", + " [(8.53, 47.355), (8.55, 47.355), (8.55, 47.376), (8.53, 47.376), (8.53, 47.355)]\n", + ")\n", + "area_zurich = ee.Geometry.Polygon(list_swiss)\n", + "time_range_zurich = [\"2018-05-01\", \"2018-07-30\"]\n", "\n", "\n", - "#Landcover in Europe with CORINE dataset\n", - "dataset_landcover = ee.Image('COPERNICUS/CORINE/V18_5_1/100m/2012')\n", - "landCover_layer = dataset_landcover.select('landcover')\n", + "# Landcover in Europe with CORINE dataset\n", + "dataset_landcover = ee.Image(\"COPERNICUS/CORINE/V18_5_1/100m/2012\")\n", + "landCover_layer = dataset_landcover.select(\"landcover\")\n", "print(type(landCover_layer))" ] }, @@ -144,9 +152,9 @@ "metadata": {}, "outputs": [], "source": [ - "#Methods from climada.util.earth_engine module\n", + "# Methods from climada.util.earth_engine module\n", "def obtain_image_landsat_composite(collection, time_range, area):\n", - " \"\"\" Selection of Landsat cloud-free composites in the Earth Engine library\n", + " \"\"\"Selection of Landsat cloud-free composites in the Earth Engine library\n", " See also: https://developers.google.com/earth-engine/landsat\n", "\n", " Parameters:\n", @@ -156,7 +164,7 @@ "\n", " Returns:\n", " image_composite (ee.image.Image)\n", - " \"\"\"\n", + " \"\"\"\n", " collection = ee.ImageCollection(collection)\n", "\n", " ## Filter by time range and location\n", @@ -165,8 +173,9 @@ " image_composite = ee.Algorithms.Landsat.simpleComposite(image_area, 75, 3)\n", " return image_composite\n", "\n", + "\n", "def obtain_image_median(collection, time_range, area):\n", - " \"\"\" Selection of median from a collection of images in the Earth Engine library\n", + " \"\"\"Selection of median from a collection of images in the Earth Engine library\n", " See also: https://developers.google.com/earth-engine/reducers_image_collection\n", "\n", " Parameters:\n", @@ -176,7 +185,7 @@ "\n", " Returns:\n", " image_median (ee.image.Image)\n", - " \"\"\"\n", + " \"\"\"\n", " collection 
= ee.ImageCollection(collection)\n", "\n", " ## Filter by time range and location\n", @@ -185,8 +194,9 @@ " image_median = image_area.median()\n", " return image_median\n", "\n", + "\n", "def obtain_image_sentinel(collection, time_range, area):\n", - " \"\"\" Selection of median, cloud-free image from a collection of images in the Sentinel 2 dataset\n", + " \"\"\"Selection of median, cloud-free image from a collection of images in the Sentinel 2 dataset\n", " See also: https://developers.google.com/earth-engine/datasets/catalog/COPERNICUS_S2\n", "\n", " Parameters:\n", @@ -196,24 +206,28 @@ "\n", " Returns:\n", " sentinel_median (ee.image.Image)\n", - " \"\"\"\n", - "#First, method to remove cloud from the image\n", + " \"\"\"\n", + "\n", + " # First, method to remove cloud from the image\n", " def maskclouds(image):\n", - " band_qa = image.select('QA60')\n", + " band_qa = image.select(\"QA60\")\n", " cloud_mask = ee.Number(2).pow(10).int()\n", " cirrus_mask = ee.Number(2).pow(11).int()\n", - " mask = band_qa.bitwiseAnd(cloud_mask).eq(0) and(\n", - " band_qa.bitwiseAnd(cirrus_mask).eq(0))\n", + " mask = band_qa.bitwiseAnd(cloud_mask).eq(0) and (\n", + " band_qa.bitwiseAnd(cirrus_mask).eq(0)\n", + " )\n", " return image.updateMask(mask).divide(10000)\n", "\n", - " sentinel_filtered = (ee.ImageCollection(collection).\n", - " filterBounds(area).\n", - " filterDate(time_range[0], time_range[1]).\n", - " filter(ee.Filter.lt('CLOUDY_PIXEL_PERCENTAGE', 20)).\n", - " map(maskclouds))\n", + " sentinel_filtered = (\n", + " ee.ImageCollection(collection)\n", + " .filterBounds(area)\n", + " .filterDate(time_range[0], time_range[1])\n", + " .filter(ee.Filter.lt(\"CLOUDY_PIXEL_PERCENTAGE\", 20))\n", + " .map(maskclouds)\n", + " )\n", "\n", " sentinel_median = sentinel_filtered.median()\n", - " return sentinel_median\n" + " return sentinel_median" ] }, { @@ -232,13 +246,15 @@ } ], "source": [ - "#Application to examples\n", - "composite_dresden = obtain_image_landsat_composite(collection_dresden, time_range_dresden, area_dresden)\n", + "# Application to examples\n", + "composite_dresden = obtain_image_landsat_composite(\n", + " collection_dresden, time_range_dresden, area_dresden\n", + ")\n", "median_swiss = obtain_image_median(collection_swiss, time_range_swiss, area_swiss)\n", "zurich_median = obtain_image_sentinel(collection_zurich, time_range_zurich, area_zurich)\n", "\n", - "#Selection of specific bands from an image\n", - "zurich_band = zurich_median.select(['B4','B3','B2']) \n", + "# Selection of specific bands from an image\n", + "zurich_band = zurich_median.select([\"B4\", \"B3\", \"B2\"])\n", "\n", "\n", "print(composite_dresden.getInfo())\n", @@ -279,7 +295,7 @@ "\n", "region_dresden = get_region(area_dresden)\n", "region_swiss = get_region(area_swiss)\n", - "region_zurich= get_region(area_zurich)" + "region_zurich = get_region(area_zurich)" ] }, { @@ -321,24 +337,19 @@ "\n", " Returns:\n", " path (str)\n", - " \"\"\"\n", - " path = image.getDownloadURL({\n", - " 'name':(name),\n", - " 'scale': scale,\n", - " 'region':(region)\n", - " })\n", + " \"\"\"\n", + " path = image.getDownloadURL({\"name\": (name), \"scale\": scale, \"region\": (region)})\n", "\n", " webbrowser.open_new_tab(path)\n", " return path\n", "\n", - " \n", - " \n", - "url_swiss = get_url('swiss_pop', median_swiss, 900, region_swiss)\n", - "url_dresden = get_url('dresden', composite_dresden, 30, region_dresden)\n", - "url_landcover = get_url('landcover_swiss', landCover_layer, 100, region_swiss)\n", "\n", - "#For the example 
of Zürich, due to size, it doesn't work on Jupyter Notebook but it works on Python\n", - "#url_zurich = get_url('sentinel', zurich_band, 10, region_zurich)\n", + "url_swiss = get_url(\"swiss_pop\", median_swiss, 900, region_swiss)\n", + "url_dresden = get_url(\"dresden\", composite_dresden, 30, region_dresden)\n", + "url_landcover = get_url(\"landcover_swiss\", landCover_layer, 100, region_swiss)\n", + "\n", + "# For the example of Zürich, due to size, it doesn't work on Jupyter Notebook but it works on Python\n", + "# url_zurich = get_url('sentinel', zurich_band, 10, region_zurich)\n", "\n", "print(url_swiss)\n", "print(url_dresden)\n", @@ -387,7 +398,7 @@ "from skimage.filters import try_all_threshold\n", "from skimage.filters import threshold_otsu, threshold_local\n", "from skimage import measure\n", - "from skimage import feature\n" + "from skimage import feature" ] }, { @@ -398,8 +409,8 @@ "source": [ "from climada.util import DEMO_DIR\n", "\n", - "swiss_pop = DEMO_DIR.joinpath('earth_engine', 'population-density_median.tif')\n", - "dresden = DEMO_DIR.joinpath('earth_engine', 'dresden.tif') #B4 of Dresden example\n" + "swiss_pop = DEMO_DIR.joinpath(\"earth_engine\", \"population-density_median.tif\")\n", + "dresden = DEMO_DIR.joinpath(\"earth_engine\", \"dresden.tif\") # B4 of Dresden example" ] }, { @@ -433,19 +444,19 @@ } ], "source": [ - "#Read a tif in python and Visualize the image\n", + "# Read a tif in python and Visualize the image\n", "image_dresden = imread(dresden)\n", "plt.figure(figsize=(10, 10))\n", - "plt.imshow(image_dresden, cmap='gray', interpolation='nearest')\n", + "plt.imshow(image_dresden, cmap=\"gray\", interpolation=\"nearest\")\n", "plt.axis()\n", "plt.show()\n", "\n", - "#Crop the image\n", - "image_dresden_crop=image_dresden[300:700,600:1400]\n", + "# Crop the image\n", + "image_dresden_crop = image_dresden[300:700, 600:1400]\n", "plt.figure(figsize=(10, 10))\n", - "plt.imshow(image_dresden_crop, cmap='gray', interpolation='nearest')\n", + "plt.imshow(image_dresden_crop, cmap=\"gray\", interpolation=\"nearest\")\n", "plt.axis()\n", - "plt.show()\n" + "plt.show()" ] }, { @@ -467,12 +478,12 @@ } ], "source": [ - "image_pop= imread(swiss_pop)\n", + "image_pop = imread(swiss_pop)\n", "plt.figure(figsize=(12, 12))\n", - "plt.imshow(image_pop, cmap='Reds', interpolation='nearest')\n", + "plt.imshow(image_pop, cmap=\"Reds\", interpolation=\"nearest\")\n", "plt.colorbar()\n", "plt.axis()\n", - "plt.show()\n" + "plt.show()" ] }, { @@ -501,7 +512,7 @@ } ], "source": [ - "#Thresholding: Selection of pixels with regards with their value\n", + "# Thresholding: Selection of pixels with regards with their value\n", "\n", "global_thresh = threshold_otsu(image_dresden_crop)\n", "binary_global = image_dresden_crop > global_thresh\n", @@ -515,19 +526,19 @@ "plt.gray()\n", "\n", "ax[0].imshow(image_dresden_crop)\n", - "ax[0].set_title('Original')\n", + "ax[0].set_title(\"Original\")\n", "\n", "ax[1].imshow(binary_global)\n", - "ax[1].set_title('Global thresholding')\n", + "ax[1].set_title(\"Global thresholding\")\n", "\n", "ax[2].imshow(binary_adaptive)\n", - "ax[2].set_title('Adaptive thresholding')\n", + "ax[2].set_title(\"Adaptive thresholding\")\n", "\n", "for a in ax:\n", - " a.axis('off')\n", + " a.axis(\"off\")\n", "plt.show()\n", "\n", - "print(np.sum(binary_global))\n" + "print(np.sum(binary_global))" ] } ], diff --git a/doc/tutorial/climada_util_yearsets.ipynb b/doc/tutorial/climada_util_yearsets.ipynb index 747d29fcf..9ead01019 100644 --- 
a/doc/tutorial/climada_util_yearsets.ipynb +++ b/doc/tutorial/climada_util_yearsets.ipynb @@ -40,11 +40,11 @@ "import climada.util.yearsets as yearsets\n", "from climada.engine import Impact\n", "\n", - "# dummy event_impacts object containing 10 event_impacts with the values 10-110 \n", + "# dummy event_impacts object containing 10 event_impacts with the values 10-110\n", "# and the frequency 0.2 (Return period of 5 years)\n", "imp = Impact()\n", - "imp.at_event = np.arange(10,110,10)\n", - "imp.frequency = np.array(np.ones(10)*0.2)\n", + "imp.at_event = np.arange(10, 110, 10)\n", + "imp.frequency = np.array(np.ones(10) * 0.2)\n", "\n", "# the number of years to sample impacts for (length(yimp.at_event) = sampled_years)\n", "sampled_years = 10\n", @@ -147,11 +147,13 @@ ], "source": [ "# compare the resulting yimp with our step-by-step computation without applying the correction factor:\n", - "yimp, sampling_vect = yearsets.impact_yearset(imp, sampled_years=list(range(1,11)), correction_fac=False)\n", + "yimp, sampling_vect = yearsets.impact_yearset(\n", + " imp, sampled_years=list(range(1, 11)), correction_fac=False\n", + ")\n", "\n", - "print('The yimp.at_event values equal our step-by-step computed imp_per_year:')\n", - "print('yimp.at_event = ', yimp.at_event)\n", - "print('imp_per_year = ', imp_per_year)" + "print(\"The yimp.at_event values equal our step-by-step computed imp_per_year:\")\n", + "print(\"yimp.at_event = \", yimp.at_event)\n", + "print(\"imp_per_year = \", imp_per_year)" ] }, { @@ -173,12 +175,14 @@ ], "source": [ "# and here the same comparison with applying the correction factor (default settings):\n", - "yimp, sampling_vect = yearsets.impact_yearset(imp, sampled_years=list(range(1,11)))\n", + "yimp, sampling_vect = yearsets.impact_yearset(imp, sampled_years=list(range(1, 11)))\n", "\n", - "print('The same can be shown for the case of applying the correction factor.' \n", - " 'The yimp.at_event values equal our step-by-step computed imp_per year:')\n", - "print('yimp.at_event = ', yimp.at_event)\n", - "print('imp_per_year = ', imp_per_year/correction_factor)" + "print(\n", + " \"The same can be shown for the case of applying the correction factor.\"\n", + " \"The yimp.at_event values equal our step-by-step computed imp_per year:\"\n", + ")\n", + "print(\"yimp.at_event = \", yimp.at_event)\n", + "print(\"imp_per_year = \", imp_per_year / correction_factor)" ] } ], diff --git a/script/applications/eca_san_salvador/README.txt b/script/applications/eca_san_salvador/README.txt index e81b3188e..7b3fa3df3 100644 --- a/script/applications/eca_san_salvador/README.txt +++ b/script/applications/eca_san_salvador/README.txt @@ -2,4 +2,4 @@ These notebooks show how to use CLIMADA in local case studies. The data shown was generated for the Economics of Climate Adaptation study developed with KfW in San Salvador, El Salvador. These represent only a partial outcome of the project. Execute first San_Salvador_Risk.ipynb and then San_Salvador_Adaptation.ipynb. -Contact Gabriela Aznar Siguan for any questions. +Contact Gabriela Aznar Siguan for any questions. 
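For quick reference, the `climada_util_yearsets.ipynb` hunks above only re-wrap calls that already appear in that tutorial; the essential usage they reformat can be condensed into the short, self-contained sketch below. It reuses only objects and the `yearsets.impact_yearset` call pattern visible in the diff (a dummy `Impact` with `at_event` and `frequency` set by hand), and it assumes a CLIMADA installation in which that function keeps the `(imp, sampled_years, correction_fac)` signature and `(yimp, sampling_vect)` return shown there — treat it as an illustrative sketch, not part of the patch.

import numpy as np

import climada.util.yearsets as yearsets
from climada.engine import Impact

# dummy impact object as in the tutorial: 10 events with impacts 10..100
# and frequency 0.2 each (i.e. a 5-year return period)
imp = Impact()
imp.at_event = np.arange(10, 110, 10)
imp.frequency = np.ones(10) * 0.2

# sample a 10-year series of annual impacts, without the correction factor
yimp, sampling_vect = yearsets.impact_yearset(
    imp, sampled_years=list(range(1, 11)), correction_fac=False
)
print("sampled annual impacts:", yimp.at_event)

Running the same call with `correction_fac` left at its default rescales the sampled annual impacts so that their mean matches the expected annual impact of `imp`, which is the comparison the tutorial cells above walk through.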
diff --git a/script/applications/eca_san_salvador/San_Salvador_Adaptacion.ipynb b/script/applications/eca_san_salvador/San_Salvador_Adaptacion.ipynb index 21fb05cdb..5a50f09d5 100644 --- a/script/applications/eca_san_salvador/San_Salvador_Adaptacion.ipynb +++ b/script/applications/eca_san_salvador/San_Salvador_Adaptacion.ipynb @@ -128,18 +128,20 @@ "import contextily as ctx\n", "from climada.entity import Entity\n", "\n", - "ent_2015 = Entity.from_excel('FL_entity_Acelhuate_houses.xlsx')\n", - "ent_2015.exposures.ref_year = 2015 # fijamos el año de referencia\n", + "ent_2015 = Entity.from_excel(\"FL_entity_Acelhuate_houses.xlsx\")\n", + "ent_2015.exposures.ref_year = 2015 # fijamos el año de referencia\n", "ent_2015.check()\n", "\n", "# Exposures (bienes): los utilizados en el script San Salvador Risk\n", - "print('Total value in 2015: {:.3e}'.format(ent_2015.exposures.gdf.value.sum()))\n", - "ax = ent_2015.exposures.plot_basemap(s=1, zoom=15, url=ctx.providers.OpenStreetMap.Mapnik, vmax=60000, cmap='autumn')\n", - "ax.set_title('Exposure 2015')\n", + "print(\"Total value in 2015: {:.3e}\".format(ent_2015.exposures.gdf.value.sum()))\n", + "ax = ent_2015.exposures.plot_basemap(\n", + " s=1, zoom=15, url=ctx.providers.OpenStreetMap.Mapnik, vmax=60000, cmap=\"autumn\"\n", + ")\n", + "ax.set_title(\"Exposure 2015\")\n", "\n", "# Impact Functions (funciones de impacto): los utilizados en el script San Salvador Risk\n", - "ent_2015.impact_funcs.get_func('FL', 101).plot()\n", - "ent_2015.impact_funcs.get_func('FL', 102).plot()\n", + "ent_2015.impact_funcs.get_func(\"FL\", 101).plot()\n", + "ent_2015.impact_funcs.get_func(\"FL\", 102).plot()\n", "\n", "# Discount rate (tasas de descuento): 2% anual hasta 2040\n", "ent_2015.disc_rates.plot();\n", @@ -230,12 +232,16 @@ "# Exposures (bienes): crecimiento anual del 2%\n", "ent_2040.exposures.ref_year = 2040\n", "growth = 0.02\n", - "ent_2040.exposures.gdf['value'] = ent_2040.exposures.gdf.value.values*(1 + growth)**(ent_2040.exposures.ref_year - ent_2015.exposures.ref_year)\n", - "ent_2040.check() # check values are well set and assignes default values\n", + "ent_2040.exposures.gdf[\"value\"] = ent_2040.exposures.gdf.value.values * (\n", + " 1 + growth\n", + ") ** (ent_2040.exposures.ref_year - ent_2015.exposures.ref_year)\n", + "ent_2040.check() # check values are well set and assignes default values\n", "\n", - "print('Valor total en 2040: {:.3e}'.format(ent_2040.exposures.gdf.value.sum()))\n", - "ax = ent_2040.exposures.plot_basemap(s=1, zoom=15, url=ctx.providers.OpenStreetMap.Mapnik, vmax=60000, cmap='autumn')\n", - "ax.set_title('Exposure 2040');" + "print(\"Valor total en 2040: {:.3e}\".format(ent_2040.exposures.gdf.value.sum()))\n", + "ax = ent_2040.exposures.plot_basemap(\n", + " s=1, zoom=15, url=ctx.providers.OpenStreetMap.Mapnik, vmax=60000, cmap=\"autumn\"\n", + ")\n", + "ax.set_title(\"Exposure 2040\");" ] }, { @@ -286,11 +292,13 @@ "import matplotlib.patches as patches\n", "from climada.hazard import Hazard\n", "\n", - "HAZ_FILE = 'Salvador_hazard_FL_2015.mat'\n", + "HAZ_FILE = \"Salvador_hazard_FL_2015.mat\"\n", "\n", - "haz_2015 = Hazard.from_mat(HAZ_FILE) # cargamos el fichero\n", + "haz_2015 = Hazard.from_mat(HAZ_FILE) # cargamos el fichero\n", "ax = haz_2015.plot_intensity(0) # intensidad máxima alcanzada en cada punto\n", - "rect = patches.Rectangle((-0.0027, 13.6738), 0.0355, 0.0233, linewidth=1, edgecolor='r', facecolor='none') # add exposures focus\n", + "rect = patches.Rectangle(\n", + " (-0.0027, 13.6738), 0.0355, 0.0233, 
linewidth=1, edgecolor=\"r\", facecolor=\"none\"\n", + ") # add exposures focus\n", "ax.add_patch(rect);" ] }, @@ -348,11 +356,13 @@ "# inundaciones en 2040 bajo un fuerte cambio climático\n", "from climada.hazard import Hazard\n", "\n", - "HAZ_FILE = 'Salvador_hazard_FL_2040_extreme_cc.mat'\n", + "HAZ_FILE = \"Salvador_hazard_FL_2040_extreme_cc.mat\"\n", "\n", - "haz_2040 = Hazard.from_mat(HAZ_FILE) # cargamos el fichero\n", + "haz_2040 = Hazard.from_mat(HAZ_FILE) # cargamos el fichero\n", "ax = haz_2040.plot_intensity(0) # intensidad máxima alcanzada en cada punto\n", - "rect = patches.Rectangle((-0.0027, 13.6738), 0.0355, 0.0233, linewidth=1, edgecolor='r', facecolor='none') # add exposures focus\n", + "rect = patches.Rectangle(\n", + " (-0.0027, 13.6738), 0.0355, 0.0233, linewidth=1, edgecolor=\"r\", facecolor=\"none\"\n", + ") # add exposures focus\n", "ax.add_patch(rect);" ] }, @@ -410,7 +420,7 @@ "\n", "cb_acel = CostBenefit()\n", "ax = cb_acel.plot_waterfall(haz_2015, ent_2015, haz_2040, ent_2040)\n", - "ax.set_title('Expected Annual Impact');" + "ax.set_title(\"Expected Annual Impact\");" ] }, { @@ -460,8 +470,10 @@ "from climada.engine import risk_rp_100\n", "\n", "cb_acel = CostBenefit()\n", - "ax = cb_acel.plot_waterfall(haz_2015, ent_2015, haz_2040, ent_2040, risk_func=risk_rp_100)\n", - "ax.set_title('Impact Exceedance at 100 years Return Period');" + "ax = cb_acel.plot_waterfall(\n", + " haz_2015, ent_2015, haz_2040, ent_2040, risk_func=risk_rp_100\n", + ")\n", + "ax.set_title(\"Impact Exceedance at 100 years Return Period\");" ] }, { @@ -524,22 +536,25 @@ ], "source": [ "# impacto de la medida en 2015: No descargas en Lluvia\n", - "meas = ent_2015.measures.get_measure('FL', 'No descargas en Lluvia')\n", - "print('Measure cost {:.3e} USD'.format(meas.cost))\n", - "meas_exp_2015, meas_impf_2015, meas_haz_2015 = meas.apply(ent_2015.exposures, ent_2015.impact_funcs, haz_2015)\n", + "meas = ent_2015.measures.get_measure(\"FL\", \"No descargas en Lluvia\")\n", + "print(\"Measure cost {:.3e} USD\".format(meas.cost))\n", + "meas_exp_2015, meas_impf_2015, meas_haz_2015 = meas.apply(\n", + " ent_2015.exposures, ent_2015.impact_funcs, haz_2015\n", + ")\n", "\n", "# los bienes no cambian\n", "\n", "# las funciones de impacto mejoran ligeramente:\n", - "ax = meas_impf_2015.get_func('FL', 101).plot()\n", - "ax.set_title('Flooding AUP House with measure')\n", + "ax = meas_impf_2015.get_func(\"FL\", 101).plot()\n", + "ax.set_title(\"Flooding AUP House with measure\")\n", "\n", "# inundación reducida hasta 4.8 metros en los eventos más graves:\n", "import numpy as np\n", + "\n", "haz_diff = copy.deepcopy(haz_2015)\n", - "haz_diff.intensity = (haz_2015.intensity - meas_haz_2015.intensity)\n", - "ax = haz_diff.plot_intensity(0) # maximum intensity difference at each point\n", - "ax.set_title('Maximumm reduced intensity with measure');" + "haz_diff.intensity = haz_2015.intensity - meas_haz_2015.intensity\n", + "ax = haz_diff.plot_intensity(0) # maximum intensity difference at each point\n", + "ax.set_title(\"Maximumm reduced intensity with measure\");" ] }, { @@ -569,7 +584,7 @@ ], "source": [ "# nombre de cada medida considerada\n", - "for meas in ent_2040.measures.get_measure('FL'): # measures related to flood (FL)\n", + "for meas in ent_2040.measures.get_measure(\"FL\"): # measures related to flood (FL)\n", " print(meas.name)" ] }, @@ -668,8 +683,8 @@ ], "source": [ "cb_acel = CostBenefit()\n", - "cb_acel.calc(haz_2015, ent_2015, haz_2040, ent_2040, save_imp=True) # calcula\n", - 
"cb_acel.plot_cost_benefit(); # dibuja el cociente beneficio/costo por medida" + "cb_acel.calc(haz_2015, ent_2015, haz_2040, ent_2040, save_imp=True) # calcula\n", + "cb_acel.plot_cost_benefit(); # dibuja el cociente beneficio/costo por medida" ] }, { @@ -718,8 +733,13 @@ ], "source": [ "import matplotlib.colors as colors\n", - "cb_comb = cb_acel.combine_measures(['No descargas en Lluvia', 'Ahorradores en Agua en casas'],\n", - " 'Domestico', colors.to_rgb('lightcoral'), ent_2015.disc_rates)" + "\n", + "cb_comb = cb_acel.combine_measures(\n", + " [\"No descargas en Lluvia\", \"Ahorradores en Agua en casas\"],\n", + " \"Domestico\",\n", + " colors.to_rgb(\"lightcoral\"),\n", + " ent_2015.disc_rates,\n", + ")" ] }, { @@ -765,8 +785,14 @@ } ], "source": [ - "cb_comb.apply_risk_transfer('Domestico', attachment=1000, cover=22000000, \n", - " disc_rates=ent_2015.disc_rates, cost_fix=0, cost_factor=1.5)" + "cb_comb.apply_risk_transfer(\n", + " \"Domestico\",\n", + " attachment=1000,\n", + " cover=22000000,\n", + " disc_rates=ent_2015.disc_rates,\n", + " cost_fix=0,\n", + " cost_factor=1.5,\n", + ")" ] }, { @@ -859,7 +885,9 @@ ], "source": [ "ax = cb_acel.plot_waterfall_accumulated(haz_2015, ent_2015, ent_2040)\n", - "cb_acel.plot_arrow_averted(ax, accumulate=True, combine=True, disc_rates=ent_2015.disc_rates) # plot total averted damages" + "cb_acel.plot_arrow_averted(\n", + " ax, accumulate=True, combine=True, disc_rates=ent_2015.disc_rates\n", + ") # plot total averted damages" ] }, { @@ -893,6 +921,7 @@ "source": [ "# show difference between sublinear, linear and superlinear expected annual damage growth\n", "import functions_ss\n", + "\n", "functions_ss.non_linear_growth(cb_acel)" ] }, @@ -996,10 +1025,14 @@ ], "source": [ "# change growth\n", - "growth_fact = 0.5 # < 1: sublinear, >1: superlinear\n", + "growth_fact = 0.5 # < 1: sublinear, >1: superlinear\n", "cb_acel_sub = CostBenefit()\n", - "cb_acel_sub.calc(haz_2015, ent_2015, haz_2040, ent_2040, imp_time_depen=growth_fact, save_imp=True)\n", - "cb_acel_sub.plot_waterfall_accumulated(haz_2015, ent_2015, ent_2040, imp_time_depen=growth_fact);" + "cb_acel_sub.calc(\n", + " haz_2015, ent_2015, haz_2040, ent_2040, imp_time_depen=growth_fact, save_imp=True\n", + ")\n", + "cb_acel_sub.plot_waterfall_accumulated(\n", + " haz_2015, ent_2015, ent_2040, imp_time_depen=growth_fact\n", + ");" ] }, { diff --git a/script/applications/eca_san_salvador/San_Salvador_Adaptation.ipynb b/script/applications/eca_san_salvador/San_Salvador_Adaptation.ipynb index 0701e4759..98388d991 100644 --- a/script/applications/eca_san_salvador/San_Salvador_Adaptation.ipynb +++ b/script/applications/eca_san_salvador/San_Salvador_Adaptation.ipynb @@ -92,22 +92,25 @@ ], "source": [ "from warnings import simplefilter\n", - "simplefilter(action='ignore')\n", + "\n", + "simplefilter(action=\"ignore\")\n", "import contextily as ctx\n", "from climada.entity import Entity\n", "\n", - "ent_2015 = Entity.from_excel('FL_entity_Acelhuate_houses.xlsx')\n", - "ent_2015.exposures.ref_year = 2015 # fix reference year\n", + "ent_2015 = Entity.from_excel(\"FL_entity_Acelhuate_houses.xlsx\")\n", + "ent_2015.exposures.ref_year = 2015 # fix reference year\n", "ent_2015.check()\n", "\n", "# Exposures: the ones we used in San Salvador Risk script\n", - "print('Total value in 2015: {:.3e}'.format(ent_2015.exposures.gdf.value.sum()))\n", - "ax = ent_2015.exposures.plot_basemap(s=1, zoom=15, url=ctx.providers.OpenStreetMap.Mapnik, vmax=60000, cmap='autumn')\n", - "ax.set_title('Exposure 2015')\n", + 
"print(\"Total value in 2015: {:.3e}\".format(ent_2015.exposures.gdf.value.sum()))\n", + "ax = ent_2015.exposures.plot_basemap(\n", + " s=1, zoom=15, url=ctx.providers.OpenStreetMap.Mapnik, vmax=60000, cmap=\"autumn\"\n", + ")\n", + "ax.set_title(\"Exposure 2015\")\n", "\n", "# Impact Functions: the ones we used in San Salvador Risk script\n", - "ent_2015.impact_funcs.get_func('FL', 101).plot()\n", - "ent_2015.impact_funcs.get_func('FL', 102).plot()\n", + "ent_2015.impact_funcs.get_func(\"FL\", 101).plot()\n", + "ent_2015.impact_funcs.get_func(\"FL\", 102).plot()\n", "\n", "# Discount rate: 2% yearly discount year until 2040\n", "ent_2015.disc_rates.plot();\n", @@ -165,12 +168,16 @@ "# Exposures: yearl economic growth of 2% in exposures\n", "ent_2040.exposures.ref_year = 2040\n", "growth = 0.02\n", - "ent_2040.exposures.gdf['value'] = ent_2040.exposures.gdf.value.values*(1 + growth)**(ent_2040.exposures.ref_year - ent_2015.exposures.ref_year)\n", - "ent_2040.check() # check values are well set and assignes default values\n", + "ent_2040.exposures.gdf[\"value\"] = ent_2040.exposures.gdf.value.values * (\n", + " 1 + growth\n", + ") ** (ent_2040.exposures.ref_year - ent_2015.exposures.ref_year)\n", + "ent_2040.check() # check values are well set and assignes default values\n", "\n", - "print('Total value in 2040: {:.3e}'.format(ent_2040.exposures.gdf.value.sum()))\n", - "ax = ent_2040.exposures.plot_basemap(s=1, zoom=15, url=ctx.providers.OpenStreetMap.Mapnik, vmax=60000, cmap='autumn')\n", - "ax.set_title('Exposure 2040');" + "print(\"Total value in 2040: {:.3e}\".format(ent_2040.exposures.gdf.value.sum()))\n", + "ax = ent_2040.exposures.plot_basemap(\n", + " s=1, zoom=15, url=ctx.providers.OpenStreetMap.Mapnik, vmax=60000, cmap=\"autumn\"\n", + ")\n", + "ax.set_title(\"Exposure 2040\");" ] }, { @@ -212,11 +219,13 @@ "import matplotlib.patches as patches\n", "from climada.hazard import Hazard\n", "\n", - "HAZ_FILE = 'Salvador_hazard_FL_2015.mat'\n", + "HAZ_FILE = \"Salvador_hazard_FL_2015.mat\"\n", "\n", - "haz_2015 = Hazard.from_mat(HAZ_FILE) # load file\n", + "haz_2015 = Hazard.from_mat(HAZ_FILE) # load file\n", "ax = haz_2015.plot_intensity(0) # maximum intensity reached at each point\n", - "rect = patches.Rectangle((-0.0027, 13.6738), 0.0355, 0.0233, linewidth=1, edgecolor='r', facecolor='none') # add exposures focus\n", + "rect = patches.Rectangle(\n", + " (-0.0027, 13.6738), 0.0355, 0.0233, linewidth=1, edgecolor=\"r\", facecolor=\"none\"\n", + ") # add exposures focus\n", "ax.add_patch(rect);" ] }, @@ -265,11 +274,13 @@ "# flood as for 2040 with extreme climate change\n", "from climada.hazard import Hazard\n", "\n", - "HAZ_FILE = 'Salvador_hazard_FL_2040_extreme_cc.mat'\n", + "HAZ_FILE = \"Salvador_hazard_FL_2040_extreme_cc.mat\"\n", "\n", - "haz_2040 = Hazard.from_mat(HAZ_FILE) # load file\n", + "haz_2040 = Hazard.from_mat(HAZ_FILE) # load file\n", "ax = haz_2040.plot_intensity(0) # maximum intensity reached at each point\n", - "rect = patches.Rectangle((-0.0027, 13.6738), 0.0355, 0.0233, linewidth=1, edgecolor='r', facecolor='none') # add exposures focus\n", + "rect = patches.Rectangle(\n", + " (-0.0027, 13.6738), 0.0355, 0.0233, linewidth=1, edgecolor=\"r\", facecolor=\"none\"\n", + ") # add exposures focus\n", "ax.add_patch(rect);" ] }, @@ -310,7 +321,7 @@ "\n", "cb_acel = CostBenefit()\n", "ax = cb_acel.plot_waterfall(haz_2015, ent_2015, haz_2040, ent_2040)\n", - "ax.set_title('Expected Annual Impact');" + "ax.set_title(\"Expected Annual Impact\");" ] }, { @@ -345,8 +356,10 @@ 
"from climada.engine import risk_rp_100\n", "\n", "cb_acel = CostBenefit()\n", - "ax = cb_acel.plot_waterfall(haz_2015, ent_2015, haz_2040, ent_2040, risk_func=risk_rp_100)\n", - "ax.set_title('Impact Exceedance at 100 years Return Period');" + "ax = cb_acel.plot_waterfall(\n", + " haz_2015, ent_2015, haz_2040, ent_2040, risk_func=risk_rp_100\n", + ")\n", + "ax.set_title(\"Impact Exceedance at 100 years Return Period\");" ] }, { @@ -400,22 +413,25 @@ ], "source": [ "# Measure impact in 2015: No descargas en Lluvia\n", - "meas = ent_2015.measures.get_measure('FL', 'No descargas en Lluvia')\n", - "print('Measure cost {:.3e} USD'.format(meas.cost))\n", - "meas_exp_2015, meas_impf2015, meas_haz_2015 = meas.apply(ent_2015.exposures, ent_2015.impact_funcs, haz_2015)\n", + "meas = ent_2015.measures.get_measure(\"FL\", \"No descargas en Lluvia\")\n", + "print(\"Measure cost {:.3e} USD\".format(meas.cost))\n", + "meas_exp_2015, meas_impf2015, meas_haz_2015 = meas.apply(\n", + " ent_2015.exposures, ent_2015.impact_funcs, haz_2015\n", + ")\n", "\n", "# exposures stay the same\n", "\n", "# impact functions slightly improved:\n", - "ax = meas_impf2015.get_func('FL', 101).plot()\n", - "ax.set_title('Flooding AUP House with measure')\n", + "ax = meas_impf2015.get_func(\"FL\", 101).plot()\n", + "ax.set_title(\"Flooding AUP House with measure\")\n", "\n", "# flood reduced up to 4.8 meters in worst events:\n", "import numpy as np\n", + "\n", "haz_diff = copy.deepcopy(haz_2015)\n", - "haz_diff.intensity = (haz_2015.intensity - meas_haz_2015.intensity)\n", - "ax = haz_diff.plot_intensity(0) # maximum intensity difference at each point\n", - "ax.set_title('Maximumm reduced intensity with measure');" + "haz_diff.intensity = haz_2015.intensity - meas_haz_2015.intensity\n", + "ax = haz_diff.plot_intensity(0) # maximum intensity difference at each point\n", + "ax.set_title(\"Maximumm reduced intensity with measure\");" ] }, { @@ -445,7 +461,7 @@ ], "source": [ "# name of every considered measure\n", - "for meas in ent_2040.measures.get_measure('FL'): # measures related to flood (FL)\n", + "for meas in ent_2040.measures.get_measure(\"FL\"): # measures related to flood (FL)\n", " print(meas.name)" ] }, @@ -491,8 +507,8 @@ ], "source": [ "cb_acel = CostBenefit()\n", - "cb_acel.calc(haz_2015, ent_2015, haz_2040, ent_2040, save_imp=True) # compute\n", - "cb_acel.plot_cost_benefit(); # plot benefit/cost ratio per measure" + "cb_acel.calc(haz_2015, ent_2015, haz_2040, ent_2040, save_imp=True) # compute\n", + "cb_acel.plot_cost_benefit(); # plot benefit/cost ratio per measure" ] }, { @@ -541,8 +557,13 @@ ], "source": [ "import matplotlib.colors as colors\n", - "cb_comb = cb_acel.combine_measures(['No descargas en Lluvia', 'Ahorradores en Agua en casas'],\n", - " 'Domestico', colors.to_rgb('lightcoral'), ent_2015.disc_rates)" + "\n", + "cb_comb = cb_acel.combine_measures(\n", + " [\"No descargas en Lluvia\", \"Ahorradores en Agua en casas\"],\n", + " \"Domestico\",\n", + " colors.to_rgb(\"lightcoral\"),\n", + " ent_2015.disc_rates,\n", + ")" ] }, { @@ -588,8 +609,14 @@ } ], "source": [ - "cb_comb.apply_risk_transfer('Domestico', attachment=1000, cover=22000000, \n", - " disc_rates=ent_2015.disc_rates, cost_fix=0, cost_factor=1.5)" + "cb_comb.apply_risk_transfer(\n", + " \"Domestico\",\n", + " attachment=1000,\n", + " cover=22000000,\n", + " disc_rates=ent_2015.disc_rates,\n", + " cost_fix=0,\n", + " cost_factor=1.5,\n", + ")" ] }, { @@ -674,7 +701,9 @@ ], "source": [ "ax = cb_acel.plot_waterfall_accumulated(haz_2015, 
ent_2015, ent_2040)\n", - "cb_acel.plot_arrow_averted(ax, accumulate=True, combine=True, disc_rates=ent_2015.disc_rates); # plot total averted damages" + "cb_acel.plot_arrow_averted(\n", + " ax, accumulate=True, combine=True, disc_rates=ent_2015.disc_rates\n", + "); # plot total averted damages" ] }, { @@ -705,6 +734,7 @@ "source": [ "# show difference between sublinear, linear and superlinear expected annual damage growth\n", "import functions_ss\n", + "\n", "functions_ss.non_linear_growth(cb_acel)" ] }, @@ -750,10 +780,14 @@ ], "source": [ "# change growth\n", - "growth_fact = 0.5 # < 1: sublinear, >1: superlinear\n", + "growth_fact = 0.5 # < 1: sublinear, >1: superlinear\n", "cb_acel_sub = CostBenefit()\n", - "cb_acel_sub.calc(haz_2015, ent_2015, haz_2040, ent_2040, imp_time_depen=growth_fact, save_imp=True)\n", - "cb_acel_sub.plot_waterfall_accumulated(haz_2015, ent_2015, ent_2040, imp_time_depen=growth_fact);" + "cb_acel_sub.calc(\n", + " haz_2015, ent_2015, haz_2040, ent_2040, imp_time_depen=growth_fact, save_imp=True\n", + ")\n", + "cb_acel_sub.plot_waterfall_accumulated(\n", + " haz_2015, ent_2015, ent_2040, imp_time_depen=growth_fact\n", + ");" ] }, { diff --git a/script/applications/eca_san_salvador/San_Salvador_Parametric.ipynb b/script/applications/eca_san_salvador/San_Salvador_Parametric.ipynb index 3cafb8b3c..360be7511 100644 --- a/script/applications/eca_san_salvador/San_Salvador_Parametric.ipynb +++ b/script/applications/eca_san_salvador/San_Salvador_Parametric.ipynb @@ -44,13 +44,13 @@ "import contextily as ctx\n", "from climada.engine import Impact\n", "\n", - "ent_2015_param = Entity.from_excel('FL_entity_Acelhuate_parametric.xlsx')\n", - "ent_2015_param.exposures.ref_year = 2015 # fix reference year\n", + "ent_2015_param = Entity.from_excel(\"FL_entity_Acelhuate_parametric.xlsx\")\n", + "ent_2015_param.exposures.ref_year = 2015 # fix reference year\n", "ent_2015_param.check()\n", "\n", "# flood as for 2015\n", - "HAZ_FILE = 'Salvador_hazard_FL_2015.mat'\n", - "haz_2015 = Hazard.from_mat(HAZ_FILE) # load file" + "HAZ_FILE = \"Salvador_hazard_FL_2015.mat\"\n", + "haz_2015 = Hazard.from_mat(HAZ_FILE) # load file" ] }, { @@ -97,9 +97,13 @@ ], "source": [ "param_payout = Impact()\n", - "param_payout.calc(ent_2015_param.exposures, ent_2015_param.impact_funcs, haz_2015) # compute parametric payout\n", - "print('Annual expected payout: {:} USD'.format(param_payout.aai_agg)) # get average annual payout\n", - "param_payout.calc_freq_curve().plot() " + "param_payout.calc(\n", + " ent_2015_param.exposures, ent_2015_param.impact_funcs, haz_2015\n", + ") # compute parametric payout\n", + "print(\n", + " \"Annual expected payout: {:} USD\".format(param_payout.aai_agg)\n", + ") # get average annual payout\n", + "param_payout.calc_freq_curve().plot()" ] }, { @@ -163,8 +167,8 @@ } ], "source": [ - "ent_2015 = Entity.from_excel('FL_entity_Acelhuate_houses.xlsx')\n", - "ent_2015.exposures.ref_year = 2015 # fix reference year\n", + "ent_2015 = Entity.from_excel(\"FL_entity_Acelhuate_houses.xlsx\")\n", + "ent_2015.exposures.ref_year = 2015 # fix reference year\n", "ent_2015.check()\n", "\n", "ent_2040 = copy.deepcopy(ent_2015)\n", @@ -172,19 +176,25 @@ "# Exposures: yearly economic growth of 2% in exposures\n", "ent_2040.exposures.ref_year = 2040\n", "growth = 0.02\n", - "ent_2040.exposures.gdf['value'] = ent_2040.exposures.gdf.value.values*(1 + growth)**(ent_2040.exposures.ref_year - ent_2015.exposures.ref_year)\n", - "ent_2040.check() # check values are well set and assignes default 
values\n", + "ent_2040.exposures.gdf[\"value\"] = ent_2040.exposures.gdf.value.values * (\n", + " 1 + growth\n", + ") ** (ent_2040.exposures.ref_year - ent_2015.exposures.ref_year)\n", + "ent_2040.check() # check values are well set and assignes default values\n", "\n", "# flood as for 2040 with extreme climate change\n", - "HAZ_FILE = 'Salvador_hazard_FL_2040_extreme_cc.mat'\n", - "haz_2040 = Hazard.from_mat(HAZ_FILE) # load file\n", + "HAZ_FILE = \"Salvador_hazard_FL_2040_extreme_cc.mat\"\n", + "haz_2040 = Hazard.from_mat(HAZ_FILE) # load file\n", "\n", "# expected annual impact\n", "cb_acel = CostBenefit()\n", - "cb_acel.calc(haz_2015, ent_2015, haz_2040, ent_2040, save_imp=True) # compute\n", + "cb_acel.calc(haz_2015, ent_2015, haz_2040, ent_2040, save_imp=True) # compute\n", "\n", - "cb_comb = cb_acel.combine_measures(['No descargas en Lluvia', 'Ahorradores en Agua en casas'],\n", - " 'Domestico', colors.to_rgb('lightcoral'), ent_2015.disc_rates)" + "cb_comb = cb_acel.combine_measures(\n", + " [\"No descargas en Lluvia\", \"Ahorradores en Agua en casas\"],\n", + " \"Domestico\",\n", + " colors.to_rgb(\"lightcoral\"),\n", + " ent_2015.disc_rates,\n", + ")" ] }, { @@ -208,10 +218,12 @@ } ], "source": [ - "damage_after_measures=cb_comb.imp_meas_present['Domestico']['impact'].at_event\n", - "paramteric_payout=param_payout.at_event\n", - "residual_damage=np.sum((damage_after_measures-paramteric_payout)*haz_2015.frequency)\n", - "print('residual damage: {:.3e} USD'.format(residual_damage))" + "damage_after_measures = cb_comb.imp_meas_present[\"Domestico\"][\"impact\"].at_event\n", + "paramteric_payout = param_payout.at_event\n", + "residual_damage = np.sum(\n", + " (damage_after_measures - paramteric_payout) * haz_2015.frequency\n", + ")\n", + "print(\"residual damage: {:.3e} USD\".format(residual_damage))" ] } ], diff --git a/script/applications/eca_san_salvador/San_Salvador_Risk.ipynb b/script/applications/eca_san_salvador/San_Salvador_Risk.ipynb index 29da95b78..b73180b38 100644 --- a/script/applications/eca_san_salvador/San_Salvador_Risk.ipynb +++ b/script/applications/eca_san_salvador/San_Salvador_Risk.ipynb @@ -16,6 +16,7 @@ "%%capture\n", "# generate plots used in this script\n", "import functions_ss\n", + "\n", "fig_ma, fig_point, fig_houses, fig_if = functions_ss.generate_plots_risk()" ] }, @@ -245,7 +246,7 @@ } ], "source": [ - "acc_df = functions_ss.load_accounting() # load accounting.xlsx\n", + "acc_df = functions_ss.load_accounting() # load accounting.xlsx\n", "acc_df.head()" ] }, @@ -424,10 +425,10 @@ "import pandas as pd\n", "from climada.entity import Exposures\n", "\n", - "ENT_FILE = 'FL_entity_Acelhuate_houses.xlsx' # entity file name\n", + "ENT_FILE = \"FL_entity_Acelhuate_houses.xlsx\" # entity file name\n", "\n", "exp_acel = Exposures(pd.read_excel(ENT_FILE))\n", - "exp_acel.check() # check values are well set and assigns default values\n", + "exp_acel.check() # check values are well set and assigns default values\n", "exp_acel.gdf.head() # show first 5 rows" ] }, @@ -459,8 +460,12 @@ ], "source": [ "# some statistics on AUPs and non AUPs\n", - "print('Number of houses, mean and total value of AUP and non AUP: \\n')\n", - "print(exp_acel.gdf[['category', 'value']].groupby('category').agg(['count', 'mean', 'sum']))" + "print(\"Number of houses, mean and total value of AUP and non AUP: \\n\")\n", + "print(\n", + " exp_acel.gdf[[\"category\", \"value\"]]\n", + " .groupby(\"category\")\n", + " .agg([\"count\", \"mean\", \"sum\"])\n", + ")" ] }, { @@ -488,7 +493,7 @@ } ], 
"source": [ - "print(exp_acel.gdf[['category', 'impf_FL']].groupby('category').agg(['unique']))" + "print(exp_acel.gdf[[\"category\", \"impf_FL\"]].groupby(\"category\").agg([\"unique\"]))" ] }, { @@ -551,9 +556,11 @@ "impf_acel = ImpactFuncSet.from_excel(ENT_FILE)\n", "impf_acel.check()\n", "\n", - "print('MDD: mean damage ratio; PAA: percentage of afected assets; MDR = PAA*MDD: mean damage ratio:')\n", - "impf_acel.get_func('FL', 101).plot() # plot flood function 101\n", - "impf_acel.get_func('FL', 102).plot(); # plot flood function 102" + "print(\n", + " \"MDD: mean damage ratio; PAA: percentage of afected assets; MDR = PAA*MDD: mean damage ratio:\"\n", + ")\n", + "impf_acel.get_func(\"FL\", 101).plot() # plot flood function 101\n", + "impf_acel.get_func(\"FL\", 102).plot(); # plot flood function 102" ] }, { @@ -573,9 +580,9 @@ "source": [ "from climada.hazard import Hazard\n", "\n", - "HAZ_FILE = 'Salvador_hazard_FL_2015.mat'\n", + "HAZ_FILE = \"Salvador_hazard_FL_2015.mat\"\n", "\n", - "haz_acel = Hazard.from_mat(HAZ_FILE) # load file" + "haz_acel = Hazard.from_mat(HAZ_FILE) # load file" ] }, { @@ -680,7 +687,7 @@ "from climada.engine import Impact\n", "\n", "imp_acel = Impact()\n", - "imp_acel.calc(exp_acel, impf_acel, haz_acel) # compute hazard's impact over exposure" + "imp_acel.calc(exp_acel, impf_acel, haz_acel) # compute hazard's impact over exposure" ] }, { @@ -719,8 +726,10 @@ } ], "source": [ - "print('Annual expected impact: {:.3e} USD'.format(imp_acel.aai_agg)) # get average annual impact\n", - "imp_acel.calc_freq_curve().plot(); # plot exceedance frequency curve" + "print(\n", + " \"Annual expected impact: {:.3e} USD\".format(imp_acel.aai_agg)\n", + ") # get average annual impact\n", + "imp_acel.calc_freq_curve().plot(); # plot exceedance frequency curve" ] }, { @@ -748,7 +757,11 @@ "point_lat = exp_acel.gdf.latitude.values[point_idx]\n", "point_lon = exp_acel.gdf.longitude.values[point_idx]\n", "point_eai = imp_acel.eai_exp[point_idx]\n", - "print('Annual expected impact in {:.4f}° N {:.4f}° W is {:.0f} USD.'.format(-point_lat, point_lon, point_eai))" + "print(\n", + " \"Annual expected impact in {:.4f}° N {:.4f}° W is {:.0f} USD.\".format(\n", + " -point_lat, point_lon, point_eai\n", + " )\n", + ")" ] }, { @@ -796,7 +809,10 @@ ], "source": [ "import contextily as ctx\n", - "imp_acel.plot_basemap_eai_exposure(url=ctx.providers.OpenStreetMap.Mapnik, zoom=15, s=2, cmap='gnuplot');" + "\n", + "imp_acel.plot_basemap_eai_exposure(\n", + " url=ctx.providers.OpenStreetMap.Mapnik, zoom=15, s=2, cmap=\"gnuplot\"\n", + ");" ] }, { @@ -837,8 +853,15 @@ ], "source": [ "import numpy as np\n", - "ax = imp_acel.plot_basemap_eai_exposure(mask=np.argwhere((exp_acel.gdf.category==2).to_numpy()).reshape(-1), url=ctx.providers.OpenStreetMap.Mapnik, zoom=15, s=2, cmap='gnuplot')\n", - "ax.set_title('Expected Annual Impact - no AUP');" + "\n", + "ax = imp_acel.plot_basemap_eai_exposure(\n", + " mask=np.argwhere((exp_acel.gdf.category == 2).to_numpy()).reshape(-1),\n", + " url=ctx.providers.OpenStreetMap.Mapnik,\n", + " zoom=15,\n", + " s=2,\n", + " cmap=\"gnuplot\",\n", + ")\n", + "ax.set_title(\"Expected Annual Impact - no AUP\");" ] }, { @@ -878,8 +901,14 @@ } ], "source": [ - "ax = imp_acel.plot_basemap_eai_exposure(mask=np.argwhere((exp_acel.gdf.category==1).to_numpy()).reshape(-1), url=ctx.providers.OpenStreetMap.Mapnik, zoom=15, s=2, cmap='gnuplot')\n", - "ax.set_title('Expected Annual Impact - AUP');" + "ax = imp_acel.plot_basemap_eai_exposure(\n", + " 
mask=np.argwhere((exp_acel.gdf.category == 1).to_numpy()).reshape(-1),\n", + " url=ctx.providers.OpenStreetMap.Mapnik,\n", + " zoom=15,\n", + " s=2,\n", + " cmap=\"gnuplot\",\n", + ")\n", + "ax.set_title(\"Expected Annual Impact - AUP\");" ] }, { @@ -906,15 +935,21 @@ } ], "source": [ - "eai_aup = imp_acel.eai_exp[exp_acel.gdf[exp_acel.gdf.category==1].index].sum()\n", - "print('Annual expected impact of AUPs: {:.3e} USD.'.format(eai_aup))\n", - "eai_per_aup = eai_aup/exp_acel.gdf[exp_acel.gdf.category==1].value.sum()*100\n", - "print('Annual expected impact of AUPs over its total value: {:.2f}%.'.format(eai_per_aup))\n", + "eai_aup = imp_acel.eai_exp[exp_acel.gdf[exp_acel.gdf.category == 1].index].sum()\n", + "print(\"Annual expected impact of AUPs: {:.3e} USD.\".format(eai_aup))\n", + "eai_per_aup = eai_aup / exp_acel.gdf[exp_acel.gdf.category == 1].value.sum() * 100\n", + "print(\n", + " \"Annual expected impact of AUPs over its total value: {:.2f}%.\".format(eai_per_aup)\n", + ")\n", "\n", - "eai_no_aup = imp_acel.eai_exp[exp_acel.gdf[exp_acel.gdf.category==2].index].sum()\n", - "print('Annual expected impact of non AUPs: {:.3e} USD.'.format(eai_no_aup))\n", - "eai_per_no_aup = eai_no_aup/exp_acel.gdf[exp_acel.gdf.category==1].value.sum()*100\n", - "print('Annual expected impact of non AUPs over its total value: {:.2f}%.'.format(eai_per_no_aup))" + "eai_no_aup = imp_acel.eai_exp[exp_acel.gdf[exp_acel.gdf.category == 2].index].sum()\n", + "print(\"Annual expected impact of non AUPs: {:.3e} USD.\".format(eai_no_aup))\n", + "eai_per_no_aup = eai_no_aup / exp_acel.gdf[exp_acel.gdf.category == 1].value.sum() * 100\n", + "print(\n", + " \"Annual expected impact of non AUPs over its total value: {:.2f}%.\".format(\n", + " eai_per_no_aup\n", + " )\n", + ")" ] } ], diff --git a/script/applications/eca_san_salvador/functions_ss.py b/script/applications/eca_san_salvador/functions_ss.py index caee8a4f5..3d0478558 100755 --- a/script/applications/eca_san_salvador/functions_ss.py +++ b/script/applications/eca_san_salvador/functions_ss.py @@ -19,47 +19,59 @@ Define WaterScarcity (WS) class. 
WORK IN PROGRESS """ + import contextily as ctx import geopandas as gpd import matplotlib.patches as patches from matplotlib import colormaps as cm from shapely import wkt + def plot_salvador_ma(): - risk_shape = 'POLYGON ((-89.25090785340315 13.671, -89.251 13.671, -89.251 13.67108933717579, -89.251 13.67117867435158, -89.251 13.67126801152738, -89.251 13.67135734870317, -89.251 13.67144668587896, -89.251 13.67153602305475, -89.251 13.67162536023055, -89.251 13.67171469740634, -89.251 13.67180403458213, -89.251 13.67189337175792, -89.251 13.67198270893372, -89.251 13.67207204610951, -89.251 13.6721613832853, -89.251 13.6722507204611, -89.251 13.67234005763689, -89.251 13.67242939481268, -89.251 13.67251873198847, -89.251 13.67260806916426, -89.251 13.67269740634006, -89.251 13.67278674351585, -89.251 13.67287608069164, -89.251 13.67296541786743, -89.251 13.67305475504323, -89.251 13.67314409221902, -89.251 13.67323342939481, -89.251 13.6733227665706, -89.251 13.6734121037464, -89.251 13.67350144092219, -89.251 13.67359077809798, -89.251 13.67368011527378, -89.251 13.67376945244957, -89.251 13.67385878962536, -89.251 13.67394812680115, -89.251 13.67403746397694, -89.251 13.67412680115274, -89.251 13.67421613832853, -89.251 13.67430547550432, -89.251 13.67439481268011, -89.251 13.67448414985591, -89.251 13.6745734870317, -89.251 13.67466282420749, -89.251 13.67475216138329, -89.251 13.67484149855908, -89.251 13.67493083573487, -89.251 13.67502017291066, -89.251 13.67510951008645, -89.251 13.67519884726225, -89.251 13.67528818443804, -89.251 13.67537752161383, -89.251 13.67546685878962, -89.251 13.67555619596542, -89.251 13.67564553314121, -89.251 13.675734870317, -89.251 13.67582420749279, -89.251 13.67591354466859, -89.251 13.67600288184438, -89.251 13.67609221902017, -89.251 13.67618155619597, -89.251 13.67627089337176, -89.251 13.67636023054755, -89.251 13.67644956772334, -89.251 13.67653890489913, -89.251 13.67662824207493, -89.251 13.67671757925072, -89.251 13.67680691642651, -89.251 13.6768962536023, -89.251 13.6769855907781, -89.251 13.67707492795389, -89.251 13.67716426512968, -89.251 13.67725360230548, -89.251 13.67734293948127, -89.251 13.67743227665706, -89.251 13.67752161383285, -89.251 13.67761095100865, -89.251 13.67770028818444, -89.251 13.67778962536023, -89.251 13.67787896253602, -89.251 13.67796829971181, -89.251 13.67805763688761, -89.25090785340315 13.67832564841498, -89.25081570680629 13.67850432276657, -89.25072356020942 13.67868299711816, -89.25063141361257 13.67886167146974, -89.250354973822 13.67921902017291, -89.25017068062827 13.67948703170029, -89.2498942408377 13.67984438040346, -89.24961780104712 13.68020172910663, -89.24934136125655 13.6805590778098, -89.24915706806283 13.68082708933717, -89.24888062827226 13.68118443804035, -89.24860418848168 13.68154178674352, -89.24832774869111 13.68189913544669, -89.24814345549738 13.68216714697406, -89.24786701570682 13.68252449567723, -89.24759057591623 13.6828818443804, -89.24740628272252 13.68314985590778, -89.24712984293194 13.68350720461095, -89.24685340314137 13.68386455331412, -89.24657696335079 13.68422190201729, -89.24639267015708 13.68448991354467, -89.24556335078535 13.68556195965418, -89.24510261780105 13.68609798270893, -89.2450104712042 13.68618731988473, -89.24491832460734 13.68627665706052, -89.24436544502618 13.68690201729107, -89.24427329842932 13.68699135446686, -89.24372041884817 13.68761671469741, -89.24362827225131 13.6877060518732, -89.24353612565446 13.68779538904899, -89.24298324607331 
13.68842074927954, -89.24289109947644 13.68851008645533, -89.24233821989529 13.68913544668588, -89.24224607329843 13.68922478386167, -89.24169319371728 13.68985014409222, -89.24160104712043 13.68993948126801, -89.24150890052357 13.6900288184438, -89.24095602094241 13.69065417867435, -89.24086387434555 13.69074351585014, -89.24077172774869 13.69083285302594, -89.24067958115184 13.69092219020173, -89.24058743455498 13.69101152737752, -89.24049528795813 13.69110086455331, -89.24040314136126 13.69119020172911, -89.2403109947644 13.6912795389049, -89.24021884816754 13.69136887608069, -89.23975811518325 13.69163688760807, -89.23929738219896 13.69190489913545, -89.23865235602095 13.69226224783862, -89.23819162303666 13.69253025936599, -89.23773089005236 13.69279827089337, -89.23708586387436 13.69315561959654, -89.23662513089006 13.69342363112392, -89.23616439790577 13.6936916426513, -89.23570366492147 13.69395965417867, -89.23505863874345 13.69431700288184, -89.23459790575916 13.69458501440922, -89.23413717277488 13.6948530259366, -89.23376858638744 13.69494236311239, -89.23321570680629 13.69503170028818, -89.23266282722513 13.69512103746398, -89.23220209424085 13.69521037463977, -89.23164921465968 13.69529971181556, -89.23109633507853 13.69538904899135, -89.23054345549738 13.69547838616715, -89.23008272251309 13.69556772334294, -89.22952984293194 13.69565706051873, -89.22897696335079 13.69574639769452, -89.22851623036649 13.69583573487032, -89.22741047120419 13.6960144092219, -89.22685759162304 13.69610374639769, -89.22621256544502 13.69619308357349, -89.22612041884817 13.69619308357349, -89.22602827225131 13.69619308357349, -89.22593612565446 13.69619308357349, -89.2258439790576 13.69619308357349, -89.22575183246073 13.69619308357349, -89.22565968586387 13.69619308357349, -89.22556753926702 13.69619308357349, -89.22547539267016 13.69619308357349, -89.22538324607331 13.69619308357349, -89.22529109947644 13.69619308357349, -89.22519895287958 13.69619308357349, -89.22510680628272 13.69619308357349, -89.22501465968587 13.69619308357349, -89.22492251308901 13.69619308357349, -89.22483036649214 13.69619308357349, -89.22473821989529 13.69619308357349, -89.22464607329843 13.69619308357349, -89.22455392670157 13.69619308357349, -89.22446178010472 13.69619308357349, -89.22436963350786 13.69619308357349, -89.22427748691099 13.69619308357349, -89.22418534031414 13.69619308357349, -89.22409319371728 13.69619308357349, -89.22400104712042 13.69619308357349, -89.22390890052357 13.69619308357349, -89.2238167539267 13.69619308357349, -89.22372460732984 13.69619308357349, -89.22363246073299 13.69619308357349, -89.22354031413613 13.69619308357349, -89.22344816753927 13.69619308357349, -89.2233560209424 13.69619308357349, -89.22326387434555 13.69619308357349, -89.22317172774869 13.69619308357349, -89.2222502617801 13.69628242074928, -89.22215811518325 13.69628242074928, -89.22206596858639 13.69628242074928, -89.22197382198954 13.69628242074928, -89.22188167539267 13.69628242074928, -89.22178952879581 13.69628242074928, -89.22169738219895 13.69628242074928, -89.2216052356021 13.69628242074928, -89.22151308900524 13.69628242074928, -89.22142094240837 13.69628242074928, -89.22132879581152 13.69628242074928, -89.22123664921466 13.69628242074928, -89.2211445026178 13.69628242074928, -89.22105235602095 13.69628242074928, -89.22096020942409 13.69628242074928, -89.22086806282722 13.69628242074928, -89.22077591623037 13.69628242074928, -89.22059162303665 13.69619308357349, -89.22031518324607 13.6960144092219, 
-89.21985445026178 13.69574639769452, -89.21957801047121 13.69556772334294, -89.21930157068063 13.69538904899135, -89.21902513089006 13.69521037463977, -89.21874869109948 13.69503170028818, -89.21828795811518 13.69476368876081, -89.21801151832462 13.69458501440922, -89.21773507853403 13.69440634005764, -89.21745863874345 13.69422766570605, -89.21699790575916 13.69395965417867, -89.21672146596859 13.69378097982709, -89.21644502617801 13.6936023054755, -89.21616858638744 13.69342363112392, -89.21589214659686 13.69324495677233, -89.21543141361256 13.69297694524496, -89.215154973822 13.69279827089337, -89.21091623036649 13.69226224783862, -89.21063979057591 13.69235158501441, -89.21036335078534 13.6924409221902, -89.21008691099476 13.69253025936599, -89.20981047120419 13.69261959654179, -89.2093497382199 13.69279827089337, -89.20907329842932 13.69288760806916, -89.20879685863875 13.69297694524496, -89.20852041884817 13.69306628242075, -89.2082439790576 13.69315561959654, -89.20815183246073 13.69315561959654, -89.20805968586387 13.69315561959654, -89.20796753926702 13.69315561959654, -89.20787539267016 13.69315561959654, -89.2077832460733 13.69315561959654, -89.20769109947643 13.69315561959654, -89.20695392670157 13.69306628242075, -89.20630890052357 13.69297694524496, -89.20557172774869 13.69288760806916, -89.20492670157068 13.69279827089337, -89.20418952879581 13.69270893371758, -89.2035445026178 13.69261959654179, -89.20280732984293 13.69253025936599, -89.19976649214659 13.69288760806916, -89.19912146596859 13.69315561959654, -89.19847643979058 13.69342363112392, -89.19829214659686 13.69351296829971, -89.19764712041885 13.69378097982709, -89.19700209424084 13.69404899135447, -89.19681780104712 13.69413832853026, -89.19617277486911 13.69440634005764, -89.19552774869111 13.69467435158501, -89.19534345549738 13.69476368876081, -89.19469842931937 13.69503170028818, -89.19405340314137 13.69529971181556, -89.19386910994764 13.69538904899135, -89.19322408376964 13.69565706051873, -89.19303979057591 13.69574639769452, -89.1923947643979 13.6960144092219, -89.1917497382199 13.69628242074928, -89.19156544502617 13.69637175792507, -89.19092041884817 13.69663976945245, -89.19027539267016 13.69690778097983, -89.19009109947643 13.69699711815562, -89.18944607329843 13.697265129683, -89.18880104712042 13.69753314121037, -89.18861675392669 13.69762247838617, -89.18797172774869 13.69789048991355, -89.18732670157068 13.69815850144092, -89.18714240837696 13.69824783861671, -89.18668167539266 13.6984265129683, -89.18658952879581 13.6984265129683, -89.18649738219895 13.6984265129683, -89.18640523560209 13.6984265129683, -89.18631308900524 13.6984265129683, -89.18566806282722 13.69815850144092, -89.18502303664921 13.69789048991355, -89.18456230366492 13.69771181556196, -89.18391727748691 13.69744380403458, -89.18327225130889 13.6971757925072, -89.18262722513089 13.69690778097983, -89.1790335078534 13.69672910662824, -89.17875706806282 13.69681844380403, -89.17848062827225 13.69690778097983, -89.17820418848167 13.69699711815562, -89.1779277486911 13.69708645533141, -89.17765130890052 13.6971757925072, -89.17700628272252 13.69735446685879, -89.17672984293193 13.69744380403458, -89.17645340314137 13.69753314121037, -89.17617696335078 13.69762247838617, -89.1759005235602 13.69771181556196, -89.17562408376963 13.69780115273775, -89.17534764397905 13.69789048991355, -89.17470261780105 13.69806916426513, -89.1725832460733 13.69931988472622, -89.17249109947643 13.69940922190202, -89.17193821989528 13.70003458213256, 
-89.17184607329843 13.70012391930836, -89.17175392670157 13.70021325648415, -89.17120104712042 13.7008386167147, -89.17110890052356 13.70092795389049, -89.17101675392669 13.70101729106628, -89.17092460732984 13.70110662824207, -89.17037172774869 13.70173198847262, -89.17027958115183 13.70182132564842, -89.17018743455498 13.70191066282421, -89.17009528795812 13.702, -89.17000314136125 13.702, -89.16991099476439 13.702, -89.16981884816754 13.702, -89.16972670157068 13.702, -89.16963455497383 13.702, -89.16954240837696 13.702, -89.1694502617801 13.702, -89.16935811518324 13.702, -89.16926596858639 13.702, -89.16917382198953 13.702, -89.16908167539266 13.702, -89.16898952879581 13.702, -89.16889738219895 13.702, -89.16880523560209 13.702, -89.16871308900524 13.702, -89.16862094240837 13.702, -89.16852879581151 13.702, -89.16843664921466 13.702, -89.1683445026178 13.702, -89.16825235602094 13.702, -89.16816020942409 13.702, -89.16806806282722 13.702, -89.16797591623036 13.702, -89.16788376963351 13.702, -89.16779162303665 13.702, -89.16769947643979 13.702, -89.16760732984292 13.702, -89.16751518324607 13.702, -89.16742303664921 13.702, -89.16733089005236 13.702, -89.1672387434555 13.702, -89.16714659685863 13.702, -89.16705445026177 13.702, -89.16696230366492 13.702, -89.16687015706806 13.702, -89.16677801047121 13.702, -89.16668586387435 13.702, -89.16659371727748 13.702, -89.16650157068062 13.702, -89.16640942408377 13.702, -89.16631727748691 13.702, -89.16622513089006 13.702, -89.16613298429318 13.702, -89.16604083769633 13.702, -89.16594869109947 13.702, -89.16585654450262 13.702, -89.16576439790576 13.702, -89.16567225130889 13.702, -89.16558010471203 13.702, -89.16548795811518 13.702, -89.16539581151832 13.702, -89.16530366492147 13.702, -89.16521151832461 13.702, -89.16511937172774 13.702, -89.16502722513088 13.702, -89.16493507853403 13.702, -89.16484293193717 13.702, -89.16475078534032 13.702, -89.16465863874345 13.702, -89.16456649214659 13.702, -89.16447434554973 13.702, -89.16438219895288 13.702, -89.16429005235602 13.702, -89.16419790575915 13.702, -89.1641057591623 13.702, -89.16401361256544 13.702, -89.16392146596858 13.702, -89.16382931937173 13.702, -89.16373717277487 13.702, -89.163645026178 13.702, -89.16355287958115 13.702, -89.16346073298429 13.702, -89.16336858638743 13.702, -89.16327643979058 13.702, -89.16318429319371 13.702, -89.16309214659685 13.702, -89.163 13.702, -89.163 13.70191066282421, -89.163 13.70182132564842, -89.163 13.70173198847262, -89.163 13.70164265129683, -89.163 13.70155331412104, -89.163 13.70146397694525, -89.163 13.70137463976945, -89.163 13.70128530259366, -89.163 13.70119596541787, -89.163 13.70110662824207, -89.163 13.70101729106628, -89.163 13.70092795389049, -89.163 13.7008386167147, -89.163 13.7007492795389, -89.163 13.70065994236311, -89.163 13.70057060518732, -89.163 13.70048126801153, -89.163 13.70039193083574, -89.163 13.70030259365994, -89.163 13.70021325648415, -89.163 13.70012391930836, -89.163 13.70003458213256, -89.163 13.69994524495677, -89.163 13.69985590778098, -89.163 13.69976657060519, -89.163 13.69967723342939, -89.163 13.6995878962536, -89.163 13.69949855907781, -89.163 13.69940922190202, -89.163 13.69931988472622, -89.163 13.69923054755043, -89.163 13.69914121037464, -89.163 13.69905187319885, -89.163 13.69896253602306, -89.163 13.69887319884726, -89.163 13.69878386167147, -89.163 13.69869452449568, -89.163 13.69860518731988, -89.163 13.69851585014409, -89.163 13.6984265129683, -89.163 13.69833717579251, -89.163 
13.69824783861671, -89.163 13.69815850144092, -89.163 13.69806916426513, -89.163 13.69797982708934, -89.163 13.69789048991355, -89.163 13.69780115273775, -89.163 13.69771181556196, -89.163 13.69762247838617, -89.163 13.69753314121037, -89.163 13.69744380403458, -89.163 13.69735446685879, -89.163 13.697265129683, -89.163 13.6971757925072, -89.163 13.69708645533141, -89.163 13.69699711815562, -89.163 13.69690778097983, -89.163 13.69681844380403, -89.163 13.69672910662824, -89.163 13.69663976945245, -89.163 13.69655043227666, -89.163 13.69646109510087, -89.163 13.69637175792507, -89.163 13.69628242074928, -89.163 13.69619308357349, -89.163 13.69610374639769, -89.163 13.6960144092219, -89.163 13.69592507204611, -89.163 13.69583573487032, -89.163 13.69574639769452, -89.163 13.69565706051873, -89.163 13.69556772334294, -89.163 13.69547838616715, -89.163 13.69538904899135, -89.163 13.69529971181556, -89.163 13.69521037463977, -89.163 13.69512103746398, -89.163 13.69503170028818, -89.163 13.69494236311239, -89.163 13.6948530259366, -89.163 13.69476368876081, -89.163 13.69467435158501, -89.163 13.69458501440922, -89.163 13.69449567723343, -89.163 13.69440634005764, -89.163 13.69431700288184, -89.163 13.69422766570605, -89.163 13.69413832853026, -89.163 13.69404899135447, -89.163 13.69395965417867, -89.163 13.69387031700288, -89.163 13.69378097982709, -89.163 13.6936916426513, -89.163 13.6936023054755, -89.163 13.69351296829971, -89.163 13.69342363112392, -89.163 13.69333429394813, -89.163 13.69324495677233, -89.16327643979058 13.69306628242075, -89.16355287958115 13.69288760806916, -89.16382931937173 13.69270893371758, -89.1641057591623 13.69253025936599, -89.16438219895288 13.69235158501441, -89.16465863874345 13.69217291066282, -89.16493507853403 13.69199423631124, -89.16521151832461 13.69181556195965, -89.16548795811518 13.69163688760807, -89.16576439790576 13.69145821325648, -89.16604083769633 13.6912795389049, -89.16631727748691 13.69110086455331, -89.16659371727748 13.69092219020173, -89.16677801047121 13.69083285302594, -89.16696230366492 13.69074351585014, -89.16714659685863 13.69065417867435, -89.16733089005236 13.69056484149856, -89.16797591623036 13.69020749279539, -89.16816020942409 13.6901181556196, -89.1683445026178 13.6900288184438, -89.16852879581151 13.68993948126801, -89.16871308900524 13.68985014409222, -89.16889738219895 13.68976080691643, -89.16908167539266 13.68967146974063, -89.16972670157068 13.68931412103746, -89.16991099476439 13.68922478386167, -89.17009528795812 13.68913544668588, -89.17027958115183 13.68904610951009, -89.17046387434554 13.68895677233429, -89.17064816753927 13.6888674351585, -89.17083246073298 13.68877809798271, -89.17147748691099 13.68842074927954, -89.17166178010471 13.68833141210375, -89.17350471204188 13.68681268011527, -89.17378115183246 13.6864553314121, -89.17405759162304 13.68609798270893, -89.17442617801046 13.68565129682997, -89.17470261780105 13.6852939481268, -89.17507120418848 13.68484726224784, -89.17534764397905 13.68448991354467, -89.17571623036649 13.68404322766571, -89.17580837696335 13.68395389048991, -89.17599267015707 13.68386455331412, -89.17617696335078 13.68377521613833, -89.1763612565445 13.68368587896254, -89.17654554973822 13.68359654178674, -89.17672984293193 13.68350720461095, -89.17691413612565 13.68341786743516, -89.17709842931937 13.68332853025936, -89.17728272251308 13.68323919308357, -89.17746701570681 13.68314985590778, -89.17765130890052 13.68306051873199, -89.17783560209423 13.6829711815562, -89.17801989528796 
13.6828818443804, -89.17820418848167 13.68279250720461, -89.17884921465968 13.68252449567723, -89.1790335078534 13.68243515850144, -89.17921780104712 13.68234582132565, -89.17940209424084 13.68225648414986, -89.17958638743455 13.68216714697406, -89.17977068062827 13.68207780979827, -89.17995497382199 13.68198847262248, -89.1801392670157 13.68189913544669, -89.18032356020942 13.68180979827089, -89.18050785340314 13.6817204610951, -89.18069214659685 13.68163112391931, -89.18087643979058 13.68154178674352, -89.18152146596859 13.68127377521614, -89.1817057591623 13.68118443804035, -89.18189005235602 13.68109510086455, -89.18198219895288 13.68109510086455, -89.18207434554974 13.68109510086455, -89.18216649214659 13.68109510086455, -89.18225863874345 13.68109510086455, -89.18235078534032 13.68109510086455, -89.18244293193717 13.68109510086455, -89.18253507853403 13.68109510086455, -89.18262722513089 13.68109510086455, -89.18271937172774 13.68109510086455, -89.18281151832461 13.68109510086455, -89.18290366492147 13.68109510086455, -89.18299581151832 13.68109510086455, -89.18308795811518 13.68109510086455, -89.18318010471204 13.68109510086455, -89.18327225130889 13.68109510086455, -89.18336439790576 13.68109510086455, -89.18345654450262 13.68109510086455, -89.18354869109947 13.68109510086455, -89.18364083769633 13.68109510086455, -89.18373298429319 13.68109510086455, -89.18382513089006 13.68109510086455, -89.18806387434554 13.67868299711816, -89.18843246073298 13.67805763688761, -89.18880104712042 13.67743227665706, -89.18889319371728 13.67725360230548, -89.18926178010472 13.67662824207493, -89.18963036649214 13.67600288184438, -89.18972251308901 13.67582420749279, -89.19009109947643 13.67519884726225, -89.19045968586387 13.6745734870317, -89.19055183246073 13.67439481268011, -89.19092041884817 13.67376945244957, -89.19119685863875 13.6733227665706, -89.19138115183246 13.67323342939481, -89.19156544502617 13.67314409221902, -89.1917497382199 13.67305475504323, -89.1923947643979 13.67278674351585, -89.19257905759163 13.67269740634006, -89.19276335078534 13.67260806916426, -89.19294764397905 13.67251873198847, -89.19313193717278 13.67242939481268, -89.19331623036649 13.67234005763689, -89.1935005235602 13.6722507204611, -89.19368481675393 13.6721613832853, -89.19386910994764 13.67207204610951, -89.19405340314137 13.67198270893372, -89.19469842931937 13.67171469740634, -89.19488272251309 13.67162536023055, -89.19506701570681 13.67153602305475, -89.19525130890052 13.67144668587896, -89.19543560209424 13.67135734870317, -89.19561989528796 13.67126801152738, -89.19580418848167 13.67117867435158, -89.19598848167539 13.67108933717579, -89.19617277486911 13.671, -89.19626492146597 13.671, -89.19635706806282 13.671, -89.19644921465968 13.671, -89.19654136125655 13.671, -89.19663350785341 13.671, -89.19672565445026 13.671, -89.19681780104712 13.671, -89.19690994764397 13.671, -89.19700209424084 13.671, -89.1970942408377 13.671, -89.19718638743456 13.671, -89.19727853403141 13.671, -89.19737068062827 13.671, -89.19746282722512 13.671, -89.19755497382199 13.671, -89.19764712041885 13.671, -89.19773926701571 13.671, -89.19783141361256 13.671, -89.19792356020942 13.671, -89.19801570680629 13.671, -89.19810785340314 13.671, -89.1982 13.671, -89.19829214659686 13.671, -89.19838429319371 13.671, -89.19847643979058 13.671, -89.19856858638744 13.671, -89.19866073298429 13.671, -89.19875287958115 13.671, -89.19884502617801 13.671, -89.19893717277488 13.671, -89.19902931937173 13.671, -89.19912146596859 13.671, 
-89.19921361256544 13.671, -89.1993057591623 13.671, -89.19939790575916 13.671, -89.19949005235603 13.671, -89.19958219895288 13.671, -89.19967434554974 13.671, -89.19976649214659 13.671, -89.19985863874345 13.671, -89.19995078534032 13.671, -89.20004293193718 13.671, -89.20013507853403 13.671, -89.20022722513089 13.671, -89.20031937172774 13.671, -89.20041151832461 13.671, -89.20050366492147 13.671, -89.20059581151833 13.671, -89.20068795811518 13.671, -89.20078010471204 13.671, -89.20087225130889 13.671, -89.20096439790576 13.671, -89.20105654450262 13.671, -89.20114869109948 13.671, -89.20124083769633 13.671, -89.20133298429319 13.671, -89.20142513089006 13.671, -89.20151727748691 13.671, -89.20160942408377 13.671, -89.20170157068063 13.671, -89.20179371727748 13.671, -89.20188586387435 13.671, -89.20197801047121 13.671, -89.20207015706806 13.671, -89.20216230366492 13.671, -89.20225445026178 13.671, -89.20234659685863 13.671, -89.2024387434555 13.671, -89.20253089005236 13.671, -89.20262303664921 13.671, -89.20271518324607 13.671, -89.20280732984293 13.671, -89.2028994764398 13.671, -89.20299162303665 13.671, -89.20308376963351 13.671, -89.20317591623036 13.671, -89.20326806282722 13.671, -89.20336020942409 13.671, -89.20345235602095 13.671, -89.2035445026178 13.671, -89.20363664921466 13.671, -89.20372879581151 13.671, -89.20382094240837 13.671, -89.20391308900524 13.671, -89.2040052356021 13.671, -89.20409738219895 13.671, -89.20418952879581 13.671, -89.20428167539266 13.671, -89.20437382198953 13.671, -89.20446596858639 13.671, -89.20455811518325 13.671, -89.2046502617801 13.671, -89.20474240837696 13.671, -89.20483455497383 13.671, -89.20492670157068 13.671, -89.20501884816754 13.671, -89.2051109947644 13.671, -89.20520314136125 13.671, -89.20529528795812 13.671, -89.20538743455498 13.671, -89.20547958115183 13.671, -89.20557172774869 13.671, -89.20566387434555 13.671, -89.2057560209424 13.671, -89.20584816753927 13.671, -89.20594031413613 13.671, -89.20603246073298 13.671, -89.20612460732984 13.671, -89.2062167539267 13.671, -89.20640104712042 13.67108933717579, -89.20658534031413 13.67117867435158, -89.20676963350786 13.67126801152738, -89.20695392670157 13.67135734870317, -89.20713821989528 13.67144668587896, -89.20723036649214 13.67153602305475, -89.20723036649214 13.67162536023055, -89.20723036649214 13.67171469740634, -89.20723036649214 13.67180403458213, -89.20704607329843 13.67242939481268, -89.20686178010472 13.67305475504323, -89.20667748691099 13.67368011527378, -89.20658534031413 13.67394812680115, -89.20640104712042 13.6745734870317, -89.21036335078534 13.68118443804035, -89.21091623036649 13.68127377521614, -89.21146910994764 13.68136311239193, -89.21211413612565 13.68145244956772, -89.21266701570681 13.68154178674352, -89.21321989528796 13.68163112391931, -89.21377277486911 13.6817204610951, -89.21441780104712 13.68180979827089, -89.2146942408377 13.68189913544669, -89.21487853403141 13.68198847262248, -89.215154973822 13.68216714697406, -89.21543141361256 13.68234582132565, -89.21589214659686 13.68261383285303, -89.21616858638744 13.68279250720461, -89.21644502617801 13.6829711815562, -89.21672146596859 13.68314985590778, -89.21699790575916 13.68332853025936, -89.21727434554974 13.68350720461095, -89.21773507853403 13.68377521613833, -89.21801151832462 13.68395389048991, -89.21828795811518 13.6841325648415, -89.21856439790577 13.68431123919308, -89.21884083769633 13.68448991354467, -89.21911727748692 13.68466858789625, -89.22031518324607 13.68520461095101, 
-89.22059162303665 13.6852939481268, -89.22086806282722 13.68538328530259, -89.2211445026178 13.68547262247839, -89.22142094240837 13.68556195965418, -89.22169738219895 13.68565129682997, -89.22197382198954 13.68574063400576, -89.2222502617801 13.68582997118156, -89.22252670157069 13.68591930835735, -89.22280314136125 13.68600864553314, -89.22307958115184 13.68609798270893, -89.2233560209424 13.68618731988473, -89.22363246073299 13.68627665706052, -89.22390890052357 13.68636599423631, -89.22768691099476 13.68618731988473, -89.22833193717278 13.68591930835735, -89.22851623036649 13.68582997118156, -89.22870052356021 13.68574063400576, -89.22888481675393 13.68565129682997, -89.22906910994764 13.68556195965418, -89.22925340314137 13.68547262247839, -89.22943769633508 13.68538328530259, -89.22962198952879 13.6852939481268, -89.22980628272252 13.68520461095101, -89.22999057591623 13.68511527377522, -89.23017486910994 13.68502593659942, -89.23035916230367 13.68493659942363, -89.23054345549738 13.68484726224784, -89.23072774869111 13.68475792507205, -89.23091204188482 13.68466858789625, -89.23109633507853 13.68457925072046, -89.23174136125655 13.68431123919308, -89.23192565445027 13.68422190201729, -89.23358429319372 13.6828818443804, -89.23413717277488 13.68225648414986, -89.23469005235603 13.68163112391931, -89.23478219895289 13.68154178674352, -89.23533507853404 13.68091642651297, -89.23588795811519 13.68029106628242, -89.23598010471204 13.68020172910663, -89.2360722513089 13.68011239193084, -89.23616439790577 13.68002305475504, -89.23625654450262 13.67993371757925, -89.23634869109948 13.67984438040346, -89.23644083769634 13.67975504322767, -89.23653298429319 13.67966570605187, -89.23662513089006 13.67957636887608, -89.23671727748692 13.67948703170029, -89.23680942408377 13.67939769452449, -89.23690157068063 13.6793083573487, -89.23699371727749 13.67921902017291, -89.23754659685864 13.67859365994236, -89.23763874345551 13.67850432276657, -89.23773089005236 13.67841498559078, -89.23782303664922 13.67832564841498, -89.23791518324607 13.67823631123919, -89.23800732984293 13.6781469740634, -89.2380994764398 13.67805763688761, -89.23819162303666 13.67796829971181, -89.23828376963351 13.67787896253602, -89.23837591623037 13.67778962536023, -89.23846806282722 13.67770028818444, -89.23856020942409 13.67761095100865, -89.23865235602095 13.67752161383285, -89.23874450261781 13.67743227665706, -89.23883664921466 13.67734293948127, -89.23938952879581 13.67671757925072, -89.23948167539267 13.67662824207493, -89.23957382198954 13.67653890489913, -89.23966596858639 13.67644956772334, -89.23975811518325 13.67636023054755, -89.23985026178011 13.67627089337176, -89.23994240837696 13.67618155619597, -89.24003455497383 13.67609221902017, -89.24012670157069 13.67600288184438, -89.24021884816754 13.67591354466859, -89.2403109947644 13.67582420749279, -89.24040314136126 13.675734870317, -89.24049528795813 13.67564553314121, -89.24058743455498 13.67555619596542, -89.24114031413613 13.67493083573487, -89.24123246073299 13.67484149855908, -89.24132460732984 13.67475216138329, -89.2414167539267 13.67466282420749, -89.24150890052357 13.6745734870317, -89.24160104712043 13.67448414985591, -89.24169319371728 13.67439481268011, -89.24178534031414 13.67430547550432, -89.24298324607331 13.67251873198847, -89.24316753926702 13.67207204610951, -89.24335183246073 13.67162536023055, -89.24353612565446 13.67117867435158, -89.24362827225131 13.671, -89.24372041884817 13.671, -89.24381256544503 13.671, -89.24390471204188 13.671, 
-89.24399685863875 13.671, -89.24408900523561 13.671, -89.24418115183246 13.671, -89.24427329842932 13.671, -89.24436544502618 13.671, -89.24445759162305 13.671, -89.2445497382199 13.671, -89.24464188481676 13.671, -89.24473403141361 13.671, -89.24482617801047 13.671, -89.24491832460734 13.671, -89.2450104712042 13.671, -89.24510261780105 13.671, -89.24519476439791 13.671, -89.24528691099476 13.671, -89.24537905759163 13.671, -89.24547120418849 13.671, -89.24556335078535 13.671, -89.2456554973822 13.671, -89.24574764397906 13.671, -89.24583979057591 13.671, -89.24593193717278 13.671, -89.24602408376964 13.671, -89.2461162303665 13.671, -89.24620837696335 13.671, -89.24630052356021 13.671, -89.24639267015708 13.671, -89.24648481675393 13.671, -89.24657696335079 13.671, -89.24666910994765 13.671, -89.2467612565445 13.671, -89.24685340314137 13.671, -89.24694554973823 13.671, -89.24703769633508 13.671, -89.24712984293194 13.671, -89.2472219895288 13.671, -89.24731413612565 13.671, -89.24740628272252 13.671, -89.24749842931938 13.671, -89.24759057591623 13.671, -89.24768272251309 13.671, -89.24777486910995 13.671, -89.24786701570682 13.671, -89.24795916230367 13.671, -89.24805130890053 13.671, -89.24814345549738 13.671, -89.24823560209424 13.671, -89.24832774869111 13.671, -89.24841989528797 13.671, -89.24851204188482 13.671, -89.24860418848168 13.671, -89.24869633507853 13.671, -89.24878848167539 13.671, -89.24888062827226 13.671, -89.24897277486912 13.671, -89.24906492146597 13.671, -89.24915706806283 13.671, -89.24924921465968 13.671, -89.24934136125655 13.671, -89.24943350785341 13.671, -89.24952565445027 13.671, -89.24961780104712 13.671, -89.24970994764398 13.671, -89.24980209424085 13.671, -89.2498942408377 13.671, -89.24998638743456 13.671, -89.25007853403142 13.671, -89.25017068062827 13.671, -89.25026282722513 13.671, -89.250354973822 13.671, -89.25044712041885 13.671, -89.25053926701571 13.671, -89.25063141361257 13.671, -89.25072356020942 13.671, -89.25081570680629 13.671, -89.25090785340315 13.671))' + risk_shape = "POLYGON ((-89.25090785340315 13.671, -89.251 13.671, -89.251 13.67108933717579, -89.251 13.67117867435158, -89.251 13.67126801152738, -89.251 13.67135734870317, -89.251 13.67144668587896, -89.251 13.67153602305475, -89.251 13.67162536023055, -89.251 13.67171469740634, -89.251 13.67180403458213, -89.251 13.67189337175792, -89.251 13.67198270893372, -89.251 13.67207204610951, -89.251 13.6721613832853, -89.251 13.6722507204611, -89.251 13.67234005763689, -89.251 13.67242939481268, -89.251 13.67251873198847, -89.251 13.67260806916426, -89.251 13.67269740634006, -89.251 13.67278674351585, -89.251 13.67287608069164, -89.251 13.67296541786743, -89.251 13.67305475504323, -89.251 13.67314409221902, -89.251 13.67323342939481, -89.251 13.6733227665706, -89.251 13.6734121037464, -89.251 13.67350144092219, -89.251 13.67359077809798, -89.251 13.67368011527378, -89.251 13.67376945244957, -89.251 13.67385878962536, -89.251 13.67394812680115, -89.251 13.67403746397694, -89.251 13.67412680115274, -89.251 13.67421613832853, -89.251 13.67430547550432, -89.251 13.67439481268011, -89.251 13.67448414985591, -89.251 13.6745734870317, -89.251 13.67466282420749, -89.251 13.67475216138329, -89.251 13.67484149855908, -89.251 13.67493083573487, -89.251 13.67502017291066, -89.251 13.67510951008645, -89.251 13.67519884726225, -89.251 13.67528818443804, -89.251 13.67537752161383, -89.251 13.67546685878962, -89.251 13.67555619596542, -89.251 13.67564553314121, -89.251 13.675734870317, -89.251 
13.67582420749279, -89.251 13.67591354466859, -89.251 13.67600288184438, -89.251 13.67609221902017, -89.251 13.67618155619597, -89.251 13.67627089337176, -89.251 13.67636023054755, -89.251 13.67644956772334, -89.251 13.67653890489913, -89.251 13.67662824207493, -89.251 13.67671757925072, -89.251 13.67680691642651, -89.251 13.6768962536023, -89.251 13.6769855907781, -89.251 13.67707492795389, -89.251 13.67716426512968, -89.251 13.67725360230548, -89.251 13.67734293948127, -89.251 13.67743227665706, -89.251 13.67752161383285, -89.251 13.67761095100865, -89.251 13.67770028818444, -89.251 13.67778962536023, -89.251 13.67787896253602, -89.251 13.67796829971181, -89.251 13.67805763688761, -89.25090785340315 13.67832564841498, -89.25081570680629 13.67850432276657, -89.25072356020942 13.67868299711816, -89.25063141361257 13.67886167146974, -89.250354973822 13.67921902017291, -89.25017068062827 13.67948703170029, -89.2498942408377 13.67984438040346, -89.24961780104712 13.68020172910663, -89.24934136125655 13.6805590778098, -89.24915706806283 13.68082708933717, -89.24888062827226 13.68118443804035, -89.24860418848168 13.68154178674352, -89.24832774869111 13.68189913544669, -89.24814345549738 13.68216714697406, -89.24786701570682 13.68252449567723, -89.24759057591623 13.6828818443804, -89.24740628272252 13.68314985590778, -89.24712984293194 13.68350720461095, -89.24685340314137 13.68386455331412, -89.24657696335079 13.68422190201729, -89.24639267015708 13.68448991354467, -89.24556335078535 13.68556195965418, -89.24510261780105 13.68609798270893, -89.2450104712042 13.68618731988473, -89.24491832460734 13.68627665706052, -89.24436544502618 13.68690201729107, -89.24427329842932 13.68699135446686, -89.24372041884817 13.68761671469741, -89.24362827225131 13.6877060518732, -89.24353612565446 13.68779538904899, -89.24298324607331 13.68842074927954, -89.24289109947644 13.68851008645533, -89.24233821989529 13.68913544668588, -89.24224607329843 13.68922478386167, -89.24169319371728 13.68985014409222, -89.24160104712043 13.68993948126801, -89.24150890052357 13.6900288184438, -89.24095602094241 13.69065417867435, -89.24086387434555 13.69074351585014, -89.24077172774869 13.69083285302594, -89.24067958115184 13.69092219020173, -89.24058743455498 13.69101152737752, -89.24049528795813 13.69110086455331, -89.24040314136126 13.69119020172911, -89.2403109947644 13.6912795389049, -89.24021884816754 13.69136887608069, -89.23975811518325 13.69163688760807, -89.23929738219896 13.69190489913545, -89.23865235602095 13.69226224783862, -89.23819162303666 13.69253025936599, -89.23773089005236 13.69279827089337, -89.23708586387436 13.69315561959654, -89.23662513089006 13.69342363112392, -89.23616439790577 13.6936916426513, -89.23570366492147 13.69395965417867, -89.23505863874345 13.69431700288184, -89.23459790575916 13.69458501440922, -89.23413717277488 13.6948530259366, -89.23376858638744 13.69494236311239, -89.23321570680629 13.69503170028818, -89.23266282722513 13.69512103746398, -89.23220209424085 13.69521037463977, -89.23164921465968 13.69529971181556, -89.23109633507853 13.69538904899135, -89.23054345549738 13.69547838616715, -89.23008272251309 13.69556772334294, -89.22952984293194 13.69565706051873, -89.22897696335079 13.69574639769452, -89.22851623036649 13.69583573487032, -89.22741047120419 13.6960144092219, -89.22685759162304 13.69610374639769, -89.22621256544502 13.69619308357349, -89.22612041884817 13.69619308357349, -89.22602827225131 13.69619308357349, -89.22593612565446 13.69619308357349, -89.2258439790576 
13.69619308357349, -89.22575183246073 13.69619308357349, -89.22565968586387 13.69619308357349, -89.22556753926702 13.69619308357349, -89.22547539267016 13.69619308357349, -89.22538324607331 13.69619308357349, -89.22529109947644 13.69619308357349, -89.22519895287958 13.69619308357349, -89.22510680628272 13.69619308357349, -89.22501465968587 13.69619308357349, -89.22492251308901 13.69619308357349, -89.22483036649214 13.69619308357349, -89.22473821989529 13.69619308357349, -89.22464607329843 13.69619308357349, -89.22455392670157 13.69619308357349, -89.22446178010472 13.69619308357349, -89.22436963350786 13.69619308357349, -89.22427748691099 13.69619308357349, -89.22418534031414 13.69619308357349, -89.22409319371728 13.69619308357349, -89.22400104712042 13.69619308357349, -89.22390890052357 13.69619308357349, -89.2238167539267 13.69619308357349, -89.22372460732984 13.69619308357349, -89.22363246073299 13.69619308357349, -89.22354031413613 13.69619308357349, -89.22344816753927 13.69619308357349, -89.2233560209424 13.69619308357349, -89.22326387434555 13.69619308357349, -89.22317172774869 13.69619308357349, -89.2222502617801 13.69628242074928, -89.22215811518325 13.69628242074928, -89.22206596858639 13.69628242074928, -89.22197382198954 13.69628242074928, -89.22188167539267 13.69628242074928, -89.22178952879581 13.69628242074928, -89.22169738219895 13.69628242074928, -89.2216052356021 13.69628242074928, -89.22151308900524 13.69628242074928, -89.22142094240837 13.69628242074928, -89.22132879581152 13.69628242074928, -89.22123664921466 13.69628242074928, -89.2211445026178 13.69628242074928, -89.22105235602095 13.69628242074928, -89.22096020942409 13.69628242074928, -89.22086806282722 13.69628242074928, -89.22077591623037 13.69628242074928, -89.22059162303665 13.69619308357349, -89.22031518324607 13.6960144092219, -89.21985445026178 13.69574639769452, -89.21957801047121 13.69556772334294, -89.21930157068063 13.69538904899135, -89.21902513089006 13.69521037463977, -89.21874869109948 13.69503170028818, -89.21828795811518 13.69476368876081, -89.21801151832462 13.69458501440922, -89.21773507853403 13.69440634005764, -89.21745863874345 13.69422766570605, -89.21699790575916 13.69395965417867, -89.21672146596859 13.69378097982709, -89.21644502617801 13.6936023054755, -89.21616858638744 13.69342363112392, -89.21589214659686 13.69324495677233, -89.21543141361256 13.69297694524496, -89.215154973822 13.69279827089337, -89.21091623036649 13.69226224783862, -89.21063979057591 13.69235158501441, -89.21036335078534 13.6924409221902, -89.21008691099476 13.69253025936599, -89.20981047120419 13.69261959654179, -89.2093497382199 13.69279827089337, -89.20907329842932 13.69288760806916, -89.20879685863875 13.69297694524496, -89.20852041884817 13.69306628242075, -89.2082439790576 13.69315561959654, -89.20815183246073 13.69315561959654, -89.20805968586387 13.69315561959654, -89.20796753926702 13.69315561959654, -89.20787539267016 13.69315561959654, -89.2077832460733 13.69315561959654, -89.20769109947643 13.69315561959654, -89.20695392670157 13.69306628242075, -89.20630890052357 13.69297694524496, -89.20557172774869 13.69288760806916, -89.20492670157068 13.69279827089337, -89.20418952879581 13.69270893371758, -89.2035445026178 13.69261959654179, -89.20280732984293 13.69253025936599, -89.19976649214659 13.69288760806916, -89.19912146596859 13.69315561959654, -89.19847643979058 13.69342363112392, -89.19829214659686 13.69351296829971, -89.19764712041885 13.69378097982709, -89.19700209424084 13.69404899135447, 
-89.19681780104712 13.69413832853026, -89.19617277486911 13.69440634005764, -89.19552774869111 13.69467435158501, -89.19534345549738 13.69476368876081, -89.19469842931937 13.69503170028818, -89.19405340314137 13.69529971181556, -89.19386910994764 13.69538904899135, -89.19322408376964 13.69565706051873, -89.19303979057591 13.69574639769452, -89.1923947643979 13.6960144092219, -89.1917497382199 13.69628242074928, -89.19156544502617 13.69637175792507, -89.19092041884817 13.69663976945245, -89.19027539267016 13.69690778097983, -89.19009109947643 13.69699711815562, -89.18944607329843 13.697265129683, -89.18880104712042 13.69753314121037, -89.18861675392669 13.69762247838617, -89.18797172774869 13.69789048991355, -89.18732670157068 13.69815850144092, -89.18714240837696 13.69824783861671, -89.18668167539266 13.6984265129683, -89.18658952879581 13.6984265129683, -89.18649738219895 13.6984265129683, -89.18640523560209 13.6984265129683, -89.18631308900524 13.6984265129683, -89.18566806282722 13.69815850144092, -89.18502303664921 13.69789048991355, -89.18456230366492 13.69771181556196, -89.18391727748691 13.69744380403458, -89.18327225130889 13.6971757925072, -89.18262722513089 13.69690778097983, -89.1790335078534 13.69672910662824, -89.17875706806282 13.69681844380403, -89.17848062827225 13.69690778097983, -89.17820418848167 13.69699711815562, -89.1779277486911 13.69708645533141, -89.17765130890052 13.6971757925072, -89.17700628272252 13.69735446685879, -89.17672984293193 13.69744380403458, -89.17645340314137 13.69753314121037, -89.17617696335078 13.69762247838617, -89.1759005235602 13.69771181556196, -89.17562408376963 13.69780115273775, -89.17534764397905 13.69789048991355, -89.17470261780105 13.69806916426513, -89.1725832460733 13.69931988472622, -89.17249109947643 13.69940922190202, -89.17193821989528 13.70003458213256, -89.17184607329843 13.70012391930836, -89.17175392670157 13.70021325648415, -89.17120104712042 13.7008386167147, -89.17110890052356 13.70092795389049, -89.17101675392669 13.70101729106628, -89.17092460732984 13.70110662824207, -89.17037172774869 13.70173198847262, -89.17027958115183 13.70182132564842, -89.17018743455498 13.70191066282421, -89.17009528795812 13.702, -89.17000314136125 13.702, -89.16991099476439 13.702, -89.16981884816754 13.702, -89.16972670157068 13.702, -89.16963455497383 13.702, -89.16954240837696 13.702, -89.1694502617801 13.702, -89.16935811518324 13.702, -89.16926596858639 13.702, -89.16917382198953 13.702, -89.16908167539266 13.702, -89.16898952879581 13.702, -89.16889738219895 13.702, -89.16880523560209 13.702, -89.16871308900524 13.702, -89.16862094240837 13.702, -89.16852879581151 13.702, -89.16843664921466 13.702, -89.1683445026178 13.702, -89.16825235602094 13.702, -89.16816020942409 13.702, -89.16806806282722 13.702, -89.16797591623036 13.702, -89.16788376963351 13.702, -89.16779162303665 13.702, -89.16769947643979 13.702, -89.16760732984292 13.702, -89.16751518324607 13.702, -89.16742303664921 13.702, -89.16733089005236 13.702, -89.1672387434555 13.702, -89.16714659685863 13.702, -89.16705445026177 13.702, -89.16696230366492 13.702, -89.16687015706806 13.702, -89.16677801047121 13.702, -89.16668586387435 13.702, -89.16659371727748 13.702, -89.16650157068062 13.702, -89.16640942408377 13.702, -89.16631727748691 13.702, -89.16622513089006 13.702, -89.16613298429318 13.702, -89.16604083769633 13.702, -89.16594869109947 13.702, -89.16585654450262 13.702, -89.16576439790576 13.702, -89.16567225130889 13.702, -89.16558010471203 13.702, -89.16548795811518 
13.702, -89.16539581151832 13.702, -89.16530366492147 13.702, -89.16521151832461 13.702, -89.16511937172774 13.702, -89.16502722513088 13.702, -89.16493507853403 13.702, -89.16484293193717 13.702, -89.16475078534032 13.702, -89.16465863874345 13.702, -89.16456649214659 13.702, -89.16447434554973 13.702, -89.16438219895288 13.702, -89.16429005235602 13.702, -89.16419790575915 13.702, -89.1641057591623 13.702, -89.16401361256544 13.702, -89.16392146596858 13.702, -89.16382931937173 13.702, -89.16373717277487 13.702, -89.163645026178 13.702, -89.16355287958115 13.702, -89.16346073298429 13.702, -89.16336858638743 13.702, -89.16327643979058 13.702, -89.16318429319371 13.702, -89.16309214659685 13.702, -89.163 13.702, -89.163 13.70191066282421, -89.163 13.70182132564842, -89.163 13.70173198847262, -89.163 13.70164265129683, -89.163 13.70155331412104, -89.163 13.70146397694525, -89.163 13.70137463976945, -89.163 13.70128530259366, -89.163 13.70119596541787, -89.163 13.70110662824207, -89.163 13.70101729106628, -89.163 13.70092795389049, -89.163 13.7008386167147, -89.163 13.7007492795389, -89.163 13.70065994236311, -89.163 13.70057060518732, -89.163 13.70048126801153, -89.163 13.70039193083574, -89.163 13.70030259365994, -89.163 13.70021325648415, -89.163 13.70012391930836, -89.163 13.70003458213256, -89.163 13.69994524495677, -89.163 13.69985590778098, -89.163 13.69976657060519, -89.163 13.69967723342939, -89.163 13.6995878962536, -89.163 13.69949855907781, -89.163 13.69940922190202, -89.163 13.69931988472622, -89.163 13.69923054755043, -89.163 13.69914121037464, -89.163 13.69905187319885, -89.163 13.69896253602306, -89.163 13.69887319884726, -89.163 13.69878386167147, -89.163 13.69869452449568, -89.163 13.69860518731988, -89.163 13.69851585014409, -89.163 13.6984265129683, -89.163 13.69833717579251, -89.163 13.69824783861671, -89.163 13.69815850144092, -89.163 13.69806916426513, -89.163 13.69797982708934, -89.163 13.69789048991355, -89.163 13.69780115273775, -89.163 13.69771181556196, -89.163 13.69762247838617, -89.163 13.69753314121037, -89.163 13.69744380403458, -89.163 13.69735446685879, -89.163 13.697265129683, -89.163 13.6971757925072, -89.163 13.69708645533141, -89.163 13.69699711815562, -89.163 13.69690778097983, -89.163 13.69681844380403, -89.163 13.69672910662824, -89.163 13.69663976945245, -89.163 13.69655043227666, -89.163 13.69646109510087, -89.163 13.69637175792507, -89.163 13.69628242074928, -89.163 13.69619308357349, -89.163 13.69610374639769, -89.163 13.6960144092219, -89.163 13.69592507204611, -89.163 13.69583573487032, -89.163 13.69574639769452, -89.163 13.69565706051873, -89.163 13.69556772334294, -89.163 13.69547838616715, -89.163 13.69538904899135, -89.163 13.69529971181556, -89.163 13.69521037463977, -89.163 13.69512103746398, -89.163 13.69503170028818, -89.163 13.69494236311239, -89.163 13.6948530259366, -89.163 13.69476368876081, -89.163 13.69467435158501, -89.163 13.69458501440922, -89.163 13.69449567723343, -89.163 13.69440634005764, -89.163 13.69431700288184, -89.163 13.69422766570605, -89.163 13.69413832853026, -89.163 13.69404899135447, -89.163 13.69395965417867, -89.163 13.69387031700288, -89.163 13.69378097982709, -89.163 13.6936916426513, -89.163 13.6936023054755, -89.163 13.69351296829971, -89.163 13.69342363112392, -89.163 13.69333429394813, -89.163 13.69324495677233, -89.16327643979058 13.69306628242075, -89.16355287958115 13.69288760806916, -89.16382931937173 13.69270893371758, -89.1641057591623 13.69253025936599, -89.16438219895288 13.69235158501441, 
-89.16465863874345 13.69217291066282, -89.16493507853403 13.69199423631124, -89.16521151832461 13.69181556195965, -89.16548795811518 13.69163688760807, -89.16576439790576 13.69145821325648, -89.16604083769633 13.6912795389049, -89.16631727748691 13.69110086455331, -89.16659371727748 13.69092219020173, -89.16677801047121 13.69083285302594, -89.16696230366492 13.69074351585014, -89.16714659685863 13.69065417867435, -89.16733089005236 13.69056484149856, -89.16797591623036 13.69020749279539, -89.16816020942409 13.6901181556196, -89.1683445026178 13.6900288184438, -89.16852879581151 13.68993948126801, -89.16871308900524 13.68985014409222, -89.16889738219895 13.68976080691643, -89.16908167539266 13.68967146974063, -89.16972670157068 13.68931412103746, -89.16991099476439 13.68922478386167, -89.17009528795812 13.68913544668588, -89.17027958115183 13.68904610951009, -89.17046387434554 13.68895677233429, -89.17064816753927 13.6888674351585, -89.17083246073298 13.68877809798271, -89.17147748691099 13.68842074927954, -89.17166178010471 13.68833141210375, -89.17350471204188 13.68681268011527, -89.17378115183246 13.6864553314121, -89.17405759162304 13.68609798270893, -89.17442617801046 13.68565129682997, -89.17470261780105 13.6852939481268, -89.17507120418848 13.68484726224784, -89.17534764397905 13.68448991354467, -89.17571623036649 13.68404322766571, -89.17580837696335 13.68395389048991, -89.17599267015707 13.68386455331412, -89.17617696335078 13.68377521613833, -89.1763612565445 13.68368587896254, -89.17654554973822 13.68359654178674, -89.17672984293193 13.68350720461095, -89.17691413612565 13.68341786743516, -89.17709842931937 13.68332853025936, -89.17728272251308 13.68323919308357, -89.17746701570681 13.68314985590778, -89.17765130890052 13.68306051873199, -89.17783560209423 13.6829711815562, -89.17801989528796 13.6828818443804, -89.17820418848167 13.68279250720461, -89.17884921465968 13.68252449567723, -89.1790335078534 13.68243515850144, -89.17921780104712 13.68234582132565, -89.17940209424084 13.68225648414986, -89.17958638743455 13.68216714697406, -89.17977068062827 13.68207780979827, -89.17995497382199 13.68198847262248, -89.1801392670157 13.68189913544669, -89.18032356020942 13.68180979827089, -89.18050785340314 13.6817204610951, -89.18069214659685 13.68163112391931, -89.18087643979058 13.68154178674352, -89.18152146596859 13.68127377521614, -89.1817057591623 13.68118443804035, -89.18189005235602 13.68109510086455, -89.18198219895288 13.68109510086455, -89.18207434554974 13.68109510086455, -89.18216649214659 13.68109510086455, -89.18225863874345 13.68109510086455, -89.18235078534032 13.68109510086455, -89.18244293193717 13.68109510086455, -89.18253507853403 13.68109510086455, -89.18262722513089 13.68109510086455, -89.18271937172774 13.68109510086455, -89.18281151832461 13.68109510086455, -89.18290366492147 13.68109510086455, -89.18299581151832 13.68109510086455, -89.18308795811518 13.68109510086455, -89.18318010471204 13.68109510086455, -89.18327225130889 13.68109510086455, -89.18336439790576 13.68109510086455, -89.18345654450262 13.68109510086455, -89.18354869109947 13.68109510086455, -89.18364083769633 13.68109510086455, -89.18373298429319 13.68109510086455, -89.18382513089006 13.68109510086455, -89.18806387434554 13.67868299711816, -89.18843246073298 13.67805763688761, -89.18880104712042 13.67743227665706, -89.18889319371728 13.67725360230548, -89.18926178010472 13.67662824207493, -89.18963036649214 13.67600288184438, -89.18972251308901 13.67582420749279, -89.19009109947643 
13.67519884726225, -89.19045968586387 13.6745734870317, -89.19055183246073 13.67439481268011, -89.19092041884817 13.67376945244957, -89.19119685863875 13.6733227665706, -89.19138115183246 13.67323342939481, -89.19156544502617 13.67314409221902, -89.1917497382199 13.67305475504323, -89.1923947643979 13.67278674351585, -89.19257905759163 13.67269740634006, -89.19276335078534 13.67260806916426, -89.19294764397905 13.67251873198847, -89.19313193717278 13.67242939481268, -89.19331623036649 13.67234005763689, -89.1935005235602 13.6722507204611, -89.19368481675393 13.6721613832853, -89.19386910994764 13.67207204610951, -89.19405340314137 13.67198270893372, -89.19469842931937 13.67171469740634, -89.19488272251309 13.67162536023055, -89.19506701570681 13.67153602305475, -89.19525130890052 13.67144668587896, -89.19543560209424 13.67135734870317, -89.19561989528796 13.67126801152738, -89.19580418848167 13.67117867435158, -89.19598848167539 13.67108933717579, -89.19617277486911 13.671, -89.19626492146597 13.671, -89.19635706806282 13.671, -89.19644921465968 13.671, -89.19654136125655 13.671, -89.19663350785341 13.671, -89.19672565445026 13.671, -89.19681780104712 13.671, -89.19690994764397 13.671, -89.19700209424084 13.671, -89.1970942408377 13.671, -89.19718638743456 13.671, -89.19727853403141 13.671, -89.19737068062827 13.671, -89.19746282722512 13.671, -89.19755497382199 13.671, -89.19764712041885 13.671, -89.19773926701571 13.671, -89.19783141361256 13.671, -89.19792356020942 13.671, -89.19801570680629 13.671, -89.19810785340314 13.671, -89.1982 13.671, -89.19829214659686 13.671, -89.19838429319371 13.671, -89.19847643979058 13.671, -89.19856858638744 13.671, -89.19866073298429 13.671, -89.19875287958115 13.671, -89.19884502617801 13.671, -89.19893717277488 13.671, -89.19902931937173 13.671, -89.19912146596859 13.671, -89.19921361256544 13.671, -89.1993057591623 13.671, -89.19939790575916 13.671, -89.19949005235603 13.671, -89.19958219895288 13.671, -89.19967434554974 13.671, -89.19976649214659 13.671, -89.19985863874345 13.671, -89.19995078534032 13.671, -89.20004293193718 13.671, -89.20013507853403 13.671, -89.20022722513089 13.671, -89.20031937172774 13.671, -89.20041151832461 13.671, -89.20050366492147 13.671, -89.20059581151833 13.671, -89.20068795811518 13.671, -89.20078010471204 13.671, -89.20087225130889 13.671, -89.20096439790576 13.671, -89.20105654450262 13.671, -89.20114869109948 13.671, -89.20124083769633 13.671, -89.20133298429319 13.671, -89.20142513089006 13.671, -89.20151727748691 13.671, -89.20160942408377 13.671, -89.20170157068063 13.671, -89.20179371727748 13.671, -89.20188586387435 13.671, -89.20197801047121 13.671, -89.20207015706806 13.671, -89.20216230366492 13.671, -89.20225445026178 13.671, -89.20234659685863 13.671, -89.2024387434555 13.671, -89.20253089005236 13.671, -89.20262303664921 13.671, -89.20271518324607 13.671, -89.20280732984293 13.671, -89.2028994764398 13.671, -89.20299162303665 13.671, -89.20308376963351 13.671, -89.20317591623036 13.671, -89.20326806282722 13.671, -89.20336020942409 13.671, -89.20345235602095 13.671, -89.2035445026178 13.671, -89.20363664921466 13.671, -89.20372879581151 13.671, -89.20382094240837 13.671, -89.20391308900524 13.671, -89.2040052356021 13.671, -89.20409738219895 13.671, -89.20418952879581 13.671, -89.20428167539266 13.671, -89.20437382198953 13.671, -89.20446596858639 13.671, -89.20455811518325 13.671, -89.2046502617801 13.671, -89.20474240837696 13.671, -89.20483455497383 13.671, -89.20492670157068 13.671, 
-89.20501884816754 13.671, -89.2051109947644 13.671, -89.20520314136125 13.671, -89.20529528795812 13.671, -89.20538743455498 13.671, -89.20547958115183 13.671, -89.20557172774869 13.671, -89.20566387434555 13.671, -89.2057560209424 13.671, -89.20584816753927 13.671, -89.20594031413613 13.671, -89.20603246073298 13.671, -89.20612460732984 13.671, -89.2062167539267 13.671, -89.20640104712042 13.67108933717579, -89.20658534031413 13.67117867435158, -89.20676963350786 13.67126801152738, -89.20695392670157 13.67135734870317, -89.20713821989528 13.67144668587896, -89.20723036649214 13.67153602305475, -89.20723036649214 13.67162536023055, -89.20723036649214 13.67171469740634, -89.20723036649214 13.67180403458213, -89.20704607329843 13.67242939481268, -89.20686178010472 13.67305475504323, -89.20667748691099 13.67368011527378, -89.20658534031413 13.67394812680115, -89.20640104712042 13.6745734870317, -89.21036335078534 13.68118443804035, -89.21091623036649 13.68127377521614, -89.21146910994764 13.68136311239193, -89.21211413612565 13.68145244956772, -89.21266701570681 13.68154178674352, -89.21321989528796 13.68163112391931, -89.21377277486911 13.6817204610951, -89.21441780104712 13.68180979827089, -89.2146942408377 13.68189913544669, -89.21487853403141 13.68198847262248, -89.215154973822 13.68216714697406, -89.21543141361256 13.68234582132565, -89.21589214659686 13.68261383285303, -89.21616858638744 13.68279250720461, -89.21644502617801 13.6829711815562, -89.21672146596859 13.68314985590778, -89.21699790575916 13.68332853025936, -89.21727434554974 13.68350720461095, -89.21773507853403 13.68377521613833, -89.21801151832462 13.68395389048991, -89.21828795811518 13.6841325648415, -89.21856439790577 13.68431123919308, -89.21884083769633 13.68448991354467, -89.21911727748692 13.68466858789625, -89.22031518324607 13.68520461095101, -89.22059162303665 13.6852939481268, -89.22086806282722 13.68538328530259, -89.2211445026178 13.68547262247839, -89.22142094240837 13.68556195965418, -89.22169738219895 13.68565129682997, -89.22197382198954 13.68574063400576, -89.2222502617801 13.68582997118156, -89.22252670157069 13.68591930835735, -89.22280314136125 13.68600864553314, -89.22307958115184 13.68609798270893, -89.2233560209424 13.68618731988473, -89.22363246073299 13.68627665706052, -89.22390890052357 13.68636599423631, -89.22768691099476 13.68618731988473, -89.22833193717278 13.68591930835735, -89.22851623036649 13.68582997118156, -89.22870052356021 13.68574063400576, -89.22888481675393 13.68565129682997, -89.22906910994764 13.68556195965418, -89.22925340314137 13.68547262247839, -89.22943769633508 13.68538328530259, -89.22962198952879 13.6852939481268, -89.22980628272252 13.68520461095101, -89.22999057591623 13.68511527377522, -89.23017486910994 13.68502593659942, -89.23035916230367 13.68493659942363, -89.23054345549738 13.68484726224784, -89.23072774869111 13.68475792507205, -89.23091204188482 13.68466858789625, -89.23109633507853 13.68457925072046, -89.23174136125655 13.68431123919308, -89.23192565445027 13.68422190201729, -89.23358429319372 13.6828818443804, -89.23413717277488 13.68225648414986, -89.23469005235603 13.68163112391931, -89.23478219895289 13.68154178674352, -89.23533507853404 13.68091642651297, -89.23588795811519 13.68029106628242, -89.23598010471204 13.68020172910663, -89.2360722513089 13.68011239193084, -89.23616439790577 13.68002305475504, -89.23625654450262 13.67993371757925, -89.23634869109948 13.67984438040346, -89.23644083769634 13.67975504322767, -89.23653298429319 13.67966570605187, 
-89.23662513089006 13.67957636887608, -89.23671727748692 13.67948703170029, -89.23680942408377 13.67939769452449, -89.23690157068063 13.6793083573487, -89.23699371727749 13.67921902017291, -89.23754659685864 13.67859365994236, -89.23763874345551 13.67850432276657, -89.23773089005236 13.67841498559078, -89.23782303664922 13.67832564841498, -89.23791518324607 13.67823631123919, -89.23800732984293 13.6781469740634, -89.2380994764398 13.67805763688761, -89.23819162303666 13.67796829971181, -89.23828376963351 13.67787896253602, -89.23837591623037 13.67778962536023, -89.23846806282722 13.67770028818444, -89.23856020942409 13.67761095100865, -89.23865235602095 13.67752161383285, -89.23874450261781 13.67743227665706, -89.23883664921466 13.67734293948127, -89.23938952879581 13.67671757925072, -89.23948167539267 13.67662824207493, -89.23957382198954 13.67653890489913, -89.23966596858639 13.67644956772334, -89.23975811518325 13.67636023054755, -89.23985026178011 13.67627089337176, -89.23994240837696 13.67618155619597, -89.24003455497383 13.67609221902017, -89.24012670157069 13.67600288184438, -89.24021884816754 13.67591354466859, -89.2403109947644 13.67582420749279, -89.24040314136126 13.675734870317, -89.24049528795813 13.67564553314121, -89.24058743455498 13.67555619596542, -89.24114031413613 13.67493083573487, -89.24123246073299 13.67484149855908, -89.24132460732984 13.67475216138329, -89.2414167539267 13.67466282420749, -89.24150890052357 13.6745734870317, -89.24160104712043 13.67448414985591, -89.24169319371728 13.67439481268011, -89.24178534031414 13.67430547550432, -89.24298324607331 13.67251873198847, -89.24316753926702 13.67207204610951, -89.24335183246073 13.67162536023055, -89.24353612565446 13.67117867435158, -89.24362827225131 13.671, -89.24372041884817 13.671, -89.24381256544503 13.671, -89.24390471204188 13.671, -89.24399685863875 13.671, -89.24408900523561 13.671, -89.24418115183246 13.671, -89.24427329842932 13.671, -89.24436544502618 13.671, -89.24445759162305 13.671, -89.2445497382199 13.671, -89.24464188481676 13.671, -89.24473403141361 13.671, -89.24482617801047 13.671, -89.24491832460734 13.671, -89.2450104712042 13.671, -89.24510261780105 13.671, -89.24519476439791 13.671, -89.24528691099476 13.671, -89.24537905759163 13.671, -89.24547120418849 13.671, -89.24556335078535 13.671, -89.2456554973822 13.671, -89.24574764397906 13.671, -89.24583979057591 13.671, -89.24593193717278 13.671, -89.24602408376964 13.671, -89.2461162303665 13.671, -89.24620837696335 13.671, -89.24630052356021 13.671, -89.24639267015708 13.671, -89.24648481675393 13.671, -89.24657696335079 13.671, -89.24666910994765 13.671, -89.2467612565445 13.671, -89.24685340314137 13.671, -89.24694554973823 13.671, -89.24703769633508 13.671, -89.24712984293194 13.671, -89.2472219895288 13.671, -89.24731413612565 13.671, -89.24740628272252 13.671, -89.24749842931938 13.671, -89.24759057591623 13.671, -89.24768272251309 13.671, -89.24777486910995 13.671, -89.24786701570682 13.671, -89.24795916230367 13.671, -89.24805130890053 13.671, -89.24814345549738 13.671, -89.24823560209424 13.671, -89.24832774869111 13.671, -89.24841989528797 13.671, -89.24851204188482 13.671, -89.24860418848168 13.671, -89.24869633507853 13.671, -89.24878848167539 13.671, -89.24888062827226 13.671, -89.24897277486912 13.671, -89.24906492146597 13.671, -89.24915706806283 13.671, -89.24924921465968 13.671, -89.24934136125655 13.671, -89.24943350785341 13.671, -89.24952565445027 13.671, -89.24961780104712 13.671, -89.24970994764398 13.671, 
-89.24980209424085 13.671, -89.2498942408377 13.671, -89.24998638743456 13.671, -89.25007853403142 13.671, -89.25017068062827 13.671, -89.25026282722513 13.671, -89.250354973822 13.671, -89.25044712041885 13.671, -89.25053926701571 13.671, -89.25063141361257 13.671, -89.25072356020942 13.671, -89.25081570680629 13.671, -89.25090785340315 13.671))" shape_poly = wkt.loads(risk_shape) shape = gpd.GeoDataFrame() - shape['geometry'] = [shape_poly] - shape.crs = 'epsg:4326' + shape["geometry"] = [shape_poly] + shape.crs = "epsg:4326" shape.to_crs(epsg=3857, inplace=True) ax = shape.plot(figsize=(10, 10), alpha=0.5) ax.set_xlim(-9943223.896891385, -9911000.065720687) ax.set_ylim(1530712.637786494, 1555600.2891258441) ctx.add_basemap(ax, zoom=12, url=ctx.providers.Stamen.Terrain) - rect = patches.Rectangle((-9931038.907412536, 1536570.51725147), 4354.653554389253, - 2941.9125608841423, linewidth=1, edgecolor='r', facecolor='none') + rect = patches.Rectangle( + (-9931038.907412536, 1536570.51725147), + 4354.653554389253, + 2941.9125608841423, + linewidth=1, + edgecolor="r", + facecolor="none", + ) ax.add_patch(rect) ax.set_axis_off() fig = ax.get_figure() - ax.set_title('Metropolitan Area of San Salvador', fontsize=10) + ax.set_title("Metropolitan Area of San Salvador", fontsize=10) fig.tight_layout() return fig -from climada.entity import Exposures, Entity + +from climada.entity import Entity, Exposures from climada.hazard import Hazard + def load_entity(): - ent_file = 'FL_entity_Acelhuate_houses.xlsx' + ent_file = "FL_entity_Acelhuate_houses.xlsx" ent = Entity.from_excel(ent_file) ent.exposures.set_geometry_points() ent.check() return ent + +import cartopy.crs as ccrs import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np -import cartopy.crs as ccrs + def scale_bar(ax, length=None, location=(0.5, 0.05), linewidth=3): """ @@ -69,55 +81,75 @@ def scale_bar(ax, length=None, location=(0.5, 0.05), linewidth=3): (ie. 0.5 is the middle of the plot) linewidth is the thickness of the scalebar. 
""" - #Get the limits of the axis in lat long + # Get the limits of the axis in lat long llx0, llx1, lly0, lly1 = ax.get_extent(ccrs.PlateCarree()) - #Make tmc horizontally centred on the middle of the map, - #vertically at scale bar location + # Make tmc horizontally centred on the middle of the map, + # vertically at scale bar location sbllx = (llx1 + llx0) / 2 sblly = lly0 + (lly1 - lly0) * location[1] tmc = ccrs.TransverseMercator(sbllx, sblly) - #Get the extent of the plotted area in coordinates in metres + # Get the extent of the plotted area in coordinates in metres x0, x1, y0, y1 = ax.get_extent(tmc) - #Turn the specified scalebar location into coordinates in metres + # Turn the specified scalebar location into coordinates in metres sbx = x0 + (x1 - x0) * location[0] sby = y0 + (y1 - y0) * location[1] - #Calculate a scale bar length if none has been given - #(Theres probably a more pythonic way of rounding the number but this works) + # Calculate a scale bar length if none has been given + # (Theres probably a more pythonic way of rounding the number but this works) if not length: - length = (x1 - x0) / 5000 #in km - ndim = int(np.floor(np.log10(length))) #number of digits in number - length = round(length, -ndim) #round to 1sf - #Returns numbers starting with the list + length = (x1 - x0) / 5000 # in km + ndim = int(np.floor(np.log10(length))) # number of digits in number + length = round(length, -ndim) # round to 1sf + + # Returns numbers starting with the list def scale_number(x): - if str(x)[0] in ['1', '2', '5']: return int(x) - else: return scale_number(x - 10 ** ndim) + if str(x)[0] in ["1", "2", "5"]: + return int(x) + else: + return scale_number(x - 10**ndim) + length = scale_number(length) - #Generate the x coordinate for the ends of the scalebar + # Generate the x coordinate for the ends of the scalebar bar_xs = [sbx - length * 500, sbx + length * 500] - #Plot the scalebar - ax.plot(bar_xs, [sby, sby], transform=tmc, color='k', linewidth=linewidth) - #Plot the scalebar label - ax.text(sbx, sby, str(int(length*1000)) + ' m', transform=tmc, - horizontalalignment='center', verticalalignment='bottom') + # Plot the scalebar + ax.plot(bar_xs, [sby, sby], transform=tmc, color="k", linewidth=linewidth) + # Plot the scalebar label + ax.text( + sbx, + sby, + str(int(length * 1000)) + " m", + transform=tmc, + horizontalalignment="center", + verticalalignment="bottom", + ) + def plot_exposure_ss(exposures, point=None): if point is not None: - fig, ax = plt.subplots(figsize=(15, 15), subplot_kw=dict(projection=ccrs.Mercator())) - ax.scatter(exposures.gdf[point:point+1].geometry[:].x, exposures.gdf[point:point+1].geometry[:].y, c='k', - marker='+', s=800) + fig, ax = plt.subplots( + figsize=(15, 15), subplot_kw=dict(projection=ccrs.Mercator()) + ) + ax.scatter( + exposures.gdf[point : point + 1].geometry[:].x, + exposures.gdf[point : point + 1].geometry[:].y, + c="k", + marker="+", + s=800, + ) ax.set_xlim(-9931038.907412536, -9926684.253858147) ax.set_ylim(1536680.51725147, 1539512.429812354) else: # create new map for viviendas - cmap_viv = cm.get_cmap('autumn').resampled(4) - cmap_viv = mpl.colors.LinearSegmentedColormap.from_list('Custom cmap', - [cmap_viv(i) for i in range(cmap_viv.N)], cmap_viv.N) + cmap_viv = cm.get_cmap("autumn").resampled(4) + cmap_viv = mpl.colors.LinearSegmentedColormap.from_list( + "Custom cmap", [cmap_viv(i) for i in range(cmap_viv.N)], cmap_viv.N + ) # create new map for aups - cmap_aup = cm.get_cmap('winter').resampled(4) - cmap_aup = 
mpl.colors.LinearSegmentedColormap.from_list('Custom cmap', - [cmap_aup(i) for i in range(cmap_aup.N)], cmap_aup.N) + cmap_aup = cm.get_cmap("winter").resampled(4) + cmap_aup = mpl.colors.LinearSegmentedColormap.from_list( + "Custom cmap", [cmap_aup(i) for i in range(cmap_aup.N)], cmap_aup.N + ) # define the bins and normalize bounds_aup = np.array([6000, 8800, 10000, 12000, 14600]) @@ -125,34 +157,79 @@ def plot_exposure_ss(exposures, point=None): bounds_viv = np.array([7500, 11000, 16500, 33000, 56300]) norm_viv = mpl.colors.BoundaryNorm(bounds_viv, cmap_viv.N) - exp_merc_aup = exposures.gdf[exposures.gdf.category==1] - exp_merc_house = exposures.gdf[exposures.gdf.category==2] - - fig, ax = plt.subplots(figsize=(15, 15), subplot_kw=dict(projection=ccrs.Mercator())) - clr_1 = ax.scatter(exp_merc_aup.geometry[:].x, exp_merc_aup.geometry[:].y, c=exp_merc_aup.value.values, - marker='+', s=25, cmap=cmap_aup, norm=norm_aup) - clr_2 = ax.scatter(exp_merc_house.geometry[:].x, exp_merc_house.geometry[:].y, c=exp_merc_house.value.values, - marker='o', s=8, cmap=cmap_viv, norm=norm_viv) + exp_merc_aup = exposures.gdf[exposures.gdf.category == 1] + exp_merc_house = exposures.gdf[exposures.gdf.category == 2] + + fig, ax = plt.subplots( + figsize=(15, 15), subplot_kw=dict(projection=ccrs.Mercator()) + ) + clr_1 = ax.scatter( + exp_merc_aup.geometry[:].x, + exp_merc_aup.geometry[:].y, + c=exp_merc_aup.value.values, + marker="+", + s=25, + cmap=cmap_aup, + norm=norm_aup, + ) + clr_2 = ax.scatter( + exp_merc_house.geometry[:].x, + exp_merc_house.geometry[:].y, + c=exp_merc_house.value.values, + marker="o", + s=8, + cmap=cmap_viv, + norm=norm_viv, + ) lines_legend = [] text_legend = [] for i_col, x_col in enumerate(np.linspace(0, 1, 4)): - lines_legend.append(mpl.lines.Line2D(range(1), range(1), color='white', marker='o', markerfacecolor=cmap_viv(x_col))) - text_legend.append(str(bounds_viv[i_col]) + ' - ' + str(bounds_viv[i_col+1])) - legend1 = plt.legend(lines_legend, text_legend, numpoints=1, loc=4, title='no AUP housing') + lines_legend.append( + mpl.lines.Line2D( + range(1), + range(1), + color="white", + marker="o", + markerfacecolor=cmap_viv(x_col), + ) + ) + text_legend.append( + str(bounds_viv[i_col]) + " - " + str(bounds_viv[i_col + 1]) + ) + legend1 = plt.legend( + lines_legend, text_legend, numpoints=1, loc=4, title="no AUP housing" + ) lines_legend = [] text_legend = [] for i_col, x_col in enumerate(np.linspace(0, 1, 4)): - lines_legend.append(mpl.lines.Line2D(range(1), range(1), color=cmap_aup(x_col), marker='+', markerfacecolor=cmap_aup(x_col))) - text_legend.append(str(bounds_aup[i_col]) + ' - ' + str(bounds_aup[i_col+1])) - plt.legend(lines_legend, text_legend, numpoints=1, loc=3, title='AUP housing') + lines_legend.append( + mpl.lines.Line2D( + range(1), + range(1), + color=cmap_aup(x_col), + marker="+", + markerfacecolor=cmap_aup(x_col), + ) + ) + text_legend.append( + str(bounds_aup[i_col]) + " - " + str(bounds_aup[i_col + 1]) + ) + plt.legend(lines_legend, text_legend, numpoints=1, loc=3, title="AUP housing") plt.gca().add_artist(legend1) - ctx.add_basemap(ax, zoom=15, url=ctx.providers.OpenStreetMap.Mapnik, origin='upper') + ctx.add_basemap(ax, zoom=15, url=ctx.providers.OpenStreetMap.Mapnik, origin="upper") scale_bar(ax, 0.5, location=(0.93, 0.4), linewidth=2) - rect = patches.Rectangle((-9931033.307412536, 1536686.51725147), 4345.053554389253, - 2934.0125608841423, linewidth=2, edgecolor='r', facecolor='none', zorder=200) + rect = patches.Rectangle( + (-9931033.307412536, 
1536686.51725147), + 4345.053554389253, + 2934.0125608841423, + linewidth=2, + edgecolor="r", + facecolor="none", + zorder=200, + ) ax.add_patch(rect) ax.set_axis_off() if point is not None: @@ -162,37 +239,41 @@ def plot_exposure_ss(exposures, point=None): # fig.savefig('ss_points.png', format='png', bbox_inches='tight') return fig + def flooding_aup_if(impact_funcs): - mdd = impact_funcs.get_func('FL', 101).mdd - intensity = impact_funcs.get_func('FL', 101).intensity + mdd = impact_funcs.get_func("FL", 101).mdd + intensity = impact_funcs.get_func("FL", 101).intensity fig, ax = plt.subplots() - ax.set_xlabel('Intensity (m)') - ax.set_ylabel('Mean Damage Ratio (%)') - ax.set_title('Impact Function - AUP flooding') - ax.plot(intensity, mdd*100) + ax.set_xlabel("Intensity (m)") + ax.set_ylabel("Mean Damage Ratio (%)") + ax.set_title("Impact Function - AUP flooding") + ax.plot(intensity, mdd * 100) fig.set_size_inches(4.5, 4.5) - #fig.savefig('if_house_aup.png', format='png', bbox_inches='tight') + # fig.savefig('if_house_aup.png', format='png', bbox_inches='tight') return fig + import pandas as pd + def load_accounting(): acc = pd.DataFrame() - acc['Return Period (year)'] = np.array([10, 25, 50, 100]) - acc['frequency (1/year)'] = np.array([1/10, 1/25, 1/50, 1/100]) - acc['intensity (m)'] = np.array([0.7744541, 2.820973, 4.828216, 5.742804]) - acc['Mean Damage Ration (%)'] = np.array([51.83603012, 100, 100, 100]) - acc['impact (USD)'] = np.array([4786.95371, 9234.8, 9234.8, 9234.8]) - acc['frequency * impact'] = np.array([478.695371, 369.392, 184.696, 92.348]) - acc['Expected Annual Impact'] = np.ones(4)*np.nan - acc['Expected Annual Impact'].values[0] = 1125.131371 - #acc_file = 'accounting.xlsx' - #acc_df = pd.read_excel(acc_file) + acc["Return Period (year)"] = np.array([10, 25, 50, 100]) + acc["frequency (1/year)"] = np.array([1 / 10, 1 / 25, 1 / 50, 1 / 100]) + acc["intensity (m)"] = np.array([0.7744541, 2.820973, 4.828216, 5.742804]) + acc["Mean Damage Ration (%)"] = np.array([51.83603012, 100, 100, 100]) + acc["impact (USD)"] = np.array([4786.95371, 9234.8, 9234.8, 9234.8]) + acc["frequency * impact"] = np.array([478.695371, 369.392, 184.696, 92.348]) + acc["Expected Annual Impact"] = np.ones(4) * np.nan + acc["Expected Annual Impact"].values[0] = 1125.131371 + # acc_file = 'accounting.xlsx' + # acc_df = pd.read_excel(acc_file) acc.index += 1 return acc + def generate_plots_risk(): fig_ma = plot_salvador_ma() ent = load_entity() @@ -205,26 +286,26 @@ def generate_plots_risk(): return fig_ma, fig_point, fig_houses, fig_if + def non_linear_growth(cb_acel): - risk_present = 3.562753447707e+06 - risk_future = 7.578426440635e+06 + risk_present = 3.562753447707e06 + risk_future = 7.578426440635e06 - x_var = np.arange(cb_acel.present_year, cb_acel.future_year+1) + x_var = np.arange(cb_acel.present_year, cb_acel.future_year + 1) time_dep = cb_acel._time_dependency_array(0.5) - y_sqr = risk_present + (risk_future-risk_present) * time_dep + y_sqr = risk_present + (risk_future - risk_present) * time_dep time_dep = cb_acel._time_dependency_array(1.0) - y_lin = risk_present + (risk_future-risk_present) * time_dep + y_lin = risk_present + (risk_future - risk_present) * time_dep time_dep = cb_acel._time_dependency_array(2.0) - y_quad = risk_present + (risk_future-risk_present) * time_dep + y_quad = risk_present + (risk_future - risk_present) * time_dep - plt.bar(x_var, y_sqr, color='green', label='sublinear') - plt.bar(x_var, y_lin, color='blue', label='linear') - plt.bar(x_var, y_quad, color='red', 
label='superlinear') - plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0)) + plt.bar(x_var, y_sqr, color="green", label="sublinear") + plt.bar(x_var, y_lin, color="blue", label="linear") + plt.bar(x_var, y_quad, color="red", label="superlinear") + plt.ticklabel_format(style="sci", axis="y", scilimits=(0, 0)) plt.ylim(3.0e6, 7.8e6) - plt.xlabel('Year') - plt.ylabel('Expected Annual Impact') + plt.xlabel("Year") + plt.ylabel("Expected Annual Impact") plt.legend() - diff --git a/script/jenkins/set_config.py b/script/jenkins/set_config.py index 406eabb5e..75c4a1695 100644 --- a/script/jenkins/set_config.py +++ b/script/jenkins/set_config.py @@ -1,12 +1,12 @@ -import sys import json +import sys key = sys.argv[1] val = sys.argv[2] -jsonfile = 'climada.conf' +jsonfile = "climada.conf" -with open(jsonfile, encoding='UTF-8') as inf: +with open(jsonfile, encoding="UTF-8") as inf: data = json.load(inf) data[key] = val -with open(jsonfile, 'w', encoding='UTF-8') as outf: +with open(jsonfile, "w", encoding="UTF-8") as outf: json.dump(data, outf) diff --git a/script/jenkins/test_data_api.py b/script/jenkins/test_data_api.py index 42e910374..38eec4cd3 100644 --- a/script/jenkins/test_data_api.py +++ b/script/jenkins/test_data_api.py @@ -19,31 +19,36 @@ Test files_handler module. """ +import datetime as dt +import unittest from pathlib import Path from sys import dont_write_bytecode -import pandas as pd -import unittest -import xmlrunner -import datetime as dt import numpy as np +import pandas as pd +import xmlrunner from pandas_datareader import wb from climada import CONFIG from climada.entity.exposures.litpop.nightlight import BM_FILENAMES, download_nl_files -from climada.hazard.tc_tracks import IBTRACS_URL, IBTRACS_FILE -from climada.util.finance import WORLD_BANK_WEALTH_ACC, WORLD_BANK_INC_GRP -from climada.util.dwd_icon_loader import (download_icon_grib, - delete_icon_grib, - download_icon_centroids_file) +from climada.hazard.tc_tracks import IBTRACS_FILE, IBTRACS_URL +from climada.util.dwd_icon_loader import ( + delete_icon_grib, + download_icon_centroids_file, + download_icon_grib, +) from climada.util.files_handler import download_file, download_ftp +from climada.util.finance import WORLD_BANK_INC_GRP, WORLD_BANK_WEALTH_ACC + class TestDataAvail(unittest.TestCase): """Test availability of data used through APIs""" def test_noaa_nl_pass(self): """Test NOAA nightlights used in BlackMarble.""" - file_down = download_file(f'{CONFIG.exposures.litpop.nightlights.noaa_url.str()}/F101992.v4.tar') + file_down = download_file( + f"{CONFIG.exposures.litpop.nightlights.noaa_url.str()}/F101992.v4.tar" + ) Path(file_down).unlink() def test_nasa_nl_pass(self): @@ -72,11 +77,11 @@ def test_wb_lev_hist_pass(self): def test_wb_api_pass(self): """Test World Bank API""" - wb.download(indicator='NY.GDP.MKTP.CD', country='CHE', start=1960, end=2030) + wb.download(indicator="NY.GDP.MKTP.CD", country="CHE", start=1960, end=2030) def test_ne_api_pass(self): """Test Natural Earth API""" - url = 'https://naturalearth.s3.amazonaws.com/10m_cultural/ne_10m_admin_0_countries.zip' + url = "https://naturalearth.s3.amazonaws.com/10m_cultural/ne_10m_admin_0_countries.zip" file_down = download_file(url) Path(file_down).unlink() @@ -87,41 +92,41 @@ def test_ibtracs_pass(self): def test_icon_eu_forecast_download(self): """Test availability of DWD icon forecast.""" run_datetime = dt.datetime.utcnow() - dt.timedelta(hours=5) - run_datetime = run_datetime.replace(hour=run_datetime.hour//12*12, - minute=0, - second=0, - 
microsecond=0) - icon_file = download_icon_grib(run_datetime,max_lead_time=1) + run_datetime = run_datetime.replace( + hour=run_datetime.hour // 12 * 12, minute=0, second=0, microsecond=0 + ) + icon_file = download_icon_grib(run_datetime, max_lead_time=1) self.assertEqual(len(icon_file), 1) - delete_icon_grib(run_datetime,max_lead_time=1) #deletes icon_file + delete_icon_grib(run_datetime, max_lead_time=1) # deletes icon_file self.assertFalse(Path(icon_file[0]).exists()) def test_icon_d2_forecast_download(self): """Test availability of DWD icon forecast.""" run_datetime = dt.datetime.utcnow() - dt.timedelta(hours=5) - run_datetime = run_datetime.replace(hour=run_datetime.hour//12*12, - minute=0, - second=0, - microsecond=0) - icon_file = download_icon_grib(run_datetime, - model_name='icon-d2-eps', - max_lead_time=1) + run_datetime = run_datetime.replace( + hour=run_datetime.hour // 12 * 12, minute=0, second=0, microsecond=0 + ) + icon_file = download_icon_grib( + run_datetime, model_name="icon-d2-eps", max_lead_time=1 + ) self.assertEqual(len(icon_file), 1) - delete_icon_grib(run_datetime, - model_name='icon-d2-eps', - max_lead_time=1) #deletes icon_file + delete_icon_grib( + run_datetime, model_name="icon-d2-eps", max_lead_time=1 + ) # deletes icon_file self.assertFalse(Path(icon_file[0]).exists()) def test_icon_centroids_download(self): """Test availablility of DWD icon grid information.""" grid_file = download_icon_centroids_file() Path(grid_file).unlink() - grid_file = download_icon_centroids_file(model_name='icon-d2-eps') + grid_file = download_icon_centroids_file(model_name="icon-d2-eps") Path(grid_file).unlink() + # Execute Tests -if __name__ == '__main__': +if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestDataAvail) from sys import argv - outputdir = argv[1] if len(argv) > 1 else str(Path.cwd().joinpath('tests_xml')) + + outputdir = argv[1] if len(argv) > 1 else str(Path.cwd().joinpath("tests_xml")) xmlrunner.XMLTestRunner(output=outputdir).run(TESTS) diff --git a/script/jenkins/test_notebooks.py b/script/jenkins/test_notebooks.py index bb0420194..f2e4fcdbc 100644 --- a/script/jenkins/test_notebooks.py +++ b/script/jenkins/test_notebooks.py @@ -6,20 +6,20 @@ import sys import unittest from pathlib import Path + import nbformat import climada +BOUND_TO_FAIL = "# Note: execution of this cell will fail" +"""Cells containing this line will not be executed in the test""" -BOUND_TO_FAIL = '# Note: execution of this cell will fail' -'''Cells containing this line will not be executed in the test''' - -EXCLUDED_FROM_NOTEBOOK_TEST = ['climada_installation_step_by_step.ipynb'] -'''These notebooks are excluded from being tested''' +EXCLUDED_FROM_NOTEBOOK_TEST = ["climada_installation_step_by_step.ipynb"] +"""These notebooks are excluded from being tested""" class NotebookTest(unittest.TestCase): - '''Generic TestCase for testing the executability of notebooks + """Generic TestCase for testing the executability of notebooks Attributes ---------- @@ -28,7 +28,7 @@ class NotebookTest(unittest.TestCase): notebook : str File name of the notebook. - ''' + """ def __init__(self, methodName, wd=None, notebook=None): super(NotebookTest, self).__init__(methodName) @@ -36,64 +36,81 @@ def __init__(self, methodName, wd=None, notebook=None): self.notebook = notebook def test_notebook(self): - '''Extracts code cells from the notebook and executes them one by one, using `exec`. + """Extracts code cells from the notebook and executes them one by one, using `exec`. 
Magic lines and help/? calls are eliminated. Cells containing `BOUND_TO_FAIL` are elided. - Cells doing multiprocessing are elided.''' + Cells doing multiprocessing are elided.""" cwd = Path.cwd() try: # cd to the notebook directory os.chdir(self.wd) - print(f'start testing {self.notebook}') + print(f"start testing {self.notebook}") # read the notebook into a string - with open(self.notebook, encoding='utf8') as nb: + with open(self.notebook, encoding="utf8") as nb: content = nb.read() # parse the string with nbformat.reads - cells = nbformat.reads(content, 4)['cells'] + cells = nbformat.reads(content, 4)["cells"] # create namespace with IPython standards namespace = dict() - exec('from IPython.display import display', namespace) + exec("from IPython.display import display", namespace) # run all cells i = 0 for c in cells: # skip markdown cells - if c['cell_type'] != 'code': continue + if c["cell_type"] != "code": + continue i += 1 # skip deliberately failing cells - if BOUND_TO_FAIL in c['source']: continue + if BOUND_TO_FAIL in c["source"]: + continue # skip multiprocessing cells - if any([ tabu in c['source'].split() for tabu in [ - 'import multiprocessing', - 'from multiprocessing import', - ]]): - print('\n'.join([ - f'\nskip multiprocessing cell {i} in {self.notebook}', - '+'+'-'*68+'+', - c['source'] - ])) + if any( + [ + tabu in c["source"].split() + for tabu in [ + "import multiprocessing", + "from multiprocessing import", + ] + ] + ): + print( + "\n".join( + [ + f"\nskip multiprocessing cell {i} in {self.notebook}", + "+" + "-" * 68 + "+", + c["source"], + ] + ) + ) continue # remove non python lines and help calls which require user input # or involve pools being opened/closed - python_code = "\n".join([ - re.sub(r'pool=\w+', 'pool=None', ln) - for ln in c['source'].split("\n") - if not ln.startswith('%') - and not ln.startswith('help(') - and not ln.startswith('ask_ok(') - and not ln.startswith('ask_ok(') - and not ln.startswith('pool') # by convention Pool objects are called pool - and not ln.strip().endswith('?') - and not re.search(r'(\W|^)Pool\(', ln) # prevent Pool object creation - ]) + python_code = "\n".join( + [ + re.sub(r"pool=\w+", "pool=None", ln) + for ln in c["source"].split("\n") + if not ln.startswith("%") + and not ln.startswith("help(") + and not ln.startswith("ask_ok(") + and not ln.startswith("ask_ok(") + and not ln.startswith( + "pool" + ) # by convention Pool objects are called pool + and not ln.strip().endswith("?") + and not re.search( + r"(\W|^)Pool\(", ln + ) # prevent Pool object creation + ] + ) # execute the python code try: @@ -101,53 +118,60 @@ def test_notebook(self): # report failures except Exception as e: - failure = "\n".join([ - f"notebook {self.notebook} cell {i} failed with {e.__class__}", - f"{e}", - '+'+'-'*68+'+', - c['source'] - ]) - print(f'failed {self.notebook}') + failure = "\n".join( + [ + f"notebook {self.notebook} cell {i} failed with {e.__class__}", + f"{e}", + "+" + "-" * 68 + "+", + c["source"], + ] + ) + print(f"failed {self.notebook}") print(failure) self.fail(failure) - print(f'succeeded {self.notebook}') + print(f"succeeded {self.notebook}") finally: os.chdir(cwd) def main(install_dir): import xmlrunner - + sys.path.append(str(install_dir)) - - notebook_dir = install_dir.joinpath('doc', 'tutorial') - '''The path to the notebook directories.''' + + notebook_dir = install_dir.joinpath("doc", "tutorial") + """The path to the notebook directories.""" # list notebooks in the NOTEBOOK_DIR - notebooks = [f.absolute() - for f in 
sorted(notebook_dir.iterdir()) - if os.path.splitext(f)[1] == ('.ipynb') - and not f.name in EXCLUDED_FROM_NOTEBOOK_TEST] + notebooks = [ + f.absolute() + for f in sorted(notebook_dir.iterdir()) + if os.path.splitext(f)[1] == (".ipynb") + and not f.name in EXCLUDED_FROM_NOTEBOOK_TEST + ] # build a test suite with a test for each notebook suite = unittest.TestSuite() for notebook in notebooks: - class NBTest(NotebookTest): pass + + class NBTest(NotebookTest): + pass + test_name = "_".join(notebook.stem.split()) setattr(NBTest, test_name, NBTest.test_notebook) suite.addTest(NBTest(test_name, notebook.parent, notebook.name)) # run the tests and write xml reports to tests_xml - output_dir = install_dir.joinpath('tests_xml') + output_dir = install_dir.joinpath("tests_xml") xmlrunner.XMLTestRunner(output=str(output_dir)).run(suite) -if __name__ == '__main__': - if sys.argv[1] == 'report': +if __name__ == "__main__": + if sys.argv[1] == "report": install_dir = Path(sys.argv[2]) if len(sys.argv) > 2 else Path.cwd() main(install_dir) - + else: jd, nb = os.path.split(sys.argv[1]) - unittest.TextTestRunner(verbosity=2).run(NotebookTest('test_notebook', jd, nb)) + unittest.TextTestRunner(verbosity=2).run(NotebookTest("test_notebook", jd, nb)) From bc60010232e77591972f64721c3098dd72610a74 Mon Sep 17 00:00:00 2001 From: emanuel-schmid Date: Sun, 20 Oct 2024 19:06:39 +0200 Subject: [PATCH 09/12] setup: DEPS_TEST update --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index e743557e8..dd260d7ee 100644 --- a/setup.py +++ b/setup.py @@ -24,7 +24,7 @@ DEPS_TEST = [ "ipython", "mccabe>=0.6", - "pylint==2.7.1", + "pylint>=3.0", "pytest", "pytest-cov", "pytest-subtests", From 3c5fc8f07b2455e291e9207dec631af0e1355f42 Mon Sep 17 00:00:00 2001 From: emanuel-schmid Date: Sun, 20 Oct 2024 19:06:39 +0200 Subject: [PATCH 10/12] avoid circular imports --- climada/engine/cost_benefit.py | 2 +- climada/engine/impact_calc.py | 2 +- climada/engine/unsequa/calc_base.py | 2 +- climada/engine/unsequa/calc_cost_benefit.py | 6 +++--- climada/engine/unsequa/calc_delta_climate.py | 4 +++- climada/engine/unsequa/calc_impact.py | 4 +++- setup.py | 2 +- 7 files changed, 13 insertions(+), 9 deletions(-) diff --git a/climada/engine/cost_benefit.py b/climada/engine/cost_benefit.py index ea1c771b2..dfb5153db 100644 --- a/climada/engine/cost_benefit.py +++ b/climada/engine/cost_benefit.py @@ -32,7 +32,7 @@ from tabulate import tabulate from climada.engine.impact_calc import ImpactCalc -from climada.engine import Impact, ImpactFreqCurve +from climada.engine.impact import Impact, ImpactFreqCurve LOGGER = logging.getLogger(__name__) diff --git a/climada/engine/impact_calc.py b/climada/engine/impact_calc.py index d0fc05286..aefa3810f 100644 --- a/climada/engine/impact_calc.py +++ b/climada/engine/impact_calc.py @@ -27,7 +27,7 @@ import geopandas as gpd from climada import CONFIG -from climada.engine import Impact +from climada.engine.impact import Impact LOGGER = logging.getLogger(__name__) diff --git a/climada/engine/unsequa/calc_base.py b/climada/engine/unsequa/calc_base.py index 10c302959..9f32931f7 100644 --- a/climada/engine/unsequa/calc_base.py +++ b/climada/engine/unsequa/calc_base.py @@ -29,7 +29,7 @@ import numpy as np from climada.util.value_representation import sig_dig as u_sig_dig -from climada.engine.unsequa import UncOutput +from climada.engine.unsequa.unc_output import UncOutput LOGGER = logging.getLogger(__name__) diff --git a/climada/engine/unsequa/calc_cost_benefit.py 
b/climada/engine/unsequa/calc_cost_benefit.py index 36f1fe2d1..74ba72319 100644 --- a/climada/engine/unsequa/calc_cost_benefit.py +++ b/climada/engine/unsequa/calc_cost_benefit.py @@ -27,14 +27,14 @@ from typing import Optional, Union import pandas as pd -import numpy as np import pathos.multiprocessing as mp # use pathos.multiprocess fork of multiprocessing for compatibility # wiht notebooks and other environments https://stackoverflow.com/a/65001152/12454103 from climada.engine.cost_benefit import CostBenefit -from climada.engine.unsequa import Calc, InputVar, UncCostBenefitOutput -from climada.engine.unsequa.calc_base import _sample_parallel_iterator, _multiprocess_chunksize, _transpose_chunked_data +from climada.engine.unsequa.input_var import InputVar +from climada.engine.unsequa.unc_output import UncCostBenefitOutput +from climada.engine.unsequa.calc_base import Calc, _sample_parallel_iterator, _multiprocess_chunksize, _transpose_chunked_data from climada.util import log_level from climada.hazard import Hazard from climada.entity import Entity diff --git a/climada/engine/unsequa/calc_delta_climate.py b/climada/engine/unsequa/calc_delta_climate.py index 9de9ddae6..f266764e7 100644 --- a/climada/engine/unsequa/calc_delta_climate.py +++ b/climada/engine/unsequa/calc_delta_climate.py @@ -34,8 +34,10 @@ # wiht notebooks and other environments https://stackoverflow.com/a/65001152/12454103 from climada.engine import ImpactCalc -from climada.engine.unsequa import Calc, InputVar, UncImpactOutput +from climada.engine.unsequa.input_var import InputVar +from climada.engine.unsequa.unc_output import UncImpactOutput from climada.engine.unsequa.calc_base import ( + Calc, _sample_parallel_iterator, _multiprocess_chunksize, _transpose_chunked_data, diff --git a/climada/engine/unsequa/calc_impact.py b/climada/engine/unsequa/calc_impact.py index a82f5cae5..58aea0c94 100644 --- a/climada/engine/unsequa/calc_impact.py +++ b/climada/engine/unsequa/calc_impact.py @@ -33,8 +33,10 @@ # wiht notebooks and other environments https://stackoverflow.com/a/65001152/12454103 from climada.engine import ImpactCalc -from climada.engine.unsequa import Calc, InputVar, UncImpactOutput +from climada.engine.unsequa.input_var import InputVar +from climada.engine.unsequa.unc_output import UncImpactOutput from climada.engine.unsequa.calc_base import ( + Calc, _sample_parallel_iterator, _multiprocess_chunksize, _transpose_chunked_data, diff --git a/setup.py b/setup.py index e743557e8..dd260d7ee 100644 --- a/setup.py +++ b/setup.py @@ -24,7 +24,7 @@ DEPS_TEST = [ "ipython", "mccabe>=0.6", - "pylint==2.7.1", + "pylint>=3.0", "pytest", "pytest-cov", "pytest-subtests", From dcbe2c5fdbace71c37d21b095263bf74817b2eef Mon Sep 17 00:00:00 2001 From: emanuel-schmid Date: Mon, 21 Oct 2024 10:28:56 +0200 Subject: [PATCH 11/12] format exceptions --- climada/engine/test/test_impact_calc.py | 72 +-- climada/entity/impact_funcs/trop_cyclone.py | 509 ++------------------ climada/entity/measures/test/test_base.py | 167 +------ climada/hazard/test/test_tc_tracks.py | 188 ++------ climada/hazard/test/test_tc_tracks_synth.py | 289 +++-------- climada/hazard/test/test_trop_cyclone.py | 108 +---- climada/util/constants.py | 254 +--------- 7 files changed, 233 insertions(+), 1354 deletions(-) diff --git a/climada/engine/test/test_impact_calc.py b/climada/engine/test/test_impact_calc.py index 489f66a00..3f19e2632 100644 --- a/climada/engine/test/test_impact_calc.py +++ b/climada/engine/test/test_impact_calc.py @@ -244,80 +244,40 @@ def 
test_calc_impact_RF_pass(self): 0.00000000e00, ] ) + # fmt: off imp_mat_array = np.array( [ [ - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, + 0.00000000e00, 0.00000000e00, 0.00000000e00, 0.00000000e00, + 0.00000000e00, 0.00000000e00, 0.00000000e00, 0.00000000e00, ], [ - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, + 0.00000000e00, 0.00000000e00, 0.00000000e00, 0.00000000e00, + 0.00000000e00, 0.00000000e00, 0.00000000e00, 0.00000000e00, ], [ - 0.00000000e00, - 6.41965663e04, - 0.00000000e00, - 2.02249434e02, - 3.41245461e04, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, + 0.00000000e00, 6.41965663e04, 0.00000000e00, 2.02249434e02, + 3.41245461e04, 0.00000000e00, 0.00000000e00, 0.00000000e00, ], [ - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 3.41245461e04, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, + 0.00000000e00, 0.00000000e00, 0.00000000e00, 0.00000000e00, + 3.41245461e04, 0.00000000e00, 0.00000000e00, 0.00000000e00, ], [ - 7.73566566e07, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, + 7.73566566e07, 0.00000000e00, 0.00000000e00, 0.00000000e00, + 0.00000000e00, 0.00000000e00, 0.00000000e00, 0.00000000e00, ], [ - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, + 0.00000000e00, 0.00000000e00, 0.00000000e00, 0.00000000e00, + 0.00000000e00, 0.00000000e00, 0.00000000e00, 0.00000000e00, ], [ - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, - 0.00000000e00, + 0.00000000e00, 0.00000000e00, 0.00000000e00, 0.00000000e00, + 0.00000000e00, 0.00000000e00, 0.00000000e00, 0.00000000e00, ], ] ) + # fmt: on check_impact(self, impact, haz, exp, aai_agg, eai_exp, at_event, imp_mat_array) def test_empty_impact(self): diff --git a/climada/entity/impact_funcs/trop_cyclone.py b/climada/entity/impact_funcs/trop_cyclone.py index 18492bbb1..692e0ef92 100644 --- a/climada/entity/impact_funcs/trop_cyclone.py +++ b/climada/entity/impact_funcs/trop_cyclone.py @@ -311,123 +311,26 @@ def get_countries_per_region(region=None): """ if not region: region = "all" + # fmt: off iso3n = { "NA1": [ - 660, - 28, - 32, - 533, - 44, - 52, - 84, - 60, - 68, - 132, - 136, - 152, - 170, - 188, - 192, - 212, - 214, - 218, - 222, - 238, - 254, - 308, - 312, - 320, - 328, - 332, - 340, - 388, - 474, - 484, - 500, - 558, - 591, - 600, - 604, - 630, - 654, - 659, - 662, - 670, - 534, - 740, - 780, - 796, - 858, - 862, - 92, - 850, + 660, 28, 32, 533, 44, 52, 84, 60, 68, 132, + 136, 152, 170, 188, 192, 212, 214, 218, 222, 238, + 254, 308, 312, 320, 328, 332, 340, 388, 474, 484, + 500, 558, 591, 600, 604, 630, 654, 659, 662, 670, + 534, 740, 780, 796, 858, 862, 92, 850, ], "NA2": [124, 840], "NI": [ - 4, - 51, - 31, - 48, - 50, - 64, - 262, - 232, - 231, - 268, - 356, - 364, - 368, - 376, - 400, - 398, - 414, - 417, - 422, - 462, - 496, - 104, - 524, - 512, - 586, - 634, - 682, - 706, - 144, - 760, - 762, - 795, - 800, - 784, - 860, - 887, + 4, 51, 31, 48, 50, 64, 262, 232, 231, 268, + 356, 364, 368, 376, 400, 398, 414, 417, 422, 462, + 496, 104, 524, 512, 586, 634, 682, 706, 144, 760, + 762, 795, 800, 784, 860, 887, ], "OC": [ - 16, - 36, - 184, - 242, - 258, - 316, - 296, - 584, - 583, - 520, - 540, - 
554, - 570, - 574, - 580, - 585, - 598, - 612, - 882, - 90, - 626, - 772, - 776, - 798, - 548, - 876, + 16, 36, 184, 242, 258, 316, 296, 584, 583, 520, + 540, 554, 570, 574, 580, 585, 598, 612, 882, 90, + 626, 772, 776, 798, 548, 876, ], "SI": [174, 180, 748, 450, 454, 466, 480, 508, 710, 834, 716], "WP1": [116, 360, 418, 458, 764, 704], @@ -435,251 +338,42 @@ def get_countries_per_region(region=None): "WP3": [156], "WP4": [344, 392, 410, 446, 158], "ROW": [ - 8, - 12, - 20, - 24, - 10, - 40, - 112, - 56, - 204, - 535, - 70, - 72, - 74, - 76, - 86, - 96, - 100, - 854, - 108, - 120, - 140, - 148, - 162, - 166, - 178, - 191, - 531, - 196, - 203, - 384, - 208, - 818, - 226, - 233, - 234, - 246, - 250, - 260, - 266, - 270, - 276, - 288, - 292, - 300, - 304, - 831, - 324, - 624, - 334, - 336, - 348, - 352, - 372, - 833, - 380, - 832, - 404, - 408, - 983, - 428, - 426, - 430, - 434, - 438, - 440, - 442, - 470, - 478, - 175, - 498, - 492, - 499, - 504, - 516, - 528, - 562, - 566, - 807, - 578, - 275, - 616, - 620, - 642, - 643, - 646, - 638, - 652, - 663, - 666, - 674, - 678, - 686, - 688, - 690, - 694, - 702, - 703, - 705, - 239, - 728, - 724, - 729, - 744, - 752, - 756, - 768, - 788, - 792, - 804, - 826, - 581, - 732, - 894, - 248, + 8, 12, 20, 24, 10, 40, 112, 56, 204, 535, + 70, 72, 74, 76, 86, 96, 100, 854, 108, 120, + 140, 148, 162, 166, 178, 191, 531, 196, 203, 384, + 208, 818, 226, 233, 234, 246, 250, 260, 266, 270, + 276, 288, 292, 300, 304, 831, 324, 624, 334, 336, + 348, 352, 372, 833, 380, 832, 404, 408, 983, 428, + 426, 430, 434, 438, 440, 442, 470, 478, 175, 498, + 492, 499, 504, 516, 528, 562, 566, 807, 578, 275, + 616, 620, 642, 643, 646, 638, 652, 663, 666, 674, + 678, 686, 688, 690, 694, 702, 703, 705, 239, 728, + 724, 729, 744, 752, 756, 768, 788, 792, 804, 826, + 581, 732, 894, 248, ], } iso3a = { "NA1": [ - "AIA", - "ATG", - "ARG", - "ABW", - "BHS", - "BRB", - "BLZ", - "BMU", - "BOL", - "CPV", - "CYM", - "CHL", - "COL", - "CRI", - "CUB", - "DMA", - "DOM", - "ECU", - "SLV", - "FLK", - "GUF", - "GRD", - "GLP", - "GTM", - "GUY", - "HTI", - "HND", - "JAM", - "MTQ", - "MEX", - "MSR", - "NIC", - "PAN", - "PRY", - "PER", - "PRI", - "SHN", - "KNA", - "LCA", - "VCT", - "SXM", - "SUR", - "TTO", - "TCA", - "URY", - "VEN", - "VGB", - "VIR", + "AIA", "ATG", "ARG", "ABW", "BHS", "BRB", "BLZ", "BMU", "BOL", "CPV", + "CYM", "CHL", "COL", "CRI", "CUB", "DMA", "DOM", "ECU", "SLV", "FLK", + "GUF", "GRD", "GLP", "GTM", "GUY", "HTI", "HND", "JAM", "MTQ", "MEX", + "MSR", "NIC", "PAN", "PRY", "PER", "PRI", "SHN", "KNA", "LCA", "VCT", + "SXM", "SUR", "TTO", "TCA", "URY", "VEN", "VGB", "VIR", ], "NA2": ["CAN", "USA"], "NI": [ - "AFG", - "ARM", - "AZE", - "BHR", - "BGD", - "BTN", - "DJI", - "ERI", - "ETH", - "GEO", - "IND", - "IRN", - "IRQ", - "ISR", - "JOR", - "KAZ", - "KWT", - "KGZ", - "LBN", - "MDV", - "MNG", - "MMR", - "NPL", - "OMN", - "PAK", - "QAT", - "SAU", - "SOM", - "LKA", - "SYR", - "TJK", - "TKM", - "UGA", - "ARE", - "UZB", - "YEM", + "AFG", "ARM", "AZE", "BHR", "BGD", "BTN", "DJI", "ERI", "ETH", "GEO", + "IND", "IRN", "IRQ", "ISR", "JOR", "KAZ", "KWT", "KGZ", "LBN", "MDV", + "MNG", "MMR", "NPL", "OMN", "PAK", "QAT", "SAU", "SOM", "LKA", "SYR", + "TJK", "TKM", "UGA", "ARE", "UZB", "YEM", ], "OC": [ - "ASM", - "AUS", - "COK", - "FJI", - "PYF", - "GUM", - "KIR", - "MHL", - "FSM", - "NRU", - "NCL", - "NZL", - "NIU", - "NFK", - "MNP", - "PLW", - "PNG", - "PCN", - "WSM", - "SLB", - "TLS", - "TKL", - "TON", - "TUV", - "VUT", - "WLF", + "ASM", "AUS", "COK", "FJI", "PYF", "GUM", "KIR", "MHL", 
"FSM", "NRU", + "NCL", "NZL", "NIU", "NFK", "MNP", "PLW", "PNG", "PCN", "WSM", "SLB", + "TLS", "TKL", "TON", "TUV", "VUT", "WLF", ], "SI": [ - "COM", - "COD", - "SWZ", - "MDG", - "MWI", - "MLI", - "MUS", - "MOZ", - "ZAF", - "TZA", + "COM", "COD", "SWZ", "MDG", "MWI", "MLI", "MUS", "MOZ", "ZAF", "TZA", "ZWE", ], "WP1": ["KHM", "IDN", "LAO", "MYS", "THA", "VNM"], @@ -687,122 +381,21 @@ def get_countries_per_region(region=None): "WP3": ["CHN"], "WP4": ["HKG", "JPN", "KOR", "MAC", "TWN"], "ROW": [ - "ALB", - "DZA", - "AND", - "AGO", - "ATA", - "AUT", - "BLR", - "BEL", - "BEN", - "BES", - "BIH", - "BWA", - "BVT", - "BRA", - "IOT", - "BRN", - "BGR", - "BFA", - "BDI", - "CMR", - "CAF", - "TCD", - "CXR", - "CCK", - "COG", - "HRV", - "CUW", - "CYP", - "CZE", - "CIV", - "DNK", - "EGY", - "GNQ", - "EST", - "FRO", - "FIN", - "FRA", - "ATF", - "GAB", - "GMB", - "DEU", - "GHA", - "GIB", - "GRC", - "GRL", - "GGY", - "GIN", - "GNB", - "HMD", - "VAT", - "HUN", - "ISL", - "IRL", - "IMN", - "ITA", - "JEY", - "KEN", - "PRK", - "XKX", - "LVA", - "LSO", - "LBR", - "LBY", - "LIE", - "LTU", - "LUX", - "MLT", - "MRT", - "MYT", - "MDA", - "MCO", - "MNE", - "MAR", - "NAM", - "NLD", - "NER", - "NGA", - "MKD", - "NOR", - "PSE", - "POL", - "PRT", - "ROU", - "RUS", - "RWA", - "REU", - "BLM", - "MAF", - "SPM", - "SMR", - "STP", - "SEN", - "SRB", - "SYC", - "SLE", - "SGP", - "SVK", - "SVN", - "SGS", - "SSD", - "ESP", - "SDN", - "SJM", - "SWE", - "CHE", - "TGO", - "TUN", - "TUR", - "UKR", - "GBR", - "UMI", - "ESH", - "ZMB", - "ALA", + "ALB", "DZA", "AND", "AGO", "ATA", "AUT", "BLR", "BEL", "BEN", "BES", + "BIH", "BWA", "BVT", "BRA", "IOT", "BRN", "BGR", "BFA", "BDI", "CMR", + "CAF", "TCD", "CXR", "CCK", "COG", "HRV", "CUW", "CYP", "CZE", "CIV", + "DNK", "EGY", "GNQ", "EST", "FRO", "FIN", "FRA", "ATF", "GAB", "GMB", + "DEU", "GHA", "GIB", "GRC", "GRL", "GGY", "GIN", "GNB", "HMD", "VAT", + "HUN", "ISL", "IRL", "IMN", "ITA", "JEY", "KEN", "PRK", "XKX", "LVA", + "LSO", "LBR", "LBY", "LIE", "LTU", "LUX", "MLT", "MRT", "MYT", "MDA", + "MCO", "MNE", "MAR", "NAM", "NLD", "NER", "NGA", "MKD", "NOR", "PSE", + "POL", "PRT", "ROU", "RUS", "RWA", "REU", "BLM", "MAF", "SPM", "SMR", + "STP", "SEN", "SRB", "SYC", "SLE", "SGP", "SVK", "SVN", "SGS", "SSD", + "ESP", "SDN", "SJM", "SWE", "CHE", "TGO", "TUN", "TUR", "UKR", "GBR", + "UMI", "ESH", "ZMB", "ALA", ], } + # fmt: on impf_id = { "NA1": 1, "NA2": 2, diff --git a/climada/entity/measures/test/test_base.py b/climada/entity/measures/test/test_base.py index 520229ffc..4f14f4a5a 100644 --- a/climada/entity/measures/test/test_base.py +++ b/climada/entity/measures/test/test_base.py @@ -151,84 +151,20 @@ def test_cutoff_hazard_pass(self): new_haz = act_1._cutoff_hazard_damage(exp, imp_set, haz) self.assertFalse(id(new_haz) == id(haz)) - + # fmt: off pos_no_null = np.array( [ - 6249, - 7697, - 9134, - 13500, - 13199, - 5944, - 9052, - 9050, - 2429, - 5139, - 9053, - 7102, - 4096, - 1070, - 5948, - 1076, - 5947, - 7432, - 5949, - 11694, - 5484, - 6246, - 12147, - 778, - 3326, - 7199, - 12498, - 11698, - 6245, - 5327, - 4819, - 8677, - 5970, - 7101, - 779, - 3894, - 9051, - 5976, - 3329, - 5978, - 4282, - 11697, - 7193, - 5351, - 7310, - 7478, - 5489, - 5526, - 7194, - 4283, - 7191, - 5328, - 4812, - 5528, - 5527, - 5488, - 7475, - 5529, - 776, - 5758, - 4811, - 6223, - 7479, - 7470, - 5480, - 5325, - 7477, - 7318, - 7317, - 11696, - 7313, - 13165, - 6221, + 6249, 7697, 9134, 13500, 13199, 5944, 9052, 9050, 2429, 5139, + 9053, 7102, 4096, 1070, 5948, 1076, 5947, 7432, 5949, 11694, + 5484, 6246, 
12147, 778, 3326, 7199, 12498, 11698, 6245, 5327, + 4819, 8677, 5970, 7101, 779, 3894, 9051, 5976, 3329, 5978, + 4282, 11697, 7193, 5351, 7310, 7478, 5489, 5526, 7194, 4283, + 7191, 5328, 4812, 5528, 5527, 5488, 7475, 5529, 776, 5758, + 4811, 6223, 7479, 7470, 5480, 5325, 7477, 7318, 7317, 11696, + 7313, 13165, 6221, ] ) + # fmt: on all_haz = np.arange(haz.intensity.shape[0]) all_haz[pos_no_null] = -1 pos_null = np.argwhere(all_haz > 0).reshape(-1) @@ -254,83 +190,20 @@ def test_cutoff_hazard_region_pass(self): self.assertFalse(id(new_haz) == id(haz)) + # fmt: off pos_no_null = np.array( [ - 6249, - 7697, - 9134, - 13500, - 13199, - 5944, - 9052, - 9050, - 2429, - 5139, - 9053, - 7102, - 4096, - 1070, - 5948, - 1076, - 5947, - 7432, - 5949, - 11694, - 5484, - 6246, - 12147, - 778, - 3326, - 7199, - 12498, - 11698, - 6245, - 5327, - 4819, - 8677, - 5970, - 7101, - 779, - 3894, - 9051, - 5976, - 3329, - 5978, - 4282, - 11697, - 7193, - 5351, - 7310, - 7478, - 5489, - 5526, - 7194, - 4283, - 7191, - 5328, - 4812, - 5528, - 5527, - 5488, - 7475, - 5529, - 776, - 5758, - 4811, - 6223, - 7479, - 7470, - 5480, - 5325, - 7477, - 7318, - 7317, - 11696, - 7313, - 13165, - 6221, + 6249, 7697, 9134, 13500, 13199, 5944, 9052, 9050, 2429, 5139, + 9053, 7102, 4096, 1070, 5948, 1076, 5947, 7432, 5949, 11694, + 5484, 6246, 12147, 778, 3326, 7199, 12498, 11698, 6245, 5327, + 4819, 8677, 5970, 7101, 779, 3894, 9051, 5976, 3329, 5978, + 4282, 11697, 7193, 5351, 7310, 7478, 5489, 5526, 7194, 4283, + 7191, 5328, 4812, 5528, 5527, 5488, 7475, 5529, 776, 5758, + 4811, 6223, 7479, 7470, 5480, 5325, 7477, 7318, 7317, 11696, + 7313, 13165, 6221, ] ) + # fmt: on all_haz = np.arange(haz.intensity.shape[0]) all_haz[pos_no_null] = -1 pos_null = np.argwhere(all_haz > 0).reshape(-1) diff --git a/climada/hazard/test/test_tc_tracks.py b/climada/hazard/test/test_tc_tracks.py index df60bc83e..c42d5a7a1 100644 --- a/climada/hazard/test/test_tc_tracks.py +++ b/climada/hazard/test/test_tc_tracks.py @@ -815,90 +815,32 @@ def test_interp_track_redundancy_pass(self): def test_interp_origin_pass(self): """Interpolate track to min_time_step crossing lat origin""" tc_track = tc.TCTracks.from_processed_ibtracs_csv(TEST_TRACK) + # fmt: off tc_track.data[0]["lon"].values = np.array( [ - 167.207761, - 168.1, - 168.936535, - 169.728947, - 170.5, - 171.257176, - 171.946822, - 172.5, - 172.871797, - 173.113396, - 173.3, - 173.496375, - 173.725522, - 174.0, - 174.331591, - 174.728961, - 175.2, - 175.747632, - 176.354929, - 177.0, - 177.66677, - 178.362433, - 179.1, - 179.885288, - -179.304661, - -178.5, - -177.726442, - -176.991938, - -176.3, - -175.653595, - -175.053513, - -174.5, - -173.992511, - -173.527342, - -173.1, - -172.705991, - -172.340823, - -172.0, + 167.207761, 168.1, 168.936535, 169.728947, 170.5, + 171.257176, 171.946822, 172.5, 172.871797, 173.113396, + 173.3, 173.496375, 173.725522, 174.0, 174.331591, + 174.728961, 175.2, 175.747632, 176.354929, 177.0, + 177.66677, 178.362433, 179.1, 179.885288, -179.304661, + -178.5, -177.726442, -176.991938, -176.3, -175.653595, + -175.053513, -174.5, -173.992511, -173.527342, -173.1, + -172.705991, -172.340823, -172.0, ] ) tc_track.data[0]["lat"].values = np.array( [ - 40.196053, - 40.6, - 40.930215, - 41.215674, - 41.5, - 41.816354, - 42.156065, - 42.5, - 42.833998, - 43.16377, - 43.5, - 43.847656, - 44.188854, - 44.5, - 44.764269, - 44.991925, - 45.2, - 45.402675, - 45.602707, - 45.8, - 45.995402, - 46.193543, - 46.4, - 46.615718, - 46.82312, - 47.0, - 47.130616, - 47.225088, - 47.3, - 
47.369224, - 47.435786, - 47.5, - 47.562858, - 47.628064, - 47.7, - 47.783047, - 47.881586, - 48.0, + 40.196053, 40.6, 40.930215, 41.215674, 41.5, + 41.816354, 42.156065, 42.5, 42.833998, 43.16377, + 43.5, 43.847656, 44.188854, 44.5, 44.764269, + 44.991925, 45.2, 45.402675, 45.602707, 45.8, + 45.995402, 46.193543, 46.4, 46.615718, 46.82312, + 47.0, 47.130616, 47.225088, 47.3, 47.369224, + 47.435786, 47.5, 47.562858, 47.628064, 47.7, + 47.783047, 47.881586, 48.0, ] ) + # fmt: on tc_track.equal_timestep(time_step_h=1) self.assertEqual(tc_track.data[0]["time"].size, 223) @@ -934,91 +876,33 @@ def test_interp_origin_pass(self): def test_interp_origin_inv_pass(self): """Interpolate track to min_time_step crossing lat origin""" tc_track = tc.TCTracks.from_processed_ibtracs_csv(TEST_TRACK) + # fmt: off tc_track.data[0]["lon"].values = np.array( [ - 167.207761, - 168.1, - 168.936535, - 169.728947, - 170.5, - 171.257176, - 171.946822, - 172.5, - 172.871797, - 173.113396, - 173.3, - 173.496375, - 173.725522, - 174.0, - 174.331591, - 174.728961, - 175.2, - 175.747632, - 176.354929, - 177.0, - 177.66677, - 178.362433, - 179.1, - 179.885288, - -179.304661, - -178.5, - -177.726442, - -176.991938, - -176.3, - -175.653595, - -175.053513, - -174.5, - -173.992511, - -173.527342, - -173.1, - -172.705991, - -172.340823, - -172.0, + 167.207761, 168.1, 168.936535, 169.728947, 170.5, + 171.257176, 171.946822, 172.5, 172.871797, 173.113396, + 173.3, 173.496375, 173.725522, 174.0, 174.331591, + 174.728961, 175.2, 175.747632, 176.354929, 177.0, + 177.66677, 178.362433, 179.1, 179.885288, -179.304661, + -178.5, -177.726442, -176.991938, -176.3, -175.653595, + -175.053513, -174.5, -173.992511, -173.527342, -173.1, + -172.705991, -172.340823, -172.0, ] ) tc_track.data[0]["lon"].values = -tc_track.data[0]["lon"].values tc_track.data[0]["lat"].values = np.array( [ - 40.196053, - 40.6, - 40.930215, - 41.215674, - 41.5, - 41.816354, - 42.156065, - 42.5, - 42.833998, - 43.16377, - 43.5, - 43.847656, - 44.188854, - 44.5, - 44.764269, - 44.991925, - 45.2, - 45.402675, - 45.602707, - 45.8, - 45.995402, - 46.193543, - 46.4, - 46.615718, - 46.82312, - 47.0, - 47.130616, - 47.225088, - 47.3, - 47.369224, - 47.435786, - 47.5, - 47.562858, - 47.628064, - 47.7, - 47.783047, - 47.881586, - 48.0, + 40.196053, 40.6, 40.930215, 41.215674, 41.5, + 41.816354, 42.156065, 42.5, 42.833998, 43.16377, + 43.5, 43.847656, 44.188854, 44.5, 44.764269, + 44.991925, 45.2, 45.402675, 45.602707, 45.8, + 45.995402, 46.193543, 46.4, 46.615718, 46.82312, + 47.0, 47.130616, 47.225088, 47.3, 47.369224, + 47.435786, 47.5, 47.562858, 47.628064, 47.7, + 47.783047, 47.881586, 48.0, ] ) + # fmt: on tc_track.equal_timestep(time_step_h=1) self.assertEqual(tc_track.data[0]["time"].size, 223) diff --git a/climada/hazard/test/test_tc_tracks_synth.py b/climada/hazard/test/test_tc_tracks_synth.py index f0b5c0b44..4f35b05b3 100644 --- a/climada/hazard/test/test_tc_tracks_synth.py +++ b/climada/hazard/test/test_tc_tracks_synth.py @@ -111,119 +111,53 @@ def test_apply_decay_pass(self): tc_synth._apply_land_decay( tc_track.data, v_rel, p_rel, land_geom, s_rel=True, check_plot=False ) - + # fmt: off p_ref = ( - np.array( - [ - 1.010000000000000, - 1.009000000000000, - 1.008000000000000, - 1.006000000000000, - 1.003000000000000, - 1.002000000000000, - 1.001000000000000, - 1.000000000000000, - 1.000000000000000, - 1.001000000000000, - 1.002000000000000, - 1.005000000000000, - 1.007000000000000, - 1.010000000000000, - 1.010000000000000, - 1.010000000000000, - 
1.010000000000000, - 1.010000000000000, - 1.010000000000000, - 1.007000000000000, - 1.004000000000000, - 1.000000000000000, - 0.994000000000000, - 0.981000000000000, - 0.969000000000000, - 0.961000000000000, - 0.947000000000000, - 0.933000000000000, - 0.922000000000000, - 0.930000000000000, - 0.937000000000000, - 0.951000000000000, - 0.947000000000000, - 0.943000000000000, - 0.948000000000000, - 0.946000000000000, - 0.941000000000000, - 0.937000000000000, - 0.955000000000000, - 0.9741457117, - 0.99244068917, - 1.00086729492, - 1.00545853355, - 1.00818354609, - 1.00941850023, - 1.00986192053, - 1.00998400565, - ] - ) + np.array([ + 1.010000000000000, 1.009000000000000, 1.008000000000000, + 1.006000000000000, 1.003000000000000, 1.002000000000000, + 1.001000000000000, 1.000000000000000, 1.000000000000000, + 1.001000000000000, 1.002000000000000, 1.005000000000000, + 1.007000000000000, 1.010000000000000, 1.010000000000000, + 1.010000000000000, 1.010000000000000, 1.010000000000000, + 1.010000000000000, 1.007000000000000, 1.004000000000000, + 1.000000000000000, 0.994000000000000, 0.981000000000000, + 0.969000000000000, 0.961000000000000, 0.947000000000000, + 0.933000000000000, 0.922000000000000, 0.930000000000000, + 0.937000000000000, 0.951000000000000, 0.947000000000000, + 0.943000000000000, 0.948000000000000, 0.946000000000000, + 0.941000000000000, 0.937000000000000, 0.955000000000000, + 0.974145711700000, 0.992440689170000, 1.000867294920000, + 1.005458533550000, 1.008183546090000, 1.009418500230000, + 1.009861920530000, 1.009984005650000, + ]) * 1e3 ) self.assertTrue(np.allclose(p_ref, tc_track.data[0]["central_pressure"].values)) v_ref = ( - np.array( - [ - 0.250000000000000, - 0.300000000000000, - 0.300000000000000, - 0.350000000000000, - 0.350000000000000, - 0.400000000000000, - 0.450000000000000, - 0.450000000000000, - 0.450000000000000, - 0.450000000000000, - 0.450000000000000, - 0.450000000000000, - 0.450000000000000, - 0.400000000000000, - 0.400000000000000, - 0.400000000000000, - 0.400000000000000, - 0.450000000000000, - 0.450000000000000, - 0.500000000000000, - 0.500000000000000, - 0.550000000000000, - 0.650000000000000, - 0.800000000000000, - 0.950000000000000, - 1.100000000000000, - 1.300000000000000, - 1.450000000000000, - 1.500000000000000, - 1.250000000000000, - 1.300000000000000, - 1.150000000000000, - 1.150000000000000, - 1.150000000000000, - 1.150000000000000, - 1.200000000000000, - 1.250000000000000, - 1.250000000000000, - 1.200000000000000, - 0.9737967353, - 0.687255951, - 0.4994850556, - 0.3551480462, - 0.2270548036, - 0.1302099557, - 0.0645385918, - 0.0225325851, - ] - ) + np.array([ + 0.250000000000000, 0.300000000000000, 0.300000000000000, + 0.350000000000000, 0.350000000000000, 0.400000000000000, + 0.450000000000000, 0.450000000000000, 0.450000000000000, + 0.450000000000000, 0.450000000000000, 0.450000000000000, + 0.450000000000000, 0.400000000000000, 0.400000000000000, + 0.400000000000000, 0.400000000000000, 0.450000000000000, + 0.450000000000000, 0.500000000000000, 0.500000000000000, + 0.550000000000000, 0.650000000000000, 0.800000000000000, + 0.950000000000000, 1.100000000000000, 1.300000000000000, + 1.450000000000000, 1.500000000000000, 1.250000000000000, + 1.300000000000000, 1.150000000000000, 1.150000000000000, + 1.150000000000000, 1.150000000000000, 1.200000000000000, + 1.250000000000000, 1.250000000000000, 1.200000000000000, + 0.973796735300000, 0.687255951000000, 0.499485055600000, + 0.355148046200000, 0.227054803600000, 0.130209955700000, + 0.064538591800000, 
0.022532585100000, + ]) * 1e2 ) - + # fmt: on self.assertTrue( np.allclose(v_ref, tc_track.data[0]["max_sustained_wind"].values) ) @@ -445,137 +379,42 @@ def test_wrong_decay_pass(self): ) track_gen = track.data[0] + # fmt: off track_gen["lat"] = np.array( [ - 28.20340431, - 28.7915261, - 29.38642458, - 29.97836984, - 30.56844404, - 31.16265292, - 31.74820301, - 32.34449825, - 32.92261894, - 33.47430891, - 34.01492525, - 34.56789399, - 35.08810845, - 35.55965893, - 35.94835174, - 36.29355848, - 36.45379561, - 36.32473812, - 36.07552209, - 35.92224784, - 35.84144186, - 35.78298537, - 35.86090718, - 36.02440372, - 36.37555559, - 37.06207765, - 37.73197352, - 37.97524273, - 38.05560287, - 38.21901208, - 38.31486156, - 38.30813367, - 38.28481808, - 38.28410366, - 38.25894812, - 38.20583372, - 38.22741099, - 38.39970022, - 38.68367797, - 39.08329904, - 39.41434629, - 39.424984, - 39.31327716, - 39.30336335, - 39.31714429, - 39.27031932, - 39.30848775, - 39.48759833, - 39.73326595, - 39.96187967, - 40.26954226, - 40.76882202, - 41.40398607, - 41.93809726, - 42.60395785, - 43.57074792, - 44.63816143, - 45.61450458, - 46.68528511, - 47.89209365, + 28.20340431, 28.7915261, 29.38642458, 29.97836984, 30.56844404, + 31.16265292, 31.74820301, 32.34449825, 32.92261894, 33.47430891, + 34.01492525, 34.56789399, 35.08810845, 35.55965893, 35.94835174, + 36.29355848, 36.45379561, 36.32473812, 36.07552209, 35.92224784, + 35.84144186, 35.78298537, 35.86090718, 36.02440372, 36.37555559, + 37.06207765, 37.73197352, 37.97524273, 38.05560287, 38.21901208, + 38.31486156, 38.30813367, 38.28481808, 38.28410366, 38.25894812, + 38.20583372, 38.22741099, 38.39970022, 38.68367797, 39.08329904, + 39.41434629, 39.424984, 39.31327716, 39.30336335, 39.31714429, + 39.27031932, 39.30848775, 39.48759833, 39.73326595, 39.96187967, + 40.26954226, 40.76882202, 41.40398607, 41.93809726, 42.60395785, + 43.57074792, 44.63816143, 45.61450458, 46.68528511, 47.89209365, 49.15580502, ] ) track_gen["lon"] = np.array( [ - -79.20514075, - -79.25243311, - -79.28393082, - -79.32324646, - -79.36668585, - -79.41495519, - -79.45198688, - -79.40580325, - -79.34965443, - -79.36938122, - -79.30294825, - -79.06809546, - -78.70281969, - -78.29418936, - -77.82170609, - -77.30034709, - -76.79004969, - -76.37038827, - -75.98641014, - -75.58383356, - -75.18310414, - -74.7974524, - -74.3797645, - -73.86393572, - -73.37910948, - -73.01059003, - -72.77051313, - -72.68011328, - -72.66864779, - -72.62579773, - -72.56307717, - -72.46607618, - -72.35871353, - -72.31120649, - -72.15537583, - -71.75577051, - -71.25287498, - -70.75527907, - -70.34788946, - -70.17518421, - -70.04446577, - -69.76582749, - -69.44372386, - -69.15881376, - -68.84351922, - -68.47890287, - -68.04184565, - -67.53541437, - -66.94008642, - -66.25596075, - -65.53496635, - -64.83491802, - -64.12962685, - -63.54118808, - -62.72934383, - -61.34915091, - -59.72580755, - -58.24404252, - -56.71972992, - -55.0809336, + -79.20514075, -79.25243311, -79.28393082, -79.32324646, -79.36668585, + -79.41495519, -79.45198688, -79.40580325, -79.34965443, -79.36938122, + -79.30294825, -79.06809546, -78.70281969, -78.29418936, -77.82170609, + -77.30034709, -76.79004969, -76.37038827, -75.98641014, -75.58383356, + -75.18310414, -74.7974524, -74.3797645, -73.86393572, -73.37910948, + -73.01059003, -72.77051313, -72.68011328, -72.66864779, -72.62579773, + -72.56307717, -72.46607618, -72.35871353, -72.31120649, -72.15537583, + -71.75577051, -71.25287498, -70.75527907, -70.34788946, -70.17518421, + -70.04446577, 
-69.76582749, -69.44372386, -69.15881376, -68.84351922, + -68.47890287, -68.04184565, -67.53541437, -66.94008642, -66.25596075, + -65.53496635, -64.83491802, -64.12962685, -63.54118808, -62.72934383, + -61.34915091, -59.72580755, -58.24404252, -56.71972992, -55.0809336, -53.31524758, ] ) - + # fmt: on v_rel = { 1: 0.002249541544102336, -1: 0.00046889526284203036, diff --git a/climada/hazard/test/test_trop_cyclone.py b/climada/hazard/test/test_trop_cyclone.py index 9996becc3..b04ae3420 100644 --- a/climada/hazard/test/test_trop_cyclone.py +++ b/climada/hazard/test/test_trop_cyclone.py @@ -192,41 +192,24 @@ def test_cross_antimeridian(self): def test_windfield_models(self): """Test _tc_from_track function with different wind field models.""" intensity_idx = [0, 1, 2, 3, 80, 100, 120, 200, 220, 250, 260, 295] + # fmt: off intensity_values = [ ( "H08", None, [ - 22.74903, - 23.784691, - 24.82255, - 22.67403, - 27.218706, - 30.593959, - 18.980878, - 24.540069, - 27.826407, - 26.846293, - 0.0, - 34.568898, + 22.74903, 23.784691, 24.82255, 22.67403, 27.218706, + 30.593959, 18.980878, 24.540069, 27.826407, 26.846293, + 0.0, 34.568898, ], ), ( "H10", None, [ - 24.745521, - 25.596484, - 26.475329, - 24.690914, - 28.650107, - 31.584395, - 21.723546, - 26.140293, - 28.94964, - 28.051915, - 18.49378, - 35.312152, + 24.745521, 25.596484, 26.475329, 24.690914, 28.650107, + 31.584395, 21.723546, 26.140293, 28.94964, 28.051915, + 18.49378, 35.312152, ], ), # The following model configurations use recorded wind speeds, while the above use @@ -235,94 +218,49 @@ def test_windfield_models(self): "H10", dict(vmax_from_cen=False, rho_air_const=1.2), [ - 23.702232, - 24.327615, - 24.947161, - 23.589233, - 26.616085, - 29.389295, - 21.338178, - 24.257067, - 26.472543, - 25.662313, - 18.535842, - 31.886041, + 23.702232, 24.327615, 24.947161, 23.589233, 26.616085, + 29.389295, 21.338178, 24.257067, 26.472543, 25.662313, + 18.535842, 31.886041, ], ), ( "H10", dict(vmax_from_cen=False, rho_air_const=None), [ - 24.244162, - 24.835561, - 25.432454, - 24.139294, - 27.127457, - 29.719196, - 21.910658, - 24.692637, - 26.783575, - 25.971516, - 19.005555, - 31.904048, + 24.244162, 24.835561, 25.432454, 24.139294, 27.127457, + 29.719196, 21.910658, 24.692637, 26.783575, 25.971516, + 19.005555, 31.904048, ], ), ( "H10", dict(vmax_from_cen=False, rho_air_const=None, vmax_in_brackets=True), [ - 23.592924, - 24.208169, - 24.817104, - 23.483053, - 26.468975, - 29.221715, - 21.260867, - 24.150879, - 26.34288, - 25.543635, - 18.487385, - 31.904048, + 23.592924, 24.208169, 24.817104, 23.483053, 26.468975, + 29.221715, 21.260867, 24.150879, 26.34288, 25.543635, + 18.487385, 31.904048, ], ), ( "H1980", None, [ - 21.376807, - 21.957217, - 22.569568, - 21.284351, - 24.254226, - 26.971303, - 19.220149, - 21.984516, - 24.196388, - 23.449116, - 0, - 31.550207, + 21.376807, 21.957217, 22.569568, 21.284351, 24.254226, + 26.971303, 19.220149, 21.984516, 24.196388, 23.449116, + 0, 31.550207, ], ), ( "ER11", None, [ - 23.565332, - 24.931413, - 26.360758, - 23.490333, - 29.601171, - 34.522795, - 18.996389, - 26.102109, - 30.780737, - 29.498453, - 0, - 38.368805, + 23.565332, 24.931413, 26.360758, 23.490333, 29.601171, + 34.522795, 18.996389, 26.102109, 30.780737, 29.498453, + 0, 38.368805, ], ), ] - + # fmt: on tc_track = TCTracks.from_processed_ibtracs_csv(TEST_TRACK) tc_track.equal_timestep() tc_track.data = tc_track.data[:1] diff --git a/climada/util/constants.py b/climada/util/constants.py index a4e595aaa..90f352218 100644 --- 
a/climada/util/constants.py +++ b/climada/util/constants.py @@ -139,239 +139,31 @@ TEST_UNC_OUTPUT_COSTBEN = "test_unc_output_costben" """Demo uncertainty costben output""" +# fmt: off ISIMIP_NATID_TO_ISO = [ - "", - "ABW", - "AFG", - "AGO", - "AIA", - "ALB", - "AND", - "ANT", - "ARE", - "ARG", - "ARM", - "ASM", - "ATG", - "AUS", - "AUT", - "AZE", - "BDI", - "BEL", - "BEN", - "BFA", - "BGD", - "BGR", - "BHR", - "BHS", - "BIH", - "BLR", - "BLZ", - "BMU", - "BOL", - "BRA", - "BRB", - "BRN", - "BTN", - "BWA", - "CAF", - "CAN", - "CHE", - "CHL", - "CHN", - "CIV", - "CMR", - "COD", - "COG", - "COK", - "COL", - "COM", - "CPV", - "CRI", - "CUB", - "CYM", - "CYP", - "CZE", - "DEU", - "DJI", - "DMA", - "DNK", - "DOM", - "DZA", - "ECU", - "EGY", - "ERI", - "ESP", - "EST", - "ETH", - "FIN", - "FJI", - "FLK", - "FRA", - "FRO", - "FSM", - "GAB", - "GBR", - "GEO", - "GGY", - "GHA", - "GIB", - "GIN", - "GLP", - "GMB", - "GNB", - "GNQ", - "GRC", - "GRD", - "GTM", - "GUF", - "GUM", - "GUY", - "HKG", - "HND", - "HRV", - "HTI", - "HUN", - "IDN", - "IMN", - "IND", - "IRL", - "IRN", - "IRQ", - "ISL", - "ISR", - "ITA", - "JAM", - "JEY", - "JOR", - "JPN", - "KAZ", - "KEN", - "KGZ", - "KHM", - "KIR", - "KNA", - "KOR", - "KWT", - "LAO", - "LBN", - "LBR", - "LBY", - "LCA", - "LIE", - "LKA", - "LSO", - "LTU", - "LUX", - "LVA", - "MAC", - "MAR", - "MCO", - "MDA", - "MDG", - "MDV", - "MEX", - "MHL", - "MKD", - "MLI", - "MLT", - "MMR", - "MNG", - "MNP", - "MOZ", - "MRT", - "MSR", - "MTQ", - "MUS", - "MWI", - "MYS", - "MYT", - "NAM", - "NCL", - "NER", - "NFK", - "NGA", - "NIC", - "NIU", - "NLD", - "NOR", - "NPL", - "NRU", - "NZL", - "OMN", - "PAK", - "PAN", - "PCN", - "PER", - "PHL", - "PLW", - "PNG", - "POL", - "PRI", - "PRK", - "PRT", - "PRY", - "PSE", - "PYF", - "QAT", - "REU", - "ROU", - "RUS", - "RWA", - "SAU", - "SCG", - "SDN", - "SEN", - "SGP", - "SHN", - "SJM", - "SLB", - "SLE", - "SLV", - "SMR", - "SOM", - "SPM", - "STP", - "SUR", - "SVK", - "SVN", - "SWE", - "SWZ", - "SYC", - "SYR", - "TCA", - "TCD", - "TGO", - "THA", - "TJK", - "TKL", - "TKM", - "TLS", - "TON", - "TTO", - "TUN", - "TUR", - "TUV", - "TWN", - "TZA", - "UGA", - "UKR", - "URY", - "USA", - "UZB", - "VCT", - "VEN", - "VGB", - "VIR", - "VNM", - "VUT", - "WLF", - "WSM", - "YEM", - "ZAF", - "ZMB", - "ZWE", + "", "ABW", "AFG", "AGO", "AIA", "ALB", "AND", "ANT", "ARE", "ARG", "ARM", + "ASM", "ATG", "AUS", "AUT", "AZE", "BDI", "BEL", "BEN", "BFA", "BGD", "BGR", + "BHR", "BHS", "BIH", "BLR", "BLZ", "BMU", "BOL", "BRA", "BRB", "BRN", "BTN", + "BWA", "CAF", "CAN", "CHE", "CHL", "CHN", "CIV", "CMR", "COD", "COG", "COK", + "COL", "COM", "CPV", "CRI", "CUB", "CYM", "CYP", "CZE", "DEU", "DJI", "DMA", + "DNK", "DOM", "DZA", "ECU", "EGY", "ERI", "ESP", "EST", "ETH", "FIN", "FJI", + "FLK", "FRA", "FRO", "FSM", "GAB", "GBR", "GEO", "GGY", "GHA", "GIB", "GIN", + "GLP", "GMB", "GNB", "GNQ", "GRC", "GRD", "GTM", "GUF", "GUM", "GUY", "HKG", + "HND", "HRV", "HTI", "HUN", "IDN", "IMN", "IND", "IRL", "IRN", "IRQ", "ISL", + "ISR", "ITA", "JAM", "JEY", "JOR", "JPN", "KAZ", "KEN", "KGZ", "KHM", "KIR", + "KNA", "KOR", "KWT", "LAO", "LBN", "LBR", "LBY", "LCA", "LIE", "LKA", "LSO", + "LTU", "LUX", "LVA", "MAC", "MAR", "MCO", "MDA", "MDG", "MDV", "MEX", "MHL", + "MKD", "MLI", "MLT", "MMR", "MNG", "MNP", "MOZ", "MRT", "MSR", "MTQ", "MUS", + "MWI", "MYS", "MYT", "NAM", "NCL", "NER", "NFK", "NGA", "NIC", "NIU", "NLD", + "NOR", "NPL", "NRU", "NZL", "OMN", "PAK", "PAN", "PCN", "PER", "PHL", "PLW", + "PNG", "POL", "PRI", "PRK", "PRT", "PRY", "PSE", "PYF", "QAT", "REU", "ROU", + "RUS", 
"RWA", "SAU", "SCG", "SDN", "SEN", "SGP", "SHN", "SJM", "SLB", "SLE", + "SLV", "SMR", "SOM", "SPM", "STP", "SUR", "SVK", "SVN", "SWE", "SWZ", "SYC", + "SYR", "TCA", "TCD", "TGO", "THA", "TJK", "TKL", "TKM", "TLS", "TON", "TTO", + "TUN", "TUR", "TUV", "TWN", "TZA", "UGA", "UKR", "URY", "USA", "UZB", "VCT", + "VEN", "VGB", "VIR", "VNM", "VUT", "WLF", "WSM", "YEM", "ZAF", "ZMB", "ZWE", ] +# fmt: on """ISO 3166 alpha-3 codes of countries used in ISIMIP_GPWV3_NATID_150AS""" NONISO_REGIONS = [ From 3f4f0a24b616903bdc334f6d876f055621497d09 Mon Sep 17 00:00:00 2001 From: Nicolas Colombi Date: Wed, 23 Oct 2024 11:55:39 +0200 Subject: [PATCH 12/12] black the branch --- .github/ISSUE_TEMPLATE/bug_report.md | 4 +- .github/pull_request_template.md | 4 +- .github/scripts/make_release.py | 4 +- .github/scripts/prepare_release.py | 55 +- .github/scripts/setup_devbranch.py | 22 +- MANIFEST.in | 2 +- climada.conf | 2 +- climada/__init__.py | 65 +- climada/_version.py | 2 +- .../data/demo/demo_emdat_impact_data_2020.csv | 2 +- climada/data/system/GDP_TWN_IMF_WEO_data.csv | 2 +- .../system/WEALTH2GDP_factors_CRI_2016.csv | 2 +- climada/engine/__init__.py | 5 +- climada/engine/calibration_opt.py | 401 ++-- climada/engine/cost_benefit.py | 1037 +++++++--- climada/engine/forecast.py | 61 +- climada/engine/impact.py | 891 +++++--- climada/engine/impact_calc.py | 193 +- climada/engine/impact_data.py | 1030 ++++++---- .../data/emdat_testdata_BGD_USA_1970-2017.csv | 8 +- .../data/emdat_testdata_fake_2007-2011.csv | 2 +- climada/engine/test/test_cost_benefit.py | 1262 ++++++++---- climada/engine/test/test_forecast.py | 311 +-- climada/engine/test/test_impact.py | 317 +-- climada/engine/test/test_impact_calc.py | 444 ++-- climada/engine/test/test_impact_data.py | 258 ++- climada/engine/unsequa/__init__.py | 6 +- climada/engine/unsequa/calc_base.py | 349 ++-- climada/engine/unsequa/calc_cost_benefit.py | 264 +-- climada/engine/unsequa/calc_delta_climate.py | 28 +- climada/engine/unsequa/calc_impact.py | 147 +- climada/engine/unsequa/input_var.py | 391 ++-- climada/engine/unsequa/test/test_unsequa.py | 755 +++---- climada/engine/unsequa/unc_output.py | 639 +++--- climada/entity/__init__.py | 5 +- climada/entity/disc_rates/__init__.py | 1 + climada/entity/disc_rates/base.py | 91 +- climada/entity/disc_rates/test/test_base.py | 89 +- climada/entity/entity_def.py | 25 +- climada/entity/exposures/__init__.py | 2 +- climada/entity/exposures/base.py | 738 ++++--- climada/entity/exposures/litpop/__init__.py | 4 +- .../entity/exposures/litpop/gpw_population.py | 71 +- climada/entity/exposures/litpop/litpop.py | 822 +++++--- climada/entity/exposures/litpop/nightlight.py | 304 +-- climada/entity/exposures/test/test_base.py | 507 +++-- climada/entity/exposures/test/test_litpop.py | 323 +-- climada/entity/exposures/test/test_mat.py | 95 +- .../entity/exposures/test/test_nightlight.py | 85 +- climada/entity/impact_funcs/__init__.py | 1 + climada/entity/impact_funcs/base.py | 95 +- .../entity/impact_funcs/impact_func_set.py | 207 +- climada/entity/impact_funcs/storm_europe.py | 77 +- climada/entity/impact_funcs/test/test_base.py | 65 +- .../impact_funcs/test/test_imp_fun_set.py | 282 +-- climada/entity/impact_funcs/test/test_tc.py | 95 +- climada/entity/impact_funcs/test/test_ws.py | 88 +- climada/entity/impact_funcs/trop_cyclone.py | 732 +++++-- climada/entity/measures/__init__.py | 1 + climada/entity/measures/base.py | 151 +- climada/entity/measures/measure_set.py | 325 +-- climada/entity/measures/test/test_base.py | 756 +++++-- 
climada/entity/measures/test/test_meas_set.py | 379 ++-- climada/entity/tag/__init__.py | 10 +- climada/entity/tag/tag.py | 45 +- climada/entity/tag/test/test_tag.py | 41 +- climada/entity/test/test_entity.py | 30 +- climada/hazard/__init__.py | 7 +- climada/hazard/base.py | 437 ++-- climada/hazard/centroids/__init__.py | 1 + climada/hazard/centroids/centr.py | 356 ++-- climada/hazard/centroids/test/test_centr.py | 564 ++--- climada/hazard/io.py | 483 +++-- climada/hazard/isimip_data.py | 11 +- climada/hazard/plot.py | 132 +- climada/hazard/storm_europe.py | 633 +++--- climada/hazard/tc_clim_change.py | 1172 +++++++++-- climada/hazard/tc_tracks.py | 1715 ++++++++++------ climada/hazard/tc_tracks_synth.py | 828 +++++--- climada/hazard/test/__init__.py | 18 +- climada/hazard/test/data/trac_short_test.csv | 2 +- climada/hazard/test/test_base.py | 958 +++++---- climada/hazard/test/test_io.py | 35 +- climada/hazard/test/test_storm_europe.py | 97 +- climada/hazard/test/test_tc_cc.py | 126 +- climada/hazard/test/test_tc_tracks.py | 1228 ++++++----- climada/hazard/test/test_tc_tracks_synth.py | 829 +++++--- climada/hazard/test/test_trop_cyclone.py | 386 +++- .../test/test_trop_cyclone_windfields.py | 357 ++-- climada/hazard/trop_cyclone/__init__.py | 24 +- climada/hazard/trop_cyclone/trop_cyclone.py | 266 +-- .../trop_cyclone/trop_cyclone_windfields.py | 229 ++- climada/test/__init__.py | 30 +- climada/test/test_api_client.py | 301 ++- climada/test/test_calibration.py | 63 +- climada/test/test_engine.py | 24 +- climada/test/test_hazard.py | 150 +- climada/test/test_litpop_integr.py | 395 ++-- climada/test/test_nightlight.py | 333 +-- climada/test/test_plot.py | 173 +- climada/test/test_util.py | 5 +- climada/test/test_util_calibrate.py | 18 +- climada/util/__init__.py | 5 +- climada/util/api_client.py | 13 +- climada/util/calibrate/__init__.py | 2 +- climada/util/calibrate/base.py | 17 +- climada/util/calibrate/bayesian_optimizer.py | 21 +- climada/util/calibrate/scipy_optimizer.py | 6 +- climada/util/calibrate/test/test_base.py | 10 +- .../calibrate/test/test_bayesian_optimizer.py | 12 +- .../calibrate/test/test_scipy_optimizer.py | 6 +- climada/util/checker.py | 40 +- climada/util/config.py | 101 +- climada/util/constants.py | 1346 +++++++----- climada/util/coordinates.py | 899 +++++--- climada/util/dates_times.py | 12 +- climada/util/dwd_icon_loader.py | 259 +-- climada/util/earth_engine.py | 38 +- climada/util/files_handler.py | 49 +- climada/util/finance.py | 227 ++- climada/util/hdf5_handler.py | 158 +- climada/util/interpolation.py | 103 +- climada/util/lines_polys_handler.py | 237 ++- climada/util/plot.py | 535 +++-- climada/util/save.py | 31 +- climada/util/scalebar_plot.py | 56 +- climada/util/select.py | 4 +- climada/util/test/test__init__.py | 29 +- climada/util/test/test_checker.py | 70 +- climada/util/test/test_config.py | 33 +- climada/util/test/test_coordinates.py | 1807 +++++++++++------ climada/util/test/test_dates_times.py | 47 +- climada/util/test/test_dwd_icon.py | 91 +- climada/util/test/test_files.py | 85 +- climada/util/test/test_finance.py | 127 +- climada/util/test/test_hdf5.py | 140 +- climada/util/test/test_interpolation.py | 125 +- climada/util/test/test_lines_polys_handler.py | 1126 +++++++--- climada/util/test/test_plot.py | 202 +- climada/util/test/test_save.py | 22 +- climada/util/test/test_select.py | 15 +- .../util/test/test_value_representation.py | 116 +- climada/util/test/test_yearsets.py | 62 +- climada/util/value_representation.py | 4 +- 
climada/util/yearsets.py | 105 +- doc/Makefile | 2 +- doc/climada/climada.engine.rst | 1 - doc/climada/climada.entity.disc_rates.rst | 1 - .../climada.entity.exposures.litpop.rst | 1 - doc/climada/climada.entity.exposures.rst | 1 - doc/climada/climada.entity.impact_funcs.rst | 1 - doc/climada/climada.entity.measures.rst | 1 - doc/climada/climada.hazard.centroids.rst | 1 - doc/climada/climada.hazard.rst | 1 - doc/climada/climada.hazard.trop_cyclone.rst | 1 - doc/climada/climada.rst | 1 - doc/climada/climada.util.rst | 1 - doc/conf.py | 135 +- doc/guide/Guide_Configuration.ipynb | 27 +- doc/guide/Guide_Exception_Logging.ipynb | 13 +- doc/guide/Guide_Py_Performance.ipynb | 2 + doc/guide/Guide_PythonDos-n-Donts.ipynb | 14 +- doc/guide/Guide_Testing.ipynb | 11 +- ...ontinuous_integration_GitHub_actions.ipynb | 16 +- doc/index.rst | 2 +- doc/tutorial/0_intro_python.ipynb | 205 +- doc/tutorial/1_main_climada.ipynb | 76 +- doc/tutorial/climada_engine_CostBenefit.ipynb | 173 +- doc/tutorial/climada_engine_Forecast.ipynb | 143 +- doc/tutorial/climada_engine_Impact.ipynb | 168 +- doc/tutorial/climada_engine_impact_data.ipynb | 112 +- doc/tutorial/climada_engine_unsequa.ipynb | 416 ++-- .../climada_engine_unsequa_helper.ipynb | 282 +-- doc/tutorial/climada_entity_DiscRates.ipynb | 19 +- doc/tutorial/climada_entity_Exposures.ipynb | 124 +- ...mada_entity_Exposures_polygons_lines.ipynb | 267 ++- .../climada_entity_ImpactFuncSet.ipynb | 18 +- doc/tutorial/climada_entity_LitPop.ipynb | 139 +- doc/tutorial/climada_entity_MeasureSet.ipynb | 67 +- doc/tutorial/climada_hazard_Hazard.ipynb | 379 ++-- doc/tutorial/climada_hazard_StormEurope.ipynb | 25 +- doc/tutorial/climada_hazard_TropCyclone.ipynb | 93 +- doc/tutorial/climada_util_api_client.ipynb | 83 +- doc/tutorial/climada_util_earth_engine.ipynb | 161 +- doc/tutorial/climada_util_yearsets.ipynb | 28 +- requirements/env_climada.yml | 2 +- .../applications/eca_san_salvador/README.txt | 2 +- .../San_Salvador_Adaptacion.ipynb | 113 +- .../San_Salvador_Adaptation.ipynb | 116 +- .../San_Salvador_Parametric.ipynb | 52 +- .../eca_san_salvador/San_Salvador_Risk.ipynb | 91 +- .../eca_san_salvador/functions_ss.py | 249 ++- script/jenkins/set_config.py | 8 +- script/jenkins/test_data_api.py | 67 +- script/jenkins/test_notebooks.py | 138 +- setup.py | 130 +- 196 files changed, 25494 insertions(+), 14971 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 27760ea62..8c086f8b9 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -12,7 +12,7 @@ A clear and concise description of what the bug is. **To Reproduce** Steps to reproduce the behavior/error: -1. +1. Code example: ```python @@ -29,7 +29,7 @@ If applicable, add screenshots to help explain your problem. **System Information (please complete the following information):** - Operating system and version: [e.g. Ubuntu 22.04, macOS 14.3.1, Windows 10] - - Python version: [e.g. 3.10] + - Python version: [e.g. 
3.10] (to obtain this information execute > import sys >print(sys.version)) **Additional context** diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index ee5328299..b1e66a575 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -1,6 +1,6 @@ Changes proposed in this PR: -- -- +- +- This PR fixes # diff --git a/.github/scripts/make_release.py b/.github/scripts/make_release.py index 5c6260d4d..cdba6755c 100644 --- a/.github/scripts/make_release.py +++ b/.github/scripts/make_release.py @@ -13,9 +13,9 @@ def get_version() -> str: """Return the current version number, based on the _version.py file.""" [version_file] = glob.glob("climada*/_version.py") - with open(version_file, 'r', encoding="UTF-8") as vfp: + with open(version_file, "r", encoding="UTF-8") as vfp: content = vfp.read() - regex = r'^__version__\s*=\s*[\'\"](.*)[\'\"]\s*$' + regex = r"^__version__\s*=\s*[\'\"](.*)[\'\"]\s*$" mtch = re.match(regex, content) return mtch.group(1) diff --git a/.github/scripts/prepare_release.py b/.github/scripts/prepare_release.py index bce483b6f..eb0dd4c2b 100644 --- a/.github/scripts/prepare_release.py +++ b/.github/scripts/prepare_release.py @@ -5,7 +5,7 @@ - update version numbers in _version.py and setup.py - purge the "Unreleased" section of CHANGELOG.md and rename it to the new version number -- copy the README.md file to doc/misc/README.md, +- copy the README.md file to doc/misc/README.md, but without the badges as they interfere with the sphinx doc builder All changes are immediately commited to the repository. @@ -38,28 +38,28 @@ def bump_version_number(version_number: str, level: str) -> str: """Return a copy of `version_number` with one level number incremented.""" major, minor, patch = version_number.split(".") if level == "major": - major = str(int(major)+1) + major = str(int(major) + 1) minor = "0" patch = "0" elif level == "minor": - minor = str(int(minor)+1) + minor = str(int(minor) + 1) patch = "0" elif level == "patch": - patch = str(int(patch)+1) + patch = str(int(patch) + 1) else: raise ValueError(f"level should be 'major', 'minor' or 'patch', not {level}") return ".".join([major, minor, patch]) def update_readme(_nvn): - """align doc/misc/README.md with ./README.md but remove the non-markdown header lines from """ - with open("README.md", 'r', encoding="UTF-8") as rmin: - lines = [line for line in rmin.readlines() if not line.startswith('[![')] + """align doc/misc/README.md with ./README.md but remove the non-markdown header lines from""" + with open("README.md", "r", encoding="UTF-8") as rmin: + lines = [line for line in rmin.readlines() if not line.startswith("[![")] while not lines[0].strip(): lines = lines[1:] - with open("doc/misc/README.md", 'w', encoding="UTF-8") as rmout: + with open("doc/misc/README.md", "w", encoding="UTF-8") as rmout: rmout.writelines(lines) - return GitFile('doc/misc/README.md') + return GitFile("doc/misc/README.md") def update_changelog(nvn): @@ -70,16 +70,16 @@ def update_changelog(nvn): release = [] section_name = None section = [] - with open("CHANGELOG.md", 'r', encoding="UTF-8") as changelog: + with open("CHANGELOG.md", "r", encoding="UTF-8") as changelog: for line in changelog.readlines(): - if line.startswith('#'): - if line.startswith('### '): + if line.startswith("#"): + if line.startswith("### "): if section: release.append((section_name, section)) section_name = line[4:].strip() section = [] - #print("tag:", section_name) - elif line.startswith('## '): + # print("tag:", 
section_name) + elif line.startswith("## "): if section: release.append((section_name, section)) if release: @@ -88,7 +88,7 @@ def update_changelog(nvn): release = [] section_name = None section = [] - #print("release:", release_name) + # print("release:", release_name) else: section.append(line) if section: @@ -96,7 +96,7 @@ def update_changelog(nvn): if release: releases.append((release_name, release)) - with open("CHANGELOG.md", 'w', encoding="UTF-8") as changelog: + with open("CHANGELOG.md", "w", encoding="UTF-8") as changelog: changelog.write("# Changelog\n\n") for release_name, release in releases: if release_name: @@ -107,7 +107,11 @@ def update_changelog(nvn): if any(ln.strip() for ln in section): if section_name: changelog.write(f"### {section_name}\n") - lines = [ln.strip() for ln in section if "code freeze date: " not in ln.lower()] + lines = [ + ln.strip() + for ln in section + if "code freeze date: " not in ln.lower() + ] if not section_name and release_name.lower() == nvn: print("setting date") for i, line in enumerate(lines): @@ -116,26 +120,26 @@ def update_changelog(nvn): lines[i] = f"Release date: {today}" changelog.write(re.sub("\n+$", "\n", "\n".join(lines))) changelog.write("\n") - return GitFile('CHANGELOG.md') + return GitFile("CHANGELOG.md") def update_version(nvn): """Update the _version.py file""" [file_with_version] = glob.glob("climada*/_version.py") - regex = r'(^__version__\s*=\s*[\'\"]).*([\'\"]\s*$)' + regex = r"(^__version__\s*=\s*[\'\"]).*([\'\"]\s*$)" return update_file(file_with_version, regex, nvn) def update_setup(new_version_number): """Update the setup.py file""" file_with_version = "setup.py" - regex = r'(^\s+version\s*=\s*[\'\"]).*([\'\"]\s*,\s*$)' + regex = r"(^\s+version\s*=\s*[\'\"]).*([\'\"]\s*,\s*$)" return update_file(file_with_version, regex, new_version_number) def update_file(file_with_version, regex, new_version_number): """Replace the version number(s) in a file, based on a rgular expression.""" - with open(file_with_version, 'r', encoding="UTF-8") as curf: + with open(file_with_version, "r", encoding="UTF-8") as curf: lines = curf.readlines() successfully_updated = False for i, line in enumerate(lines): @@ -145,14 +149,15 @@ def update_file(file_with_version, regex, new_version_number): successfully_updated = True if not successfully_updated: raise RuntimeError(f"cannot determine version of {file_with_version}") - with open(file_with_version, 'w', encoding="UTF-8") as newf: + with open(file_with_version, "w", encoding="UTF-8") as newf: for line in lines: newf.write(line) return GitFile(file_with_version) -class GitFile(): +class GitFile: """Helper class for `git add`.""" + def __init__(self, path): self.path = path @@ -166,8 +171,9 @@ def gitadd(self): ).stdout.decode("utf8") -class Git(): +class Git: """Helper class for `git commit`.""" + def __init__(self): _gitname = subprocess.run( ["git", "config", "--global", "user.name", "'climada'"], @@ -228,6 +234,7 @@ def prepare_new_release(level): if __name__ == "__main__": from sys import argv + try: LEVEL = argv[1] except IndexError: diff --git a/.github/scripts/setup_devbranch.py b/.github/scripts/setup_devbranch.py index 001390fa0..36c9e6c78 100644 --- a/.github/scripts/setup_devbranch.py +++ b/.github/scripts/setup_devbranch.py @@ -33,14 +33,15 @@ def get_last_version() -> str: def update_changelog(): """Insert a vanilla "Unreleased" section on top.""" - with open("CHANGELOG.md", 'r', encoding="UTF-8") as changelog: + with open("CHANGELOG.md", "r", encoding="UTF-8") as changelog: lines 
= changelog.readlines() if "## Unreleased" in lines: return - with open("CHANGELOG.md", 'w', encoding="UTF-8") as changelog: - changelog.write("""# Changelog + with open("CHANGELOG.md", "w", encoding="UTF-8") as changelog: + changelog.write( + """# Changelog ## Unreleased @@ -62,27 +63,28 @@ def update_changelog(): ### Removed -""") +""" + ) changelog.writelines(lines[2:]) def update_version(nvn): """Update the _version.py file""" [file_with_version] = glob.glob("climada*/_version.py") - regex = r'(^__version__\s*=\s*[\'\"]).*([\'\"]\s*$)' + regex = r"(^__version__\s*=\s*[\'\"]).*([\'\"]\s*$)" return update_file(file_with_version, regex, nvn) def update_setup(new_version_number): """Update the setup.py file""" file_with_version = "setup.py" - regex = r'(^\s+version\s*=\s*[\'\"]).*([\'\"]\s*,\s*$)' + regex = r"(^\s+version\s*=\s*[\'\"]).*([\'\"]\s*,\s*$)" return update_file(file_with_version, regex, new_version_number) def update_file(file_with_version, regex, new_version_number): """Replace the version number(s) in a file, based on a rgular expression.""" - with open(file_with_version, 'r', encoding="UTF-8") as curf: + with open(file_with_version, "r", encoding="UTF-8") as curf: lines = curf.readlines() successfully_updated = False for i, line in enumerate(lines): @@ -92,7 +94,7 @@ def update_file(file_with_version, regex, new_version_number): successfully_updated = True if not successfully_updated: raise RuntimeError(f"cannot determine version of {file_with_version}") - with open(file_with_version, 'w', encoding="UTF-8") as newf: + with open(file_with_version, "w", encoding="UTF-8") as newf: for line in lines: newf.write(line) @@ -100,10 +102,10 @@ def update_file(file_with_version, regex, new_version_number): def setup_devbranch(): """Adjust files after a release was published, i.e., apply the canonical deviations from main in develop. - + Just changes files, all `git` commands are in the setup_devbranch.sh file. 
""" - main_version = get_last_version().strip('v') + main_version = get_last_version().strip("v") semver = main_version.split(".") semver[-1] = f"{int(semver[-1]) + 1}-dev" dev_version = ".".join(semver) diff --git a/MANIFEST.in b/MANIFEST.in index 2c9965a94..fff806f53 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -4,4 +4,4 @@ graft climada/*/test/data graft climada/test/data graft data global-exclude .* -global-exclude *.py[co] \ No newline at end of file +global-exclude *.py[co] diff --git a/climada.conf b/climada.conf index 3d07e07ca..367928405 100644 --- a/climada.conf +++ b/climada.conf @@ -27,4 +27,4 @@ "supported_exposures_types": ["litpop", "crop_production", "base"] }, "log_level": "INFO" -} \ No newline at end of file +} diff --git a/climada/__init__.py b/climada/__init__.py index 8fc4b8764..4a10de199 100755 --- a/climada/__init__.py +++ b/climada/__init__.py @@ -18,17 +18,17 @@ climada init """ -from shutil import copyfile + from pathlib import Path +from shutil import copyfile from .util.config import CONFIG from .util.constants import * - -GSDP_DIR = SYSTEM_DIR.joinpath('GSDP') +GSDP_DIR = SYSTEM_DIR.joinpath("GSDP") REPO_DATA = { - 'climada/data/system': [ + "climada/data/system": [ ISIMIP_GPWV3_NATID_150AS, GLB_CENTROIDS_MAT, ENT_TEMPLATE_XLS, @@ -36,20 +36,34 @@ RIVER_FLOOD_REGIONS_CSV, NATEARTH_CENTROIDS[150], NATEARTH_CENTROIDS[360], - SYSTEM_DIR.joinpath('WEALTH2GDP_factors_CRI_2016.csv'), - SYSTEM_DIR.joinpath('GDP_TWN_IMF_WEO_data.csv'), - SYSTEM_DIR.joinpath('FAOSTAT_data_country_codes.csv'), - SYSTEM_DIR.joinpath('rcp_db.xls'), - SYSTEM_DIR.joinpath('tc_impf_cal_v01_TDR1.0.csv'), - SYSTEM_DIR.joinpath('tc_impf_cal_v01_EDR.csv'), - SYSTEM_DIR.joinpath('tc_impf_cal_v01_RMSF.csv'), + SYSTEM_DIR.joinpath("WEALTH2GDP_factors_CRI_2016.csv"), + SYSTEM_DIR.joinpath("GDP_TWN_IMF_WEO_data.csv"), + SYSTEM_DIR.joinpath("FAOSTAT_data_country_codes.csv"), + SYSTEM_DIR.joinpath("rcp_db.xls"), + SYSTEM_DIR.joinpath("tc_impf_cal_v01_TDR1.0.csv"), + SYSTEM_DIR.joinpath("tc_impf_cal_v01_EDR.csv"), + SYSTEM_DIR.joinpath("tc_impf_cal_v01_RMSF.csv"), ], - 'climada/data/system/GSDP': [ - GSDP_DIR.joinpath(f'{cc}_GSDP.xls') - for cc in ['AUS', 'BRA', 'CAN', 'CHE', 'CHN', 'DEU', 'FRA', 'IDN', 'IND', 'JPN', 'MEX', - 'TUR', 'USA', 'ZAF'] + "climada/data/system/GSDP": [ + GSDP_DIR.joinpath(f"{cc}_GSDP.xls") + for cc in [ + "AUS", + "BRA", + "CAN", + "CHE", + "CHN", + "DEU", + "FRA", + "IDN", + "IND", + "JPN", + "MEX", + "TUR", + "USA", + "ZAF", + ] ], - 'climada/data/demo': [ + "climada/data/demo": [ ENT_DEMO_TODAY, ENT_DEMO_FUTURE, EXP_DEMO_H5, @@ -57,9 +71,10 @@ HAZ_DEMO_MAT, HAZ_DEMO_H5, TC_ANDREW_FL, - DEMO_DIR.joinpath('demo_emdat_impact_data_2020.csv'), - DEMO_DIR.joinpath('nl_rails.gpkg'), - ] + WS_DEMO_NC + DEMO_DIR.joinpath("demo_emdat_impact_data_2020.csv"), + DEMO_DIR.joinpath("nl_rails.gpkg"), + ] + + WS_DEMO_NC, } @@ -68,10 +83,13 @@ def test_installation(): If the invoked tests pass and an OK is printed out, the installation was successfull. 
""" from unittest import TestLoader, TextTestRunner - suite = TestLoader().discover(start_dir='climada.engine.test', - pattern='test_cost_benefit.py') - suite.addTest(TestLoader().discover(start_dir='climada.engine.test', - pattern='test_impact.py')) + + suite = TestLoader().discover( + start_dir="climada.engine.test", pattern="test_cost_benefit.py" + ) + suite.addTest( + TestLoader().discover(start_dir="climada.engine.test", pattern="test_impact.py") + ) TextTestRunner(verbosity=2).run(suite) @@ -98,4 +116,5 @@ def setup_climada_data(reload=False): src = Path(__file__).parent.parent.joinpath(src_dir, path.name) copyfile(src, path) + setup_climada_data() diff --git a/climada/_version.py b/climada/_version.py index 80952dacb..824c821f5 100644 --- a/climada/_version.py +++ b/climada/_version.py @@ -1 +1 @@ -__version__ = '5.0.1-dev' +__version__ = "5.0.1-dev" diff --git a/climada/data/demo/demo_emdat_impact_data_2020.csv b/climada/data/demo/demo_emdat_impact_data_2020.csv index 55c72eaf4..3cf4f5c85 100644 --- a/climada/data/demo/demo_emdat_impact_data_2020.csv +++ b/climada/data/demo/demo_emdat_impact_data_2020.csv @@ -1073,4 +1073,4 @@ Dis No,Year,Seq,Disaster Group,Disaster Subgroup,Disaster Type,Disaster Subtype, 2020-0132-TON,2020,0132,Natural,Meteorological,Storm,Tropical cyclone,,Cyclone 'Harold',--,Tonga,TON,Polynesia,Oceania,"Tongatapu, 'Eua",,,,,,,,,Kph,,,,,2020,4,6,2020,4,9,,,1289,,1289,,,111000, 2020-0015-TUV,2020,0015,Natural,Meteorological,Storm,Tropical cyclone,,Cyclone 'Tino',Affected,Tuvalu,TUV,Polynesia,Oceania,,,,,,,Yes,,,Kph,,,,,2020,1,18,2020,1,18,,,,,,,,, 2020-0219-USA,2020,0219,Natural,Meteorological,Storm,Tropical cyclone,,Tropical storm 'Cristobal',Affected,United States of America (the),USA,Northern America,Americas,"errebonne, Plaquemines, Lafourche Parishes (Louisiana)",,,,,,Yes,,80,Kph,,,,,2020,6,7,2020,6,7,,,,,,,,, -2020-0132-VUT,2020,0132,Natural,Meteorological,Storm,Tropical cyclone,,Cyclone 'Harold',--,Vanuatu,VUT,Melanesia,Oceania,Pentecost and Espiritu Santo,,,,,,,,,Kph,,,,,2020,4,6,2020,4,9,4,,,,,,,, \ No newline at end of file +2020-0132-VUT,2020,0132,Natural,Meteorological,Storm,Tropical cyclone,,Cyclone 'Harold',--,Vanuatu,VUT,Melanesia,Oceania,Pentecost and Espiritu Santo,,,,,,,,,Kph,,,,,2020,4,6,2020,4,9,4,,,,,,,, diff --git a/climada/data/system/GDP_TWN_IMF_WEO_data.csv b/climada/data/system/GDP_TWN_IMF_WEO_data.csv index e0acd9898..e39f4cb62 100644 --- a/climada/data/system/GDP_TWN_IMF_WEO_data.csv +++ b/climada/data/system/GDP_TWN_IMF_WEO_data.csv @@ -3,4 +3,4 @@ TWN,Taiwan Province of China,"Gross domestic product, current prices",U.S. dolla TWN,Taiwan Province of China,"Gross domestic product, deflator",Index,,"See notes for: Gross domestic product, constant prices (National currency) Gross domestic product, current prices (National currency).",69.946,77.417,79.33,81.444,82.495,82.523,86.575,86.605,86.657,88.892,93.472,96.725,99.824,103.299,105.065,107.554,110.062,112.506,116.182,113.911,112.88,112.189,111.733,110.174,109.894,108.209,107.095,106.638,103.869,104.003,102.405,100,100.543,102.019,103.749,107.128,108.085,106.84,105.834,106.337,106.484,107.149,108.054,109.026,109.951,2018 TWN,Taiwan Province of China,"Gross domestic product per capita, current prices",U.S. 
dollars,Units,"See notes for: Gross domestic product, current prices (National currency) Population (Persons).","2,367.600","2,692.406","2,675.823","2,882.402","3,203.468","3,295.112","4,010.111","5,325.216","6,337.499","7,577.046","8,178.152","9,092.297","10,725.702","11,266.123","12,108.752","13,076.007","13,597.248","13,968.097","12,787.258","13,768.274","14,876.879","13,408.383","13,715.525","14,094.370","15,360.724","16,503.313","16,984.540","17,780.925","18,102.946","16,959.775","19,261.667","20,911.643","21,269.614","21,887.992","22,638.917","22,373.564","22,572.702","24,389.677","25,007.747","24,827.898","25,525.806","26,861.070","28,324.425","29,870.221","31,483.799",2018 ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, -"International Monetary Fund, World Economic Outlook Database, October 2019",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, \ No newline at end of file +"International Monetary Fund, World Economic Outlook Database, October 2019",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, diff --git a/climada/data/system/WEALTH2GDP_factors_CRI_2016.csv b/climada/data/system/WEALTH2GDP_factors_CRI_2016.csv index f63f0453a..8e8bb97c9 100644 --- a/climada/data/system/WEALTH2GDP_factors_CRI_2016.csv +++ b/climada/data/system/WEALTH2GDP_factors_CRI_2016.csv @@ -169,4 +169,4 @@ Venezuela,VEN,0.29407,0.35328 Vietnam,VNM,1.23241,1.66724 Yemen,YEM,1.18584,1.76063 Zambia,ZMB,0.10663,0.32193 -Zimbabwe,ZWE,0.20161,1.65566 \ No newline at end of file +Zimbabwe,ZWE,0.20161,1.65566 diff --git a/climada/engine/__init__.py b/climada/engine/__init__.py index 5ed316ca2..ef8292f75 100755 --- a/climada/engine/__init__.py +++ b/climada/engine/__init__.py @@ -18,6 +18,7 @@ init engine """ -from .impact import * + from .cost_benefit import * -from .impact_calc import * \ No newline at end of file +from .impact import * +from .impact_calc import * diff --git a/climada/engine/calibration_opt.py b/climada/engine/calibration_opt.py index ab9d6a688..5f174b5f7 100644 --- a/climada/engine/calibration_opt.py +++ b/climada/engine/calibration_opt.py @@ -20,55 +20,61 @@ Optimization and manual calibration """ -import datetime as dt import copy +import datetime as dt import itertools import logging + import numpy as np import pandas as pd from scipy import interpolate from scipy.optimize import minimize from climada.engine import ImpactCalc +from climada.engine.impact_data import emdat_impact_yearlysum # , emdat_impact_event from climada.entity import ImpactFuncSet, ImpfTropCyclone, impact_funcs -from climada.engine.impact_data import emdat_impact_yearlysum #, emdat_impact_event LOGGER = logging.getLogger(__name__) +def calib_instance( + hazard, + exposure, + impact_func, + df_out=pd.DataFrame(), + yearly_impact=False, + return_cost="False", +): + """calculate one impact instance for the calibration algorithm and write + to given DataFrame -def calib_instance(hazard, exposure, impact_func, df_out=pd.DataFrame(), - yearly_impact=False, return_cost='False'): + Parameters + ---------- + hazard : Hazard + exposure : Exposure + impact_func : ImpactFunc + df_out : Dataframe, optional + Output DataFrame with headers of columns defined and optionally with + first row (index=0) defined with values. If columns "impact", + "event_id", or "year" are not included, they are created here. + Data like reported impacts or impact function parameters can be + given here; values are preserved. 
+ yearly_impact : boolean, optional + if set True, impact is returned per year, not per event + return_cost : str, optional + if not 'False' but any of 'R2', 'logR2', + cost is returned instead of df_out - """calculate one impact instance for the calibration algorithm and write - to given DataFrame - - Parameters - ---------- - hazard : Hazard - exposure : Exposure - impact_func : ImpactFunc - df_out : Dataframe, optional - Output DataFrame with headers of columns defined and optionally with - first row (index=0) defined with values. If columns "impact", - "event_id", or "year" are not included, they are created here. - Data like reported impacts or impact function parameters can be - given here; values are preserved. - yearly_impact : boolean, optional - if set True, impact is returned per year, not per event - return_cost : str, optional - if not 'False' but any of 'R2', 'logR2', - cost is returned instead of df_out - - Returns - ------- - df_out: DataFrame - DataFrame with modelled impact written to rows for each year - or event. + Returns + ------- + df_out: DataFrame + DataFrame with modelled impact written to rows for each year + or event. """ ifs = ImpactFuncSet([impact_func]) - impacts = ImpactCalc(exposures=exposure, impfset=ifs, hazard=hazard)\ - .impact(assign_centroids=False) + impacts = ImpactCalc(exposures=exposure, impfset=ifs, hazard=hazard).impact( + assign_centroids=False + ) if yearly_impact: # impact per year iys = impacts.impact_per_year(all_years=True) # Loop over whole year range: @@ -77,43 +83,49 @@ def calib_instance(hazard, exposure, impact_func, df_out=pd.DataFrame(), if cnt_ > 0: df_out.loc[cnt_] = df_out.loc[0] # copy info from first row if year in iys: - df_out.loc[cnt_, 'impact_CLIMADA'] = iys[year] + df_out.loc[cnt_, "impact_CLIMADA"] = iys[year] else: - df_out.loc[cnt_, 'impact_CLIMADA'] = 0.0 - df_out.loc[cnt_, 'year'] = year + df_out.loc[cnt_, "impact_CLIMADA"] = 0.0 + df_out.loc[cnt_, "year"] = year else: - years_in_common = df_out.loc[df_out['year'].isin(np.sort(list((iys.keys())))), 'year'] + years_in_common = df_out.loc[ + df_out["year"].isin(np.sort(list((iys.keys())))), "year" + ] for cnt_, year in years_in_common.iteritems(): - df_out.loc[df_out['year'] == year, 'impact_CLIMADA'] = iys[year] - + df_out.loc[df_out["year"] == year, "impact_CLIMADA"] = iys[year] else: # impact per event if df_out.empty | df_out.index.shape[0] == 1: for cnt_, impact in enumerate(impacts.at_event): if cnt_ > 0: df_out.loc[cnt_] = df_out.loc[0] # copy info from first row - df_out.loc[cnt_, 'impact_CLIMADA'] = impact - df_out.loc[cnt_, 'event_id'] = int(impacts.event_id[cnt_]) - df_out.loc[cnt_, 'event_name'] = impacts.event_name[cnt_] - df_out.loc[cnt_, 'year'] = \ - dt.datetime.fromordinal(impacts.date[cnt_]).year - df_out.loc[cnt_, 'date'] = impacts.date[cnt_] + df_out.loc[cnt_, "impact_CLIMADA"] = impact + df_out.loc[cnt_, "event_id"] = int(impacts.event_id[cnt_]) + df_out.loc[cnt_, "event_name"] = impacts.event_name[cnt_] + df_out.loc[cnt_, "year"] = dt.datetime.fromordinal( + impacts.date[cnt_] + ).year + df_out.loc[cnt_, "date"] = impacts.date[cnt_] elif df_out.index.shape[0] == impacts.at_event.shape[0]: for cnt_, (impact, ind) in enumerate(zip(impacts.at_event, df_out.index)): - df_out.loc[ind, 'impact_CLIMADA'] = impact - df_out.loc[ind, 'event_id'] = int(impacts.event_id[cnt_]) - df_out.loc[ind, 'event_name'] = impacts.event_name[cnt_] - df_out.loc[ind, 'year'] = \ - dt.datetime.fromordinal(impacts.date[cnt_]).year - df_out.loc[ind, 'date'] = impacts.date[cnt_] 
+ df_out.loc[ind, "impact_CLIMADA"] = impact + df_out.loc[ind, "event_id"] = int(impacts.event_id[cnt_]) + df_out.loc[ind, "event_name"] = impacts.event_name[cnt_] + df_out.loc[ind, "year"] = dt.datetime.fromordinal( + impacts.date[cnt_] + ).year + df_out.loc[ind, "date"] = impacts.date[cnt_] else: - raise ValueError('adding simulated impacts to reported impacts not' - ' yet implemented. use yearly_impact=True or run' - ' without init_impact_data.') - if return_cost != 'False': + raise ValueError( + "adding simulated impacts to reported impacts not" + " yet implemented. use yearly_impact=True or run" + " without init_impact_data." + ) + if return_cost != "False": df_out = calib_cost_calc(df_out, return_cost) return df_out + def init_impf(impf_name_or_instance, param_dict, df_out=pd.DataFrame(index=[0])): """create an ImpactFunc based on the parameters in param_dict using the method specified in impf_parameterisation_name and document it in df_out. @@ -139,20 +151,21 @@ def init_impf(impf_name_or_instance, param_dict, df_out=pd.DataFrame(index=[0])) """ impact_func_final = None if isinstance(impf_name_or_instance, str): - if impf_name_or_instance == 'emanuel': + if impf_name_or_instance == "emanuel": impact_func_final = ImpfTropCyclone.from_emanuel_usa(**param_dict) - impact_func_final.haz_type = 'TC' + impact_func_final.haz_type = "TC" impact_func_final.id = 1 - df_out['impact_function'] = impf_name_or_instance + df_out["impact_function"] = impf_name_or_instance elif isinstance(impf_name_or_instance, impact_funcs.ImpactFunc): impact_func_final = change_impf(impf_name_or_instance, param_dict) - df_out['impact_function'] = ('given_' + - impact_func_final.haz_type + - str(impact_func_final.id)) + df_out["impact_function"] = ( + "given_" + impact_func_final.haz_type + str(impact_func_final.id) + ) for key, val in param_dict.items(): df_out[key] = val return impact_func_final, df_out + def change_impf(impf_instance, param_dict): """apply a shifting or a scaling defined in param_dict to the impact function in impf_istance and return it as a new ImpactFunc object. 
@@ -173,60 +186,71 @@ def change_impf(impf_instance, param_dict): """ ImpactFunc_new = copy.deepcopy(impf_instance) # create higher resolution impact functions (intensity, mdd ,paa) - paa_func = interpolate.interp1d(ImpactFunc_new.intensity, - ImpactFunc_new.paa, - fill_value='extrapolate') - mdd_func = interpolate.interp1d(ImpactFunc_new.intensity, - ImpactFunc_new.mdd, - fill_value='extrapolate') + paa_func = interpolate.interp1d( + ImpactFunc_new.intensity, ImpactFunc_new.paa, fill_value="extrapolate" + ) + mdd_func = interpolate.interp1d( + ImpactFunc_new.intensity, ImpactFunc_new.mdd, fill_value="extrapolate" + ) temp_dict = dict() - temp_dict['paa_intensity_ext'] = np.linspace(ImpactFunc_new.intensity.min(), - ImpactFunc_new.intensity.max(), - (ImpactFunc_new.intensity.shape[0] + 1) * 10 + 1) - temp_dict['mdd_intensity_ext'] = np.linspace(ImpactFunc_new.intensity.min(), - ImpactFunc_new.intensity.max(), - (ImpactFunc_new.intensity.shape[0] + 1) * 10 + 1) - temp_dict['paa_ext'] = paa_func(temp_dict['paa_intensity_ext']) - temp_dict['mdd_ext'] = mdd_func(temp_dict['mdd_intensity_ext']) + temp_dict["paa_intensity_ext"] = np.linspace( + ImpactFunc_new.intensity.min(), + ImpactFunc_new.intensity.max(), + (ImpactFunc_new.intensity.shape[0] + 1) * 10 + 1, + ) + temp_dict["mdd_intensity_ext"] = np.linspace( + ImpactFunc_new.intensity.min(), + ImpactFunc_new.intensity.max(), + (ImpactFunc_new.intensity.shape[0] + 1) * 10 + 1, + ) + temp_dict["paa_ext"] = paa_func(temp_dict["paa_intensity_ext"]) + temp_dict["mdd_ext"] = mdd_func(temp_dict["mdd_intensity_ext"]) # apply changes given in param_dict for key, val in param_dict.items(): - field_key, action = key.split('_') - if action == 'shift': - shift_absolut = ( - ImpactFunc_new.intensity[np.nonzero(getattr(ImpactFunc_new, field_key))[0][0]] - * (val - 1)) - temp_dict[field_key + '_intensity_ext'] = \ - temp_dict[field_key + '_intensity_ext'] + shift_absolut - elif action == 'scale': - temp_dict[field_key + '_ext'] = \ - np.clip(temp_dict[field_key + '_ext'] * val, - a_min=0, - a_max=1) + field_key, action = key.split("_") + if action == "shift": + shift_absolut = ImpactFunc_new.intensity[ + np.nonzero(getattr(ImpactFunc_new, field_key))[0][0] + ] * (val - 1) + temp_dict[field_key + "_intensity_ext"] = ( + temp_dict[field_key + "_intensity_ext"] + shift_absolut + ) + elif action == "scale": + temp_dict[field_key + "_ext"] = np.clip( + temp_dict[field_key + "_ext"] * val, a_min=0, a_max=1 + ) else: - raise AttributeError('keys in param_dict not recognized. Use only:' - 'paa_shift, paa_scale, mdd_shift, mdd_scale') + raise AttributeError( + "keys in param_dict not recognized. 
Use only:" + "paa_shift, paa_scale, mdd_shift, mdd_scale" + ) # map changed, high resolution impact functions back to initial resolution - ImpactFunc_new.intensity = np.linspace(ImpactFunc_new.intensity.min(), - ImpactFunc_new.intensity.max(), - (ImpactFunc_new.intensity.shape[0] + 1) * 10 + 1) - paa_func_new = interpolate.interp1d(temp_dict['paa_intensity_ext'], - temp_dict['paa_ext'], - fill_value='extrapolate') - mdd_func_new = interpolate.interp1d(temp_dict['mdd_intensity_ext'], - temp_dict['mdd_ext'], - fill_value='extrapolate') + ImpactFunc_new.intensity = np.linspace( + ImpactFunc_new.intensity.min(), + ImpactFunc_new.intensity.max(), + (ImpactFunc_new.intensity.shape[0] + 1) * 10 + 1, + ) + paa_func_new = interpolate.interp1d( + temp_dict["paa_intensity_ext"], temp_dict["paa_ext"], fill_value="extrapolate" + ) + mdd_func_new = interpolate.interp1d( + temp_dict["mdd_intensity_ext"], temp_dict["mdd_ext"], fill_value="extrapolate" + ) ImpactFunc_new.paa = paa_func_new(ImpactFunc_new.intensity) ImpactFunc_new.mdd = mdd_func_new(ImpactFunc_new.intensity) return ImpactFunc_new -def init_impact_data(hazard_type, - region_ids, - year_range, - source_file, - reference_year, - impact_data_source='emdat', - yearly_impact=True): + +def init_impact_data( + hazard_type, + region_ids, + year_range, + source_file, + reference_year, + impact_data_source="emdat", + yearly_impact=True, +): """creates a dataframe containing the recorded impact data for one hazard type and one area (countries, country or local split) @@ -253,18 +277,25 @@ def init_impact_data(hazard_type, Dataframe with recorded impact written to rows for each year or event. """ - if impact_data_source == 'emdat': + if impact_data_source == "emdat": if yearly_impact: - em_data = emdat_impact_yearlysum(source_file, countries=region_ids, - hazard=hazard_type, - year_range=year_range, - reference_year=reference_year) + em_data = emdat_impact_yearlysum( + source_file, + countries=region_ids, + hazard=hazard_type, + year_range=year_range, + reference_year=reference_year, + ) else: - raise ValueError('init_impact_data not yet implemented for yearly_impact = False.') - #em_data = emdat_impact_event(source_file) + raise ValueError( + "init_impact_data not yet implemented for yearly_impact = False." + ) + # em_data = emdat_impact_event(source_file) else: - raise ValueError('init_impact_data not yet implemented for other impact_data_sources ' - 'than emdat.') + raise ValueError( + "init_impact_data not yet implemented for other impact_data_sources " + "than emdat." 
+ ) return em_data @@ -285,23 +316,34 @@ def calib_cost_calc(df_out, cost_function): The results of the cost function when comparing modelled and reported impact """ - if cost_function == 'R2': - cost = np.sum((pd.to_numeric(df_out['impact_scaled']) - - pd.to_numeric(df_out['impact_CLIMADA']))**2) - elif cost_function == 'logR2': - impact1 = pd.to_numeric(df_out['impact_scaled']) + if cost_function == "R2": + cost = np.sum( + ( + pd.to_numeric(df_out["impact_scaled"]) + - pd.to_numeric(df_out["impact_CLIMADA"]) + ) + ** 2 + ) + elif cost_function == "logR2": + impact1 = pd.to_numeric(df_out["impact_scaled"]) impact1[impact1 <= 0] = 1 - impact2 = pd.to_numeric(df_out['impact_CLIMADA']) + impact2 = pd.to_numeric(df_out["impact_CLIMADA"]) impact2[impact2 <= 0] = 1 - cost = np.sum((np.log(impact1) - - np.log(impact2))**2) + cost = np.sum((np.log(impact1) - np.log(impact2)) ** 2) else: - raise ValueError('This cost function is not implemented.') + raise ValueError("This cost function is not implemented.") return cost -def calib_all(hazard, exposure, impf_name_or_instance, param_full_dict, - impact_data_source, year_range, yearly_impact=True): +def calib_all( + hazard, + exposure, + impf_name_or_instance, + param_full_dict, + impact_data_source, + year_range, + yearly_impact=True, +): """portrait the difference between modelled and reported impacts for all impact functions described in param_full_dict and impf_name_or_instance @@ -337,30 +379,46 @@ def calib_all(hazard, exposure, impf_name_or_instance, param_full_dict, if isinstance(impact_data_source, pd.DataFrame): df_impact_data = impact_data_source else: - if list(impact_data_source.keys()) == ['emdat']: - df_impact_data = init_impact_data(hazard_type, region_ids, year_range, - impact_data_source['emdat'], year_range[-1]) + if list(impact_data_source.keys()) == ["emdat"]: + df_impact_data = init_impact_data( + hazard_type, + region_ids, + year_range, + impact_data_source["emdat"], + year_range[-1], + ) else: - raise ValueError('other impact data sources not yet implemented.') - params_generator = (dict(zip(param_full_dict, x)) - for x in itertools.product(*param_full_dict.values())) + raise ValueError("other impact data sources not yet implemented.") + params_generator = ( + dict(zip(param_full_dict, x)) + for x in itertools.product(*param_full_dict.values()) + ) for param_dict in params_generator: print(param_dict) df_out = copy.deepcopy(df_impact_data) impact_func_final, df_out = init_impf(impf_name_or_instance, param_dict, df_out) - df_out = calib_instance(hazard, exposure, impact_func_final, df_out, yearly_impact) + df_out = calib_instance( + hazard, exposure, impact_func_final, df_out, yearly_impact + ) if df_result is None: df_result = copy.deepcopy(df_out) else: df_result = df_result.append(df_out, input) - return df_result -def calib_optimize(hazard, exposure, impf_name_or_instance, param_dict, - impact_data_source, year_range, yearly_impact=True, - cost_fucntion='R2', show_details=False): +def calib_optimize( + hazard, + exposure, + impf_name_or_instance, + param_dict, + impact_data_source, + year_range, + yearly_impact=True, + cost_fucntion="R2", + show_details=False, +): """portrait the difference between modelled and reported impacts for all impact functions described in param_full_dict and impf_name_or_instance @@ -403,48 +461,67 @@ def calib_optimize(hazard, exposure, impf_name_or_instance, param_dict, if isinstance(impact_data_source, pd.DataFrame): df_impact_data = impact_data_source else: - if list(impact_data_source.keys()) 
== ['emdat']: - df_impact_data = init_impact_data(hazard_type, region_ids, year_range, - impact_data_source['emdat'], year_range[-1]) + if list(impact_data_source.keys()) == ["emdat"]: + df_impact_data = init_impact_data( + hazard_type, + region_ids, + year_range, + impact_data_source["emdat"], + year_range[-1], + ) else: - raise ValueError('other impact data sources not yet implemented.') + raise ValueError("other impact data sources not yet implemented.") + # definie specific function to def specific_calib(values): param_dict_temp = dict(zip(param_dict.keys(), values)) print(param_dict_temp) - return calib_instance(hazard, exposure, - init_impf(impf_name_or_instance, param_dict_temp)[0], - df_impact_data, - yearly_impact=yearly_impact, return_cost=cost_fucntion) + return calib_instance( + hazard, + exposure, + init_impf(impf_name_or_instance, param_dict_temp)[0], + df_impact_data, + yearly_impact=yearly_impact, + return_cost=cost_fucntion, + ) + # define constraints - if impf_name_or_instance == 'emanuel': - cons = [{'type': 'ineq', 'fun': lambda x: -x[0] + x[1]}, - {'type': 'ineq', 'fun': lambda x: -x[2] + 0.9999}, - {'type': 'ineq', 'fun': lambda x: x[2]}] + if impf_name_or_instance == "emanuel": + cons = [ + {"type": "ineq", "fun": lambda x: -x[0] + x[1]}, + {"type": "ineq", "fun": lambda x: -x[2] + 0.9999}, + {"type": "ineq", "fun": lambda x: x[2]}, + ] else: - cons = [{'type': 'ineq', 'fun': lambda x: -x[0] + 2}, - {'type': 'ineq', 'fun': lambda x: x[0]}, - {'type': 'ineq', 'fun': lambda x: -x[1] + 2}, - {'type': 'ineq', 'fun': lambda x: x[1]}] - + cons = [ + {"type": "ineq", "fun": lambda x: -x[0] + 2}, + {"type": "ineq", "fun": lambda x: x[0]}, + {"type": "ineq", "fun": lambda x: -x[1] + 2}, + {"type": "ineq", "fun": lambda x: x[1]}, + ] values = list(param_dict.values()) - res = minimize(specific_calib, values, - # bounds=bounds, - # bounds=((0.0, np.inf), (0.0, np.inf), (0.0, 1.0)), - constraints=cons, - # method='SLSQP', - method='trust-constr', - options={'xtol': 1e-5, 'disp': True, 'maxiter': 500}) + res = minimize( + specific_calib, + values, + # bounds=bounds, + # bounds=((0.0, np.inf), (0.0, np.inf), (0.0, 1.0)), + constraints=cons, + # method='SLSQP', + method="trust-constr", + options={"xtol": 1e-5, "disp": True, "maxiter": 500}, + ) param_dict_result = dict(zip(param_dict.keys(), res.x)) if res.success: - LOGGER.info('Optimization successfully finished.') + LOGGER.info("Optimization successfully finished.") else: - LOGGER.info('Opimization did not finish successfully. Check you input' - ' or consult the detailed returns (with argument' - 'show_details=True) for further information.') + LOGGER.info( + "Opimization did not finish successfully. Check you input" + " or consult the detailed returns (with argument" + "show_details=True) for further information." + ) if show_details: return param_dict_result, res diff --git a/climada/engine/cost_benefit.py b/climada/engine/cost_benefit.py index dfb5153db..99387aab6 100644 --- a/climada/engine/cost_benefit.py +++ b/climada/engine/cost_benefit.py @@ -19,20 +19,20 @@ Define CostBenefit class. 
""" -__all__ = ['CostBenefit', 'risk_aai_agg', 'risk_rp_100', 'risk_rp_250'] +__all__ = ["CostBenefit", "risk_aai_agg", "risk_rp_100", "risk_rp_250"] import copy import logging -from typing import Optional, Dict, Tuple, Union +from typing import Dict, Optional, Tuple, Union -import numpy as np import matplotlib.colors as colors import matplotlib.pyplot as plt -from matplotlib.patches import Rectangle, FancyArrowPatch +import numpy as np +from matplotlib.patches import FancyArrowPatch, Rectangle from tabulate import tabulate -from climada.engine.impact_calc import ImpactCalc from climada.engine.impact import Impact, ImpactFreqCurve +from climada.engine.impact_calc import ImpactCalc LOGGER = logging.getLogger(__name__) @@ -42,9 +42,10 @@ DEF_FUTURE_YEAR = 2030 """Default future reference year""" -NO_MEASURE = 'no measure' +NO_MEASURE = "no measure" """Name of risk metrics when no measure is applied""" + def risk_aai_agg(impact): """Risk measurement as average annual impact aggregated. @@ -59,6 +60,7 @@ def risk_aai_agg(impact): """ return impact.aai_agg + def risk_rp_100(impact): """Risk measurement as exceedance impact at 100 years return period. @@ -76,6 +78,7 @@ def risk_rp_100(impact): return efc.impact[0] return 0 + def risk_rp_250(impact): """Risk measurement as exceedance impact at 250 years return period. @@ -93,7 +96,8 @@ def risk_rp_250(impact): return efc.impact[0] return 0 -class CostBenefit(): + +class CostBenefit: """Impact definition. Compute from an entity (exposures and impact functions) and hazard. @@ -142,14 +146,16 @@ def __init__( present_year: int = DEF_PRESENT_YEAR, future_year: int = DEF_FUTURE_YEAR, tot_climate_risk: float = 0.0, - unit: str = 'USD', + unit: str = "USD", color_rgb: Optional[Dict[str, np.ndarray]] = None, benefit: Optional[Dict[str, float]] = None, cost_ben_ratio: Optional[Dict[str, float]] = None, - imp_meas_present: Optional[Dict[str, - Union[float, Tuple[float, float], Impact, ImpactFreqCurve]]] = None, - imp_meas_future: Optional[Dict[str, - Union[float, Tuple[float, float], Impact, ImpactFreqCurve]]] = None, + imp_meas_present: Optional[ + Dict[str, Union[float, Tuple[float, float], Impact, ImpactFreqCurve]] + ] = None, + imp_meas_future: Optional[ + Dict[str, Union[float, Tuple[float, float], Impact, ImpactFreqCurve]] + ] = None, ): """Initilization""" self.present_year = present_year @@ -171,11 +177,25 @@ def __init__( # 'risk': risk measurement, # 'efc': ImpactFreqCurve # (optionally) 'impact': Impact - self.imp_meas_future = imp_meas_future if imp_meas_future is not None else dict() - self.imp_meas_present = imp_meas_present if imp_meas_present is not None else dict() + self.imp_meas_future = ( + imp_meas_future if imp_meas_future is not None else dict() + ) + self.imp_meas_present = ( + imp_meas_present if imp_meas_present is not None else dict() + ) - def calc(self, hazard, entity, haz_future=None, ent_future=None, future_year=None, - risk_func=risk_aai_agg, imp_time_depen=None, save_imp=False, assign_centroids=True): + def calc( + self, + hazard, + entity, + haz_future=None, + ent_future=None, + future_year=None, + risk_func=risk_aai_agg, + imp_time_depen=None, + save_imp=False, + assign_centroids=True, + ): """Compute cost-benefit ratio for every measure provided current and, optionally, future conditions. Present and future measures need to have the same name. The measures costs need to be discounted by the user. 
@@ -222,7 +242,7 @@ def calc(self, hazard, entity, haz_future=None, ent_future=None, future_year=Non # save measure colors for meas in entity.measures.get_measure(hazard.haz_type): self.color_rgb[meas.name] = meas.color_rgb - self.color_rgb[NO_MEASURE] = colors.to_rgb('deepskyblue') + self.color_rgb[NO_MEASURE] = colors.to_rgb("deepskyblue") if future_year is None and ent_future is None: future_year = entity.exposures.ref_year @@ -237,37 +257,74 @@ def calc(self, hazard, entity, haz_future=None, ent_future=None, future_year=Non if not haz_future and not ent_future: self.future_year = future_year - self._calc_impact_measures(hazard, entity.exposures, - entity.measures, entity.impact_funcs, 'future', - risk_func, save_imp) + self._calc_impact_measures( + hazard, + entity.exposures, + entity.measures, + entity.impact_funcs, + "future", + risk_func, + save_imp, + ) else: if imp_time_depen is None: imp_time_depen = 1 - self._calc_impact_measures(hazard, entity.exposures, - entity.measures, entity.impact_funcs, 'present', - risk_func, save_imp) + self._calc_impact_measures( + hazard, + entity.exposures, + entity.measures, + entity.impact_funcs, + "present", + risk_func, + save_imp, + ) if haz_future and ent_future: self.future_year = ent_future.exposures.ref_year - self._calc_impact_measures(haz_future, ent_future.exposures, - ent_future.measures, ent_future.impact_funcs, 'future', - risk_func, save_imp) + self._calc_impact_measures( + haz_future, + ent_future.exposures, + ent_future.measures, + ent_future.impact_funcs, + "future", + risk_func, + save_imp, + ) elif haz_future: self.future_year = future_year - self._calc_impact_measures(haz_future, entity.exposures, - entity.measures, entity.impact_funcs, 'future', - risk_func, save_imp) + self._calc_impact_measures( + haz_future, + entity.exposures, + entity.measures, + entity.impact_funcs, + "future", + risk_func, + save_imp, + ) else: self.future_year = ent_future.exposures.ref_year - self._calc_impact_measures(hazard, ent_future.exposures, - ent_future.measures, ent_future.impact_funcs, 'future', - risk_func, save_imp) + self._calc_impact_measures( + hazard, + ent_future.exposures, + ent_future.measures, + ent_future.impact_funcs, + "future", + risk_func, + save_imp, + ) self._calc_cost_benefit(entity.disc_rates, imp_time_depen) self._print_results() self._print_npv() - def combine_measures(self, in_meas_names, new_name, new_color, disc_rates, - imp_time_depen=None, risk_func=risk_aai_agg): + def combine_measures( + self, + in_meas_names, + new_name, + new_color, + disc_rates, + imp_time_depen=None, + risk_func=risk_aai_agg, + ): """Compute cost-benefit of the combination of measures previously computed by calc with save_imp=True. The benefits of the measures per event are added. 
To combine with risk transfer options use @@ -309,24 +366,37 @@ def combine_measures(self, in_meas_names, new_name, new_color, disc_rates, new_cb.color_rgb[new_name] = new_color # compute impacts for imp_meas_future and imp_meas_present - self._combine_imp_meas(new_cb, in_meas_names, new_name, risk_func, when='future') + self._combine_imp_meas( + new_cb, in_meas_names, new_name, risk_func, when="future" + ) if self.imp_meas_present: new_cb.imp_meas_present[NO_MEASURE] = self.imp_meas_present[NO_MEASURE] if imp_time_depen is None: imp_time_depen = 1 - self._combine_imp_meas(new_cb, in_meas_names, new_name, risk_func, when='present') + self._combine_imp_meas( + new_cb, in_meas_names, new_name, risk_func, when="present" + ) # cost-benefit computation: fill measure's benefit and cost_ben_ratio time_dep = new_cb._time_dependency_array(imp_time_depen) - new_cb._cost_ben_one(new_name, new_cb.imp_meas_future[new_name], disc_rates, - time_dep) + new_cb._cost_ben_one( + new_name, new_cb.imp_meas_future[new_name], disc_rates, time_dep + ) new_cb._print_results() new_cb._print_npv() return new_cb - def apply_risk_transfer(self, meas_name, attachment, cover, disc_rates, - cost_fix=0, cost_factor=1, imp_time_depen=None, - risk_func=risk_aai_agg): + def apply_risk_transfer( + self, + meas_name, + attachment, + cover, + disc_rates, + cost_fix=0, + cost_factor=1, + imp_time_depen=None, + risk_func=risk_aai_agg, + ): """Applies risk transfer to given measure computed before with saved impact and compares it to when no measure is applied. Appended to dictionaries of measures. @@ -354,52 +424,64 @@ def apply_risk_transfer(self, meas_name, attachment, cover, disc_rates, function describing risk measure given an Impact. Default: average annual impact (aggregated). """ - m_transf_name = 'risk transfer (' + meas_name + ')' - self.color_rgb[m_transf_name] = np.maximum(np.minimum(self.color_rgb[meas_name] - - np.ones(3) * 0.2, 1), 0) + m_transf_name = "risk transfer (" + meas_name + ")" + self.color_rgb[m_transf_name] = np.maximum( + np.minimum(self.color_rgb[meas_name] - np.ones(3) * 0.2, 1), 0 + ) - _, layer_no = self.imp_meas_future[NO_MEASURE]['impact']. \ - calc_risk_transfer(attachment, cover) + _, layer_no = self.imp_meas_future[NO_MEASURE]["impact"].calc_risk_transfer( + attachment, cover + ) layer_no = risk_func(layer_no) - imp, layer = self.imp_meas_future[meas_name]['impact']. \ - calc_risk_transfer(attachment, cover) + imp, layer = self.imp_meas_future[meas_name]["impact"].calc_risk_transfer( + attachment, cover + ) self.imp_meas_future[m_transf_name] = dict() - self.imp_meas_future[m_transf_name]['risk_transf'] = risk_func(layer) - self.imp_meas_future[m_transf_name]['impact'] = imp - self.imp_meas_future[m_transf_name]['risk'] = risk_func(imp) - self.imp_meas_future[m_transf_name]['cost'] = (cost_fix, cost_factor) - self.imp_meas_future[m_transf_name]['efc'] = imp.calc_freq_curve() + self.imp_meas_future[m_transf_name]["risk_transf"] = risk_func(layer) + self.imp_meas_future[m_transf_name]["impact"] = imp + self.imp_meas_future[m_transf_name]["risk"] = risk_func(imp) + self.imp_meas_future[m_transf_name]["cost"] = (cost_fix, cost_factor) + self.imp_meas_future[m_transf_name]["efc"] = imp.calc_freq_curve() if self.imp_meas_present: if imp_time_depen is None: imp_time_depen = 1 time_dep = self._time_dependency_array(imp_time_depen) - _, pres_layer_no = self.imp_meas_present[NO_MEASURE]['impact']. 
\ - calc_risk_transfer(attachment, cover) + _, pres_layer_no = self.imp_meas_present[NO_MEASURE][ + "impact" + ].calc_risk_transfer(attachment, cover) pres_layer_no = risk_func(pres_layer_no) layer_no = pres_layer_no + (layer_no - pres_layer_no) * time_dep - imp, layer = self.imp_meas_present[meas_name]['impact']. \ - calc_risk_transfer(attachment, cover) + imp, layer = self.imp_meas_present[meas_name]["impact"].calc_risk_transfer( + attachment, cover + ) self.imp_meas_present[m_transf_name] = dict() - self.imp_meas_present[m_transf_name]['risk_transf'] = risk_func(layer) - self.imp_meas_present[m_transf_name]['impact'] = imp - self.imp_meas_present[m_transf_name]['risk'] = risk_func(imp) - self.imp_meas_present[m_transf_name]['cost'] = (cost_fix, cost_factor) - self.imp_meas_present[m_transf_name]['efc'] = imp.calc_freq_curve() + self.imp_meas_present[m_transf_name]["risk_transf"] = risk_func(layer) + self.imp_meas_present[m_transf_name]["impact"] = imp + self.imp_meas_present[m_transf_name]["risk"] = risk_func(imp) + self.imp_meas_present[m_transf_name]["cost"] = (cost_fix, cost_factor) + self.imp_meas_present[m_transf_name]["efc"] = imp.calc_freq_curve() else: time_dep = self._time_dependency_array(imp_time_depen) layer_no = time_dep * layer_no - self._cost_ben_one(m_transf_name, self.imp_meas_future[m_transf_name], - disc_rates, time_dep, ini_state=meas_name) + self._cost_ben_one( + m_transf_name, + self.imp_meas_future[m_transf_name], + disc_rates, + time_dep, + ini_state=meas_name, + ) # compare layer no measure - layer_no = disc_rates.net_present_value(self.present_year, - self.future_year, layer_no) - layer = ((self.cost_ben_ratio[m_transf_name] * self.benefit[m_transf_name] - cost_fix) - / cost_factor) + layer_no = disc_rates.net_present_value( + self.present_year, self.future_year, layer_no + ) + layer = ( + self.cost_ben_ratio[m_transf_name] * self.benefit[m_transf_name] - cost_fix + ) / cost_factor self._print_results() self._print_risk_transfer(layer, layer_no, cost_fix, cost_factor) self._print_npv() @@ -438,37 +520,74 @@ def plot_cost_benefit(self, cb_list=None, axis=None, **kwargs): matplotlib.axes._subplots.AxesSubplot """ if cb_list: - if 'alpha' not in kwargs: - kwargs['alpha'] = 0.5 + if "alpha" not in kwargs: + kwargs["alpha"] = 0.5 cb_uncer = [self] cb_uncer.extend(cb_list) axis = self._plot_list_cost_ben(cb_uncer, axis, **kwargs) return axis - if 'alpha' not in kwargs: - kwargs['alpha'] = 1.0 + if "alpha" not in kwargs: + kwargs["alpha"] = 1.0 axis = self._plot_list_cost_ben([self], axis, **kwargs) norm_fact, norm_name = _norm_values(self.tot_climate_risk + 0.01) - text_pos = self.imp_meas_future[NO_MEASURE]['risk'] / norm_fact - axis.scatter(text_pos, 0, c='r', zorder=200, clip_on=False) - axis.text(text_pos, 0, ' AAI', horizontalalignment='center', - verticalalignment='bottom', rotation=90, fontsize=12, color='r') + text_pos = self.imp_meas_future[NO_MEASURE]["risk"] / norm_fact + axis.scatter(text_pos, 0, c="r", zorder=200, clip_on=False) + axis.text( + text_pos, + 0, + " AAI", + horizontalalignment="center", + verticalalignment="bottom", + rotation=90, + fontsize=12, + color="r", + ) if abs(text_pos - self.tot_climate_risk / norm_fact) > 1: - axis.scatter(self.tot_climate_risk / norm_fact, 0, c='r', zorder=200, clip_on=False) - axis.text(self.tot_climate_risk / norm_fact, 0, ' Tot risk', - horizontalalignment='center', verticalalignment='bottom', rotation=90, - fontsize=12, color='r') - - axis.set_xlim(0, max(self.tot_climate_risk / norm_fact, - 
np.array(list(self.benefit.values())).sum() / norm_fact)) - axis.set_ylim(0, int(1 / np.nanmin(np.ma.masked_equal(np.array(list( - self.cost_ben_ratio.values())), 0))) + 1) - - x_label = ('NPV averted damage over ' + str(self.future_year - self.present_year + 1) - + ' years (' + self.unit + ' ' + norm_name + ')') + axis.scatter( + self.tot_climate_risk / norm_fact, 0, c="r", zorder=200, clip_on=False + ) + axis.text( + self.tot_climate_risk / norm_fact, + 0, + " Tot risk", + horizontalalignment="center", + verticalalignment="bottom", + rotation=90, + fontsize=12, + color="r", + ) + + axis.set_xlim( + 0, + max( + self.tot_climate_risk / norm_fact, + np.array(list(self.benefit.values())).sum() / norm_fact, + ), + ) + axis.set_ylim( + 0, + int( + 1 + / np.nanmin( + np.ma.masked_equal(np.array(list(self.cost_ben_ratio.values())), 0) + ) + ) + + 1, + ) + + x_label = ( + "NPV averted damage over " + + str(self.future_year - self.present_year + 1) + + " years (" + + self.unit + + " " + + norm_name + + ")" + ) axis.set_xlabel(x_label) - axis.set_ylabel('Benefit/Cost ratio') + axis.set_ylabel("Benefit/Cost ratio") return axis def plot_event_view(self, return_per=(10, 25, 100), axis=None, **kwargs): @@ -489,46 +608,61 @@ def plot_event_view(self, return_per=(10, 25, 100), axis=None, **kwargs): matplotlib.axes._subplots.AxesSubplot """ if not self.imp_meas_future: - raise ValueError('Compute CostBenefit.calc() first') + raise ValueError("Compute CostBenefit.calc() first") if not axis: _, axis = plt.subplots(1, 1) avert_rp = dict() for meas_name, meas_val in self.imp_meas_future.items(): if meas_name == NO_MEASURE: continue - interp_imp = np.interp(return_per, meas_val['efc'].return_per, - meas_val['efc'].impact) + interp_imp = np.interp( + return_per, meas_val["efc"].return_per, meas_val["efc"].impact + ) # check if measure over no measure or combined with another measure try: - ref_meas = meas_name[meas_name.index('(') + 1:meas_name.index(')')] + ref_meas = meas_name[meas_name.index("(") + 1 : meas_name.index(")")] except ValueError: ref_meas = NO_MEASURE - ref_imp = np.interp(return_per, - self.imp_meas_future[ref_meas]['efc'].return_per, - self.imp_meas_future[ref_meas]['efc'].impact) + ref_imp = np.interp( + return_per, + self.imp_meas_future[ref_meas]["efc"].return_per, + self.imp_meas_future[ref_meas]["efc"].impact, + ) avert_rp[meas_name] = ref_imp - interp_imp m_names = list(self.cost_ben_ratio.keys()) sort_cb = np.argsort(np.array([self.cost_ben_ratio[name] for name in m_names])) names_sort = [m_names[i] for i in sort_cb] color_sort = [self.color_rgb[name] for name in names_sort] - ref_imp = np.interp(return_per, self.imp_meas_future[NO_MEASURE]['efc'].return_per, - self.imp_meas_future[NO_MEASURE]['efc'].impact) + ref_imp = np.interp( + return_per, + self.imp_meas_future[NO_MEASURE]["efc"].return_per, + self.imp_meas_future[NO_MEASURE]["efc"].impact, + ) for rp_i, _ in enumerate(return_per): val_i = [avert_rp[name][rp_i] for name in names_sort] cum_effect = np.cumsum(np.array([0] + val_i)) - for (eff, color) in zip(cum_effect[::-1][:-1], color_sort[::-1]): + for eff, color in zip(cum_effect[::-1][:-1], color_sort[::-1]): axis.bar(rp_i + 1, eff, color=color, **kwargs) - axis.bar(rp_i + 1, ref_imp[rp_i], edgecolor='k', fc=(1, 0, 0, 0), zorder=100) - axis.set_xlabel('Return Period (%s)' % str(self.future_year)) - axis.set_ylabel('Impact (' + self.unit + ')') + axis.bar( + rp_i + 1, ref_imp[rp_i], edgecolor="k", fc=(1, 0, 0, 0), zorder=100 + ) + axis.set_xlabel("Return Period (%s)" % 
str(self.future_year)) + axis.set_ylabel("Impact (" + self.unit + ")") axis.set_xticks(np.arange(len(return_per)) + 1) axis.set_xticklabels([str(per) for per in return_per]) return axis @staticmethod - def plot_waterfall(hazard, entity, haz_future, ent_future, - risk_func=risk_aai_agg, axis=None, **kwargs): + def plot_waterfall( + hazard, + entity, + haz_future, + ent_future, + risk_func=risk_aai_agg, + axis=None, + **kwargs + ): """Plot waterfall graph at future with given risk metric. Can be called before and after calc(). @@ -554,16 +688,18 @@ def plot_waterfall(hazard, entity, haz_future, ent_future, matplotlib.axes._subplots.AxesSubplot """ if ent_future.exposures.ref_year == entity.exposures.ref_year: - raise ValueError('Same reference years for future and present entities.') + raise ValueError("Same reference years for future and present entities.") present_year = entity.exposures.ref_year future_year = ent_future.exposures.ref_year - imp = ImpactCalc(entity.exposures, entity.impact_funcs, hazard)\ - .impact(assign_centroids=hazard.centr_exp_col not in entity.exposures.gdf) + imp = ImpactCalc(entity.exposures, entity.impact_funcs, hazard).impact( + assign_centroids=hazard.centr_exp_col not in entity.exposures.gdf + ) curr_risk = risk_func(imp) - imp = ImpactCalc(ent_future.exposures, ent_future.impact_funcs, haz_future)\ - .impact(assign_centroids=hazard.centr_exp_col not in ent_future.exposures.gdf) + imp = ImpactCalc( + ent_future.exposures, ent_future.impact_funcs, haz_future + ).impact(assign_centroids=hazard.centr_exp_col not in ent_future.exposures.gdf) fut_risk = risk_func(imp) if not axis: @@ -571,50 +707,100 @@ def plot_waterfall(hazard, entity, haz_future, ent_future, norm_fact, norm_name = _norm_values(curr_risk) # current situation - LOGGER.info('Risk at {:d}: {:.3e}'.format(present_year, curr_risk)) + LOGGER.info("Risk at {:d}: {:.3e}".format(present_year, curr_risk)) # changing future # socio-economic dev - imp = ImpactCalc(ent_future.exposures, ent_future.impact_funcs, hazard)\ - .impact(assign_centroids=False) + imp = ImpactCalc(ent_future.exposures, ent_future.impact_funcs, hazard).impact( + assign_centroids=False + ) risk_dev = risk_func(imp) - LOGGER.info('Risk with development at {:d}: {:.3e}'.format(future_year, risk_dev)) + LOGGER.info( + "Risk with development at {:d}: {:.3e}".format(future_year, risk_dev) + ) # socioecon + cc - LOGGER.info('Risk with development and climate change at {:d}: {:.3e}'. 
- format(future_year, fut_risk)) + LOGGER.info( + "Risk with development and climate change at {:d}: {:.3e}".format( + future_year, fut_risk + ) + ) axis.bar(1, curr_risk / norm_fact, **kwargs) - axis.text(1, curr_risk / norm_fact, str(int(round(curr_risk / norm_fact))), - horizontalalignment='center', verticalalignment='bottom', - fontsize=12, color='k') - axis.bar(2, height=(risk_dev - curr_risk) / norm_fact, - bottom=curr_risk / norm_fact, **kwargs) - axis.text(2, curr_risk / norm_fact + (risk_dev - curr_risk) / norm_fact / 2, - str(int(round((risk_dev - curr_risk) / norm_fact))), - horizontalalignment='center', verticalalignment='center', fontsize=12, color='k') - axis.bar(3, height=(fut_risk - risk_dev) / norm_fact, - bottom=risk_dev / norm_fact, **kwargs) - axis.text(3, risk_dev / norm_fact + (fut_risk - risk_dev) / norm_fact / 2, - str(int(round((fut_risk - risk_dev) / norm_fact))), - horizontalalignment='center', verticalalignment='center', fontsize=12, - color='k') + axis.text( + 1, + curr_risk / norm_fact, + str(int(round(curr_risk / norm_fact))), + horizontalalignment="center", + verticalalignment="bottom", + fontsize=12, + color="k", + ) + axis.bar( + 2, + height=(risk_dev - curr_risk) / norm_fact, + bottom=curr_risk / norm_fact, + **kwargs + ) + axis.text( + 2, + curr_risk / norm_fact + (risk_dev - curr_risk) / norm_fact / 2, + str(int(round((risk_dev - curr_risk) / norm_fact))), + horizontalalignment="center", + verticalalignment="center", + fontsize=12, + color="k", + ) + axis.bar( + 3, + height=(fut_risk - risk_dev) / norm_fact, + bottom=risk_dev / norm_fact, + **kwargs + ) + axis.text( + 3, + risk_dev / norm_fact + (fut_risk - risk_dev) / norm_fact / 2, + str(int(round((fut_risk - risk_dev) / norm_fact))), + horizontalalignment="center", + verticalalignment="center", + fontsize=12, + color="k", + ) axis.bar(4, height=fut_risk / norm_fact, **kwargs) - axis.text(4, fut_risk / norm_fact, str(int(round(fut_risk / norm_fact))), - horizontalalignment='center', verticalalignment='bottom', - fontsize=12, color='k') + axis.text( + 4, + fut_risk / norm_fact, + str(int(round(fut_risk / norm_fact))), + horizontalalignment="center", + verticalalignment="bottom", + fontsize=12, + color="k", + ) axis.set_xticks(np.arange(4) + 1) - axis.set_xticklabels(['Risk ' + str(present_year), - 'Economic \ndevelopment', - 'Climate \nchange', - 'Risk ' + str(future_year)]) - axis.set_ylabel('Impact (' + imp.unit + ' ' + norm_name + ')') - axis.set_title('Risk at {:d} and {:d}'.format(present_year, future_year)) + axis.set_xticklabels( + [ + "Risk " + str(present_year), + "Economic \ndevelopment", + "Climate \nchange", + "Risk " + str(future_year), + ] + ) + axis.set_ylabel("Impact (" + imp.unit + " " + norm_name + ")") + axis.set_title("Risk at {:d} and {:d}".format(present_year, future_year)) return axis - def plot_arrow_averted(self, axis, in_meas_names=None, accumulate=False, combine=False, - risk_func=risk_aai_agg, disc_rates=None, imp_time_depen=1, **kwargs): + def plot_arrow_averted( + self, + axis, + in_meas_names=None, + accumulate=False, + combine=False, + risk_func=risk_aai_agg, + disc_rates=None, + imp_time_depen=1, + **kwargs + ): """Plot waterfall graph with accumulated values from present to future year. Call after calc() with save_imp=True. 
@@ -651,32 +837,59 @@ def plot_arrow_averted(self, axis, in_meas_names=None, accumulate=False, combine tot_benefit = np.array([self.benefit[meas] for meas in in_meas_names]).sum() norm_fact = self.tot_climate_risk / bars[3].get_height() else: - tot_benefit = np.array([risk_func(self.imp_meas_future[NO_MEASURE]['impact']) - - risk_func(self.imp_meas_future[meas]['impact']) - for meas in in_meas_names]).sum() - norm_fact = (risk_func(self.imp_meas_future['no measure']['impact']) - / bars[3].get_height()) + tot_benefit = np.array( + [ + risk_func(self.imp_meas_future[NO_MEASURE]["impact"]) + - risk_func(self.imp_meas_future[meas]["impact"]) + for meas in in_meas_names + ] + ).sum() + norm_fact = ( + risk_func(self.imp_meas_future["no measure"]["impact"]) + / bars[3].get_height() + ) if combine: try: - LOGGER.info('Combining measures %s', in_meas_names) - all_meas = self.combine_measures(in_meas_names, 'combine', - colors.to_rgba('black'), disc_rates, - imp_time_depen, risk_func) + LOGGER.info("Combining measures %s", in_meas_names) + all_meas = self.combine_measures( + in_meas_names, + "combine", + colors.to_rgba("black"), + disc_rates, + imp_time_depen, + risk_func, + ) except KeyError: - LOGGER.warning('Use calc() with save_imp=True to get a more accurate ' - 'approximation of total averted damage,') + LOGGER.warning( + "Use calc() with save_imp=True to get a more accurate " + "approximation of total averted damage," + ) if accumulate: - tot_benefit = all_meas.benefit['combine'] + tot_benefit = all_meas.benefit["combine"] else: - tot_benefit = risk_func(all_meas.imp_meas_future[NO_MEASURE]['impact']) - \ - risk_func(all_meas.imp_meas_future['combine']['impact']) - - self._plot_averted_arrow(axis, bars[3], tot_benefit, bars[3].get_height() * norm_fact, - norm_fact, **kwargs) + tot_benefit = risk_func( + all_meas.imp_meas_future[NO_MEASURE]["impact"] + ) - risk_func(all_meas.imp_meas_future["combine"]["impact"]) + + self._plot_averted_arrow( + axis, + bars[3], + tot_benefit, + bars[3].get_height() * norm_fact, + norm_fact, + **kwargs + ) - def plot_waterfall_accumulated(self, hazard, entity, ent_future, - risk_func=risk_aai_agg, imp_time_depen=1, - axis=None, **kwargs): + def plot_waterfall_accumulated( + self, + hazard, + entity, + ent_future, + risk_func=risk_aai_agg, + imp_time_depen=1, + axis=None, + **kwargs + ): """Plot waterfall graph with accumulated values from present to future year. Call after calc() with save_imp=True. Provide same inputs as in calc. 
@@ -702,72 +915,131 @@ def plot_waterfall_accumulated(self, hazard, entity, ent_future, matplotlib.axes._subplots.AxesSubplot """ if not self.imp_meas_future or not self.imp_meas_present: - raise ValueError('Compute CostBenefit.calc() first') + raise ValueError("Compute CostBenefit.calc() first") if ent_future.exposures.ref_year == entity.exposures.ref_year: - raise ValueError('Same reference years for future and present entities.') + raise ValueError("Same reference years for future and present entities.") self.present_year = entity.exposures.ref_year self.future_year = ent_future.exposures.ref_year # current situation - curr_risk = self.imp_meas_present[NO_MEASURE]['risk'] + curr_risk = self.imp_meas_present[NO_MEASURE]["risk"] time_dep = self._time_dependency_array() - risk_curr = self._npv_unaverted_impact(curr_risk, entity.disc_rates, - time_dep) - LOGGER.info('Current total risk at {:d}: {:.3e}'.format(self.future_year, - risk_curr)) + risk_curr = self._npv_unaverted_impact(curr_risk, entity.disc_rates, time_dep) + LOGGER.info( + "Current total risk at {:d}: {:.3e}".format(self.future_year, risk_curr) + ) # changing future time_dep = self._time_dependency_array(imp_time_depen) # socio-economic dev - imp = ImpactCalc(ent_future.exposures, ent_future.impact_funcs, hazard)\ - .impact(assign_centroids=False) - risk_dev = self._npv_unaverted_impact(risk_func(imp), entity.disc_rates, - time_dep, curr_risk) - LOGGER.info('Total risk with development at {:d}: {:.3e}'.format( - self.future_year, risk_dev)) + imp = ImpactCalc(ent_future.exposures, ent_future.impact_funcs, hazard).impact( + assign_centroids=False + ) + risk_dev = self._npv_unaverted_impact( + risk_func(imp), entity.disc_rates, time_dep, curr_risk + ) + LOGGER.info( + "Total risk with development at {:d}: {:.3e}".format( + self.future_year, risk_dev + ) + ) # socioecon + cc - risk_tot = self._npv_unaverted_impact(self.imp_meas_future[NO_MEASURE]['risk'], - entity.disc_rates, time_dep, curr_risk) - LOGGER.info('Total risk with development and climate change at {:d}: {:.3e}'. 
- format(self.future_year, risk_tot)) + risk_tot = self._npv_unaverted_impact( + self.imp_meas_future[NO_MEASURE]["risk"], + entity.disc_rates, + time_dep, + curr_risk, + ) + LOGGER.info( + "Total risk with development and climate change at {:d}: {:.3e}".format( + self.future_year, risk_tot + ) + ) # plot if not axis: _, axis = plt.subplots(1, 1) norm_fact, norm_name = _norm_values(curr_risk) axis.bar(1, risk_curr / norm_fact, **kwargs) - axis.text(1, risk_curr / norm_fact, str(int(round(risk_curr / norm_fact))), - horizontalalignment='center', verticalalignment='bottom', - fontsize=12, color='k') - axis.bar(2, height=(risk_dev - risk_curr) / norm_fact, - bottom=risk_curr / norm_fact, **kwargs) - axis.text(2, risk_curr / norm_fact + (risk_dev - risk_curr) / norm_fact / 2, - str(int(round((risk_dev - risk_curr) / norm_fact))), - horizontalalignment='center', verticalalignment='center', fontsize=12, color='k') - axis.bar(3, height=(risk_tot - risk_dev) / norm_fact, - bottom=risk_dev / norm_fact, **kwargs) - axis.text(3, risk_dev / norm_fact + (risk_tot - risk_dev) / norm_fact / 2, - str(int(round((risk_tot - risk_dev) / norm_fact))), - horizontalalignment='center', verticalalignment='center', fontsize=12, color='k') + axis.text( + 1, + risk_curr / norm_fact, + str(int(round(risk_curr / norm_fact))), + horizontalalignment="center", + verticalalignment="bottom", + fontsize=12, + color="k", + ) + axis.bar( + 2, + height=(risk_dev - risk_curr) / norm_fact, + bottom=risk_curr / norm_fact, + **kwargs + ) + axis.text( + 2, + risk_curr / norm_fact + (risk_dev - risk_curr) / norm_fact / 2, + str(int(round((risk_dev - risk_curr) / norm_fact))), + horizontalalignment="center", + verticalalignment="center", + fontsize=12, + color="k", + ) + axis.bar( + 3, + height=(risk_tot - risk_dev) / norm_fact, + bottom=risk_dev / norm_fact, + **kwargs + ) + axis.text( + 3, + risk_dev / norm_fact + (risk_tot - risk_dev) / norm_fact / 2, + str(int(round((risk_tot - risk_dev) / norm_fact))), + horizontalalignment="center", + verticalalignment="center", + fontsize=12, + color="k", + ) axis.bar(4, height=risk_tot / norm_fact, **kwargs) - axis.text(4, risk_tot / norm_fact, str(int(round(risk_tot / norm_fact))), - horizontalalignment='center', verticalalignment='bottom', - fontsize=12, color='k') + axis.text( + 4, + risk_tot / norm_fact, + str(int(round(risk_tot / norm_fact))), + horizontalalignment="center", + verticalalignment="bottom", + fontsize=12, + color="k", + ) axis.set_xticks(np.arange(4) + 1) - axis.set_xticklabels(['Risk ' + str(self.present_year), - 'Economic \ndevelopment', - 'Climate \nchange', - 'Risk ' + str(self.future_year)]) - axis.set_ylabel('Impact (' + self.unit + ' ' + norm_name + ')') - axis.set_title('Total accumulated impact from {:d} to {:d}'.format( - self.present_year, self.future_year)) + axis.set_xticklabels( + [ + "Risk " + str(self.present_year), + "Economic \ndevelopment", + "Climate \nchange", + "Risk " + str(self.future_year), + ] + ) + axis.set_ylabel("Impact (" + self.unit + " " + norm_name + ")") + axis.set_title( + "Total accumulated impact from {:d} to {:d}".format( + self.present_year, self.future_year + ) + ) return axis - def _calc_impact_measures(self, hazard, exposures, meas_set, imp_fun_set, - when='future', risk_func=risk_aai_agg, save_imp=False): + def _calc_impact_measures( + self, + hazard, + exposures, + meas_set, + imp_fun_set, + when="future", + risk_func=risk_aai_agg, + save_imp=False, + ): """Compute impact of each measure and transform it to input risk measurement. 
Set reference year from exposures value. @@ -792,31 +1064,37 @@ def _calc_impact_measures(self, hazard, exposures, meas_set, imp_fun_set, impact_meas = dict() # compute impact without measures - LOGGER.debug('%s impact with no measure.', when) - imp_tmp = ImpactCalc(exposures, imp_fun_set, hazard).impact(assign_centroids=False) + LOGGER.debug("%s impact with no measure.", when) + imp_tmp = ImpactCalc(exposures, imp_fun_set, hazard).impact( + assign_centroids=False + ) impact_meas[NO_MEASURE] = dict() - impact_meas[NO_MEASURE]['cost'] = (0, 0) - impact_meas[NO_MEASURE]['risk'] = risk_func(imp_tmp) - impact_meas[NO_MEASURE]['risk_transf'] = 0.0 - impact_meas[NO_MEASURE]['efc'] = imp_tmp.calc_freq_curve() + impact_meas[NO_MEASURE]["cost"] = (0, 0) + impact_meas[NO_MEASURE]["risk"] = risk_func(imp_tmp) + impact_meas[NO_MEASURE]["risk_transf"] = 0.0 + impact_meas[NO_MEASURE]["efc"] = imp_tmp.calc_freq_curve() if save_imp: - impact_meas[NO_MEASURE]['impact'] = imp_tmp + impact_meas[NO_MEASURE]["impact"] = imp_tmp # compute impact for each measure for measure in meas_set.get_measure(hazard.haz_type): - LOGGER.debug('%s impact of measure %s.', when, measure.name) - imp_tmp, risk_transf = measure.calc_impact(exposures, imp_fun_set, hazard, - assign_centroids=False) + LOGGER.debug("%s impact of measure %s.", when, measure.name) + imp_tmp, risk_transf = measure.calc_impact( + exposures, imp_fun_set, hazard, assign_centroids=False + ) impact_meas[measure.name] = dict() - impact_meas[measure.name]['cost'] = (measure.cost, measure.risk_transf_cost_factor) - impact_meas[measure.name]['risk'] = risk_func(imp_tmp) - impact_meas[measure.name]['risk_transf'] = risk_func(risk_transf) - impact_meas[measure.name]['efc'] = imp_tmp.calc_freq_curve() + impact_meas[measure.name]["cost"] = ( + measure.cost, + measure.risk_transf_cost_factor, + ) + impact_meas[measure.name]["risk"] = risk_func(imp_tmp) + impact_meas[measure.name]["risk_transf"] = risk_func(risk_transf) + impact_meas[measure.name]["efc"] = imp_tmp.calc_freq_curve() if save_imp: - impact_meas[measure.name]['impact'] = imp_tmp + impact_meas[measure.name]["impact"] = imp_tmp # if present reference provided save it - if when == 'future': + if when == "future": self.imp_meas_future = impact_meas else: self.imp_meas_present = impact_meas @@ -831,15 +1109,20 @@ def _calc_cost_benefit(self, disc_rates, imp_time_depen=None): imp_time_depen : float, optional parameter which represent time evolution of impact """ - LOGGER.info('Computing cost benefit from years %s to %s.', - str(self.present_year), str(self.future_year)) + LOGGER.info( + "Computing cost benefit from years %s to %s.", + str(self.present_year), + str(self.future_year), + ) if self.future_year - self.present_year + 1 <= 0: - raise ValueError('Wrong year range: %s - %s.' - % (str(self.present_year), str(self.future_year))) + raise ValueError( + "Wrong year range: %s - %s." 
+ % (str(self.present_year), str(self.future_year)) + ) if not self.imp_meas_future: - raise ValueError('Compute first _calc_impact_measures') + raise ValueError("Compute first _calc_impact_measures") time_dep = self._time_dependency_array(imp_time_depen) @@ -849,18 +1132,22 @@ def _calc_cost_benefit(self, disc_rates, imp_time_depen=None): # npv of the full unaverted damages if self.imp_meas_present: self.tot_climate_risk = self._npv_unaverted_impact( - self.imp_meas_future[NO_MEASURE]['risk'], - disc_rates, time_dep, self.imp_meas_present[NO_MEASURE]['risk']) + self.imp_meas_future[NO_MEASURE]["risk"], + disc_rates, + time_dep, + self.imp_meas_present[NO_MEASURE]["risk"], + ) else: self.tot_climate_risk = self._npv_unaverted_impact( - self.imp_meas_future[NO_MEASURE]['risk'], - disc_rates, time_dep) + self.imp_meas_future[NO_MEASURE]["risk"], disc_rates, time_dep + ) continue self._cost_ben_one(meas_name, meas_val, disc_rates, time_dep) - def _cost_ben_one(self, meas_name, meas_val, disc_rates, time_dep, - ini_state=NO_MEASURE): + def _cost_ben_one( + self, meas_name, meas_val, disc_rates, time_dep, ini_state=NO_MEASURE + ): """Compute cost and benefit for given measure with time dependency Parameters @@ -878,28 +1165,33 @@ def _cost_ben_one(self, meas_name, meas_val, disc_rates, time_dep, name of the measure to which to compute benefit. Default: 'no measure' """ - fut_benefit = self.imp_meas_future[ini_state]['risk'] - meas_val['risk'] - fut_risk_tr = meas_val['risk_transf'] + fut_benefit = self.imp_meas_future[ini_state]["risk"] - meas_val["risk"] + fut_risk_tr = meas_val["risk_transf"] if self.imp_meas_present: - pres_benefit = self.imp_meas_present[ini_state]['risk'] - \ - self.imp_meas_present[meas_name]['risk'] + pres_benefit = ( + self.imp_meas_present[ini_state]["risk"] + - self.imp_meas_present[meas_name]["risk"] + ) meas_ben = pres_benefit + (fut_benefit - pres_benefit) * time_dep - pres_risk_tr = self.imp_meas_present[meas_name]['risk_transf'] + pres_risk_tr = self.imp_meas_present[meas_name]["risk_transf"] risk_tr = pres_risk_tr + (fut_risk_tr - pres_risk_tr) * time_dep else: meas_ben = time_dep * fut_benefit risk_tr = time_dep * fut_risk_tr # discount - meas_ben = disc_rates.net_present_value(self.present_year, - self.future_year, meas_ben) - risk_tr = disc_rates.net_present_value(self.present_year, - self.future_year, risk_tr) + meas_ben = disc_rates.net_present_value( + self.present_year, self.future_year, meas_ben + ) + risk_tr = disc_rates.net_present_value( + self.present_year, self.future_year, risk_tr + ) self.benefit[meas_name] = meas_ben - with np.errstate(divide='ignore'): - self.cost_ben_ratio[meas_name] = (meas_val['cost'][0] - + meas_val['cost'][1] * risk_tr) / meas_ben + with np.errstate(divide="ignore"): + self.cost_ben_ratio[meas_name] = ( + meas_val["cost"][0] + meas_val["cost"][1] * risk_tr + ) / meas_ben def _time_dependency_array(self, imp_time_depen=None): """Construct time dependency array. 
Each year contains a value in [0,1] @@ -917,14 +1209,16 @@ def _time_dependency_array(self, imp_time_depen=None): """ n_years = self.future_year - self.present_year + 1 if imp_time_depen: - time_dep = np.arange(n_years)**imp_time_depen / \ - (n_years - 1)**imp_time_depen + time_dep = ( + np.arange(n_years) ** imp_time_depen / (n_years - 1) ** imp_time_depen + ) else: time_dep = np.ones(n_years) return time_dep - def _npv_unaverted_impact(self, risk_future, disc_rates, time_dep, - risk_present=None): + def _npv_unaverted_impact( + self, risk_future, disc_rates, time_dep, risk_present=None + ): """Net present value of total unaverted damages Parameters @@ -944,16 +1238,18 @@ def _npv_unaverted_impact(self, risk_future, disc_rates, time_dep, """ if risk_present: tot_climate_risk = risk_present + (risk_future - risk_present) * time_dep - tot_climate_risk = disc_rates.net_present_value(self.present_year, - self.future_year, - tot_climate_risk) + tot_climate_risk = disc_rates.net_present_value( + self.present_year, self.future_year, tot_climate_risk + ) else: - tot_climate_risk = disc_rates.net_present_value(self.present_year, - self.future_year, - time_dep * risk_future) + tot_climate_risk = disc_rates.net_present_value( + self.present_year, self.future_year, time_dep * risk_future + ) return tot_climate_risk - def _combine_imp_meas(self, new_cb, in_meas_names, new_name, risk_func, when='future'): + def _combine_imp_meas( + self, new_cb, in_meas_names, new_name, risk_func, when="future" + ): """Compute impacts combined measures assuming they are independent, i.e. their benefit can be added. Costs are also added. For the new measure the dictionary imp_meas_future if when='future' and imp_meas_present @@ -973,59 +1269,90 @@ def _combine_imp_meas(self, new_cb, in_meas_names, new_name, risk_func, when='fu to fill (imp_meas_present or imp_meas_future respectively) default: 'future' """ - if when == 'future': + if when == "future": imp_dict = self.imp_meas_future new_imp_dict = new_cb.imp_meas_future else: imp_dict = self.imp_meas_present new_imp_dict = new_cb.imp_meas_present - sum_ben = np.sum([ - imp_dict[NO_MEASURE]['impact'].at_event - imp_dict[name]['impact'].at_event - for name in in_meas_names - ], axis=0) - new_imp = copy.deepcopy(imp_dict[in_meas_names[0]]['impact']) - new_imp.at_event = np.maximum(imp_dict[NO_MEASURE]['impact'].at_event - - sum_ben, 0) + sum_ben = np.sum( + [ + imp_dict[NO_MEASURE]["impact"].at_event + - imp_dict[name]["impact"].at_event + for name in in_meas_names + ], + axis=0, + ) + new_imp = copy.deepcopy(imp_dict[in_meas_names[0]]["impact"]) + new_imp.at_event = np.maximum( + imp_dict[NO_MEASURE]["impact"].at_event - sum_ben, 0 + ) new_imp.eai_exp = np.array([]) new_imp.aai_agg = sum(new_imp.at_event * new_imp.frequency) new_imp_dict[new_name] = dict() - new_imp_dict[new_name]['impact'] = new_imp - new_imp_dict[new_name]['efc'] = new_imp.calc_freq_curve() - new_imp_dict[new_name]['risk'] = risk_func(new_imp) - new_imp_dict[new_name]['cost'] = ( - np.array([imp_dict[name]['cost'][0] for name in in_meas_names]).sum(), - 1) - new_imp_dict[new_name]['risk_transf'] = 0 + new_imp_dict[new_name]["impact"] = new_imp + new_imp_dict[new_name]["efc"] = new_imp.calc_freq_curve() + new_imp_dict[new_name]["risk"] = risk_func(new_imp) + new_imp_dict[new_name]["cost"] = ( + np.array([imp_dict[name]["cost"][0] for name in in_meas_names]).sum(), + 1, + ) + new_imp_dict[new_name]["risk_transf"] = 0 def _print_results(self): """Print table with main results""" norm_fact, norm_name = 
_norm_values(np.array(list(self.benefit.values())).max()) - norm_name = '(' + self.unit + ' ' + norm_name + ')' + norm_name = "(" + self.unit + " " + norm_name + ")" table = [] - headers = ['Measure', 'Cost ' + norm_name, 'Benefit ' + norm_name, 'Benefit/Cost'] + headers = [ + "Measure", + "Cost " + norm_name, + "Benefit " + norm_name, + "Benefit/Cost", + ] for meas_name in self.benefit: - if not np.isnan(self.cost_ben_ratio[meas_name]) and \ - not np.isinf(self.cost_ben_ratio[meas_name]): - cost = self.cost_ben_ratio[meas_name] * self.benefit[meas_name] / norm_fact + if not np.isnan(self.cost_ben_ratio[meas_name]) and not np.isinf( + self.cost_ben_ratio[meas_name] + ): + cost = ( + self.cost_ben_ratio[meas_name] * self.benefit[meas_name] / norm_fact + ) else: - cost = self.imp_meas_future[meas_name]['cost'][0] / norm_fact - table.append([meas_name, cost, self.benefit[meas_name] / norm_fact, - 1 / self.cost_ben_ratio[meas_name]]) + cost = self.imp_meas_future[meas_name]["cost"][0] / norm_fact + table.append( + [ + meas_name, + cost, + self.benefit[meas_name] / norm_fact, + 1 / self.cost_ben_ratio[meas_name], + ] + ) print() print(tabulate(table, headers, tablefmt="simple")) table = [] - table.append(['Total climate risk:', - self.tot_climate_risk / norm_fact, norm_name]) - table.append(['Average annual risk:', - self.imp_meas_future[NO_MEASURE]['risk'] / norm_fact, norm_name]) - table.append(['Residual risk:', - (self.tot_climate_risk - - np.array(list(self.benefit.values())).sum()) / norm_fact, norm_name]) + table.append( + ["Total climate risk:", self.tot_climate_risk / norm_fact, norm_name] + ) + table.append( + [ + "Average annual risk:", + self.imp_meas_future[NO_MEASURE]["risk"] / norm_fact, + norm_name, + ] + ) + table.append( + [ + "Residual risk:", + (self.tot_climate_risk - np.array(list(self.benefit.values())).sum()) + / norm_fact, + norm_name, + ] + ) print() print(tabulate(table, tablefmt="simple")) @@ -1047,8 +1374,8 @@ def _plot_list_cost_ben(cb_list, axis=None, **kwargs): ------- matplotlib.axes._subplots.AxesSubplot """ - if 'alpha' not in kwargs: - kwargs['alpha'] = 0.5 + if "alpha" not in kwargs: + kwargs["alpha"] = 0.5 norm_fact = [_norm_values(cb_res.tot_climate_risk)[0] for cb_res in cb_list] norm_fact = np.array(norm_fact).mean() _, norm_name = _norm_values(norm_fact + 0.01) @@ -1056,40 +1383,67 @@ def _plot_list_cost_ben(cb_list, axis=None, **kwargs): if not axis: _, axis = plt.subplots(1, 1) m_names = list(cb_list[0].cost_ben_ratio.keys()) - sort_cb = np.argsort(np.array([cb_list[0].cost_ben_ratio[name] for name in m_names])) + sort_cb = np.argsort( + np.array([cb_list[0].cost_ben_ratio[name] for name in m_names]) + ) xy_lim = [0, 0] for i_cb, cb_res in enumerate(cb_list): xmin = 0 for meas_id in sort_cb: meas_n = m_names[meas_id] - axis.add_patch(Rectangle((xmin, 0), - cb_res.benefit[meas_n] / norm_fact, - 1 / cb_res.cost_ben_ratio[meas_n], - color=cb_res.color_rgb[meas_n], **kwargs)) + axis.add_patch( + Rectangle( + (xmin, 0), + cb_res.benefit[meas_n] / norm_fact, + 1 / cb_res.cost_ben_ratio[meas_n], + color=cb_res.color_rgb[meas_n], + **kwargs + ) + ) if i_cb == 0: - axis.text(xmin + (cb_res.benefit[meas_n] / norm_fact) / 2, - 0, ' ' + meas_n, horizontalalignment='center', - verticalalignment='bottom', rotation=90, fontsize=12) + axis.text( + xmin + (cb_res.benefit[meas_n] / norm_fact) / 2, + 0, + " " + meas_n, + horizontalalignment="center", + verticalalignment="bottom", + rotation=90, + fontsize=12, + ) xmin += cb_res.benefit[meas_n] / norm_fact - xy_lim[0] = 
max(xy_lim[0], - max(int(cb_res.tot_climate_risk / norm_fact), - np.array(list(cb_res.benefit.values())).sum() / norm_fact)) + xy_lim[0] = max( + xy_lim[0], + max( + int(cb_res.tot_climate_risk / norm_fact), + np.array(list(cb_res.benefit.values())).sum() / norm_fact, + ), + ) try: - with np.errstate(divide='ignore'): - xy_lim[1] = max(xy_lim[1], int(1 / cb_res.cost_ben_ratio[ - m_names[sort_cb[0]]]) + 1) + with np.errstate(divide="ignore"): + xy_lim[1] = max( + xy_lim[1], + int(1 / cb_res.cost_ben_ratio[m_names[sort_cb[0]]]) + 1, + ) except (ValueError, OverflowError): - xy_lim[1] = max(xy_lim[1], - int(1 / np.array(list(cb_res.cost_ben_ratio.values())).max()) + 1) + xy_lim[1] = max( + xy_lim[1], + int(1 / np.array(list(cb_res.cost_ben_ratio.values())).max()) + 1, + ) axis.set_xlim(0, xy_lim[0]) axis.set_ylim(0, xy_lim[1]) - axis.set_xlabel('NPV averted damage over ' + - str(cb_list[0].future_year - cb_list[0].present_year + 1) + - ' years (' + cb_list[0].unit + ' ' + norm_name + ')') - axis.set_ylabel('Benefit/Cost ratio') + axis.set_xlabel( + "NPV averted damage over " + + str(cb_list[0].future_year - cb_list[0].present_year + 1) + + " years (" + + cb_list[0].unit + + " " + + norm_name + + ")" + ) + axis.set_ylabel("Benefit/Cost ratio") return axis @staticmethod @@ -1113,20 +1467,33 @@ def _plot_averted_arrow(axis, bar_4, tot_benefit, risk_tot, norm_fact, **kwargs) arguments for bar matplotlib function, e.g. alpha=0.5 """ bar_bottom, bar_top = bar_4.get_bbox().get_points() - axis.text(bar_top[0] - (bar_top[0] - bar_bottom[0]) / 2, bar_top[1], - "Averted", ha="center", va="top", rotation=270, size=15) + axis.text( + bar_top[0] - (bar_top[0] - bar_bottom[0]) / 2, + bar_top[1], + "Averted", + ha="center", + va="top", + rotation=270, + size=15, + ) arrow_len = min(tot_benefit / norm_fact, risk_tot / norm_fact) - if 'color' not in kwargs: - kwargs['color'] = 'k' - if 'alpha' not in kwargs: - kwargs['alpha'] = 0.4 - if 'mutation_scale' not in kwargs: - kwargs['mutation_scale'] = 100 - axis.add_patch(FancyArrowPatch( - (bar_top[0] - (bar_top[0] - bar_bottom[0]) / 2, bar_top[1]), - (bar_top[0] - (bar_top[0] - bar_bottom[0]) / 2, risk_tot / norm_fact - arrow_len), - **kwargs)) + if "color" not in kwargs: + kwargs["color"] = "k" + if "alpha" not in kwargs: + kwargs["alpha"] = 0.4 + if "mutation_scale" not in kwargs: + kwargs["mutation_scale"] = 100 + axis.add_patch( + FancyArrowPatch( + (bar_top[0] - (bar_top[0] - bar_bottom[0]) / 2, bar_top[1]), + ( + bar_top[0] - (bar_top[0] - bar_bottom[0]) / 2, + risk_tot / norm_fact - arrow_len, + ), + **kwargs + ) + ) def _print_risk_transfer(self, layer, layer_no, cost_fix, cost_factor): """Print comparative of risk transfer with and without measure @@ -1139,20 +1506,32 @@ def _print_risk_transfer(self, layer, layer_no, cost_fix, cost_factor): expected insurance layer without measure """ norm_fact, norm_name = _norm_values(np.array(list(self.benefit.values())).max()) - norm_name = '(' + self.unit + ' ' + norm_name + ')' - headers = ['Risk transfer', 'Expected damage in \n insurance layer ' + - norm_name, 'Price ' + norm_name] - table = [['without measure', layer_no / norm_fact, - (cost_fix + layer_no * cost_factor) / norm_fact], - ['with measure', layer / norm_fact, - (cost_fix + layer * cost_factor) / norm_fact]] + norm_name = "(" + self.unit + " " + norm_name + ")" + headers = [ + "Risk transfer", + "Expected damage in \n insurance layer " + norm_name, + "Price " + norm_name, + ] + table = [ + [ + "without measure", + layer_no / norm_fact, + (cost_fix + 
layer_no * cost_factor) / norm_fact, + ], + [ + "with measure", + layer / norm_fact, + (cost_fix + layer * cost_factor) / norm_fact, + ], + ] print() print(tabulate(table, headers, tablefmt="simple")) print() @staticmethod def _print_npv(): - print('Net Present Values') + print("Net Present Values") + def _norm_values(value): """Compute normalization value and name @@ -1166,15 +1545,15 @@ def _norm_values(value): norm_fact: float norm_name: float """ - norm_fact = 1. - norm_name = '' + norm_fact = 1.0 + norm_name = "" if value / 1.0e9 > 1: norm_fact = 1.0e9 - norm_name = 'bn' + norm_name = "bn" elif value / 1.0e6 > 1: norm_fact = 1.0e6 - norm_name = 'm' + norm_name = "m" elif value / 1.0e3 > 1: norm_fact = 1.0e3 - norm_name = 'k' + norm_name = "k" return norm_fact, norm_name diff --git a/climada/engine/forecast.py b/climada/engine/forecast.py index 1be74cb4c..f123a67ed 100644 --- a/climada/engine/forecast.py +++ b/climada/engine/forecast.py @@ -22,29 +22,30 @@ __all__ = ["Forecast"] -import logging import datetime as dt +import logging from typing import Dict, Optional -import numpy as np -import matplotlib.pyplot as plt -from matplotlib.patches import Patch -from matplotlib.ticker import PercentFormatter, ScalarFormatter -from matplotlib.colors import ListedColormap, BoundaryNorm + import cartopy.crs as ccrs -from matplotlib import colormaps as cm +import matplotlib.pyplot as plt +import numpy as np import pyproj import shapely from cartopy.io import shapereader +from matplotlib import colormaps as cm +from matplotlib.colors import BoundaryNorm, ListedColormap +from matplotlib.patches import Patch +from matplotlib.ticker import PercentFormatter, ScalarFormatter from mpl_toolkits.axes_grid1 import make_axes_locatable -from climada.hazard import Hazard +import climada.util.coordinates as u_coord +import climada.util.plot as u_plot +from climada.engine import ImpactCalc from climada.entity import Exposures from climada.entity.impact_funcs import ImpactFuncSet -from climada.engine import ImpactCalc -import climada.util.plot as u_plot +from climada.hazard import Hazard from climada.util.config import CONFIG from climada.util.files_handler import to_list -import climada.util.coordinates as u_coord from climada.util.value_representation import ( value_to_monetary_unit as u_value_to_monetary_unit, ) @@ -140,7 +141,7 @@ def __init__( exposure: Exposures, impact_funcs: ImpactFuncSet, haz_model: str = "NWP", - exposure_name: Optional[str] = None + exposure_name: Optional[str] = None, ): """Initialization with hazard, exposure and vulnerability. 
@@ -308,8 +309,9 @@ def calc(self, force_reassign=False): if self.hazard: self.exposure.assign_centroids(self.hazard[0], overwrite=force_reassign) for ind_i, haz_i in enumerate(self.hazard): - self._impact[ind_i] = ImpactCalc(self.exposure, self.vulnerability, haz_i)\ - .impact(save_mat=True, assign_centroids=False) + self._impact[ind_i] = ImpactCalc( + self.exposure, self.vulnerability, haz_i + ).impact(save_mat=True, assign_centroids=False) def plot_imp_map( self, @@ -323,7 +325,7 @@ def plot_imp_map( figsize=(9, 13), adapt_fontsize=True, ): - """ plot a map of the impacts + """plot a map of the impacts Parameters ---------- @@ -378,7 +380,11 @@ def plot_imp_map( "run_start": ( run_datetime.strftime("%d.%m.%Y %HUTC +") + lead_time_str + "d" ), - "explain_text": "mean building damage caused by wind" if explain_str is None else explain_str, + "explain_text": ( + "mean building damage caused by wind" + if explain_str is None + else explain_str + ), "model_text": "CLIMADA IMPACT", } fig, axes = self._plot_imp_map( @@ -539,7 +545,7 @@ def plot_hist( close_fig=False, figsize=(9, 8), ): - """ plot histogram of the forecasted impacts all ensemble members + """plot histogram of the forecasted impacts all ensemble members Parameters ---------- @@ -618,7 +624,7 @@ def plot_hist( axes.xaxis.set_ticks(x_ticks) axes.xaxis.set_ticklabels(x_ticklabels) plt.xticks(rotation=15, horizontalalignment="right") - plt.xlim([(10 ** -0.25) * bins[0], (10 ** 0.25) * bins[-1]]) + plt.xlim([(10**-0.25) * bins[0], (10**0.25) * bins[-1]]) lead_time_str = "{:.0f}".format( self.lead_time(run_datetime).days @@ -629,7 +635,9 @@ def plot_hist( "run_start": ( run_datetime.strftime("%d.%m.%Y %HUTC +") + lead_time_str + "d" ), - "explain_text": ("total building damage") if explain_str is None else explain_str, + "explain_text": ( + ("total building damage") if explain_str is None else explain_str + ), "model_text": "CLIMADA IMPACT", } title_position = { @@ -673,7 +681,8 @@ def plot_hist( 0.85, "mean impact:\n " + self._number_to_str(self._impact[haz_ind].at_event.mean()) - + ' ' + self._impact[haz_ind].unit, + + " " + + self._impact[haz_ind].unit, horizontalalignment="center", verticalalignment="center", transform=axes.transAxes, @@ -780,10 +789,10 @@ def plot_exceedence_prob( run_datetime.strftime("%d.%m.%Y %HUTC +") + lead_time_str + "d" ), "explain_text": ( - "threshold: " + str(threshold) + " " + self._impact[haz_ind].unit - ) - if explain_str is None - else explain_str, + ("threshold: " + str(threshold) + " " + self._impact[haz_ind].unit) + if explain_str is None + else explain_str + ), "model_text": "Exceedance probability map", } cbar_label = "probabilty of reaching threshold" @@ -1102,7 +1111,9 @@ def _plot_warn( decision_dict_functions[aggregation] = np.mean else: raise ValueError( - "Parameter " + aggregation + " of " + "Parameter " + + aggregation + + " of " + "Forecast.plot_warn_map() must eiter be " + "a float between [0..1], which " + "specifys a quantile. or 'sum' or 'mean'." diff --git a/climada/engine/impact.py b/climada/engine/impact.py index f357538e5..58292ab9c 100644 --- a/climada/engine/impact.py +++ b/climada/engine/impact.py @@ -19,42 +19,43 @@ Define Impact and ImpactFreqCurve classes. 
""" -__all__ = ['ImpactFreqCurve', 'Impact'] +__all__ = ["ImpactFreqCurve", "Impact"] -from dataclasses import dataclass, field -import logging import copy import csv -import warnings import datetime as dt -from itertools import zip_longest -from typing import Any, Iterable, Union +import logging +import warnings from collections.abc import Collection +from dataclasses import dataclass, field +from itertools import zip_longest from pathlib import Path +from typing import Any, Iterable, Union import contextily as ctx -import numpy as np -from scipy import sparse -import matplotlib.pyplot as plt +import h5py import matplotlib.animation as animation +import matplotlib.pyplot as plt +import numpy as np import pandas as pd import xlsxwriter -from tqdm import tqdm -import h5py from pyproj import CRS as pyprojCRS from rasterio.crs import CRS as rasterioCRS # pylint: disable=no-name-in-module +from scipy import sparse +from tqdm import tqdm -from climada.entity import Exposures -from climada import CONFIG -from climada.util.constants import DEF_CRS, CMAP_IMPACT, DEF_FREQ_UNIT import climada.util.coordinates as u_coord import climada.util.dates_times as u_dt import climada.util.plot as u_plot +from climada import CONFIG +from climada.entity import Exposures +from climada.util.constants import CMAP_IMPACT, DEF_CRS, DEF_FREQ_UNIT from climada.util.select import get_attributes_with_matching_dimension LOGGER = logging.getLogger(__name__) -class Impact(): + +class Impact: """Impact definition. Compute from an entity (exposures and impact functions) and hazard. @@ -91,21 +92,23 @@ class Impact(): the hazard type of the hazard """ - def __init__(self, - event_id=None, - event_name=None, - date=None, - frequency=None, - frequency_unit=DEF_FREQ_UNIT, - coord_exp=None, - crs=DEF_CRS, - eai_exp=None, - at_event=None, - tot_value=0, - aai_agg=0, - unit='', - imp_mat=None, - haz_type=''): + def __init__( + self, + event_id=None, + event_name=None, + date=None, + frequency=None, + frequency_unit=DEF_FREQ_UNIT, + coord_exp=None, + crs=DEF_CRS, + eai_exp=None, + at_event=None, + tot_value=0, + aai_agg=0, + unit="", + imp_mat=None, + haz_type="", + ): """ Init Impact object @@ -152,7 +155,7 @@ def __init__(self, self.crs = crs.to_wkt() if isinstance(crs, (pyprojCRS, rasterioCRS)) else crs self.eai_exp = np.array([], float) if eai_exp is None else eai_exp self.at_event = np.array([], float) if at_event is None else at_event - self.frequency = np.array([],float) if frequency is None else frequency + self.frequency = np.array([], float) if frequency is None else frequency self.frequency_unit = frequency_unit self._tot_value = tot_value self.aai_agg = aai_agg @@ -160,52 +163,65 @@ def __init__(self, if len(self.event_id) != len(self.event_name): raise AttributeError( - f'Hazard event ids {len(self.event_id)} and event names' - f' {len(self.event_name)} are not of the same length') + f"Hazard event ids {len(self.event_id)} and event names" + f" {len(self.event_name)} are not of the same length" + ) if len(self.event_id) != len(self.date): raise AttributeError( - f'Hazard event ids {len(self.event_id)} and event dates' - f' {len(self.date)} are not of the same length') + f"Hazard event ids {len(self.event_id)} and event dates" + f" {len(self.date)} are not of the same length" + ) if len(self.event_id) != len(self.frequency): raise AttributeError( - f'Hazard event ids {len(self.event_id)} and event frequency' - f' {len(self.frequency)} are not of the same length') + f"Hazard event ids {len(self.event_id)} and event 
frequency" + f" {len(self.frequency)} are not of the same length" + ) if len(self.event_id) != len(self.at_event): raise AttributeError( - f'Number of hazard event ids {len(self.event_id)} is different ' - f'from number of at_event values {len(self.at_event)}') + f"Number of hazard event ids {len(self.event_id)} is different " + f"from number of at_event values {len(self.at_event)}" + ) if len(self.coord_exp) != len(self.eai_exp): - raise AttributeError('Number of exposures points is different from' - 'number of eai_exp values') + raise AttributeError( + "Number of exposures points is different from" + "number of eai_exp values" + ) if imp_mat is not None: self.imp_mat = imp_mat if self.imp_mat.size > 0: if len(self.event_id) != self.imp_mat.shape[0]: raise AttributeError( - f'The number of rows {imp_mat.shape[0]} of the impact ' + - f'matrix is inconsistent with the number {len(event_id)} ' - 'of hazard events.') + f"The number of rows {imp_mat.shape[0]} of the impact " + + f"matrix is inconsistent with the number {len(event_id)} " + "of hazard events." + ) if len(self.coord_exp) != self.imp_mat.shape[1]: raise AttributeError( - f'The number of columns {imp_mat.shape[1]} of the impact' + - f' matrix is inconsistent with the number {len(coord_exp)}' - ' exposures points.') + f"The number of columns {imp_mat.shape[1]} of the impact" + + f" matrix is inconsistent with the number {len(coord_exp)}" + " exposures points." + ) else: self.imp_mat = sparse.csr_matrix(np.empty((0, 0))) - def calc(self, exposures, impact_funcs, hazard, save_mat=False, assign_centroids=True): - """This function is deprecated, use ``ImpactCalc.impact`` instead. - """ - LOGGER.warning("The use of Impact().calc() is deprecated." - " Use ImpactCalc().impact() instead.") - from climada.engine.impact_calc import ImpactCalc # pylint: disable=import-outside-toplevel + def calc( + self, exposures, impact_funcs, hazard, save_mat=False, assign_centroids=True + ): + """This function is deprecated, use ``ImpactCalc.impact`` instead.""" + LOGGER.warning( + "The use of Impact().calc() is deprecated." + " Use ImpactCalc().impact() instead." 
+ ) + from climada.engine.impact_calc import ( + ImpactCalc, # pylint: disable=import-outside-toplevel + ) + impcalc = ImpactCalc(exposures, impact_funcs, hazard) self.__dict__ = impcalc.impact( - save_mat=save_mat, - assign_centroids=assign_centroids + save_mat=save_mat, assign_centroids=assign_centroids ).__dict__ -#TODO: new name + # TODO: new name @classmethod def from_eih(cls, exposures, hazard, at_event, eai_exp, aai_agg, imp_mat=None): """ @@ -238,23 +254,24 @@ def from_eih(cls, exposures, hazard, at_event, eai_exp, aai_agg, imp_mat=None): impact with all risk metrics set based on the given impact matrix """ return cls( - event_id = hazard.event_id, - event_name = hazard.event_name, - date = hazard.date, - frequency = hazard.frequency, - frequency_unit = hazard.frequency_unit, - coord_exp = np.stack([exposures.gdf['latitude'].values, - exposures.gdf['longitude'].values], - axis=1), - crs = exposures.crs, - unit = exposures.value_unit, - tot_value = exposures.centroids_total_value(hazard), - eai_exp = eai_exp, - at_event = at_event, - aai_agg = aai_agg, - imp_mat = imp_mat if imp_mat is not None else sparse.csr_matrix((0, 0)), - haz_type = hazard.haz_type, - ) + event_id=hazard.event_id, + event_name=hazard.event_name, + date=hazard.date, + frequency=hazard.frequency, + frequency_unit=hazard.frequency_unit, + coord_exp=np.stack( + [exposures.gdf["latitude"].values, exposures.gdf["longitude"].values], + axis=1, + ), + crs=exposures.crs, + unit=exposures.value_unit, + tot_value=exposures.centroids_total_value(hazard), + eai_exp=eai_exp, + at_event=at_event, + aai_agg=aai_agg, + imp_mat=imp_mat if imp_mat is not None else sparse.csr_matrix((0, 0)), + haz_type=hazard.haz_type, + ) @property def tot_value(self): @@ -264,19 +281,23 @@ def tot_value(self): Use :py:meth:`climada.entity.exposures.base.Exposures.affected_total_value` instead. """ - LOGGER.warning("The Impact.tot_value attribute is deprecated." - "Use Exposures.affected_total_value to calculate the affected " - "total exposure value based on a specific hazard intensity " - "threshold") + LOGGER.warning( + "The Impact.tot_value attribute is deprecated." + "Use Exposures.affected_total_value to calculate the affected " + "total exposure value based on a specific hazard intensity " + "threshold" + ) return self._tot_value @tot_value.setter def tot_value(self, value): """Set the total exposure value close to a hazard""" - LOGGER.warning("The Impact.tot_value attribute is deprecated." - "Use Exposures.affected_total_value to calculate the affected " - "total exposure value based on a specific hazard intensity " - "threshold") + LOGGER.warning( + "The Impact.tot_value attribute is deprecated." + "Use Exposures.affected_total_value to calculate the affected " + "total exposure value based on a specific hazard intensity " + "threshold" + ) self._tot_value = value def transfer_risk(self, attachment, cover): @@ -336,7 +357,7 @@ def residual_risk(self, attachment, cover): residual_aai_agg = np.sum(residual_at_event * self.frequency) return residual_at_event, residual_aai_agg -#TODO: rewrite and deprecate method + # TODO: rewrite and deprecate method def calc_risk_transfer(self, attachment, cover): """Compute traaditional risk transfer over impact. 
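# Illustrative sketch (not part of this patch): the attachment/cover layer
# arithmetic that transfer_risk()/residual_risk() above are built on, with made-up
# numbers standing in for the at_event and frequency attributes of a computed Impact.
import numpy as np

at_event = np.array([0.0, 50.0, 200.0, 1000.0])   # hypothetical event impacts
frequency = np.array([0.10, 0.05, 0.02, 0.01])    # hypothetical event frequencies
attachment, cover = 100.0, 500.0                  # insurance layer parameters

transfer_at_event = np.minimum(np.maximum(at_event - attachment, 0.0), cover)
residual_at_event = at_event - transfer_at_event
transfer_aai_agg = np.sum(transfer_at_event * frequency)  # expected transferred impact
residual_aai_agg = np.sum(residual_at_event * frequency)  # expected residual impact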
Returns new impact with risk transfer applied and the insurance layer resulting @@ -392,8 +413,7 @@ def impact_per_year(self, all_years=True, year_range=None): if year_range is None: year_range = [] - orig_year = np.array([dt.datetime.fromordinal(date).year - for date in self.date]) + orig_year = np.array([dt.datetime.fromordinal(date).year for date in self.date]) if orig_year.size == 0 and len(year_range) == 0: return dict() if orig_year.size == 0 or (len(year_range) > 0 and all_years): @@ -461,13 +481,15 @@ def impact_at_reg(self, agg_regions=None): return at_reg_event - def calc_impact_year_set(self,all_years=True, year_range=None): + def calc_impact_year_set(self, all_years=True, year_range=None): """This function is deprecated, use Impact.impact_per_year instead.""" - LOGGER.warning("The use of Impact.calc_impact_year_set is deprecated." - "Use Impact.impact_per_year instead.") + LOGGER.warning( + "The use of Impact.calc_impact_year_set is deprecated." + "Use Impact.impact_per_year instead." + ) return self.impact_per_year(all_years=all_years, year_range=year_range) -#TODO: rewrite and deprecate method + # TODO: rewrite and deprecate method def local_exceedance_imp(self, return_periods=(25, 50, 100, 250)): """Compute exceedance impact map for given return periods. Requires attribute imp_mat. @@ -482,26 +504,35 @@ def local_exceedance_imp(self, return_periods=(25, 50, 100, 250)): ------- np.array """ - LOGGER.info('Computing exceedance impact map for return periods: %s', - return_periods) + LOGGER.info( + "Computing exceedance impact map for return periods: %s", return_periods + ) if self.imp_mat.size == 0: - raise ValueError('Attribute imp_mat is empty. Recalculate Impact' - 'instance with parameter save_mat=True') + raise ValueError( + "Attribute imp_mat is empty. 
Recalculate Impact" + "instance with parameter save_mat=True" + ) num_cen = self.imp_mat.shape[1] imp_stats = np.zeros((len(return_periods), num_cen)) cen_step = CONFIG.max_matrix_size.int() // self.imp_mat.shape[0] if not cen_step: - raise ValueError('Increase max_matrix_size configuration parameter to > ' - f'{self.imp_mat.shape[0]}') + raise ValueError( + "Increase max_matrix_size configuration parameter to > " + f"{self.imp_mat.shape[0]}" + ) # separte in chunks chk = -1 for chk in range(int(num_cen / cen_step)): - self._loc_return_imp(np.array(return_periods), - self.imp_mat[:, chk * cen_step:(chk + 1) * cen_step].toarray(), - imp_stats[:, chk * cen_step:(chk + 1) * cen_step]) - self._loc_return_imp(np.array(return_periods), - self.imp_mat[:, (chk + 1) * cen_step:].toarray(), - imp_stats[:, (chk + 1) * cen_step:]) + self._loc_return_imp( + np.array(return_periods), + self.imp_mat[:, chk * cen_step : (chk + 1) * cen_step].toarray(), + imp_stats[:, chk * cen_step : (chk + 1) * cen_step], + ) + self._loc_return_imp( + np.array(return_periods), + self.imp_mat[:, (chk + 1) * cen_step :].toarray(), + imp_stats[:, (chk + 1) * cen_step :], + ) return imp_stats @@ -536,21 +567,29 @@ def calc_freq_curve(self, return_per=None): impact=ifc_impact, unit=self.unit, frequency_unit=self.frequency_unit, - label='Exceedance frequency curve' + label="Exceedance frequency curve", ) def _eai_title(self): - if self.frequency_unit in ['1/year', 'annual', '1/y', '1/a']: - return 'Expected annual impact' - if self.frequency_unit in ['1/day', 'daily', '1/d']: - return 'Expected daily impact' - if self.frequency_unit in ['1/month', 'monthly', '1/m']: - return 'Expected monthly impact' - return f'Expected impact ({self.frequency_unit})' - - def plot_scatter_eai_exposure(self, mask=None, ignore_zero=False, - pop_name=True, buffer=0.0, extend='neither', - axis=None, adapt_fontsize=True, **kwargs): + if self.frequency_unit in ["1/year", "annual", "1/y", "1/a"]: + return "Expected annual impact" + if self.frequency_unit in ["1/day", "daily", "1/d"]: + return "Expected daily impact" + if self.frequency_unit in ["1/month", "monthly", "1/m"]: + return "Expected monthly impact" + return f"Expected impact ({self.frequency_unit})" + + def plot_scatter_eai_exposure( + self, + mask=None, + ignore_zero=False, + pop_name=True, + buffer=0.0, + extend="neither", + axis=None, + adapt_fontsize=True, + **kwargs, + ): """Plot scatter expected impact within a period of 1/frequency_unit of each exposure. Parameters @@ -580,18 +619,34 @@ def plot_scatter_eai_exposure(self, mask=None, ignore_zero=False, ------- cartopy.mpl.geoaxes.GeoAxesSubplot """ - if 'cmap' not in kwargs: - kwargs['cmap'] = CMAP_IMPACT + if "cmap" not in kwargs: + kwargs["cmap"] = CMAP_IMPACT eai_exp = self._build_exp() - axis = eai_exp.plot_scatter(mask, ignore_zero, pop_name, buffer, - extend, axis=axis, adapt_fontsize=adapt_fontsize, **kwargs) + axis = eai_exp.plot_scatter( + mask, + ignore_zero, + pop_name, + buffer, + extend, + axis=axis, + adapt_fontsize=adapt_fontsize, + **kwargs, + ) axis.set_title(self._eai_title()) return axis - def plot_hexbin_eai_exposure(self, mask=None, ignore_zero=False, - pop_name=True, buffer=0.0, extend='neither', - axis=None, adapt_fontsize=True, **kwargs): + def plot_hexbin_eai_exposure( + self, + mask=None, + ignore_zero=False, + pop_name=True, + buffer=0.0, + extend="neither", + axis=None, + adapt_fontsize=True, + **kwargs, + ): """Plot hexbin expected impact within a period of 1/frequency_unit of each exposure. 
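# Illustrative sketch (an assumption about the underlying construction, not library
# code): calc_freq_curve() above derives an exceedance frequency curve by sorting
# event impacts in descending order, cumulating their frequencies and inverting the
# cumulative frequency into return periods.
import numpy as np

at_event = np.array([10.0, 500.0, 80.0, 1200.0])  # hypothetical event impacts
frequency = np.array([0.20, 0.02, 0.10, 0.01])    # hypothetical event frequencies

desc = np.argsort(at_event)[::-1]             # largest impact first
exceed_freq = np.cumsum(frequency[desc])      # frequency of reaching each impact level
return_per = 1.0 / exceed_freq[::-1]          # return periods, ascending
impact_curve = at_event[desc][::-1]           # impacts sorted ascending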
Parameters @@ -621,19 +676,34 @@ def plot_hexbin_eai_exposure(self, mask=None, ignore_zero=False, ------- cartopy.mpl.geoaxes.GeoAxesSubplot """ - if 'cmap' not in kwargs: - kwargs['cmap'] = CMAP_IMPACT + if "cmap" not in kwargs: + kwargs["cmap"] = CMAP_IMPACT eai_exp = self._build_exp() - axis = eai_exp.plot_hexbin(mask, ignore_zero, pop_name, buffer, - extend, axis=axis, adapt_fontsize=adapt_fontsize, **kwargs) + axis = eai_exp.plot_hexbin( + mask, + ignore_zero, + pop_name, + buffer, + extend, + axis=axis, + adapt_fontsize=adapt_fontsize, + **kwargs, + ) axis.set_title(self._eai_title()) return axis - def plot_raster_eai_exposure(self, res=None, raster_res=None, save_tiff=None, - raster_f=lambda x: np.log10((np.fmax(x + 1, 1))), - label='value (log10)', axis=None, adapt_fontsize=True, - **kwargs): + def plot_raster_eai_exposure( + self, + res=None, + raster_res=None, + save_tiff=None, + raster_f=lambda x: np.log10((np.fmax(x + 1, 1))), + label="value (log10)", + axis=None, + adapt_fontsize=True, + **kwargs, + ): """Plot raster expected impact within a period of 1/frequency_unit of each exposure. Parameters @@ -666,15 +736,31 @@ def plot_raster_eai_exposure(self, res=None, raster_res=None, save_tiff=None, # we need to set geometry points because the `plot_raster` method accesses the # exposures' `gdf.crs` property, which raises an error when geometry is not set eai_exp.set_geometry_points() - axis = eai_exp.plot_raster(res, raster_res, save_tiff, raster_f, - label, axis=axis, adapt_fontsize=adapt_fontsize, **kwargs) + axis = eai_exp.plot_raster( + res, + raster_res, + save_tiff, + raster_f, + label, + axis=axis, + adapt_fontsize=adapt_fontsize, + **kwargs, + ) axis.set_title(self._eai_title()) return axis - def plot_basemap_eai_exposure(self, mask=None, ignore_zero=False, pop_name=True, - buffer=0.0, extend='neither', zoom=10, - url=ctx.providers.CartoDB.Positron, - axis=None, **kwargs): + def plot_basemap_eai_exposure( + self, + mask=None, + ignore_zero=False, + pop_name=True, + buffer=0.0, + extend="neither", + zoom=10, + url=ctx.providers.CartoDB.Positron, + axis=None, + **kwargs, + ): """Plot basemap expected impact of each exposure within a period of 1/frequency_unit. Parameters @@ -705,17 +791,27 @@ def plot_basemap_eai_exposure(self, mask=None, ignore_zero=False, pop_name=True, ------- cartopy.mpl.geoaxes.GeoAxesSubplot """ - if 'cmap' not in kwargs: - kwargs['cmap'] = CMAP_IMPACT + if "cmap" not in kwargs: + kwargs["cmap"] = CMAP_IMPACT eai_exp = self._build_exp() - axis = eai_exp.plot_basemap(mask, ignore_zero, pop_name, buffer, - extend, zoom, url, axis=axis, **kwargs) + axis = eai_exp.plot_basemap( + mask, ignore_zero, pop_name, buffer, extend, zoom, url, axis=axis, **kwargs + ) axis.set_title(self._eai_title()) return axis - def plot_hexbin_impact_exposure(self, event_id=1, mask=None, ignore_zero=False, - pop_name=True, buffer=0.0, extend='neither', - axis=None, adapt_fontsize=True, **kwargs): + def plot_hexbin_impact_exposure( + self, + event_id=1, + mask=None, + ignore_zero=False, + pop_name=True, + buffer=0.0, + extend="neither", + axis=None, + adapt_fontsize=True, + **kwargs, + ): """Plot hexbin impact of an event at each exposure. Requires attribute imp_mat. @@ -750,22 +846,39 @@ def plot_hexbin_impact_exposure(self, event_id=1, mask=None, ignore_zero=False, cartopy.mpl.geoaxes.GeoAxesSubplot """ if self.imp_mat.size == 0: - raise ValueError('Attribute imp_mat is empty. 
Recalculate Impact' - 'instance with parameter save_mat=True') - if 'cmap' not in kwargs: - kwargs['cmap'] = CMAP_IMPACT + raise ValueError( + "Attribute imp_mat is empty. Recalculate Impact" + "instance with parameter save_mat=True" + ) + if "cmap" not in kwargs: + kwargs["cmap"] = CMAP_IMPACT impact_at_events_exp = self._build_exp_event(event_id) - axis = impact_at_events_exp.plot_hexbin(mask, ignore_zero, pop_name, - buffer, extend, axis=axis, - adapt_fontsize=adapt_fontsize, - **kwargs) + axis = impact_at_events_exp.plot_hexbin( + mask, + ignore_zero, + pop_name, + buffer, + extend, + axis=axis, + adapt_fontsize=adapt_fontsize, + **kwargs, + ) return axis - def plot_basemap_impact_exposure(self, event_id=1, mask=None, ignore_zero=False, - pop_name=True, buffer=0.0, extend='neither', zoom=10, - url=ctx.providers.CartoDB.Positron, - axis=None, **kwargs): + def plot_basemap_impact_exposure( + self, + event_id=1, + mask=None, + ignore_zero=False, + pop_name=True, + buffer=0.0, + extend="neither", + zoom=10, + url=ctx.providers.CartoDB.Positron, + axis=None, + **kwargs, + ): """Plot basemap impact of an event at each exposure. Requires attribute imp_mat. @@ -801,21 +914,30 @@ def plot_basemap_impact_exposure(self, event_id=1, mask=None, ignore_zero=False, cartopy.mpl.geoaxes.GeoAxesSubplot """ if self.imp_mat.size == 0: - raise ValueError('Attribute imp_mat is empty. Recalculate Impact' - 'instance with parameter save_mat=True') + raise ValueError( + "Attribute imp_mat is empty. Recalculate Impact" + "instance with parameter save_mat=True" + ) if event_id not in self.event_id: - raise ValueError(f'Event ID {event_id} not found') - if 'cmap' not in kwargs: - kwargs['cmap'] = CMAP_IMPACT + raise ValueError(f"Event ID {event_id} not found") + if "cmap" not in kwargs: + kwargs["cmap"] = CMAP_IMPACT impact_at_events_exp = self._build_exp_event(event_id) - axis = impact_at_events_exp.plot_basemap(mask, ignore_zero, pop_name, - buffer, extend, zoom, url, axis=axis, **kwargs) + axis = impact_at_events_exp.plot_basemap( + mask, ignore_zero, pop_name, buffer, extend, zoom, url, axis=axis, **kwargs + ) return axis - def plot_rp_imp(self, return_periods=(25, 50, 100, 250), - log10_scale=True, smooth=True, axis=None, **kwargs): + def plot_rp_imp( + self, + return_periods=(25, 50, 100, 250), + log10_scale=True, + smooth=True, + axis=None, + **kwargs, + ): """Compute and plot exceedance impact maps for different return periods. Calls local_exceedance_imp. @@ -839,26 +961,35 @@ def plot_rp_imp(self, return_periods=(25, 50, 100, 250), """ imp_stats = self.local_exceedance_imp(np.array(return_periods)) if imp_stats.size == 0: - raise ValueError('Error: Attribute imp_mat is empty. Recalculate Impact' - 'instance with parameter save_mat=True') + raise ValueError( + "Error: Attribute imp_mat is empty. 
Recalculate Impact" + "instance with parameter save_mat=True" + ) if log10_scale: if np.min(imp_stats) < 0: imp_stats_log = np.log10(abs(imp_stats) + 1) - colbar_name = 'Log10(abs(Impact)+1) (' + self.unit + ')' + colbar_name = "Log10(abs(Impact)+1) (" + self.unit + ")" elif np.min(imp_stats) < 1: imp_stats_log = np.log10(imp_stats + 1) - colbar_name = 'Log10(Impact+1) (' + self.unit + ')' + colbar_name = "Log10(Impact+1) (" + self.unit + ")" else: imp_stats_log = np.log10(imp_stats) - colbar_name = 'Log10(Impact) (' + self.unit + ')' + colbar_name = "Log10(Impact) (" + self.unit + ")" else: imp_stats_log = imp_stats - colbar_name = 'Impact (' + self.unit + ')' + colbar_name = "Impact (" + self.unit + ")" title = list() for ret in return_periods: - title.append('Return period: ' + str(ret) + ' years') - axis = u_plot.geo_im_from_array(imp_stats_log, self.coord_exp, - colbar_name, title, smooth=smooth, axes=axis, **kwargs) + title.append("Return period: " + str(ret) + " years") + axis = u_plot.geo_im_from_array( + imp_stats_log, + self.coord_exp, + colbar_name, + title, + smooth=smooth, + axes=axis, + **kwargs, + ) return axis, imp_stats @@ -870,18 +1001,43 @@ def write_csv(self, file_name): file_name : str absolute path of the file """ - LOGGER.info('Writing %s', file_name) - with open(file_name, "w", encoding='utf-8') as imp_file: + LOGGER.info("Writing %s", file_name) + with open(file_name, "w", encoding="utf-8") as imp_file: imp_wr = csv.writer(imp_file) - imp_wr.writerow(["haz_type", "unit", "tot_value", "aai_agg", "event_id", - "event_name", "event_date", "event_frequency", "frequency_unit", - "at_event", "eai_exp", "exp_lat", "exp_lon", "exp_crs"]) - csv_data = [[self.haz_type], - [self.unit], [self._tot_value], [self.aai_agg], - self.event_id, self.event_name, self.date, - self.frequency, [self.frequency_unit], self.at_event, - self.eai_exp, self.coord_exp[:, 0], self.coord_exp[:, 1], - [str(self.crs)]] + imp_wr.writerow( + [ + "haz_type", + "unit", + "tot_value", + "aai_agg", + "event_id", + "event_name", + "event_date", + "event_frequency", + "frequency_unit", + "at_event", + "eai_exp", + "exp_lat", + "exp_lon", + "exp_crs", + ] + ) + csv_data = [ + [self.haz_type], + [self.unit], + [self._tot_value], + [self.aai_agg], + self.event_id, + self.event_name, + self.date, + self.frequency, + [self.frequency_unit], + self.at_event, + self.eai_exp, + self.coord_exp[:, 0], + self.coord_exp[:, 1], + [str(self.crs)], + ] for values in zip_longest(*csv_data): imp_wr.writerow(values) @@ -893,7 +1049,8 @@ def write_excel(self, file_name): file_name : str absolute path of the file """ - LOGGER.info('Writing %s', file_name) + LOGGER.info("Writing %s", file_name) + def write_col(i_col, imp_ws, xls_data): """Write one measure""" row_ini = 1 @@ -904,9 +1061,22 @@ def write_col(i_col, imp_ws, xls_data): imp_wb = xlsxwriter.Workbook(file_name) imp_ws = imp_wb.add_worksheet() - header = ["haz_type", "unit", "tot_value", "aai_agg", "event_id", - "event_name", "event_date", "event_frequency", "frequency_unit", - "at_event", "eai_exp", "exp_lat", "exp_lon", "exp_crs"] + header = [ + "haz_type", + "unit", + "tot_value", + "aai_agg", + "event_id", + "event_name", + "event_date", + "event_frequency", + "frequency_unit", + "at_event", + "eai_exp", + "exp_lat", + "exp_lon", + "exp_crs", + ] for icol, head_dat in enumerate(header): imp_ws.write(0, icol, head_dat) data = [str(self.haz_type)] @@ -927,7 +1097,7 @@ def write_col(i_col, imp_ws, xls_data): imp_wb.close() - def write_hdf5(self, file_path: Union[str, 
Path], dense_imp_mat: bool=False): + def write_hdf5(self, file_path: Union[str, Path], dense_imp_mat: bool = False): """Write the data stored in this object into an H5 file. Try to write all attributes of this class into H5 datasets or attributes. @@ -1043,9 +1213,14 @@ def write_csr(group, name, value): def write_sparse_csr(self, file_name): """Write imp_mat matrix in numpy's npz format.""" - LOGGER.info('Writing %s', file_name) - np.savez(file_name, data=self.imp_mat.data, indices=self.imp_mat.indices, - indptr=self.imp_mat.indptr, shape=self.imp_mat.shape) + LOGGER.info("Writing %s", file_name) + np.savez( + file_name, + data=self.imp_mat.data, + indices=self.imp_mat.indices, + indptr=self.imp_mat.indptr, + shape=self.imp_mat.shape, + ) @staticmethod def read_sparse_csr(file_name): @@ -1059,10 +1234,11 @@ def read_sparse_csr(file_name): ------- sparse.csr_matrix """ - LOGGER.info('Reading %s', file_name) + LOGGER.info("Reading %s", file_name) loader = np.load(file_name) return sparse.csr_matrix( - (loader['data'], loader['indices'], loader['indptr']), shape=loader['shape']) + (loader["data"], loader["indices"], loader["indptr"]), shape=loader["shape"] + ) @classmethod def from_csv(cls, file_name): @@ -1079,27 +1255,28 @@ def from_csv(cls, file_name): Impact from csv file """ # pylint: disable=no-member - LOGGER.info('Reading %s', file_name) + LOGGER.info("Reading %s", file_name) imp_df = pd.read_csv(file_name) - imp = cls(haz_type=imp_df['haz_type'][0]) - imp.unit = imp_df['unit'][0] - imp.tot_value = imp_df['tot_value'][0] - imp.aai_agg = imp_df['aai_agg'][0] - imp.event_id = imp_df['event_id'][~np.isnan(imp_df['event_id'])].values + imp = cls(haz_type=imp_df["haz_type"][0]) + imp.unit = imp_df["unit"][0] + imp.tot_value = imp_df["tot_value"][0] + imp.aai_agg = imp_df["aai_agg"][0] + imp.event_id = imp_df["event_id"][~np.isnan(imp_df["event_id"])].values num_ev = imp.event_id.size - imp.event_name = imp_df['event_name'][:num_ev].values.tolist() - imp.date = imp_df['event_date'][:num_ev].values - imp.at_event = imp_df['at_event'][:num_ev].values - imp.frequency = imp_df['event_frequency'][:num_ev].values - imp.frequency_unit = imp_df['frequency_unit'][0] if 'frequency_unit' in imp_df \ - else DEF_FREQ_UNIT - imp.eai_exp = imp_df['eai_exp'][~np.isnan(imp_df['eai_exp'])].values + imp.event_name = imp_df["event_name"][:num_ev].values.tolist() + imp.date = imp_df["event_date"][:num_ev].values + imp.at_event = imp_df["at_event"][:num_ev].values + imp.frequency = imp_df["event_frequency"][:num_ev].values + imp.frequency_unit = ( + imp_df["frequency_unit"][0] if "frequency_unit" in imp_df else DEF_FREQ_UNIT + ) + imp.eai_exp = imp_df["eai_exp"][~np.isnan(imp_df["eai_exp"])].values num_exp = imp.eai_exp.size imp.coord_exp = np.zeros((num_exp, 2)) - imp.coord_exp[:, 0] = imp_df['exp_lat'][:num_exp] - imp.coord_exp[:, 1] = imp_df['exp_lon'][:num_exp] + imp.coord_exp[:, 0] = imp_df["exp_lat"][:num_exp] + imp.coord_exp[:, 1] = imp_df["exp_lon"][:num_exp] try: - imp.crs = u_coord.to_crs_user_input(imp_df['exp_crs'].values[0]) + imp.crs = u_coord.to_crs_user_input(imp_df["exp_crs"].values[0]) except AttributeError: imp.crs = DEF_CRS @@ -1107,8 +1284,9 @@ def from_csv(cls, file_name): def read_csv(self, *args, **kwargs): """This function is deprecated, use Impact.from_csv instead.""" - LOGGER.warning("The use of Impact.read_csv is deprecated." - "Use Impact.from_csv instead.") + LOGGER.warning( + "The use of Impact.read_csv is deprecated." "Use Impact.from_csv instead." 
+ ) self.__dict__ = Impact.from_csv(*args, **kwargs).__dict__ @classmethod @@ -1125,27 +1303,29 @@ def from_excel(cls, file_name): imp : climada.engine.impact.Impact Impact from excel file """ - LOGGER.info('Reading %s', file_name) + LOGGER.info("Reading %s", file_name) dfr = pd.read_excel(file_name) - imp = cls(haz_type=str(dfr['haz_type'][0])) - - imp.unit = dfr['unit'][0] - imp.tot_value = dfr['tot_value'][0] - imp.aai_agg = dfr['aai_agg'][0] - - imp.event_id = dfr['event_id'][~np.isnan(dfr['event_id'].values)].values - imp.event_name = dfr['event_name'][:imp.event_id.size].values - imp.date = dfr['event_date'][:imp.event_id.size].values - imp.frequency = dfr['event_frequency'][:imp.event_id.size].values - imp.frequency_unit = dfr['frequency_unit'][0] if 'frequency_unit' in dfr else DEF_FREQ_UNIT - imp.at_event = dfr['at_event'][:imp.event_id.size].values + imp = cls(haz_type=str(dfr["haz_type"][0])) + + imp.unit = dfr["unit"][0] + imp.tot_value = dfr["tot_value"][0] + imp.aai_agg = dfr["aai_agg"][0] + + imp.event_id = dfr["event_id"][~np.isnan(dfr["event_id"].values)].values + imp.event_name = dfr["event_name"][: imp.event_id.size].values + imp.date = dfr["event_date"][: imp.event_id.size].values + imp.frequency = dfr["event_frequency"][: imp.event_id.size].values + imp.frequency_unit = ( + dfr["frequency_unit"][0] if "frequency_unit" in dfr else DEF_FREQ_UNIT + ) + imp.at_event = dfr["at_event"][: imp.event_id.size].values - imp.eai_exp = dfr['eai_exp'][~np.isnan(dfr['eai_exp'].values)].values + imp.eai_exp = dfr["eai_exp"][~np.isnan(dfr["eai_exp"].values)].values imp.coord_exp = np.zeros((imp.eai_exp.size, 2)) - imp.coord_exp[:, 0] = dfr['exp_lat'].values[:imp.eai_exp.size] - imp.coord_exp[:, 1] = dfr['exp_lon'].values[:imp.eai_exp.size] + imp.coord_exp[:, 0] = dfr["exp_lat"].values[: imp.eai_exp.size] + imp.coord_exp[:, 1] = dfr["exp_lon"].values[: imp.eai_exp.size] try: - imp.crs = u_coord.to_csr_user_input(dfr['exp_crs'].values[0]) + imp.crs = u_coord.to_csr_user_input(dfr["exp_crs"].values[0]) except AttributeError: imp.crs = DEF_CRS @@ -1153,8 +1333,10 @@ def from_excel(cls, file_name): def read_excel(self, *args, **kwargs): """This function is deprecated, use Impact.from_excel instead.""" - LOGGER.warning("The use of Impact.read_excel is deprecated." - "Use Impact.from_excel instead.") + LOGGER.warning( + "The use of Impact.read_excel is deprecated." + "Use Impact.from_excel instead." + ) self.__dict__ = Impact.from_excel(*args, **kwargs).__dict__ @classmethod @@ -1259,10 +1441,18 @@ def from_hdf5(cls, file_path: Union[str, Path]): return cls(**kwargs) @staticmethod - def video_direct_impact(exp, impf_set, haz_list, file_name='', - writer=animation.PillowWriter(bitrate=500), - imp_thresh=0, args_exp=None, args_imp=None, - ignore_zero=False, pop_name=False): + def video_direct_impact( + exp, + impf_set, + haz_list, + file_name="", + writer=animation.PillowWriter(bitrate=500), + imp_thresh=0, + args_exp=None, + args_imp=None, + ignore_zero=False, + pop_name=False, + ): """ Computes and generates video of accumulated impact per input events over exposure. 
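# Hedged usage sketch of the writers and readers formatted in this hunk; the Impact
# below is filled with made-up values and the file names are placeholders.
import numpy as np
from climada.engine import Impact

imp = Impact(
    event_id=np.array([1, 2]),
    event_name=["ev1", "ev2"],
    date=np.array([730120, 730485]),           # proleptic Gregorian ordinals
    frequency=np.array([0.10, 0.05]),
    coord_exp=np.array([[45.0, 8.0], [46.0, 9.0]]),
    eai_exp=np.array([1.0, 2.0]),
    at_event=np.array([10.0, 20.0]),
    aai_agg=3.0,
    unit="USD",
    haz_type="TC",
)
imp.write_csv("impact.csv")                    # placeholder path
imp_read = Impact.from_csv("impact.csv")       # round trip back into an Impact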
@@ -1299,7 +1489,9 @@ def video_direct_impact(exp, impf_set, haz_list, file_name='', ------- list of Impact """ - from climada.engine.impact_calc import ImpactCalc # pylint: disable=import-outside-toplevel + from climada.engine.impact_calc import ( + ImpactCalc, # pylint: disable=import-outside-toplevel + ) if args_exp is None: args_exp = dict() @@ -1311,7 +1503,9 @@ def video_direct_impact(exp, impf_set, haz_list, file_name='', # assign centroids once for all exp.assign_centroids(haz_list[0]) for i_time, _ in enumerate(haz_list): - imp_tmp = ImpactCalc(exp, impf_set, haz_list[i_time]).impact(assign_centroids=False) + imp_tmp = ImpactCalc(exp, impf_set, haz_list[i_time]).impact( + assign_centroids=False + ) imp_arr = np.maximum(imp_arr, imp_tmp.eai_exp) # remove not impacted exposures save_exp = imp_arr > imp_thresh @@ -1320,63 +1514,84 @@ def video_direct_impact(exp, impf_set, haz_list, file_name='', imp_list.append(imp_tmp) exp_list.append(~save_exp) - v_lim = [np.array([haz.intensity.min() for haz in haz_list]).min(), - np.array([haz.intensity.max() for haz in haz_list]).max()] + v_lim = [ + np.array([haz.intensity.min() for haz in haz_list]).min(), + np.array([haz.intensity.max() for haz in haz_list]).max(), + ] - if 'vmin' not in args_exp: - args_exp['vmin'] = exp.gdf['value'].values.min() + if "vmin" not in args_exp: + args_exp["vmin"] = exp.gdf["value"].values.min() - if 'vmin' not in args_imp: - args_imp['vmin'] = np.array([imp.eai_exp.min() for imp in imp_list - if imp.eai_exp.size]).min() + if "vmin" not in args_imp: + args_imp["vmin"] = np.array( + [imp.eai_exp.min() for imp in imp_list if imp.eai_exp.size] + ).min() - if 'vmax' not in args_exp: - args_exp['vmax'] = exp.gdf['value'].values.max() + if "vmax" not in args_exp: + args_exp["vmax"] = exp.gdf["value"].values.max() - if 'vmax' not in args_imp: - args_imp['vmax'] = np.array([imp.eai_exp.max() for imp in imp_list - if imp.eai_exp.size]).max() + if "vmax" not in args_imp: + args_imp["vmax"] = np.array( + [imp.eai_exp.max() for imp in imp_list if imp.eai_exp.size] + ).max() - if 'cmap' not in args_exp: - args_exp['cmap'] = 'winter_r' - - if 'cmap' not in args_imp: - args_imp['cmap'] = 'autumn_r' + if "cmap" not in args_exp: + args_exp["cmap"] = "winter_r" + if "cmap" not in args_imp: + args_imp["cmap"] = "autumn_r" plot_raster = False if exp.meta: plot_raster = True def run(i_time): - haz_list[i_time].plot_intensity(1, axis=axis, cmap='Greys', vmin=v_lim[0], - vmax=v_lim[1], alpha=0.8) + haz_list[i_time].plot_intensity( + 1, axis=axis, cmap="Greys", vmin=v_lim[0], vmax=v_lim[1], alpha=0.8 + ) if plot_raster: - exp.plot_hexbin(axis=axis, mask=exp_list[i_time], ignore_zero=ignore_zero, - pop_name=pop_name, **args_exp) + exp.plot_hexbin( + axis=axis, + mask=exp_list[i_time], + ignore_zero=ignore_zero, + pop_name=pop_name, + **args_exp, + ) if imp_list[i_time].coord_exp.size: - imp_list[i_time].plot_hexbin_eai_exposure(axis=axis, pop_name=pop_name, - **args_imp) + imp_list[i_time].plot_hexbin_eai_exposure( + axis=axis, pop_name=pop_name, **args_imp + ) fig.delaxes(fig.axes[1]) else: - exp.plot_scatter(axis=axis, mask=exp_list[i_time], ignore_zero=ignore_zero, - pop_name=pop_name, **args_exp) + exp.plot_scatter( + axis=axis, + mask=exp_list[i_time], + ignore_zero=ignore_zero, + pop_name=pop_name, + **args_exp, + ) if imp_list[i_time].coord_exp.size: - imp_list[i_time].plot_scatter_eai_exposure(axis=axis, pop_name=pop_name, - **args_imp) + imp_list[i_time].plot_scatter_eai_exposure( + axis=axis, pop_name=pop_name, **args_imp + ) 
fig.delaxes(fig.axes[1]) fig.delaxes(fig.axes[1]) fig.delaxes(fig.axes[1]) - axis.set_xlim(haz_list[-1].centroids.lon.min(), haz_list[-1].centroids.lon.max()) - axis.set_ylim(haz_list[-1].centroids.lat.min(), haz_list[-1].centroids.lat.max()) + axis.set_xlim( + haz_list[-1].centroids.lon.min(), haz_list[-1].centroids.lon.max() + ) + axis.set_ylim( + haz_list[-1].centroids.lat.min(), haz_list[-1].centroids.lat.max() + ) axis.set_title(haz_list[i_time].event_name[0]) pbar.update() if file_name: - LOGGER.info('Generating video %s', file_name) + LOGGER.info("Generating video %s", file_name) fig, axis, _fontsize = u_plot.make_map() - ani = animation.FuncAnimation(fig, run, frames=len(haz_list), - interval=500, blit=False) + ani = animation.FuncAnimation( + fig, run, frames=len(haz_list), interval=500, blit=False + ) pbar = tqdm(total=len(haz_list)) fig.tight_layout() ani.save(file_name, writer=writer) @@ -1384,7 +1599,7 @@ def run(i_time): return imp_list -#TODO: rewrite and deprecate method + # TODO: rewrite and deprecate method def _loc_return_imp(self, return_periods, imp, exc_imp): """Compute local exceedence impact for given return period. @@ -1411,20 +1626,20 @@ def _loc_return_imp(self, return_periods, imp, exc_imp): for cen_idx in range(imp.shape[1]): exc_imp[:, cen_idx] = self._cen_return_imp( - imp_sort[:, cen_idx], freq_sort[:, cen_idx], - 0, return_periods) + imp_sort[:, cen_idx], freq_sort[:, cen_idx], 0, return_periods + ) def _build_exp(self): return Exposures( data={ - 'value': self.eai_exp, - 'latitude': self.coord_exp[:, 0], - 'longitude': self.coord_exp[:, 1], + "value": self.eai_exp, + "latitude": self.coord_exp[:, 0], + "longitude": self.coord_exp[:, 1], }, crs=self.crs, value_unit=self.unit, ref_year=0, - meta=None + meta=None, ) def _build_exp_event(self, event_id): @@ -1438,14 +1653,14 @@ def _build_exp_event(self, event_id): [[idx]] = (self.event_id == event_id).nonzero() return Exposures( data={ - 'value': self.imp_mat[idx].toarray().ravel(), - 'latitude': self.coord_exp[:, 0], - 'longitude': self.coord_exp[:, 1], + "value": self.imp_mat[idx].toarray().ravel(), + "latitude": self.coord_exp[:, 0], + "longitude": self.coord_exp[:, 1], }, crs=self.crs, value_unit=self.unit, ref_year=0, - meta=None + meta=None, ) @staticmethod @@ -1481,7 +1696,7 @@ def _cen_return_imp(imp, freq, imp_th, return_periods): pol_coef = np.polyfit(np.log(freq_cen), imp_cen, deg=0) imp_fit = np.polyval(pol_coef, np.log(1 / return_periods)) wrong_inten = (return_periods > np.max(1 / freq_cen)) & np.isnan(imp_fit) - imp_fit[wrong_inten] = 0. + imp_fit[wrong_inten] = 0.0 return imp_fit @@ -1491,7 +1706,7 @@ def select( event_names=None, dates=None, coord_exp=None, - reset_frequency=False + reset_frequency=False, ): """ Select a subset of events and/or exposure points from the impact. @@ -1544,18 +1759,22 @@ def select( nb_exp = len(self.coord_exp) if self.imp_mat.shape != (nb_events, nb_exp): - raise ValueError("The impact matrix is missing or incomplete. " - "The eai_exp and aai_agg cannot be computed. " - "Please recompute impact.calc() with save_mat=True " - "before using impact.select()") + raise ValueError( + "The impact matrix is missing or incomplete. " + "The eai_exp and aai_agg cannot be computed. " + "Please recompute impact.calc() with save_mat=True " + "before using impact.select()" + ) if nb_events == nb_exp: - LOGGER.warning("The number of events is equal to the number of " - "exposure points. It is not possible to " - "differentiate events and exposures attributes. 
" - "Please add/remove one event/exposure point. " - "This is a purely technical limitation of this " - "method.") + LOGGER.warning( + "The number of events is equal to the number of " + "exposure points. It is not possible to " + "differentiate events and exposures attributes. " + "Please add/remove one event/exposure point. " + "This is a purely technical limitation of this " + "method." + ) return None imp = copy.deepcopy(self) @@ -1571,10 +1790,12 @@ def select( if value.ndim == 1: setattr(imp, attr, value[sel_ev]) else: - LOGGER.warning("Found a multidimensional numpy array " - "with one dimension matching the number of events. " - "But multidimensional numpy arrays are not handled " - "in impact.select") + LOGGER.warning( + "Found a multidimensional numpy array " + "with one dimension matching the number of events. " + "But multidimensional numpy arrays are not handled " + "in impact.select" + ) elif isinstance(value, sparse.csr_matrix): setattr(imp, attr, value[sel_ev, :]) elif isinstance(value, list) and value: @@ -1582,9 +1803,11 @@ def select( else: pass - LOGGER.info("The eai_exp and aai_agg are computed for the " - "selected subset of events WITHOUT modification of " - "the frequencies.") + LOGGER.info( + "The eai_exp and aai_agg are computed for the " + "selected subset of events WITHOUT modification of " + "the frequencies." + ) # apply exposure selection to impact attributes if coord_exp is not None: @@ -1595,20 +1818,35 @@ def select( # .A1 reduce 1d matrix to 1d array imp.at_event = imp.imp_mat.sum(axis=1).A1 imp.tot_value = None - LOGGER.info("The total value cannot be re-computed for a " - "subset of exposures and is set to None.") + LOGGER.info( + "The total value cannot be re-computed for a " + "subset of exposures and is set to None." + ) # reset frequency if date span has changed (optional): if reset_frequency: - if self.frequency_unit not in ['1/year', 'annual', '1/y', '1/a']: - LOGGER.warning("Resetting the frequency is based on the calendar year of given" + if self.frequency_unit not in ["1/year", "annual", "1/y", "1/a"]: + LOGGER.warning( + "Resetting the frequency is based on the calendar year of given" " dates but the frequency unit here is %s. 
Consider setting the frequency" " manually for the selection or changing the frequency unit to %s.", - self.frequency_unit, DEF_FREQ_UNIT) - year_span_old = np.abs(dt.datetime.fromordinal(self.date.max()).year - - dt.datetime.fromordinal(self.date.min()).year) + 1 - year_span_new = np.abs(dt.datetime.fromordinal(imp.date.max()).year - - dt.datetime.fromordinal(imp.date.min()).year) + 1 + self.frequency_unit, + DEF_FREQ_UNIT, + ) + year_span_old = ( + np.abs( + dt.datetime.fromordinal(self.date.max()).year + - dt.datetime.fromordinal(self.date.min()).year + ) + + 1 + ) + year_span_new = ( + np.abs( + dt.datetime.fromordinal(imp.date.max()).year + - dt.datetime.fromordinal(imp.date.min()).year + ) + + 1 + ) imp.frequency = imp.frequency * year_span_old / year_span_new # cast frequency vector into 2d array for sparse matrix multiplication @@ -1632,10 +1870,10 @@ def _selected_events_idx(self, event_ids, event_names, dates, nb_events): if isinstance(date_ini, str): date_ini = u_dt.str_to_date(date_ini) date_end = u_dt.str_to_date(date_end) - mask_dt &= (date_ini <= self.date) - mask_dt &= (self.date <= date_end) + mask_dt &= date_ini <= self.date + mask_dt &= self.date <= date_end if not np.any(mask_dt): - LOGGER.info('No impact event in given date range %s.', dates) + LOGGER.info("No impact event in given date range %s.", dates) sel_dt = mask_dt.nonzero()[0] # Convert bool to indices @@ -1646,7 +1884,7 @@ def _selected_events_idx(self, event_ids, event_names, dates, nb_events): (sel_id,) = np.isin(self.event_id, event_ids).nonzero() # pylint: disable=no-member if sel_id.size == 0: - LOGGER.info('No impact event with given ids %s found.', event_ids) + LOGGER.info("No impact event with given ids %s found.", event_ids) # filter events by name if event_names is None: @@ -1655,7 +1893,7 @@ def _selected_events_idx(self, event_ids, event_names, dates, nb_events): (sel_na,) = np.isin(self.event_name, event_names).nonzero() # pylint: disable=no-member if sel_na.size == 0: - LOGGER.info('No impact event with given names %s found.', event_names) + LOGGER.info("No impact event with given names %s found.", event_names) # select events with machting id, name or date field. sel_ev = np.unique(np.concatenate([sel_dt, sel_id, sel_na])) @@ -1713,6 +1951,7 @@ def concat(cls, imp_list: Iterable, reset_event_ids: bool = False): - Concatenation of impacts with different exposure (e.g. different countries) could also be implemented here in the future. """ + def check_unique_attr(attr_name: str): """Check if an attribute is unique among all impacts""" if len({getattr(imp, attr_name) for imp in imp_list}) > 1: @@ -1778,8 +2017,9 @@ def stack_attribute(attr_name: str) -> np.ndarray: **kwargs, ) - def match_centroids(self, hazard, distance='euclidean', - threshold=u_coord.NEAREST_NEIGHBOR_THRESHOLD): + def match_centroids( + self, hazard, distance="euclidean", threshold=u_coord.NEAREST_NEIGHBOR_THRESHOLD + ): """ Finds the closest hazard centroid for each impact coordinate. Creates a temporary GeoDataFrame and uses ``u_coord.match_centroids()``. @@ -1809,26 +2049,27 @@ def match_centroids(self, hazard, distance='euclidean', self._build_exp().gdf, hazard.centroids, distance=distance, - threshold=threshold) + threshold=threshold, + ) + @dataclass -class ImpactFreqCurve(): - """Impact exceedence frequency curve. 
- """ +class ImpactFreqCurve: + """Impact exceedence frequency curve.""" - return_per : np.ndarray = field(default_factory=lambda: np.empty(0)) + return_per: np.ndarray = field(default_factory=lambda: np.empty(0)) """return period""" - impact : np.ndarray = field(default_factory=lambda: np.empty(0)) + impact: np.ndarray = field(default_factory=lambda: np.empty(0)) """impact exceeding frequency""" - unit : str = '' + unit: str = "" """value unit used (given by exposures unit)""" - frequency_unit : str = DEF_FREQ_UNIT + frequency_unit: str = DEF_FREQ_UNIT """value unit used (given by exposures unit)""" - label : str = '' + label: str = "" """string describing source data""" def plot(self, axis=None, log_frequency=False, **kwargs): @@ -1850,12 +2091,12 @@ def plot(self, axis=None, log_frequency=False, **kwargs): if not axis: _, axis = plt.subplots(1, 1) axis.set_title(self.label) - axis.set_ylabel('Impact (' + self.unit + ')') + axis.set_ylabel("Impact (" + self.unit + ")") if log_frequency: - axis.set_xlabel(f'Exceedance frequency ({self.frequency_unit})') - axis.set_xscale('log') + axis.set_xlabel(f"Exceedance frequency ({self.frequency_unit})") + axis.set_xscale("log") axis.plot(self.return_per**-1, self.impact, **kwargs) else: - axis.set_xlabel('Return period (year)') + axis.set_xlabel("Return period (year)") axis.plot(self.return_per, self.impact, **kwargs) return axis diff --git a/climada/engine/impact_calc.py b/climada/engine/impact_calc.py index aefa3810f..713cda324 100644 --- a/climada/engine/impact_calc.py +++ b/climada/engine/impact_calc.py @@ -19,12 +19,13 @@ Define ImpactCalc class. """ -__all__ = ['ImpactCalc'] +__all__ = ["ImpactCalc"] import logging + +import geopandas as gpd import numpy as np from scipy import sparse -import geopandas as gpd from climada import CONFIG from climada.engine.impact import Impact @@ -32,15 +33,12 @@ LOGGER = logging.getLogger(__name__) -class ImpactCalc(): +class ImpactCalc: """ Class to compute impacts from exposures, impact function set and hazard """ - def __init__(self, - exposures, - impfset, - hazard): + def __init__(self, exposures, impfset, hazard): """ ImpactCalc constructor @@ -78,8 +76,13 @@ def n_events(self): """Number of hazard events (size of event_id array)""" return self.hazard.size - def impact(self, save_mat=True, assign_centroids=True, - ignore_cover=False, ignore_deductible=False): + def impact( + self, + save_mat=True, + assign_centroids=True, + ignore_cover=False, + ignore_deductible=False, + ): """Compute the impact of a hazard on exposures. Parameters @@ -117,50 +120,70 @@ def impact(self, save_mat=True, assign_centroids=True, apply_cover_to_mat : apply cover to impact matrix """ # check for compatibility of exposures and hazard type - if all(name not in self.exposures.gdf.columns for - name in ['if_', f'if_{self.hazard.haz_type}', - 'impf_', f'impf_{self.hazard.haz_type}']): + if all( + name not in self.exposures.gdf.columns + for name in [ + "if_", + f"if_{self.hazard.haz_type}", + "impf_", + f"impf_{self.hazard.haz_type}", + ] + ): raise AttributeError( "Impact calculation not possible. No impact functions found " f"for hazard type {self.hazard.haz_type} in exposures." - ) + ) # check for compatibility of impact function and hazard type if not self.impfset.get_func(haz_type=self.hazard.haz_type): raise AttributeError( "Impact calculation not possible. No impact functions found " f"for hazard type {self.hazard.haz_type} in impf_set." 
- ) + ) impf_col = self.exposures.get_impf_column(self.hazard.haz_type) known_impact_functions = self.impfset.get_ids(haz_type=self.hazard.haz_type) # check for compatibility of impact function id between impact function set and exposure if not all(self.exposures.gdf[impf_col].isin(known_impact_functions)): - unknown_impact_functions = list(self.exposures.gdf[ + unknown_impact_functions = list( + self.exposures.gdf[ ~self.exposures.gdf[impf_col].isin(known_impact_functions) - ][impf_col].drop_duplicates().astype(int).astype(str)) + ][impf_col] + .drop_duplicates() + .astype(int) + .astype(str) + ) raise ValueError( f"The associated impact function(s) with id(s) " f"{', '.join(unknown_impact_functions)} have no match in impact function set for" - f" hazard type \'{self.hazard.haz_type}\'.\nPlease make sure that all exposure " + f" hazard type '{self.hazard.haz_type}'.\nPlease make sure that all exposure " "points are associated with an impact function that is included in the impact " - "function set.") + "function set." + ) - exp_gdf = self.minimal_exp_gdf(impf_col, assign_centroids, ignore_cover, ignore_deductible) + exp_gdf = self.minimal_exp_gdf( + impf_col, assign_centroids, ignore_cover, ignore_deductible + ) if exp_gdf.size == 0: return self._return_empty(save_mat) - LOGGER.info('Calculating impact for %s assets (>0) and %s events.', - exp_gdf.size, self.n_events) + LOGGER.info( + "Calculating impact for %s assets (>0) and %s events.", + exp_gdf.size, + self.n_events, + ) imp_mat_gen = self.imp_mat_gen(exp_gdf, impf_col) - insured = ('cover' in exp_gdf and exp_gdf['cover'].max() >= 0) \ - or ('deductible' in exp_gdf and exp_gdf['deductible'].max() > 0) + insured = ("cover" in exp_gdf and exp_gdf["cover"].max() >= 0) or ( + "deductible" in exp_gdf and exp_gdf["deductible"].max() > 0 + ) if insured: - LOGGER.info("cover and/or deductible columns detected," - " going to calculate insured impact") -#TODO: make a better impact matrix generator for insured impacts when -# the impact matrix is already present + LOGGER.info( + "cover and/or deductible columns detected," + " going to calculate insured impact" + ) + # TODO: make a better impact matrix generator for insured impacts when + # the impact matrix is already present imp_mat_gen = self.insured_mat_gen(imp_mat_gen, exp_gdf, impf_col) return self._return_impact(imp_mat_gen, save_mat) @@ -187,8 +210,9 @@ def _return_impact(self, imp_mat_gen, save_mat): """ if save_mat: imp_mat = self.stitch_impact_matrix(imp_mat_gen) - at_event, eai_exp, aai_agg = \ - self.risk_metrics(imp_mat, self.hazard.frequency) + at_event, eai_exp, aai_agg = self.risk_metrics( + imp_mat, self.hazard.frequency + ) else: imp_mat = None at_event, eai_exp, aai_agg = self.stitch_risk_metrics(imp_mat_gen) @@ -214,16 +238,18 @@ def _return_empty(self, save_mat): eai_exp = np.zeros(self.n_exp_pnt) aai_agg = 0.0 if save_mat: - imp_mat = sparse.csr_matrix(( - self.n_events, self.n_exp_pnt), dtype=np.float64 - ) + imp_mat = sparse.csr_matrix( + (self.n_events, self.n_exp_pnt), dtype=np.float64 + ) else: imp_mat = None return Impact.from_eih( self.exposures, self.hazard, at_event, eai_exp, aai_agg, imp_mat ) - def minimal_exp_gdf(self, impf_col, assign_centroids, ignore_cover, ignore_deductible): + def minimal_exp_gdf( + self, impf_col, assign_centroids, ignore_cover, ignore_deductible + ): """Get minimal exposures geodataframe for impact computation Parameters @@ -248,29 +274,36 @@ def minimal_exp_gdf(self, impf_col, assign_centroids, ignore_cover, ignore_deduc if 
assign_centroids: self.exposures.assign_centroids(self.hazard, overwrite=True) elif self.hazard.centr_exp_col not in self.exposures.gdf.columns: - raise ValueError("'assign_centroids' is set to 'False' but no centroids are assigned" - f" for the given hazard type ({self.hazard.haz_type})." - " Run 'exposures.assign_centroids()' beforehand or set" - " 'assign_centroids' to 'True'") + raise ValueError( + "'assign_centroids' is set to 'False' but no centroids are assigned" + f" for the given hazard type ({self.hazard.haz_type})." + " Run 'exposures.assign_centroids()' beforehand or set" + " 'assign_centroids' to 'True'" + ) mask = ( - (self.exposures.gdf['value'].values == self.exposures.gdf['value'].values)# value != NaN - & (self.exposures.gdf['value'].values != 0) # value != 0 - & (self.exposures.gdf[self.hazard.centr_exp_col].values >= 0) # centroid assigned + ( + self.exposures.gdf["value"].values == self.exposures.gdf["value"].values + ) # value != NaN + & (self.exposures.gdf["value"].values != 0) # value != 0 + & ( + self.exposures.gdf[self.hazard.centr_exp_col].values >= 0 + ) # centroid assigned ) - columns = ['value', impf_col, self.hazard.centr_exp_col] - if not ignore_cover and 'cover' in self.exposures.gdf: - columns.append('cover') - if not ignore_deductible and 'deductible' in self.exposures.gdf: - columns.append('deductible') + columns = ["value", impf_col, self.hazard.centr_exp_col] + if not ignore_cover and "cover" in self.exposures.gdf: + columns.append("cover") + if not ignore_deductible and "deductible" in self.exposures.gdf: + columns.append("deductible") exp_gdf = gpd.GeoDataFrame( - {col: self.exposures.gdf[col].values[mask] - for col in columns}, - ) + {col: self.exposures.gdf[col].values[mask] for col in columns}, + ) if exp_gdf.size == 0: LOGGER.warning("No exposures with value >0 in the vicinity of the hazard.") - self._orig_exp_idx = mask.nonzero()[0] # update index of kept exposures points in exp_gdf - # within the full exposures + self._orig_exp_idx = mask.nonzero()[ + 0 + ] # update index of kept exposures points in exp_gdf + # within the full exposures return exp_gdf def imp_mat_gen(self, exp_gdf, impf_col): @@ -302,9 +335,9 @@ def imp_mat_gen(self, exp_gdf, impf_col): """ def _chunk_exp_idx(haz_size, idx_exp_impf): - ''' + """ Chunk computations in sizes that roughly fit into memory - ''' + """ max_size = CONFIG.max_matrix_size.int() if haz_size > max_size: raise ValueError( @@ -315,17 +348,12 @@ def _chunk_exp_idx(haz_size, idx_exp_impf): return np.array_split(idx_exp_impf, n_chunks) for impf_id in exp_gdf[impf_col].dropna().unique(): - impf = self.impfset.get_func( - haz_type=self.hazard.haz_type, fun_id=impf_id - ) + impf = self.impfset.get_func(haz_type=self.hazard.haz_type, fun_id=impf_id) idx_exp_impf = (exp_gdf[impf_col].values == impf_id).nonzero()[0] for exp_idx in _chunk_exp_idx(self.hazard.size, idx_exp_impf): - exp_values = exp_gdf['value'].values[exp_idx] + exp_values = exp_gdf["value"].values[exp_idx] cent_idx = exp_gdf[self.hazard.centr_exp_col].values[exp_idx] - yield ( - self.impact_matrix(exp_values, cent_idx, impf), - exp_idx - ) + yield (self.impact_matrix(exp_values, cent_idx, impf), exp_idx) def insured_mat_gen(self, imp_mat_gen, exp_gdf, impf_col): """ @@ -359,14 +387,14 @@ def insured_mat_gen(self, imp_mat_gen, exp_gdf, impf_col): for mat, exp_idx in imp_mat_gen: impf_id = exp_gdf[impf_col][exp_idx[0]] cent_idx = exp_gdf[self.hazard.centr_exp_col].values[exp_idx] - impf = self.impfset.get_func( - haz_type=self.hazard.haz_type, - 
fun_id=impf_id) - if 'deductible' in exp_gdf: - deductible = exp_gdf['deductible'].values[exp_idx] - mat = self.apply_deductible_to_mat(mat, deductible, self.hazard, cent_idx, impf) - if 'cover' in exp_gdf: - cover = exp_gdf['cover'].values[exp_idx] + impf = self.impfset.get_func(haz_type=self.hazard.haz_type, fun_id=impf_id) + if "deductible" in exp_gdf: + deductible = exp_gdf["deductible"].values[exp_idx] + mat = self.apply_deductible_to_mat( + mat, deductible, self.hazard, cent_idx, impf + ) + if "cover" in exp_gdf: + cover = exp_gdf["cover"].values[exp_idx] mat = self.apply_cover_to_mat(mat, cover) yield (mat, exp_idx) @@ -392,11 +420,11 @@ def impact_matrix(self, exp_values, cent_idx, impf): Impact per event (rows) per exposure point (columns) """ n_exp_pnt = len(cent_idx) # implicitly checks in matrix assignement whether - # len(cent_idx) == len(exp_values) + # len(cent_idx) == len(exp_values) mdr = self.hazard.get_mdr(cent_idx, impf) exp_values_csr = sparse.csr_matrix( # vector 1 x exp_size - (exp_values, np.arange(n_exp_pnt), [0, n_exp_pnt]), - shape=(1, n_exp_pnt)) + (exp_values, np.arange(n_exp_pnt), [0, n_exp_pnt]), shape=(1, n_exp_pnt) + ) fract = self.hazard._get_fraction(cent_idx) # pylint: disable=protected-access if fract is None: return mdr.multiply(exp_values_csr) @@ -409,13 +437,15 @@ def stitch_impact_matrix(self, imp_mat_gen): """ # rows: events index # cols: exposure point index within self.exposures - data, row, col = np.hstack([ - (mat.data, mat.nonzero()[0], self._orig_exp_idx[idx][mat.nonzero()[1]]) - for mat, idx in imp_mat_gen - ]) + data, row, col = np.hstack( + [ + (mat.data, mat.nonzero()[0], self._orig_exp_idx[idx][mat.nonzero()[1]]) + for mat, idx in imp_mat_gen + ] + ) return sparse.csr_matrix( (data, (row, col)), shape=(self.n_events, self.n_exp_pnt) - ) + ) def stitch_risk_metrics(self, imp_mat_gen): """Compute the impact metrics from an impact sub-matrix generator @@ -442,8 +472,9 @@ def stitch_risk_metrics(self, imp_mat_gen): eai_exp = np.zeros(self.n_exp_pnt) for sub_imp_mat, idx in imp_mat_gen: at_event += self.at_event_from_mat(sub_imp_mat) - eai_exp[self._orig_exp_idx[idx]] += \ - self.eai_exp_from_mat(sub_imp_mat, self.hazard.frequency) + eai_exp[self._orig_exp_idx[idx]] += self.eai_exp_from_mat( + sub_imp_mat, self.hazard.frequency + ) aai_agg = self.aai_agg_from_eai_exp(eai_exp) return at_event, eai_exp, aai_agg @@ -523,9 +554,9 @@ def eai_exp_from_mat(mat, freq): expected impact within a period of 1/frequency_unit for each exposure """ n_events = freq.size - freq_csr = sparse.csr_matrix( #vector n_events x 1 - (freq, np.zeros(n_events), np.arange(n_events + 1)), - shape=(n_events, 1)) + freq_csr = sparse.csr_matrix( # vector n_events x 1 + (freq, np.zeros(n_events), np.arange(n_events + 1)), shape=(n_events, 1) + ) return mat.multiply(freq_csr).sum(axis=0).A1 @staticmethod diff --git a/climada/engine/impact_data.py b/climada/engine/impact_data.py index 98ca074e5..21b6a2851 100644 --- a/climada/engine/impact_data.py +++ b/climada/engine/impact_data.py @@ -18,156 +18,175 @@ Functions to merge EMDAT damages to hazard events. 
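# Hedged sketch (not part of this patch) of the risk-metric algebra implemented by
# at_event_from_mat(), eai_exp_from_mat() and aai_agg_from_eai_exp() in
# impact_calc.py above; imp_mat has one row per hazard event and one column per
# exposure point.
import numpy as np
from scipy import sparse

imp_mat = sparse.csr_matrix(np.array([[0.0, 2.0], [5.0, 1.0]]))  # toy impact matrix
frequency = np.array([0.10, 0.01])                               # per-event frequency

at_event = np.asarray(imp_mat.sum(axis=1)).ravel()  # impact per event, summed over points
eai_exp = imp_mat.T.dot(frequency)                  # frequency-weighted impact per point
aai_agg = eai_exp.sum()                             # aggregated expected impact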
""" + import logging import pickle from datetime import datetime from pathlib import Path -import pandas as pd + import numpy as np +import pandas as pd from cartopy.io import shapereader -from climada.util.finance import gdp -from climada.util.constants import DEF_CRS import climada.util.coordinates as u_coord from climada.engine import Impact +from climada.util.constants import DEF_CRS +from climada.util.finance import gdp LOGGER = logging.getLogger(__name__) -PERIL_SUBTYPE_MATCH_DICT = dict(TC=['Tropical cyclone'], - FL=['Coastal flood'], - EQ=['Ground movement', 'Earthquake'], - RF=['Riverine flood', 'Flood'], - WS=['Extra-tropical storm', 'Storm'], - DR=['Drought'], - LS=['Landslide'], - BF=['Forest fire', 'Wildfire', 'Land fire (Brush, Bush, Pastur'] - ) - -PERIL_TYPE_MATCH_DICT = dict(DR=['Drought'], - EQ=['Earthquake'], - FL=['Flood'], - LS=['Landslide'], - VQ=['Volcanic activity'], - BF=['Wildfire'], - HW=['Extreme temperature'] - ) - -VARNAMES_EMDAT = \ - {2018: {'Dis No': 'Disaster No.', - 'Disaster Type': 'Disaster type', - 'Disaster Subtype': 'Disaster subtype', - 'Event Name': 'Disaster name', - 'Country': 'Country', - 'ISO': 'ISO', - 'Location': 'Location', - 'Associated Dis': 'Associated disaster', - 'Associated Dis2': 'Associated disaster2', - 'Dis Mag Value': 'Magnitude value', - 'Dis Mag Scale': 'Magnitude scale', - 'Latitude': 'Latitude', - 'Longitude': 'Longitude', - 'Total Deaths': 'Total deaths', - 'Total Affected': 'Total affected', - "Insured Damages ('000 US$)": "Insured losses ('000 US$)", - "Total Damages ('000 US$)": "Total damage ('000 US$)"}, - 2020: {'Dis No': 'Dis No', - 'Year': 'Year', - 'Seq': 'Seq', - 'Disaster Group': 'Disaster Group', - 'Disaster Subgroup': 'Disaster Subgroup', - 'Disaster Type': 'Disaster Type', - 'Disaster Subtype': 'Disaster Subtype', - 'Disaster Subsubtype': 'Disaster Subsubtype', - 'Event Name': 'Event Name', - 'Entry Criteria': 'Entry Criteria', - 'Country': 'Country', - 'ISO': 'ISO', - 'Region': 'Region', - 'Continent': 'Continent', - 'Location': 'Location', - 'Origin': 'Origin', - 'Associated Dis': 'Associated Dis', - 'Associated Dis2': 'Associated Dis2', - 'OFDA Response': 'OFDA Response', - 'Appeal': 'Appeal', - 'Declaration': 'Declaration', - 'Aid Contribution': 'Aid Contribution', - 'Dis Mag Value': 'Dis Mag Value', - 'Dis Mag Scale': 'Dis Mag Scale', - 'Latitude': 'Latitude', - 'Longitude': 'Longitude', - 'Local Time': 'Local Time', - 'River Basin': 'River Basin', - 'Start Year': 'Start Year', - 'Start Month': 'Start Month', - 'Start Day': 'Start Day', - 'End Year': 'End Year', - 'End Month': 'End Month', - 'End Day': 'End Day', - 'Total Deaths': 'Total Deaths', - 'No Injured': 'No Injured', - 'No Affected': 'No Affected', - 'No Homeless': 'No Homeless', - 'Total Affected': 'Total Affected', - "Reconstruction Costs ('000 US$)": "Reconstruction Costs ('000 US$)", - "Insured Damages ('000 US$)": "Insured Damages ('000 US$)", - "Total Damages ('000 US$)": "Total Damages ('000 US$)", - 'CPI': 'CPI'}, - 2023: {'Dis No': 'Dis No', - 'Year': 'Year', - 'Seq': 'Seq', - 'Glide': 'Glide', - 'Disaster Group': 'Disaster Group', - 'Disaster Subgroup': 'Disaster Subgroup', - 'Disaster Type': 'Disaster Type', - 'Disaster Subtype': 'Disaster Subtype', - 'Disaster Subsubtype': 'Disaster Subsubtype', - 'Event Name': 'Event Name', - 'Country': 'Country', - 'ISO': 'ISO', - 'Region': 'Region', - 'Continent': 'Continent', - 'Location': 'Location', - 'Origin': 'Origin', - 'Associated Dis': 'Associated Dis', - 'Associated Dis2': 'Associated Dis2', 
- 'OFDA Response': 'OFDA Response', - 'Appeal': 'Appeal', - 'Declaration': 'Declaration', - "AID Contribution ('000 US$)": "AID Contribution ('000 US$)", - 'Dis Mag Value': 'Dis Mag Value', - 'Dis Mag Scale': 'Dis Mag Scale', - 'Latitude': 'Latitude', - 'Longitude': 'Longitude', - 'Local Time': 'Local Time', - 'River Basin': 'River Basin', - 'Start Year': 'Start Year', - 'Start Month': 'Start Month', - 'Start Day': 'Start Day', - 'End Year': 'End Year', - 'End Month': 'End Month', - 'End Day': 'End Day', - 'Total Deaths': 'Total Deaths', - 'No Injured': 'No Injured', - 'No Affected': 'No Affected', - 'No Homeless': 'No Homeless', - 'Total Affected': 'Total Affected', - "Reconstruction Costs ('000 US$)": "Reconstruction Costs ('000 US$)", - "Reconstruction Costs, Adjusted ('000 US$)": "Reconstruction Costs, Adjusted ('000 US$)", - "Insured Damages ('000 US$)": "Insured Damages ('000 US$)", - "Insured Damages, Adjusted ('000 US$)": "Insured Damages, Adjusted ('000 US$)", - "Total Damages ('000 US$)": "Total Damages ('000 US$)", - "Total Damages, Adjusted ('000 US$)": "Total Damages, Adjusted ('000 US$)", - 'CPI': 'CPI', - 'Adm Level': 'Adm Level', - 'Admin1 Code': 'Admin1 Code', - 'Admin2 Code': 'Admin2 Code', - 'Geo Locations': 'Geo Locations'}} - - -def assign_hazard_to_emdat(certainty_level, intensity_path_haz, names_path_haz, - reg_id_path_haz, date_path_haz, emdat_data, - start_time, end_time, keep_checks=False): +PERIL_SUBTYPE_MATCH_DICT = dict( + TC=["Tropical cyclone"], + FL=["Coastal flood"], + EQ=["Ground movement", "Earthquake"], + RF=["Riverine flood", "Flood"], + WS=["Extra-tropical storm", "Storm"], + DR=["Drought"], + LS=["Landslide"], + BF=["Forest fire", "Wildfire", "Land fire (Brush, Bush, Pastur"], +) + +PERIL_TYPE_MATCH_DICT = dict( + DR=["Drought"], + EQ=["Earthquake"], + FL=["Flood"], + LS=["Landslide"], + VQ=["Volcanic activity"], + BF=["Wildfire"], + HW=["Extreme temperature"], +) + +VARNAMES_EMDAT = { + 2018: { + "Dis No": "Disaster No.", + "Disaster Type": "Disaster type", + "Disaster Subtype": "Disaster subtype", + "Event Name": "Disaster name", + "Country": "Country", + "ISO": "ISO", + "Location": "Location", + "Associated Dis": "Associated disaster", + "Associated Dis2": "Associated disaster2", + "Dis Mag Value": "Magnitude value", + "Dis Mag Scale": "Magnitude scale", + "Latitude": "Latitude", + "Longitude": "Longitude", + "Total Deaths": "Total deaths", + "Total Affected": "Total affected", + "Insured Damages ('000 US$)": "Insured losses ('000 US$)", + "Total Damages ('000 US$)": "Total damage ('000 US$)", + }, + 2020: { + "Dis No": "Dis No", + "Year": "Year", + "Seq": "Seq", + "Disaster Group": "Disaster Group", + "Disaster Subgroup": "Disaster Subgroup", + "Disaster Type": "Disaster Type", + "Disaster Subtype": "Disaster Subtype", + "Disaster Subsubtype": "Disaster Subsubtype", + "Event Name": "Event Name", + "Entry Criteria": "Entry Criteria", + "Country": "Country", + "ISO": "ISO", + "Region": "Region", + "Continent": "Continent", + "Location": "Location", + "Origin": "Origin", + "Associated Dis": "Associated Dis", + "Associated Dis2": "Associated Dis2", + "OFDA Response": "OFDA Response", + "Appeal": "Appeal", + "Declaration": "Declaration", + "Aid Contribution": "Aid Contribution", + "Dis Mag Value": "Dis Mag Value", + "Dis Mag Scale": "Dis Mag Scale", + "Latitude": "Latitude", + "Longitude": "Longitude", + "Local Time": "Local Time", + "River Basin": "River Basin", + "Start Year": "Start Year", + "Start Month": "Start Month", + "Start Day": "Start Day", 
+ "End Year": "End Year", + "End Month": "End Month", + "End Day": "End Day", + "Total Deaths": "Total Deaths", + "No Injured": "No Injured", + "No Affected": "No Affected", + "No Homeless": "No Homeless", + "Total Affected": "Total Affected", + "Reconstruction Costs ('000 US$)": "Reconstruction Costs ('000 US$)", + "Insured Damages ('000 US$)": "Insured Damages ('000 US$)", + "Total Damages ('000 US$)": "Total Damages ('000 US$)", + "CPI": "CPI", + }, + 2023: { + "Dis No": "Dis No", + "Year": "Year", + "Seq": "Seq", + "Glide": "Glide", + "Disaster Group": "Disaster Group", + "Disaster Subgroup": "Disaster Subgroup", + "Disaster Type": "Disaster Type", + "Disaster Subtype": "Disaster Subtype", + "Disaster Subsubtype": "Disaster Subsubtype", + "Event Name": "Event Name", + "Country": "Country", + "ISO": "ISO", + "Region": "Region", + "Continent": "Continent", + "Location": "Location", + "Origin": "Origin", + "Associated Dis": "Associated Dis", + "Associated Dis2": "Associated Dis2", + "OFDA Response": "OFDA Response", + "Appeal": "Appeal", + "Declaration": "Declaration", + "AID Contribution ('000 US$)": "AID Contribution ('000 US$)", + "Dis Mag Value": "Dis Mag Value", + "Dis Mag Scale": "Dis Mag Scale", + "Latitude": "Latitude", + "Longitude": "Longitude", + "Local Time": "Local Time", + "River Basin": "River Basin", + "Start Year": "Start Year", + "Start Month": "Start Month", + "Start Day": "Start Day", + "End Year": "End Year", + "End Month": "End Month", + "End Day": "End Day", + "Total Deaths": "Total Deaths", + "No Injured": "No Injured", + "No Affected": "No Affected", + "No Homeless": "No Homeless", + "Total Affected": "Total Affected", + "Reconstruction Costs ('000 US$)": "Reconstruction Costs ('000 US$)", + "Reconstruction Costs, Adjusted ('000 US$)": "Reconstruction Costs, Adjusted ('000 US$)", + "Insured Damages ('000 US$)": "Insured Damages ('000 US$)", + "Insured Damages, Adjusted ('000 US$)": "Insured Damages, Adjusted ('000 US$)", + "Total Damages ('000 US$)": "Total Damages ('000 US$)", + "Total Damages, Adjusted ('000 US$)": "Total Damages, Adjusted ('000 US$)", + "CPI": "CPI", + "Adm Level": "Adm Level", + "Admin1 Code": "Admin1 Code", + "Admin2 Code": "Admin2 Code", + "Geo Locations": "Geo Locations", + }, +} + + +def assign_hazard_to_emdat( + certainty_level, + intensity_path_haz, + names_path_haz, + reg_id_path_haz, + date_path_haz, + emdat_data, + start_time, + end_time, + keep_checks=False, +): """assign_hazard_to_emdat: link EMdat event to hazard Parameters @@ -196,19 +215,24 @@ def assign_hazard_to_emdat(certainty_level, intensity_path_haz, names_path_haz, pd.dataframe with EMdat entries linked to a hazard """ # check valid certainty level - certainty_levels = ['high', 'low'] + certainty_levels = ["high", "low"] if certainty_level not in certainty_levels: - raise ValueError("Invalid certainty level. Expected one of: %s" % certainty_levels) + raise ValueError( + "Invalid certainty level. Expected one of: %s" % certainty_levels + ) # prepare hazard set print("Start preparing hazard set") - hit_countries = hit_country_per_hazard(intensity_path_haz, names_path_haz, - reg_id_path_haz, date_path_haz) + hit_countries = hit_country_per_hazard( + intensity_path_haz, names_path_haz, reg_id_path_haz, date_path_haz + ) # prepare damage set # adjust emdat_data to the path!! 
print("Start preparing damage set") - lookup = create_lookup(emdat_data, start_time, end_time, disaster_subtype='Tropical cyclone') + lookup = create_lookup( + emdat_data, start_time, end_time, disaster_subtype="Tropical cyclone" + ) # calculate possible hits print("Calculate possible hits") hit5 = emdat_possible_hit(lookup=lookup, hit_countries=hit_countries, delta_t=5) @@ -229,49 +253,120 @@ def assign_hazard_to_emdat(certainty_level, intensity_path_haz, names_path_haz, # assign only tracks with high certainty print("Assign tracks") - if certainty_level == 'high': - lookup = assign_track_to_em(lookup=lookup, possible_tracks_1=hit10_match, - possible_tracks_2=hit50_match, level=1) - lookup = assign_track_to_em(lookup=lookup, possible_tracks_1=hit15_match, - possible_tracks_2=hit50_match, level=2) - lookup = assign_track_to_em(lookup=lookup, possible_tracks_1=hit25_match, - possible_tracks_2=hit50_match, level=3) - lookup = assign_track_to_em(lookup=lookup, possible_tracks_1=hit10_match, - possible_tracks_2=hit25_match, level=4) - lookup = assign_track_to_em(lookup=lookup, possible_tracks_1=hit15_match, - possible_tracks_2=hit25_match, level=5) + if certainty_level == "high": + lookup = assign_track_to_em( + lookup=lookup, + possible_tracks_1=hit10_match, + possible_tracks_2=hit50_match, + level=1, + ) + lookup = assign_track_to_em( + lookup=lookup, + possible_tracks_1=hit15_match, + possible_tracks_2=hit50_match, + level=2, + ) + lookup = assign_track_to_em( + lookup=lookup, + possible_tracks_1=hit25_match, + possible_tracks_2=hit50_match, + level=3, + ) + lookup = assign_track_to_em( + lookup=lookup, + possible_tracks_1=hit10_match, + possible_tracks_2=hit25_match, + level=4, + ) + lookup = assign_track_to_em( + lookup=lookup, + possible_tracks_1=hit15_match, + possible_tracks_2=hit25_match, + level=5, + ) # assign all tracks - elif certainty_level == 'low': - lookup = assign_track_to_em(lookup=lookup, possible_tracks_1=hit5_match, - possible_tracks_2=hit50_match, level=1) - lookup = assign_track_to_em(lookup=lookup, possible_tracks_1=hit10_match, - possible_tracks_2=hit50_match, level=2) - lookup = assign_track_to_em(lookup=lookup, possible_tracks_1=hit15_match, - possible_tracks_2=hit50_match, level=3) - lookup = assign_track_to_em(lookup=lookup, possible_tracks_1=hit5_match, - possible_tracks_2=hit25_match, level=4) - lookup = assign_track_to_em(lookup=lookup, possible_tracks_1=hit10_match, - possible_tracks_2=hit25_match, level=5) - lookup = assign_track_to_em(lookup=lookup, possible_tracks_1=hit15_match, - possible_tracks_2=hit25_match, level=6) - lookup = assign_track_to_em(lookup=lookup, possible_tracks_1=hit5_match, - possible_tracks_2=hit15_match, level=7) - lookup = assign_track_to_em(lookup=lookup, possible_tracks_1=hit10_match, - possible_tracks_2=hit15_match, level=8) - lookup = assign_track_to_em(lookup=lookup, possible_tracks_1=hit5_match, - possible_tracks_2=hit10_match, level=9) - lookup = assign_track_to_em(lookup=lookup, possible_tracks_1=hit15_match, - possible_tracks_2=hit15_match, level=10) - lookup = assign_track_to_em(lookup=lookup, possible_tracks_1=hit10_match, - possible_tracks_2=hit10_match, level=11) - lookup = assign_track_to_em(lookup=lookup, possible_tracks_1=hit5_match, - possible_tracks_2=hit5_match, level=12) + elif certainty_level == "low": + lookup = assign_track_to_em( + lookup=lookup, + possible_tracks_1=hit5_match, + possible_tracks_2=hit50_match, + level=1, + ) + lookup = assign_track_to_em( + lookup=lookup, + possible_tracks_1=hit10_match, + 
possible_tracks_2=hit50_match, + level=2, + ) + lookup = assign_track_to_em( + lookup=lookup, + possible_tracks_1=hit15_match, + possible_tracks_2=hit50_match, + level=3, + ) + lookup = assign_track_to_em( + lookup=lookup, + possible_tracks_1=hit5_match, + possible_tracks_2=hit25_match, + level=4, + ) + lookup = assign_track_to_em( + lookup=lookup, + possible_tracks_1=hit10_match, + possible_tracks_2=hit25_match, + level=5, + ) + lookup = assign_track_to_em( + lookup=lookup, + possible_tracks_1=hit15_match, + possible_tracks_2=hit25_match, + level=6, + ) + lookup = assign_track_to_em( + lookup=lookup, + possible_tracks_1=hit5_match, + possible_tracks_2=hit15_match, + level=7, + ) + lookup = assign_track_to_em( + lookup=lookup, + possible_tracks_1=hit10_match, + possible_tracks_2=hit15_match, + level=8, + ) + lookup = assign_track_to_em( + lookup=lookup, + possible_tracks_1=hit5_match, + possible_tracks_2=hit10_match, + level=9, + ) + lookup = assign_track_to_em( + lookup=lookup, + possible_tracks_1=hit15_match, + possible_tracks_2=hit15_match, + level=10, + ) + lookup = assign_track_to_em( + lookup=lookup, + possible_tracks_1=hit10_match, + possible_tracks_2=hit10_match, + level=11, + ) + lookup = assign_track_to_em( + lookup=lookup, + possible_tracks_1=hit5_match, + possible_tracks_2=hit5_match, + level=12, + ) if not keep_checks: - lookup = lookup.drop(['Date_start_EM_ordinal', 'possible_track', - 'possible_track_all'], axis=1) - lookup.groupby('allocation_level').count() - print('(%d/%s) tracks allocated' % ( - len(lookup[lookup.allocation_level.notnull()]), len(lookup))) + lookup = lookup.drop( + ["Date_start_EM_ordinal", "possible_track", "possible_track_all"], axis=1 + ) + lookup.groupby("allocation_level").count() + print( + "(%d/%s) tracks allocated" + % (len(lookup[lookup.allocation_level.notnull()]), len(lookup)) + ) return lookup @@ -294,19 +389,19 @@ def hit_country_per_hazard(intensity_path, names_path, reg_id_path, date_path): ------- pd.DataFrame with all hit countries per hazard """ - with open(intensity_path, 'rb') as filef: + with open(intensity_path, "rb") as filef: inten = pickle.load(filef) - with open(names_path, 'rb') as filef: + with open(names_path, "rb") as filef: names = pickle.load(filef) - with open(reg_id_path, 'rb') as filef: + with open(reg_id_path, "rb") as filef: reg_id = pickle.load(filef) - with open(date_path, 'rb') as filef: + with open(date_path, "rb") as filef: date = pickle.load(filef) # loop over the tracks (over the rows of the intensity matrix) all_hits = [] for track in range(0, len(names)): # select track - tc_track = inten[track, ] + tc_track = inten[track,] # select only indices that are not zero hits = tc_track.nonzero()[1] # get the country of these indices and remove dublicates @@ -315,7 +410,7 @@ def hit_country_per_hazard(intensity_path, names_path, reg_id_path, date_path): all_hits.append(hits) # create data frame for output - hit_countries = pd.DataFrame(columns=['hit_country', 'Date_start', 'ibtracsID']) + hit_countries = pd.DataFrame(columns=["hit_country", "Date_start", "ibtracsID"]) for track, _ in enumerate(names): # Check if track has hit any country else go to the next track if len(all_hits[track]) > 0: @@ -324,54 +419,68 @@ def hit_country_per_hazard(intensity_path, names_path, reg_id_path, date_path): # Hit country ISO ctry_iso = u_coord.country_to_iso(all_hits[track][hit], "alpha3") # create entry for each country a hazard has hit - hit_countries = hit_countries.append({'hit_country': ctry_iso, - 'Date_start': date[track], - 
'ibtracsID': names[track]}, - ignore_index=True) + hit_countries = hit_countries.append( + { + "hit_country": ctry_iso, + "Date_start": date[track], + "ibtracsID": names[track], + }, + ignore_index=True, + ) # retrun data frame with all hit countries per hazard return hit_countries -def create_lookup(emdat_data, start, end, disaster_subtype='Tropical cyclone'): +def create_lookup(emdat_data, start, end, disaster_subtype="Tropical cyclone"): """create_lookup: prepare a lookup table of EMdat events to which hazards can be assigned - Parameters - ---------- - emdat_data: pd.DataFrame - with EMdat data - start : str - start date of events to be assigned 'yyyy-mm-dd' - end : str - end date of events to be assigned 'yyyy-mm-dd' - disaster_subtype : str - EMdat disaster subtype - - Returns - ------- - pd.DataFrame - """ - data = emdat_data[emdat_data['Disaster_subtype'] == disaster_subtype] - lookup = pd.DataFrame(columns=['hit_country', 'Date_start_EM', - 'Date_start_EM_ordinal', 'Disaster_name', - 'EM_ID', 'ibtracsID', 'allocation_level', - 'possible_track', 'possible_track_all']) - lookup['hit_country'] = data['ISO'] - lookup['Date_start_EM'] = data['Date_start_clean'] - lookup['Disaster_name'] = data['Disaster_name'] - lookup['EM_ID'] = data['Disaster_No'] + Parameters + ---------- + emdat_data: pd.DataFrame + with EMdat data + start : str + start date of events to be assigned 'yyyy-mm-dd' + end : str + end date of events to be assigned 'yyyy-mm-dd' + disaster_subtype : str + EMdat disaster subtype + + Returns + ------- + pd.DataFrame + """ + data = emdat_data[emdat_data["Disaster_subtype"] == disaster_subtype] + lookup = pd.DataFrame( + columns=[ + "hit_country", + "Date_start_EM", + "Date_start_EM_ordinal", + "Disaster_name", + "EM_ID", + "ibtracsID", + "allocation_level", + "possible_track", + "possible_track_all", + ] + ) + lookup["hit_country"] = data["ISO"] + lookup["Date_start_EM"] = data["Date_start_clean"] + lookup["Disaster_name"] = data["Disaster_name"] + lookup["EM_ID"] = data["Disaster_No"] lookup = lookup.reset_index(drop=True) # create ordinals - for i in range(0, len(data['Date_start_clean'].values)): - lookup['Date_start_EM_ordinal'][i] = datetime.toordinal( - datetime.strptime(lookup['Date_start_EM'].values[i], '%Y-%m-%d')) + for i in range(0, len(data["Date_start_clean"].values)): + lookup["Date_start_EM_ordinal"][i] = datetime.toordinal( + datetime.strptime(lookup["Date_start_EM"].values[i], "%Y-%m-%d") + ) # ordinals to numeric - lookup['Date_start_EM_ordinal'] = pd.to_numeric(lookup['Date_start_EM_ordinal']) + lookup["Date_start_EM_ordinal"] = pd.to_numeric(lookup["Date_start_EM_ordinal"]) # select time - emdat_start = datetime.toordinal(datetime.strptime(start, '%Y-%m-%d')) - emdat_end = datetime.toordinal(datetime.strptime(end, '%Y-%m-%d')) + emdat_start = datetime.toordinal(datetime.strptime(start, "%Y-%m-%d")) + emdat_end = datetime.toordinal(datetime.strptime(end, "%Y-%m-%d")) - lookup = lookup[lookup['Date_start_EM_ordinal'].values > emdat_start] - lookup = lookup[lookup['Date_start_EM_ordinal'].values < emdat_end] + lookup = lookup[lookup["Date_start_EM_ordinal"].values > emdat_start] + lookup = lookup[lookup["Date_start_EM_ordinal"].values < emdat_end] return lookup @@ -397,16 +506,20 @@ def emdat_possible_hit(lookup, hit_countries, delta_t): # tracks: processed IBtracks with info which track hit which country # delta_t: time difference of start of EMdat and IBrtacks possible_hit_all = [] - for i in range(0, len(lookup['EM_ID'].values)): + for i in range(0, 
len(lookup["EM_ID"].values)): possible_hit = [] country_tracks = hit_countries[ - hit_countries['hit_country'] == lookup['hit_country'].values[i]] - for j in range(0, len(country_tracks['Date_start'].values)): - if (lookup['Date_start_EM_ordinal'].values[i] - - country_tracks['Date_start'].values[j]) < \ - delta_t and (lookup['Date_start_EM_ordinal'].values[i] - - country_tracks['Date_start'].values[j]) >= 0: - possible_hit.append(country_tracks['ibtracsID'].values[j]) + hit_countries["hit_country"] == lookup["hit_country"].values[i] + ] + for j in range(0, len(country_tracks["Date_start"].values)): + if ( + lookup["Date_start_EM_ordinal"].values[i] + - country_tracks["Date_start"].values[j] + ) < delta_t and ( + lookup["Date_start_EM_ordinal"].values[i] + - country_tracks["Date_start"].values[j] + ) >= 0: + possible_hit.append(country_tracks["ibtracsID"].values[j]) possible_hit_all.append(possible_hit) return possible_hit_all @@ -416,27 +529,27 @@ def emdat_possible_hit(lookup, hit_countries, delta_t): def match_em_id(lookup, poss_hit): """function to check if EM_ID has been assigned already and combine possible hits - Parameters - ---------- - lookup : pd.dataframe - to relate EMdatID to hazard - poss_hit : list - with possible hits - - Returns - ------- - list - with all possible hits per EMdat ID - """ + Parameters + ---------- + lookup : pd.dataframe + to relate EMdatID to hazard + poss_hit : list + with possible hits + + Returns + ------- + list + with all possible hits per EMdat ID + """ possible_hit_all = [] - for i in range(0, len(lookup['EM_ID'].values)): + for i in range(0, len(lookup["EM_ID"].values)): possible_hit = [] # lookup without line i - #lookup_match = lookup.drop(i) + # lookup_match = lookup.drop(i) lookup_match = lookup # Loop over check if EM dat ID is the same - for i_match in range(0, len(lookup_match['EM_ID'].values)): - if lookup['EM_ID'].values[i] == lookup_match['EM_ID'].values[i_match]: + for i_match in range(0, len(lookup_match["EM_ID"].values)): + if lookup["EM_ID"].values[i] == lookup_match["EM_ID"].values[i_match]: possible_hit.append(poss_hit[i]) possible_hit_all.append(possible_hit) return possible_hit_all @@ -444,86 +557,102 @@ def match_em_id(lookup, poss_hit): def assign_track_to_em(lookup, possible_tracks_1, possible_tracks_2, level): """function to assign a hazard to an EMdat event - to get some confidene into the procedure, hazards get only assigned - if there is no other hazard occuring at a bigger time interval in that country - Thus a track of possible_tracks_1 gets only assigned if there are no other - tracks in possible_tracks_2. - The confidence can be expressed with a certainty level - - Parameters - ---------- - lookup : pd.DataFrame - to relate EMdatID to hazard - possible_tracks_1 : list - list of possible hits with smaller time horizon - possible_tracks_2 : list - list of possible hits with larger time horizon - level : int - level of confidence - - Returns - ------- - pd.DataFrame - lookup with assigend tracks and possible hits + to get some confidene into the procedure, hazards get only assigned + if there is no other hazard occuring at a bigger time interval in that country + Thus a track of possible_tracks_1 gets only assigned if there are no other + tracks in possible_tracks_2. 
+ The confidence can be expressed with a certainty level + + Parameters + ---------- + lookup : pd.DataFrame + to relate EMdatID to hazard + possible_tracks_1 : list + list of possible hits with smaller time horizon + possible_tracks_2 : list + list of possible hits with larger time horizon + level : int + level of confidence + + Returns + ------- + pd.DataFrame + lookup with assigend tracks and possible hits """ for i, _ in enumerate(possible_tracks_1): - if np.isnan(lookup['allocation_level'].values[i]): + if np.isnan(lookup["allocation_level"].values[i]): number_emdat_id = len(possible_tracks_1[i]) # print(number_emdat_id) for j in range(0, number_emdat_id): # check that number of possible track stays the same at given # time difference and that list is not empty - if len(possible_tracks_1[i][j]) == len(possible_tracks_2[i][j]) == 1 \ - and possible_tracks_1[i][j] != []: + if ( + len(possible_tracks_1[i][j]) == len(possible_tracks_2[i][j]) == 1 + and possible_tracks_1[i][j] != [] + ): # check that all tracks are the same - if all(possible_tracks_1[i][0] == possible_tracks_1[i][k] - for k in range(0, len(possible_tracks_1[i]))): + if all( + possible_tracks_1[i][0] == possible_tracks_1[i][k] + for k in range(0, len(possible_tracks_1[i])) + ): # check that track ID has not been assigned to that country already - ctry_lookup = lookup[lookup['hit_country'] - == lookup['hit_country'].values[i]] - if possible_tracks_1[i][0][0] not in ctry_lookup['ibtracsID'].values: - lookup['ibtracsID'].values[i] = possible_tracks_1[i][0][0] - lookup['allocation_level'].values[i] = level + ctry_lookup = lookup[ + lookup["hit_country"] == lookup["hit_country"].values[i] + ] + if ( + possible_tracks_1[i][0][0] + not in ctry_lookup["ibtracsID"].values + ): + lookup["ibtracsID"].values[i] = possible_tracks_1[i][0][0] + lookup["allocation_level"].values[i] = level elif possible_tracks_1[i][j] != []: - lookup['possible_track'].values[i] = possible_tracks_1[i] + lookup["possible_track"].values[i] = possible_tracks_1[i] else: - lookup['possible_track_all'].values[i] = possible_tracks_1[i] + lookup["possible_track_all"].values[i] = possible_tracks_1[i] return lookup def check_assigned_track(lookup, checkset): """compare lookup with assigned tracks to a set with checked sets - Parameters - ---------- - lookup: pd.DataFrame - dataframe to relate EMdatID to hazard - checkset: pd.DataFrame - dataframe with already checked hazards + Parameters + ---------- + lookup: pd.DataFrame + dataframe to relate EMdatID to hazard + checkset: pd.DataFrame + dataframe with already checked hazards - Returns - ------- - error scores + Returns + ------- + error scores """ # merge checkset and lookup - check = pd.merge(checkset, lookup[['hit_country', 'EM_ID', 'ibtracsID']], - on=['hit_country', 'EM_ID']) - check_size = len(check['ibtracsID'].values) + check = pd.merge( + checkset, + lookup[["hit_country", "EM_ID", "ibtracsID"]], + on=["hit_country", "EM_ID"], + ) + check_size = len(check["ibtracsID"].values) # not assigned values] - not_assigned = check['ibtracsID'].isnull().sum(axis=0) + not_assigned = check["ibtracsID"].isnull().sum(axis=0) # correct assigned values - correct = sum(check['ibtracsID'].values == check['IBtracsID_checked'].values) + correct = sum(check["ibtracsID"].values == check["IBtracsID_checked"].values) # wrongly assigned values - wrong = len(check['ibtracsID'].values) - not_assigned - correct - print('%.1f%% tracks assigned correctly, %.1f%% wrongly, %.1f%% not assigned' - % (correct / check_size * 100, - wrong / 
check_size * 100, - not_assigned / check_size * 100)) - - -def clean_emdat_df(emdat_file, countries=None, hazard=None, year_range=None, - target_version=None): + wrong = len(check["ibtracsID"].values) - not_assigned - correct + print( + "%.1f%% tracks assigned correctly, %.1f%% wrongly, %.1f%% not assigned" + % ( + correct / check_size * 100, + wrong / check_size * 100, + not_assigned / check_size * 100, + ) + ) + + +def clean_emdat_df( + emdat_file, countries=None, hazard=None, year_range=None, target_version=None +): """ Get a clean and standardized DataFrame from EM-DAT-CSV-file (1) load EM-DAT data from CSV to DataFrame and remove header/footer, @@ -563,7 +692,7 @@ def clean_emdat_df(emdat_file, countries=None, hazard=None, year_range=None, if isinstance(emdat_file, (str, Path)): df_emdat = pd.read_csv(emdat_file, encoding="ISO-8859-1", header=0) counter = 0 - while not ('Country' in df_emdat.columns and 'ISO' in df_emdat.columns): + while not ("Country" in df_emdat.columns and "ISO" in df_emdat.columns): counter += 1 df_emdat = pd.read_csv(emdat_file, encoding="ISO-8859-1", header=counter) if counter == 10: @@ -572,7 +701,7 @@ def clean_emdat_df(emdat_file, countries=None, hazard=None, year_range=None, elif isinstance(emdat_file, pd.DataFrame): df_emdat = emdat_file else: - raise TypeError('emdat_file needs to be str or DataFrame') + raise TypeError("emdat_file needs to be str or DataFrame") # drop rows with 9 or more NaN values (e.g. footer): df_emdat = df_emdat.dropna(thresh=9) @@ -580,26 +709,32 @@ def clean_emdat_df(emdat_file, countries=None, hazard=None, year_range=None, # (2.1) identify underlying EMDAT version of csv: version = None for vers in sorted(VARNAMES_EMDAT.keys()): - if len(df_emdat.columns) >= len(VARNAMES_EMDAT[vers]) and \ - all(item in list(df_emdat.columns) for item in VARNAMES_EMDAT[vers].values()): + if len(df_emdat.columns) >= len(VARNAMES_EMDAT[vers]) and all( + item in list(df_emdat.columns) for item in VARNAMES_EMDAT[vers].values() + ): version = vers if not version: - raise ValueError("the given emdat_file contains unexpected columns and cannot be" - " associated with any known EM-DAT file structure") + raise ValueError( + "the given emdat_file contains unexpected columns and cannot be" + " associated with any known EM-DAT file structure" + ) # (2.2) create new DataFrame df_data with column names as target version target_version = target_version or version - df_data = pd.DataFrame(index=df_emdat.index.values, - columns=VARNAMES_EMDAT[target_version].values()) - if 'Year' not in df_data.columns: # make sure column "Year" exists - df_data['Year'] = np.nan + df_data = pd.DataFrame( + index=df_emdat.index.values, columns=VARNAMES_EMDAT[target_version].values() + ) + if "Year" not in df_data.columns: # make sure column "Year" exists + df_data["Year"] = np.nan for _, col in enumerate(df_data.columns): # loop over columns if col in VARNAMES_EMDAT[version]: df_data[col] = df_emdat[VARNAMES_EMDAT[version][col]] elif col in df_emdat.columns: df_data[col] = df_emdat[col] - elif col == 'Year' and version <= 2018: + elif col == "Year" and version <= 2018: years_list = list() - for _, disaster_no in enumerate(df_emdat[VARNAMES_EMDAT[version]['Dis No']]): + for _, disaster_no in enumerate( + df_emdat[VARNAMES_EMDAT[version]["Dis No"]] + ): if isinstance(disaster_no, str): years_list.append(int(disaster_no[0:4])) else: @@ -613,33 +748,33 @@ def clean_emdat_df(emdat_file, countries=None, hazard=None, year_range=None, year_list = list() month_list = list() day_list = 
list() - for year in list(df_data['Year']): + for year in list(df_data["Year"]): if not np.isnan(year): - date_list.append(datetime.strptime(str(year), '%Y')) + date_list.append(datetime.strptime(str(year), "%Y")) else: - date_list.append(datetime.strptime(str('0001'), '%Y')) + date_list.append(datetime.strptime(str("0001"), "%Y")) boolean_warning = True - for idx, datestr in enumerate(list(df_emdat['Start date'])): + for idx, datestr in enumerate(list(df_emdat["Start date"])): try: - date_list[idx] = datetime.strptime(datestr[-7:], '%m/%Y') + date_list[idx] = datetime.strptime(datestr[-7:], "%m/%Y") except ValueError: if boolean_warning: - LOGGER.warning('EM_DAT CSV contains invalid time formats') + LOGGER.warning("EM_DAT CSV contains invalid time formats") boolean_warning = False try: - date_list[idx] = datetime.strptime(datestr, '%d/%m/%Y') + date_list[idx] = datetime.strptime(datestr, "%d/%m/%Y") except ValueError: if boolean_warning: - LOGGER.warning('EM_DAT CSV contains invalid time formats') + LOGGER.warning("EM_DAT CSV contains invalid time formats") boolean_warning = False day_list.append(date_list[idx].day) month_list.append(date_list[idx].month) year_list.append(date_list[idx].year) - df_data['Start Month'] = np.array(month_list, dtype='int') - df_data['Start Day'] = np.array(day_list, dtype='int') - df_data['Start Year'] = np.array(year_list, dtype='int') - for var in ['Disaster Subtype', 'Disaster Type', 'Country']: - df_data[VARNAMES_EMDAT[target_version][var]].fillna('None', inplace=True) + df_data["Start Month"] = np.array(month_list, dtype="int") + df_data["Start Day"] = np.array(day_list, dtype="int") + df_data["Start Year"] = np.array(year_list, dtype="int") + for var in ["Disaster Subtype", "Disaster Type", "Country"]: + df_data[VARNAMES_EMDAT[target_version][var]].fillna("None", inplace=True) # (3) Filter by countries, year range, and disaster type # (3.1) Countries: @@ -649,15 +784,17 @@ def clean_emdat_df(emdat_file, countries=None, hazard=None, year_range=None, for idx, country in enumerate(countries): # convert countries to iso3 alpha code: countries[idx] = u_coord.country_to_iso(country, "alpha3") - df_data = df_data[df_data['ISO'].isin(countries)].reset_index(drop=True) + df_data = df_data[df_data["ISO"].isin(countries)].reset_index(drop=True) # (3.2) Year range: if year_range: for idx in df_data.index: - if np.isnan(df_data.loc[0, 'Year']): - df_data.loc[0, 'Year'] = \ - df_data.loc[0, VARNAMES_EMDAT[target_version]['Start Year']] - df_data = df_data[(df_data['Year'] >= min(year_range)) & - (df_data['Year'] <= max(year_range))] + if np.isnan(df_data.loc[0, "Year"]): + df_data.loc[0, "Year"] = df_data.loc[ + 0, VARNAMES_EMDAT[target_version]["Start Year"] + ] + df_data = df_data[ + (df_data["Year"] >= min(year_range)) & (df_data["Year"] <= max(year_range)) + ] # (3.3) Disaster type: if hazard and isinstance(hazard, str): @@ -666,17 +803,29 @@ def clean_emdat_df(emdat_file, countries=None, hazard=None, year_range=None, disaster_types = list() disaster_subtypes = list() for idx, haz in enumerate(hazard): - if haz in df_data[VARNAMES_EMDAT[target_version]['Disaster Type']].unique(): + if haz in df_data[VARNAMES_EMDAT[target_version]["Disaster Type"]].unique(): disaster_types.append(haz) - if haz in df_data[VARNAMES_EMDAT[target_version]['Disaster Subtype']].unique(): + if ( + haz + in df_data[VARNAMES_EMDAT[target_version]["Disaster Subtype"]].unique() + ): disaster_subtypes.append(haz) if haz in PERIL_TYPE_MATCH_DICT.keys(): disaster_types += 
PERIL_TYPE_MATCH_DICT[haz] if haz in PERIL_SUBTYPE_MATCH_DICT.keys(): disaster_subtypes += PERIL_SUBTYPE_MATCH_DICT[haz] df_data = df_data[ - (df_data[VARNAMES_EMDAT[target_version]['Disaster Type']].isin(disaster_types)) | - (df_data[VARNAMES_EMDAT[target_version]['Disaster Subtype']].isin(disaster_subtypes))] + ( + df_data[VARNAMES_EMDAT[target_version]["Disaster Type"]].isin( + disaster_types + ) + ) + | ( + df_data[VARNAMES_EMDAT[target_version]["Disaster Subtype"]].isin( + disaster_subtypes + ) + ) + ] return df_data.reset_index(drop=True) @@ -709,13 +858,13 @@ def emdat_countries_by_hazard(emdat_file_csv, hazard=None, year_range=None): List of names of countries impacted by the disaster (sub-)types """ df_data = clean_emdat_df(emdat_file_csv, hazard=hazard, year_range=year_range) - countries_iso3a = list(df_data['ISO'].unique()) + countries_iso3a = list(df_data["ISO"].unique()) countries_names = list() for iso3a in countries_iso3a: try: countries_names.append(u_coord.country_to_iso(iso3a, "name")) except LookupError: - countries_names.append('NA') + countries_names.append("NA") return countries_iso3a, countries_names @@ -753,17 +902,26 @@ def scale_impact2refyear(impact_values, year_values, iso3a_values, reference_yea gdp_years[country][year] = gdp(country, year)[1] # loop through each value and apply scaling: for idx, val in enumerate(impact_values): - impact_values[idx] = val * gdp_ref[iso3a_values[idx]] / \ - gdp_years[iso3a_values[idx]][year_values[idx]] + impact_values[idx] = ( + val + * gdp_ref[iso3a_values[idx]] + / gdp_years[iso3a_values[idx]][year_values[idx]] + ) return list(impact_values) if not reference_year: return impact_values - raise ValueError('Invalid reference_year') - - -def emdat_impact_yearlysum(emdat_file_csv, countries=None, hazard=None, year_range=None, - reference_year=None, imp_str="Total Damages ('000 US$)", - version=None): + raise ValueError("Invalid reference_year") + + +def emdat_impact_yearlysum( + emdat_file_csv, + countries=None, + hazard=None, + year_range=None, + reference_year=None, + imp_str="Total Damages ('000 US$)", + version=None, +): """function to load EM-DAT data and sum impact per year Parameters @@ -798,38 +956,45 @@ def emdat_impact_yearlysum(emdat_file_csv, countries=None, hazard=None, year_ran """ version = version or max(VARNAMES_EMDAT.keys()) imp_str = VARNAMES_EMDAT[version][imp_str] - df_data = clean_emdat_df(emdat_file_csv, countries=countries, hazard=hazard, - year_range=year_range, target_version=version) - - df_data[imp_str + " scaled"] = scale_impact2refyear(df_data[imp_str].values, - df_data['Year'].values, - df_data['ISO'].values, - reference_year=reference_year) + df_data = clean_emdat_df( + emdat_file_csv, + countries=countries, + hazard=hazard, + year_range=year_range, + target_version=version, + ) + + df_data[imp_str + " scaled"] = scale_impact2refyear( + df_data[imp_str].values, + df_data["Year"].values, + df_data["ISO"].values, + reference_year=reference_year, + ) def country_df(df_data): - for data_iso in df_data['ISO'].unique(): + for data_iso in df_data["ISO"].unique(): country = u_coord.country_to_iso(data_iso, "alpha3") - df_country = df_data.loc[df_data['ISO'] == country] + df_country = df_data.loc[df_data["ISO"] == country] if not df_country.size: continue # Retrieve impact data for all years - all_years = np.arange(min(df_data['Year']), max(df_data['Year']) + 1) + all_years = np.arange(min(df_data["Year"]), max(df_data["Year"]) + 1) data_out = pd.DataFrame.from_records( [ ( year, - 
np.nansum(df_country[df_country['Year'].isin([year])][imp_str]), + np.nansum(df_country[df_country["Year"].isin([year])][imp_str]), np.nansum( - df_country[df_country['Year'].isin([year])][ + df_country[df_country["Year"].isin([year])][ imp_str + " scaled" ] ), ) for year in all_years ], - columns=["year", "impact", "impact_scaled"] + columns=["year", "impact", "impact_scaled"], ) # Add static data @@ -848,9 +1013,15 @@ def country_df(df_data): return out -def emdat_impact_event(emdat_file_csv, countries=None, hazard=None, year_range=None, - reference_year=None, imp_str="Total Damages ('000 US$)", - version=None): +def emdat_impact_event( + emdat_file_csv, + countries=None, + hazard=None, + year_range=None, + reference_year=None, + imp_str="Total Damages ('000 US$)", + version=None, +): """function to load EM-DAT data return impact per event Parameters @@ -892,29 +1063,45 @@ def emdat_impact_event(emdat_file_csv, countries=None, hazard=None, year_range=N """ version = version or max(VARNAMES_EMDAT.keys()) imp_str = VARNAMES_EMDAT[version][imp_str] - df_data = clean_emdat_df(emdat_file_csv, hazard=hazard, year_range=year_range, - countries=countries, target_version=version) - df_data['year'] = df_data['Year'] - df_data['reference_year'] = reference_year - df_data['impact'] = df_data[imp_str] - df_data['impact_scaled'] = scale_impact2refyear(df_data[imp_str].values, df_data['Year'].values, - df_data['ISO'].values, - reference_year=reference_year) - df_data['region_id'] = np.nan - for country in df_data['ISO'].unique(): + df_data = clean_emdat_df( + emdat_file_csv, + hazard=hazard, + year_range=year_range, + countries=countries, + target_version=version, + ) + df_data["year"] = df_data["Year"] + df_data["reference_year"] = reference_year + df_data["impact"] = df_data[imp_str] + df_data["impact_scaled"] = scale_impact2refyear( + df_data[imp_str].values, + df_data["Year"].values, + df_data["ISO"].values, + reference_year=reference_year, + ) + df_data["region_id"] = np.nan + for country in df_data["ISO"].unique(): try: - df_data.loc[df_data['ISO'] == country, 'region_id'] = \ + df_data.loc[df_data["ISO"] == country, "region_id"] = ( u_coord.country_to_iso(country, "numeric") + ) except LookupError: - LOGGER.warning('ISO3alpha code not found in iso_country: %s', country) - if '000 US' in imp_str: - df_data['impact'] *= 1e3 - df_data['impact_scaled'] *= 1e3 + LOGGER.warning("ISO3alpha code not found in iso_country: %s", country) + if "000 US" in imp_str: + df_data["impact"] *= 1e3 + df_data["impact_scaled"] *= 1e3 return df_data.reset_index(drop=True) -def emdat_to_impact(emdat_file_csv, hazard_type_climada, year_range=None, countries=None, - hazard_type_emdat=None, reference_year=None, imp_str="Total Damages"): +def emdat_to_impact( + emdat_file_csv, + hazard_type_climada, + year_range=None, + countries=None, + hazard_type_emdat=None, + reference_year=None, + imp_str="Total Damages", +): """function to load EM-DAT data return impact per event Parameters @@ -975,30 +1162,40 @@ def emdat_to_impact(emdat_file_csv, hazard_type_climada, year_range=None, countr impact_instance = Impact(haz_type=hazard_type_climada) # Load EM-DAT impact data by event: - em_data = emdat_impact_event(emdat_file_csv, countries=countries, hazard=hazard_type_emdat, - year_range=year_range, reference_year=reference_year, - imp_str=imp_str, version=version) + em_data = emdat_impact_event( + emdat_file_csv, + countries=countries, + hazard=hazard_type_emdat, + year_range=year_range, + reference_year=reference_year, + 
imp_str=imp_str, + version=version, + ) if isinstance(countries, str): countries = [countries] elif not countries: - countries = emdat_countries_by_hazard(emdat_file_csv, year_range=year_range, - hazard=hazard_type_emdat)[0] + countries = emdat_countries_by_hazard( + emdat_file_csv, year_range=year_range, hazard=hazard_type_emdat + )[0] if em_data.empty: return impact_instance, countries impact_instance.event_id = np.array(em_data.index, int) - impact_instance.event_name = list( - em_data[VARNAMES_EMDAT[version]['Dis No']]) + impact_instance.event_name = list(em_data[VARNAMES_EMDAT[version]["Dis No"]]) date_list = list() - for year in list(em_data['Year']): - date_list.append(datetime.toordinal(datetime.strptime(str(year), '%Y'))) - if 'Start Year' in em_data.columns and 'Start Month' in em_data.columns \ - and 'Start Day' in em_data.columns: + for year in list(em_data["Year"]): + date_list.append(datetime.toordinal(datetime.strptime(str(year), "%Y"))) + if ( + "Start Year" in em_data.columns + and "Start Month" in em_data.columns + and "Start Day" in em_data.columns + ): idx = 0 - for year, month, day in zip(em_data['Start Year'], em_data['Start Month'], - em_data['Start Day']): + for year, month, day in zip( + em_data["Start Year"], em_data["Start Month"], em_data["Start Day"] + ): if np.isnan(year): idx += 1 continue @@ -1006,8 +1203,9 @@ def emdat_to_impact(emdat_file_csv, hazard_type_climada, year_range=None, countr month = 1 if np.isnan(day): day = 1 - date_list[idx] = datetime.toordinal(datetime.strptime( - '%02i/%02i/%04i' % (day, month, year), '%d/%m/%Y')) + date_list[idx] = datetime.toordinal( + datetime.strptime("%02i/%02i/%04i" % (day, month, year), "%d/%m/%Y") + ) idx += 1 impact_instance.date = np.array(date_list, int) impact_instance.crs = DEF_CRS @@ -1018,18 +1216,20 @@ def emdat_to_impact(emdat_file_csv, hazard_type_climada, year_range=None, countr impact_instance.at_event = np.array(em_data["impact_scaled"]) impact_instance.at_event[np.isnan(impact_instance.at_event)] = 0 if not year_range: - year_range = [em_data['Year'].min(), em_data['Year'].max()] + year_range = [em_data["Year"].min(), em_data["Year"].max()] impact_instance.frequency = np.ones(em_data.shape[0]) / (1 + np.diff(year_range)) - impact_instance.frequency_unit = '1/year' + impact_instance.frequency_unit = "1/year" impact_instance.tot_value = 0 - impact_instance.aai_agg = np.nansum(impact_instance.at_event * impact_instance.frequency) - impact_instance.unit = 'USD' + impact_instance.aai_agg = np.nansum( + impact_instance.at_event * impact_instance.frequency + ) + impact_instance.unit = "USD" impact_instance.imp_mat = [] # init rough exposure with central point per country - shp = shapereader.natural_earth(resolution='110m', - category='cultural', - name='admin_0_countries') + shp = shapereader.natural_earth( + resolution="110m", category="cultural", name="admin_0_countries" + ) shp = shapereader.Reader(shp) countries_reg_id = list() countries_lat = list() @@ -1039,10 +1239,10 @@ def emdat_to_impact(emdat_file_csv, hazard_type_climada, year_range=None, countr try: cntry = u_coord.country_to_iso(cntry, "alpha3") except LookupError: - LOGGER.warning('Country not found in iso_country: %s', cntry) + LOGGER.warning("Country not found in iso_country: %s", cntry) cntry_boolean = False for rec in shp.records(): - if rec.attributes['ADM0_A3'].casefold() == cntry.casefold(): + if rec.attributes["ADM0_A3"].casefold() == cntry.casefold(): bbox = rec.geometry.bounds cntry_boolean = True break @@ -1056,13 +1256,15 @@ def 
emdat_to_impact(emdat_file_csv, hazard_type_climada, year_range=None, countr countries_reg_id.append(u_coord.country_to_iso(cntry, "numeric")) except LookupError: countries_reg_id.append(0) - df_tmp = em_data[em_data[VARNAMES_EMDAT[version]['ISO']].str.contains(cntry)] + df_tmp = em_data[em_data[VARNAMES_EMDAT[version]["ISO"]].str.contains(cntry)] if not reference_year: - impact_instance.eai_exp[idx] = sum(np.array(df_tmp["impact"]) * - impact_instance.frequency[0]) + impact_instance.eai_exp[idx] = sum( + np.array(df_tmp["impact"]) * impact_instance.frequency[0] + ) else: - impact_instance.eai_exp[idx] = sum(np.array(df_tmp["impact_scaled"]) * - impact_instance.frequency[0]) + impact_instance.eai_exp[idx] = sum( + np.array(df_tmp["impact_scaled"]) * impact_instance.frequency[0] + ) impact_instance.coord_exp = np.stack([countries_lat, countries_lon], axis=1) return impact_instance, countries diff --git a/climada/engine/test/data/emdat_testdata_BGD_USA_1970-2017.csv b/climada/engine/test/data/emdat_testdata_BGD_USA_1970-2017.csv index 5ca0ec256..00748e54a 100644 --- a/climada/engine/test/data/emdat_testdata_BGD_USA_1970-2017.csv +++ b/climada/engine/test/data/emdat_testdata_BGD_USA_1970-2017.csv @@ -691,7 +691,7 @@ Start date,End date,Country,ISO,Location,Latitude,Longitude,Magnitude value,Magn 02.05.02,08.05.02,United States of America (the),USA,"Pike district (Kentucky province), Virginia province",,,,Km2,Flood,Riverine flood,--,--,9,1000,13000,0,,2002-0266 05.05.02,05.05.02,United States of America (the),USA,"Happy town (Randall, Swisher districts, Texas province)",,,,Kph,Storm,Convective storm,--,--,2,183,0,0,,2002-0283 21.04.02,21.04.02,United States of America (the),USA,"Wayne, Jefferson districts (Illinois province)",,,,Kph,Storm,Convective storm,--,--,1,12,4000,0,,2002-0287 -27.04.02,03.05.02,United States of America (the),USA,"Breckinridge, Meade, Crittenden, Webster, Hopkins, Ohio, Hardin, Edmonson districts (Kentucky province), Bollinger, Howell districts (Missouri province), Charles, Calvert, Dorchester, Wicomico, Cecil districts (Maryland province), +27.04.02,03.05.02,United States of America (the),USA,"Breckinridge, Meade, Crittenden, Webster, Hopkins, Ohio, Hardin, Edmonson districts (Kentucky province), Bollinger, Howell districts (Missouri province), Charles, Calvert, Dorchester, Wicomico, Cecil districts (Maryland province), Illinois (Clay,Union, Johnson,Pope, Moultrie, Saline, Bond), Gordon district (Georgia province), Atchison district (Kansas province), Erie, Allegany districts (New York province), Stark district (Ohio province), Indiana, Mercer, Venango, Butler, Armstrong, Columbia, Lebanon, Allegheny districts (Pennsylvania province), Rutherford, Lake, Henry, Carter districts (Tennessee province), Virginia (Shenandoah, Greensville, Bedford, (Campbell, Nottoway, Prince George), Marshall district (West Virginia province), Pontotoc, Chickasaw districts (Mississippi province), Perry district (Indiana province)",,,290,Kph,Storm,Convective storm,--,--,10,100,2200000,2000500,,2002-0310 /04/2002,/04/2002,United States of America (the),USA,Arizona province,,,145,Km2,Wildfire,"Land fire (Brush, Bush, Pasture)",--,--,0,0,0,0,,2002-0312 @@ -858,7 +858,7 @@ Virginia (Shenandoah, Greensville, Bedford, (Campbell, Nottoway, Prince George), 26.06.07,06.07.07,United States of America (the),USA,"Wichita Falls area (Wichita district, Texas province), Georgetown areas (Williamson district, Texas province), Burnet, Marble Falls, Granite Shoals areas (Burnet district, Texas province), 
Granbury area (Hood district, Texas province), Lampasas, Parker, Eastland districts (Texas province), Miami, Commerce areas (Ottawa district, Oklahoma province), Shawnee, Tecumseh, Maud areas (Pottawatomie district, Oklahoma province), Oklahoma city (Oklahoma district, Oklahoma province), Waurika area (Jefferson district, Oklahoma province), Bartlesville, Dewey areas (Washington district, Oklahoma province), Love, Lincoln districts (Oklahoma province), Coffeyville area (Montgomery district, Kansas province), Osawatomie area (Miami district, Kansas province), Allen, Labette, Neosho, Wilson, Woodson districts (Kansas province), Rockville, Papinville areas (Bates district, Missouri province), Vernon district (Missouri province)",32.84,-97.17,507800,Km2,Flood,Riverine flood,--,Rain,8,5000,0,0,,2007-0244 19.06.07,20.06.07,United States of America (the),USA,New York province,42.23,-74.95,6500,Km2,Flood,Flash flood,Rain,--,4,120,0,0,,2007-0251 17.06.07,22.06.07,United States of America (the),USA,"North Texas, Oklahoma provinces",33.45,-97.3,34750,Km2,Flood,Riverine flood,--,--,10,750,28000,0,,2007-0254 -21.07.07,03.08.07,Bangladesh,BGD,"Goalanda village (Goalandaghat area, Rajbari district, Dhaka province), Aricha port (Shibalaya area, Manikganj district, Dhaka province), Bhagyakul village (Sreenagar area, Munshiganj district, Dhaka province), +21.07.07,03.08.07,Bangladesh,BGD,"Goalanda village (Goalandaghat area, Rajbari district, Dhaka province), Aricha port (Shibalaya area, Manikganj district, Dhaka province), Bhagyakul village (Sreenagar area, Munshiganj district, Dhaka province), Bandarban, Feni, Comilla districts (Chittagong province), Sirajganj district (Rajshahi province), Rangpur province",23.92,91.23,7000,Km2,Flood,Riverine flood,"Slide (land, mud, snow, rock)",--,1110,13771380,100000,0,,2007-0311 24.06.07,02.07.07,United States of America (the),USA,"Alpine, Amador, Calaveras, El Dorado, Mono, Placer, Tuolumne districts (California province)",,,,Km2,Wildfire,Forest fire,--,--,0,768,0,150000,,2007-0351 @@ -980,7 +980,7 @@ Bandarban, Feni, Comilla districts (Chittagong province), Sirajganj district (Ra 22.01.12,23.01.12,United States of America (the),USA,"Jefferson, Chilton districts (Alabama province)",,,240,Kph,Storm,Convective storm,--,--,2,100,175000,200000,,2012-0010 28.02.12,29.02.12,United States of America (the),USA,"Nebraska, Kansas, Missouri, Illinois, Indiana, Kentucky provinces",,,270,Kph,Storm,Convective storm,--,--,14,200,500000,450000,,2012-0055 02.03.12,04.03.12,United States of America (the),USA,"Alabama, Tennessee, Illinois, Kentucky, Indiana, Ohio, Georgia, Florida, Mississippi, North Carolina, Virginia provinces",,,112,Kph,Storm,Convective storm,Flood,Hail,41,0,5000000,2500000,,2012-0060 -06.04.12,06.04.12,Bangladesh,BGD,"Panchagarh, Rangpur, Nilphamari districts (Rangpur province), Noakhali, Comilla districts (Chittagong province), Narsingdi, Jamalpur, Faridpur, Shariatpur districts (Dhaka province), Jessore, Satkhira, Khulna, Chuadanga districts (Khulna province), Rajshahi district (Rajshahi province), Sylhet district (Sylhet province), +06.04.12,06.04.12,Bangladesh,BGD,"Panchagarh, Rangpur, Nilphamari districts (Rangpur province), Noakhali, Comilla districts (Chittagong province), Narsingdi, Jamalpur, Faridpur, Shariatpur districts (Dhaka province), Jessore, Satkhira, Khulna, Chuadanga districts (Khulna province), Rajshahi district (Rajshahi province), Sylhet district (Sylhet province), Bhola district (Barisal province)",,,56,Kph,Storm,Convective 
storm,Hail,--,25,55121,0,0,,2012-0082 02.04.12,03.04.12,United States of America (the),USA,"Dallas, Tarrant districts (Texas province)",,,,Kph,Storm,Convective storm,--,--,0,3300,1550000,800000,,2012-0122 14.04.12,15.04.12,United States of America (the),USA,"Oklahoma, Kansas, Iowa, Nebraska, South Dakota, Minnesota provinces",,,,Kph,Storm,Convective storm,--,--,6,297,1800000,910000,,2012-0156 @@ -1165,4 +1165,4 @@ Wilkes, Ashe )",,,140,Kph,Storm,Tropical cyclone,--,--,0,60,250000,0,Tropical de 03.11.17,12.12.17,Bangladesh,BGD,Cox’s Bazar ,,,,Vaccinated,Epidemic,Bacterial disease,--,--,15,789,0,0,Diphteria,2017-0556 06.03.17,09.03.17,United States of America (the),USA,"Missouri (Oak Grove in Jackson County, Clay and Clinton (Trimble, Plattsburg, Lathrop) counties), Iowa (Centerville in Appanoose county, Muscatine), Minnesota (Sherburne, Freeborn counties, Lake Ann in Carver county), Kansas (Wabaunsee, Pottawatomie and Butler counties), Wisconsin, Arkansas, Oklahoma, Illinois, Mississipi, Michigan, New-York, Pennsylvania, Massachussets, Ohio, Nebraska, Indiana",,,130,Kph,Storm,Convective storm,Hail,--,2,615,2200000,2000000,,2017-0563 25.03.17,28.03.17,United States of America (the),USA,"Texas (Justin in Denton, Collin, Rockwall, Lubbock counties, Seymour in Baylor, Dallas – Fort Worth metro area, Houston metro area), Oklahoma (El Reno in Canadian, Oklahoma city metro region, Caddo in Bryan, Cleveland South and East), Kansas (south), Kentucky, Tennessee, Mississippi, Alabama, Georgia, Indianapolis (Marion-IN)",,,175,Kph,Storm,Convective storm,Hail,Flood,1,0,2700000,2000000,,2017-0564 -/03/2017,/09/2017,United States of America (the),USA,"Upper Midwest, Northern Rockies and parts of the West",,,,Km2,Drought,Drought,--,--,0,0,2500000,1900000,,2017-9550 \ No newline at end of file +/03/2017,/09/2017,United States of America (the),USA,"Upper Midwest, Northern Rockies and parts of the West",,,,Km2,Drought,Drought,--,--,0,0,2500000,1900000,,2017-9550 diff --git a/climada/engine/test/data/emdat_testdata_fake_2007-2011.csv b/climada/engine/test/data/emdat_testdata_fake_2007-2011.csv index 6826050a4..3d6242746 100644 --- a/climada/engine/test/data/emdat_testdata_fake_2007-2011.csv +++ b/climada/engine/test/data/emdat_testdata_fake_2007-2011.csv @@ -4,4 +4,4 @@ Start date,End date,Country,ISO,Location,Latitude,Longitude,Magnitude value,Magn 15.01.09,26.01.09,Switzerland,CHE,Zurich,47.37,8.54,1,Km2,Flood,Riverine flood,--,--,0,0,2000,0,FakeFlood3,2009-0001 15.01.10,27.01.10,Switzerland,CHE,Zurich,47.37,8.54,1,Km2,Flood,Riverine flood,--,--,0,0,2000,0,FakeFlood4,2010-0001 15.01.11,28.01.11,Switzerland,CHE,Zurich,47.37,8.54,1,Km2,Flood,Riverine flood,--,--,0,0,2000,0,FakeFlood5,2011-0001 -15.01.11,28.01.11,Germany,DEU,Konstanz,22,22,2,Km2,Flood,Riverine flood,--,--,0,0,1000,0,FakeFlood5,2011-0001 \ No newline at end of file +15.01.11,28.01.11,Germany,DEU,Konstanz,22,22,2,Km2,Flood,Riverine flood,--,--,0,0,1000,0,FakeFlood5,2011-0001 diff --git a/climada/engine/test/test_cost_benefit.py b/climada/engine/test/test_cost_benefit.py index 095716fc9..ecf100f74 100644 --- a/climada/engine/test/test_cost_benefit.py +++ b/climada/engine/test/test_cost_benefit.py @@ -18,28 +18,35 @@ Test CostBenefit class. 
""" -from pathlib import Path + import copy import unittest +from pathlib import Path + import numpy as np -from climada.entity.entity_def import Entity +from climada.engine import ImpactCalc +from climada.engine.cost_benefit import ( + CostBenefit, + _norm_values, + risk_aai_agg, + risk_rp_100, + risk_rp_250, +) from climada.entity.disc_rates import DiscRates +from climada.entity.entity_def import Entity from climada.hazard.base import Hazard -from climada.engine.cost_benefit import CostBenefit, risk_aai_agg, \ - risk_rp_100, risk_rp_250, _norm_values -from climada.engine import ImpactCalc -from climada.util.constants import ENT_DEMO_FUTURE, ENT_DEMO_TODAY -from climada.util.api_client import Client - from climada.test import get_test_file +from climada.util.api_client import Client +from climada.util.constants import ENT_DEMO_FUTURE, ENT_DEMO_TODAY +ENT_TEST_MAT = get_test_file("demo_today", file_format="MAT-file") +HAZ_TEST_TC: Path = get_test_file("test_tc_florida") -ENT_TEST_MAT = get_test_file('demo_today', file_format='MAT-file') -HAZ_TEST_TC :Path = get_test_file('test_tc_florida') class TestSteps(unittest.TestCase): """Test intermediate steps""" + def test_calc_impact_measures_pass(self): """Test _calc_impact_measures against reference value""" self.assertTrue(HAZ_TEST_TC.is_file(), "{} is not a file".format(HAZ_TEST_TC)) @@ -48,16 +55,22 @@ def test_calc_impact_measures_pass(self): self.assertTrue(ENT_TEST_MAT.is_file(), "{} is not a file".format(ENT_TEST_MAT)) entity = Entity.from_mat(ENT_TEST_MAT) entity.check() - entity.measures._data['TC'] = entity.measures._data.pop('XX') - for meas in entity.measures.get_measure('TC'): - meas.haz_type = 'TC' + entity.measures._data["TC"] = entity.measures._data.pop("XX") + for meas in entity.measures.get_measure("TC"): + meas.haz_type = "TC" entity.check() entity.exposures.assign_centroids(hazard) cost_ben = CostBenefit() - cost_ben._calc_impact_measures(hazard, entity.exposures, entity.measures, - entity.impact_funcs, when='future', - risk_func=risk_aai_agg, save_imp=True) + cost_ben._calc_impact_measures( + hazard, + entity.exposures, + entity.measures, + entity.impact_funcs, + when="future", + risk_func=risk_aai_agg, + save_imp=True, + ) self.assertEqual(cost_ben.imp_meas_present, dict()) self.assertEqual(cost_ben.cost_ben_ratio, dict()) @@ -66,131 +79,250 @@ def test_calc_impact_measures_pass(self): self.assertEqual(cost_ben.present_year, 2016) self.assertEqual(cost_ben.future_year, 2030) - self.assertEqual(cost_ben.imp_meas_future['no measure']['cost'], (0, 0)) - self.assertAlmostEqual(cost_ben.imp_meas_future['no measure']['risk'], - 6.51220115756442e+09, places=3) - new_efc = cost_ben.imp_meas_future['no measure']['impact'].calc_freq_curve() + self.assertEqual(cost_ben.imp_meas_future["no measure"]["cost"], (0, 0)) + self.assertAlmostEqual( + cost_ben.imp_meas_future["no measure"]["risk"], + 6.51220115756442e09, + places=3, + ) + new_efc = cost_ben.imp_meas_future["no measure"]["impact"].calc_freq_curve() self.assertTrue( - np.allclose(new_efc.return_per, - cost_ben.imp_meas_future['no measure']['efc'].return_per)) + np.allclose( + new_efc.return_per, + cost_ben.imp_meas_future["no measure"]["efc"].return_per, + ) + ) self.assertTrue( - np.allclose(new_efc.impact, cost_ben.imp_meas_future['no measure']['efc'].impact)) + np.allclose( + new_efc.impact, cost_ben.imp_meas_future["no measure"]["efc"].impact + ) + ) self.assertEqual( - cost_ben.imp_meas_future['no measure']['impact'].at_event.nonzero()[0].size, - 841) - 
self.assertAlmostEqual(cost_ben.imp_meas_future['no measure']['impact'].at_event[14082], - 8.801682862431524e+06, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_future['no measure']['impact'].tot_value, - 6.570532945599105e+11, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_future['no measure']['impact'].aai_agg, - 6.51220115756442e+09, places=3) - - self.assertAlmostEqual(cost_ben.imp_meas_future['Mangroves']['cost'][0], - 1.3117683608515418e+09, places=3) - self.assertEqual(cost_ben.imp_meas_future['Mangroves']['cost'][1], 1) - self.assertAlmostEqual(cost_ben.imp_meas_future['Mangroves']['risk'], - 4.850407096284983e+09, places=3) - new_efc = cost_ben.imp_meas_future['Mangroves']['impact'].calc_freq_curve() + cost_ben.imp_meas_future["no measure"]["impact"].at_event.nonzero()[0].size, + 841, + ) + self.assertAlmostEqual( + cost_ben.imp_meas_future["no measure"]["impact"].at_event[14082], + 8.801682862431524e06, + places=3, + ) + self.assertAlmostEqual( + cost_ben.imp_meas_future["no measure"]["impact"].tot_value, + 6.570532945599105e11, + places=3, + ) + self.assertAlmostEqual( + cost_ben.imp_meas_future["no measure"]["impact"].aai_agg, + 6.51220115756442e09, + places=3, + ) + + self.assertAlmostEqual( + cost_ben.imp_meas_future["Mangroves"]["cost"][0], + 1.3117683608515418e09, + places=3, + ) + self.assertEqual(cost_ben.imp_meas_future["Mangroves"]["cost"][1], 1) + self.assertAlmostEqual( + cost_ben.imp_meas_future["Mangroves"]["risk"], + 4.850407096284983e09, + places=3, + ) + new_efc = cost_ben.imp_meas_future["Mangroves"]["impact"].calc_freq_curve() self.assertTrue( - np.allclose(new_efc.return_per, - cost_ben.imp_meas_future['Mangroves']['efc'].return_per)) + np.allclose( + new_efc.return_per, + cost_ben.imp_meas_future["Mangroves"]["efc"].return_per, + ) + ) self.assertTrue( - np.allclose(new_efc.impact, cost_ben.imp_meas_future['Mangroves']['efc'].impact)) + np.allclose( + new_efc.impact, cost_ben.imp_meas_future["Mangroves"]["efc"].impact + ) + ) self.assertEqual( - cost_ben.imp_meas_future['Mangroves']['impact'].at_event.nonzero()[0].size, - 665) - self.assertAlmostEqual(cost_ben.imp_meas_future['Mangroves']['impact'].at_event[13901], - 1.29576562770977e+09, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_future['Mangroves']['impact'].tot_value, - 6.570532945599105e+11, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_future['Mangroves']['impact'].aai_agg, - 4.850407096284983e+09, places=3) - - self.assertAlmostEqual(cost_ben.imp_meas_future['Beach nourishment']['cost'][0], - 1.728000000000000e+09, places=3) - self.assertEqual(cost_ben.imp_meas_future['Beach nourishment']['cost'][1], 1) - self.assertAlmostEqual(cost_ben.imp_meas_future['Beach nourishment']['risk'], - 5.188921355413834e+09, places=3) - new_efc = cost_ben.imp_meas_future['Beach nourishment']['impact'].calc_freq_curve() + cost_ben.imp_meas_future["Mangroves"]["impact"].at_event.nonzero()[0].size, + 665, + ) + self.assertAlmostEqual( + cost_ben.imp_meas_future["Mangroves"]["impact"].at_event[13901], + 1.29576562770977e09, + places=3, + ) + self.assertAlmostEqual( + cost_ben.imp_meas_future["Mangroves"]["impact"].tot_value, + 6.570532945599105e11, + places=3, + ) + self.assertAlmostEqual( + cost_ben.imp_meas_future["Mangroves"]["impact"].aai_agg, + 4.850407096284983e09, + places=3, + ) + + self.assertAlmostEqual( + cost_ben.imp_meas_future["Beach nourishment"]["cost"][0], + 1.728000000000000e09, + places=3, + ) + self.assertEqual(cost_ben.imp_meas_future["Beach nourishment"]["cost"][1], 1) + 
self.assertAlmostEqual( + cost_ben.imp_meas_future["Beach nourishment"]["risk"], + 5.188921355413834e09, + places=3, + ) + new_efc = cost_ben.imp_meas_future["Beach nourishment"][ + "impact" + ].calc_freq_curve() self.assertTrue( - np.allclose(new_efc.return_per, - cost_ben.imp_meas_future['Beach nourishment']['efc'].return_per)) + np.allclose( + new_efc.return_per, + cost_ben.imp_meas_future["Beach nourishment"]["efc"].return_per, + ) + ) self.assertTrue( - np.allclose(new_efc.impact, - cost_ben.imp_meas_future['Beach nourishment']['efc'].impact)) + np.allclose( + new_efc.impact, + cost_ben.imp_meas_future["Beach nourishment"]["efc"].impact, + ) + ) self.assertEqual( - cost_ben.imp_meas_future['Beach nourishment']['impact'].at_event.nonzero()[0].size, - 702) - self.assertEqual(cost_ben.imp_meas_future['Beach nourishment']['impact'].at_event[1110], - 0.0) - self.assertAlmostEqual(cost_ben.imp_meas_future['Beach nourishment']['impact'].eai_exp[5], - 1.1133679079730146e+08, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_future['Beach nourishment']['impact'].tot_value, - 6.570532945599105e+11, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_future['Beach nourishment']['impact'].aai_agg, - 5.188921355413834e+09, places=3) - - self.assertAlmostEqual(cost_ben.imp_meas_future['Seawall']['cost'][0], - 8.878779433630093e+09, places=3) - self.assertEqual(cost_ben.imp_meas_future['Seawall']['cost'][1], 1) - self.assertAlmostEqual(cost_ben.imp_meas_future['Seawall']['risk'], - 4.736400526119911e+09, places=3) - new_efc = cost_ben.imp_meas_future['Seawall']['impact'].calc_freq_curve() - self.assertTrue(np.allclose(new_efc.return_per, - cost_ben.imp_meas_future['Seawall']['efc'].return_per)) - self.assertTrue(np.allclose(new_efc.impact, - cost_ben.imp_meas_future['Seawall']['efc'].impact)) - self.assertEqual(cost_ben.imp_meas_future['Seawall']['impact'].at_event.nonzero()[0].size, - 73) - self.assertEqual(cost_ben.imp_meas_future['Seawall']['impact'].at_event[1229], 0.0) - self.assertAlmostEqual(cost_ben.imp_meas_future['Seawall']['impact'].tot_value, - 6.570532945599105e+11, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_future['Seawall']['impact'].aai_agg, - 4.736400526119911e+09, places=3) - - self.assertAlmostEqual(cost_ben.imp_meas_future['Building code']['cost'][0], - 9.200000000000000e+09, places=3) - self.assertEqual(cost_ben.imp_meas_future['Building code']['cost'][1], 1) - self.assertAlmostEqual(cost_ben.imp_meas_future['Building code']['risk'], - 4.884150868173321e+09, places=3) - new_efc = cost_ben.imp_meas_future['Building code']['impact'].calc_freq_curve() - self.assertTrue(np.allclose(new_efc.return_per, - cost_ben.imp_meas_future['Building code']['efc'].return_per)) - self.assertTrue(np.allclose(new_efc.impact, - cost_ben.imp_meas_future['Building code']['efc'].impact)) + cost_ben.imp_meas_future["Beach nourishment"]["impact"] + .at_event.nonzero()[0] + .size, + 702, + ) self.assertEqual( - cost_ben.imp_meas_future['Building code']['impact'].at_event.nonzero()[0].size, - 841) - self.assertEqual(cost_ben.imp_meas_future['Building code']['impact'].at_event[122], 0.0) - self.assertAlmostEqual(cost_ben.imp_meas_future['Building code']['impact'].eai_exp[11], - 7.757060129393841e+07, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_future['Building code']['impact'].tot_value, - 6.570532945599105e+11, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_future['Building code']['impact'].aai_agg, - 4.884150868173321e+09, places=3) + cost_ben.imp_meas_future["Beach 
nourishment"]["impact"].at_event[1110], 0.0 + ) + self.assertAlmostEqual( + cost_ben.imp_meas_future["Beach nourishment"]["impact"].eai_exp[5], + 1.1133679079730146e08, + places=3, + ) + self.assertAlmostEqual( + cost_ben.imp_meas_future["Beach nourishment"]["impact"].tot_value, + 6.570532945599105e11, + places=3, + ) + self.assertAlmostEqual( + cost_ben.imp_meas_future["Beach nourishment"]["impact"].aai_agg, + 5.188921355413834e09, + places=3, + ) + + self.assertAlmostEqual( + cost_ben.imp_meas_future["Seawall"]["cost"][0], + 8.878779433630093e09, + places=3, + ) + self.assertEqual(cost_ben.imp_meas_future["Seawall"]["cost"][1], 1) + self.assertAlmostEqual( + cost_ben.imp_meas_future["Seawall"]["risk"], 4.736400526119911e09, places=3 + ) + new_efc = cost_ben.imp_meas_future["Seawall"]["impact"].calc_freq_curve() + self.assertTrue( + np.allclose( + new_efc.return_per, + cost_ben.imp_meas_future["Seawall"]["efc"].return_per, + ) + ) + self.assertTrue( + np.allclose( + new_efc.impact, cost_ben.imp_meas_future["Seawall"]["efc"].impact + ) + ) + self.assertEqual( + cost_ben.imp_meas_future["Seawall"]["impact"].at_event.nonzero()[0].size, 73 + ) + self.assertEqual( + cost_ben.imp_meas_future["Seawall"]["impact"].at_event[1229], 0.0 + ) + self.assertAlmostEqual( + cost_ben.imp_meas_future["Seawall"]["impact"].tot_value, + 6.570532945599105e11, + places=3, + ) + self.assertAlmostEqual( + cost_ben.imp_meas_future["Seawall"]["impact"].aai_agg, + 4.736400526119911e09, + places=3, + ) + + self.assertAlmostEqual( + cost_ben.imp_meas_future["Building code"]["cost"][0], + 9.200000000000000e09, + places=3, + ) + self.assertEqual(cost_ben.imp_meas_future["Building code"]["cost"][1], 1) + self.assertAlmostEqual( + cost_ben.imp_meas_future["Building code"]["risk"], + 4.884150868173321e09, + places=3, + ) + new_efc = cost_ben.imp_meas_future["Building code"]["impact"].calc_freq_curve() + self.assertTrue( + np.allclose( + new_efc.return_per, + cost_ben.imp_meas_future["Building code"]["efc"].return_per, + ) + ) + self.assertTrue( + np.allclose( + new_efc.impact, cost_ben.imp_meas_future["Building code"]["efc"].impact + ) + ) + self.assertEqual( + cost_ben.imp_meas_future["Building code"]["impact"] + .at_event.nonzero()[0] + .size, + 841, + ) + self.assertEqual( + cost_ben.imp_meas_future["Building code"]["impact"].at_event[122], 0.0 + ) + self.assertAlmostEqual( + cost_ben.imp_meas_future["Building code"]["impact"].eai_exp[11], + 7.757060129393841e07, + places=3, + ) + self.assertAlmostEqual( + cost_ben.imp_meas_future["Building code"]["impact"].tot_value, + 6.570532945599105e11, + places=3, + ) + self.assertAlmostEqual( + cost_ben.imp_meas_future["Building code"]["impact"].aai_agg, + 4.884150868173321e09, + places=3, + ) def test_cb_one_meas_pres_pass(self): """Test _cost_ben_one with different future""" - meas_name = 'Mangroves' + meas_name = "Mangroves" meas_val = dict() - meas_val['cost'] = (1.3117683608515418e+09, 1) - meas_val['risk'] = 4.826231151473135e+10 - meas_val['efc'] = None - meas_val['risk_transf'] = 0 + meas_val["cost"] = (1.3117683608515418e09, 1) + meas_val["risk"] = 4.826231151473135e10 + meas_val["efc"] = None + meas_val["risk_transf"] = 0 imp_meas_present = dict() - imp_meas_present['no measure'] = dict() - imp_meas_present['no measure']['risk'] = 6.51220115756442e+09 - imp_meas_present['Mangroves'] = dict() - imp_meas_present['Mangroves']['risk'] = 4.850407096284983e+09 - imp_meas_present['Mangroves']['risk_transf'] = 0 + imp_meas_present["no measure"] = dict() + imp_meas_present["no 
measure"]["risk"] = 6.51220115756442e09 + imp_meas_present["Mangroves"] = dict() + imp_meas_present["Mangroves"]["risk"] = 4.850407096284983e09 + imp_meas_present["Mangroves"]["risk_transf"] = 0 imp_meas_future = dict() - imp_meas_future['no measure'] = dict() - imp_meas_future['no measure']['risk'] = 5.9506659786664024e+10 + imp_meas_future["no measure"] = dict() + imp_meas_future["no measure"]["risk"] = 5.9506659786664024e10 - cb = CostBenefit(present_year=2018, future_year=2040, imp_meas_present=imp_meas_present, - imp_meas_future=imp_meas_future) + cb = CostBenefit( + present_year=2018, + future_year=2040, + imp_meas_present=imp_meas_present, + imp_meas_future=imp_meas_future, + ) disc_rates = DiscRates() disc_rates.years = np.arange(2016, 2051) @@ -204,18 +336,20 @@ def test_cb_one_meas_pres_pass(self): def test_cb_one_meas_fut_pass(self): """Test _cost_ben_one with same future""" - meas_name = 'Mangroves' + meas_name = "Mangroves" meas_val = dict() - meas_val['cost'] = (1.3117683608515418e+09, 1) - meas_val['risk'] = 4.850407096284983e+09 - meas_val['efc'] = None - meas_val['risk_transf'] = 0 + meas_val["cost"] = (1.3117683608515418e09, 1) + meas_val["risk"] = 4.850407096284983e09 + meas_val["efc"] = None + meas_val["risk_transf"] = 0 imp_meas_future = dict() - imp_meas_future['no measure'] = dict() - imp_meas_future['no measure']['risk'] = 6.51220115756442e+09 + imp_meas_future["no measure"] = dict() + imp_meas_future["no measure"]["risk"] = 6.51220115756442e09 - cb = CostBenefit(present_year=2018, future_year=2040, imp_meas_future=imp_meas_future) + cb = CostBenefit( + present_year=2018, future_year=2040, imp_meas_future=imp_meas_future + ) years = np.arange(2000, 2051) rates = np.ones(years.size) * 0.02 @@ -224,23 +358,29 @@ def test_cb_one_meas_fut_pass(self): time_dep = cb._time_dependency_array() cb._cost_ben_one(meas_name, meas_val, disc_rates, time_dep) - self.assertAlmostEqual(cb.benefit[meas_name], 3.100583368954022e+10, places=3) + self.assertAlmostEqual(cb.benefit[meas_name], 3.100583368954022e10, places=3) self.assertAlmostEqual(cb.cost_ben_ratio[meas_name], 0.04230714690616641) def test_calc_cb_no_change_pass(self): """Test _calc_cost_benefit without present value against reference value""" hazard = Hazard.from_hdf5(HAZ_TEST_TC) entity = Entity.from_mat(ENT_TEST_MAT) - entity.measures._data['TC'] = entity.measures._data.pop('XX') - for meas in entity.measures.get_measure('TC'): - meas.haz_type = 'TC' + entity.measures._data["TC"] = entity.measures._data.pop("XX") + for meas in entity.measures.get_measure("TC"): + meas.haz_type = "TC" entity.check() entity.exposures.assign_centroids(hazard) cost_ben = CostBenefit() - cost_ben._calc_impact_measures(hazard, entity.exposures, entity.measures, - entity.impact_funcs, when='future', - risk_func=risk_aai_agg, save_imp=True) + cost_ben._calc_impact_measures( + hazard, + entity.exposures, + entity.measures, + entity.impact_funcs, + when="future", + risk_func=risk_aai_agg, + save_imp=True, + ) cost_ben.present_year = 2018 cost_ben.future_year = 2040 @@ -251,33 +391,54 @@ def test_calc_cb_no_change_pass(self): self.assertEqual(cost_ben.present_year, 2018) self.assertEqual(cost_ben.future_year, 2040) - self.assertAlmostEqual(cost_ben.cost_ben_ratio['Mangroves'], 0.04230714690616641) - self.assertAlmostEqual(cost_ben.cost_ben_ratio['Beach nourishment'], 0.06998836431681373) - self.assertAlmostEqual(cost_ben.cost_ben_ratio['Seawall'], 0.2679741183248266) - self.assertAlmostEqual(cost_ben.cost_ben_ratio['Building code'], 
0.30286828677985717) + self.assertAlmostEqual( + cost_ben.cost_ben_ratio["Mangroves"], 0.04230714690616641 + ) + self.assertAlmostEqual( + cost_ben.cost_ben_ratio["Beach nourishment"], 0.06998836431681373 + ) + self.assertAlmostEqual(cost_ben.cost_ben_ratio["Seawall"], 0.2679741183248266) + self.assertAlmostEqual( + cost_ben.cost_ben_ratio["Building code"], 0.30286828677985717 + ) - self.assertAlmostEqual(cost_ben.benefit['Mangroves'], 3.100583368954022e+10, places=3) - self.assertAlmostEqual(cost_ben.benefit['Beach nourishment'], - 2.468981832719974e+10, places=3) - self.assertAlmostEqual(cost_ben.benefit['Seawall'], 3.3132973770502796e+10, places=3) - self.assertAlmostEqual(cost_ben.benefit['Building code'], 3.0376240767284798e+10, places=3) + self.assertAlmostEqual( + cost_ben.benefit["Mangroves"], 3.100583368954022e10, places=3 + ) + self.assertAlmostEqual( + cost_ben.benefit["Beach nourishment"], 2.468981832719974e10, places=3 + ) + self.assertAlmostEqual( + cost_ben.benefit["Seawall"], 3.3132973770502796e10, places=3 + ) + self.assertAlmostEqual( + cost_ben.benefit["Building code"], 3.0376240767284798e10, places=3 + ) - self.assertAlmostEqual(cost_ben.tot_climate_risk, 1.2150496306913972e+11, places=3) + self.assertAlmostEqual( + cost_ben.tot_climate_risk, 1.2150496306913972e11, places=3 + ) def test_calc_cb_change_pass(self): """Test _calc_cost_benefit with present value against reference value""" hazard = Hazard.from_hdf5(HAZ_TEST_TC) entity = Entity.from_mat(ENT_TEST_MAT) - entity.measures._data['TC'] = entity.measures._data.pop('XX') - for meas in entity.measures.get_measure('TC'): - meas.haz_type = 'TC' + entity.measures._data["TC"] = entity.measures._data.pop("XX") + for meas in entity.measures.get_measure("TC"): + meas.haz_type = "TC" entity.check() entity.exposures.assign_centroids(hazard) cost_ben = CostBenefit() - cost_ben._calc_impact_measures(hazard, entity.exposures, entity.measures, - entity.impact_funcs, when='present', - risk_func=risk_aai_agg, save_imp=False) + cost_ben._calc_impact_measures( + hazard, + entity.exposures, + entity.measures, + entity.impact_funcs, + when="present", + risk_func=risk_aai_agg, + save_imp=False, + ) ent_future = Entity.from_excel(ENT_DEMO_FUTURE) ent_future.check() @@ -286,9 +447,15 @@ def test_calc_cb_change_pass(self): haz_future.intensity.data += 25 ent_future.exposures.assign_centroids(haz_future) - cost_ben._calc_impact_measures(haz_future, ent_future.exposures, ent_future.measures, - ent_future.impact_funcs, when='future', - risk_func=risk_aai_agg, save_imp=False) + cost_ben._calc_impact_measures( + haz_future, + ent_future.exposures, + ent_future.measures, + ent_future.impact_funcs, + when="future", + risk_func=risk_aai_agg, + save_imp=False, + ) cost_ben.present_year = 2018 cost_ben.future_year = 2040 @@ -296,39 +463,79 @@ def test_calc_cb_change_pass(self): self.assertEqual(cost_ben.present_year, 2018) self.assertEqual(cost_ben.future_year, 2040) - self.assertAlmostEqual(cost_ben.tot_climate_risk, 5.768659152882021e+11, places=3) - - self.assertAlmostEqual(cost_ben.imp_meas_present['no measure']['risk'], - 6.51220115756442e+09, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_present['Mangroves']['risk'], - 4.850407096284983e+09, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_present['Beach nourishment']['risk'], - 5.188921355413834e+09, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_present['Seawall']['risk'], - 4.736400526119911e+09, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_present['Building 
code']['risk'], - 4.884150868173321e+09, places=3) - - self.assertAlmostEqual(cost_ben.imp_meas_future['no measure']['risk'], - 5.9506659786664024e+10, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_future['Mangroves']['risk'], - 4.826231151473135e+10, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_future['Beach nourishment']['risk'], - 5.0647250923231674e+10, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_future['Seawall']['risk'], - 21089567135.7345, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_future['Building code']['risk'], - 4.462999483999791e+10, places=3) - - self.assertAlmostEqual(cost_ben.benefit['Mangroves'], 113345027690.81276, places=2) - self.assertAlmostEqual(cost_ben.benefit['Beach nourishment'], 89444869971.53653, places=2) - self.assertAlmostEqual(cost_ben.benefit['Seawall'], 347977469896.1333, places=2) - self.assertAlmostEqual(cost_ben.benefit['Building code'], 144216478822.05154, places=2) - - self.assertAlmostEqual(cost_ben.cost_ben_ratio['Mangroves'], 0.011573232523528404) - self.assertAlmostEqual(cost_ben.cost_ben_ratio['Beach nourishment'], 0.01931916274851638) - self.assertAlmostEqual(cost_ben.cost_ben_ratio['Seawall'], 0.025515385913577368) - self.assertAlmostEqual(cost_ben.cost_ben_ratio['Building code'], 0.06379298728650741) + self.assertAlmostEqual( + cost_ben.tot_climate_risk, 5.768659152882021e11, places=3 + ) + + self.assertAlmostEqual( + cost_ben.imp_meas_present["no measure"]["risk"], + 6.51220115756442e09, + places=3, + ) + self.assertAlmostEqual( + cost_ben.imp_meas_present["Mangroves"]["risk"], + 4.850407096284983e09, + places=3, + ) + self.assertAlmostEqual( + cost_ben.imp_meas_present["Beach nourishment"]["risk"], + 5.188921355413834e09, + places=3, + ) + self.assertAlmostEqual( + cost_ben.imp_meas_present["Seawall"]["risk"], 4.736400526119911e09, places=3 + ) + self.assertAlmostEqual( + cost_ben.imp_meas_present["Building code"]["risk"], + 4.884150868173321e09, + places=3, + ) + + self.assertAlmostEqual( + cost_ben.imp_meas_future["no measure"]["risk"], + 5.9506659786664024e10, + places=3, + ) + self.assertAlmostEqual( + cost_ben.imp_meas_future["Mangroves"]["risk"], + 4.826231151473135e10, + places=3, + ) + self.assertAlmostEqual( + cost_ben.imp_meas_future["Beach nourishment"]["risk"], + 5.0647250923231674e10, + places=3, + ) + self.assertAlmostEqual( + cost_ben.imp_meas_future["Seawall"]["risk"], 21089567135.7345, places=3 + ) + self.assertAlmostEqual( + cost_ben.imp_meas_future["Building code"]["risk"], + 4.462999483999791e10, + places=3, + ) + + self.assertAlmostEqual( + cost_ben.benefit["Mangroves"], 113345027690.81276, places=2 + ) + self.assertAlmostEqual( + cost_ben.benefit["Beach nourishment"], 89444869971.53653, places=2 + ) + self.assertAlmostEqual(cost_ben.benefit["Seawall"], 347977469896.1333, places=2) + self.assertAlmostEqual( + cost_ben.benefit["Building code"], 144216478822.05154, places=2 + ) + + self.assertAlmostEqual( + cost_ben.cost_ben_ratio["Mangroves"], 0.011573232523528404 + ) + self.assertAlmostEqual( + cost_ben.cost_ben_ratio["Beach nourishment"], 0.01931916274851638 + ) + self.assertAlmostEqual(cost_ben.cost_ben_ratio["Seawall"], 0.025515385913577368) + self.assertAlmostEqual( + cost_ben.cost_ben_ratio["Building code"], 0.06379298728650741 + ) self.assertAlmostEqual(cost_ben.tot_climate_risk, 576865915288.2021, places=3) @@ -348,8 +555,12 @@ def test_time_array_pres_pass(self): n_years = cb.future_year - cb.present_year + 1 self.assertEqual(time_arr.size, n_years) - 
self.assertTrue(np.allclose(time_arr, np.arange(n_years)**imp_time_depen / - (n_years - 1)**imp_time_depen)) + self.assertTrue( + np.allclose( + time_arr, + np.arange(n_years) ** imp_time_depen / (n_years - 1) ** imp_time_depen, + ) + ) def test_time_array_no_pres_pass(self): """Test _time_dependency_array""" @@ -368,12 +579,16 @@ def test_npv_unaverted_no_pres_pass(self): rates = np.ones(years.size) * 0.025 disc_rates = DiscRates(years=years, rates=rates) time_dep = np.linspace(0, 1, disc_rates.years.size) - res = cb._npv_unaverted_impact(risk_future, disc_rates, time_dep, - risk_present=None) + res = cb._npv_unaverted_impact( + risk_future, disc_rates, time_dep, risk_present=None + ) self.assertEqual( res, - disc_rates.net_present_value(cb.present_year, cb.future_year, time_dep * risk_future)) + disc_rates.net_present_value( + cb.present_year, cb.future_year, time_dep * risk_future + ), + ) def test_npv_unaverted_pres_pass(self): """Test _npv_unaverted_impact""" @@ -387,11 +602,13 @@ def test_npv_unaverted_pres_pass(self): time_dep = np.linspace(0, 1, disc_rates.years.size) res = cb._npv_unaverted_impact(risk_future, disc_rates, time_dep, risk_present) - tot_climate_risk = risk_present + (risk_future - risk_present) * time_dep - self.assertEqual(res, disc_rates.net_present_value(cb.present_year, - cb.future_year, - tot_climate_risk)) + self.assertEqual( + res, + disc_rates.net_present_value( + cb.present_year, cb.future_year, tot_climate_risk + ), + ) def test_norm_value(self): """Test _norm_values""" @@ -447,50 +664,89 @@ def test_combine_fut_pass(self): fut_haz = copy.deepcopy(hazard) cost_ben = CostBenefit() - cost_ben.calc(hazard, entity, fut_haz, fut_ent, future_year=None, - risk_func=risk_aai_agg, imp_time_depen=None, save_imp=True) - - new_name = 'combine' + cost_ben.calc( + hazard, + entity, + fut_haz, + fut_ent, + future_year=None, + risk_func=risk_aai_agg, + imp_time_depen=None, + save_imp=True, + ) + + new_name = "combine" new_color = np.array([0.1, 0.1, 0.1]) - new_cb = cost_ben.combine_measures(['Mangroves', 'Seawall'], new_name, new_color, - entity.disc_rates, imp_time_depen=None, - risk_func=risk_aai_agg) + new_cb = cost_ben.combine_measures( + ["Mangroves", "Seawall"], + new_name, + new_color, + entity.disc_rates, + imp_time_depen=None, + risk_func=risk_aai_agg, + ) self.assertTrue(np.allclose(new_cb.color_rgb[new_name], new_color)) - new_imp = cost_ben.imp_meas_future['no measure']['impact'].at_event - \ - cost_ben.imp_meas_future['Mangroves']['impact'].at_event - new_imp += cost_ben.imp_meas_future['no measure']['impact'].at_event - \ - cost_ben.imp_meas_future['Seawall']['impact'].at_event - new_imp = np.maximum(cost_ben.imp_meas_future['no measure']['impact'].at_event - new_imp, - 0) - - self.assertTrue(np.allclose(new_cb.imp_meas_present[new_name]['impact'].at_event, new_imp)) - self.assertAlmostEqual( - new_cb.imp_meas_present[new_name]['risk'], - np.sum(new_imp * cost_ben.imp_meas_present['no measure']['impact'].frequency), 5) - self.assertAlmostEqual(new_cb.imp_meas_present[new_name]['cost'][0], - cost_ben.imp_meas_present['Mangroves']['cost'][0] + - cost_ben.imp_meas_present['Seawall']['cost'][0]) - self.assertAlmostEqual(new_cb.imp_meas_present[new_name]['cost'][1], 1) - self.assertTrue(np.allclose( - new_cb.imp_meas_present[new_name]['efc'].impact, - new_cb.imp_meas_present[new_name]['impact'].calc_freq_curve().impact)) - self.assertAlmostEqual(new_cb.imp_meas_present[new_name]['risk_transf'], 0) - - 
self.assertTrue(np.allclose(new_cb.imp_meas_future[new_name]['impact'].at_event, new_imp)) - self.assertAlmostEqual( - new_cb.imp_meas_future[new_name]['risk'], - np.sum(new_imp * cost_ben.imp_meas_future['no measure']['impact'].frequency), 5) - self.assertAlmostEqual( - new_cb.imp_meas_future[new_name]['cost'][0], - cost_ben.imp_meas_future['Mangroves']['cost'][0] - + cost_ben.imp_meas_future['Seawall']['cost'][0]) - self.assertAlmostEqual(new_cb.imp_meas_future[new_name]['cost'][1], 1) - self.assertTrue(np.allclose( - new_cb.imp_meas_future[new_name]['efc'].impact, - new_cb.imp_meas_future[new_name]['impact'].calc_freq_curve().impact)) - self.assertAlmostEqual(new_cb.imp_meas_future[new_name]['risk_transf'], 0) + new_imp = ( + cost_ben.imp_meas_future["no measure"]["impact"].at_event + - cost_ben.imp_meas_future["Mangroves"]["impact"].at_event + ) + new_imp += ( + cost_ben.imp_meas_future["no measure"]["impact"].at_event + - cost_ben.imp_meas_future["Seawall"]["impact"].at_event + ) + new_imp = np.maximum( + cost_ben.imp_meas_future["no measure"]["impact"].at_event - new_imp, 0 + ) + + self.assertTrue( + np.allclose(new_cb.imp_meas_present[new_name]["impact"].at_event, new_imp) + ) + self.assertAlmostEqual( + new_cb.imp_meas_present[new_name]["risk"], + np.sum( + new_imp * cost_ben.imp_meas_present["no measure"]["impact"].frequency + ), + 5, + ) + self.assertAlmostEqual( + new_cb.imp_meas_present[new_name]["cost"][0], + cost_ben.imp_meas_present["Mangroves"]["cost"][0] + + cost_ben.imp_meas_present["Seawall"]["cost"][0], + ) + self.assertAlmostEqual(new_cb.imp_meas_present[new_name]["cost"][1], 1) + self.assertTrue( + np.allclose( + new_cb.imp_meas_present[new_name]["efc"].impact, + new_cb.imp_meas_present[new_name]["impact"].calc_freq_curve().impact, + ) + ) + self.assertAlmostEqual(new_cb.imp_meas_present[new_name]["risk_transf"], 0) + + self.assertTrue( + np.allclose(new_cb.imp_meas_future[new_name]["impact"].at_event, new_imp) + ) + self.assertAlmostEqual( + new_cb.imp_meas_future[new_name]["risk"], + np.sum( + new_imp * cost_ben.imp_meas_future["no measure"]["impact"].frequency + ), + 5, + ) + self.assertAlmostEqual( + new_cb.imp_meas_future[new_name]["cost"][0], + cost_ben.imp_meas_future["Mangroves"]["cost"][0] + + cost_ben.imp_meas_future["Seawall"]["cost"][0], + ) + self.assertAlmostEqual(new_cb.imp_meas_future[new_name]["cost"][1], 1) + self.assertTrue( + np.allclose( + new_cb.imp_meas_future[new_name]["efc"].impact, + new_cb.imp_meas_future[new_name]["impact"].calc_freq_curve().impact, + ) + ) + self.assertAlmostEqual(new_cb.imp_meas_future[new_name]["risk_transf"], 0) self.assertAlmostEqual(new_cb.benefit[new_name], 51781337529.07264, places=3) self.assertAlmostEqual(new_cb.cost_ben_ratio[new_name], 0.19679962474434248) @@ -502,36 +758,62 @@ def test_combine_current_pass(self): entity.check() entity.exposures.ref_year = 2018 cost_ben = CostBenefit() - cost_ben.calc(hazard, entity, future_year=2040, risk_func=risk_aai_agg, - imp_time_depen=None, save_imp=True) - - new_name = 'combine' + cost_ben.calc( + hazard, + entity, + future_year=2040, + risk_func=risk_aai_agg, + imp_time_depen=None, + save_imp=True, + ) + + new_name = "combine" new_color = np.array([0.1, 0.1, 0.1]) - new_cb = cost_ben.combine_measures(['Mangroves', 'Seawall'], new_name, new_color, - entity.disc_rates, imp_time_depen=None, - risk_func=risk_aai_agg) + new_cb = cost_ben.combine_measures( + ["Mangroves", "Seawall"], + new_name, + new_color, + entity.disc_rates, + imp_time_depen=None, + 
risk_func=risk_aai_agg, + ) self.assertTrue(np.allclose(new_cb.color_rgb[new_name], new_color)) self.assertEqual(len(new_cb.imp_meas_present), 0) - new_imp = cost_ben.imp_meas_future['no measure']['impact'].at_event - \ - cost_ben.imp_meas_future['Mangroves']['impact'].at_event - new_imp += cost_ben.imp_meas_future['no measure']['impact'].at_event - \ - cost_ben.imp_meas_future['Seawall']['impact'].at_event - new_imp = np.maximum(cost_ben.imp_meas_future['no measure']['impact'].at_event - new_imp, - 0) - self.assertTrue(np.allclose(new_cb.imp_meas_future[new_name]['impact'].at_event, new_imp)) - self.assertAlmostEqual( - new_cb.imp_meas_future[new_name]['risk'], - np.sum(new_imp * cost_ben.imp_meas_future['no measure']['impact'].frequency), 5) - self.assertAlmostEqual( - new_cb.imp_meas_future[new_name]['cost'][0], - cost_ben.imp_meas_future['Mangroves']['cost'][0] - + cost_ben.imp_meas_future['Seawall']['cost'][0]) - self.assertAlmostEqual(new_cb.imp_meas_future[new_name]['cost'][1], 1) - self.assertTrue(np.allclose( - new_cb.imp_meas_future[new_name]['efc'].impact, - new_cb.imp_meas_future[new_name]['impact'].calc_freq_curve().impact)) - self.assertAlmostEqual(new_cb.imp_meas_future[new_name]['risk_transf'], 0) + new_imp = ( + cost_ben.imp_meas_future["no measure"]["impact"].at_event + - cost_ben.imp_meas_future["Mangroves"]["impact"].at_event + ) + new_imp += ( + cost_ben.imp_meas_future["no measure"]["impact"].at_event + - cost_ben.imp_meas_future["Seawall"]["impact"].at_event + ) + new_imp = np.maximum( + cost_ben.imp_meas_future["no measure"]["impact"].at_event - new_imp, 0 + ) + self.assertTrue( + np.allclose(new_cb.imp_meas_future[new_name]["impact"].at_event, new_imp) + ) + self.assertAlmostEqual( + new_cb.imp_meas_future[new_name]["risk"], + np.sum( + new_imp * cost_ben.imp_meas_future["no measure"]["impact"].frequency + ), + 5, + ) + self.assertAlmostEqual( + new_cb.imp_meas_future[new_name]["cost"][0], + cost_ben.imp_meas_future["Mangroves"]["cost"][0] + + cost_ben.imp_meas_future["Seawall"]["cost"][0], + ) + self.assertAlmostEqual(new_cb.imp_meas_future[new_name]["cost"][1], 1) + self.assertTrue( + np.allclose( + new_cb.imp_meas_future[new_name]["efc"].impact, + new_cb.imp_meas_future[new_name]["impact"].calc_freq_curve().impact, + ) + ) + self.assertAlmostEqual(new_cb.imp_meas_future[new_name]["risk_transf"], 0) self.assertAlmostEqual(new_cb.benefit[new_name], 51781337529.07264, places=3) self.assertAlmostEqual(new_cb.cost_ben_ratio[new_name], 0.19679962474434248) @@ -542,45 +824,81 @@ def test_apply_transf_current_pass(self): entity.check() entity.exposures.ref_year = 2018 cost_ben = CostBenefit() - cost_ben.calc(hazard, entity, future_year=2040, risk_func=risk_aai_agg, - imp_time_depen=None, save_imp=True) - - new_name = 'combine' + cost_ben.calc( + hazard, + entity, + future_year=2040, + risk_func=risk_aai_agg, + imp_time_depen=None, + save_imp=True, + ) + + new_name = "combine" new_color = np.array([0.1, 0.1, 0.1]) risk_transf = (1.0e7, 15.0e11, 1) - new_cb = cost_ben.combine_measures(['Mangroves', 'Seawall'], new_name, new_color, - entity.disc_rates, imp_time_depen=None, - risk_func=risk_aai_agg) - new_cb.apply_risk_transfer(new_name, risk_transf[0], risk_transf[1], - entity.disc_rates, cost_fix=0, cost_factor=risk_transf[2], - imp_time_depen=1, - risk_func=risk_aai_agg) - - tr_name = 'risk transfer (' + new_name + ')' - new_imp = cost_ben.imp_meas_future['no measure']['impact'].at_event - \ - cost_ben.imp_meas_future['Mangroves']['impact'].at_event - new_imp += 
cost_ben.imp_meas_future['no measure']['impact'].at_event - \ - cost_ben.imp_meas_future['Seawall']['impact'].at_event - new_imp = np.maximum(cost_ben.imp_meas_future['no measure']['impact'].at_event - new_imp, - 0) + new_cb = cost_ben.combine_measures( + ["Mangroves", "Seawall"], + new_name, + new_color, + entity.disc_rates, + imp_time_depen=None, + risk_func=risk_aai_agg, + ) + new_cb.apply_risk_transfer( + new_name, + risk_transf[0], + risk_transf[1], + entity.disc_rates, + cost_fix=0, + cost_factor=risk_transf[2], + imp_time_depen=1, + risk_func=risk_aai_agg, + ) + + tr_name = "risk transfer (" + new_name + ")" + new_imp = ( + cost_ben.imp_meas_future["no measure"]["impact"].at_event + - cost_ben.imp_meas_future["Mangroves"]["impact"].at_event + ) + new_imp += ( + cost_ben.imp_meas_future["no measure"]["impact"].at_event + - cost_ben.imp_meas_future["Seawall"]["impact"].at_event + ) + new_imp = np.maximum( + cost_ben.imp_meas_future["no measure"]["impact"].at_event - new_imp, 0 + ) imp_layer = np.minimum(np.maximum(new_imp - risk_transf[0], 0), risk_transf[1]) risk_transfer = np.sum( - imp_layer * cost_ben.imp_meas_future['no measure']['impact'].frequency) + imp_layer * cost_ben.imp_meas_future["no measure"]["impact"].frequency + ) new_imp = np.maximum(new_imp - imp_layer, 0) self.assertTrue(np.allclose(new_cb.color_rgb[new_name], new_color)) self.assertEqual(len(new_cb.imp_meas_present), 0) - self.assertTrue(np.allclose(new_cb.imp_meas_future[tr_name]['impact'].at_event, new_imp)) + self.assertTrue( + np.allclose(new_cb.imp_meas_future[tr_name]["impact"].at_event, new_imp) + ) self.assertAlmostEqual( - new_cb.imp_meas_future[tr_name]['risk'], - np.sum(new_imp * cost_ben.imp_meas_future['no measure']['impact'].frequency), 5) + new_cb.imp_meas_future[tr_name]["risk"], + np.sum( + new_imp * cost_ben.imp_meas_future["no measure"]["impact"].frequency + ), + 5, + ) self.assertAlmostEqual( new_cb.cost_ben_ratio[tr_name] * new_cb.benefit[tr_name], - 32106013195.316242, places=3) - self.assertTrue(np.allclose( - new_cb.imp_meas_future[tr_name]['efc'].impact, - new_cb.imp_meas_future[tr_name]['impact'].calc_freq_curve().impact)) - self.assertAlmostEqual(new_cb.imp_meas_future[tr_name]['risk_transf'], risk_transfer) + 32106013195.316242, + places=3, + ) + self.assertTrue( + np.allclose( + new_cb.imp_meas_future[tr_name]["efc"].impact, + new_cb.imp_meas_future[tr_name]["impact"].calc_freq_curve().impact, + ) + ) + self.assertAlmostEqual( + new_cb.imp_meas_future[tr_name]["risk_transf"], risk_transfer + ) # benefit = impact layer self.assertAlmostEqual(new_cb.benefit[tr_name], 32106013195.316242, 4) self.assertAlmostEqual(new_cb.cost_ben_ratio[tr_name], 1) @@ -592,43 +910,81 @@ def test_apply_transf_cost_fact_pass(self): entity.check() entity.exposures.ref_year = 2018 cost_ben = CostBenefit() - cost_ben.calc(hazard, entity, future_year=2040, risk_func=risk_aai_agg, - imp_time_depen=None, save_imp=True) - - new_name = 'combine' + cost_ben.calc( + hazard, + entity, + future_year=2040, + risk_func=risk_aai_agg, + imp_time_depen=None, + save_imp=True, + ) + + new_name = "combine" new_color = np.array([0.1, 0.1, 0.1]) risk_transf = (1.0e7, 15.0e11, 2) - new_cb = cost_ben.combine_measures(['Mangroves', 'Seawall'], new_name, new_color, - entity.disc_rates, imp_time_depen=None, - risk_func=risk_aai_agg) - new_cb.apply_risk_transfer(new_name, risk_transf[0], risk_transf[1], - entity.disc_rates, cost_fix=0, cost_factor=risk_transf[2], - imp_time_depen=1, risk_func=risk_aai_agg) - - tr_name = 'risk transfer 
(' + new_name + ')' - new_imp = cost_ben.imp_meas_future['no measure']['impact'].at_event - \ - cost_ben.imp_meas_future['Mangroves']['impact'].at_event - new_imp += cost_ben.imp_meas_future['no measure']['impact'].at_event - \ - cost_ben.imp_meas_future['Seawall']['impact'].at_event - new_imp = np.maximum(cost_ben.imp_meas_future['no measure']['impact'].at_event - new_imp, - 0) + new_cb = cost_ben.combine_measures( + ["Mangroves", "Seawall"], + new_name, + new_color, + entity.disc_rates, + imp_time_depen=None, + risk_func=risk_aai_agg, + ) + new_cb.apply_risk_transfer( + new_name, + risk_transf[0], + risk_transf[1], + entity.disc_rates, + cost_fix=0, + cost_factor=risk_transf[2], + imp_time_depen=1, + risk_func=risk_aai_agg, + ) + + tr_name = "risk transfer (" + new_name + ")" + new_imp = ( + cost_ben.imp_meas_future["no measure"]["impact"].at_event + - cost_ben.imp_meas_future["Mangroves"]["impact"].at_event + ) + new_imp += ( + cost_ben.imp_meas_future["no measure"]["impact"].at_event + - cost_ben.imp_meas_future["Seawall"]["impact"].at_event + ) + new_imp = np.maximum( + cost_ben.imp_meas_future["no measure"]["impact"].at_event - new_imp, 0 + ) imp_layer = np.minimum(np.maximum(new_imp - risk_transf[0], 0), risk_transf[1]) risk_transfer = np.sum( - imp_layer * cost_ben.imp_meas_future['no measure']['impact'].frequency) + imp_layer * cost_ben.imp_meas_future["no measure"]["impact"].frequency + ) new_imp = np.maximum(new_imp - imp_layer, 0) self.assertTrue(np.allclose(new_cb.color_rgb[new_name], new_color)) self.assertEqual(len(new_cb.imp_meas_present), 0) - self.assertTrue(np.allclose(new_cb.imp_meas_future[tr_name]['impact'].at_event, new_imp)) + self.assertTrue( + np.allclose(new_cb.imp_meas_future[tr_name]["impact"].at_event, new_imp) + ) self.assertAlmostEqual( - new_cb.imp_meas_future[tr_name]['risk'], - np.sum(new_imp * cost_ben.imp_meas_future['no measure']['impact'].frequency), 5) - self.assertAlmostEqual(new_cb.cost_ben_ratio[tr_name] * new_cb.benefit[tr_name], - risk_transf[2] * 32106013195.316242, 4) + new_cb.imp_meas_future[tr_name]["risk"], + np.sum( + new_imp * cost_ben.imp_meas_future["no measure"]["impact"].frequency + ), + 5, + ) + self.assertAlmostEqual( + new_cb.cost_ben_ratio[tr_name] * new_cb.benefit[tr_name], + risk_transf[2] * 32106013195.316242, + 4, + ) self.assertTrue( - np.allclose(new_cb.imp_meas_future[tr_name]['efc'].impact, - new_cb.imp_meas_future[tr_name]['impact'].calc_freq_curve().impact)) - self.assertAlmostEqual(new_cb.imp_meas_future[tr_name]['risk_transf'], risk_transfer) + np.allclose( + new_cb.imp_meas_future[tr_name]["efc"].impact, + new_cb.imp_meas_future[tr_name]["impact"].calc_freq_curve().impact, + ) + ) + self.assertAlmostEqual( + new_cb.imp_meas_future[tr_name]["risk_transf"], risk_transfer + ) # benefit = impact layer self.assertAlmostEqual(new_cb.benefit[tr_name], 32106013195.316242, 4) self.assertAlmostEqual(new_cb.cost_ben_ratio[tr_name], risk_transf[2]) @@ -644,47 +1000,91 @@ def test_apply_transf_future_pass(self): fut_ent.exposures.ref_year = 2040 cost_ben = CostBenefit() - cost_ben.calc(hazard, entity, ent_future=fut_ent, risk_func=risk_aai_agg, - imp_time_depen=None, save_imp=True) - - new_name = 'combine' + cost_ben.calc( + hazard, + entity, + ent_future=fut_ent, + risk_func=risk_aai_agg, + imp_time_depen=None, + save_imp=True, + ) + + new_name = "combine" new_color = np.array([0.1, 0.1, 0.1]) risk_transf = (1.0e7, 15.0e11, 1) - new_cb = cost_ben.combine_measures(['Mangroves', 'Seawall'], new_name, new_color, - entity.disc_rates, 
imp_time_depen=None, - risk_func=risk_aai_agg) - new_cb.apply_risk_transfer(new_name, risk_transf[0], risk_transf[1], - entity.disc_rates, cost_fix=0, cost_factor=risk_transf[2], - imp_time_depen=1, risk_func=risk_aai_agg) - - tr_name = 'risk transfer (' + new_name + ')' - new_imp = cost_ben.imp_meas_future['no measure']['impact'].at_event - \ - cost_ben.imp_meas_future['Mangroves']['impact'].at_event - new_imp += cost_ben.imp_meas_future['no measure']['impact'].at_event - \ - cost_ben.imp_meas_future['Seawall']['impact'].at_event - new_imp = np.maximum(cost_ben.imp_meas_future['no measure']['impact'].at_event - new_imp, - 0) + new_cb = cost_ben.combine_measures( + ["Mangroves", "Seawall"], + new_name, + new_color, + entity.disc_rates, + imp_time_depen=None, + risk_func=risk_aai_agg, + ) + new_cb.apply_risk_transfer( + new_name, + risk_transf[0], + risk_transf[1], + entity.disc_rates, + cost_fix=0, + cost_factor=risk_transf[2], + imp_time_depen=1, + risk_func=risk_aai_agg, + ) + + tr_name = "risk transfer (" + new_name + ")" + new_imp = ( + cost_ben.imp_meas_future["no measure"]["impact"].at_event + - cost_ben.imp_meas_future["Mangroves"]["impact"].at_event + ) + new_imp += ( + cost_ben.imp_meas_future["no measure"]["impact"].at_event + - cost_ben.imp_meas_future["Seawall"]["impact"].at_event + ) + new_imp = np.maximum( + cost_ben.imp_meas_future["no measure"]["impact"].at_event - new_imp, 0 + ) imp_layer = np.minimum(np.maximum(new_imp - risk_transf[0], 0), risk_transf[1]) risk_transfer = np.sum( - imp_layer * cost_ben.imp_meas_future['no measure']['impact'].frequency) + imp_layer * cost_ben.imp_meas_future["no measure"]["impact"].frequency + ) new_imp = np.maximum(new_imp - imp_layer, 0) self.assertTrue(np.allclose(new_cb.color_rgb[new_name], new_color)) self.assertEqual(len(new_cb.imp_meas_present), 3) - self.assertTrue(np.allclose(new_cb.imp_meas_future[tr_name]['impact'].at_event, new_imp)) - self.assertTrue(np.allclose(new_cb.imp_meas_present[tr_name]['impact'].at_event, new_imp)) + self.assertTrue( + np.allclose(new_cb.imp_meas_future[tr_name]["impact"].at_event, new_imp) + ) + self.assertTrue( + np.allclose(new_cb.imp_meas_present[tr_name]["impact"].at_event, new_imp) + ) self.assertAlmostEqual( - new_cb.imp_meas_future[tr_name]['risk'], - np.sum(new_imp * cost_ben.imp_meas_future['no measure']['impact'].frequency), 5) + new_cb.imp_meas_future[tr_name]["risk"], + np.sum( + new_imp * cost_ben.imp_meas_future["no measure"]["impact"].frequency + ), + 5, + ) self.assertAlmostEqual( - new_cb.imp_meas_present[tr_name]['risk'], - np.sum(new_imp * cost_ben.imp_meas_future['no measure']['impact'].frequency), 5) - self.assertAlmostEqual(new_cb.cost_ben_ratio[tr_name] * new_cb.benefit[tr_name], - 69715165679.7042, places=3) + new_cb.imp_meas_present[tr_name]["risk"], + np.sum( + new_imp * cost_ben.imp_meas_future["no measure"]["impact"].frequency + ), + 5, + ) + self.assertAlmostEqual( + new_cb.cost_ben_ratio[tr_name] * new_cb.benefit[tr_name], + 69715165679.7042, + places=3, + ) self.assertTrue( - np.allclose(new_cb.imp_meas_future[tr_name]['efc'].impact, - new_cb.imp_meas_future[tr_name]['impact'].calc_freq_curve().impact)) - self.assertAlmostEqual(new_cb.imp_meas_future[tr_name]['risk_transf'], risk_transfer) + np.allclose( + new_cb.imp_meas_future[tr_name]["efc"].impact, + new_cb.imp_meas_future[tr_name]["impact"].calc_freq_curve().impact, + ) + ) + self.assertAlmostEqual( + new_cb.imp_meas_future[tr_name]["risk_transf"], risk_transfer + ) # benefit = impact layer 
self.assertAlmostEqual(new_cb.benefit[tr_name], 69715165679.7042, 4) self.assertAlmostEqual(new_cb.cost_ben_ratio[tr_name], 1) @@ -696,10 +1096,16 @@ def test_remove_measure(self): entity.check() entity.exposures.ref_year = 2018 cost_ben = CostBenefit() - cost_ben.calc(hazard, entity, future_year=2040, risk_func=risk_aai_agg, - imp_time_depen=None, save_imp=True) - - to_remove = 'Mangroves' + cost_ben.calc( + hazard, + entity, + future_year=2040, + risk_func=risk_aai_agg, + imp_time_depen=None, + save_imp=True, + ) + + to_remove = "Mangroves" self.assertTrue(to_remove in cost_ben.benefit.keys()) cost_ben.remove_measure(to_remove) self.assertTrue(to_remove not in cost_ben.color_rgb.keys()) @@ -713,6 +1119,7 @@ def test_remove_measure(self): self.assertEqual(len(cost_ben.cost_ben_ratio), 3) self.assertEqual(len(cost_ben.benefit), 3) + class TestCalc(unittest.TestCase): """Test calc""" @@ -721,7 +1128,7 @@ def test_calc_change_pass(self): # present hazard = Hazard.from_hdf5(HAZ_TEST_TC) entity = Entity.from_excel(ENT_DEMO_TODAY) - entity.exposures.gdf.rename(columns={'impf_': 'impf_TC'}, inplace=True) + entity.exposures.gdf.rename(columns={"impf_": "impf_TC"}, inplace=True) entity.check() entity.exposures.ref_year = 2018 @@ -738,39 +1145,79 @@ def test_calc_change_pass(self): self.assertEqual(cost_ben.present_year, 2018) self.assertEqual(cost_ben.future_year, 2040) - self.assertAlmostEqual(cost_ben.tot_climate_risk, 5.768659152882021e+11, places=3) - - self.assertAlmostEqual(cost_ben.imp_meas_present['no measure']['risk'], - 6.51220115756442e+09, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_present['Mangroves']['risk'], - 4.850407096284983e+09, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_present['Beach nourishment']['risk'], - 5.188921355413834e+09, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_present['Seawall']['risk'], - 4.736400526119911e+09, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_present['Building code']['risk'], - 4.884150868173321e+09, places=3) - - self.assertAlmostEqual(cost_ben.imp_meas_future['no measure']['risk'], - 5.9506659786664024e+10, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_future['Mangroves']['risk'], - 4.826231151473135e+10, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_future['Beach nourishment']['risk'], - 5.0647250923231674e+10, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_future['Seawall']['risk'], - 21089567135.7345, places=3) - self.assertAlmostEqual(cost_ben.imp_meas_future['Building code']['risk'], - 4.462999483999791e+10, places=3) - - self.assertAlmostEqual(cost_ben.benefit['Mangroves'], 113345027690.81276, places=2) - self.assertAlmostEqual(cost_ben.benefit['Beach nourishment'], 89444869971.53653, places=2) - self.assertAlmostEqual(cost_ben.benefit['Seawall'], 347977469896.1333, places=2) - self.assertAlmostEqual(cost_ben.benefit['Building code'], 144216478822.05154, places=2) - - self.assertAlmostEqual(cost_ben.cost_ben_ratio['Mangroves'], 0.011573232523528404) - self.assertAlmostEqual(cost_ben.cost_ben_ratio['Beach nourishment'], 0.01931916274851638) - self.assertAlmostEqual(cost_ben.cost_ben_ratio['Seawall'], 0.025515385913577368) - self.assertAlmostEqual(cost_ben.cost_ben_ratio['Building code'], 0.06379298728650741) + self.assertAlmostEqual( + cost_ben.tot_climate_risk, 5.768659152882021e11, places=3 + ) + + self.assertAlmostEqual( + cost_ben.imp_meas_present["no measure"]["risk"], + 6.51220115756442e09, + places=3, + ) + self.assertAlmostEqual( + cost_ben.imp_meas_present["Mangroves"]["risk"], 
+ 4.850407096284983e09, + places=3, + ) + self.assertAlmostEqual( + cost_ben.imp_meas_present["Beach nourishment"]["risk"], + 5.188921355413834e09, + places=3, + ) + self.assertAlmostEqual( + cost_ben.imp_meas_present["Seawall"]["risk"], 4.736400526119911e09, places=3 + ) + self.assertAlmostEqual( + cost_ben.imp_meas_present["Building code"]["risk"], + 4.884150868173321e09, + places=3, + ) + + self.assertAlmostEqual( + cost_ben.imp_meas_future["no measure"]["risk"], + 5.9506659786664024e10, + places=3, + ) + self.assertAlmostEqual( + cost_ben.imp_meas_future["Mangroves"]["risk"], + 4.826231151473135e10, + places=3, + ) + self.assertAlmostEqual( + cost_ben.imp_meas_future["Beach nourishment"]["risk"], + 5.0647250923231674e10, + places=3, + ) + self.assertAlmostEqual( + cost_ben.imp_meas_future["Seawall"]["risk"], 21089567135.7345, places=3 + ) + self.assertAlmostEqual( + cost_ben.imp_meas_future["Building code"]["risk"], + 4.462999483999791e10, + places=3, + ) + + self.assertAlmostEqual( + cost_ben.benefit["Mangroves"], 113345027690.81276, places=2 + ) + self.assertAlmostEqual( + cost_ben.benefit["Beach nourishment"], 89444869971.53653, places=2 + ) + self.assertAlmostEqual(cost_ben.benefit["Seawall"], 347977469896.1333, places=2) + self.assertAlmostEqual( + cost_ben.benefit["Building code"], 144216478822.05154, places=2 + ) + + self.assertAlmostEqual( + cost_ben.cost_ben_ratio["Mangroves"], 0.011573232523528404 + ) + self.assertAlmostEqual( + cost_ben.cost_ben_ratio["Beach nourishment"], 0.01931916274851638 + ) + self.assertAlmostEqual(cost_ben.cost_ben_ratio["Seawall"], 0.025515385913577368) + self.assertAlmostEqual( + cost_ben.cost_ben_ratio["Building code"], 0.06379298728650741 + ) self.assertAlmostEqual(cost_ben.tot_climate_risk, 576865915288.2021, places=3) @@ -788,18 +1235,34 @@ def test_calc_no_change_pass(self): self.assertEqual(cost_ben.present_year, 2018) self.assertEqual(cost_ben.future_year, 2040) - self.assertAlmostEqual(cost_ben.cost_ben_ratio['Mangroves'], 0.04230714690616641) - self.assertAlmostEqual(cost_ben.cost_ben_ratio['Beach nourishment'], 0.06998836431681373) - self.assertAlmostEqual(cost_ben.cost_ben_ratio['Seawall'], 0.2679741183248266) - self.assertAlmostEqual(cost_ben.cost_ben_ratio['Building code'], 0.30286828677985717) + self.assertAlmostEqual( + cost_ben.cost_ben_ratio["Mangroves"], 0.04230714690616641 + ) + self.assertAlmostEqual( + cost_ben.cost_ben_ratio["Beach nourishment"], 0.06998836431681373 + ) + self.assertAlmostEqual(cost_ben.cost_ben_ratio["Seawall"], 0.2679741183248266) + self.assertAlmostEqual( + cost_ben.cost_ben_ratio["Building code"], 0.30286828677985717 + ) + + self.assertAlmostEqual( + cost_ben.benefit["Mangroves"], 3.100583368954022e10, places=3 + ) + self.assertAlmostEqual( + cost_ben.benefit["Beach nourishment"], 2.468981832719974e10, places=3 + ) + self.assertAlmostEqual( + cost_ben.benefit["Seawall"], 3.3132973770502796e10, places=3 + ) + self.assertAlmostEqual( + cost_ben.benefit["Building code"], 3.0376240767284798e10, places=3 + ) - self.assertAlmostEqual(cost_ben.benefit['Mangroves'], 3.100583368954022e+10, places=3) - self.assertAlmostEqual(cost_ben.benefit['Beach nourishment'], - 2.468981832719974e+10, places=3) - self.assertAlmostEqual(cost_ben.benefit['Seawall'], 3.3132973770502796e+10, places=3) - self.assertAlmostEqual(cost_ben.benefit['Building code'], 3.0376240767284798e+10, places=3) + self.assertAlmostEqual( + cost_ben.tot_climate_risk, 1.2150496306913972e11, places=3 + ) - self.assertAlmostEqual(cost_ben.tot_climate_risk, 
1.2150496306913972e+11, places=3) class TestRiskFuncs(unittest.TestCase): """Test risk functions definitions""" @@ -815,8 +1278,8 @@ def test_risk_aai_agg_pass(self): """Test risk_aai_agg""" impact = self.test_impact() risk = risk_aai_agg(impact) - self.assertAlmostEqual(6.512201157564421e+09, risk, 5) - self.assertTrue(np.isclose(6.512201157564421e+09, risk)) + self.assertAlmostEqual(6.512201157564421e09, risk, 5) + self.assertTrue(np.isclose(6.512201157564421e09, risk)) def test_risk_rp_100_pass(self): """Test risk_rp_100""" @@ -834,6 +1297,7 @@ def test_risk_rp_200_pass(self): risk = risk_rp_250(impact) self.assertAlmostEqual(exc_freq.impact[0], risk) + # Execute Tests if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestRiskFuncs) diff --git a/climada/engine/test/test_forecast.py b/climada/engine/test/test_forecast.py index ef249ae20..8e80cb4fe 100644 --- a/climada/engine/test/test_forecast.py +++ b/climada/engine/test/test_forecast.py @@ -19,58 +19,62 @@ Test Forecast class """ -import unittest import datetime as dt -import numpy as np +import unittest +from pathlib import Path + +import fiona import geopandas as gpd import matplotlib.pyplot as plt -import fiona +import numpy as np from cartopy.io import shapereader -from pathlib import Path from climada import CONFIG -from climada.hazard.storm_europe import StormEurope -from climada.entity.exposures.base import Exposures, INDICATOR_IMPF +from climada.engine.forecast import FORECAST_PLOT_DIR, Forecast from climada.entity import ImpactFuncSet +from climada.entity.exposures.base import INDICATOR_IMPF, Exposures from climada.entity.impact_funcs.storm_europe import ImpfStormEurope -from climada.engine.forecast import Forecast, FORECAST_PLOT_DIR +from climada.hazard.storm_europe import StormEurope from climada.util.constants import WS_DEMO_NC HAZ_DIR = CONFIG.hazard.test_data.dir() + class TestCalc(unittest.TestCase): """Test calc and propety functions from the Forecast class""" def test_Forecast_calc_properties(self): """Test calc and propety functions from the Forecast class""" - #hazard + # hazard haz = StormEurope.from_cosmoe_file( - HAZ_DIR.joinpath('storm_europe_cosmoe_forecast_vmax_testfile.nc'), - run_datetime=dt.datetime(2018,1,1), - event_date=dt.datetime(2018,1,3)) - #exposure + HAZ_DIR.joinpath("storm_europe_cosmoe_forecast_vmax_testfile.nc"), + run_datetime=dt.datetime(2018, 1, 1), + event_date=dt.datetime(2018, 1, 3), + ) + # exposure data = {} - data['latitude'] = haz.centroids.lat - data['longitude'] = haz.centroids.lon - data['value'] = np.ones_like(data['latitude']) * 100000 - data['deductible'] = np.zeros_like(data['latitude']) - data[INDICATOR_IMPF + 'WS'] = np.ones_like(data['latitude']) - data['region_id'] = np.ones_like(data['latitude'],dtype=int) * 756 + data["latitude"] = haz.centroids.lat + data["longitude"] = haz.centroids.lon + data["value"] = np.ones_like(data["latitude"]) * 100000 + data["deductible"] = np.zeros_like(data["latitude"]) + data[INDICATOR_IMPF + "WS"] = np.ones_like(data["latitude"]) + data["region_id"] = np.ones_like(data["latitude"], dtype=int) * 756 expo = Exposures(gpd.GeoDataFrame(data=data)) - #vulnerability - #generate vulnerability + # vulnerability + # generate vulnerability impact_function = ImpfStormEurope.from_welker() impact_function_set = ImpactFuncSet([impact_function]) - #create and calculate Forecast - forecast = Forecast({dt.datetime(2018,1,1): haz}, expo, impact_function_set) + # create and calculate Forecast + forecast = Forecast({dt.datetime(2018, 
1, 1): haz}, expo, impact_function_set) forecast.calc() # test self.assertEqual(len(forecast.run_datetime), 1) - self.assertEqual(forecast.run_datetime[0], dt.datetime(2018,1,1)) - self.assertEqual(forecast.event_date, dt.datetime(2018,1,3)) - self.assertEqual(forecast.lead_time().days,2) - self.assertEqual(forecast.summary_str(), - 'WS_NWP_run2018010100_event20180103_Switzerland') + self.assertEqual(forecast.run_datetime[0], dt.datetime(2018, 1, 1)) + self.assertEqual(forecast.event_date, dt.datetime(2018, 1, 3)) + self.assertEqual(forecast.lead_time().days, 2) + self.assertEqual( + forecast.summary_str(), "WS_NWP_run2018010100_event20180103_Switzerland" + ) self.assertAlmostEqual(forecast.ai_agg(), 26.347, places=1) self.assertAlmostEqual(forecast.ei_exp()[1], 7.941, places=1) self.assertEqual(len(forecast.hazard), 1) @@ -80,23 +84,23 @@ def test_Forecast_calc_properties(self): def test_Forecast_init_raise(self): """Test calc and propety functions from the Forecast class""" - #hazard with several event dates + # hazard with several event dates storms = StormEurope.from_footprints(WS_DEMO_NC) - #exposure + # exposure data = {} - data['latitude'] = np.array([1, 2, 3]) - data['longitude'] = np.array([1, 2, 3]) - data['value'] = np.ones_like(data['latitude']) * 100000 - data['deductible'] = np.zeros_like(data['latitude']) - data[INDICATOR_IMPF + 'WS'] = np.ones_like(data['latitude']) - data['region_id'] = np.ones_like(data['latitude'],dtype=int) * 756 + data["latitude"] = np.array([1, 2, 3]) + data["longitude"] = np.array([1, 2, 3]) + data["value"] = np.ones_like(data["latitude"]) * 100000 + data["deductible"] = np.zeros_like(data["latitude"]) + data[INDICATOR_IMPF + "WS"] = np.ones_like(data["latitude"]) + data["region_id"] = np.ones_like(data["latitude"], dtype=int) * 756 expo = Exposures(gpd.GeoDataFrame(data=data)) - #vulnerability - #generate vulnerability + # vulnerability + # generate vulnerability impact_function_set = ImpactFuncSet() - #create and calculate Forecast + # create and calculate Forecast with self.assertRaises(ValueError): - Forecast({dt.datetime(2018,1,1): storms}, expo, impact_function_set) + Forecast({dt.datetime(2018, 1, 1): storms}, expo, impact_function_set) class TestPlot(unittest.TestCase): @@ -105,149 +109,162 @@ class TestPlot(unittest.TestCase): def test_Forecast_plot(self): """Test cplotting functions from the Forecast class""" ## given a forecast based on hazard exposure and vulnerability - #hazard + # hazard haz1 = StormEurope.from_cosmoe_file( - HAZ_DIR.joinpath('storm_europe_cosmoe_forecast_vmax_testfile.nc'), - run_datetime=dt.datetime(2018,1,1), - event_date=dt.datetime(2018,1,3)) + HAZ_DIR.joinpath("storm_europe_cosmoe_forecast_vmax_testfile.nc"), + run_datetime=dt.datetime(2018, 1, 1), + event_date=dt.datetime(2018, 1, 3), + ) haz1.centroids.gdf.geometry = haz1.centroids.gdf.geometry.translate(-1.2, 0.6) haz2 = StormEurope.from_cosmoe_file( - HAZ_DIR.joinpath('storm_europe_cosmoe_forecast_vmax_testfile.nc'), - run_datetime=dt.datetime(2018,1,1), - event_date=dt.datetime(2018,1,3)) + HAZ_DIR.joinpath("storm_europe_cosmoe_forecast_vmax_testfile.nc"), + run_datetime=dt.datetime(2018, 1, 1), + event_date=dt.datetime(2018, 1, 3), + ) haz2.centroids.gdf.geometry = haz2.centroids.gdf.geometry.translate(-1.2, 0.6) - #exposure + # exposure data = {} - data['latitude'] = haz1.centroids.lat - data['longitude'] = haz1.centroids.lon - data['value'] = np.ones_like(data['latitude']) * 100000 - data['deductible'] = np.zeros_like(data['latitude']) - 
data[INDICATOR_IMPF + 'WS'] = np.ones_like(data['latitude']) - data['region_id'] = np.ones_like(data['latitude'],dtype=int) * 756 + data["latitude"] = haz1.centroids.lat + data["longitude"] = haz1.centroids.lon + data["value"] = np.ones_like(data["latitude"]) * 100000 + data["deductible"] = np.zeros_like(data["latitude"]) + data[INDICATOR_IMPF + "WS"] = np.ones_like(data["latitude"]) + data["region_id"] = np.ones_like(data["latitude"], dtype=int) * 756 expo = Exposures(gpd.GeoDataFrame(data=data)) - #vulnerability - #generate vulnerability + # vulnerability + # generate vulnerability impact_function = ImpfStormEurope.from_welker() impact_function_set = ImpactFuncSet([impact_function]) - #create and calculate Forecast - forecast = Forecast({dt.datetime(2018,1,2): haz1, - dt.datetime(2017,12,31): haz2}, - expo, - impact_function_set) + # create and calculate Forecast + forecast = Forecast( + {dt.datetime(2018, 1, 2): haz1, dt.datetime(2017, 12, 31): haz2}, + expo, + impact_function_set, + ) forecast.calc() - #create a file containing the polygons of Swiss cantons using natural earth - cantons_file = CONFIG.local_data.save_dir.dir() / 'CHE_cantons.shp' - adm1_shape_file = shapereader.natural_earth(resolution='10m', - category='cultural', - name='admin_1_states_provinces') + # create a file containing the polygons of Swiss cantons using natural earth + cantons_file = CONFIG.local_data.save_dir.dir() / "CHE_cantons.shp" + adm1_shape_file = shapereader.natural_earth( + resolution="10m", category="cultural", name="admin_1_states_provinces" + ) if not cantons_file.exists(): - with fiona.open(adm1_shape_file, 'r') as source: - with fiona.open( - cantons_file, 'w', - **source.meta) as sink: + with fiona.open(adm1_shape_file, "r") as source: + with fiona.open(cantons_file, "w", **source.meta) as sink: for f in source: - if f['properties']['adm0_a3'] == 'CHE': + if f["properties"]["adm0_a3"] == "CHE": sink.write(f) ## test plotting functions # should save plot without failing - forecast.plot_imp_map(run_datetime=dt.datetime(2017,12,31), - explain_str='test text', - polygon_file=str(cantons_file), - save_fig=True, close_fig=True) - map_file_name = (forecast.summary_str(dt.datetime(2017,12,31)) + - '_impact_map' + - '.jpeg') + forecast.plot_imp_map( + run_datetime=dt.datetime(2017, 12, 31), + explain_str="test text", + polygon_file=str(cantons_file), + save_fig=True, + close_fig=True, + ) + map_file_name = ( + forecast.summary_str(dt.datetime(2017, 12, 31)) + "_impact_map" + ".jpeg" + ) map_file_name_full = Path(FORECAST_PLOT_DIR) / map_file_name map_file_name_full.absolute().unlink(missing_ok=False) - #should contain title strings - ax = forecast.plot_hist(run_datetime=dt.datetime(2017,12,31), - explain_str='test text', - save_fig=False, close_fig=False) + # should contain title strings + ax = forecast.plot_hist( + run_datetime=dt.datetime(2017, 12, 31), + explain_str="test text", + save_fig=False, + close_fig=False, + ) title_artists = ax.get_figure().get_children() title_texts = [x.get_text() for x in title_artists if isinstance(x, plt.Text)] - self.assertIn('test text', title_texts) - self.assertIn('Wed 03 Jan 2018 00-24UTC', title_texts) - self.assertIn('31.12.2017 00UTC +3d', title_texts) - #should contain average impact in axes + self.assertIn("test text", title_texts) + self.assertIn("Wed 03 Jan 2018 00-24UTC", title_texts) + self.assertIn("31.12.2017 00UTC +3d", title_texts) + # should contain average impact in axes artists = ax.get_children() texts = [x.get_text() for x in artists if type(x) 
== plt.Text] - self.assertIn('mean impact:\n 26 USD', texts) + self.assertIn("mean impact:\n 26 USD", texts) ax.get_figure().clf() - #should contain title strings - ax = forecast.plot_exceedence_prob(run_datetime=dt.datetime(2017,12,31), - threshold=5000, explain_str='test text exceedence', - save_fig=False, close_fig=False)[0][0] + # should contain title strings + ax = forecast.plot_exceedence_prob( + run_datetime=dt.datetime(2017, 12, 31), + threshold=5000, + explain_str="test text exceedence", + save_fig=False, + close_fig=False, + )[0][0] title_artists = ax.get_figure().get_children() title_texts = [x.get_text() for x in title_artists if isinstance(x, plt.Text)] - self.assertIn('test text exceedence', title_texts) - self.assertIn('Wed 03 Jan 2018 00-24UTC', title_texts) - self.assertIn('31.12.2017 00UTC +3d', title_texts) + self.assertIn("test text exceedence", title_texts) + self.assertIn("Wed 03 Jan 2018 00-24UTC", title_texts) + self.assertIn("31.12.2017 00UTC +3d", title_texts) ax.get_figure().clf() - forecast.plot_warn_map(str(cantons_file), - decision_level = 'polygon', - thresholds=[100000,500000, - 1000000,5000000], - probability_aggregation='mean', - area_aggregation='sum', - title="Building damage warning", - explain_text="warn level based on aggregated damages", - save_fig=False, - close_fig=True) - forecast.plot_warn_map(str(cantons_file), - decision_level = 'exposure_point', - thresholds=[1,1000, - 5000,5000000], - probability_aggregation=0.2, - area_aggregation=0.2, - title="Building damage warning", - explain_text="warn level based on aggregated damages", - run_datetime=dt.datetime(2017,12,31), - save_fig=False, - close_fig=True) + forecast.plot_warn_map( + str(cantons_file), + decision_level="polygon", + thresholds=[100000, 500000, 1000000, 5000000], + probability_aggregation="mean", + area_aggregation="sum", + title="Building damage warning", + explain_text="warn level based on aggregated damages", + save_fig=False, + close_fig=True, + ) + forecast.plot_warn_map( + str(cantons_file), + decision_level="exposure_point", + thresholds=[1, 1000, 5000, 5000000], + probability_aggregation=0.2, + area_aggregation=0.2, + title="Building damage warning", + explain_text="warn level based on aggregated damages", + run_datetime=dt.datetime(2017, 12, 31), + save_fig=False, + close_fig=True, + ) forecast.plot_hexbin_ei_exposure() plt.close() # should fail because of invalid decision_level with self.assertRaises(ValueError) as cm: - forecast.plot_warn_map(str(cantons_file), - decision_level='test_fail', - probability_aggregation=0.2, - area_aggregation=0.2, - title="Building damage warning", - explain_text="warn level based on aggregated damages", - save_fig=False, - close_fig=True) + forecast.plot_warn_map( + str(cantons_file), + decision_level="test_fail", + probability_aggregation=0.2, + area_aggregation=0.2, + title="Building damage warning", + explain_text="warn level based on aggregated damages", + save_fig=False, + close_fig=True, + ) plt.close() - self.assertIn( - "Parameter decision_level", str(cm.exception) - ) + self.assertIn("Parameter decision_level", str(cm.exception)) # should fail because of invalid probability_aggregation with self.assertRaises(ValueError) as cm: - forecast.plot_warn_map(str(cantons_file), - decision_level='exposure_point', - probability_aggregation='test_fail', - area_aggregation=0.2, - title="Building damage warning", - explain_text="warn level based on aggregated damages", - save_fig=False, - close_fig=True) + forecast.plot_warn_map( + 
str(cantons_file), + decision_level="exposure_point", + probability_aggregation="test_fail", + area_aggregation=0.2, + title="Building damage warning", + explain_text="warn level based on aggregated damages", + save_fig=False, + close_fig=True, + ) plt.close() - self.assertIn( - "Parameter probability_aggregation", str(cm.exception) - ) + self.assertIn("Parameter probability_aggregation", str(cm.exception)) # should fail because of invalid area_aggregation with self.assertRaises(ValueError) as cm: - forecast.plot_warn_map(str(cantons_file), - decision_level='exposure_point', - probability_aggregation=0.2, - area_aggregation='test_fail', - title="Building damage warning", - explain_text="warn level based on aggregated damages", - save_fig=False, - close_fig=True) + forecast.plot_warn_map( + str(cantons_file), + decision_level="exposure_point", + probability_aggregation=0.2, + area_aggregation="test_fail", + title="Building damage warning", + explain_text="warn level based on aggregated damages", + save_fig=False, + close_fig=True, + ) plt.close() - self.assertIn( - "Parameter area_aggregation", str(cm.exception) - ) + self.assertIn("Parameter area_aggregation", str(cm.exception)) # Execute Tests diff --git a/climada/engine/test/test_impact.py b/climada/engine/test/test_impact.py index 7b7d6fc9b..6c901f989 100644 --- a/climada/engine/test/test_impact.py +++ b/climada/engine/test/test_impact.py @@ -18,30 +18,30 @@ Test Impact class. """ + +import datetime as dt import unittest from pathlib import Path from tempfile import TemporaryDirectory + +import h5py import numpy as np import numpy.testing as npt -from scipy import sparse -import h5py from pyproj import CRS from rasterio.crs import CRS as rCRS -import datetime as dt +from scipy import sparse +import climada.util.coordinates as u_coord +from climada.engine import Impact, ImpactCalc from climada.entity.entity_def import Entity from climada.hazard.base import Hazard -from climada.engine import Impact, ImpactCalc -from climada.util.constants import ENT_DEMO_TODAY, DEF_CRS, DEMO_DIR, DEF_FREQ_UNIT -import climada.util.coordinates as u_coord - from climada.hazard.test.test_base import HAZ_TEST_TC +from climada.util.constants import DEF_CRS, DEF_FREQ_UNIT, DEMO_DIR, ENT_DEMO_TODAY +ENT: Entity = Entity.from_excel(ENT_DEMO_TODAY) +HAZ: Hazard = Hazard.from_hdf5(HAZ_TEST_TC) -ENT :Entity = Entity.from_excel(ENT_DEMO_TODAY) -HAZ :Hazard = Hazard.from_hdf5(HAZ_TEST_TC) - -DATA_FOLDER :Path = DEMO_DIR / 'test-results' +DATA_FOLDER: Path = DEMO_DIR / "test-results" DATA_FOLDER.mkdir(exist_ok=True) STR_DT = h5py.special_dtype(vlen=str) @@ -68,16 +68,17 @@ def dummy_impact(): haz_type="TC", ) + def dummy_impact_yearly(): """Return an impact containing events in multiple years""" imp = dummy_impact() - years = np.arange(2010,2010+len(imp.date)) + years = np.arange(2010, 2010 + len(imp.date)) # Edit the date and frequency - imp.date = np.array([dt.date(year,1,1).toordinal() for year in years]) + imp.date = np.array([dt.date(year, 1, 1).toordinal() for year in years]) imp.frequency_unit = "1/year" - imp.frequency = np.ones(len(years))/len(years) + imp.frequency = np.ones(len(years)) / len(years) # Calculate the correct expected annual impact freq_mat = imp.frequency.reshape(len(imp.frequency), 1) @@ -88,7 +89,8 @@ def dummy_impact_yearly(): class TestImpact(unittest.TestCase): - """"Test initialization and more""" + """ "Test initialization and more""" + def test_from_eih_pass(self): exp = ENT.exposures exp.assign_centroids(HAZ) @@ -111,8 +113,8 @@ def 
test_from_eih_pass(self): np.testing.assert_array_almost_equal(imp.at_event, fake_at_event) np.testing.assert_array_almost_equal( imp.coord_exp, - np.stack([exp.gdf['latitude'].values, exp.gdf['longitude'].values], axis=1) - ) + np.stack([exp.gdf["latitude"].values, exp.gdf["longitude"].values], axis=1), + ) def test_pyproj_crs(self): """Check if initializing with a pyproj.CRS transforms it into a string""" @@ -126,6 +128,7 @@ def test_rasterio_crs(self): impact = Impact(crs=crs) self.assertEqual(impact.crs, crs.to_wkt()) + class TestImpactConcat(unittest.TestCase): """test Impact.concat""" @@ -242,6 +245,7 @@ def test_results(self): class TestFreqCurve(unittest.TestCase): """Test exceedence frequency curve computation""" + def test_ref_value_pass(self): """Test result against reference value""" imp = Impact() @@ -257,8 +261,8 @@ def test_ref_value_pass(self): imp.at_event[7] = 0.381063674256423e9 imp.at_event[8] = 0.569142464157450e9 imp.at_event[9] = 0.467572545849132e9 - imp.unit = 'USD' - imp.frequency_unit = '1/day' + imp.unit = "USD" + imp.frequency_unit = "1/day" ifc = imp.calc_freq_curve() self.assertEqual(10, len(ifc.return_per)) @@ -283,9 +287,9 @@ def test_ref_value_pass(self): self.assertEqual(0.400665463736549e9, ifc.impact[2]) self.assertEqual(0.381063674256423e9, ifc.impact[1]) self.assertEqual(0, ifc.impact[0]) - self.assertEqual('Exceedance frequency curve', ifc.label) - self.assertEqual('USD', ifc.unit) - self.assertEqual('1/day', ifc.frequency_unit) + self.assertEqual("Exceedance frequency curve", ifc.label) + self.assertEqual("USD", ifc.unit) + self.assertEqual("1/day", ifc.frequency_unit) def test_ref_value_rp_pass(self): """Test result against reference value with given return periods""" @@ -302,8 +306,8 @@ def test_ref_value_rp_pass(self): imp.at_event[7] = 0.381063674256423e9 imp.at_event[8] = 0.569142464157450e9 imp.at_event[9] = 0.467572545849132e9 - imp.unit = 'USD' - imp.frequency_unit = '1/week' + imp.unit = "USD" + imp.frequency_unit = "1/week" ifc = imp.calc_freq_curve(np.array([100, 500, 1000])) self.assertEqual(3, len(ifc.return_per)) @@ -314,9 +318,10 @@ def test_ref_value_rp_pass(self): self.assertEqual(0, ifc.impact[0]) self.assertEqual(2320408028.5695677, ifc.impact[1]) self.assertEqual(3287314329.129928, ifc.impact[2]) - self.assertEqual('Exceedance frequency curve', ifc.label) - self.assertEqual('USD', ifc.unit) - self.assertEqual('1/week', ifc.frequency_unit) + self.assertEqual("Exceedance frequency curve", ifc.label) + self.assertEqual("USD", ifc.unit) + self.assertEqual("1/week", ifc.frequency_unit) + class TestImpactPerYear(unittest.TestCase): """Test calc_impact_year_set method""" @@ -336,18 +341,32 @@ def test_impact_per_year_sum(self): imp.at_event[7] = 0.381063674256423e9 imp.at_event[8] = 0.569142464157450e9 imp.at_event[9] = 0.467572545849132e9 - imp.date = np.array([732801, 716160, 718313, 712468, 732802, - 729285, 732931, 715419, 722404, 718351]) + imp.date = np.array( + [ + 732801, + 716160, + 718313, + 712468, + 732802, + 729285, + 732931, + 715419, + 722404, + 718351, + ] + ) iys_all = imp.impact_per_year() iys = imp.impact_per_year(all_years=False) iys_all_yr = imp.impact_per_year(year_range=(1975, 2000)) iys_yr = imp.impact_per_year(all_years=False, year_range=[1975, 2000]) iys_all_yr_1940 = imp.impact_per_year(all_years=True, year_range=[1940, 2000]) - self.assertEqual(np.around(sum([iys[year] for year in iys])), - np.around(sum(imp.at_event))) - self.assertEqual(sum([iys[year] for year in iys]), - sum([iys_all[year] for year in 
iys_all])) + self.assertEqual( + np.around(sum([iys[year] for year in iys])), np.around(sum(imp.at_event)) + ) + self.assertEqual( + sum([iys[year] for year in iys]), sum([iys_all[year] for year in iys_all]) + ) self.assertEqual(len(iys), 7) self.assertEqual(len(iys_all), 57) self.assertIn(1951 and 1959 and 2007, iys_all) @@ -358,8 +377,10 @@ def test_impact_per_year_sum(self): # year range (yr): self.assertEqual(len(iys_yr), 2) self.assertEqual(len(iys_all_yr), 26) - self.assertEqual(sum([iys_yr[year] for year in iys_yr]), - sum([iys_all_yr[year] for year in iys_all_yr])) + self.assertEqual( + sum([iys_yr[year] for year in iys_yr]), + sum([iys_all_yr[year] for year in iys_all_yr]), + ) self.assertIn(1997 and 1978, iys_yr) self.assertFalse(2007 in iys_yr) self.assertFalse(1959 in iys_yr) @@ -373,6 +394,7 @@ def test_impact_per_year_empty(self): self.assertEqual(len(iys), 0) self.assertEqual(len(iys_all), 0) + class TestIO(unittest.TestCase): """Test impact input/output methods.""" @@ -381,9 +403,9 @@ def test_write_read_ev_test(self): # Create impact object num_ev = 10 num_exp = 5 - imp_write = Impact(haz_type='TC') + imp_write = Impact(haz_type="TC") imp_write.event_id = np.arange(num_ev) - imp_write.event_name = ['event_' + str(num) for num in imp_write.event_id] + imp_write.event_name = ["event_" + str(num) for num in imp_write.event_id] imp_write.date = np.ones(num_ev) imp_write.coord_exp = np.zeros((num_exp, 2)) imp_write.coord_exp[:, 0] = 1.5 @@ -393,10 +415,10 @@ def test_write_read_ev_test(self): imp_write.frequency = np.ones(num_ev) * 0.1 imp_write.tot_value = 1000 imp_write.aai_agg = 1001 - imp_write.unit = 'USD' - imp_write.frequency_unit = '1/month' + imp_write.unit = "USD" + imp_write.frequency_unit = "1/month" - file_name = DATA_FOLDER.joinpath('test.csv') + file_name = DATA_FOLDER.joinpath("test.csv") imp_write.write_csv(file_name) imp_read = Impact.from_csv(file_name) @@ -411,16 +433,20 @@ def test_write_read_ev_test(self): self.assertEqual(imp_write.unit, imp_read.unit) self.assertEqual(imp_write.frequency_unit, imp_read.frequency_unit) self.assertEqual( - 0, len([i for i, j in zip(imp_write.event_name, imp_read.event_name) if i != j])) + 0, + len( + [i for i, j in zip(imp_write.event_name, imp_read.event_name) if i != j] + ), + ) def test_write_read_exp_test(self): """Test result against reference value""" # Create impact object num_ev = 5 num_exp = 10 - imp_write = Impact(haz_type='TC') + imp_write = Impact(haz_type="TC") imp_write.event_id = np.arange(num_ev) - imp_write.event_name = ['event_' + str(num) for num in imp_write.event_id] + imp_write.event_name = ["event_" + str(num) for num in imp_write.event_id] imp_write.date = np.ones(num_ev) imp_write.coord_exp = np.zeros((num_exp, 2)) imp_write.coord_exp[:, 0] = 1.5 @@ -430,10 +456,10 @@ def test_write_read_exp_test(self): imp_write.frequency = np.ones(num_ev) * 0.1 imp_write.tot_value = 1000 imp_write.aai_agg = 1001 - imp_write.unit = 'USD' - imp_write.frequency_unit = '1/month' + imp_write.unit = "USD" + imp_write.frequency_unit = "1/month" - file_name = DATA_FOLDER.joinpath('test.csv') + file_name = DATA_FOLDER.joinpath("test.csv") imp_write.write_csv(file_name) imp_read = Impact.from_csv(file_name) @@ -448,7 +474,11 @@ def test_write_read_exp_test(self): self.assertEqual(imp_write.unit, imp_read.unit) self.assertEqual(imp_write.frequency_unit, imp_read.frequency_unit) self.assertEqual( - 0, len([i for i, j in zip(imp_write.event_name, imp_read.event_name) if i != j])) + 0, + len( + [i for i, j in 
zip(imp_write.event_name, imp_read.event_name) if i != j] + ), + ) self.assertIsInstance(imp_read.crs, str) def test_excel_io(self): @@ -459,7 +489,7 @@ def test_excel_io(self): hazard = Hazard.from_hdf5(HAZ_TEST_TC) imp_write = ImpactCalc(ent.exposures, ent.impact_funcs, hazard).impact() - file_name = DATA_FOLDER.joinpath('test.xlsx') + file_name = DATA_FOLDER.joinpath("test.xlsx") imp_write.write_excel(file_name) imp_read = Impact.from_excel(file_name) @@ -467,15 +497,23 @@ def test_excel_io(self): np.testing.assert_array_equal(imp_write.event_id, imp_read.event_id) np.testing.assert_array_equal(imp_write.date, imp_read.date) np.testing.assert_array_equal(imp_write.coord_exp, imp_read.coord_exp) - np.testing.assert_array_almost_equal_nulp(imp_write.eai_exp, imp_read.eai_exp, nulp=5) - np.testing.assert_array_almost_equal_nulp(imp_write.at_event, imp_read.at_event, nulp=5) + np.testing.assert_array_almost_equal_nulp( + imp_write.eai_exp, imp_read.eai_exp, nulp=5 + ) + np.testing.assert_array_almost_equal_nulp( + imp_write.at_event, imp_read.at_event, nulp=5 + ) np.testing.assert_array_equal(imp_write.frequency, imp_read.frequency) self.assertEqual(imp_write.tot_value, imp_read.tot_value) self.assertEqual(imp_write.aai_agg, imp_read.aai_agg) self.assertEqual(imp_write.unit, imp_read.unit) self.assertEqual(imp_write.frequency_unit, imp_read.frequency_unit) self.assertEqual( - 0, len([i for i, j in zip(imp_write.event_name, imp_read.event_name) if i != j])) + 0, + len( + [i for i, j in zip(imp_write.event_name, imp_read.event_name) if i != j] + ), + ) self.assertIsInstance(imp_read.crs, str) def test_write_imp_mat(self): @@ -489,15 +527,18 @@ def test_write_imp_mat(self): impact.imp_mat[4, :] = np.arange(4) * 5 impact.imp_mat = sparse.csr_matrix(impact.imp_mat) - file_name = DATA_FOLDER.joinpath('test_imp_mat') + file_name = DATA_FOLDER.joinpath("test_imp_mat") impact.write_sparse_csr(file_name) - read_imp_mat = Impact().read_sparse_csr(f'{file_name}.npz') + read_imp_mat = Impact().read_sparse_csr(f"{file_name}.npz") for irow in range(5): np.testing.assert_array_equal( - read_imp_mat[irow, :].toarray(), impact.imp_mat[irow, :].toarray()) + read_imp_mat[irow, :].toarray(), impact.imp_mat[irow, :].toarray() + ) + class TestRPmatrix(unittest.TestCase): """Test computation of impact per return period for whole exposure""" + def test_local_exceedance_imp_pass(self): """Test calc local impacts per return period""" # Read default entity values @@ -508,12 +549,14 @@ def test_local_exceedance_imp_pass(self): hazard = Hazard.from_hdf5(HAZ_TEST_TC) # Compute the impact over the whole exposures - impact = ImpactCalc(ent.exposures, ent.impact_funcs, hazard).impact(save_mat=True) + impact = ImpactCalc(ent.exposures, ent.impact_funcs, hazard).impact( + save_mat=True + ) # Compute the impact per return period over the whole exposures impact_rp = impact.local_exceedance_imp(return_periods=(10, 40)) self.assertIsInstance(impact_rp, np.ndarray) - self.assertEqual(impact_rp.size, 2 * ent.exposures.gdf['value'].size) + self.assertEqual(impact_rp.size, 2 * ent.exposures.gdf["value"].size) self.assertAlmostEqual(np.max(impact_rp), 2916964966.388219, places=5) self.assertAlmostEqual(np.min(impact_rp), 444457580.131494, places=5) @@ -595,6 +638,7 @@ def test_no_imp_mat(self): class TestRiskTrans(unittest.TestCase): """Test risk transfer methods""" + def test_risk_trans_pass(self): """Test calc_risk_transfer""" # Create impact object @@ -609,8 +653,8 @@ def test_risk_trans_pass(self): imp.frequency = np.ones(10) / 5 
imp.tot_value = 10 imp.aai_agg = 100 - imp.unit = 'USD' - imp.frequency_unit = '1/month' + imp.unit = "USD" + imp.frequency_unit = "1/month" imp.imp_mat = sparse.csr_matrix(np.empty((0, 0))) new_imp, imp_rt = imp.calc_risk_transfer(2, 10) @@ -624,7 +668,9 @@ def test_risk_trans_pass(self): np.testing.assert_array_almost_equal_nulp(new_imp.frequency, imp.frequency) np.testing.assert_array_almost_equal_nulp(new_imp.coord_exp, []) np.testing.assert_array_almost_equal_nulp(new_imp.eai_exp, []) - np.testing.assert_array_almost_equal_nulp(new_imp.at_event, [0, 1, 2, 2, 2, 2, 2, 2, 2, 5]) + np.testing.assert_array_almost_equal_nulp( + new_imp.at_event, [0, 1, 2, 2, 2, 2, 2, 2, 2, 5] + ) self.assertAlmostEqual(new_imp.aai_agg, 4.0) self.assertEqual(imp_rt.unit, imp.unit) @@ -637,7 +683,9 @@ def test_risk_trans_pass(self): np.testing.assert_array_almost_equal_nulp(imp_rt.frequency, imp.frequency) np.testing.assert_array_almost_equal_nulp(imp_rt.coord_exp, []) np.testing.assert_array_almost_equal_nulp(imp_rt.eai_exp, []) - np.testing.assert_array_almost_equal_nulp(imp_rt.at_event, [0, 0, 0, 1, 2, 3, 4, 5, 6, 10]) + np.testing.assert_array_almost_equal_nulp( + imp_rt.at_event, [0, 0, 0, 1, 2, 3, 4, 5, 6, 10] + ) self.assertAlmostEqual(imp_rt.aai_agg, 6.2) def test_transfer_risk_pass(self): @@ -661,6 +709,7 @@ def test_residual_risk_pass(self): class TestSelect(unittest.TestCase): """Test select method""" + def test_select_event_id_pass(self): """Test select by event id""" @@ -672,14 +721,18 @@ def test_select_event_id_pass(self): self.assertEqual(sel_imp.frequency_unit, imp.frequency_unit) np.testing.assert_array_equal(sel_imp.event_id, [10, 11, 12]) - self.assertEqual(sel_imp.event_name, [0, 1, 'two']) + self.assertEqual(sel_imp.event_name, [0, 1, "two"]) np.testing.assert_array_equal(sel_imp.date, [0, 1, 2]) - np.testing.assert_array_almost_equal_nulp(sel_imp.frequency, [1/6, 1/6, 1]) + np.testing.assert_array_almost_equal_nulp(sel_imp.frequency, [1 / 6, 1 / 6, 1]) np.testing.assert_array_equal(sel_imp.at_event, [0, 2, 4]) - np.testing.assert_array_equal(sel_imp.imp_mat.todense(), [[0,0], [1,1], [2,2]]) - np.testing.assert_array_almost_equal_nulp(sel_imp.eai_exp, [1/6+2, 1/6+2]) - self.assertEqual(sel_imp.aai_agg, 4+2/6) + np.testing.assert_array_equal( + sel_imp.imp_mat.todense(), [[0, 0], [1, 1], [2, 2]] + ) + np.testing.assert_array_almost_equal_nulp( + sel_imp.eai_exp, [1 / 6 + 2, 1 / 6 + 2] + ) + self.assertEqual(sel_imp.aai_agg, 4 + 2 / 6) self.assertEqual(sel_imp.tot_value, 7) np.testing.assert_array_equal(sel_imp.coord_exp, [[1, 2], [1.5, 2.5]]) @@ -691,21 +744,25 @@ def test_select_event_name_pass(self): """Test select by event name""" imp = dummy_impact() - sel_imp = imp.select(event_names=[0, 1, 'two']) + sel_imp = imp.select(event_names=[0, 1, "two"]) self.assertTrue(u_coord.equal_crs(sel_imp.crs, imp.crs)) self.assertEqual(sel_imp.unit, imp.unit) self.assertEqual(sel_imp.frequency_unit, imp.frequency_unit) np.testing.assert_array_equal(sel_imp.event_id, [10, 11, 12]) - self.assertEqual(sel_imp.event_name, [0, 1, 'two']) + self.assertEqual(sel_imp.event_name, [0, 1, "two"]) np.testing.assert_array_equal(sel_imp.date, [0, 1, 2]) - np.testing.assert_array_almost_equal_nulp(sel_imp.frequency, [1/6, 1/6, 1]) + np.testing.assert_array_almost_equal_nulp(sel_imp.frequency, [1 / 6, 1 / 6, 1]) np.testing.assert_array_equal(sel_imp.at_event, [0, 2, 4]) - np.testing.assert_array_equal(sel_imp.imp_mat.todense(), [[0,0], [1,1], [2,2]]) - np.testing.assert_array_almost_equal_nulp(sel_imp.eai_exp, 
[1/6+2, 1/6+2]) - self.assertEqual(sel_imp.aai_agg, 4+2/6) + np.testing.assert_array_equal( + sel_imp.imp_mat.todense(), [[0, 0], [1, 1], [2, 2]] + ) + np.testing.assert_array_almost_equal_nulp( + sel_imp.eai_exp, [1 / 6 + 2, 1 / 6 + 2] + ) + self.assertEqual(sel_imp.aai_agg, 4 + 2 / 6) self.assertEqual(sel_imp.tot_value, 7) np.testing.assert_array_equal(sel_imp.coord_exp, [[1, 2], [1.5, 2.5]]) @@ -724,14 +781,18 @@ def test_select_dates_pass(self): self.assertEqual(sel_imp.frequency_unit, imp.frequency_unit) np.testing.assert_array_equal(sel_imp.event_id, [10, 11, 12]) - self.assertEqual(sel_imp.event_name, [0, 1, 'two']) + self.assertEqual(sel_imp.event_name, [0, 1, "two"]) np.testing.assert_array_equal(sel_imp.date, [0, 1, 2]) - np.testing.assert_array_almost_equal_nulp(sel_imp.frequency, [1/6, 1/6, 1]) + np.testing.assert_array_almost_equal_nulp(sel_imp.frequency, [1 / 6, 1 / 6, 1]) np.testing.assert_array_equal(sel_imp.at_event, [0, 2, 4]) - np.testing.assert_array_equal(sel_imp.imp_mat.todense(), [[0,0], [1,1], [2,2]]) - np.testing.assert_array_almost_equal_nulp(sel_imp.eai_exp, [1/6+2, 1/6+2]) - self.assertEqual(sel_imp.aai_agg, 4+2/6) + np.testing.assert_array_equal( + sel_imp.imp_mat.todense(), [[0, 0], [1, 1], [2, 2]] + ) + np.testing.assert_array_almost_equal_nulp( + sel_imp.eai_exp, [1 / 6 + 2, 1 / 6 + 2] + ) + self.assertEqual(sel_imp.aai_agg, 4 + 2 / 6) self.assertEqual(sel_imp.tot_value, 7) np.testing.assert_array_equal(sel_imp.coord_exp, [[1, 2], [1.5, 2.5]]) @@ -740,10 +801,10 @@ def test_select_dates_pass(self): self.assertIsInstance(sel_imp.imp_mat, sparse.csr_matrix) def test_select_coord_exp_pass(self): - """ test select by exp coordinates """ + """test select by exp coordinates""" imp = dummy_impact() - sel_imp = imp.select(coord_exp=np.array([1,2])) + sel_imp = imp.select(coord_exp=np.array([1, 2])) self.assertTrue(u_coord.equal_crs(sel_imp.crs, imp.crs)) self.assertEqual(sel_imp.unit, imp.unit) @@ -755,9 +816,13 @@ def test_select_coord_exp_pass(self): np.testing.assert_array_equal(sel_imp.frequency, imp.frequency) np.testing.assert_array_equal(sel_imp.at_event, [0, 1, 2, 3, 30, 31]) - np.testing.assert_array_equal(sel_imp.imp_mat.todense(), [[0], [1], [2], [3], [30], [31]]) - np.testing.assert_array_almost_equal_nulp(sel_imp.eai_exp, [1/6+2+3+1+31/30]) - self.assertEqual(sel_imp.aai_agg, 1/6+2+3+1+31/30) + np.testing.assert_array_equal( + sel_imp.imp_mat.todense(), [[0], [1], [2], [3], [30], [31]] + ) + np.testing.assert_array_almost_equal_nulp( + sel_imp.eai_exp, [1 / 6 + 2 + 3 + 1 + 31 / 30] + ) + self.assertEqual(sel_imp.aai_agg, 1 / 6 + 2 + 3 + 1 + 31 / 30) self.assertEqual(sel_imp.tot_value, None) np.testing.assert_array_equal(sel_imp.coord_exp, [[1, 2]]) @@ -766,7 +831,7 @@ def test_select_coord_exp_pass(self): self.assertIsInstance(sel_imp.imp_mat, sparse.csr_matrix) def test_select_event_identity_pass(self): - """ test select same impact with event name, id and date """ + """test select same impact with event name, id and date""" # Read default entity values ent = Entity.from_excel(ENT_DEMO_TODAY) @@ -780,12 +845,14 @@ def test_select_event_identity_pass(self): # Compute the impact over the whole exposures imp = ImpactCalc(ent.exposures, ent.impact_funcs, hazard).impact( - save_mat=True, assign_centroids=False) + save_mat=True, assign_centroids=False + ) - sel_imp = imp.select(event_ids=imp.event_id, - event_names=imp.event_name, - dates=(min(imp.date), max(imp.date)) - ) + sel_imp = imp.select( + event_ids=imp.event_id, + event_names=imp.event_name, + 
dates=(min(imp.date), max(imp.date)), + ) self.assertTrue(u_coord.equal_crs(sel_imp.crs, imp.crs)) self.assertEqual(sel_imp.unit, imp.unit) @@ -799,7 +866,7 @@ def test_select_event_identity_pass(self): np.testing.assert_array_equal(sel_imp.at_event, imp.at_event) np.testing.assert_array_equal(sel_imp.imp_mat.todense(), imp.imp_mat.todense()) np.testing.assert_array_equal(sel_imp.eai_exp, imp.eai_exp) - self.assertAlmostEqual(round(sel_imp.aai_agg,5), round(imp.aai_agg,5)) + self.assertAlmostEqual(round(sel_imp.aai_agg, 5), round(imp.aai_agg, 5)) self.assertEqual(sel_imp.tot_value, imp.tot_value) np.testing.assert_array_equal(sel_imp.coord_exp, imp.coord_exp) @@ -807,29 +874,32 @@ def test_select_event_identity_pass(self): self.assertIsInstance(sel_imp, Impact) self.assertIsInstance(sel_imp.imp_mat, sparse.csr_matrix) - def test_select_new_attributes(self): - """Test if impact has new attributes """ + """Test if impact has new attributes""" imp = dummy_impact() - imp.new_per_ev = ['a', 'b', 'c', 'd', 'e', 'f'] - sel_imp = imp.select(event_names=[0, 1, 'two']) + imp.new_per_ev = ["a", "b", "c", "d", "e", "f"] + sel_imp = imp.select(event_names=[0, 1, "two"]) - self.assertEqual(sel_imp.new_per_ev, ['a', 'b', 'c']) + self.assertEqual(sel_imp.new_per_ev, ["a", "b", "c"]) self.assertTrue(u_coord.equal_crs(sel_imp.crs, imp.crs)) self.assertEqual(sel_imp.unit, imp.unit) self.assertEqual(sel_imp.frequency_unit, imp.frequency_unit) np.testing.assert_array_equal(sel_imp.event_id, [10, 11, 12]) - self.assertEqual(sel_imp.event_name, [0, 1, 'two']) + self.assertEqual(sel_imp.event_name, [0, 1, "two"]) np.testing.assert_array_equal(sel_imp.date, [0, 1, 2]) - np.testing.assert_array_almost_equal_nulp(sel_imp.frequency, [1/6, 1/6, 1]) + np.testing.assert_array_almost_equal_nulp(sel_imp.frequency, [1 / 6, 1 / 6, 1]) np.testing.assert_array_equal(sel_imp.at_event, [0, 2, 4]) - np.testing.assert_array_equal(sel_imp.imp_mat.todense(), [[0,0], [1,1], [2,2]]) - np.testing.assert_array_almost_equal_nulp(sel_imp.eai_exp, [1/6+2, 1/6+2]) - self.assertEqual(sel_imp.aai_agg, 4+2/6) + np.testing.assert_array_equal( + sel_imp.imp_mat.todense(), [[0, 0], [1, 1], [2, 2]] + ) + np.testing.assert_array_almost_equal_nulp( + sel_imp.eai_exp, [1 / 6 + 2, 1 / 6 + 2] + ) + self.assertEqual(sel_imp.aai_agg, 4 + 2 / 6) self.assertEqual(sel_imp.tot_value, 7) np.testing.assert_array_equal(sel_imp.coord_exp, [[1, 2], [1.5, 2.5]]) @@ -858,21 +928,25 @@ def test_select_id_name_dates_pass(self): """Test select by event ids, names, and dates""" imp = dummy_impact() - sel_imp = imp.select(event_ids=[0], event_names=[1, 'two'], dates=(0, 2)) + sel_imp = imp.select(event_ids=[0], event_names=[1, "two"], dates=(0, 2)) self.assertTrue(u_coord.equal_crs(sel_imp.crs, imp.crs)) self.assertEqual(sel_imp.unit, imp.unit) self.assertEqual(sel_imp.frequency_unit, imp.frequency_unit) np.testing.assert_array_equal(sel_imp.event_id, [10, 11, 12]) - self.assertEqual(sel_imp.event_name, [0, 1, 'two']) + self.assertEqual(sel_imp.event_name, [0, 1, "two"]) np.testing.assert_array_equal(sel_imp.date, [0, 1, 2]) - np.testing.assert_array_almost_equal_nulp(sel_imp.frequency, [1/6, 1/6, 1]) + np.testing.assert_array_almost_equal_nulp(sel_imp.frequency, [1 / 6, 1 / 6, 1]) np.testing.assert_array_equal(sel_imp.at_event, [0, 2, 4]) - np.testing.assert_array_equal(sel_imp.imp_mat.todense(), [[0,0], [1,1], [2,2]]) - np.testing.assert_array_almost_equal_nulp(sel_imp.eai_exp, [1/6+2, 1/6+2]) - self.assertEqual(sel_imp.aai_agg, 4+2/6) + 
np.testing.assert_array_equal( + sel_imp.imp_mat.todense(), [[0, 0], [1, 1], [2, 2]] + ) + np.testing.assert_array_almost_equal_nulp( + sel_imp.eai_exp, [1 / 6 + 2, 1 / 6 + 2] + ) + self.assertEqual(sel_imp.aai_agg, 4 + 2 / 6) self.assertEqual(sel_imp.tot_value, 7) np.testing.assert_array_equal(sel_imp.coord_exp, [[1, 2], [1.5, 2.5]]) @@ -886,22 +960,25 @@ def test_select_imp_map_fail(self): imp = dummy_impact() imp.imp_mat = sparse.csr_matrix(np.empty((0, 0))) with self.assertRaises(ValueError): - imp.select(event_ids=[0], event_names=[1, 'two'], dates=(0, 2)) + imp.select(event_ids=[0], event_names=[1, "two"], dates=(0, 2)) def test_select_reset_frequency(self): """Test that reset_frequency option works correctly""" - imp = dummy_impact_yearly() # 6 events, 1 per year + imp = dummy_impact_yearly() # 6 events, 1 per year # select first 4 events n_yr = 4 - sel_imp = imp.select(dates=(imp.date[0],imp.date[n_yr-1]), reset_frequency=True) + sel_imp = imp.select( + dates=(imp.date[0], imp.date[n_yr - 1]), reset_frequency=True + ) # check frequency-related attributes - np.testing.assert_array_equal(sel_imp.frequency, [1/n_yr]*n_yr) - self.assertEqual(sel_imp.aai_agg,imp.at_event[0:n_yr].sum()/n_yr) - np.testing.assert_array_equal(sel_imp.eai_exp, - imp.imp_mat[0:n_yr,:].todense().sum(axis=0).A1/n_yr) + np.testing.assert_array_equal(sel_imp.frequency, [1 / n_yr] * n_yr) + self.assertEqual(sel_imp.aai_agg, imp.at_event[0:n_yr].sum() / n_yr) + np.testing.assert_array_equal( + sel_imp.eai_exp, imp.imp_mat[0:n_yr, :].todense().sum(axis=0).A1 / n_yr + ) class TestConvertExp(unittest.TestCase): @@ -910,9 +987,9 @@ def test__build_exp(self): imp = dummy_impact() exp = imp._build_exp() - np.testing.assert_array_equal(imp.eai_exp, exp.gdf['value']) - np.testing.assert_array_equal(imp.coord_exp[:, 0], exp.gdf['latitude']) - np.testing.assert_array_equal(imp.coord_exp[:, 1], exp.gdf['longitude']) + np.testing.assert_array_equal(imp.eai_exp, exp.gdf["value"]) + np.testing.assert_array_equal(imp.coord_exp[:, 0], exp.gdf["latitude"]) + np.testing.assert_array_equal(imp.coord_exp[:, 1], exp.gdf["longitude"]) self.assertTrue(u_coord.equal_crs(exp.crs, imp.crs)) self.assertEqual(exp.value_unit, imp.unit) self.assertEqual(exp.ref_year, 0) @@ -923,13 +1000,14 @@ def test__exp_build_event(self): imp = dummy_impact() event_id = imp.event_id[1] exp = imp._build_exp_event(event_id=event_id) - np.testing.assert_array_equal(imp.imp_mat[1].todense().A1, exp.gdf['value']) - np.testing.assert_array_equal(imp.coord_exp[:, 0], exp.gdf['latitude']) - np.testing.assert_array_equal(imp.coord_exp[:, 1], exp.gdf['longitude']) + np.testing.assert_array_equal(imp.imp_mat[1].todense().A1, exp.gdf["value"]) + np.testing.assert_array_equal(imp.coord_exp[:, 0], exp.gdf["latitude"]) + np.testing.assert_array_equal(imp.coord_exp[:, 1], exp.gdf["longitude"]) self.assertTrue(u_coord.equal_crs(exp.crs, imp.crs)) self.assertEqual(exp.value_unit, imp.unit) self.assertEqual(exp.ref_year, 0) + class TestMatchCentroids(unittest.TestCase): def test_match_centroids(self): @@ -941,7 +1019,7 @@ def test_match_centroids(self): fake_aai_agg = np.sum(fake_eai_exp) imp = Impact.from_eih(exp, HAZ, fake_at_event, fake_eai_exp, fake_aai_agg) imp_centr = imp.match_centroids(HAZ) - np.testing.assert_array_equal(imp_centr, exp.gdf['centr_TC']) + np.testing.assert_array_equal(imp_centr, exp.gdf["centr_TC"]) class TestImpactH5IO(unittest.TestCase): @@ -1072,7 +1150,7 @@ def test_read_hdf5_full(self): tot_value = 100 aai_agg = 200 unit = "unit" - 
haz_type="haz_type" + haz_type = "haz_type" # Write the data with h5py.File(self.filepath, "w") as file: @@ -1132,6 +1210,7 @@ def test_read_hdf5_full(self): self.assertIn("'event_name' is not stored as strings", cm.output[0]) self.assertListEqual(impact.event_name, ["1.2", "2.0"]) + # Execute Tests if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestFreqCurve) diff --git a/climada/engine/test/test_impact_calc.py b/climada/engine/test/test_impact_calc.py index d8a96747a..489f66a00 100644 --- a/climada/engine/test/test_impact_calc.py +++ b/climada/engine/test/test_impact_calc.py @@ -18,31 +18,31 @@ Test Impact class. """ + import unittest -from unittest.mock import create_autospec, MagicMock, call, patch -import numpy as np -from scipy import sparse -import geopandas as gpd from copy import deepcopy from pathlib import Path +from unittest.mock import MagicMock, call, create_autospec, patch + +import geopandas as gpd +import numpy as np +from scipy import sparse from climada import CONFIG -from climada.entity.entity_def import Entity -from climada.entity import Exposures, ImpactFuncSet, ImpactFunc, ImpfTropCyclone -from climada.hazard.base import Hazard, Centroids -from climada.engine import ImpactCalc, Impact +from climada.engine import Impact, ImpactCalc from climada.engine.impact_calc import LOGGER as ILOG -from climada.util.constants import ENT_DEMO_TODAY, DEMO_DIR +from climada.entity import Exposures, ImpactFunc, ImpactFuncSet, ImpfTropCyclone +from climada.entity.entity_def import Entity +from climada.hazard.base import Centroids, Hazard +from climada.test import get_test_file from climada.util.api_client import Client from climada.util.config import Config - -from climada.test import get_test_file - +from climada.util.constants import DEMO_DIR, ENT_DEMO_TODAY ENT = Entity.from_excel(ENT_DEMO_TODAY) -HAZ = Hazard.from_hdf5(get_test_file('test_tc_florida')) +HAZ = Hazard.from_hdf5(get_test_file("test_tc_florida")) -DATA_FOLDER = DEMO_DIR / 'test-results' +DATA_FOLDER = DEMO_DIR / "test-results" DATA_FOLDER.mkdir(exist_ok=True) @@ -50,18 +50,18 @@ def check_impact(self, imp, haz, exp, aai_agg, eai_exp, at_event, imp_mat_array= """Test properties of imapcts""" self.assertEqual(len(haz.event_id), len(imp.at_event)) self.assertIsInstance(imp, Impact) - np.testing.assert_allclose(imp.coord_exp[:,0], exp.gdf['latitude']) - np.testing.assert_allclose(imp.coord_exp[:,1], exp.gdf['longitude']) + np.testing.assert_allclose(imp.coord_exp[:, 0], exp.gdf["latitude"]) + np.testing.assert_allclose(imp.coord_exp[:, 1], exp.gdf["longitude"]) self.assertAlmostEqual(imp.aai_agg, aai_agg, 3) np.testing.assert_allclose(imp.eai_exp, eai_exp, rtol=1e-5) np.testing.assert_allclose(imp.at_event, at_event, rtol=1e-5) if imp_mat_array is not None: - np.testing.assert_allclose(imp.imp_mat.toarray().ravel(), - imp_mat_array.ravel()) + np.testing.assert_allclose(imp.imp_mat.toarray().ravel(), imp_mat_array.ravel()) class TestImpactCalc(unittest.TestCase): """Test Impact calc methods""" + def test_init(self): icalc = ImpactCalc(ENT.exposures, ENT.impact_funcs, HAZ) self.assertEqual(icalc.n_exp_pnt, ENT.exposures.gdf.shape[0]) @@ -82,11 +82,8 @@ def test_init(self): def test_metrics(self): """Test methods to get impact metrics""" - mat = sparse.csr_matrix(np.array( - [[1, 0, 1], - [2, 2, 0]] - )) - freq = np.array([1, 1/10]) + mat = sparse.csr_matrix(np.array([[1, 0, 1], [2, 2, 0]])) + freq = np.array([1, 1 / 10]) at_event = ImpactCalc.at_event_from_mat(mat) eai_exp = 
ImpactCalc.eai_exp_from_mat(mat, freq) aai_agg = ImpactCalc.aai_agg_from_eai_exp(eai_exp) @@ -101,58 +98,63 @@ def test_metrics(self): def test_apply_cover_to_mat(self): """Test methods to get insured metrics""" - mat = sparse.csr_matrix(np.array( - [[1, 0, 1], - [2, 2, 0]] - )) + mat = sparse.csr_matrix(np.array([[1, 0, 1], [2, 2, 0]])) cover = np.array([0, 1, 10]) imp = ImpactCalc.apply_cover_to_mat(mat, cover) - np.testing.assert_array_equal( - imp.todense(), np.array([[0, 0, 1], [0, 1, 0]]) - ) + np.testing.assert_array_equal(imp.todense(), np.array([[0, 0, 1], [0, 1, 0]])) def test_error_handling_mismatch_haz_type(self): """Test error handling in case hazard type of hazard does not appear in impf_set or exposures""" - haz_tc = Hazard('TC') + haz_tc = Hazard("TC") exp_tc = Exposures() - exp_tc.gdf['impf_TC'] = 1 + exp_tc.gdf["impf_TC"] = 1 exp_ws = Exposures() - exp_ws.gdf['impf_WS'] = 2 + exp_ws.gdf["impf_WS"] = 2 impf = ImpactFunc() impf.id = 1 impf.intensity = np.array([0, 20]) impf.paa = np.array([0, 1]) impf.mdd = np.array([0, 0.5]) - impf.haz_type = 'TC' + impf.haz_type = "TC" impfset_tc = ImpactFuncSet([impf]) - impf.haz_type = 'WS' + impf.haz_type = "WS" impfset_ws = ImpactFuncSet([impf]) - impf.haz_type = '' + impf.haz_type = "" impfset_undef = ImpactFuncSet([impf]) try: ImpactCalc(exp_ws, impfset_tc, haz_tc).impact() except Exception as e: - self.assertEqual(str(e), "Impact calculation not possible. No impact " - "functions found for hazard type TC in exposures.") + self.assertEqual( + str(e), + "Impact calculation not possible. No impact " + "functions found for hazard type TC in exposures.", + ) try: ImpactCalc(exp_tc, impfset_ws, haz_tc).impact() except Exception as e: - self.assertEqual(str(e), "Impact calculation not possible. No impact " - "functions found for hazard type TC in impf_set.") + self.assertEqual( + str(e), + "Impact calculation not possible. No impact " + "functions found for hazard type TC in impf_set.", + ) try: ImpactCalc(exp_tc, impfset_undef, haz_tc).impact() except Exception as e: - self.assertEqual(str(e), "Impact calculation not possible. No impact " - "functions found for hazard type TC in impf_set.") + self.assertEqual( + str(e), + "Impact calculation not possible. 
No impact " + "functions found for hazard type TC in impf_set.", + ) + def test_error_handling_mismatch_impf_ids(self): """Test error handling in case impf ids in exposures does not appear in impf_set""" - haz = Hazard('TC') + haz = Hazard("TC") exp = Exposures() - exp.gdf.loc[0,'impf_TC'] = 1 - exp.gdf.loc[1,'impf_TC'] = 2 - impf_exp = ImpactFunc(haz_type='TC', id=1) + exp.gdf.loc[0, "impf_TC"] = 1 + exp.gdf.loc[1, "impf_TC"] = 2 + impf_exp = ImpactFunc(haz_type="TC", id=1) impf_noexp = deepcopy(impf_exp) impf_noexp.id = 3 impfset = ImpactFuncSet([impf_exp, impf_noexp]) @@ -160,11 +162,13 @@ def test_error_handling_mismatch_impf_ids(self): with self.assertRaises(ValueError) as cm: ImpactCalc(exp, impfset, haz).impact() the_exception = cm.exception - self.assertEqual(the_exception.args[0], - "The associated impact function(s) with id(s) 2 have no match in " - "impact function set for hazard type \'TC\'.\nPlease make sure " - "that all exposure points are associated with an impact " - "function that is included in the impact function set.") + self.assertEqual( + the_exception.args[0], + "The associated impact function(s) with id(s) 2 have no match in " + "impact function set for hazard type 'TC'.\nPlease make sure " + "that all exposure points are associated with an impact " + "function that is included in the impact function set.", + ) def test_calc_impact_TC_pass(self): """Test compute impact""" @@ -173,15 +177,15 @@ def test_calc_impact_TC_pass(self): self.assertEqual(icalc.n_events, len(impact.at_event)) self.assertEqual(0, impact.at_event[0]) self.assertEqual(0, impact.at_event[7225]) - self.assertAlmostEqual(1.472482938320243e+08, impact.at_event[13809], delta=1) - self.assertAlmostEqual(7.076504723057620e+10, impact.at_event[12147], delta=1) + self.assertAlmostEqual(1.472482938320243e08, impact.at_event[13809], delta=1) + self.assertAlmostEqual(7.076504723057620e10, impact.at_event[12147], delta=1) self.assertEqual(0, impact.at_event[14449]) self.assertEqual(icalc.n_exp_pnt, len(impact.eai_exp)) - self.assertAlmostEqual(1.518553670803242e+08, impact.eai_exp[0], delta=1) - self.assertAlmostEqual(1.373490457046383e+08, impact.eai_exp[25], 6) - self.assertAlmostEqual(1.066837260150042e+08, impact.eai_exp[49], 6) - self.assertAlmostEqual(6.570532945599105e+11, impact.tot_value) - self.assertAlmostEqual(6.512201157564421e+09, impact.aai_agg, 5) + self.assertAlmostEqual(1.518553670803242e08, impact.eai_exp[0], delta=1) + self.assertAlmostEqual(1.373490457046383e08, impact.eai_exp[25], 6) + self.assertAlmostEqual(1.066837260150042e08, impact.eai_exp[49], 6) + self.assertAlmostEqual(6.570532945599105e11, impact.tot_value) + self.assertAlmostEqual(6.512201157564421e09, impact.aai_agg, 5) x = 0.6 HAZf = deepcopy(HAZ) @@ -192,53 +196,134 @@ def test_calc_impact_TC_pass(self): self.assertEqual(icalc.n_events, len(impact.at_event)) self.assertEqual(0, impact.at_event[0]) self.assertEqual(0, impact.at_event[7225]) - self.assertAlmostEqual(1.472482938320243e+08 * x, impact.at_event[13809], delta=1) - self.assertAlmostEqual(7.076504723057620e+10 * x, impact.at_event[12147], delta=1) + self.assertAlmostEqual( + 1.472482938320243e08 * x, impact.at_event[13809], delta=1 + ) + self.assertAlmostEqual( + 7.076504723057620e10 * x, impact.at_event[12147], delta=1 + ) self.assertEqual(0, impact.at_event[14449]) self.assertEqual(icalc.n_exp_pnt, len(impact.eai_exp)) - self.assertAlmostEqual(1.518553670803242e+08 * x, impact.eai_exp[0], delta=1) - self.assertAlmostEqual(1.373490457046383e+08 * x, 
impact.eai_exp[25], 6) - self.assertAlmostEqual(1.066837260150042e+08 * x, impact.eai_exp[49], 6) - self.assertAlmostEqual(6.570532945599105e+11, impact.tot_value) - self.assertAlmostEqual(6.512201157564421e+09 * x, impact.aai_agg, 5) + self.assertAlmostEqual(1.518553670803242e08 * x, impact.eai_exp[0], delta=1) + self.assertAlmostEqual(1.373490457046383e08 * x, impact.eai_exp[25], 6) + self.assertAlmostEqual(1.066837260150042e08 * x, impact.eai_exp[49], 6) + self.assertAlmostEqual(6.570532945599105e11, impact.tot_value) + self.assertAlmostEqual(6.512201157564421e09 * x, impact.aai_agg, 5) def test_calc_impact_RF_pass(self): - haz = Hazard.from_hdf5(get_test_file('test_hazard_US_flood_random_locations')) - exp = Exposures.from_hdf5(get_test_file('test_exposure_US_flood_random_locations')) - impf_set = ImpactFuncSet.from_excel(Path(__file__).parent / 'data' / 'flood_imp_func_set.xls') + haz = Hazard.from_hdf5(get_test_file("test_hazard_US_flood_random_locations")) + exp = Exposures.from_hdf5( + get_test_file("test_exposure_US_flood_random_locations") + ) + impf_set = ImpactFuncSet.from_excel( + Path(__file__).parent / "data" / "flood_imp_func_set.xls" + ) icalc = ImpactCalc(exp, impf_set, haz) impact = icalc.impact(assign_centroids=False) aai_agg = 161436.05112960344 - eai_exp = np.array([ - 1.61159701e+05, 1.33742847e+02, 0.00000000e+00, 4.21352988e-01, - 1.42185609e+02, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00 - ]) - at_event = np.array([ - 0.00000000e+00, 0.00000000e+00, 9.85233619e+04, 3.41245461e+04, - 7.73566566e+07, 0.00000000e+00, 0.00000000e+00 - ]) - imp_mat_array = np.array([ - [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, - 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00], - [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, - 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00], - [0.00000000e+00, 6.41965663e+04, 0.00000000e+00, 2.02249434e+02, - 3.41245461e+04, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00], - [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, - 3.41245461e+04, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00], - [7.73566566e+07, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, - 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00], - [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, - 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00], - [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, - 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00] - ]) + eai_exp = np.array( + [ + 1.61159701e05, + 1.33742847e02, + 0.00000000e00, + 4.21352988e-01, + 1.42185609e02, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + ] + ) + at_event = np.array( + [ + 0.00000000e00, + 0.00000000e00, + 9.85233619e04, + 3.41245461e04, + 7.73566566e07, + 0.00000000e00, + 0.00000000e00, + ] + ) + imp_mat_array = np.array( + [ + [ + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + ], + [ + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + ], + [ + 0.00000000e00, + 6.41965663e04, + 0.00000000e00, + 2.02249434e02, + 3.41245461e04, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + ], + [ + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 3.41245461e04, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + ], + [ + 7.73566566e07, + 0.00000000e00, + 0.00000000e00, + 
0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + ], + [ + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + ], + [ + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + 0.00000000e00, + ], + ] + ) check_impact(self, impact, haz, exp, aai_agg, eai_exp, at_event, imp_mat_array) def test_empty_impact(self): """Check that empty impact is returned if no centroids match the exposures""" exp = ENT.exposures.copy() - exp.gdf['centr_TC'] = -1 + exp.gdf["centr_TC"] = -1 icalc = ImpactCalc(exp, ENT.impact_funcs, HAZ) impact = icalc.impact(assign_centroids=False) aai_agg = 0.0 @@ -261,8 +346,9 @@ def test_single_event_impact(self): check_impact(self, impact, haz, ENT.exposures, aai_agg, eai_exp, at_event, None) impact = icalc.impact(save_mat=True, assign_centroids=False) imp_mat_array = sparse.csr_matrix((haz.size, len(ENT.exposures.gdf))).toarray() - check_impact(self, impact, haz, ENT.exposures, aai_agg, eai_exp, at_event, imp_mat_array) - + check_impact( + self, impact, haz, ENT.exposures, aai_agg, eai_exp, at_event, imp_mat_array + ) def test_calc_impact_save_mat_pass(self): """Test compute impact with impact matrix""" @@ -270,34 +356,39 @@ def test_calc_impact_save_mat_pass(self): impact = icalc.impact() self.assertIsInstance(impact.imp_mat, sparse.csr_matrix) - self.assertEqual(impact.imp_mat.shape, (HAZ.event_id.size, - ENT.exposures.gdf['value'].size)) + self.assertEqual( + impact.imp_mat.shape, (HAZ.event_id.size, ENT.exposures.gdf["value"].size) + ) np.testing.assert_array_almost_equal_nulp( - np.array(impact.imp_mat.sum(axis=1)).ravel(), impact.at_event, nulp=5) + np.array(impact.imp_mat.sum(axis=1)).ravel(), impact.at_event, nulp=5 + ) np.testing.assert_array_almost_equal_nulp( - np.sum(impact.imp_mat.toarray() * impact.frequency[:, None], axis=0).reshape(-1), - impact.eai_exp) + np.sum( + impact.imp_mat.toarray() * impact.frequency[:, None], axis=0 + ).reshape(-1), + impact.eai_exp, + ) self.assertEqual(icalc.n_events, len(impact.at_event)) self.assertEqual(0, impact.at_event[0]) self.assertEqual(0, impact.at_event[7225]) - self.assertAlmostEqual(1.472482938320243e+08, impact.at_event[13809], delta=1) - self.assertAlmostEqual(7.076504723057620e+10, impact.at_event[12147], delta=1) + self.assertAlmostEqual(1.472482938320243e08, impact.at_event[13809], delta=1) + self.assertAlmostEqual(7.076504723057620e10, impact.at_event[12147], delta=1) self.assertEqual(0, impact.at_event[14449]) self.assertEqual(icalc.n_exp_pnt, len(impact.eai_exp)) - self.assertAlmostEqual(1.518553670803242e+08, impact.eai_exp[0], delta=1) - self.assertAlmostEqual(1.373490457046383e+08, impact.eai_exp[25], 6) - self.assertAlmostEqual(1.066837260150042e+08, impact.eai_exp[49], 6) - self.assertAlmostEqual(6.570532945599105e+11, impact.tot_value) - self.assertAlmostEqual(6.512201157564421e+09, impact.aai_agg, 5) + self.assertAlmostEqual(1.518553670803242e08, impact.eai_exp[0], delta=1) + self.assertAlmostEqual(1.373490457046383e08, impact.eai_exp[25], 6) + self.assertAlmostEqual(1.066837260150042e08, impact.eai_exp[49], 6) + self.assertAlmostEqual(6.570532945599105e11, impact.tot_value) + self.assertAlmostEqual(6.512201157564421e09, impact.aai_agg, 5) def test_calc_insured_impact_pass(self): """Test compute insured impact""" exp = ENT.exposures.copy() - exp.gdf['cover'] /= 1e3 - exp.gdf['deductible'] += 1e5 + exp.gdf["cover"] /= 1e3 + 
exp.gdf["deductible"] += 1e5 icalc = ImpactCalc(exp, ENT.impact_funcs, HAZ) - with self.assertLogs(ILOG, level='INFO') as logs: + with self.assertLogs(ILOG, level="INFO") as logs: impact = icalc.impact() self.assertIn("cover and/or deductible columns detected", logs.output[1]) self.assertEqual(icalc.n_events, len(impact.at_event)) @@ -310,16 +401,16 @@ def test_calc_insured_impact_pass(self): self.assertAlmostEqual(3072092, impact.eai_exp[0], delta=1) self.assertAlmostEqual(2778593, impact.eai_exp[25], delta=1) self.assertAlmostEqual(2716548, impact.eai_exp[49], delta=1) - self.assertAlmostEqual(6.570532945599105e+11, impact.tot_value) + self.assertAlmostEqual(6.570532945599105e11, impact.tot_value) self.assertAlmostEqual(143180396, impact.aai_agg, delta=1) def test_calc_insured_impact_no_cover(self): """Test compute insured impact""" exp = ENT.exposures.copy() - exp.gdf['cover'] /= 1e3 - exp.gdf['deductible'] += 1e5 + exp.gdf["cover"] /= 1e3 + exp.gdf["deductible"] += 1e5 icalc = ImpactCalc(exp, ENT.impact_funcs, HAZ) - with self.assertLogs(ILOG, level='INFO') as logs: + with self.assertLogs(ILOG, level="INFO") as logs: impact = icalc.impact(ignore_cover=True) self.assertIn("cover and/or deductible columns detected", logs.output[1]) self.assertEqual(icalc.n_events, len(impact.at_event)) @@ -332,16 +423,16 @@ def test_calc_insured_impact_no_cover(self): self.assertAlmostEqual(151847975, impact.eai_exp[0], delta=1) self.assertAlmostEqual(137341654, impact.eai_exp[25], delta=1) self.assertAlmostEqual(106676521, impact.eai_exp[49], delta=1) - self.assertAlmostEqual(6.570532945599105e+11, impact.tot_value) + self.assertAlmostEqual(6.570532945599105e11, impact.tot_value) self.assertAlmostEqual(6511839456, impact.aai_agg, delta=1) def test_calc_insured_impact_no_deductible(self): """Test compute insured impact""" exp = ENT.exposures.copy() - exp.gdf['cover'] /= 1e3 - exp.gdf['deductible'] += 1e5 + exp.gdf["cover"] /= 1e3 + exp.gdf["deductible"] += 1e5 icalc = ImpactCalc(exp, ENT.impact_funcs, HAZ) - with self.assertLogs(ILOG, level='INFO') as logs: + with self.assertLogs(ILOG, level="INFO") as logs: impact = icalc.impact(ignore_deductible=True) self.assertIn("cover and/or deductible columns detected", logs.output[1]) self.assertEqual(icalc.n_events, len(impact.at_event)) @@ -354,20 +445,23 @@ def test_calc_insured_impact_no_deductible(self): self.assertAlmostEqual(3072413, impact.eai_exp[0], delta=1) self.assertAlmostEqual(2778914, impact.eai_exp[25], delta=1) self.assertAlmostEqual(2716831, impact.eai_exp[49], delta=1) - self.assertAlmostEqual(6.570532945599105e+11, impact.tot_value) + self.assertAlmostEqual(6.570532945599105e11, impact.tot_value) self.assertAlmostEqual(143195738, impact.aai_agg, delta=1) def test_calc_insured_impact_no_insurance(self): """Test compute insured impact""" exp = ENT.exposures.copy() - exp.gdf['cover'] /= 1e3 - exp.gdf['deductible'] += 1e5 + exp.gdf["cover"] /= 1e3 + exp.gdf["deductible"] += 1e5 icalc = ImpactCalc(exp, ENT.impact_funcs, HAZ) - with self.assertLogs(ILOG, level='INFO') as logs: + with self.assertLogs(ILOG, level="INFO") as logs: impact = icalc.impact(ignore_cover=True, ignore_deductible=True) - self.assertEqual(logs.output, [ - "INFO:climada.engine.impact_calc:Calculating impact for 150 assets (>0) and 14450 events." - ]) + self.assertEqual( + logs.output, + [ + "INFO:climada.engine.impact_calc:Calculating impact for 150 assets (>0) and 14450 events." 
+ ], + ) self.assertEqual(icalc.n_events, len(impact.at_event)) self.assertEqual(0, impact.at_event[0]) self.assertEqual(0, impact.at_event[7225]) @@ -378,25 +472,30 @@ def test_calc_insured_impact_no_insurance(self): self.assertAlmostEqual(151855367, impact.eai_exp[0], delta=1) self.assertAlmostEqual(137349045, impact.eai_exp[25], delta=1) self.assertAlmostEqual(106683726, impact.eai_exp[49], delta=1) - self.assertAlmostEqual(6.570532945599105e+11, impact.tot_value) + self.assertAlmostEqual(6.570532945599105e11, impact.tot_value) self.assertAlmostEqual(6512201157, impact.aai_agg, delta=1) def test_calc_insured_impact_save_mat_pass(self): """Test compute impact with impact matrix""" exp = ENT.exposures.copy() - exp.gdf['cover'] /= 1e3 - exp.gdf['deductible'] += 1e5 + exp.gdf["cover"] /= 1e3 + exp.gdf["deductible"] += 1e5 icalc = ImpactCalc(exp, ENT.impact_funcs, HAZ) impact = icalc.impact(save_mat=True) self.assertIsInstance(impact.imp_mat, sparse.csr_matrix) - self.assertEqual(impact.imp_mat.shape, (HAZ.event_id.size, - ENT.exposures.gdf['value'].size)) + self.assertEqual( + impact.imp_mat.shape, (HAZ.event_id.size, ENT.exposures.gdf["value"].size) + ) np.testing.assert_array_almost_equal_nulp( - np.array(impact.imp_mat.sum(axis=1)).ravel(), impact.at_event, nulp=5) + np.array(impact.imp_mat.sum(axis=1)).ravel(), impact.at_event, nulp=5 + ) np.testing.assert_array_almost_equal_nulp( - np.sum(impact.imp_mat.toarray() * impact.frequency[:, None], axis=0).reshape(-1), - impact.eai_exp) + np.sum( + impact.imp_mat.toarray() * impact.frequency[:, None], axis=0 + ).reshape(-1), + impact.eai_exp, + ) self.assertEqual(icalc.n_events, len(impact.at_event)) self.assertEqual(0, impact.at_event[0]) @@ -408,23 +507,31 @@ def test_calc_insured_impact_save_mat_pass(self): self.assertAlmostEqual(3072092, impact.eai_exp[0], delta=1) self.assertAlmostEqual(2778593, impact.eai_exp[25], delta=1) self.assertAlmostEqual(2716548, impact.eai_exp[49], delta=1) - self.assertAlmostEqual(6.570532945599105e+11, impact.tot_value) + self.assertAlmostEqual(6.570532945599105e11, impact.tot_value) self.assertAlmostEqual(143180396, impact.aai_agg, delta=1) def test_minimal_exp_gdf(self): """Test obtain minimal exposures gdf""" icalc = ImpactCalc(ENT.exposures, ENT.impact_funcs, HAZ) - exp_min_gdf = icalc.minimal_exp_gdf('impf_TC', assign_centroids=True, - ignore_cover=True, ignore_deductible=True) - self.assertSetEqual(set(exp_min_gdf.columns), - set(['value', 'impf_TC', 'centr_TC'])) - np.testing.assert_array_equal(exp_min_gdf['value'], ENT.exposures.gdf['value']) - np.testing.assert_array_equal(exp_min_gdf['impf_TC'], ENT.exposures.gdf['impf_TC']) - np.testing.assert_array_equal(exp_min_gdf['centr_TC'], ENT.exposures.gdf['centr_TC']) + exp_min_gdf = icalc.minimal_exp_gdf( + "impf_TC", assign_centroids=True, ignore_cover=True, ignore_deductible=True + ) + self.assertSetEqual( + set(exp_min_gdf.columns), set(["value", "impf_TC", "centr_TC"]) + ) + np.testing.assert_array_equal(exp_min_gdf["value"], ENT.exposures.gdf["value"]) + np.testing.assert_array_equal( + exp_min_gdf["impf_TC"], ENT.exposures.gdf["impf_TC"] + ) + np.testing.assert_array_equal( + exp_min_gdf["centr_TC"], ENT.exposures.gdf["centr_TC"] + ) def test_stitch_impact_matrix(self): """Check how sparse matrices from a generator are stitched together""" - icalc = ImpactCalc(Exposures({'blank': [1, 2, 3, 4]}), ImpactFuncSet(), Hazard()) + icalc = ImpactCalc( + Exposures({"blank": [1, 2, 3, 4]}), ImpactFuncSet(), Hazard() + ) icalc.hazard.event_id = np.array([1, 2, 
3]) icalc._orig_exp_idx = np.array([0, 1, 2, 3]) @@ -449,13 +556,15 @@ def test_apply_deductible_to_mat(self): centr_idx = np.ones(2) impf = None - mat = ImpactCalc.apply_deductible_to_mat(mat, deductible, hazard, centr_idx, impf) + mat = ImpactCalc.apply_deductible_to_mat( + mat, deductible, hazard, centr_idx, impf + ) np.testing.assert_array_equal(mat.toarray(), [[9.0, 20.0], [29.9, 39.5]]) hazard.get_paa.assert_called_once_with(centr_idx, impf) def test_stitch_risk_metrics(self): """Test computing risk metrics from an impact matrix generator""" - icalc = ImpactCalc(Exposures({'blank': [1, 2, 3]}), ImpactFuncSet(), Hazard()) + icalc = ImpactCalc(Exposures({"blank": [1, 2, 3]}), ImpactFuncSet(), Hazard()) icalc.hazard.event_id = np.array([1, 2]) icalc.hazard.frequency = np.array([2, 0.5]) icalc._orig_exp_idx = np.array([0, 1, 2]) @@ -477,20 +586,19 @@ def test_single_exp_zero_mdr(self): haz = Hazard( intensity=sparse.csr_matrix(np.array([[31.5], [19.0]])), event_id=np.arange(2), - event_name=[0,1], + event_name=[0, 1], frequency=np.ones(2) / 2, - fraction=sparse.csr_matrix(np.zeros((2,1))), + fraction=sparse.csr_matrix(np.zeros((2, 1))), date=np.array([0, 1]), centroids=centroids, - haz_type='TC' + haz_type="TC", + ) + exp = Exposures( + {"value": [1.0], "longitude": 28.22, "latitude": -26.17, "impf_TC": 1}, + crs="EPSG:4326", ) - exp = Exposures({'value': [1.], - 'longitude': 28.22, - 'latitude': -26.17, - 'impf_TC': 1}, - crs="EPSG:4326") imp_evt = 0.00250988804927603 - aai_agg = imp_evt/2 + aai_agg = imp_evt / 2 eai_exp = np.array([aai_agg]) at_event = np.array([imp_evt, 0]) exp.set_geometry_points() @@ -500,6 +608,7 @@ def test_single_exp_zero_mdr(self): imp = ImpactCalc(exp, impf_set, haz).impact(save_mat=True) check_impact(self, imp, haz, exp, aai_agg, eai_exp, at_event, at_event) + class TestImpactMatrixCalc(unittest.TestCase): """Verify the computation of the impact matrix""" @@ -546,7 +655,7 @@ class TestImpactMatrixGenerator(unittest.TestCase): """Check the impact matrix generator""" def setUp(self): - """"Initialize mocks""" + """ "Initialize mocks""" # Alter the default config to enable chunking self._max_matrix_size = CONFIG.max_matrix_size.int() CONFIG.max_matrix_size = Config(val=1, root=CONFIG) @@ -588,7 +697,10 @@ def test_selection(self): # Verify calls self.impfset.get_func.assert_has_calls( - [call(haz_type="haz_type", fun_id=0), call(haz_type="haz_type", fun_id=11),] + [ + call(haz_type="haz_type", fun_id=0), + call(haz_type="haz_type", fun_id=11), + ] ) self.icalc.impact_matrix.assert_has_calls( [ @@ -629,7 +741,9 @@ def test_chunk_error(self): def test_empty_exp(self): """imp_mat_gen should return an empty iterator for an empty dataframe""" - exp_gdf = gpd.GeoDataFrame({"impact_functions": [], "centr_col": [], "value": []}) + exp_gdf = gpd.GeoDataFrame( + {"impact_functions": [], "centr_col": [], "value": []} + ) self.assertEqual( [], list(self.icalc.imp_mat_gen(exp_gdf=exp_gdf, impf_col="impact_functions")), @@ -638,8 +752,9 @@ def test_empty_exp(self): class TestInsuredImpactMatrixGenerator(unittest.TestCase): """Verify the computation of the insured impact matrix""" + def setUp(self): - """"Initialize mocks""" + """ "Initialize mocks""" hazard = create_autospec(HAZ) self.icalc = ImpactCalc(ENT.exposures, ENT.impact_funcs, hazard) self.icalc._orig_exp_idx = np.array([0, 1]) @@ -656,8 +771,13 @@ def setUp(self): def test_insured_mat_gen(self): """Test insured impact matrix generator""" exp_gdf = gpd.GeoDataFrame( - {"impact_functions": [0, 2], "centr_col": [0, 10], 
"value": [1.0, 2.0], - "deductible": [10.0, 20.0], "cover": [1.0, 100.0]} + { + "impact_functions": [0, 2], + "centr_col": [0, 10], + "value": [1.0, 2.0], + "deductible": [10.0, 20.0], + "cover": [1.0, 100.0], + } ) imp_mat_gen = ((i, np.array([i])) for i in range(2)) gen = self.icalc.insured_mat_gen(imp_mat_gen, exp_gdf, "impact_functions") @@ -672,7 +792,10 @@ def test_insured_mat_gen(self): # Check if correct impf_id was selected self.icalc.impfset.get_func.assert_has_calls( - [call(haz_type="haz_type", fun_id=0), call(haz_type="haz_type", fun_id=2),] + [ + call(haz_type="haz_type", fun_id=0), + call(haz_type="haz_type", fun_id=2), + ] ) # Check if correct deductible and cent_idx were selected self.icalc.apply_deductible_to_mat.assert_has_calls( @@ -692,6 +815,7 @@ def test_insured_mat_gen(self): class TestImpactMatrix(unittest.TestCase): """Test Impact matrix computation""" + def setUp(self): """Initialize mock""" hazard = create_autospec(HAZ) diff --git a/climada/engine/test/test_impact_data.py b/climada/engine/test/test_impact_data.py index 5391a3b14..05ad8ea41 100644 --- a/climada/engine/test/test_impact_data.py +++ b/climada/engine/test/test_impact_data.py @@ -18,130 +18,152 @@ Test Impact class. """ + import unittest -import numpy as np import warnings -from climada import CONFIG -from climada.util.constants import DEMO_DIR +import numpy as np import climada.engine.impact_data as im_d +from climada import CONFIG +from climada.util.constants import DEMO_DIR DATA_DIR = CONFIG.engine.test_data.dir() -EMDAT_TEST_CSV = DATA_DIR.joinpath('emdat_testdata_BGD_USA_1970-2017.csv') -EMDAT_TEST_CSV_FAKE = DATA_DIR.joinpath('emdat_testdata_fake_2007-2011.csv') -EMDAT_2020_CSV_DEMO = DEMO_DIR.joinpath('demo_emdat_impact_data_2020.csv') +EMDAT_TEST_CSV = DATA_DIR.joinpath("emdat_testdata_BGD_USA_1970-2017.csv") +EMDAT_TEST_CSV_FAKE = DATA_DIR.joinpath("emdat_testdata_fake_2007-2011.csv") +EMDAT_2020_CSV_DEMO = DEMO_DIR.joinpath("demo_emdat_impact_data_2020.csv") + class TestEmdatImport(unittest.TestCase): """Test import of EM-DAT data (as CSV) for impact data analysis""" def test_clean_emdat_df_2018_load(self): """load selected sub sample from CSV, return DataFrame. 
- here: from 2018 EM-DAT version to 2018 target_version""" + here: from 2018 EM-DAT version to 2018 target_version""" - df = im_d.clean_emdat_df(EMDAT_TEST_CSV, countries=['Bangladesh'], hazard='TC', - year_range=[2000, 2017], target_version=2018) - self.assertIn('ISO', df.columns) - self.assertIn('Year', df.columns) - iso3 = list(df['ISO'].unique()) - years = np.arange(df['Year'].min(), df['Year'].max() + 1) + df = im_d.clean_emdat_df( + EMDAT_TEST_CSV, + countries=["Bangladesh"], + hazard="TC", + year_range=[2000, 2017], + target_version=2018, + ) + self.assertIn("ISO", df.columns) + self.assertIn("Year", df.columns) + iso3 = list(df["ISO"].unique()) + years = np.arange(df["Year"].min(), df["Year"].max() + 1) - self.assertListEqual(['BGD'], iso3) + self.assertListEqual(["BGD"], iso3) self.assertEqual(18, len(years)) self.assertEqual(2017, years[-1]) self.assertEqual(2010, years[10]) self.assertEqual(450, df.size) - self.assertEqual(8978541, df['Total affected'].max()) - self.assertIn('Tropical cyclone', list(df['Disaster subtype'])) - self.assertFalse(False in list(df['Disaster subtype'] == 'Tropical cyclone')) - self.assertFalse('Flood' in list(df['Disaster subtype'])) + self.assertEqual(8978541, df["Total affected"].max()) + self.assertIn("Tropical cyclone", list(df["Disaster subtype"])) + self.assertFalse(False in list(df["Disaster subtype"] == "Tropical cyclone")) + self.assertFalse("Flood" in list(df["Disaster subtype"])) def test_emdat_df_2018_to_2020_load(self): """load selected sub sample from CSV, return DataFrame - here: from 2018 EM-DAT version to 2020 target_version""" - df = im_d.clean_emdat_df(EMDAT_TEST_CSV, countries=['USA'], hazard='TC', - year_range=[2000, 2017], target_version=2020) - self.assertIn('ISO', df.columns) - self.assertIn('Year', df.columns) - iso3 = list(df['ISO'].unique()) - years = np.arange(df['Year'].min(), df['Year'].max() + 1) - self.assertListEqual(['USA'], iso3) + here: from 2018 EM-DAT version to 2020 target_version""" + df = im_d.clean_emdat_df( + EMDAT_TEST_CSV, + countries=["USA"], + hazard="TC", + year_range=[2000, 2017], + target_version=2020, + ) + self.assertIn("ISO", df.columns) + self.assertIn("Year", df.columns) + iso3 = list(df["ISO"].unique()) + years = np.arange(df["Year"].min(), df["Year"].max() + 1) + self.assertListEqual(["USA"], iso3) self.assertEqual(18, len(years)) self.assertEqual(2017, years[-1]) self.assertEqual(2010, years[10]) self.assertEqual(1634, df.size) self.assertEqual(60000000, df["Insured Damages ('000 US$)"].max()) - self.assertIn('Tropical cyclone', list(df['Disaster Subtype'])) - self.assertFalse(False in list(df['Disaster Subtype'] == 'Tropical cyclone')) - self.assertFalse('Flood' in list(df['Disaster Subtype'])) + self.assertIn("Tropical cyclone", list(df["Disaster Subtype"])) + self.assertFalse(False in list(df["Disaster Subtype"] == "Tropical cyclone")) + self.assertFalse("Flood" in list(df["Disaster Subtype"])) def test_emdat_df_2020_load(self): """load selected sub sample from CSV, return DataFrame - here: from 2020 EM-DAT version to 2020 target_version""" - df = im_d.clean_emdat_df(EMDAT_2020_CSV_DEMO, countries=['THA', 'Viet Nam'], hazard='TC', - year_range=[2005, 2008], target_version=2020) - self.assertIn('ISO', df.columns) - self.assertIn('Year', df.columns) - iso3 = list(df['ISO'].unique()) - years = np.arange(df['Year'].min(), df['Year'].max() + 1) - self.assertIn('THA', iso3) - self.assertIn('VNM', iso3) - self.assertNotIn('USA', iso3) - self.assertNotIn('TWN', iso3) + here: from 2020 EM-DAT version 
to 2020 target_version""" + df = im_d.clean_emdat_df( + EMDAT_2020_CSV_DEMO, + countries=["THA", "Viet Nam"], + hazard="TC", + year_range=[2005, 2008], + target_version=2020, + ) + self.assertIn("ISO", df.columns) + self.assertIn("Year", df.columns) + iso3 = list(df["ISO"].unique()) + years = np.arange(df["Year"].min(), df["Year"].max() + 1) + self.assertIn("THA", iso3) + self.assertIn("VNM", iso3) + self.assertNotIn("USA", iso3) + self.assertNotIn("TWN", iso3) self.assertEqual(4, len(years)) self.assertEqual(2008, years[-1]) self.assertEqual(2006, years[1]) self.assertEqual(43, df.columns.size) self.assertEqual(688, df.size) self.assertEqual(624000, df["Total Damages ('000 US$)"].max()) - self.assertIn('Tropical cyclone', list(df['Disaster Subtype'])) - self.assertFalse(False in list(df['Disaster Subtype'] == 'Tropical cyclone')) - self.assertFalse('Flood' in list(df['Disaster Subtype'])) + self.assertIn("Tropical cyclone", list(df["Disaster Subtype"])) + self.assertFalse(False in list(df["Disaster Subtype"] == "Tropical cyclone")) + self.assertFalse("Flood" in list(df["Disaster Subtype"])) + class TestEmdatProcessing(unittest.TestCase): def test_emdat_impact_event_2018(self): """test emdat_impact_event event impact data extraction, version 2018""" - df = im_d.emdat_impact_event(EMDAT_TEST_CSV, countries=['Bangladesh', 'USA'], - hazard='Drought', year_range=[2015, 2017], - reference_year=2017, version=2018) + df = im_d.emdat_impact_event( + EMDAT_TEST_CSV, + countries=["Bangladesh", "USA"], + hazard="Drought", + year_range=[2015, 2017], + reference_year=2017, + version=2018, + ) self.assertEqual(46, df.size) - self.assertEqual('2017-9550', df['Disaster No.'][1]) - self.assertEqual(df["Total damage ('000 US$)"][0], - df["impact"][0] * 1e-3) - self.assertEqual(df["impact_scaled"][1], - df["impact"][1]) + self.assertEqual("2017-9550", df["Disaster No."][1]) + self.assertEqual(df["Total damage ('000 US$)"][0], df["impact"][0] * 1e-3) + self.assertEqual(df["impact_scaled"][1], df["impact"][1]) self.assertEqual(df["Total damage ('000 US$)"][1], 2500000) self.assertEqual(df["Total damage ('000 US$)"][0], 1800000) # scaled impact value might change if worldbank input data changes, # check magnitude and adjust if test failes in the following 1 lines: - self.assertAlmostEqual(df["impact_scaled"][0] * 1e-7, - 192.7868, places=0) - self.assertIn('USA', list(df['ISO'])) - self.assertIn('Drought', list(df['Disaster type'])) - self.assertEqual(2017, df['reference_year'].min()) + self.assertAlmostEqual(df["impact_scaled"][0] * 1e-7, 192.7868, places=0) + self.assertIn("USA", list(df["ISO"])) + self.assertIn("Drought", list(df["Disaster type"])) + self.assertEqual(2017, df["reference_year"].min()) def test_emdat_impact_event_2020(self): """test emdat_impact_event event impact data extraction, version 2020""" - df = im_d.emdat_impact_event(EMDAT_TEST_CSV, countries=['Bangladesh', 'USA'], - hazard='Drought', year_range=[2015, 2017], - reference_year=2000, version=2020) + df = im_d.emdat_impact_event( + EMDAT_TEST_CSV, + countries=["Bangladesh", "USA"], + hazard="Drought", + year_range=[2015, 2017], + reference_year=2000, + version=2020, + ) self.assertEqual(96, df.size) - self.assertEqual('2017-9550', df['Dis No'][1]) - self.assertEqual(df["Total Damages ('000 US$)"][0], - df["impact"][0] * 1e-3) - self.assertNotEqual(df["impact_scaled"][1], - df["impact"][1]) + self.assertEqual("2017-9550", df["Dis No"][1]) + self.assertEqual(df["Total Damages ('000 US$)"][0], df["impact"][0] * 1e-3) + 
self.assertNotEqual(df["impact_scaled"][1], df["impact"][1]) self.assertEqual(df["Total Damages ('000 US$)"][1], 2500000) self.assertEqual(df["Total Damages ('000 US$)"][0], 1800000) # scaled impact value might change if worldbank input data changes, # check magnitude and adjust if test failes in the following line: - self.assertAlmostEqual(df["impact_scaled"][0] * 1e-9, - 1.012, places=0) - self.assertIn('USA', list(df['ISO'])) - self.assertIn('Drought', list(df['Disaster Type'])) - self.assertEqual(2000, df['reference_year'].min()) + self.assertAlmostEqual(df["impact_scaled"][0] * 1e-9, 1.012, places=0) + self.assertIn("USA", list(df["ISO"])) + self.assertIn("Drought", list(df["Disaster Type"])) + self.assertEqual(2000, df["reference_year"].min()) def test_emdat_impact_yearlysum_no_futurewarning(self): """Ensure that no FutureWarning about `DataFrame.append` being deprecated is issued""" @@ -159,32 +181,39 @@ def test_emdat_impact_yearlysum_no_futurewarning(self): def test_emdat_affected_yearlysum(self): """test emdat_impact_yearlysum yearly impact data extraction""" - df = im_d.emdat_impact_yearlysum(EMDAT_TEST_CSV, countries=['Bangladesh', 'USA'], - hazard='Flood', year_range=(2015, 2017), - reference_year=None, imp_str="Total Affected") + df = im_d.emdat_impact_yearlysum( + EMDAT_TEST_CSV, + countries=["Bangladesh", "USA"], + hazard="Flood", + year_range=(2015, 2017), + reference_year=None, + imp_str="Total Affected", + ) self.assertEqual(36, df.size) self.assertEqual(df["impact"][1], 91000) - self.assertEqual(df['impact'].sum(), 11517946) + self.assertEqual(df["impact"].sum(), 11517946) self.assertEqual(df["year"][5], 2017) - self.assertIn('USA', list(df['ISO'])) - self.assertIn('BGD', list(df['ISO'])) + self.assertIn("USA", list(df["ISO"])) + self.assertIn("BGD", list(df["ISO"])) def test_emdat_countries_by_hazard_2020_pass(self): """test to get list of countries impacted by tropical cyclones from 2000 to 2019""" - iso3_codes, country_names = im_d.emdat_countries_by_hazard(EMDAT_2020_CSV_DEMO, - hazard='TC', - year_range=(2000, 2019)) + iso3_codes, country_names = im_d.emdat_countries_by_hazard( + EMDAT_2020_CSV_DEMO, hazard="TC", year_range=(2000, 2019) + ) - self.assertIn('Réunion', country_names) - self.assertEqual('Sri Lanka', country_names[4]) - self.assertEqual('BLZ', iso3_codes[3]) + self.assertIn("Réunion", country_names) + self.assertEqual("Sri Lanka", country_names[4]) + self.assertEqual("BLZ", iso3_codes[3]) self.assertEqual(len(country_names), len(iso3_codes)) self.assertEqual(100, len(iso3_codes)) + class TestEmdatToImpact(unittest.TestCase): """Test import of EM-DAT data (as CSV) to Impact-instance (CLIMADA)""" + def test_emdat_to_impact_all_countries_pass(self): """test import EM-DAT to Impact() for all countries in CSV""" # ===================================================================== @@ -194,37 +223,44 @@ def test_emdat_to_impact_all_countries_pass(self): # ===================================================================== # file 1: version 2020 - _impact_emdat2020, countries2020 = im_d.emdat_to_impact(EMDAT_2020_CSV_DEMO, 'TC') + _impact_emdat2020, countries2020 = im_d.emdat_to_impact( + EMDAT_2020_CSV_DEMO, "TC" + ) # file 2: version 2018 - impact_emdat, countries = im_d.emdat_to_impact(EMDAT_TEST_CSV, 'TC') + impact_emdat, countries = im_d.emdat_to_impact(EMDAT_TEST_CSV, "TC") self.assertEqual(142, impact_emdat.event_id.size) self.assertEqual(141, impact_emdat.event_id[-1]) self.assertEqual(0, impact_emdat.event_id[0]) - self.assertIn('2013-0138', 
impact_emdat.event_name) - self.assertEqual('USA', countries[0]) - self.assertEqual('BGD', countries[1]) + self.assertIn("2013-0138", impact_emdat.event_name) + self.assertEqual("USA", countries[0]) + self.assertEqual("BGD", countries[1]) self.assertEqual(len(countries), len(impact_emdat.eai_exp)) self.assertEqual(2, len(impact_emdat.eai_exp)) self.assertEqual(impact_emdat.date.size, impact_emdat.frequency.size) - self.assertAlmostEqual(555861710000 * 1e-5, np.sum(impact_emdat.at_event) * 1e-5, places=0) - self.assertAlmostEqual(0.0208333333333, np.unique(impact_emdat.frequency)[0], places=7) + self.assertAlmostEqual( + 555861710000 * 1e-5, np.sum(impact_emdat.at_event) * 1e-5, places=0 + ) + self.assertAlmostEqual( + 0.0208333333333, np.unique(impact_emdat.frequency)[0], places=7 + ) self.assertAlmostEqual(11580452291.666666, impact_emdat.aai_agg, places=0) self.assertAlmostEqual(109456249.99999999, impact_emdat.eai_exp[1], places=0) self.assertAlmostEqual(11470996041.666666, impact_emdat.eai_exp[0], places=0) - self.assertIn('SPI', countries2020) - self.assertNotIn('SPI', countries) + self.assertIn("SPI", countries2020) + self.assertNotIn("SPI", countries) def test_emdat_to_impact_fakedata(self): """test import TC EM-DAT to Impact() for all countries in CSV""" - impact_emdat, countries = im_d.emdat_to_impact(EMDAT_TEST_CSV_FAKE, 'FL', - hazard_type_emdat='Flood') + impact_emdat, countries = im_d.emdat_to_impact( + EMDAT_TEST_CSV_FAKE, "FL", hazard_type_emdat="Flood" + ) self.assertEqual(6, impact_emdat.event_id.size) self.assertEqual(5, impact_emdat.event_id[-1]) self.assertEqual(0, impact_emdat.event_id[0]) - self.assertIn('2008-0001', impact_emdat.event_name) - self.assertEqual('CHE', countries[0]) - self.assertEqual('DEU', countries[1]) + self.assertIn("2008-0001", impact_emdat.event_name) + self.assertEqual("CHE", countries[0]) + self.assertEqual("DEU", countries[1]) self.assertEqual(len(countries), len(impact_emdat.eai_exp)) self.assertEqual(2, len(impact_emdat.eai_exp)) self.assertAlmostEqual(11000000.0, np.sum(impact_emdat.at_event)) @@ -235,24 +271,34 @@ def test_emdat_to_impact_fakedata(self): def test_emdat_to_impact_2020format(self): """test import TC EM-DAT to Impact() from new 2020 EMDAT format CSV""" - df1 = im_d.clean_emdat_df(EMDAT_2020_CSV_DEMO, hazard='TC', - countries='PHL', year_range=(2013, 2013)) - df2 = im_d.emdat_impact_event(EMDAT_2020_CSV_DEMO, countries='PHL', hazard='TC', - year_range=(2013, 2013), reference_year=None, - imp_str='Total Affected') - impact_emdat, _countries = im_d.emdat_to_impact(EMDAT_2020_CSV_DEMO, 'TC', - countries='PHL', - year_range=(2013, 2013), - imp_str="Total Affected") + df1 = im_d.clean_emdat_df( + EMDAT_2020_CSV_DEMO, hazard="TC", countries="PHL", year_range=(2013, 2013) + ) + df2 = im_d.emdat_impact_event( + EMDAT_2020_CSV_DEMO, + countries="PHL", + hazard="TC", + year_range=(2013, 2013), + reference_year=None, + imp_str="Total Affected", + ) + impact_emdat, _countries = im_d.emdat_to_impact( + EMDAT_2020_CSV_DEMO, + "TC", + countries="PHL", + year_range=(2013, 2013), + imp_str="Total Affected", + ) # compare number of entries for all steps: self.assertEqual(len(df1.index), len(df2.index)) self.assertEqual(impact_emdat.event_id.size, len(df1.index)) # TC events in EM-DAT in the Philipppines, 2013: self.assertEqual(8, impact_emdat.event_id.size) # People affected by TC events in the Philippines in 2013 (AAI): - self.assertAlmostEqual(17944571., impact_emdat.aai_agg, places=0) + self.assertAlmostEqual(17944571.0, 
impact_emdat.aai_agg, places=0)
         # People affected by Typhoon Hayian in the Philippines:
-        self.assertAlmostEqual(1.610687e+07, impact_emdat.at_event[4], places=0)
+        self.assertAlmostEqual(1.610687e07, impact_emdat.at_event[4], places=0)
+
 # Execute Tests
 if __name__ == "__main__":
diff --git a/climada/engine/unsequa/__init__.py b/climada/engine/unsequa/__init__.py
index 7241979eb..84bf9d7a3 100755
--- a/climada/engine/unsequa/__init__.py
+++ b/climada/engine/unsequa/__init__.py
@@ -17,9 +17,9 @@
 ---
 """
-from .unc_output import *
-from .input_var import *
 from .calc_base import *
-from .calc_impact import *
 from .calc_cost_benefit import *
 from .calc_delta_climate import *
+from .calc_impact import *
+from .input_var import *
+from .unc_output import *
diff --git a/climada/engine/unsequa/calc_base.py b/climada/engine/unsequa/calc_base.py
index 9f32931f7..4ec8e55b0 100644
--- a/climada/engine/unsequa/calc_base.py
+++ b/climada/engine/unsequa/calc_base.py
@@ -19,22 +19,21 @@
 Define Calc (uncertainty calculate) class.
 """
-import logging
 import copy
-import itertools
-
 import datetime as dt
+import itertools
+import logging
-import pandas as pd
 import numpy as np
+import pandas as pd
-from climada.util.value_representation import sig_dig as u_sig_dig
 from climada.engine.unsequa.unc_output import UncOutput
+from climada.util.value_representation import sig_dig as u_sig_dig
 LOGGER = logging.getLogger(__name__)
-class Calc():
+class Calc:
     """
     Base class for uncertainty quantification
@@ -107,13 +106,14 @@ def check_distr(self):
                     f"The input parameter {input_param_name}"
                     " is shared among two input variables with"
                     " different distributions."
-                    )
+                )
                 LOGGER.warning(
                     "\n\nThe input parameter %s is shared "
                     "among at least 2 input variables. Their uncertainty is "
                     "thus computed with the same samples for this "
-                    "input paramter.\n\n", input_param_name
-                    )
+                    "input parameter.\n\n",
+                    input_param_name,
+                )
             distr_dict[input_param_name] = input_param_func
         return True
@@ -171,20 +171,23 @@ def est_comp_time(self, n_samples, time_one_run, processes=None):
         """
         time_one_run = u_sig_dig(time_one_run, n_sig_dig=3)
         if time_one_run > 5:
-            LOGGER.warning("Computation time for one set of parameters is "
+            LOGGER.warning(
+                "Computation time for one set of parameters is "
                 "%.2fs. This is rather long."
                 "Potential reasons: InputVars are loading data, centroids have "
                 "been assigned to exp before defining input_var, ..."
                 "\n If computation cannot be reduced, consider using"
-                " a surrogate model https://www.uqlab.com/", time_one_run)
+                " a surrogate model https://www.uqlab.com/",
+                time_one_run,
+            )
         total_time = n_samples * time_one_run / processes
-        LOGGER.info("\n\nEstimated computaion time: %s\n",
-                    dt.timedelta(seconds=total_time))
+        LOGGER.info(
+            "\n\nEstimated computation time: %s\n", dt.timedelta(seconds=total_time)
+        )
         return total_time
-    def make_sample(self, N, sampling_method='saltelli',
-                    sampling_kwargs = None):
+    def make_sample(self, N, sampling_method="saltelli", sampling_kwargs=None):
         """
         Make samples of the input variables
@@ -238,39 +241,37 @@ def make_sample(self, N, sampling_method='saltelli',
         param_labels = list(self.distr_dict.keys())
         problem_sa = {
-            'num_vars' : len(param_labels),
-            'names' : param_labels,
-            'bounds' : [[0, 1]]*len(param_labels)
-            }
-        #for the ff sampler, no value of N is needed. For API consistency the user
-        #must input a value that is ignored and a warning is given.
- if sampling_method == 'ff': - LOGGER.warning("You are using the 'ff' sampler which does not require " - "a value for N. The entered N value will be ignored" - "in the sampling process.") - uniform_base_sample = self._make_uniform_base_sample(N, problem_sa, - sampling_method, - sampling_kwargs) + "num_vars": len(param_labels), + "names": param_labels, + "bounds": [[0, 1]] * len(param_labels), + } + # for the ff sampler, no value of N is needed. For API consistency the user + # must input a value that is ignored and a warning is given. + if sampling_method == "ff": + LOGGER.warning( + "You are using the 'ff' sampler which does not require " + "a value for N. The entered N value will be ignored" + "in the sampling process." + ) + uniform_base_sample = self._make_uniform_base_sample( + N, problem_sa, sampling_method, sampling_kwargs + ) df_samples = pd.DataFrame(uniform_base_sample, columns=param_labels) for param in list(df_samples): - df_samples[param] = df_samples[param].apply( - self.distr_dict[param].ppf - ) + df_samples[param] = df_samples[param].apply(self.distr_dict[param].ppf) - sampling_kwargs = { - key: str(val) - for key, val in sampling_kwargs.items() - } - df_samples.attrs['sampling_method'] = sampling_method - df_samples.attrs['sampling_kwargs'] = tuple(sampling_kwargs.items()) + sampling_kwargs = {key: str(val) for key, val in sampling_kwargs.items()} + df_samples.attrs["sampling_method"] = sampling_method + df_samples.attrs["sampling_kwargs"] = tuple(sampling_kwargs.items()) unc_output = UncOutput(df_samples) LOGGER.info("Effective number of made samples: %d", unc_output.n_samples) return unc_output - def _make_uniform_base_sample(self, N, problem_sa, sampling_method, - sampling_kwargs): + def _make_uniform_base_sample( + self, N, problem_sa, sampling_method, sampling_kwargs + ): """ Make a uniform distributed [0,1] sample for the defined uncertainty parameters (self.param_labels) with the chosen @@ -304,29 +305,37 @@ def _make_uniform_base_sample(self, N, problem_sa, sampling_method, if sampling_kwargs is None: sampling_kwargs = {} - #Import the named submodule from the SALib sample module - #From the workings of __import__ the use of 'from_list' is necessary - #c.f. https://stackoverflow.com/questions/2724260/why-does-pythons-import-require-fromlist - import importlib # pylint: disable=import-outside-toplevel - salib_sampling_method = importlib.import_module(f'SALib.sample.{sampling_method}') + # Import the named submodule from the SALib sample module + # From the workings of __import__ the use of 'from_list' is necessary + # c.f. https://stackoverflow.com/questions/2724260/why-does-pythons-import-require-fromlist + import importlib # pylint: disable=import-outside-toplevel - if sampling_method == 'ff': #the ff sampling has a fixed sample size and - #does not require the N parameter - if problem_sa['num_vars'] & (problem_sa['num_vars'] - 1) != 0: - raise ValueError("The number of parameters must be a power of 2. " - "To use the ff sampling method, you can generate " - "dummy parameters to overcome this limitation." - " See https://salib.readthedocs.io/en/latest/api.html") + salib_sampling_method = importlib.import_module( + f"SALib.sample.{sampling_method}" + ) + + if sampling_method == "ff": # the ff sampling has a fixed sample size and + # does not require the N parameter + if problem_sa["num_vars"] & (problem_sa["num_vars"] - 1) != 0: + raise ValueError( + "The number of parameters must be a power of 2. 
" + "To use the ff sampling method, you can generate " + "dummy parameters to overcome this limitation." + " See https://salib.readthedocs.io/en/latest/api.html" + ) sample_uniform = salib_sampling_method.sample( - problem = problem_sa, **sampling_kwargs) + problem=problem_sa, **sampling_kwargs + ) else: sample_uniform = salib_sampling_method.sample( - problem = problem_sa, N = N, **sampling_kwargs) + problem=problem_sa, N=N, **sampling_kwargs + ) return sample_uniform - def sensitivity(self, unc_output, sensitivity_method = 'sobol', - sensitivity_kwargs = None): + def sensitivity( + self, unc_output, sensitivity_method="sobol", sensitivity_kwargs=None + ): """ Compute the sensitivity indices using SALib. @@ -378,34 +387,38 @@ def sensitivity(self, unc_output, sensitivity_method = 'sobol', if sensitivity_kwargs is None: sensitivity_kwargs = {} - #Check compatibility of sampling and sensitivity methods + # Check compatibility of sampling and sensitivity methods unc_output.check_salib(sensitivity_method) - #Import the named submodule from the SALib analyse module - #From the workings of __import__ the use of 'from_list' is necessary - #c.f. https://stackoverflow.com/questions/2724260/why-does-pythons-import-require-fromlist + # Import the named submodule from the SALib analyse module + # From the workings of __import__ the use of 'from_list' is necessary + # c.f. https://stackoverflow.com/questions/2724260/why-does-pythons-import-require-fromlist method = getattr( - __import__('SALib.analyze', - fromlist=[sensitivity_method] - ), - sensitivity_method - ) + __import__("SALib.analyze", fromlist=[sensitivity_method]), + sensitivity_method, + ) sens_output = copy.deepcopy(unc_output) - #Certain Salib method required model input (X) and output (Y), others - #need only ouput (Y) - salib_kwargs = method.analyze.__code__.co_varnames # obtain all kwargs of the salib method - X = unc_output.samples_df.to_numpy() if 'X' in salib_kwargs else None + # Certain Salib method required model input (X) and output (Y), others + # need only ouput (Y) + salib_kwargs = ( + method.analyze.__code__.co_varnames + ) # obtain all kwargs of the salib method + X = unc_output.samples_df.to_numpy() if "X" in salib_kwargs else None for metric_name in self._metric_names: unc_df = unc_output.get_unc_df(metric_name) - sens_df = _calc_sens_df(method, unc_output.problem_sa, sensitivity_kwargs, - unc_output.param_labels, X, unc_df) + sens_df = _calc_sens_df( + method, + unc_output.problem_sa, + sensitivity_kwargs, + unc_output.param_labels, + X, + unc_df, + ) sens_output.set_sens_df(metric_name, sens_df) - sensitivity_kwargs = { - key: str(val) - for key, val in sensitivity_kwargs.items()} + sensitivity_kwargs = {key: str(val) for key, val in sensitivity_kwargs.items()} sens_output.sensitivity_method = sensitivity_method sens_output.sensitivity_kwargs = tuple(sensitivity_kwargs.items()) @@ -434,9 +447,7 @@ def _multiprocess_chunksize(samples_df, processes): int the number of samples in each chunk """ - return np.ceil( - samples_df.shape[0] / processes - ).astype(int) + return np.ceil(samples_df.shape[0] / processes).astype(int) def _transpose_chunked_data(metrics): @@ -463,10 +474,7 @@ def _transpose_chunked_data(metrics): calc_cost_benefits._map_costben_calc map for cost benefit uncertainty """ - return [ - list(itertools.chain.from_iterable(x)) - for x in zip(*metrics) - ] + return [list(itertools.chain.from_iterable(x)) for x in zip(*metrics)] def _sample_parallel_iterator(samples, chunksize, **kwargs): @@ -487,17 +495,18 @@ 
def _sample_parallel_iterator(samples, chunksize, **kwargs): suitable for methods _map_impact_calc and _map_costben_calc """ + def _chunker(df, size): """ Divide the dataframe into chunks of size number of lines """ for pos in range(0, len(df), size): - yield df.iloc[pos:pos + size] + yield df.iloc[pos : pos + size] return zip( _chunker(samples, chunksize), - *(itertools.repeat(item) for item in kwargs.values()) - ) + *(itertools.repeat(item) for item in kwargs.values()), + ) def _calc_sens_df(method, problem_sa, sensitivity_kwargs, param_labels, X, unc_df): @@ -525,85 +534,104 @@ def _calc_sens_df(method, problem_sa, sensitivity_kwargs, param_labels, X, unc_d """ sens_first_order_dict = {} sens_second_order_dict = {} - for (submetric_name, metric_unc) in unc_df.items(): + for submetric_name, metric_unc in unc_df.items(): Y = metric_unc.to_numpy() if X is not None: - sens_indices = method.analyze(problem_sa, X, Y, - **sensitivity_kwargs) + sens_indices = method.analyze(problem_sa, X, Y, **sensitivity_kwargs) else: - sens_indices = method.analyze(problem_sa, Y, - **sensitivity_kwargs) - #refactor incoherent SALib output + sens_indices = method.analyze(problem_sa, Y, **sensitivity_kwargs) + # refactor incoherent SALib output nparams = len(param_labels) - if method.__name__[-3:] == '.ff': #ff method - if sensitivity_kwargs['second_order']: - #parse interaction terms of sens_indices to a square matrix - #to ensure consistency with unsequa - interaction_names = sens_indices.pop('interaction_names') + if method.__name__[-3:] == ".ff": # ff method + if sensitivity_kwargs["second_order"]: + # parse interaction terms of sens_indices to a square matrix + # to ensure consistency with unsequa + interaction_names = sens_indices.pop("interaction_names") interactions = np.full((nparams, nparams), np.nan) - #loop over interaction names and extract each param pair, - #then match to the corresponding param from param_labels - for i,interaction_name in enumerate(interaction_names): - interactions[param_labels.index(interaction_name[0]), - param_labels.index(interaction_name[1])] = sens_indices['IE'][i] - sens_indices['IE'] = interactions - - if method.__name__[-5:] == '.hdmr': #hdmr method - #first, remove variables that are incompatible with unsequa output - keys_to_remove = ['Em','Term','select', 'RT', 'Y_em', 'idx', 'X', 'Y'] - sens_indices = {k: v for k, v in sens_indices.items() - if k not in keys_to_remove} - names = sens_indices.pop('names') #names of terms - - #second, refactor to 2D + # loop over interaction names and extract each param pair, + # then match to the corresponding param from param_labels + for i, interaction_name in enumerate(interaction_names): + interactions[ + param_labels.index(interaction_name[0]), + param_labels.index(interaction_name[1]), + ] = sens_indices["IE"][i] + sens_indices["IE"] = interactions + + if method.__name__[-5:] == ".hdmr": # hdmr method + # first, remove variables that are incompatible with unsequa output + keys_to_remove = ["Em", "Term", "select", "RT", "Y_em", "idx", "X", "Y"] + sens_indices = { + k: v for k, v in sens_indices.items() if k not in keys_to_remove + } + names = sens_indices.pop("names") # names of terms + + # second, refactor to 2D for si, si_val_array in sens_indices.items(): - if (np.array(si_val_array).ndim == 1 and #for everything that is 1d and has - np.array(si_val_array).size > nparams): #lentgh > n params, refactor to 2D + if ( + np.array(si_val_array).ndim + == 1 # for everything that is 1d and has + and np.array(si_val_array).size > 
nparams + ): # lentgh > n params, refactor to 2D si_new_array = np.full((nparams, nparams), np.nan) - np.fill_diagonal(si_new_array, si_val_array[0:nparams]) #simple terms go on diag - for i,interaction_name in enumerate(names[nparams:]): - t1, t2 = interaction_name.split('/') #interaction terms - si_new_array[param_labels.index(t1), - param_labels.index(t2)] = si_val_array[nparams+i] + np.fill_diagonal( + si_new_array, si_val_array[0:nparams] + ) # simple terms go on diag + for i, interaction_name in enumerate(names[nparams:]): + t1, t2 = interaction_name.split("/") # interaction terms + si_new_array[param_labels.index(t1), param_labels.index(t2)] = ( + si_val_array[nparams + i] + ) sens_indices[si] = si_new_array - - sens_first_order = np.array([ - np.array(si_val_array) - for si, si_val_array in sens_indices.items() - if (np.array(si_val_array).ndim == 1 # dirty trick due to Salib incoherent output - and si!='names' - and np.array(si_val_array).size == len(param_labels)) - ]).ravel() + sens_first_order = np.array( + [ + np.array(si_val_array) + for si, si_val_array in sens_indices.items() + if ( + np.array(si_val_array).ndim + == 1 # dirty trick due to Salib incoherent output + and si != "names" + and np.array(si_val_array).size == len(param_labels) + ) + ] + ).ravel() sens_first_order_dict[submetric_name] = sens_first_order - sens_second_order = np.array([ - np.array(si_val_array) - for si_val_array in sens_indices.values() - if np.array(si_val_array).ndim == 2 - ]).ravel() + sens_second_order = np.array( + [ + np.array(si_val_array) + for si_val_array in sens_indices.values() + if np.array(si_val_array).ndim == 2 + ] + ).ravel() sens_second_order_dict[submetric_name] = sens_second_order sens_first_order_df = pd.DataFrame(sens_first_order_dict, dtype=np.number) if not sens_first_order_df.empty: - si_names_first_order, param_names_first_order = _si_param_first(param_labels, sens_indices) - sens_first_order_df.insert(0, 'si', si_names_first_order) - sens_first_order_df.insert(1, 'param', param_names_first_order) - sens_first_order_df.insert(2, 'param2', None) - + si_names_first_order, param_names_first_order = _si_param_first( + param_labels, sens_indices + ) + sens_first_order_df.insert(0, "si", si_names_first_order) + sens_first_order_df.insert(1, "param", param_names_first_order) + sens_first_order_df.insert(2, "param2", None) sens_second_order_df = pd.DataFrame(sens_second_order_dict) if not sens_second_order_df.empty: - si_names_second_order, param_names_second_order, param_names_second_order_2 = \ + si_names_second_order, param_names_second_order, param_names_second_order_2 = ( _si_param_second(param_labels, sens_indices) - sens_second_order_df.insert(0, 'si', si_names_second_order,) - sens_second_order_df.insert(1, 'param', param_names_second_order) - sens_second_order_df.insert(2, 'param2', param_names_second_order_2) + ) + sens_second_order_df.insert( + 0, + "si", + si_names_second_order, + ) + sens_second_order_df.insert(1, "param", param_names_second_order) + sens_second_order_df.insert(2, "param2", param_names_second_order_2) - sens_df = pd.concat( - [sens_first_order_df, sens_second_order_df] - ).reset_index(drop=True) + sens_df = pd.concat([sens_first_order_df, sens_second_order_df]).reset_index( + drop=True + ) return sens_df @@ -624,18 +652,18 @@ def _si_param_first(param_labels, sens_indices): Names of the sensivity indices of first order for all input parameters and Parameter names for each sentivity index """ - n_params = len(param_labels) + n_params = 
len(param_labels) si_name_first_order_list = [ key for key, array in sens_indices.items() - if (np.array(array).ndim == 1 and key!='names') # dirty trick due to Salib incoherent output - ] + if ( + np.array(array).ndim == 1 and key != "names" + ) # dirty trick due to Salib incoherent output + ] si_names_first_order = [ - si - for si in si_name_first_order_list - for _ in range(n_params) - ] + si for si in si_name_first_order_list for _ in range(n_params) + ] param_names_first_order = param_labels * len(si_name_first_order_list) return si_names_first_order, param_names_first_order @@ -656,22 +684,17 @@ def _si_param_second(param_labels, sens_indices): Names of the sensivity indices of second order for all input parameters and Pairs of parameter names for each 2nd order sentivity index """ - n_params = len(param_labels) + n_params = len(param_labels) si_name_second_order_list = [ - key - for key, array in sens_indices.items() - if np.array(array).ndim == 2 - ] + key for key, array in sens_indices.items() if np.array(array).ndim == 2 + ] si_names_second_order = [ - si - for si in si_name_second_order_list - for _ in range(n_params**2) - ] - param_names_second_order_2 = param_labels \ - * len(si_name_second_order_list) * n_params + si for si in si_name_second_order_list for _ in range(n_params**2) + ] + param_names_second_order_2 = ( + param_labels * len(si_name_second_order_list) * n_params + ) param_names_second_order = [ - param - for param in param_labels - for _ in range(n_params) - ] * len(si_name_second_order_list) + param for param in param_labels for _ in range(n_params) + ] * len(si_name_second_order_list) return si_names_second_order, param_names_second_order, param_names_second_order_2 diff --git a/climada/engine/unsequa/calc_cost_benefit.py b/climada/engine/unsequa/calc_cost_benefit.py index 74ba72319..b42e76da1 100644 --- a/climada/engine/unsequa/calc_cost_benefit.py +++ b/climada/engine/unsequa/calc_cost_benefit.py @@ -19,31 +19,39 @@ Define Uncertainty Cost Benefit class """ -__all__ = ['CalcCostBenefit'] +__all__ = ["CalcCostBenefit"] +import itertools import logging import time -import itertools - from typing import Optional, Union + import pandas as pd import pathos.multiprocessing as mp -# use pathos.multiprocess fork of multiprocessing for compatibility -# wiht notebooks and other environments https://stackoverflow.com/a/65001152/12454103 from climada.engine.cost_benefit import CostBenefit +from climada.engine.unsequa.calc_base import ( + Calc, + _multiprocess_chunksize, + _sample_parallel_iterator, + _transpose_chunked_data, +) from climada.engine.unsequa.input_var import InputVar from climada.engine.unsequa.unc_output import UncCostBenefitOutput -from climada.engine.unsequa.calc_base import Calc, _sample_parallel_iterator, _multiprocess_chunksize, _transpose_chunked_data -from climada.util import log_level -from climada.hazard import Hazard from climada.entity import Entity +from climada.hazard import Hazard +from climada.util import log_level + +# use pathos.multiprocess fork of multiprocessing for compatibility +# wiht notebooks and other environments https://stackoverflow.com/a/65001152/12454103 + LOGGER = logging.getLogger(__name__) # Future planed features: # - Add 'efc' (frequency curve) to UncCostBenenfit + class CalcCostBenefit(Calc): """ Cost Benefit uncertainty analysis class @@ -74,19 +82,19 @@ class CalcCostBenefit(Calc): """ _input_var_names = ( - 'haz_input_var', - 'ent_input_var', - 'haz_fut_input_var', - 'ent_fut_input_var', + "haz_input_var", + 
"ent_input_var", + "haz_fut_input_var", + "ent_fut_input_var", ) """Names of the required uncertainty variables""" _metric_names = ( - 'tot_climate_risk', - 'benefit', - 'cost_ben_ratio', - 'imp_meas_present', - 'imp_meas_future', + "tot_climate_risk", + "benefit", + "cost_ben_ratio", + "imp_meas_present", + "imp_meas_future", ) """Names of the cost benefit output metrics""" @@ -132,13 +140,9 @@ def __init__( self.value_unit = self.ent_input_var.evaluate().exposures.value_unit self.check_distr() - - - def uncertainty(self, - unc_sample, - processes=1, - chunksize=None, - **cost_benefit_kwargs): + def uncertainty( + self, unc_sample, processes=1, chunksize=None, **cost_benefit_kwargs + ): """ Computes the cost benefit for each sample in unc_output.sample_df. @@ -193,8 +197,10 @@ def uncertainty(self, """ if unc_sample.samples_df.empty: - raise ValueError("No sample was found. Please create one first" + - "using UncImpact.make_sample(N)") + raise ValueError( + "No sample was found. Please create one first" + + "using UncImpact.make_sample(N)" + ) # copy may not be needed, but is kept to prevent potential # data corruption issues. The computational cost should be @@ -205,81 +211,92 @@ def uncertainty(self, chunksize = _multiprocess_chunksize(samples_df, processes) unit = self.value_unit - LOGGER.info("The freq_curve is not saved. Please " - "change the risk_func (see climada.engine.cost_benefit) " - "if return period information is needed") + LOGGER.info( + "The freq_curve is not saved. Please " + "change the risk_func (see climada.engine.cost_benefit) " + "if return period information is needed" + ) one_sample = samples_df.iloc[0:1] start = time.time() - self._compute_cb_metrics(one_sample, cost_benefit_kwargs, chunksize=1, processes=1) - elapsed_time = (time.time() - start) + self._compute_cb_metrics( + one_sample, cost_benefit_kwargs, chunksize=1, processes=1 + ) + elapsed_time = time.time() - start self.est_comp_time(unc_sample.n_samples, elapsed_time, processes) - #Compute impact distributions + # Compute impact distributions [ imp_meas_present, imp_meas_future, tot_climate_risk, benefit, - cost_ben_ratio - ] = self._compute_cb_metrics(samples_df, cost_benefit_kwargs, chunksize, processes) + cost_ben_ratio, + ] = self._compute_cb_metrics( + samples_df, cost_benefit_kwargs, chunksize, processes + ) # Assign computed impact distribution data to self - tot_climate_risk_unc_df = \ - pd.DataFrame(tot_climate_risk, columns = ['tot_climate_risk']) + tot_climate_risk_unc_df = pd.DataFrame( + tot_climate_risk, columns=["tot_climate_risk"] + ) benefit_unc_df = pd.DataFrame(benefit) benefit_unc_df.columns = [ - column + ' Benef' - for column in benefit_unc_df.columns] + column + " Benef" for column in benefit_unc_df.columns + ] cost_ben_ratio_unc_df = pd.DataFrame(cost_ben_ratio) cost_ben_ratio_unc_df.columns = [ - column + ' CostBen' - for column in cost_ben_ratio_unc_df.columns] + column + " CostBen" for column in cost_ben_ratio_unc_df.columns + ] - imp_metric_names = ['risk', 'risk_transf', 'cost_meas', - 'cost_ins'] + imp_metric_names = ["risk", "risk_transf", "cost_meas", "cost_ins"] im_periods = dict() - for imp_meas, period in zip([imp_meas_present, imp_meas_future], - ['present', 'future']): + for imp_meas, period in zip( + [imp_meas_present, imp_meas_future], ["present", "future"] + ): df_imp_meas = pd.DataFrame() - name = 'imp_meas_' + period + name = "imp_meas_" + period if imp_meas[0]: for imp in imp_meas: met_dic = {} for meas, imp_dic in imp.items(): - metrics = [imp_dic['risk'], - 
imp_dic['risk_transf'], - *imp_dic['cost']] - dic_tmp = {meas + ' - ' + m_name + ' - ' + period: [m_value] - for m_name, m_value - in zip(imp_metric_names, metrics) - } + metrics = [ + imp_dic["risk"], + imp_dic["risk_transf"], + *imp_dic["cost"], + ] + dic_tmp = { + meas + " - " + m_name + " - " + period: [m_value] + for m_name, m_value in zip(imp_metric_names, metrics) + } met_dic.update(dic_tmp) df_imp_meas = pd.concat( [df_imp_meas, pd.DataFrame(met_dic)], ignore_index=True, - sort=False - ) - im_periods[name + '_unc_df'] = df_imp_meas + sort=False, + ) + im_periods[name + "_unc_df"] = df_imp_meas cost_benefit_kwargs = { - key: str(val) - for key, val in cost_benefit_kwargs.items()} + key: str(val) for key, val in cost_benefit_kwargs.items() + } cost_benefit_kwargs = tuple(cost_benefit_kwargs.items()) - return UncCostBenefitOutput(samples_df=samples_df, - imp_meas_present_unc_df=im_periods['imp_meas_present_unc_df'], - imp_meas_future_unc_df=im_periods['imp_meas_future_unc_df'], - tot_climate_risk_unc_df=tot_climate_risk_unc_df, - cost_ben_ratio_unc_df=cost_ben_ratio_unc_df, - benefit_unc_df=benefit_unc_df, - unit=unit, - cost_benefit_kwargs=cost_benefit_kwargs) + return UncCostBenefitOutput( + samples_df=samples_df, + imp_meas_present_unc_df=im_periods["imp_meas_present_unc_df"], + imp_meas_future_unc_df=im_periods["imp_meas_future_unc_df"], + tot_climate_risk_unc_df=tot_climate_risk_unc_df, + cost_ben_ratio_unc_df=cost_ben_ratio_unc_df, + benefit_unc_df=benefit_unc_df, + unit=unit, + cost_benefit_kwargs=cost_benefit_kwargs, + ) def _compute_cb_metrics( - self, samples_df, cost_benefit_kwargs, chunksize, processes - ): + self, samples_df, cost_benefit_kwargs, chunksize, processes + ): """Compute the uncertainty metrics Parameters @@ -298,7 +315,7 @@ def _compute_cb_metrics( list values of impact metrics per sample """ - with log_level(level='ERROR', name_prefix='climada'): + with log_level(level="ERROR", name_prefix="climada"): p_iterator = _sample_parallel_iterator( samples=samples_df, chunksize=chunksize, @@ -306,55 +323,55 @@ def _compute_cb_metrics( haz_input_var=self.haz_input_var, ent_fut_input_var=self.ent_fut_input_var, haz_fut_input_var=self.haz_fut_input_var, - cost_benefit_kwargs=cost_benefit_kwargs + cost_benefit_kwargs=cost_benefit_kwargs, ) - if processes>1: + if processes > 1: with mp.Pool(processes=processes) as pool: - LOGGER.info('Using %s CPUs.', processes) - cb_metrics = pool.starmap( - _map_costben_calc, p_iterator - ) + LOGGER.info("Using %s CPUs.", processes) + cb_metrics = pool.starmap(_map_costben_calc, p_iterator) else: - cb_metrics = itertools.starmap( - _map_costben_calc, p_iterator - ) + cb_metrics = itertools.starmap(_map_costben_calc, p_iterator) - #Perform the actual computation - with log_level(level='ERROR', name_prefix='climada'): + # Perform the actual computation + with log_level(level="ERROR", name_prefix="climada"): return _transpose_chunked_data(cb_metrics) def _map_costben_calc( - sample_chunks, ent_input_var, haz_input_var, - ent_fut_input_var, haz_fut_input_var, cost_benefit_kwargs - ): + sample_chunks, + ent_input_var, + haz_input_var, + ent_fut_input_var, + haz_fut_input_var, + cost_benefit_kwargs, +): """ - Map to compute cost benefit for all parameter samples in parallel - - Parameters - ---------- - sample_chunks : pd.DataFrame - Dataframe of the parameter samples - haz_input_var : InputVar - Hazard uncertainty variable or Hazard for the present Hazard - in climada.engine.CostBenefit.calc - ent_input_var : InputVar - Entity uncertainty 
variable or Entity for the present Entity - in climada.engine.CostBenefit.calc - haz_fut_input_var: InputVar - Hazard uncertainty variable or Hazard for the future Hazard - ent_fut_input_var : InputVar - Entity uncertainty variable or Entity for the future Entity - in climada.engine.CostBenefit.calc - cost_benefit_kwargs : - Keyword arguments passed on to climada.engine.CostBenefit.calc() - - Returns - ------- - list - icost benefit metrics list for all samples containing - imp_meas_present, imp_meas_future, tot_climate_risk, - benefit, cost_ben_ratio + Map to compute cost benefit for all parameter samples in parallel + + Parameters + ---------- + sample_chunks : pd.DataFrame + Dataframe of the parameter samples + haz_input_var : InputVar + Hazard uncertainty variable or Hazard for the present Hazard + in climada.engine.CostBenefit.calc + ent_input_var : InputVar + Entity uncertainty variable or Entity for the present Entity + in climada.engine.CostBenefit.calc + haz_fut_input_var: InputVar + Hazard uncertainty variable or Hazard for the future Hazard + ent_fut_input_var : InputVar + Entity uncertainty variable or Entity for the future Entity + in climada.engine.CostBenefit.calc + cost_benefit_kwargs : + Keyword arguments passed on to climada.engine.CostBenefit.calc() + + Returns + ------- + list + icost benefit metrics list for all samples containing + imp_meas_present, imp_meas_future, tot_climate_risk, + benefit, cost_ben_ratio """ @@ -373,17 +390,28 @@ def _map_costben_calc( cb = CostBenefit() ent.exposures.assign_centroids(haz, overwrite=False) if ent_fut: - ent_fut.exposures.assign_centroids(haz_fut if haz_fut else haz, overwrite=False) - cb.calc(hazard=haz, entity=ent, haz_future=haz_fut, ent_future=ent_fut, - save_imp=False, assign_centroids=False, **cost_benefit_kwargs) + ent_fut.exposures.assign_centroids( + haz_fut if haz_fut else haz, overwrite=False + ) + cb.calc( + hazard=haz, + entity=ent, + haz_future=haz_fut, + ent_future=ent_fut, + save_imp=False, + assign_centroids=False, + **cost_benefit_kwargs + ) # Extract from climada.impact the chosen metrics - uncertainty_values.append([ - cb.imp_meas_present, - cb.imp_meas_future, - cb.tot_climate_risk, - cb.benefit, - cb.cost_ben_ratio - ]) + uncertainty_values.append( + [ + cb.imp_meas_present, + cb.imp_meas_future, + cb.tot_climate_risk, + cb.benefit, + cb.cost_ben_ratio, + ] + ) # Transpose list return list(zip(*uncertainty_values)) diff --git a/climada/engine/unsequa/calc_delta_climate.py b/climada/engine/unsequa/calc_delta_climate.py index f266764e7..0ec1fb3af 100644 --- a/climada/engine/unsequa/calc_delta_climate.py +++ b/climada/engine/unsequa/calc_delta_climate.py @@ -21,32 +21,32 @@ __all__ = ["CalcDeltaImpact"] +import itertools import logging import time from typing import Union -import itertools -import pandas as pd import numpy as np +import pandas as pd import pathos.multiprocessing as mp -# use pathos.multiprocess fork of multiprocessing for compatibility -# wiht notebooks and other environments https://stackoverflow.com/a/65001152/12454103 - from climada.engine import ImpactCalc -from climada.engine.unsequa.input_var import InputVar -from climada.engine.unsequa.unc_output import UncImpactOutput from climada.engine.unsequa.calc_base import ( Calc, - _sample_parallel_iterator, _multiprocess_chunksize, + _sample_parallel_iterator, _transpose_chunked_data, ) +from climada.engine.unsequa.input_var import InputVar +from climada.engine.unsequa.unc_output import UncImpactOutput from climada.entity import Exposures, 
ImpactFuncSet from climada.hazard import Hazard from climada.util import log_level from climada.util.value_representation import safe_divide +# use pathos.multiprocess fork of multiprocessing for compatibility +# wiht notebooks and other environments https://stackoverflow.com/a/65001152/12454103 + LOGGER = logging.getLogger(__name__) @@ -432,18 +432,12 @@ def _map_impact_calc( else: delta_func = lambda x, y: x - y - delta_aai_agg = delta_func( - imp_final.aai_agg, imp_initial.aai_agg - ) + delta_aai_agg = delta_func(imp_final.aai_agg, imp_initial.aai_agg) - delta_freq_curve = delta_func( - freq_curve_final, freq_curve_initial - ) + delta_freq_curve = delta_func(freq_curve_final, freq_curve_initial) delta_eai_exp = ( - delta_func(eai_exp_final, eai_exp_initial) - if calc_eai_exp - else np.array([]) + delta_func(eai_exp_final, eai_exp_initial) if calc_eai_exp else np.array([]) ) delta_at_event = ( diff --git a/climada/engine/unsequa/calc_impact.py b/climada/engine/unsequa/calc_impact.py index 58aea0c94..061b3e3a2 100644 --- a/climada/engine/unsequa/calc_impact.py +++ b/climada/engine/unsequa/calc_impact.py @@ -19,34 +19,37 @@ Define Uncertainty Impact class """ -__all__ = ['CalcImpact'] +__all__ = ["CalcImpact"] +import itertools import logging import time from typing import Union -import itertools -import pandas as pd import numpy as np +import pandas as pd import pathos.multiprocessing as mp -# use pathos.multiprocess fork of multiprocessing for compatibility -# wiht notebooks and other environments https://stackoverflow.com/a/65001152/12454103 from climada.engine import ImpactCalc -from climada.engine.unsequa.input_var import InputVar -from climada.engine.unsequa.unc_output import UncImpactOutput from climada.engine.unsequa.calc_base import ( Calc, - _sample_parallel_iterator, _multiprocess_chunksize, + _sample_parallel_iterator, _transpose_chunked_data, ) +from climada.engine.unsequa.input_var import InputVar +from climada.engine.unsequa.unc_output import UncImpactOutput from climada.entity import Exposures, ImpactFuncSet from climada.hazard import Hazard from climada.util import log_level +# use pathos.multiprocess fork of multiprocessing for compatibility +# wiht notebooks and other environments https://stackoverflow.com/a/65001152/12454103 + + LOGGER = logging.getLogger(__name__) + class CalcImpact(Calc): """ Impact uncertainty caclulation class. 
@@ -79,18 +82,13 @@ class CalcImpact(Calc): """ _input_var_names = ( - 'exp_input_var', - 'impf_input_var', - 'haz_input_var', + "exp_input_var", + "impf_input_var", + "haz_input_var", ) """Names of the required uncertainty variables""" - _metric_names = ( - 'aai_agg', - 'freq_curve', - 'at_event', - 'eai_exp' - ) + _metric_names = ("aai_agg", "freq_curve", "at_event", "eai_exp") """Names of the cost benefit output metrics""" def __init__( @@ -116,22 +114,22 @@ def __init__( """ Calc.__init__(self) - self.exp_input_var = InputVar.var_to_inputvar(exp_input_var) - self.impf_input_var = InputVar.var_to_inputvar(impf_input_var) - self.haz_input_var = InputVar.var_to_inputvar(haz_input_var) + self.exp_input_var = InputVar.var_to_inputvar(exp_input_var) + self.impf_input_var = InputVar.var_to_inputvar(impf_input_var) + self.haz_input_var = InputVar.var_to_inputvar(haz_input_var) self.value_unit = self.exp_input_var.evaluate().value_unit self.check_distr() - - def uncertainty(self, - unc_sample, - rp=None, - calc_eai_exp=False, - calc_at_event=False, - processes=1, - chunksize=None - ): + def uncertainty( + self, + unc_sample, + rp=None, + calc_eai_exp=False, + calc_at_event=False, + processes=1, + chunksize=None, + ): """ Computes the impact for each sample in unc_data.sample_df. @@ -198,9 +196,10 @@ def uncertainty(self, """ if unc_sample.samples_df.empty: - raise ValueError("No sample was found. Please create one first" - "using UncImpact.make_sample(N)") - + raise ValueError( + "No sample was found. Please create one first" + "using UncImpact.make_sample(N)" + ) # copy may not be needed, but is kept to prevent potential # data corruption issues. The computational cost should be @@ -212,7 +211,7 @@ def uncertainty(self, unit = self.value_unit if rp is None: - rp=[5, 10, 20, 50, 100, 250] + rp = [5, 10, 20, 50, 100, 250] self.rp = rp self.calc_eai_exp = calc_eai_exp @@ -220,44 +219,40 @@ def uncertainty(self, one_sample = samples_df.iloc[0:1] start = time.time() - self._compute_imp_metrics( - one_sample, chunksize=1, processes=1 - ) - elapsed_time = (time.time() - start) + self._compute_imp_metrics(one_sample, chunksize=1, processes=1) + elapsed_time = time.time() - start self.est_comp_time(unc_sample.n_samples, elapsed_time, processes) - [ - aai_agg_list, - freq_curve_list, - eai_exp_list, - at_event_list - ] = self._compute_imp_metrics( - samples_df, chunksize=chunksize, processes=processes + [aai_agg_list, freq_curve_list, eai_exp_list, at_event_list] = ( + self._compute_imp_metrics( + samples_df, chunksize=chunksize, processes=processes ) + ) # Assign computed impact distribution data to self - aai_agg_unc_df = pd.DataFrame(aai_agg_list, - columns = ['aai_agg']) - freq_curve_unc_df = pd.DataFrame(freq_curve_list, - columns=['rp' + str(n) for n in rp]) - eai_exp_unc_df = pd.DataFrame(eai_exp_list) + aai_agg_unc_df = pd.DataFrame(aai_agg_list, columns=["aai_agg"]) + freq_curve_unc_df = pd.DataFrame( + freq_curve_list, columns=["rp" + str(n) for n in rp] + ) + eai_exp_unc_df = pd.DataFrame(eai_exp_list) # Note: sparse dataframes are not used as they are not nativel y compatible with .to_hdf5 at_event_unc_df = pd.DataFrame(at_event_list) if calc_eai_exp: exp = self.exp_input_var.evaluate() - coord_df = exp.gdf[['latitude', 'longitude']] + coord_df = exp.gdf[["latitude", "longitude"]] else: coord_df = pd.DataFrame([]) - return UncImpactOutput(samples_df=samples_df, - unit=unit, - aai_agg_unc_df=aai_agg_unc_df, - freq_curve_unc_df=freq_curve_unc_df, - eai_exp_unc_df=eai_exp_unc_df, - 
at_event_unc_df=at_event_unc_df, - coord_df=coord_df - ) + return UncImpactOutput( + samples_df=samples_df, + unit=unit, + aai_agg_unc_df=aai_agg_unc_df, + freq_curve_unc_df=freq_curve_unc_df, + eai_exp_unc_df=eai_exp_unc_df, + at_event_unc_df=at_event_unc_df, + coord_df=coord_df, + ) def _compute_imp_metrics(self, samples_df, chunksize, processes): """Compute the uncertainty metrics @@ -276,8 +271,8 @@ def _compute_imp_metrics(self, samples_df, chunksize, processes): list values of impact metrics per sample """ - #Compute impact distributions - with log_level(level='ERROR', name_prefix='climada'): + # Compute impact distributions + with log_level(level="ERROR", name_prefix="climada"): p_iterator = _sample_parallel_iterator( samples=samples_df, chunksize=chunksize, @@ -290,24 +285,25 @@ def _compute_imp_metrics(self, samples_df, chunksize, processes): ) if processes > 1: with mp.Pool(processes=processes) as pool: - LOGGER.info('Using %s CPUs.', processes) - imp_metrics = pool.starmap( - _map_impact_calc, p_iterator - ) + LOGGER.info("Using %s CPUs.", processes) + imp_metrics = pool.starmap(_map_impact_calc, p_iterator) else: - imp_metrics = itertools.starmap( - _map_impact_calc, p_iterator - ) + imp_metrics = itertools.starmap(_map_impact_calc, p_iterator) - #Perform the actual computation - with log_level(level='ERROR', name_prefix='climada'): + # Perform the actual computation + with log_level(level="ERROR", name_prefix="climada"): return _transpose_chunked_data(imp_metrics) def _map_impact_calc( - sample_chunks, exp_input_var, impf_input_var, haz_input_var, - rp, calc_eai_exp, calc_at_event - ): + sample_chunks, + exp_input_var, + impf_input_var, + haz_input_var, + rp, + calc_eai_exp, + calc_at_event, +): """ Map to compute impact for all parameter samples in parallel @@ -347,8 +343,9 @@ def _map_impact_calc( haz = haz_input_var.evaluate(**haz_samples) exp.assign_centroids(haz, overwrite=False) - imp = ImpactCalc(exposures=exp, impfset=impf, hazard=haz)\ - .impact(assign_centroids=False, save_mat=False) + imp = ImpactCalc(exposures=exp, impfset=impf, hazard=haz).impact( + assign_centroids=False, save_mat=False + ) # Extract from climada.impact the chosen metrics freq_curve = imp.calc_freq_curve(rp).impact @@ -359,7 +356,7 @@ def _map_impact_calc( eai_exp = np.array([]) if calc_at_event: - at_event= imp.at_event + at_event = imp.at_event else: at_event = np.array([]) diff --git a/climada/engine/unsequa/input_var.py b/climada/engine/unsequa/input_var.py index 62d7729f4..56a47fe84 100644 --- a/climada/engine/unsequa/input_var.py +++ b/climada/engine/unsequa/input_var.py @@ -20,24 +20,25 @@ """ import copy +import logging from functools import partial from itertools import zip_longest -import logging from typing import Dict -import scipy as sp -import numpy as np import matplotlib.pyplot as plt +import numpy as np +import scipy as sp -from climada.entity import Entity, DiscRates +from climada.entity import DiscRates, Entity LOGGER = logging.getLogger(__name__) -__all__ = ['InputVar'] +__all__ = ["InputVar"] + +FIG_W, FIG_H = 8, 5 # default figize width/heigh column/work multiplicators -FIG_W, FIG_H = 8, 5 #default figize width/heigh column/work multiplicators -class InputVar(): +class InputVar: """ Input variable for the uncertainty analysis @@ -148,13 +149,9 @@ def evaluate(self, **params): """ if not params: - params = { - param: distr.mean() - for param, distr in self.distr_dict.items() - } + params = {param: distr.mean() for param, distr in self.distr_dict.items()} return 
self.func(**params) - def plot(self, figsize=None): """ Plot the distributions of the parameters of the uncertainty variable. @@ -184,29 +181,28 @@ def plot(self, figsize=None): flat_axes = axes.flatten() else: flat_axes = np.array([axes]) - for ax, name_distr in zip_longest(flat_axes, - self.distr_dict.items(), - fillvalue=None): + for ax, name_distr in zip_longest( + flat_axes, self.distr_dict.items(), fillvalue=None + ): if name_distr is None: ax.remove() continue (param_name, distr) = name_distr low = distr.ppf(1e-10) - high = distr.ppf(1-1e-10) + high = distr.ppf(1 - 1e-10) n = 100 try: x = np.linspace(low, high, n) ax.plot(x, distr.pdf(x), label=param_name) except AttributeError: if (high - low) > n: - x = np.arange(low, high, int((high-low) / n)) + x = np.arange(low, high, int((high - low) / n)) else: - x = np.arange(low, high+1) + x = np.arange(low, high + 1) ax.vlines(x, 0, distr.pmf(x), label=param_name) ax.legend() return axes - @staticmethod def var_to_inputvar(var): """ @@ -229,7 +225,6 @@ def var_to_inputvar(var): return InputVar(func=lambda: var, distr_dict={}) - @staticmethod def haz(haz_list, n_ev=None, bounds_int=None, bounds_frac=None, bounds_freq=None): """ @@ -282,21 +277,21 @@ def haz(haz_list, n_ev=None, bounds_int=None, bounds_frac=None, bounds_freq=None """ n_haz = len(haz_list) - kwargs = {'haz_list': haz_list, 'n_ev': n_ev} + kwargs = {"haz_list": haz_list, "n_ev": n_ev} if n_ev is None: - kwargs['HE'] = None + kwargs["HE"] = None if bounds_int is None: - kwargs['HI'] = None + kwargs["HI"] = None if bounds_frac is None: - kwargs['HA'] = None + kwargs["HA"] = None if bounds_freq is None: - kwargs['HF'] = None + kwargs["HF"] = None if n_haz == 1: - kwargs['HL'] = 0 + kwargs["HL"] = 0 return InputVar( partial(_haz_uncfunc, **kwargs), - _haz_unc_dict(n_ev, bounds_int, bounds_frac, bounds_freq, n_haz) - ) + _haz_unc_dict(n_ev, bounds_int, bounds_frac, bounds_freq, n_haz), + ) @staticmethod def exp(exp_list, bounds_totval=None, bounds_noise=None): @@ -339,23 +334,28 @@ def exp(exp_list, bounds_totval=None, bounds_noise=None): """ n_exp = len(exp_list) - kwargs = {'exp_list': exp_list, 'bounds_noise': bounds_noise} + kwargs = {"exp_list": exp_list, "bounds_noise": bounds_noise} if bounds_noise is None: - kwargs['EN'] = None + kwargs["EN"] = None if bounds_totval is None: - kwargs['ET'] = None + kwargs["ET"] = None if n_exp == 1: - kwargs['EL'] = 0 + kwargs["EL"] = 0 return InputVar( partial(_exp_uncfunc, **kwargs), - _exp_unc_dict(bounds_totval=bounds_totval, - bounds_noise=bounds_noise, - n_exp=n_exp) - ) + _exp_unc_dict( + bounds_totval=bounds_totval, bounds_noise=bounds_noise, n_exp=n_exp + ), + ) @staticmethod - def impfset(impf_set_list, haz_id_dict= None, bounds_mdd=None, bounds_paa=None, - bounds_impfi=None): + def impfset( + impf_set_list, + haz_id_dict=None, + bounds_mdd=None, + bounds_paa=None, + bounds_impfi=None, + ): """ Helper wrapper for basic impact function set uncertainty input variable. 
@@ -411,30 +411,37 @@ def impfset(impf_set_list, haz_id_dict= None, bounds_mdd=None, bounds_paa=None, """ n_impf_set = len(impf_set_list) - kwargs = {'impf_set_list': impf_set_list} + kwargs = {"impf_set_list": impf_set_list} if bounds_mdd is None: - kwargs['MDD'] = None + kwargs["MDD"] = None if bounds_paa is None: - kwargs['PAA'] = None + kwargs["PAA"] = None if bounds_impfi is None: - kwargs['IFi'] = None + kwargs["IFi"] = None if haz_id_dict is None: haz_id_dict = impf_set_list[0].get_ids() if n_impf_set == 1: - kwargs['IL'] = 0 + kwargs["IL"] = 0 return InputVar( - partial( - _impfset_uncfunc, haz_id_dict=haz_id_dict, - **kwargs - ), - _impfset_unc_dict(bounds_impfi, bounds_mdd, bounds_paa, n_impf_set) + partial(_impfset_uncfunc, haz_id_dict=haz_id_dict, **kwargs), + _impfset_unc_dict(bounds_impfi, bounds_mdd, bounds_paa, n_impf_set), ) @staticmethod - def ent(impf_set_list, disc_rate, exp_list, meas_set, haz_id_dict, - bounds_disc=None, bounds_cost=None, bounds_totval=None, - bounds_noise=None, bounds_mdd=None, bounds_paa=None, - bounds_impfi=None): + def ent( + impf_set_list, + disc_rate, + exp_list, + meas_set, + haz_id_dict, + bounds_disc=None, + bounds_cost=None, + bounds_totval=None, + bounds_noise=None, + bounds_mdd=None, + bounds_paa=None, + bounds_impfi=None, + ): """ Helper wrapper for basic entity set uncertainty input variable. @@ -532,42 +539,61 @@ def ent(impf_set_list, disc_rate, exp_list, meas_set, haz_id_dict, kwargs = {} if bounds_mdd is None: - kwargs['MDD'] = None + kwargs["MDD"] = None if bounds_paa is None: - kwargs['PAA'] = None + kwargs["PAA"] = None if bounds_impfi is None: - kwargs['IFi'] = None - if n_impf_set== 1: - kwargs['IL'] = 0 + kwargs["IFi"] = None + if n_impf_set == 1: + kwargs["IL"] = 0 if bounds_disc is None: - kwargs['DR'] = None + kwargs["DR"] = None if bounds_cost is None: - kwargs['CO'] = None + kwargs["CO"] = None if bounds_totval is None: - kwargs['ET'] = None + kwargs["ET"] = None if bounds_noise is None: - kwargs['EN'] = None + kwargs["EN"] = None if n_exp == 1: - kwargs['EL'] = 0 + kwargs["EL"] = 0 return InputVar( - partial(_ent_unc_func, - impf_set_list=impf_set_list, haz_id_dict=haz_id_dict, - disc_rate=disc_rate, bounds_noise=bounds_noise, - exp_list=exp_list, meas_set=meas_set, **kwargs - ), - _ent_unc_dict(bounds_totval=bounds_totval, bounds_noise=bounds_noise, - bounds_impfi=bounds_impfi, n_impf_set=n_impf_set, - bounds_mdd=bounds_mdd, - bounds_paa=bounds_paa, bounds_disc=bounds_disc, - bounds_cost=bounds_cost, n_exp=n_exp,) + partial( + _ent_unc_func, + impf_set_list=impf_set_list, + haz_id_dict=haz_id_dict, + disc_rate=disc_rate, + bounds_noise=bounds_noise, + exp_list=exp_list, + meas_set=meas_set, + **kwargs + ), + _ent_unc_dict( + bounds_totval=bounds_totval, + bounds_noise=bounds_noise, + bounds_impfi=bounds_impfi, + n_impf_set=n_impf_set, + bounds_mdd=bounds_mdd, + bounds_paa=bounds_paa, + bounds_disc=bounds_disc, + bounds_cost=bounds_cost, + n_exp=n_exp, + ), ) @staticmethod - def entfut(impf_set_list, exp_list, meas_set, haz_id_dict, - bounds_cost=None, bounds_eg=None, bounds_noise=None, - bounds_impfi=None, bounds_mdd=None, bounds_paa=None, - ): + def entfut( + impf_set_list, + exp_list, + meas_set, + haz_id_dict, + bounds_cost=None, + bounds_eg=None, + bounds_noise=None, + bounds_impfi=None, + bounds_mdd=None, + bounds_paa=None, + ): """ Helper wrapper for basic future entity set uncertainty input variable. 
@@ -656,35 +682,46 @@ def entfut(impf_set_list, exp_list, meas_set, haz_id_dict, kwargs = {} if bounds_mdd is None: - kwargs['MDD'] = None + kwargs["MDD"] = None if bounds_paa is None: - kwargs['PAA'] = None + kwargs["PAA"] = None if bounds_impfi is None: - kwargs['IFi'] = None + kwargs["IFi"] = None if n_impf_set == 1: - kwargs['IL'] = 0 + kwargs["IL"] = 0 if bounds_cost is None: - kwargs['CO'] = None + kwargs["CO"] = None if bounds_eg is None: - kwargs['EG'] = None + kwargs["EG"] = None if bounds_noise is None: - kwargs['EN'] = None + kwargs["EN"] = None if n_exp == 1: - kwargs['EL'] = 0 + kwargs["EL"] = 0 return InputVar( - partial(_entfut_unc_func, haz_id_dict=haz_id_dict, - bounds_noise=bounds_noise, impf_set_list=impf_set_list, - exp_list=exp_list, meas_set=meas_set, **kwargs), - _entfut_unc_dict(bounds_eg=bounds_eg, bounds_noise=bounds_noise, - bounds_impfi=bounds_impfi, n_impf_set=n_impf_set, - bounds_paa=bounds_paa, - bounds_mdd=bounds_mdd, bounds_cost=bounds_cost, - n_exp=n_exp) + partial( + _entfut_unc_func, + haz_id_dict=haz_id_dict, + bounds_noise=bounds_noise, + impf_set_list=impf_set_list, + exp_list=exp_list, + meas_set=meas_set, + **kwargs + ), + _entfut_unc_dict( + bounds_eg=bounds_eg, + bounds_noise=bounds_noise, + bounds_impfi=bounds_impfi, + n_impf_set=n_impf_set, + bounds_paa=bounds_paa, + bounds_mdd=bounds_mdd, + bounds_cost=bounds_cost, + n_exp=n_exp, + ), ) -#Hazard +# Hazard def _haz_uncfunc(HE, HI, HA, HF, HL, haz_list, n_ev): haz_tmp = copy.deepcopy(haz_list[int(HL)]) if HE is not None: @@ -699,46 +736,50 @@ def _haz_uncfunc(HE, HI, HA, HF, HL, haz_list, n_ev): haz_tmp.frequency = np.multiply(haz_tmp.frequency, HF) return haz_tmp + def _haz_unc_dict(n_ev, bounds_int, bounds_frac, bounds_freq, n_haz): hud = {} if n_ev is not None: - hud['HE'] = sp.stats.randint(0, 2**32 - 1) #seed for rnd generator + hud["HE"] = sp.stats.randint(0, 2**32 - 1) # seed for rnd generator if bounds_int is not None: imin, idelta = bounds_int[0], bounds_int[1] - bounds_int[0] - hud['HI'] = sp.stats.uniform(imin, idelta) + hud["HI"] = sp.stats.uniform(imin, idelta) if bounds_frac is not None: amin, adelta = bounds_frac[0], bounds_frac[1] - bounds_frac[0] - hud['HA'] = sp.stats.uniform(amin, adelta) + hud["HA"] = sp.stats.uniform(amin, adelta) if bounds_freq is not None: fmin, fdelta = bounds_freq[0], bounds_freq[1] - bounds_freq[0] - hud['HF'] = sp.stats.uniform(fmin, fdelta) + hud["HF"] = sp.stats.uniform(fmin, fdelta) if n_haz > 1: - hud['HL'] = sp.stats.randint(0, n_haz) + hud["HL"] = sp.stats.randint(0, n_haz) return hud -#Exposure + +# Exposure def _exp_uncfunc(EN, ET, EL, exp_list, bounds_noise): exp_tmp = exp_list[int(EL)].copy(deep=True) if EN is not None: rng = np.random.RandomState(int(EN)) - rnd_vals = rng.uniform(bounds_noise[0], bounds_noise[1], size = len(exp_tmp.gdf)) - exp_tmp.gdf['value'] *= rnd_vals + rnd_vals = rng.uniform(bounds_noise[0], bounds_noise[1], size=len(exp_tmp.gdf)) + exp_tmp.gdf["value"] *= rnd_vals if ET is not None: - exp_tmp.gdf['value'] *= ET + exp_tmp.gdf["value"] *= ET return exp_tmp + def _exp_unc_dict(bounds_totval, bounds_noise, n_exp): eud = {} if bounds_totval is not None: tmin, tmax = bounds_totval[0], bounds_totval[1] - bounds_totval[0] - eud['ET'] = sp.stats.uniform(tmin, tmax) + eud["ET"] = sp.stats.uniform(tmin, tmax) if bounds_noise is not None: - eud['EN'] = sp.stats.randint(0, 2**32 - 1) #seed for rnd generator + eud["EN"] = sp.stats.randint(0, 2**32 - 1) # seed for rnd generator if n_exp > 1: - eud['EL'] = sp.stats.randint(0, n_exp) + 
eud["EL"] = sp.stats.randint(0, n_exp) return eud -#Impact function set + +# Impact function set def _impfset_uncfunc(IFi, MDD, PAA, IL, impf_set_list, haz_id_dict): impf_set_tmp = copy.deepcopy(impf_set_list[int(IL)]) for haz_type, fun_id_list in haz_id_dict.items(): @@ -746,50 +787,57 @@ def _impfset_uncfunc(IFi, MDD, PAA, IL, impf_set_list, haz_id_dict): if MDD is not None: new_mdd = np.minimum( impf_set_tmp.get_func(haz_type=haz_type, fun_id=fun_id).mdd * MDD, - 1.0 - ) + 1.0, + ) impf_set_tmp.get_func(haz_type=haz_type, fun_id=fun_id).mdd = new_mdd if PAA is not None: new_paa = np.minimum( impf_set_tmp.get_func(haz_type=haz_type, fun_id=fun_id).paa * PAA, - 1.0 - ) + 1.0, + ) impf_set_tmp.get_func(haz_type=haz_type, fun_id=fun_id).paa = new_paa if IFi is not None: new_int = np.maximum( - impf_set_tmp.get_func(haz_type=haz_type, fun_id=fun_id).intensity + IFi, - 0.0 - ) - impf_set_tmp.get_func(haz_type=haz_type, fun_id=fun_id).intensity = new_int + impf_set_tmp.get_func(haz_type=haz_type, fun_id=fun_id).intensity + + IFi, + 0.0, + ) + impf_set_tmp.get_func(haz_type=haz_type, fun_id=fun_id).intensity = ( + new_int + ) return impf_set_tmp + def _impfset_unc_dict(bounds_impfi, bounds_mdd, bounds_paa, n_impf_set): iud = {} if bounds_impfi is not None: xmin, xdelta = bounds_impfi[0], bounds_impfi[1] - bounds_impfi[0] - iud['IFi'] = sp.stats.uniform(xmin, xdelta) + iud["IFi"] = sp.stats.uniform(xmin, xdelta) if bounds_paa is not None: xmin, xdelta = bounds_paa[0], bounds_paa[1] - bounds_paa[0] - iud['PAA'] = sp.stats.uniform(xmin, xdelta) + iud["PAA"] = sp.stats.uniform(xmin, xdelta) if bounds_mdd is not None: xmin, xdelta = bounds_mdd[0], bounds_mdd[1] - bounds_mdd[0] - iud['MDD'] = sp.stats.uniform(xmin, xdelta) + iud["MDD"] = sp.stats.uniform(xmin, xdelta) if n_impf_set > 1: - iud['IL'] = sp.stats.randint(0, n_impf_set) + iud["IL"] = sp.stats.randint(0, n_impf_set) return iud -#Entity + +# Entity def _disc_uncfunc(DR, disc_rate): disc = copy.deepcopy(disc_rate) if DR is not None: disc.rates = np.ones(disc.years.size) * DR return disc + def _disc_unc_dict(bounds_disk): if bounds_disk is None: return {} dmin, ddelta = bounds_disk[0], bounds_disk[1] - bounds_disk[0] - return {'DR': sp.stats.uniform(dmin, ddelta)} + return {"DR": sp.stats.uniform(dmin, ddelta)} + def _meas_set_uncfunc(CO, meas_set): meas_set_tmp = copy.deepcopy(meas_set) @@ -799,48 +847,105 @@ def _meas_set_uncfunc(CO, meas_set): meas.cost *= CO return meas_set_tmp + def _meas_set_unc_dict(bounds_cost): cmin, cdelta = bounds_cost[0], bounds_cost[1] - bounds_cost[0] - return {'CO': sp.stats.uniform(cmin, cdelta)} - -def _ent_unc_func(EN, ET, EL, IFi, IL, MDD, PAA, CO, DR, bounds_noise, - impf_set_list, haz_id_dict, disc_rate, exp_list, meas_set): + return {"CO": sp.stats.uniform(cmin, cdelta)} + + +def _ent_unc_func( + EN, + ET, + EL, + IFi, + IL, + MDD, + PAA, + CO, + DR, + bounds_noise, + impf_set_list, + haz_id_dict, + disc_rate, + exp_list, + meas_set, +): exposures = _exp_uncfunc(EN, ET, EL, exp_list, bounds_noise) - impact_func_set = _impfset_uncfunc(IFi, MDD, PAA, IL, impf_set_list=impf_set_list, - haz_id_dict=haz_id_dict) + impact_func_set = _impfset_uncfunc( + IFi, MDD, PAA, IL, impf_set_list=impf_set_list, haz_id_dict=haz_id_dict + ) measure_set = _meas_set_uncfunc(CO, meas_set=meas_set) disc_rates = _disc_uncfunc(DR, disc_rate) return Entity(exposures, disc_rates, impact_func_set, measure_set) -def _ent_unc_dict(bounds_totval, bounds_noise, bounds_impfi, bounds_mdd, - bounds_paa, n_impf_set, bounds_disc, 
bounds_cost, n_exp): + +def _ent_unc_dict( + bounds_totval, + bounds_noise, + bounds_impfi, + bounds_mdd, + bounds_paa, + n_impf_set, + bounds_disc, + bounds_cost, + n_exp, +): ent_unc_dict = _exp_unc_dict(bounds_totval, bounds_noise, n_exp) - ent_unc_dict.update(_impfset_unc_dict(bounds_impfi, bounds_mdd, bounds_paa, n_impf_set)) + ent_unc_dict.update( + _impfset_unc_dict(bounds_impfi, bounds_mdd, bounds_paa, n_impf_set) + ) ent_unc_dict.update(_disc_unc_dict(bounds_disc)) ent_unc_dict.update(_meas_set_unc_dict(bounds_cost)) - return ent_unc_dict - -def _entfut_unc_func(EN, EG, EL, IFi, IL, MDD, PAA, CO, bounds_noise, - impf_set_list, haz_id_dict, exp_list, meas_set): - exposures = _exp_uncfunc(EN=EN, ET=EG, EL=EL, exp_list=exp_list, bounds_noise=bounds_noise) - impact_funcs = _impfset_uncfunc(IFi, MDD, PAA, IL, impf_set_list=impf_set_list, - haz_id_dict=haz_id_dict) + return ent_unc_dict + + +def _entfut_unc_func( + EN, + EG, + EL, + IFi, + IL, + MDD, + PAA, + CO, + bounds_noise, + impf_set_list, + haz_id_dict, + exp_list, + meas_set, +): + exposures = _exp_uncfunc( + EN=EN, ET=EG, EL=EL, exp_list=exp_list, bounds_noise=bounds_noise + ) + impact_funcs = _impfset_uncfunc( + IFi, MDD, PAA, IL, impf_set_list=impf_set_list, haz_id_dict=haz_id_dict + ) measures = _meas_set_uncfunc(CO, meas_set=meas_set) - disc_rates = DiscRates() #Disc rate of future entity ignored in cost_benefit.calc() + disc_rates = ( + DiscRates() + ) # Disc rate of future entity ignored in cost_benefit.calc() return Entity(exposures, disc_rates, impact_funcs, measures) -def _entfut_unc_dict(bounds_impfi, bounds_mdd, - bounds_paa, n_impf_set, bounds_eg, bounds_noise, - bounds_cost, n_exp): + +def _entfut_unc_dict( + bounds_impfi, + bounds_mdd, + bounds_paa, + n_impf_set, + bounds_eg, + bounds_noise, + bounds_cost, + n_exp, +): eud = {} if bounds_eg is not None: gmin, gmax = bounds_eg[0], bounds_eg[1] - bounds_eg[0] - eud['EG'] = sp.stats.uniform(gmin, gmax) + eud["EG"] = sp.stats.uniform(gmin, gmax) if bounds_noise is not None: - eud['EN'] = sp.stats.randint(0, 2**32 - 1) #seed for rnd generator + eud["EN"] = sp.stats.randint(0, 2**32 - 1) # seed for rnd generator if n_exp > 1: - eud['EL'] = sp.stats.randint(0, n_exp) + eud["EL"] = sp.stats.randint(0, n_exp) eud.update(_impfset_unc_dict(bounds_impfi, bounds_mdd, bounds_paa, n_impf_set)) if bounds_cost is not None: eud.update(_meas_set_unc_dict(bounds_cost)) diff --git a/climada/engine/unsequa/test/test_unsequa.py b/climada/engine/unsequa/test/test_unsequa.py index 0bc05f0bb..c53162e8a 100755 --- a/climada/engine/unsequa/test/test_unsequa.py +++ b/climada/engine/unsequa/test/test_unsequa.py @@ -19,39 +19,50 @@ Test uncertainty module. 
""" -import unittest import copy import time +import unittest +import matplotlib.pyplot as plt import numpy as np import pandas as pd -import matplotlib.pyplot as plt import scipy as sp - from tables.exceptions import HDF5ExtError -from climada.entity import ImpactFunc, ImpactFuncSet -from climada.entity.entity_def import Entity -from climada.entity import Exposures -from climada.hazard import Hazard from climada.engine import ImpactCalc -from climada.engine.unsequa import InputVar, CalcImpact, UncOutput, CalcCostBenefit, CalcDeltaImpact +from climada.engine.unsequa import ( + CalcCostBenefit, + CalcDeltaImpact, + CalcImpact, + InputVar, + UncOutput, +) from climada.engine.unsequa.calc_base import LOGGER - - -from climada.util.constants import (EXP_DEMO_H5, HAZ_DEMO_H5, ENT_DEMO_TODAY, ENT_DEMO_FUTURE, - TEST_UNC_OUTPUT_IMPACT, TEST_UNC_OUTPUT_COSTBEN) +from climada.entity import Exposures, ImpactFunc, ImpactFuncSet +from climada.entity.entity_def import Entity +from climada.hazard import Hazard from climada.util.api_client import Client - - -test_unc_output_impact = Client().get_dataset_file(name=TEST_UNC_OUTPUT_IMPACT, status='test_dataset') -test_unc_output_costben = Client().get_dataset_file(name=TEST_UNC_OUTPUT_COSTBEN, status='test_dataset') +from climada.util.constants import ( + ENT_DEMO_FUTURE, + ENT_DEMO_TODAY, + EXP_DEMO_H5, + HAZ_DEMO_H5, + TEST_UNC_OUTPUT_COSTBEN, + TEST_UNC_OUTPUT_IMPACT, +) + +test_unc_output_impact = Client().get_dataset_file( + name=TEST_UNC_OUTPUT_IMPACT, status="test_dataset" +) +test_unc_output_costben = Client().get_dataset_file( + name=TEST_UNC_OUTPUT_COSTBEN, status="test_dataset" +) def impf_dem(x_paa=1, x_mdd=1): - haz_type = 'TC' + haz_type = "TC" id = 1 - intensity_unit = 'm/s' + intensity_unit = "m/s" intensity = np.linspace(0, 150, num=100) mdd = np.repeat(1, len(intensity)) * x_mdd paa = np.arange(0, len(intensity)) / len(intensity) * x_paa @@ -69,7 +80,7 @@ def exp_dem(x_exp=1, exp=None): # possibly raised by pd.HDFStore when the file is locked by another process due to multiprocessing time.sleep(0.1) exp_tmp = exp.copy(deep=True) - exp_tmp.gdf['value'] *= x_exp + exp_tmp.gdf["value"] *= x_exp return exp_tmp @@ -83,19 +94,19 @@ def haz_dem(x_haz=1, haz=None): def make_input_vars(): exp = exp_dem - exp_distr = {"x_exp": sp.stats.uniform(0.8,2), - } + exp_distr = { + "x_exp": sp.stats.uniform(0.8, 2), + } exp_unc = InputVar(exp, exp_distr) impf = impf_dem - impf_distr = {"x_paa": sp.stats.beta(0.5, 1), - "x_mdd": sp.stats.uniform(0.8, 1.2) - } + impf_distr = {"x_paa": sp.stats.beta(0.5, 1), "x_mdd": sp.stats.uniform(0.8, 1.2)} impf_unc = InputVar(impf, impf_distr) haz = haz_dem - haz_distr = {"x_haz": sp.stats.alpha(a=2, loc=1, scale=1), - } + haz_distr = { + "x_haz": sp.stats.alpha(a=2, loc=1, scale=1), + } haz_unc = InputVar(haz, haz_distr) return exp_unc, impf_unc, haz_unc @@ -119,120 +130,96 @@ def make_costben_iv(): entdem = ent_dem() ent_iv = InputVar.ent( - impf_set_list = [entdem.impact_funcs], - disc_rate = entdem.disc_rates, - exp_list = [entdem.exposures], - meas_set = entdem.measures, + impf_set_list=[entdem.impact_funcs], + disc_rate=entdem.disc_rates, + exp_list=[entdem.exposures], + meas_set=entdem.measures, bounds_noise=[0.3, 1.9], bounds_cost=[0.5, 1.5], bounds_impfi=[-2, 5], - haz_id_dict={'TC': [1]} - ) + haz_id_dict={"TC": [1]}, + ) entfutdem = ent_fut_dem() entfut_iv = InputVar.entfut( - impf_set_list = [entfutdem.impact_funcs], - exp_list = [entfutdem.exposures], - meas_set = entfutdem.measures, + 
impf_set_list=[entfutdem.impact_funcs], + exp_list=[entfutdem.exposures], + meas_set=entfutdem.measures, bounds_eg=[0.8, 1.5], bounds_mdd=[0.7, 0.9], bounds_paa=[1.3, 2], - haz_id_dict={'TC': [1]} - ) + haz_id_dict={"TC": [1]}, + ) return ent_iv, entfut_iv class TestInputVar(unittest.TestCase): - """ Test UncVar class """ + """Test UncVar class""" def test_init_pass(self): impf = impf_dem - distr_dict = {"x_paa": sp.stats.beta(0.5, 1), - "x_mdd": sp.stats.uniform(0.8, 1.2) - } + distr_dict = { + "x_paa": sp.stats.beta(0.5, 1), + "x_mdd": sp.stats.uniform(0.8, 1.2), + } impf_iv = InputVar(impf, distr_dict) - self.assertListEqual(impf_iv.labels, ['x_paa', 'x_mdd']) + self.assertListEqual(impf_iv.labels, ["x_paa", "x_mdd"]) self.assertTrue(isinstance(impf_iv.distr_dict, dict)) def test_evaluate_pass(self): impf = impf_dem - distr_dict = {"x_paa": sp.stats.beta(0.5, 1), - "x_mdd": sp.stats.uniform(0.8, 0.4) - } + distr_dict = { + "x_paa": sp.stats.beta(0.5, 1), + "x_mdd": sp.stats.uniform(0.8, 0.4), + } impf_iv = InputVar(impf, distr_dict) - #Direct function evaluate - impf_eval = impf_iv.func(**{'x_paa': 0.8, 'x_mdd': 1.1}) + # Direct function evaluate + impf_eval = impf_iv.func(**{"x_paa": 0.8, "x_mdd": 1.1}) impf_true = impf_dem(x_paa=0.8, x_mdd=1.1) self.assertEqual(impf_eval.size(), impf_true.size()) - impf_func1 = impf_eval.get_func()['TC'][1] - impf_func2 = impf_true.get_func()['TC'][1] - np.testing.assert_array_equal( - impf_func1.intensity, - impf_func2.intensity - ) - np.testing.assert_array_equal( - impf_func1.mdd, - impf_func2.mdd - ) - np.testing.assert_array_equal( - impf_func1.paa, - impf_func2.paa - ) + impf_func1 = impf_eval.get_func()["TC"][1] + impf_func2 = impf_true.get_func()["TC"][1] + np.testing.assert_array_equal(impf_func1.intensity, impf_func2.intensity) + np.testing.assert_array_equal(impf_func1.mdd, impf_func2.mdd) + np.testing.assert_array_equal(impf_func1.paa, impf_func2.paa) self.assertEqual(impf_func1.id, impf_func2.id) self.assertEqual(impf_func1.haz_type, impf_func2.haz_type) - #Specific evaluate + # Specific evaluate impf_eval = impf_iv.evaluate(x_paa=0.8, x_mdd=1.1) impf_true = impf_dem(x_paa=0.8, x_mdd=1.1) self.assertEqual(impf_eval.size(), impf_true.size()) - impf_func1 = impf_eval.get_func()['TC'][1] - impf_func2 = impf_true.get_func()['TC'][1] - np.testing.assert_array_equal( - impf_func1.intensity, - impf_func2.intensity - ) - np.testing.assert_array_equal( - impf_func1.mdd, - impf_func2.mdd - ) - np.testing.assert_array_equal( - impf_func1.paa, - impf_func2.paa - ) + impf_func1 = impf_eval.get_func()["TC"][1] + impf_func2 = impf_true.get_func()["TC"][1] + np.testing.assert_array_equal(impf_func1.intensity, impf_func2.intensity) + np.testing.assert_array_equal(impf_func1.mdd, impf_func2.mdd) + np.testing.assert_array_equal(impf_func1.paa, impf_func2.paa) self.assertEqual(impf_func1.id, impf_func2.id) self.assertEqual(impf_func1.haz_type, impf_func2.haz_type) - #Average evaluate (default) + # Average evaluate (default) impf_eval = impf_iv.evaluate() impf_true = impf_dem(x_paa=0.3333333333333333, x_mdd=1.0) self.assertEqual(impf_eval.size(), impf_true.size()) - impf_func1 = impf_eval.get_func()['TC'][1] - impf_func2 = impf_true.get_func()['TC'][1] - np.testing.assert_array_almost_equal( - impf_func1.intensity, - impf_func2.intensity - ) - np.testing.assert_array_almost_equal( - impf_func1.mdd, - impf_func2.mdd - ) - np.testing.assert_array_almost_equal( - impf_func1.paa, - impf_func2.paa - ) + impf_func1 = impf_eval.get_func()["TC"][1] + impf_func2 = 
impf_true.get_func()["TC"][1] + np.testing.assert_array_almost_equal(impf_func1.intensity, impf_func2.intensity) + np.testing.assert_array_almost_equal(impf_func1.mdd, impf_func2.mdd) + np.testing.assert_array_almost_equal(impf_func1.paa, impf_func2.paa) self.assertEqual(impf_func1.id, impf_func2.id) self.assertEqual(impf_func1.haz_type, impf_func2.haz_type) def test_plot_pass(self): impf = impf_dem() - distr_dict = {"x_paa": sp.stats.beta(0.5, 1), - "x_mdd": sp.stats.uniform(0.8, 1.2), - "x_lit": sp.stats.randint(0, 10) - } + distr_dict = { + "x_paa": sp.stats.beta(0.5, 1), + "x_mdd": sp.stats.uniform(0.8, 1.2), + "x_lit": sp.stats.randint(0, 10), + } impf_iv = InputVar(impf, distr_dict) self.assertIsNotNone(impf_iv.plot()) plt.close() @@ -240,8 +227,7 @@ def test_plot_pass(self): def test_var_to_inputvar(self): exp = exp_dem() - distr_dict = {"x_exp": sp.stats.uniform(0.8,1.2) - } + distr_dict = {"x_exp": sp.stats.uniform(0.8, 1.2)} var = InputVar.var_to_inputvar(exp) self.assertDictEqual(var.distr_dict, {}) @@ -251,6 +237,7 @@ def test_var_to_inputvar(self): self.assertDictEqual(iv_var.distr_dict, distr_dict) self.assertTrue(isinstance(iv_var, InputVar)) + class TestOutput(unittest.TestCase): """Test the output class""" @@ -274,7 +261,7 @@ def test_plot_unc_imp(self): plt_sens = unc_output.plot_rp_uncertainty() self.assertIsNotNone(plt_sens) plt.close() - plt_sens_2 = unc_output.plot_sensitivity_second_order(salib_si='S1') + plt_sens_2 = unc_output.plot_sensitivity_second_order(salib_si="S1") self.assertIsNotNone(plt_sens_2) plt.close() plt_map = unc_output.plot_sensitivity_map() @@ -288,7 +275,9 @@ def test_save_load_pass(self): haz = haz_dem() unc_calc = CalcImpact(exp_unc, impf_unc, haz) - unc_data_save = unc_calc.make_sample(N=2, sampling_kwargs={'calc_second_order': True}) + unc_data_save = unc_calc.make_sample( + N=2, sampling_kwargs={"calc_second_order": True} + ) filename = unc_data_save.to_hdf5() unc_data_load = UncOutput.from_hdf5(filename) for attr_save, val_save in unc_data_save.__dict__.items(): @@ -299,8 +288,9 @@ def test_save_load_pass(self): self.assertEqual(unc_data_load.sampling_kwargs, unc_data_save.sampling_kwargs) filename.unlink() - unc_data_save = unc_calc.uncertainty(unc_data_save, calc_eai_exp=True, - calc_at_event=False) + unc_data_save = unc_calc.uncertainty( + unc_data_save, calc_eai_exp=True, calc_at_event=False + ) filename = unc_data_save.to_hdf5() unc_data_load = UncOutput.from_hdf5(filename) for attr_save, val_save in unc_data_save.__dict__.items(): @@ -312,9 +302,8 @@ def test_save_load_pass(self): filename.unlink() unc_data_save = unc_calc.sensitivity( - unc_data_save, - sensitivity_kwargs = {'calc_second_order': True} - ) + unc_data_save, sensitivity_kwargs={"calc_second_order": True} + ) filename = unc_data_save.to_hdf5() unc_data_load = UncOutput.from_hdf5(filename) for attr_save, val_save in unc_data_save.__dict__.items(): @@ -323,10 +312,15 @@ def test_save_load_pass(self): self.assertTrue(df_load.equals(val_save)) self.assertEqual(unc_data_load.sampling_method, unc_data_save.sampling_method) self.assertEqual(unc_data_load.sampling_kwargs, unc_data_save.sampling_kwargs) - self.assertEqual(unc_data_load.sensitivity_method, unc_data_save.sensitivity_method) - self.assertEqual(unc_data_load.sensitivity_kwargs, unc_data_save.sensitivity_kwargs) + self.assertEqual( + unc_data_load.sensitivity_method, unc_data_save.sensitivity_method + ) + self.assertEqual( + unc_data_load.sensitivity_kwargs, unc_data_save.sensitivity_kwargs + ) filename.unlink() + 
class TestCalcDelta(unittest.TestCase): """Test the calcluate delta impact uncertainty class""" @@ -336,12 +330,16 @@ def test_calc_uncertainty_pass(self): exp_unc, impf_unc, _ = make_input_vars() haz = haz_dem() haz2 = haz_dem() - haz2.intensity *=2 + haz2.intensity *= 2 unc_calc = CalcDeltaImpact(exp_unc, impf_dem(), haz, exp_dem(), impf_unc, haz2) unc_data = unc_calc.make_sample(N=2) - unc_data = unc_calc.uncertainty(unc_data, calc_eai_exp=False, calc_at_event=False) + unc_data = unc_calc.uncertainty( + unc_data, calc_eai_exp=False, calc_at_event=False + ) - for [x_exp, x_paa, x_mdd], delta_aai_aag in zip(unc_data.samples_df.values, unc_data.aai_agg_unc_df.values): + for [x_exp, x_paa, x_mdd], delta_aai_aag in zip( + unc_data.samples_df.values, unc_data.aai_agg_unc_df.values + ): exp1 = exp_unc.evaluate(x_exp=x_exp) exp2 = exp_dem() impf1 = impf_dem() @@ -351,13 +349,18 @@ def test_calc_uncertainty_pass(self): imp1 = ImpactCalc(exp1, impf1, haz1).impact() imp2 = ImpactCalc(exp2, impf2, haz2).impact() - self.assertAlmostEqual((imp2.aai_agg - imp1.aai_agg)/imp1.aai_agg, delta_aai_aag) + self.assertAlmostEqual( + (imp2.aai_agg - imp1.aai_agg) / imp1.aai_agg, delta_aai_aag + ) - #test when computing absolute delta - unc_data = unc_calc.uncertainty(unc_data, calc_eai_exp=False, calc_at_event=False, - relative_delta=False) + # test when computing absolute delta + unc_data = unc_calc.uncertainty( + unc_data, calc_eai_exp=False, calc_at_event=False, relative_delta=False + ) - for [x_exp, x_paa, x_mdd], delta_aai_aag in zip(unc_data.samples_df.values, unc_data.aai_agg_unc_df.values): + for [x_exp, x_paa, x_mdd], delta_aai_aag in zip( + unc_data.samples_df.values, unc_data.aai_agg_unc_df.values + ): exp1 = exp_unc.evaluate(x_exp=x_exp) exp2 = exp_dem() impf1 = impf_dem() @@ -374,14 +377,10 @@ def test_calc_uncertainty_pass(self): self.assertEqual(unc_calc.calc_eai_exp, False) self.assertEqual(unc_calc.calc_at_event, False) + self.assertEqual(unc_data.aai_agg_unc_df.size, unc_data.n_samples) self.assertEqual( - unc_data.aai_agg_unc_df.size, - unc_data.n_samples - ) - self.assertEqual( - unc_data.freq_curve_unc_df.size, - unc_data.n_samples * len(unc_calc.rp) - ) + unc_data.freq_curve_unc_df.size, unc_data.n_samples * len(unc_calc.rp) + ) self.assertTrue(unc_data.eai_exp_unc_df.empty) self.assertTrue(unc_data.at_event_unc_df.empty) @@ -394,38 +393,39 @@ def test_calc_sensitivity_pass(self): haz2.intensity *= 2 unc_calc = CalcDeltaImpact(exp_unc, impf_dem(), haz, exp_dem(), impf_unc, haz2) unc_data = unc_calc.make_sample(N=4) - unc_data = unc_calc.uncertainty(unc_data, calc_eai_exp=False, calc_at_event=False) + unc_data = unc_calc.uncertainty( + unc_data, calc_eai_exp=False, calc_at_event=False + ) unc_data = unc_calc.sensitivity( - unc_data, - sensitivity_kwargs = {'calc_second_order': True} - ) + unc_data, sensitivity_kwargs={"calc_second_order": True} + ) - self.assertEqual(unc_data.sensitivity_method, 'sobol') - self.assertTupleEqual(unc_data.sensitivity_kwargs, - tuple({'calc_second_order': 'True'}.items()) - ) + self.assertEqual(unc_data.sensitivity_method, "sobol") + self.assertTupleEqual( + unc_data.sensitivity_kwargs, tuple({"calc_second_order": "True"}.items()) + ) for name, attr in unc_data.__dict__.items(): - if 'sens_df' in name: - if 'eai' in name: + if "sens_df" in name: + if "eai" in name: self.assertTrue(attr.empty) - elif 'at_event' in name: + elif "at_event" in name: self.assertTrue(attr.empty) else: np.testing.assert_array_equal( - attr.param.unique(), - np.array(['x_exp', 
'x_paa', 'x_mdd']) - ) + attr.param.unique(), np.array(["x_exp", "x_paa", "x_mdd"]) + ) np.testing.assert_array_equal( attr.si.unique(), - np.array(['S1', 'S1_conf', 'ST', 'ST_conf', 'S2', 'S2_conf']) - ) + np.array(["S1", "S1_conf", "ST", "ST_conf", "S2", "S2_conf"]), + ) + + self.assertEqual( + len(attr), len(unc_data.param_labels) * (4 + 3 + 3) + ) - self.assertEqual(len(attr), - len(unc_data.param_labels) * (4 + 3 + 3) - ) class TestCalcImpact(unittest.TestCase): """Test the calcluate impact uncertainty class""" @@ -438,83 +438,81 @@ def test_init_pass(self): self.assertTupleEqual( unc_calc._input_var_names, - ('exp_input_var', 'impf_input_var', 'haz_input_var') - ) + ("exp_input_var", "impf_input_var", "haz_input_var"), + ) self.assertTupleEqual( - unc_calc._metric_names, - ('aai_agg', 'freq_curve', 'at_event', 'eai_exp') - ) + unc_calc._metric_names, ("aai_agg", "freq_curve", "at_event", "eai_exp") + ) self.assertEqual(unc_calc.value_unit, exp_iv.evaluate().value_unit) self.assertTrue( - unc_calc.exp_input_var.evaluate(x_exp=1).gdf.equals( - exp_dem(1).gdf) - ) - impf1 = unc_calc.impf_input_var.evaluate(x_paa=1, x_mdd=1).get_func()['TC'][1] - impf2 = impf_dem(1, 1).get_func()['TC'][1] + unc_calc.exp_input_var.evaluate(x_exp=1).gdf.equals(exp_dem(1).gdf) + ) + impf1 = unc_calc.impf_input_var.evaluate(x_paa=1, x_mdd=1).get_func()["TC"][1] + impf2 = impf_dem(1, 1).get_func()["TC"][1] np.testing.assert_array_almost_equal( - impf1.calc_mdr(impf1.intensity), - impf2.calc_mdr(impf2.intensity) - ) + impf1.calc_mdr(impf1.intensity), impf2.calc_mdr(impf2.intensity) + ) haz1 = unc_calc.haz_input_var.evaluate(x_haz=1) haz2 = haz_dem(1) - self.assertListEqual( - haz1.event_name, haz2.event_name - ) + self.assertListEqual(haz1.event_name, haz2.event_name) def test_make_sample_pass(self): """Test generate sample""" - exp_unc, _ , haz_unc = make_input_vars() + exp_unc, _, haz_unc = make_input_vars() impf = impf_dem() unc_calc = CalcImpact(exp_unc, impf, haz_unc) - #default sampling saltelli - unc_data = unc_calc.make_sample(N=2, sampling_kwargs = {'calc_second_order': True}) - self.assertEqual(unc_data.n_samples, 2*(2*2+2)) # N * (2 * D + 2) + # default sampling saltelli + unc_data = unc_calc.make_sample( + N=2, sampling_kwargs={"calc_second_order": True} + ) + self.assertEqual(unc_data.n_samples, 2 * (2 * 2 + 2)) # N * (2 * D + 2) self.assertTrue(isinstance(unc_data.samples_df, pd.DataFrame)) np.testing.assert_array_equal( - unc_data.samples_df.columns.values, - np.array(['x_exp', 'x_haz']) - ) + unc_data.samples_df.columns.values, np.array(["x_exp", "x_haz"]) + ) - #latin sampling - unc_data = unc_calc.make_sample(N=1, sampling_method='latin', - sampling_kwargs = {'seed': 11245}) + # latin sampling + unc_data = unc_calc.make_sample( + N=1, sampling_method="latin", sampling_kwargs={"seed": 11245} + ) self.assertEqual(unc_data.n_samples, 1) self.assertTrue(isinstance(unc_data.samples_df, pd.DataFrame)) np.testing.assert_array_equal( - unc_data.samples_df.columns.values, - np.array(['x_exp', 'x_haz']) - ) + unc_data.samples_df.columns.values, np.array(["x_exp", "x_haz"]) + ) def test_make_sample_ff_fail(self): - """Test for warning and error messages when sampling using the 'ff' method""" + """Test for warning and error messages when sampling using the 'ff' method""" - exp_unc, impf_unc, haz_unc = make_input_vars() - haz = haz_dem() + exp_unc, impf_unc, haz_unc = make_input_vars() + haz = haz_dem() - # Warning ff sampling - unc_calc = CalcImpact(exp_unc, impf_unc, haz_unc) - warning_msg = "You are 
using the 'ff' sampler which does not require " - "a value for N. The entered N value will be ignored" - "in the sampling process." - - with self.assertLogs(LOGGER, level='WARNING') as logs: - unc_data = unc_calc.make_sample(N=4, sampling_method='ff') - self.assertEqual(len(logs.output), 1) - self.assertIn(warning_msg, logs.output[0]) - - # Error ff sampling - unc_calc = CalcImpact(exp_unc, impf_unc, haz) - with self.assertRaises(ValueError) as cm: - unc_data = unc_calc.make_sample(N=4, sampling_method='ff') - the_exception = cm.exception - self.assertEqual(the_exception.args[0], - "The number of parameters must be a power of 2. " - "To use the ff sampling method, you can generate " - "dummy parameters to overcome this limitation." - " See https://salib.readthedocs.io/en/latest/api.html") + # Warning ff sampling + unc_calc = CalcImpact(exp_unc, impf_unc, haz_unc) + warning_msg = "You are using the 'ff' sampler which does not require " + "a value for N. The entered N value will be ignored" + "in the sampling process." + + with self.assertLogs(LOGGER, level="WARNING") as logs: + unc_data = unc_calc.make_sample(N=4, sampling_method="ff") + self.assertEqual(len(logs.output), 1) + self.assertIn(warning_msg, logs.output[0]) + + # Error ff sampling + unc_calc = CalcImpact(exp_unc, impf_unc, haz) + with self.assertRaises(ValueError) as cm: + unc_data = unc_calc.make_sample(N=4, sampling_method="ff") + the_exception = cm.exception + self.assertEqual( + the_exception.args[0], + "The number of parameters must be a power of 2. " + "To use the ff sampling method, you can generate " + "dummy parameters to overcome this limitation." + " See https://salib.readthedocs.io/en/latest/api.html", + ) def test_calc_uncertainty_pass(self): """Test compute the uncertainty distribution for an impact""" @@ -522,23 +520,21 @@ def test_calc_uncertainty_pass(self): exp_unc, impf_unc, _ = make_input_vars() haz = haz_dem() unc_calc = CalcImpact(exp_unc, impf_unc, haz) - unc_data = unc_calc.make_sample( N=2) - unc_data = unc_calc.uncertainty(unc_data, calc_eai_exp=False, calc_at_event=False) + unc_data = unc_calc.make_sample(N=2) + unc_data = unc_calc.uncertainty( + unc_data, calc_eai_exp=False, calc_at_event=False + ) self.assertEqual(unc_data.unit, exp_dem().value_unit) self.assertListEqual(unc_calc.rp, [5, 10, 20, 50, 100, 250]) self.assertEqual(unc_calc.calc_eai_exp, False) self.assertEqual(unc_calc.calc_at_event, False) - self.assertEqual( - unc_data.aai_agg_unc_df.size, - unc_data.n_samples - ) + self.assertEqual(unc_data.aai_agg_unc_df.size, unc_data.n_samples) self.assertEqual( - unc_data.freq_curve_unc_df.size, - unc_data.n_samples * len(unc_calc.rp) - ) + unc_data.freq_curve_unc_df.size, unc_data.n_samples * len(unc_calc.rp) + ) self.assertTrue(unc_data.eai_exp_unc_df.empty) self.assertTrue(unc_data.at_event_unc_df.empty) @@ -552,153 +548,155 @@ def test_calc_uncertainty_pool_pass(self): unc_data = unc_calc.uncertainty( unc_data, calc_eai_exp=False, calc_at_event=False, processes=4 - ) + ) self.assertEqual(unc_data.unit, exp_dem().value_unit) self.assertListEqual(unc_calc.rp, [5, 10, 20, 50, 100, 250]) self.assertEqual(unc_calc.calc_eai_exp, False) self.assertEqual(unc_calc.calc_at_event, False) - self.assertEqual( - unc_data.aai_agg_unc_df.size, - unc_data.n_samples - ) + self.assertEqual(unc_data.aai_agg_unc_df.size, unc_data.n_samples) self.assertEqual( - unc_data.freq_curve_unc_df.size, - unc_data.n_samples * len(unc_calc.rp) - ) + unc_data.freq_curve_unc_df.size, unc_data.n_samples * len(unc_calc.rp) + ) 
self.assertTrue(unc_data.eai_exp_unc_df.empty) self.assertTrue(unc_data.at_event_unc_df.empty) def test_calc_sensitivity_all_pass(self): """Test compute sensitivity using all different sensitivity methods""" - #define input_vars + # define input_vars exp_unc, impf_unc, haz_unc = make_input_vars() # dict to store the parameters and expected results for the tests test_dict = { - 'pawn': { - 'sampling_method' : 'saltelli', - 'sampling_kwargs' : {}, - 'N' : 4, - 'sensitivity_kwargs' : { - 'S' : 10, - 'seed' : 12345 - }, - 'test_param_name' : ['x_exp',0], - 'test_si_name' : ['CV', 16], - 'test_si_value' : [0.25000, 2] - }, - 'hdmr': { - 'sampling_method' : 'saltelli', - 'sampling_kwargs' : {}, - 'N' : 100, - 'sensitivity_kwargs' : {}, - 'test_param_name' : ['x_exp', 2], - 'test_si_name' : ['Sa', 4], - 'test_si_value' : [0.004658, 3] - }, - 'ff': { - - 'sampling_method' : 'ff', - 'sampling_kwargs' : {'seed' : 12345}, - 'N' : 4, - 'sensitivity_kwargs' : {'second_order': True}, - 'test_param_name' : ['x_exp', 0], - 'test_si_name' : ['IE', 4], - 'test_si_value' : [865181825.901295, 10] - }, - 'sobol': { - 'sampling_method' : 'saltelli', - 'sampling_kwargs' : {}, - 'N' : 4, - 'sensitivity_kwargs' : {}, - 'test_param_name' : ['x_paa', 5], - 'test_si_name' : ['ST', 8], - 'test_si_value' : [0.313025, 10] - }, - - 'dgsm': { - 'sampling_method' : 'finite_diff', - 'N' : 4, - 'sampling_kwargs' : {'seed':12345}, - 'sensitivity_kwargs' : {'num_resamples': 100, - 'conf_level': 0.95, 'seed': 12345}, - 'test_param_name' : ['x_exp',0], - 'test_si_name' : ['dgsm', 8], - 'test_si_value' : [1.697516e-01, 9] + "pawn": { + "sampling_method": "saltelli", + "sampling_kwargs": {}, + "N": 4, + "sensitivity_kwargs": {"S": 10, "seed": 12345}, + "test_param_name": ["x_exp", 0], + "test_si_name": ["CV", 16], + "test_si_value": [0.25000, 2], }, - 'fast': { - 'sampling_method' : 'fast_sampler', - 'sampling_kwargs' : {'M' : 4, 'seed' : 12345}, - 'N' : 256, - 'sensitivity_kwargs' : {'M': 4, 'seed': 12345}, - 'test_param_name' : ['x_exp',0], - 'test_si_name' : ['S1_conf',8], - 'test_si_value' : [0.671396, 1] + "hdmr": { + "sampling_method": "saltelli", + "sampling_kwargs": {}, + "N": 100, + "sensitivity_kwargs": {}, + "test_param_name": ["x_exp", 2], + "test_si_name": ["Sa", 4], + "test_si_value": [0.004658, 3], }, - - 'rbd_fast': { - 'sampling_method' : 'saltelli', - 'sampling_kwargs' : {}, - 'N' : 24, - 'sensitivity_kwargs' : {'M': 4, 'seed': 12345}, - 'test_param_name' : ['x_exp', 0], - 'test_si_name' : ['S1_conf', 4], - 'test_si_value' : [0.152609, 4] + "ff": { + "sampling_method": "ff", + "sampling_kwargs": {"seed": 12345}, + "N": 4, + "sensitivity_kwargs": {"second_order": True}, + "test_param_name": ["x_exp", 0], + "test_si_name": ["IE", 4], + "test_si_value": [865181825.901295, 10], }, - - 'morris': { - 'sampling_method' : 'morris', - 'sampling_kwargs' : {'seed': 12345}, - 'N' : 4, - 'sensitivity_kwargs' : {}, - 'test_param_name' : ['x_exp', 0], - 'test_si_name' : ['mu', 1], - 'test_si_value' : [5066460029.63911, 8] + "sobol": { + "sampling_method": "saltelli", + "sampling_kwargs": {}, + "N": 4, + "sensitivity_kwargs": {}, + "test_param_name": ["x_paa", 5], + "test_si_name": ["ST", 8], + "test_si_value": [0.313025, 10], + }, + "dgsm": { + "sampling_method": "finite_diff", + "N": 4, + "sampling_kwargs": {"seed": 12345}, + "sensitivity_kwargs": { + "num_resamples": 100, + "conf_level": 0.95, + "seed": 12345, + }, + "test_param_name": ["x_exp", 0], + "test_si_name": ["dgsm", 8], + "test_si_value": [1.697516e-01, 9], + }, + 
"fast": { + "sampling_method": "fast_sampler", + "sampling_kwargs": {"M": 4, "seed": 12345}, + "N": 256, + "sensitivity_kwargs": {"M": 4, "seed": 12345}, + "test_param_name": ["x_exp", 0], + "test_si_name": ["S1_conf", 8], + "test_si_value": [0.671396, 1], + }, + "rbd_fast": { + "sampling_method": "saltelli", + "sampling_kwargs": {}, + "N": 24, + "sensitivity_kwargs": {"M": 4, "seed": 12345}, + "test_param_name": ["x_exp", 0], + "test_si_name": ["S1_conf", 4], + "test_si_value": [0.152609, 4], + }, + "morris": { + "sampling_method": "morris", + "sampling_kwargs": {"seed": 12345}, + "N": 4, + "sensitivity_kwargs": {}, + "test_param_name": ["x_exp", 0], + "test_si_name": ["mu", 1], + "test_si_value": [5066460029.63911, 8], }, } - def test_sensitivity_method(exp_unc, impf_unc, haz_unc, sensitivity_method, param_dict): + def test_sensitivity_method( + exp_unc, impf_unc, haz_unc, sensitivity_method, param_dict + ): """Function to test each seaprate sensitivity method""" unc_calc = CalcImpact(exp_unc, impf_unc, haz_unc) - unc_data = unc_calc.make_sample(N=param_dict['N'], - sampling_method=param_dict['sampling_method'], - sampling_kwargs=param_dict['sampling_kwargs']) - unc_data = unc_calc.uncertainty(unc_data, calc_eai_exp=False, calc_at_event=False) + unc_data = unc_calc.make_sample( + N=param_dict["N"], + sampling_method=param_dict["sampling_method"], + sampling_kwargs=param_dict["sampling_kwargs"], + ) + unc_data = unc_calc.uncertainty( + unc_data, calc_eai_exp=False, calc_at_event=False + ) # Call the sensitivity method with each method's specific arguments unc_data = unc_calc.sensitivity( unc_data, sensitivity_method=sensitivity_method, - sensitivity_kwargs=param_dict['sensitivity_kwargs']) - - self.assertEqual(param_dict['test_param_name'][0], - unc_data.aai_agg_sens_df['param'][param_dict['test_param_name'][1]]) - self.assertEqual(param_dict['test_si_name'][0], - unc_data.aai_agg_sens_df['si'][param_dict['test_si_name'][1]]) - self.assertAlmostEqual(param_dict['test_si_value'][0], - unc_data.aai_agg_sens_df['aai_agg'][param_dict['test_si_value'][1]], - places=5) + sensitivity_kwargs=param_dict["sensitivity_kwargs"], + ) self.assertEqual( - unc_data.aai_agg_unc_df.size, - unc_data.n_samples - ) + param_dict["test_param_name"][0], + unc_data.aai_agg_sens_df["param"][param_dict["test_param_name"][1]], + ) + self.assertEqual( + param_dict["test_si_name"][0], + unc_data.aai_agg_sens_df["si"][param_dict["test_si_name"][1]], + ) + self.assertAlmostEqual( + param_dict["test_si_value"][0], + unc_data.aai_agg_sens_df["aai_agg"][param_dict["test_si_value"][1]], + places=5, + ) + + self.assertEqual(unc_data.aai_agg_unc_df.size, unc_data.n_samples) self.assertEqual( - unc_data.freq_curve_unc_df.size, - unc_data.n_samples * len(unc_calc.rp) - ) + unc_data.freq_curve_unc_df.size, unc_data.n_samples * len(unc_calc.rp) + ) self.assertTrue(unc_data.eai_exp_unc_df.empty) self.assertTrue(unc_data.at_event_unc_df.empty) # loop over each method and do test for sensitivity_method, method_params in test_dict.items(): - test_sensitivity_method(exp_unc, impf_unc, haz_unc, - sensitivity_method, method_params) + test_sensitivity_method( + exp_unc, impf_unc, haz_unc, sensitivity_method, method_params + ) class TestCalcCostBenefit(unittest.TestCase): @@ -714,58 +712,73 @@ def test_init_pass(self): self.assertTupleEqual( unc_calc._input_var_names, - ('haz_input_var', 'ent_input_var', - 'haz_fut_input_var', 'ent_fut_input_var') - ) + ( + "haz_input_var", + "ent_input_var", + "haz_fut_input_var", + "ent_fut_input_var", 
+ ), + ) self.assertTupleEqual( unc_calc._metric_names, - ('tot_climate_risk', 'benefit', 'cost_ben_ratio', - 'imp_meas_present', 'imp_meas_future') - ) + ( + "tot_climate_risk", + "benefit", + "cost_ben_ratio", + "imp_meas_present", + "imp_meas_future", + ), + ) self.assertEqual(unc_calc.value_unit, ent_dem().exposures.value_unit) self.assertTrue( - unc_calc.ent_input_var.evaluate(CO=None, IFi=None, EN=None, EL=0).exposures.gdf.equals( - ent_dem().exposures.gdf) - ) + unc_calc.ent_input_var.evaluate( + CO=None, IFi=None, EN=None, EL=0 + ).exposures.gdf.equals(ent_dem().exposures.gdf) + ) haz1 = unc_calc.haz_input_var.evaluate(x_haz=1) haz2 = haz_dem(1) - self.assertListEqual( - haz1.event_name, haz2.event_name - ) + self.assertListEqual(haz1.event_name, haz2.event_name) unc_calc = CalcCostBenefit(haz_iv, ent_iv, haz_iv, ent_fut_iv) self.assertTupleEqual( unc_calc._input_var_names, - ('haz_input_var', 'ent_input_var', - 'haz_fut_input_var', 'ent_fut_input_var') - ) + ( + "haz_input_var", + "ent_input_var", + "haz_fut_input_var", + "ent_fut_input_var", + ), + ) self.assertTupleEqual( unc_calc._metric_names, - ('tot_climate_risk', 'benefit', 'cost_ben_ratio', - 'imp_meas_present', 'imp_meas_future') - ) + ( + "tot_climate_risk", + "benefit", + "cost_ben_ratio", + "imp_meas_present", + "imp_meas_future", + ), + ) self.assertEqual(unc_calc.value_unit, ent_dem().exposures.value_unit) self.assertTrue( - unc_calc.ent_input_var.evaluate(CO=None, IFi=None, EN=None).exposures.gdf.equals( - ent_dem().exposures.gdf) - ) + unc_calc.ent_input_var.evaluate( + CO=None, IFi=None, EN=None + ).exposures.gdf.equals(ent_dem().exposures.gdf) + ) self.assertTrue( - unc_calc.ent_fut_input_var.evaluate(EG=None, MDD=None, PAA=None).exposures.gdf.equals( - ent_fut_dem().exposures.gdf) - ) + unc_calc.ent_fut_input_var.evaluate( + EG=None, MDD=None, PAA=None + ).exposures.gdf.equals(ent_fut_dem().exposures.gdf) + ) haz1 = unc_calc.haz_input_var.evaluate(x_haz=1) haz2 = haz_dem(1) - self.assertListEqual( - haz1.event_name, haz2.event_name - ) + self.assertListEqual(haz1.event_name, haz2.event_name) haz3 = unc_calc.haz_fut_input_var.evaluate(x_haz=1) - self.assertListEqual( - haz3.event_name, haz2.event_name - ) + self.assertListEqual(haz3.event_name, haz2.event_name) def test_make_sample_pass(self): """Test generate sample""" @@ -775,46 +788,49 @@ def test_make_sample_pass(self): unc_calc = CalcCostBenefit(haz_iv, ent_iv) - #default sampling saltelli - unc_data = unc_calc.make_sample(N=2, sampling_kwargs = {'calc_second_order': True}) - self.assertEqual(unc_data.n_samples, 2*(2*4+2)) # N * (2 * D + 2) + # default sampling saltelli + unc_data = unc_calc.make_sample( + N=2, sampling_kwargs={"calc_second_order": True} + ) + self.assertEqual(unc_data.n_samples, 2 * (2 * 4 + 2)) # N * (2 * D + 2) self.assertTrue(isinstance(unc_data.samples_df, pd.DataFrame)) np.testing.assert_array_equal( - unc_data.samples_df.columns.values, - np.array(['x_haz', 'EN', 'IFi', 'CO']) - ) + unc_data.samples_df.columns.values, np.array(["x_haz", "EN", "IFi", "CO"]) + ) # #latin sampling - unc_data = unc_calc.make_sample(N=1, sampling_method='latin', - sampling_kwargs = {'seed': 11245}) + unc_data = unc_calc.make_sample( + N=1, sampling_method="latin", sampling_kwargs={"seed": 11245} + ) self.assertEqual(unc_data.n_samples, 1) self.assertTrue(isinstance(unc_data.samples_df, pd.DataFrame)) np.testing.assert_array_equal( - unc_data.samples_df.columns.values, - np.array(['x_haz', 'EN', 'IFi', 'CO']) - ) - + unc_data.samples_df.columns.values, 
np.array(["x_haz", "EN", "IFi", "CO"]) + ) unc_calc = CalcCostBenefit(haz_iv, ent_iv, haz_iv, ent_fut_iv) - #default sampling saltelli - unc_data = unc_calc.make_sample(N=2, sampling_kwargs = {'calc_second_order': True}) - self.assertEqual(unc_data.n_samples, 2*(2*7+2)) # N * (2 * D + 2) + # default sampling saltelli + unc_data = unc_calc.make_sample( + N=2, sampling_kwargs={"calc_second_order": True} + ) + self.assertEqual(unc_data.n_samples, 2 * (2 * 7 + 2)) # N * (2 * D + 2) self.assertTrue(isinstance(unc_data.samples_df, pd.DataFrame)) np.testing.assert_array_equal( unc_data.samples_df.columns.values, - np.array(['x_haz', 'EN', 'IFi', 'CO', 'EG', 'PAA', 'MDD']) - ) + np.array(["x_haz", "EN", "IFi", "CO", "EG", "PAA", "MDD"]), + ) # #latin sampling - unc_data = unc_calc.make_sample(N=1, sampling_method='latin', - sampling_kwargs = {'seed': 11245}) + unc_data = unc_calc.make_sample( + N=1, sampling_method="latin", sampling_kwargs={"seed": 11245} + ) self.assertEqual(unc_data.n_samples, 1) self.assertTrue(isinstance(unc_data.samples_df, pd.DataFrame)) np.testing.assert_array_equal( unc_data.samples_df.columns.values, - np.array(['x_haz', 'EN', 'IFi', 'CO', 'EG', 'PAA', 'MDD']) - ) + np.array(["x_haz", "EN", "IFi", "CO", "EG", "PAA", "MDD"]), + ) def test_calc_uncertainty_pool_pass(self): """Test compute the uncertainty distribution for an impact""" @@ -828,22 +844,17 @@ def test_calc_uncertainty_pool_pass(self): self.assertEqual(unc_data.unit, ent_dem().exposures.value_unit) - self.assertEqual( - unc_data.tot_climate_risk_unc_df.size, - unc_data.n_samples - ) + self.assertEqual(unc_data.tot_climate_risk_unc_df.size, unc_data.n_samples) self.assertEqual( unc_data.cost_ben_ratio_unc_df.size, - unc_data.n_samples * 4 #number of measures - ) - self.assertEqual( - unc_data.imp_meas_present_unc_df.size, - 0 - ) + unc_data.n_samples * 4, # number of measures + ) + self.assertEqual(unc_data.imp_meas_present_unc_df.size, 0) self.assertEqual( unc_data.imp_meas_future_unc_df.size, - unc_data.n_samples * 4 * 5 #All measures 4 and risks/benefits 5 - ) + unc_data.n_samples * 4 * 5, # All measures 4 and risks/benefits 5 + ) + if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestInputVar) diff --git a/climada/engine/unsequa/unc_output.py b/climada/engine/unsequa/unc_output.py index 4a833df2b..d9c68fe69 100644 --- a/climada/engine/unsequa/unc_output.py +++ b/climada/engine/unsequa/unc_output.py @@ -19,57 +19,59 @@ Define Uncertainty class. 
""" -__all__ = ['UncOutput', 'UncCostBenefitOutput', 'UncImpactOutput', 'UncDeltaImpactOutput'] +__all__ = [ + "UncOutput", + "UncCostBenefitOutput", + "UncImpactOutput", + "UncDeltaImpactOutput", +] -import logging import datetime as dt - +import logging from itertools import zip_longest from pathlib import Path - import h5py -import pandas as pd -import numpy as np -import matplotlib.pyplot as plt import matplotlib as mpl +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd from matplotlib import colormaps as cm +import climada.util.hdf5_handler as u_hdf5 from climada import CONFIG - -from climada.util.value_representation import value_to_monetary_unit as u_vtm -from climada.util.value_representation import convert_monetary_value as u_cmv from climada.util import plot as u_plot -import climada.util.hdf5_handler as u_hdf5 +from climada.util.value_representation import convert_monetary_value as u_cmv +from climada.util.value_representation import value_to_monetary_unit as u_vtm LOGGER = logging.getLogger(__name__) # Metrics that are multi-dimensional -METRICS_2D = ['eai_exp', 'at_event'] +METRICS_2D = ["eai_exp", "at_event"] DATA_DIR = CONFIG.engine.uncertainty.local_data.user_data.dir() -FIG_W, FIG_H = 8, 5 #default figize width/heigh column/work multiplicators +FIG_W, FIG_H = 8, 5 # default figize width/heigh column/work multiplicators -MAP_CMAP = 'Dark2' #Default color map for the sensitivity map +MAP_CMAP = "Dark2" # Default color map for the sensitivity map -#Table of recommended pairing between salib sampling and sensitivity methods +# Table of recommended pairing between salib sampling and sensitivity methods # NEEDS TO BE UPDATED REGULARLY!! https://salib.readthedocs.io/en/latest/api.html # Currently, we do not support the 'delta' method due to Singular matrix issues, SALIB_COMPATIBILITY = { #'delta': ['fast_sampler', 'ff', 'finite_diff', 'latin', 'morris', 'saltelli'], - 'dgsm': ['finite_diff'], - 'fast': ['fast_sampler'], - 'ff': ['ff'], - 'hdmr': ['fast_sampler', 'ff', 'finite_diff', 'latin', 'morris', 'saltelli'], - 'morris': ['morris'], - 'pawn': ['fast_sampler', 'ff', 'finite_diff', 'latin', 'morris', 'saltelli'], - 'rbd_fast': ['fast_sampler', 'ff', 'finite_diff', 'latin', 'morris', 'saltelli'], - 'sobol': ['saltelli', 'sobol'] - } - - -class UncOutput(): + "dgsm": ["finite_diff"], + "fast": ["fast_sampler"], + "ff": ["ff"], + "hdmr": ["fast_sampler", "ff", "finite_diff", "latin", "morris", "saltelli"], + "morris": ["morris"], + "pawn": ["fast_sampler", "ff", "finite_diff", "latin", "morris", "saltelli"], + "rbd_fast": ["fast_sampler", "ff", "finite_diff", "latin", "morris", "saltelli"], + "sobol": ["saltelli", "sobol"], +} + + +class UncOutput: """ Class to store and plot uncertainty and sensitivity analysis output data @@ -98,8 +100,12 @@ class UncOutput(): https://salib.readthedocs.io/en/latest/basics.html. 
""" - _metadata = ['sampling_method', 'sampling_kwargs', 'sensitivity_method', - 'sensitivity_kwargs'] + _metadata = [ + "sampling_method", + "sampling_kwargs", + "sensitivity_method", + "sensitivity_kwargs", + ] def __init__(self, samples_df, unit=None): """ @@ -112,7 +118,7 @@ def __init__(self, samples_df, unit=None): unit : str, optional value unit """ - #Data + # Data self.samples_df = samples_df self.unit = unit @@ -135,19 +141,19 @@ def order_samples(self, by_parameters): self.samples_df.sort_values(by=by_parameters, inplace=True, axis=0) def get_samples_df(self): - return getattr(self, 'samples_df') + return getattr(self, "samples_df") def get_unc_df(self, metric_name): - return getattr(self, f'{metric_name}_unc_df') + return getattr(self, f"{metric_name}_unc_df") def set_unc_df(self, metric_name, unc_df): - setattr(self, f'{metric_name}_unc_df', unc_df) + setattr(self, f"{metric_name}_unc_df", unc_df) def get_sens_df(self, metric_name): - return getattr(self, f'{metric_name}_sens_df') + return getattr(self, f"{metric_name}_sens_df") def set_sens_df(self, metric_name, sens_df): - setattr(self, f'{metric_name}_sens_df', sens_df) + setattr(self, f"{metric_name}_sens_df", sens_df) def check_salib(self, sensitivity_method): """ @@ -171,12 +177,14 @@ def check_salib(self, sensitivity_method): """ if self.sampling_method not in SALIB_COMPATIBILITY[sensitivity_method]: - LOGGER.warning("The chosen combination of sensitivity method (%s)" + LOGGER.warning( + "The chosen combination of sensitivity method (%s)" " and sampling method (%s) does not correspond to the" " recommendation of the salib pacakge." "\n https://salib.readthedocs.io/en/latest/api.html", - self.sampling_method, sensitivity_method - ) + self.sampling_method, + sensitivity_method, + ) return False return True @@ -191,7 +199,7 @@ def sampling_method(self): Sampling method name """ - return self.samples_df.attrs['sampling_method'] + return self.samples_df.attrs["sampling_method"] @property def sampling_kwargs(self): @@ -204,7 +212,7 @@ def sampling_kwargs(self): Dictionary of arguments for SALib sampling method """ - return self.samples_df.attrs['sampling_kwargs'] + return self.samples_df.attrs["sampling_kwargs"] @property def n_samples(self): @@ -246,10 +254,10 @@ def problem_sa(self): """ return { - 'num_vars' : len(self.param_labels), - 'names' : self.param_labels, - 'bounds' : [[0, 1]]*len(self.param_labels) - } + "num_vars": len(self.param_labels), + "names": self.param_labels, + "bounds": [[0, 1]] * len(self.param_labels), + } @property def uncertainty_metrics(self): @@ -314,9 +322,8 @@ def get_uncertainty(self, metric_list=None): metric_list = self.uncertainty_metrics try: unc_df = pd.concat( - [self.get_unc_df(metric) for metric in metric_list], - axis=1 - ) + [self.get_unc_df(metric) for metric in metric_list], axis=1 + ) except AttributeError: return pd.DataFrame([]) return unc_df @@ -358,14 +365,14 @@ def get_sensitivity(self, salib_si, metric_list=None): for metric in metric_list: submetric_df = self.get_sens_df(metric) if not submetric_df.empty: - submetric_df = submetric_df[submetric_df['si'] == salib_si] + submetric_df = submetric_df[submetric_df["si"] == salib_si] df_all = pd.concat( - [df_all, submetric_df.select_dtypes('number')], - axis=1 - ) + [df_all, submetric_df.select_dtypes("number")], axis=1 + ) if df_meta.empty: df_meta = submetric_df.drop( - submetric_df.select_dtypes('number').columns, axis=1) + submetric_df.select_dtypes("number").columns, axis=1 + ) return pd.concat([df_meta, df_all], 
axis=1).reset_index(drop=True) def get_largest_si(self, salib_si, metric_list=None, threshold=0.01): @@ -394,25 +401,27 @@ def get_largest_si(self, salib_si, metric_list=None, threshold=0.01): si_df = self.get_sensitivity(salib_si, metric_list) - #get max index - si_df_num = si_df.select_dtypes('number') - si_df_num[si_df_num 1: flat_axes = axes.flatten() else: flat_axes = np.array([axes]) - for ax, col, orig_val in zip_longest(flat_axes, cols, orig_list, fillvalue=None): + for ax, col, orig_val in zip_longest( + flat_axes, cols, orig_list, fillvalue=None + ): if col is None: if ax is not None: ax.remove() @@ -569,11 +588,17 @@ def plot_uncertainty(self, metric_list=None, orig_list=None, figsize=None, if data.empty or data.isna().all() or data.dropna().shape[0] < 2: print(f"No data to plot for '{col}'.") if ax is not None: - ax.text(0.5, 0.5, 'No data to plot', fontsize=18, - horizontalalignment='center', verticalalignment='center', - transform=ax.transAxes) + ax.text( + 0.5, + 0.5, + "No data to plot", + fontsize=18, + horizontalalignment="center", + verticalalignment="center", + transform=ax.transAxes, + ) ax.set_xlabel(col) - ax.set_ylabel('density of samples') + ax.set_ylabel("density of samples") ax.tick_params(labelsize=fontsize) for item in [ax.title, ax.xaxis.label, ax.yaxis.label]: item.set_fontsize(fontsize) @@ -583,10 +608,16 @@ def plot_uncertainty(self, metric_list=None, orig_list=None, figsize=None, if data.empty: ax.remove() continue - data.hist(ax=ax, bins=30, density=True, histtype='bar', - color='lightsteelblue', edgecolor='black') + data.hist( + ax=ax, + bins=30, + density=True, + histtype="bar", + color="lightsteelblue", + edgecolor="black", + ) try: - data.plot.kde(ax=ax, color='darkblue', linewidth=4, label='') + data.plot.kde(ax=ax, color="darkblue", linewidth=4, label="") except np.linalg.LinAlgError: pass avg, std = data.mean(), data.std() @@ -595,33 +626,46 @@ def plot_uncertainty(self, metric_list=None, orig_list=None, figsize=None, avg_plot = np.log10(avg) else: avg_plot = avg - ax.axvline(avg_plot, color='darkorange', linestyle='dashed', linewidth=2, - label="avg=%.2f%s" %(avg, m_unit)) + ax.axvline( + avg_plot, + color="darkorange", + linestyle="dashed", + linewidth=2, + label="avg=%.2f%s" % (avg, m_unit), + ) if orig_val is not None: if log: orig_plot = np.log10(orig_val) else: orig_plot = orig_val [orig_plot] = u_cmv(orig_plot, m_unit) - ax.axvline(orig_plot, color='green', linestyle='dotted', linewidth=2, - label="orig=%.2f%s" %(orig_plot, m_unit)) + ax.axvline( + orig_plot, + color="green", + linestyle="dotted", + linewidth=2, + label="orig=%.2f%s" % (orig_plot, m_unit), + ) if log: std_m, std_p = np.log10(avg - std), np.log10(avg + std) else: std_m, std_p = avg - std, avg + std - ax.plot([std_m, std_p], - [0.3 * ymax, 0.3 * ymax], color='black', - label="std=%.2f%s" %(std, m_unit)) - xlabel = col + ' [' + m_unit + ' ' + self.unit + '] ' + ax.plot( + [std_m, std_p], + [0.3 * ymax, 0.3 * ymax], + color="black", + label="std=%.2f%s" % (std, m_unit), + ) + xlabel = col + " [" + m_unit + " " + self.unit + "] " if calc_delta: # Modify the xlabel when calc_delta is True - xlabel = col + ' change [%]' + xlabel = col + " change [%]" if log: - ax.set_xlabel( xlabel + ' (log10 scale)') + ax.set_xlabel(xlabel + " (log10 scale)") else: ax.set_xlabel(xlabel) - ax.set_ylabel('density of samples') - ax.legend(fontsize=fontsize-2) + ax.set_ylabel("density of samples") + ax.legend(fontsize=fontsize - 2) ax.tick_params(labelsize=fontsize) for item in [ax.title, 
ax.xaxis.label, ax.yaxis.label]: @@ -631,9 +675,9 @@ def plot_uncertainty(self, metric_list=None, orig_list=None, figsize=None, return axes - - def plot_rp_uncertainty(self, orig_list=None, figsize=(16, 6), axes=None, - calc_delta=False): + def plot_rp_uncertainty( + self, orig_list=None, figsize=(16, 6), axes=None, calc_delta=False + ): """ Plot the distribution of return period uncertainty @@ -667,23 +711,27 @@ def plot_rp_uncertainty(self, orig_list=None, figsize=(16, 6), axes=None, except AttributeError: unc_df = None if unc_df is None or unc_df.empty: - raise ValueError("No return period uncertainty data present " - "Please run an uncertainty analysis with the desired " - "return period specified.") + raise ValueError( + "No return period uncertainty data present " + "Please run an uncertainty analysis with the desired " + "return period specified." + ) - add_orig=True + add_orig = True if orig_list is None: - add_orig=False + add_orig = False if axes is None: _fig, axes = plt.subplots(figsize=figsize, nrows=1, ncols=2) - [min_l, max_l], m_unit = u_vtm([unc_df.min().min(), unc_df.max().max()], n_sig_dig=4) + [min_l, max_l], m_unit = u_vtm( + [unc_df.min().min(), unc_df.max().max()], n_sig_dig=4 + ) # Plotting for the first axes ax = axes[0] - prop_cycle = plt.rcParams['axes.prop_cycle'] - colors = prop_cycle.by_key()['color'] + prop_cycle = plt.rcParams["axes.prop_cycle"] + colors = prop_cycle.by_key()["color"] for n, (_name, values) in enumerate(unc_df.items()): if values.isna().all() or len(values.dropna()) < 2: @@ -692,67 +740,92 @@ def plot_rp_uncertainty(self, orig_list=None, figsize=(16, 6), axes=None, values = u_cmv(values, m_unit, n_sig_dig=4) count, division = np.histogram(values, bins=100) count = count / count.max() - losses = [(bin_i + bin_f)/2 for (bin_i, bin_f) in zip(division[:-1], division[1:])] - ax.plot([min_l, max_l], [2*n, 2*n], color='k', alpha=0.5) - ax.fill_between(losses, count + 2*n, 2*n) + losses = [ + (bin_i + bin_f) / 2 + for (bin_i, bin_f) in zip(division[:-1], division[1:]) + ] + ax.plot([min_l, max_l], [2 * n, 2 * n], color="k", alpha=0.5) + ax.fill_between(losses, count + 2 * n, 2 * n) if add_orig: [orig_val] = u_cmv(orig_list[n], m_unit, n_sig_dig=4) ax.plot( - [orig_val, orig_val], [2*n, 2*(n+1)], - color=colors[n], linestyle='dotted', linewidth=2, - label="orig=%.2f%s" %(orig_val, m_unit) + [orig_val, orig_val], + [2 * n, 2 * (n + 1)], + color=colors[n], + linestyle="dotted", + linewidth=2, + label="orig=%.2f%s" % (orig_val, m_unit), ) ax.set_xlim(min_l, max_l) - ax.set_ylim(0, 2*unc_df.shape[1]) - ax.set_yticks(np.arange(0, 2*unc_df.shape[1], 2)) + ax.set_ylim(0, 2 * unc_df.shape[1]) + ax.set_yticks(np.arange(0, 2 * unc_df.shape[1], 2)) ax.set_yticklabels([s[2:] for s in unc_df.columns]) - ax.legend(loc='lower right') + ax.legend(loc="lower right") # Set x-axis label for the first axes if calc_delta: - ax.set_xlabel('Impact change [%]') + ax.set_xlabel("Impact change [%]") else: - ax.set_xlabel('Impact [%s %s]' % (m_unit, self.unit)) + ax.set_xlabel("Impact [%s %s]" % (m_unit, self.unit)) - ax.set_ylabel('Return period [years]') + ax.set_ylabel("Return period [years]") # Plotting for the second axes ax = axes[1] - high = u_cmv(self.get_unc_df('freq_curve').quantile(0.95).values, - m_unit, n_sig_dig=4) - middle = u_cmv(self.get_unc_df('freq_curve').quantile(0.5).values, - m_unit, n_sig_dig=4) - low = u_cmv(self.get_unc_df('freq_curve').quantile(0.05).values, - m_unit, n_sig_dig=4) + high = u_cmv( + self.get_unc_df("freq_curve").quantile(0.95).values, 
m_unit, n_sig_dig=4 + ) + middle = u_cmv( + self.get_unc_df("freq_curve").quantile(0.5).values, m_unit, n_sig_dig=4 + ) + low = u_cmv( + self.get_unc_df("freq_curve").quantile(0.05).values, m_unit, n_sig_dig=4 + ) x = [float(rp[2:]) for rp in unc_df.columns] - ax.plot(x, high, linestyle='--', color='blue', alpha=0.5, - label='0.95 percentile') - ax.plot(x, middle, label='0.5 percentile') - ax.plot(x, low, linestyle='dashdot', color='blue', alpha=0.5, - label='0.05 percentile') + ax.plot( + x, high, linestyle="--", color="blue", alpha=0.5, label="0.95 percentile" + ) + ax.plot(x, middle, label="0.5 percentile") + ax.plot( + x, + low, + linestyle="dashdot", + color="blue", + alpha=0.5, + label="0.05 percentile", + ) ax.fill_between(x, low, high, alpha=0.2) if add_orig: - ax.plot(x, u_cmv(orig_list, m_unit, n_sig_dig=4), color='green', - linestyle='dotted', label='orig') - ax.set_xlabel('Return period [year]') + ax.plot( + x, + u_cmv(orig_list, m_unit, n_sig_dig=4), + color="green", + linestyle="dotted", + label="orig", + ) + ax.set_xlabel("Return period [year]") # Set y-axis label for the second axes if calc_delta: - ax.set_ylabel('Impact change [%]') + ax.set_ylabel("Impact change [%]") else: - ax.set_ylabel('Impact [' + m_unit + ' ' + self.unit + ']') + ax.set_ylabel("Impact [" + m_unit + " " + self.unit + "]") ax.legend() return axes - - - def plot_sensitivity(self, salib_si='S1', salib_si_conf='S1_conf', - metric_list=None, figsize=None, axes=None, - **kwargs): + def plot_sensitivity( + self, + salib_si="S1", + salib_si_conf="S1_conf", + metric_list=None, + figsize=None, + axes=None, + **kwargs, + ): """ Bar plot of a first order sensitivity index @@ -813,54 +886,63 @@ def plot_sensitivity(self, salib_si='S1', salib_si_conf='S1_conf', """ if not self.sensitivity_metrics: - raise ValueError("No sensitivity present. " - "Please run a sensitivity analysis first.") + raise ValueError( + "No sensitivity present. " "Please run a sensitivity analysis first." 
+ ) if metric_list is None: metric_list = [ metric for metric in self.sensitivity_metrics if metric not in METRICS_2D - ] + ] nplots = len(metric_list) nrows, ncols = int(np.ceil(nplots / 2)), min(nplots, 2) if axes is None: if not figsize: figsize = (ncols * FIG_W, nrows * FIG_H) - _fig, axes = plt.subplots(nrows = nrows, - ncols = ncols, - figsize = figsize) + _fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=figsize) if nplots > 1: flat_axes = axes.flatten() else: flat_axes = np.array([axes]) for ax, metric in zip(flat_axes, metric_list): - df_S = self.get_sensitivity(salib_si, [metric]).select_dtypes('number') + df_S = self.get_sensitivity(salib_si, [metric]).select_dtypes("number") if not df_S.columns[df_S.isnull().all()].empty: - LOGGER.warning("All-NaN columns encountered: %s", - list(df_S.columns[df_S.isnull().all()])) + LOGGER.warning( + "All-NaN columns encountered: %s", + list(df_S.columns[df_S.isnull().all()]), + ) df_S = df_S.loc[:, df_S.notnull().any()] if df_S.empty: - ax.set_xlabel('Input parameter') + ax.set_xlabel("Input parameter") ax.remove() continue - df_S_conf = self.get_sensitivity(salib_si_conf, [metric]).select_dtypes('number') + df_S_conf = self.get_sensitivity(salib_si_conf, [metric]).select_dtypes( + "number" + ) df_S_conf = df_S_conf.loc[:, df_S.columns] if df_S_conf.empty: - df_S.plot(ax=ax, kind='bar', **kwargs) - df_S.plot(ax=ax, kind='bar', yerr=df_S_conf, **kwargs) + df_S.plot(ax=ax, kind="bar", **kwargs) + df_S.plot(ax=ax, kind="bar", yerr=df_S_conf, **kwargs) ax.set_xticklabels(self.param_labels, rotation=0) - ax.set_xlabel('Input parameter') + ax.set_xlabel("Input parameter") ax.set_ylabel(salib_si) plt.tight_layout() return axes - def plot_sensitivity_second_order(self, salib_si='S2', salib_si_conf='S2_conf', - metric_list=None, figsize=None, axes=None, - **kwargs): + def plot_sensitivity_second_order( + self, + salib_si="S2", + salib_si_conf="S2_conf", + metric_list=None, + figsize=None, + axes=None, + **kwargs, + ): """ Plot second order sensitivity indices as matrix. @@ -916,33 +998,34 @@ def plot_sensitivity_second_order(self, salib_si='S2', salib_si_conf='S2_conf', """ if not self.sensitivity_metrics: - raise ValueError("No sensitivity present for this metrics. " - "Please run a sensitivity analysis first.") + raise ValueError( + "No sensitivity present for this metrics. " + "Please run a sensitivity analysis first." + ) if metric_list is None: metric_list = [ metric for metric in self.sensitivity_metrics if metric not in METRICS_2D - ] - + ] - if 'cmap' not in kwargs.keys(): - kwargs['cmap'] = 'summer' + if "cmap" not in kwargs.keys(): + kwargs["cmap"] = "summer" - #all the lowest level metrics (e.g. rp10) directly or as - #submetrics of the metrics in metrics_list - df_S = self.get_sensitivity(salib_si, metric_list).select_dtypes('number') - df_S_conf = self.get_sensitivity(salib_si_conf, metric_list).select_dtypes('number') + # all the lowest level metrics (e.g. 
rp10) directly or as + # submetrics of the metrics in metrics_list + df_S = self.get_sensitivity(salib_si, metric_list).select_dtypes("number") + df_S_conf = self.get_sensitivity(salib_si_conf, metric_list).select_dtypes( + "number" + ) nplots = len(df_S.columns) nrows, ncols = int(np.ceil(nplots / 3)), min(nplots, 3) if axes is None: if not figsize: figsize = (ncols * 5, nrows * 5) - _fig, axes = plt.subplots(nrows = nrows, - ncols = ncols, - figsize = figsize) + _fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=figsize) if nplots > 1: flat_axes = axes.flatten() @@ -950,37 +1033,46 @@ def plot_sensitivity_second_order(self, salib_si='S2', salib_si_conf='S2_conf', flat_axes = np.array([axes]) for ax, submetric in zip(flat_axes, df_S.columns): - #Make matrix symmetric + # Make matrix symmetric s2_matrix = np.triu( - np.reshape( - df_S[submetric].to_numpy(), - (len(self.param_labels), -1) - ) - ) + np.reshape(df_S[submetric].to_numpy(), (len(self.param_labels), -1)) + ) s2_matrix = s2_matrix + s2_matrix.T - np.diag(np.diag(s2_matrix)) ax.imshow(s2_matrix, **kwargs) s2_conf_matrix = np.triu( np.reshape( - df_S_conf[submetric].to_numpy(), - (len(self.param_labels), -1) - ) + df_S_conf[submetric].to_numpy(), (len(self.param_labels), -1) ) - s2_conf_matrix = s2_conf_matrix + s2_conf_matrix.T - \ - np.diag(np.diag(s2_conf_matrix)) + ) + s2_conf_matrix = ( + s2_conf_matrix + s2_conf_matrix.T - np.diag(np.diag(s2_conf_matrix)) + ) for i in range(len(s2_matrix)): for j in range(len(s2_matrix)): if np.isnan(s2_matrix[i, j]): - ax.text(j, i, np.nan, - ha="center", va="center", - color="k", fontsize='medium') + ax.text( + j, + i, + np.nan, + ha="center", + va="center", + color="k", + fontsize="medium", + ) else: - ax.text(j, i, - str(round(s2_matrix[i, j], 2)) + u'\n\u00B1' + #\u00B1 = +- - str(round(s2_conf_matrix[i, j], 2)), - ha="center", va="center", - color="k", fontsize='medium') - - ax.set_title(salib_si + ' - ' + submetric, fontsize=18) + ax.text( + j, + i, + str(round(s2_matrix[i, j], 2)) + + "\n\u00B1" # \u00B1 = +- + + str(round(s2_conf_matrix[i, j], 2)), + ha="center", + va="center", + color="k", + fontsize="medium", + ) + + ax.set_title(salib_si + " - " + submetric, fontsize=18) labels = self.param_labels ax.set_xticks(np.arange(len(labels))) ax.set_yticks(np.arange(len(labels))) @@ -990,7 +1082,7 @@ def plot_sensitivity_second_order(self, salib_si='S2', salib_si_conf='S2_conf', return axes - def plot_sensitivity_map(self, salib_si='S1', **kwargs): + def plot_sensitivity_map(self, salib_si="S1", **kwargs): """ Plot a map of the largest sensitivity index in each exposure point @@ -1022,36 +1114,34 @@ def plot_sensitivity_map(self, salib_si='S1', **kwargs): """ - eai_max_si_df = self.get_largest_si(salib_si, metric_list=['eai_exp']) - - plot_val = eai_max_si_df['param'] - coord = np.array([self.coord_df['latitude'], self.coord_df['longitude']]).transpose() # pylint: disable=no-member - if 'var_name' not in kwargs: - kwargs['var_name'] = 'Input parameter with largest ' + salib_si - if 'title' not in kwargs: - kwargs['title'] = '' - if 'figsize' not in kwargs: - kwargs['figsize'] = (8,6) - if 'cmap' not in kwargs: + eai_max_si_df = self.get_largest_si(salib_si, metric_list=["eai_exp"]) + + plot_val = eai_max_si_df["param"] + coord = np.array( + [self.coord_df["latitude"], self.coord_df["longitude"]] + ).transpose() # pylint: disable=no-member + if "var_name" not in kwargs: + kwargs["var_name"] = "Input parameter with largest " + salib_si + if "title" not in kwargs: + kwargs["title"] 
= "" + if "figsize" not in kwargs: + kwargs["figsize"] = (8, 6) + if "cmap" not in kwargs: labels = np.unique(plot_val) - n=np.where(labels=='None')[0] - if len(n) > 0 : + n = np.where(labels == "None")[0] + if len(n) > 0: n = n[0] cmap = mpl.colors.ListedColormap( - cm.get_cmap(MAP_CMAP).colors[:len(labels)] - ) + cm.get_cmap(MAP_CMAP).colors[: len(labels)] + ) colors = list(cmap.colors) colors[n] = tuple(np.repeat(0.93, 3)) cmap.colors = tuple(colors) - kwargs['cmap'] = cmap - ax = u_plot.geo_scatter_categorical( - plot_val, coord, - **kwargs - ) + kwargs["cmap"] = cmap + ax = u_plot.geo_scatter_categorical(plot_val, coord, **kwargs) return ax - def to_hdf5(self, filename=None): """ Save output to .hdf5 @@ -1070,29 +1160,27 @@ def to_hdf5(self, filename=None): """ if filename is None: - filename = "unc_output" + dt.datetime.now().strftime( - "%Y-%m-%d-%H%M%S" - ) + filename = "unc_output" + dt.datetime.now().strftime("%Y-%m-%d-%H%M%S") filename = Path(DATA_DIR) / Path(filename) save_path = Path(filename) - save_path = save_path.with_suffix('.hdf5') + save_path = save_path.with_suffix(".hdf5") - LOGGER.info('Writing %s', save_path) - store = pd.HDFStore(save_path, mode='w') - for (var_name, var_val) in self.__dict__.items(): + LOGGER.info("Writing %s", save_path) + store = pd.HDFStore(save_path, mode="w") + for var_name, var_val in self.__dict__.items(): if isinstance(var_val, pd.DataFrame): - store.put(var_name, var_val, format='fixed', complevel=9) - store.get_storer('/samples_df').attrs.metadata = self.samples_df.attrs + store.put(var_name, var_val, format="fixed", complevel=9) + store.get_storer("/samples_df").attrs.metadata = self.samples_df.attrs store.close() str_dt = h5py.special_dtype(vlen=str) - with h5py.File(save_path, 'a') as fh: - if getattr(self, 'unit'): - fh['impact_unit'] = [self.unit] - if hasattr(self, 'sensitivity_method'): + with h5py.File(save_path, "a") as fh: + if getattr(self, "unit"): + fh["impact_unit"] = [self.unit] + if hasattr(self, "sensitivity_method"): if self.sensitivity_method: - fh['sensitivity_method'] = [self.sensitivity_method] - if hasattr(self, 'sensitivity_kwargs'): + fh["sensitivity_method"] = [self.sensitivity_method] + if hasattr(self, "sensitivity_kwargs"): if self.sensitivity_kwargs: grp = fh.create_group("sensitivity_kwargs") for key, value in dict(self.sensitivity_kwargs).items(): @@ -1115,41 +1203,50 @@ def from_hdf5(filename): unc_output: climada.engine.uncertainty.unc_output.UncOutput Uncertainty and sensitivity data loaded from .hdf5 file. 
""" - filename = Path(filename).with_suffix('.hdf5') + filename = Path(filename).with_suffix(".hdf5") if not filename.exists(): - LOGGER.info('File not found') + LOGGER.info("File not found") return None unc_data = UncOutput(pd.DataFrame()) - LOGGER.info('Reading %s', filename) - store = pd.HDFStore(filename, mode='r') + LOGGER.info("Reading %s", filename) + store = pd.HDFStore(filename, mode="r") for var_name in store.keys(): setattr(unc_data, var_name[1:], store.get(var_name)) - unc_data.samples_df.attrs = store.get_storer('/samples_df').attrs.metadata + unc_data.samples_df.attrs = store.get_storer("/samples_df").attrs.metadata store.close() - with h5py.File(filename, 'r') as fh: - if 'impact_unit' in list(fh.keys()): - unc_data.unit = fh.get('impact_unit')[0].decode('UTF-8') - if 'sensitivity_method' in list(fh.keys()): - unc_data.sensitivity_method = \ - fh.get('sensitivity_method')[0].decode('UTF-8') - if 'sensitivity_kwargs' in list(fh.keys()): + with h5py.File(filename, "r") as fh: + if "impact_unit" in list(fh.keys()): + unc_data.unit = fh.get("impact_unit")[0].decode("UTF-8") + if "sensitivity_method" in list(fh.keys()): + unc_data.sensitivity_method = fh.get("sensitivity_method")[0].decode( + "UTF-8" + ) + if "sensitivity_kwargs" in list(fh.keys()): grp = fh["sensitivity_kwargs"] sens_kwargs = { - key: u_hdf5.to_string(grp.get(key)[0]) - for key in grp.keys() - } + key: u_hdf5.to_string(grp.get(key)[0]) for key in grp.keys() + } unc_data.sensitivity_kwargs = tuple(sens_kwargs.items()) return unc_data class UncImpactOutput(UncOutput): """Extension of UncOutput specific for CalcImpact, returned by the - uncertainty() method. + uncertainty() method. """ - def __init__(self, samples_df, unit, aai_agg_unc_df, freq_curve_unc_df, - eai_exp_unc_df, at_event_unc_df, coord_df): + + def __init__( + self, + samples_df, + unit, + aai_agg_unc_df, + freq_curve_unc_df, + eai_exp_unc_df, + at_event_unc_df, + coord_df, + ): """Constructor Uncertainty output values from impact.calc for each sample @@ -1186,11 +1283,21 @@ def __init__(self, samples_df, unit, aai_agg_unc_df, freq_curve_unc_df, self.at_event_sens_df = None self.coord_df = coord_df + class UncDeltaImpactOutput(UncOutput): - """Extension of UncOutput specific for CalcDeltaImpact, returned by the uncertainty() method. - """ - def __init__(self, samples_df, unit, aai_agg_unc_df, freq_curve_unc_df, eai_exp_unc_df, - at_event_initial_unc_df, at_event_final_unc_df, coord_df): + """Extension of UncOutput specific for CalcDeltaImpact, returned by the uncertainty() method.""" + + def __init__( + self, + samples_df, + unit, + aai_agg_unc_df, + freq_curve_unc_df, + eai_exp_unc_df, + at_event_initial_unc_df, + at_event_final_unc_df, + coord_df, + ): """Constructor Uncertainty output values from impact.calc for each sample @@ -1234,11 +1341,19 @@ def __init__(self, samples_df, unit, aai_agg_unc_df, freq_curve_unc_df, eai_exp_ class UncCostBenefitOutput(UncOutput): - """Extension of UncOutput specific for CalcCostBenefit, returned by the uncertainty() method. 
- """ - def __init__(self, samples_df, unit, imp_meas_present_unc_df, imp_meas_future_unc_df, - tot_climate_risk_unc_df, benefit_unc_df, cost_ben_ratio_unc_df, - cost_benefit_kwargs): + """Extension of UncOutput specific for CalcCostBenefit, returned by the uncertainty() method.""" + + def __init__( + self, + samples_df, + unit, + imp_meas_present_unc_df, + imp_meas_future_unc_df, + tot_climate_risk_unc_df, + benefit_unc_df, + cost_ben_ratio_unc_df, + cost_benefit_kwargs, + ): """Constructor Uncertainty output values from cost_benefit.calc for each sample @@ -1270,9 +1385,9 @@ def __init__(self, samples_df, unit, imp_meas_present_unc_df, imp_meas_future_un """ super().__init__(samples_df, unit) - self.imp_meas_present_unc_df= imp_meas_present_unc_df + self.imp_meas_present_unc_df = imp_meas_present_unc_df self.imp_meas_present_sens_df = None - self.imp_meas_future_unc_df= imp_meas_future_unc_df + self.imp_meas_future_unc_df = imp_meas_future_unc_df self.imp_meas_future_sens_df = None self.tot_climate_risk_unc_df = tot_climate_risk_unc_df self.tot_climate_risk_sens_df = None diff --git a/climada/entity/__init__.py b/climada/entity/__init__.py index 985d78c2a..7b830c2b7 100755 --- a/climada/entity/__init__.py +++ b/climada/entity/__init__.py @@ -18,8 +18,9 @@ init entity """ + +from .disc_rates import * +from .entity_def import * from .exposures import * from .impact_funcs import * -from .disc_rates import * from .measures import * -from .entity_def import * diff --git a/climada/entity/disc_rates/__init__.py b/climada/entity/disc_rates/__init__.py index 744aaa982..2dd6148b9 100755 --- a/climada/entity/disc_rates/__init__.py +++ b/climada/entity/disc_rates/__init__.py @@ -18,4 +18,5 @@ init disc_rates """ + from .base import * diff --git a/climada/entity/disc_rates/base.py b/climada/entity/disc_rates/base.py index 0d51e7597..e18daaf91 100755 --- a/climada/entity/disc_rates/base.py +++ b/climada/entity/disc_rates/base.py @@ -19,15 +19,16 @@ Define DiscRates class. """ -__all__ = ['DiscRates'] +__all__ = ["DiscRates"] import copy -from array import array import logging +from array import array from typing import Optional + +import matplotlib.pyplot as plt import numpy as np import pandas as pd -import matplotlib.pyplot as plt import xlsxwriter import climada.util.checker as u_check @@ -37,22 +38,20 @@ LOGGER = logging.getLogger(__name__) """MATLAB variable names""" -DEF_VAR_MAT = {'sup_field_name': 'entity', - 'field_name': 'discount', - 'var_name': {'year': 'year', - 'disc': 'discount_rate' - } - } +DEF_VAR_MAT = { + "sup_field_name": "entity", + "field_name": "discount", + "var_name": {"year": "year", "disc": "discount_rate"}, +} """Excel variable names""" -DEF_VAR_EXCEL = {'sheet_name': 'discount', - 'col_name': {'year': 'year', - 'disc': 'discount_rate' - } - } +DEF_VAR_EXCEL = { + "sheet_name": "discount", + "col_name": {"year": "year", "disc": "discount_rate"}, +} -class DiscRates(): +class DiscRates: """ Defines discount rates and basic methods. Loads from files with format defined in FILE_EXT. 
@@ -66,10 +65,8 @@ class DiscRates(): """ def __init__( - self, - years : Optional[np.ndarray] = None, - rates : Optional[np.ndarray] = None - ): + self, years: Optional[np.ndarray] = None, rates: Optional[np.ndarray] = None + ): """ Fill discount rates with values and check consistency data @@ -100,7 +97,7 @@ def check(self): ------ ValueError """ - u_check.size(len(self.years), self.rates, 'DiscRates.rates') + u_check.size(len(self.years), self.rates, "DiscRates.rates") def select(self, year_range): """ @@ -116,12 +113,11 @@ def select(self, year_range): """ pos_year = np.isin(year_range, self.years) if not np.all(pos_year): - LOGGER.info('No discount rates for given years.') + LOGGER.info("No discount rates for given years.") return None pos_year = np.isin(self.years, year_range) - return DiscRates(years=self.years[pos_year], - rates=self.rates[pos_year]) + return DiscRates(years=self.years[pos_year], rates=self.rates[pos_year]) def append(self, disc_rates): """ @@ -142,8 +138,8 @@ def append(self, disc_rates): self.__dict__ = copy.deepcopy(disc_rates.__dict__) return - new_year = array('l') - new_rate = array('d') + new_year = array("l") + new_rate = array("d") for year, rate in zip(disc_rates.years, disc_rates.rates): found = np.where(year == self.years)[0] if found.size > 0: @@ -176,13 +172,14 @@ def net_present_value(self, ini_year, end_year, val_years): """ year_range = np.arange(ini_year, end_year + 1) if year_range.size != val_years.size: - raise ValueError('Wrong size of yearly values.') + raise ValueError("Wrong size of yearly values.") sel_disc = self.select(year_range) if sel_disc is None: - raise ValueError('No information of discount rates for provided years:' - f' {ini_year} - {end_year}') - return u_fin.net_present_value(sel_disc.years, sel_disc.rates, - val_years) + raise ValueError( + "No information of discount rates for provided years:" + f" {ini_year} - {end_year}" + ) + return u_fin.net_present_value(sel_disc.years, sel_disc.rates, val_years) def plot(self, axis=None, figsize=(6, 8), **kwargs): """ @@ -205,9 +202,9 @@ def plot(self, axis=None, figsize=(6, 8), **kwargs): if not axis: _, axis = plt.subplots(1, 1, figsize=figsize) - axis.set_title('Discount rates') - axis.set_xlabel('Year') - axis.set_ylabel('discount rate (%)') + axis.set_title("Discount rates") + axis.set_xlabel("Year") + axis.set_ylabel("discount rate (%)") axis.plot(self.years, self.rates * 100, **kwargs) axis.set_xlim((self.years.min(), self.years.max())) return axis @@ -244,15 +241,16 @@ def from_mat(cls, file_name, var_names=None): var_names = DEF_VAR_MAT disc = u_hdf5.read(file_name) try: - disc = disc[var_names['sup_field_name']] + disc = disc[var_names["sup_field_name"]] except KeyError: pass try: - disc = disc[var_names['field_name']] - years = np.squeeze(disc[var_names['var_name']['year']]). \ - astype(int, copy=False) - rates = np.squeeze(disc[var_names['var_name']['disc']]) + disc = disc[var_names["field_name"]] + years = np.squeeze(disc[var_names["var_name"]["year"]]).astype( + int, copy=False + ) + rates = np.squeeze(disc[var_names["var_name"]["disc"]]) except KeyError as err: raise KeyError("Not existing variable: %s" % str(err)) from err @@ -295,11 +293,10 @@ def from_excel(cls, file_name, var_names=None): """ if var_names is None: var_names = DEF_VAR_EXCEL - dfr = pd.read_excel(file_name, var_names['sheet_name']) + dfr = pd.read_excel(file_name, var_names["sheet_name"]) try: - years = dfr[var_names['col_name']['year']].values. 
\ - astype(int, copy=False) - rates = dfr[var_names['col_name']['disc']].values + years = dfr[var_names["col_name"]["year"]].values.astype(int, copy=False) + rates = dfr[var_names["col_name"]["disc"]].values except KeyError as err: raise KeyError("Not existing variable: %s" % str(err)) from err @@ -307,8 +304,10 @@ def from_excel(cls, file_name, var_names=None): def read_excel(self, *args, **kwargs): """This function is deprecated, use DiscRates.from_excel instead.""" - LOGGER.warning("The use of DiscRates.read_excel is deprecated." - "Use DiscRates.from_excel instead.") + LOGGER.warning( + "The use of DiscRates.read_excel is deprecated." + "Use DiscRates.from_excel instead." + ) self.__dict__ = DiscRates.from_excel(*args, **kwargs).__dict__ def write_excel(self, file_name, var_names=None): @@ -333,9 +332,9 @@ def write_excel(self, file_name, var_names=None): if var_names is None: var_names = DEF_VAR_EXCEL disc_wb = xlsxwriter.Workbook(file_name) - disc_ws = disc_wb.add_worksheet(var_names['sheet_name']) + disc_ws = disc_wb.add_worksheet(var_names["sheet_name"]) - header = [var_names['col_name']['year'], var_names['col_name']['disc']] + header = [var_names["col_name"]["year"], var_names["col_name"]["disc"]] for icol, head_dat in enumerate(header): disc_ws.write(0, icol, head_dat) for i_yr, (disc_yr, disc_rt) in enumerate(zip(self.years, self.rates), 1): diff --git a/climada/entity/disc_rates/test/test_base.py b/climada/entity/disc_rates/test/test_base.py index 2458a7546..7815a63ea 100644 --- a/climada/entity/disc_rates/test/test_base.py +++ b/climada/entity/disc_rates/test/test_base.py @@ -18,48 +18,51 @@ Test DiscRates class. """ -import unittest -import numpy as np + import copy +import unittest from pathlib import Path from tempfile import TemporaryDirectory +import numpy as np + from climada import CONFIG from climada.entity.disc_rates.base import DiscRates -from climada.util.constants import ENT_TEMPLATE_XLS, ENT_DEMO_TODAY +from climada.util.constants import ENT_DEMO_TODAY, ENT_TEMPLATE_XLS + +ENT_TEST_MAT = CONFIG.exposures.test_data.dir().joinpath("demo_today.mat") -ENT_TEST_MAT = CONFIG.exposures.test_data.dir().joinpath('demo_today.mat') class TestChecker(unittest.TestCase): """Test discount rates attributes checker""" def test_check_wrongRates_fail(self): """Wrong discount rates definition""" - disc_rate = DiscRates( - rates=np.array([3, 4]), - years=np.array([1]) - ) + disc_rate = DiscRates(rates=np.array([3, 4]), years=np.array([1])) with self.assertRaises(ValueError) as cm: disc_rate.check() - self.assertIn('Invalid DiscRates.rates size: 1 != 2.', str(cm.exception)) + self.assertIn("Invalid DiscRates.rates size: 1 != 2.", str(cm.exception)) + class TestConstructor(unittest.TestCase): """Test discount rates attributes.""" + def test_attributes_all(self): """All attributes are defined""" disc_rate = DiscRates() - self.assertTrue(hasattr(disc_rate, 'years')) - self.assertTrue(hasattr(disc_rate, 'rates')) + self.assertTrue(hasattr(disc_rate, "years")) + self.assertTrue(hasattr(disc_rate, "rates")) + class TestAppend(unittest.TestCase): """Check append function""" + def test_append_to_empty_same(self): """Append DiscRates to empty one.""" disc_rate = DiscRates() disc_rate_add = DiscRates( - years=np.array([2000, 2001, 2002]), - rates=np.array([0.1, 0.2, 0.3]) + years=np.array([2000, 2001, 2002]), rates=np.array([0.1, 0.2, 0.3]) ) disc_rate.append(disc_rate_add) @@ -88,34 +91,32 @@ def test_append_different_append(self): years are overwritten.""" disc_rate = DiscRates( - 
years=np.array([2000, 2001, 2002]), - rates=np.array([0.1, 0.2, 0.3]) + years=np.array([2000, 2001, 2002]), rates=np.array([0.1, 0.2, 0.3]) ) disc_rate_add = DiscRates( - years=np.array([2000, 2001, 2003]), - rates=np.array([0.11, 0.22, 0.33]) + years=np.array([2000, 2001, 2003]), rates=np.array([0.11, 0.22, 0.33]) ) disc_rate.append(disc_rate_add) disc_rate.check() - self.assertTrue(np.array_equal(disc_rate.years, - np.array([2000, 2001, 2002, 2003]))) - self.assertTrue(np.array_equal(disc_rate.rates, - np.array([0.11, 0.22, 0.3, 0.33]))) + self.assertTrue( + np.array_equal(disc_rate.years, np.array([2000, 2001, 2002, 2003])) + ) + self.assertTrue( + np.array_equal(disc_rate.rates, np.array([0.11, 0.22, 0.3, 0.33])) + ) class TestSelect(unittest.TestCase): """Test select method""" + def test_select_pass(self): """Test select right time range.""" - years=np.arange(2000, 2050) - rates=np.arange(years.size) - disc_rate = DiscRates( - years=years, - rates=rates - ) + years = np.arange(2000, 2050) + rates = np.arange(years.size) + disc_rate = DiscRates(years=years, rates=rates) year_range = np.arange(2010, 2020) sel_disc = disc_rate.select(year_range) @@ -125,33 +126,25 @@ def test_select_pass(self): def test_select_wrong_pass(self): """Test select wrong time range.""" - disc_rate = DiscRates( - years=np.arange(2000, 2050), - rates=np.arange(50) - ) + disc_rate = DiscRates(years=np.arange(2000, 2050), rates=np.arange(50)) year_range = np.arange(2050, 2060) self.assertEqual(None, disc_rate.select(year_range)) class TestNetPresValue(unittest.TestCase): """Test select method""" + def test_net_present_value_pass(self): """Test net_present_value right time range.""" - disc_rate = DiscRates( - years=np.arange(2000, 2050), - rates=np.ones(50) * 0.02 - ) + disc_rate = DiscRates(years=np.arange(2000, 2050), rates=np.ones(50) * 0.02) val_years = np.ones(23) * 6.512201157564418e9 res = disc_rate.net_present_value(2018, 2040, val_years) - self.assertEqual(res, 1.215049630691397e+11) + self.assertEqual(res, 1.215049630691397e11) def test_net_present_value_wrong_pass(self): """Test net_present_value wrong time range.""" - disc_rate = DiscRates( - years=np.arange(2000, 2050), - rates=np.arange(50) * 0.02 - ) + disc_rate = DiscRates(years=np.arange(2000, 2050), rates=np.arange(50) * 0.02) val_years = np.ones(11) * 6.512201157564418e9 with self.assertRaises(ValueError): disc_rate.net_present_value(2050, 2060, val_years) @@ -167,12 +160,12 @@ def test_demo_file_pass(self): # Check results n_rates = 51 - self.assertIn('int', str(disc_rate.years.dtype)) + self.assertIn("int", str(disc_rate.years.dtype)) self.assertEqual(disc_rate.years.shape, (n_rates,)) self.assertEqual(disc_rate.years[0], 2000) self.assertEqual(disc_rate.years[n_rates - 1], 2050) - self.assertIn('float', str(disc_rate.rates.dtype)) + self.assertIn("float", str(disc_rate.rates.dtype)) self.assertEqual(disc_rate.rates.shape, (n_rates,)) self.assertEqual(disc_rate.rates.min(), 0.02) self.assertEqual(disc_rate.rates.max(), 0.02) @@ -184,12 +177,12 @@ def test_template_file_pass(self): # Check results n_rates = 102 - self.assertIn('int', str(disc_rate.years.dtype)) + self.assertIn("int", str(disc_rate.years.dtype)) self.assertEqual(disc_rate.years.shape, (n_rates,)) self.assertEqual(disc_rate.years[0], 2000) self.assertEqual(disc_rate.years[n_rates - 1], 2101) - self.assertIn('float', str(disc_rate.rates.dtype)) + self.assertIn("float", str(disc_rate.rates.dtype)) self.assertEqual(disc_rate.rates.shape, (n_rates,)) 
self.assertEqual(disc_rate.rates.min(), 0.02) self.assertEqual(disc_rate.rates.max(), 0.02) @@ -207,12 +200,12 @@ def test_demo_file_pass(self): # Check results n_rates = 51 - self.assertIn('int', str(disc_rate.years.dtype)) + self.assertIn("int", str(disc_rate.years.dtype)) self.assertEqual(len(disc_rate.years), n_rates) self.assertEqual(disc_rate.years[0], 2000) self.assertEqual(disc_rate.years[n_rates - 1], 2050) - self.assertIn('float', str(disc_rate.rates.dtype)) + self.assertIn("float", str(disc_rate.rates.dtype)) self.assertEqual(len(disc_rate.rates), n_rates) self.assertEqual(disc_rate.rates.min(), 0.02) self.assertEqual(disc_rate.rates.max(), 0.02) @@ -236,7 +229,7 @@ def test_write_read_excel_pass(self): rates = np.ones(years.size) * 0.03 disc_rate = DiscRates(years=years, rates=rates) - file_name = self.tempdir.joinpath('test_disc.xlsx') + file_name = self.tempdir.joinpath("test_disc.xlsx") disc_rate.write_excel(file_name) disc_read = DiscRates.from_excel(file_name) @@ -250,7 +243,7 @@ def test_write_read_csv_pass(self): rates = np.ones(years.size) * 0.03 disc_rate = DiscRates(years=years, rates=rates) - file_name = self.tempdir.joinpath('test_disc.csv') + file_name = self.tempdir.joinpath("test_disc.csv") disc_rate.write_csv(file_name) disc_read = DiscRates.from_csv(file_name) diff --git a/climada/entity/entity_def.py b/climada/entity/entity_def.py index 542ca2992..d58af9efe 100755 --- a/climada/entity/entity_def.py +++ b/climada/entity/entity_def.py @@ -19,19 +19,21 @@ Define Entity Class. """ -__all__ = ['Entity'] +__all__ = ["Entity"] import logging from typing import Optional + import pandas as pd -from climada.entity.impact_funcs.impact_func_set import ImpactFuncSet from climada.entity.disc_rates.base import DiscRates -from climada.entity.measures.measure_set import MeasureSet from climada.entity.exposures.base import Exposures +from climada.entity.impact_funcs.impact_func_set import ImpactFuncSet +from climada.entity.measures.measure_set import MeasureSet LOGGER = logging.getLogger(__name__) + class Entity: """Collects exposures, impact functions, measures and discount rates. Default values set when empty constructor. @@ -55,7 +57,7 @@ def __init__( exposures: Optional[Exposures] = None, disc_rates: Optional[DiscRates] = None, impact_func_set: Optional[ImpactFuncSet] = None, - measure_set: Optional[MeasureSet] = None + measure_set: Optional[MeasureSet] = None, ): """ Initialize entity @@ -73,7 +75,9 @@ def __init__( """ self.exposures = Exposures() if exposures is None else exposures self.disc_rates = DiscRates() if disc_rates is None else disc_rates - self.impact_funcs = ImpactFuncSet() if impact_func_set is None else impact_func_set + self.impact_funcs = ( + ImpactFuncSet() if impact_func_set is None else impact_func_set + ) self.measures = MeasureSet() if measure_set is None else measure_set @classmethod @@ -100,8 +104,9 @@ def from_mat(cls, file_name): def read_mat(self, *args, **kwargs): """This function is deprecated, use Entity.from_mat instead.""" - LOGGER.warning("The use of Entity.read_mat is deprecated." - "Use Entity.from_mat instead.") + LOGGER.warning( + "The use of Entity.read_mat is deprecated." "Use Entity.from_mat instead." + ) self.__dict__ = Entity.from_mat(*args, **kwargs).__dict__ @classmethod @@ -138,8 +143,10 @@ def from_excel(cls, file_name): def read_excel(self, *args, **kwargs): """This function is deprecated, use Entity.from_excel instead.""" - LOGGER.warning("The use of Entity.read_excel is deprecated." 
- " Use Entity.from_excel instead.") + LOGGER.warning( + "The use of Entity.read_excel is deprecated." + " Use Entity.from_excel instead." + ) self.__dict__ = Entity.from_excel(*args, **kwargs).__dict__ def write_excel(self, file_name): diff --git a/climada/entity/exposures/__init__.py b/climada/entity/exposures/__init__.py index 7e78173ec..509d0f00d 100755 --- a/climada/entity/exposures/__init__.py +++ b/climada/entity/exposures/__init__.py @@ -18,6 +18,6 @@ init exposures """ + from .base import * from .litpop import * - diff --git a/climada/entity/exposures/base.py b/climada/entity/exposures/base.py index 645127f7c..5087a237f 100644 --- a/climada/entity/exposures/base.py +++ b/climada/entity/exposures/base.py @@ -19,65 +19,68 @@ Define Exposures class. """ -__all__ = ['Exposures', 'add_sea', 'INDICATOR_IMPF', 'INDICATOR_CENTR'] +__all__ = ["Exposures", "add_sea", "INDICATOR_IMPF", "INDICATOR_CENTR"] -import logging import copy -from pathlib import Path +import logging import warnings +from pathlib import Path +import cartopy.crs as ccrs +import contextily as ctx +import matplotlib.pyplot as plt import numpy as np import pandas as pd -import matplotlib.pyplot as plt -from mpl_toolkits.axes_grid1 import make_axes_locatable -from geopandas import GeoDataFrame import rasterio +from geopandas import GeoDataFrame +from mpl_toolkits.axes_grid1 import make_axes_locatable from rasterio.warp import Resampling -import contextily as ctx -import cartopy.crs as ccrs -from climada.hazard import Hazard -import climada.util.hdf5_handler as u_hdf5 -from climada.util.constants import ONE_LAT_KM, DEF_CRS, CMAP_RASTER import climada.util.coordinates as u_coord +import climada.util.hdf5_handler as u_hdf5 import climada.util.plot as u_plot from climada import CONFIG +from climada.hazard import Hazard +from climada.util.constants import CMAP_RASTER, DEF_CRS, ONE_LAT_KM LOGGER = logging.getLogger(__name__) -INDICATOR_IMPF_OLD = 'if_' +INDICATOR_IMPF_OLD = "if_" """Previously used name of the column containing the impact functions id of specified hazard""" -INDICATOR_IMPF = 'impf_' +INDICATOR_IMPF = "impf_" """Name of the column containing the impact functions id of specified hazard""" -INDICATOR_CENTR = 'centr_' +INDICATOR_CENTR = "centr_" """Name of the column containing the centroids id of specified hazard""" DEF_REF_YEAR = CONFIG.exposures.def_ref_year.int() """Default reference year""" -DEF_VALUE_UNIT = 'USD' +DEF_VALUE_UNIT = "USD" """Default value unit""" -DEF_VAR_MAT = {'sup_field_name': 'entity', - 'field_name': 'assets', - 'var_name': {'lat': 'lat', - 'lon': 'lon', - 'val': 'Value', - 'ded': 'Deductible', - 'cov': 'Cover', - 'impf': 'DamageFunID', - 'cat': 'Category_ID', - 'reg': 'Region_ID', - 'uni': 'Value_unit', - 'ass': 'centroid_index', - 'ref': 'reference_year' - } - } +DEF_VAR_MAT = { + "sup_field_name": "entity", + "field_name": "assets", + "var_name": { + "lat": "lat", + "lon": "lon", + "val": "Value", + "ded": "Deductible", + "cov": "Cover", + "impf": "DamageFunID", + "cat": "Category_ID", + "reg": "Region_ID", + "uni": "Value_unit", + "ass": "centroid_index", + "ref": "reference_year", + }, +} """MATLAB variable names""" -class Exposures(): + +class Exposures: """geopandas GeoDataFrame with metada and columns (pd.Series) defined in Attributes. @@ -120,29 +123,44 @@ class Exposures(): TC. There might be different hazards defined: centr_TC, centr_FL, ... Computed in method assign_centroids(). 
""" - _metadata = ['description', 'ref_year', 'value_unit', 'meta'] - vars_oblig = ['value', 'latitude', 'longitude'] + _metadata = ["description", "ref_year", "value_unit", "meta"] + + vars_oblig = ["value", "latitude", "longitude"] """Name of the variables needed to compute the impact.""" vars_def = [INDICATOR_IMPF, INDICATOR_IMPF_OLD] """Name of variables that can be computed.""" - vars_opt = [INDICATOR_CENTR, 'deductible', 'cover', 'category_id', - 'region_id', 'geometry'] + vars_opt = [ + INDICATOR_CENTR, + "deductible", + "cover", + "category_id", + "region_id", + "geometry", + ] """Name of the variables that aren't need to compute the impact.""" @property def crs(self): """Coordinate Reference System, refers to the crs attribute of the inherent GeoDataFrame""" try: - return self.gdf.geometry.crs or self.meta.get('crs') + return self.gdf.geometry.crs or self.meta.get("crs") except AttributeError: # i.e., no geometry, crs is assumed to be a property # In case of gdf without geometry, empty or before set_geometry_points was called - return self.meta.get('crs') + return self.meta.get("crs") - def __init__(self, *args, meta=None, description=None, ref_year=DEF_REF_YEAR, - value_unit=DEF_VALUE_UNIT, crs=None, **kwargs): + def __init__( + self, + *args, + meta=None, + description=None, + ref_year=DEF_REF_YEAR, + value_unit=DEF_VALUE_UNIT, + crs=None, + **kwargs, + ): """Creates an Exposures object from a GeoDataFrame Parameters @@ -167,10 +185,17 @@ def __init__(self, *args, meta=None, description=None, ref_year=DEF_REF_YEAR, self.meta = {} if meta is None else meta if not isinstance(self.meta, dict): raise ValueError("meta must be a dictionary") - self.description = self.meta.get('description') if description is None else description - self.ref_year = self.meta.get('ref_year', DEF_REF_YEAR) if ref_year is None else ref_year - self.value_unit = (self.meta.get('value_unit', DEF_VALUE_UNIT) - if value_unit is None else value_unit) + self.description = ( + self.meta.get("description") if description is None else description + ) + self.ref_year = ( + self.meta.get("ref_year", DEF_REF_YEAR) if ref_year is None else ref_year + ) + self.value_unit = ( + self.meta.get("value_unit", DEF_VALUE_UNIT) + if value_unit is None + else value_unit + ) # remaining generic attributes from derived classes for mda in type(self)._metadata: @@ -183,7 +208,7 @@ def __init__(self, *args, meta=None, description=None, ref_year=DEF_REF_YEAR, setattr(self, mda, None) # crs (property) and geometry - data = args[0] if args else kwargs.get('data', {}) + data = args[0] if args else kwargs.get("data", {}) try: data_crs = data.geometry.crs except AttributeError: @@ -191,34 +216,48 @@ def __init__(self, *args, meta=None, description=None, ref_year=DEF_REF_YEAR, if data_crs and data.crs and not u_coord.equal_crs(data_crs, data.crs): raise ValueError("Inconsistent crs definition in data and data.geometry") - crs = (crs if crs is not None - else self.meta['crs'] if 'crs' in self.meta - else data_crs if data_crs - else None) - if 'crs' in self.meta and not u_coord.equal_crs(self.meta['crs'], crs): - raise ValueError("Inconsistent CRS definition, crs and meta arguments don't match") + crs = ( + crs + if crs is not None + else ( + self.meta["crs"] + if "crs" in self.meta + else data_crs if data_crs else None + ) + ) + if "crs" in self.meta and not u_coord.equal_crs(self.meta["crs"], crs): + raise ValueError( + "Inconsistent CRS definition, crs and meta arguments don't match" + ) if data_crs and not u_coord.equal_crs(data_crs, 
crs): - raise ValueError("Inconsistent CRS definition, data doesn't match meta or crs argument") + raise ValueError( + "Inconsistent CRS definition, data doesn't match meta or crs argument" + ) if not crs: crs = DEF_CRS - geometry = kwargs.get('geometry') + geometry = kwargs.get("geometry") if geometry and isinstance(geometry, str): - raise ValueError("Exposures is not able to handle customized 'geometry' column names.") + raise ValueError( + "Exposures is not able to handle customized 'geometry' column names." + ) # make the data frame self.set_gdf(GeoDataFrame(*args, **kwargs), crs=crs) def __str__(self): - return '\n'.join( - [f"{md}: {self.__dict__[md]}" for md in type(self)._metadata] + - [f"crs: {self.crs}", "data:", str(self.gdf)] + return "\n".join( + [f"{md}: {self.__dict__[md]}" for md in type(self)._metadata] + + [f"crs: {self.crs}", "data:", str(self.gdf)] ) def _access_item(self, *args): - raise TypeError("Since CLIMADA 2.0, Exposures objects are not subscriptable. Data " - "fields of Exposures objects are accessed using the `gdf` attribute. " - "For example, `expo['value']` is replaced by `expo.gdf['value']`.") + raise TypeError( + "Since CLIMADA 2.0, Exposures objects are not subscriptable. Data " + "fields of Exposures objects are accessed using the `gdf` attribute. " + "For example, `expo['value']` is replaced by `expo.gdf['value']`." + ) + __getitem__ = _access_item __setitem__ = _access_item __delitem__ = _access_item @@ -236,7 +275,9 @@ def check(self): raise ValueError(f"{var} missing in gdf") # computable columns except impf_* - for var in sorted(set(self.vars_def).difference([INDICATOR_IMPF, INDICATOR_IMPF_OLD])): + for var in sorted( + set(self.vars_def).difference([INDICATOR_IMPF, INDICATOR_IMPF_OLD]) + ): if not var in self.gdf.columns: LOGGER.info("%s not set.", var) @@ -248,9 +289,10 @@ def check(self): default_impf_present = True if not default_impf_present and not [ - col for col in self.gdf.columns - if col.startswith(INDICATOR_IMPF) or col.startswith(INDICATOR_IMPF_OLD) - ]: + col + for col in self.gdf.columns + if col.startswith(INDICATOR_IMPF) or col.startswith(INDICATOR_IMPF_OLD) + ]: LOGGER.info("Setting %s to default impact functions ids 1.", INDICATOR_IMPF) self.gdf[INDICATOR_IMPF] = 1 @@ -267,16 +309,22 @@ def check(self): LOGGER.info("%s not set.", INDICATOR_CENTR) # check if CRS is consistent - if self.crs != self.meta.get('crs'): - raise ValueError(f"Inconsistent CRS definition, gdf ({self.crs}) attribute doesn't " - f"match meta ({self.meta.get('crs')}) attribute.") + if self.crs != self.meta.get("crs"): + raise ValueError( + f"Inconsistent CRS definition, gdf ({self.crs}) attribute doesn't " + f"match meta ({self.meta.get('crs')}) attribute." + ) # check whether geometry corresponds to lat/lon try: - if (self.gdf.geometry.values[0].x != self.gdf['longitude'].values[0] or - self.gdf.geometry.values[0].y != self.gdf['latitude'].values[0]): - raise ValueError("Geometry values do not correspond to latitude and" + - " longitude. Use set_geometry_points() or set_lat_lon().") + if ( + self.gdf.geometry.values[0].x != self.gdf["longitude"].values[0] + or self.gdf.geometry.values[0].y != self.gdf["latitude"].values[0] + ): + raise ValueError( + "Geometry values do not correspond to latitude and" + + " longitude. Use set_geometry_points() or set_lat_lon()." + ) except AttributeError: # no geometry column pass @@ -291,21 +339,21 @@ def set_crs(self, crs=None): if the original value is None it will be set to the default CRS. 
""" # clear the meta dictionary entry - if 'crs' in self.meta: - old_crs = self.meta.pop('crs') + if "crs" in self.meta: + old_crs = self.meta.pop("crs") crs = crs if crs else self.crs if self.crs else DEF_CRS # adjust the dataframe - if 'geometry' in self.gdf.columns: + if "geometry" in self.gdf.columns: try: self.gdf.set_crs(crs, inplace=True) except ValueError: # restore popped crs and leave - self.meta['crs'] = old_crs + self.meta["crs"] = old_crs raise # store the value - self.meta['crs'] = crs + self.meta["crs"] = crs - def set_gdf(self, gdf:GeoDataFrame, crs=None): + def set_gdf(self, gdf: GeoDataFrame, crs=None): """Set the `gdf` GeoDataFrame and update the CRS Parameters @@ -323,7 +371,7 @@ def set_gdf(self, gdf:GeoDataFrame, crs=None): # update the coordinate reference system self.set_crs(crs) - def get_impf_column(self, haz_type=''): + def get_impf_column(self, haz_type=""): """Find the best matching column name in the exposures dataframe for a given hazard type, Parameters @@ -350,24 +398,37 @@ def get_impf_column(self, haz_type=''): if INDICATOR_IMPF + haz_type in self.gdf.columns: return INDICATOR_IMPF + haz_type if INDICATOR_IMPF_OLD + haz_type in self.gdf.columns: - LOGGER.info("Impact function column name 'if_%s' is not according to current" - " naming conventions. It's suggested to use 'impf_%s' instead.", - haz_type, haz_type) + LOGGER.info( + "Impact function column name 'if_%s' is not according to current" + " naming conventions. It's suggested to use 'impf_%s' instead.", + haz_type, + haz_type, + ) return INDICATOR_IMPF_OLD + haz_type if INDICATOR_IMPF in self.gdf.columns: - LOGGER.info("No specific impact function column found for hazard %s." - " Using the anonymous 'impf_' column.", haz_type) + LOGGER.info( + "No specific impact function column found for hazard %s." + " Using the anonymous 'impf_' column.", + haz_type, + ) return INDICATOR_IMPF if INDICATOR_IMPF_OLD in self.gdf.columns: - LOGGER.info("No specific impact function column found for hazard %s. Using the" - " anonymous 'if_' column, which is not according to current naming" - " conventions. It's suggested to use 'impf_' instead.", haz_type) + LOGGER.info( + "No specific impact function column found for hazard %s. Using the" + " anonymous 'if_' column, which is not according to current naming" + " conventions. It's suggested to use 'impf_' instead.", + haz_type, + ) return INDICATOR_IMPF_OLD raise ValueError(f"Missing exposures impact functions {INDICATOR_IMPF}.") - def assign_centroids(self, hazard, distance='euclidean', - threshold=u_coord.NEAREST_NEIGHBOR_THRESHOLD, - overwrite=True): + def assign_centroids( + self, + hazard, + distance="euclidean", + threshold=u_coord.NEAREST_NEIGHBOR_THRESHOLD, + overwrite=True, + ): """Assign for each exposure coordinate closest hazard coordinate. The Exposures ``gdf`` will be altered by this method. It will have an additional (or modified) column named ``centr_[hazard.HAZ_TYPE]`` after the call. 
@@ -420,25 +481,28 @@ def assign_centroids(self, hazard, distance='euclidean', haz_type = hazard.haz_type centr_haz = INDICATOR_CENTR + haz_type if centr_haz in self.gdf: - LOGGER.info('Exposures matching centroids already found for %s', haz_type) + LOGGER.info("Exposures matching centroids already found for %s", haz_type) if overwrite: - LOGGER.info('Existing centroids will be overwritten for %s', haz_type) + LOGGER.info("Existing centroids will be overwritten for %s", haz_type) else: return - LOGGER.info('Matching %s exposures with %s centroids.', - str(self.gdf.shape[0]), str(hazard.centroids.size)) + LOGGER.info( + "Matching %s exposures with %s centroids.", + str(self.gdf.shape[0]), + str(hazard.centroids.size), + ) if not u_coord.equal_crs(self.crs, hazard.centroids.crs): - raise ValueError('Set hazard and exposure to same CRS first!') + raise ValueError("Set hazard and exposure to same CRS first!") # Note: equal_crs is tested here, rather than within match_centroids(), # because exp.gdf.crs may not be defined, but exp.crs must be defined. - assigned_centr = u_coord.match_centroids(self.gdf, hazard.centroids, - distance=distance, threshold=threshold) + assigned_centr = u_coord.match_centroids( + self.gdf, hazard.centroids, distance=distance, threshold=threshold + ) self.gdf[centr_haz] = assigned_centr - def set_geometry_points(self, scheduler=None): """Set geometry attribute of GeoDataFrame with Points from latitude and longitude attributes. @@ -453,20 +517,32 @@ def set_geometry_points(self, scheduler=None): def set_lat_lon(self): """Set latitude and longitude attributes from geometry attribute.""" - LOGGER.info('Setting latitude and longitude attributes.') - self.gdf['latitude'] = self.gdf.geometry[:].y - self.gdf['longitude'] = self.gdf.geometry[:].x + LOGGER.info("Setting latitude and longitude attributes.") + self.gdf["latitude"] = self.gdf.geometry[:].y + self.gdf["longitude"] = self.gdf.geometry[:].x def set_from_raster(self, *args, **kwargs): """This function is deprecated, use Exposures.from_raster instead.""" - LOGGER.warning("The use of Exposures.set_from_raster is deprecated." - "Use Exposures.from_raster instead.") + LOGGER.warning( + "The use of Exposures.set_from_raster is deprecated." + "Use Exposures.from_raster instead." 
+ ) self.__dict__ = Exposures.from_raster(*args, **kwargs).__dict__ @classmethod - def from_raster(cls, file_name, band=1, src_crs=None, window=None, - geometry=None, dst_crs=None, transform=None, - width=None, height=None, resampling=Resampling.nearest): + def from_raster( + cls, + file_name, + band=1, + src_crs=None, + window=None, + geometry=None, + dst_crs=None, + transform=None, + width=None, + height=None, + resampling=Resampling.nearest, + ): """Read raster data and set latitude, longitude, value and meta Parameters @@ -498,28 +574,47 @@ def from_raster(cls, file_name, band=1, src_crs=None, window=None, -------- Exposures """ - meta, value = u_coord.read_raster(file_name, [band], src_crs, window, - geometry, dst_crs, transform, width, - height, resampling) - ulx, xres, _, uly, _, yres = meta['transform'].to_gdal() - lrx = ulx + meta['width'] * xres - lry = uly + meta['height'] * yres - x_grid, y_grid = np.meshgrid(np.arange(ulx + xres / 2, lrx, xres), - np.arange(uly + yres / 2, lry, yres)) + meta, value = u_coord.read_raster( + file_name, + [band], + src_crs, + window, + geometry, + dst_crs, + transform, + width, + height, + resampling, + ) + ulx, xres, _, uly, _, yres = meta["transform"].to_gdal() + lrx = ulx + meta["width"] * xres + lry = uly + meta["height"] * yres + x_grid, y_grid = np.meshgrid( + np.arange(ulx + xres / 2, lrx, xres), np.arange(uly + yres / 2, lry, yres) + ) return cls( { - 'longitude': x_grid.flatten(), - 'latitude': y_grid.flatten(), - 'value': value.reshape(-1), + "longitude": x_grid.flatten(), + "latitude": y_grid.flatten(), + "value": value.reshape(-1), }, meta=meta, - crs=meta['crs'], + crs=meta["crs"], ) - - def plot_scatter(self, mask=None, ignore_zero=False, pop_name=True, - buffer=0.0, extend='neither', axis=None, figsize=(9, 13), - adapt_fontsize=True, title=None, **kwargs): + def plot_scatter( + self, + mask=None, + ignore_zero=False, + pop_name=True, + buffer=0.0, + extend="neither", + axis=None, + figsize=(9, 13), + adapt_fontsize=True, + title=None, + **kwargs, + ): """Plot exposures geometry's value sum scattered over Earth's map. The plot will we projected according to the current crs. 
@@ -560,28 +655,45 @@ def plot_scatter(self, mask=None, ignore_zero=False, pop_name=True, if mask is None: mask = np.ones((self.gdf.shape[0],), dtype=bool) if ignore_zero: - pos_vals = self.gdf['value'][mask].values > 0 + pos_vals = self.gdf["value"][mask].values > 0 else: - pos_vals = np.ones((self.gdf['value'][mask].values.size,), dtype=bool) - value = self.gdf['value'][mask][pos_vals].values - coord = np.stack([self.gdf['latitude'][mask][pos_vals].values, - self.gdf['longitude'][mask][pos_vals].values], axis=1) - return u_plot.geo_scatter_from_array(array_sub=value, - geo_coord=coord, - var_name=f'Value ({self.value_unit})', - title=title, - pop_name=pop_name, - buffer=buffer, - extend=extend, - proj=crs_epsg, - axes=axis, - figsize=figsize, - adapt_fontsize=adapt_fontsize, - **kwargs) - - def plot_hexbin(self, mask=None, ignore_zero=False, pop_name=True, - buffer=0.0, extend='neither', axis=None, figsize=(9, 13), - adapt_fontsize=True, title=None, **kwargs): + pos_vals = np.ones((self.gdf["value"][mask].values.size,), dtype=bool) + value = self.gdf["value"][mask][pos_vals].values + coord = np.stack( + [ + self.gdf["latitude"][mask][pos_vals].values, + self.gdf["longitude"][mask][pos_vals].values, + ], + axis=1, + ) + return u_plot.geo_scatter_from_array( + array_sub=value, + geo_coord=coord, + var_name=f"Value ({self.value_unit})", + title=title, + pop_name=pop_name, + buffer=buffer, + extend=extend, + proj=crs_epsg, + axes=axis, + figsize=figsize, + adapt_fontsize=adapt_fontsize, + **kwargs, + ) + + def plot_hexbin( + self, + mask=None, + ignore_zero=False, + pop_name=True, + buffer=0.0, + extend="neither", + axis=None, + figsize=(9, 13), + adapt_fontsize=True, + title=None, + **kwargs, + ): """Plot exposures geometry's value sum binned over Earth's map. An other function for the bins can be set through the key reduce_C_function. The plot will we projected according to the current crs. 
@@ -624,34 +736,51 @@ def plot_hexbin(self, mask=None, ignore_zero=False, pop_name=True, crs_epsg, _ = u_plot.get_transformation(self.crs) if title is None: title = self.description or "" - if 'reduce_C_function' not in kwargs: - kwargs['reduce_C_function'] = np.sum + if "reduce_C_function" not in kwargs: + kwargs["reduce_C_function"] = np.sum if mask is None: mask = np.ones((self.gdf.shape[0],), dtype=bool) if ignore_zero: - pos_vals = self.gdf['value'][mask].values > 0 + pos_vals = self.gdf["value"][mask].values > 0 else: - pos_vals = np.ones((self.gdf['value'][mask].values.size,), dtype=bool) - value = self.gdf['value'][mask][pos_vals].values - coord = np.stack([self.gdf['latitude'][mask][pos_vals].values, - self.gdf['longitude'][mask][pos_vals].values], axis=1) - return u_plot.geo_bin_from_array(array_sub=value, - geo_coord=coord, - var_name=f'Value ({self.value_unit})', - title=title, - pop_name=pop_name, - buffer=buffer, - extend=extend, - proj=crs_epsg, - axes=axis, - figsize=figsize, - adapt_fontsize=adapt_fontsize, - **kwargs) - - def plot_raster(self, res=None, raster_res=None, save_tiff=None, - raster_f=lambda x: np.log10((np.fmax(x + 1, 1))), - label='value (log10)', scheduler=None, axis=None, - figsize=(9, 13), fill=True, adapt_fontsize=True, **kwargs): + pos_vals = np.ones((self.gdf["value"][mask].values.size,), dtype=bool) + value = self.gdf["value"][mask][pos_vals].values + coord = np.stack( + [ + self.gdf["latitude"][mask][pos_vals].values, + self.gdf["longitude"][mask][pos_vals].values, + ], + axis=1, + ) + return u_plot.geo_bin_from_array( + array_sub=value, + geo_coord=coord, + var_name=f"Value ({self.value_unit})", + title=title, + pop_name=pop_name, + buffer=buffer, + extend=extend, + proj=crs_epsg, + axes=axis, + figsize=figsize, + adapt_fontsize=adapt_fontsize, + **kwargs, + ) + + def plot_raster( + self, + res=None, + raster_res=None, + save_tiff=None, + raster_f=lambda x: np.log10((np.fmax(x + 1, 1))), + label="value (log10)", + scheduler=None, + axis=None, + figsize=(9, 13), + fill=True, + adapt_fontsize=True, + **kwargs, + ): """Generate raster from points geometry and plot it using log10 scale `np.log10((np.fmax(raster+1, 1)))`. 
@@ -691,23 +820,35 @@ def plot_raster(self, res=None, raster_res=None, save_tiff=None, ------- matplotlib.figure.Figure, cartopy.mpl.geoaxes.GeoAxesSubplot """ - if self.meta and self.meta.get('height', 0) * self.meta.get('height', 0) == len(self.gdf): - raster = self.gdf['value'].values.reshape((self.meta['height'], - self.meta['width'])) + if self.meta and self.meta.get("height", 0) * self.meta.get("height", 0) == len( + self.gdf + ): + raster = self.gdf["value"].values.reshape( + (self.meta["height"], self.meta["width"]) + ) # check raster starts by upper left corner - if self.gdf['latitude'].values[0] < self.gdf['latitude'].values[-1]: + if self.gdf["latitude"].values[0] < self.gdf["latitude"].values[-1]: raster = np.flip(raster, axis=0) - if self.gdf['longitude'].values[0] > self.gdf['longitude'].values[-1]: - raise ValueError('Points are not ordered according to meta raster.') + if self.gdf["longitude"].values[0] > self.gdf["longitude"].values[-1]: + raise ValueError("Points are not ordered according to meta raster.") else: - raster, meta = u_coord.points_to_raster(self.gdf, ['value'], res, raster_res, scheduler) - raster = raster.reshape((meta['height'], meta['width'])) + raster, meta = u_coord.points_to_raster( + self.gdf, ["value"], res, raster_res, scheduler + ) + raster = raster.reshape((meta["height"], meta["width"])) # save tiff if save_tiff is not None: - with rasterio.open(save_tiff, 'w', driver='GTiff', - height=meta['height'], width=meta['width'], count=1, - dtype=np.float32, crs=self.crs, transform=meta['transform'] - ) as ras_tiff: + with rasterio.open( + save_tiff, + "w", + driver="GTiff", + height=meta["height"], + width=meta["width"], + count=1, + dtype=np.float32, + crs=self.crs, + transform=meta["transform"], + ) as ras_tiff: ras_tiff.write(raster.astype(np.float32), 1) # make plot proj_data, _ = u_plot.get_transformation(self.crs) @@ -715,28 +856,40 @@ def plot_raster(self, res=None, raster_res=None, save_tiff=None, if isinstance(proj_data, ccrs.PlateCarree): # use different projections for plot and data to shift the central lon in the plot xmin, ymin, xmax, ymax = u_coord.latlon_bounds( - self.gdf['latitude'].values, self.gdf['longitude'].values) + self.gdf["latitude"].values, self.gdf["longitude"].values + ) proj_plot = ccrs.PlateCarree(central_longitude=0.5 * (xmin + xmax)) else: - xmin, ymin, xmax, ymax = (self.gdf['longitude'].min(), self.gdf['latitude'].min(), - self.gdf['longitude'].max(), self.gdf['latitude'].max()) + xmin, ymin, xmax, ymax = ( + self.gdf["longitude"].min(), + self.gdf["latitude"].min(), + self.gdf["longitude"].max(), + self.gdf["latitude"].max(), + ) if not axis: - _, axis, fontsize = u_plot.make_map(proj=proj_plot, figsize=figsize, - adapt_fontsize=adapt_fontsize) + _, axis, fontsize = u_plot.make_map( + proj=proj_plot, figsize=figsize, adapt_fontsize=adapt_fontsize + ) else: fontsize = None - cbar_ax = make_axes_locatable(axis).append_axes('right', size="6.5%", - pad=0.1, axes_class=plt.Axes) + cbar_ax = make_axes_locatable(axis).append_axes( + "right", size="6.5%", pad=0.1, axes_class=plt.Axes + ) axis.set_extent((xmin, xmax, ymin, ymax), crs=proj_data) u_plot.add_shapes(axis) if not fill: raster = np.where(raster == 0, np.nan, raster) raster_f = lambda x: np.log10((np.maximum(x + 1, 1))) - if 'cmap' not in kwargs: - kwargs['cmap'] = CMAP_RASTER - imag = axis.imshow(raster_f(raster), **kwargs, origin='upper', - extent=(xmin, xmax, ymin, ymax), transform=proj_data) + if "cmap" not in kwargs: + kwargs["cmap"] = CMAP_RASTER + imag = 
axis.imshow( + raster_f(raster), + **kwargs, + origin="upper", + extent=(xmin, xmax, ymin, ymax), + transform=proj_data, + ) cbar = plt.colorbar(imag, cax=cbar_ax, label=label) plt.colorbar(imag, cax=cbar_ax, label=label) plt.tight_layout() @@ -748,9 +901,18 @@ def plot_raster(self, res=None, raster_res=None, save_tiff=None, item.set_fontsize(fontsize) return axis - def plot_basemap(self, mask=None, ignore_zero=False, pop_name=True, - buffer=0.0, extend='neither', zoom=10, - url=ctx.providers.CartoDB.Positron, axis=None, **kwargs): + def plot_basemap( + self, + mask=None, + ignore_zero=False, + pop_name=True, + buffer=0.0, + extend="neither", + zoom=10, + url=ctx.providers.CartoDB.Positron, + axis=None, + **kwargs, + ): """Scatter points over satellite image using contextily Parameters @@ -783,13 +945,21 @@ def plot_basemap(self, mask=None, ignore_zero=False, pop_name=True, ------- matplotlib.figure.Figure, cartopy.mpl.geoaxes.GeoAxesSubplot """ - if 'geometry' not in self.gdf: + if "geometry" not in self.gdf: self.set_geometry_points() crs_ori = self.crs self.to_crs(epsg=3857, inplace=True) - axis = self.plot_scatter(mask, ignore_zero, pop_name, buffer, - extend, shapes=False, axis=axis, **kwargs) - ctx.add_basemap(axis, zoom, source=url, origin='upper') + axis = self.plot_scatter( + mask, + ignore_zero, + pop_name, + buffer, + extend, + shapes=False, + axis=axis, + **kwargs, + ) + ctx.add_basemap(axis, zoom, source=url, origin="upper") axis.set_axis_off() self.to_crs(crs_ori, inplace=True) return axis @@ -802,8 +972,8 @@ def write_hdf5(self, file_name): file_name : str (path and) file name to write to. """ - LOGGER.info('Writing %s', file_name) - store = pd.HDFStore(file_name, mode='w') + LOGGER.info("Writing %s", file_name) + store = pd.HDFStore(file_name, mode="w") pandas_df = pd.DataFrame(self.gdf) for col in pandas_df.columns: if str(pandas_df[col].dtype) == "geometry": @@ -813,19 +983,21 @@ def write_hdf5(self, file_name): with warnings.catch_warnings(): warnings.simplefilter("ignore", category=pd.errors.PerformanceWarning) # Write dataframe - store.put('exposures', pandas_df) + store.put("exposures", pandas_df) var_meta = {} for var in type(self)._metadata: var_meta[var] = getattr(self, var) - store.get_storer('exposures').attrs.metadata = var_meta + store.get_storer("exposures").attrs.metadata = var_meta store.close() def read_hdf5(self, *args, **kwargs): """This function is deprecated, use Exposures.from_hdf5 instead.""" - LOGGER.warning("The use of Exposures.read_hdf5 is deprecated." - "Use Exposures.from_hdf5 instead.") + LOGGER.warning( + "The use of Exposures.read_hdf5 is deprecated." + "Use Exposures.from_hdf5 instead." 
+ ) self.__dict__ = Exposures.from_hdf5(*args, **kwargs).__dict__ @classmethod @@ -844,28 +1016,32 @@ def from_hdf5(cls, file_name): ------- Exposures """ - LOGGER.info('Reading %s', file_name) + LOGGER.info("Reading %s", file_name) if not Path(file_name).is_file(): raise FileNotFoundError(str(file_name)) - with pd.HDFStore(file_name, mode='r') as store: - metadata = store.get_storer('exposures').attrs.metadata + with pd.HDFStore(file_name, mode="r") as store: + metadata = store.get_storer("exposures").attrs.metadata # in previous versions of CLIMADA and/or geopandas, the CRS was stored in '_crs'/'crs' - crs = metadata.get('crs', metadata.get('_crs')) - if crs is None and metadata.get('meta'): - crs = metadata['meta'].get('crs') - exp = cls(store['exposures'], crs=crs) + crs = metadata.get("crs", metadata.get("_crs")) + if crs is None and metadata.get("meta"): + crs = metadata["meta"].get("crs") + exp = cls(store["exposures"], crs=crs) for key, val in metadata.items(): - if key in type(exp)._metadata: # pylint: disable=protected-access + if key in type(exp)._metadata: # pylint: disable=protected-access setattr(exp, key, val) - if key == 'tag': # for backwards compatitbility with climada <= 3.x - descriptions = [u_hdf5.to_string(x) for x in getattr(val, 'description', [])] + if key == "tag": # for backwards compatitbility with climada <= 3.x + descriptions = [ + u_hdf5.to_string(x) for x in getattr(val, "description", []) + ] exp.description = "\n".join(descriptions) if descriptions else None return exp def read_mat(self, *args, **kwargs): """This function is deprecated, use Exposures.from_mat instead.""" - LOGGER.warning("The use of Exposures.read_mat is deprecated." - "Use Exposures.from_mat instead.") + LOGGER.warning( + "The use of Exposures.read_mat is deprecated." + "Use Exposures.from_mat instead." 
+ ) self.__dict__ = Exposures.from_mat(*args, **kwargs).__dict__ @classmethod @@ -884,25 +1060,26 @@ def from_mat(cls, file_name, var_names=None): ------- Exposures """ - LOGGER.info('Reading %s', file_name) + LOGGER.info("Reading %s", file_name) if not var_names: var_names = DEF_VAR_MAT data = u_hdf5.read(file_name) try: - data = data[var_names['sup_field_name']] + data = data[var_names["sup_field_name"]] except KeyError: pass try: - data = data[var_names['field_name']] + data = data[var_names["field_name"]] exposures = dict() _read_mat_obligatory(exposures, data, var_names) _read_mat_optional(exposures, data, var_names) except KeyError as var_err: - raise KeyError(f"Variable not in MAT file: {var_names.get('field_name')}")\ - from var_err + raise KeyError( + f"Variable not in MAT file: {var_names.get('field_name')}" + ) from var_err exp = cls(data=exposures) _read_mat_metadata(exp, data, file_name, var_names) @@ -942,7 +1119,7 @@ def to_crs(self, crs=None, epsg=None, inplace=False): if inplace: self.gdf.to_crs(crs, epsg, True) - self.meta['crs'] = crs or f'EPSG:{epsg}' + self.meta["crs"] = crs or f"EPSG:{epsg}" self.set_lat_lon() return None @@ -966,16 +1143,13 @@ def copy(self, deep=True): Exposures """ gdf = self.gdf.copy(deep=deep) - metadata = dict([ - (md, copy.deepcopy(self.__dict__[md])) for md in type(self)._metadata - ]) - metadata['crs'] = self.crs - return type(self)( - gdf, - **metadata + metadata = dict( + [(md, copy.deepcopy(self.__dict__[md])) for md in type(self)._metadata] ) + metadata["crs"] = self.crs + return type(self)(gdf, **metadata) - def write_raster(self, file_name, value_name='value', scheduler=None): + def write_raster(self, file_name, value_name="value", scheduler=None): """Write value data into raster file with GeoTiff format Parameters @@ -983,17 +1157,20 @@ def write_raster(self, file_name, value_name='value', scheduler=None): file_name : str name output file in tif format """ - if self.meta and self.meta['height'] * self.meta['width'] == len(self.gdf): - raster = self.gdf[value_name].values.reshape((self.meta['height'], - self.meta['width'])) + if self.meta and self.meta["height"] * self.meta["width"] == len(self.gdf): + raster = self.gdf[value_name].values.reshape( + (self.meta["height"], self.meta["width"]) + ) # check raster starts by upper left corner - if self.gdf['latitude'].values[0] < self.gdf['latitude'].values[-1]: + if self.gdf["latitude"].values[0] < self.gdf["latitude"].values[-1]: raster = np.flip(raster, axis=0) - if self.gdf['longitude'].values[0] > self.gdf['longitude'].values[-1]: - raise ValueError('Points are not ordered according to meta raster.') + if self.gdf["longitude"].values[0] > self.gdf["longitude"].values[-1]: + raise ValueError("Points are not ordered according to meta raster.") u_coord.write_raster(file_name, raster, self.meta) else: - raster, meta = u_coord.points_to_raster(self.gdf, [value_name], scheduler=scheduler) + raster, meta = u_coord.points_to_raster( + self.gdf, [value_name], scheduler=scheduler + ) u_coord.write_raster(file_name, raster, meta) @staticmethod @@ -1015,12 +1192,10 @@ def concat(exposures_list): exp = Exposures(exp) exp.check() - df_list = [ - ex.gdf if isinstance(ex, Exposures) else ex - for ex in exposures_list - ] + df_list = [ex.gdf if isinstance(ex, Exposures) else ex for ex in exposures_list] crss = [ - ex.crs for ex in exposures_list + ex.crs + for ex in exposures_list if isinstance(ex, (Exposures, GeoDataFrame)) and hasattr(ex, "crs") and ex.crs is not None @@ -1032,9 +1207,9 @@ def 
concat(exposures_list): else: crs = None - exp.set_gdf(GeoDataFrame( - pd.concat(df_list, ignore_index=True, sort=False) - ), crs=crs) + exp.set_gdf( + GeoDataFrame(pd.concat(df_list, ignore_index=True, sort=False)), crs=crs + ) return exp @@ -1060,11 +1235,10 @@ def centroids_total_value(self, hazard): a centroids is assigned """ - nz_mask = ( - (self.gdf['value'].values > 0) - & (self.gdf[hazard.centr_exp_col].values >= 0) + nz_mask = (self.gdf["value"].values > 0) & ( + self.gdf[hazard.centr_exp_col].values >= 0 ) - return np.sum(self.gdf['value'].values[nz_mask]) + return np.sum(self.gdf["value"].values[nz_mask]) def affected_total_value( self, @@ -1109,7 +1283,7 @@ def affected_total_value( """ self.assign_centroids(hazard=hazard, overwrite=overwrite_assigned_centroids) assigned_centroids = self.gdf[hazard.centr_exp_col] - nz_mask = (self.gdf['value'].values > 0) & (assigned_centroids.values >= 0) + nz_mask = (self.gdf["value"].values > 0) & (assigned_centroids.values >= 0) cents = np.unique(assigned_centroids[nz_mask]) cent_with_inten_above_thres = ( hazard.intensity[:, cents].max(axis=0) > threshold_affected @@ -1117,7 +1291,7 @@ def affected_total_value( above_thres_mask = np.isin( self.gdf[hazard.centr_exp_col].values, cents[cent_with_inten_above_thres] ) - return np.sum(self.gdf['value'].values[above_thres_mask]) + return np.sum(self.gdf["value"].values[above_thres_mask]) def add_sea(exposures, sea_res, scheduler=None): @@ -1140,15 +1314,18 @@ def add_sea(exposures, sea_res, scheduler=None): ------- Exposures """ - LOGGER.info("Adding sea at %s km resolution and %s km distance from coast.", - str(sea_res[1]), str(sea_res[0])) + LOGGER.info( + "Adding sea at %s km resolution and %s km distance from coast.", + str(sea_res[1]), + str(sea_res[0]), + ) sea_res = (sea_res[0] / ONE_LAT_KM, sea_res[1] / ONE_LAT_KM) - min_lat = max(-90, float(exposures.gdf['latitude'].min()) - sea_res[0]) - max_lat = min(90, float(exposures.gdf['latitude'].max()) + sea_res[0]) - min_lon = max(-180, float(exposures.gdf['longitude'].min()) - sea_res[0]) - max_lon = min(180, float(exposures.gdf['longitude'].max()) + sea_res[0]) + min_lat = max(-90, float(exposures.gdf["latitude"].min()) - sea_res[0]) + max_lat = min(90, float(exposures.gdf["latitude"].max()) + sea_res[0]) + min_lon = max(-180, float(exposures.gdf["longitude"].min()) - sea_res[0]) + max_lon = min(180, float(exposures.gdf["longitude"].max()) + sea_res[0]) lat_arr = np.arange(min_lat, max_lat + sea_res[1], sea_res[1]) lon_arr = np.arange(min_lon, max_lon + sea_res[1], sea_res[1]) @@ -1158,17 +1335,20 @@ def add_sea(exposures, sea_res, scheduler=None): on_land = ~u_coord.coord_on_land(lat_mgrid, lon_mgrid) sea_exp_gdf = GeoDataFrame() - sea_exp_gdf['latitude'] = lat_mgrid[on_land] - sea_exp_gdf['longitude'] = lon_mgrid[on_land] - sea_exp_gdf['region_id'] = np.zeros(sea_exp_gdf['latitude'].size, int) - 1 + sea_exp_gdf["latitude"] = lat_mgrid[on_land] + sea_exp_gdf["longitude"] = lon_mgrid[on_land] + sea_exp_gdf["region_id"] = np.zeros(sea_exp_gdf["latitude"].size, int) - 1 - if 'geometry' in exposures.gdf.columns: - u_coord.set_df_geometry_points(sea_exp_gdf, crs=exposures.crs, scheduler=scheduler) + if "geometry" in exposures.gdf.columns: + u_coord.set_df_geometry_points( + sea_exp_gdf, crs=exposures.crs, scheduler=scheduler + ) for var_name in exposures.gdf.columns: - if var_name not in ('latitude', 'longitude', 'region_id', 'geometry'): - sea_exp_gdf[var_name] = np.zeros(sea_exp_gdf['latitude'].size, - exposures.gdf[var_name].dtype) + if 
var_name not in ("latitude", "longitude", "region_id", "geometry"): + sea_exp_gdf[var_name] = np.zeros( + sea_exp_gdf["latitude"].size, exposures.gdf[var_name].dtype + ) return Exposures( pd.concat([exposures.gdf, sea_exp_gdf], ignore_index=True, sort=False), @@ -1182,41 +1362,46 @@ def add_sea(exposures, sea_res, scheduler=None): def _read_mat_obligatory(exposures, data, var_names): """Fill obligatory variables.""" - exposures['value'] = np.squeeze(data[var_names['var_name']['val']]) + exposures["value"] = np.squeeze(data[var_names["var_name"]["val"]]) - exposures['latitude'] = data[var_names['var_name']['lat']].reshape(-1) - exposures['longitude'] = data[var_names['var_name']['lon']].reshape(-1) + exposures["latitude"] = data[var_names["var_name"]["lat"]].reshape(-1) + exposures["longitude"] = data[var_names["var_name"]["lon"]].reshape(-1) - exposures[INDICATOR_IMPF] = np.squeeze( - data[var_names['var_name']['impf']]).astype(int, copy=False) + exposures[INDICATOR_IMPF] = np.squeeze(data[var_names["var_name"]["impf"]]).astype( + int, copy=False + ) def _read_mat_optional(exposures, data, var_names): """Fill optional parameters.""" try: - exposures['deductible'] = np.squeeze(data[var_names['var_name']['ded']]) + exposures["deductible"] = np.squeeze(data[var_names["var_name"]["ded"]]) except KeyError: pass try: - exposures['cover'] = np.squeeze(data[var_names['var_name']['cov']]) + exposures["cover"] = np.squeeze(data[var_names["var_name"]["cov"]]) except KeyError: pass try: - exposures['category_id'] = \ - np.squeeze(data[var_names['var_name']['cat']]).astype(int, copy=False) + exposures["category_id"] = np.squeeze( + data[var_names["var_name"]["cat"]] + ).astype(int, copy=False) except KeyError: pass try: - exposures['region_id'] = \ - np.squeeze(data[var_names['var_name']['reg']]).astype(int, copy=False) + exposures["region_id"] = np.squeeze(data[var_names["var_name"]["reg"]]).astype( + int, copy=False + ) except KeyError: pass try: - assigned = np.squeeze(data[var_names['var_name']['ass']]).astype(int, copy=False) + assigned = np.squeeze(data[var_names["var_name"]["ass"]]).astype( + int, copy=False + ) if assigned.size > 0: exposures[INDICATOR_CENTR] = assigned except KeyError: @@ -1226,12 +1411,13 @@ def _read_mat_optional(exposures, data, var_names): def _read_mat_metadata(exposures, data, file_name, var_names): """Fill metadata in DataFrame object""" try: - exposures.ref_year = int(np.squeeze(data[var_names['var_name']['ref']])) + exposures.ref_year = int(np.squeeze(data[var_names["var_name"]["ref"]])) except KeyError: exposures.ref_year = DEF_REF_YEAR try: exposures.value_unit = u_hdf5.get_str_from_ref( - file_name, data[var_names['var_name']['uni']][0][0]) + file_name, data[var_names["var_name"]["uni"]][0][0] + ) except KeyError: exposures.value_unit = DEF_VALUE_UNIT diff --git a/climada/entity/exposures/litpop/__init__.py b/climada/entity/exposures/litpop/__init__.py index 322ef3c75..0724ad4b9 100755 --- a/climada/entity/exposures/litpop/__init__.py +++ b/climada/entity/exposures/litpop/__init__.py @@ -18,7 +18,7 @@ init litpop """ -from .litpop import * + from .gpw_population import * +from .litpop import * from .nightlight import * - diff --git a/climada/entity/exposures/litpop/gpw_population.py b/climada/entity/exposures/litpop/gpw_population.py index 51e7d35ae..fbb34b464 100644 --- a/climada/entity/exposures/litpop/gpw_population.py +++ b/climada/entity/exposures/litpop/gpw_population.py @@ -18,19 +18,21 @@ Import data from Global Population of the World (GPW) datasets """ + 
import logging -import rasterio import numpy as np +import rasterio -from climada.util.constants import SYSTEM_DIR from climada import CONFIG +from climada.util.constants import SYSTEM_DIR LOGGER = logging.getLogger(__name__) -def load_gpw_pop_shape(geometry, reference_year, gpw_version, - data_dir=SYSTEM_DIR, layer=0, verbose=True): +def load_gpw_pop_shape( + geometry, reference_year, gpw_version, data_dir=SYSTEM_DIR, layer=0, verbose=True +): """Read gridded population data from TIFF and crop to given shape(s). Note: A (free) NASA Earthdata login is necessary to download the data. @@ -73,22 +75,29 @@ def load_gpw_pop_shape(geometry, reference_year, gpw_version, """ # check whether GPW input file exists and get file path - file_path = get_gpw_file_path(gpw_version, reference_year, data_dir=data_dir, verbose=verbose) + file_path = get_gpw_file_path( + gpw_version, reference_year, data_dir=data_dir, verbose=verbose + ) # open TIFF and extract cropped data from input file: - with rasterio.open(file_path, 'r') as src: + with rasterio.open(file_path, "r") as src: global_transform = src.transform - pop_data, out_transform = rasterio.mask.mask(src, [geometry], crop=True, nodata=0) + pop_data, out_transform = rasterio.mask.mask( + src, [geometry], crop=True, nodata=0 + ) # extract and update meta data for cropped data and close src: meta = src.meta - meta.update({ - "driver": "GTiff", - "height": pop_data.shape[1], - "width": pop_data.shape[2], - "transform": out_transform, - }) - return pop_data[layer,:,:], meta, global_transform + meta.update( + { + "driver": "GTiff", + "height": pop_data.shape[1], + "width": pop_data.shape[2], + "transform": out_transform, + } + ) + return pop_data[layer, :, :], meta, global_transform + def get_gpw_file_path(gpw_version, reference_year, data_dir=None, verbose=True): """Check available GPW population data versions and year closest to @@ -118,24 +127,36 @@ def get_gpw_file_path(gpw_version, reference_year, data_dir=None, verbose=True): data_dir = SYSTEM_DIR # get years available in GPW data from CONFIG and convert to array: - years_available = np.array([ - year.int() for year in CONFIG.exposures.litpop.gpw_population.years_available.list() - ]) + years_available = np.array( + [ + year.int() + for year in CONFIG.exposures.litpop.gpw_population.years_available.list() + ] + ) # find closest year to reference_year with data available: year = years_available[np.abs(years_available - reference_year).argmin()] if verbose and year != reference_year: - LOGGER.warning('Reference year: %i. Using nearest available year for GPW data: %i', - reference_year, year) + LOGGER.warning( + "Reference year: %i. 
Using nearest available year for GPW data: %i", + reference_year, + year, + ) # check if file is available for given GPW version, construct GPW file path from CONFIG: # if available, return full path to file: - gpw_dirname = CONFIG.exposures.litpop.gpw_population.dirname_gpw.str() % (gpw_version, year) - gpw_filename = CONFIG.exposures.litpop.gpw_population.filename_gpw.str() % (gpw_version, year) + gpw_dirname = CONFIG.exposures.litpop.gpw_population.dirname_gpw.str() % ( + gpw_version, + year, + ) + gpw_filename = CONFIG.exposures.litpop.gpw_population.filename_gpw.str() % ( + gpw_version, + year, + ) for file_path in [data_dir / gpw_filename, data_dir / gpw_dirname / gpw_filename]: if file_path.is_file(): if verbose: - LOGGER.info('GPW Version v4.%2i', gpw_version) + LOGGER.info("GPW Version v4.%2i", gpw_version) return file_path # if the file was not found, an exception is raised with instructions on how to obtain it @@ -146,7 +167,7 @@ def get_gpw_file_path(gpw_version, reference_year, data_dir=None, verbose=True): f"{gpw_dirname}.zip" ) raise FileNotFoundError( - f'The file {file_path} could not be found. Please download the file first or choose a' - f' different folder. The data can be downloaded from {sedac_browse_url}, e.g.,' - f' {sedac_file_url} (Free NASA Earthdata login required).' + f"The file {file_path} could not be found. Please download the file first or choose a" + f" different folder. The data can be downloaded from {sedac_browse_url}, e.g.," + f" {sedac_file_url} (Free NASA Earthdata login required)." ) diff --git a/climada/entity/exposures/litpop/litpop.py b/climada/entity/exposures/litpop/litpop.py index 7fe2047bd..372e58533 100644 --- a/climada/entity/exposures/litpop/litpop.py +++ b/climada/entity/exposures/litpop/litpop.py @@ -17,28 +17,31 @@ --- Define LitPop class. """ + import logging from pathlib import Path + +import geopandas import numpy as np +import pandas as pd import rasterio -import geopandas -from shapefile import Shape import shapely -import pandas as pd +from shapefile import Shape import climada.util.coordinates as u_coord import climada.util.finance as u_fin - -from climada.entity.exposures.litpop import nightlight as nl_util +from climada import CONFIG +from climada.entity.exposures.base import DEF_REF_YEAR, INDICATOR_IMPF, Exposures from climada.entity.exposures.litpop import gpw_population as pop_util -from climada.entity.exposures.base import Exposures, INDICATOR_IMPF, DEF_REF_YEAR +from climada.entity.exposures.litpop import nightlight as nl_util from climada.util.constants import SYSTEM_DIR -from climada import CONFIG + LOGGER = logging.getLogger(__name__) GPW_VERSION = CONFIG.exposures.litpop.gpw_population.gpw_version.int() """Version of Gridded Population of the World (GPW) input data. Check for updates.""" + class LitPop(Exposures): """ Holds geopandas GeoDataFrame with metada and columns (pd.Series) defined in @@ -65,19 +68,30 @@ class LitPop(Exposures): Version number of GPW population data, e.g. 11 for v4.11. The default is defined in GPW_VERSION. """ - _metadata = Exposures._metadata + ['exponents', 'fin_mode', 'gpw_version'] + + _metadata = Exposures._metadata + ["exponents", "fin_mode", "gpw_version"] def set_countries(self, *args, **kwargs): """This function is deprecated, use LitPop.from_countries instead.""" - LOGGER.warning("The use of LitPop.set_countries is deprecated." - "Use LitPop.from_countries instead.") + LOGGER.warning( + "The use of LitPop.set_countries is deprecated." + "Use LitPop.from_countries instead." 
+ ) self.__dict__ = LitPop.from_countries(*args, **kwargs).__dict__ @classmethod - def from_countries(cls, countries, res_arcsec=30, exponents=(1,1), - fin_mode='pc', total_values=None, admin1_calc=False, - reference_year=DEF_REF_YEAR, gpw_version=GPW_VERSION, - data_dir=SYSTEM_DIR): + def from_countries( + cls, + countries, + res_arcsec=30, + exponents=(1, 1), + fin_mode="pc", + total_values=None, + admin1_calc=False, + reference_year=DEF_REF_YEAR, + gpw_version=GPW_VERSION, + data_dir=SYSTEM_DIR, + ): """Init new LitPop exposure object for a list of countries (admin 0). Sets attributes `ref_year`, `crs`, `value`, `geometry`, `meta`, @@ -138,53 +152,74 @@ def from_countries(cls, countries, res_arcsec=30, exponents=(1,1), LitPop instance with exposure for given countries """ if isinstance(countries, (int, str)): - countries = [countries] # for backward compatibility + countries = [countries] # for backward compatibility - if total_values is None: # init list with total values per countries + if total_values is None: # init list with total values per countries total_values = [None] * len(countries) elif len(total_values) != len(countries): - raise ValueError("'countries' and 'total_values' must be lists of same length") + raise ValueError( + "'countries' and 'total_values' must be lists of same length" + ) # litpop_list is initiated, a list containing one Exposure instance per # country and None for countries that could not be identified: - if admin1_calc: # each admin 1 region is initiated separately, - # with total value share based on subnational GDP share. - # This requires GRP (Gross Regional Product) data in the - # GSDP data folder. - litpop_list = [_calc_admin1_one_country(country, res_arcsec, exponents, - fin_mode, tot_value, reference_year, - gpw_version, data_dir, - ) - for tot_value, country in zip(total_values, countries)] - - else: # else, as default, country is initiated as a whole: + if admin1_calc: # each admin 1 region is initiated separately, + # with total value share based on subnational GDP share. + # This requires GRP (Gross Regional Product) data in the + # GSDP data folder. + litpop_list = [ + _calc_admin1_one_country( + country, + res_arcsec, + exponents, + fin_mode, + tot_value, + reference_year, + gpw_version, + data_dir, + ) + for tot_value, country in zip(total_values, countries) + ] + + else: # else, as default, country is initiated as a whole: # loop over countries: litpop is initiated for each individual polygon # within each country and combined at the end. 
- litpop_list = \ - [cls._from_country(country, - res_arcsec=res_arcsec, - exponents=exponents, - fin_mode=fin_mode, - total_value=total_values[idc], - reference_year=reference_year, - gpw_version=gpw_version, - data_dir=data_dir) - for idc, country in enumerate(countries)] + litpop_list = [ + cls._from_country( + country, + res_arcsec=res_arcsec, + exponents=exponents, + fin_mode=fin_mode, + total_value=total_values[idc], + reference_year=reference_year, + gpw_version=gpw_version, + data_dir=data_dir, + ) + for idc, country in enumerate(countries) + ] # make lists of countries with Exposure initaited and those ignored: - countries_in = \ - [country for lp, country in zip(litpop_list, countries) if lp is not None] - countries_out = \ - [country for lp, country in zip(litpop_list, countries) if lp is None] + countries_in = [ + country for lp, country in zip(litpop_list, countries) if lp is not None + ] + countries_out = [ + country for lp, country in zip(litpop_list, countries) if lp is None + ] if not countries_in: - raise ValueError('No valid country identified in %s, aborting.' % countries) + raise ValueError("No valid country identified in %s, aborting." % countries) litpop_list = [exp for exp in litpop_list if exp is not None] if countries_out: - LOGGER.warning('Some countries could not be identified and are ignored: ' - '%s. Litpop only initiated for: %s', countries_out, countries_in) + LOGGER.warning( + "Some countries could not be identified and are ignored: " + "%s. Litpop only initiated for: %s", + countries_out, + countries_in, + ) - description = (f'LitPop Exposure for {countries_in} at {res_arcsec} as,' - f' year: {reference_year}, financial mode: {fin_mode},' - f' exp: {exponents}, admin1_calc: {admin1_calc}') + description = ( + f"LitPop Exposure for {countries_in} at {res_arcsec} as," + f" year: {reference_year}, financial mode: {fin_mode}," + f" exp: {exponents}, admin1_calc: {admin1_calc}" + ) exp = cls( data=Exposures.concat(litpop_list).gdf, @@ -194,36 +229,51 @@ def from_countries(cls, countries, res_arcsec=30, exponents=(1,1), exponents=exponents, gpw_version=gpw_version, fin_mode=fin_mode, - description=description + description=description, ) try: rows, cols, ras_trans = u_coord.pts_to_raster_meta( - (exp.gdf['longitude'].min(), exp.gdf['latitude'].min(), - exp.gdf['longitude'].max(), exp.gdf['latitude'].max()), - u_coord.get_resolution(exp.gdf['longitude'], exp.gdf['latitude'])) + ( + exp.gdf["longitude"].min(), + exp.gdf["latitude"].min(), + exp.gdf["longitude"].max(), + exp.gdf["latitude"].max(), + ), + u_coord.get_resolution(exp.gdf["longitude"], exp.gdf["latitude"]), + ) exp.meta = { - 'width': cols, - 'height': rows, - 'crs': exp.crs, - 'transform': ras_trans, + "width": cols, + "height": rows, + "crs": exp.crs, + "transform": ras_trans, } except ValueError: - LOGGER.warning('Could not write attribute meta, because exposure' - ' has only 1 data point') - exp.meta = {'crs': exp.crs} + LOGGER.warning( + "Could not write attribute meta, because exposure" + " has only 1 data point" + ) + exp.meta = {"crs": exp.crs} exp.check() return exp def set_nightlight_intensity(self, *args, **kwargs): """This function is deprecated, use LitPop.from_nightlight_intensity instead.""" - LOGGER.warning("The use of LitPop.set_nightlight_intensity is deprecated." - "Use LitPop.from_nightlight_intensity instead.") + LOGGER.warning( + "The use of LitPop.set_nightlight_intensity is deprecated." + "Use LitPop.from_nightlight_intensity instead." 
+ ) self.__dict__ = LitPop.from_nightlight_intensity(*args, **kwargs).__dict__ @classmethod - def from_nightlight_intensity(cls, countries=None, shape=None, res_arcsec=15, - reference_year=DEF_REF_YEAR, data_dir=SYSTEM_DIR): + def from_nightlight_intensity( + cls, + countries=None, + shape=None, + res_arcsec=15, + reference_year=DEF_REF_YEAR, + data_dir=SYSTEM_DIR, + ): """ Wrapper around `from_countries` / `from_shape`. @@ -258,33 +308,56 @@ def from_nightlight_intensity(cls, countries=None, shape=None, res_arcsec=15, if countries is None and shape is None: raise ValueError("Either `countries` or `shape` required. Aborting.") if countries is not None and shape is not None: - raise ValueError("Not allowed to set both `countries` and `shape`. Aborting.") + raise ValueError( + "Not allowed to set both `countries` and `shape`. Aborting." + ) if countries is not None: - exp = cls.from_countries(countries, res_arcsec=res_arcsec, - exponents=(1,0), fin_mode='none', - reference_year=reference_year, gpw_version=GPW_VERSION, - data_dir=data_dir) + exp = cls.from_countries( + countries, + res_arcsec=res_arcsec, + exponents=(1, 0), + fin_mode="none", + reference_year=reference_year, + gpw_version=GPW_VERSION, + data_dir=data_dir, + ) else: - exp = cls.from_shape(shape, None, res_arcsec=res_arcsec, - exponents=(1,0), value_unit='', - reference_year=reference_year, - gpw_version=GPW_VERSION, data_dir=SYSTEM_DIR) - LOGGER.warning("Note: set_nightlight_intensity sets values to raw nightlight intensity, " - "not to USD. " - "To disaggregate asset value proportionally to nightlights^m, " - "call from_countries or from_shape with exponents=(m,0).") + exp = cls.from_shape( + shape, + None, + res_arcsec=res_arcsec, + exponents=(1, 0), + value_unit="", + reference_year=reference_year, + gpw_version=GPW_VERSION, + data_dir=SYSTEM_DIR, + ) + LOGGER.warning( + "Note: set_nightlight_intensity sets values to raw nightlight intensity, " + "not to USD. " + "To disaggregate asset value proportionally to nightlights^m, " + "call from_countries or from_shape with exponents=(m,0)." + ) return exp def set_population(self, *args, **kwargs): """This function is deprecated, use LitPop.from_population instead.""" - LOGGER.warning("The use of LitPop.set_population is deprecated." - "Use LitPop.from_population instead.") + LOGGER.warning( + "The use of LitPop.set_population is deprecated." + "Use LitPop.from_population instead." + ) self.__dict__ = LitPop.from_population(*args, **kwargs).__dict__ @classmethod - def from_population(cls, countries=None, shape=None, res_arcsec=30, - reference_year=DEF_REF_YEAR, gpw_version=GPW_VERSION, - data_dir=SYSTEM_DIR): + def from_population( + cls, + countries=None, + shape=None, + res_arcsec=30, + reference_year=DEF_REF_YEAR, + gpw_version=GPW_VERSION, + data_dir=SYSTEM_DIR, + ): """ Wrapper around `from_countries` / `from_shape`. @@ -321,28 +394,53 @@ def from_population(cls, countries=None, shape=None, res_arcsec=30, if countries is None and shape is None: raise ValueError("Either `countries` or `shape` required. Aborting.") if countries is not None and shape is not None: - raise ValueError("Not allowed to set both `countries` and `shape`. Aborting.") + raise ValueError( + "Not allowed to set both `countries` and `shape`. Aborting." 
+ ) if countries is not None: - exp = cls.from_countries(countries, res_arcsec=res_arcsec, - exponents=(0,1), fin_mode='pop', - reference_year=reference_year, gpw_version=gpw_version, - data_dir=data_dir) + exp = cls.from_countries( + countries, + res_arcsec=res_arcsec, + exponents=(0, 1), + fin_mode="pop", + reference_year=reference_year, + gpw_version=gpw_version, + data_dir=data_dir, + ) else: - exp = cls.from_shape(shape, None, res_arcsec=res_arcsec, exponents=(0,1), - value_unit='people', reference_year=reference_year, - gpw_version=gpw_version, data_dir=data_dir) + exp = cls.from_shape( + shape, + None, + res_arcsec=res_arcsec, + exponents=(0, 1), + value_unit="people", + reference_year=reference_year, + gpw_version=gpw_version, + data_dir=data_dir, + ) return exp def set_custom_shape_from_countries(self, *args, **kwargs): """This function is deprecated, use LitPop.from_shape_and_countries instead.""" - LOGGER.warning("The use of LitPop.set_custom_shape_from_countries is deprecated." - "Use LitPop.from_shape_and_countries instead.") + LOGGER.warning( + "The use of LitPop.set_custom_shape_from_countries is deprecated." + "Use LitPop.from_shape_and_countries instead." + ) self.__dict__ = LitPop.from_shape_and_countries(*args, **kwargs).__dict__ @classmethod - def from_shape_and_countries(cls, shape, countries, res_arcsec=30, exponents=(1,1), - fin_mode='pc', admin1_calc=False, reference_year=DEF_REF_YEAR, - gpw_version=GPW_VERSION, data_dir=SYSTEM_DIR): + def from_shape_and_countries( + cls, + shape, + countries, + res_arcsec=30, + exponents=(1, 1), + fin_mode="pc", + admin1_calc=False, + reference_year=DEF_REF_YEAR, + gpw_version=GPW_VERSION, + data_dir=SYSTEM_DIR, + ): """ create LitPop exposure for `country` and then crop to given shape. @@ -398,62 +496,89 @@ def from_shape_and_countries(cls, shape, countries, res_arcsec=30, exponents=(1, The exposure LitPop within shape """ # init countries' exposure: - exp = cls.from_countries(countries, res_arcsec=res_arcsec, exponents=exponents, - fin_mode=fin_mode, reference_year=reference_year, - gpw_version=gpw_version, data_dir=data_dir) + exp = cls.from_countries( + countries, + res_arcsec=res_arcsec, + exponents=exponents, + fin_mode=fin_mode, + reference_year=reference_year, + gpw_version=gpw_version, + data_dir=data_dir, + ) if isinstance(shape, Shape): # get gdf with geometries of points within shape: - shape_gdf, _ = _get_litpop_single_polygon(shape, reference_year, - res_arcsec, data_dir, - gpw_version, exponents, - ) + shape_gdf, _ = _get_litpop_single_polygon( + shape, + reference_year, + res_arcsec, + data_dir, + gpw_version, + exponents, + ) shape_gdf = shape_gdf.drop( - columns=shape_gdf.columns[shape_gdf.columns != 'geometry']) + columns=shape_gdf.columns[shape_gdf.columns != "geometry"] + ) # extract gdf with data points within shape: - gdf = geopandas.sjoin(exp.gdf, shape_gdf, how='right') - gdf = gdf.drop(columns=['index_left']) - elif isinstance(shape, (shapely.geometry.MultiPolygon, shapely.geometry.Polygon)): + gdf = geopandas.sjoin(exp.gdf, shape_gdf, how="right") + gdf = gdf.drop(columns=["index_left"]) + elif isinstance( + shape, (shapely.geometry.MultiPolygon, shapely.geometry.Polygon) + ): # works if shape is Polygon or MultiPolygon gdf = exp.gdf.loc[exp.gdf.geometry.within(shape)] elif isinstance(shape, (geopandas.GeoSeries, list)): gdf = geopandas.GeoDataFrame(columns=exp.gdf.columns) for shp in shape: - if isinstance(shp, (shapely.geometry.MultiPolygon, - shapely.geometry.Polygon)): + if isinstance( + shp, 
(shapely.geometry.MultiPolygon, shapely.geometry.Polygon) + ): gdf = gdf.append(exp.gdf.loc[exp.gdf.geometry.within(shp)]) else: - raise NotImplementedError('Not implemented for list or GeoSeries containing ' - f'objects of type {type(shp)} as `shape`') + raise NotImplementedError( + "Not implemented for list or GeoSeries containing " + f"objects of type {type(shp)} as `shape`" + ) else: - raise NotImplementedError('Not implemented for `shape` of type {type(shape)}') + raise NotImplementedError( + "Not implemented for `shape` of type {type(shape)}" + ) - exp.description = (f'LitPop Exposure for custom shape in {countries} at' - f' {res_arcsec} as, year: {reference_year}, financial mode:' - f' {fin_mode}, exp: {exponents}, admin1_calc: {admin1_calc}') + exp.description = ( + f"LitPop Exposure for custom shape in {countries} at" + f" {res_arcsec} as, year: {reference_year}, financial mode:" + f" {fin_mode}, exp: {exponents}, admin1_calc: {admin1_calc}" + ) exp.set_gdf(gdf.reset_index()) try: rows, cols, ras_trans = u_coord.pts_to_raster_meta( - (exp.gdf['longitude'].min(), exp.gdf['latitude'].min(), - exp.gdf['longitude'].max(), exp.gdf['latitude'].max()), - u_coord.get_resolution(exp.gdf['longitude'], exp.gdf['latitude'])) + ( + exp.gdf["longitude"].min(), + exp.gdf["latitude"].min(), + exp.gdf["longitude"].max(), + exp.gdf["latitude"].max(), + ), + u_coord.get_resolution(exp.gdf["longitude"], exp.gdf["latitude"]), + ) exp.meta = { - 'width': cols, - 'height': rows, - 'crs': exp.crs, - 'transform': ras_trans, + "width": cols, + "height": rows, + "crs": exp.crs, + "transform": ras_trans, } except ValueError as err: - LOGGER.warning('Could not write attribute meta with ValueError: ') + LOGGER.warning("Could not write attribute meta with ValueError: ") LOGGER.warning(err.args[0]) - exp.meta = {'crs': exp.crs} + exp.meta = {"crs": exp.crs} return exp def set_custom_shape(self, *args, **kwargs): """This function is deprecated, use LitPop.from_shape instead.""" - LOGGER.warning("The use of LitPop.set_custom_shape is deprecated." - "Use LitPop.from_shape instead.") + LOGGER.warning( + "The use of LitPop.set_custom_shape is deprecated." + "Use LitPop.from_shape instead." + ) self.__dict__ = LitPop.from_shape(*args, **kwargs).__dict__ @classmethod @@ -462,8 +587,8 @@ def from_shape( shape, total_value, res_arcsec=30, - exponents=(1,1), - value_unit='USD', + exponents=(1, 1), + value_unit="USD", region_id=None, reference_year=DEF_REF_YEAR, gpw_version=GPW_VERSION, @@ -521,8 +646,10 @@ def from_shape( The exposure LitPop within shape """ if isinstance(shape, (geopandas.GeoSeries, list)): - raise NotImplementedError('Not implemented for `shape` of type list or ' - 'GeoSeries. Loop over elements of series outside method.') + raise NotImplementedError( + "Not implemented for `shape` of type list or " + "GeoSeries. Loop over elements of series outside method." 
+ ) litpop_gdf, _ = _get_litpop_single_polygon( shape, @@ -531,18 +658,21 @@ def from_shape( data_dir, gpw_version, exponents, - region_id + region_id, ) # disaggregate total value proportional to LitPop values: if isinstance(total_value, (float, int)): - litpop_gdf['value'] = np.divide(litpop_gdf['value'], - litpop_gdf['value'].sum()) * total_value + litpop_gdf["value"] = ( + np.divide(litpop_gdf["value"], litpop_gdf["value"].sum()) * total_value + ) elif total_value is not None: raise TypeError("total_value must be int, float or None.") - description = (f'LitPop Exposure for custom shape at {res_arcsec} as,' - f' year: {reference_year}, exp: {exponents}') + description = ( + f"LitPop Exposure for custom shape at {res_arcsec} as," + f" year: {reference_year}, exp: {exponents}" + ) litpop_gdf[INDICATOR_IMPF] = 1 @@ -554,31 +684,48 @@ def from_shape( exponents=exponents, gpw_version=gpw_version, fin_mode=None, - description=description + description=description, ) - if min(len(exp.gdf['latitude'].unique()), len(exp.gdf['longitude'].unique())) > 1: - #if exp.gdf.shape[0] > 1 and len(exp.gdf.latitude.unique()) > 1: + if ( + min(len(exp.gdf["latitude"].unique()), len(exp.gdf["longitude"].unique())) + > 1 + ): + # if exp.gdf.shape[0] > 1 and len(exp.gdf.latitude.unique()) > 1: rows, cols, ras_trans = u_coord.pts_to_raster_meta( - (exp.gdf['longitude'].min(), exp.gdf['latitude'].min(), - exp.gdf['longitude'].max(), exp.gdf['latitude'].max()), - u_coord.get_resolution(exp.gdf['longitude'], exp.gdf['latitude'])) + ( + exp.gdf["longitude"].min(), + exp.gdf["latitude"].min(), + exp.gdf["longitude"].max(), + exp.gdf["latitude"].max(), + ), + u_coord.get_resolution(exp.gdf["longitude"], exp.gdf["latitude"]), + ) exp.meta = { - 'width': cols, - 'height': rows, - 'crs': exp.crs, - 'transform': ras_trans, + "width": cols, + "height": rows, + "crs": exp.crs, + "transform": ras_trans, } else: - LOGGER.warning('Could not write attribute meta because coordinates' - ' are either only one point or do not extend in lat and lon') - exp.meta = {'crs': exp.crs} + LOGGER.warning( + "Could not write attribute meta because coordinates" + " are either only one point or do not extend in lat and lon" + ) + exp.meta = {"crs": exp.crs} return exp @staticmethod - def _from_country(country, res_arcsec=30, exponents=(1,1), fin_mode=None, - total_value=None, reference_year=DEF_REF_YEAR, - gpw_version=GPW_VERSION, data_dir=SYSTEM_DIR): + def _from_country( + country, + res_arcsec=30, + exponents=(1, 1), + fin_mode=None, + total_value=None, + reference_year=DEF_REF_YEAR, + gpw_version=GPW_VERSION, + data_dir=SYSTEM_DIR, + ): """init LitPop exposure object for one single country See docstring of from_countries() for detailled description of parameters. 
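
For reference, a minimal usage sketch of the `from_countries` entry point whose formatting is changed above (the country code, resolution and financial mode are illustrative placeholders; the call reads GPW population and BlackMarble nightlight data from `data_dir`, so this is a sketch rather than a self-contained test):

    from climada.entity.exposures.litpop import LitPop

    # Illustrative parameters only: build a LitPop exposure for one country,
    # disaggregating produced capital ("pc") proportionally to Lit^1 * Pop^1.
    exp = LitPop.from_countries(
        ["CHE"],           # ISO3 codes or country names
        res_arcsec=30,     # target grid resolution in arc seconds
        exponents=(1, 1),  # (m, n) in Lit^m * Pop^n
        fin_mode="pc",     # total value taken as produced capital
    )
    exp.check()
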
@@ -610,14 +757,13 @@ def _from_country(country, res_arcsec=30, exponents=(1,1), fin_mode=None, iso3a = u_coord.country_to_iso(country, representation="alpha3") iso3n = u_coord.country_to_iso(country, representation="numeric") except LookupError: - LOGGER.error('Country not identified: %s.', country) + LOGGER.error("Country not identified: %s.", country) return None country_geometry = u_coord.get_land_geometry([iso3a]) - if not country_geometry.bounds: # check for empty shape - LOGGER.error('No geometry found for country: %s.', country) + if not country_geometry.bounds: # check for empty shape + LOGGER.error("No geometry found for country: %s.", country) return None - LOGGER.info('\n LitPop: Init Exposure for country: %s (%i)...\n', - iso3a, iso3n) + LOGGER.info("\n LitPop: Init Exposure for country: %s (%i)...\n", iso3a, iso3n) litpop_gdf = geopandas.GeoDataFrame() total_population = 0 @@ -627,31 +773,39 @@ def _from_country(country, res_arcsec=30, exponents=(1,1), fin_mode=None, # loop over single polygons in country shape object: for idx, polygon in enumerate(country_geometry.geoms): # get litpop data for each polygon and combine into GeoDataFrame: - gdf_tmp, meta_tmp, = \ - _get_litpop_single_polygon(polygon, reference_year, - res_arcsec, data_dir, - gpw_version, exponents, - verbose=(idx > 0), - region_id=iso3n - ) + ( + gdf_tmp, + meta_tmp, + ) = _get_litpop_single_polygon( + polygon, + reference_year, + res_arcsec, + data_dir, + gpw_version, + exponents, + verbose=(idx > 0), + region_id=iso3n, + ) if gdf_tmp is None: - LOGGER.debug(f'Skipping polygon with index {idx} for' + - f' country {iso3a}.') + LOGGER.debug( + f"Skipping polygon with index {idx} for" + f" country {iso3a}." + ) continue - total_population += meta_tmp['total_population'] + total_population += meta_tmp["total_population"] litpop_gdf = pd.concat([litpop_gdf, gdf_tmp]) - litpop_gdf.crs = meta_tmp['crs'] + litpop_gdf.crs = meta_tmp["crs"] # set total value for disaggregation if not provided: - if total_value is None and fin_mode == 'pop': - total_value = total_population # population count is taken from pop-data. + if total_value is None and fin_mode == "pop": + total_value = total_population # population count is taken from pop-data. elif total_value is None: total_value = _get_total_value_per_country(iso3a, fin_mode, reference_year) # disaggregate total value proportional to LitPop values: if isinstance(total_value, (float, int)): - litpop_gdf['value'] = np.divide(litpop_gdf['value'], - litpop_gdf['value'].sum()) * total_value + litpop_gdf["value"] = ( + np.divide(litpop_gdf["value"], litpop_gdf["value"].sum()) * total_value + ) elif total_value is not None: raise TypeError("total_value must be int or float.") @@ -663,8 +817,17 @@ def _from_country(country, res_arcsec=30, exponents=(1,1), fin_mode=None, # Alias method names for backward compatibility: set_country = set_countries -def _get_litpop_single_polygon(polygon, reference_year, res_arcsec, data_dir, - gpw_version, exponents, region_id=None, verbose=False): + +def _get_litpop_single_polygon( + polygon, + reference_year, + res_arcsec, + data_dir, + gpw_version, + exponents, + region_id=None, + verbose=False, +): """load nightlight (nl) and population (pop) data in rastered 2d arrays and apply rescaling (resolution reprojection) and LitPop core calculation, i.e. combination of nl and pop per grid cell. 
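
The "LitPop core calculation" mentioned in this docstring combines the two aligned rasters cell by cell. A minimal sketch of that combination, assuming the offsets are added before exponentiation as the default `offsets = (1, 0)` suggests (the actual `gridpoints_core_calc` additionally accepts a `total_val_rescale` argument to rescale the result to a total value):

    import numpy as np

    def litpop_cell_values(nlight, pop, offsets=(1, 0), exponents=(1, 1)):
        """Sketch: (nlight + offsets[0])**m * (pop + offsets[1])**n per cell."""
        return ((nlight + offsets[0]) ** exponents[0]
                * (pop + offsets[1]) ** exponents[1])

    # Two 1x2 toy rasters: zero nightlight still yields a nonzero weight
    # wherever population is nonzero, thanks to the +1 offset on nightlight.
    weights = litpop_cell_values(np.array([[0.0, 5.0]]), np.array([[10.0, 20.0]]))
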
@@ -714,80 +877,84 @@ def _get_litpop_single_polygon(polygon, reference_year, res_arcsec, data_dir, offsets = (1, 0) # import population data (2d array), meta data, and global grid info, # global_transform defines the origin (corner points) of the global traget grid: - pop, meta_pop, global_transform = \ - pop_util.load_gpw_pop_shape(polygon, - reference_year, - gpw_version=gpw_version, - data_dir=data_dir, - verbose=verbose, - ) + pop, meta_pop, global_transform = pop_util.load_gpw_pop_shape( + polygon, + reference_year, + gpw_version=gpw_version, + data_dir=data_dir, + verbose=verbose, + ) total_population = pop.sum() # import nightlight data (2d array) and associated meta data: - nlight, meta_nl = nl_util.load_nasa_nl_shape(polygon, - reference_year, - data_dir=data_dir, - dtype=float - ) + nlight, meta_nl = nl_util.load_nasa_nl_shape( + polygon, reference_year, data_dir=data_dir, dtype=float + ) # if resolution is the same as for lit (15 arcsec), set grid same as lit: - if res_arcsec==15: + if res_arcsec == 15: i_align = 1 - global_origins = (meta_nl['transform'][2], # lon - meta_nl['transform'][5]) # lat - else: # align grid for resampling to grid of population data (pop) + global_origins = ( + meta_nl["transform"][2], # lon + meta_nl["transform"][5], + ) # lat + else: # align grid for resampling to grid of population data (pop) i_align = 0 - global_origins=(global_transform[2], - global_transform[5]) + global_origins = (global_transform[2], global_transform[5]) # reproject Lit and Pop input data to aligned grid with target resolution: try: - [pop, nlight], meta_out = reproject_input_data([pop, nlight], - [meta_pop, meta_nl], - i_align=i_align, # pop defines grid - target_res_arcsec=res_arcsec, - global_origins=global_origins, - ) + [pop, nlight], meta_out = reproject_input_data( + [pop, nlight], + [meta_pop, meta_nl], + i_align=i_align, # pop defines grid + target_res_arcsec=res_arcsec, + global_origins=global_origins, + ) except ValueError as err: - if ("height must be > 0" in str(err) or "width must be > 0" in str(err) # rasterio 1.2 - or "Invalid dataset dimensions :" in str(err)): # rasterio 1.3 + if ( + "height must be > 0" in str(err) + or "width must be > 0" in str(err) # rasterio 1.2 + or "Invalid dataset dimensions :" in str(err) + ): # rasterio 1.3 # no grid point within shape after reprojection, None is returned. 
if verbose: - LOGGER.info('No data point on destination grid within polygon.') - return None, {'crs': meta_pop['crs']} + LOGGER.info("No data point on destination grid within polygon.") + return None, {"crs": meta_pop["crs"]} raise err # calculate Lit^m * Pop^n (but not yet disaggregate any total value to grid): - litpop_array = gridpoints_core_calc([nlight, pop], - offsets=offsets, - exponents=exponents, - total_val_rescale=None) + litpop_array = gridpoints_core_calc( + [nlight, pop], offsets=offsets, exponents=exponents, total_val_rescale=None + ) # mask entries outside polygon (set to NaN) and set total population: - litpop_array = u_coord.mask_raster_with_geometry(litpop_array, meta_out['transform'], - [polygon], nodata=np.nan) - meta_out['total_population'] = total_population + litpop_array = u_coord.mask_raster_with_geometry( + litpop_array, meta_out["transform"], [polygon], nodata=np.nan + ) + meta_out["total_population"] = total_population # extract coordinates as meshgrid arrays: - lon, lat = u_coord.raster_to_meshgrid(meta_out['transform'], - meta_out['width'], - meta_out['height']) + lon, lat = u_coord.raster_to_meshgrid( + meta_out["transform"], meta_out["width"], meta_out["height"] + ) # init GeoDataFrame from data and coordinates: latitude = np.round(lat.flatten(), decimals=8) longitude = np.round(lon.flatten(), decimals=8) gdf = geopandas.GeoDataFrame( {"value": litpop_array.flatten(), "latitude": latitude, "longitude": longitude}, - crs=meta_out['crs'], + crs=meta_out["crs"], geometry=geopandas.points_from_xy(longitude, latitude), ) - if region_id is not None: # set region_id - gdf['region_id'] = region_id + if region_id is not None: # set region_id + gdf["region_id"] = region_id else: - gdf['region_id'] = u_coord.get_country_code( - gdf['latitude'], gdf['longitude'], gridded=True + gdf["region_id"] = u_coord.get_country_code( + gdf["latitude"], gdf["longitude"], gridded=True ) # remove entries outside polygon with `dropna` and return GeoDataFrame: return gdf.dropna(), meta_out + def get_value_unit(fin_mode): """get `value_unit` depending on `fin_mode` @@ -800,11 +967,12 @@ def get_value_unit(fin_mode): value_unit : str """ - if fin_mode in ['none', 'norm']: - return '' - if fin_mode == 'pop': - return 'people' - return 'USD' + if fin_mode in ["none", "norm"]: + return "" + if fin_mode == "pop": + return "people" + return "USD" + def _get_total_value_per_country(cntry_iso3a, fin_mode, reference_year): """ @@ -840,31 +1008,36 @@ def _get_total_value_per_country(cntry_iso3a, fin_mode, reference_year): ------- total_value : float """ - if fin_mode == 'none': + if fin_mode == "none": return None - if fin_mode == 'pop': - raise NotImplementedError("`_get_total_value_per_country` is not " - "implemented for `fin_mode` == 'pop'.") - if fin_mode == 'pc': - return(u_fin.world_bank_wealth_account(cntry_iso3a, reference_year, - no_land=True)[1]) + if fin_mode == "pop": + raise NotImplementedError( + "`_get_total_value_per_country` is not " + "implemented for `fin_mode` == 'pop'." 
+ ) + if fin_mode == "pc": + return u_fin.world_bank_wealth_account( + cntry_iso3a, reference_year, no_land=True + )[1] # here, total_asset_val is Produced Capital "pc" # no_land=True returns value w/o the mark-up of 24% for land value - if fin_mode == 'pc_land': - return(u_fin.world_bank_wealth_account(cntry_iso3a, reference_year, - no_land=False)[1]) + if fin_mode == "pc_land": + return u_fin.world_bank_wealth_account( + cntry_iso3a, reference_year, no_land=False + )[1] # no_land=False returns pc value incl. the mark-up of 24% for land value - if fin_mode == 'norm': + if fin_mode == "norm": return 1 # GDP based total values: gdp_value = u_fin.gdp(cntry_iso3a, reference_year)[1] - if fin_mode == 'gdp': + if fin_mode == "gdp": return gdp_value - if fin_mode == 'income_group': # gdp * (income group + 1) - return gdp_value*(u_fin.income_group(cntry_iso3a, reference_year)[1]+1) - if fin_mode in ('nfw', 'tw'): - wealthtogdp_factor = u_fin.wealth2gdp(cntry_iso3a, fin_mode == 'nfw', - reference_year)[1] + if fin_mode == "income_group": # gdp * (income group + 1) + return gdp_value * (u_fin.income_group(cntry_iso3a, reference_year)[1] + 1) + if fin_mode in ("nfw", "tw"): + wealthtogdp_factor = u_fin.wealth2gdp( + cntry_iso3a, fin_mode == "nfw", reference_year + )[1] if np.isnan(wealthtogdp_factor): LOGGER.warning("Missing wealth-to-gdp factor for country %s.", cntry_iso3a) LOGGER.warning("Using GDP instead as total value.") @@ -872,12 +1045,16 @@ def _get_total_value_per_country(cntry_iso3a, fin_mode, reference_year): return gdp_value * wealthtogdp_factor raise ValueError(f"Unsupported fin_mode: {fin_mode}") -def reproject_input_data(data_array_list, meta_list, - i_align=0, - target_res_arcsec=None, - global_origins=(-180.0, 89.99999999999991), - resampling=rasterio.warp.Resampling.bilinear, - conserve=None): + +def reproject_input_data( + data_array_list, + meta_list, + i_align=0, + target_res_arcsec=None, + global_origins=(-180.0, 89.99999999999991), + resampling=rasterio.warp.Resampling.bilinear, + conserve=None, +): """ LitPop-sepcific wrapper around u_coord.align_raster_data. 
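
The totals returned by `_get_total_value_per_country` (or passed in explicitly) are spread over the grid proportionally to the per-cell LitPop weights, exactly as the `np.divide(...) * total_value` lines earlier in this file do. A small numeric sketch of that disaggregation step, with placeholder numbers:

    import numpy as np

    weights = np.array([2.0, 3.0, 5.0])   # per-cell Lit^m * Pop^n values
    total_value = 1_000.0                 # e.g. produced capital in USD
    values = np.divide(weights, weights.sum()) * total_value
    # values -> [200., 300., 500.]; the cell values sum to total_value again
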
@@ -935,40 +1112,50 @@ def reproject_input_data(data_array_list, meta_list, # target resolution in degree lon,lat: if target_res_arcsec is None: - res_degree = meta_list[i_align]['transform'][0] # reference grid + res_degree = meta_list[i_align]["transform"][0] # reference grid else: res_degree = target_res_arcsec / 3600 - dst_crs = meta_list[i_align]['crs'] + dst_crs = meta_list[i_align]["crs"] # loop over data arrays, do transformation where required: data_out_list = [None] * len(data_array_list) - meta_out = {'dtype': meta_list[i_align]['dtype'], - 'nodata': meta_list[i_align]['nodata'], - 'crs': dst_crs} + meta_out = { + "dtype": meta_list[i_align]["dtype"], + "nodata": meta_list[i_align]["nodata"], + "crs": dst_crs, + } for idx, data in enumerate(data_array_list): # if target resolution corresponds to reference data resolution, # the reference data is not transformed: - if idx==i_align and ((target_res_arcsec is None) or \ - (np.round(meta_list[i_align]['transform'][0], - decimals=7)==np.round(res_degree, decimals=7))): + if idx == i_align and ( + (target_res_arcsec is None) + or ( + np.round(meta_list[i_align]["transform"][0], decimals=7) + == np.round(res_degree, decimals=7) + ) + ): data_out_list[idx] = data continue # reproject data grid: - dst_bounds = rasterio.transform.array_bounds(meta_list[i_align]['height'], - meta_list[i_align]['width'], - meta_list[i_align]['transform']) - data_out_list[idx], meta_out['transform'] = \ - u_coord.align_raster_data(data_array_list[idx], meta_list[idx]['crs'], - meta_list[idx]['transform'], - dst_crs=dst_crs, - dst_resolution=(res_degree, res_degree), - dst_bounds=dst_bounds, - global_origin=global_origins, - resampling=resampling, - conserve=conserve) - meta_out['height'] = data_out_list[-1].shape[0] - meta_out['width'] = data_out_list[-1].shape[1] + dst_bounds = rasterio.transform.array_bounds( + meta_list[i_align]["height"], + meta_list[i_align]["width"], + meta_list[i_align]["transform"], + ) + data_out_list[idx], meta_out["transform"] = u_coord.align_raster_data( + data_array_list[idx], + meta_list[idx]["crs"], + meta_list[idx]["transform"], + dst_crs=dst_crs, + dst_resolution=(res_degree, res_degree), + dst_bounds=dst_bounds, + global_origin=global_origins, + resampling=resampling, + conserve=conserve, + ) + meta_out["height"] = data_out_list[-1].shape[0] + meta_out["width"] = data_out_list[-1].shape[1] return data_out_list, meta_out @@ -1078,17 +1265,18 @@ def _check_excel_exists(file_path, file_name, xlsx_before_xls=True): """ try_ext = [] if xlsx_before_xls: - try_ext.append('.xlsx') - try_ext.append('.xls') + try_ext.append(".xlsx") + try_ext.append(".xls") else: - try_ext.append('.xls') - try_ext.append('.xlsx') + try_ext.append(".xls") + try_ext.append(".xlsx") path_name = Path(file_path, file_name).stem for i in try_ext: if Path(file_path, path_name + i).is_file(): return str(Path(file_path, path_name + i)) return None + def _grp_read(country_iso3, admin1_info=None, data_dir=SYSTEM_DIR): """Retrieves the Gross Regional Product (GRP) aka Gross State Domestic Product (GSDP) data for a certain country. 
It requires an excel file in a subfolder @@ -1116,44 +1304,61 @@ def _grp_read(country_iso3, admin1_info=None, data_dir=SYSTEM_DIR): if admin1_info is None: admin1_info, _ = u_coord.get_admin1_info(country_iso3) admin1_info = admin1_info[country_iso3] - file_name = _check_excel_exists(data_dir.joinpath('GSDP'), str(country_iso3 + '_GSDP')) + file_name = _check_excel_exists( + data_dir.joinpath("GSDP"), str(country_iso3 + "_GSDP") + ) if file_name is not None: # open spreadsheet and identify relevant columns: admin1_xls_data = pd.read_excel(file_name) - if admin1_xls_data.get('State_Province') is None: + if admin1_xls_data.get("State_Province") is None: admin1_xls_data = admin1_xls_data.rename( - columns={admin1_xls_data.columns[0]: 'State_Province'}) - if admin1_xls_data.get('GSDP_ref') is None: + columns={admin1_xls_data.columns[0]: "State_Province"} + ) + if admin1_xls_data.get("GSDP_ref") is None: admin1_xls_data = admin1_xls_data.rename( - columns={admin1_xls_data.columns[-1]: 'GSDP_ref'}) + columns={admin1_xls_data.columns[-1]: "GSDP_ref"} + ) # initiate dictionary with admin 1 names as keys: - out_dict = dict.fromkeys([record['name'] for record in admin1_info]) - postals = [record['postal'] for record in admin1_info] + out_dict = dict.fromkeys([record["name"] for record in admin1_info]) + postals = [record["postal"] for record in admin1_info] # first nested loop. outer loop over region names in admin1_info: for record_name in out_dict: # inner loop over region names in spreadsheet, find matches - for idx, xls_name in enumerate(admin1_xls_data['State_Province'].tolist()): - subnat_shape_str = [c for c in record_name if c.isalpha() or c.isnumeric()] + for idx, xls_name in enumerate(admin1_xls_data["State_Province"].tolist()): + subnat_shape_str = [ + c for c in record_name if c.isalpha() or c.isnumeric() + ] subnat_xls_str = [c for c in xls_name if c.isalpha()] if subnat_shape_str == subnat_xls_str: - out_dict[record_name] = admin1_xls_data['GSDP_ref'][idx] + out_dict[record_name] = admin1_xls_data["GSDP_ref"][idx] break # second nested loop to detect matched empty entries for idx1, country_name in enumerate(out_dict.keys()): if out_dict[country_name] is None: - for idx2, xls_name in enumerate(admin1_xls_data['State_Province'].tolist()): + for idx2, xls_name in enumerate( + admin1_xls_data["State_Province"].tolist() + ): subnat_xls_str = [c for c in xls_name if c.isalpha()] postals_str = [c for c in postals[idx1] if c.isalpha()] if subnat_xls_str == postals_str: - out_dict[country_name] = admin1_xls_data['GSDP_ref'][idx2] + out_dict[country_name] = admin1_xls_data["GSDP_ref"][idx2] return out_dict - LOGGER.warning('No file for %s could be found in %s.', country_iso3, data_dir) - LOGGER.warning('No admin1 data is calculated in this case.') + LOGGER.warning("No file for %s could be found in %s.", country_iso3, data_dir) + LOGGER.warning("No admin1 data is calculated in this case.") return None -def _calc_admin1_one_country(country, res_arcsec, exponents, fin_mode, total_value, - reference_year, gpw_version, data_dir): + +def _calc_admin1_one_country( + country, + res_arcsec, + exponents, + fin_mode, + total_value, + reference_year, + gpw_version, + data_dir, +): """ Calculates the LitPop on admin1 level for provinces/states where such information are available (i.e. GDP is distributed on a subnational instead of a national level). 
Requires @@ -1181,14 +1386,15 @@ def _calc_admin1_one_country(country, res_arcsec, exponents, fin_mode, total_val Exposure instance """ - if fin_mode == 'pop': - raise NotImplementedError('`_calc_admin1_one_country` not implemented for '+ - "`fin_mode` == 'pop'.") + if fin_mode == "pop": + raise NotImplementedError( + "`_calc_admin1_one_country` not implemented for " + "`fin_mode` == 'pop'." + ) # Determine ISO 3166 representation of country and get geometry: try: iso3a = u_coord.country_to_iso(country, representation="alpha3") except LookupError: - LOGGER.error('Country not identified: %s. Skippig.', country) + LOGGER.error("Country not identified: %s. Skippig.", country) return None # get records and shapes on admin 1 level: admin1_info, admin1_shapes = u_coord.get_admin1_info(iso3a) @@ -1197,31 +1403,39 @@ def _calc_admin1_one_country(country, res_arcsec, exponents, fin_mode, total_val # get subnational Gross Regional Product (GRP) data for country: grp_values = _grp_read(iso3a, admin1_info=admin1_info, data_dir=data_dir) if grp_values is None: - LOGGER.error("No subnational GRP data found for calc_admin1" - " for country %s. Skipping.", country) + LOGGER.error( + "No subnational GRP data found for calc_admin1" + " for country %s. Skipping.", + country, + ) return None # normalize GRP values: - sum_vals = sum(filter(None, grp_values.values())) # get total - grp_values = {key: (value / sum_vals if value is not None else None) - for (key, value) in grp_values.items()} + sum_vals = sum(filter(None, grp_values.values())) # get total + grp_values = { + key: (value / sum_vals if value is not None else None) + for (key, value) in grp_values.items() + } # get total value of country: total_value = _get_total_value_per_country(iso3a, fin_mode, reference_year) exp_list = [] for idx, record in enumerate(admin1_info): - if grp_values[record['name']] is None: + if grp_values[record["name"]] is None: continue - LOGGER.info(record['name']) + LOGGER.info(record["name"]) # init exposure for province and add to list # total value is defined from country multiplied by grp_share: - exp_list.append(LitPop.from_shape(admin1_shapes[idx], - total_value * grp_values[record['name']], - res_arcsec=res_arcsec, - exponents=exponents, - reference_year=reference_year, - gpw_version=gpw_version, - data_dir=data_dir) - ) - exp_list[-1].gdf['admin1'] = record['name'] + exp_list.append( + LitPop.from_shape( + admin1_shapes[idx], + total_value * grp_values[record["name"]], + res_arcsec=res_arcsec, + exponents=exponents, + reference_year=reference_year, + gpw_version=gpw_version, + data_dir=data_dir, + ) + ) + exp_list[-1].gdf["admin1"] = record["name"] return Exposures.concat(exp_list) diff --git a/climada/entity/exposures/litpop/nightlight.py b/climada/entity/exposures/litpop/nightlight.py index f25b4fca6..d875b26a9 100644 --- a/climada/entity/exposures/litpop/nightlight.py +++ b/climada/entity/exposures/litpop/nightlight.py @@ -18,27 +18,28 @@ Define nightlight reader and cutting functions. 
""" + import glob -import shutil -import tarfile import gzip -import pickle import logging +import pickle +import shutil +import tarfile from pathlib import Path -import rasterio +import matplotlib.pyplot as plt import numpy as np +import rasterio import scipy.sparse as sparse -import matplotlib.pyplot as plt from osgeo import gdal from PIL import Image from shapefile import Shape +from climada import CONFIG from climada.util import ureg from climada.util.constants import SYSTEM_DIR from climada.util.files_handler import download_file from climada.util.save import save -from climada import CONFIG Image.MAX_IMAGE_PIXELS = 1e9 @@ -56,18 +57,20 @@ NOAA_BORDER = (-180, -65, 180, 75) """NOAA nightlights border (min_lon, min_lat, max_lon, max_lat)""" -BM_FILENAMES = ['BlackMarble_%i_A1_geo_gray.tif', - 'BlackMarble_%i_A2_geo_gray.tif', - 'BlackMarble_%i_B1_geo_gray.tif', - 'BlackMarble_%i_B2_geo_gray.tif', - 'BlackMarble_%i_C1_geo_gray.tif', - 'BlackMarble_%i_C2_geo_gray.tif', - 'BlackMarble_%i_D1_geo_gray.tif', - 'BlackMarble_%i_D2_geo_gray.tif' - ] +BM_FILENAMES = [ + "BlackMarble_%i_A1_geo_gray.tif", + "BlackMarble_%i_A2_geo_gray.tif", + "BlackMarble_%i_B1_geo_gray.tif", + "BlackMarble_%i_B2_geo_gray.tif", + "BlackMarble_%i_C1_geo_gray.tif", + "BlackMarble_%i_C2_geo_gray.tif", + "BlackMarble_%i_D1_geo_gray.tif", + "BlackMarble_%i_D2_geo_gray.tif", +] """Nightlight NASA files which generate the whole earth when put together.""" -def load_nasa_nl_shape(geometry, year, data_dir=SYSTEM_DIR, dtype='float32'): + +def load_nasa_nl_shape(geometry, year, data_dir=SYSTEM_DIR, dtype="float32"): """Read nightlight data from NASA BlackMarble tiles cropped to given shape(s) and combine arrays from each tile. @@ -107,63 +110,74 @@ def load_nasa_nl_shape(geometry, year, data_dir=SYSTEM_DIR, dtype='float32'): bounds = geometry.bounds # get years available in BlackMarble data from CONFIG and convert to array: - years_available = [year.int() for year in \ - CONFIG.exposures.litpop.nightlights.blackmarble_years.list() - ] + years_available = [ + year.int() + for year in CONFIG.exposures.litpop.nightlights.blackmarble_years.list() + ] # get year closest to year with BlackMarble data available: year = min(years_available, key=lambda x: abs(x - year)) # determin black marble tiles with coordinates containing the bounds: req_files = get_required_nl_files(bounds) # check wether required files exist locally: - files_exist = check_nl_local_file_exists(required_files=req_files, - check_path=data_dir, year=year) + files_exist = check_nl_local_file_exists( + required_files=req_files, check_path=data_dir, year=year + ) # download data that is missing: download_nl_files(req_files, files_exist, data_dir, year) # convert `req_files` to sorted list of indices: - req_files = np.where(req_files ==1)[0] + req_files = np.where(req_files == 1)[0] # init empty lists for tiles depending on position in global grid: - results_array_north = list() # tiles A1, B1, C1, D1 (Nothern Hemisphere) - results_array_south = list() # tiles A2, B2, C2, D2 (Southern Hemisphere) + results_array_north = list() # tiles A1, B1, C1, D1 (Nothern Hemisphere) + results_array_south = list() # tiles A2, B2, C2, D2 (Southern Hemisphere) # loop through required files, load and crop data for each: for idx, i_file in enumerate(req_files): # read cropped data from source file (src) to np.ndarray: - out_image, meta_tmp = load_nasa_nl_shape_single_tile(geometry, - data_dir / (BM_FILENAMES[i_file] %(year))) + out_image, meta_tmp = load_nasa_nl_shape_single_tile( + 
geometry, data_dir / (BM_FILENAMES[i_file] % (year)) + ) # sort indicies to northenr and southern hemisphere: - if i_file in [0,2,4,6]: # indicies of northern hemisphere files + if i_file in [0, 2, 4, 6]: # indicies of northern hemisphere files results_array_north.append(out_image) - elif i_file in [1,3,5,7]: # indicies of southern hemisphere files + elif i_file in [1, 3, 5, 7]: # indicies of southern hemisphere files results_array_south.append(out_image) # from first (top left) of tiles, meta is initiated, incl. origin: if idx == 0: meta = meta_tmp # set correct CRS from local tile's CRS to global WGS 84: - meta.update({"crs": rasterio.crs.CRS.from_epsg(4326), - "dtype": dtype}) - if len(req_files) == 1: # only one tile required: + meta.update({"crs": rasterio.crs.CRS.from_epsg(4326), "dtype": dtype}) + if len(req_files) == 1: # only one tile required: return np.array(out_image, dtype=dtype), meta # Else, combine data from multiple input files (BlackMarble tiles) - # concatenate arrays from west to east and from north to south: del out_image - if results_array_north: # northern hemisphere west to east + if results_array_north: # northern hemisphere west to east results_array_north = np.concatenate(results_array_north, axis=1) - if results_array_south: # southern hemisphere west to east + if results_array_south: # southern hemisphere west to east results_array_south = np.concatenate(results_array_south, axis=1) - if isinstance(results_array_north, np.ndarray) and isinstance(results_array_south, np.ndarray): + if isinstance(results_array_north, np.ndarray) and isinstance( + results_array_south, np.ndarray + ): # north to south if both hemispheres are involved - results_array_north = np.concatenate([results_array_north, results_array_south], axis=0) - elif isinstance(results_array_south, np.ndarray): # only southern hemisphere + results_array_north = np.concatenate( + [results_array_north, results_array_south], axis=0 + ) + elif isinstance(results_array_south, np.ndarray): # only southern hemisphere results_array_north = results_array_south del results_array_south # update number of elements per axis in meta dictionary: - meta.update({"height": results_array_north.shape[0], - "width": results_array_north.shape[1], - "dtype": dtype}) + meta.update( + { + "height": results_array_north.shape[0], + "width": results_array_north.shape[1], + "dtype": dtype, + } + ) return np.array(results_array_north, dtype=dtype), meta + def get_required_nl_files(bounds): """Determines which of the satellite pictures are necessary for a certain bounding box (e.g. country) @@ -186,16 +200,22 @@ def get_required_nl_files(bounds): """ # check if bounds are valid: if (np.size(bounds) != 4) or (bounds[0] > bounds[2]) or (bounds[1] > bounds[3]): - raise ValueError('Invalid bounds supplied. `bounds` must be tuple'+ - ' with (min_lon, min_lat, max_lon, max_lat).') + raise ValueError( + "Invalid bounds supplied. `bounds` must be tuple" + + " with (min_lon, min_lat, max_lon, max_lat)." + ) min_lon, min_lat, max_lon, max_lat = bounds # longitude first. The width of all tiles is 90 degrees tile_width = 90 - req_files = np.zeros(np.count_nonzero(BM_FILENAMES),) + req_files = np.zeros( + np.count_nonzero(BM_FILENAMES), + ) # determine the staring tile - first_tile_lon = min(np.floor((min_lon - (-180)) / tile_width), 3) # "normalise" to zero + first_tile_lon = min( + np.floor((min_lon - (-180)) / tile_width), 3 + ) # "normalise" to zero last_tile_lon = min(np.floor((max_lon - (-180)) / tile_width), 3) # Now latitude. 
The height of all tiles is the same as the height. @@ -213,8 +233,8 @@ def get_required_nl_files(bounds): continue return req_files -def check_nl_local_file_exists(required_files=None, check_path=SYSTEM_DIR, - year=2016): + +def check_nl_local_file_exists(required_files=None, check_path=SYSTEM_DIR, year=2016): """Checks if BM Satellite files are avaialbe and returns a vector denoting the missing files. @@ -237,38 +257,60 @@ def check_nl_local_file_exists(required_files=None, check_path=SYSTEM_DIR, Boolean array that denotes if the required files exist. """ if required_files is None: - required_files = np.ones(len(BM_FILENAMES),) + required_files = np.ones( + len(BM_FILENAMES), + ) if np.size(required_files) < np.count_nonzero(BM_FILENAMES): - required_files = np.ones(np.count_nonzero(BM_FILENAMES),) - LOGGER.warning('The parameter \'required_files\' was too short and ' - 'is ignored.') + required_files = np.ones( + np.count_nonzero(BM_FILENAMES), + ) + LOGGER.warning( + "The parameter 'required_files' was too short and " "is ignored." + ) if isinstance(check_path, str): check_path = Path(check_path) if not check_path.is_dir(): - raise ValueError(f'The given path does not exist: {check_path}') - files_exist = np.zeros(np.count_nonzero(BM_FILENAMES),) + raise ValueError(f"The given path does not exist: {check_path}") + files_exist = np.zeros( + np.count_nonzero(BM_FILENAMES), + ) for num_check, name_check in enumerate(BM_FILENAMES): if required_files[num_check] == 0: continue - curr_file = check_path.joinpath(name_check %(year)) + curr_file = check_path.joinpath(name_check % (year)) if curr_file.is_file(): files_exist[num_check] = 1 if sum(files_exist) == sum(required_files): - LOGGER.debug('Found all required satellite data (%s files) in folder %s', - int(sum(required_files)), check_path) + LOGGER.debug( + "Found all required satellite data (%s files) in folder %s", + int(sum(required_files)), + check_path, + ) elif sum(files_exist) == 0: - LOGGER.info('No satellite files found locally in %s', check_path) + LOGGER.info("No satellite files found locally in %s", check_path) else: - LOGGER.debug('Not all satellite files available. ' - 'Found %d out of %d required files in %s', - int(sum(files_exist)), int(sum(required_files)), check_path) + LOGGER.debug( + "Not all satellite files available. " + "Found %d out of %d required files in %s", + int(sum(files_exist)), + int(sum(required_files)), + check_path, + ) return files_exist -def download_nl_files(req_files=np.ones(len(BM_FILENAMES),), - files_exist=np.zeros(len(BM_FILENAMES),), - dwnl_path=SYSTEM_DIR, year=2016): + +def download_nl_files( + req_files=np.ones( + len(BM_FILENAMES), + ), + files_exist=np.zeros( + len(BM_FILENAMES), + ), + dwnl_path=SYSTEM_DIR, + year=2016, +): """Attempts to download nightlight files from NASA webpage. Parameters @@ -297,43 +339,52 @@ def download_nl_files(req_files=np.ones(len(BM_FILENAMES),), """ if (len(req_files) != len(files_exist)) or (len(req_files) != len(BM_FILENAMES)): - raise ValueError('The given arguments are invalid. req_files and ' - 'files_exist must both be as long as there are files to download' - ' (' + str(len(BM_FILENAMES)) + ').') + raise ValueError( + "The given arguments are invalid. req_files and " + "files_exist must both be as long as there are files to download" + " (" + str(len(BM_FILENAMES)) + ")." + ) if not Path(dwnl_path).is_dir(): - raise ValueError(f'The folder {dwnl_path} does not exist. Operation aborted.') + raise ValueError(f"The folder {dwnl_path} does not exist. 
Operation aborted.") if np.all(req_files == files_exist): - LOGGER.debug('All required files already exist. No downloads necessary.') + LOGGER.debug("All required files already exist. No downloads necessary.") return dwnl_path try: for num_files in range(0, np.count_nonzero(BM_FILENAMES)): if req_files[num_files] == 0 or files_exist[num_files] == 1: - continue # file already available or not required + continue # file already available or not required path_check = False # loop through different possible URLs defined in CONFIG: value_err = None for url in CONFIG.exposures.litpop.nightlights.nasa_sites.list(): - try: # control for ValueError due to wrong URL - curr_file = url.str() + BM_FILENAMES[num_files] %(year) - LOGGER.info('Attempting to download file from %s', curr_file) + try: # control for ValueError due to wrong URL + curr_file = url.str() + BM_FILENAMES[num_files] % (year) + LOGGER.info("Attempting to download file from %s", curr_file) path_check = download_file(curr_file, download_dir=dwnl_path) - break # leave loop if sucessful + break # leave loop if sucessful except ValueError as err: value_err = err - if path_check: # download succesful + if path_check: # download succesful continue if value_err: - raise ValueError("Download failed," - " check URLs inCONFIG.exposures.litpop.nightlights.nasa_sites!\n" - f" Last error message:\n {value_err.args[0]}") + raise ValueError( + "Download failed," + " check URLs inCONFIG.exposures.litpop.nightlights.nasa_sites!\n" + f" Last error message:\n {value_err.args[0]}" + ) else: - raise ValueError("Download failed, file not found and no nasa sites configured," - " check URLs in CONFIG.exposures.litpop.nightlights.nasa_sites!") + raise ValueError( + "Download failed, file not found and no nasa sites configured," + " check URLs in CONFIG.exposures.litpop.nightlights.nasa_sites!" + ) except Exception as exc: - raise RuntimeError('Download failed. Please check the network ' - 'connection and whether filenames are still valid.') from exc + raise RuntimeError( + "Download failed. Please check the network " + "connection and whether filenames are still valid." + ) from exc return dwnl_path + def load_nasa_nl_shape_single_tile(geometry, path, layer=0): """Read nightlight data from single NASA BlackMarble tile and crop to given shape. @@ -356,19 +407,26 @@ def load_nasa_nl_shape_single_tile(geometry, path, layer=0): rasterio meta """ # open tif source file with raterio: - with rasterio.open(path, 'r') as src: + with rasterio.open(path, "r") as src: # read cropped data from source file (src) to np.ndarray: out_image, transform = rasterio.mask.mask(src, [geometry], crop=True) - LOGGER.debug('Read cropped %s as np.ndarray.', path.name) + LOGGER.debug("Read cropped %s as np.ndarray.", path.name) if out_image.shape[0] < layer: - raise IndexError(f"{path.name} has only {out_image.shape[0]} layers," - f" layer {layer} can't be accessed.") + raise IndexError( + f"{path.name} has only {out_image.shape[0]} layers," + f" layer {layer} can't be accessed." + ) meta = src.meta - meta.update({"driver": "GTiff", - "height": out_image.shape[1], - "width": out_image.shape[2], - "transform": transform}) - return out_image[layer,:,:], meta + meta.update( + { + "driver": "GTiff", + "height": out_image.shape[1], + "width": out_image.shape[2], + "transform": transform, + } + ) + return out_image[layer, :, :], meta + def load_nightlight_nasa(bounds, req_files, year): """Get nightlight from NASA repository that contain input boundary. 
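
# Illustrative usage sketch (assumptions: the year and bounding box are arbitrary
# example values, and downloads rely on the NASA URLs configured under
# CONFIG.exposures.litpop.nightlights.nasa_sites). It shows how the helpers above
# chain into the tile workflow that load_nasa_nl_shape() implements: find the
# required BlackMarble tiles for a bounding box, check which are already on disk,
# download the missing ones, and crop each tile to the target geometry.
import numpy as np
from shapely.geometry import box

from climada.entity.exposures.litpop import nightlight as nl
from climada.util.constants import SYSTEM_DIR

year = 2016
bounds = (-10.0, 35.0, -1.0, 45.0)  # (min_lon, min_lat, max_lon, max_lat)
req_files = nl.get_required_nl_files(bounds)
files_exist = nl.check_nl_local_file_exists(
    required_files=req_files, check_path=SYSTEM_DIR, year=year
)
nl.download_nl_files(req_files, files_exist, SYSTEM_DIR, year)
geometry = box(*bounds)
for i_file in np.where(req_files == 1)[0]:
    tile, meta = nl.load_nasa_nl_shape_single_tile(
        geometry, SYSTEM_DIR / (nl.BM_FILENAMES[i_file] % year)
    )
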
@@ -410,16 +468,16 @@ def load_nightlight_nasa(bounds, req_files, year): continue extent = np.int64(np.clip(extent, 0, tile_size[None] - 1)) # pylint: disable=unsubscriptable-object - im_nl, _ = read_bm_file(SYSTEM_DIR, fname %(year)) + im_nl, _ = read_bm_file(SYSTEM_DIR, fname % (year)) im_nl = np.flipud(im_nl) im_nl = sparse.csc.csc_matrix(im_nl) - im_nl = im_nl[extent[0, 0]:extent[1, 0] + 1, extent[0, 1]:extent[1, 1] + 1] + im_nl = im_nl[extent[0, 0] : extent[1, 0] + 1, extent[0, 1] : extent[1, 1] + 1] nightlight.append((tile_coord, im_nl)) tile_coords = np.array([n[0] for n in nightlight]) shape = tile_coords.max(axis=0) - tile_coords.min(axis=0) + 1 - nightlight = np.array([n[1] for n in nightlight]).reshape(shape, order='F') - nightlight = sparse.bmat(np.flipud(nightlight), format='csr') + nightlight = np.array([n[1] for n in nightlight]).reshape(shape, order="F") + nightlight = sparse.bmat(np.flipud(nightlight), format="csr") coord_nl = np.vstack([coord_min, coord_h]).T coord_nl[:, 0] += global_idx[0, :] * coord_h[:] @@ -447,13 +505,16 @@ def read_bm_file(bm_path, filename): Additional info from which coordinates can be calculated. """ path = Path(bm_path, filename) - LOGGER.debug('Importing%s.', path) + LOGGER.debug("Importing%s.", path) if not path.exists(): - raise FileNotFoundError('Invalid path: check that the path to BlackMarble file is correct.') + raise FileNotFoundError( + "Invalid path: check that the path to BlackMarble file is correct." + ) curr_file = gdal.Open(str(path)) arr1 = curr_file.GetRasterBand(1).ReadAsArray() return arr1, curr_file + def unzip_tif_to_py(file_gz): """Unzip image file, read it, flip the x axis, save values as pickle and remove tif. @@ -471,8 +532,8 @@ def unzip_tif_to_py(file_gz): """ LOGGER.info("Unzipping file %s.", file_gz) file_name = Path(Path(file_gz).stem) - with gzip.open(file_gz, 'rb') as f_in: - with file_name.open('wb') as f_out: + with gzip.open(file_gz, "rb") as f_in: + with file_name.open("wb") as f_out: shutil.copyfileobj(f_in, f_out) nightlight = sparse.csc_matrix(plt.imread(file_name)) # flip X axis @@ -484,6 +545,7 @@ def unzip_tif_to_py(file_gz): return file_name, nightlight + def untar_noaa_stable_nightlight(f_tar_ini): """Move input tar file to SYSTEM_DIR and extract stable light file. Returns absolute path of stable light file in format tif.gz. 
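
# Illustrative sketch of the manual NOAA processing chain that load_nightlight_noaa()
# further below performs internally. The archive name is an assumed example of the
# F<satellite><year>.v4.tar files and must already be present locally.
from climada.entity.exposures.litpop import nightlight as nl

f_tif_gz = nl.untar_noaa_stable_nightlight("F182013.v4.tar")
f_pickle, nightlight = nl.unzip_tif_to_py(f_tif_gz)  # sparse matrix of light intensities
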
@@ -503,14 +565,22 @@ def untar_noaa_stable_nightlight(f_tar_ini): shutil.move(f_tar_ini, f_tar_dest) # extract stable_lights.avg_vis.tif with tarfile.open(f_tar_ini) as tar_file: - extract_name = [name for name in tar_file.getnames() - if name.endswith('stable_lights.avg_vis.tif.gz')] + extract_name = [ + name + for name in tar_file.getnames() + if name.endswith("stable_lights.avg_vis.tif.gz") + ] if len(extract_name) == 0: - raise ValueError('No stable light intensities for selected year and satellite ' - f'in file {f_tar_ini}') + raise ValueError( + "No stable light intensities for selected year and satellite " + f"in file {f_tar_ini}" + ) if len(extract_name) > 1: - LOGGER.warning('found more than one potential intensity file in %s %s', - f_tar_ini, extract_name) + LOGGER.warning( + "found more than one potential intensity file in %s %s", + f_tar_ini, + extract_name, + ) tar_file.extract(extract_name[0], SYSTEM_DIR) return SYSTEM_DIR.joinpath(extract_name[0]) @@ -536,15 +606,17 @@ def load_nightlight_noaa(ref_year=2013, sat_name=None): # NOAA's URL used to retrieve nightlight satellite images: noaa_url = CONFIG.exposures.litpop.nightlights.noaa_url.str() if sat_name is None: - fn_light = str(SYSTEM_DIR.joinpath('*' + - str(ref_year) + '*.stable_lights.avg_vis')) + fn_light = str( + SYSTEM_DIR.joinpath("*" + str(ref_year) + "*.stable_lights.avg_vis") + ) else: - fn_light = str(SYSTEM_DIR.joinpath(sat_name + - str(ref_year) + '*.stable_lights.avg_vis')) + fn_light = str( + SYSTEM_DIR.joinpath(sat_name + str(ref_year) + "*.stable_lights.avg_vis") + ) # check if file exists in SYSTEM_DIR, download if not if glob.glob(fn_light + ".p"): fn_light = sorted(glob.glob(fn_light + ".p"))[0] - with open(fn_light, 'rb') as f_nl: + with open(fn_light, "rb") as f_nl: nightlight = pickle.load(f_nl) elif glob.glob(fn_light + ".tif.gz"): fn_light = sorted(glob.glob(fn_light + ".tif.gz"))[0] @@ -554,22 +626,26 @@ def load_nightlight_noaa(ref_year=2013, sat_name=None): if sat_name is None: ini_pre, end_pre = 18, 9 for pre_i in np.arange(ini_pre, end_pre, -1): - url = noaa_url + 'F' + str(pre_i) + str(ref_year) + '.v4.tar' + url = noaa_url + "F" + str(pre_i) + str(ref_year) + ".v4.tar" try: file_down = download_file(url, download_dir=SYSTEM_DIR) break except ValueError: pass - if 'file_down' not in locals(): - raise ValueError(f'Nightlight for reference year {ref_year} not available. ' - 'Try a different year.') + if "file_down" not in locals(): + raise ValueError( + f"Nightlight for reference year {ref_year} not available. " + "Try a different year." + ) else: - url = noaa_url + sat_name + str(ref_year) + '.v4.tar' + url = noaa_url + sat_name + str(ref_year) + ".v4.tar" try: file_down = download_file(url, download_dir=SYSTEM_DIR) except ValueError as err: - raise ValueError(f'Nightlight intensities for year {ref_year} and satellite' - f' {sat_name} do not exist.') from err + raise ValueError( + f"Nightlight intensities for year {ref_year} and satellite" + f" {sat_name} do not exist." + ) from err fn_light = untar_noaa_stable_nightlight(file_down) fn_light, nightlight = unzip_tif_to_py(fn_light) diff --git a/climada/entity/exposures/test/test_base.py b/climada/entity/exposures/test/test_base.py index 867e39c29..6650719a5 100644 --- a/climada/entity/exposures/test/test_base.py +++ b/climada/entity/exposures/test/test_base.py @@ -18,40 +18,50 @@ Test Exposure base class. 
""" + import unittest + +import geopandas as gpd import numpy as np import pandas as pd -import geopandas as gpd -from sklearn.metrics import DistanceMetric import rasterio -from rasterio.windows import Window import scipy as sp +from rasterio.windows import Window +from sklearn.metrics import DistanceMetric +import climada.util.coordinates as u_coord from climada import CONFIG -from climada.entity.exposures.base import Exposures, INDICATOR_IMPF, \ - INDICATOR_CENTR, add_sea, DEF_REF_YEAR, DEF_VALUE_UNIT from climada.entity import LitPop -from climada.hazard.base import Hazard, Centroids -from climada.util.constants import ENT_TEMPLATE_XLS, ONE_LAT_KM, DEF_CRS, HAZ_DEMO_FL -import climada.util.coordinates as u_coord +from climada.entity.exposures.base import ( + DEF_REF_YEAR, + DEF_VALUE_UNIT, + INDICATOR_CENTR, + INDICATOR_IMPF, + Exposures, + add_sea, +) +from climada.hazard.base import Centroids, Hazard +from climada.util.constants import DEF_CRS, ENT_TEMPLATE_XLS, HAZ_DEMO_FL, ONE_LAT_KM DATA_DIR = CONFIG.exposures.test_data.dir() + def good_exposures(): """Followng values are defined for each exposure""" data = {} - data['latitude'] = np.array([1, 2, 3]) - data['longitude'] = np.array([2, 3, 4]) - data['value'] = np.array([1, 2, 3]) - data['deductible'] = np.array([1, 2, 3]) - data[INDICATOR_IMPF + 'NA'] = np.array([1, 2, 3]) - data['category_id'] = np.array([1, 2, 3]) - data['region_id'] = np.array([1, 2, 3]) - data[INDICATOR_CENTR + 'TC'] = np.array([1, 2, 3]) + data["latitude"] = np.array([1, 2, 3]) + data["longitude"] = np.array([2, 3, 4]) + data["value"] = np.array([1, 2, 3]) + data["deductible"] = np.array([1, 2, 3]) + data[INDICATOR_IMPF + "NA"] = np.array([1, 2, 3]) + data["category_id"] = np.array([1, 2, 3]) + data["region_id"] = np.array([1, 2, 3]) + data[INDICATOR_CENTR + "TC"] = np.array([1, 2, 3]) expo = Exposures(gpd.GeoDataFrame(data=data)) return expo + class TestFuncs(unittest.TestCase): """Check assign function""" @@ -59,16 +69,26 @@ def test_assign_pass(self): """Check that attribute `assigned` is correctly set.""" np_rand = np.random.RandomState(123456789) - haz = Hazard.from_raster([HAZ_DEMO_FL], haz_type='FL', window=Window(10, 20, 50, 60)) + haz = Hazard.from_raster( + [HAZ_DEMO_FL], haz_type="FL", window=Window(10, 20, 50, 60) + ) ncentroids = haz.centroids.size exp = Exposures(crs=haz.centroids.crs) # some are matching exactly, some are geographically close - exp.gdf['longitude'] = np.concatenate([ - haz.centroids.lon, haz.centroids.lon + 0.001 * (-0.5 + np_rand.rand(ncentroids))]) - exp.gdf['latitude'] = np.concatenate([ - haz.centroids.lat, haz.centroids.lat + 0.001 * (-0.5 + np_rand.rand(ncentroids))]) + exp.gdf["longitude"] = np.concatenate( + [ + haz.centroids.lon, + haz.centroids.lon + 0.001 * (-0.5 + np_rand.rand(ncentroids)), + ] + ) + exp.gdf["latitude"] = np.concatenate( + [ + haz.centroids.lat, + haz.centroids.lat + 0.001 * (-0.5 + np_rand.rand(ncentroids)), + ] + ) expected_result = np.concatenate([np.arange(ncentroids), np.arange(ncentroids)]) # make sure that it works for both float32 and float64 @@ -76,25 +96,30 @@ def test_assign_pass(self): haz.centroids.gdf["lat"] = haz.centroids.lat.astype(test_dtype) haz.centroids.gdf["lon"] = haz.centroids.lon.astype(test_dtype) exp.assign_centroids(haz) - self.assertEqual(exp.gdf.shape[0], len(exp.gdf[INDICATOR_CENTR + 'FL'])) - np.testing.assert_array_equal(exp.gdf[INDICATOR_CENTR + 'FL'].values, expected_result) + self.assertEqual(exp.gdf.shape[0], len(exp.gdf[INDICATOR_CENTR + "FL"])) + 
np.testing.assert_array_equal( + exp.gdf[INDICATOR_CENTR + "FL"].values, expected_result + ) exp.assign_centroids(Hazard(), overwrite=False) - self.assertEqual(exp.gdf.shape[0], len(exp.gdf[INDICATOR_CENTR + 'FL'])) - np.testing.assert_array_equal(exp.gdf[INDICATOR_CENTR + 'FL'].values, expected_result) + self.assertEqual(exp.gdf.shape[0], len(exp.gdf[INDICATOR_CENTR + "FL"])) + np.testing.assert_array_equal( + exp.gdf[INDICATOR_CENTR + "FL"].values, expected_result + ) def test__init__meta_type(self): - """ Check if meta of type list raises a ValueError in __init__""" + """Check if meta of type list raises a ValueError in __init__""" with self.assertRaises(ValueError) as cm: Exposures(meta=[]) - self.assertEqual("meta must be a dictionary", - str(cm.exception)) + self.assertEqual("meta must be a dictionary", str(cm.exception)) def test__init__geometry_type(self): """Check that initialization fails when `geometry` is given as a `str` argument""" with self.assertRaises(ValueError) as cm: - Exposures(geometry='myname') - self.assertEqual("Exposures is not able to handle customized 'geometry' column names.", - str(cm.exception)) + Exposures(geometry="myname") + self.assertEqual( + "Exposures is not able to handle customized 'geometry' column names.", + str(cm.exception), + ) def test__init__mda_in_kwargs(self): """Check if `_metadata` attributes are instantiated correctly for sub-classes of @@ -109,66 +134,134 @@ def test_read_raster_pass(self): exp = Exposures.from_raster(HAZ_DEMO_FL, window=Window(10, 20, 50, 60)) exp.check() self.assertTrue(u_coord.equal_crs(exp.crs, DEF_CRS)) - self.assertAlmostEqual(exp.gdf['latitude'].max(), - 10.248220966978932 - 0.009000000000000341 / 2) - self.assertAlmostEqual(exp.gdf['latitude'].min(), - 10.248220966978932 - 0.009000000000000341 - / 2 - 59 * 0.009000000000000341) - self.assertAlmostEqual(exp.gdf['longitude'].min(), - -69.2471495969998 + 0.009000000000000341 / 2) - self.assertAlmostEqual(exp.gdf['longitude'].max(), - -69.2471495969998 + 0.009000000000000341 - / 2 + 49 * 0.009000000000000341) + self.assertAlmostEqual( + exp.gdf["latitude"].max(), 10.248220966978932 - 0.009000000000000341 / 2 + ) + self.assertAlmostEqual( + exp.gdf["latitude"].min(), + 10.248220966978932 - 0.009000000000000341 / 2 - 59 * 0.009000000000000341, + ) + self.assertAlmostEqual( + exp.gdf["longitude"].min(), -69.2471495969998 + 0.009000000000000341 / 2 + ) + self.assertAlmostEqual( + exp.gdf["longitude"].max(), + -69.2471495969998 + 0.009000000000000341 / 2 + 49 * 0.009000000000000341, + ) self.assertEqual(len(exp.gdf), 60 * 50) - self.assertAlmostEqual(exp.gdf['value'].values.reshape((60, 50))[25, 12], 0.056825936) + self.assertAlmostEqual( + exp.gdf["value"].values.reshape((60, 50))[25, 12], 0.056825936 + ) def test_assign_raster_pass(self): """Test assign_centroids with raster hazard""" # explicit, easy-to-understand raster centroids for hazard meta = { - 'count': 1, 'crs': DEF_CRS, - 'width': 20, 'height': 10, - 'transform': rasterio.Affine(1.5, 0.0, -20, 0.0, -1.4, 8) + "count": 1, + "crs": DEF_CRS, + "width": 20, + "height": 10, + "transform": rasterio.Affine(1.5, 0.0, -20, 0.0, -1.4, 8), } - haz = Hazard('FL', centroids=Centroids.from_meta(meta)) + haz = Hazard("FL", centroids=Centroids.from_meta(meta)) # explicit points with known results (see `expected_result` for details) exp = Exposures(crs=DEF_CRS) - exp.gdf['longitude'] = np.array([ - -20.1, -20.0, -19.8, -19.0, -18.6, -18.4, - -19.0, -19.0, -19.0, -19.0, - -20.1, 0.0, 10.1, 10.1, 10.1, 0.0, -20.2, -20.3, - 
-6.4, 9.8, 0.0, - ]) - exp.gdf['latitude'] = np.array([ - 7.3, 7.3, 7.3, 7.3, 7.3, 7.3, - 8.1, 7.9, 6.7, 6.5, - 8.1, 8.2, 8.3, 0.0, -6.1, -6.2, -6.3, 0.0, - -1.9, -1.7, 0.0, - ]) + exp.gdf["longitude"] = np.array( + [ + -20.1, + -20.0, + -19.8, + -19.0, + -18.6, + -18.4, + -19.0, + -19.0, + -19.0, + -19.0, + -20.1, + 0.0, + 10.1, + 10.1, + 10.1, + 0.0, + -20.2, + -20.3, + -6.4, + 9.8, + 0.0, + ] + ) + exp.gdf["latitude"] = np.array( + [ + 7.3, + 7.3, + 7.3, + 7.3, + 7.3, + 7.3, + 8.1, + 7.9, + 6.7, + 6.5, + 8.1, + 8.2, + 8.3, + 0.0, + -6.1, + -6.2, + -6.3, + 0.0, + -1.9, + -1.7, + 0.0, + ] + ) exp.assign_centroids(haz) expected_result = [ # constant y-value, varying x-value - 0, 0, 0, 0, 0, 1, + 0, + 0, + 0, + 0, + 0, + 1, # constant x-value, varying y-value - 0, 0, 0, 20, + 0, + 0, + 0, + 20, # out of bounds: topleft, top, topright, right, bottomright, bottom, bottomleft, left - -1, -1, -1, -1, -1, -1, -1, -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, # some explicit points within the raster - 149, 139, 113, + 149, + 139, + 113, ] - np.testing.assert_array_equal(exp.gdf[INDICATOR_CENTR + 'FL'].values, expected_result) - + np.testing.assert_array_equal( + exp.gdf[INDICATOR_CENTR + "FL"].values, expected_result + ) def test_assign_raster_same_pass(self): """Test assign_centroids with raster hazard""" exp = Exposures.from_raster(HAZ_DEMO_FL, window=Window(10, 20, 50, 60)) exp.check() - haz = Hazard.from_raster([HAZ_DEMO_FL], haz_type='FL', window=Window(10, 20, 50, 60)) + haz = Hazard.from_raster( + [HAZ_DEMO_FL], haz_type="FL", window=Window(10, 20, 50, 60) + ) exp.assign_centroids(haz) - np.testing.assert_array_equal(exp.gdf[INDICATOR_CENTR + 'FL'].values, - np.arange(haz.centroids.size, dtype=int)) + np.testing.assert_array_equal( + exp.gdf[INDICATOR_CENTR + "FL"].values, + np.arange(haz.centroids.size, dtype=int), + ) # Test fails because exposures stores the crs in the meta attribute as rasterio object, # while the centroids stores the crs in the geodataframe, which is not a rasterio object. 
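
# Illustrative consistency check for the expected_result values in
# test_assign_raster_pass above (an assumption of this sketch: for points well
# inside the raster, the assigned centroid is that of the containing pixel,
# flattened row-major with width 20). Worked for the point (lon=-19.0, lat=6.5):
import math

col = math.floor((-19.0 - (-20.0)) / 1.5)  # pixel column from Affine(1.5, 0, -20, ...)
row = math.floor((8.0 - 6.5) / 1.4)        # pixel row from Affine(..., 0, -1.4, 8)
index = row * 20 + col                     # -> 1 * 20 + 0 = 20, as in expected_result
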
@@ -176,14 +269,20 @@ def test_assign_raster_same_pass(self): def test_assign_large_hazard_subset_pass(self): """Test assign_centroids with raster hazard""" exp = Exposures.from_raster(HAZ_DEMO_FL, window=Window(10, 20, 50, 60)) - exp.gdf['latitude'][[0, 1]] = exp.gdf['latitude'][[1, 0]] - exp.gdf['longitude'][[0, 1]] = exp.gdf['longitude'][[1, 0]] + exp.gdf["latitude"][[0, 1]] = exp.gdf["latitude"][[1, 0]] + exp.gdf["longitude"][[0, 1]] = exp.gdf["longitude"][[1, 0]] exp.check() - haz = Hazard.from_raster([HAZ_DEMO_FL], haz_type='FL') + haz = Hazard.from_raster([HAZ_DEMO_FL], haz_type="FL") exp.assign_centroids(haz) - assigned_centroids = haz.centroids.select(sel_cen=exp.gdf[INDICATOR_CENTR + 'FL'].values) - np.testing.assert_array_equal(np.unique(assigned_centroids.lat), np.unique(exp.gdf['latitude'])) - np.testing.assert_array_equal(np.unique(assigned_centroids.lon), np.unique(exp.gdf['longitude'])) + assigned_centroids = haz.centroids.select( + sel_cen=exp.gdf[INDICATOR_CENTR + "FL"].values + ) + np.testing.assert_array_equal( + np.unique(assigned_centroids.lat), np.unique(exp.gdf["latitude"]) + ) + np.testing.assert_array_equal( + np.unique(assigned_centroids.lon), np.unique(exp.gdf["longitude"]) + ) def test_affected_total_value(self): haz_type = "RF" @@ -206,15 +305,15 @@ def test_affected_total_value(self): tot_val = exp.affected_total_value( haz, threshold_affected=0, overwrite_assigned_centroids=False ) - self.assertEqual(tot_val, np.sum(exp.gdf['value'][[1, 2, 3, 5]])) + self.assertEqual(tot_val, np.sum(exp.gdf["value"][[1, 2, 3, 5]])) tot_val = exp.affected_total_value( haz, threshold_affected=3, overwrite_assigned_centroids=False ) - self.assertEqual(tot_val, np.sum(exp.gdf['value'][[3]])) + self.assertEqual(tot_val, np.sum(exp.gdf["value"][[3]])) tot_val = exp.affected_total_value( haz, threshold_affected=-2, overwrite_assigned_centroids=False ) - self.assertEqual(tot_val, np.sum(exp.gdf['value'][[0, 1, 2, 3, 5]])) + self.assertEqual(tot_val, np.sum(exp.gdf["value"][[0, 1, 2, 3, 5]])) tot_val = exp.affected_total_value( haz, threshold_affected=11, overwrite_assigned_centroids=False ) @@ -234,17 +333,18 @@ def test_affected_total_value(self): ) self.assertEqual(tot_val, 4) + class TestChecker(unittest.TestCase): """Test logs of check function""" def test_error_logs_fail(self): """Wrong exposures definition""" expo = good_exposures() - expo.gdf.drop(['longitude'], inplace=True, axis=1) + expo.gdf.drop(["longitude"], inplace=True, axis=1) with self.assertRaises(ValueError) as cm: expo.check() - self.assertIn('longitude missing', str(cm.exception)) + self.assertIn("longitude missing", str(cm.exception)) def test_error_logs_wrong_crs(self): """Ambiguous crs definition""" @@ -252,39 +352,49 @@ def test_error_logs_wrong_crs(self): expo.set_geometry_points() # sets crs to 4326 # all good - _expo = Exposures(expo.gdf, meta={'crs':4326}, crs=DEF_CRS) + _expo = Exposures(expo.gdf, meta={"crs": 4326}, crs=DEF_CRS) with self.assertRaises(ValueError) as cm: - _expo = Exposures(expo.gdf, meta={'crs':4230}, crs=4326) - self.assertIn("Inconsistent CRS definition, crs and meta arguments don't match", - str(cm.exception)) + _expo = Exposures(expo.gdf, meta={"crs": 4230}, crs=4326) + self.assertIn( + "Inconsistent CRS definition, crs and meta arguments don't match", + str(cm.exception), + ) with self.assertRaises(ValueError) as cm: - _expo = Exposures(expo.gdf, meta={'crs':4230}) - self.assertIn("Inconsistent CRS definition, data doesn't match meta or crs argument", - str(cm.exception)) + _expo 
= Exposures(expo.gdf, meta={"crs": 4230}) + self.assertIn( + "Inconsistent CRS definition, data doesn't match meta or crs argument", + str(cm.exception), + ) with self.assertRaises(ValueError) as cm: - _expo = Exposures(expo.gdf, crs='epsg:4230') - self.assertIn("Inconsistent CRS definition, data doesn't match meta or crs argument", - str(cm.exception)) + _expo = Exposures(expo.gdf, crs="epsg:4230") + self.assertIn( + "Inconsistent CRS definition, data doesn't match meta or crs argument", + str(cm.exception), + ) _expo = Exposures(expo.gdf) - _expo.meta['crs'] = 'epsg:4230' + _expo.meta["crs"] = "epsg:4230" with self.assertRaises(ValueError) as cm: _expo.check() - self.assertIn("Inconsistent CRS definition, gdf (EPSG:4326) attribute doesn't match " - "meta (epsg:4230) attribute.", str(cm.exception)) + self.assertIn( + "Inconsistent CRS definition, gdf (EPSG:4326) attribute doesn't match " + "meta (epsg:4230) attribute.", + str(cm.exception), + ) def test_error_geometry_fail(self): """Wrong exposures definition""" expo = good_exposures() expo.set_geometry_points() - expo.gdf['latitude'].values[0] = 5 + expo.gdf["latitude"].values[0] = 5 with self.assertRaises(ValueError): expo.check() + class TestIO(unittest.TestCase): """Check constructor Exposures through DataFrames readers""" @@ -294,7 +404,7 @@ def test_read_template_pass(self): exp_df = Exposures(df) # set metadata exp_df.ref_year = 2020 - exp_df.value_unit = 'XSD' + exp_df.value_unit = "XSD" exp_df.check() def test_io_hdf5_pass(self): @@ -304,13 +414,14 @@ def test_io_hdf5_pass(self): exp_df.check() # set metadata exp_df.ref_year = 2020 - exp_df.value_unit = 'XSD' + exp_df.value_unit = "XSD" - file_name = DATA_DIR.joinpath('test_hdf5_exp.h5') + file_name = DATA_DIR.joinpath("test_hdf5_exp.h5") # pd.errors.PerformanceWarning should be suppressed. 
Therefore, make sure that # PerformanceWarning would result in test failure here import warnings + with warnings.catch_warnings(): warnings.simplefilter("error", category=pd.errors.PerformanceWarning) exp_df.write_hdf5(file_name) @@ -323,36 +434,62 @@ def test_io_hdf5_pass(self): self.assertTrue(u_coord.equal_crs(exp_df.crs, exp_read.crs)) self.assertTrue(u_coord.equal_crs(exp_df.gdf.crs, exp_read.gdf.crs)) self.assertEqual(exp_df.description, exp_read.description) - np.testing.assert_array_equal(exp_df.gdf['latitude'].values, exp_read.gdf['latitude'].values) - np.testing.assert_array_equal(exp_df.gdf['longitude'].values, exp_read.gdf['longitude'].values) - np.testing.assert_array_equal(exp_df.gdf['value'].values, exp_read.gdf['value'].values) - np.testing.assert_array_equal(exp_df.gdf['deductible'].values, exp_read.gdf['deductible'].values) - np.testing.assert_array_equal(exp_df.gdf['cover'].values, exp_read.gdf['cover'].values) - np.testing.assert_array_equal(exp_df.gdf['region_id'].values, exp_read.gdf['region_id'].values) - np.testing.assert_array_equal(exp_df.gdf['category_id'].values, exp_read.gdf['category_id'].values) - np.testing.assert_array_equal(exp_df.gdf['impf_TC'].values, exp_read.gdf['impf_TC'].values) - np.testing.assert_array_equal(exp_df.gdf['centr_TC'].values, exp_read.gdf['centr_TC'].values) - np.testing.assert_array_equal(exp_df.gdf['impf_FL'].values, exp_read.gdf['impf_FL'].values) - np.testing.assert_array_equal(exp_df.gdf['centr_FL'].values, exp_read.gdf['centr_FL'].values) - - for point_df, point_read in zip(exp_df.gdf.geometry.values, exp_read.gdf.geometry.values): + np.testing.assert_array_equal( + exp_df.gdf["latitude"].values, exp_read.gdf["latitude"].values + ) + np.testing.assert_array_equal( + exp_df.gdf["longitude"].values, exp_read.gdf["longitude"].values + ) + np.testing.assert_array_equal( + exp_df.gdf["value"].values, exp_read.gdf["value"].values + ) + np.testing.assert_array_equal( + exp_df.gdf["deductible"].values, exp_read.gdf["deductible"].values + ) + np.testing.assert_array_equal( + exp_df.gdf["cover"].values, exp_read.gdf["cover"].values + ) + np.testing.assert_array_equal( + exp_df.gdf["region_id"].values, exp_read.gdf["region_id"].values + ) + np.testing.assert_array_equal( + exp_df.gdf["category_id"].values, exp_read.gdf["category_id"].values + ) + np.testing.assert_array_equal( + exp_df.gdf["impf_TC"].values, exp_read.gdf["impf_TC"].values + ) + np.testing.assert_array_equal( + exp_df.gdf["centr_TC"].values, exp_read.gdf["centr_TC"].values + ) + np.testing.assert_array_equal( + exp_df.gdf["impf_FL"].values, exp_read.gdf["impf_FL"].values + ) + np.testing.assert_array_equal( + exp_df.gdf["centr_FL"].values, exp_read.gdf["centr_FL"].values + ) + + for point_df, point_read in zip( + exp_df.gdf.geometry.values, exp_read.gdf.geometry.values + ): self.assertEqual(point_df.x, point_read.x) self.assertEqual(point_df.y, point_read.y) + class TestAddSea(unittest.TestCase): """Check constructor Exposures through DataFrames readers""" + def test_add_sea_pass(self): """Test add_sea function with fake data.""" exp = Exposures() - exp.gdf['value'] = np.arange(0, 1.0e6, 1.0e5) + exp.gdf["value"] = np.arange(0, 1.0e6, 1.0e5) min_lat, max_lat = 27.5, 30 min_lon, max_lon = -18, -12 - exp.gdf['latitude'] = np.linspace(min_lat, max_lat, 10) - exp.gdf['longitude'] = np.linspace(min_lon, max_lon, 10) - exp.gdf['region_id'] = np.ones(10) - exp.gdf['impf_TC'] = np.ones(10) + exp.gdf["latitude"] = np.linspace(min_lat, max_lat, 10) + exp.gdf["longitude"] = 
np.linspace(min_lon, max_lon, 10) + exp.gdf["region_id"] = np.ones(10) + exp.gdf["impf_TC"] = np.ones(10) exp.ref_year = 2015 - exp.value_unit = 'XSD' + exp.value_unit = "XSD" exp.check() sea_coast = 100 @@ -368,38 +505,52 @@ def test_add_sea_pass(self): max_lat = max_lat + sea_coast min_lon = min_lon - sea_coast max_lon = max_lon + sea_coast - self.assertEqual(np.min(exp_sea.gdf['latitude']), min_lat) - self.assertEqual(np.min(exp_sea.gdf['longitude']), min_lon) - np.testing.assert_array_equal(exp_sea.gdf.value.values[:10], np.arange(0, 1.0e6, 1.0e5)) + self.assertEqual(np.min(exp_sea.gdf["latitude"]), min_lat) + self.assertEqual(np.min(exp_sea.gdf["longitude"]), min_lon) + np.testing.assert_array_equal( + exp_sea.gdf.value.values[:10], np.arange(0, 1.0e6, 1.0e5) + ) self.assertEqual(exp_sea.ref_year, exp.ref_year) self.assertEqual(exp_sea.value_unit, exp.value_unit) - on_sea_lat = exp_sea.gdf['latitude'].values[11:] - on_sea_lon = exp_sea.gdf['longitude'].values[11:] + on_sea_lat = exp_sea.gdf["latitude"].values[11:] + on_sea_lon = exp_sea.gdf["longitude"].values[11:] res_on_sea = u_coord.coord_on_land(on_sea_lat, on_sea_lon) res_on_sea = ~res_on_sea self.assertTrue(np.all(res_on_sea)) - dist = DistanceMetric.get_metric('haversine') - self.assertAlmostEqual(dist.pairwise([ - [exp_sea.gdf['longitude'].values[-1], exp_sea.gdf['latitude'].values[-1]], - [exp_sea.gdf['longitude'].values[-2], exp_sea.gdf['latitude'].values[-2]], - ])[0][1], sea_res_km) + dist = DistanceMetric.get_metric("haversine") + self.assertAlmostEqual( + dist.pairwise( + [ + [ + exp_sea.gdf["longitude"].values[-1], + exp_sea.gdf["latitude"].values[-1], + ], + [ + exp_sea.gdf["longitude"].values[-2], + exp_sea.gdf["latitude"].values[-2], + ], + ] + )[0][1], + sea_res_km, + ) class TestConcat(unittest.TestCase): """Check constructor Exposures through DataFrames readers""" + def setUp(self): - exp = Exposures(crs='epsg:3395') - exp.gdf['value'] = np.arange(0, 1.0e6, 1.0e5) + exp = Exposures(crs="epsg:3395") + exp.gdf["value"] = np.arange(0, 1.0e6, 1.0e5) min_lat, max_lat = 27.5, 30 min_lon, max_lon = -18, -12 - exp.gdf['latitude'] = np.linspace(min_lat, max_lat, 10) - exp.gdf['longitude'] = np.linspace(min_lon, max_lon, 10) - exp.gdf['region_id'] = np.ones(10) - exp.gdf['impf_TC'] = np.ones(10) + exp.gdf["latitude"] = np.linspace(min_lat, max_lat, 10) + exp.gdf["longitude"] = np.linspace(min_lon, max_lon, 10) + exp.gdf["region_id"] = np.ones(10) + exp.gdf["impf_TC"] = np.ones(10) exp.ref_year = 2015 - exp.value_unit = 'XSD' + exp.value_unit = "XSD" self.dummy = exp def test_concat_pass(self): @@ -407,20 +558,30 @@ def test_concat_pass(self): self.dummy.check() - catexp = Exposures.concat([self.dummy, self.dummy.gdf, pd.DataFrame(self.dummy.gdf.values, columns=self.dummy.gdf.columns), self.dummy]) - self.assertEqual(self.dummy.gdf.shape, (10,5)) - self.assertEqual(catexp.gdf.shape, (40,5)) - self.assertTrue(u_coord.equal_crs(catexp.crs, 'epsg:3395')) + catexp = Exposures.concat( + [ + self.dummy, + self.dummy.gdf, + pd.DataFrame(self.dummy.gdf.values, columns=self.dummy.gdf.columns), + self.dummy, + ] + ) + self.assertEqual(self.dummy.gdf.shape, (10, 5)) + self.assertEqual(catexp.gdf.shape, (40, 5)) + self.assertTrue(u_coord.equal_crs(catexp.crs, "epsg:3395")) def test_concat_fail(self): """Test failing concat function with fake data.""" with self.assertRaises(TypeError): - Exposures.concat([self.dummy, self.dummy.gdf, self.dummy.gdf.values, self.dummy]) + Exposures.concat( + [self.dummy, self.dummy.gdf, 
self.dummy.gdf.values, self.dummy] + ) class TestGeoDFFuncs(unittest.TestCase): """Check constructor Exposures through DataFrames readers""" + def test_copy_pass(self): """Test copy function.""" exp = good_exposures() @@ -431,17 +592,21 @@ def test_copy_pass(self): self.assertEqual(exp_copy.ref_year, exp.ref_year) self.assertEqual(exp_copy.value_unit, exp.value_unit) self.assertEqual(exp_copy.description, exp.description) - np.testing.assert_array_equal(exp_copy.gdf['latitude'].values, exp.gdf['latitude'].values) - np.testing.assert_array_equal(exp_copy.gdf['longitude'].values, exp.gdf['longitude'].values) + np.testing.assert_array_equal( + exp_copy.gdf["latitude"].values, exp.gdf["latitude"].values + ) + np.testing.assert_array_equal( + exp_copy.gdf["longitude"].values, exp.gdf["longitude"].values + ) def test_to_crs_inplace_pass(self): """Test to_crs function inplace.""" exp = good_exposures() exp.set_geometry_points() exp.check() - exp.to_crs('epsg:3395', inplace=True) + exp.to_crs("epsg:3395", inplace=True) self.assertIsInstance(exp, Exposures) - self.assertTrue(u_coord.equal_crs(exp.crs, 'epsg:3395')) + self.assertTrue(u_coord.equal_crs(exp.crs, "epsg:3395")) self.assertEqual(exp.ref_year, DEF_REF_YEAR) self.assertEqual(exp.value_unit, DEF_VALUE_UNIT) self.assertEqual(exp.description, None) @@ -451,10 +616,10 @@ def test_to_crs_pass(self): exp = good_exposures() exp.set_geometry_points() exp.check() - exp_tr = exp.to_crs('epsg:3395') + exp_tr = exp.to_crs("epsg:3395") self.assertIsInstance(exp, Exposures) self.assertTrue(u_coord.equal_crs(exp.crs, DEF_CRS)) - self.assertTrue(u_coord.equal_crs(exp_tr.crs, 'epsg:3395')) + self.assertTrue(u_coord.equal_crs(exp_tr.crs, "epsg:3395")) self.assertEqual(exp_tr.ref_year, DEF_REF_YEAR) self.assertEqual(exp_tr.value_unit, DEF_VALUE_UNIT) self.assertEqual(exp_tr.description, None) @@ -462,17 +627,17 @@ def test_to_crs_pass(self): def test_constructor_pass(self): """Test initialization with input GeoDataFrame""" in_gpd = gpd.GeoDataFrame() - in_gpd['value'] = np.zeros(10) + in_gpd["value"] = np.zeros(10) in_gpd.ref_year = 2015 in_exp = Exposures(in_gpd, ref_year=2015) self.assertEqual(in_exp.ref_year, 2015) - np.testing.assert_array_equal(in_exp.gdf['value'], np.zeros(10)) + np.testing.assert_array_equal(in_exp.gdf["value"], np.zeros(10)) def test_error_on_access_item(self): """Test error output when trying to access items as in CLIMADA 1.x""" expo = good_exposures() with self.assertRaises(TypeError) as err: - expo['value'] = 3 + expo["value"] = 3 self.assertIn("CLIMADA 2", str(err.exception)) self.assertIn("gdf", str(err.exception)) @@ -481,7 +646,7 @@ def test_set_gdf(self): empty_gdf = gpd.GeoDataFrame() gdf_without_geometry = good_exposures().gdf good_exp = good_exposures() - good_exp.set_crs(crs='epsg:3395') + good_exp.set_crs(crs="epsg:3395") good_exp.set_geometry_points() gdf_with_geometry = good_exp.gdf @@ -495,8 +660,8 @@ def test_set_gdf(self): probe.set_gdf(gdf_with_geometry) self.assertTrue(probe.gdf.equals(gdf_with_geometry)) - self.assertTrue(u_coord.equal_crs('epsg:3395', probe.crs)) - self.assertTrue(u_coord.equal_crs('epsg:3395', probe.gdf.crs)) + self.assertTrue(u_coord.equal_crs("epsg:3395", probe.crs)) + self.assertTrue(u_coord.equal_crs("epsg:3395", probe.gdf.crs)) probe.set_gdf(gdf_without_geometry) self.assertTrue(probe.gdf.equals(good_exposures().gdf)) @@ -513,61 +678,63 @@ def test_set_crs(self): probe = Exposures(gdf_without_geometry) self.assertTrue(u_coord.equal_crs(DEF_CRS, probe.crs)) - probe.set_crs('epsg:3395') - 
self.assertTrue(u_coord.equal_crs('epsg:3395', probe.crs)) + probe.set_crs("epsg:3395") + self.assertTrue(u_coord.equal_crs("epsg:3395", probe.crs)) probe = Exposures(gdf_with_geometry) self.assertTrue(u_coord.equal_crs(DEF_CRS, probe.crs)) probe.set_crs(DEF_CRS) self.assertTrue(u_coord.equal_crs(DEF_CRS, probe.crs)) - self.assertRaises(ValueError, probe.set_crs, 'epsg:3395') - self.assertTrue(u_coord.equal_crs('EPSG:4326', probe.meta.get('crs'))) + self.assertRaises(ValueError, probe.set_crs, "epsg:3395") + self.assertTrue(u_coord.equal_crs("EPSG:4326", probe.meta.get("crs"))) def test_to_crs_epsg_crs(self): - """ Check that if crs and epsg are both provided a ValueError is raised""" + """Check that if crs and epsg are both provided a ValueError is raised""" with self.assertRaises(ValueError) as cm: - Exposures.to_crs(self, crs='GCS', epsg=26915) + Exposures.to_crs(self, crs="GCS", epsg=26915) self.assertEqual("one of crs or epsg must be None", str(cm.exception)) + class TestImpactFunctions(unittest.TestCase): """Test impact function handling""" + def test_get_impf_column(self): """Test the get_impf_column""" expo = good_exposures() # impf column is 'impf_NA' - self.assertEqual('impf_NA', expo.get_impf_column('NA')) + self.assertEqual("impf_NA", expo.get_impf_column("NA")) self.assertRaises(ValueError, expo.get_impf_column) - self.assertRaises(ValueError, expo.get_impf_column, 'HAZ') + self.assertRaises(ValueError, expo.get_impf_column, "HAZ") # removed impf column - expo.gdf.drop(columns='impf_NA', inplace=True) - self.assertRaises(ValueError, expo.get_impf_column, 'NA') + expo.gdf.drop(columns="impf_NA", inplace=True) + self.assertRaises(ValueError, expo.get_impf_column, "NA") self.assertRaises(ValueError, expo.get_impf_column) # default (anonymous) impf column expo.check() - self.assertEqual('impf_', expo.get_impf_column()) - self.assertEqual('impf_', expo.get_impf_column('HAZ')) + self.assertEqual("impf_", expo.get_impf_column()) + self.assertEqual("impf_", expo.get_impf_column("HAZ")) # rename impf column to old style column name - expo.gdf.rename(columns={'impf_': 'if_'}, inplace=True) + expo.gdf.rename(columns={"impf_": "if_"}, inplace=True) expo.check() - self.assertEqual('if_', expo.get_impf_column()) - self.assertEqual('if_', expo.get_impf_column('HAZ')) + self.assertEqual("if_", expo.get_impf_column()) + self.assertEqual("if_", expo.get_impf_column("HAZ")) # rename impf column to old style column name - expo.gdf.rename(columns={'if_': 'if_NA'}, inplace=True) + expo.gdf.rename(columns={"if_": "if_NA"}, inplace=True) expo.check() - self.assertEqual('if_NA', expo.get_impf_column('NA')) + self.assertEqual("if_NA", expo.get_impf_column("NA")) self.assertRaises(ValueError, expo.get_impf_column) - self.assertRaises(ValueError, expo.get_impf_column, 'HAZ') + self.assertRaises(ValueError, expo.get_impf_column, "HAZ") # add anonymous impf column - expo.gdf['impf_'] = expo.gdf['region_id'] - self.assertEqual('if_NA', expo.get_impf_column('NA')) - self.assertEqual('impf_', expo.get_impf_column()) - self.assertEqual('impf_', expo.get_impf_column('HAZ')) + expo.gdf["impf_"] = expo.gdf["region_id"] + self.assertEqual("if_NA", expo.get_impf_column("NA")) + self.assertEqual("impf_", expo.get_impf_column()) + self.assertEqual("impf_", expo.get_impf_column("HAZ")) # Execute Tests diff --git a/climada/entity/exposures/test/test_litpop.py b/climada/entity/exposures/test/test_litpop.py index d8ec001cd..72360bc8d 100644 --- a/climada/entity/exposures/test/test_litpop.py +++ 
b/climada/entity/exposures/test/test_litpop.py @@ -19,10 +19,12 @@ Unit Tests for LitPop class. """ -import numpy as np import unittest -from rasterio.crs import CRS + +import numpy as np from rasterio import Affine +from rasterio.crs import CRS + from climada.entity.exposures.litpop import litpop as lp @@ -30,73 +32,86 @@ def data_arrays_demo(number_of_arrays=2): """init demo data arrays (2d) for LitPop core calculations""" data_arrays = list() if number_of_arrays > 0: - data_arrays.append(np.array([[0,1,2], [3,4,5]])) + data_arrays.append(np.array([[0, 1, 2], [3, 4, 5]])) # array([[0, 1, 2], # [3, 4, 5]]) if number_of_arrays > 1: - data_arrays.append(np.array([[10,10,10], [1,1,1]])) + data_arrays.append(np.array([[10, 10, 10], [1, 1, 1]])) # array([[10, 10, 10], # [1, 1, 1]]) if number_of_arrays > 2: - data_arrays.append(np.array([[0,1,10], [0,1,10]])) + data_arrays.append(np.array([[0, 1, 10], [0, 1, 10]])) # array([[0, 1, 10], # [0, 1, 10]]) if number_of_arrays > 3: - data_arrays.append([[0,1,10,100], [0,1,10,100]]) + data_arrays.append([[0, 1, 10, 100], [0, 1, 10, 100]]) # [[0, 1, 10, 100], # [0, 1, 10, 100]] return data_arrays + def data_arrays_resampling_demo(): """init demo data arrays (2d) and meta data for resampling""" data_arrays = list() # demo pop: - data_arrays.append(np.array([[0,1,2], [3,4,5]], dtype='float32')) - data_arrays.append(np.array([[0,1,2], [3,4,5]], dtype='float32')) - # array([[0, 1, 2], - # [3, 4, 5]]) + data_arrays.append(np.array([[0, 1, 2], [3, 4, 5]], dtype="float32")) + data_arrays.append(np.array([[0, 1, 2], [3, 4, 5]], dtype="float32")) + # array([[0, 1, 2], + # [3, 4, 5]]) # demo nightlight: - data_arrays.append(np.array([[2,10,0, 0, 0, 0], [10,2,10, 0, 0, 0], - [0,0,0, 0, 1, 1], [1,0,0, 0, 1, 1]], - dtype='float32')) - # array([[ 2., 10., 0., 0., 0., 0.], - # [10., 2., 10., 0., 0., 0.], - # [ 0., 0., 0., 0., 1., 1.], - # [ 1., 0., 0., 0., 1., 1.]], dtype=float32)] - - meta_list = [{'driver': 'GTiff', - 'dtype': 'float32', - 'nodata': -3.4028230607370965e+38, - 'width': 3, - 'height': 2, - 'count': 1, - 'crs': CRS.from_epsg(4326), - #'crs': CRS.from_epsg(4326), - 'transform': Affine(1, 0.0, -10, - 0.0, -1, 40), - }, - {'driver': 'GTiff', - 'dtype': 'float32', - 'nodata': -3.4028230607370965e+38, - 'width': 3, - 'height': 2, - 'count': 1, - 'crs': CRS.from_epsg(4326), - #'crs': CRS.from_epsg(4326), - 'transform': Affine(1, 0.0, -10, - 0.0, -1, 41), # shifted by 1 degree latitude to the north - }, - {'driver': 'GTiff', - 'dtype': 'float32', - 'nodata': None, - 'width': 6, - 'height': 4, - 'count': 1, - 'crs': CRS.from_epsg(4326), - # 'crs': CRS.from_epsg(32662), - 'transform': Affine(.5, 0.0, -10, - 0.0, -.5, 40), # higher resolution - }] + data_arrays.append( + np.array( + [ + [2, 10, 0, 0, 0, 0], + [10, 2, 10, 0, 0, 0], + [0, 0, 0, 0, 1, 1], + [1, 0, 0, 0, 1, 1], + ], + dtype="float32", + ) + ) + # array([[ 2., 10., 0., 0., 0., 0.], + # [10., 2., 10., 0., 0., 0.], + # [ 0., 0., 0., 0., 1., 1.], + # [ 1., 0., 0., 0., 1., 1.]], dtype=float32)] + + meta_list = [ + { + "driver": "GTiff", + "dtype": "float32", + "nodata": -3.4028230607370965e38, + "width": 3, + "height": 2, + "count": 1, + "crs": CRS.from_epsg(4326), + #'crs': CRS.from_epsg(4326), + "transform": Affine(1, 0.0, -10, 0.0, -1, 40), + }, + { + "driver": "GTiff", + "dtype": "float32", + "nodata": -3.4028230607370965e38, + "width": 3, + "height": 2, + "count": 1, + "crs": CRS.from_epsg(4326), + #'crs': CRS.from_epsg(4326), + "transform": Affine( + 1, 0.0, -10, 0.0, -1, 41 + ), # shifted by 1 
degree latitude to the north + }, + { + "driver": "GTiff", + "dtype": "float32", + "nodata": None, + "width": 6, + "height": 4, + "count": 1, + "crs": CRS.from_epsg(4326), + # 'crs': CRS.from_epsg(32662), + "transform": Affine(0.5, 0.0, -10, 0.0, -0.5, 40), # higher resolution + }, + ] return data_arrays, meta_list @@ -108,29 +123,36 @@ def test_reproject_input_data_downsample(self): (default resampling for LitPop)""" data_in, meta_list = data_arrays_resampling_demo() # - data_out, meta_out = lp.reproject_input_data(data_in, meta_list, - i_align=0, - target_res_arcsec=None, - global_origins=(-180, 90) - ) + data_out, meta_out = lp.reproject_input_data( + data_in, + meta_list, + i_align=0, + target_res_arcsec=None, + global_origins=(-180, 90), + ) # test reference data unchanged: np.testing.assert_array_equal(data_in[0], data_out[0]) # test northward shift: - np.testing.assert_array_equal(data_in[1][1,:], data_out[1][0,:]) + np.testing.assert_array_equal(data_in[1][1, :], data_out[1][0, :]) # test reprojected nl data: - reference_array = np.array([[5.020408 , 2.267857 , 0.12244898], - [1.1224489 , 0.6785714 , 0.7346939 ]], dtype='float32') + reference_array = np.array( + [[5.020408, 2.267857, 0.12244898], [1.1224489, 0.6785714, 0.7346939]], + dtype="float32", + ) np.testing.assert_array_almost_equal_nulp(reference_array, data_out[2]) def test_reproject_input_data_downsample_conserve_sum(self): """test function reproject_input_data downsampling with conservation of sum""" data_in, meta_list = data_arrays_resampling_demo() # - data_out, meta_out = lp.reproject_input_data(data_in, meta_list, - i_align=0, - target_res_arcsec=None, - global_origins=(-180, 90), - conserve='sum') + data_out, meta_out = lp.reproject_input_data( + data_in, + meta_list, + i_align=0, + target_res_arcsec=None, + global_origins=(-180, 90), + conserve="sum", + ) # test reference data unchanged: np.testing.assert_array_equal(data_in[0], data_out[0]) # test conserve sum: @@ -141,11 +163,14 @@ def test_reproject_input_data_downsample_conserve_mean(self): """test function reproject_input_data downsampling with conservation of sum""" data_in, meta_list = data_arrays_resampling_demo() # - data_out, meta_out = lp.reproject_input_data(data_in, meta_list, - i_align=1, - target_res_arcsec=None, - global_origins=(-180, 90), - conserve='mean') + data_out, meta_out = lp.reproject_input_data( + data_in, + meta_list, + i_align=1, + target_res_arcsec=None, + global_origins=(-180, 90), + conserve="mean", + ) # test reference data unchanged: np.testing.assert_array_equal(data_in[1], data_out[1]) # test conserve sum: @@ -157,36 +182,45 @@ def test_reproject_input_data_upsample(self): (usually not required for LitPop)""" data_in, meta_list = data_arrays_resampling_demo() # - data_out, meta_out = lp.reproject_input_data(data_in, meta_list, - i_align=2, # high res data as reference - target_res_arcsec=None, - global_origins=(-180, 90) - ) + data_out, meta_out = lp.reproject_input_data( + data_in, + meta_list, + i_align=2, # high res data as reference + target_res_arcsec=None, + global_origins=(-180, 90), + ) # test reference data unchanged: np.testing.assert_array_equal(data_in[2], data_out[2]) # test northward shift: - np.testing.assert_array_equal(data_out[0][2,:], data_out[1][0,:]) - np.testing.assert_array_equal(data_out[0][3,:], data_out[1][1,:]) + np.testing.assert_array_equal(data_out[0][2, :], data_out[1][0, :]) + np.testing.assert_array_equal(data_out[0][3, :], data_out[1][1, :]) # test reprojected nl data: - reference_array = 
np.array([[0. , 0.25, 0.75, 1.25, 1.75, 2. ], - [0.75, 1. , 1.5 , 2. , 2.5 , 2.75], - [2.25, 2.5 , 3. , 3.5 , 4. , 4.25], - [3. , 3.25, 3.75, 4.25, 4.75, 5. ]], dtype='float32') + reference_array = np.array( + [ + [0.0, 0.25, 0.75, 1.25, 1.75, 2.0], + [0.75, 1.0, 1.5, 2.0, 2.5, 2.75], + [2.25, 2.5, 3.0, 3.5, 4.0, 4.25], + [3.0, 3.25, 3.75, 4.25, 4.75, 5.0], + ], + dtype="float32", + ) np.testing.assert_array_equal(reference_array, data_out[0]) def test_reproject_input_data_odd_downsample(self): """test function reproject_input_data with odd downsampling""" data_in, meta_list = data_arrays_resampling_demo() # - data_out, meta_out = \ - lp.reproject_input_data(data_in, meta_list, - i_align=0, # high res data as reference - target_res_arcsec=6120, # 1.7 degree - global_origins=(-180, 90), - ) - self.assertEqual(1.7, meta_out['transform'][0]) # check resolution - reference_array = np.array([[0.425 , 1.7631578], - [3.425 , 4.763158 ]], dtype='float32') + data_out, meta_out = lp.reproject_input_data( + data_in, + meta_list, + i_align=0, # high res data as reference + target_res_arcsec=6120, # 1.7 degree + global_origins=(-180, 90), + ) + self.assertEqual(1.7, meta_out["transform"][0]) # check resolution + reference_array = np.array( + [[0.425, 1.7631578], [3.425, 4.763158]], dtype="float32" + ) np.testing.assert_array_equal(reference_array, data_out[0]) def test_gridpoints_core_calc_input_errors(self): @@ -195,10 +229,10 @@ def test_gridpoints_core_calc_input_errors(self): data = data_arrays_demo(2) # negative offset: with self.assertRaises(ValueError): - lp.gridpoints_core_calc(data, offsets=[2,-1]) + lp.gridpoints_core_calc(data, offsets=[2, -1]) # negative exponents: with self.assertRaises(ValueError): - lp.gridpoints_core_calc(data, exponents=[2,-1]) + lp.gridpoints_core_calc(data, exponents=[2, -1]) # different shapes: with self.assertRaises(ValueError): @@ -206,33 +240,32 @@ def test_gridpoints_core_calc_input_errors(self): # wrong format: with self.assertRaises(ValueError): - lp.gridpoints_core_calc(data, exponents=['a', 'b']) - data.append('hello i am a string') + lp.gridpoints_core_calc(data, exponents=["a", "b"]) + data.append("hello i am a string") with self.assertRaises(ValueError): lp.gridpoints_core_calc(data) def test_gridpoints_core_calc_default_1(self): """test function gridpoints_core_calc, i.e. core data combination on grid point level with default exponents and offsets - 1 array""" - data_arrays = data_arrays_demo(1) # get list with 1 demo array + data_arrays = data_arrays_demo(1) # get list with 1 demo array result_array = lp.gridpoints_core_calc(data_arrays) results_check = data_arrays[0] self.assertEqual(result_array.shape, results_check.shape) - self.assertEqual(result_array[1,1], results_check[1,1]) + self.assertEqual(result_array[1, 1], results_check[1, 1]) np.testing.assert_array_almost_equal_nulp(result_array, results_check) def test_gridpoints_core_calc_default_2(self): """test function gridpoints_core_calc, i.e. 
core data combination on grid point level with default exponents and offsets- 2 arrays""" - data_arrays = data_arrays_demo(2) # get list with 2 demo arrays + data_arrays = data_arrays_demo(2) # get list with 2 demo arrays result_array = lp.gridpoints_core_calc(data_arrays) results_check = data_arrays[0] * data_arrays[1] - self.assertEqual(result_array[0,0], results_check[0,0]) + self.assertEqual(result_array[0, 0], results_check[0, 0]) np.testing.assert_array_almost_equal_nulp(result_array, results_check) - def test_gridpoints_core_calc_default_3(self): """test function gridpoints_core_calc, i.e. core data combination on grid point level with default exponents and offsets- 3 arrays""" @@ -241,133 +274,141 @@ def test_gridpoints_core_calc_default_3(self): results_check = data_arrays[0] * data_arrays[1] * data_arrays[2] self.assertEqual(result_array.shape, results_check.shape) - self.assertEqual(result_array[1,1], results_check[1,1]) + self.assertEqual(result_array[1, 1], results_check[1, 1]) np.testing.assert_array_almost_equal_nulp(result_array, results_check) - #self.assertEqual(result_array, data_arrays[0] * data_arrays[1]) + # self.assertEqual(result_array, data_arrays[0] * data_arrays[1]) def test_gridpoints_core_calc_exp(self): """test function gridpoints_core_calc, i.e. core data combination on grid point level with changed exponents""" - data_arrays = data_arrays_demo(2) # get list with 2 demo arrays + data_arrays = data_arrays_demo(2) # get list with 2 demo arrays exp = [2, 1] result_array = lp.gridpoints_core_calc(data_arrays, exponents=exp) results_check = data_arrays[0] * data_arrays[0] * data_arrays[1] self.assertEqual(result_array.shape, results_check.shape) - self.assertEqual(result_array[0,2], results_check[0,2]) + self.assertEqual(result_array[0, 2], results_check[0, 2]) np.testing.assert_array_almost_equal_nulp(result_array, results_check) - exp = [2, .1] + exp = [2, 0.1] result_array = lp.gridpoints_core_calc(data_arrays, exponents=exp) - results_check = data_arrays[0] * data_arrays[0] * (data_arrays[1] ** .1) + results_check = data_arrays[0] * data_arrays[0] * (data_arrays[1] ** 0.1) np.testing.assert_array_almost_equal_nulp(result_array, results_check) def test_gridpoints_core_calc_offsets(self): """test function gridpoints_core_calc, i.e. core data combination on grid point level with changed offsets""" - data_arrays = data_arrays_demo(2) # get list with 2 demo arrays + data_arrays = data_arrays_demo(2) # get list with 2 demo arrays offsets = [1, 10] result_array = lp.gridpoints_core_calc(data_arrays, offsets=offsets) - results_check = (data_arrays[0]+1) * (10 + data_arrays[1]) + results_check = (data_arrays[0] + 1) * (10 + data_arrays[1]) self.assertEqual(result_array.shape, results_check.shape) - self.assertEqual(result_array[0,2], results_check[0,2]) + self.assertEqual(result_array[0, 2], results_check[0, 2]) np.testing.assert_array_almost_equal_nulp(result_array, results_check) def test_gridpoints_core_calc_offsets_exp(self): """test function gridpoints_core_calc, i.e. 
core data combination on grid point level with changed offsets and exponents""" - data_arrays = data_arrays_demo(2) # get list with 2 demo arrays + data_arrays = data_arrays_demo(2) # get list with 2 demo arrays offsets = [0, 10] exp = [2, 1] - result_array = lp.gridpoints_core_calc(data_arrays, offsets=offsets, - exponents=exp) - results_check = (data_arrays[0]) * (data_arrays[0]) * (10+data_arrays[1]) - results_check2 = np.array([[0, 20, 80],[99, 176, 275]]) + result_array = lp.gridpoints_core_calc( + data_arrays, offsets=offsets, exponents=exp + ) + results_check = (data_arrays[0]) * (data_arrays[0]) * (10 + data_arrays[1]) + results_check2 = np.array([[0, 20, 80], [99, 176, 275]]) self.assertEqual(result_array.shape, results_check.shape) - self.assertEqual(result_array[0,2], results_check[0,2]) + self.assertEqual(result_array[0, 2], results_check[0, 2]) np.testing.assert_array_almost_equal_nulp(result_array, results_check) - self.assertEqual(result_array[1,2], results_check2[1,2]) + self.assertEqual(result_array[1, 2], results_check2[1, 2]) np.testing.assert_array_almost_equal_nulp(result_array, results_check2) def test_gridpoints_core_calc_rescale(self): """test function gridpoints_core_calc, i.e. core data combination on grid point level with rescaling (default exponents and offsets)""" - data_arrays = data_arrays_demo(2) # get list with 2 demo arrays + data_arrays = data_arrays_demo(2) # get list with 2 demo arrays result_array = lp.gridpoints_core_calc(data_arrays, total_val_rescale=2.5) - results_check = (data_arrays[0]*data_arrays[1]) * 2.5/np.sum(data_arrays[0]*data_arrays[1]) + results_check = ( + (data_arrays[0] * data_arrays[1]) + * 2.5 + / np.sum(data_arrays[0] * data_arrays[1]) + ) self.assertAlmostEqual(result_array.sum(), 2.5) - self.assertEqual(result_array[0,1], results_check[0,1]) + self.assertEqual(result_array[0, 1], results_check[0, 1]) np.testing.assert_array_almost_equal_nulp(result_array, results_check) def test_gridpoints_core_calc_offsets_exp_rescale(self): """test function gridpoints_core_calc, i.e. 
core data combination on grid point level with changed offsets and exponents and rescaling""" - data_arrays = data_arrays_demo(2) # get list with 2 demo arrays + data_arrays = data_arrays_demo(2) # get list with 2 demo arrays offsets = [0.2, 3] - exp = [.5, 1.7] + exp = [0.5, 1.7] tot = -7 - result_array = lp.gridpoints_core_calc(data_arrays, offsets=offsets, - exponents=exp, total_val_rescale=tot) - results_check = np.array(data_arrays[0]+.2, dtype=float)**exp[0] * \ - (np.array(data_arrays[1]+3., dtype=float)**exp[1]) + result_array = lp.gridpoints_core_calc( + data_arrays, offsets=offsets, exponents=exp, total_val_rescale=tot + ) + results_check = np.array(data_arrays[0] + 0.2, dtype=float) ** exp[0] * ( + np.array(data_arrays[1] + 3.0, dtype=float) ** exp[1] + ) results_check = results_check * tot / results_check.sum() self.assertEqual(result_array.shape, results_check.shape) self.assertAlmostEqual(result_array.sum(), tot) - self.assertEqual(result_array[1,2], results_check[1,2]) + self.assertEqual(result_array[1, 2], results_check[1, 2]) np.testing.assert_allclose(result_array, results_check) def test_grp_read_pass(self): """test _grp_read() to pass and return either dict with admin1 values or None""" - result = lp._grp_read('JPN') + result = lp._grp_read("JPN") if result is not None: self.assertIsInstance(result, dict) - self.assertIn('Fukuoka', result.keys()) - self.assertIsInstance(result['Saga'], float) + self.assertIn("Fukuoka", result.keys()) + self.assertIsInstance(result["Saga"], float) def test_fail_get_total_value_per_country_pop(self): "test _get_total_value_per_country fails for pop" with self.assertRaises(NotImplementedError): - lp._get_total_value_per_country('XXX', 'pop', None) + lp._get_total_value_per_country("XXX", "pop", None) def test_get_total_value_per_country_none(self): "test _get_total_value_per_country pass with None" - value = lp._get_total_value_per_country('XXX', 'none', None) + value = lp._get_total_value_per_country("XXX", "none", None) self.assertEqual(value, None) def test_get_total_value_per_country_norm(self): "test _get_total_value_per_country pass with 1" - value = lp._get_total_value_per_country('XXX', 'norm', None) + value = lp._get_total_value_per_country("XXX", "norm", None) self.assertEqual(value, 1) def test_get_total_value_per_country_gdp(self): "test _get_total_value_per_country get number for gdp" - gdp_togo = lp._get_total_value_per_country('TGO', 'gdp', 2010) - gdp_switzerland = lp._get_total_value_per_country('CHE', 'gdp', 2222) - value_switzerland = lp._get_total_value_per_country('CHE', 'income_group', 2222) + gdp_togo = lp._get_total_value_per_country("TGO", "gdp", 2010) + gdp_switzerland = lp._get_total_value_per_country("CHE", "gdp", 2222) + value_switzerland = lp._get_total_value_per_country("CHE", "income_group", 2222) self.assertIsInstance(gdp_togo, float) # value for income_group = gdp * income group: - self.assertEqual(value_switzerland, 5*gdp_switzerland) + self.assertEqual(value_switzerland, 5 * gdp_switzerland) def test_get_total_value_per_country_pc(self): "test _get_total_value_per_country get number for pc of Poland" - value = lp._get_total_value_per_country('POL', 'pc', 2015) + value = lp._get_total_value_per_country("POL", "pc", 2015) self.assertIsInstance(value, float) def test_get_total_value_per_country_nfw(self): "test _get_total_value_per_country get number for pc of Poland" - value = lp._get_total_value_per_country('POL', 'nfw', 2015) + value = lp._get_total_value_per_country("POL", "nfw", 2015) 
self.assertIsInstance(value, float) def test_get_value_unit_pass(self): """test get_value_unit pass""" - self.assertEqual(lp.get_value_unit('pop'), 'people') - self.assertEqual(lp.get_value_unit('gdp'), 'USD') - self.assertEqual(lp.get_value_unit('pc'), 'USD') - self.assertEqual(lp.get_value_unit('nfw'), 'USD') - self.assertEqual(lp.get_value_unit('none'), '') + self.assertEqual(lp.get_value_unit("pop"), "people") + self.assertEqual(lp.get_value_unit("gdp"), "USD") + self.assertEqual(lp.get_value_unit("pc"), "USD") + self.assertEqual(lp.get_value_unit("nfw"), "USD") + self.assertEqual(lp.get_value_unit("none"), "") + if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestLitPop) diff --git a/climada/entity/exposures/test/test_mat.py b/climada/entity/exposures/test/test_mat.py index c6fbe09f8..540b92c88 100644 --- a/climada/entity/exposures/test/test_mat.py +++ b/climada/entity/exposures/test/test_mat.py @@ -18,13 +18,15 @@ Test Exposures from MATLAB file. """ -import unittest + import copy +import unittest from climada import CONFIG -from climada.entity.exposures.base import Exposures, DEF_VAR_MAT +from climada.entity.exposures.base import DEF_VAR_MAT, Exposures + +ENT_TEST_MAT = CURR_DIR = CONFIG.exposures.test_data.dir().joinpath("demo_today.mat") -ENT_TEST_MAT = CURR_DIR = CONFIG.exposures.test_data.dir().joinpath('demo_today.mat') class TestReader(unittest.TestCase): """Test reader functionality of the ExposuresMat class""" @@ -41,43 +43,44 @@ def test_read_demo_pass(self): self.assertEqual(expo.gdf.index[0], 0) self.assertEqual(expo.gdf.index[n_expos - 1], n_expos - 1) - self.assertEqual(expo.gdf['value'].shape, (n_expos,)) - self.assertEqual(expo.gdf['value'][0], 13927504367.680632) - self.assertEqual(expo.gdf['value'][n_expos - 1], 12624818493.687229) + self.assertEqual(expo.gdf["value"].shape, (n_expos,)) + self.assertEqual(expo.gdf["value"][0], 13927504367.680632) + self.assertEqual(expo.gdf["value"][n_expos - 1], 12624818493.687229) - self.assertEqual(expo.gdf['deductible'].shape, (n_expos,)) - self.assertEqual(expo.gdf['deductible'][0], 0) - self.assertEqual(expo.gdf['deductible'][n_expos - 1], 0) + self.assertEqual(expo.gdf["deductible"].shape, (n_expos,)) + self.assertEqual(expo.gdf["deductible"][0], 0) + self.assertEqual(expo.gdf["deductible"][n_expos - 1], 0) - self.assertEqual(expo.gdf['cover'].shape, (n_expos,)) - self.assertEqual(expo.gdf['cover'][0], 13927504367.680632) - self.assertEqual(expo.gdf['cover'][n_expos - 1], 12624818493.687229) + self.assertEqual(expo.gdf["cover"].shape, (n_expos,)) + self.assertEqual(expo.gdf["cover"][0], 13927504367.680632) + self.assertEqual(expo.gdf["cover"][n_expos - 1], 12624818493.687229) - self.assertIn('int', str(expo.gdf['impf_'].dtype)) - self.assertEqual(expo.gdf['impf_'].shape, (n_expos,)) - self.assertEqual(expo.gdf['impf_'][0], 1) - self.assertEqual(expo.gdf['impf_'][n_expos - 1], 1) + self.assertIn("int", str(expo.gdf["impf_"].dtype)) + self.assertEqual(expo.gdf["impf_"].shape, (n_expos,)) + self.assertEqual(expo.gdf["impf_"][0], 1) + self.assertEqual(expo.gdf["impf_"][n_expos - 1], 1) - self.assertIn('int', str(expo.gdf['category_id'].dtype)) - self.assertEqual(expo.gdf['category_id'].shape, (n_expos,)) - self.assertEqual(expo.gdf['category_id'][0], 1) - self.assertEqual(expo.gdf['category_id'][n_expos - 1], 1) + self.assertIn("int", str(expo.gdf["category_id"].dtype)) + self.assertEqual(expo.gdf["category_id"].shape, (n_expos,)) + self.assertEqual(expo.gdf["category_id"][0], 1) + 
self.assertEqual(expo.gdf["category_id"][n_expos - 1], 1) - self.assertIn('int', str(expo.gdf['centr_'].dtype)) - self.assertEqual(expo.gdf['centr_'].shape, (n_expos,)) - self.assertEqual(expo.gdf['centr_'][0], 47) - self.assertEqual(expo.gdf['centr_'][n_expos - 1], 46) + self.assertIn("int", str(expo.gdf["centr_"].dtype)) + self.assertEqual(expo.gdf["centr_"].shape, (n_expos,)) + self.assertEqual(expo.gdf["centr_"][0], 47) + self.assertEqual(expo.gdf["centr_"][n_expos - 1], 46) - self.assertTrue('region_id' not in expo.gdf) + self.assertTrue("region_id" not in expo.gdf) - self.assertEqual(expo.gdf['latitude'].shape, (n_expos,)) - self.assertEqual(expo.gdf['latitude'][0], 26.93389900000) - self.assertEqual(expo.gdf['latitude'][n_expos - 1], 26.34795700000) - self.assertEqual(expo.gdf['longitude'][0], -80.12879900000) - self.assertEqual(expo.gdf['longitude'][n_expos - 1], -80.15885500000) + self.assertEqual(expo.gdf["latitude"].shape, (n_expos,)) + self.assertEqual(expo.gdf["latitude"][0], 26.93389900000) + self.assertEqual(expo.gdf["latitude"][n_expos - 1], 26.34795700000) + self.assertEqual(expo.gdf["longitude"][0], -80.12879900000) + self.assertEqual(expo.gdf["longitude"][n_expos - 1], -80.15885500000) self.assertEqual(expo.ref_year, 2016) - self.assertEqual(expo.value_unit, 'USD') + self.assertEqual(expo.value_unit, "USD") + class TestObligatories(unittest.TestCase): """Test reading exposures obligatory values.""" @@ -85,77 +88,79 @@ class TestObligatories(unittest.TestCase): def test_no_value_fail(self): """Error if no values.""" new_var_names = copy.deepcopy(DEF_VAR_MAT) - new_var_names['var_name']['val'] = 'no valid value' + new_var_names["var_name"]["val"] = "no valid value" with self.assertRaises(KeyError): Exposures.from_mat(ENT_TEST_MAT, var_names=new_var_names) def test_no_impact_fail(self): """Error if no impact ids.""" new_var_names = copy.deepcopy(DEF_VAR_MAT) - new_var_names['var_name']['impf'] = 'no valid value' + new_var_names["var_name"]["impf"] = "no valid value" with self.assertRaises(KeyError): Exposures.from_mat(ENT_TEST_MAT, var_names=new_var_names) def test_no_coord_fail(self): """Error if no coordinates.""" new_var_names = copy.deepcopy(DEF_VAR_MAT) - new_var_names['var_name']['lat'] = 'no valid Latitude' + new_var_names["var_name"]["lat"] = "no valid Latitude" with self.assertRaises(KeyError): Exposures.from_mat(ENT_TEST_MAT, var_names=new_var_names) - new_var_names['var_name']['lat'] = 'nLatitude' - new_var_names['var_name']['lon'] = 'no valid Longitude' + new_var_names["var_name"]["lat"] = "nLatitude" + new_var_names["var_name"]["lon"] = "no valid Longitude" with self.assertRaises(KeyError): Exposures.from_mat(ENT_TEST_MAT, var_names=new_var_names) + class TestOptionals(unittest.TestCase): """Test reading exposures optional values.""" def test_no_category_pass(self): """Not error if no category id.""" new_var_names = copy.deepcopy(DEF_VAR_MAT) - new_var_names['var_name']['cat'] = 'no valid category' + new_var_names["var_name"]["cat"] = "no valid category" exp = Exposures.from_mat(ENT_TEST_MAT, var_names=new_var_names) # Check results - self.assertTrue('category_id' not in exp.gdf) + self.assertTrue("category_id" not in exp.gdf) def test_no_region_pass(self): """Not error if no region id.""" new_var_names = copy.deepcopy(DEF_VAR_MAT) - new_var_names['var_name']['reg'] = 'no valid region' + new_var_names["var_name"]["reg"] = "no valid region" exp = Exposures.from_mat(ENT_TEST_MAT, var_names=new_var_names) # Check results - self.assertTrue('region_id' not in 
exp.gdf) + self.assertTrue("region_id" not in exp.gdf) def test_no_unit_pass(self): """Not error if no value unit.""" new_var_names = copy.deepcopy(DEF_VAR_MAT) - new_var_names['var_name']['uni'] = 'no valid unit' + new_var_names["var_name"]["uni"] = "no valid unit" exp = Exposures.from_mat(ENT_TEST_MAT, var_names=new_var_names) # Check results - self.assertEqual('USD', exp.value_unit) + self.assertEqual("USD", exp.value_unit) def test_no_assigned_pass(self): """Not error if no value unit.""" new_var_names = copy.deepcopy(DEF_VAR_MAT) - new_var_names['var_name']['ass'] = 'no valid assign' + new_var_names["var_name"]["ass"] = "no valid assign" exp = Exposures.from_mat(ENT_TEST_MAT, var_names=new_var_names) # Check results - self.assertTrue('centr_' not in exp.gdf) + self.assertTrue("centr_" not in exp.gdf) def test_no_refyear_pass(self): """Not error if no value unit.""" new_var_names = copy.deepcopy(DEF_VAR_MAT) - new_var_names['var_name']['ref'] = 'no valid ref' + new_var_names["var_name"]["ref"] = "no valid ref" exp = Exposures.from_mat(ENT_TEST_MAT, var_names=new_var_names) # Check results self.assertEqual(2018, exp.ref_year) + # Execute Tests if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestReader) diff --git a/climada/entity/exposures/test/test_nightlight.py b/climada/entity/exposures/test/test_nightlight.py index f7b83b6a4..e70028991 100644 --- a/climada/entity/exposures/test/test_nightlight.py +++ b/climada/entity/exposures/test/test_nightlight.py @@ -18,43 +18,63 @@ Test Nightlight module. """ + import unittest +from pathlib import Path + import numpy as np from climada.entity.exposures.litpop import nightlight from climada.util.constants import SYSTEM_DIR -from pathlib import Path BM_FILENAMES = nightlight.BM_FILENAMES + class TestNightLight(unittest.TestCase): """Test nightlight functions.""" def test_required_files(self): """Test get_required_nl_files function with various countries.""" # Switzerland - bbox = (5.954809204000128, 45.82071848599999, 10.466626831000013, 47.801166077000076) + bbox = ( + 5.954809204000128, + 45.82071848599999, + 10.466626831000013, + 47.801166077000076, + ) # min_lon, min_lat, max_lon, max_lat = bbox - np.testing.assert_array_equal(nightlight.get_required_nl_files(bbox), - [0., 0., 0., 0., 1., 0., 0., 0.]) + np.testing.assert_array_equal( + nightlight.get_required_nl_files(bbox), + [0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0], + ) # UK - bbox = (-13.69131425699993, 49.90961334800005, 1.7711694670000497, 60.84788646000004) - np.testing.assert_array_equal(nightlight.get_required_nl_files(bbox), - [0., 0., 1., 0., 1., 0., 0., 0.]) + bbox = ( + -13.69131425699993, + 49.90961334800005, + 1.7711694670000497, + 60.84788646000004, + ) + np.testing.assert_array_equal( + nightlight.get_required_nl_files(bbox), + [0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0], + ) # entire world bbox = (-180, -90, 180, 90) - np.testing.assert_array_equal(nightlight.get_required_nl_files(bbox), - [1., 1., 1., 1., 1., 1., 1., 1.]) + np.testing.assert_array_equal( + nightlight.get_required_nl_files(bbox), + [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], + ) # Invalid coordinate order or bbox length - self.assertRaises(ValueError, nightlight.get_required_nl_files, - (-180, 90, 180, -90)) - self.assertRaises(ValueError, nightlight.get_required_nl_files, - (180, -90, -180, 90)) - self.assertRaises(ValueError, nightlight.get_required_nl_files, - (-90, 90)) + self.assertRaises( + ValueError, nightlight.get_required_nl_files, (-180, 90, 180, -90) + ) + 
self.assertRaises(
+            ValueError, nightlight.get_required_nl_files, (180, -90, -180, 90)
+        )
+        self.assertRaises(ValueError, nightlight.get_required_nl_files, (-90, 90))
 
     def test_download_nightlight_files(self):
         """Test check_nightlight_local_file_exists"""
@@ -62,19 +82,19 @@ def test_download_nightlight_files(self):
         self.assertRaises(ValueError, nightlight.download_nl_files, (1, 0, 1), (1, 1))
 
         # The same length but not the correct length
-        self.assertRaises(ValueError, nightlight.download_nl_files, (1, 0, 1), (1, 1, 1))
+        self.assertRaises(
+            ValueError, nightlight.download_nl_files, (1, 0, 1), (1, 1, 1)
+        )
 
     def test_get_required_nl_files(self):
-        """ get_required_nl_files return a boolean matrix of 0 and 1
-        indicating which tile of NASA nighlight files are needed giving
-        a bounding box. This test check a few configuration of tiles
-        and check that a value error is raised if the bounding box are
-        incorrect """
+        """get_required_nl_files returns a boolean matrix of 0 and 1
+        indicating which tiles of NASA nightlight files are needed given
+        a bounding box. This test checks a few configurations of tiles
+        and checks that a value error is raised if the bounding box is
+        incorrect"""
 
         # incorrect bounds: bounds size =! 4, min lon > max lon, min lat > min lat
-        BOUNDS = [(20, 30, 40),
-                  (120, -20, 110, 30),
-                  (-120, 50, 130, 10)]
+        BOUNDS = [(20, 30, 40), (120, -20, 110, 30), (-120, 50, 130, 10)]
         # correct bounds
         bounds_c1 = (-120, -20, 0, 40)
         bounds_c2 = (-70, -20, 10, 40)
@@ -83,25 +103,28 @@ def test_get_required_nl_files(self):
 
         for bounds in BOUNDS:
             with self.assertRaises(ValueError) as cm:
-                nightlight.get_required_nl_files(bounds = bounds)
+                nightlight.get_required_nl_files(bounds=bounds)
 
-            self.assertEqual('Invalid bounds supplied. `bounds` must be tuple'
-                             ' with (min_lon, min_lat, max_lon, max_lat).',
-                             str(cm.exception))
+            self.assertEqual(
+                "Invalid bounds supplied. `bounds` must be tuple"
+                " with (min_lon, min_lat, max_lon, max_lat).",
+                str(cm.exception),
+            )
 
         # test first correct bounds configurations
-        req_files = nightlight.get_required_nl_files(bounds = bounds_c1)
+        req_files = nightlight.get_required_nl_files(bounds=bounds_c1)
         bool = np.array_equal(np.array([1, 1, 1, 1, 1, 1, 0, 0]), req_files)
         self.assertTrue(bool)
 
         # second correct configuration
-        req_files = nightlight.get_required_nl_files(bounds = bounds_c2)
+        req_files = nightlight.get_required_nl_files(bounds=bounds_c2)
         bool = np.array_equal(np.array([0, 0, 1, 1, 1, 1, 0, 0]), req_files)
         self.assertTrue(bool)
 
         # third correct configuration
-        req_files = nightlight.get_required_nl_files(bounds = bounds_c3)
+        req_files = nightlight.get_required_nl_files(bounds=bounds_c3)
         bool = np.array_equal(np.array([0, 0, 0, 0, 0, 0, 1, 0]), req_files)
         self.assertTrue(bool)
 
+
 # Execute Tests
 if __name__ == "__main__":
     TESTS = unittest.TestLoader().loadTestsFromTestCase(TestNightLight)
diff --git a/climada/entity/impact_funcs/__init__.py b/climada/entity/impact_funcs/__init__.py
index f4d0aee3f..8672a17fa 100755
--- a/climada/entity/impact_funcs/__init__.py
+++ b/climada/entity/impact_funcs/__init__.py
@@ -18,6 +18,7 @@
 init impact functions
 """
+
 from .base import *
 from .impact_func_set import *
 from .trop_cyclone import *
diff --git a/climada/entity/impact_funcs/base.py b/climada/entity/impact_funcs/base.py
index 71f3d1af4..287391a79 100644
--- a/climada/entity/impact_funcs/base.py
+++ b/climada/entity/impact_funcs/base.py
@@ -19,19 +19,20 @@
 Define ImpactFunc class.
 
""" -__all__ = ['ImpactFunc'] +__all__ = ["ImpactFunc"] import logging from typing import Optional, Union -import numpy as np + import matplotlib.pyplot as plt +import numpy as np import climada.util.checker as u_check LOGGER = logging.getLogger(__name__) -class ImpactFunc(): +class ImpactFunc: """Contains the definition of one impact function. Attributes @@ -109,9 +110,10 @@ def calc_mdr(self, inten: Union[float, np.ndarray]) -> np.ndarray: ------- np.array """ -# return np.interp(inten, self.intensity, self.mdd * self.paa) - return np.interp(inten, self.intensity, self.paa) * \ - np.interp(inten, self.intensity, self.mdd) + # return np.interp(inten, self.intensity, self.mdd * self.paa) + return np.interp(inten, self.intensity, self.paa) * np.interp( + inten, self.intensity, self.mdd + ) def plot(self, axis=None, **kwargs): """Plot the impact functions MDD, MDR and PAA in one graph, where @@ -131,15 +133,17 @@ def plot(self, axis=None, **kwargs): if axis is None: _, axis = plt.subplots(1, 1) - title = '%s %s' % (self.haz_type, str(self.id)) + title = "%s %s" % (self.haz_type, str(self.id)) if self.name != str(self.id): - title += ': %s' % self.name - axis.set_xlabel('Intensity (' + self.intensity_unit + ')') - axis.set_ylabel('Impact (%)') + title += ": %s" % self.name + axis.set_xlabel("Intensity (" + self.intensity_unit + ")") + axis.set_ylabel("Impact (%)") axis.set_title(title) - axis.plot(self.intensity, self.mdd * 100, 'b', label='MDD', **kwargs) - axis.plot(self.intensity, self.paa * 100, 'r', label='PAA', **kwargs) - axis.plot(self.intensity, self.mdd * self.paa * 100, 'k--', label='MDR', **kwargs) + axis.plot(self.intensity, self.mdd * 100, "b", label="MDD", **kwargs) + axis.plot(self.intensity, self.paa * 100, "r", label="PAA", **kwargs) + axis.plot( + self.intensity, self.mdd * self.paa * 100, "k--", label="MDR", **kwargs + ) axis.set_xlim((self.intensity.min(), self.intensity.max())) axis.legend() @@ -153,12 +157,16 @@ def check(self): ValueError """ num_exp = len(self.intensity) - u_check.size(num_exp, self.mdd, 'ImpactFunc.mdd') - u_check.size(num_exp, self.paa, 'ImpactFunc.paa') + u_check.size(num_exp, self.mdd, "ImpactFunc.mdd") + u_check.size(num_exp, self.paa, "ImpactFunc.paa") if num_exp == 0: - LOGGER.warning("%s impact function with name '%s' (id=%s) has empty" - " intensity.", self.haz_type, self.name, self.id) + LOGGER.warning( + "%s impact function with name '%s' (id=%s) has empty" " intensity.", + self.haz_type, + self.name, + self.id, + ) return @classmethod @@ -169,9 +177,9 @@ def from_step_impf( mdd: tuple[float, float] = (0, 1), paa: tuple[float, float] = (1, 1), impf_id: int = 1, - **kwargs): - - """ Step function type impact function. + **kwargs + ): + """Step function type impact function. By default, the impact is 100% above the step. Useful for high resolution modelling. @@ -204,13 +212,21 @@ def from_step_impf( mdd_min, mdd_max = mdd mdd = np.array([mdd_min, mdd_min, mdd_max, mdd_max]) - return cls(haz_type=haz_type, id=impf_id, - intensity=intensity, mdd=mdd, paa=paa, **kwargs) + return cls( + haz_type=haz_type, + id=impf_id, + intensity=intensity, + mdd=mdd, + paa=paa, + **kwargs + ) def set_step_impf(self, *args, **kwargs): """This function is deprecated, use ImpactFunc.from_step_impf instead.""" - LOGGER.warning("The use of ImpactFunc.set_step_impf is deprecated." + - "Use ImpactFunc.from_step_impf instead.") + LOGGER.warning( + "The use of ImpactFunc.set_step_impf is deprecated." + + "Use ImpactFunc.from_step_impf instead." 
+ ) self.__dict__ = ImpactFunc.from_step_impf(*args, **kwargs).__dict__ @classmethod @@ -222,7 +238,8 @@ def from_sigmoid_impf( x0: float, haz_type: str, impf_id: int = 1, - **kwargs): + **kwargs + ): r"""Sigmoid type impact function hinging on three parameter. This type of impact function is very flexible for any sort of study, @@ -264,13 +281,21 @@ def from_sigmoid_impf( paa = np.ones(len(intensity)) mdd = L / (1 + np.exp(-k * (intensity - x0))) - return cls(haz_type=haz_type, id=impf_id, intensity=intensity, - paa=paa, mdd=mdd, **kwargs) + return cls( + haz_type=haz_type, + id=impf_id, + intensity=intensity, + paa=paa, + mdd=mdd, + **kwargs + ) def set_sigmoid_impf(self, *args, **kwargs): """This function is deprecated, use LitPop.from_countries instead.""" - LOGGER.warning("The use of ImpactFunc.set_sigmoid_impf is deprecated." - "Use ImpactFunc.from_sigmoid_impf instead.") + LOGGER.warning( + "The use of ImpactFunc.set_sigmoid_impf is deprecated." + "Use ImpactFunc.from_sigmoid_impf instead." + ) self.__dict__ = ImpactFunc.from_sigmoid_impf(*args, **kwargs).__dict__ @classmethod @@ -283,7 +308,8 @@ def from_poly_s_shape( exponent: float, haz_type: str, impf_id: int = 1, - **kwargs): + **kwargs + ): r"""S-shape polynomial impact function hinging on four parameter. .. math:: @@ -336,7 +362,7 @@ def from_poly_s_shape( s-shaped polynomial impact function """ if exponent < 0: - raise ValueError('Exponent value must larger than 0') + raise ValueError("Exponent value must larger than 0") inten = np.linspace(*intensity) @@ -349,11 +375,6 @@ def from_poly_s_shape( paa = np.ones_like(inten) impf = cls( - haz_type=haz_type, - id=impf_id, - intensity=inten, - paa=paa, - mdd=mdd, - **kwargs + haz_type=haz_type, id=impf_id, intensity=inten, paa=paa, mdd=mdd, **kwargs ) return impf diff --git a/climada/entity/impact_funcs/impact_func_set.py b/climada/entity/impact_funcs/impact_func_set.py index 6bba81d56..e94ff8b82 100755 --- a/climada/entity/impact_funcs/impact_func_set.py +++ b/climada/entity/impact_funcs/impact_func_set.py @@ -19,48 +19,54 @@ Define ImpactFuncSet class. 
""" -__all__ = ['ImpactFuncSet'] +__all__ = ["ImpactFuncSet"] import copy import logging -from typing import Optional, Iterable from itertools import repeat +from typing import Iterable, Optional + +import matplotlib.pyplot as plt import numpy as np import pandas as pd -import matplotlib.pyplot as plt import xlsxwriter -from climada.entity.impact_funcs.base import ImpactFunc -import climada.util.plot as u_plot import climada.util.hdf5_handler as u_hdf5 +import climada.util.plot as u_plot +from climada.entity.impact_funcs.base import ImpactFunc LOGGER = logging.getLogger(__name__) -DEF_VAR_EXCEL = {'sheet_name': 'impact_functions', - 'col_name': {'func_id': 'impact_fun_id', - 'inten': 'intensity', - 'mdd': 'mdd', - 'paa': 'paa', - 'name': 'name', - 'unit': 'intensity_unit', - 'peril': 'peril_id' - } - } +DEF_VAR_EXCEL = { + "sheet_name": "impact_functions", + "col_name": { + "func_id": "impact_fun_id", + "inten": "intensity", + "mdd": "mdd", + "paa": "paa", + "name": "name", + "unit": "intensity_unit", + "peril": "peril_id", + }, +} """Excel and csv variable names""" -DEF_VAR_MAT = {'sup_field_name': 'entity', - 'field_name': 'damagefunctions', - 'var_name': {'fun_id': 'DamageFunID', - 'inten': 'Intensity', - 'mdd': 'MDD', - 'paa': 'PAA', - 'name': 'name', - 'unit': 'Intensity_unit', - 'peril': 'peril_ID' - } - } +DEF_VAR_MAT = { + "sup_field_name": "entity", + "field_name": "damagefunctions", + "var_name": { + "fun_id": "DamageFunID", + "inten": "Intensity", + "mdd": "MDD", + "paa": "PAA", + "name": "name", + "unit": "Intensity_unit", + "peril": "peril_ID", + }, +} """MATLAB variable names""" + class ImpactFuncSet: """Contains impact functions of type ImpactFunc. Loads from files with format defined in FILE_EXT. @@ -72,10 +78,7 @@ class ImpactFuncSet: directly accessed. Use the class methods instead. """ - def __init__( - self, - impact_funcs: Optional[Iterable[ImpactFunc]] = None - ): + def __init__(self, impact_funcs: Optional[Iterable[ImpactFunc]] = None): """Initialization. Build an impact function set from an iterable of ImpactFunc. @@ -147,8 +150,9 @@ def remove_func(self, haz_type=None, fun_id=None): try: del self._data[haz_type][fun_id] except KeyError: - LOGGER.warning("No ImpactFunc with hazard %s and id %s.", - haz_type, fun_id) + LOGGER.warning( + "No ImpactFunc with hazard %s and id %s.", haz_type, fun_id + ) elif haz_type is not None: try: del self._data[haz_type] @@ -261,8 +265,11 @@ def size(self, haz_type=None, fun_id=None): ------- int """ - if (haz_type is not None) and (fun_id is not None) and \ - (isinstance(self.get_func(haz_type, fun_id), ImpactFunc)): + if ( + (haz_type is not None) + and (fun_id is not None) + and (isinstance(self.get_func(haz_type, fun_id), ImpactFunc)) + ): return 1 if (haz_type is not None) or (fun_id is not None): return len(self.get_func(haz_type, fun_id)) @@ -277,12 +284,14 @@ def check(self): """ for key_haz, vul_dict in self._data.items(): for fun_id, vul in vul_dict.items(): - if (fun_id != vul.id) | (fun_id == ''): - raise ValueError("Wrong ImpactFunc.id: %s != %s." - % (fun_id, vul.id)) - if (key_haz != vul.haz_type) | (key_haz == ''): - raise ValueError("Wrong ImpactFunc.haz_type: %s != %s." - % (key_haz, vul.haz_type)) + if (fun_id != vul.id) | (fun_id == ""): + raise ValueError( + "Wrong ImpactFunc.id: %s != %s." % (fun_id, vul.id) + ) + if (key_haz != vul.haz_type) | (key_haz == ""): + raise ValueError( + "Wrong ImpactFunc.haz_type: %s != %s." 
% (key_haz, vul.haz_type) + ) vul.check() def extend(self, impact_funcs): @@ -368,7 +377,7 @@ def from_excel(cls, file_name, var_names=None): """ if var_names is None: var_names = DEF_VAR_EXCEL - dfr = pd.read_excel(file_name, var_names['sheet_name']) + dfr = pd.read_excel(file_name, var_names["sheet_name"]) imp_func_set = cls() imp_func_set._fill_dfr(dfr, var_names) @@ -376,8 +385,10 @@ def from_excel(cls, file_name, var_names=None): def read_excel(self, *args, **kwargs): """This function is deprecated, use ImpactFuncSet.from_excel instead.""" - LOGGER.warning("The use of ImpactFuncSet.read_excel is deprecated." - " Use ImpactFuncSet.from_excel instead.") + LOGGER.warning( + "The use of ImpactFuncSet.read_excel is deprecated." + " Use ImpactFuncSet.from_excel instead." + ) self.__dict__ = ImpactFuncSet.from_excel(*args, **kwargs).__dict__ @classmethod @@ -400,12 +411,16 @@ def from_mat(cls, file_name, var_names=None): """ if var_names is None: var_names = DEF_VAR_MAT + def _get_hdf5_funcs(imp, file_name, var_names): """Get rows that fill every impact function and its name.""" func_pos = dict() for row, (fun_id, fun_type) in enumerate( - zip(imp[var_names['var_name']['fun_id']].squeeze(), - imp[var_names['var_name']['peril']].squeeze())): + zip( + imp[var_names["var_name"]["fun_id"]].squeeze(), + imp[var_names["var_name"]["peril"]].squeeze(), + ) + ): type_str = u_hdf5.get_str_from_ref(file_name, fun_type) key = (type_str, int(fun_id)) if key not in func_pos: @@ -421,17 +436,19 @@ def _get_hdf5_str(imp, idxs, file_name, var_name): if prev_str == "": prev_str = cur_str elif prev_str != cur_str: - raise ValueError("Impact function with two different %s." % var_name) + raise ValueError( + "Impact function with two different %s." % var_name + ) return prev_str imp = u_hdf5.read(file_name) try: - imp = imp[var_names['sup_field_name']] + imp = imp[var_names["sup_field_name"]] except KeyError: pass try: - imp = imp[var_names['field_name']] + imp = imp[var_names["field_name"]] funcs_idx = _get_hdf5_funcs(imp, file_name, var_names) impact_funcs = [] for imp_key, imp_rows in funcs_idx.items(): @@ -442,19 +459,26 @@ def _get_hdf5_str(imp, idxs, file_name, var_name): # check that this function only has one intensity unit, if provided try: impf_kwargs["intensity_unit"] = _get_hdf5_str( - imp, imp_rows, file_name, var_names['var_name']['unit']) + imp, imp_rows, file_name, var_names["var_name"]["unit"] + ) except KeyError: pass # check that this function only has one name try: impf_kwargs["name"] = _get_hdf5_str( - imp, imp_rows, file_name, var_names['var_name']['name']) + imp, imp_rows, file_name, var_names["var_name"]["name"] + ) except KeyError: impf_kwargs["name"] = str(impf_kwargs["idx"]) impf_kwargs["intensity"] = np.take( - imp[var_names['var_name']['inten']], imp_rows) - impf_kwargs["mdd"] = np.take(imp[var_names['var_name']['mdd']], imp_rows) - impf_kwargs["paa"] = np.take(imp[var_names['var_name']['paa']], imp_rows) + imp[var_names["var_name"]["inten"]], imp_rows + ) + impf_kwargs["mdd"] = np.take( + imp[var_names["var_name"]["mdd"]], imp_rows + ) + impf_kwargs["paa"] = np.take( + imp[var_names["var_name"]["paa"]], imp_rows + ) impact_funcs.append(ImpactFunc(**impf_kwargs)) except KeyError as err: raise KeyError("Not existing variable: %s" % str(err)) from err @@ -463,8 +487,10 @@ def _get_hdf5_str(imp, idxs, file_name, var_name): def read_mat(self, *args, **kwargs): """This function is deprecated, use ImpactFuncSet.from_mat instead.""" - LOGGER.warning("The use of ImpactFuncSet.read_mat is 
deprecated." - "Use ImpactFuncSet.from_mat instead.") + LOGGER.warning( + "The use of ImpactFuncSet.read_mat is deprecated." + "Use ImpactFuncSet.from_mat instead." + ) self.__dict__ = ImpactFuncSet.from_mat(*args, **kwargs).__dict__ def write_excel(self, file_name, var_names=None): @@ -479,6 +505,7 @@ def write_excel(self, file_name, var_names=None): """ if var_names is None: var_names = DEF_VAR_EXCEL + def write_impf(row_ini, imp_ws, xls_data): """Write one impact function""" for icol, col_dat in enumerate(xls_data): @@ -486,22 +513,32 @@ def write_impf(row_ini, imp_ws, xls_data): imp_ws.write(irow, icol, data) imp_wb = xlsxwriter.Workbook(file_name) - imp_ws = imp_wb.add_worksheet(var_names['sheet_name']) - - header = [var_names['col_name']['func_id'], var_names['col_name']['inten'], - var_names['col_name']['mdd'], var_names['col_name']['paa'], - var_names['col_name']['peril'], var_names['col_name']['unit'], - var_names['col_name']['name']] + imp_ws = imp_wb.add_worksheet(var_names["sheet_name"]) + + header = [ + var_names["col_name"]["func_id"], + var_names["col_name"]["inten"], + var_names["col_name"]["mdd"], + var_names["col_name"]["paa"], + var_names["col_name"]["peril"], + var_names["col_name"]["unit"], + var_names["col_name"]["name"], + ] for icol, head_dat in enumerate(header): imp_ws.write(0, icol, head_dat) row_ini = 1 for fun_haz_id, fun_haz in self._data.items(): for fun_id, fun in fun_haz.items(): n_inten = fun.intensity.size - xls_data = [repeat(fun_id, n_inten), fun.intensity, fun.mdd, - fun.paa, repeat(fun_haz_id, n_inten), - repeat(fun.intensity_unit, n_inten), - repeat(fun.name, n_inten)] + xls_data = [ + repeat(fun_id, n_inten), + fun.intensity, + fun.mdd, + fun.paa, + repeat(fun_haz_id, n_inten), + repeat(fun.intensity_unit, n_inten), + repeat(fun.name, n_inten), + ] write_impf(row_ini, imp_ws, xls_data) row_ini += n_inten imp_wb.close() @@ -511,8 +548,10 @@ def _fill_dfr(self, dfr, var_names): def _get_xls_funcs(dfr, var_names): """Parse individual impact functions.""" dist_func = [] - for (haz_type, imp_id) in zip(dfr[var_names['col_name']['peril']], - dfr[var_names['col_name']['func_id']]): + for haz_type, imp_id in zip( + dfr[var_names["col_name"]["peril"]], + dfr[var_names["col_name"]["func_id"]], + ): if (haz_type, imp_id) not in dist_func: dist_func.append((haz_type, imp_id)) return dist_func @@ -520,9 +559,8 @@ def _get_xls_funcs(dfr, var_names): try: dist_func = _get_xls_funcs(dfr, var_names) for haz_type, imp_id in dist_func: - df_func = dfr[dfr[var_names['col_name']['peril']] == haz_type] - df_func = df_func[df_func[var_names['col_name']['func_id']] - == imp_id] + df_func = dfr[dfr[var_names["col_name"]["peril"]] == haz_type] + df_func = df_func[df_func[var_names["col_name"]["func_id"]] == imp_id] # Store arguments in a dict (missing ones will be default) impf_kwargs = dict() @@ -530,26 +568,31 @@ def _get_xls_funcs(dfr, var_names): impf_kwargs["id"] = imp_id # check that the unit of the intensity is the same try: - if len(df_func[var_names['col_name']['name']].unique()) != 1: - raise ValueError('Impact function with two different names.') - impf_kwargs["name"] = df_func[var_names['col_name'] - ['name']].values[0] + if len(df_func[var_names["col_name"]["name"]].unique()) != 1: + raise ValueError("Impact function with two different names.") + impf_kwargs["name"] = df_func[var_names["col_name"]["name"]].values[ + 0 + ] except KeyError: impf_kwargs["name"] = str(impf_kwargs["id"]) # check that the unit of the intensity is the same, if provided try: - if 
len(df_func[var_names['col_name']['unit']].unique()) != 1: - raise ValueError('Impact function with two different' - ' intensity units.') - impf_kwargs["intensity_unit"] = df_func[var_names['col_name'] - ['unit']].values[0] + if len(df_func[var_names["col_name"]["unit"]].unique()) != 1: + raise ValueError( + "Impact function with two different" " intensity units." + ) + impf_kwargs["intensity_unit"] = df_func[ + var_names["col_name"]["unit"] + ].values[0] except KeyError: pass - impf_kwargs["intensity"] = df_func[var_names['col_name']['inten']].values - impf_kwargs["mdd"] = df_func[var_names['col_name']['mdd']].values - impf_kwargs["paa"] = df_func[var_names['col_name']['paa']].values + impf_kwargs["intensity"] = df_func[ + var_names["col_name"]["inten"] + ].values + impf_kwargs["mdd"] = df_func[var_names["col_name"]["mdd"]].values + impf_kwargs["paa"] = df_func[var_names["col_name"]["paa"]].values self.append(ImpactFunc(**impf_kwargs)) diff --git a/climada/entity/impact_funcs/storm_europe.py b/climada/entity/impact_funcs/storm_europe.py index f021f4957..76973c3df 100644 --- a/climada/entity/impact_funcs/storm_europe.py +++ b/climada/entity/impact_funcs/storm_europe.py @@ -19,24 +19,25 @@ Define impact functions for extratropical storms (mainly windstorms in Europe). """ -__all__ = ['ImpfStormEurope', 'IFStormEurope'] +__all__ = ["ImpfStormEurope", "IFStormEurope"] import logging -from deprecation import deprecated + import numpy as np +from deprecation import deprecated -from climada.entity.impact_funcs.base import ImpactFunc from climada.engine.calibration_opt import init_impf - +from climada.entity.impact_funcs.base import ImpactFunc LOGGER = logging.getLogger(__name__) + class ImpfStormEurope(ImpactFunc): """Impact functions for tropical cyclones.""" def __init__(self): ImpactFunc.__init__(self) - self.haz_type = 'WS' + self.haz_type = "WS" @classmethod def from_schwierz(cls, impf_id=1): @@ -50,16 +51,42 @@ def from_schwierz(cls, impf_id=1): """ impf = cls() - impf.name = 'Schwierz 2010' + impf.name = "Schwierz 2010" impf.id = impf_id - impf.intensity_unit = 'm/s' + impf.intensity_unit = "m/s" impf.intensity = np.array([0, 20, 25, 30, 35, 40, 45, 50, 55, 60, 80, 100]) - impf.paa = np.array([0., 0., 0.001, 0.00676, - 0.03921, 0.10707, 0.25357, 0.48869, - 0.82907, 1., 1., 1.]) - impf.mdd = np.array([0., 0., 0.001, 0.00177515, - 0.00367253, 0.00749977, 0.01263556, 0.01849639, - 0.02370487, 0.037253, 0.037253, 0.037253]) + impf.paa = np.array( + [ + 0.0, + 0.0, + 0.001, + 0.00676, + 0.03921, + 0.10707, + 0.25357, + 0.48869, + 0.82907, + 1.0, + 1.0, + 1.0, + ] + ) + impf.mdd = np.array( + [ + 0.0, + 0.0, + 0.001, + 0.00177515, + 0.00367253, + 0.00749977, + 0.01263556, + 0.01849639, + 0.02370487, + 0.037253, + 0.037253, + 0.037253, + ] + ) impf.check() return impf @@ -77,11 +104,11 @@ def from_welker(cls, impf_id=1): """ temp_Impf = ImpfStormEurope.from_schwierz() - scaling_factor = {'paa_scale': 1.332518, 'mdd_scale': 1.332518} + scaling_factor = {"paa_scale": 1.332518, "mdd_scale": 1.332518} temp_Impf = init_impf(temp_Impf, scaling_factor)[0] - temp_Impf.name = 'Welker 2021' + temp_Impf.name = "Welker 2021" temp_Impf.id = impf_id - temp_Impf.intensity_unit = 'm/s' + temp_Impf.intensity_unit = "m/s" temp_Impf.check() return temp_Impf @@ -90,8 +117,10 @@ def set_schwierz(self, impf_id=1): This function is deprecated, use ImpfStormEurope.from_schwierz instead. """ - LOGGER.warning("The use of ImpfStormEurope.set_schwierz is deprecated." 
- "Use ImpfStormEurope.from_schwierz instead.") + LOGGER.warning( + "The use of ImpfStormEurope.set_schwierz is deprecated." + "Use ImpfStormEurope.from_schwierz instead." + ) self.__dict__ = ImpfStormEurope.from_schwierz(impf_id=impf_id).__dict__ def set_welker(self, impf_id=1): @@ -99,12 +128,16 @@ def set_welker(self, impf_id=1): This function is deprecated, use ImpfStormEurope.from_welker instead. """ - LOGGER.warning("The use of ImpfStormEurope.set_welker is deprecated." - "Use ImpfStormEurope.from_welker instead.") + LOGGER.warning( + "The use of ImpfStormEurope.set_welker is deprecated." + "Use ImpfStormEurope.from_welker instead." + ) self.__dict__ = ImpfStormEurope.from_welker(impf_id=impf_id).__dict__ -@deprecated(details="The class name IFStormEurope is deprecated and won't be supported in a future " - +"version. Use ImpfStormEurope instead") +@deprecated( + details="The class name IFStormEurope is deprecated and won't be supported in a future " + + "version. Use ImpfStormEurope instead" +) class IFStormEurope(ImpfStormEurope): """Is ImpfStormEurope now""" diff --git a/climada/entity/impact_funcs/test/test_base.py b/climada/entity/impact_funcs/test/test_base.py index 3f2e0460b..b0652a1be 100644 --- a/climada/entity/impact_funcs/test/test_base.py +++ b/climada/entity/impact_funcs/test/test_base.py @@ -20,10 +20,12 @@ """ import unittest + import numpy as np from climada.entity.impact_funcs.base import ImpactFunc + class TestInterpolation(unittest.TestCase): """Impact function interpolation test""" @@ -39,36 +41,36 @@ def test_calc_mdr_pass(self): def test_from_step(self): """Check default impact function: step function""" inten = (0, 5, 10) - imp_fun = ImpactFunc.from_step_impf( - intensity=inten, haz_type='TC', impf_id=2) + imp_fun = ImpactFunc.from_step_impf(intensity=inten, haz_type="TC", impf_id=2) self.assertTrue(np.array_equal(imp_fun.paa, np.ones(4))) self.assertTrue(np.array_equal(imp_fun.mdd, np.array([0, 0, 1, 1]))) self.assertTrue(np.array_equal(imp_fun.intensity, np.array([0, 5, 5, 10]))) - self.assertEqual(imp_fun.haz_type, 'TC') + self.assertEqual(imp_fun.haz_type, "TC") self.assertEqual(imp_fun.id, 2) def test_from_sigmoid(self): """Check default impact function: sigmoid function""" inten = (0, 100, 5) imp_fun = ImpactFunc.from_sigmoid_impf( - inten, L=1.0, k=2., x0=50., haz_type='RF', impf_id=2) + inten, L=1.0, k=2.0, x0=50.0, haz_type="RF", impf_id=2 + ) self.assertTrue(np.array_equal(imp_fun.paa, np.ones(20))) self.assertEqual(imp_fun.mdd[10], 0.5) self.assertEqual(imp_fun.mdd[-1], 1.0) self.assertTrue(np.array_equal(imp_fun.intensity, np.arange(0, 100, 5))) - self.assertEqual(imp_fun.haz_type, 'RF') + self.assertEqual(imp_fun.haz_type, "RF") self.assertEqual(imp_fun.id, 2) def test_from_poly_s_shape(self): """Check default impact function: polynomial s-shape""" - haz_type = 'RF' + haz_type = "RF" threshold = 0.2 half_point = 1 scale = 0.8 exponent = 4 impf_id = 2 - unit = 'm' + unit = "m" intensity = (0, 5, 5) def test_aux_vars(impf): @@ -79,9 +81,15 @@ def test_aux_vars(impf): self.assertEqual(impf.intensity_unit, unit) impf = ImpactFunc.from_poly_s_shape( - intensity=intensity, threshold=threshold, half_point=half_point, scale=scale, - exponent=exponent, haz_type=haz_type, impf_id=impf_id, intensity_unit=unit - ) + intensity=intensity, + threshold=threshold, + half_point=half_point, + scale=scale, + exponent=exponent, + haz_type=haz_type, + impf_id=impf_id, + intensity_unit=unit, + ) # True value can easily be computed with a calculator correct_mdd = 
np.array([0, 0.59836395, 0.78845941, 0.79794213, 0.79938319]) np.testing.assert_array_almost_equal(impf.mdd, correct_mdd) @@ -89,29 +97,46 @@ def test_aux_vars(impf): # If threshold > half_point, mdd should all be 0 impf = ImpactFunc.from_poly_s_shape( - intensity=intensity, threshold=half_point*2, half_point=half_point, scale=scale, - exponent=exponent, haz_type=haz_type, impf_id=impf_id, intensity_unit=unit - ) + intensity=intensity, + threshold=half_point * 2, + half_point=half_point, + scale=scale, + exponent=exponent, + haz_type=haz_type, + impf_id=impf_id, + intensity_unit=unit, + ) np.testing.assert_array_almost_equal(impf.mdd, np.zeros(5)) test_aux_vars(impf) # If exponent = 0, mdd should be constant impf = ImpactFunc.from_poly_s_shape( - intensity=intensity, threshold=threshold, half_point=half_point, scale=scale, - exponent=0, haz_type=haz_type, impf_id=impf_id, intensity_unit=unit - ) + intensity=intensity, + threshold=threshold, + half_point=half_point, + scale=scale, + exponent=0, + haz_type=haz_type, + impf_id=impf_id, + intensity_unit=unit, + ) np.testing.assert_array_almost_equal(impf.mdd, np.ones(5) * scale / 2) test_aux_vars(impf) # If exponent < 0, raise error. with self.assertRaisesRegex(ValueError, "Exponent value"): ImpactFunc.from_poly_s_shape( - intensity=intensity, threshold=half_point, - half_point=half_point, scale=scale, - exponent=-1, haz_type=haz_type, - impf_id=impf_id, intensity_unit=unit + intensity=intensity, + threshold=half_point, + half_point=half_point, + scale=scale, + exponent=-1, + haz_type=haz_type, + impf_id=impf_id, + intensity_unit=unit, ) + # Execute Tests if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestInterpolation) diff --git a/climada/entity/impact_funcs/test/test_imp_fun_set.py b/climada/entity/impact_funcs/test/test_imp_fun_set.py index e3804f849..3bc60559b 100644 --- a/climada/entity/impact_funcs/test/test_imp_fun_set.py +++ b/climada/entity/impact_funcs/test/test_imp_fun_set.py @@ -18,43 +18,51 @@ Test ImpactFuncSet class. 
""" + import unittest + import numpy as np from climada import CONFIG -from climada.entity.impact_funcs.impact_func_set import ImpactFuncSet, ImpactFunc -from climada.util.constants import ENT_TEMPLATE_XLS, ENT_DEMO_TODAY +from climada.entity.impact_funcs.impact_func_set import ImpactFunc, ImpactFuncSet +from climada.util.constants import ENT_DEMO_TODAY, ENT_TEMPLATE_XLS + +ENT_TEST_MAT = CONFIG.exposures.test_data.dir().joinpath("demo_today.mat") -ENT_TEST_MAT = CONFIG.exposures.test_data.dir().joinpath('demo_today.mat') class TestConstructor(unittest.TestCase): """Test impact function attributes.""" + def test_attributes_all(self): """All attributes are defined""" imp_fun = ImpactFuncSet() vulner_1 = ImpactFunc("TC", "2") - self.assertTrue(hasattr(imp_fun, '_data')) - self.assertTrue(hasattr(vulner_1, 'haz_type')) - self.assertTrue(hasattr(vulner_1, 'name')) - self.assertTrue(hasattr(vulner_1, 'id')) - self.assertTrue(hasattr(vulner_1, 'intensity_unit')) - self.assertTrue(hasattr(vulner_1, 'mdd')) - self.assertTrue(hasattr(vulner_1, 'paa')) + self.assertTrue(hasattr(imp_fun, "_data")) + self.assertTrue(hasattr(vulner_1, "haz_type")) + self.assertTrue(hasattr(vulner_1, "name")) + self.assertTrue(hasattr(vulner_1, "id")) + self.assertTrue(hasattr(vulner_1, "intensity_unit")) + self.assertTrue(hasattr(vulner_1, "mdd")) + self.assertTrue(hasattr(vulner_1, "paa")) + class TestContainer(unittest.TestCase): """Test ImpactFuncSet as container.""" + def test_add_wrong_error(self): """Test error is raised when wrong ImpactFunc provided.""" imp_fun = ImpactFuncSet() vulner_1 = ImpactFunc() - with self.assertLogs('climada.entity.impact_funcs.impact_func_set', - level='WARNING') as cm: + with self.assertLogs( + "climada.entity.impact_funcs.impact_func_set", level="WARNING" + ) as cm: imp_fun.append(vulner_1) self.assertIn("Input ImpactFunc's hazard type not set.", cm.output[0]) vulner_1 = ImpactFunc("TC") - with self.assertLogs('climada.entity.impact_funcs.impact_func_set', - level='WARNING') as cm: + with self.assertLogs( + "climada.entity.impact_funcs.impact_func_set", level="WARNING" + ) as cm: imp_fun.append(vulner_1) self.assertIn("Input ImpactFunc's id not set.", cm.output[0]) @@ -71,29 +79,33 @@ def test_remove_func_pass(self): def test_remove_wrong_error(self): """Test error is raised when invalid inputs.""" imp_fun = ImpactFuncSet([ImpactFunc("TC", 1)]) - with self.assertLogs('climada.entity.impact_funcs.impact_func_set', level='WARNING') as cm: - imp_fun.remove_func('FL') - self.assertIn('No ImpactFunc with hazard FL.', cm.output[0]) - with self.assertLogs('climada.entity.impact_funcs.impact_func_set', level='WARNING') as cm: + with self.assertLogs( + "climada.entity.impact_funcs.impact_func_set", level="WARNING" + ) as cm: + imp_fun.remove_func("FL") + self.assertIn("No ImpactFunc with hazard FL.", cm.output[0]) + with self.assertLogs( + "climada.entity.impact_funcs.impact_func_set", level="WARNING" + ) as cm: imp_fun.remove_func(fun_id=3) - self.assertIn('No ImpactFunc with id 3.', cm.output[0]) + self.assertIn("No ImpactFunc with id 3.", cm.output[0]) def test_get_hazards_pass(self): """Test get_hazard_types function.""" imp_fun = ImpactFuncSet([ImpactFunc("TC", 1)]) self.assertEqual(1, len(imp_fun.get_hazard_types())) - self.assertEqual(['TC'], imp_fun.get_hazard_types()) + self.assertEqual(["TC"], imp_fun.get_hazard_types()) vulner_2 = ImpactFunc("TC", 1) imp_fun.append(vulner_2) self.assertEqual(1, len(imp_fun.get_hazard_types())) - self.assertEqual(['TC'], imp_fun.get_hazard_types()) 
+ self.assertEqual(["TC"], imp_fun.get_hazard_types()) vulner_3 = ImpactFunc("FL", 1) imp_fun.append(vulner_3) self.assertEqual(2, len(imp_fun.get_hazard_types())) - self.assertIn('TC', imp_fun.get_hazard_types()) - self.assertIn('FL', imp_fun.get_hazard_types()) + self.assertIn("TC", imp_fun.get_hazard_types()) + self.assertIn("FL", imp_fun.get_hazard_types()) def test_get_ids_pass(self): """Test normal functionality of get_ids method.""" @@ -103,63 +115,63 @@ def test_get_ids_pass(self): vulner_1 = ImpactFunc("TC", 1) imp_fun.append(vulner_1) self.assertEqual(1, len(imp_fun.get_ids())) - self.assertIn('TC', imp_fun.get_ids()) - self.assertEqual(1, len(imp_fun.get_ids('TC'))) - self.assertEqual([1], imp_fun.get_ids('TC')) + self.assertIn("TC", imp_fun.get_ids()) + self.assertEqual(1, len(imp_fun.get_ids("TC"))) + self.assertEqual([1], imp_fun.get_ids("TC")) vulner_2 = ImpactFunc("TC", 3) imp_fun.append(vulner_2) self.assertEqual(1, len(imp_fun.get_ids())) - self.assertIn('TC', imp_fun.get_ids()) - self.assertEqual(2, len(imp_fun.get_ids('TC'))) - self.assertEqual([1, 3], imp_fun.get_ids('TC')) + self.assertIn("TC", imp_fun.get_ids()) + self.assertEqual(2, len(imp_fun.get_ids("TC"))) + self.assertEqual([1, 3], imp_fun.get_ids("TC")) vulner_3 = ImpactFunc("FL", 3) imp_fun.append(vulner_3) self.assertEqual(2, len(imp_fun.get_ids())) - self.assertIn('TC', imp_fun.get_ids()) - self.assertIn('FL', imp_fun.get_ids()) - self.assertEqual(2, len(imp_fun.get_ids('TC'))) - self.assertEqual([1, 3], imp_fun.get_ids('TC')) - self.assertEqual(1, len(imp_fun.get_ids('FL'))) - self.assertEqual([3], imp_fun.get_ids('FL')) + self.assertIn("TC", imp_fun.get_ids()) + self.assertIn("FL", imp_fun.get_ids()) + self.assertEqual(2, len(imp_fun.get_ids("TC"))) + self.assertEqual([1, 3], imp_fun.get_ids("TC")) + self.assertEqual(1, len(imp_fun.get_ids("FL"))) + self.assertEqual([3], imp_fun.get_ids("FL")) def test_get_ids_wrong_zero(self): """Test get_ids method with wrong inputs.""" imp_fun = ImpactFuncSet([ImpactFunc("WS", 56)]) - self.assertEqual([], imp_fun.get_ids('TC')) + self.assertEqual([], imp_fun.get_ids("TC")) def test_get_func_pass(self): """Test normal functionality of get_func method.""" imp_fun = ImpactFuncSet() vulner_1 = ImpactFunc("WS", 56) imp_fun.append(vulner_1) - self.assertEqual(1, len(imp_fun.get_func('WS'))) + self.assertEqual(1, len(imp_fun.get_func("WS"))) self.assertEqual(1, len(imp_fun.get_func(fun_id=56))) - self.assertIs(vulner_1, imp_fun.get_func('WS', 56)) + self.assertIs(vulner_1, imp_fun.get_func("WS", 56)) vulner_2 = ImpactFunc("WS", 6) imp_fun.append(vulner_2) - self.assertEqual(2, len(imp_fun.get_func('WS'))) + self.assertEqual(2, len(imp_fun.get_func("WS"))) self.assertEqual(1, len(imp_fun.get_func(fun_id=6))) - self.assertIs(vulner_2, imp_fun.get_func('WS', 6)) + self.assertIs(vulner_2, imp_fun.get_func("WS", 6)) vulner_3 = ImpactFunc("TC", 6) imp_fun.append(vulner_3) self.assertEqual(2, len(imp_fun.get_func(fun_id=6))) self.assertEqual(1, len(imp_fun.get_func(fun_id=56))) - self.assertEqual(2, len(imp_fun.get_func('WS'))) - self.assertEqual(1, len(imp_fun.get_func('TC'))) - self.assertIs(vulner_3, imp_fun.get_func('TC', 6)) + self.assertEqual(2, len(imp_fun.get_func("WS"))) + self.assertEqual(1, len(imp_fun.get_func("TC"))) + self.assertIs(vulner_3, imp_fun.get_func("TC", 6)) self.assertEqual(2, len(imp_fun.get_func().keys())) - self.assertEqual(1, len(imp_fun.get_func()['TC'].keys())) - self.assertEqual(2, len(imp_fun.get_func()['WS'].keys())) + self.assertEqual(1, 
len(imp_fun.get_func()["TC"].keys())) + self.assertEqual(2, len(imp_fun.get_func()["WS"].keys())) def test_get_func_wrong_error(self): """Test get_func method with wrong inputs.""" imp_fun = ImpactFuncSet([ImpactFunc("WS", 56)]) - self.assertEqual([], imp_fun.get_func('TC')) + self.assertEqual([], imp_fun.get_func("TC")) def test_size_pass(self): """Test size function.""" @@ -169,37 +181,37 @@ def test_size_pass(self): vulner_1 = ImpactFunc("WS", 56) imp_fun.append(vulner_1) self.assertEqual(1, imp_fun.size()) - self.assertEqual(1, imp_fun.size('WS', 56)) - self.assertEqual(1, imp_fun.size('WS')) + self.assertEqual(1, imp_fun.size("WS", 56)) + self.assertEqual(1, imp_fun.size("WS")) self.assertEqual(1, imp_fun.size(fun_id=56)) imp_fun.append(vulner_1) self.assertEqual(1, imp_fun.size()) - self.assertEqual(1, imp_fun.size('WS', 56)) - self.assertEqual(1, imp_fun.size('WS')) + self.assertEqual(1, imp_fun.size("WS", 56)) + self.assertEqual(1, imp_fun.size("WS")) self.assertEqual(1, imp_fun.size(fun_id=56)) vulner_2 = ImpactFunc("WS", 5) imp_fun.append(vulner_2) self.assertEqual(2, imp_fun.size()) - self.assertEqual(1, imp_fun.size('WS', 56)) - self.assertEqual(2, imp_fun.size('WS')) + self.assertEqual(1, imp_fun.size("WS", 56)) + self.assertEqual(2, imp_fun.size("WS")) self.assertEqual(1, imp_fun.size(fun_id=56)) self.assertEqual(1, imp_fun.size(fun_id=5)) vulner_3 = ImpactFunc("TC", 5) imp_fun.append(vulner_3) self.assertEqual(3, imp_fun.size()) - self.assertEqual(1, imp_fun.size('TC', 5)) - self.assertEqual(2, imp_fun.size('WS')) - self.assertEqual(1, imp_fun.size('TC')) + self.assertEqual(1, imp_fun.size("TC", 5)) + self.assertEqual(2, imp_fun.size("WS")) + self.assertEqual(1, imp_fun.size("TC")) self.assertEqual(1, imp_fun.size(fun_id=56)) self.assertEqual(2, imp_fun.size(fun_id=5)) def test_size_wrong_zero(self): """Test size method with wrong inputs.""" imp_fun = ImpactFuncSet() - self.assertEqual(0, imp_fun.size('TC')) - self.assertEqual(0, imp_fun.size('TC', 3)) + self.assertEqual(0, imp_fun.size("TC")) + self.assertEqual(0, imp_fun.size("TC", 3)) self.assertEqual(0, imp_fun.size(fun_id=3)) def test_append_pass(self): @@ -208,31 +220,32 @@ def test_append_pass(self): vulner_1 = ImpactFunc("TC", 1) imp_fun.append(vulner_1) self.assertEqual(1, len(imp_fun._data)) - self.assertIn('TC', imp_fun._data.keys()) - self.assertEqual(1, len(imp_fun._data['TC'])) - self.assertIn(1, imp_fun._data['TC'].keys()) + self.assertIn("TC", imp_fun._data.keys()) + self.assertEqual(1, len(imp_fun._data["TC"])) + self.assertIn(1, imp_fun._data["TC"].keys()) vulner_2 = ImpactFunc("TC", 3) imp_fun.append(vulner_2) self.assertEqual(1, len(imp_fun._data)) - self.assertIn('TC', imp_fun._data.keys()) - self.assertEqual(2, len(imp_fun._data['TC'])) - self.assertIn(1, imp_fun._data['TC'].keys()) - self.assertIn(3, imp_fun._data['TC'].keys()) + self.assertIn("TC", imp_fun._data.keys()) + self.assertEqual(2, len(imp_fun._data["TC"])) + self.assertIn(1, imp_fun._data["TC"].keys()) + self.assertIn(3, imp_fun._data["TC"].keys()) vulner_3 = ImpactFunc("FL", 3) imp_fun.append(vulner_3) self.assertEqual(2, len(imp_fun._data)) - self.assertIn('TC', imp_fun._data.keys()) - self.assertIn('FL', imp_fun._data.keys()) - self.assertEqual(2, len(imp_fun._data['TC'])) - self.assertEqual(1, len(imp_fun._data['FL'])) - self.assertIn(1, imp_fun._data['TC'].keys()) - self.assertIn(3, imp_fun._data['TC'].keys()) - self.assertIn(3, imp_fun._data['FL'].keys()) + self.assertIn("TC", imp_fun._data.keys()) + self.assertIn("FL", 
imp_fun._data.keys()) + self.assertEqual(2, len(imp_fun._data["TC"])) + self.assertEqual(1, len(imp_fun._data["FL"])) + self.assertIn(1, imp_fun._data["TC"].keys()) + self.assertIn(3, imp_fun._data["TC"].keys()) + self.assertIn(3, imp_fun._data["FL"].keys()) def test_init_with_iterable(self): """Check that initializing with iterables works""" + def _check_contents(imp_fun): self.assertEqual(imp_fun.size("TC"), 2) self.assertEqual(imp_fun.size("FL"), 1) @@ -247,11 +260,17 @@ def _check_contents(imp_fun): self.assertFalse(impf_set.get_ids("TC")) # Initialize with list - _check_contents(ImpactFuncSet( - [ImpactFunc("TC", 1), ImpactFunc("TC", 3), ImpactFunc("FL", 3)])) + _check_contents( + ImpactFuncSet( + [ImpactFunc("TC", 1), ImpactFunc("TC", 3), ImpactFunc("FL", 3)] + ) + ) # Initialize with tuple - _check_contents(ImpactFuncSet( - (ImpactFunc("TC", 1), ImpactFunc("TC", 3), ImpactFunc("FL", 3)))) + _check_contents( + ImpactFuncSet( + (ImpactFunc("TC", 1), ImpactFunc("TC", 3), ImpactFunc("FL", 3)) + ) + ) def test_remove_add_pass(self): """Test ImpactFunc can be added after removing.""" @@ -264,12 +283,14 @@ def test_remove_add_pass(self): imp_fun.append(vulner_1) self.assertEqual(1, len(imp_fun.get_hazard_types())) - self.assertEqual('TC', imp_fun.get_hazard_types()[0]) + self.assertEqual("TC", imp_fun.get_hazard_types()[0]) self.assertEqual(1, len(imp_fun.get_ids())) - self.assertEqual([1], imp_fun.get_ids('TC')) + self.assertEqual([1], imp_fun.get_ids("TC")) + class TestChecker(unittest.TestCase): """Test loading funcions from the ImpactFuncSet class""" + def test_check_wrongPAA_fail(self): """Wrong PAA definition""" intensity = np.array([1, 2, 3]) @@ -280,7 +301,7 @@ def test_check_wrongPAA_fail(self): with self.assertRaises(ValueError) as cm: imp_fun.check() - self.assertIn('Invalid ImpactFunc.paa size: 3 != 2.', str(cm.exception)) + self.assertIn("Invalid ImpactFunc.paa size: 3 != 2.", str(cm.exception)) def test_check_wrongMDD_fail(self): """Wrong MDD definition""" @@ -292,21 +313,24 @@ def test_check_wrongMDD_fail(self): with self.assertRaises(ValueError) as cm: imp_fun.check() - self.assertIn('Invalid ImpactFunc.mdd size: 3 != 2.', str(cm.exception)) + self.assertIn("Invalid ImpactFunc.mdd size: 3 != 2.", str(cm.exception)) + class TestExtend(unittest.TestCase): """Check extend function""" + def test_extend_to_empty_same(self): """Extend ImpactFuncSet to empty one.""" imp_fun = ImpactFuncSet() imp_fun_add = ImpactFuncSet( - (ImpactFunc("TC", 1), ImpactFunc("TC", 3), ImpactFunc("FL", 3))) + (ImpactFunc("TC", 1), ImpactFunc("TC", 3), ImpactFunc("FL", 3)) + ) imp_fun.extend(imp_fun_add) imp_fun.check() self.assertEqual(imp_fun.size(), 3) - self.assertEqual(imp_fun.size('TC'), 2) - self.assertEqual(imp_fun.size('FL'), 1) + self.assertEqual(imp_fun.size("TC"), 2) + self.assertEqual(imp_fun.size("FL"), 1) def test_extend_equal_same(self): """Extend the same ImpactFuncSet. The inital ImpactFuncSet is obtained.""" @@ -318,7 +342,7 @@ def test_extend_equal_same(self): imp_fun.check() self.assertEqual(imp_fun.size(), 1) - self.assertEqual(imp_fun.size('TC'), 1) + self.assertEqual(imp_fun.size("TC"), 1) def test_extend_different_extend(self): """Extend ImpactFuncSet with same and new values. 
The vulnerabilities @@ -334,14 +358,16 @@ def test_extend_different_extend(self): imp_fun.append(vulner_3) imp_fun_add = ImpactFuncSet( - (ImpactFunc("TC", 1), ImpactFunc("WS", 1), ImpactFunc("FL", 3))) + (ImpactFunc("TC", 1), ImpactFunc("WS", 1), ImpactFunc("FL", 3)) + ) imp_fun.extend(imp_fun_add) imp_fun.check() self.assertEqual(imp_fun.size(), 4) - self.assertEqual(imp_fun.size('TC'), 2) - self.assertEqual(imp_fun.size('FL'), 1) - self.assertEqual(imp_fun.size('WS'), 1) + self.assertEqual(imp_fun.size("TC"), 2) + self.assertEqual(imp_fun.size("FL"), 1) + self.assertEqual(imp_fun.size("WS"), 1) + class TestReaderMat(unittest.TestCase): """Test reader functionality of the imp_funcsFuncsExcel class""" @@ -353,7 +379,7 @@ def test_demo_file_pass(self): # Check results n_funcs = 2 - hazard = 'TC' + hazard = "TC" first_id = 1 second_id = 3 @@ -362,13 +388,12 @@ def test_demo_file_pass(self): # first function self.assertEqual(imp_funcs._data[hazard][first_id].id, 1) - self.assertEqual(imp_funcs._data[hazard][first_id].name, - 'Tropical cyclone default') - self.assertEqual(imp_funcs._data[hazard][first_id].intensity_unit, - 'm/s') + self.assertEqual( + imp_funcs._data[hazard][first_id].name, "Tropical cyclone default" + ) + self.assertEqual(imp_funcs._data[hazard][first_id].intensity_unit, "m/s") - self.assertEqual(imp_funcs._data[hazard][first_id].intensity.shape, - (9,)) + self.assertEqual(imp_funcs._data[hazard][first_id].intensity.shape, (9,)) self.assertEqual(imp_funcs._data[hazard][first_id].intensity[0], 0) self.assertEqual(imp_funcs._data[hazard][first_id].intensity[1], 20) self.assertEqual(imp_funcs._data[hazard][first_id].intensity[2], 30) @@ -389,13 +414,10 @@ def test_demo_file_pass(self): # second function self.assertEqual(imp_funcs._data[hazard][second_id].id, 3) - self.assertEqual(imp_funcs._data[hazard][second_id].name, - 'TC Building code') - self.assertEqual(imp_funcs._data[hazard][first_id].intensity_unit, - 'm/s') + self.assertEqual(imp_funcs._data[hazard][second_id].name, "TC Building code") + self.assertEqual(imp_funcs._data[hazard][first_id].intensity_unit, "m/s") - self.assertEqual(imp_funcs._data[hazard][second_id].intensity.shape, - (9,)) + self.assertEqual(imp_funcs._data[hazard][second_id].intensity.shape, (9,)) self.assertEqual(imp_funcs._data[hazard][second_id].intensity[0], 0) self.assertEqual(imp_funcs._data[hazard][second_id].intensity[1], 20) self.assertEqual(imp_funcs._data[hazard][second_id].intensity[2], 30) @@ -426,7 +448,7 @@ def test_demo_file_pass(self): # Check results n_funcs = 2 - hazard = 'TC' + hazard = "TC" first_id = 1 second_id = 3 @@ -435,13 +457,12 @@ def test_demo_file_pass(self): # first function self.assertEqual(imp_funcs._data[hazard][first_id].id, 1) - self.assertEqual(imp_funcs._data[hazard][first_id].name, - 'Tropical cyclone default') - self.assertEqual(imp_funcs._data[hazard][first_id].intensity_unit, - 'm/s') + self.assertEqual( + imp_funcs._data[hazard][first_id].name, "Tropical cyclone default" + ) + self.assertEqual(imp_funcs._data[hazard][first_id].intensity_unit, "m/s") - self.assertEqual(imp_funcs._data[hazard][first_id].intensity.shape, - (9,)) + self.assertEqual(imp_funcs._data[hazard][first_id].intensity.shape, (9,)) self.assertEqual(imp_funcs._data[hazard][first_id].intensity[0], 0) self.assertEqual(imp_funcs._data[hazard][first_id].intensity[1], 20) self.assertEqual(imp_funcs._data[hazard][first_id].intensity[2], 30) @@ -462,13 +483,10 @@ def test_demo_file_pass(self): # second function 
self.assertEqual(imp_funcs._data[hazard][second_id].id, 3) - self.assertEqual(imp_funcs._data[hazard][second_id].name, - 'TC Building code') - self.assertEqual(imp_funcs._data[hazard][first_id].intensity_unit, - 'm/s') + self.assertEqual(imp_funcs._data[hazard][second_id].name, "TC Building code") + self.assertEqual(imp_funcs._data[hazard][first_id].intensity_unit, "m/s") - self.assertEqual(imp_funcs._data[hazard][second_id].intensity.shape, - (9,)) + self.assertEqual(imp_funcs._data[hazard][second_id].intensity.shape, (9,)) self.assertEqual(imp_funcs._data[hazard][second_id].intensity[0], 0) self.assertEqual(imp_funcs._data[hazard][second_id].intensity[1], 20) self.assertEqual(imp_funcs._data[hazard][second_id].intensity[2], 30) @@ -492,9 +510,10 @@ def test_template_file_pass(self): imp_funcs = ImpactFuncSet.from_excel(ENT_TEMPLATE_XLS) # Check some results self.assertEqual(len(imp_funcs._data), 10) - self.assertEqual(len(imp_funcs._data['TC'][3].paa), 9) - self.assertEqual(len(imp_funcs._data['EQ'][1].intensity), 14) - self.assertEqual(len(imp_funcs._data['HS'][1].mdd), 16) + self.assertEqual(len(imp_funcs._data["TC"][3].paa), 9) + self.assertEqual(len(imp_funcs._data["EQ"][1].intensity), 14) + self.assertEqual(len(imp_funcs._data["HS"][1].mdd), 16) + class TestWriter(unittest.TestCase): """Test reader functionality of the imp_funcsFuncsExcel class""" @@ -505,9 +524,9 @@ def test_write_read_pass(self): imp_funcs = ImpactFuncSet() idx = 1 - name = 'code 1' - intensity_unit = 'm/s' - haz_type = 'TC' + name = "code 1" + intensity_unit = "m/s" + haz_type = "TC" intensity = np.arange(100) mdd = np.arange(100) * 0.5 paa = np.ones(100) @@ -515,7 +534,7 @@ def test_write_read_pass(self): imp_funcs.append(imp1) idx = 2 - name = 'code 2' + name = "code 2" intensity = np.arange(102) mdd = np.arange(102) * 0.25 paa = np.ones(102) @@ -523,9 +542,9 @@ def test_write_read_pass(self): imp_funcs.append(imp2) idx = 1 - name = 'code 1' - intensity_unit = 'm' - haz_type = 'FL' + name = "code 1" + intensity_unit = "m" + haz_type = "FL" intensity = np.arange(86) mdd = np.arange(86) * 0.15 paa = np.ones(86) @@ -533,16 +552,16 @@ def test_write_read_pass(self): imp_funcs.append(imp3) idx = 15 - name = 'code 15' - intensity_unit = 'K' - haz_type = 'DR' + name = "code 15" + intensity_unit = "K" + haz_type = "DR" intensity = np.arange(5) mdd = np.arange(5) paa = np.ones(5) imp4 = ImpactFunc(haz_type, idx, intensity, mdd, paa, intensity_unit, name) imp_funcs.append(imp4) - file_name = CONFIG.impact_funcs.test_data.dir().joinpath('test_write.xlsx') + file_name = CONFIG.impact_funcs.test_data.dir().joinpath("test_write.xlsx") imp_funcs.write_excel(file_name) imp_res = ImpactFuncSet.from_excel(file_name) @@ -550,13 +569,13 @@ def test_write_read_pass(self): # first function for fun_haz, fun_dict in imp_res.get_func().items(): for fun_id, fun in fun_dict.items(): - if fun_haz == 'TC' and fun_id == 1: + if fun_haz == "TC" and fun_id == 1: ref_fun = imp1 - elif fun_haz == 'TC' and fun_id == 2: + elif fun_haz == "TC" and fun_id == 2: ref_fun = imp2 - elif fun_haz == 'FL' and fun_id == 1: + elif fun_haz == "FL" and fun_id == 1: ref_fun = imp3 - elif fun_haz == 'DR' and fun_id == 15: + elif fun_haz == "DR" and fun_id == 15: ref_fun = imp4 else: self.assertEqual(1, 0) @@ -569,6 +588,7 @@ def test_write_read_pass(self): self.assertTrue(np.allclose(ref_fun.mdd, fun.mdd)) self.assertTrue(np.allclose(ref_fun.paa, fun.paa)) + # Execute Tests if __name__ == "__main__": TESTS = 
unittest.TestLoader().loadTestsFromTestCase(TestContainer) diff --git a/climada/entity/impact_funcs/test/test_tc.py b/climada/entity/impact_funcs/test/test_tc.py index e2db9e609..ffa502b51 100644 --- a/climada/entity/impact_funcs/test/test_tc.py +++ b/climada/entity/impact_funcs/test/test_tc.py @@ -20,11 +20,12 @@ """ import unittest + import numpy as np import pandas as pd -from climada.entity.impact_funcs.trop_cyclone import ImpfTropCyclone -from climada.entity.impact_funcs.trop_cyclone import ImpfSetTropCyclone +from climada.entity.impact_funcs.trop_cyclone import ImpfSetTropCyclone, ImpfTropCyclone + class TestEmanuelFormula(unittest.TestCase): """Impact function interpolation test""" @@ -32,10 +33,10 @@ class TestEmanuelFormula(unittest.TestCase): def test_default_values_pass(self): """Compute mdr interpolating values.""" imp_fun = ImpfTropCyclone.from_emanuel_usa() - self.assertEqual(imp_fun.name, 'Emanuel 2011') - self.assertEqual(imp_fun.haz_type, 'TC') + self.assertEqual(imp_fun.name, "Emanuel 2011") + self.assertEqual(imp_fun.haz_type, "TC") self.assertEqual(imp_fun.id, 1) - self.assertEqual(imp_fun.intensity_unit, 'm/s') + self.assertEqual(imp_fun.intensity_unit, "m/s") self.assertTrue(np.array_equal(imp_fun.intensity, np.arange(0, 121, 5))) self.assertTrue(np.array_equal(imp_fun.paa, np.ones((25,)))) self.assertTrue(np.array_equal(imp_fun.mdd[0:6], np.zeros((6,)))) @@ -66,38 +67,43 @@ def test_default_values_pass(self): def test_values_pass(self): """Compute mdr interpolating values.""" - imp_fun = ImpfTropCyclone.from_emanuel_usa(impf_id=5, - intensity=np.arange(0, 6, 1), - v_thresh=2, - v_half=5, - scale=0.5) - self.assertEqual(imp_fun.name, 'Emanuel 2011') - self.assertEqual(imp_fun.haz_type, 'TC') + imp_fun = ImpfTropCyclone.from_emanuel_usa( + impf_id=5, intensity=np.arange(0, 6, 1), v_thresh=2, v_half=5, scale=0.5 + ) + self.assertEqual(imp_fun.name, "Emanuel 2011") + self.assertEqual(imp_fun.haz_type, "TC") self.assertEqual(imp_fun.id, 5) - self.assertEqual(imp_fun.intensity_unit, 'm/s') + self.assertEqual(imp_fun.intensity_unit, "m/s") self.assertTrue(np.array_equal(imp_fun.intensity, np.arange(0, 6, 1))) self.assertTrue(np.array_equal(imp_fun.paa, np.ones((6,)))) self.assertTrue(np.array_equal(imp_fun.mdd[0:3], np.zeros((3,)))) - self.assertTrue(np.array_equal(imp_fun.mdd[3:], - np.array([0.017857142857142853, 0.11428571428571425, - 0.250000000000000]))) + self.assertTrue( + np.array_equal( + imp_fun.mdd[3:], + np.array( + [0.017857142857142853, 0.11428571428571425, 0.250000000000000] + ), + ) + ) def test_wrong_shape(self): """Set shape parameters.""" with self.assertRaises(ValueError): - imp_fun = ImpfTropCyclone.from_emanuel_usa(impf_id=5, v_thresh=2, - v_half=1, - intensity=np.arange(0, 6, 1)) + imp_fun = ImpfTropCyclone.from_emanuel_usa( + impf_id=5, v_thresh=2, v_half=1, intensity=np.arange(0, 6, 1) + ) def test_wrong_scale(self): """Set shape parameters.""" with self.assertRaises(ValueError): - imp_fun = ImpfTropCyclone.from_emanuel_usa(impf_id=5, scale=2, - intensity=np.arange(0, 6, 1)) + imp_fun = ImpfTropCyclone.from_emanuel_usa( + impf_id=5, scale=2, intensity=np.arange(0, 6, 1) + ) + class TestCalibratedImpfSet(unittest.TestCase): """Test inititation of IFS with regional calibrated TC IFs - based on Eberenz et al. (2020)""" + based on Eberenz et al. 
(2020)""" def test_default_values_pass(self): """Test return TDR optimized IFs (TDR=1)""" @@ -105,31 +111,33 @@ def test_default_values_pass(self): v_halfs = ImpfSetTropCyclone.calibrated_regional_vhalf() # extract IF for region WP4 impf_wp4 = impfs.get_func(fun_id=9)[0] - self.assertIn('TC', impfs.get_ids().keys()) + self.assertIn("TC", impfs.get_ids().keys()) self.assertEqual(impfs.size(), 10) - self.assertEqual(impfs.get_ids()['TC'], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) - self.assertEqual(impf_wp4.intensity_unit, 'm/s') - self.assertEqual(impf_wp4.name, 'North West Pacific (WP4)') - self.assertAlmostEqual(v_halfs['WP2'], 188.4, places=7) - self.assertAlmostEqual(v_halfs['ROW'], 110.1, places=7) + self.assertEqual(impfs.get_ids()["TC"], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + self.assertEqual(impf_wp4.intensity_unit, "m/s") + self.assertEqual(impf_wp4.name, "North West Pacific (WP4)") + self.assertAlmostEqual(v_halfs["WP2"], 188.4, places=7) + self.assertAlmostEqual(v_halfs["ROW"], 110.1, places=7) self.assertListEqual(list(impf_wp4.intensity), list(np.arange(0, 121, 5))) - self.assertEqual(impf_wp4.paa.min(), 1.) + self.assertEqual(impf_wp4.paa.min(), 1.0) self.assertEqual(impf_wp4.mdd.min(), 0.0) self.assertAlmostEqual(impf_wp4.mdd.max(), 0.15779133833203, places=5) self.assertAlmostEqual(impf_wp4.calc_mdr(75), 0.02607326527808, places=5) def test_RMSF_pass(self): """Test return RMSF optimized impact function set (RMSF=minimum)""" - impfs = ImpfSetTropCyclone.from_calibrated_regional_ImpfSet('RMSF') - v_halfs = ImpfSetTropCyclone.calibrated_regional_vhalf(calibration_approach='RMSF') + impfs = ImpfSetTropCyclone.from_calibrated_regional_ImpfSet("RMSF") + v_halfs = ImpfSetTropCyclone.calibrated_regional_vhalf( + calibration_approach="RMSF" + ) # extract IF for region NA1 impf_na1 = impfs.get_func(fun_id=1)[0] self.assertEqual(impfs.size(), 10) - self.assertEqual(impfs.get_ids()['TC'], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) - self.assertEqual(impf_na1.intensity_unit, 'm/s') - self.assertEqual(impf_na1.name, 'Caribbean and Mexico (NA1)') - self.assertAlmostEqual(v_halfs['NA1'], 59.6, places=7) - self.assertAlmostEqual(v_halfs['ROW'], 73.4, places=7) + self.assertEqual(impfs.get_ids()["TC"], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + self.assertEqual(impf_na1.intensity_unit, "m/s") + self.assertEqual(impf_na1.name, "Caribbean and Mexico (NA1)") + self.assertAlmostEqual(v_halfs["NA1"], 59.6, places=7) + self.assertAlmostEqual(v_halfs["ROW"], 73.4, places=7) self.assertListEqual(list(impf_na1.intensity), list(np.arange(0, 121, 5))) self.assertEqual(impf_na1.mdd.min(), 0.0) self.assertAlmostEqual(impf_na1.mdd.max(), 0.95560418241669, places=5) @@ -137,15 +145,15 @@ def test_RMSF_pass(self): def test_quantile_pass(self): """Test return impact function set from quantile of inidividual event fitting (EDR=1)""" - impfs = ImpfSetTropCyclone.from_calibrated_regional_ImpfSet('EDR') - impfs_p10 = ImpfSetTropCyclone.from_calibrated_regional_ImpfSet('EDR', q=.1) + impfs = ImpfSetTropCyclone.from_calibrated_regional_ImpfSet("EDR") + impfs_p10 = ImpfSetTropCyclone.from_calibrated_regional_ImpfSet("EDR", q=0.1) # extract IF for region SI impf_si = impfs.get_func(fun_id=5)[0] impf_si_p10 = impfs_p10.get_func(fun_id=5)[0] self.assertEqual(impfs.size(), 10) self.assertEqual(impfs_p10.size(), 10) - self.assertEqual(impf_si.intensity_unit, 'm/s') - self.assertEqual(impf_si_p10.name, 'South Indian (SI)') + self.assertEqual(impf_si.intensity_unit, "m/s") + self.assertEqual(impf_si_p10.name, "South Indian (SI)") 
self.assertAlmostEqual(impf_si_p10.mdd.max(), 0.99999999880, places=5) self.assertAlmostEqual(impf_si.calc_mdr(30), 0.01620503041, places=5) intensity = np.random.randint(26, impf_si.intensity.max()) @@ -154,11 +162,12 @@ def test_quantile_pass(self): def test_get_countries_per_region(self): """Test static get_countries_per_region()""" ifs = ImpfSetTropCyclone() - out = ifs.get_countries_per_region('NA2') - self.assertEqual(out[0], 'USA and Canada') + out = ifs.get_countries_per_region("NA2") + self.assertEqual(out[0], "USA and Canada") self.assertEqual(out[1], 2) self.assertListEqual(out[2], [124, 840]) - self.assertListEqual(out[3], ['CAN', 'USA']) + self.assertListEqual(out[3], ["CAN", "USA"]) + # Execute Tests if __name__ == "__main__": diff --git a/climada/entity/impact_funcs/test/test_ws.py b/climada/entity/impact_funcs/test/test_ws.py index 4b2f79c39..61258a537 100644 --- a/climada/entity/impact_funcs/test/test_ws.py +++ b/climada/entity/impact_funcs/test/test_ws.py @@ -20,39 +20,89 @@ """ import unittest + import numpy as np from climada.entity.impact_funcs.storm_europe import ImpfStormEurope + class TestStormEuropeDefault(unittest.TestCase): """Impact function interpolation test""" def test_default_values_pass(self): """Compute mdr interpolating values.""" imp_fun = ImpfStormEurope.from_schwierz() - self.assertEqual(imp_fun.name, 'Schwierz 2010') - self.assertEqual(imp_fun.haz_type, 'WS') + self.assertEqual(imp_fun.name, "Schwierz 2010") + self.assertEqual(imp_fun.haz_type, "WS") self.assertEqual(imp_fun.id, 1) - self.assertEqual(imp_fun.intensity_unit, 'm/s') - self.assertTrue(np.array_equal(imp_fun.intensity, np.array([0, 20, 25, 30, 35, 40, 45, 50, 55, 60, 80, 100]))) - self.assertTrue(np.array_equal(imp_fun.paa[4:8], np.array([0.03921, 0.10707, 0.25357, 0.48869]))) - self.assertTrue(np.array_equal(imp_fun.mdd[4:8], np.array([0.00367253, 0.00749977, 0.01263556, 0.01849639]))) + self.assertEqual(imp_fun.intensity_unit, "m/s") + self.assertTrue( + np.array_equal( + imp_fun.intensity, + np.array([0, 20, 25, 30, 35, 40, 45, 50, 55, 60, 80, 100]), + ) + ) + self.assertTrue( + np.array_equal( + imp_fun.paa[4:8], np.array([0.03921, 0.10707, 0.25357, 0.48869]) + ) + ) + self.assertTrue( + np.array_equal( + imp_fun.mdd[4:8], + np.array([0.00367253, 0.00749977, 0.01263556, 0.01849639]), + ) + ) imp_fun2 = ImpfStormEurope.from_welker() - self.assertEqual(imp_fun2.name, 'Welker 2021') - self.assertEqual(imp_fun2.haz_type, 'WS') + self.assertEqual(imp_fun2.name, "Welker 2021") + self.assertEqual(imp_fun2.haz_type, "WS") self.assertEqual(imp_fun2.id, 1) - self.assertEqual(imp_fun2.intensity_unit, 'm/s') - self.assertTrue(np.array_equal(imp_fun2.intensity[np.arange(0, 120, 13)], - np.array([0., 10., 20., 30., 40., 50., 60., 70., 80., 90.]))) - self.assertTrue(np.allclose(imp_fun2.paa[np.arange(0, 120, 13)], - np.array([0., 0., 0., 0.00900782, 0.1426727, - 0.65118822, 1., 1., 1., 1.]))) - self.assertTrue(np.allclose(imp_fun2.mdd[np.arange(0, 120, 13)], - np.array([0., 0., 0., 0.00236542, 0.00999358, - 0.02464677, 0.04964029, 0.04964029, 0.04964029, 0.04964029]))) - - + self.assertEqual(imp_fun2.intensity_unit, "m/s") + self.assertTrue( + np.array_equal( + imp_fun2.intensity[np.arange(0, 120, 13)], + np.array([0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0]), + ) + ) + self.assertTrue( + np.allclose( + imp_fun2.paa[np.arange(0, 120, 13)], + np.array( + [ + 0.0, + 0.0, + 0.0, + 0.00900782, + 0.1426727, + 0.65118822, + 1.0, + 1.0, + 1.0, + 1.0, + ] + ), + ) + ) + self.assertTrue( + 
np.allclose( + imp_fun2.mdd[np.arange(0, 120, 13)], + np.array( + [ + 0.0, + 0.0, + 0.0, + 0.00236542, + 0.00999358, + 0.02464677, + 0.04964029, + 0.04964029, + 0.04964029, + 0.04964029, + ] + ), + ) + ) # Execute Tests diff --git a/climada/entity/impact_funcs/trop_cyclone.py b/climada/entity/impact_funcs/trop_cyclone.py index ab432f625..18492bbb1 100644 --- a/climada/entity/impact_funcs/trop_cyclone.py +++ b/climada/entity/impact_funcs/trop_cyclone.py @@ -19,12 +19,13 @@ Define impact functions for tropical cyclnes . """ -__all__ = ['ImpfTropCyclone', 'ImpfSetTropCyclone', 'IFTropCyclone'] +__all__ = ["ImpfTropCyclone", "ImpfSetTropCyclone", "IFTropCyclone"] import logging -from deprecation import deprecated + import numpy as np import pandas as pd +from deprecation import deprecated from climada.entity.impact_funcs.base import ImpactFunc from climada.entity.impact_funcs.impact_func_set import ImpactFuncSet @@ -32,22 +33,31 @@ LOGGER = logging.getLogger(__name__) + class ImpfTropCyclone(ImpactFunc): """Impact functions for tropical cyclones.""" def __init__(self): ImpactFunc.__init__(self) - self.haz_type = 'TC' + self.haz_type = "TC" def set_emanuel_usa(self, *args, **kwargs): """This function is deprecated, use from_emanuel_usa() instead.""" - LOGGER.warning("The use of ImpfTropCyclone.set_emanuel_usa is deprecated." - "Use ImpfTropCyclone.from_emanuel_usa instead.") + LOGGER.warning( + "The use of ImpfTropCyclone.set_emanuel_usa is deprecated." + "Use ImpfTropCyclone.from_emanuel_usa instead." + ) self.__dict__ = ImpfTropCyclone.from_emanuel_usa(*args, **kwargs).__dict__ @classmethod - def from_emanuel_usa(cls, impf_id=1, intensity=np.arange(0, 121, 5), - v_thresh=25.7, v_half=74.7, scale=1.0): + def from_emanuel_usa( + cls, + impf_id=1, + intensity=np.arange(0, 121, 5), + v_thresh=25.7, + v_half=74.7, + scale=1.0, + ): """ Init TC impact function using the formula of Kerry Emanuel, 2011: 'Global Warming Effects on U.S. Hurricane Damage', @@ -81,16 +91,16 @@ def from_emanuel_usa(cls, impf_id=1, intensity=np.arange(0, 121, 5), TC impact function instance based on formula by Emanuel (2011) """ if v_half <= v_thresh: - raise ValueError('Shape parameters out of range: v_half <= v_thresh.') + raise ValueError("Shape parameters out of range: v_half <= v_thresh.") if v_thresh < 0 or v_half < 0: - raise ValueError('Negative shape parameter.') + raise ValueError("Negative shape parameter.") if scale > 1 or scale <= 0: - raise ValueError('Scale parameter out of range.') + raise ValueError("Scale parameter out of range.") impf = cls() - impf.name = 'Emanuel 2011' + impf.name = "Emanuel 2011" impf.id = impf_id - impf.intensity_unit = 'm/s' + impf.intensity_unit = "m/s" impf.intensity = intensity impf.paa = np.ones(intensity.shape) v_temp = (impf.intensity - v_thresh) / (v_half - v_thresh) @@ -99,6 +109,7 @@ def from_emanuel_usa(cls, impf_id=1, intensity=np.arange(0, 121, 5), impf.mdd *= scale return impf + class ImpfSetTropCyclone(ImpactFuncSet): """Impact function set (ImpfS) for tropical cyclones.""" @@ -107,15 +118,19 @@ def __init__(self): def set_calibrated_regional_ImpfSet(self, *args, **kwargs): """This function is deprecated, use from_calibrated_regional_ImpfSet() instead.""" - LOGGER.warning("ImpfSetTropCyclone.set_calibrated_regional_ImpfSet is deprecated." 
- "Use ImpfSetTropCyclone.from_calibrated_regional_ImpfSet instead.") - self.__dict__ = \ - ImpfSetTropCyclone.from_calibrated_regional_ImpfSet(*args, **kwargs).__dict__ + LOGGER.warning( + "ImpfSetTropCyclone.set_calibrated_regional_ImpfSet is deprecated." + "Use ImpfSetTropCyclone.from_calibrated_regional_ImpfSet instead." + ) + self.__dict__ = ImpfSetTropCyclone.from_calibrated_regional_ImpfSet( + *args, **kwargs + ).__dict__ return ImpfSetTropCyclone.calibrated_regional_vhalf(*args, **kwargs) @classmethod - def from_calibrated_regional_ImpfSet(cls, calibration_approach='TDR', q=.5, - input_file_path=None, version=1): + def from_calibrated_regional_ImpfSet( + cls, calibration_approach="TDR", q=0.5, input_file_path=None, version=1 + ): """Calibrated regional TC wind impact functions Based on Eberenz et al. 2021: https://doi.org/10.5194/nhess-21-393-2021 @@ -154,38 +169,41 @@ def from_calibrated_regional_ImpfSet(cls, calibration_approach='TDR', q=.5, q=q, input_file_path=input_file_path, version=version, - ) + ) # define regions and parameters: v_0 = 25.7 # v_threshold based on Emanuel (2011) scale = 1.0 regions_long = dict() - regions_long['NA1'] = 'Caribbean and Mexico (NA1)' - regions_long['NA2'] = 'USA and Canada (NA2)' - regions_long['NI'] = 'North Indian (NI)' - regions_long['OC'] = 'Oceania (OC)' - regions_long['SI'] = 'South Indian (SI)' - regions_long['WP1'] = 'South East Asia (WP1)' - regions_long['WP2'] = 'Philippines (WP2)' - regions_long['WP3'] = 'China Mainland (WP3)' - regions_long['WP4'] = 'North West Pacific (WP4)' - regions_long['ROW'] = 'Global' + regions_long["NA1"] = "Caribbean and Mexico (NA1)" + regions_long["NA2"] = "USA and Canada (NA2)" + regions_long["NI"] = "North Indian (NI)" + regions_long["OC"] = "Oceania (OC)" + regions_long["SI"] = "South Indian (SI)" + regions_long["WP1"] = "South East Asia (WP1)" + regions_long["WP2"] = "Philippines (WP2)" + regions_long["WP3"] = "China Mainland (WP3)" + regions_long["WP4"] = "North West Pacific (WP4)" + regions_long["ROW"] = "Global" # init impact function set impf_set = cls() for idx, region in enumerate(reg_v_half.keys()): - impf_tc = ImpfTropCyclone.from_emanuel_usa(impf_id=int(idx + 1), - v_thresh=v_0, - v_half=reg_v_half[region], - scale=scale) + impf_tc = ImpfTropCyclone.from_emanuel_usa( + impf_id=int(idx + 1), + v_thresh=v_0, + v_half=reg_v_half[region], + scale=scale, + ) impf_tc.name = regions_long[region] impf_set.append(impf_tc) return impf_set @staticmethod - def calibrated_regional_vhalf(calibration_approach='TDR', q=.5, - input_file_path=None, version=1): + def calibrated_regional_vhalf( + calibration_approach="TDR", q=0.5, input_file_path=None, version=1 + ): """Calibrated TC wind impact function slope parameter v_half per region Based on Eberenz et al., 2021: https://doi.org/10.5194/nhess-21-393-2021 @@ -224,40 +242,46 @@ def calibrated_regional_vhalf(calibration_approach='TDR', q=.5, TC impact function slope parameter v_half per region """ calibration_approach = calibration_approach.upper() - if calibration_approach not in ['TDR', 'TDR1.0', 'TDR1.5', 'RMSF', 'EDR']: - raise ValueError('calibration_approach is invalid') - if 'EDR' in calibration_approach and (q < 0. 
or q > 1.): - raise ValueError('Quantile q out of range [0, 1]') - if calibration_approach == 'TDR': - calibration_approach = 'TDR1.0' + if calibration_approach not in ["TDR", "TDR1.0", "TDR1.5", "RMSF", "EDR"]: + raise ValueError("calibration_approach is invalid") + if "EDR" in calibration_approach and (q < 0.0 or q > 1.0): + raise ValueError("Quantile q out of range [0, 1]") + if calibration_approach == "TDR": + calibration_approach = "TDR1.0" # load calibration results depending on approach: if isinstance(input_file_path, str): - df_calib_results = pd.read_csv(input_file_path, - encoding="ISO-8859-1", header=0) + df_calib_results = pd.read_csv( + input_file_path, encoding="ISO-8859-1", header=0 + ) elif isinstance(input_file_path, pd.DataFrame): df_calib_results = input_file_path else: df_calib_results = pd.read_csv( SYSTEM_DIR.joinpath( - 'tc_impf_cal_v%02.0f_%s.csv' % (version, calibration_approach)), - encoding="ISO-8859-1", header=0) + "tc_impf_cal_v%02.0f_%s.csv" % (version, calibration_approach) + ), + encoding="ISO-8859-1", + header=0, + ) - regions_short = ['NA1', 'NA2', 'NI', 'OC', 'SI', 'WP1', 'WP2', 'WP3', 'WP4'] + regions_short = ["NA1", "NA2", "NI", "OC", "SI", "WP1", "WP2", "WP3", "WP4"] # loop over calibration regions (column cal_region2 in df): reg_v_half = dict() for region in regions_short: df_reg = df_calib_results.loc[df_calib_results.cal_region2 == region] df_reg = df_reg.reset_index(drop=True) - reg_v_half[region] = np.round(df_reg['v_half'].quantile(q=q), 5) + reg_v_half[region] = np.round(df_reg["v_half"].quantile(q=q), 5) # rest of the world (ROW), calibrated by all data: - regions_short = regions_short + ['ROW'] - if calibration_approach == 'EDR': - reg_v_half[regions_short[-1]] = np.round(df_calib_results['v_half'].quantile(q=q), 5) + regions_short = regions_short + ["ROW"] + if calibration_approach == "EDR": + reg_v_half[regions_short[-1]] = np.round( + df_calib_results["v_half"].quantile(q=q), 5 + ) else: - df_reg = df_calib_results.loc[df_calib_results.cal_region2 == 'GLB'] + df_reg = df_calib_results.loc[df_calib_results.cal_region2 == "GLB"] df_reg = df_reg.reset_index(drop=True) - reg_v_half[regions_short[-1]] = np.round(df_reg['v_half'].values[0], 5) + reg_v_half[regions_short[-1]] = np.round(df_reg["v_half"].values[0], 5) return reg_v_half @staticmethod @@ -286,95 +310,531 @@ def get_countries_per_region(region=None): numerical ISO3codes (=region_id) per region """ if not region: - region = 'all' - iso3n = {'NA1': [660, 28, 32, 533, 44, 52, 84, 60, 68, 132, 136, - 152, 170, 188, 192, 212, 214, 218, 222, 238, 254, - 308, 312, 320, 328, 332, 340, 388, 474, 484, 500, - 558, 591, 600, 604, 630, 654, 659, 662, 670, 534, - 740, 780, 796, 858, 862, 92, 850], - 'NA2': [124, 840], - 'NI': [4, 51, 31, 48, 50, 64, 262, 232, - 231, 268, 356, 364, 368, 376, 400, 398, 414, 417, - 422, 462, 496, 104, 524, 512, 586, 634, 682, 706, - 144, 760, 762, 795, 800, 784, 860, 887], - 'OC': [16, 36, 184, 242, 258, 316, 296, 584, 583, 520, - 540, 554, 570, 574, 580, 585, 598, 612, 882, 90, - 626, 772, 776, 798, 548, 876], - 'SI': [174, 180, 748, 450, 454, 466, 480, 508, 710, 834, - 716], - 'WP1': [116, 360, 418, 458, 764, 704], - 'WP2': [608], - 'WP3': [156], - 'WP4': [344, 392, 410, 446, 158], - 'ROW': [8, 12, 20, 24, 10, 40, 112, 56, 204, 535, 70, 72, - 74, 76, 86, 96, 100, 854, 108, 120, 140, 148, 162, - 166, 178, 191, 531, 196, 203, 384, 208, 818, 226, - 233, 234, 246, 250, 260, 266, 270, 276, 288, 292, - 300, 304, 831, 324, 624, 334, 336, 348, 352, 372, - 833, 380, 832, 
404, 408, 983, 428, 426, 430, 434, - 438, 440, 442, 470, 478, 175, 498, 492, 499, 504, - 516, 528, 562, 566, 807, 578, 275, 616, 620, 642, - 643, 646, 638, 652, 663, 666, 674, 678, 686, 688, - 690, 694, 702, 703, 705, 239, 728, 724, 729, 744, - 752, 756, 768, 788, 792, 804, 826, 581, 732, 894, - 248]} - iso3a = {'NA1': ['AIA', 'ATG', 'ARG', 'ABW', 'BHS', 'BRB', 'BLZ', 'BMU', - 'BOL', 'CPV', 'CYM', 'CHL', 'COL', 'CRI', 'CUB', 'DMA', - 'DOM', 'ECU', 'SLV', 'FLK', 'GUF', 'GRD', 'GLP', 'GTM', - 'GUY', 'HTI', 'HND', 'JAM', 'MTQ', 'MEX', 'MSR', 'NIC', - 'PAN', 'PRY', 'PER', 'PRI', 'SHN', 'KNA', 'LCA', 'VCT', - 'SXM', 'SUR', 'TTO', 'TCA', 'URY', 'VEN', 'VGB', 'VIR'], - 'NA2': ['CAN', 'USA'], - 'NI': ['AFG', 'ARM', 'AZE', 'BHR', 'BGD', 'BTN', 'DJI', 'ERI', - 'ETH', 'GEO', 'IND', 'IRN', 'IRQ', 'ISR', 'JOR', 'KAZ', - 'KWT', 'KGZ', 'LBN', 'MDV', 'MNG', 'MMR', 'NPL', 'OMN', - 'PAK', 'QAT', 'SAU', 'SOM', 'LKA', 'SYR', 'TJK', 'TKM', - 'UGA', 'ARE', 'UZB', 'YEM'], - 'OC': ['ASM', 'AUS', 'COK', 'FJI', 'PYF', 'GUM', 'KIR', 'MHL', - 'FSM', 'NRU', 'NCL', 'NZL', 'NIU', 'NFK', 'MNP', 'PLW', - 'PNG', 'PCN', 'WSM', 'SLB', 'TLS', 'TKL', 'TON', 'TUV', - 'VUT', 'WLF'], - 'SI': ['COM', 'COD', 'SWZ', 'MDG', 'MWI', 'MLI', 'MUS', 'MOZ', - 'ZAF', 'TZA', 'ZWE'], - 'WP1': ['KHM', 'IDN', 'LAO', 'MYS', 'THA', 'VNM'], - 'WP2': ['PHL'], - 'WP3': ['CHN'], - 'WP4': ['HKG', 'JPN', 'KOR', 'MAC', 'TWN'], - 'ROW': ['ALB', 'DZA', 'AND', 'AGO', 'ATA', 'AUT', 'BLR', 'BEL', - 'BEN', 'BES', 'BIH', 'BWA', 'BVT', 'BRA', 'IOT', 'BRN', - 'BGR', 'BFA', 'BDI', 'CMR', 'CAF', 'TCD', 'CXR', 'CCK', - 'COG', 'HRV', 'CUW', 'CYP', 'CZE', 'CIV', 'DNK', 'EGY', - 'GNQ', 'EST', 'FRO', 'FIN', 'FRA', 'ATF', 'GAB', 'GMB', - 'DEU', 'GHA', 'GIB', 'GRC', 'GRL', 'GGY', 'GIN', 'GNB', - 'HMD', 'VAT', 'HUN', 'ISL', 'IRL', 'IMN', 'ITA', 'JEY', - 'KEN', 'PRK', 'XKX', 'LVA', 'LSO', 'LBR', 'LBY', 'LIE', - 'LTU', 'LUX', 'MLT', 'MRT', 'MYT', 'MDA', 'MCO', 'MNE', - 'MAR', 'NAM', 'NLD', 'NER', 'NGA', 'MKD', 'NOR', 'PSE', - 'POL', 'PRT', 'ROU', 'RUS', 'RWA', 'REU', 'BLM', 'MAF', - 'SPM', 'SMR', 'STP', 'SEN', 'SRB', 'SYC', 'SLE', 'SGP', - 'SVK', 'SVN', 'SGS', 'SSD', 'ESP', 'SDN', 'SJM', 'SWE', - 'CHE', 'TGO', 'TUN', 'TUR', 'UKR', 'GBR', 'UMI', 'ESH', - 'ZMB', 'ALA']} - impf_id = {'NA1': 1, 'NA2': 2, 'NI': 3, 'OC': 4, 'SI': 5, - 'WP1': 6, 'WP2': 7, 'WP3': 8, 'WP4': 9, 'ROW': 10} + region = "all" + iso3n = { + "NA1": [ + 660, + 28, + 32, + 533, + 44, + 52, + 84, + 60, + 68, + 132, + 136, + 152, + 170, + 188, + 192, + 212, + 214, + 218, + 222, + 238, + 254, + 308, + 312, + 320, + 328, + 332, + 340, + 388, + 474, + 484, + 500, + 558, + 591, + 600, + 604, + 630, + 654, + 659, + 662, + 670, + 534, + 740, + 780, + 796, + 858, + 862, + 92, + 850, + ], + "NA2": [124, 840], + "NI": [ + 4, + 51, + 31, + 48, + 50, + 64, + 262, + 232, + 231, + 268, + 356, + 364, + 368, + 376, + 400, + 398, + 414, + 417, + 422, + 462, + 496, + 104, + 524, + 512, + 586, + 634, + 682, + 706, + 144, + 760, + 762, + 795, + 800, + 784, + 860, + 887, + ], + "OC": [ + 16, + 36, + 184, + 242, + 258, + 316, + 296, + 584, + 583, + 520, + 540, + 554, + 570, + 574, + 580, + 585, + 598, + 612, + 882, + 90, + 626, + 772, + 776, + 798, + 548, + 876, + ], + "SI": [174, 180, 748, 450, 454, 466, 480, 508, 710, 834, 716], + "WP1": [116, 360, 418, 458, 764, 704], + "WP2": [608], + "WP3": [156], + "WP4": [344, 392, 410, 446, 158], + "ROW": [ + 8, + 12, + 20, + 24, + 10, + 40, + 112, + 56, + 204, + 535, + 70, + 72, + 74, + 76, + 86, + 96, + 100, + 854, + 108, + 120, + 140, + 148, + 162, + 166, + 178, + 191, + 531, + 196, + 
203, + 384, + 208, + 818, + 226, + 233, + 234, + 246, + 250, + 260, + 266, + 270, + 276, + 288, + 292, + 300, + 304, + 831, + 324, + 624, + 334, + 336, + 348, + 352, + 372, + 833, + 380, + 832, + 404, + 408, + 983, + 428, + 426, + 430, + 434, + 438, + 440, + 442, + 470, + 478, + 175, + 498, + 492, + 499, + 504, + 516, + 528, + 562, + 566, + 807, + 578, + 275, + 616, + 620, + 642, + 643, + 646, + 638, + 652, + 663, + 666, + 674, + 678, + 686, + 688, + 690, + 694, + 702, + 703, + 705, + 239, + 728, + 724, + 729, + 744, + 752, + 756, + 768, + 788, + 792, + 804, + 826, + 581, + 732, + 894, + 248, + ], + } + iso3a = { + "NA1": [ + "AIA", + "ATG", + "ARG", + "ABW", + "BHS", + "BRB", + "BLZ", + "BMU", + "BOL", + "CPV", + "CYM", + "CHL", + "COL", + "CRI", + "CUB", + "DMA", + "DOM", + "ECU", + "SLV", + "FLK", + "GUF", + "GRD", + "GLP", + "GTM", + "GUY", + "HTI", + "HND", + "JAM", + "MTQ", + "MEX", + "MSR", + "NIC", + "PAN", + "PRY", + "PER", + "PRI", + "SHN", + "KNA", + "LCA", + "VCT", + "SXM", + "SUR", + "TTO", + "TCA", + "URY", + "VEN", + "VGB", + "VIR", + ], + "NA2": ["CAN", "USA"], + "NI": [ + "AFG", + "ARM", + "AZE", + "BHR", + "BGD", + "BTN", + "DJI", + "ERI", + "ETH", + "GEO", + "IND", + "IRN", + "IRQ", + "ISR", + "JOR", + "KAZ", + "KWT", + "KGZ", + "LBN", + "MDV", + "MNG", + "MMR", + "NPL", + "OMN", + "PAK", + "QAT", + "SAU", + "SOM", + "LKA", + "SYR", + "TJK", + "TKM", + "UGA", + "ARE", + "UZB", + "YEM", + ], + "OC": [ + "ASM", + "AUS", + "COK", + "FJI", + "PYF", + "GUM", + "KIR", + "MHL", + "FSM", + "NRU", + "NCL", + "NZL", + "NIU", + "NFK", + "MNP", + "PLW", + "PNG", + "PCN", + "WSM", + "SLB", + "TLS", + "TKL", + "TON", + "TUV", + "VUT", + "WLF", + ], + "SI": [ + "COM", + "COD", + "SWZ", + "MDG", + "MWI", + "MLI", + "MUS", + "MOZ", + "ZAF", + "TZA", + "ZWE", + ], + "WP1": ["KHM", "IDN", "LAO", "MYS", "THA", "VNM"], + "WP2": ["PHL"], + "WP3": ["CHN"], + "WP4": ["HKG", "JPN", "KOR", "MAC", "TWN"], + "ROW": [ + "ALB", + "DZA", + "AND", + "AGO", + "ATA", + "AUT", + "BLR", + "BEL", + "BEN", + "BES", + "BIH", + "BWA", + "BVT", + "BRA", + "IOT", + "BRN", + "BGR", + "BFA", + "BDI", + "CMR", + "CAF", + "TCD", + "CXR", + "CCK", + "COG", + "HRV", + "CUW", + "CYP", + "CZE", + "CIV", + "DNK", + "EGY", + "GNQ", + "EST", + "FRO", + "FIN", + "FRA", + "ATF", + "GAB", + "GMB", + "DEU", + "GHA", + "GIB", + "GRC", + "GRL", + "GGY", + "GIN", + "GNB", + "HMD", + "VAT", + "HUN", + "ISL", + "IRL", + "IMN", + "ITA", + "JEY", + "KEN", + "PRK", + "XKX", + "LVA", + "LSO", + "LBR", + "LBY", + "LIE", + "LTU", + "LUX", + "MLT", + "MRT", + "MYT", + "MDA", + "MCO", + "MNE", + "MAR", + "NAM", + "NLD", + "NER", + "NGA", + "MKD", + "NOR", + "PSE", + "POL", + "PRT", + "ROU", + "RUS", + "RWA", + "REU", + "BLM", + "MAF", + "SPM", + "SMR", + "STP", + "SEN", + "SRB", + "SYC", + "SLE", + "SGP", + "SVK", + "SVN", + "SGS", + "SSD", + "ESP", + "SDN", + "SJM", + "SWE", + "CHE", + "TGO", + "TUN", + "TUR", + "UKR", + "GBR", + "UMI", + "ESH", + "ZMB", + "ALA", + ], + } + impf_id = { + "NA1": 1, + "NA2": 2, + "NI": 3, + "OC": 4, + "SI": 5, + "WP1": 6, + "WP2": 7, + "WP3": 8, + "WP4": 9, + "ROW": 10, + } region_name = dict() - region_name['NA1'] = 'Caribbean and Mexico' - region_name['NA2'] = 'USA and Canada' - region_name['NI'] = 'North Indian' - region_name['OC'] = 'Oceania' - region_name['SI'] = 'South Indian' - region_name['WP1'] = 'South East Asia' - region_name['WP2'] = 'Philippines' - region_name['WP3'] = 'China Mainland' - region_name['WP4'] = 'North West Pacific' - - if region == 'all': + region_name["NA1"] = "Caribbean and 
Mexico" + region_name["NA2"] = "USA and Canada" + region_name["NI"] = "North Indian" + region_name["OC"] = "Oceania" + region_name["SI"] = "South Indian" + region_name["WP1"] = "South East Asia" + region_name["WP2"] = "Philippines" + region_name["WP3"] = "China Mainland" + region_name["WP4"] = "North West Pacific" + + if region == "all": return region_name, impf_id, iso3n, iso3a return region_name[region], impf_id[region], iso3n[region], iso3a[region] -@deprecated(details="The class name IFTropCyclone is deprecated and won't be supported in a future " - +"version. Use ImpfTropCyclone instead") +@deprecated( + details="The class name IFTropCyclone is deprecated and won't be supported in a future " + + "version. Use ImpfTropCyclone instead" +) class IFTropCyclone(ImpfTropCyclone): """Is ImpfTropCyclone now""" diff --git a/climada/entity/measures/__init__.py b/climada/entity/measures/__init__.py index 24cc74455..36d925045 100755 --- a/climada/entity/measures/__init__.py +++ b/climada/entity/measures/__init__.py @@ -18,5 +18,6 @@ init measures """ + from .base import * from .measure_set import * diff --git a/climada/entity/measures/base.py b/climada/entity/measures/base.py index 40c4cac4e..93505feb3 100755 --- a/climada/entity/measures/base.py +++ b/climada/entity/measures/base.py @@ -19,7 +19,7 @@ Define Measure class. """ -__all__ = ['Measure'] +__all__ = ["Measure"] import copy import logging @@ -30,20 +30,21 @@ import pandas as pd from geopandas import GeoDataFrame -from climada.entity.exposures.base import Exposures, INDICATOR_IMPF, INDICATOR_CENTR -from climada.hazard.base import Hazard import climada.util.checker as u_check +from climada.entity.exposures.base import INDICATOR_CENTR, INDICATOR_IMPF, Exposures +from climada.hazard.base import Hazard LOGGER = logging.getLogger(__name__) IMPF_ID_FACT = 1000 """Factor internally used as id for impact functions when region selected.""" -NULL_STR = 'nil' +NULL_STR = "nil" """String considered as no path in measures exposures_set and hazard_set or no string in imp_fun_map""" -class Measure(): + +class Measure: """ Contains the definition of one measure. @@ -99,7 +100,7 @@ def __init__( risk_transf_attach: float = 0, risk_transf_cover: float = 0, risk_transf_cost_factor: float = 1, - color_rgb: Optional[np.ndarray] = None + color_rgb: Optional[np.ndarray] = None, ): """Initialize a Measure object with given values. 
@@ -173,10 +174,10 @@ def check(self): ------ ValueError """ - u_check.size([3, 4], self.color_rgb, 'Measure.color_rgb') - u_check.size(2, self.hazard_inten_imp, 'Measure.hazard_inten_imp') - u_check.size(2, self.mdd_impact, 'Measure.mdd_impact') - u_check.size(2, self.paa_impact, 'Measure.paa_impact') + u_check.size([3, 4], self.color_rgb, "Measure.color_rgb") + u_check.size(2, self.hazard_inten_imp, "Measure.hazard_inten_imp") + u_check.size(2, self.mdd_impact, "Measure.mdd_impact") + u_check.size(2, self.paa_impact, "Measure.paa_impact") def calc_impact(self, exposures, imp_fun_set, hazard, assign_centroids=True): """ @@ -240,7 +241,8 @@ def apply(self, exposures, imp_fun_set, hazard): new_haz = self._cutoff_hazard_damage(new_exp, new_impfs, new_haz) # apply all previous changes only to the selected exposures new_exp, new_impfs, new_haz = self._filter_exposures( - exposures, imp_fun_set, hazard, new_exp, new_impfs, new_haz) + exposures, imp_fun_set, hazard, new_exp, new_impfs, new_haz + ) return new_exp, new_impfs, new_haz @@ -260,9 +262,13 @@ def _calc_impact(self, new_exp, new_impfs, new_haz, assign_centroids): ------- climada.engine.Impact """ - from climada.engine.impact_calc import ImpactCalc # pylint: disable=import-outside-toplevel - imp = ImpactCalc(new_exp, new_impfs, new_haz)\ - .impact(save_mat=False, assign_centroids=assign_centroids) + from climada.engine.impact_calc import ( + ImpactCalc, # pylint: disable=import-outside-toplevel + ) + + imp = ImpactCalc(new_exp, new_impfs, new_haz).impact( + save_mat=False, assign_centroids=assign_centroids + ) return imp.calc_risk_transfer(self.risk_transf_attach, self.risk_transf_cover) def _change_all_hazard(self, hazard): @@ -282,7 +288,7 @@ def _change_all_hazard(self, hazard): if self.hazard_set == NULL_STR: return hazard - LOGGER.debug('Setting new hazard %s', self.hazard_set) + LOGGER.debug("Setting new hazard %s", self.hazard_set) new_haz = Hazard.from_hdf5(self.hazard_set) new_haz.check() return new_haz @@ -305,21 +311,26 @@ def _change_all_exposures(self, exposures): return exposures if isinstance(self.exposures_set, (str, Path)): - LOGGER.debug('Setting new exposures %s', self.exposures_set) + LOGGER.debug("Setting new exposures %s", self.exposures_set) new_exp = Exposures.from_hdf5(self.exposures_set) new_exp.check() elif isinstance(self.exposures_set, Exposures): - LOGGER.debug('Setting new exposures. ') + LOGGER.debug("Setting new exposures. 
") new_exp = self.exposures_set.copy(deep=True) new_exp.check() else: - raise ValueError(f'{self.exposures_set} is neither a string nor an Exposures object') + raise ValueError( + f"{self.exposures_set} is neither a string nor an Exposures object" + ) - if not np.array_equal(np.unique(exposures.gdf['latitude'].values), - np.unique(new_exp.gdf['latitude'].values)) or \ - not np.array_equal(np.unique(exposures.gdf['longitude'].values), - np.unique(new_exp.gdf['longitude'].values)): - LOGGER.warning('Exposures locations have changed.') + if not np.array_equal( + np.unique(exposures.gdf["latitude"].values), + np.unique(new_exp.gdf["latitude"].values), + ) or not np.array_equal( + np.unique(exposures.gdf["longitude"].values), + np.unique(new_exp.gdf["longitude"].values), + ): + LOGGER.warning("Exposures locations have changed.") return new_exp @@ -340,10 +351,10 @@ def _change_exposures_impf(self, exposures): if self.imp_fun_map == NULL_STR: return exposures - LOGGER.debug('Setting new exposures impact functions%s', self.imp_fun_map) + LOGGER.debug("Setting new exposures impact functions%s", self.imp_fun_map) new_exp = exposures.copy(deep=True) - from_id = int(self.imp_fun_map[0:self.imp_fun_map.find('to')]) - to_id = int(self.imp_fun_map[self.imp_fun_map.find('to') + 2:]) + from_id = int(self.imp_fun_map[0 : self.imp_fun_map.find("to")]) + to_id = int(self.imp_fun_map[self.imp_fun_map.find("to") + 2 :]) try: exp_change = np.argwhere( new_exp.gdf[INDICATOR_IMPF + self.haz_type].values == from_id @@ -371,22 +382,29 @@ def _change_imp_func(self, imp_set): ImpactFuncSet with measure applied to each impact function according to the defined hazard type """ - if self.hazard_inten_imp == (1, 0) and self.mdd_impact == (1, 0)\ - and self.paa_impact == (1, 0): + if ( + self.hazard_inten_imp == (1, 0) + and self.mdd_impact == (1, 0) + and self.paa_impact == (1, 0) + ): return imp_set new_imp_set = copy.deepcopy(imp_set) for imp_fun in new_imp_set.get_func(self.haz_type): - LOGGER.debug('Transforming impact functions.') + LOGGER.debug("Transforming impact functions.") imp_fun.intensity = np.maximum( - imp_fun.intensity * self.hazard_inten_imp[0] - self.hazard_inten_imp[1], 0.0) + imp_fun.intensity * self.hazard_inten_imp[0] - self.hazard_inten_imp[1], + 0.0, + ) imp_fun.mdd = np.maximum( - imp_fun.mdd * self.mdd_impact[0] + self.mdd_impact[1], 0.0) + imp_fun.mdd * self.mdd_impact[0] + self.mdd_impact[1], 0.0 + ) imp_fun.paa = np.maximum( - imp_fun.paa * self.paa_impact[0] + self.paa_impact[1], 0.0) + imp_fun.paa * self.paa_impact[0] + self.paa_impact[1], 0.0 + ) if not new_imp_set.size(): - LOGGER.info('No impact function of hazard %s found.', self.haz_type) + LOGGER.info("No impact function of hazard %s found.", self.haz_type) return new_imp_set @@ -415,31 +433,39 @@ def _cutoff_hazard_damage(self, exposures, impf_set, hazard): if self.exp_region_id: # compute impact only in selected region in_reg = np.logical_or.reduce( - [exposures.gdf['region_id'].values == reg for reg in self.exp_region_id] + [exposures.gdf["region_id"].values == reg for reg in self.exp_region_id] ) exp_imp = Exposures(exposures.gdf[in_reg], crs=exposures.crs) else: exp_imp = exposures - from climada.engine.impact_calc import ImpactCalc # pylint: disable=import-outside-toplevel - imp = ImpactCalc(exp_imp, impf_set, hazard)\ - .impact(assign_centroids=hazard.centr_exp_col not in exp_imp.gdf) + from climada.engine.impact_calc import ( + ImpactCalc, # pylint: disable=import-outside-toplevel + ) + + imp = ImpactCalc(exp_imp, impf_set, 
hazard).impact( + assign_centroids=hazard.centr_exp_col not in exp_imp.gdf + ) - LOGGER.debug('Cutting events whose damage have a frequency > %s.', - self.hazard_freq_cutoff) + LOGGER.debug( + "Cutting events whose damage have a frequency > %s.", + self.hazard_freq_cutoff, + ) new_haz = copy.deepcopy(hazard) sort_idxs = np.argsort(imp.at_event)[::-1] exceed_freq = np.cumsum(imp.frequency[sort_idxs]) cutoff = exceed_freq > self.hazard_freq_cutoff sel_haz = sort_idxs[cutoff] for row in sel_haz: - new_haz.intensity.data[new_haz.intensity.indptr[row]: - new_haz.intensity.indptr[row + 1]] = 0 + new_haz.intensity.data[ + new_haz.intensity.indptr[row] : new_haz.intensity.indptr[row + 1] + ] = 0 new_haz.intensity.eliminate_zeros() return new_haz - def _filter_exposures(self, exposures, imp_set, hazard, new_exp, new_impfs, - new_haz): + def _filter_exposures( + self, exposures, imp_set, hazard, new_exp, new_impfs, new_haz + ): """ Incorporate changes of new elements to previous ones only for the selected exp_region_id. If exp_region_id is [], all new changes @@ -479,38 +505,49 @@ def _filter_exposures(self, exposures, imp_set, hazard, new_exp, new_impfs, fun_ids = list(new_impfs.get_func()[self.haz_type].keys()) for key in fun_ids: new_impfs.get_func()[self.haz_type][key].id = key + IMPF_ID_FACT - new_impfs.get_func()[self.haz_type][key + IMPF_ID_FACT] = \ - new_impfs.get_func()[self.haz_type][key] + new_impfs.get_func()[self.haz_type][ + key + IMPF_ID_FACT + ] = new_impfs.get_func()[self.haz_type][key] try: new_exp.gdf[INDICATOR_IMPF + self.haz_type] += IMPF_ID_FACT except KeyError: new_exp.gdf[INDICATOR_IMPF] += IMPF_ID_FACT # collect old impact functions as well (used by exposures) - new_impfs.get_func()[self.haz_type].update(imp_set.get_func()[self.haz_type]) + new_impfs.get_func()[self.haz_type].update( + imp_set.get_func()[self.haz_type] + ) # get the indices for changing and inert regions - chg_reg = exposures.gdf['region_id'].isin(self.exp_region_id) + chg_reg = exposures.gdf["region_id"].isin(self.exp_region_id) no_chg_reg = ~chg_reg - LOGGER.debug('Number of changed exposures: %s', chg_reg.sum()) + LOGGER.debug("Number of changed exposures: %s", chg_reg.sum()) # concatenate previous and new exposures new_exp.set_gdf( GeoDataFrame( - pd.concat([ - exposures.gdf[no_chg_reg], # old values for inert regions - new_exp.gdf[chg_reg] # new values for changing regions - ]).loc[exposures.gdf.index,:], # re-establish old order + pd.concat( + [ + exposures.gdf[no_chg_reg], # old values for inert regions + new_exp.gdf[chg_reg], # new values for changing regions + ] + ).loc[ + exposures.gdf.index, : + ], # re-establish old order ), - crs=exposures.crs + crs=exposures.crs, ) # set missing values of centr_ - if INDICATOR_CENTR + self.haz_type in new_exp.gdf.columns \ - and np.isnan(new_exp.gdf[INDICATOR_CENTR + self.haz_type].values).any(): + if ( + INDICATOR_CENTR + self.haz_type in new_exp.gdf.columns + and np.isnan(new_exp.gdf[INDICATOR_CENTR + self.haz_type].values).any() + ): new_exp.gdf.drop(columns=INDICATOR_CENTR + self.haz_type, inplace=True) - elif INDICATOR_CENTR in new_exp.gdf.columns \ - and np.isnan(new_exp.gdf[INDICATOR_CENTR].values).any(): + elif ( + INDICATOR_CENTR in new_exp.gdf.columns + and np.isnan(new_exp.gdf[INDICATOR_CENTR].values).any() + ): new_exp.gdf.drop(columns=INDICATOR_CENTR, inplace=True) # put hazard intensities outside region to previous intensities diff --git a/climada/entity/measures/measure_set.py b/climada/entity/measures/measure_set.py index 31a413797..90a2bb43c 
100755 --- a/climada/entity/measures/measure_set.py +++ b/climada/entity/measures/measure_set.py @@ -19,70 +19,75 @@ Define MeasureSet class. """ -__all__ = ['MeasureSet'] +__all__ = ["MeasureSet"] import ast import copy import logging -from typing import Optional, List +from typing import List, Optional -from matplotlib import colormaps as cm import numpy as np import pandas as pd import xlsxwriter +from matplotlib import colormaps as cm -from climada.entity.measures.base import Measure import climada.util.hdf5_handler as u_hdf5 +from climada.entity.measures.base import Measure LOGGER = logging.getLogger(__name__) -DEF_VAR_MAT = {'sup_field_name': 'entity', - 'field_name': 'measures', - 'var_name': {'name': 'name', - 'color': 'color', - 'cost': 'cost', - 'haz_int_a': 'hazard_intensity_impact_a', - 'haz_int_b': 'hazard_intensity_impact_b', - 'haz_frq': 'hazard_high_frequency_cutoff', - 'haz_set': 'hazard_event_set', - 'mdd_a': 'MDD_impact_a', - 'mdd_b': 'MDD_impact_b', - 'paa_a': 'PAA_impact_a', - 'paa_b': 'PAA_impact_b', - 'fun_map': 'damagefunctions_map', - 'exp_set': 'assets_file', - 'exp_reg': 'Region_ID', - 'risk_att': 'risk_transfer_attachement', - 'risk_cov': 'risk_transfer_cover', - 'haz': 'peril_ID' - } - } +DEF_VAR_MAT = { + "sup_field_name": "entity", + "field_name": "measures", + "var_name": { + "name": "name", + "color": "color", + "cost": "cost", + "haz_int_a": "hazard_intensity_impact_a", + "haz_int_b": "hazard_intensity_impact_b", + "haz_frq": "hazard_high_frequency_cutoff", + "haz_set": "hazard_event_set", + "mdd_a": "MDD_impact_a", + "mdd_b": "MDD_impact_b", + "paa_a": "PAA_impact_a", + "paa_b": "PAA_impact_b", + "fun_map": "damagefunctions_map", + "exp_set": "assets_file", + "exp_reg": "Region_ID", + "risk_att": "risk_transfer_attachement", + "risk_cov": "risk_transfer_cover", + "haz": "peril_ID", + }, +} """MATLAB variable names""" -DEF_VAR_EXCEL = {'sheet_name': 'measures', - 'col_name': {'name': 'name', - 'color': 'color', - 'cost': 'cost', - 'haz_int_a': 'hazard intensity impact a', - 'haz_int_b': 'hazard intensity impact b', - 'haz_frq': 'hazard high frequency cutoff', - 'haz_set': 'hazard event set', - 'mdd_a': 'MDD impact a', - 'mdd_b': 'MDD impact b', - 'paa_a': 'PAA impact a', - 'paa_b': 'PAA impact b', - 'fun_map': 'damagefunctions map', - 'exp_set': 'assets file', - 'exp_reg': 'Region_ID', - 'risk_att': 'risk transfer attachement', - 'risk_cov': 'risk transfer cover', - 'risk_fact': 'risk transfer cost factor', - 'haz': 'peril_ID' - } - } +DEF_VAR_EXCEL = { + "sheet_name": "measures", + "col_name": { + "name": "name", + "color": "color", + "cost": "cost", + "haz_int_a": "hazard intensity impact a", + "haz_int_b": "hazard intensity impact b", + "haz_frq": "hazard high frequency cutoff", + "haz_set": "hazard event set", + "mdd_a": "MDD impact a", + "mdd_b": "MDD impact b", + "paa_a": "PAA impact a", + "paa_b": "PAA impact b", + "fun_map": "damagefunctions map", + "exp_set": "assets file", + "exp_reg": "Region_ID", + "risk_att": "risk transfer attachement", + "risk_cov": "risk transfer cover", + "risk_fact": "risk transfer cost factor", + "haz": "peril_ID", + }, +} """Excel variable names""" -class MeasureSet(): + +class MeasureSet: """Contains measures of type Measure. Loads from files with format defined in FILE_EXT. @@ -93,10 +98,7 @@ class MeasureSet(): Use the available methods instead. 
""" - def __init__( - self, - measure_list: Optional[List[Measure]] = None - ): + def __init__(self, measure_list: Optional[List[Measure]] = None): """Initialize a new MeasureSet object with specified data. Parameters @@ -136,7 +138,9 @@ def clear(self, _data: Optional[dict] = None): A dict containing the Measure objects. For internal use only: It's not suppossed to be set directly. Use the class methods instead. """ - self._data = _data if _data is not None else dict() # {hazard_type : {name: Measure()}} + self._data = ( + _data if _data is not None else dict() + ) # {hazard_type : {name: Measure()}} def append(self, meas): """Append an Measure. Override if same name and haz_type. @@ -175,8 +179,7 @@ def remove_measure(self, haz_type=None, name=None): try: del self._data[haz_type][name] except KeyError: - LOGGER.info("No Measure with hazard %s and id %s.", - haz_type, name) + LOGGER.info("No Measure with hazard %s and id %s.", haz_type, name) elif haz_type is not None: try: del self._data[haz_type] @@ -212,8 +215,7 @@ def get_measure(self, haz_type=None, name=None): try: return self._data[haz_type][name] except KeyError: - LOGGER.info("No Measure with hazard %s and id %s.", - haz_type, name) + LOGGER.info("No Measure with hazard %s and id %s.", haz_type, name) return list() elif haz_type is not None: try: @@ -295,8 +297,11 @@ def size(self, haz_type=None, name=None): ------- int """ - if (haz_type is not None) and (name is not None) and \ - (isinstance(self.get_measure(haz_type, name), Measure)): + if ( + (haz_type is not None) + and (name is not None) + and (isinstance(self.get_measure(haz_type, name), Measure)) + ): return 1 if (haz_type is not None) or (name is not None): return len(self.get_measure(haz_type, name)) @@ -310,14 +315,16 @@ def check(self): ValueError """ for key_haz, meas_dict in self._data.items(): - def_color = cm.get_cmap('Greys').resampled(len(meas_dict)) + def_color = cm.get_cmap("Greys").resampled(len(meas_dict)) for i_meas, (name, meas) in enumerate(meas_dict.items()): - if (name != meas.name) | (name == ''): - raise ValueError("Wrong Measure.name: %s != %s." - % (name, meas.name)) + if (name != meas.name) | (name == ""): + raise ValueError( + "Wrong Measure.name: %s != %s." % (name, meas.name) + ) if key_haz != meas.haz_type: - raise ValueError("Wrong Measure.haz_type: %s != %s." - % (key_haz, meas.haz_type)) + raise ValueError( + "Wrong Measure.haz_type: %s != %s." 
% (key_haz, meas.haz_type) + ) # set default color if not set if np.array_equal(meas.color_rgb, np.zeros(3)): meas.color_rgb = def_color(i_meas) @@ -366,46 +373,60 @@ def from_mat(cls, file_name, var_names=None): """ if var_names is None: var_names = DEF_VAR_MAT + def read_att_mat(measures, data, file_name, var_names): """Read MATLAB measures attributes""" - num_mes = len(data[var_names['var_name']['name']]) + num_mes = len(data[var_names["var_name"]["name"]]) for idx in range(0, num_mes): color_str = u_hdf5.get_str_from_ref( - file_name, data[var_names['var_name']['color']][idx][0]) + file_name, data[var_names["var_name"]["color"]][idx][0] + ) try: hazard_inten_imp = ( - data[var_names['var_name']['haz_int_a']][idx][0], - data[var_names['var_name']['haz_int_b']][0][idx]) + data[var_names["var_name"]["haz_int_a"]][idx][0], + data[var_names["var_name"]["haz_int_b"]][0][idx], + ) except KeyError: hazard_inten_imp = ( - data[var_names['var_name']['haz_int_a'][:-2]][idx][0], 0) + data[var_names["var_name"]["haz_int_a"][:-2]][idx][0], + 0, + ) meas_kwargs = dict( name=u_hdf5.get_str_from_ref( - file_name, data[var_names['var_name']['name']][idx][0]), - color_rgb=np.fromstring(color_str, dtype=float, sep=' '), - cost=data[var_names['var_name']['cost']][idx][0], + file_name, data[var_names["var_name"]["name"]][idx][0] + ), + color_rgb=np.fromstring(color_str, dtype=float, sep=" "), + cost=data[var_names["var_name"]["cost"]][idx][0], haz_type=u_hdf5.get_str_from_ref( - file_name, data[var_names['var_name']['haz']][idx][0]), - hazard_freq_cutoff=data[var_names['var_name']['haz_frq']][idx][0], + file_name, data[var_names["var_name"]["haz"]][idx][0] + ), + hazard_freq_cutoff=data[var_names["var_name"]["haz_frq"]][idx][0], hazard_set=u_hdf5.get_str_from_ref( - file_name, data[var_names['var_name']['haz_set']][idx][0]), + file_name, data[var_names["var_name"]["haz_set"]][idx][0] + ), hazard_inten_imp=hazard_inten_imp, # different convention of signs followed in MATLAB! 
- mdd_impact=(data[var_names['var_name']['mdd_a']][idx][0], - data[var_names['var_name']['mdd_b']][idx][0]), - paa_impact=(data[var_names['var_name']['paa_a']][idx][0], - data[var_names['var_name']['paa_b']][idx][0]), + mdd_impact=( + data[var_names["var_name"]["mdd_a"]][idx][0], + data[var_names["var_name"]["mdd_b"]][idx][0], + ), + paa_impact=( + data[var_names["var_name"]["paa_a"]][idx][0], + data[var_names["var_name"]["paa_b"]][idx][0], + ), imp_fun_map=u_hdf5.get_str_from_ref( - file_name, data[var_names['var_name']['fun_map']][idx][0]), + file_name, data[var_names["var_name"]["fun_map"]][idx][0] + ), exposures_set=u_hdf5.get_str_from_ref( - file_name, data[var_names['var_name']['exp_set']][idx][0]), - risk_transf_attach=data[var_names['var_name']['risk_att']][idx][0], - risk_transf_cover=data[var_names['var_name']['risk_cov']][idx][0], + file_name, data[var_names["var_name"]["exp_set"]][idx][0] + ), + risk_transf_attach=data[var_names["var_name"]["risk_att"]][idx][0], + risk_transf_cover=data[var_names["var_name"]["risk_cov"]][idx][0], ) - exp_region_id = data[var_names['var_name']['exp_reg']][idx][0] + exp_region_id = data[var_names["var_name"]["exp_reg"]][idx][0] if exp_region_id: meas_kwargs["exp_region_id"] = [exp_region_id] @@ -414,12 +435,12 @@ def read_att_mat(measures, data, file_name, var_names): data = u_hdf5.read(file_name) meas_set = cls() try: - data = data[var_names['sup_field_name']] + data = data[var_names["sup_field_name"]] except KeyError: pass try: - data = data[var_names['field_name']] + data = data[var_names["field_name"]] read_att_mat(meas_set, data, file_name, var_names) except KeyError as var_err: raise KeyError("Variable not in MAT file: " + str(var_err)) from var_err @@ -428,8 +449,10 @@ def read_att_mat(measures, data, file_name, var_names): def read_mat(self, *args, **kwargs): """This function is deprecated, use MeasureSet.from_mat instead.""" - LOGGER.warning("The use of MeasureSet.read_mat is deprecated." - "Use MeasureSet.from_mat instead.") + LOGGER.warning( + "The use of MeasureSet.read_mat is deprecated." + "Use MeasureSet.from_mat instead." 
+ ) self.__dict__ = MeasureSet.from_mat(*args, **kwargs).__dict__ @classmethod @@ -452,63 +475,76 @@ def from_excel(cls, file_name, var_names=None): """ if var_names is None: var_names = DEF_VAR_EXCEL + def read_att_excel(measures, dfr, var_names): """Read Excel measures attributes""" num_mes = len(dfr.index) for idx in range(0, num_mes): # Search for (a, b) values, put a=1 otherwise try: - hazard_inten_imp = (dfr[var_names['col_name']['haz_int_a']][idx], - dfr[var_names['col_name']['haz_int_b']][idx]) + hazard_inten_imp = ( + dfr[var_names["col_name"]["haz_int_a"]][idx], + dfr[var_names["col_name"]["haz_int_b"]][idx], + ) except KeyError: - hazard_inten_imp = (1, dfr['hazard intensity impact'][idx]) + hazard_inten_imp = (1, dfr["hazard intensity impact"][idx]) meas_kwargs = dict( - name=dfr[var_names['col_name']['name']][idx], - cost=dfr[var_names['col_name']['cost']][idx], - hazard_freq_cutoff=dfr[var_names['col_name']['haz_frq']][idx], - hazard_set=dfr[var_names['col_name']['haz_set']][idx], + name=dfr[var_names["col_name"]["name"]][idx], + cost=dfr[var_names["col_name"]["cost"]][idx], + hazard_freq_cutoff=dfr[var_names["col_name"]["haz_frq"]][idx], + hazard_set=dfr[var_names["col_name"]["haz_set"]][idx], hazard_inten_imp=hazard_inten_imp, - mdd_impact=(dfr[var_names['col_name']['mdd_a']][idx], - dfr[var_names['col_name']['mdd_b']][idx]), - paa_impact=(dfr[var_names['col_name']['paa_a']][idx], - dfr[var_names['col_name']['paa_b']][idx]), - imp_fun_map=dfr[var_names['col_name']['fun_map']][idx], - risk_transf_attach=dfr[var_names['col_name']['risk_att']][idx], - risk_transf_cover=dfr[var_names['col_name']['risk_cov']][idx], + mdd_impact=( + dfr[var_names["col_name"]["mdd_a"]][idx], + dfr[var_names["col_name"]["mdd_b"]][idx], + ), + paa_impact=( + dfr[var_names["col_name"]["paa_a"]][idx], + dfr[var_names["col_name"]["paa_b"]][idx], + ), + imp_fun_map=dfr[var_names["col_name"]["fun_map"]][idx], + risk_transf_attach=dfr[var_names["col_name"]["risk_att"]][idx], + risk_transf_cover=dfr[var_names["col_name"]["risk_cov"]][idx], color_rgb=np.fromstring( - dfr[var_names['col_name']['color']][idx], dtype=float, sep=' '), + dfr[var_names["col_name"]["color"]][idx], dtype=float, sep=" " + ), ) try: - meas_kwargs["haz_type"] = dfr[var_names['col_name']['haz']][idx] + meas_kwargs["haz_type"] = dfr[var_names["col_name"]["haz"]][idx] except KeyError: pass try: - meas_kwargs["exposures_set"] = dfr[var_names['col_name']['exp_set']][idx] + meas_kwargs["exposures_set"] = dfr[ + var_names["col_name"]["exp_set"] + ][idx] except KeyError: pass try: meas_kwargs["exp_region_id"] = ast.literal_eval( - dfr[var_names['col_name']['exp_reg']][idx]) + dfr[var_names["col_name"]["exp_reg"]][idx] + ) except KeyError: pass except ValueError: - meas_kwargs["exp_region_id"] = dfr[var_names['col_name']['exp_reg']][idx] + meas_kwargs["exp_region_id"] = dfr[ + var_names["col_name"]["exp_reg"] + ][idx] try: - meas_kwargs["risk_transf_cost_factor"] = ( - dfr[var_names['col_name']['risk_fact']][idx] - ) + meas_kwargs["risk_transf_cost_factor"] = dfr[ + var_names["col_name"]["risk_fact"] + ][idx] except KeyError: pass measures.append(Measure(**meas_kwargs)) - dfr = pd.read_excel(file_name, var_names['sheet_name']) - dfr = dfr.fillna('') + dfr = pd.read_excel(file_name, var_names["sheet_name"]) + dfr = dfr.fillna("") meas_set = cls() try: read_att_excel(meas_set, dfr, var_names) @@ -519,8 +555,10 @@ def read_att_excel(measures, dfr, var_names): def read_excel(self, *args, **kwargs): """This function is deprecated, use 
MeasureSet.from_excel instead.""" - LOGGER.warning("The use ofMeasureSet.read_excel is deprecated." - "Use MeasureSet.from_excel instead.") + LOGGER.warning( + "The use of MeasureSet.read_excel is deprecated. " + "Use MeasureSet.from_excel instead." + ) self.__dict__ = MeasureSet.from_excel(*args, **kwargs).__dict__ def write_excel(self, file_name, var_names=None): @@ -535,33 +573,56 @@ def write_excel(self, file_name, var_names=None): """ if var_names is None: var_names = DEF_VAR_EXCEL + def write_meas(row_ini, imp_ws, xls_data): """Write one measure""" for icol, col_dat in enumerate(xls_data): imp_ws.write(row_ini, icol, col_dat) meas_wb = xlsxwriter.Workbook(file_name) - mead_ws = meas_wb.add_worksheet(var_names['sheet_name']) - - header = [var_names['col_name']['name'], var_names['col_name']['color'], - var_names['col_name']['cost'], var_names['col_name']['haz_int_a'], - var_names['col_name']['haz_int_b'], var_names['col_name']['haz_frq'], - var_names['col_name']['haz_set'], var_names['col_name']['mdd_a'], - var_names['col_name']['mdd_b'], var_names['col_name']['paa_a'], - var_names['col_name']['paa_b'], var_names['col_name']['fun_map'], - var_names['col_name']['exp_set'], var_names['col_name']['exp_reg'], - var_names['col_name']['risk_att'], var_names['col_name']['risk_cov'], - var_names['col_name']['haz']] + mead_ws = meas_wb.add_worksheet(var_names["sheet_name"]) + + header = [ + var_names["col_name"]["name"], + var_names["col_name"]["color"], + var_names["col_name"]["cost"], + var_names["col_name"]["haz_int_a"], + var_names["col_name"]["haz_int_b"], + var_names["col_name"]["haz_frq"], + var_names["col_name"]["haz_set"], + var_names["col_name"]["mdd_a"], + var_names["col_name"]["mdd_b"], + var_names["col_name"]["paa_a"], + var_names["col_name"]["paa_b"], + var_names["col_name"]["fun_map"], + var_names["col_name"]["exp_set"], + var_names["col_name"]["exp_reg"], + var_names["col_name"]["risk_att"], + var_names["col_name"]["risk_cov"], + var_names["col_name"]["haz"], + ] for icol, head_dat in enumerate(header): mead_ws.write(0, icol, head_dat) for row_ini, (_, haz_dict) in enumerate(self._data.items(), 1): for meas_name, meas in haz_dict.items(): - xls_data = [meas_name, ' '.join(list(map(str, meas.color_rgb))), - meas.cost, meas.hazard_inten_imp[0], - meas.hazard_inten_imp[1], meas.hazard_freq_cutoff, - meas.hazard_set, meas.mdd_impact[0], meas.mdd_impact[1], - meas.paa_impact[0], meas.paa_impact[1], meas.imp_fun_map, - meas.exposures_set, str(meas.exp_region_id), meas.risk_transf_attach, - meas.risk_transf_cover, meas.haz_type] + xls_data = [ + meas_name, + " ".join(list(map(str, meas.color_rgb))), + meas.cost, + meas.hazard_inten_imp[0], + meas.hazard_inten_imp[1], + meas.hazard_freq_cutoff, + meas.hazard_set, + meas.mdd_impact[0], + meas.mdd_impact[1], + meas.paa_impact[0], + meas.paa_impact[1], + meas.imp_fun_map, + meas.exposures_set, + str(meas.exp_region_id), + meas.risk_transf_attach, + meas.risk_transf_cover, + meas.haz_type, + ] write_meas(row_ini, mead_ws, xls_data) meas_wb.close() diff --git a/climada/entity/measures/test/test_base.py b/climada/entity/measures/test/test_base.py index 84ac988c4..520229ffc 100644 --- a/climada/entity/measures/test/test_base.py +++ b/climada/entity/measures/test/test_base.py @@ -18,73 +18,131 @@ Test MeasureSet and Measure classes.
""" -import unittest + import copy +import unittest from pathlib import Path import numpy as np +import climada.entity.exposures.test as exposures_test +import climada.util.coordinates as u_coord from climada import CONFIG -from climada.hazard.base import Hazard from climada.entity.entity_def import Entity -from climada.entity.exposures.base import Exposures, INDICATOR_IMPF -from climada.entity.impact_funcs.impact_func_set import ImpactFuncSet +from climada.entity.exposures.base import INDICATOR_IMPF, Exposures from climada.entity.impact_funcs.base import ImpactFunc +from climada.entity.impact_funcs.impact_func_set import ImpactFuncSet +from climada.entity.measures.base import IMPF_ID_FACT, Measure from climada.entity.measures.measure_set import MeasureSet -from climada.entity.measures.base import Measure, IMPF_ID_FACT -from climada.util.constants import EXP_DEMO_H5, HAZ_DEMO_H5 +from climada.hazard.base import Hazard from climada.test import get_test_file -import climada.util.coordinates as u_coord -import climada.entity.exposures.test as exposures_test +from climada.util.constants import EXP_DEMO_H5, HAZ_DEMO_H5 DATA_DIR = CONFIG.measures.test_data.dir() -HAZ_TEST_TC :Path = get_test_file('test_tc_florida', file_format='hdf5') +HAZ_TEST_TC: Path = get_test_file("test_tc_florida", file_format="hdf5") """ Hazard test file from Data API: Hurricanes from 1851 to 2011 over Florida with 100 centroids. Fraction is empty. Format: HDF5. """ -ENT_TEST_MAT = Path(exposures_test.__file__).parent / 'data' / 'demo_today.mat' +ENT_TEST_MAT = Path(exposures_test.__file__).parent / "data" / "demo_today.mat" + class TestApply(unittest.TestCase): """Test implement measures functions.""" + def test_change_imp_func_pass(self): """Test _change_imp_func""" meas = MeasureSet.from_mat(ENT_TEST_MAT) - act_1 = meas.get_measure(name='Mangroves')[0] + act_1 = meas.get_measure(name="Mangroves")[0] - haz_type = 'XX' + haz_type = "XX" idx = 1 intensity = np.arange(10, 100, 10) - intensity[0] = 0. - intensity[-1] = 100. 
- mdd = np.array([0.0, 0.0, 0.021857142857143, 0.035887500000000, - 0.053977415307403, 0.103534246575342, 0.180414000000000, - 0.410796000000000, 0.410796000000000]) - paa = np.array([0, 0.005000000000000, 0.042000000000000, 0.160000000000000, - 0.398500000000000, 0.657000000000000, 1.000000000000000, - 1.000000000000000, 1.000000000000000]) + intensity[0] = 0.0 + intensity[-1] = 100.0 + mdd = np.array( + [ + 0.0, + 0.0, + 0.021857142857143, + 0.035887500000000, + 0.053977415307403, + 0.103534246575342, + 0.180414000000000, + 0.410796000000000, + 0.410796000000000, + ] + ) + paa = np.array( + [ + 0, + 0.005000000000000, + 0.042000000000000, + 0.160000000000000, + 0.398500000000000, + 0.657000000000000, + 1.000000000000000, + 1.000000000000000, + 1.000000000000000, + ] + ) imp_tc = ImpactFunc(haz_type, idx, intensity, mdd, paa) imp_set = ImpactFuncSet([imp_tc]) - new_imp = act_1._change_imp_func(imp_set).get_func('XX')[0] - - self.assertTrue(np.array_equal(new_imp.intensity, np.array([4., 24., 34., 44., - 54., 64., 74., 84., 104.]))) - self.assertTrue(np.array_equal(new_imp.mdd, np.array([0, 0, 0.021857142857143, 0.035887500000000, - 0.053977415307403, 0.103534246575342, 0.180414000000000, 0.410796000000000, 0.410796000000000]))) - self.assertTrue(np.array_equal(new_imp.paa, np.array([0, 0.005000000000000, 0.042000000000000, - 0.160000000000000, 0.398500000000000, 0.657000000000000, 1.000000000000000, - 1.000000000000000, 1.000000000000000]))) + new_imp = act_1._change_imp_func(imp_set).get_func("XX")[0] + + self.assertTrue( + np.array_equal( + new_imp.intensity, + np.array([4.0, 24.0, 34.0, 44.0, 54.0, 64.0, 74.0, 84.0, 104.0]), + ) + ) + self.assertTrue( + np.array_equal( + new_imp.mdd, + np.array( + [ + 0, + 0, + 0.021857142857143, + 0.035887500000000, + 0.053977415307403, + 0.103534246575342, + 0.180414000000000, + 0.410796000000000, + 0.410796000000000, + ] + ), + ) + ) + self.assertTrue( + np.array_equal( + new_imp.paa, + np.array( + [ + 0, + 0.005000000000000, + 0.042000000000000, + 0.160000000000000, + 0.398500000000000, + 0.657000000000000, + 1.000000000000000, + 1.000000000000000, + 1.000000000000000, + ] + ), + ) + ) self.assertFalse(id(new_imp) == id(imp_tc)) def test_cutoff_hazard_pass(self): """Test _cutoff_hazard_damage""" meas = MeasureSet.from_mat(ENT_TEST_MAT) - act_1 = meas.get_measure(name='Seawall')[0] + act_1 = meas.get_measure(name="Seawall")[0] haz = Hazard.from_hdf5(HAZ_TEST_TC) exp = Exposures.from_mat(ENT_TEST_MAT) - exp.gdf.rename(columns={'impf': 'impf_TC'}, inplace=True) + exp.gdf.rename(columns={"impf": "impf_TC"}, inplace=True) exp.check() exp.assign_centroids(haz) @@ -94,32 +152,99 @@ def test_cutoff_hazard_pass(self): self.assertFalse(id(new_haz) == id(haz)) - pos_no_null = np.array([6249, 7697, 9134, 13500, 13199, 5944, 9052, 9050, 2429, - 5139, 9053, 7102, 4096, 1070, 5948, 1076, 5947, 7432, - 5949, 11694, 5484, 6246, 12147, 778, 3326, 7199, 12498, - 11698, 6245, 5327, 4819, 8677, 5970, 7101, 779, 3894, - 9051, 5976, 3329, 5978, 4282, 11697, 7193, 5351, 7310, - 7478, 5489, 5526, 7194, 4283, 7191, 5328, 4812, 5528, - 5527, 5488, 7475, 5529, 776, 5758, 4811, 6223, 7479, - 7470, 5480, 5325, 7477, 7318, 7317, 11696, 7313, 13165, - 6221]) + pos_no_null = np.array( + [ + 6249, + 7697, + 9134, + 13500, + 13199, + 5944, + 9052, + 9050, + 2429, + 5139, + 9053, + 7102, + 4096, + 1070, + 5948, + 1076, + 5947, + 7432, + 5949, + 11694, + 5484, + 6246, + 12147, + 778, + 3326, + 7199, + 12498, + 11698, + 6245, + 5327, + 4819, + 8677, + 5970, + 7101, + 779, + 3894, + 
9051, + 5976, + 3329, + 5978, + 4282, + 11697, + 7193, + 5351, + 7310, + 7478, + 5489, + 5526, + 7194, + 4283, + 7191, + 5328, + 4812, + 5528, + 5527, + 5488, + 7475, + 5529, + 776, + 5758, + 4811, + 6223, + 7479, + 7470, + 5480, + 5325, + 7477, + 7318, + 7317, + 11696, + 7313, + 13165, + 6221, + ] + ) all_haz = np.arange(haz.intensity.shape[0]) all_haz[pos_no_null] = -1 pos_null = np.argwhere(all_haz > 0).reshape(-1) for i_ev in pos_null: self.assertEqual(new_haz.intensity[i_ev, :].max(), 0) - def test_cutoff_hazard_region_pass(self): """Test _cutoff_hazard_damage in specific region""" meas = MeasureSet.from_mat(ENT_TEST_MAT) - act_1 = meas.get_measure(name='Seawall')[0] + act_1 = meas.get_measure(name="Seawall")[0] act_1.exp_region_id = [1] haz = Hazard.from_hdf5(HAZ_TEST_TC) exp = Exposures.from_mat(ENT_TEST_MAT) - exp.gdf['region_id'] = np.zeros(exp.gdf.shape[0]) - exp.gdf['region_id'].values[10:] = 1 + exp.gdf["region_id"] = np.zeros(exp.gdf.shape[0]) + exp.gdf["region_id"].values[10:] = 1 exp.check() exp.assign_centroids(haz) @@ -129,27 +254,95 @@ def test_cutoff_hazard_region_pass(self): self.assertFalse(id(new_haz) == id(haz)) - pos_no_null = np.array([6249, 7697, 9134, 13500, 13199, 5944, 9052, 9050, 2429, - 5139, 9053, 7102, 4096, 1070, 5948, 1076, 5947, 7432, - 5949, 11694, 5484, 6246, 12147, 778, 3326, 7199, 12498, - 11698, 6245, 5327, 4819, 8677, 5970, 7101, 779, 3894, - 9051, 5976, 3329, 5978, 4282, 11697, 7193, 5351, 7310, - 7478, 5489, 5526, 7194, 4283, 7191, 5328, 4812, 5528, - 5527, 5488, 7475, 5529, 776, 5758, 4811, 6223, 7479, - 7470, 5480, 5325, 7477, 7318, 7317, 11696, 7313, 13165, - 6221]) + pos_no_null = np.array( + [ + 6249, + 7697, + 9134, + 13500, + 13199, + 5944, + 9052, + 9050, + 2429, + 5139, + 9053, + 7102, + 4096, + 1070, + 5948, + 1076, + 5947, + 7432, + 5949, + 11694, + 5484, + 6246, + 12147, + 778, + 3326, + 7199, + 12498, + 11698, + 6245, + 5327, + 4819, + 8677, + 5970, + 7101, + 779, + 3894, + 9051, + 5976, + 3329, + 5978, + 4282, + 11697, + 7193, + 5351, + 7310, + 7478, + 5489, + 5526, + 7194, + 4283, + 7191, + 5328, + 4812, + 5528, + 5527, + 5488, + 7475, + 5529, + 776, + 5758, + 4811, + 6223, + 7479, + 7470, + 5480, + 5325, + 7477, + 7318, + 7317, + 11696, + 7313, + 13165, + 6221, + ] + ) all_haz = np.arange(haz.intensity.shape[0]) all_haz[pos_no_null] = -1 pos_null = np.argwhere(all_haz > 0).reshape(-1) - centr_null = np.unique(exp.gdf['centr_'][exp.gdf['region_id'] == 0]) + centr_null = np.unique(exp.gdf["centr_"][exp.gdf["region_id"] == 0]) for i_ev in pos_null: self.assertEqual(new_haz.intensity[i_ev, centr_null].max(), 0) def test_change_exposures_impf_pass(self): """Test _change_exposures_impf""" meas = Measure( - imp_fun_map='1to3', - haz_type='TC', + imp_fun_map="1to3", + haz_type="TC", ) imp_set = ImpactFuncSet() @@ -170,11 +363,26 @@ def test_change_exposures_impf_pass(self): self.assertEqual(new_exp.ref_year, exp.ref_year) self.assertEqual(new_exp.value_unit, exp.value_unit) self.assertEqual(new_exp.description, exp.description) - self.assertTrue(np.array_equal(new_exp.gdf['value'].values, exp.gdf['value'].values)) - self.assertTrue(np.array_equal(new_exp.gdf['latitude'].values, exp.gdf['latitude'].values)) - self.assertTrue(np.array_equal(new_exp.gdf['longitude'].values, exp.gdf['longitude'].values)) - self.assertTrue(np.array_equal(exp.gdf[INDICATOR_IMPF + 'TC'].values, np.ones(new_exp.gdf.shape[0]))) - self.assertTrue(np.array_equal(new_exp.gdf[INDICATOR_IMPF + 'TC'].values, np.ones(new_exp.gdf.shape[0]) * 3)) + self.assertTrue( + 
np.array_equal(new_exp.gdf["value"].values, exp.gdf["value"].values) + ) + self.assertTrue( + np.array_equal(new_exp.gdf["latitude"].values, exp.gdf["latitude"].values) + ) + self.assertTrue( + np.array_equal(new_exp.gdf["longitude"].values, exp.gdf["longitude"].values) + ) + self.assertTrue( + np.array_equal( + exp.gdf[INDICATOR_IMPF + "TC"].values, np.ones(new_exp.gdf.shape[0]) + ) + ) + self.assertTrue( + np.array_equal( + new_exp.gdf[INDICATOR_IMPF + "TC"].values, + np.ones(new_exp.gdf.shape[0]) * 3, + ) + ) def test_change_all_hazard_pass(self): """Test _change_all_hazard method""" @@ -182,14 +390,16 @@ def test_change_all_hazard_pass(self): ref_haz = Hazard.from_hdf5(HAZ_DEMO_H5) - hazard = Hazard('TC') + hazard = Hazard("TC") new_haz = meas._change_all_hazard(hazard) self.assertEqual(new_haz.haz_type, ref_haz.haz_type) self.assertTrue(np.array_equal(new_haz.frequency, ref_haz.frequency)) self.assertTrue(np.array_equal(new_haz.date, ref_haz.date)) self.assertTrue(np.array_equal(new_haz.orig, ref_haz.orig)) - self.assertTrue(np.array_equal(new_haz.centroids.coord, ref_haz.centroids.coord)) + self.assertTrue( + np.array_equal(new_haz.centroids.coord, ref_haz.centroids.coord) + ) self.assertTrue(np.array_equal(new_haz.intensity.data, ref_haz.intensity.data)) self.assertTrue(np.array_equal(new_haz.fraction.data, ref_haz.fraction.data)) @@ -200,16 +410,26 @@ def test_change_all_exposures_pass(self): ref_exp = Exposures.from_hdf5(EXP_DEMO_H5) exposures = Exposures() - exposures.gdf['latitude'] = np.ones(10) - exposures.gdf['longitude'] = np.ones(10) + exposures.gdf["latitude"] = np.ones(10) + exposures.gdf["longitude"] = np.ones(10) new_exp = meas._change_all_exposures(exposures) self.assertEqual(new_exp.ref_year, ref_exp.ref_year) self.assertEqual(new_exp.value_unit, ref_exp.value_unit) self.assertEqual(new_exp.description, ref_exp.description) - self.assertTrue(np.array_equal(new_exp.gdf['value'].values, ref_exp.gdf['value'].values)) - self.assertTrue(np.array_equal(new_exp.gdf['latitude'].values, ref_exp.gdf['latitude'].values)) - self.assertTrue(np.array_equal(new_exp.gdf['longitude'].values, ref_exp.gdf['longitude'].values)) + self.assertTrue( + np.array_equal(new_exp.gdf["value"].values, ref_exp.gdf["value"].values) + ) + self.assertTrue( + np.array_equal( + new_exp.gdf["latitude"].values, ref_exp.gdf["latitude"].values + ) + ) + self.assertTrue( + np.array_equal( + new_exp.gdf["longitude"].values, ref_exp.gdf["longitude"].values + ) + ) def test_not_filter_exposures_pass(self): """Test _filter_exposures method with []""" @@ -217,14 +437,15 @@ def test_not_filter_exposures_pass(self): exp = Exposures() imp_set = ImpactFuncSet() - haz = Hazard('TC') + haz = Hazard("TC") new_exp = Exposures() new_impfs = ImpactFuncSet() - new_haz = Hazard('TC') + new_haz = Hazard("TC") - res_exp, res_ifs, res_haz = meas._filter_exposures(exp, imp_set, haz, - new_exp, new_impfs, new_haz) + res_exp, res_ifs, res_haz = meas._filter_exposures( + exp, imp_set, haz, new_exp, new_impfs, new_haz + ) self.assertTrue(res_exp is new_exp) self.assertTrue(res_ifs is new_impfs) @@ -238,14 +459,14 @@ def test_filter_exposures_pass(self): """Test _filter_exposures method with two values""" meas = Measure( exp_region_id=[3, 4], - haz_type='TC', + haz_type="TC", ) exp = Exposures.from_mat(ENT_TEST_MAT) - exp.gdf.rename(columns={'impf_': 'impf_TC', 'centr_': 'centr_TC'}, inplace=True) - exp.gdf['region_id'] = np.ones(exp.gdf.shape[0]) - exp.gdf['region_id'].values[:exp.gdf.shape[0] // 2] = 3 - exp.gdf['region_id'][0] = 4 
+ exp.gdf.rename(columns={"impf_": "impf_TC", "centr_": "centr_TC"}, inplace=True) + exp.gdf["region_id"] = np.ones(exp.gdf.shape[0]) + exp.gdf["region_id"].values[: exp.gdf.shape[0] // 2] = 3 + exp.gdf["region_id"][0] = 4 exp.check() imp_set = ImpactFuncSet.from_mat(ENT_TEST_MAT) @@ -254,20 +475,21 @@ def test_filter_exposures_pass(self): exp.assign_centroids(haz) new_exp = copy.deepcopy(exp) - new_exp.gdf['value'] *= 3 - new_exp.gdf['impf_TC'].values[:20] = 2 - new_exp.gdf['impf_TC'].values[20:40] = 3 - new_exp.gdf['impf_TC'].values[40:] = 1 + new_exp.gdf["value"] *= 3 + new_exp.gdf["impf_TC"].values[:20] = 2 + new_exp.gdf["impf_TC"].values[20:40] = 3 + new_exp.gdf["impf_TC"].values[40:] = 1 new_ifs = copy.deepcopy(imp_set) - new_ifs.get_func('TC')[1].intensity += 1 + new_ifs.get_func("TC")[1].intensity += 1 ref_ifs = copy.deepcopy(new_ifs) new_haz = copy.deepcopy(haz) new_haz.intensity *= 4 - res_exp, res_ifs, res_haz = meas._filter_exposures(exp, imp_set, haz, - new_exp.copy(deep=True), new_ifs, new_haz) + res_exp, res_ifs, res_haz = meas._filter_exposures( + exp, imp_set, haz, new_exp.copy(deep=True), new_ifs, new_haz + ) # unchanged meta data self.assertEqual(res_exp.ref_year, exp.ref_year) @@ -278,93 +500,267 @@ def test_filter_exposures_pass(self): self.assertFalse(hasattr(res_exp.gdf, "crs")) # regions (that is just input data, no need for testing, but it makes the changed and unchanged parts obious) - self.assertTrue(np.array_equal(res_exp.gdf['region_id'].values[0], 4)) - self.assertTrue(np.array_equal(res_exp.gdf['region_id'].values[1:25], np.ones(24) * 3)) - self.assertTrue(np.array_equal(res_exp.gdf['region_id'].values[25:], np.ones(25))) + self.assertTrue(np.array_equal(res_exp.gdf["region_id"].values[0], 4)) + self.assertTrue( + np.array_equal(res_exp.gdf["region_id"].values[1:25], np.ones(24) * 3) + ) + self.assertTrue( + np.array_equal(res_exp.gdf["region_id"].values[25:], np.ones(25)) + ) # changed exposures - self.assertTrue(np.array_equal(res_exp.gdf['value'].values[:25], new_exp.gdf['value'].values[:25])) - self.assertTrue(np.all(np.not_equal(res_exp.gdf['value'].values[:25], exp.gdf['value'].values[:25]))) - self.assertTrue(np.all(np.not_equal(res_exp.gdf['impf_TC'].values[:25], new_exp.gdf['impf_TC'].values[:25]))) - self.assertTrue(np.array_equal(res_exp.gdf['latitude'].values[:25], new_exp.gdf['latitude'].values[:25])) - self.assertTrue(np.array_equal(res_exp.gdf['longitude'].values[:25], new_exp.gdf['longitude'].values[:25])) + self.assertTrue( + np.array_equal( + res_exp.gdf["value"].values[:25], new_exp.gdf["value"].values[:25] + ) + ) + self.assertTrue( + np.all( + np.not_equal( + res_exp.gdf["value"].values[:25], exp.gdf["value"].values[:25] + ) + ) + ) + self.assertTrue( + np.all( + np.not_equal( + res_exp.gdf["impf_TC"].values[:25], + new_exp.gdf["impf_TC"].values[:25], + ) + ) + ) + self.assertTrue( + np.array_equal( + res_exp.gdf["latitude"].values[:25], new_exp.gdf["latitude"].values[:25] + ) + ) + self.assertTrue( + np.array_equal( + res_exp.gdf["longitude"].values[:25], + new_exp.gdf["longitude"].values[:25], + ) + ) # unchanged exposures - self.assertTrue(np.array_equal(res_exp.gdf['value'].values[25:], exp.gdf['value'].values[25:])) - self.assertTrue(np.all(np.not_equal(res_exp.gdf['value'].values[25:], new_exp.gdf['value'].values[25:]))) - self.assertTrue(np.array_equal(res_exp.gdf['impf_TC'].values[25:], exp.gdf['impf_TC'].values[25:])) - self.assertTrue(np.array_equal(res_exp.gdf['latitude'].values[25:], exp.gdf['latitude'].values[25:])) - 
self.assertTrue(np.array_equal(res_exp.gdf['longitude'].values[25:], exp.gdf['longitude'].values[25:])) + self.assertTrue( + np.array_equal( + res_exp.gdf["value"].values[25:], exp.gdf["value"].values[25:] + ) + ) + self.assertTrue( + np.all( + np.not_equal( + res_exp.gdf["value"].values[25:], new_exp.gdf["value"].values[25:] + ) + ) + ) + self.assertTrue( + np.array_equal( + res_exp.gdf["impf_TC"].values[25:], exp.gdf["impf_TC"].values[25:] + ) + ) + self.assertTrue( + np.array_equal( + res_exp.gdf["latitude"].values[25:], exp.gdf["latitude"].values[25:] + ) + ) + self.assertTrue( + np.array_equal( + res_exp.gdf["longitude"].values[25:], exp.gdf["longitude"].values[25:] + ) + ) # unchanged impact functions self.assertEqual(list(res_ifs.get_func().keys()), [meas.haz_type]) - self.assertEqual(res_ifs.get_func()[meas.haz_type][1].id, imp_set.get_func()[meas.haz_type][1].id) - self.assertTrue(np.array_equal(res_ifs.get_func()[meas.haz_type][1].intensity, - imp_set.get_func()[meas.haz_type][1].intensity)) - self.assertEqual(res_ifs.get_func()[meas.haz_type][3].id, imp_set.get_func()[meas.haz_type][3].id) - self.assertTrue(np.array_equal(res_ifs.get_func()[meas.haz_type][3].intensity, - imp_set.get_func()[meas.haz_type][3].intensity)) + self.assertEqual( + res_ifs.get_func()[meas.haz_type][1].id, + imp_set.get_func()[meas.haz_type][1].id, + ) + self.assertTrue( + np.array_equal( + res_ifs.get_func()[meas.haz_type][1].intensity, + imp_set.get_func()[meas.haz_type][1].intensity, + ) + ) + self.assertEqual( + res_ifs.get_func()[meas.haz_type][3].id, + imp_set.get_func()[meas.haz_type][3].id, + ) + self.assertTrue( + np.array_equal( + res_ifs.get_func()[meas.haz_type][3].intensity, + imp_set.get_func()[meas.haz_type][3].intensity, + ) + ) # changed impact functions - self.assertTrue(np.array_equal(res_ifs.get_func()[meas.haz_type][1 + IMPF_ID_FACT].intensity, - ref_ifs.get_func()[meas.haz_type][1].intensity)) - self.assertTrue(np.array_equal(res_ifs.get_func()[meas.haz_type][1 + IMPF_ID_FACT].paa, - ref_ifs.get_func()[meas.haz_type][1].paa)) - self.assertTrue(np.array_equal(res_ifs.get_func()[meas.haz_type][1 + IMPF_ID_FACT].mdd, - ref_ifs.get_func()[meas.haz_type][1].mdd)) - self.assertTrue(np.array_equal(res_ifs.get_func()[meas.haz_type][3 + IMPF_ID_FACT].intensity, - ref_ifs.get_func()[meas.haz_type][3].intensity)) - self.assertTrue(np.array_equal(res_ifs.get_func()[meas.haz_type][3 + IMPF_ID_FACT].paa, - ref_ifs.get_func()[meas.haz_type][3].paa)) - self.assertTrue(np.array_equal(res_ifs.get_func()[meas.haz_type][3 + IMPF_ID_FACT].mdd, - ref_ifs.get_func()[meas.haz_type][3].mdd)) + self.assertTrue( + np.array_equal( + res_ifs.get_func()[meas.haz_type][1 + IMPF_ID_FACT].intensity, + ref_ifs.get_func()[meas.haz_type][1].intensity, + ) + ) + self.assertTrue( + np.array_equal( + res_ifs.get_func()[meas.haz_type][1 + IMPF_ID_FACT].paa, + ref_ifs.get_func()[meas.haz_type][1].paa, + ) + ) + self.assertTrue( + np.array_equal( + res_ifs.get_func()[meas.haz_type][1 + IMPF_ID_FACT].mdd, + ref_ifs.get_func()[meas.haz_type][1].mdd, + ) + ) + self.assertTrue( + np.array_equal( + res_ifs.get_func()[meas.haz_type][3 + IMPF_ID_FACT].intensity, + ref_ifs.get_func()[meas.haz_type][3].intensity, + ) + ) + self.assertTrue( + np.array_equal( + res_ifs.get_func()[meas.haz_type][3 + IMPF_ID_FACT].paa, + ref_ifs.get_func()[meas.haz_type][3].paa, + ) + ) + self.assertTrue( + np.array_equal( + res_ifs.get_func()[meas.haz_type][3 + IMPF_ID_FACT].mdd, + ref_ifs.get_func()[meas.haz_type][3].mdd, + ) + ) # unchanged hazard - 
self.assertTrue(np.array_equal(res_haz.intensity[:, :36].toarray(), - haz.intensity[:, :36].toarray())) - self.assertTrue(np.array_equal(res_haz.intensity[:, 37:46].toarray(), - haz.intensity[:, 37:46].toarray())) - self.assertTrue(np.array_equal(res_haz.intensity[:, 47:].toarray(), - haz.intensity[:, 47:].toarray())) + self.assertTrue( + np.array_equal( + res_haz.intensity[:, :36].toarray(), haz.intensity[:, :36].toarray() + ) + ) + self.assertTrue( + np.array_equal( + res_haz.intensity[:, 37:46].toarray(), haz.intensity[:, 37:46].toarray() + ) + ) + self.assertTrue( + np.array_equal( + res_haz.intensity[:, 47:].toarray(), haz.intensity[:, 47:].toarray() + ) + ) # changed hazard - self.assertTrue(np.array_equal(res_haz.intensity[[36, 46]].toarray(), - new_haz.intensity[[36, 46]].toarray())) + self.assertTrue( + np.array_equal( + res_haz.intensity[[36, 46]].toarray(), + new_haz.intensity[[36, 46]].toarray(), + ) + ) def test_apply_ref_pass(self): """Test apply method: apply all measures but insurance""" hazard = Hazard.from_hdf5(HAZ_TEST_TC) entity = Entity.from_mat(ENT_TEST_MAT) - entity.measures._data['TC'] = entity.measures._data.pop('XX') - for meas in entity.measures.get_measure('TC'): - meas.haz_type = 'TC' + entity.measures._data["TC"] = entity.measures._data.pop("XX") + for meas in entity.measures.get_measure("TC"): + meas.haz_type = "TC" entity.check() - new_exp, new_ifs, new_haz = entity.measures.get_measure('TC', 'Mangroves').apply(entity.exposures, - entity.impact_funcs, hazard) + new_exp, new_ifs, new_haz = entity.measures.get_measure( + "TC", "Mangroves" + ).apply(entity.exposures, entity.impact_funcs, hazard) self.assertTrue(new_exp is entity.exposures) self.assertTrue(new_haz is hazard) self.assertFalse(new_ifs is entity.impact_funcs) - new_imp = new_ifs.get_func('TC')[0] - self.assertTrue(np.array_equal(new_imp.intensity, np.array([4., 24., 34., 44., - 54., 64., 74., 84., 104.]))) - self.assertTrue(np.allclose(new_imp.mdd, np.array([0, 0, 0.021857142857143, 0.035887500000000, - 0.053977415307403, 0.103534246575342, 0.180414000000000, 0.410796000000000, 0.410796000000000]))) - self.assertTrue(np.allclose(new_imp.paa, np.array([0, 0.005000000000000, 0.042000000000000, - 0.160000000000000, 0.398500000000000, 0.657000000000000, 1.000000000000000, - 1.000000000000000, 1.000000000000000]))) - - new_imp = new_ifs.get_func('TC')[1] - self.assertTrue(np.array_equal(new_imp.intensity, np.array([4., 24., 34., 44., - 54., 64., 74., 84., 104.]))) - self.assertTrue(np.allclose(new_imp.mdd, np.array([0, 0, 0, 0.025000000000000, - 0.054054054054054, 0.104615384615385, 0.211764705882353, 0.400000000000000, 0.400000000000000]))) - self.assertTrue(np.allclose(new_imp.paa, np.array([0, 0.004000000000000, 0, 0.160000000000000, - 0.370000000000000, 0.650000000000000, 0.850000000000000, 1.000000000000000, - 1.000000000000000]))) + new_imp = new_ifs.get_func("TC")[0] + self.assertTrue( + np.array_equal( + new_imp.intensity, + np.array([4.0, 24.0, 34.0, 44.0, 54.0, 64.0, 74.0, 84.0, 104.0]), + ) + ) + self.assertTrue( + np.allclose( + new_imp.mdd, + np.array( + [ + 0, + 0, + 0.021857142857143, + 0.035887500000000, + 0.053977415307403, + 0.103534246575342, + 0.180414000000000, + 0.410796000000000, + 0.410796000000000, + ] + ), + ) + ) + self.assertTrue( + np.allclose( + new_imp.paa, + np.array( + [ + 0, + 0.005000000000000, + 0.042000000000000, + 0.160000000000000, + 0.398500000000000, + 0.657000000000000, + 1.000000000000000, + 1.000000000000000, + 1.000000000000000, + ] + ), + ) + ) + + new_imp = 
new_ifs.get_func("TC")[1] + self.assertTrue( + np.array_equal( + new_imp.intensity, + np.array([4.0, 24.0, 34.0, 44.0, 54.0, 64.0, 74.0, 84.0, 104.0]), + ) + ) + self.assertTrue( + np.allclose( + new_imp.mdd, + np.array( + [ + 0, + 0, + 0, + 0.025000000000000, + 0.054054054054054, + 0.104615384615385, + 0.211764705882353, + 0.400000000000000, + 0.400000000000000, + ] + ), + ) + ) + self.assertTrue( + np.allclose( + new_imp.paa, + np.array( + [ + 0, + 0.004000000000000, + 0, + 0.160000000000000, + 0.370000000000000, + 0.650000000000000, + 0.850000000000000, + 1.000000000000000, + 1.000000000000000, + ] + ), + ) + ) def test_calc_impact_pass(self): """Test calc_impact method: apply all measures but insurance""" @@ -372,46 +768,50 @@ def test_calc_impact_pass(self): hazard = Hazard.from_hdf5(HAZ_TEST_TC) entity = Entity.from_mat(ENT_TEST_MAT) - entity.exposures.gdf.rename(columns={'impf': 'impf_TC'}, inplace=True) - entity.measures._data['TC'] = entity.measures._data.pop('XX') - entity.measures.get_measure(name='Mangroves', haz_type='TC').haz_type = 'TC' - for meas in entity.measures.get_measure('TC'): - meas.haz_type = 'TC' + entity.exposures.gdf.rename(columns={"impf": "impf_TC"}, inplace=True) + entity.measures._data["TC"] = entity.measures._data.pop("XX") + entity.measures.get_measure(name="Mangroves", haz_type="TC").haz_type = "TC" + for meas in entity.measures.get_measure("TC"): + meas.haz_type = "TC" entity.check() - imp, risk_transf = entity.measures.get_measure('TC', 'Mangroves').calc_impact( - entity.exposures, entity.impact_funcs, hazard) + imp, risk_transf = entity.measures.get_measure("TC", "Mangroves").calc_impact( + entity.exposures, entity.impact_funcs, hazard + ) - self.assertAlmostEqual(imp.aai_agg, 4.850407096284983e+09, delta=1) + self.assertAlmostEqual(imp.aai_agg, 4.850407096284983e09, delta=1) self.assertAlmostEqual(imp.at_event[0], 0) - self.assertAlmostEqual(imp.at_event[12], 1.470194187501225e+07) - self.assertAlmostEqual(imp.at_event[41], 4.7226357936631286e+08) - self.assertAlmostEqual(imp.at_event[11890], 1.742110428135755e+07) - self.assertTrue(np.array_equal(imp.coord_exp[:, 0], entity.exposures.gdf['latitude'])) - self.assertTrue(np.array_equal(imp.coord_exp[:, 1], entity.exposures.gdf['longitude'])) - self.assertAlmostEqual(imp.eai_exp[0], 1.15677655725858e+08) - self.assertAlmostEqual(imp.eai_exp[-1], 7.528669956120645e+07) - self.assertAlmostEqual(imp.tot_value, 6.570532945599105e+11) - self.assertEqual(imp.unit, 'USD') + self.assertAlmostEqual(imp.at_event[12], 1.470194187501225e07) + self.assertAlmostEqual(imp.at_event[41], 4.7226357936631286e08) + self.assertAlmostEqual(imp.at_event[11890], 1.742110428135755e07) + self.assertTrue( + np.array_equal(imp.coord_exp[:, 0], entity.exposures.gdf["latitude"]) + ) + self.assertTrue( + np.array_equal(imp.coord_exp[:, 1], entity.exposures.gdf["longitude"]) + ) + self.assertAlmostEqual(imp.eai_exp[0], 1.15677655725858e08) + self.assertAlmostEqual(imp.eai_exp[-1], 7.528669956120645e07) + self.assertAlmostEqual(imp.tot_value, 6.570532945599105e11) + self.assertEqual(imp.unit, "USD") self.assertEqual(imp.imp_mat.shape, (0, 0)) self.assertTrue(np.array_equal(imp.event_id, hazard.event_id)) self.assertTrue(np.array_equal(imp.date, hazard.date)) self.assertEqual(imp.event_name, hazard.event_name) self.assertEqual(risk_transf.aai_agg, 0) - def test_calc_impact_transf_pass(self): """Test calc_impact method: apply all measures and insurance""" hazard = Hazard.from_hdf5(HAZ_TEST_TC) entity = Entity.from_mat(ENT_TEST_MAT) - 
entity.exposures.gdf.rename(columns={'impf': 'impf_TC'}, inplace=True) - entity.measures._data['TC'] = entity.measures._data.pop('XX') - for meas in entity.measures.get_measure('TC'): - meas.haz_type = 'TC' - meas = entity.measures.get_measure(name='Beach nourishment', haz_type='TC') - meas.haz_type = 'TC' + entity.exposures.gdf.rename(columns={"impf": "impf_TC"}, inplace=True) + entity.measures._data["TC"] = entity.measures._data.pop("XX") + for meas in entity.measures.get_measure("TC"): + meas.haz_type = "TC" + meas = entity.measures.get_measure(name="Beach nourishment", haz_type="TC") + meas.haz_type = "TC" meas.hazard_inten_imp = (1, 0) meas.mdd_impact = (1, 0) meas.paa_impact = (1, 0) @@ -419,23 +819,25 @@ def test_calc_impact_transf_pass(self): meas.risk_transf_cover = 1.0e9 entity.check() - imp, risk_transf = entity.measures.get_measure(name='Beach nourishment', haz_type='TC').calc_impact( - entity.exposures, entity.impact_funcs, hazard) + imp, risk_transf = entity.measures.get_measure( + name="Beach nourishment", haz_type="TC" + ).calc_impact(entity.exposures, entity.impact_funcs, hazard) - self.assertAlmostEqual(imp.aai_agg, 6.280804242609713e+09) + self.assertAlmostEqual(imp.aai_agg, 6.280804242609713e09) self.assertAlmostEqual(imp.at_event[0], 0) - self.assertAlmostEqual(imp.at_event[12], 8.648764833437817e+07) + self.assertAlmostEqual(imp.at_event[12], 8.648764833437817e07) self.assertAlmostEqual(imp.at_event[41], 500000000) - self.assertAlmostEqual(imp.at_event[11890], 6.498096646836635e+07) + self.assertAlmostEqual(imp.at_event[11890], 6.498096646836635e07) self.assertTrue(np.array_equal(imp.coord_exp, np.array([]))) self.assertTrue(np.array_equal(imp.eai_exp, np.array([]))) - self.assertAlmostEqual(imp.tot_value, 6.570532945599105e+11) - self.assertEqual(imp.unit, 'USD') + self.assertAlmostEqual(imp.tot_value, 6.570532945599105e11) + self.assertEqual(imp.unit, "USD") self.assertEqual(imp.imp_mat.shape, (0, 0)) self.assertTrue(np.array_equal(imp.event_id, hazard.event_id)) self.assertTrue(np.array_equal(imp.date, hazard.date)) self.assertEqual(imp.event_name, hazard.event_name) - self.assertEqual(risk_transf.aai_agg, 2.3139691495470852e+08) + self.assertEqual(risk_transf.aai_agg, 2.3139691495470852e08) + # Execute Tests if __name__ == "__main__": diff --git a/climada/entity/measures/test/test_meas_set.py b/climada/entity/measures/test/test_meas_set.py index fe2caa1bf..a2cbdc3f1 100644 --- a/climada/entity/measures/test/test_meas_set.py +++ b/climada/entity/measures/test/test_meas_set.py @@ -18,40 +18,48 @@ Test MeasureSet and Measure classes. 
""" + import unittest + import numpy as np from climada import CONFIG from climada.entity.measures.base import Measure from climada.entity.measures.measure_set import MeasureSet -from climada.util.constants import ENT_TEMPLATE_XLS, ENT_DEMO_TODAY +from climada.util.constants import ENT_DEMO_TODAY, ENT_TEMPLATE_XLS DATA_DIR = CONFIG.measures.test_data.dir() -ENT_TEST_MAT = CONFIG.exposures.test_data.dir().joinpath('demo_today.mat') +ENT_TEST_MAT = CONFIG.exposures.test_data.dir().joinpath("demo_today.mat") + class TestConstructor(unittest.TestCase): """Test impact function attributes.""" + def test_attributes_all(self): """All attributes are defined""" meas = MeasureSet() - act_1 = Measure(name='Seawall') - self.assertTrue(hasattr(meas, '_data')) - self.assertTrue(hasattr(act_1, 'name')) - self.assertTrue(hasattr(act_1, 'color_rgb')) - self.assertTrue(hasattr(act_1, 'cost')) - self.assertTrue(hasattr(act_1, 'hazard_freq_cutoff')) - self.assertTrue(hasattr(act_1, 'hazard_inten_imp')) - self.assertTrue(hasattr(act_1, 'mdd_impact')) - self.assertTrue(hasattr(act_1, 'paa_impact')) - self.assertTrue(hasattr(act_1, 'risk_transf_attach')) - self.assertTrue(hasattr(act_1, 'risk_transf_cover')) + act_1 = Measure(name="Seawall") + self.assertTrue(hasattr(meas, "_data")) + self.assertTrue(hasattr(act_1, "name")) + self.assertTrue(hasattr(act_1, "color_rgb")) + self.assertTrue(hasattr(act_1, "cost")) + self.assertTrue(hasattr(act_1, "hazard_freq_cutoff")) + self.assertTrue(hasattr(act_1, "hazard_inten_imp")) + self.assertTrue(hasattr(act_1, "mdd_impact")) + self.assertTrue(hasattr(act_1, "paa_impact")) + self.assertTrue(hasattr(act_1, "risk_transf_attach")) + self.assertTrue(hasattr(act_1, "risk_transf_cover")) + class TestContainer(unittest.TestCase): """Test MeasureSet as container.""" + def test_add_wrong_error(self): """Test error is raised when wrong ImpactFunc provided.""" meas = MeasureSet() - with self.assertLogs('climada.entity.measures.measure_set', level='WARNING') as cm: + with self.assertLogs( + "climada.entity.measures.measure_set", level="WARNING" + ) as cm: meas.append(Measure()) self.assertIn("Input Measure's hazard type not set.", cm.output[0]) @@ -61,196 +69,224 @@ def test_add_wrong_error(self): def test_remove_measure_pass(self): """Test remove_measure removes Measure of MeasureSet correcty.""" - meas = MeasureSet(measure_list=[Measure(name='Mangrove', haz_type='FL')]) - meas.remove_measure(name='Mangrove') + meas = MeasureSet(measure_list=[Measure(name="Mangrove", haz_type="FL")]) + meas.remove_measure(name="Mangrove") self.assertEqual(0, meas.size()) def test_remove_wrong_error(self): """Test error is raised when invalid inputs.""" - meas = MeasureSet(measure_list=[Measure(name='Mangrove', haz_type='FL')]) - with self.assertLogs('climada.entity.measures.measure_set', level='INFO') as cm: - meas.remove_measure(name='Seawall') - self.assertIn('No Measure with name Seawall.', cm.output[0]) + meas = MeasureSet(measure_list=[Measure(name="Mangrove", haz_type="FL")]) + with self.assertLogs("climada.entity.measures.measure_set", level="INFO") as cm: + meas.remove_measure(name="Seawall") + self.assertIn("No Measure with name Seawall.", cm.output[0]) def test_get_names_pass(self): """Test get_names function.""" - meas = MeasureSet(measure_list=[Measure(name='Mangrove', haz_type='FL')]) + meas = MeasureSet(measure_list=[Measure(name="Mangrove", haz_type="FL")]) self.assertEqual(1, len(meas.get_names())) - self.assertEqual({'FL': ['Mangrove']}, meas.get_names()) + self.assertEqual({"FL": 
["Mangrove"]}, meas.get_names()) - meas.append(Measure( - name='Seawall', - haz_type='FL', - )) - self.assertEqual(2, len(meas.get_names('FL'))) - self.assertIn('Mangrove', meas.get_names('FL')) - self.assertIn('Seawall', meas.get_names('FL')) + meas.append( + Measure( + name="Seawall", + haz_type="FL", + ) + ) + self.assertEqual(2, len(meas.get_names("FL"))) + self.assertIn("Mangrove", meas.get_names("FL")) + self.assertIn("Seawall", meas.get_names("FL")) def test_get_measure_pass(self): """Test normal functionality of get_measure method.""" act_1 = Measure( - name='Mangrove', - haz_type='FL', + name="Mangrove", + haz_type="FL", ) meas = MeasureSet(measure_list=[act_1]) - self.assertIs(act_1, meas.get_measure(name='Mangrove')[0]) + self.assertIs(act_1, meas.get_measure(name="Mangrove")[0]) act_2 = Measure( - name='Seawall', - haz_type='FL', + name="Seawall", + haz_type="FL", ) meas.append(act_2) - self.assertIs(act_1, meas.get_measure(name='Mangrove')[0]) - self.assertIs(act_2, meas.get_measure(name='Seawall')[0]) - self.assertEqual(2, len(meas.get_measure('FL'))) + self.assertIs(act_1, meas.get_measure(name="Mangrove")[0]) + self.assertIs(act_2, meas.get_measure(name="Seawall")[0]) + self.assertEqual(2, len(meas.get_measure("FL"))) def test_get_measure_wrong_error(self): """Test get_measure method with wrong inputs.""" - meas = MeasureSet(measure_list=[Measure(name='Seawall', haz_type='FL')]) - self.assertEqual([], meas.get_measure('Mangrove')) + meas = MeasureSet(measure_list=[Measure(name="Seawall", haz_type="FL")]) + self.assertEqual([], meas.get_measure("Mangrove")) def test_num_measures_pass(self): """Test num_measures function.""" meas = MeasureSet() self.assertEqual(0, meas.size()) act_1 = Measure( - name='Mangrove', - haz_type='FL', + name="Mangrove", + haz_type="FL", ) meas.append(act_1) self.assertEqual(1, meas.size()) meas.append(act_1) self.assertEqual(1, meas.size()) - meas.append(Measure( - name='Seawall', - haz_type='FL', - )) + meas.append( + Measure( + name="Seawall", + haz_type="FL", + ) + ) self.assertEqual(2, meas.size()) + class TestChecker(unittest.TestCase): """Test check functionality of the MeasureSet class""" def test_check_wronginten_fail(self): """Wrong intensity definition""" - meas = MeasureSet(measure_list=[ - Measure( - haz_type='TC', - name='Mangrove', - hazard_inten_imp=(1, 2, 3), - color_rgb=np.array([1, 1, 1]), - mdd_impact=(1, 2), - paa_impact=(1, 2), - ), - ]) + meas = MeasureSet( + measure_list=[ + Measure( + haz_type="TC", + name="Mangrove", + hazard_inten_imp=(1, 2, 3), + color_rgb=np.array([1, 1, 1]), + mdd_impact=(1, 2), + paa_impact=(1, 2), + ), + ] + ) with self.assertRaises(ValueError) as cm: meas.check() - self.assertIn('Invalid Measure.hazard_inten_imp size: 2 != 3.', str(cm.exception)) + self.assertIn( + "Invalid Measure.hazard_inten_imp size: 2 != 3.", str(cm.exception) + ) def test_check_wrongColor_fail(self): """Wrong measures definition""" - meas = MeasureSet(measure_list=[ - Measure( - name='Mangrove', - haz_type='DR', - color_rgb=(1, 2), - mdd_impact=(1, 2), - paa_impact=(1, 2), - hazard_inten_imp=(1, 2), - ), - ]) + meas = MeasureSet( + measure_list=[ + Measure( + name="Mangrove", + haz_type="DR", + color_rgb=(1, 2), + mdd_impact=(1, 2), + paa_impact=(1, 2), + hazard_inten_imp=(1, 2), + ), + ] + ) with self.assertRaises(ValueError) as cm: meas.check() - self.assertIn('Invalid Measure.color_rgb size: 2 not in [3, 4].', str(cm.exception)) + self.assertIn( + "Invalid Measure.color_rgb size: 2 not in [3, 4].", str(cm.exception) + ) def 
test_check_wrongMDD_fail(self): """Wrong measures definition""" - meas = MeasureSet(measure_list=[ - Measure( - name='Mangrove', - haz_type='DR', - color_rgb=np.array([1, 1, 1]), - mdd_impact=(1), - paa_impact=(1, 2), - hazard_inten_imp=(1, 2), - ), - ]) + meas = MeasureSet( + measure_list=[ + Measure( + name="Mangrove", + haz_type="DR", + color_rgb=np.array([1, 1, 1]), + mdd_impact=(1), + paa_impact=(1, 2), + hazard_inten_imp=(1, 2), + ), + ] + ) with self.assertRaises(ValueError) as cm: meas.check() - self.assertIn('Measure.mdd_impact has wrong size.', str(cm.exception)) + self.assertIn("Measure.mdd_impact has wrong size.", str(cm.exception)) def test_check_wrongPAA_fail(self): """Wrong measures definition""" - meas = MeasureSet(measure_list=[ - Measure( - name='Mangrove', - haz_type='TC', - color_rgb=np.array([1, 1, 1]), - mdd_impact=(1, 2), - paa_impact=(1, 2, 3, 4), - hazard_inten_imp=(1, 2), - ), - ]) + meas = MeasureSet( + measure_list=[ + Measure( + name="Mangrove", + haz_type="TC", + color_rgb=np.array([1, 1, 1]), + mdd_impact=(1, 2), + paa_impact=(1, 2, 3, 4), + hazard_inten_imp=(1, 2), + ), + ] + ) with self.assertRaises(ValueError) as cm: meas.check() - self.assertIn('Invalid Measure.paa_impact size: 2 != 4.', str(cm.exception)) + self.assertIn("Invalid Measure.paa_impact size: 2 != 4.", str(cm.exception)) def test_check_name_fail(self): """Wrong measures definition""" meas = MeasureSet() - meas._data['FL'] = dict() - meas._data['FL']['LoLo'] = Measure( - name='LaLa', - haz_type='FL', + meas._data["FL"] = dict() + meas._data["FL"]["LoLo"] = Measure( + name="LaLa", + haz_type="FL", ) with self.assertRaises(ValueError) as cm: meas.check() - self.assertIn('Wrong Measure.name: LoLo != LaLa', str(cm.exception)) + self.assertIn("Wrong Measure.name: LoLo != LaLa", str(cm.exception)) def test_def_color(self): """Test default grey scale used when no color set""" - meas = MeasureSet(measure_list=[ - Measure(name='LaLa', haz_type='FL'), - Measure(name='LoLo', haz_type='FL'), - ]) + meas = MeasureSet( + measure_list=[ + Measure(name="LaLa", haz_type="FL"), + Measure(name="LoLo", haz_type="FL"), + ] + ) meas.check() - self.assertTrue(np.array_equal(meas.get_measure('FL', 'LaLa').color_rgb, np.ones(4))) - self.assertTrue(np.allclose(meas.get_measure('FL', 'LoLo').color_rgb, - np.array([0., 0., 0., 1.0]))) + self.assertTrue( + np.array_equal(meas.get_measure("FL", "LaLa").color_rgb, np.ones(4)) + ) + self.assertTrue( + np.allclose( + meas.get_measure("FL", "LoLo").color_rgb, np.array([0.0, 0.0, 0.0, 1.0]) + ) + ) + class TestExtend(unittest.TestCase): """Check extend function""" + def test_extend_to_empty_same(self): """Extend MeasureSet to empty one.""" meas = MeasureSet() - meas_add = MeasureSet(measure_list=[ + meas_add = MeasureSet( + measure_list=[ Measure( - name='Mangrove', - haz_type='TC', + name="Mangrove", + haz_type="TC", color_rgb=np.array([1, 1, 1]), mdd_impact=(1, 2), paa_impact=(1, 2), hazard_inten_imp=(1, 2), ), - ]) + ] + ) meas.extend(meas_add) meas.check() self.assertEqual(meas.size(), 1) - self.assertEqual(meas.get_names(), {'TC': ['Mangrove']}) + self.assertEqual(meas.get_names(), {"TC": ["Mangrove"]}) def test_extend_equal_same(self): """Extend the same MeasureSet. 
The inital MeasureSet is obtained.""" act_1 = Measure( - name='Mangrove', - haz_type='TC', + name="Mangrove", + haz_type="TC", color_rgb=np.array([1, 1, 1]), mdd_impact=(1, 2), paa_impact=(1, 2), @@ -263,14 +299,14 @@ def test_extend_equal_same(self): meas.check() self.assertEqual(meas.size(), 1) - self.assertEqual(meas.get_names(), {'TC': ['Mangrove']}) + self.assertEqual(meas.get_names(), {"TC": ["Mangrove"]}) def test_extend_different_extend(self): """Extend MeasureSet with same and new values. The actions with repeated name are overwritten.""" act_1 = Measure( - name='Mangrove', - haz_type='TC', + name="Mangrove", + haz_type="TC", color_rgb=np.array([1, 1, 1]), mdd_impact=(1, 2), paa_impact=(1, 2), @@ -278,8 +314,8 @@ def test_extend_different_extend(self): ) act_11 = Measure( - name='Mangrove', - haz_type='TC', + name="Mangrove", + haz_type="TC", color_rgb=np.array([1, 1, 1]), mdd_impact=(1, 2), paa_impact=(1, 3), @@ -287,8 +323,8 @@ def test_extend_different_extend(self): ) act_2 = Measure( - name='Anything', - haz_type='TC', + name="Anything", + haz_type="TC", color_rgb=np.array([1, 1, 1]), mdd_impact=(1, 2), paa_impact=(1, 2), @@ -302,8 +338,11 @@ def test_extend_different_extend(self): meas.check() self.assertEqual(meas.size(), 2) - self.assertEqual(meas.get_names(), {'TC': ['Mangrove', 'Anything']}) - self.assertEqual(meas.get_measure(name=act_1.name)[0].paa_impact, act_11.paa_impact) + self.assertEqual(meas.get_names(), {"TC": ["Mangrove", "Anything"]}) + self.assertEqual( + meas.get_measure(name=act_1.name)[0].paa_impact, act_11.paa_impact + ) + class TestReaderExcel(unittest.TestCase): """Test reader functionality of the MeasuresExcel class""" @@ -317,8 +356,8 @@ def test_demo_file(self): self.assertEqual(meas.size(), n_meas) - act_man = meas.get_measure(name='Mangroves')[0] - self.assertEqual(act_man.name, 'Mangroves') + act_man = meas.get_measure(name="Mangroves")[0] + self.assertEqual(act_man.name, "Mangroves") self.assertEqual(type(act_man.color_rgb), np.ndarray) self.assertEqual(len(act_man.color_rgb), 3) self.assertEqual(act_man.color_rgb[0], 0.1529) @@ -332,8 +371,8 @@ def test_demo_file(self): self.assertEqual(act_man.risk_transf_attach, 0) self.assertEqual(act_man.risk_transf_cover, 0) - act_buil = meas.get_measure(name='Building code')[0] - self.assertEqual(act_buil.name, 'Building code') + act_buil = meas.get_measure(name="Building code")[0] + self.assertEqual(act_buil.name, "Building code") self.assertEqual(type(act_buil.color_rgb), np.ndarray) self.assertEqual(len(act_buil.color_rgb), 3) self.assertEqual(act_buil.color_rgb[0], 0.6980) @@ -353,69 +392,77 @@ def test_template_file_pass(self): self.assertEqual(meas.size(), 7) - name = 'elevate existing buildings' + name = "elevate existing buildings" act_buil = meas.get_measure(name=name)[0] self.assertEqual(act_buil.name, name) - self.assertEqual(act_buil.haz_type, 'TS') - self.assertTrue(np.array_equal(act_buil.color_rgb, np.array([0.84, 0.89, 0.70]))) + self.assertEqual(act_buil.haz_type, "TS") + self.assertTrue( + np.array_equal(act_buil.color_rgb, np.array([0.84, 0.89, 0.70])) + ) self.assertEqual(act_buil.cost, 3911963265.476649) - self.assertEqual(act_buil.hazard_set, 'nil') + self.assertEqual(act_buil.hazard_set, "nil") self.assertEqual(act_buil.hazard_freq_cutoff, 0) self.assertEqual(act_buil.hazard_inten_imp, (1, -2)) - self.assertEqual(act_buil.exposures_set, 'nil') + self.assertEqual(act_buil.exposures_set, "nil") self.assertEqual(act_buil.exp_region_id, 0) self.assertEqual(act_buil.paa_impact, (0.9, 0)) 
self.assertEqual(act_buil.mdd_impact, (0.9, -0.1)) - self.assertEqual(act_buil.imp_fun_map, 'nil') + self.assertEqual(act_buil.imp_fun_map, "nil") self.assertEqual(act_buil.risk_transf_attach, 0) self.assertEqual(act_buil.risk_transf_cover, 0) self.assertEqual(act_buil.risk_transf_cost_factor, 1) - name = 'vegetation management' + name = "vegetation management" act_buil = meas.get_measure(name=name)[0] self.assertEqual(act_buil.name, name) - self.assertEqual(act_buil.haz_type, 'TC') - self.assertTrue(np.array_equal(act_buil.color_rgb, np.array([0.76, 0.84, 0.60]))) + self.assertEqual(act_buil.haz_type, "TC") + self.assertTrue( + np.array_equal(act_buil.color_rgb, np.array([0.76, 0.84, 0.60])) + ) self.assertEqual(act_buil.cost, 63968125.00687534) - self.assertEqual(act_buil.hazard_set, 'nil') + self.assertEqual(act_buil.hazard_set, "nil") self.assertEqual(act_buil.hazard_freq_cutoff, 0) self.assertEqual(act_buil.hazard_inten_imp, (1, -1)) - self.assertEqual(act_buil.exposures_set, 'nil') + self.assertEqual(act_buil.exposures_set, "nil") self.assertEqual(act_buil.exp_region_id, 0) self.assertEqual(act_buil.paa_impact, (0.8, 0)) self.assertEqual(act_buil.mdd_impact, (1, 0)) - self.assertEqual(act_buil.imp_fun_map, 'nil') + self.assertEqual(act_buil.imp_fun_map, "nil") self.assertEqual(act_buil.risk_transf_attach, 0) self.assertEqual(act_buil.risk_transf_cover, 0) self.assertEqual(act_buil.risk_transf_cost_factor, 1) - self.assertEqual(meas.get_measure(name='enforce building code')[0].imp_fun_map, '1to3') + self.assertEqual( + meas.get_measure(name="enforce building code")[0].imp_fun_map, "1to3" + ) - name = 'risk transfer' + name = "risk transfer" act_buil = meas.get_measure(name=name)[0] self.assertEqual(act_buil.name, name) - self.assertEqual(act_buil.haz_type, 'TC') - self.assertTrue(np.array_equal(act_buil.color_rgb, np.array([0.90, 0.72, 0.72]))) + self.assertEqual(act_buil.haz_type, "TC") + self.assertTrue( + np.array_equal(act_buil.color_rgb, np.array([0.90, 0.72, 0.72])) + ) self.assertEqual(act_buil.cost, 21000000) - self.assertEqual(act_buil.hazard_set, 'nil') + self.assertEqual(act_buil.hazard_set, "nil") self.assertEqual(act_buil.hazard_freq_cutoff, 0) self.assertEqual(act_buil.hazard_inten_imp, (1, 0)) - self.assertEqual(act_buil.exposures_set, 'nil') + self.assertEqual(act_buil.exposures_set, "nil") self.assertEqual(act_buil.exp_region_id, 0) self.assertEqual(act_buil.paa_impact, (1, 0)) self.assertEqual(act_buil.mdd_impact, (1, 0)) - self.assertEqual(act_buil.imp_fun_map, 'nil') + self.assertEqual(act_buil.imp_fun_map, "nil") self.assertEqual(act_buil.risk_transf_attach, 500000000) self.assertEqual(act_buil.risk_transf_cover, 1000000000) @@ -434,9 +481,9 @@ def test_demo_file(self): self.assertEqual(meas.size(), n_meas) - act_man = meas.get_measure(name='Mangroves')[0] - self.assertEqual(act_man.name, 'Mangroves') - self.assertEqual(act_man.haz_type, 'XX') + act_man = meas.get_measure(name="Mangroves")[0] + self.assertEqual(act_man.name, "Mangroves") + self.assertEqual(act_man.haz_type, "XX") self.assertEqual(type(act_man.color_rgb), np.ndarray) self.assertEqual(len(act_man.color_rgb), 3) self.assertEqual(act_man.color_rgb[0], 0.1529) @@ -445,23 +492,22 @@ def test_demo_file(self): self.assertEqual(act_man.cost, 1311768360.8515418) self.assertEqual(act_man.hazard_freq_cutoff, 0) - self.assertEqual(act_man.hazard_set, 'nil') + self.assertEqual(act_man.hazard_set, "nil") self.assertEqual(act_man.hazard_inten_imp, (1, -4)) - self.assertEqual(act_man.exposures_set, 'nil') + 
self.assertEqual(act_man.exposures_set, "nil") self.assertEqual(act_man.exp_region_id, []) self.assertEqual(act_man.mdd_impact, (1, 0)) self.assertEqual(act_man.paa_impact, (1, 0)) - self.assertEqual(act_man.imp_fun_map, 'nil') + self.assertEqual(act_man.imp_fun_map, "nil") self.assertEqual(act_man.risk_transf_attach, 0) self.assertEqual(act_man.risk_transf_cover, 0) - - act_buil = meas.get_measure(name='Building code')[0] - self.assertEqual(act_buil.name, 'Building code') - self.assertEqual(act_buil.haz_type, 'XX') + act_buil = meas.get_measure(name="Building code")[0] + self.assertEqual(act_buil.name, "Building code") + self.assertEqual(act_buil.haz_type, "XX") self.assertEqual(type(act_buil.color_rgb), np.ndarray) self.assertEqual(len(act_buil.color_rgb), 3) self.assertEqual(act_buil.color_rgb[0], 0.6980) @@ -470,15 +516,15 @@ def test_demo_file(self): self.assertEqual(act_buil.cost, 9200000000.0000000) self.assertEqual(act_buil.hazard_freq_cutoff, 0) - self.assertEqual(act_buil.hazard_set, 'nil') + self.assertEqual(act_buil.hazard_set, "nil") self.assertEqual(act_buil.hazard_inten_imp, (1, 0)) - self.assertEqual(act_buil.exposures_set, 'nil') + self.assertEqual(act_buil.exposures_set, "nil") self.assertEqual(act_buil.exp_region_id, []) self.assertEqual(act_buil.mdd_impact, (0.75, 0)) self.assertEqual(act_buil.paa_impact, (1, 0)) - self.assertEqual(act_man.imp_fun_map, 'nil') + self.assertEqual(act_man.imp_fun_map, "nil") self.assertEqual(act_buil.risk_transf_attach, 0) self.assertEqual(act_buil.risk_transf_cover, 0) @@ -491,8 +537,8 @@ def test_write_read_file(self): """Write and read excel file""" act_1 = Measure( - name='Mangrove', - haz_type='TC', + name="Mangrove", + haz_type="TC", color_rgb=np.array([1, 1, 1]), cost=10, mdd_impact=(1, 2), @@ -502,8 +548,8 @@ def test_write_read_file(self): ) act_11 = Measure( - name='Something', - haz_type='TC', + name="Something", + haz_type="TC", color_rgb=np.array([1, 1, 1]), mdd_impact=(1, 2), paa_impact=(1, 3), @@ -512,32 +558,32 @@ def test_write_read_file(self): ) act_2 = Measure( - name='Anything', - haz_type='FL', + name="Anything", + haz_type="FL", color_rgb=np.array([1, 1, 1]), mdd_impact=(1, 2), paa_impact=(1, 2), hazard_inten_imp=(1, 2), hazard_freq_cutoff=30, - imp_fun_map='map', + imp_fun_map="map", ) meas_set = MeasureSet(measure_list=[act_1, act_11, act_2]) - file_name = DATA_DIR.joinpath('test_meas.xlsx') + file_name = DATA_DIR.joinpath("test_meas.xlsx") meas_set.write_excel(file_name) meas_read = MeasureSet.from_excel(file_name) - meas_list = meas_read.get_measure('TC') - meas_list.extend(meas_read.get_measure('FL')) + meas_list = meas_read.get_measure("TC") + meas_list.extend(meas_read.get_measure("FL")) for meas in meas_list: - if meas.name == 'Mangrove': + if meas.name == "Mangrove": meas_ref = act_1 - elif meas.name == 'Something': + elif meas.name == "Something": meas_ref = act_11 - elif meas.name == 'Anything': + elif meas.name == "Anything": meas_ref = act_2 self.assertEqual(meas_ref.name, meas.name) @@ -555,6 +601,7 @@ def test_write_read_file(self): self.assertEqual(meas_ref.risk_transf_attach, meas.risk_transf_attach) self.assertEqual(meas_ref.risk_transf_cover, meas.risk_transf_cover) + # Execute Tests if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestContainer) diff --git a/climada/entity/tag/__init__.py b/climada/entity/tag/__init__.py index 4d386fd09..374022317 100644 --- a/climada/entity/tag/__init__.py +++ b/climada/entity/tag/__init__.py @@ -30,9 +30,11 @@ # # 
@deprecated(details="This class is not supported anymore.") class Tag(_Tag): - """kept for backwards compatibility with climada <= 3.3 - """ - @deprecated(details="This class is not supported anymore and will be removed in the next" - " version of climada.") + """kept for backwards compatibility with climada <= 3.3""" + + @deprecated( + details="This class is not supported anymore and will be removed in the next" + " version of climada." + ) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) diff --git a/climada/entity/tag/tag.py b/climada/entity/tag/tag.py index 4ec1a2bef..01f9db19c 100644 --- a/climada/entity/tag/tag.py +++ b/climada/entity/tag/tag.py @@ -18,13 +18,14 @@ Define Tag class. """ + from __future__ import annotations + from pathlib import Path -from typing import Union, List +from typing import List, Union import h5py - STR_DT = h5py.special_dtype(vlen=str) @@ -39,7 +40,7 @@ def _distinct_list_of_str(list_of_str: list, arg: Union[list, str, object]): return list_of_str -class Tag(): +class Tag: """Deprecated since climada 4.*. This class is only used for unpickling, e.g., when reading Exposures hdf5 data files that have been created with climada <=3.*. @@ -51,9 +52,11 @@ class Tag(): description of the data """ - def __init__(self, - file_name: Union[List[str], str] = None, - description: Union[List[str], str] = None): + def __init__( + self, + file_name: Union[List[str], str] = None, + description: Union[List[str], str] = None, + ): """Initialize values. Parameters @@ -71,7 +74,7 @@ def __getattribute__(self, name): # the attribute assignment there is not done neither via __init__ nor via __setattr__. # The outcome is e.g., a description of type str val = super().__getattribute__(name) - if name in ['file_name', 'description'] and not isinstance(val, list): + if name in ["file_name", "description"] and not isinstance(val, list): if not val: return [] return [str(val)] @@ -84,18 +87,19 @@ def append(self, tag: Tag): def join_file_names(self): """Get a string with the joined file names.""" - return ' + '.join([ - Path(single_name).stem - for single_name in self.file_name - ]) + return " + ".join([Path(single_name).stem for single_name in self.file_name]) def join_descriptions(self): """Get a string with the joined descriptions.""" - return ' + '.join(self.description) + return " + ".join(self.description) def __str__(self): - return ' File: ' + self.join_file_names() + \ - '\n Description: ' + self.join_descriptions() + return ( + " File: " + + self.join_file_names() + + "\n Description: " + + self.join_descriptions() + ) __repr__ = __str__ @@ -107,10 +111,14 @@ def to_hdf5(self, hf_data): hf_data : h5py.File will be updated during the call """ - hf_str = hf_data.create_dataset('file_name', (len(self.file_name),), dtype=STR_DT) + hf_str = hf_data.create_dataset( + "file_name", (len(self.file_name),), dtype=STR_DT + ) for i, name in enumerate(self.file_name): hf_str[i] = name - hf_str = hf_data.create_dataset('description', (len(self.description),), dtype=STR_DT) + hf_str = hf_data.create_dataset( + "description", (len(self.description),), dtype=STR_DT + ) for i, desc in enumerate(self.description): hf_str[i] = desc @@ -127,5 +135,6 @@ def from_hdf5(cls, hf_data): Tag """ return cls( - file_name=[x.decode() for x in hf_data.get('file_name')], - description=[x.decode() for x in hf_data.get('description')]) + file_name=[x.decode() for x in hf_data.get("file_name")], + description=[x.decode() for x in hf_data.get("description")], + ) diff --git 
a/climada/entity/tag/test/test_tag.py b/climada/entity/tag/test/test_tag.py index 8dc37590d..dfd1f454b 100644 --- a/climada/entity/tag/test/test_tag.py +++ b/climada/entity/tag/test/test_tag.py @@ -23,48 +23,53 @@ from climada.entity.tag import Tag + class TestAppend(unittest.TestCase): """Test loading funcions from the Hazard class""" def test_append_different_increase(self): """Appends an other tag correctly.""" - tag1 = Tag('file_name1.mat', 'dummy file 1') - self.assertEqual(['file_name1.mat'], tag1.file_name) - self.assertEqual(['dummy file 1'], tag1.description) + tag1 = Tag("file_name1.mat", "dummy file 1") + self.assertEqual(["file_name1.mat"], tag1.file_name) + self.assertEqual(["dummy file 1"], tag1.description) - tag2 = Tag('file_name2.mat', 'dummy file 2') + tag2 = Tag("file_name2.mat", "dummy file 2") tag1.append(tag2) - self.assertEqual(['file_name1.mat', 'file_name2.mat'], tag1.file_name) - self.assertEqual(['dummy file 1', 'dummy file 2'], tag1.description) - self.assertEqual(' File: file_name1 + file_name2\n' - ' Description: dummy file 1 + dummy file 2', str(tag1)) + self.assertEqual(["file_name1.mat", "file_name2.mat"], tag1.file_name) + self.assertEqual(["dummy file 1", "dummy file 2"], tag1.description) + self.assertEqual( + " File: file_name1 + file_name2\n" + " Description: dummy file 1 + dummy file 2", + str(tag1), + ) def test_append_equal_same(self): """Appends an other tag correctly.""" - tag1 = Tag('file_name1.mat', 'dummy file 1') - tag2 = Tag('file_name1.mat', 'dummy file 1') + tag1 = Tag("file_name1.mat", "dummy file 1") + tag2 = Tag("file_name1.mat", "dummy file 1") tag1.append(tag2) - self.assertEqual(['file_name1.mat'], tag1.file_name) - self.assertEqual(['dummy file 1'], tag1.description) + self.assertEqual(["file_name1.mat"], tag1.file_name) + self.assertEqual(["dummy file 1"], tag1.description) def test_append_empty(self): """Appends an other tag correctly.""" - tag1 = Tag('file_name1.mat', 'dummy file 1') + tag1 = Tag("file_name1.mat", "dummy file 1") tag2 = Tag() tag1.append(tag2) - self.assertEqual(['file_name1.mat'], tag1.file_name) - self.assertEqual(['dummy file 1'], tag1.description) + self.assertEqual(["file_name1.mat"], tag1.file_name) + self.assertEqual(["dummy file 1"], tag1.description) tag1 = Tag() - tag2 = Tag('file_name1.mat', 'dummy file 1') + tag2 = Tag("file_name1.mat", "dummy file 1") tag1.append(tag2) - self.assertEqual(['file_name1.mat'], tag1.file_name) - self.assertEqual(['dummy file 1'], tag1.description) + self.assertEqual(["file_name1.mat"], tag1.file_name) + self.assertEqual(["dummy file 1"], tag1.description) + # Execute Tests if __name__ == "__main__": diff --git a/climada/entity/test/test_entity.py b/climada/entity/test/test_entity.py index 46e712c57..7805a24e7 100644 --- a/climada/entity/test/test_entity.py +++ b/climada/entity/test/test_entity.py @@ -18,18 +18,20 @@ Test Entity class. 
""" + import unittest + import numpy as np from climada import CONFIG +from climada.entity.disc_rates.base import DiscRates from climada.entity.entity_def import Entity from climada.entity.exposures.base import Exposures -from climada.entity.disc_rates.base import DiscRates from climada.entity.impact_funcs.impact_func_set import ImpactFuncSet from climada.entity.measures.measure_set import MeasureSet from climada.util.constants import ENT_TEMPLATE_XLS -ENT_TEST_MAT = CONFIG.exposures.test_data.dir().joinpath('demo_today.mat') +ENT_TEST_MAT = CONFIG.exposures.test_data.dir().joinpath("demo_today.mat") class TestReader(unittest.TestCase): @@ -41,12 +43,12 @@ def test_default_pass(self): def_entity = Entity.from_excel(ENT_TEMPLATE_XLS) # Check default demo excel file has been loaded - self.assertEqual(len(def_entity.exposures.gdf['deductible']), 24) - self.assertEqual(def_entity.exposures.gdf['value'][2], 12596064143.542929) + self.assertEqual(len(def_entity.exposures.gdf["deductible"]), 24) + self.assertEqual(def_entity.exposures.gdf["value"][2], 12596064143.542929) - self.assertEqual(len(def_entity.impact_funcs.get_func('TC', 1).mdd), 25) + self.assertEqual(len(def_entity.impact_funcs.get_func("TC", 1).mdd), 25) - self.assertIn('risk transfer', def_entity.measures.get_names('TC')) + self.assertIn("risk transfer", def_entity.measures.get_names("TC")) self.assertEqual(def_entity.disc_rates.years[5], 2005) @@ -70,27 +72,27 @@ class TestCheck(unittest.TestCase): def test_wrongMeas_fail(self): """Wrong measures""" ent = Entity.from_excel(ENT_TEMPLATE_XLS) - actions = ent.measures.get_measure('TC') + actions = ent.measures.get_measure("TC") actions[0].color_rgb = np.array([1, 2]) with self.assertRaises(ValueError) as cm: ent.check() - self.assertIn('Measure.color_rgb', str(cm.exception)) + self.assertIn("Measure.color_rgb", str(cm.exception)) with self.assertRaises(ValueError) as cm: ent.measures = Exposures() - self.assertIn('MeasureSet', str(cm.exception)) + self.assertIn("MeasureSet", str(cm.exception)) def test_wrongImpFun_fail(self): """Wrong impact functions""" ent = Entity.from_excel(ENT_TEMPLATE_XLS) - ent.impact_funcs.get_func('TC', 1).paa = np.array([1, 2]) + ent.impact_funcs.get_func("TC", 1).paa = np.array([1, 2]) with self.assertRaises(ValueError) as cm: ent.check() - self.assertIn('ImpactFunc.paa', str(cm.exception)) + self.assertIn("ImpactFunc.paa", str(cm.exception)) with self.assertRaises(ValueError) as cm: ent.impact_funcs = Exposures() - self.assertIn('ImpactFuncSet', str(cm.exception)) + self.assertIn("ImpactFuncSet", str(cm.exception)) def test_wrongDisc_fail(self): """Wrong discount rates""" @@ -98,11 +100,11 @@ def test_wrongDisc_fail(self): ent.disc_rates.rates = np.array([1, 2]) with self.assertRaises(ValueError) as cm: ent.check() - self.assertIn('DiscRates.rates', str(cm.exception)) + self.assertIn("DiscRates.rates", str(cm.exception)) with self.assertRaises(ValueError) as cm: ent.disc_rates = Exposures() - self.assertIn('DiscRates', str(cm.exception)) + self.assertIn("DiscRates", str(cm.exception)) # Execute Tests diff --git a/climada/hazard/__init__.py b/climada/hazard/__init__.py index 8201c40fa..0231ec112 100755 --- a/climada/hazard/__init__.py +++ b/climada/hazard/__init__.py @@ -18,8 +18,9 @@ init hazard """ -from .centroids import * + from .base import * -from .trop_cyclone import * -from .tc_tracks import * +from .centroids import * from .storm_europe import * +from .tc_tracks import * +from .trop_cyclone import * diff --git a/climada/hazard/base.py 
b/climada/hazard/base.py index 877a22f2d..f8d379315 100644 --- a/climada/hazard/base.py +++ b/climada/hazard/base.py @@ -19,28 +19,27 @@ Define Hazard. """ -__all__ = ['Hazard'] +__all__ = ["Hazard"] import copy import datetime as dt import logging -from typing import Optional,List import warnings +from typing import List, Optional import geopandas as gpd import numpy as np from pathos.pools import ProcessPool as Pool from scipy import sparse -from climada import CONFIG -from climada.hazard.plot import HazardPlot -from climada.hazard.io import HazardIO -from climada.hazard.centroids.centr import Centroids import climada.util.checker as u_check import climada.util.constants as u_const import climada.util.coordinates as u_coord import climada.util.dates_times as u_dt - +from climada import CONFIG +from climada.hazard.centroids.centr import Centroids +from climada.hazard.io import HazardIO +from climada.hazard.plot import HazardPlot LOGGER = logging.getLogger(__name__) @@ -98,26 +97,24 @@ class Hazard(HazardIO, HazardPlot): If empty (all 0), it is ignored in the impact computations (i.e., is equivalent to fraction is 1 everywhere). """ + intensity_thres = 10 """Intensity threshold per hazard used to filter lower intensities. To be set for every hazard type""" - vars_oblig = {'units', - 'centroids', - 'event_id', - 'frequency', - 'intensity', - 'fraction' - } + vars_oblig = { + "units", + "centroids", + "event_id", + "frequency", + "intensity", + "fraction", + } """Name of the variables needed to compute the impact. Types: scalar, str, list, 1dim np.array of size num_events, scipy.sparse matrix of shape num_events x num_centroids, Centroids.""" - vars_def = {'date', - 'orig', - 'event_name', - 'frequency_unit' - } + vars_def = {"date", "orig", "event_name", "frequency_unit"} """Name of the variables used in impact calculation whose value is descriptive and can therefore be set with default values. Types: scalar, string, list, 1dim np.array of size num_events. @@ -127,19 +124,21 @@ class Hazard(HazardIO, HazardPlot): """Name of the variables that aren't need to compute the impact. Types: scalar, string, list, 1dim np.array of size num_events.""" - def __init__(self, - haz_type: str = "", - pool: Optional[Pool] = None, - units: str = "", - centroids: Optional[Centroids] = None, - event_id: Optional[np.ndarray] = None, - frequency: Optional[np.ndarray] = None, - frequency_unit: str = u_const.DEF_FREQ_UNIT, - event_name: Optional[List[str]] = None, - date: Optional[np.ndarray] = None, - orig: Optional[np.ndarray] = None, - intensity: Optional[sparse.csr_matrix] = None, - fraction: Optional[sparse.csr_matrix] = None): + def __init__( + self, + haz_type: str = "", + pool: Optional[Pool] = None, + units: str = "", + centroids: Optional[Centroids] = None, + event_id: Optional[np.ndarray] = None, + frequency: Optional[np.ndarray] = None, + frequency_unit: str = u_const.DEF_FREQ_UNIT, + event_name: Optional[List[str]] = None, + date: Optional[np.ndarray] = None, + orig: Optional[np.ndarray] = None, + intensity: Optional[sparse.csr_matrix] = None, + fraction: Optional[sparse.csr_matrix] = None, + ): """ Initialize values. 
@@ -187,25 +186,31 @@ def __init__(self, """ self.haz_type = haz_type self.units = units - self.centroids = centroids if centroids is not None else Centroids( - lat=np.empty(0), lon=np.empty(0)) + self.centroids = ( + centroids + if centroids is not None + else Centroids(lat=np.empty(0), lon=np.empty(0)) + ) # following values are defined for each event self.event_id = event_id if event_id is not None else np.array([], int) - self.frequency = frequency if frequency is not None else np.array( - [], float) + self.frequency = frequency if frequency is not None else np.array([], float) self.frequency_unit = frequency_unit self.event_name = event_name if event_name is not None else list() self.date = date if date is not None else np.array([], int) self.orig = orig if orig is not None else np.array([], bool) # following values are defined for each event and centroid - self.intensity = intensity if intensity is not None else sparse.csr_matrix( - np.empty((0, 0))) # events x centroids - self.fraction = fraction if fraction is not None else sparse.csr_matrix( - self.intensity.shape) # events x centroids + self.intensity = ( + intensity if intensity is not None else sparse.csr_matrix(np.empty((0, 0))) + ) # events x centroids + self.fraction = ( + fraction + if fraction is not None + else sparse.csr_matrix(self.intensity.shape) + ) # events x centroids self.pool = pool if self.pool: - LOGGER.info('Using %s CPUs.', self.pool.ncpus) + LOGGER.info("Using %s CPUs.", self.pool.ncpus) def check_matrices(self): """Ensure that matrices are consistently shaped and stored @@ -248,7 +253,7 @@ def get_default(cls, attribute): Any """ return { - 'frequency_unit': u_const.DEF_FREQ_UNIT, + "frequency_unit": u_const.DEF_FREQ_UNIT, }.get(attribute) def check(self): @@ -271,8 +276,16 @@ def reproject_vector(self, dst_crs): self.centroids.gdf.to_crs(dst_crs, inplace=True) self.check() - def select(self, event_names=None, event_id=None, date=None, orig=None, - reg_id=None, extent=None, reset_frequency=False): + def select( + self, + event_names=None, + event_id=None, + date=None, + orig=None, + reg_id=None, + extent=None, + reset_frequency=False, + ): """Select events matching provided criteria The frequency of events may need to be recomputed (see `reset_frequency`)! 
@@ -308,7 +321,7 @@ def select(self, event_names=None, event_id=None, date=None, orig=None, else: haz = self.__class__() - #filter events + # filter events sel_ev = np.ones(self.event_id.size, dtype=bool) # filter events by date @@ -319,14 +332,14 @@ def select(self, event_names=None, event_id=None, date=None, orig=None, date_end = u_dt.str_to_date(date[1]) sel_ev &= (date_ini <= self.date) & (self.date <= date_end) if not np.any(sel_ev): - LOGGER.info('No hazard in date range %s.', date) + LOGGER.info("No hazard in date range %s.", date) return None # filter events hist/synthetic if orig is not None: - sel_ev &= (self.orig.astype(bool) == orig) + sel_ev &= self.orig.astype(bool) == orig if not np.any(sel_ev): - LOGGER.info('No hazard with %s original events.', str(orig)) + LOGGER.info("No hazard with %s original events.", str(orig)) return None # filter events based on name @@ -337,38 +350,43 @@ def select(self, event_names=None, event_id=None, date=None, orig=None, new_sel = [filtered_events.index(n) for n in event_names] except ValueError as err: name = str(err).replace(" is not in list", "") - LOGGER.info('No hazard with name %s', name) + LOGGER.info("No hazard with name %s", name) return None sel_ev = sel_ev[new_sel] # filter events based on id if event_id is not None: # preserves order of event_id - sel_ev = np.array([ - np.argwhere(self.event_id == n)[0,0] - for n in event_id - if n in self.event_id[sel_ev] - ]) + sel_ev = np.array( + [ + np.argwhere(self.event_id == n)[0, 0] + for n in event_id + if n in self.event_id[sel_ev] + ] + ) # filter centroids sel_cen = self.centroids.select_mask(reg_id=reg_id, extent=extent) if not np.any(sel_cen): - LOGGER.info('No hazard centroids within extent and region') + LOGGER.info("No hazard centroids within extent and region") return None # Sanitize fraction, because we check non-zero entries later self.fraction.eliminate_zeros() # Perform attribute selection - for (var_name, var_val) in self.__dict__.items(): - if isinstance(var_val, np.ndarray) and var_val.ndim == 1 \ - and var_val.size > 0: + for var_name, var_val in self.__dict__.items(): + if ( + isinstance(var_val, np.ndarray) + and var_val.ndim == 1 + and var_val.size > 0 + ): setattr(haz, var_name, var_val[sel_ev]) elif isinstance(var_val, sparse.csr_matrix): setattr(haz, var_name, var_val[sel_ev, :][:, sel_cen]) elif isinstance(var_val, list) and var_val: setattr(haz, var_name, [var_val[idx] for idx in sel_ev]) - elif var_name == 'centroids': + elif var_name == "centroids": if reg_id is None and extent is None: new_cent = var_val else: @@ -379,15 +397,28 @@ def select(self, event_names=None, event_id=None, date=None, orig=None, # reset frequency if date span has changed (optional): if reset_frequency: - if self.frequency_unit not in ['1/year', 'annual', '1/y', '1/a']: - LOGGER.warning("Resetting the frequency is based on the calendar year of given" + if self.frequency_unit not in ["1/year", "annual", "1/y", "1/a"]: + LOGGER.warning( + "Resetting the frequency is based on the calendar year of given" " dates but the frequency unit here is %s. 
Consider setting the frequency" " manually for the selection or changing the frequency unit to %s.", - self.frequency_unit, u_const.DEF_FREQ_UNIT) - year_span_old = np.abs(dt.datetime.fromordinal(self.date.max()).year - - dt.datetime.fromordinal(self.date.min()).year) + 1 - year_span_new = np.abs(dt.datetime.fromordinal(haz.date.max()).year - - dt.datetime.fromordinal(haz.date.min()).year) + 1 + self.frequency_unit, + u_const.DEF_FREQ_UNIT, + ) + year_span_old = ( + np.abs( + dt.datetime.fromordinal(self.date.max()).year + - dt.datetime.fromordinal(self.date.min()).year + ) + + 1 + ) + year_span_new = ( + np.abs( + dt.datetime.fromordinal(haz.date.max()).year + - dt.datetime.fromordinal(haz.date.min()).year + ) + + 1 + ) haz.frequency = haz.frequency * year_span_old / year_span_new # Check if new fraction is zero everywhere @@ -405,8 +436,11 @@ def select(self, event_names=None, event_id=None, date=None, orig=None, haz.sanitize_event_ids() return haz - def select_tight(self, buffer=u_coord.NEAREST_NEIGHBOR_THRESHOLD / u_const.ONE_LAT_KM, - val='intensity'): + def select_tight( + self, + buffer=u_coord.NEAREST_NEIGHBOR_THRESHOLD / u_const.ONE_LAT_KM, + val="intensity", + ): """ Reduce hazard to those centroids spanning a minimal box which contains all non-zero intensity or fraction points. @@ -435,15 +469,17 @@ def select_tight(self, buffer=u_coord.NEAREST_NEIGHBOR_THRESHOLD / u_const.ONE_L """ - if val == 'intensity': + if val == "intensity": cent_nz = (self.intensity != 0).sum(axis=0).nonzero()[1] - if val == 'fraction': + if val == "fraction": cent_nz = (self.fraction != 0).sum(axis=0).nonzero()[1] lon_nz = self.centroids.lon[cent_nz] lat_nz = self.centroids.lat[cent_nz] - return self.select(extent=u_coord.toggle_extent_bounds( - u_coord.latlon_bounds(lat=lat_nz, lon=lon_nz, buffer=buffer) - )) + return self.select( + extent=u_coord.toggle_extent_bounds( + u_coord.latlon_bounds(lat=lat_nz, lon=lon_nz, buffer=buffer) + ) + ) def local_exceedance_inten(self, return_periods=(25, 50, 100, 250)): """Compute exceedance intensity map for given return periods. @@ -460,40 +496,49 @@ def local_exceedance_inten(self, return_periods=(25, 50, 100, 250)): # warn if return period is above return period of rarest event: for period in return_periods: if period > 1 / self.frequency.min(): - LOGGER.warning('Return period %1.1f exceeds max. event return period.', period) - LOGGER.info('Computing exceedance intenstiy map for return periods: %s', - return_periods) + LOGGER.warning( + "Return period %1.1f exceeds max. 
event return period.", period + ) + LOGGER.info( + "Computing exceedance intenstiy map for return periods: %s", return_periods + ) num_cen = self.intensity.shape[1] inten_stats = np.zeros((len(return_periods), num_cen)) cen_step = CONFIG.max_matrix_size.int() // self.intensity.shape[0] if not cen_step: - raise ValueError('Increase max_matrix_size configuration parameter to >' - f' {self.intensity.shape[0]}') + raise ValueError( + "Increase max_matrix_size configuration parameter to >" + f" {self.intensity.shape[0]}" + ) # separte in chunks chk = -1 for chk in range(int(num_cen / cen_step)): self._loc_return_inten( np.array(return_periods), - self.intensity[:, chk * cen_step:(chk + 1) * cen_step].toarray(), - inten_stats[:, chk * cen_step:(chk + 1) * cen_step]) + self.intensity[:, chk * cen_step : (chk + 1) * cen_step].toarray(), + inten_stats[:, chk * cen_step : (chk + 1) * cen_step], + ) self._loc_return_inten( np.array(return_periods), - self.intensity[:, (chk + 1) * cen_step:].toarray(), - inten_stats[:, (chk + 1) * cen_step:]) + self.intensity[:, (chk + 1) * cen_step :].toarray(), + inten_stats[:, (chk + 1) * cen_step :], + ) # set values below 0 to zero if minimum of hazard.intensity >= 0: if np.min(inten_stats) < 0 <= self.intensity.min(): - LOGGER.warning('Exceedance intenstiy values below 0 are set to 0. \ - Reason: no negative intensity values were found in hazard.') + LOGGER.warning( + "Exceedance intenstiy values below 0 are set to 0. \ + Reason: no negative intensity values were found in hazard." + ) inten_stats[inten_stats < 0] = 0 return inten_stats def sanitize_event_ids(self): """Make sure that event ids are unique""" if np.unique(self.event_id).size != self.event_id.size: - LOGGER.debug('Resetting event_id.') + LOGGER.debug("Resetting event_id.") self.event_id = np.arange(1, self.event_id.size + 1) - def local_return_period(self, threshold_intensities=(5., 10., 20.)): + def local_return_period(self, threshold_intensities=(5.0, 10.0, 20.0)): """Compute local return periods for given hazard intensities. The used method is fitting the ordered intensitites per centroid to the corresponding cummulated frequency with a step function. 
@@ -516,17 +561,20 @@ def local_return_period(self, threshold_intensities=(5., 10., 20.)): column_label : function Column-label-generating function, for reporting and plotting """ - #check frequency unit - if self.frequency_unit in ['1/year', 'annual', '1/y', '1/a']: - rp_unit = 'Years' - elif self.frequency_unit in ['1/month', 'monthly', '1/m']: - rp_unit = 'Months' - elif self.frequency_unit in ['1/week', 'weekly', '1/w']: - rp_unit = 'Weeks' + # check frequency unit + if self.frequency_unit in ["1/year", "annual", "1/y", "1/a"]: + rp_unit = "Years" + elif self.frequency_unit in ["1/month", "monthly", "1/m"]: + rp_unit = "Months" + elif self.frequency_unit in ["1/week", "weekly", "1/w"]: + rp_unit = "Weeks" else: - LOGGER.warning("Hazard's frequency unit %s is not known, " - "years will be used as return period unit.", self.frequency_unit) - rp_unit = 'Years' + LOGGER.warning( + "Hazard's frequency unit %s is not known, " + "years will be used as return period unit.", + self.frequency_unit, + ) + rp_unit = "Years" # Ensure threshold_intensities is a numpy array threshold_intensities = np.array(threshold_intensities) @@ -534,31 +582,34 @@ def local_return_period(self, threshold_intensities=(5., 10., 20.)): num_cen = self.intensity.shape[1] return_periods = np.zeros((len(threshold_intensities), num_cen)) - # batch_centroids = number of centroids that are handled in parallel: + # batch_centroids = number of centroids that are handled in parallel: # batch_centroids = maximal matrix size // number of events batch_centroids = CONFIG.max_matrix_size.int() // self.intensity.shape[0] if batch_centroids < 1: - raise ValueError('Increase max_matrix_size configuration parameter to >' - f'{self.intensity.shape[0]}') + raise ValueError( + "Increase max_matrix_size configuration parameter to >" + f"{self.intensity.shape[0]}" + ) # Process the intensities in chunks of centroids for start_col in range(0, num_cen, batch_centroids): end_col = min(start_col + batch_centroids, num_cen) return_periods[:, start_col:end_col] = self._loc_return_period( - threshold_intensities, - self.intensity[:, start_col:end_col].toarray() - ) + threshold_intensities, self.intensity[:, start_col:end_col].toarray() + ) # create the output GeoDataFrame - gdf = gpd.GeoDataFrame(geometry = self.centroids.gdf['geometry'], - crs = self.centroids.gdf.crs) - col_names = [f'{tresh_inten}' for tresh_inten in threshold_intensities] + gdf = gpd.GeoDataFrame( + geometry=self.centroids.gdf["geometry"], crs=self.centroids.gdf.crs + ) + col_names = [f"{tresh_inten}" for tresh_inten in threshold_intensities] gdf[col_names] = return_periods.T # create label and column_label - label = f'Return Periods ({rp_unit})' - column_label = lambda column_names: [f'Threshold Intensity: {col} {self.units}' - for col in column_names] + label = f"Return Periods ({rp_unit})" + column_label = lambda column_names: [ + f"Threshold Intensity: {col} {self.units}" for col in column_names + ] return gdf, label, column_label @@ -575,8 +626,13 @@ def get_event_id(self, event_name): ------- list_id: np.array(int) """ - list_id = self.event_id[[i_name for i_name, val_name in enumerate(self.event_name) - if val_name == event_name]] + list_id = self.event_id[ + [ + i_name + for i_name, val_name in enumerate(self.event_name) + if val_name == event_name + ] + ] if list_id.size == 0: raise ValueError(f"No event with name: {event_name}") return list_id @@ -598,8 +654,7 @@ def get_event_name(self, event_id): ValueError """ try: - return self.event_name[np.argwhere( - 
self.event_id == event_id)[0][0]] + return self.event_name[np.argwhere(self.event_id == event_id)[0][0]] except IndexError as err: raise ValueError(f"No event with id: {event_id}") from err @@ -622,7 +677,8 @@ def get_event_date(self, event=None): ev_ids = self.get_event_id(event) l_dates = [ u_dt.date_to_str(self.date[np.argwhere(self.event_id == ev_id)[0][0]]) - for ev_id in ev_ids] + for ev_id in ev_ids + ] else: ev_idx = np.argwhere(self.event_id == event)[0][0] l_dates = [u_dt.date_to_str(self.date[ev_idx])] @@ -637,8 +693,9 @@ def calc_year_set(self): key are years, values array with event_ids of that year """ - orig_year = np.array([dt.datetime.fromordinal(date).year - for date in self.date[self.orig]]) + orig_year = np.array( + [dt.datetime.fromordinal(date).year for date in self.date[self.orig]] + ) orig_yearset = {} for year in np.unique(orig_year): orig_yearset[year] = self.event_id[self.orig][orig_year == year] @@ -669,13 +726,19 @@ def set_frequency(self, yearrange=None): per event. If yearrange is not given (None), the year range is derived from self.date """ - if self.frequency_unit not in ['1/year', 'annual', '1/y', '1/a']: - LOGGER.warning("setting the frequency on a hazard object who's frequency unit" + if self.frequency_unit not in ["1/year", "annual", "1/y", "1/a"]: + LOGGER.warning( + "setting the frequency on a hazard object who's frequency unit" "is %s and not %s will most likely lead to unexpected results", - self.frequency_unit, u_const.DEF_FREQ_UNIT) + self.frequency_unit, + u_const.DEF_FREQ_UNIT, + ) if not yearrange: - delta_time = dt.datetime.fromordinal(int(np.max(self.date))).year - \ - dt.datetime.fromordinal(int(np.min(self.date))).year + 1 + delta_time = ( + dt.datetime.fromordinal(int(np.max(self.date))).year + - dt.datetime.fromordinal(int(np.min(self.date))).year + + 1 + ) else: delta_time = max(yearrange) - min(yearrange) + 1 num_orig = self.orig.nonzero()[0].size @@ -723,8 +786,11 @@ def _loc_return_inten(self, return_periods, inten, exc_inten): for cen_idx in range(inten.shape[1]): exc_inten[:, cen_idx] = self._cen_return_inten( - inten_sort[:, cen_idx], freq_sort[:, cen_idx], - self.intensity_thres, return_periods) + inten_sort[:, cen_idx], + freq_sort[:, cen_idx], + self.intensity_thres, + return_periods, + ) def _loc_return_period(self, threshold_intensities, inten): """Compute local return periods for user-specified threshold intensities @@ -757,7 +823,9 @@ def _loc_return_period(self, threshold_intensities, inten): for i, intensity in enumerate(threshold_intensities): # Find the first occurrence where the intensity is less than the sorted intensities - exceedance_index = np.searchsorted(sorted_inten_cen[::-1], intensity, side='left') + exceedance_index = np.searchsorted( + sorted_inten_cen[::-1], intensity, side="left" + ) # Calculate exceedance probability if exceedance_index < len(cum_freq_cen): @@ -786,15 +854,19 @@ def _check_events(self): if np.unique(self.event_id).size != num_ev: raise ValueError("There are events with the same identifier.") - u_check.check_obligatories(self.__dict__, self.vars_oblig, 'Hazard.', - num_ev, num_ev, num_cen) - u_check.check_optionals(self.__dict__, self.vars_opt, 'Hazard.', num_ev) - self.event_name = u_check.array_default(num_ev, self.event_name, - 'Hazard.event_name', list(self.event_id)) - self.date = u_check.array_default(num_ev, self.date, 'Hazard.date', - np.ones(self.event_id.shape, dtype=int)) - self.orig = u_check.array_default(num_ev, self.orig, 'Hazard.orig', - np.zeros(self.event_id.shape, 
dtype=bool)) + u_check.check_obligatories( + self.__dict__, self.vars_oblig, "Hazard.", num_ev, num_ev, num_cen + ) + u_check.check_optionals(self.__dict__, self.vars_opt, "Hazard.", num_ev) + self.event_name = u_check.array_default( + num_ev, self.event_name, "Hazard.event_name", list(self.event_id) + ) + self.date = u_check.array_default( + num_ev, self.date, "Hazard.date", np.ones(self.event_id.shape, dtype=int) + ) + self.orig = u_check.array_default( + num_ev, self.orig, "Hazard.orig", np.zeros(self.event_id.shape, dtype=bool) + ) if len(self._events_set()) != num_ev: raise ValueError("There are events with same date and name.") @@ -831,7 +903,7 @@ def _cen_return_inten(inten, freq, inten_th, return_periods): pol_coef = np.polyfit(np.log(freq_cen), inten_cen, deg=0) inten_fit = np.polyval(pol_coef, np.log(1 / return_periods)) wrong_inten = (return_periods > np.max(1 / freq_cen)) & np.isnan(inten_fit) - inten_fit[wrong_inten] = 0. + inten_fit[wrong_inten] = 0.0 return inten_fit @@ -881,36 +953,46 @@ def append(self, *others): haz._check_events() # check type, unit, and attribute consistency among hazards - haz_types = {haz.haz_type for haz in haz_list if haz.haz_type != ''} + haz_types = {haz.haz_type for haz in haz_list if haz.haz_type != ""} if len(haz_types) > 1: - raise ValueError(f"The given hazards are of different types: {haz_types}. " - "The hazards are incompatible and cannot be concatenated.") + raise ValueError( + f"The given hazards are of different types: {haz_types}. " + "The hazards are incompatible and cannot be concatenated." + ) self.haz_type = haz_types.pop() haz_classes = {type(haz) for haz in haz_list} if len(haz_classes) > 1: - raise TypeError(f"The given hazards are of different classes: {haz_classes}. " - "The hazards are incompatible and cannot be concatenated.") + raise TypeError( + f"The given hazards are of different classes: {haz_classes}. " + "The hazards are incompatible and cannot be concatenated." + ) freq_units = {haz.frequency_unit for haz in haz_list} if len(freq_units) > 1: - raise ValueError(f"The given hazards have different frequency units: {freq_units}. " - "The hazards are incompatible and cannot be concatenated.") + raise ValueError( + f"The given hazards have different frequency units: {freq_units}. " + "The hazards are incompatible and cannot be concatenated." + ) self.frequency_unit = freq_units.pop() - units = {haz.units for haz in haz_list if haz.units != ''} + units = {haz.units for haz in haz_list if haz.units != ""} if len(units) > 1: - raise ValueError(f"The given hazards use different units: {units}. " - "The hazards are incompatible and cannot be concatenated.") + raise ValueError( + f"The given hazards use different units: {units}. " + "The hazards are incompatible and cannot be concatenated." + ) if len(units) == 0: - units = {''} + units = {""} self.units = units.pop() attributes = sorted(set.union(*[set(vars(haz).keys()) for haz in haz_list])) for attr_name in attributes: if not all(hasattr(haz, attr_name) for haz in haz_list_nonempty): - raise ValueError(f"Attribute {attr_name} is not shared by all hazards. " - "The hazards are incompatible and cannot be concatenated.") + raise ValueError( + f"Attribute {attr_name} is not shared by all hazards. " + "The hazards are incompatible and cannot be concatenated." 
+ ) # map individual centroids objects to union centroids = Centroids.union(*[haz.centroids for haz in haz_list]) @@ -924,14 +1006,25 @@ def append(self, *others): attr_val_list = [getattr(haz, attr_name) for haz in haz_list_nonempty] if isinstance(attr_val_list[0], sparse.csr_matrix): # map sparse matrix onto centroids - setattr(self, attr_name, sparse.vstack([ - sparse.csr_matrix( - (matrix.data, cent_idx[matrix.indices], matrix.indptr), - shape=(matrix.shape[0], centroids.size) - ) - for matrix, cent_idx in zip(attr_val_list, hazcent_in_cent_idx_list) - ], format='csr')) - elif isinstance(attr_val_list[0], np.ndarray) and attr_val_list[0].ndim == 1: + setattr( + self, + attr_name, + sparse.vstack( + [ + sparse.csr_matrix( + (matrix.data, cent_idx[matrix.indices], matrix.indptr), + shape=(matrix.shape[0], centroids.size), + ) + for matrix, cent_idx in zip( + attr_val_list, hazcent_in_cent_idx_list + ) + ], + format="csr", + ), + ) + elif ( + isinstance(attr_val_list[0], np.ndarray) and attr_val_list[0].ndim == 1 + ): setattr(self, attr_name, np.hstack(attr_val_list)) elif isinstance(attr_val_list[0], list): setattr(self, attr_name, sum(attr_val_list, [])) @@ -974,13 +1067,16 @@ def concat(cls, haz_list): """ if len(haz_list) == 0: return cls() - haz_concat = haz_list[0].__class__(centroids=Centroids(lat=[], lon=[], - crs=haz_list[0].centroids.crs)) + haz_concat = haz_list[0].__class__( + centroids=Centroids(lat=[], lon=[], crs=haz_list[0].centroids.crs) + ) for attr_name, attr_val in vars(haz_list[0]).items(): # to save memory, only copy simple attributes like # "units" that are not explicitly handled by Hazard.append - if not (isinstance(attr_val, (list, np.ndarray, sparse.csr_matrix)) - or attr_name in ["centroids"]): + if not ( + isinstance(attr_val, (list, np.ndarray, sparse.csr_matrix)) + or attr_name in ["centroids"] + ): setattr(haz_concat, attr_name, copy.deepcopy(attr_val)) haz_concat.append(*haz_list) return haz_concat @@ -1025,7 +1121,6 @@ def change_centroids(self, centroids, threshold=u_coord.NEAREST_NEIGHBOR_THRESHO haz_new_cent = copy.deepcopy(self) haz_new_cent.centroids = centroids - new_cent_idx = u_coord.match_coordinates( self.centroids.coord, centroids.coord, threshold=threshold ) @@ -1046,11 +1141,14 @@ def change_centroids(self, centroids, threshold=u_coord.NEAREST_NEIGHBOR_THRESHO # re-assign attributes intensity and fraction for attr_name in ["intensity", "fraction"]: matrix = getattr(self, attr_name) - setattr(haz_new_cent, attr_name, - sparse.csr_matrix( - (matrix.data, new_cent_idx[matrix.indices], matrix.indptr), - shape=(matrix.shape[0], centroids.size) - )) + setattr( + haz_new_cent, + attr_name, + sparse.csr_matrix( + (matrix.data, new_cent_idx[matrix.indices], matrix.indptr), + shape=(matrix.shape[0], centroids.size), + ), + ) return haz_new_cent @@ -1066,7 +1164,10 @@ def centr_exp_col(self): in an exposures gdf. E.g. "centr_TC" """ - from climada.entity.exposures import INDICATOR_CENTR # pylint: disable=import-outside-toplevel + from climada.entity.exposures import ( + INDICATOR_CENTR, # pylint: disable=import-outside-toplevel + ) + # import outside toplevel is necessary for it not being circular return INDICATOR_CENTR + self.haz_type @@ -1097,10 +1198,12 @@ def get_mdr(self, cent_idx, impf): if impf.calc_mdr(0) == 0: mdr.data = impf.calc_mdr(mdr.data) else: - LOGGER.warning("Impact function id=%d has mdr(0) != 0." + LOGGER.warning( + "Impact function id=%d has mdr(0) != 0." 
"The mean damage ratio must thus be computed for all values of" "hazard intensity including 0 which can be very time consuming.", - impf.id) + impf.id, + ) mdr_array = impf.calc_mdr(mdr.toarray().ravel()).reshape(mdr.shape) mdr = sparse.csr_matrix(mdr_array) mdr_out = mdr[:, indices] diff --git a/climada/hazard/centroids/__init__.py b/climada/hazard/centroids/__init__.py index f746df302..530f47958 100755 --- a/climada/hazard/centroids/__init__.py +++ b/climada/hazard/centroids/__init__.py @@ -18,4 +18,5 @@ init centroids """ + from .centr import * diff --git a/climada/hazard/centroids/centr.py b/climada/hazard/centroids/centr.py index df57fbdc3..c1e8bb68b 100644 --- a/climada/hazard/centroids/centr.py +++ b/climada/hazard/centroids/centr.py @@ -20,43 +20,38 @@ """ import copy -from deprecation import deprecated import logging +import warnings from pathlib import Path from typing import Any, Literal, Union -import warnings -import h5py import cartopy import cartopy.crs as ccrs import cartopy.feature as cfeature import geopandas as gpd +import h5py import matplotlib.pyplot as plt import numpy as np import pandas as pd -from pyproj.crs.crs import CRS import rasterio +from deprecation import deprecated +from pyproj.crs.crs import CRS from shapely.geometry.point import Point -from climada.util.constants import DEF_CRS import climada.util.coordinates as u_coord +from climada.util.constants import DEF_CRS -__all__ = ['Centroids'] +__all__ = ["Centroids"] -PROJ_CEA = CRS.from_user_input({'proj': 'cea'}) +PROJ_CEA = CRS.from_user_input({"proj": "cea"}) LOGGER = logging.getLogger(__name__) -DEF_SHEET_NAME = 'centroids' -EXP_SPECIFIC_COLS = [ - 'value', - 'impf_', - 'centr_', - 'cover', - 'deductible' -] +DEF_SHEET_NAME = "centroids" +EXP_SPECIFIC_COLS = ["value", "impf_", "centr_", "cover", "deductible"] + -class Centroids(): +class Centroids: """Contains vector centroids as a GeoDataFrame Attributes @@ -106,52 +101,52 @@ def __init__( self.gdf = gpd.GeoDataFrame( data={ - 'geometry': gpd.points_from_xy(lon, lat, crs=crs), - 'region_id': region_id, - 'on_land': on_land, + "geometry": gpd.points_from_xy(lon, lat, crs=crs), + "region_id": region_id, + "on_land": on_land, **kwargs, } ) if isinstance(region_id, str): - LOGGER.info('Setting region id to %s level.', region_id) + LOGGER.info("Setting region id to %s level.", region_id) self.set_region_id(level=region_id, overwrite=True) if isinstance(on_land, str): - LOGGER.info('Setting on land from %s source.', on_land) + LOGGER.info("Setting on land from %s source.", on_land) self.set_on_land(source=on_land, overwrite=True) @property def lat(self): - """ Return latitudes """ + """Return latitudes""" return self.gdf.geometry.y.values @property def lon(self): - """ Return longitudes """ + """Return longitudes""" return self.gdf.geometry.x.values @property def geometry(self): - """ Return the geometry """ - return self.gdf['geometry'] + """Return the geometry""" + return self.gdf["geometry"] @property def on_land(self): - """ Get the on_land property """ - if self.gdf['on_land'].isna().all(): + """Get the on_land property""" + if self.gdf["on_land"].isna().all(): return None - return self.gdf['on_land'].values + return self.gdf["on_land"].values @property def region_id(self): - """ Get the assigned region_id """ - if self.gdf['region_id'].isna().all(): + """Get the assigned region_id""" + if self.gdf["region_id"].isna().all(): return None - return self.gdf['region_id'].values + return self.gdf["region_id"].values @property def crs(self): - """ Get the 
crs""" + """Get the crs""" return self.gdf.crs @property @@ -175,7 +170,7 @@ def coord(self): return np.stack([self.lat, self.lon], axis=1) def __eq__(self, other): - """ dunder method for Centroids comparison. + """dunder method for Centroids comparison. returns True if two centroids equal, False otherwise Parameters @@ -214,7 +209,7 @@ def to_default_crs(self, inplace=True): return self.to_crs(DEF_CRS, inplace=inplace) def to_crs(self, crs, inplace=False): - """ Project the current centroids to the desired crs + """Project the current centroids to the desired crs Parameters ---------- @@ -253,15 +248,15 @@ def from_geodataframe(cls, gdf): ------ ValueError """ - if (gdf.geom_type != 'Point').any(): + if (gdf.geom_type != "Point").any(): raise ValueError( - 'The inpute geodataframe contains geometries that are not points.' + "The inpute geodataframe contains geometries that are not points." ) # Don't forget to make a copy!! # This is a bit ugly, but avoids to recompute the geometries # in the init. For large datasets this saves computation time - centroids = cls(lat=[1], lon=[1]) #make "empty" centroids + centroids = cls(lat=[1], lon=[1]) # make "empty" centroids centroids.gdf = gdf.copy(deep=True) if gdf.crs is None: centroids.gdf.set_crs(DEF_CRS, inplace=True) @@ -290,22 +285,23 @@ def from_exposures(cls, exposures): ValueError """ col_names = [ - column for column in exposures.gdf.columns + column + for column in exposures.gdf.columns if not any(pattern in column for pattern in EXP_SPECIFIC_COLS) ] # Legacy behaviour # Exposures can be without geometry column - #TODO: remove once exposures is real geodataframe with geometry. - if 'geometry' in exposures.gdf.columns: + # TODO: remove once exposures is real geodataframe with geometry. + if "geometry" in exposures.gdf.columns: gdf = exposures.gdf[col_names] return cls.from_geodataframe(gdf) - if 'latitude' in exposures.gdf.columns and 'longitude' in exposures.gdf.columns: + if "latitude" in exposures.gdf.columns and "longitude" in exposures.gdf.columns: gdf = exposures.gdf[col_names] return cls( - lat=exposures.gdf['latitude'], - lon=exposures.gdf['longitude'], + lat=exposures.gdf["latitude"], + lon=exposures.gdf["longitude"], crs=exposures.crs, **dict(gdf.items()), ) @@ -337,13 +333,17 @@ def from_pnt_bounds(cls, points_bounds, res, crs=DEF_CRS): ------- Centroids """ - height, width, transform = u_coord.pts_to_raster_meta(points_bounds, (res, -res)) - return cls.from_meta({ - "crs": crs, - "width": width, - "height": height, - "transform": transform, - }) + height, width, transform = u_coord.pts_to_raster_meta( + points_bounds, (res, -res) + ) + return cls.from_meta( + { + "crs": crs, + "width": width, + "height": height, + "transform": transform, + } + ) def append(self, centr): """Append Centroids @@ -435,15 +435,16 @@ def select(self, reg_id=None, extent=None, sel_cen=None): Sub-selection of this object """ sel_cen_bool = sel_cen - if sel_cen is not None and sel_cen.dtype.kind == 'i': + if sel_cen is not None and sel_cen.dtype.kind == "i": # if needed, convert indices to bool sel_cen_bool = np.zeros(self.size, dtype=bool) sel_cen_bool[np.unique(sel_cen)] = True - sel_cen_mask = self.select_mask(sel_cen=sel_cen_bool, reg_id=reg_id, extent=extent) + sel_cen_mask = self.select_mask( + sel_cen=sel_cen_bool, reg_id=reg_id, extent=extent + ) return Centroids.from_geodataframe(self.gdf.iloc[sel_cen_mask]) - def select_mask(self, sel_cen=None, reg_id=None, extent=None): """Create mask of selected centroids @@ -473,10 +474,13 @@ def 
select_mask(self, sel_cen=None, reg_id=None, extent=None): lon_min, lon_max, lat_min, lat_max = extent lon_max += 360 if lon_min > lon_max else 0 lon_normalized = u_coord.lon_normalize( - self.lon.copy(), center=0.5 * (lon_min + lon_max)) + self.lon.copy(), center=0.5 * (lon_min + lon_max) + ) sel_cen &= ( - (lon_normalized >= lon_min) & (lon_normalized <= lon_max) & - (self.lat >= lat_min) & (self.lat <= lat_max) + (lon_normalized >= lon_min) + & (lon_normalized <= lon_max) + & (self.lat >= lat_min) + & (self.lat <= lat_max) ) return sel_cen @@ -500,7 +504,9 @@ def plot(self, *, axis=None, figsize=(9, 13), **kwargs): ax : cartopy.mpl.geoaxes.GeoAxes instance """ if axis == None: - fig, axis = plt.subplots(figsize=figsize, subplot_kw={"projection": ccrs.PlateCarree()}) + fig, axis = plt.subplots( + figsize=figsize, subplot_kw={"projection": ccrs.PlateCarree()} + ) if type(axis) != cartopy.mpl.geoaxes.GeoAxes: raise AttributeError( f"The axis provided is of type: {type(axis)} " @@ -518,7 +524,7 @@ def plot(self, *, axis=None, figsize=(9, 13), **kwargs): self.gdf.plot(ax=axis, transform=ccrs.PlateCarree(), **kwargs) return axis - def set_region_id(self, level='country', overwrite=False): + def set_region_id(self, level="country", overwrite=False): """Set region_id as country ISO numeric code attribute for every pixel or point. Parameters @@ -531,18 +537,19 @@ def set_region_id(self, level='country', overwrite=False): only if region_id is missing (None). Default: False """ if overwrite or self.region_id is None: - LOGGER.debug('Setting region_id %s points.', str(self.size)) - if level == 'country': + LOGGER.debug("Setting region_id %s points.", str(self.size)) + if level == "country": ne_geom = self._ne_crs_geom() - self.gdf['region_id'] = u_coord.get_country_code( - ne_geom.y.values, ne_geom.x.values, + self.gdf["region_id"] = u_coord.get_country_code( + ne_geom.y.values, + ne_geom.x.values, ) else: raise NotImplementedError( - 'The region id can only be assigned for countries so far' + "The region id can only be assigned for countries so far" ) - def set_on_land(self, source='natural_earth', overwrite=False): + def set_on_land(self, source="natural_earth", overwrite=False): """Set on_land attribute for every pixel or point. Parameters @@ -556,15 +563,15 @@ def set_on_land(self, source='natural_earth', overwrite=False): only if on_land is missing (None). Default: False """ if overwrite or self.on_land is None: - LOGGER.debug('Setting on_land %s points.', str(self.lat.size)) - if source=='natural_earth': + LOGGER.debug("Setting on_land %s points.", str(self.lat.size)) + if source == "natural_earth": ne_geom = self._ne_crs_geom() - self.gdf['on_land'] = u_coord.coord_on_land( + self.gdf["on_land"] = u_coord.coord_on_land( ne_geom.y.values, ne_geom.x.values ) else: raise NotImplementedError( - 'The on land variables can only be automatically assigned using natural earth.' + "The on land variables can only be automatically assigned using natural earth." 
) def get_pixel_shapes(self, res=None, **kwargs): @@ -596,8 +603,10 @@ def get_pixel_shapes(self, res=None, **kwargs): return geom.buffer( # resolution=1, cap_style=3: squared buffers # https://shapely.readthedocs.io/en/latest/manual.html#object.buffer - distance=res / 2, resolution=1, cap_style=3, - # reset CRS (see above) + distance=res / 2, + resolution=1, + cap_style=3, + # reset CRS (see above) ).set_crs(self.crs) def get_area_pixel(self, min_resol=1.0e-8): @@ -616,10 +625,10 @@ def get_area_pixel(self, min_resol=1.0e-8): areapixels : np.array Area of each pixel in square meters. """ - LOGGER.debug('Computing pixel area for %d centroids.', self.size) + LOGGER.debug("Computing pixel area for %d centroids.", self.size) xy_pixels = self.get_pixel_shapes(min_resol=min_resol) if PROJ_CEA != xy_pixels.crs: - xy_pixels = xy_pixels.to_crs(crs={'proj': 'cea'}) + xy_pixels = xy_pixels.to_crs(crs={"proj": "cea"}) return xy_pixels.area.values def get_closest_point(self, x_lon, y_lat): @@ -689,7 +698,10 @@ def get_dist_coast(self, signed=False, precomputed=True): ) ne_geom = self._ne_crs_geom() return u_coord.dist_to_coast_nasa( - ne_geom.y.values, ne_geom.x.values, highres=True, signed=signed, + ne_geom.y.values, + ne_geom.x.values, + highres=True, + signed=signed, ) def get_meta(self, resolution=None): @@ -718,10 +730,10 @@ def get_meta(self, resolution=None): (resolution, -resolution), ) meta = { - 'crs': self.crs, - 'height': rows, - 'width': cols, - 'transform': ras_trans, + "crs": self.crs, + "height": rows, + "width": cols, + "transform": ras_trans, } return meta @@ -730,9 +742,19 @@ def get_meta(self, resolution=None): ## @classmethod - def from_raster_file(cls, file_name, src_crs=None, window=None, geometry=None, - dst_crs=None, transform=None, width=None, height=None, - resampling=rasterio.warp.Resampling.nearest, return_meta=False): + def from_raster_file( + cls, + file_name, + src_crs=None, + window=None, + geometry=None, + dst_crs=None, + transform=None, + width=None, + height=None, + resampling=rasterio.warp.Resampling.nearest, + return_meta=False, + ): """Create a new Centroids object from a raster file Select region using window or geometry. Reproject input by providing @@ -770,8 +792,16 @@ def from_raster_file(cls, file_name, src_crs=None, window=None, geometry=None, Raster meta (height, width, transform, crs). """ meta, _ = u_coord.read_raster( - file_name, [1], src_crs, window, geometry, dst_crs, - transform, width, height, resampling, + file_name, + [1], + src_crs, + window, + geometry, + dst_crs, + transform, + width, + height, + resampling, ) centr = cls.from_meta(meta) return (centr, meta) if return_meta else centr @@ -790,7 +820,7 @@ def from_meta(cls, meta): Centroid Centroids initialized for raster described by meta. """ - crs = meta['crs'] + crs = meta["crs"] lat, lon = _meta_to_lat_lon(meta) return cls(lon=lon, lat=lat, crs=crs) @@ -845,11 +875,10 @@ def write_csv(self, file_path): file_path : str, Path absolute or relative file path and name to write to """ - file_path = Path(file_path).with_suffix('.csv') - LOGGER.info('Writing %s', file_path) + file_path = Path(file_path).with_suffix(".csv") + LOGGER.info("Writing %s", file_path) self._centroids_to_dataframe().to_csv(file_path, index=False) - @classmethod def from_excel(cls, file_path, sheet_name=None): """Generate a new centroids object from an excel file with column names in var_names. 
@@ -868,7 +897,7 @@ def from_excel(cls, file_path, sheet_name=None): Centroids with data from the given excel file """ if sheet_name is None: - sheet_name = 'centroids' + sheet_name = "centroids" df = pd.read_excel(file_path, sheet_name) return cls._from_dataframe(df) @@ -880,13 +909,15 @@ def write_excel(self, file_path): file_path : str, Path absolute or relative file path and name to write to """ - file_path = Path(file_path).with_suffix('.xlsx') - LOGGER.info('Writing %s', file_path) + file_path = Path(file_path).with_suffix(".xlsx") + LOGGER.info("Writing %s", file_path) self._centroids_to_dataframe().to_excel( - file_path, sheet_name=DEF_SHEET_NAME, index=False, + file_path, + sheet_name=DEF_SHEET_NAME, + index=False, ) - def write_hdf5(self, file_name, mode='w'): + def write_hdf5(self, file_name, mode="w"): """Write data frame and metadata in hdf5 format Parameters @@ -894,7 +925,7 @@ def write_hdf5(self, file_name, mode='w'): file_name : str (path and) file name to write to. """ - LOGGER.info('Writing %s', file_name) + LOGGER.info("Writing %s", file_name) store = pd.HDFStore(file_name, mode=mode) pandas_df = pd.DataFrame(self.gdf) for col in pandas_df.columns: @@ -905,15 +936,14 @@ def write_hdf5(self, file_name, mode='w'): with warnings.catch_warnings(): warnings.simplefilter("ignore", category=pd.errors.PerformanceWarning) # Write dataframe - store.put('centroids', pandas_df) + store.put("centroids", pandas_df) - store.get_storer('centroids').attrs.metadata = { - 'crs': CRS.from_user_input(self.crs).to_wkt() + store.get_storer("centroids").attrs.metadata = { + "crs": CRS.from_user_input(self.crs).to_wkt() } store.close() - @classmethod def from_hdf5(cls, file_name): """Create a centroids object from a HDF5 file. @@ -935,17 +965,17 @@ def from_hdf5(cls, file_name): if not Path(file_name).is_file(): raise FileNotFoundError(str(file_name)) try: - with pd.HDFStore(file_name, mode='r') as store: - metadata = store.get_storer('centroids').attrs.metadata + with pd.HDFStore(file_name, mode="r") as store: + metadata = store.get_storer("centroids").attrs.metadata # in previous versions of CLIMADA and/or geopandas, # the CRS was stored in '_crs'/'crs' - crs = metadata.get('crs') - gdf = gpd.GeoDataFrame(store['centroids'], crs=crs) + crs = metadata.get("crs") + gdf = gpd.GeoDataFrame(store["centroids"], crs=crs) except TypeError: - with h5py.File(file_name, 'r') as data: - gdf = cls._gdf_from_legacy_hdf5(data.get('centroids')) + with h5py.File(file_name, "r") as data: + gdf = cls._gdf_from_legacy_hdf5(data.get("centroids")) except KeyError: - with h5py.File(file_name, 'r') as data: + with h5py.File(file_name, "r") as data: gdf = cls._gdf_from_legacy_hdf5(data) return cls.from_geodataframe(gdf) @@ -956,37 +986,37 @@ def from_hdf5(cls, file_name): @classmethod def _from_dataframe(cls, df): - if 'crs' in df.columns: - crs = df['crs'].iloc[0] + if "crs" in df.columns: + crs = df["crs"].iloc[0] else: - LOGGER.info("No 'crs' column provided in file, setting CRS to WGS84 default.") + LOGGER.info( + "No 'crs' column provided in file, setting CRS to WGS84 default." 
+ ) crs = DEF_CRS extra_values = { - col: df[col] - for col in df.columns - if col not in ['lat', 'lon', 'crs'] + col: df[col] for col in df.columns if col not in ["lat", "lon", "crs"] } - return cls(lat=df['lat'], lon=df['lon'], **extra_values, crs=crs) + return cls(lat=df["lat"], lon=df["lon"], **extra_values, crs=crs) @staticmethod def _gdf_from_legacy_hdf5(data): crs = DEF_CRS - if data.get('crs'): - crs = u_coord.to_crs_user_input(data.get('crs')[0]) - if data.get('lat') and data.get('lat').size: - latitude = np.array(data.get('lat')) - longitude = np.array(data.get('lon')) - elif data.get('latitude') and data.get('latitude').size: - latitude = np.array(data.get('latitude')) - longitude = np.array(data.get('longitude')) + if data.get("crs"): + crs = u_coord.to_crs_user_input(data.get("crs")[0]) + if data.get("lat") and data.get("lat").size: + latitude = np.array(data.get("lat")) + longitude = np.array(data.get("lon")) + elif data.get("latitude") and data.get("latitude").size: + latitude = np.array(data.get("latitude")) + longitude = np.array(data.get("longitude")) else: - centr_meta = data.get('meta') + centr_meta = data.get("meta") meta = dict() - meta['crs'] = crs + meta["crs"] = crs for key, value in centr_meta.items(): - if key != 'transform': + if key != "transform": meta[key] = value[0] else: meta[key] = rasterio.Affine(*value) @@ -994,9 +1024,9 @@ def _gdf_from_legacy_hdf5(data): extra_values = {} for centr_name in data.keys(): - if centr_name not in ('crs', 'lat', 'lon', 'meta', 'latitude', 'longitude'): + if centr_name not in ("crs", "lat", "lon", "meta", "latitude", "longitude"): values = np.array(data.get(centr_name)) - if latitude.size != 0 and values.size != 0 : + if latitude.size != 0 and values.size != 0: extra_values[centr_name] = values return gpd.GeoDataFrame( @@ -1006,10 +1036,10 @@ def _gdf_from_legacy_hdf5(data): @classmethod def _legacy_from_excel(cls, file_name, var_names): - LOGGER.info('Reading %s', file_name) + LOGGER.info("Reading %s", file_name) try: - df = pd.read_excel(file_name, var_names['sheet_name']) - df = df.rename(columns=var_names['col_name']) + df = pd.read_excel(file_name, var_names["sheet_name"]) + df = df.rename(columns=var_names["col_name"]) except KeyError as err: raise KeyError("Not existing variable: %s" % str(err)) from err return cls._from_dataframe(df) @@ -1023,10 +1053,10 @@ def _centroids_to_dataframe(self): df : DataFrame """ df = pd.DataFrame(self.gdf) - df['lon'] = self.gdf['geometry'].x - df['lat'] = self.gdf['geometry'].y - df['crs'] = CRS.from_user_input(self.crs).to_wkt() - df = df.drop(['geometry'], axis=1) + df["lon"] = self.gdf["geometry"].x + df["lat"] = self.gdf["geometry"].y + df["crs"] = CRS.from_user_input(self.crs).to_wkt() + df = df.drop(["geometry"], axis=1) return df def _ne_crs_geom(self): @@ -1045,57 +1075,77 @@ def _ne_crs_geom(self): ## @classmethod - @deprecated(details="Reading Centroids data from matlab files is not supported anymore." - "This method has been removed with climada 5.0") + @deprecated( + details="Reading Centroids data from matlab files is not supported anymore." + "This method has been removed with climada 5.0" + ) def from_mat(cls, file_name, var_names=None): """Reading Centroids data from matlab files is not supported anymore. 
This method has been removed with climada 5.0""" - raise NotImplementedError("You are suggested to use an old version of climada (<=4.*) and" - " convert the file to hdf5 format.") + raise NotImplementedError( + "You are suggested to use an old version of climada (<=4.*) and" + " convert the file to hdf5 format." + ) @staticmethod @deprecated(details="This method has been removed with climada 5.0") def from_base_grid(land=False, res_as=360, base_file=None): """This method has been removed with climada 5.0""" - raise NotImplementedError("Create the Centroids from a custom base file or from Natural" - " Earth (files are available in Climada, look up ``climada.util" - ".constants.NATEARTH_CENTROIDS`` for their location)") + raise NotImplementedError( + "Create the Centroids from a custom base file or from Natural" + " Earth (files are available in Climada, look up ``climada.util" + ".constants.NATEARTH_CENTROIDS`` for their location)" + ) @classmethod - @deprecated(details="This method will be removed in a future version." - " Simply use the constructor instead.") + @deprecated( + details="This method will be removed in a future version." + " Simply use the constructor instead." + ) def from_lat_lon(cls, lat, lon, crs="EPSG:4326"): """deprecated, use the constructor instead""" return cls(lat=lat, lon=lon, crs=crs) - @deprecated(details="This method is futile and will be removed in a future version." - " `Centroids.get_area_pixel` can be run without initialization.") + @deprecated( + details="This method is futile and will be removed in a future version." + " `Centroids.get_area_pixel` can be run without initialization." + ) def set_area_pixel(self, min_resol=1e-08, scheduler=None): """deprecated, obsolete""" - @deprecated(details="This method is futile and will be removed in a future version." - " `Centroids.get_area_pixel` can be run without initialization.") + @deprecated( + details="This method is futile and will be removed in a future version." + " `Centroids.get_area_pixel` can be run without initialization." + ) def set_area_approx(self, min_resol=1e-08): """deprecated, obsolete""" - @deprecated(details="This method is futile and will be removed in a future version." - " `Centroids.get_dist_coast` can be run without initialization.") + @deprecated( + details="This method is futile and will be removed in a future version." + " `Centroids.get_dist_coast` can be run without initialization." + ) def set_dist_coast(self, signed=False, precomputed=False, scheduler=None): """deprecated, obsolete""" - @deprecated(details="This method has no effect and will be removed in a future version." - " In the current version of climada the geometry points of a `Centroids` object" - " cannot be removed as they are the backbone of the Centroids' GeoDataFrame.") + @deprecated( + details="This method has no effect and will be removed in a future version." + " In the current version of climada the geometry points of a `Centroids` object" + " cannot be removed as they are the backbone of the Centroids' GeoDataFrame." + ) def empty_geometry_points(self): - """"deprecated, has no effect, which may be unexpected: no geometry points will be removed, + """ "deprecated, has no effect, which may be unexpected: no geometry points will be removed, the centroids' GeoDataFrame is built on them! """ - @deprecated(details="This method has no effect and will be removed in a future version.") + @deprecated( + details="This method has no effect and will be removed in a future version." 
+ ) def set_meta_to_lat_lon(self): """deprecated, has no effect""" - @deprecated(details="This method has no effect and will be removed in a future version.") + @deprecated( + details="This method has no effect and will be removed in a future version." + ) def set_lat_lon_to_meta(self, min_resol=1e-08): """deprecated, has no effect""" @@ -1115,5 +1165,7 @@ def _meta_to_lat_lon(meta): longitudes : np.ndarray Longitudinal coordinates of pixel centers. """ - xgrid, ygrid = u_coord.raster_to_meshgrid(meta['transform'], meta['width'], meta['height']) + xgrid, ygrid = u_coord.raster_to_meshgrid( + meta["transform"], meta["width"], meta["height"] + ) return ygrid.ravel(), xgrid.ravel() diff --git a/climada/hazard/centroids/test/test_centr.py b/climada/hazard/centroids/test/test_centr.py index 745e544d5..a41060bae 100644 --- a/climada/hazard/centroids/test/test_centr.py +++ b/climada/hazard/centroids/test/test_centr.py @@ -18,63 +18,66 @@ Test CentroidsVector and CentroidsRaster classes. """ + +import itertools import unittest -from unittest.mock import patch from pathlib import Path +from unittest.mock import patch +import geopandas as gpd import numpy as np import pandas as pd -import geopandas as gpd -import shapely -import itertools import rasterio +import shapely +from cartopy.io import shapereader from pyproj.crs.crs import CRS +from rasterio import Affine from rasterio.windows import Window from shapely.geometry.point import Point -from cartopy.io import shapereader - +import climada.util.coordinates as u_coord from climada import CONFIG +from climada.entity import Exposures from climada.hazard.centroids.centr import Centroids from climada.util.constants import DEF_CRS, HAZ_DEMO_FL -import climada.util.coordinates as u_coord -from climada.entity import Exposures -from rasterio import Affine - DATA_DIR = CONFIG.hazard.test_data.dir() # Note: the coordinates are not directly on the cities, the region id and on land # otherwise do not work correctly. It is only a close point. 
-LATLON = np.array([ - [-21.1736, -175.1883], # Tonga, Nuku'alofa, TON, 776 - [-18.133, 178.433], # Fidji, Suva, FJI, 242 IN WATER IN NATURAL EARTH - [-38.4689, 177.8642], # New-Zealand, Te Karaka, NZL, 554 - [69.6833, 18.95], # Norway, Tromso, NOR, 578 IN WATER IN NATURAL EARTH - [78.84422, 20.82842], # Norway, Svalbard, NOR, 578 - [1, 1], # Ocean, 0 (0,0 is onland in Natural earth for testing reasons) - [-77.85, 166.6778], # Antarctica, McMurdo station, ATA, 010 - [-0.25, -78.5833] # Ecuador, Quito, ECU, 218 -]) - -VEC_LAT = LATLON[:,0] -VEC_LON = LATLON[:,1] +LATLON = np.array( + [ + [-21.1736, -175.1883], # Tonga, Nuku'alofa, TON, 776 + [-18.133, 178.433], # Fidji, Suva, FJI, 242 IN WATER IN NATURAL EARTH + [-38.4689, 177.8642], # New-Zealand, Te Karaka, NZL, 554 + [69.6833, 18.95], # Norway, Tromso, NOR, 578 IN WATER IN NATURAL EARTH + [78.84422, 20.82842], # Norway, Svalbard, NOR, 578 + [1, 1], # Ocean, 0 (0,0 is onland in Natural earth for testing reasons) + [-77.85, 166.6778], # Antarctica, McMurdo station, ATA, 010 + [-0.25, -78.5833], # Ecuador, Quito, ECU, 218 + ] +) + +VEC_LAT = LATLON[:, 0] +VEC_LON = LATLON[:, 1] ON_LAND = np.array([True, False, True, False, True, False, True, True]) REGION_ID = np.array([776, 0, 554, 0, 578, 0, 10, 218]) -TEST_CRS = 'EPSG:4326' -ALT_CRS = 'epsg:32632' # UTM zone 32N (Central Europe, 6-12°E) +TEST_CRS = "EPSG:4326" +ALT_CRS = "epsg:32632" # UTM zone 32N (Central Europe, 6-12°E) + class TestCentroidsData(unittest.TestCase): - """ Test class for initialisation and value based creation of Centroids objects""" + """Test class for initialisation and value based creation of Centroids objects""" + def setUp(self): self.lat = np.array([-10, 0, 10]) self.lon = np.array([-170, -150, -130]) - self.region_id = np.array([1, 2, 3]) + self.region_id = np.array([1, 2, 3]) self.on_land = np.array([True, False, False]) - self.crs = 'epsg:32632' - self.centr = Centroids(lat=VEC_LAT,lon=VEC_LON) + self.crs = "epsg:32632" + self.centr = Centroids(lat=VEC_LAT, lon=VEC_LON) def test_centroids_check_pass(self): """Test vector data in Centroids""" @@ -86,13 +89,15 @@ def test_centroids_check_pass(self): [VEC_LON.min(), VEC_LAT.min(), VEC_LON.max(), VEC_LAT.max()], ) - self.assertIsInstance(centr,Centroids) + self.assertIsInstance(centr, Centroids) self.assertIsInstance(centr.lat, np.ndarray) self.assertIsInstance(centr.lon, np.ndarray) self.assertIsInstance(centr.coord, np.ndarray) self.assertTrue(np.array_equal(centr.lat, VEC_LAT)) self.assertTrue(np.array_equal(centr.lon, VEC_LON)) - self.assertTrue(np.array_equal(centr.coord, np.array([VEC_LAT, VEC_LON]).transpose())) + self.assertTrue( + np.array_equal(centr.coord, np.array([VEC_LAT, VEC_LON]).transpose()) + ) self.assertEqual(centr.size, VEC_LON.size) def test_init_pass(self): @@ -104,32 +109,44 @@ def test_init_pass(self): self.assertTrue(u_coord.equal_crs(self.centr.crs, DEF_CRS)) # Creating Centroids with additional attributes - centroids = Centroids(lat=VEC_LAT, lon=VEC_LON, - region_id=REGION_ID, on_land=ON_LAND) + centroids = Centroids( + lat=VEC_LAT, lon=VEC_LON, region_id=REGION_ID, on_land=ON_LAND + ) # Checking additional attributes np.testing.assert_array_equal(centroids.region_id, REGION_ID) np.testing.assert_array_equal(centroids.on_land, ON_LAND) def test_init_defaults(self): - ''' Checking default values for Centroids''' - centroids = Centroids(lat=VEC_LAT,lon=VEC_LON) + """Checking default values for Centroids""" + centroids = Centroids(lat=VEC_LAT, lon=VEC_LON) # Checking defaults: nothing set 
for region_id, on_land self.assertFalse(centroids.region_id) self.assertFalse(centroids.on_land) # Guarantee a no-default TypeError for lon/lat - with self.assertRaises(TypeError): Centroids() + with self.assertRaises(TypeError): + Centroids() def test_init_properties(self): - """ Guarantee that Centroid objects have at least the properties: """ - properties = ['gdf','lon','lat','geometry', - 'on_land','region_id','crs', - 'shape','size','total_bounds','coord'] - centroids = Centroids(lat=[],lon=[]) - [self.assertTrue(hasattr(centroids,prop)) for prop in properties] + """Guarantee that Centroid objects have at least the properties:""" + properties = [ + "gdf", + "lon", + "lat", + "geometry", + "on_land", + "region_id", + "crs", + "shape", + "size", + "total_bounds", + "coord", + ] + centroids = Centroids(lat=[], lon=[]) + [self.assertTrue(hasattr(centroids, prop)) for prop in properties] def test_init_kwargs(self): - """ Test default crs and kwargs forwarding """ + """Test default crs and kwargs forwarding""" centr = Centroids( lat=VEC_LAT, lon=VEC_LON, @@ -142,32 +159,36 @@ def test_init_kwargs(self): # make sure kwargs are properly forwarded to centroids.gdf np.random.seed(1000) - randommask = np.random.choice([True,False],size=len(VEC_LON)) - centroids = Centroids(lat=VEC_LAT,lon=VEC_LON,masked=randommask,ones=1) - self.assertTrue(hasattr(centroids.gdf,'masked')) - self.assertTrue(hasattr(centroids.gdf,'ones')) - np.testing.assert_array_equal(randommask,centroids.gdf.masked) - self.assertEqual(sum(centroids.gdf.ones),len(VEC_LON)) + randommask = np.random.choice([True, False], size=len(VEC_LON)) + centroids = Centroids(lat=VEC_LAT, lon=VEC_LON, masked=randommask, ones=1) + self.assertTrue(hasattr(centroids.gdf, "masked")) + self.assertTrue(hasattr(centroids.gdf, "ones")) + np.testing.assert_array_equal(randommask, centroids.gdf.masked) + self.assertEqual(sum(centroids.gdf.ones), len(VEC_LON)) def test_from_meta_pass(self): - expected_lon = np.array([-30.0, -20.0, -10.0]*3) - expected_lat = np.repeat([30.0, 20.0, 10.0],3) + expected_lon = np.array([-30.0, -20.0, -10.0] * 3) + expected_lat = np.repeat([30.0, 20.0, 10.0], 3) # Check metadata meta = dict( crs=DEF_CRS, height=3, width=3, transform=Affine( - 10, 0, -35, - 0, -10, 35, + 10, + 0, + -35, + 0, + -10, + 35, ), ) centroids = Centroids.from_meta(meta) # check created object - np.testing.assert_array_equal(centroids.lon,expected_lon) - np.testing.assert_array_equal(centroids.lat,expected_lat) - self.assertEqual(centroids.crs,DEF_CRS) + np.testing.assert_array_equal(centroids.lon, expected_lon) + np.testing.assert_array_equal(centroids.lat, expected_lat) + self.assertEqual(centroids.crs, DEF_CRS) # generally we assume that from_meta does not set region_ids and on_land flags self.assertFalse(centroids.region_id) self.assertFalse(centroids.on_land) @@ -175,13 +196,17 @@ def test_from_meta_pass(self): def test_from_meta(self): """Test from_meta""" meta_ref = { - 'width': 10, - 'height': 8, - 'transform': rasterio.Affine( - 0.6, 0, -0.1, - 0, -0.6, 0.3, + "width": 10, + "height": 8, + "transform": rasterio.Affine( + 0.6, + 0, + -0.1, + 0, + -0.6, + 0.3, ), - 'crs': DEF_CRS, + "crs": DEF_CRS, } lon_ref = np.array([0.2, 0.8, 1.4, 2.0, 2.6, 3.2, 3.8, 4.4, 5.0, 5.6]) @@ -195,22 +220,24 @@ def test_from_meta(self): self.assertEqual(meta_ref["height"], meta["height"]) np.testing.assert_allclose(meta_ref["transform"], meta["transform"]) - centr = Centroids.from_meta( - Centroids(lat=lat_ref, lon=lon_ref).get_meta() - ) + centr = 
Centroids.from_meta(Centroids(lat=lat_ref, lon=lon_ref).get_meta()) np.testing.assert_allclose(lat_ref, centr.lat) np.testing.assert_allclose(lon_ref, centr.lon) # `get_meta` enforces same resolution in x and y, and y-coordinates are decreasing. # For other cases, `from_meta` needs to be checked manually. meta_ref = { - 'width': 4, - 'height': 5, - 'transform': rasterio.Affine( - 0.5, 0, 0.2, - 0, 0.6, -0.7, + "width": 4, + "height": 5, + "transform": rasterio.Affine( + 0.5, + 0, + 0.2, + 0, + 0.6, + -0.7, ), - 'crs': DEF_CRS, + "crs": DEF_CRS, } lon_ref = np.array([0.45, 0.95, 1.45, 1.95]) lat_ref = np.array([-0.4, 0.2, 0.8, 1.4, 2.0]) @@ -220,7 +247,6 @@ def test_from_meta(self): np.testing.assert_allclose(lat_ref, centr.lat) np.testing.assert_allclose(lon_ref, centr.lon) - def test_from_pnt_bounds(self): """Test from_pnt_bounds""" width, height = 26, 51 @@ -230,21 +256,25 @@ def test_from_pnt_bounds(self): self.assertTrue(u_coord.equal_crs(centr.crs, DEF_CRS)) self.assertEqual(centr.size, width * height) np.testing.assert_allclose([5.0, 5.2, 5.0], centr.lon[[0, 1, width]], atol=0.1) - np.testing.assert_allclose([10.0, 10.0, 9.8], centr.lat[[0, 1, width]], atol=0.1) + np.testing.assert_allclose( + [10.0, 10.0, 9.8], centr.lat[[0, 1, width]], atol=0.1 + ) # generally we assume that from_meta does not set region_ids and on_land flags self.assertFalse(centr.region_id) self.assertFalse(centr.on_land) + class TestCentroidsTransformation(unittest.TestCase): - """ Test class for coordinate transformations of Centroid objects + """Test class for coordinate transformations of Centroid objects and modifications using set_ methods""" + def setUp(self): self.lat = np.array([-10, 0, 10]) self.lon = np.array([-170, -150, -130]) - self.region_id = np.array([1, 2, 3]) + self.region_id = np.array([1, 2, 3]) self.on_land = np.array([True, False, False]) - self.crs = 'epsg:32632' - self.centr = Centroids(lat=VEC_LAT,lon=VEC_LON,crs=TEST_CRS) + self.crs = "epsg:32632" + self.centr = Centroids(lat=VEC_LAT, lon=VEC_LON, crs=TEST_CRS) def test_to_default_crs(self): # Creating Centroids with non-default CRS and @@ -255,29 +285,33 @@ def test_to_default_crs(self): # make sure CRS is DEF_CRS after transformation self.assertTrue(u_coord.equal_crs(centroids.crs, DEF_CRS)) # Checking that modification actually took place - [self.assertNotEqual(x-y,0) for x,y in zip(centroids.lon,VEC_LON)] - [self.assertNotEqual(x-y,0) for x,y in zip(centroids.lat,VEC_LAT) if not x == 0] + [self.assertNotEqual(x - y, 0) for x, y in zip(centroids.lon, VEC_LON)] + [ + self.assertNotEqual(x - y, 0) + for x, y in zip(centroids.lat, VEC_LAT) + if not x == 0 + ] def test_to_default_crs_not_inplace(self): centroids = Centroids(lat=VEC_LAT, lon=VEC_LON, crs=ALT_CRS) newcentr = centroids.to_default_crs(inplace=False) # make sure that new object has been created - self.assertIsNot(centroids,newcentr) - self.assertIsInstance(newcentr,Centroids) + self.assertIsNot(centroids, newcentr) + self.assertIsInstance(newcentr, Centroids) ## compare with inplace transformation centroids.to_default_crs() - np.testing.assert_array_equal(centroids.lat,newcentr.lat) - np.testing.assert_array_equal(centroids.lon,newcentr.lon) + np.testing.assert_array_equal(centroids.lat, newcentr.lat) + np.testing.assert_array_equal(centroids.lon, newcentr.lon) def test_to_crs(self): # Creating Centroids with default CRS centroids = Centroids(lat=self.lat, lon=self.lon, crs=DEF_CRS) # Transforming to another CRS - new_crs = 'epsg:3857' + new_crs = "epsg:3857" 
transformed_centroids = centroids.to_crs(new_crs) - self.assertIsNot(centroids,transformed_centroids) + self.assertIsNot(centroids, transformed_centroids) self.assertFalse(centroids == transformed_centroids) # Checking CRS string after transformation @@ -285,22 +319,22 @@ def test_to_crs(self): self.assertTrue(u_coord.equal_crs(centroids.crs, DEF_CRS)) # Checking correctness of transformation - expected_lat = np.array([-1118889.974858, 0., 1118889.9748585]) + expected_lat = np.array([-1118889.974858, 0.0, 1118889.9748585]) expected_lon = np.array([-18924313.434857, -16697923.618991, -14471533.803126]) np.testing.assert_array_almost_equal(transformed_centroids.lat, expected_lat) np.testing.assert_array_almost_equal(transformed_centroids.lon, expected_lon) def test_to_crs_inplace(self): - centroids = Centroids(lat=self.lat,lon=self.lon,crs=DEF_CRS) - new_crs = 'epsg:3857' + centroids = Centroids(lat=self.lat, lon=self.lon, crs=DEF_CRS) + new_crs = "epsg:3857" transformed_centroids = centroids.to_crs(new_crs) # inplace transforming to another CRS - centroids.to_crs(new_crs,inplace=True) + centroids.to_crs(new_crs, inplace=True) self.assertTrue(centroids == transformed_centroids) - expected_lat = np.array([-1118889.974858, 0., 1118889.9748585]) + expected_lat = np.array([-1118889.974858, 0.0, 1118889.9748585]) expected_lon = np.array([-18924313.434857, -16697923.618991, -14471533.803126]) np.testing.assert_array_almost_equal(centroids.lat, expected_lat) np.testing.assert_array_almost_equal(centroids.lon, expected_lon) @@ -321,14 +355,14 @@ def test_set_on_land_pass(self): self.centr.set_on_land() np.testing.assert_array_equal(self.centr.on_land, ON_LAND) - centroids = Centroids(lat=VEC_LAT, lon=VEC_LON, on_land='natural_earth') + centroids = Centroids(lat=VEC_LAT, lon=VEC_LON, on_land="natural_earth") np.testing.assert_array_equal(centroids.on_land, ON_LAND) def test_set_on_land_implementationerror(self): - centroids = Centroids(lat=self.lat,lon=self.lon) + centroids = Centroids(lat=self.lat, lon=self.lon) with self.assertRaises(NotImplementedError): - centroids.set_on_land(source='satellite',overwrite=True) + centroids.set_on_land(source="satellite", overwrite=True) def test_set_on_land_raster(self): """Test set_on_land""" @@ -341,7 +375,7 @@ def test_set_region_id_pass(self): self.centr.set_region_id() np.testing.assert_array_equal(self.centr.region_id, REGION_ID) - centroids = Centroids(lat=VEC_LAT, lon=VEC_LON, region_id='country') + centroids = Centroids(lat=VEC_LAT, lon=VEC_LON, region_id="country") np.testing.assert_array_equal(centroids.region_id, REGION_ID) def test_set_region_id_raster(self): @@ -352,10 +386,10 @@ def test_set_region_id_raster(self): self.assertTrue(np.array_equal(np.unique(centr_ras.region_id), np.array([862]))) def test_set_region_id_implementationerror(self): - centroids = Centroids(lat=self.lat,lon=self.lon) + centroids = Centroids(lat=self.lat, lon=self.lon) with self.assertRaises(NotImplementedError): - centroids.set_region_id(level='continent',overwrite=True) + centroids.set_region_id(level="continent", overwrite=True) def test_set_geometry_points_pass(self): """Test set_geometry_points""" @@ -373,10 +407,10 @@ class TestCentroidsReaderWriter(unittest.TestCase): def test_from_csv_def_crs(self): """Read a centroid csv file correctly and use default CRS.""" # Create temporary csv file containing centroids data - tmpfile = Path('test_write_csv.csv') + tmpfile = Path("test_write_csv.csv") lat = np.array([0, 90, -90, 0, 0]) lon = np.array([0, 0, 0, 180, -180]) - df 
= pd.DataFrame({'lat': lat, 'lon': lon}) + df = pd.DataFrame({"lat": lat, "lon": lon}) df.to_csv(tmpfile, index=False) # Read centroids using from_csv method @@ -392,13 +426,15 @@ def test_from_csv_def_crs(self): def test_from_csv(self): """Read a centroid csv file which contains CRS information.""" - tmpfile = Path('test_write_csv.csv') + tmpfile = Path("test_write_csv.csv") lat = np.array([0, 20048966.1, -20048966, 0, 0]) lon = np.array([0, 0, 0, 20037508.34, -20037508.34]) region_id = np.array([1, 2, 3, 4, 5]) on_land = np.array([True, False, False, True, True]) - df = pd.DataFrame({'lat': lat, 'lon': lon, 'region_id': region_id, 'on_land': on_land}) - df['crs'] = CRS.from_user_input(3857).to_wkt() + df = pd.DataFrame( + {"lat": lat, "lon": lon, "region_id": region_id, "on_land": on_land} + ) + df["crs"] = CRS.from_user_input(3857).to_wkt() df.to_csv(tmpfile, index=False) # Read centroids using from_csv method @@ -407,7 +443,7 @@ def test_from_csv(self): # Test attributes np.testing.assert_array_equal(centroids.lat, lat) np.testing.assert_array_equal(centroids.lon, lon) - self.assertEqual(centroids.crs, 'epsg:3857') + self.assertEqual(centroids.crs, "epsg:3857") np.testing.assert_array_equal(centroids.region_id, region_id) np.testing.assert_array_equal(centroids.on_land, on_land) @@ -417,12 +453,14 @@ def test_from_csv(self): def test_write_read_csv(self): """Write and read a Centroids CSV file correctly.""" # Create Centroids with latitude and longitude arrays - tmpfile = Path('test_write_csv.csv') + tmpfile = Path("test_write_csv.csv") lat = np.array([10.0, 20.0, 30.0]) lon = np.array([-10.0, -20.0, -30.0]) region_id = np.array([1, 2, 3]) on_land = np.array([True, False, False]) - centroids_out = Centroids(lat=lat, lon=lon, region_id=region_id, on_land=on_land) + centroids_out = Centroids( + lat=lat, lon=lon, region_id=region_id, on_land=on_land + ) # Write CSV file from Centroids using write_csv centroids_out.write_csv(tmpfile) @@ -443,11 +481,11 @@ def test_write_read_csv(self): def test_from_excel_def_crs(self): """Read a centroid excel file correctly and use default CRS.""" # Create temporary excel file containing centroids data - tmpfile = Path('test_write_excel.xlsx') + tmpfile = Path("test_write_excel.xlsx") lat = np.array([0, 90, -90, 0, 0]) lon = np.array([0, 0, 0, 180, -180]) - df = pd.DataFrame({'lat': lat, 'lon': lon}) - df.to_excel(tmpfile, sheet_name='centroids', index=False) + df = pd.DataFrame({"lat": lat, "lon": lon}) + df.to_excel(tmpfile, sheet_name="centroids", index=False) # Read centroids using from_excel method centroids = Centroids.from_excel(file_path=tmpfile) @@ -463,14 +501,16 @@ def test_from_excel_def_crs(self): def test_from_excel(self): """Read a centroid excel file correctly which contains CRS information.""" # Create temporary excel file containing centroids data - tmpfile = Path('test_write_excel.xlsx') + tmpfile = Path("test_write_excel.xlsx") lat = np.array([0, 20048966.1, -20048966, 0, 0]) lon = np.array([0, 0, 0, 20037508.34, -20037508.34]) region_id = np.array([1, 2, 3, 4, 5]) on_land = np.array([True, False, False, True, True]) - df = pd.DataFrame({'lat': lat, 'lon': lon, 'region_id': region_id, 'on_land': on_land}) - df['crs'] = CRS.from_user_input(3857).to_wkt() - df.to_excel(tmpfile, sheet_name='centroids', index=False) + df = pd.DataFrame( + {"lat": lat, "lon": lon, "region_id": region_id, "on_land": on_land} + ) + df["crs"] = CRS.from_user_input(3857).to_wkt() + df.to_excel(tmpfile, sheet_name="centroids", index=False) # Read centroids 
using from_excel method centroids = Centroids.from_excel(file_path=tmpfile) @@ -478,7 +518,7 @@ def test_from_excel(self): # test attributes np.testing.assert_array_equal(centroids.lat, lat) np.testing.assert_array_equal(centroids.lon, lon) - self.assertEqual(centroids.crs, 'epsg:3857') + self.assertEqual(centroids.crs, "epsg:3857") np.testing.assert_array_equal(centroids.region_id, region_id) np.testing.assert_array_equal(centroids.on_land, on_land) @@ -488,12 +528,14 @@ def test_from_excel(self): def test_write_read_excel(self): """Write and read a Centroids Excel file correctly.""" # Create Centroids with latitude and longitude arrays - tmpfile = Path('test_write_excel.xlsx') + tmpfile = Path("test_write_excel.xlsx") lat = np.array([10.0, 20.0, 30.0]) lon = np.array([-10.0, -20.0, -30.0]) region_id = np.array([1, 2, 3]) on_land = np.array([True, False, False]) - centroids_out = Centroids(lat=lat, lon=lon, region_id=region_id, on_land=on_land) + centroids_out = Centroids( + lat=lat, lon=lon, region_id=region_id, on_land=on_land + ) # Write Excel file from Centroids using write_csv centroids_out.write_excel(tmpfile) @@ -517,20 +559,27 @@ def test_from_raster_file(self): o_lat, o_lon = (10.42822096697894, -69.33714959699981) res_lat, res_lon = (-0.009000000000000341, 0.009000000000000341) - centr_ras = Centroids.from_raster_file(HAZ_DEMO_FL, window=Window(0, 0, width, height)) + centr_ras = Centroids.from_raster_file( + HAZ_DEMO_FL, window=Window(0, 0, width, height) + ) self.assertTrue(u_coord.equal_crs(centr_ras.crs, DEF_CRS)) self.assertEqual(centr_ras.size, width * height) np.testing.assert_allclose( - [-69.333, -69.324, -69.333], centr_ras.lon[[0, 1, width]], atol=0.001, + [-69.333, -69.324, -69.333], + centr_ras.lon[[0, 1, width]], + atol=0.001, ) np.testing.assert_allclose( - [10.424, 10.424, 10.415], centr_ras.lat[[0, 1, width]], atol=0.001, + [10.424, 10.424, 10.415], + centr_ras.lat[[0, 1, width]], + atol=0.001, ) def test_from_vector_file(self): """Test from_vector_file and values_from_vector_files""" - shp_file = shapereader.natural_earth(resolution='110m', category='cultural', - name='populated_places_simple') + shp_file = shapereader.natural_earth( + resolution="110m", category="cultural", name="populated_places_simple" + ) centr = Centroids.from_vector_file(shp_file, dst_crs=DEF_CRS) self.assertTrue(u_coord.equal_crs(centr.crs, DEF_CRS)) @@ -549,35 +598,40 @@ def test_from_geodataframe(self): lon = np.arange(-50, -40) region_id = np.arange(1, 11) on_land = np.ones(10, dtype=bool) - extra = np.full(10, 'a') - - gdf = gpd.GeoDataFrame({ - 'geometry': gpd.points_from_xy(lon, lat), - 'region_id': region_id, - 'on_land': on_land, - 'extra': extra, - }, crs=crs) + extra = np.full(10, "a") + + gdf = gpd.GeoDataFrame( + { + "geometry": gpd.points_from_xy(lon, lat), + "region_id": region_id, + "on_land": on_land, + "extra": extra, + }, + crs=crs, + ) centroids = Centroids.from_geodataframe(gdf) for name, array in zip( - ['lat', 'lon', 'region_id', 'on_land'], + ["lat", "lon", "region_id", "on_land"], [lat, lon, region_id, on_land], ): np.testing.assert_array_equal(array, getattr(centroids, name)) - self.assertTrue('extra' in centroids.gdf.columns) + self.assertTrue("extra" in centroids.gdf.columns) self.assertTrue(u_coord.equal_crs(centroids.crs, crs)) def test_from_geodataframe_invalid(self): # Creating an invalid GeoDataFrame with geometries that are not points - invalid_geometry_gdf = gpd.GeoDataFrame({ - 'geometry': [ - shapely.Point((2,2)), - shapely.Polygon([(0, 0), (1, 1), (1, 
0), (0, 0)]), - shapely.LineString([(0, 1), (1, 0)]), - ], - }) + invalid_geometry_gdf = gpd.GeoDataFrame( + { + "geometry": [ + shapely.Point((2, 2)), + shapely.Polygon([(0, 0), (1, 1), (1, 0), (0, 0)]), + shapely.LineString([(0, 1), (1, 0)]), + ], + } + ) with self.assertRaises(ValueError): # Trying to create Centroids from invalid GeoDataFrame @@ -594,14 +648,16 @@ def test_from_exposures_with_region_id(self): value = np.array([1, 1, 1]) region_id = np.array([1, 2, 3]) on_land = [False, True, True] - crs = 'epsg:32632' - gdf = gpd.GeoDataFrame({ - 'latitude': lat, - 'longitude': lon, - 'value': value, - 'region_id': region_id, - 'on_land': on_land, - }) + crs = "epsg:32632" + gdf = gpd.GeoDataFrame( + { + "latitude": lat, + "longitude": lon, + "value": value, + "region_id": region_id, + "on_land": on_land, + } + ) exposures = Exposures(gdf, crs=crs) # Extract centroids from exposures @@ -612,7 +668,7 @@ def test_from_exposures_with_region_id(self): np.testing.assert_array_equal(centroids.lon, lon) np.testing.assert_array_equal(centroids.region_id, region_id) np.testing.assert_array_equal(centroids.on_land, on_land) - self.assertFalse(np.isin('value', centroids.gdf.columns)) + self.assertFalse(np.isin("value", centroids.gdf.columns)) self.assertEqual(centroids.crs, crs) def test_from_exposures_without_region_id(self): @@ -627,13 +683,15 @@ def test_from_exposures_without_region_id(self): value = np.array([1, 1, 1]) impf_TC = np.array([1, 2, 3]) centr_TC = np.array([1, 2, 3]) - gdf = gpd.GeoDataFrame({ - 'latitude': lat, - 'longitude': lon, - 'value': value, - 'impf_tc': impf_TC, - 'centr_TC': centr_TC, - }) + gdf = gpd.GeoDataFrame( + { + "latitude": lat, + "longitude": lon, + "value": value, + "impf_tc": impf_TC, + "centr_TC": centr_TC, + } + ) exposures = Exposures(gdf) # Extract centroids from exposures @@ -646,19 +704,18 @@ def test_from_exposures_without_region_id(self): self.assertEqual(centroids.region_id, None) self.assertEqual(centroids.on_land, None) np.testing.assert_equal( - np.isin(['value', 'impf_tc', 'centr_tc'], centroids.gdf.columns), + np.isin(["value", "impf_tc", "centr_tc"], centroids.gdf.columns), False, ) def test_from_exposure_exceptions(self): - gdf = gpd.GeoDataFrame({ - }) + gdf = gpd.GeoDataFrame({}) exposures = Exposures(gdf) with self.assertRaises(ValueError): Centroids.from_exposures(exposures) def test_read_write_hdf5(self): - tmpfile = Path('test_write_hdf5.out.hdf5') + tmpfile = Path("test_write_hdf5.out.hdf5") crs = DEF_CRS centroids_w = Centroids(lat=VEC_LAT, lon=VEC_LON, crs=crs) centroids_w.write_hdf5(tmpfile) @@ -674,8 +731,10 @@ def test_from_hdf5_nonexistent_file(self): with self.assertRaises(FileNotFoundError): Centroids.from_hdf5(file_name) + class TestCentroidsMethods(unittest.TestCase): """Test Centroids methods""" + def setUp(self): self.centr = Centroids(lat=VEC_LAT, lon=VEC_LON, crs=TEST_CRS) @@ -686,7 +745,7 @@ def test_select_pass(self): centr = Centroids(lat=VEC_LAT, lon=VEC_LON, region_id=region_id) fil_centr = centr.select(reg_id=10) - self.assertIsInstance(fil_centr,Centroids) + self.assertIsInstance(fil_centr, Centroids) self.assertEqual(fil_centr.size, 2) self.assertEqual(fil_centr.lat[0], VEC_LAT[2]) self.assertEqual(fil_centr.lat[1], VEC_LAT[4]) @@ -702,7 +761,7 @@ def test_select_extent_pass(self): region_id=np.zeros(5), ) ext_centr = centr.select(extent=[-175, -170, -5, 5]) - self.assertIsInstance(ext_centr,Centroids) + self.assertIsInstance(ext_centr, Centroids) np.testing.assert_array_equal(ext_centr.lon, np.array([-175, 
-170])) np.testing.assert_array_equal(ext_centr.lat, np.array([-3, 0])) @@ -719,7 +778,9 @@ def test_select_extent_pass(self): def test_append_pass(self): """Append points""" centr = self.centr - centr_bis = Centroids(lat=np.array([1, 2, 3]), lon=np.array([4, 5, 6]), crs=DEF_CRS) + centr_bis = Centroids( + lat=np.array([1, 2, 3]), lon=np.array([4, 5, 6]), crs=DEF_CRS + ) with self.assertRaises(ValueError): # Different crs centr_bis.to_crs(ALT_CRS).append(centr) @@ -734,16 +795,20 @@ def test_append_pass(self): self.assertTrue(np.array_equal(centr_bis.lon[3:], centr.lon)) def test_append(self): - lat2,lon2 = np.array([6,7,8,9,10]),np.array([6,7,8,9,10]) - newcentr = Centroids(lat=lat2,lon=lon2) + lat2, lon2 = np.array([6, 7, 8, 9, 10]), np.array([6, 7, 8, 9, 10]) + newcentr = Centroids(lat=lat2, lon=lon2) newcentr.append(self.centr) - self.assertTrue(newcentr.size == len(self.centr.lon)+len(lon2)) - np.testing.assert_array_equal(newcentr.lon,np.concatenate([lon2,self.centr.lon])) - np.testing.assert_array_equal(newcentr.lat,np.concatenate([lat2,self.centr.lat])) + self.assertTrue(newcentr.size == len(self.centr.lon) + len(lon2)) + np.testing.assert_array_equal( + newcentr.lon, np.concatenate([lon2, self.centr.lon]) + ) + np.testing.assert_array_equal( + newcentr.lat, np.concatenate([lat2, self.centr.lat]) + ) def test_append_dif_crs(self): - lat2,lon2 = np.array([0,0,1,2,3,4,5]),np.array([0,0,1,2,3,4,5]) - centr2 = Centroids(lat=lat2,lon=lon2,crs='epsg:3857') + lat2, lon2 = np.array([0, 0, 1, 2, 3, 4, 5]), np.array([0, 0, 1, 2, 3, 4, 5]) + centr2 = Centroids(lat=lat2, lon=lon2, crs="epsg:3857") # appending differing crs is not provided/possible with self.assertRaises(ValueError): @@ -758,26 +823,25 @@ def test_remove_duplicate_pass(self): ) self.assertTrue(centr.gdf.shape[0] == 2 * self.centr.gdf.shape[0]) rem_centr = Centroids.remove_duplicate_points(centr) - self.assertIsInstance(rem_centr,Centroids) + self.assertIsInstance(rem_centr, Centroids) self.assertTrue(self.centr == rem_centr) - def test_remove_duplicates_dif_on_land(self): ### We currently expect that only the geometry of the gdf defines duplicates. ### If one geometry is duplicated with differences in other attributes e.g. on_land ### they get removed nevertheless. 
Only the first occurrence will be part of the new object ### this test is only here to guarantee this behaviour - lat, lon = np.array([0,0,1,2,3,4,5]),np.array([0,0,1,2,3,4,5]) - centr = Centroids(lat=lat,lon=lon,on_land=[True]+[False]*6) + lat, lon = np.array([0, 0, 1, 2, 3, 4, 5]), np.array([0, 0, 1, 2, 3, 4, 5]) + centr = Centroids(lat=lat, lon=lon, on_land=[True] + [False] * 6) centr_subset = centr.remove_duplicate_points() # new object created self.assertFalse(centr == centr_subset) - self.assertIsNot(centr,centr_subset) + self.assertIsNot(centr, centr_subset) # duplicates removed - self.assertTrue(centr_subset.size == len(lat)-1) - self.assertTrue(np.all(centr_subset.shape == (len(lat)-1,len(lon)-1))) - np.testing.assert_array_equal(centr_subset.lon,np.unique(lon)) - np.testing.assert_array_equal(centr_subset.lat,np.unique(lat)) + self.assertTrue(centr_subset.size == len(lat) - 1) + self.assertTrue(np.all(centr_subset.shape == (len(lat) - 1, len(lon) - 1))) + np.testing.assert_array_equal(centr_subset.lon, np.unique(lon)) + np.testing.assert_array_equal(centr_subset.lat, np.unique(lat)) # only first on_land (True) is selected self.assertTrue(centr_subset.on_land[0]) @@ -791,17 +855,17 @@ def test_union(self): cent2 = Centroids(lat=lat2, lon=lon2, on_land=on_land2) lat3, lon3 = np.array([-1, -2]), np.array([1, 2]) - cent3 = Centroids(lat=lat3,lon=lon3) + cent3 = Centroids(lat=lat3, lon=lon3) cent = cent1.union(cent2) - np.testing.assert_array_equal(cent.lat, np.concatenate([lat,lat2])) - np.testing.assert_array_equal(cent.lon, np.concatenate([lon,lon2])) - np.testing.assert_array_equal(cent.on_land, np.concatenate([on_land,on_land2])) + np.testing.assert_array_equal(cent.lat, np.concatenate([lat, lat2])) + np.testing.assert_array_equal(cent.lon, np.concatenate([lon, lon2])) + np.testing.assert_array_equal(cent.on_land, np.concatenate([on_land, on_land2])) cent = cent1.union(cent1, cent2) - np.testing.assert_array_equal(cent.lat, np.concatenate([lat,lat2])) - np.testing.assert_array_equal(cent.lon, np.concatenate([lon,lon2])) - np.testing.assert_array_equal(cent.on_land, np.concatenate([on_land,on_land2])) + np.testing.assert_array_equal(cent.lat, np.concatenate([lat, lat2])) + np.testing.assert_array_equal(cent.lon, np.concatenate([lon, lon2])) + np.testing.assert_array_equal(cent.on_land, np.concatenate([on_land, on_land2])) cent = Centroids.union(cent1) np.testing.assert_array_equal(cent.lat, cent1.lat) @@ -815,9 +879,11 @@ def test_union(self): # if attributes are not part in one of the centroid objects it will be added as None in the union cent = Centroids.union(cent1, cent2, cent3) - np.testing.assert_array_equal(cent.lat, np.concatenate([lat,lat2,lat3])) - np.testing.assert_array_equal(cent.lon, np.concatenate([lon,lon2,lon3])) - np.testing.assert_array_equal(cent.on_land, np.concatenate([on_land,on_land2,[None,None]])) + np.testing.assert_array_equal(cent.lat, np.concatenate([lat, lat2, lat3])) + np.testing.assert_array_equal(cent.lon, np.concatenate([lon, lon2, lon3])) + np.testing.assert_array_equal( + cent.on_land, np.concatenate([on_land, on_land2, [None, None]]) + ) def test_select_pass(self): """Test Centroids.select method""" @@ -873,14 +939,18 @@ def test_get_meta(self): height=3, width=3, transform=Affine( - 10, 0, -35, - 0, -10, 35, + 10, + 0, + -35, + 0, + -10, + 35, ), ) - self.assertEqual(meta['height'], expected_meta['height']) - self.assertEqual(meta['width'], expected_meta['width']) - self.assertTrue(u_coord.equal_crs(meta['crs'], expected_meta['crs'])) - 
self.assertTrue(meta['transform'].almost_equals(expected_meta['transform'])) + self.assertEqual(meta["height"], expected_meta["height"]) + self.assertEqual(meta["width"], expected_meta["width"]) + self.assertTrue(u_coord.equal_crs(meta["crs"], expected_meta["crs"])) + self.assertTrue(meta["transform"].almost_equals(expected_meta["transform"])) def test_get_closest_point(self): """Test get_closest_point""" @@ -896,21 +966,25 @@ def test_get_closest_point(self): """Test get_closest_point""" for y_sign in [1, -1]: meta = { - 'width': 10, - 'height': 20, - 'transform': rasterio.Affine(0.5, 0, 0.1, 0, y_sign * 0.6, y_sign * (-0.3)), - 'crs': DEF_CRS, + "width": 10, + "height": 20, + "transform": rasterio.Affine( + 0.5, 0, 0.1, 0, y_sign * 0.6, y_sign * (-0.3) + ), + "crs": DEF_CRS, } centr_ras = Centroids.from_meta(meta=meta) - test_data = np.array([ - [0.4, 0.1, 0.35, 0.0, 0], - [-0.1, 0.2, 0.35, 0.0, 0], - [2.2, 0.1, 2.35, 0.0, 4], - [1.4, 2.5, 1.35, 2.4, 42], - [5.5, -0.1, 4.85, 0.0, 9], - ]) - test_data[:,[1,3]] *= y_sign + test_data = np.array( + [ + [0.4, 0.1, 0.35, 0.0, 0], + [-0.1, 0.2, 0.35, 0.0, 0], + [2.2, 0.1, 2.35, 0.0, 4], + [1.4, 2.5, 1.35, 2.4, 42], + [5.5, -0.1, 4.85, 0.0, 9], + ] + ) + test_data[:, [1, 3]] *= y_sign for x_in, y_in, x_out, y_out, idx_out in test_data: x, y, idx = centr_ras.get_closest_point(x_in, y_in) self.assertEqual(x, x_out) @@ -919,7 +993,9 @@ def test_get_closest_point(self): self.assertEqual(centr_ras.lon[idx], x) self.assertEqual(centr_ras.lat[idx], y) - centr_ras = Centroids(lat=np.array([0, 0.2, 0.7]), lon=np.array([-0.4, 0.2, 1.1])) + centr_ras = Centroids( + lat=np.array([0, 0.2, 0.7]), lon=np.array([-0.4, 0.2, 1.1]) + ) x, y, idx = centr_ras.get_closest_point(0.1, 0.0) self.assertEqual(x, 0.2) self.assertEqual(y, 0.2) @@ -929,11 +1005,19 @@ def test_dist_coast_pass(self): """Test get_dist_coast""" dist_coast = self.centr.get_dist_coast() # Just checking that the output doesnt change over time. 
- REF_VALUES = np.array([ - 860.0, 200.0, 25610.0, 1000.0, 4685.0, - 507500.0, 500.0, 150500.0, - ]) - self.assertIsInstance(dist_coast,np.ndarray) + REF_VALUES = np.array( + [ + 860.0, + 200.0, + 25610.0, + 1000.0, + 4685.0, + 507500.0, + 500.0, + 150500.0, + ] + ) + self.assertIsInstance(dist_coast, np.ndarray) np.testing.assert_allclose(dist_coast, REF_VALUES, atol=1.0) def test_dist_coast_pass_raster(self): @@ -947,13 +1031,20 @@ def test_area_pass(self): """Test set_area""" ulx, xres, lrx = 60, 1, 90 uly, yres, lry = 0, 1, 20 - xx, yy = np.meshgrid(np.arange(ulx + xres / 2, lrx, xres), - np.arange(uly + yres / 2, lry, yres)) - vec_data = gpd.GeoDataFrame({ - 'geometry': [Point(xflat, yflat) for xflat, yflat in zip(xx.flatten(), yy.flatten())], - 'lon': xx.flatten(), - 'lat': yy.flatten(), - }, crs={'proj': 'cea'}) + xx, yy = np.meshgrid( + np.arange(ulx + xres / 2, lrx, xres), np.arange(uly + yres / 2, lry, yres) + ) + vec_data = gpd.GeoDataFrame( + { + "geometry": [ + Point(xflat, yflat) + for xflat, yflat in zip(xx.flatten(), yy.flatten()) + ], + "lon": xx.flatten(), + "lat": yy.flatten(), + }, + crs={"proj": "cea"}, + ) centr = Centroids.from_geodataframe(vec_data) area_pixel = centr.get_area_pixel() self.assertTrue(np.allclose(area_pixel, np.ones(centr.size))) @@ -972,10 +1063,16 @@ def test_area_pass_raster(self): ) # Correct result in CEA results in unequal pixel area - test_area = np.array([ - 981010.32497514, 981010.3249724 , 981037.92674855, - 981037.92674582, 981065.50487659, 981065.50487385, - ]) + test_area = np.array( + [ + 981010.32497514, + 981010.3249724, + 981037.92674855, + 981037.92674582, + 981065.50487659, + 981065.50487385, + ] + ) np.testing.assert_allclose(area_pixel, test_area) def test_equal_pass(self): @@ -983,7 +1080,7 @@ def test_equal_pass(self): centr_list = [ Centroids(lat=VEC_LAT, lon=VEC_LON, crs=DEF_CRS), Centroids(lat=VEC_LAT, lon=VEC_LON, crs=ALT_CRS), - Centroids(lat=VEC_LAT + 1, lon=VEC_LON + 1) + Centroids(lat=VEC_LAT + 1, lon=VEC_LON + 1), ] for centr1, centr2 in itertools.combinations(centr_list, 2): self.assertFalse(centr2 == centr1) @@ -992,28 +1089,31 @@ def test_equal_pass(self): self.assertTrue(centr2 == centr2) def test_plot(self): - "Test Centroids.plot()" - centr = Centroids( + "Test Centroids.plot()" + centr = Centroids( lat=np.array([-5, -3, 0, 3, 5]), lon=np.array([-180, -175, -170, 170, 175]), region_id=np.zeros(5), - crs=DEF_CRS + crs=DEF_CRS, ) - centr.plot() + centr.plot() def test_plot_non_def_crs(self): - "Test Centroids.plot() with non-default CRS" - centr = Centroids( - lat = np.array([10.0, 20.0, 30.0]), - lon = np.array([-10.0, -20.0, -30.0]), + "Test Centroids.plot() with non-default CRS" + centr = Centroids( + lat=np.array([10.0, 20.0, 30.0]), + lon=np.array([-10.0, -20.0, -30.0]), region_id=np.zeros(3), - crs='epsg:32632' + crs="epsg:32632", ) - centr.plot() + centr.plot() + # Execute Tests if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestCentroidsData) - TESTS.addTests(unittest.TestLoader().loadTestsFromTestCase(TestCentroidsReaderWriter)) + TESTS.addTests( + unittest.TestLoader().loadTestsFromTestCase(TestCentroidsReaderWriter) + ) TESTS.addTests(unittest.TestLoader().loadTestsFromTestCase(TestCentroidsMethods)) unittest.TextTestRunner(verbosity=2).run(TESTS) diff --git a/climada/hazard/io.py b/climada/hazard/io.py index 5248d4579..4ae036c52 100644 --- a/climada/hazard/io.py +++ b/climada/hazard/io.py @@ -24,63 +24,64 @@ import itertools import logging import pathlib -from typing 
import Union, Optional, Callable, Dict, Any +from typing import Any, Callable, Dict, Optional, Union import h5py import numpy as np import pandas as pd import rasterio import sparse as sp -from scipy import sparse import xarray as xr +from scipy import sparse -from climada.hazard.centroids.centr import Centroids import climada.util.constants as u_const import climada.util.coordinates as u_coord import climada.util.dates_times as u_dt import climada.util.hdf5_handler as u_hdf5 - +from climada.hazard.centroids.centr import Centroids LOGGER = logging.getLogger(__name__) -DEF_VAR_EXCEL = {'sheet_name': {'inten': 'hazard_intensity', - 'freq': 'hazard_frequency' - }, - 'col_name': {'cen_id': 'centroid_id/event_id', - 'even_id': 'event_id', - 'even_dt': 'event_date', - 'even_name': 'event_name', - 'freq': 'frequency', - 'orig': 'orig_event_flag' - }, - 'col_centroids': {'sheet_name': 'centroids', - 'col_name': {'cen_id': 'centroid_id', - 'latitude': 'lat', - 'longitude': 'lon', - } - } - } +DEF_VAR_EXCEL = { + "sheet_name": {"inten": "hazard_intensity", "freq": "hazard_frequency"}, + "col_name": { + "cen_id": "centroid_id/event_id", + "even_id": "event_id", + "even_dt": "event_date", + "even_name": "event_name", + "freq": "frequency", + "orig": "orig_event_flag", + }, + "col_centroids": { + "sheet_name": "centroids", + "col_name": { + "cen_id": "centroid_id", + "latitude": "lat", + "longitude": "lon", + }, + }, +} """Excel variable names""" -DEF_VAR_MAT = {'field_name': 'hazard', - 'var_name': {'per_id': 'peril_ID', - 'even_id': 'event_ID', - 'ev_name': 'name', - 'freq': 'frequency', - 'inten': 'intensity', - 'unit': 'units', - 'frac': 'fraction', - 'comment': 'comment', - 'datenum': 'datenum', - 'orig': 'orig_event_flag' - }, - 'var_cent': {'field_names': ['centroids', 'hazard'], - 'var_name': {'cen_id': 'centroid_ID', - 'lat': 'lat', - 'lon': 'lon' - } - } - } +DEF_VAR_MAT = { + "field_name": "hazard", + "var_name": { + "per_id": "peril_ID", + "even_id": "event_ID", + "ev_name": "name", + "freq": "frequency", + "inten": "intensity", + "unit": "units", + "frac": "fraction", + "comment": "comment", + "datenum": "datenum", + "orig": "orig_event_flag", + }, + "var_cent": { + "field_names": ["centroids", "hazard"], + "var_name": {"cen_id": "centroid_ID", "lat": "lat", "lon": "lon"}, + }, +} """MATLAB variable names""" DEF_COORDS = dict(event="time", longitude="longitude", latitude="latitude") @@ -92,22 +93,38 @@ # pylint: disable=no-member -class HazardIO(): + +class HazardIO: """ Contains all read/write methods of the Hazard class """ def set_raster(self, *args, **kwargs): """This function is deprecated, use Hazard.from_raster.""" - LOGGER.warning("The use of Hazard.set_raster is deprecated." - "Use Hazard.from_raster instead.") + LOGGER.warning( + "The use of Hazard.set_raster is deprecated." + "Use Hazard.from_raster instead." 
+ ) self.__dict__ = self.__class__.from_raster(*args, **kwargs).__dict__ @classmethod - def from_raster(cls, files_intensity, files_fraction=None, attrs=None, - band=None, haz_type=None, pool=None, src_crs=None, window=None, - geometry=None, dst_crs=None, transform=None, width=None, - height=None, resampling=rasterio.warp.Resampling.nearest): + def from_raster( + cls, + files_intensity, + files_fraction=None, + attrs=None, + band=None, + haz_type=None, + pool=None, + src_crs=None, + window=None, + geometry=None, + dst_crs=None, + transform=None, + width=None, + height=None, + resampling=rasterio.warp.Resampling.nearest, + ): """Create Hazard with intensity and fraction values from raster files If raster files are masked, the masked values are set to 0. @@ -163,8 +180,10 @@ def from_raster(cls, files_intensity, files_fraction=None, attrs=None, if not band: band = [1] if files_fraction is not None and len(files_intensity) != len(files_fraction): - raise ValueError('Number of intensity files differs from fraction files:' - f'{len(files_intensity)} != {len(files_fraction)}') + raise ValueError( + "Number of intensity files differs from fraction files:" + f"{len(files_intensity)} != {len(files_fraction)}" + ) # List all parameters for initialization here (missing ones will be default) hazard_kwargs = dict() @@ -172,50 +191,90 @@ def from_raster(cls, files_intensity, files_fraction=None, attrs=None, hazard_kwargs["haz_type"] = haz_type centroids, meta = Centroids.from_raster_file( - files_intensity[0], src_crs=src_crs, window=window, - geometry=geometry, dst_crs=dst_crs, transform=transform, - width=width, height=height, resampling=resampling, return_meta=True, + files_intensity[0], + src_crs=src_crs, + window=window, + geometry=geometry, + dst_crs=dst_crs, + transform=transform, + width=width, + height=height, + resampling=resampling, + return_meta=True, ) if pool: chunksize = max(min(len(files_intensity) // pool.ncpus, 1000), 1) inten_list = pool.map( _values_from_raster_files, - [[f] for f in files_intensity], itertools.repeat(meta), - itertools.repeat(band), itertools.repeat(src_crs), - itertools.repeat(window), itertools.repeat(geometry), - itertools.repeat(dst_crs), itertools.repeat(transform), - itertools.repeat(width), itertools.repeat(height), - itertools.repeat(resampling), chunksize=chunksize) - intensity = sparse.vstack(inten_list, format='csr') + [[f] for f in files_intensity], + itertools.repeat(meta), + itertools.repeat(band), + itertools.repeat(src_crs), + itertools.repeat(window), + itertools.repeat(geometry), + itertools.repeat(dst_crs), + itertools.repeat(transform), + itertools.repeat(width), + itertools.repeat(height), + itertools.repeat(resampling), + chunksize=chunksize, + ) + intensity = sparse.vstack(inten_list, format="csr") if files_fraction is not None: fract_list = pool.map( _values_from_raster_files, - [[f] for f in files_fraction], itertools.repeat(meta), - itertools.repeat(band), itertools.repeat(src_crs), - itertools.repeat(window), itertools.repeat(geometry), - itertools.repeat(dst_crs), itertools.repeat(transform), - itertools.repeat(width), itertools.repeat(height), - itertools.repeat(resampling), chunksize=chunksize) - fraction = sparse.vstack(fract_list, format='csr') + [[f] for f in files_fraction], + itertools.repeat(meta), + itertools.repeat(band), + itertools.repeat(src_crs), + itertools.repeat(window), + itertools.repeat(geometry), + itertools.repeat(dst_crs), + itertools.repeat(transform), + itertools.repeat(width), + itertools.repeat(height), + 
itertools.repeat(resampling), + chunksize=chunksize, + ) + fraction = sparse.vstack(fract_list, format="csr") else: intensity = _values_from_raster_files( - files_intensity, meta=meta, band=band, src_crs=src_crs, window=window, - geometry=geometry, dst_crs=dst_crs, transform=transform, width=width, - height=height, resampling=resampling, + files_intensity, + meta=meta, + band=band, + src_crs=src_crs, + window=window, + geometry=geometry, + dst_crs=dst_crs, + transform=transform, + width=width, + height=height, + resampling=resampling, ) if files_fraction is not None: fraction = _values_from_raster_files( - files_fraction, meta=meta, band=band, src_crs=src_crs, window=window, - geometry=geometry, dst_crs=dst_crs, transform=transform, width=width, - height=height, resampling=resampling) + files_fraction, + meta=meta, + band=band, + src_crs=src_crs, + window=window, + geometry=geometry, + dst_crs=dst_crs, + transform=transform, + width=width, + height=height, + resampling=resampling, + ) if files_fraction is None: fraction = intensity.copy() fraction.data.fill(1) hazard_kwargs.update(cls._attrs_to_kwargs(attrs, num_events=intensity.shape[0])) - return cls(centroids=centroids, intensity=intensity, fraction=fraction, **hazard_kwargs) + return cls( + centroids=centroids, intensity=intensity, fraction=fraction, **hazard_kwargs + ) @classmethod def from_xarray_raster_file( @@ -513,8 +572,10 @@ def from_xarray_raster( # Check data type for better error message if not isinstance(data, xr.Dataset): if isinstance(data, (pathlib.Path, str)): - raise TypeError("Passing a path to this classmethod is not supported. " - "Use Hazard.from_xarray_raster_file instead.") + raise TypeError( + "Passing a path to this classmethod is not supported. " + "Use Hazard.from_xarray_raster_file instead." + ) raise TypeError("This method only supports xarray.Dataset as input data") @@ -592,7 +653,7 @@ def to_csr_matrix(array: xr.DataArray) -> sparse.csr_matrix: sp.COO.from_numpy, array, dask="parallelized", - output_dtypes=[array.dtype] + output_dtypes=[array.dtype], ) sparse_coo = array.compute().data # Load into memory return sparse_coo.tocsr() # Convert sparse.COO to scipy.sparse.csr_matrix @@ -824,8 +885,9 @@ def vshape(array): # Set the Hazard attributes for _, ident in data_ident.iterrows(): - hazard_kwargs[ident["hazard_attr"] - ] = load_from_xarray_or_return_default(**ident) + hazard_kwargs[ident["hazard_attr"]] = load_from_xarray_or_return_default( + **ident + ) # Done! LOGGER.debug("Hazard successfully loaded. 
Number of events: %i", num_events) @@ -854,37 +916,39 @@ def _attrs_to_kwargs(attrs: Dict[str, Any], num_events: int) -> Dict[str, Any]: kwargs = dict() - if 'event_id' in attrs: - kwargs["event_id"] = attrs['event_id'] + if "event_id" in attrs: + kwargs["event_id"] = attrs["event_id"] else: kwargs["event_id"] = np.arange(1, num_events + 1) - if 'frequency' in attrs: - kwargs["frequency"] = attrs['frequency'] + if "frequency" in attrs: + kwargs["frequency"] = attrs["frequency"] else: kwargs["frequency"] = np.ones(kwargs["event_id"].size) - if 'frequency_unit' in attrs: - kwargs["frequency_unit"] = attrs['frequency_unit'] - if 'event_name' in attrs: - kwargs["event_name"] = attrs['event_name'] + if "frequency_unit" in attrs: + kwargs["frequency_unit"] = attrs["frequency_unit"] + if "event_name" in attrs: + kwargs["event_name"] = attrs["event_name"] else: kwargs["event_name"] = list(map(str, kwargs["event_id"])) - if 'date' in attrs: - kwargs["date"] = np.array([attrs['date']]) + if "date" in attrs: + kwargs["date"] = np.array([attrs["date"]]) else: kwargs["date"] = np.ones(kwargs["event_id"].size) - if 'orig' in attrs: - kwargs["orig"] = np.array([attrs['orig']]) + if "orig" in attrs: + kwargs["orig"] = np.array([attrs["orig"]]) else: kwargs["orig"] = np.ones(kwargs["event_id"].size, bool) - if 'unit' in attrs: - kwargs["units"] = attrs['unit'] + if "unit" in attrs: + kwargs["units"] = attrs["unit"] return kwargs def read_excel(self, *args, **kwargs): """This function is deprecated, use Hazard.from_excel.""" - LOGGER.warning("The use of Hazard.read_excel is deprecated." - "Use Hazard.from_excel instead.") + LOGGER.warning( + "The use of Hazard.read_excel is deprecated." + "Use Hazard.from_excel instead." + ) self.__dict__ = self.__class__.from_excel(*args, **kwargs).__dict__ @classmethod @@ -914,20 +978,21 @@ def from_excel(cls, file_name, var_names=None, haz_type=None): # pylint: disable=protected-access if not var_names: var_names = DEF_VAR_EXCEL - LOGGER.info('Reading %s', file_name) + LOGGER.info("Reading %s", file_name) hazard_kwargs = {} if haz_type is not None: hazard_kwargs["haz_type"] = haz_type try: centroids = Centroids._legacy_from_excel( - file_name, var_names=var_names['col_centroids']) + file_name, var_names=var_names["col_centroids"] + ) hazard_kwargs.update(cls._read_att_excel(file_name, var_names, centroids)) except KeyError as var_err: raise KeyError("Variable not in Excel file: " + str(var_err)) from var_err return cls(centroids=centroids, **hazard_kwargs) - def write_raster(self, file_name, variable='intensity', output_resolution=None): + def write_raster(self, file_name, variable="intensity", output_resolution=None): """Write intensity or fraction as GeoTIFF file. Each band is an event. Output raster is always a regular grid (same resolution for lat/lon). @@ -955,9 +1020,9 @@ def write_raster(self, file_name, variable='intensity', output_resolution=None): method to read intensity and fraction raster files. 
""" - if variable == 'intensity': + if variable == "intensity": var_to_write = self.intensity - elif variable =='fraction': + elif variable == "fraction": var_to_write = self.fraction else: raise ValueError( @@ -965,30 +1030,31 @@ def write_raster(self, file_name, variable='intensity', output_resolution=None): ) meta = self.centroids.get_meta(resolution=output_resolution) - meta.update(driver='GTiff', dtype=rasterio.float32, count=self.size) + meta.update(driver="GTiff", dtype=rasterio.float32, count=self.size) res = meta["transform"][0] # resolution from lon coordinates - if meta['height'] * meta['width'] == self.centroids.size: + if meta["height"] * meta["width"] == self.centroids.size: # centroids already in raster format u_coord.write_raster(file_name, var_to_write.toarray(), meta) else: geometry = self.centroids.get_pixel_shapes(res=res) - with rasterio.open(file_name, 'w', **meta) as dst: - LOGGER.info('Writing %s', file_name) + with rasterio.open(file_name, "w", **meta) as dst: + LOGGER.info("Writing %s", file_name) for i_ev in range(self.size): raster = rasterio.features.rasterize( ( (geom, value) - for geom, value - in zip(geometry, var_to_write[i_ev].toarray().flatten()) + for geom, value in zip( + geometry, var_to_write[i_ev].toarray().flatten() + ) ), - out_shape=(meta['height'], meta['width']), - transform=meta['transform'], + out_shape=(meta["height"], meta["width"]), + transform=meta["transform"], fill=0, all_touched=True, - dtype=meta['dtype'], + dtype=meta["dtype"], ) - dst.write(raster.astype(meta['dtype']), i_ev + 1) + dst.write(raster.astype(meta["dtype"]), i_ev + 1) def write_hdf5(self, file_name, todense=False): """Write hazard in hdf5 format. @@ -1001,11 +1067,11 @@ def write_hdf5(self, file_name, todense=False): if True write the sparse matrices as hdf5.dataset by converting them to dense format first. This increases readability of the file for other programs. default: False """ - LOGGER.info('Writing %s', file_name) - with h5py.File(file_name, 'w') as hf_data: + LOGGER.info("Writing %s", file_name) + with h5py.File(file_name, "w") as hf_data: str_dt = h5py.special_dtype(vlen=str) - for (var_name, var_val) in self.__dict__.items(): - if var_name == 'centroids': + for var_name, var_val in self.__dict__.items(): + if var_name == "centroids": # Centroids have their own write_hdf5 method, # which is invoked at the end of this method (s.b.) 
continue @@ -1014,18 +1080,24 @@ def write_hdf5(self, file_name, todense=False): hf_data.create_dataset(var_name, data=var_val.toarray()) else: hf_csr = hf_data.create_group(var_name) - hf_csr.create_dataset('data', data=var_val.data) - hf_csr.create_dataset('indices', data=var_val.indices) - hf_csr.create_dataset('indptr', data=var_val.indptr) - hf_csr.attrs['shape'] = var_val.shape + hf_csr.create_dataset("data", data=var_val.data) + hf_csr.create_dataset("indices", data=var_val.indices) + hf_csr.create_dataset("indptr", data=var_val.indptr) + hf_csr.attrs["shape"] = var_val.shape elif isinstance(var_val, str): hf_str = hf_data.create_dataset(var_name, (1,), dtype=str_dt) hf_str[0] = var_val - elif isinstance(var_val, list) and var_val and isinstance(var_val[0], str): - hf_str = hf_data.create_dataset(var_name, (len(var_val),), dtype=str_dt) + elif ( + isinstance(var_val, list) + and var_val + and isinstance(var_val[0], str) + ): + hf_str = hf_data.create_dataset( + var_name, (len(var_val),), dtype=str_dt + ) for i_ev, var_ev in enumerate(var_val): hf_str[i_ev] = var_ev - elif var_val is not None and var_name != 'pool': + elif var_val is not None and var_name != "pool": try: hf_data.create_dataset(var_name, data=var_val) except TypeError: @@ -1034,14 +1106,17 @@ def write_hdf5(self, file_name, todense=False): "type, %s, for which writing to hdf5 " "is not implemented. Reading this H5 file will probably lead to " "%s being set to its default value.", - var_name, var_val.__class__.__name__, var_name + var_name, + var_val.__class__.__name__, + var_name, ) - self.centroids.write_hdf5(file_name, mode='a') + self.centroids.write_hdf5(file_name, mode="a") def read_hdf5(self, *args, **kwargs): """This function is deprecated, use Hazard.from_hdf5.""" - LOGGER.warning("The use of Hazard.read_hdf5 is deprecated." - "Use Hazard.from_hdf5 instead.") + LOGGER.warning( + "The use of Hazard.read_hdf5 is deprecated." "Use Hazard.from_hdf5 instead." + ) self.__dict__ = self.__class__.from_hdf5(*args, **kwargs).__dict__ @classmethod @@ -1059,16 +1134,16 @@ def from_hdf5(cls, file_name): Hazard object from the provided MATLAB file """ - LOGGER.info('Reading %s', file_name) + LOGGER.info("Reading %s", file_name) # NOTE: This is a stretch. We instantiate one empty object to iterate over its # attributes. But then we create a new one with the attributes filled! 
haz = cls() hazard_kwargs = dict() - with h5py.File(file_name, 'r') as hf_data: - for (var_name, var_val) in haz.__dict__.items(): + with h5py.File(file_name, "r") as hf_data: + for var_name, var_val in haz.__dict__.items(): if var_name not in hf_data.keys(): continue - if var_name == 'centroids': + if var_name == "centroids": continue if isinstance(var_val, np.ndarray) and var_val.ndim == 1: hazard_kwargs[var_name] = np.array(hf_data.get(var_name)) @@ -1078,14 +1153,22 @@ def from_hdf5(cls, file_name): hazard_kwargs[var_name] = sparse.csr_matrix(hf_csr) else: hazard_kwargs[var_name] = sparse.csr_matrix( - (hf_csr['data'][:], hf_csr['indices'][:], hf_csr['indptr'][:]), - hf_csr.attrs['shape']) + ( + hf_csr["data"][:], + hf_csr["indices"][:], + hf_csr["indptr"][:], + ), + hf_csr.attrs["shape"], + ) elif isinstance(var_val, str): - hazard_kwargs[var_name] = u_hdf5.to_string( - hf_data.get(var_name)[0]) + hazard_kwargs[var_name] = u_hdf5.to_string(hf_data.get(var_name)[0]) elif isinstance(var_val, list): - hazard_kwargs[var_name] = [x for x in map( - u_hdf5.to_string, np.array(hf_data.get(var_name)).tolist())] + hazard_kwargs[var_name] = [ + x + for x in map( + u_hdf5.to_string, np.array(hf_data.get(var_name)).tolist() + ) + ] else: hazard_kwargs[var_name] = hf_data.get(var_name) hazard_kwargs["centroids"] = Centroids.from_hdf5(file_name) @@ -1096,19 +1179,19 @@ def from_hdf5(cls, file_name): def _read_att_mat(data, file_name, var_names, centroids): """Read MATLAB hazard's attributes.""" attrs = dict() - attrs["frequency"] = np.squeeze(data[var_names['var_name']['freq']]) + attrs["frequency"] = np.squeeze(data[var_names["var_name"]["freq"]]) try: attrs["frequency_unit"] = u_hdf5.get_string( - data[var_names['var_name']['freq_unit']]) + data[var_names["var_name"]["freq_unit"]] + ) except KeyError: pass - attrs["orig"] = np.squeeze( - data[var_names['var_name']['orig']]).astype(bool) + attrs["orig"] = np.squeeze(data[var_names["var_name"]["orig"]]).astype(bool) attrs["event_id"] = np.squeeze( - data[var_names['var_name']['even_id']].astype(int, copy=False)) + data[var_names["var_name"]["even_id"]].astype(int, copy=False) + ) try: - attrs["units"] = u_hdf5.get_string( - data[var_names['var_name']['unit']]) + attrs["units"] = u_hdf5.get_string(data[var_names["var_name"]["unit"]]) except KeyError: pass @@ -1116,31 +1199,40 @@ def _read_att_mat(data, file_name, var_names, centroids): n_event = len(attrs["event_id"]) try: attrs["intensity"] = u_hdf5.get_sparse_csr_mat( - data[var_names['var_name']['inten']], (n_event, n_cen)) + data[var_names["var_name"]["inten"]], (n_event, n_cen) + ) except ValueError as err: - raise ValueError('Size missmatch in intensity matrix.') from err + raise ValueError("Size missmatch in intensity matrix.") from err try: attrs["fraction"] = u_hdf5.get_sparse_csr_mat( - data[var_names['var_name']['frac']], (n_event, n_cen)) + data[var_names["var_name"]["frac"]], (n_event, n_cen) + ) except ValueError as err: - raise ValueError('Size missmatch in fraction matrix.') from err + raise ValueError("Size missmatch in fraction matrix.") from err except KeyError: attrs["fraction"] = sparse.csr_matrix( - np.ones(attrs["intensity"].shape, dtype=float)) + np.ones(attrs["intensity"].shape, dtype=float) + ) # Event names: set as event_id if no provided try: attrs["event_name"] = u_hdf5.get_list_str_from_ref( - file_name, data[var_names['var_name']['ev_name']]) + file_name, data[var_names["var_name"]["ev_name"]] + ) except KeyError: attrs["event_name"] = list(attrs["event_id"]) try: - 
datenum = data[var_names['var_name']['datenum']].squeeze() - attrs["date"] = np.array([ - (dt.datetime.fromordinal(int(date)) - + dt.timedelta(days=date % 1) - - dt.timedelta(days=366)).toordinal() - for date in datenum]) + datenum = data[var_names["var_name"]["datenum"]].squeeze() + attrs["date"] = np.array( + [ + ( + dt.datetime.fromordinal(int(date)) + + dt.timedelta(days=date % 1) + - dt.timedelta(days=366) + ).toordinal() + for date in datenum + ] + ) except KeyError: pass @@ -1149,44 +1241,59 @@ def _read_att_mat(data, file_name, var_names, centroids): @staticmethod def _read_att_excel(file_name, var_names, centroids): """Read Excel hazard's attributes.""" - dfr = pd.read_excel(file_name, var_names['sheet_name']['freq']) + dfr = pd.read_excel(file_name, var_names["sheet_name"]["freq"]) num_events = dfr.shape[0] attrs = dict() - attrs["frequency"] = dfr[var_names['col_name']['freq']].values - attrs["orig"] = dfr[var_names['col_name']['orig']].values.astype(bool) - attrs["event_id"] = dfr[var_names['col_name'] - ['even_id']].values.astype(int, copy=False) - attrs["date"] = dfr[var_names['col_name'] - ['even_dt']].values.astype(int, copy=False) - attrs["event_name"] = dfr[var_names['col_name'] - ['even_name']].values.tolist() - - dfr = pd.read_excel(file_name, var_names['sheet_name']['inten']) + attrs["frequency"] = dfr[var_names["col_name"]["freq"]].values + attrs["orig"] = dfr[var_names["col_name"]["orig"]].values.astype(bool) + attrs["event_id"] = dfr[var_names["col_name"]["even_id"]].values.astype( + int, copy=False + ) + attrs["date"] = dfr[var_names["col_name"]["even_dt"]].values.astype( + int, copy=False + ) + attrs["event_name"] = dfr[var_names["col_name"]["even_name"]].values.tolist() + + dfr = pd.read_excel(file_name, var_names["sheet_name"]["inten"]) # number of events (ignore centroid_ID column) # check the number of events is the same as the one in the frequency if dfr.shape[1] - 1 is not num_events: - raise ValueError('Hazard intensity is given for a number of events ' - 'different from the number of defined in its frequency: ' - f'{dfr.shape[1] - 1} != {num_events}') + raise ValueError( + "Hazard intensity is given for a number of events " + "different from the number of defined in its frequency: " + f"{dfr.shape[1] - 1} != {num_events}" + ) # check number of centroids is the same as retrieved before if dfr.shape[0] is not centroids.size: - raise ValueError('Hazard intensity is given for a number of centroids ' - 'different from the number of centroids defined: ' - f'{dfr.shape[0]} != {centroids.size}') + raise ValueError( + "Hazard intensity is given for a number of centroids " + "different from the number of centroids defined: " + f"{dfr.shape[0]} != {centroids.size}" + ) attrs["intensity"] = sparse.csr_matrix( - dfr.values[:, 1:num_events + 1].transpose()) + dfr.values[:, 1 : num_events + 1].transpose() + ) attrs["fraction"] = sparse.csr_matrix( - np.ones(attrs["intensity"].shape, dtype=float)) + np.ones(attrs["intensity"].shape, dtype=float) + ) return attrs def _values_from_raster_files( - file_names, meta, band=None, src_crs=None, window=None, - geometry=None, dst_crs=None, transform=None, width=None, - height=None, resampling=rasterio.warp.Resampling.nearest, + file_names, + meta, + band=None, + src_crs=None, + window=None, + geometry=None, + dst_crs=None, + transform=None, + width=None, + height=None, + resampling=rasterio.warp.Resampling.nearest, ): """Read raster of bands and set 0 values to the masked ones. 
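# Illustrative sketch (not part of the patch): a worked example of the MATLAB
# datenum conversion in _read_att_mat above. MATLAB serial dates start at year 0,
# Python ordinals at year 1, hence the 366-day offset; the fractional part carries
# the time of day and is dropped again by .toordinal(). The datenum value is
# hypothetical.
import datetime as dt

datenum = 737791.5  # MATLAB serial date for 2020-01-01 12:00
converted = (
    dt.datetime.fromordinal(int(datenum))
    + dt.timedelta(days=datenum % 1)
    - dt.timedelta(days=366)
)
print(converted)              # 2020-01-01 12:00:00
print(converted.toordinal())  # 737425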
@@ -1237,14 +1344,24 @@ class method) to allow for parallel computing. values = [] for file_name in file_names: tmp_meta, data = u_coord.read_raster( - file_name, band, src_crs, window, geometry, dst_crs, - transform, width, height, resampling, + file_name, + band, + src_crs, + window, + geometry, + dst_crs, + transform, + width, + height, + resampling, ) - if (tmp_meta['crs'] != meta['crs'] - or tmp_meta['transform'] != meta['transform'] - or tmp_meta['height'] != meta['height'] - or tmp_meta['width'] != meta['width']): - raise ValueError('Raster data is inconsistent with contained raster.') + if ( + tmp_meta["crs"] != meta["crs"] + or tmp_meta["transform"] != meta["transform"] + or tmp_meta["height"] != meta["height"] + or tmp_meta["width"] != meta["width"] + ): + raise ValueError("Raster data is inconsistent with contained raster.") values.append(sparse.csr_matrix(data)) - return sparse.vstack(values, format='csr') + return sparse.vstack(values, format="csr") diff --git a/climada/hazard/isimip_data.py b/climada/hazard/isimip_data.py index f9c28e8e3..5f9f794e1 100644 --- a/climada/hazard/isimip_data.py +++ b/climada/hazard/isimip_data.py @@ -27,11 +27,11 @@ monthly, yearly (e.g. yield) """ - import xarray as xr bbox_world = [-85, 85, -180, 180] + def _read_one_nc(file_name, bbox=None, years=None): """Reads 1 ISIMIP output NETCDF file data within a certain bounding box and time period @@ -56,6 +56,9 @@ def _read_one_nc(file_name, bbox=None, years=None): if not years: return data.sel(lat=slice(bbox[3], bbox[1]), lon=slice(bbox[0], bbox[2])) - time_id = years - int(data['time'].units[12:16]) - return data.sel(lat=slice(bbox[3], bbox[1]), lon=slice(bbox[0], bbox[2]), - time=slice(time_id[0], time_id[1])) + time_id = years - int(data["time"].units[12:16]) + return data.sel( + lat=slice(bbox[3], bbox[1]), + lon=slice(bbox[0], bbox[2]), + time=slice(time_id[0], time_id[1]), + ) diff --git a/climada/hazard/plot.py b/climada/hazard/plot.py index 26d6169a6..f5d02d74e 100644 --- a/climada/hazard/plot.py +++ b/climada/hazard/plot.py @@ -19,22 +19,28 @@ Define Hazard Plotting Methods. """ -import numpy as np import matplotlib.pyplot as plt +import numpy as np import climada.util.plot as u_plot - # pylint: disable=no-member -class HazardPlot(): + +class HazardPlot: """ Contains all plotting methods of the Hazard class """ - def plot_rp_intensity(self, return_periods=(25, 50, 100, 250), - smooth=True, axis=None, figsize=(9, 13), adapt_fontsize=True, - **kwargs): + def plot_rp_intensity( + self, + return_periods=(25, 50, 100, 250), + smooth=True, + axis=None, + figsize=(9, 13), + adapt_fontsize=True, + **kwargs, + ): """Compute and plot hazard exceedance intensity maps for different return periods. Calls local_exceedance_inten. 
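# Illustrative sketch (not part of the patch): _read_one_nc in
# climada/hazard/isimip_data.py above slices latitude from the larger to the smaller
# bound, which is what xarray's label-based slicing needs when the latitude
# coordinate is stored in descending order. A toy dataset with hypothetical
# coordinates:
import numpy as np
import xarray as xr

data = xr.Dataset(
    {"var": (("lat", "lon"), np.arange(12.0).reshape(4, 3))},
    coords={"lat": [60.0, 30.0, 0.0, -30.0], "lon": [-10.0, 0.0, 10.0]},
)
subset = data.sel(lat=slice(40.0, -10.0), lon=slice(-5.0, 15.0))
print(subset["var"].shape)  # (2, 2): latitudes 30 and 0, longitudes 0 and 10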
@@ -57,17 +63,32 @@ def plot_rp_intensity(self, return_periods=(25, 50, 100, 250), intenstats is return_periods.size x num_centroids """ inten_stats = self.local_exceedance_inten(np.array(return_periods)) - colbar_name = 'Intensity (' + self.units + ')' + colbar_name = "Intensity (" + self.units + ")" title = list() for ret in return_periods: - title.append('Return period: ' + str(ret) + ' years') - axis = u_plot.geo_im_from_array(inten_stats, self.centroids.coord, - colbar_name, title, smooth=smooth, axes=axis, - figsize=figsize, adapt_fontsize=adapt_fontsize, **kwargs) + title.append("Return period: " + str(ret) + " years") + axis = u_plot.geo_im_from_array( + inten_stats, + self.centroids.coord, + colbar_name, + title, + smooth=smooth, + axes=axis, + figsize=figsize, + adapt_fontsize=adapt_fontsize, + **kwargs, + ) return axis, inten_stats - def plot_intensity(self, event=None, centr=None, smooth=True, axis=None, adapt_fontsize=True, - **kwargs): + def plot_intensity( + self, + event=None, + centr=None, + smooth=True, + axis=None, + adapt_fontsize=True, + **kwargs, + ): """Plot intensity values for a selected event or centroid. Parameters @@ -101,13 +122,21 @@ def plot_intensity(self, event=None, centr=None, smooth=True, axis=None, adapt_f ------ ValueError """ - col_label = f'Intensity ({self.units})' + col_label = f"Intensity ({self.units})" crs_epsg, _ = u_plot.get_transformation(self.centroids.geometry.crs) if event is not None: if isinstance(event, str): event = self.get_event_id(event) - return self._event_plot(event, self.intensity, col_label, - smooth, crs_epsg, axis, adapt_fontsize=adapt_fontsize, **kwargs) + return self._event_plot( + event, + self.intensity, + col_label, + smooth, + crs_epsg, + axis, + adapt_fontsize=adapt_fontsize, + **kwargs, + ) if centr is not None: if isinstance(centr, tuple): _, _, centr = self.centroids.get_closest_point(centr[0], centr[1]) @@ -115,8 +144,7 @@ def plot_intensity(self, event=None, centr=None, smooth=True, axis=None, adapt_f raise ValueError("Provide one event id or one centroid id.") - def plot_fraction(self, event=None, centr=None, smooth=True, axis=None, - **kwargs): + def plot_fraction(self, event=None, centr=None, smooth=True, axis=None, **kwargs): """Plot fraction values for a selected event or centroid. Parameters @@ -150,12 +178,13 @@ def plot_fraction(self, event=None, centr=None, smooth=True, axis=None, ------ ValueError """ - col_label = 'Fraction' + col_label = "Fraction" if event is not None: if isinstance(event, str): event = self.get_event_id(event) - return self._event_plot(event, self.fraction, col_label, smooth, axis, - **kwargs) + return self._event_plot( + event, self.fraction, col_label, smooth, axis, **kwargs + ) if centr is not None: if isinstance(centr, tuple): _, _, centr = self.centroids.get_closest_point(centr[0], centr[1]) @@ -163,8 +192,18 @@ def plot_fraction(self, event=None, centr=None, smooth=True, axis=None, raise ValueError("Provide one event id or one centroid id.") - def _event_plot(self, event_id, mat_var, col_name, smooth, crs_espg, axis=None, - figsize=(9, 13), adapt_fontsize=True, **kwargs): + def _event_plot( + self, + event_id, + mat_var, + col_name, + smooth, + crs_espg, + axis=None, + figsize=(9, 13), + adapt_fontsize=True, + **kwargs, + ): """Plot an event of the input matrix. 
Parameters @@ -200,26 +239,39 @@ def _event_plot(self, event_id, mat_var, col_name, smooth, crs_espg, axis=None, try: event_pos = np.where(self.event_id == ev_id)[0][0] except IndexError as err: - raise ValueError(f'Wrong event id: {ev_id}.') from err + raise ValueError(f"Wrong event id: {ev_id}.") from err im_val = mat_var[event_pos, :].toarray().transpose() - title = f'Event ID {self.event_id[event_pos]}: {self.event_name[event_pos]}' + title = ( + f"Event ID {self.event_id[event_pos]}: {self.event_name[event_pos]}" + ) elif ev_id < 0: max_inten = np.asarray(np.sum(mat_var, axis=1)).reshape(-1) event_pos = np.argpartition(max_inten, ev_id)[ev_id:] event_pos = event_pos[np.argsort(max_inten[event_pos])][0] im_val = mat_var[event_pos, :].toarray().transpose() - title = (f'{np.abs(ev_id)}-largest Event. ID {self.event_id[event_pos]}:' - f' {self.event_name[event_pos]}') + title = ( + f"{np.abs(ev_id)}-largest Event. ID {self.event_id[event_pos]}:" + f" {self.event_name[event_pos]}" + ) else: im_val = np.max(mat_var, axis=0).toarray().transpose() - title = f'{self.haz_type} max intensity at each point' + title = f"{self.haz_type} max intensity at each point" array_val.append(im_val) l_title.append(title) - return u_plot.geo_im_from_array(array_val, self.centroids.coord, col_name, - l_title, smooth=smooth, axes=axis, figsize=figsize, - proj=crs_espg, adapt_fontsize=adapt_fontsize, **kwargs) + return u_plot.geo_im_from_array( + array_val, + self.centroids.coord, + col_name, + l_title, + smooth=smooth, + axes=axis, + figsize=figsize, + proj=crs_espg, + adapt_fontsize=adapt_fontsize, + **kwargs, + ) def _centr_plot(self, centr_idx, mat_var, col_name, axis=None, **kwargs): """Plot a centroid of the input matrix. @@ -251,11 +303,11 @@ def _centr_plot(self, centr_idx, mat_var, col_name, axis=None, **kwargs): try: centr_pos = centr_idx except IndexError as err: - raise ValueError(f'Wrong centroid id: {centr_idx}.') from err + raise ValueError(f"Wrong centroid id: {centr_idx}.") from err array_val = mat_var[:, centr_pos].toarray() title = ( - f'Centroid {centr_idx}:' - f' ({np.around(coord[centr_pos, 0], 3)}, {np.around(coord[centr_pos, 1],3)})' + f"Centroid {centr_idx}:" + f" ({np.around(coord[centr_pos, 0], 3)}, {np.around(coord[centr_pos, 1],3)})" ) elif centr_idx < 0: max_inten = np.asarray(np.sum(mat_var, axis=0)).reshape(-1) @@ -264,19 +316,19 @@ def _centr_plot(self, centr_idx, mat_var, col_name, axis=None, **kwargs): array_val = mat_var[:, centr_pos].toarray() title = ( - f'{np.abs(centr_idx)}-largest Centroid. {centr_pos}:' - f' ({np.around(coord[centr_pos, 0], 3)}, {np.around(coord[centr_pos, 1], 3)})' + f"{np.abs(centr_idx)}-largest Centroid. {centr_pos}:" + f" ({np.around(coord[centr_pos, 0], 3)}, {np.around(coord[centr_pos, 1], 3)})" ) else: array_val = np.max(mat_var, axis=1).toarray() - title = f'{self.haz_type} max intensity at each event' + title = f"{self.haz_type} max intensity at each event" if not axis: _, axis = plt.subplots(1) - if 'color' not in kwargs: - kwargs['color'] = 'b' + if "color" not in kwargs: + kwargs["color"] = "b" axis.set_title(title) - axis.set_xlabel('Event number') + axis.set_xlabel("Event number") axis.set_ylabel(str(col_name)) axis.plot(range(len(array_val)), array_val, **kwargs) axis.set_xlim([0, len(array_val)]) diff --git a/climada/hazard/storm_europe.py b/climada/hazard/storm_europe.py index 6de5241d6..53f94230a 100644 --- a/climada/hazard/storm_europe.py +++ b/climada/hazard/storm_europe.py @@ -19,7 +19,7 @@ Define StormEurope class. 
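# Illustrative sketch (not part of the patch): the selection used in _event_plot
# above for a negative event id. np.argpartition with a negative index gathers the
# |ev_id| largest per-event totals, and the subsequent argsort picks the
# |ev_id|-th largest one. The totals are hypothetical.
import numpy as np

totals = np.array([3.0, 9.0, 1.0, 7.0, 5.0])   # e.g. summed intensity per event
ev_id = -2                                     # ask for the second-largest event
candidates = np.argpartition(totals, ev_id)[ev_id:]
event_pos = candidates[np.argsort(totals[candidates])][0]
print(event_pos, totals[event_pos])            # 3 7.0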
""" -__all__ = ['StormEurope'] +__all__ = ["StormEurope"] import bz2 import datetime as dt @@ -27,29 +27,31 @@ from pathlib import Path from typing import Optional +import matplotlib.pyplot as plt import numpy as np -import xarray as xr import pandas as pd -import matplotlib.pyplot as plt +import xarray as xr from scipy import sparse -from climada.util.config import CONFIG from climada.hazard.base import Hazard from climada.hazard.centroids.centr import Centroids +from climada.util.config import CONFIG +from climada.util.dates_times import ( + date_to_str, + datetime64_to_ordinal, + first_year, + last_year, +) +from climada.util.dwd_icon_loader import ( + delete_icon_grib, + download_icon_centroids_file, + download_icon_grib, +) from climada.util.files_handler import get_file_names -from climada.util.dates_times import (datetime64_to_ordinal, - last_year, - first_year, - date_to_str - ) -from climada.util.dwd_icon_loader import (download_icon_centroids_file, - download_icon_grib, - delete_icon_grib, - ) LOGGER = logging.getLogger(__name__) -HAZ_TYPE = 'WS' +HAZ_TYPE = "WS" """Hazard type acronym for Winter Storm""" N_PROB_EVENTS = 5 * 6 @@ -77,15 +79,17 @@ class StormEurope(Hazard): intensity_thres = 14.7 """Intensity threshold for storage in m/s; same as used by WISC SSI calculations.""" - vars_opt = Hazard.vars_opt.union({'ssi_wisc', 'ssi', 'ssi_full_area'}) + vars_opt = Hazard.vars_opt.union({"ssi_wisc", "ssi", "ssi_full_area"}) """Name of the variables that aren't need to compute the impact.""" - def __init__(self, - units: str = 'm/s', - ssi: Optional[np.ndarray] = None, - ssi_wisc: Optional[np.ndarray] = None, - ssi_full_area: Optional[np.ndarray] = None, - **kwargs): + def __init__( + self, + units: str = "m/s", + ssi: Optional[np.ndarray] = None, + ssi_wisc: Optional[np.ndarray] = None, + ssi_full_area: Optional[np.ndarray] = None, + **kwargs, + ): """Initialize a StormEurope object Parameters @@ -106,22 +110,32 @@ def __init__(self, `StormEurope` object into a smaller region`. Defaults to an empty array. """ - kwargs.setdefault('haz_type', HAZ_TYPE) + kwargs.setdefault("haz_type", HAZ_TYPE) Hazard.__init__(self, units=units, **kwargs) self.ssi = ssi if ssi is not None else np.array([], float) self.ssi_wisc = ssi_wisc if ssi_wisc is not None else np.array([], float) - self.ssi_full_area = ssi_full_area if ssi_full_area is not None else np.array([], float) + self.ssi_full_area = ( + ssi_full_area if ssi_full_area is not None else np.array([], float) + ) def read_footprints(self, *args, **kwargs): """This function is deprecated, use StormEurope.from_footprints instead.""" - LOGGER.warning("The use of StormEurope.read_footprints is deprecated." - "Use StormEurope.from_footprints instead.") + LOGGER.warning( + "The use of StormEurope.read_footprints is deprecated." + "Use StormEurope.from_footprints instead." + ) self.__dict__ = StormEurope.from_footprints(*args, **kwargs).__dict__ @classmethod - def from_footprints(cls, path, ref_raster=None, centroids=None, - files_omit='fp_era20c_1990012515_701_0.nc', combine_threshold=None, - intensity_thres=None): + def from_footprints( + cls, + path, + ref_raster=None, + centroids=None, + files_omit="fp_era20c_1990012515_701_0.nc", + combine_threshold=None, + intensity_thres=None, + ): """Create new StormEurope object from WISC footprints. 
Assumes that all footprints have the same coordinates as the first file listed/first @@ -161,11 +175,13 @@ def from_footprints(cls, path, ref_raster=None, centroids=None, StormEurope object with data from WISC footprints. """ # pylint: disable=protected-access - intensity_thres = cls.intensity_thres if intensity_thres is None else intensity_thres + intensity_thres = ( + cls.intensity_thres if intensity_thres is None else intensity_thres + ) file_names = get_file_names(path) if ref_raster is not None and centroids is not None: - LOGGER.warning('Overriding ref_raster with centroids') + LOGGER.warning("Overriding ref_raster with centroids") if centroids is not None: pass @@ -177,7 +193,7 @@ def from_footprints(cls, path, ref_raster=None, centroids=None, if isinstance(files_omit, str): files_omit = [files_omit] - LOGGER.info('Commencing to iterate over netCDF files.') + LOGGER.info("Commencing to iterate over netCDF files.") file_names = set(file_names) files_to_read = sorted(file_names.difference(files_omit)) @@ -185,24 +201,24 @@ def from_footprints(cls, path, ref_raster=None, centroids=None, if files_to_skip: LOGGER.info("Omitting files %s", files_to_skip) hazard_list = [ - cls._read_one_nc( - file_name, - centroids, - intensity_thres) for file_name in files_to_read] + cls._read_one_nc(file_name, centroids, intensity_thres) + for file_name in files_to_read + ] haz = cls.concat([haz for haz in hazard_list if haz is not None]) # Fix values after concatenation haz.event_id = np.arange(1, len(haz.event_id) + 1) haz.frequency = np.divide( np.ones_like(haz.date), - np.max([(last_year(haz.date) - first_year(haz.date)), 1]) + np.max([(last_year(haz.date) - first_year(haz.date)), 1]), ) if combine_threshold is not None: - LOGGER.info('Combining events with small difference in date.') + LOGGER.info("Combining events with small difference in date.") difference_date = np.diff(haz.date) for event_id_i in haz.event_id[ - np.append(difference_date <= combine_threshold, False)]: + np.append(difference_date <= combine_threshold, False) + ]: event_ids = [event_id_i, event_id_i + 1] haz._combine_events(event_ids) return haz @@ -229,42 +245,58 @@ def _read_one_nc(cls, file_name, centroids, intensity_thres): Hazard instance for one single storm. """ with xr.open_dataset(file_name) as ncdf: - if centroids.size != (ncdf.sizes['latitude'] * ncdf.sizes['longitude']): - LOGGER.warning(('Centroids size doesn\'t match NCDF dimensions. ' - 'Omitting file %s.'), file_name) + if centroids.size != (ncdf.sizes["latitude"] * ncdf.sizes["longitude"]): + LOGGER.warning( + ( + "Centroids size doesn't match NCDF dimensions. " + "Omitting file %s." 
+ ), + file_name, + ) return None # xarray does not penalise repeated assignments, see # http://xarray.pydata.org/en/stable/data-structures.html - stacked = ncdf['max_wind_gust'].stack( - intensity=('latitude', 'longitude', 'time') + stacked = ncdf["max_wind_gust"].stack( + intensity=("latitude", "longitude", "time") ) stacked = stacked.where(stacked > intensity_thres) stacked = stacked.fillna(0) # fill in values from netCDF - ssi_wisc = np.array([float(ncdf.attrs['ssi'])]) + ssi_wisc = np.array([float(ncdf.attrs["ssi"])]) intensity = sparse.csr_matrix(stacked) - new_haz = cls(ssi_wisc=ssi_wisc, - intensity=intensity, - event_name=[ncdf.attrs['storm_name']], - date=np.array([datetime64_to_ordinal(ncdf['time'].data[0])]), - # fill in default values - centroids=centroids, - event_id=np.array([1]), - frequency=np.array([1]), - orig=np.array([True]),) + new_haz = cls( + ssi_wisc=ssi_wisc, + intensity=intensity, + event_name=[ncdf.attrs["storm_name"]], + date=np.array([datetime64_to_ordinal(ncdf["time"].data[0])]), + # fill in default values + centroids=centroids, + event_id=np.array([1]), + frequency=np.array([1]), + orig=np.array([True]), + ) return new_haz def read_cosmoe_file(self, *args, **kwargs): """This function is deprecated, use StormEurope.from_cosmoe_file instead.""" - LOGGER.warning("The use of StormEurope.read_cosmoe_file is deprecated." - "Use StormEurope.from_cosmoe_file instead.") + LOGGER.warning( + "The use of StormEurope.read_cosmoe_file is deprecated." + "Use StormEurope.from_cosmoe_file instead." + ) self.__dict__ = StormEurope.from_cosmoe_file(*args, **kwargs).__dict__ @classmethod - def from_cosmoe_file(cls, fp_file, run_datetime, event_date=None, - model_name='COSMO-2E', description=None, intensity_thres=None): + def from_cosmoe_file( + cls, + fp_file, + run_datetime, + event_date=None, + model_name="COSMO-2E", + description=None, + intensity_thres=None, + ): """Create a new StormEurope object with gust footprint from weather forecast. The funciton is designed for the COSMO ensemble model used by @@ -302,65 +334,80 @@ def from_cosmoe_file(cls, fp_file, run_datetime, event_date=None, haz : StormEurope StormEurope object with data from COSMO ensemble file. """ - intensity_thres = cls.intensity_thres if intensity_thres is None else intensity_thres + intensity_thres = ( + cls.intensity_thres if intensity_thres is None else intensity_thres + ) # read intensity from file with xr.open_dataset(fp_file) as ncdf: - ncdf = ncdf.assign_coords(date=('time',ncdf["time"].dt.floor("D").values)) + ncdf = ncdf.assign_coords(date=("time", ncdf["time"].dt.floor("D").values)) if event_date: try: - stacked = ncdf.sel( - time=event_date.strftime('%Y-%m-%d') - ).groupby('date').max().stack(intensity=('y_1', 'x_1')) + stacked = ( + ncdf.sel(time=event_date.strftime("%Y-%m-%d")) + .groupby("date") + .max() + .stack(intensity=("y_1", "x_1")) + ) except KeyError as ker: - raise ValueError('Extraction of date and coordinates failed. This is most likely ' - 'because the selected event_date ' - f'{event_date.strftime("%Y-%m-%d")} is not contained in the ' - 'weather forecast selected by fp_file {fp_file}. Please adjust ' - f'event_date or fp_file.') from ker + raise ValueError( + "Extraction of date and coordinates failed. This is most likely " + "because the selected event_date " + f'{event_date.strftime("%Y-%m-%d")} is not contained in the ' + "weather forecast selected by fp_file {fp_file}. Please adjust " + f"event_date or fp_file." 
+ ) from ker considered_dates = np.datetime64(event_date) else: - time_covered_step = ncdf['time'].diff('time') - time_covered_day = time_covered_step.groupby('date').sum() + time_covered_step = ncdf["time"].diff("time") + time_covered_day = time_covered_step.groupby("date").sum() # forecast run should cover at least 18 hours of a day - considered_dates_bool = time_covered_day >= np.timedelta64(18,'h') - stacked = ncdf.groupby('date').max()\ - .sel(date=considered_dates_bool)\ - .stack(intensity=('y_1', 'x_1')) - considered_dates = stacked['date'].values - stacked = stacked.stack(date_ensemble=('date', 'epsd_1')) - stacked = stacked.where(stacked['VMAX_10M'] > intensity_thres) + considered_dates_bool = time_covered_day >= np.timedelta64(18, "h") + stacked = ( + ncdf.groupby("date") + .max() + .sel(date=considered_dates_bool) + .stack(intensity=("y_1", "x_1")) + ) + considered_dates = stacked["date"].values + stacked = stacked.stack(date_ensemble=("date", "epsd_1")) + stacked = stacked.where(stacked["VMAX_10M"] > intensity_thres) stacked = stacked.fillna(0) # fill in values from netCDF - intensity = sparse.csr_matrix(stacked['VMAX_10M'].T) - event_id = np.arange(stacked['date_ensemble'].size) + 1 + intensity = sparse.csr_matrix(stacked["VMAX_10M"].T) + event_id = np.arange(stacked["date_ensemble"].size) + 1 date = np.repeat( np.array(datetime64_to_ordinal(considered_dates)), - np.unique(ncdf['epsd_1']).size + np.unique(ncdf["epsd_1"]).size, ) orig = np.full_like(event_id, False) - orig[(stacked['epsd_1'] == 0).values] = True + orig[(stacked["epsd_1"] == 0).values] = True if description is None: - description = (model_name + - ' weather forecast windfield ' + - 'for run startet at ' + - run_datetime.strftime('%Y%m%d%H')) + description = ( + model_name + + " weather forecast windfield " + + "for run startet at " + + run_datetime.strftime("%Y%m%d%H") + ) # Create Hazard haz = cls( intensity=intensity, event_id=event_id, - centroids = cls._centroids_from_nc(fp_file), + centroids=cls._centroids_from_nc(fp_file), # fill in default values orig=orig, date=date, - event_name=[date_i + '_ens' + str(ens_i) - for date_i, ens_i - in zip(date_to_str(date), stacked['epsd_1'].values + 1)], + event_name=[ + date_i + "_ens" + str(ens_i) + for date_i, ens_i in zip( + date_to_str(date), stacked["epsd_1"].values + 1 + ) + ], frequency=np.divide( - np.ones_like(event_id), - np.unique(ncdf['epsd_1']).size), + np.ones_like(event_id), np.unique(ncdf["epsd_1"]).size + ), ) haz.check() @@ -368,14 +415,23 @@ def from_cosmoe_file(cls, fp_file, run_datetime, event_date=None, def read_icon_grib(self, *args, **kwargs): """This function is deprecated, use StormEurope.from_icon_grib instead.""" - LOGGER.warning("The use of StormEurope.read_icon_grib is deprecated." - "Use StormEurope.from_icon_grib instead.") + LOGGER.warning( + "The use of StormEurope.read_icon_grib is deprecated." + "Use StormEurope.from_icon_grib instead." + ) self.__dict__ = StormEurope.from_icon_grib(*args, **kwargs).__dict__ @classmethod - def from_icon_grib(cls, run_datetime, event_date=None, model_name='icon-eu-eps', - description=None, grib_dir=None, delete_raw_data=True, - intensity_thres=None): + def from_icon_grib( + cls, + run_datetime, + event_date=None, + model_name="icon-eu-eps", + description=None, + grib_dir=None, + delete_raw_data=True, + intensity_thres=None, + ): """Create new StormEurope object from DWD icon weather forecast footprints. 
New files are available for 24 hours on @@ -420,14 +476,19 @@ def from_icon_grib(cls, run_datetime, event_date=None, model_name='icon-eu-eps', StormEurope object with data from DWD icon weather forecast footprints. """ # pylint: disable=protected-access - intensity_thres = cls.intensity_thres if intensity_thres is None else intensity_thres + intensity_thres = ( + cls.intensity_thres if intensity_thres is None else intensity_thres + ) if not (run_datetime.hour == 0 or run_datetime.hour == 12): - LOGGER.warning('The event definition is inaccuratly implemented ' - 'for starting times, which are not 00H or 12H.') + LOGGER.warning( + "The event definition is inaccuratly implemented " + "for starting times, which are not 00H or 12H." + ) # download files, if they don't already exist file_names = download_icon_grib( - run_datetime, model_name=model_name, download_dir=grib_dir) + run_datetime, model_name=model_name, download_dir=grib_dir + ) # create centroids nc_centroids_file = download_icon_centroids_file(model_name, grib_dir) @@ -435,79 +496,91 @@ def from_icon_grib(cls, run_datetime, event_date=None, model_name='icon-eu-eps', # read intensity from files for ind_i, file_i in enumerate(file_names): gripfile_path_i = Path(file_i[:-4]) - with open(file_i, 'rb') as source, open(gripfile_path_i, 'wb') as dest: + with open(file_i, "rb") as source, open(gripfile_path_i, "wb") as dest: dest.write(bz2.decompress(source.read())) - with xr.open_dataset(gripfile_path_i, engine='cfgrib') as ds_i: + with xr.open_dataset(gripfile_path_i, engine="cfgrib") as ds_i: if ind_i == 0: stacked = ds_i else: - stacked = xr.concat([stacked,ds_i], 'valid_time') + stacked = xr.concat([stacked, ds_i], "valid_time") # create intensity matrix with max for each full day stacked = stacked.assign_coords( - date=('valid_time', stacked["valid_time"].dt.floor("D").values)) + date=("valid_time", stacked["valid_time"].dt.floor("D").values) + ) if event_date: try: - stacked = stacked.sel( - valid_time=event_date.strftime('%Y-%m-%d')).groupby('date').max() + stacked = ( + stacked.sel(valid_time=event_date.strftime("%Y-%m-%d")) + .groupby("date") + .max() + ) except KeyError as ker: - raise ValueError('Extraction of date and coordinates failed. This is most likely ' - 'because the selected event_date ' - f'{event_date.strftime("%Y-%m-%d")} is not contained in the ' - 'weather forecast selected by run_datetime' - f'{run_datetime.strftime("%Y-%m-%d %H:%M")}. Please adjust ' - 'event_date or run_datetime.') from ker + raise ValueError( + "Extraction of date and coordinates failed. This is most likely " + "because the selected event_date " + f'{event_date.strftime("%Y-%m-%d")} is not contained in the ' + "weather forecast selected by run_datetime" + f'{run_datetime.strftime("%Y-%m-%d %H:%M")}. Please adjust ' + "event_date or run_datetime." 
+ ) from ker considered_dates = np.datetime64(event_date) else: - time_covered_step = stacked['valid_time'].diff('valid_time') - time_covered_day = time_covered_step.groupby('date').sum() + time_covered_step = stacked["valid_time"].diff("valid_time") + time_covered_day = time_covered_step.groupby("date").sum() # forecast run should cover at least 18 hours of a day - considered_dates_bool = time_covered_day >= np.timedelta64(18,'h') - stacked = stacked.groupby('date').max().sel(date=considered_dates_bool) - considered_dates = stacked['date'].values - stacked = stacked.stack(date_ensemble=('date', 'number')) + considered_dates_bool = time_covered_day >= np.timedelta64(18, "h") + stacked = stacked.groupby("date").max().sel(date=considered_dates_bool) + considered_dates = stacked["date"].values + stacked = stacked.stack(date_ensemble=("date", "number")) stacked = stacked.where(stacked > intensity_thres) stacked = stacked.fillna(0) - event_id = np.arange(stacked['date_ensemble'].size) + 1 + event_id = np.arange(stacked["date_ensemble"].size) + 1 date = np.repeat( np.array(datetime64_to_ordinal(considered_dates)), - np.unique(stacked['number']).size + np.unique(stacked["number"]).size, ) orig = np.full_like(event_id, False) - orig[(stacked['number'] == 1).values] = True + orig[(stacked["number"] == 1).values] = True if description is None: - description = ('icon weather forecast windfield for run started at ' + - run_datetime.strftime('%Y%m%d%H')) + description = ( + "icon weather forecast windfield for run started at " + + run_datetime.strftime("%Y%m%d%H") + ) # Create Hazard haz = cls( - intensity=sparse.csr_matrix((stacked.get('gust') or stacked.get('i10fg')).T), + intensity=sparse.csr_matrix( + (stacked.get("gust") or stacked.get("i10fg")).T + ), centroids=cls._centroids_from_nc(nc_centroids_file), event_id=event_id, date=date, orig=orig, - event_name=[date_i + '_ens' + str(ens_i) - for date_i, ens_i - in zip(date_to_str(date), stacked['number'].values)], + event_name=[ + date_i + "_ens" + str(ens_i) + for date_i, ens_i in zip(date_to_str(date), stacked["number"].values) + ], frequency=np.divide( - np.ones_like(event_id), - np.unique(stacked['number']).size), + np.ones_like(event_id), np.unique(stacked["number"]).size + ), ) haz.check() # delete generated .grib2 and .4cc40.idx files for ind_i, file_i in enumerate(file_names): gripfile_path_i = Path(file_i[:-4]) - idxfile_path_i = next(gripfile_path_i.parent.glob( - str(gripfile_path_i.name) + '.*.idx')) + idxfile_path_i = next( + gripfile_path_i.parent.glob(str(gripfile_path_i.name) + ".*.idx") + ) gripfile_path_i.unlink() idxfile_path_i.unlink() if delete_raw_data: - #delete downloaded .bz2 files + # delete downloaded .bz2 files delete_icon_grib(run_datetime, model_name=model_name, download_dir=grib_dir) return haz @@ -517,40 +590,43 @@ def _centroids_from_nc(file_name): """Construct Centroids from the grid described by 'latitude' and 'longitude' variables in a netCDF file. 
""" - LOGGER.info('Constructing centroids from %s', file_name) + LOGGER.info("Constructing centroids from %s", file_name) with xr.open_dataset(file_name) as ncdf: create_meshgrid = True - if hasattr(ncdf, 'latitude'): - lats = ncdf['latitude'].data - lons = ncdf['longitude'].data - elif hasattr(ncdf, 'lat'): - lats = ncdf['lat'].data - lons = ncdf['lon'].data - elif hasattr(ncdf, 'lat_1'): - if len(ncdf['lon_1'].shape)>1 & \ - (ncdf['lon_1'].shape == ncdf['lat_1'].shape) \ - : - lats = ncdf['lat_1'].data.flatten() - lons = ncdf['lon_1'].data.flatten() + if hasattr(ncdf, "latitude"): + lats = ncdf["latitude"].data + lons = ncdf["longitude"].data + elif hasattr(ncdf, "lat"): + lats = ncdf["lat"].data + lons = ncdf["lon"].data + elif hasattr(ncdf, "lat_1"): + if len(ncdf["lon_1"].shape) > 1 & ( + ncdf["lon_1"].shape == ncdf["lat_1"].shape + ): + lats = ncdf["lat_1"].data.flatten() + lons = ncdf["lon_1"].data.flatten() create_meshgrid = False else: - lats = ncdf['lat_1'].data - lons = ncdf['lon_1'].data - elif hasattr(ncdf, 'clat'): - lats = ncdf['clat'].data - lons = ncdf['clon'].data - if ncdf['clat'].attrs['units']=='radian': + lats = ncdf["lat_1"].data + lons = ncdf["lon_1"].data + elif hasattr(ncdf, "clat"): + lats = ncdf["clat"].data + lons = ncdf["clon"].data + if ncdf["clat"].attrs["units"] == "radian": lats = np.rad2deg(lats) lons = np.rad2deg(lons) create_meshgrid = False else: - raise AttributeError('netcdf file has no field named latitude or ' - 'other know abrivation for coordinates.') + raise AttributeError( + "netcdf file has no field named latitude or " + "other know abrivation for coordinates." + ) if create_meshgrid: - lats, lons = np.array([np.repeat(lats, len(lons)), - np.tile(lons, len(lats))]) - cent = Centroids(lat=lats, lon=lons, on_land='natural_earth') + lats, lons = np.array( + [np.repeat(lats, len(lons)), np.tile(lons, len(lats))] + ) + cent = Centroids(lat=lats, lon=lons, on_land="natural_earth") return cent @@ -569,34 +645,46 @@ def _combine_events(self, event_ids): select_other_events = np.invert(select_event_ids) intensity_tmp = self.intensity[select_event_ids, :].max(axis=0) self.intensity = self.intensity[select_other_events, :] - self.intensity = sparse.vstack([self.intensity, sparse.csr_matrix(intensity_tmp)]) - self.event_id = np.append(self.event_id[select_other_events], self.event_id.max() + 1) - self.date = np.append(self.date[select_other_events], - np.round(self.date[select_event_ids].mean())) + self.intensity = sparse.vstack( + [self.intensity, sparse.csr_matrix(intensity_tmp)] + ) + self.event_id = np.append( + self.event_id[select_other_events], self.event_id.max() + 1 + ) + self.date = np.append( + self.date[select_other_events], np.round(self.date[select_event_ids].mean()) + ) name_2 = self.event_name.pop(np.where(select_event_ids)[0][1]) name_1 = self.event_name.pop(np.where(select_event_ids)[0][0]) - self.event_name.append(name_1 + '_' + name_2) + self.event_name.append(name_1 + "_" + name_2) fraction_tmp = self.fraction[select_event_ids, :].max(axis=0) self.fraction = self.fraction[select_other_events, :] self.fraction = sparse.vstack([self.fraction, sparse.csr_matrix(fraction_tmp)]) - self.frequency = np.append(self.frequency[select_other_events], - self.frequency[select_event_ids].mean()) - self.orig = np.append(self.orig[select_other_events], - self.orig[select_event_ids].max()) + self.frequency = np.append( + self.frequency[select_other_events], self.frequency[select_event_ids].mean() + ) + self.orig = np.append( + 
self.orig[select_other_events], self.orig[select_event_ids].max() + ) if self.ssi_wisc.size > 0: - self.ssi_wisc = np.append(self.ssi_wisc[select_other_events], - np.nan) + self.ssi_wisc = np.append(self.ssi_wisc[select_other_events], np.nan) if self.ssi.size > 0: - self.ssi = np.append(self.ssi[select_other_events], - np.nan) + self.ssi = np.append(self.ssi[select_other_events], np.nan) if self.ssi_full_area.size > 0: - self.ssi_full_area = np.append(self.ssi_full_area[select_other_events], - np.nan) + self.ssi_full_area = np.append( + self.ssi_full_area[select_other_events], np.nan + ) self.check() - def calc_ssi(self, method='dawkins', intensity=None, on_land=True, - threshold=None, sel_cen=None): + def calc_ssi( + self, + method="dawkins", + intensity=None, + on_land=True, + threshold=None, + sel_cen=None, + ): """Calculate the SSI, method must either be 'dawkins' or 'wisc_gust'. 'dawkins', after Dawkins et al. (2016), @@ -642,8 +730,9 @@ def calc_ssi(self, method='dawkins', intensity=None, on_land=True, intensity = self.intensity if threshold is not None: - assert threshold >= self.intensity_thres, \ - 'threshold cannot be below threshold upon read_footprint' + assert ( + threshold >= self.intensity_thres + ), "threshold cannot be below threshold upon read_footprint" intensity = intensity.multiply(intensity > threshold) else: intensity = intensity.multiply(intensity > self.intensity_thres) @@ -660,14 +749,14 @@ def calc_ssi(self, method='dawkins', intensity=None, on_land=True, ssi = np.zeros(intensity.shape[0]) - if method == 'dawkins': + if method == "dawkins": area_c = area_pixel / 1000 / 1000 * sel_cen for i, inten_i in enumerate(intensity): ssi_i = inten_i.power(3).dot(area_c) # matrix crossproduct (row x column vector) ssi[i] = ssi_i.item(0) - elif method == 'wisc_gust': + elif method == "wisc_gust": for i, inten_i in enumerate(intensity[:, sel_cen]): area = np.sum(area_pixel[inten_i.indices]) / 1000 / 1000 inten_mean = np.mean(inten_i) @@ -700,33 +789,37 @@ def plot_ssi(self, full_area=False): ssi = self.ssi # data wrangling - ssi_freq = pd.DataFrame({ - 'ssi': ssi, - 'freq': self.frequency, - 'orig': self.orig, - }) - ssi_freq = ssi_freq.sort_values('ssi', ascending=False) - ssi_freq['freq_cum'] = np.cumsum(ssi_freq['freq']) - - ssi_hist = ssi_freq.loc[ssi_freq['orig']].copy() - ssi_hist['freq'] = ssi_hist['freq'] * self.orig.size / self.orig.sum() - ssi_hist['freq_cum'] = np.cumsum(ssi_hist['freq']) + ssi_freq = pd.DataFrame( + { + "ssi": ssi, + "freq": self.frequency, + "orig": self.orig, + } + ) + ssi_freq = ssi_freq.sort_values("ssi", ascending=False) + ssi_freq["freq_cum"] = np.cumsum(ssi_freq["freq"]) + + ssi_hist = ssi_freq.loc[ssi_freq["orig"]].copy() + ssi_hist["freq"] = ssi_hist["freq"] * self.orig.size / self.orig.sum() + ssi_hist["freq_cum"] = np.cumsum(ssi_hist["freq"]) # plotting fig, axs = plt.subplots() - axs.plot(ssi_freq['freq_cum'], ssi_freq['ssi'], label='All Events') - axs.scatter(ssi_hist['freq_cum'], ssi_hist['ssi'], - color='red', label='Historic Events') + axs.plot(ssi_freq["freq_cum"], ssi_freq["ssi"], label="All Events") + axs.scatter( + ssi_hist["freq_cum"], ssi_hist["ssi"], color="red", label="Historic Events" + ) axs.legend() - axs.set_xlabel('Exceedance Frequency [1/a]') - axs.set_ylabel('Storm Severity Index') - axs.ticklabel_format(axis='y', style='sci', scilimits=(0, 0)) + axs.set_xlabel("Exceedance Frequency [1/a]") + axs.set_ylabel("Storm Severity Index") + axs.ticklabel_format(axis="y", style="sci", scilimits=(0, 0)) plt.show() return fig, 
axs - def generate_prob_storms(self, reg_id=528, spatial_shift=4, ssi_args=None, - **kwargs): + def generate_prob_storms( + self, reg_id=528, spatial_shift=4, ssi_args=None, **kwargs + ): """Generates a new hazard set with one original and 29 probabilistic storms per historic storm. This represents a partial implementation of the Monte-Carlo method described in section 2.2 of Schwierz et al. @@ -773,10 +866,7 @@ def generate_prob_storms(self, reg_id=528, spatial_shift=4, ssi_args=None, else: # shifting truncates valid centroids sel_cen = np.zeros(self.centroids.shape, bool) - sel_cen[ - spatial_shift:-spatial_shift, - spatial_shift:-spatial_shift - ] = True + sel_cen[spatial_shift:-spatial_shift, spatial_shift:-spatial_shift] = True sel_cen = sel_cen.reshape(self.centroids.size) # init probabilistic array @@ -784,21 +874,17 @@ def generate_prob_storms(self, reg_id=528, spatial_shift=4, ssi_args=None, intensity_prob = sparse.lil_matrix((n_out, np.count_nonzero(sel_cen))) ssi = np.zeros(n_out) - LOGGER.info('Commencing probabilistic calculations') + LOGGER.info("Commencing probabilistic calculations") for index, intensity1d in enumerate(self.intensity): # indices for return matrix start = index * N_PROB_EVENTS end = (index + 1) * N_PROB_EVENTS - intensity_prob[start:end, :], ssi[start:end] =\ - self._hist2prob( - intensity1d, - sel_cen, - spatial_shift, - ssi_args, - **kwargs) + intensity_prob[start:end, :], ssi[start:end] = self._hist2prob( + intensity1d, sel_cen, spatial_shift, ssi_args, **kwargs + ) - LOGGER.info('Generating new StormEurope instance') + LOGGER.info("Generating new StormEurope instance") base = np.repeat((self.event_id * 100), N_PROB_EVENTS) synth_id = np.tile(np.arange(N_PROB_EVENTS), self.size) event_id = base + synth_id @@ -811,20 +897,27 @@ def generate_prob_storms(self, reg_id=528, spatial_shift=4, ssi_args=None, # subsetting centroids centroids=self.centroids.select(sel_cen=sel_cen), # construct new event ids - event_id=event_id, # frequency still based on the historic number of # years - frequency=np.divide(np.repeat(self.frequency, N_PROB_EVENTS), - N_PROB_EVENTS), + frequency=np.divide( + np.repeat(self.frequency, N_PROB_EVENTS), N_PROB_EVENTS + ), orig=(event_id % 100 == 0), ) new_haz.check() return new_haz - def _hist2prob(self, intensity1d, sel_cen, spatial_shift, ssi_args=None, - power=1.15, scale=0.0225): + def _hist2prob( + self, + intensity1d, + sel_cen, + spatial_shift, + ssi_args=None, + power=1.15, + scale=0.0225, + ): """Internal function, intended to be called from generate_prob_storms. Generates six permutations based on one historical storm event, which it then moves around by spatial_shift gridpoints to the east, west, and @@ -881,27 +974,32 @@ def _hist2prob(self, intensity1d, sel_cen, spatial_shift, ssi_args=None, intensity3d_prob[4] = intensity2d + (scale * intensity2d_pwr) # 6. 
minus scaled sqrt and pwr - intensity3d_prob[5] = (intensity2d - - (0.5 * scale * intensity2d_pwr) - - (0.5 * scale * intensity2d_sqrt)) + intensity3d_prob[5] = ( + intensity2d + - (0.5 * scale * intensity2d_pwr) + - (0.5 * scale * intensity2d_sqrt) + ) # spatial shifts # northward - intensity3d_prob[6:12, :-spatial_shift, :] = \ - intensity3d_prob[0:6, spatial_shift:, :] + intensity3d_prob[6:12, :-spatial_shift, :] = intensity3d_prob[ + 0:6, spatial_shift:, : + ] # southward - intensity3d_prob[12:18, spatial_shift:, :] = \ - intensity3d_prob[0:6, :-spatial_shift, :] + intensity3d_prob[12:18, spatial_shift:, :] = intensity3d_prob[ + 0:6, :-spatial_shift, : + ] # eastward - intensity3d_prob[18:24, :, spatial_shift:] = \ - intensity3d_prob[0:6, :, :-spatial_shift] + intensity3d_prob[18:24, :, spatial_shift:] = intensity3d_prob[ + 0:6, :, :-spatial_shift + ] # westward - intensity3d_prob[24:30, :, :-spatial_shift] = \ - intensity3d_prob[0:6, :, spatial_shift:] + intensity3d_prob[24:30, :, :-spatial_shift] = intensity3d_prob[ + 0:6, :, spatial_shift: + ] intensity_out = intensity3d_prob.reshape( - N_PROB_EVENTS, - np.prod(self.centroids.shape) + N_PROB_EVENTS, np.prod(self.centroids.shape) ) ssi = self.calc_ssi(intensity=intensity_out, **ssi_args) @@ -910,11 +1008,13 @@ def _hist2prob(self, intensity1d, sel_cen, spatial_shift, ssi_args=None, # pylint: disable=invalid-name -def generate_WS_forecast_hazard(run_datetime=None, - event_date=None, - haz_model='icon-eu-eps', - haz_raw_storage=None, - save_haz=True): +def generate_WS_forecast_hazard( + run_datetime=None, + event_date=None, + haz_model="icon-eu-eps", + haz_raw_storage=None, + save_haz=True, +): """use the initialization time (run_datetime), the date of the event and specify the forecast model (haz_model) to generate a Hazard from forecast data either by download or through reading from existing file. 
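# Illustrative sketch (not part of the patch): the core footprint pipeline shared
# by the readers above (_read_one_nc, from_cosmoe_file, from_icon_grib): stack the
# spatial dimensions into one axis, zero out gusts at or below the threshold, and
# keep the result as one sparse CSR row per event. The tiny gust field is
# hypothetical; 14.7 m/s is the class default intensity_thres.
import numpy as np
import xarray as xr
from scipy import sparse

gust = xr.DataArray(
    np.array([[[10.0, 20.0], [16.0, 5.0]]]),            # (time, latitude, longitude)
    dims=("time", "latitude", "longitude"),
)
stacked = gust.stack(intensity=("latitude", "longitude", "time"))
stacked = stacked.where(stacked > 14.7).fillna(0)        # keep only damaging gusts
intensity = sparse.csr_matrix(stacked.values[None, :])   # one event per row
print(intensity.toarray())                               # [[ 0. 20. 16.  0.]]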
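# Illustrative sketch (not part of the patch): the 'dawkins' branch of calc_ssi
# above reduces to SSI = sum of cell_area_km2 * wind**3 over cells where the
# (already thresholded) wind is non-zero. A tiny numpy/scipy version with
# hypothetical numbers; 14.7 m/s is the class default threshold.
import numpy as np
from scipy import sparse

intensity = sparse.csr_matrix(np.array([[10.0, 20.0, 30.0]]))  # one event, 3 cells
area_km2 = np.array([1.0, 2.0, 0.0])                           # 0 for cells off land
intensity = intensity.multiply(intensity > 14.7).tocsr()       # apply threshold
ssi = intensity.power(3).dot(area_km2)                         # 2*20**3 + 0*30**3
print(ssi)                                                     # [16000.]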
@@ -959,65 +1059,76 @@ def generate_WS_forecast_hazard(run_datetime=None, FORECAST_DIR = CONFIG.hazard.storm_europe.forecast_dir.dir() if run_datetime is None: - run_datetime = dt.datetime.today().replace(hour=0, minute=0, second=0, microsecond=0) + run_datetime = dt.datetime.today().replace( + hour=0, minute=0, second=0, microsecond=0 + ) if event_date is None: - event_date = dt.datetime.today().replace(hour=0, minute=0, second=0, microsecond=0) \ - + dt.timedelta(days=2) - - if haz_model in ['cosmo1e_file', 'cosmo2e_file']: - if haz_model == 'cosmo1e_file': - haz_model='C1E' - full_model_name_temp = 'COSMO-1E' - if haz_model == 'cosmo2e_file': - haz_model='C2E' - full_model_name_temp = 'COSMO-2E' - haz_file_name = (f'{HAZ_TYPE}_{haz_model}_run{run_datetime.strftime("%Y%m%d%H")}' - f'_event{event_date.strftime("%Y%m%d")}.hdf5') + event_date = dt.datetime.today().replace( + hour=0, minute=0, second=0, microsecond=0 + ) + dt.timedelta(days=2) + + if haz_model in ["cosmo1e_file", "cosmo2e_file"]: + if haz_model == "cosmo1e_file": + haz_model = "C1E" + full_model_name_temp = "COSMO-1E" + if haz_model == "cosmo2e_file": + haz_model = "C2E" + full_model_name_temp = "COSMO-2E" + haz_file_name = ( + f'{HAZ_TYPE}_{haz_model}_run{run_datetime.strftime("%Y%m%d%H")}' + f'_event{event_date.strftime("%Y%m%d")}.hdf5' + ) haz_file = FORECAST_DIR / haz_file_name if haz_file.exists(): - LOGGER.info('Loading hazard from %s.', haz_file) + LOGGER.info("Loading hazard from %s.", haz_file) hazard = StormEurope.from_hdf5(haz_file) else: - LOGGER.info('Generating %s hazard.', haz_model) + LOGGER.info("Generating %s hazard.", haz_model) if not haz_raw_storage: haz_raw_storage = FORECAST_DIR / "cosmoe_forecast_{}_vmax.nc" - fp_file = Path(str(haz_raw_storage).format(run_datetime.strftime('%y%m%d%H'))) + fp_file = Path( + str(haz_raw_storage).format(run_datetime.strftime("%y%m%d%H")) + ) hazard = StormEurope.from_cosmoe_file( fp_file, event_date=event_date, run_datetime=run_datetime, - model_name=full_model_name_temp + model_name=full_model_name_temp, ) if save_haz: hazard.write_hdf5(haz_file) - elif haz_model in ['icon-eu-eps', 'icon-d2-eps']: - if haz_model == 'icon-eu-eps': + elif haz_model in ["icon-eu-eps", "icon-d2-eps"]: + if haz_model == "icon-eu-eps": full_model_name_temp = haz_model - haz_model='IEE' - if haz_model == 'icon-d2-eps': + haz_model = "IEE" + if haz_model == "icon-d2-eps": full_model_name_temp = haz_model - haz_model='IDE' - haz_file_name = (f'{HAZ_TYPE}_{haz_model}_run{run_datetime.strftime("%Y%m%d%H")}' - f'_event{event_date.strftime("%Y%m%d")}.hdf5') + haz_model = "IDE" + haz_file_name = ( + f'{HAZ_TYPE}_{haz_model}_run{run_datetime.strftime("%Y%m%d%H")}' + f'_event{event_date.strftime("%Y%m%d")}.hdf5' + ) haz_file = FORECAST_DIR / haz_file_name if haz_file.exists(): - LOGGER.info('Loading hazard from %s.', haz_file) + LOGGER.info("Loading hazard from %s.", haz_file) hazard = StormEurope.from_hdf5(haz_file) else: - LOGGER.info('Generating %s hazard.', haz_model) + LOGGER.info("Generating %s hazard.", haz_model) hazard = StormEurope.from_icon_grib( run_datetime, event_date=event_date, delete_raw_data=False, - model_name=full_model_name_temp + model_name=full_model_name_temp, ) if save_haz: hazard.write_hdf5(haz_file) else: - raise NotImplementedError("specific 'WS' hazard not implemented yet. " + - "Please specify a valid value for haz_model.") + raise NotImplementedError( + "specific 'WS' hazard not implemented yet. " + + "Please specify a valid value for haz_model." 
+ ) # check if hazard is successfully generated for Forecast if not isinstance(hazard, Hazard): - LOGGER.warning('Hazard generation unsuccessful.') + LOGGER.warning("Hazard generation unsuccessful.") return hazard, haz_model, run_datetime, event_date diff --git a/climada/hazard/tc_clim_change.py b/climada/hazard/tc_clim_change.py index c10fcecc0..576cb38bd 100644 --- a/climada/hazard/tc_clim_change.py +++ b/climada/hazard/tc_clim_change.py @@ -36,29 +36,31 @@ Define scaling factors to model the impact of climate change on tropical cyclones. """ -from math import log import logging -import pandas as pd +from math import log + import numpy as np +import pandas as pd LOGGER = logging.getLogger(__name__) -MAP_BASINS_NAMES = {'NA': 0, 'WP': 1, 'EP': 2, 'NI': 3, 'SI': 4, 'SP': 5} +MAP_BASINS_NAMES = {"NA": 0, "WP": 1, "EP": 2, "NI": 3, "SI": 4, "SP": 5} -MAP_VARS_NAMES = {'cat05': 0, 'cat45': 1, 'intensity': 2} +MAP_VARS_NAMES = {"cat05": 0, "cat45": 1, "intensity": 2} -MAP_PERC_NAMES = {'5/10': 0, '25': 1, '50': 2, '75': 3, '90/95': 4} +MAP_PERC_NAMES = {"5/10": 0, "25": 1, "50": 2, "75": 3, "90/95": 4} # it defines the first and last projection years as well as the largest smoothing window -YEAR_WINDOWS_PROPS = {'start': 2000, 'end': 2100, 'smoothing': 5} +YEAR_WINDOWS_PROPS = {"start": 2000, "end": 2100, "smoothing": 5} + def get_knutson_scaling_factor( - variable: str='cat05', - percentile: str='50', - basin: str='NA', - baseline: tuple=(1982, 2022), - yearly_steps: int=5 - ): + variable: str = "cat05", + percentile: str = "50", + basin: str = "NA", + baseline: tuple = (1982, 2022), + yearly_steps: int = 5, +): """ This code combines data in Knutson et al. (2020) and global mean surface temperature (GMST) data (historical and CMIP5 simulated) to produce TC @@ -119,16 +121,20 @@ def get_knutson_scaling_factor( knutson_data = get_knutson_data() - num_of_rcps, gmst_years = gmst_info['gmst_data'].shape + num_of_rcps, gmst_years = gmst_info["gmst_data"].shape - if ((base_start_year <= gmst_info['gmst_start_year']) or - (base_start_year >= gmst_info['gmst_end_year']) or - (base_end_year <= gmst_info['gmst_start_year']) or - (base_end_year >= gmst_info['gmst_end_year'])): + if ( + (base_start_year <= gmst_info["gmst_start_year"]) + or (base_start_year >= gmst_info["gmst_end_year"]) + or (base_end_year <= gmst_info["gmst_start_year"]) + or (base_end_year >= gmst_info["gmst_end_year"]) + ): - raise ValueError("The selected historical baseline falls outside" - f"the GMST data period {gmst_info['gmst_start_year']}" - f"-{gmst_info['gmst_end_year']}") + raise ValueError( + "The selected historical baseline falls outside" + f"the GMST data period {gmst_info['gmst_start_year']}" + f"-{gmst_info['gmst_end_year']}" + ) var_id = MAP_VARS_NAMES[variable] perc_id = MAP_PERC_NAMES[percentile] @@ -139,9 +145,9 @@ def get_knutson_scaling_factor( # 3. calculate the fractional change in the averages # please refer to section 4. Methods of Jewson (2021) for more details. 
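# Illustrative sketch (not part of the patch): a worked numeric example of the three
# steps listed above, collapsing the year windows to one baseline value and one
# future value. The +10 % Knutson-style change and the GMST anomalies
# (0.5 K baseline, 1.5 K future) are hypothetical.
from math import exp, log

knutson_value = 10.0                        # percentage change from Knutson et al.
beta = 0.5 * log(0.01 * knutson_value + 1)  # equation 6 in Jewson (2021)
tc_baseline = exp(beta * 0.5)               # equation 3, baseline GMST anomaly
tc_future = exp(beta * 1.5)                 # equation 3, future GMST anomaly
change = (tc_future - tc_baseline) / tc_baseline * 100
print(round(change, 2))                     # 4.88 -> about +4.9 % vs the baseline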
- mid_years = np.arange(YEAR_WINDOWS_PROPS['start'], - YEAR_WINDOWS_PROPS['end']+1, - yearly_steps) + mid_years = np.arange( + YEAR_WINDOWS_PROPS["start"], YEAR_WINDOWS_PROPS["end"] + 1, yearly_steps + ) predicted_change = np.ones((mid_years.shape[0], num_of_rcps)) try: @@ -149,29 +155,31 @@ def get_knutson_scaling_factor( knutson_value = knutson_data[var_id, basin_id, perc_id] except KeyError: - LOGGER.warning(f"No scaling factors are defined for basin {basin} therefore" - "no change will be projected for tracks in this basin") - return pd.DataFrame(predicted_change, - index=mid_years, - columns=gmst_info['rcps']) + LOGGER.warning( + f"No scaling factors are defined for basin {basin} therefore" + "no change will be projected for tracks in this basin" + ) + return pd.DataFrame( + predicted_change, index=mid_years, columns=gmst_info["rcps"] + ) - base_start_pos = base_start_year - gmst_info['gmst_start_year'] - base_end_pos = base_end_year - gmst_info['gmst_start_year'] + base_start_pos = base_start_year - gmst_info["gmst_start_year"] + base_end_pos = base_end_year - gmst_info["gmst_start_year"] # Step 1. - beta = 0.5 * log(0.01 * knutson_value + 1) # equation 6 in Jewson (2021) - tc_properties = np.exp(beta * gmst_info['gmst_data']) # equation 3 in Jewson (2021) + beta = 0.5 * log(0.01 * knutson_value + 1) # equation 6 in Jewson (2021) + tc_properties = np.exp(beta * gmst_info["gmst_data"]) # equation 3 in Jewson (2021) # Step 2. - baseline = np.mean(tc_properties[:, base_start_pos:base_end_pos + 1], 1) + baseline = np.mean(tc_properties[:, base_start_pos : base_end_pos + 1], 1) # Step 3. for i, mid_year in enumerate(mid_years): - mid_year_in_gmst_ind = mid_year - gmst_info['gmst_start_year'] + mid_year_in_gmst_ind = mid_year - gmst_info["gmst_start_year"] actual_smoothing = min( - YEAR_WINDOWS_PROPS['smoothing'], + YEAR_WINDOWS_PROPS["smoothing"], gmst_years - mid_year_in_gmst_ind - 1, - mid_year_in_gmst_ind + mid_year_in_gmst_ind, ) fut_start_pos = mid_year_in_gmst_ind - actual_smoothing fut_end_pos = mid_year_in_gmst_ind + actual_smoothing + 1 @@ -179,12 +187,10 @@ def get_knutson_scaling_factor( prediction = np.mean(tc_properties[:, fut_start_pos:fut_end_pos], 1) # assess fractional changes - predicted_change[i] = ((prediction - baseline) / - baseline) * 100 + predicted_change[i] = ((prediction - baseline) / baseline) * 100 + + return pd.DataFrame(predicted_change, index=mid_years, columns=gmst_info["rcps"]) - return pd.DataFrame(predicted_change, - index=mid_years, - columns=gmst_info['rcps']) def get_gmst_info(): """ @@ -207,126 +213,913 @@ def get_gmst_info(): - gmst_data: array with GMST data across RCPs (first dim) and years (second dim) """ - gmst_data = np.array([ - [-0.16,-0.08,-0.1,-0.16,-0.28,-0.32,-0.3,-0.35,-0.16,-0.1, - -0.35,-0.22,-0.27,-0.31,-0.3,-0.22,-0.11,-0.11,-0.26,-0.17, - -0.08,-0.15,-0.28,-0.37, -0.47,-0.26,-0.22,-0.39,-0.43,-0.48, - -0.43,-0.44,-0.36,-0.34,-0.15,-0.14,-0.36,-0.46,-0.29,-0.27, - -0.27,-0.19,-0.28,-0.26,-0.27,-0.22,-0.1,-0.22,-0.2,-0.36, - -0.16,-0.1,-0.16,-0.29,-0.13,-0.2,-0.15,-0.03,-0.01,-0.02, - 0.13,0.19,0.07,0.09,0.2,0.09,-0.07,-0.03,-0.11,-0.11,-0.17, - -0.07,0.01,0.08,-0.13,-0.14,-0.19,0.05,0.06,0.03,-0.02,0.06, - 0.04,0.05,-0.2,-0.11,-0.06,-0.02,-0.08,0.05,0.02,-0.08,0.01, - 0.16,-0.07,-0.01,-0.1,0.18,0.07,0.16,0.26,0.32,0.14,0.31,0.15, - 0.11,0.18,0.32,0.38,0.27,0.45,0.4,0.22,0.23,0.32,0.45,0.33,0.47, - 0.61,0.39,0.39,0.54,0.63,0.62,0.54, 0.68,0.64,0.66,0.54,0.66, - 0.72,0.61,0.64,0.68,0.75,0.9,1.02,0.92,0.85,0.98,0.909014286, - 
0.938814286,0.999714286,1.034314286,1.009714286,1.020014286, - 1.040914286,1.068614286,1.072114286,1.095114286,1.100414286, - 1.099014286,1.118514286,1.133414286,1.135314286,1.168814286, - 1.200414286,1.205414286,1.227214286,1.212614286,1.243014286, - 1.270114286,1.250114286,1.254514286,1.265814286,1.263314286, - 1.294714286,1.289814286,1.314214286,1.322514286,1.315614286, - 1.276314286,1.302414286,1.318414286,1.312014286,1.317914286, - 1.341214286,1.297414286,1.308514286,1.314614286,1.327814286, - 1.335814286,1.331214286,1.318014286,1.289714286,1.334414286, - 1.323914286,1.316614286,1.300214286,1.302414286,1.303114286, - 1.311014286,1.283914286,1.293814286,1.296914286,1.316614286, - 1.306314286,1.290614286,1.288814286,1.272114286,1.264614286, - 1.262514286,1.290514286,1.285114286,1.267214286,1.267414286, - 1.294314286,1.315614286,1.310314286,1.283914286,1.296614286, - 1.281214286,1.301014286,1.300114286,1.303114286,1.286714286, - 1.297514286,1.312114286,1.276714286,1.281414286,1.276414286], - [-0.16,-0.08,-0.1,-0.16,-0.28,-0.32,-0.3,-0.35,-0.16,-0.1, - -0.35, -0.22,-0.27,-0.31,-0.3,-0.22,-0.11,-0.11,-0.26,-0.17, - -0.08,-0.15,-0.28,-0.37,-0.47,-0.26,-0.22,-0.39,-0.43,-0.48, - -0.43,-0.44,-0.36,-0.34,-0.15,-0.14,-0.36,-0.46, -0.29,-0.27, - -0.27,-0.19,-0.28,-0.26,-0.27,-0.22,-0.1,-0.22,-0.2,-0.36, - -0.16,-0.1,-0.16,-0.29,-0.13,-0.2,-0.15,-0.03,-0.01,-0.02,0.13, - 0.19,0.07,0.09,0.2,0.09,-0.07,-0.03,-0.11,-0.11,-0.17,-0.07,0.01, - 0.08,-0.13,-0.14,-0.19,0.05,0.06,0.03,-0.02,0.06,0.04,0.05,-0.2, - -0.11,-0.06,-0.02,-0.08,0.05,0.02,-0.08,0.01,0.16,-0.07,-0.01, - -0.1,0.18,0.07,0.16,0.26,0.32,0.14,0.31,0.15,0.11,0.18,0.32,0.38, - 0.27,0.45,0.4,0.22,0.23,0.32,0.45,0.33,0.47,0.61,0.39,0.39,0.54, - 0.63,0.62,0.54,0.68,0.64,0.66,0.54,0.66,0.72,0.61,0.64,0.68,0.75, - 0.9,1.02,0.92,0.85,0.98,0.903592857,0.949092857,0.955792857, - 0.997892857,1.048392857,1.068092857,1.104792857,1.122192857, - 1.125792857,1.156292857,1.160992857,1.201692857,1.234692857, - 1.255392857,1.274392857,1.283792857,1.319992857,1.369992857, - 1.385592857,1.380892857,1.415092857,1.439892857,1.457092857, - 1.493592857,1.520292857,1.517692857,1.538092857,1.577192857, - 1.575492857,1.620392857,1.657092857,1.673492857,1.669992857, - 1.706292857,1.707892857,1.758592857,1.739492857,1.740192857, - 1.797792857,1.839292857,1.865392857,1.857692857,1.864092857, - 1.881192857,1.907592857,1.918492857,1.933992857,1.929392857, - 1.931192857,1.942492857,1.985592857,1.997392857,2.000992857, - 2.028692857,2.016192857,2.020792857,2.032892857,2.057492857, - 2.092092857,2.106292857,2.117492857,2.123492857,2.121092857, - 2.096892857,2.126892857,2.131292857,2.144892857,2.124092857, - 2.134492857,2.171392857,2.163692857,2.144092857,2.145092857, - 2.128992857,2.129992857,2.169192857,2.186492857,2.181092857, - 2.217592857,2.210492857,2.223692857], - [-0.16,-0.08,-0.1,-0.16,-0.28,-0.32,-0.3,-0.35,-0.16,-0.1, - -0.35,-0.22,-0.27, -0.31,-0.3,-0.22,-0.11,-0.11,-0.26,-0.17, - -0.08,-0.15,-0.28,-0.37,-0.47,-0.26,-0.22,-0.39,-0.43,-0.48, - -0.43,-0.44,-0.36,-0.34,-0.15,-0.14,-0.36,-0.46,-0.29,-0.27, - -0.27,-0.19,-0.28,-0.26,-0.27,-0.22,-0.1,-0.22,-0.2,-0.36, - -0.16,-0.1,-0.16,-0.29,-0.13,-0.2,-0.15,-0.03,-0.01,-0.02,0.13, - 0.19,0.07,0.09,0.2,0.09,-0.07,-0.03,-0.11,-0.11,-0.17,-0.07,0.01, - 0.08,-0.13,-0.14,-0.19,0.05,0.06,0.03,-0.02,0.06,0.04,0.05,-0.2, - -0.11,-0.06,-0.02,-0.08,0.05,0.02,-0.08,0.01,0.16,-0.07,-0.01,-0.1, - 0.18,0.07,0.16,0.26,0.32,0.14,0.31,0.15,0.11,0.18,0.32,0.38,0.27,0.45, - 
0.4,0.22,0.23,0.32,0.45,0.33,0.47,0.61,0.39,0.39,0.54,0.63,0.62,0.54, - 0.68,0.64,0.66,0.54,0.66,0.72,0.61,0.64,0.68,0.75,0.9,1.02,0.92,0.85, - 0.98,0.885114286,0.899814286,0.919314286,0.942414286,0.957814286, - 1.000414286,1.023114286,1.053414286,1.090814286,1.073014286,1.058114286, - 1.117514286,1.123714286,1.123814286,1.177514286,1.190814286,1.187514286, - 1.223514286,1.261714286,1.289014286,1.276414286,1.339114286,1.365714286, - 1.375314286,1.402214286,1.399914286,1.437314286,1.464914286,1.479114286, - 1.505514286,1.509614286,1.539814286,1.558214286,1.595014286,1.637114286, - 1.653414286,1.636714286,1.652214286,1.701014286,1.731114286,1.759214286, - 1.782114286,1.811014286,1.801714286,1.823014286,1.842914286,1.913014286, - 1.943114286,1.977514286,1.982014286,2.007114286,2.066314286,2.079214286, - 2.126014286,2.147314286,2.174914286,2.184414286,2.218514286,2.261514286, - 2.309614286,2.328014286,2.347014286,2.369414286,2.396614286,2.452014286, - 2.473314286,2.486514286,2.497914286,2.518014286,2.561814286,2.613014286, - 2.626814286,2.585914286,2.614614286,2.644714286,2.688414286,2.688514286, - 2.685314286,2.724614286,2.746214286,2.773814286], - [-0.16,-0.08,-0.1,-0.16,-0.28,-0.32,-0.3,-0.35,-0.16,-0.1,-0.35,-0.22, - -0.27,-0.31,-0.3,-0.22,-0.11,-0.11,-0.26,-0.17,-0.08,-0.15,-0.28,-0.37, - -0.47,-0.26,-0.22,-0.39,-0.43,-0.48,-0.43,-0.44,-0.36,-0.34,-0.15,-0.14, - -0.36,-0.46,-0.29,-0.27,-0.27,-0.19,-0.28,-0.26,-0.27,-0.22,-0.1,-0.22, - -0.2,-0.36,-0.16,-0.1,-0.16,-0.29,-0.13,-0.2,-0.15,-0.03,-0.01,-0.02,0.13, - 0.19,0.07,0.09,0.2,0.09,-0.07,-0.03,-0.11,-0.11,-0.17,-0.07,0.01,0.08,-0.13, - -0.14,-0.19,0.05,0.06,0.03,-0.02,0.06,0.04,0.05,-0.2,-0.11,-0.06,-0.02,-0.08, - 0.05,0.02,-0.08,0.01,0.16,-0.07,-0.01,-0.1,0.18,0.07,0.16,0.26,0.32,0.14,0.31, - 0.15,0.11,0.18,0.32,0.38,0.27,0.45,0.4,0.22,0.23,0.32,0.45,0.33,0.47,0.61,0.39, - 0.39,0.54,0.63,0.62,0.54,0.68,0.64,0.66,0.54,0.66,0.72,0.61,0.64, 0.68,0.75,0.9, - 1.02,0.92,0.85,0.98,0.945764286,1.011064286,1.048564286,1.049564286,1.070264286, - 1.126564286,1.195464286,1.215064286,1.246964286,1.272564286,1.262464286, - 1.293464286,1.340864286,1.391164286,1.428764286,1.452564286,1.494164286, - 1.520664286,1.557164286,1.633664286,1.654264286,1.693264286,1.730264286, - 1.795264286,1.824264286,1.823864286,1.880664286,1.952864286,1.991764286, - 1.994764286,2.085764286,2.105764286,2.155064286,2.227464286,2.249964286, - 2.313664286,2.341464286,2.394064286,2.457364286,2.484664286,2.549564286, - 2.605964286,2.656864286,2.707364286,2.742964286,2.789764286,2.847664286, - 2.903564286,2.925064286,2.962864286,3.002664286,3.069264286,3.133364286, - 3.174764286,3.217764286,3.256564286,3.306864286,3.375464286,3.420264286, - 3.476464286,3.493864286,3.552964286,3.592364286,3.630664286,3.672464286, - 3.734364286,3.789764286,3.838164286,3.882264286,3.936064286,3.984064286, - 4.055764286,4.098964286,4.122364286,4.172064286,4.225264286,4.275064286, - 4.339064286,4.375864286,4.408064286,4.477764286] -]) + gmst_data = np.array( + [ + [ + -0.16, + -0.08, + -0.1, + -0.16, + -0.28, + -0.32, + -0.3, + -0.35, + -0.16, + -0.1, + -0.35, + -0.22, + -0.27, + -0.31, + -0.3, + -0.22, + -0.11, + -0.11, + -0.26, + -0.17, + -0.08, + -0.15, + -0.28, + -0.37, + -0.47, + -0.26, + -0.22, + -0.39, + -0.43, + -0.48, + -0.43, + -0.44, + -0.36, + -0.34, + -0.15, + -0.14, + -0.36, + -0.46, + -0.29, + -0.27, + -0.27, + -0.19, + -0.28, + -0.26, + -0.27, + -0.22, + -0.1, + -0.22, + -0.2, + -0.36, + -0.16, + -0.1, + -0.16, + -0.29, + -0.13, + -0.2, + -0.15, + -0.03, + -0.01, + -0.02, + 0.13, + 
0.19, + 0.07, + 0.09, + 0.2, + 0.09, + -0.07, + -0.03, + -0.11, + -0.11, + -0.17, + -0.07, + 0.01, + 0.08, + -0.13, + -0.14, + -0.19, + 0.05, + 0.06, + 0.03, + -0.02, + 0.06, + 0.04, + 0.05, + -0.2, + -0.11, + -0.06, + -0.02, + -0.08, + 0.05, + 0.02, + -0.08, + 0.01, + 0.16, + -0.07, + -0.01, + -0.1, + 0.18, + 0.07, + 0.16, + 0.26, + 0.32, + 0.14, + 0.31, + 0.15, + 0.11, + 0.18, + 0.32, + 0.38, + 0.27, + 0.45, + 0.4, + 0.22, + 0.23, + 0.32, + 0.45, + 0.33, + 0.47, + 0.61, + 0.39, + 0.39, + 0.54, + 0.63, + 0.62, + 0.54, + 0.68, + 0.64, + 0.66, + 0.54, + 0.66, + 0.72, + 0.61, + 0.64, + 0.68, + 0.75, + 0.9, + 1.02, + 0.92, + 0.85, + 0.98, + 0.909014286, + 0.938814286, + 0.999714286, + 1.034314286, + 1.009714286, + 1.020014286, + 1.040914286, + 1.068614286, + 1.072114286, + 1.095114286, + 1.100414286, + 1.099014286, + 1.118514286, + 1.133414286, + 1.135314286, + 1.168814286, + 1.200414286, + 1.205414286, + 1.227214286, + 1.212614286, + 1.243014286, + 1.270114286, + 1.250114286, + 1.254514286, + 1.265814286, + 1.263314286, + 1.294714286, + 1.289814286, + 1.314214286, + 1.322514286, + 1.315614286, + 1.276314286, + 1.302414286, + 1.318414286, + 1.312014286, + 1.317914286, + 1.341214286, + 1.297414286, + 1.308514286, + 1.314614286, + 1.327814286, + 1.335814286, + 1.331214286, + 1.318014286, + 1.289714286, + 1.334414286, + 1.323914286, + 1.316614286, + 1.300214286, + 1.302414286, + 1.303114286, + 1.311014286, + 1.283914286, + 1.293814286, + 1.296914286, + 1.316614286, + 1.306314286, + 1.290614286, + 1.288814286, + 1.272114286, + 1.264614286, + 1.262514286, + 1.290514286, + 1.285114286, + 1.267214286, + 1.267414286, + 1.294314286, + 1.315614286, + 1.310314286, + 1.283914286, + 1.296614286, + 1.281214286, + 1.301014286, + 1.300114286, + 1.303114286, + 1.286714286, + 1.297514286, + 1.312114286, + 1.276714286, + 1.281414286, + 1.276414286, + ], + [ + -0.16, + -0.08, + -0.1, + -0.16, + -0.28, + -0.32, + -0.3, + -0.35, + -0.16, + -0.1, + -0.35, + -0.22, + -0.27, + -0.31, + -0.3, + -0.22, + -0.11, + -0.11, + -0.26, + -0.17, + -0.08, + -0.15, + -0.28, + -0.37, + -0.47, + -0.26, + -0.22, + -0.39, + -0.43, + -0.48, + -0.43, + -0.44, + -0.36, + -0.34, + -0.15, + -0.14, + -0.36, + -0.46, + -0.29, + -0.27, + -0.27, + -0.19, + -0.28, + -0.26, + -0.27, + -0.22, + -0.1, + -0.22, + -0.2, + -0.36, + -0.16, + -0.1, + -0.16, + -0.29, + -0.13, + -0.2, + -0.15, + -0.03, + -0.01, + -0.02, + 0.13, + 0.19, + 0.07, + 0.09, + 0.2, + 0.09, + -0.07, + -0.03, + -0.11, + -0.11, + -0.17, + -0.07, + 0.01, + 0.08, + -0.13, + -0.14, + -0.19, + 0.05, + 0.06, + 0.03, + -0.02, + 0.06, + 0.04, + 0.05, + -0.2, + -0.11, + -0.06, + -0.02, + -0.08, + 0.05, + 0.02, + -0.08, + 0.01, + 0.16, + -0.07, + -0.01, + -0.1, + 0.18, + 0.07, + 0.16, + 0.26, + 0.32, + 0.14, + 0.31, + 0.15, + 0.11, + 0.18, + 0.32, + 0.38, + 0.27, + 0.45, + 0.4, + 0.22, + 0.23, + 0.32, + 0.45, + 0.33, + 0.47, + 0.61, + 0.39, + 0.39, + 0.54, + 0.63, + 0.62, + 0.54, + 0.68, + 0.64, + 0.66, + 0.54, + 0.66, + 0.72, + 0.61, + 0.64, + 0.68, + 0.75, + 0.9, + 1.02, + 0.92, + 0.85, + 0.98, + 0.903592857, + 0.949092857, + 0.955792857, + 0.997892857, + 1.048392857, + 1.068092857, + 1.104792857, + 1.122192857, + 1.125792857, + 1.156292857, + 1.160992857, + 1.201692857, + 1.234692857, + 1.255392857, + 1.274392857, + 1.283792857, + 1.319992857, + 1.369992857, + 1.385592857, + 1.380892857, + 1.415092857, + 1.439892857, + 1.457092857, + 1.493592857, + 1.520292857, + 1.517692857, + 1.538092857, + 1.577192857, + 1.575492857, + 1.620392857, + 1.657092857, + 1.673492857, + 1.669992857, + 
1.706292857, + 1.707892857, + 1.758592857, + 1.739492857, + 1.740192857, + 1.797792857, + 1.839292857, + 1.865392857, + 1.857692857, + 1.864092857, + 1.881192857, + 1.907592857, + 1.918492857, + 1.933992857, + 1.929392857, + 1.931192857, + 1.942492857, + 1.985592857, + 1.997392857, + 2.000992857, + 2.028692857, + 2.016192857, + 2.020792857, + 2.032892857, + 2.057492857, + 2.092092857, + 2.106292857, + 2.117492857, + 2.123492857, + 2.121092857, + 2.096892857, + 2.126892857, + 2.131292857, + 2.144892857, + 2.124092857, + 2.134492857, + 2.171392857, + 2.163692857, + 2.144092857, + 2.145092857, + 2.128992857, + 2.129992857, + 2.169192857, + 2.186492857, + 2.181092857, + 2.217592857, + 2.210492857, + 2.223692857, + ], + [ + -0.16, + -0.08, + -0.1, + -0.16, + -0.28, + -0.32, + -0.3, + -0.35, + -0.16, + -0.1, + -0.35, + -0.22, + -0.27, + -0.31, + -0.3, + -0.22, + -0.11, + -0.11, + -0.26, + -0.17, + -0.08, + -0.15, + -0.28, + -0.37, + -0.47, + -0.26, + -0.22, + -0.39, + -0.43, + -0.48, + -0.43, + -0.44, + -0.36, + -0.34, + -0.15, + -0.14, + -0.36, + -0.46, + -0.29, + -0.27, + -0.27, + -0.19, + -0.28, + -0.26, + -0.27, + -0.22, + -0.1, + -0.22, + -0.2, + -0.36, + -0.16, + -0.1, + -0.16, + -0.29, + -0.13, + -0.2, + -0.15, + -0.03, + -0.01, + -0.02, + 0.13, + 0.19, + 0.07, + 0.09, + 0.2, + 0.09, + -0.07, + -0.03, + -0.11, + -0.11, + -0.17, + -0.07, + 0.01, + 0.08, + -0.13, + -0.14, + -0.19, + 0.05, + 0.06, + 0.03, + -0.02, + 0.06, + 0.04, + 0.05, + -0.2, + -0.11, + -0.06, + -0.02, + -0.08, + 0.05, + 0.02, + -0.08, + 0.01, + 0.16, + -0.07, + -0.01, + -0.1, + 0.18, + 0.07, + 0.16, + 0.26, + 0.32, + 0.14, + 0.31, + 0.15, + 0.11, + 0.18, + 0.32, + 0.38, + 0.27, + 0.45, + 0.4, + 0.22, + 0.23, + 0.32, + 0.45, + 0.33, + 0.47, + 0.61, + 0.39, + 0.39, + 0.54, + 0.63, + 0.62, + 0.54, + 0.68, + 0.64, + 0.66, + 0.54, + 0.66, + 0.72, + 0.61, + 0.64, + 0.68, + 0.75, + 0.9, + 1.02, + 0.92, + 0.85, + 0.98, + 0.885114286, + 0.899814286, + 0.919314286, + 0.942414286, + 0.957814286, + 1.000414286, + 1.023114286, + 1.053414286, + 1.090814286, + 1.073014286, + 1.058114286, + 1.117514286, + 1.123714286, + 1.123814286, + 1.177514286, + 1.190814286, + 1.187514286, + 1.223514286, + 1.261714286, + 1.289014286, + 1.276414286, + 1.339114286, + 1.365714286, + 1.375314286, + 1.402214286, + 1.399914286, + 1.437314286, + 1.464914286, + 1.479114286, + 1.505514286, + 1.509614286, + 1.539814286, + 1.558214286, + 1.595014286, + 1.637114286, + 1.653414286, + 1.636714286, + 1.652214286, + 1.701014286, + 1.731114286, + 1.759214286, + 1.782114286, + 1.811014286, + 1.801714286, + 1.823014286, + 1.842914286, + 1.913014286, + 1.943114286, + 1.977514286, + 1.982014286, + 2.007114286, + 2.066314286, + 2.079214286, + 2.126014286, + 2.147314286, + 2.174914286, + 2.184414286, + 2.218514286, + 2.261514286, + 2.309614286, + 2.328014286, + 2.347014286, + 2.369414286, + 2.396614286, + 2.452014286, + 2.473314286, + 2.486514286, + 2.497914286, + 2.518014286, + 2.561814286, + 2.613014286, + 2.626814286, + 2.585914286, + 2.614614286, + 2.644714286, + 2.688414286, + 2.688514286, + 2.685314286, + 2.724614286, + 2.746214286, + 2.773814286, + ], + [ + -0.16, + -0.08, + -0.1, + -0.16, + -0.28, + -0.32, + -0.3, + -0.35, + -0.16, + -0.1, + -0.35, + -0.22, + -0.27, + -0.31, + -0.3, + -0.22, + -0.11, + -0.11, + -0.26, + -0.17, + -0.08, + -0.15, + -0.28, + -0.37, + -0.47, + -0.26, + -0.22, + -0.39, + -0.43, + -0.48, + -0.43, + -0.44, + -0.36, + -0.34, + -0.15, + -0.14, + -0.36, + -0.46, + -0.29, + -0.27, + -0.27, + -0.19, + -0.28, + -0.26, + -0.27, + -0.22, + -0.1, 
+ -0.22, + -0.2, + -0.36, + -0.16, + -0.1, + -0.16, + -0.29, + -0.13, + -0.2, + -0.15, + -0.03, + -0.01, + -0.02, + 0.13, + 0.19, + 0.07, + 0.09, + 0.2, + 0.09, + -0.07, + -0.03, + -0.11, + -0.11, + -0.17, + -0.07, + 0.01, + 0.08, + -0.13, + -0.14, + -0.19, + 0.05, + 0.06, + 0.03, + -0.02, + 0.06, + 0.04, + 0.05, + -0.2, + -0.11, + -0.06, + -0.02, + -0.08, + 0.05, + 0.02, + -0.08, + 0.01, + 0.16, + -0.07, + -0.01, + -0.1, + 0.18, + 0.07, + 0.16, + 0.26, + 0.32, + 0.14, + 0.31, + 0.15, + 0.11, + 0.18, + 0.32, + 0.38, + 0.27, + 0.45, + 0.4, + 0.22, + 0.23, + 0.32, + 0.45, + 0.33, + 0.47, + 0.61, + 0.39, + 0.39, + 0.54, + 0.63, + 0.62, + 0.54, + 0.68, + 0.64, + 0.66, + 0.54, + 0.66, + 0.72, + 0.61, + 0.64, + 0.68, + 0.75, + 0.9, + 1.02, + 0.92, + 0.85, + 0.98, + 0.945764286, + 1.011064286, + 1.048564286, + 1.049564286, + 1.070264286, + 1.126564286, + 1.195464286, + 1.215064286, + 1.246964286, + 1.272564286, + 1.262464286, + 1.293464286, + 1.340864286, + 1.391164286, + 1.428764286, + 1.452564286, + 1.494164286, + 1.520664286, + 1.557164286, + 1.633664286, + 1.654264286, + 1.693264286, + 1.730264286, + 1.795264286, + 1.824264286, + 1.823864286, + 1.880664286, + 1.952864286, + 1.991764286, + 1.994764286, + 2.085764286, + 2.105764286, + 2.155064286, + 2.227464286, + 2.249964286, + 2.313664286, + 2.341464286, + 2.394064286, + 2.457364286, + 2.484664286, + 2.549564286, + 2.605964286, + 2.656864286, + 2.707364286, + 2.742964286, + 2.789764286, + 2.847664286, + 2.903564286, + 2.925064286, + 2.962864286, + 3.002664286, + 3.069264286, + 3.133364286, + 3.174764286, + 3.217764286, + 3.256564286, + 3.306864286, + 3.375464286, + 3.420264286, + 3.476464286, + 3.493864286, + 3.552964286, + 3.592364286, + 3.630664286, + 3.672464286, + 3.734364286, + 3.789764286, + 3.838164286, + 3.882264286, + 3.936064286, + 3.984064286, + 4.055764286, + 4.098964286, + 4.122364286, + 4.172064286, + 4.225264286, + 4.275064286, + 4.339064286, + 4.375864286, + 4.408064286, + 4.477764286, + ], + ] + ) gmst_info = { - 'rcps' : ['2.6', '4.5', '6.0', '8.5'], - 'gmst_start_year' : 1880, - 'gmst_end_year' : 2100, - 'gmst_data' : gmst_data + "rcps": ["2.6", "4.5", "6.0", "8.5"], + "gmst_start_year": 1880, + "gmst_end_year": 2100, + "gmst_data": gmst_data, } return gmst_info + def get_knutson_data(): """ Retrieve projections data in Knutson et al., (2020): @@ -356,34 +1149,41 @@ def get_knutson_data(): # The knutson_data array has dimension: # 4 (tropical cyclones variables) x 6 (tropical cyclone regions) x 5 (percentiles) - knutson_data = np.array([[ - [-34.49,-24.875,-14.444,3.019,28.737], - [-30.444,-20,-10.27,0.377,17.252], - [-32.075,-18.491,-3.774,11.606,36.682], - [-35.094,-15.115,-4.465,5.785,29.405], - [-32.778,-22.522,-17.297,-8.995,7.241], - [-40.417,-26.321,-18.113,-8.21,4.689]], - [ - [-38.038,-22.264,11.321,38.302,81.874], - [-25.811,-14.34,-4.75,16.146,41.979], - [-24.83,-6.792,22.642,57.297,104.315], - [-30.566,-16.415,5.283,38.491,79.119], - [-23.229,-13.611,4.528,26.645,63.514], - [-42.453,-29.434,-14.467,-0.541,19.061]], - [ - [0.543,1.547,2.943,4.734,6.821], - [1.939,3.205,5.328,6.549,9.306], - [-2.217,0.602,5.472,9.191,10.368], - [-0.973,1.944,4.324,6.15,7.808], - [1.605,3.455,5.405,7.69,10.884], - [-6.318,-0.783,0.938,5.314,12.213]], - [ - [5.848,9.122,15.869,20.352,22.803], - [6.273,12.121,16.486,18.323,23.784], - [6.014,8.108,21.081,29.324,31.838], - [12.703,14.347,17.649,19.182,20.77], - [2.2,11.919,19.73,23.115,26.243], - [-1.299,5.137,7.297,11.091,15.419] - ]]) - - return knutson_data \ No newline at end of 
file + knutson_data = np.array( + [ + [ + [-34.49, -24.875, -14.444, 3.019, 28.737], + [-30.444, -20, -10.27, 0.377, 17.252], + [-32.075, -18.491, -3.774, 11.606, 36.682], + [-35.094, -15.115, -4.465, 5.785, 29.405], + [-32.778, -22.522, -17.297, -8.995, 7.241], + [-40.417, -26.321, -18.113, -8.21, 4.689], + ], + [ + [-38.038, -22.264, 11.321, 38.302, 81.874], + [-25.811, -14.34, -4.75, 16.146, 41.979], + [-24.83, -6.792, 22.642, 57.297, 104.315], + [-30.566, -16.415, 5.283, 38.491, 79.119], + [-23.229, -13.611, 4.528, 26.645, 63.514], + [-42.453, -29.434, -14.467, -0.541, 19.061], + ], + [ + [0.543, 1.547, 2.943, 4.734, 6.821], + [1.939, 3.205, 5.328, 6.549, 9.306], + [-2.217, 0.602, 5.472, 9.191, 10.368], + [-0.973, 1.944, 4.324, 6.15, 7.808], + [1.605, 3.455, 5.405, 7.69, 10.884], + [-6.318, -0.783, 0.938, 5.314, 12.213], + ], + [ + [5.848, 9.122, 15.869, 20.352, 22.803], + [6.273, 12.121, 16.486, 18.323, 23.784], + [6.014, 8.108, 21.081, 29.324, 31.838], + [12.703, 14.347, 17.649, 19.182, 20.77], + [2.2, 11.919, 19.73, 23.115, 26.243], + [-1.299, 5.137, 7.297, 11.091, 15.419], + ], + ] + ) + + return knutson_data diff --git a/climada/hazard/tc_tracks.py b/climada/hazard/tc_tracks.py index 519f93627..3f2fb85b8 100644 --- a/climada/hazard/tc_tracks.py +++ b/climada/hazard/tc_tracks.py @@ -19,47 +19,48 @@ Define TCTracks: IBTracs reader and tracks manager. """ -__all__ = ['CAT_NAMES', 'SAFFIR_SIM_CAT', 'TCTracks', 'set_category'] +__all__ = ["CAT_NAMES", "SAFFIR_SIM_CAT", "TCTracks", "set_category"] # standard libraries import datetime as dt import itertools import logging -from typing import Optional, List import re import shutil import warnings from pathlib import Path +from typing import List, Optional # additional libraries import cartopy.crs as ccrs import cftime import geopandas as gpd -import pathos import matplotlib.cm as cm_mp -from matplotlib.collections import LineCollection -from matplotlib.colors import BoundaryNorm, ListedColormap -from matplotlib.lines import Line2D import matplotlib.pyplot as plt import netCDF4 as nc import numba import numpy as np import pandas as pd +import pathos import scipy.io.matlab as matlab -from shapely.geometry import Point, LineString, MultiLineString import shapely.ops -from sklearn.metrics import DistanceMetric import statsmodels.api as sm import xarray as xr +from matplotlib.collections import LineCollection +from matplotlib.colors import BoundaryNorm, ListedColormap +from matplotlib.lines import Line2D +from shapely.geometry import LineString, MultiLineString, Point +from sklearn.metrics import DistanceMetric -# climada dependencies -from climada.util import ureg +import climada.hazard.tc_tracks_synth import climada.util.coordinates as u_coord -from climada.util.constants import EARTH_RADIUS_KM, SYSTEM_DIR, DEF_CRS -from climada.util.files_handler import get_file_names, download_ftp import climada.util.plot as u_plot from climada.hazard import Centroids -import climada.hazard.tc_tracks_synth + +# climada dependencies +from climada.util import ureg +from climada.util.constants import DEF_CRS, EARTH_RADIUS_KM, SYSTEM_DIR +from climada.util.files_handler import download_ftp, get_file_names LOGGER = logging.getLogger(__name__) @@ -67,37 +68,61 @@ """Saffir-Simpson Hurricane Wind Scale in kn based on NOAA""" CAT_NAMES = { - -1: 'Tropical Depression', - 0: 'Tropical Storm', - 1: 'Hurricane Cat. 1', - 2: 'Hurricane Cat. 2', - 3: 'Hurricane Cat. 3', - 4: 'Hurricane Cat. 4', - 5: 'Hurricane Cat. 
5', + -1: "Tropical Depression", + 0: "Tropical Storm", + 1: "Hurricane Cat. 1", + 2: "Hurricane Cat. 2", + 3: "Hurricane Cat. 3", + 4: "Hurricane Cat. 4", + 5: "Hurricane Cat. 5", } """Saffir-Simpson category names.""" CAT_COLORS = cm_mp.rainbow(np.linspace(0, 1, len(SAFFIR_SIM_CAT))) """Color scale to plot the Saffir-Simpson scale.""" -IBTRACS_URL = ('https://www.ncei.noaa.gov/data/' - 'international-best-track-archive-for-climate-stewardship-ibtracs/' - 'v04r00/access/netcdf') +IBTRACS_URL = ( + "https://www.ncei.noaa.gov/data/" + "international-best-track-archive-for-climate-stewardship-ibtracs/" + "v04r00/access/netcdf" +) """Site of IBTrACS netcdf file containing all tracks v4.0, s. https://www.ncdc.noaa.gov/ibtracs/index.php?name=ib-v4-access""" -IBTRACS_FILE = 'IBTrACS.ALL.v04r00.nc' +IBTRACS_FILE = "IBTrACS.ALL.v04r00.nc" """IBTrACS v4.0 file all""" IBTRACS_AGENCIES = [ - 'usa', 'tokyo', 'newdelhi', 'reunion', 'bom', 'nadi', 'wellington', - 'cma', 'hko', 'ds824', 'td9636', 'td9635', 'neumann', 'mlc', + "usa", + "tokyo", + "newdelhi", + "reunion", + "bom", + "nadi", + "wellington", + "cma", + "hko", + "ds824", + "td9636", + "td9635", + "neumann", + "mlc", ] """Names/IDs of agencies in IBTrACS v4.0""" IBTRACS_USA_AGENCIES = [ - 'atcf', 'cphc', 'hurdat_atl', 'hurdat_epa', 'jtwc_cp', 'jtwc_ep', 'jtwc_io', - 'jtwc_sh', 'jtwc_wp', 'nhc_working_bt', 'tcvightals', 'tcvitals' + "atcf", + "cphc", + "hurdat_atl", + "hurdat_epa", + "jtwc_cp", + "jtwc_ep", + "jtwc_io", + "jtwc_sh", + "jtwc_wp", + "nhc_working_bt", + "tcvightals", + "tcvitals", ] """Names/IDs of agencies in IBTrACS that correspond to 'usa_*' variables""" @@ -110,13 +135,13 @@ "bom": [0.88, 0.0], "nadi": [0.88, 0.0], "wellington": [0.88, 0.0], - 'cma': [0.871, 0.0], - 'hko': [0.9, 0.0], - 'ds824': [1.0, 0.0], - 'td9636': [1.0, 0.0], - 'td9635': [1.0, 0.0], - 'neumann': [0.88, 0.0], - 'mlc': [1.0, 0.0], + "cma": [0.871, 0.0], + "hko": [0.9, 0.0], + "ds824": [1.0, 0.0], + "td9636": [1.0, 0.0], + "td9635": [1.0, 0.0], + "neumann": [0.88, 0.0], + "mlc": [1.0, 0.0], } """Scale and shift used by agencies to convert their internal Dvorak 1-minute sustained winds to the officially reported values that are in IBTrACS. From Table 1 in: @@ -129,20 +154,30 @@ """Default environmental pressure""" BASIN_ENV_PRESSURE = { - '': DEF_ENV_PRESSURE, - 'EP': 1010, 'NA': 1010, 'SA': 1010, - 'NI': 1005, 'SI': 1005, 'WP': 1005, - 'SP': 1004, + "": DEF_ENV_PRESSURE, + "EP": 1010, + "NA": 1010, + "SA": 1010, + "NI": 1005, + "SI": 1005, + "WP": 1005, + "SP": 1004, } """Basin-specific default environmental pressure""" EMANUEL_RMW_CORR_FILES = [ - 'temp_ccsm420thcal.mat', 'temp_ccsm4rcp85_full.mat', - 'temp_gfdl520thcal.mat', 'temp_gfdl5rcp85cal_full.mat', - 'temp_hadgem20thcal.mat', 'temp_hadgemrcp85cal_full.mat', - 'temp_miroc20thcal.mat', 'temp_mirocrcp85cal_full.mat', - 'temp_mpi20thcal.mat', 'temp_mpircp85cal_full.mat', - 'temp_mri20thcal.mat', 'temp_mrircp85cal_full.mat', + "temp_ccsm420thcal.mat", + "temp_ccsm4rcp85_full.mat", + "temp_gfdl520thcal.mat", + "temp_gfdl5rcp85cal_full.mat", + "temp_hadgem20thcal.mat", + "temp_hadgemrcp85cal_full.mat", + "temp_miroc20thcal.mat", + "temp_mirocrcp85cal_full.mat", + "temp_mpi20thcal.mat", + "temp_mpircp85cal_full.mat", + "temp_mri20thcal.mat", + "temp_mrircp85cal_full.mat", ] EMANUEL_RMW_CORR_FACTOR = 2.0 """Kerry Emanuel track files in this list require a correction: The radius of @@ -155,7 +190,8 @@ Bloemendaal et al. (2020): Generation of a global synthetic tropical cyclone hazard dataset using STORM. 
Scientific Data 7(1): 40.""" -class TCTracks(): + +class TCTracks: """Contains tropical cyclone tracks. Attributes @@ -187,9 +223,12 @@ class TCTracks(): system is a disturbance, tropical storm, post-transition extratropical storm etc.) might be included, depending on the data source and on use cases. """ - def __init__(self, - data: Optional[List[xr.Dataset]] = None, - pool: Optional[pathos.multiprocessing.ProcessPool] = None): + + def __init__( + self, + data: Optional[List[xr.Dataset]] = None, + pool: Optional[pathos.multiprocessing.ProcessPool] = None, + ): """Create new (empty) TCTracks instance. Parameters @@ -204,7 +243,7 @@ def __init__(self, self.data = data if data is not None else list() self.pool = pool if pool: - LOGGER.debug('Using %s CPUs.', self.pool.ncpus) + LOGGER.debug("Using %s CPUs.", self.pool.ncpus) def append(self, tracks): """Append tracks to current. @@ -242,12 +281,12 @@ def get_track(self, track_name=None): return self.data for track in self.data: - if track.attrs['name'] == track_name: + if track.attrs["name"] == track_name: return track - if hasattr(track, 'sid') and track.sid == track_name: + if hasattr(track, "sid") and track.sid == track_name: return track - LOGGER.info('No track with name or sid %s found.', track_name) + LOGGER.info("No track with name or sid %s found.", track_name) return [] def subset(self, filterdict): @@ -317,16 +356,28 @@ def tracks_in_exp(self, exposure, buffer=1.0): def read_ibtracs_netcdf(self, *args, **kwargs): """This function is deprecated, use TCTracks.from_ibtracs_netcdf instead.""" - LOGGER.warning("The use of TCTracks.read_ibtracs_netcdf is deprecated. " - "Use TCTracks.from_ibtracs_netcdf instead.") + LOGGER.warning( + "The use of TCTracks.read_ibtracs_netcdf is deprecated. " + "Use TCTracks.from_ibtracs_netcdf instead." + ) self.__dict__ = TCTracks.from_ibtracs_netcdf(*args, **kwargs).__dict__ @classmethod - def from_ibtracs_netcdf(cls, provider=None, rescale_windspeeds=True, storm_id=None, - year_range=None, basin=None, genesis_basin=None, - interpolate_missing=True, estimate_missing=False, correct_pres=False, - discard_single_points=True, additional_variables=None, - file_name='IBTrACS.ALL.v04r00.nc'): + def from_ibtracs_netcdf( + cls, + provider=None, + rescale_windspeeds=True, + storm_id=None, + year_range=None, + basin=None, + genesis_basin=None, + interpolate_missing=True, + estimate_missing=False, + correct_pres=False, + discard_single_points=True, + additional_variables=None, + file_name="IBTrACS.ALL.v04r00.nc", + ): """Create new TCTracks object from IBTrACS databse. When using data from IBTrACS, make sure to be familiar with the scope and limitations of @@ -446,125 +497,165 @@ def from_ibtracs_netcdf(cls, provider=None, rescale_windspeeds=True, storm_id=No TCTracks with data from IBTrACS """ if correct_pres: - LOGGER.warning("`correct_pres` is deprecated. " - "Use `estimate_missing` instead.") + LOGGER.warning( + "`correct_pres` is deprecated. " "Use `estimate_missing` instead." + ) estimate_missing = True if estimate_missing and not rescale_windspeeds: LOGGER.warning( - "Using `estimate_missing` without `rescale_windspeeds` is strongly discouraged!") + "Using `estimate_missing` without `rescale_windspeeds` is strongly discouraged!" 
+ ) ibtracs_path = SYSTEM_DIR.joinpath(file_name) if not ibtracs_path.is_file(): try: - download_ftp(f'{IBTRACS_URL}/{IBTRACS_FILE}', IBTRACS_FILE) + download_ftp(f"{IBTRACS_URL}/{IBTRACS_FILE}", IBTRACS_FILE) shutil.move(IBTRACS_FILE, ibtracs_path) except ValueError as err: raise ValueError( - f'Error while downloading {IBTRACS_URL}. Try to download it manually and ' - f'put the file in {ibtracs_path}') from err + f"Error while downloading {IBTRACS_URL}. Try to download it manually and " + f"put the file in {ibtracs_path}" + ) from err if additional_variables is None: additional_variables = [] with xr.open_dataset(ibtracs_path) as ibtracs_ds: ibtracs_date = ibtracs_ds.attrs["date_created"] - if (np.datetime64('today') - np.datetime64(ibtracs_date)).item().days > 180: - LOGGER.warning("The cached IBTrACS data set dates from %s (older " - "than 180 days). Very likely, a more recent version is available. " - "Consider manually removing the file %s and re-running " - "this function, which will download the most recent version of the " - "IBTrACS data set from the official URL.", ibtracs_date, ibtracs_path) + if (np.datetime64("today") - np.datetime64(ibtracs_date)).item().days > 180: + LOGGER.warning( + "The cached IBTrACS data set dates from %s (older " + "than 180 days). Very likely, a more recent version is available. " + "Consider manually removing the file %s and re-running " + "this function, which will download the most recent version of the " + "IBTrACS data set from the official URL.", + ibtracs_date, + ibtracs_path, + ) match = np.ones(ibtracs_ds.sid.shape[0], dtype=bool) if storm_id is not None: if not isinstance(storm_id, list): storm_id = [storm_id] invalid_mask = np.array( - [re.match(r"[12][0-9]{6}[NS][0-9]{5}", s) is None for s in storm_id]) + [re.match(r"[12][0-9]{6}[NS][0-9]{5}", s) is None for s in storm_id] + ) if invalid_mask.any(): invalid_sids = list(np.array(storm_id)[invalid_mask]) - raise ValueError("The following given IDs are invalid: %s%s" % ( - ", ".join(invalid_sids[:5]), - ", ..." if len(invalid_sids) > 5 else ".")) + raise ValueError( + "The following given IDs are invalid: %s%s" + % ( + ", ".join(invalid_sids[:5]), + ", ..." if len(invalid_sids) > 5 else ".", + ) + ) storm_id = list(np.array(storm_id)[~invalid_mask]) storm_id_encoded = [i.encode() for i in storm_id] non_existing_mask = ~np.isin(storm_id_encoded, ibtracs_ds.sid.values) if np.count_nonzero(non_existing_mask) > 0: non_existing_sids = list(np.array(storm_id)[non_existing_mask]) - raise ValueError("The following given IDs are not in IBTrACS: %s%s" % ( - ", ".join(non_existing_sids[:5]), - ", ..." if len(non_existing_sids) > 5 else ".")) - storm_id_encoded = list(np.array(storm_id_encoded)[~non_existing_mask]) + raise ValueError( + "The following given IDs are not in IBTrACS: %s%s" + % ( + ", ".join(non_existing_sids[:5]), + ", ..." 
if len(non_existing_sids) > 5 else ".", + ) + ) + storm_id_encoded = list( + np.array(storm_id_encoded)[~non_existing_mask] + ) match &= ibtracs_ds.sid.isin(storm_id_encoded) if year_range is not None: years = ibtracs_ds.sid.str.slice(0, 4).astype(int) match &= (years >= year_range[0]) & (years <= year_range[1]) if np.count_nonzero(match) == 0: - LOGGER.info('No tracks in time range (%s, %s).', *year_range) + LOGGER.info("No tracks in time range (%s, %s).", *year_range) if basin is not None: - match &= (ibtracs_ds.basin == basin.encode()).any(dim='date_time') + match &= (ibtracs_ds.basin == basin.encode()).any(dim="date_time") if np.count_nonzero(match) == 0: - LOGGER.info('No tracks in basin %s.', basin) + LOGGER.info("No tracks in basin %s.", basin) if genesis_basin is not None: # Here, we only filter for the basin at *any* eye position. We will filter again later # for the basin of the *first* eye position, but only after restricting to the valid # time steps in the data. - match &= (ibtracs_ds.basin == genesis_basin.encode()).any(dim='date_time') + match &= (ibtracs_ds.basin == genesis_basin.encode()).any( + dim="date_time" + ) if np.count_nonzero(match) == 0: - LOGGER.info('No tracks in genesis basin %s.', genesis_basin) + LOGGER.info("No tracks in genesis basin %s.", genesis_basin) if np.count_nonzero(match) == 0: - LOGGER.info("IBTrACS doesn't contain any tracks matching the specified requirements.") + LOGGER.info( + "IBTrACS doesn't contain any tracks matching the specified requirements." + ) return cls() ibtracs_ds = ibtracs_ds.sel(storm=match) - ibtracs_ds['valid_t'] = ibtracs_ds['time'].notnull() + ibtracs_ds["valid_t"] = ibtracs_ds["time"].notnull() if rescale_windspeeds: for agency in IBTRACS_AGENCIES: scale, shift = IBTRACS_AGENCY_1MIN_WIND_FACTOR[agency] - ibtracs_ds[f'{agency}_wind'] -= shift - ibtracs_ds[f'{agency}_wind'] /= scale + ibtracs_ds[f"{agency}_wind"] -= shift + ibtracs_ds[f"{agency}_wind"] /= scale if provider is None: provider = ["official_3h"] + IBTRACS_AGENCIES elif isinstance(provider, str): provider = [provider] - phys_vars = ['lat', 'lon', 'wind', 'pres', 'rmw', 'poci', 'roci'] + phys_vars = ["lat", "lon", "wind", "pres", "rmw", "poci", "roci"] for tc_var in phys_vars: if "official" in provider or "official_3h" in provider: ibtracs_add_official_variable( - ibtracs_ds, tc_var, add_3h=("official_3h" in provider)) + ibtracs_ds, tc_var, add_3h=("official_3h" in provider) + ) # set up dimension of agency-reported values in order of preference, including the # newly created `official` and `official_3h` data if specified - ag_vars = [f'{ag}_{tc_var}' for ag in provider] - ag_vars = [ag_var for ag_var in ag_vars if ag_var in ibtracs_ds.data_vars.keys()] + ag_vars = [f"{ag}_{tc_var}" for ag in provider] + ag_vars = [ + ag_var + for ag_var in ag_vars + if ag_var in ibtracs_ds.data_vars.keys() + ] if len(ag_vars) == 0: - ag_vars = [f'{provider[0]}_{tc_var}'] - ibtracs_ds[ag_vars[0]] = xr.full_like(ibtracs_ds[f'usa_{tc_var}'], np.nan) - all_vals = ibtracs_ds[ag_vars].to_array(dim='agency') + ag_vars = [f"{provider[0]}_{tc_var}"] + ibtracs_ds[ag_vars[0]] = xr.full_like( + ibtracs_ds[f"usa_{tc_var}"], np.nan + ) + all_vals = ibtracs_ds[ag_vars].to_array(dim="agency") # argmax returns the first True (i.e. 
valid) along the 'agency' dimension - preferred_idx = all_vals.notnull().any(dim="date_time").argmax(dim='agency') + preferred_idx = ( + all_vals.notnull().any(dim="date_time").argmax(dim="agency") + ) ibtracs_ds[tc_var] = all_vals.isel(agency=preferred_idx) - selected_ags = np.array([v[:-len(f'_{tc_var}')].encode() for v in ag_vars]) - ibtracs_ds[f'{tc_var}_agency'] = ('storm', selected_ags[preferred_idx.values]) + selected_ags = np.array( + [v[: -len(f"_{tc_var}")].encode() for v in ag_vars] + ) + ibtracs_ds[f"{tc_var}_agency"] = ( + "storm", + selected_ags[preferred_idx.values], + ) - if tc_var == 'lon': + if tc_var == "lon": # Most IBTrACS longitudes are either normalized to [-180, 180] or to [0, 360], but # some aren't normalized at all, so we have to make sure that the values are okay: lons = ibtracs_ds[tc_var].values.copy() lon_valid_mask = np.isfinite(lons) - lons[lon_valid_mask] = u_coord.lon_normalize(lons[lon_valid_mask], center=0.0) + lons[lon_valid_mask] = u_coord.lon_normalize( + lons[lon_valid_mask], center=0.0 + ) ibtracs_ds[tc_var].values[:] = lons # Make sure that the longitude is always chosen positive if a track crosses the # antimeridian: - crossing_mask = ((ibtracs_ds[tc_var] > 170).any(dim="date_time") - & (ibtracs_ds[tc_var] < -170).any(dim="date_time") - & (ibtracs_ds[tc_var] < 0)).values + crossing_mask = ( + (ibtracs_ds[tc_var] > 170).any(dim="date_time") + & (ibtracs_ds[tc_var] < -170).any(dim="date_time") + & (ibtracs_ds[tc_var] < 0) + ).values ibtracs_ds[tc_var].values[crossing_mask] += 360 if interpolate_missing: @@ -574,68 +665,103 @@ def from_ibtracs_netcdf(cls, provider=None, rescale_windspeeds=True, storm_id=No # don't interpolate if there is only a single record for this variable nonsingular_mask = ( - ibtracs_ds[tc_var].notnull().sum(dim="date_time") > 1).values + ibtracs_ds[tc_var].notnull().sum(dim="date_time") > 1 + ).values if nonsingular_mask.sum() > 0: ibtracs_ds[tc_var].values[nonsingular_mask] = ( - ibtracs_ds[tc_var].sel(storm=nonsingular_mask).interpolate_na( - dim="date_time", method="linear")) - ibtracs_ds = ibtracs_ds[['sid', 'name', 'basin', 'time', 'valid_t'] - + additional_variables + phys_vars - + [f'{v}_agency' for v in phys_vars]] + ibtracs_ds[tc_var] + .sel(storm=nonsingular_mask) + .interpolate_na(dim="date_time", method="linear") + ) + ibtracs_ds = ibtracs_ds[ + ["sid", "name", "basin", "time", "valid_t"] + + additional_variables + + phys_vars + + [f"{v}_agency" for v in phys_vars] + ] if estimate_missing: - ibtracs_ds['pres'][:] = _estimate_pressure( - ibtracs_ds['pres'], ibtracs_ds['lat'], ibtracs_ds['lon'], ibtracs_ds['wind']) - ibtracs_ds['wind'][:] = _estimate_vmax( - ibtracs_ds['wind'], ibtracs_ds['lat'], ibtracs_ds['lon'], ibtracs_ds['pres']) - - ibtracs_ds['valid_t'] &= (ibtracs_ds['lat'].notnull() & ibtracs_ds['lon'].notnull() - & ibtracs_ds['wind'].notnull() & ibtracs_ds['pres'].notnull()) - valid_storms_mask = ibtracs_ds['valid_t'].any(dim="date_time") + ibtracs_ds["pres"][:] = _estimate_pressure( + ibtracs_ds["pres"], + ibtracs_ds["lat"], + ibtracs_ds["lon"], + ibtracs_ds["wind"], + ) + ibtracs_ds["wind"][:] = _estimate_vmax( + ibtracs_ds["wind"], + ibtracs_ds["lat"], + ibtracs_ds["lon"], + ibtracs_ds["pres"], + ) + + ibtracs_ds["valid_t"] &= ( + ibtracs_ds["lat"].notnull() + & ibtracs_ds["lon"].notnull() + & ibtracs_ds["wind"].notnull() + & ibtracs_ds["pres"].notnull() + ) + valid_storms_mask = ibtracs_ds["valid_t"].any(dim="date_time") invalid_storms_idx = np.nonzero(~valid_storms_mask.data)[0] if 
invalid_storms_idx.size > 0: - invalid_sids = list(ibtracs_ds.sid.sel(storm=invalid_storms_idx).astype(str).data) - LOGGER.warning('%d storm events are discarded because no valid wind/pressure values ' - 'have been found: %s%s', len(invalid_sids), ", ".join(invalid_sids[:5]), - ", ..." if len(invalid_sids) > 5 else ".") + invalid_sids = list( + ibtracs_ds.sid.sel(storm=invalid_storms_idx).astype(str).data + ) + LOGGER.warning( + "%d storm events are discarded because no valid wind/pressure values " + "have been found: %s%s", + len(invalid_sids), + ", ".join(invalid_sids[:5]), + ", ..." if len(invalid_sids) > 5 else ".", + ) ibtracs_ds = ibtracs_ds.sel(storm=valid_storms_mask) if discard_single_points: - valid_storms_mask = ibtracs_ds['valid_t'].sum(dim="date_time") > 1 + valid_storms_mask = ibtracs_ds["valid_t"].sum(dim="date_time") > 1 invalid_storms_idx = np.nonzero(~valid_storms_mask.data)[0] if invalid_storms_idx.size > 0: - invalid_sids = list(ibtracs_ds.sid.sel(storm=invalid_storms_idx).astype(str).data) - LOGGER.warning('%d storm events are discarded because only one valid timestep ' - 'has been found: %s%s', len(invalid_sids), - ", ".join(invalid_sids[:5]), - ", ..." if len(invalid_sids) > 5 else ".") + invalid_sids = list( + ibtracs_ds.sid.sel(storm=invalid_storms_idx).astype(str).data + ) + LOGGER.warning( + "%d storm events are discarded because only one valid timestep " + "has been found: %s%s", + len(invalid_sids), + ", ".join(invalid_sids[:5]), + ", ..." if len(invalid_sids) > 5 else ".", + ) ibtracs_ds = ibtracs_ds.sel(storm=valid_storms_mask) - if ibtracs_ds.dims['storm'] == 0: - LOGGER.info('After discarding IBTrACS events without valid values by the selected ' - 'reporting agencies, there are no tracks left that match the specified ' - 'requirements.') + if ibtracs_ds.dims["storm"] == 0: + LOGGER.info( + "After discarding IBTrACS events without valid values by the selected " + "reporting agencies, there are no tracks left that match the specified " + "requirements." 
+ ) return cls() - max_wind = ibtracs_ds['wind'].max(dim="date_time").data.ravel() - category_test = (max_wind[:, None] < np.array(SAFFIR_SIM_CAT)[None]) + max_wind = ibtracs_ds["wind"].max(dim="date_time").data.ravel() + category_test = max_wind[:, None] < np.array(SAFFIR_SIM_CAT)[None] category = np.argmax(category_test, axis=1) - 1 basin_map = {b.encode("utf-8"): v for b, v in BASIN_ENV_PRESSURE.items()} basin_fun = lambda b: basin_map[b] - ibtracs_ds['id_no'] = (ibtracs_ds.sid.str.replace(b'N', b'0') - .str.replace(b'S', b'1') - .astype(float)) + ibtracs_ds["id_no"] = ( + ibtracs_ds.sid.str.replace(b"N", b"0") + .str.replace(b"S", b"1") + .astype(float) + ) last_perc = 0 all_tracks = [] - for i_track, t_msk in enumerate(ibtracs_ds['valid_t'].data): + for i_track, t_msk in enumerate(ibtracs_ds["valid_t"].data): perc = 100 * len(all_tracks) / ibtracs_ds.sid.size if perc - last_perc >= 10: LOGGER.info("Progress: %d%%", perc) last_perc = perc track_ds = ibtracs_ds.sel(storm=i_track, date_time=t_msk) - tr_basin_penv = xr.apply_ufunc(basin_fun, track_ds.basin, vectorize=True) + tr_basin_penv = xr.apply_ufunc( + basin_fun, track_ds.basin, vectorize=True + ) tr_genesis_basin = track_ds.basin.values[0].astype(str).item() # Now that the valid time steps have been selected, we discard this track if it @@ -647,71 +773,90 @@ def from_ibtracs_netcdf(cls, provider=None, rescale_windspeeds=True, storm_id=No # A track that crosses the antimeridian in IBTrACS might be truncated by `t_msk` in # such a way that the remaining part is not crossing the antimeridian: - if (track_ds['lon'].values > 180).all(): - track_ds['lon'] -= 360 + if (track_ds["lon"].values > 180).all(): + track_ds["lon"] -= 360 # set time_step in hours - track_ds['time_step'] = xr.ones_like(track_ds['time'], dtype=float) - if track_ds['time'].size > 1: - track_ds['time_step'].values[1:] = (track_ds['time'].diff(dim="date_time") - / np.timedelta64(1, 'h')) - track_ds['time_step'].values[0] = track_ds['time_step'][1] + track_ds["time_step"] = xr.ones_like(track_ds["time"], dtype=float) + if track_ds["time"].size > 1: + track_ds["time_step"].values[1:] = track_ds["time"].diff( + dim="date_time" + ) / np.timedelta64(1, "h") + track_ds["time_step"].values[0] = track_ds["time_step"][1] with warnings.catch_warnings(): # See https://github.com/pydata/xarray/issues/4167 warnings.simplefilter(action="ignore", category=FutureWarning) - track_ds['rmw'] = track_ds['rmw'] \ - .ffill(dim='date_time', limit=1) \ - .bfill(dim='date_time', limit=1) \ + track_ds["rmw"] = ( + track_ds["rmw"] + .ffill(dim="date_time", limit=1) + .bfill(dim="date_time", limit=1) .fillna(0) - track_ds['roci'] = track_ds['roci'] \ - .ffill(dim='date_time', limit=1) \ - .bfill(dim='date_time', limit=1) \ + ) + track_ds["roci"] = ( + track_ds["roci"] + .ffill(dim="date_time", limit=1) + .bfill(dim="date_time", limit=1) .fillna(0) - track_ds['poci'] = track_ds['poci'] \ - .ffill(dim='date_time', limit=4) \ - .bfill(dim='date_time', limit=4) + ) + track_ds["poci"] = ( + track_ds["poci"] + .ffill(dim="date_time", limit=4) + .bfill(dim="date_time", limit=4) + ) # this is the most time consuming line in the processing: - track_ds['poci'] = track_ds['poci'].fillna(tr_basin_penv) + track_ds["poci"] = track_ds["poci"].fillna(tr_basin_penv) if estimate_missing: - track_ds['rmw'][:] = estimate_rmw(track_ds['rmw'].values, track_ds['pres'].values) - track_ds['roci'][:] = estimate_roci(track_ds['roci'].values, - track_ds['pres'].values) - track_ds['roci'][:] = np.fmax(track_ds['rmw'].values, 
track_ds['roci'].values) + track_ds["rmw"][:] = estimate_rmw( + track_ds["rmw"].values, track_ds["pres"].values + ) + track_ds["roci"][:] = estimate_roci( + track_ds["roci"].values, track_ds["pres"].values + ) + track_ds["roci"][:] = np.fmax( + track_ds["rmw"].values, track_ds["roci"].values + ) # ensure environmental pressure >= central pressure # this is the second most time consuming line in the processing: - track_ds['poci'][:] = np.fmax(track_ds['poci'], track_ds['pres']) + track_ds["poci"][:] = np.fmax(track_ds["poci"], track_ds["pres"]) provider_str = f"ibtracs_{provider[0]}" if len(provider) > 1: provider_str = "ibtracs_mixed:" + ",".join( - "{}({})".format(v, track_ds[f'{v}_agency'].astype(str).item()) - for v in phys_vars) + "{}({})".format(v, track_ds[f"{v}_agency"].astype(str).item()) + for v in phys_vars + ) data_vars = { - 'radius_max_wind': ('time', track_ds['rmw'].data), - 'radius_oci': ('time', track_ds['roci'].data), - 'max_sustained_wind': ('time', track_ds['wind'].data), - 'central_pressure': ('time', track_ds['pres'].data), - 'environmental_pressure': ('time', track_ds['poci'].data), + "radius_max_wind": ("time", track_ds["rmw"].data), + "radius_oci": ("time", track_ds["roci"].data), + "max_sustained_wind": ("time", track_ds["wind"].data), + "central_pressure": ("time", track_ds["pres"].data), + "environmental_pressure": ("time", track_ds["poci"].data), } coords = { - 'time': ('time', track_ds['time'].dt.round('s').data), - 'lat': ('time', track_ds['lat'].data), - 'lon': ('time', track_ds['lon'].data), + "time": ("time", track_ds["time"].dt.round("s").data), + "lat": ("time", track_ds["lat"].data), + "lon": ("time", track_ds["lon"].data), } attrs = { - 'max_sustained_wind_unit': 'kn', - 'central_pressure_unit': 'mb', - 'orig_event_flag': True, - 'data_provider': provider_str, - 'category': category[i_track], + "max_sustained_wind_unit": "kn", + "central_pressure_unit": "mb", + "orig_event_flag": True, + "data_provider": provider_str, + "category": category[i_track], } # automatically assign the remaining variables as attributes or data variables - for varname in ["time_step", "basin", "name", "sid", "id_no"] + additional_variables: + for varname in [ + "time_step", + "basin", + "name", + "sid", + "id_no", + ] + additional_variables: values = track_ds[varname].data if track_ds[varname].dtype.kind == "S": # This converts the `bytes` (dtype "|S*") in IBTrACS to the more common `str` @@ -720,20 +865,24 @@ def from_ibtracs_netcdf(cls, provider=None, rescale_windspeeds=True, storm_id=No if values.ndim == 0: attrs[varname] = values.item() else: - data_vars[varname] = ('time', values) + data_vars[varname] = ("time", values) all_tracks.append(xr.Dataset(data_vars, coords=coords, attrs=attrs)) if last_perc != 100: LOGGER.info("Progress: 100%") if len(all_tracks) == 0: # If all tracks have been discarded in the loop due to the basin filters: - LOGGER.info('There were no tracks left in the specified basin ' - 'after discarding invalid track positions.') + LOGGER.info( + "There were no tracks left in the specified basin " + "after discarding invalid track positions." + ) return cls(all_tracks) def read_processed_ibtracs_csv(self, *args, **kwargs): """This function is deprecated, use TCTracks.from_processed_ibtracs_csv instead.""" - LOGGER.warning("The use of TCTracks.read_processed_ibtracs_csv is deprecated. " - "Use TCTracks.from_processed_ibtracs_csv instead.") + LOGGER.warning( + "The use of TCTracks.read_processed_ibtracs_csv is deprecated. 
" + "Use TCTracks.from_processed_ibtracs_csv instead." + ) self.__dict__ = TCTracks.from_processed_ibtracs_csv(*args, **kwargs).__dict__ @classmethod @@ -754,8 +903,10 @@ def from_processed_ibtracs_csv(cls, file_names): def read_simulations_emanuel(self, *args, **kwargs): """This function is deprecated, use TCTracks.from_simulations_emanuel instead.""" - LOGGER.warning("The use of TCTracks.read_simulations_emanuel is deprecated. " - "Use TCTracks.from_simulations_emanuel instead.") + LOGGER.warning( + "The use of TCTracks.read_simulations_emanuel is deprecated. " + "Use TCTracks.from_simulations_emanuel instead." + ) self.__dict__ = TCTracks.from_simulations_emanuel(*args, **kwargs).__dict__ @classmethod @@ -781,15 +932,22 @@ def from_simulations_emanuel(cls, file_names, hemisphere=None, subset=None): """ data = [] for path in get_file_names(file_names): - data.extend(_read_file_emanuel( - path, hemisphere=hemisphere, subset=subset, - rmw_corr=Path(path).name in EMANUEL_RMW_CORR_FILES)) + data.extend( + _read_file_emanuel( + path, + hemisphere=hemisphere, + subset=subset, + rmw_corr=Path(path).name in EMANUEL_RMW_CORR_FILES, + ) + ) return cls(data) def read_one_gettelman(self, nc_data, i_track): """This function is deprecated, use TCTracks.from_gettelman instead.""" - LOGGER.warning("The use of TCTracks.read_one_gettelman is deprecated. " - "Use TCTracks.from_gettelman instead.") + LOGGER.warning( + "The use of TCTracks.read_one_gettelman is deprecated. " + "Use TCTracks.from_gettelman instead." + ) self.data.append(_read_one_gettelman(nc_data, i_track)) @classmethod @@ -807,13 +965,15 @@ def from_gettelman(cls, path): TCTracks with data from Andrew Gettelman's simulations. """ nc_data = nc.Dataset(path) - nstorms = nc_data.dimensions['storm'].size + nstorms = nc_data.dimensions["storm"].size return cls([_read_one_gettelman(nc_data, i) for i in range(nstorms)]) def read_simulations_chaz(self, *args, **kwargs): """This function is deprecated, use TCTracks.from_simulations_chaz instead.""" - LOGGER.warning("The use of TCTracks.read_simulations_chaz is deprecated. " - "Use TCTracks.from_simulations_chaz instead.") + LOGGER.warning( + "The use of TCTracks.read_simulations_chaz is deprecated. " + "Use TCTracks.from_simulations_chaz instead." 
+ ) self.__dict__ = TCTracks.from_simulations_chaz(*args, **kwargs).__dict__ @classmethod @@ -839,109 +999,155 @@ def from_simulations_chaz(cls, file_names, year_range=None, ensemble_nums=None): """ data = [] for path in get_file_names(file_names): - LOGGER.info('Reading %s.', path) + LOGGER.info("Reading %s.", path) with xr.open_dataset(path) as chaz_ds: - chaz_ds['time'].attrs["units"] = "days since 1950-1-1" - chaz_ds['time'].attrs["missing_value"] = -54786.0 + chaz_ds["time"].attrs["units"] = "days since 1950-1-1" + chaz_ds["time"].attrs["missing_value"] = -54786.0 chaz_ds = xr.decode_cf(chaz_ds) - chaz_ds['id_no'] = chaz_ds['stormID'] * 1000 + chaz_ds['ensembleNum'] - for var in ['time', 'longitude', 'latitude']: - chaz_ds[var] = chaz_ds[var].expand_dims(ensembleNum=chaz_ds['ensembleNum']) + chaz_ds["id_no"] = chaz_ds["stormID"] * 1000 + chaz_ds["ensembleNum"] + for var in ["time", "longitude", "latitude"]: + chaz_ds[var] = chaz_ds[var].expand_dims( + ensembleNum=chaz_ds["ensembleNum"] + ) chaz_ds = chaz_ds.stack(id=("ensembleNum", "stormID")) - years_uniq = chaz_ds['time'].dt.year.data + years_uniq = chaz_ds["time"].dt.year.data years_uniq = np.unique(years_uniq[~np.isnan(years_uniq)]) - LOGGER.info("File contains %s tracks (at most %s nodes each), " - "representing %s years (%d-%d).", - chaz_ds['id_no'].size, chaz_ds['lifelength'].size, - years_uniq.size, years_uniq[0], years_uniq[-1]) + LOGGER.info( + "File contains %s tracks (at most %s nodes each), " + "representing %s years (%d-%d).", + chaz_ds["id_no"].size, + chaz_ds["lifelength"].size, + years_uniq.size, + years_uniq[0], + years_uniq[-1], + ) # filter by year range if given if year_range: - match = ((chaz_ds['time'].dt.year >= year_range[0]) - & (chaz_ds['time'].dt.year <= year_range[1])).sel(lifelength=0) + match = ( + (chaz_ds["time"].dt.year >= year_range[0]) + & (chaz_ds["time"].dt.year <= year_range[1]) + ).sel(lifelength=0) if np.count_nonzero(match) == 0: - LOGGER.info('No tracks in time range (%s, %s).', *year_range) + LOGGER.info("No tracks in time range (%s, %s).", *year_range) continue chaz_ds = chaz_ds.sel(id=match) # filter by ensembleNum if given if ensemble_nums is not None: - match = np.isin(chaz_ds['ensembleNum'].values, ensemble_nums) + match = np.isin(chaz_ds["ensembleNum"].values, ensemble_nums) if np.count_nonzero(match) == 0: - LOGGER.info('No tracks with specified ensemble numbers.') + LOGGER.info("No tracks with specified ensemble numbers.") continue chaz_ds = chaz_ds.sel(id=match) # remove invalid tracks from selection - chaz_ds['valid_t'] = chaz_ds['time'].notnull() & chaz_ds['Mwspd'].notnull() - valid_st = chaz_ds['valid_t'].any(dim="lifelength") + chaz_ds["valid_t"] = ( + chaz_ds["time"].notnull() & chaz_ds["Mwspd"].notnull() + ) + valid_st = chaz_ds["valid_t"].any(dim="lifelength") invalid_st = np.nonzero(~valid_st.data)[0] if invalid_st.size > 0: - LOGGER.info('No valid Mwspd values found for %d out of %d storm tracks.', - invalid_st.size, valid_st.size) + LOGGER.info( + "No valid Mwspd values found for %d out of %d storm tracks.", + invalid_st.size, + valid_st.size, + ) chaz_ds = chaz_ds.sel(id=valid_st) # estimate central pressure from location and max wind - chaz_ds['pres'] = xr.full_like(chaz_ds['Mwspd'], -1, dtype=float) - chaz_ds['pres'][:] = _estimate_pressure( - chaz_ds['pres'], chaz_ds['latitude'], chaz_ds['longitude'], chaz_ds['Mwspd']) + chaz_ds["pres"] = xr.full_like(chaz_ds["Mwspd"], -1, dtype=float) + chaz_ds["pres"][:] = _estimate_pressure( + chaz_ds["pres"], + chaz_ds["latitude"], 
+ chaz_ds["longitude"], + chaz_ds["Mwspd"], + ) # compute time stepsizes - chaz_ds['time_step'] = xr.zeros_like(chaz_ds['time'], dtype=float) - chaz_ds['time_step'][1:, :] = (chaz_ds['time'].diff(dim="lifelength") - / np.timedelta64(1, 'h')) - chaz_ds['time_step'][0, :] = chaz_ds['time_step'][1, :] + chaz_ds["time_step"] = xr.zeros_like(chaz_ds["time"], dtype=float) + chaz_ds["time_step"][1:, :] = chaz_ds["time"].diff( + dim="lifelength" + ) / np.timedelta64(1, "h") + chaz_ds["time_step"][0, :] = chaz_ds["time_step"][1, :] # determine Saffir-Simpson category - max_wind = chaz_ds['Mwspd'].max(dim="lifelength").data.ravel() - category_test = (max_wind[:, None] < np.array(SAFFIR_SIM_CAT)[None]) - chaz_ds['category'] = ("id", np.argmax(category_test, axis=1) - 1) + max_wind = chaz_ds["Mwspd"].max(dim="lifelength").data.ravel() + category_test = max_wind[:, None] < np.array(SAFFIR_SIM_CAT)[None] + chaz_ds["category"] = ("id", np.argmax(category_test, axis=1) - 1) fname = Path(path).name - chaz_ds['time'][:] = chaz_ds['time'].dt.round('s').data - chaz_ds['radius_max_wind'] = xr.full_like(chaz_ds['pres'], np.nan) - chaz_ds['environmental_pressure'] = xr.full_like(chaz_ds['pres'], DEF_ENV_PRESSURE) - chaz_ds["track_name"] = ("id", [f"{fname}-{track_id.item()[1]}-{track_id.item()[0]}" - for track_id in chaz_ds['id']]) + chaz_ds["time"][:] = chaz_ds["time"].dt.round("s").data + chaz_ds["radius_max_wind"] = xr.full_like(chaz_ds["pres"], np.nan) + chaz_ds["environmental_pressure"] = xr.full_like( + chaz_ds["pres"], DEF_ENV_PRESSURE + ) + chaz_ds["track_name"] = ( + "id", + [ + f"{fname}-{track_id.item()[1]}-{track_id.item()[0]}" + for track_id in chaz_ds["id"] + ], + ) # add tracks one by one last_perc = 0 - for cnt, i_track in enumerate(chaz_ds['id_no']): - perc = 100 * cnt / chaz_ds['id_no'].size + for cnt, i_track in enumerate(chaz_ds["id_no"]): + perc = 100 * cnt / chaz_ds["id_no"].size if perc - last_perc >= 10: LOGGER.info("Progress: %d%%", perc) last_perc = perc - track_ds = chaz_ds.sel(id=i_track['id'].item()) - track_ds = track_ds.sel(lifelength=track_ds['valid_t'].data) - data.append(xr.Dataset({ - 'time_step': ('time', track_ds['time_step'].values), - 'max_sustained_wind': ('time', track_ds['Mwspd'].values), - 'central_pressure': ('time', track_ds['pres'].values), - 'radius_max_wind': ('time', track_ds['radius_max_wind'].values), - 'environmental_pressure': ('time', track_ds['environmental_pressure'].values), - 'basin': ('time', np.full(track_ds['time'].size, "GB", dtype=" 0: - LOGGER.info('%d track%s already at the requested temporal resolution.', - n_skip, "s are" if n_skip > 1 else " is") + LOGGER.info( + "%d track%s already at the requested temporal resolution.", + n_skip, + "s are" if n_skip > 1 else " is", + ) - LOGGER.info('Interpolating %d tracks to %sh time steps.', - self.size - n_skip, time_step_h) + LOGGER.info( + "Interpolating %d tracks to %sh time steps.", + self.size - n_skip, + time_step_h, + ) if land_params: extent = self.get_extent() @@ -1122,7 +1366,7 @@ def equal_timestep(self, time_step_h=1, land_params=False, pool=None): self.data, l_time_step_h, itertools.repeat(land_geom, self.size), - chunksize=chunksize + chunksize=chunksize, ) else: last_perc = 0 @@ -1139,10 +1383,12 @@ def equal_timestep(self, time_step_h=1, land_params=False, pool=None): def calc_random_walk(self, **kwargs): """Deprecated. Use `TCTracks.calc_perturbed_trajectories` instead.""" - LOGGER.warning("The use of TCTracks.calc_random_walk is deprecated." 
- "Use TCTracks.calc_perturbed_trajectories instead.") - if kwargs.get('ens_size'): - kwargs['nb_synth_tracks'] = kwargs.pop('ens_size') + LOGGER.warning( + "The use of TCTracks.calc_random_walk is deprecated." + "Use TCTracks.calc_perturbed_trajectories instead." + ) + if kwargs.get("ens_size"): + kwargs["nb_synth_tracks"] = kwargs.pop("ens_size") return self.calc_perturbed_trajectories(**kwargs) def calc_perturbed_trajectories(self, **kwargs): @@ -1167,9 +1413,10 @@ def get_bounds(self, deg_buffer=0.1): bounds : tuple (lon_min, lat_min, lon_max, lat_max) """ bounds = u_coord.latlon_bounds( - np.concatenate([t['lat'].values for t in self.data]), - np.concatenate([t['lon'].values for t in self.data]), - buffer=deg_buffer) + np.concatenate([t["lat"].values for t in self.data]), + np.concatenate([t["lon"].values for t in self.data]), + buffer=deg_buffer, + ) return bounds @property @@ -1217,7 +1464,9 @@ def generate_centroids(self, res_deg, buffer_deg): lon, lat = [ar.ravel() for ar in np.meshgrid(lon, lat)] return Centroids(lat=lat, lon=lon) - def plot(self, axis=None, figsize=(9, 13), legend=True, adapt_fontsize=True, **kwargs): + def plot( + self, axis=None, figsize=(9, 13), legend=True, adapt_fontsize=True, **kwargs + ): """Track over earth. Historical events are blue, probabilistic black. Parameters @@ -1239,13 +1488,13 @@ def plot(self, axis=None, figsize=(9, 13), legend=True, adapt_fontsize=True, **k ------- axis : matplotlib.axes._subplots.AxesSubplot """ - if 'lw' not in kwargs: - kwargs['lw'] = 2 - if 'transform' not in kwargs: - kwargs['transform'] = ccrs.PlateCarree() + if "lw" not in kwargs: + kwargs["lw"] = 2 + if "transform" not in kwargs: + kwargs["transform"] = ccrs.PlateCarree() if not self.size: - LOGGER.info('No tracks to plot') + LOGGER.info("No tracks to plot") return None extent = self.get_extent(deg_buffer=1) @@ -1253,16 +1502,18 @@ def plot(self, axis=None, figsize=(9, 13), legend=True, adapt_fontsize=True, **k if not axis: proj = ccrs.PlateCarree(central_longitude=mid_lon) - _, axis, _ = u_plot.make_map(proj=proj, figsize=figsize, adapt_fontsize=adapt_fontsize) + _, axis, _ = u_plot.make_map( + proj=proj, figsize=figsize, adapt_fontsize=adapt_fontsize + ) else: proj = axis.projection - axis.set_extent(extent, crs=kwargs['transform']) + axis.set_extent(extent, crs=kwargs["transform"]) u_plot.add_shapes(axis) cmap = ListedColormap(colors=CAT_COLORS) norm = BoundaryNorm([0] + SAFFIR_SIM_CAT, len(SAFFIR_SIM_CAT)) for track in self.data: - lonlat = np.stack([track['lon'].values, track['lat'].values], axis=-1) + lonlat = np.stack([track["lon"].values, track["lat"].values], axis=-1) lonlat[:, 0] = u_coord.lon_normalize(lonlat[:, 0], center=mid_lon) segments = np.stack([lonlat[:-1], lonlat[1:]], axis=1) @@ -1278,20 +1529,26 @@ def plot(self, axis=None, figsize=(9, 13), legend=True, adapt_fontsize=True, **k segments[mask, 1, 0] = -180 track_lc = LineCollection( - segments, linestyle='solid' if track.orig_event_flag else ':', - cmap=cmap, norm=norm, **kwargs) - track_lc.set_array(track['max_sustained_wind'].values) + segments, + linestyle="solid" if track.orig_event_flag else ":", + cmap=cmap, + norm=norm, + **kwargs, + ) + track_lc.set_array(track["max_sustained_wind"].values) axis.add_collection(track_lc) if legend: - leg_lines = [Line2D([0], [0], color=CAT_COLORS[i_col], lw=2) - for i_col in range(len(SAFFIR_SIM_CAT))] + leg_lines = [ + Line2D([0], [0], color=CAT_COLORS[i_col], lw=2) + for i_col in range(len(SAFFIR_SIM_CAT)) + ] leg_names = [CAT_NAMES[i_col] for i_col in 
sorted(CAT_NAMES.keys())] if any(not tr.orig_event_flag for tr in self.data): - leg_lines.append(Line2D([0], [0], color='grey', lw=2, ls='solid')) - leg_lines.append(Line2D([0], [0], color='grey', lw=2, ls=':')) - leg_names.append('Historical') - leg_names.append('Synthetic') + leg_lines.append(Line2D([0], [0], color="grey", lw=2, ls="solid")) + leg_lines.append(Line2D([0], [0], color="grey", lw=2, ls=":")) + leg_names.append("Historical") + leg_names.append("Synthetic") axis.legend(leg_lines, leg_names, loc=0) plt.tight_layout() return axis @@ -1304,16 +1561,18 @@ def write_netcdf(self, folder_name): folder_name : str Folder name where to write files. """ - list_path = [Path(folder_name, track.sid + '.nc') for track in self.data] - LOGGER.info('Writting %s files.', self.size) + list_path = [Path(folder_name, track.sid + ".nc") for track in self.data] + LOGGER.info("Writting %s files.", self.size) for track in self.data: - track.attrs['orig_event_flag'] = int(track.orig_event_flag) + track.attrs["orig_event_flag"] = int(track.orig_event_flag) xr.save_mfdataset(self.data, list_path) def read_netcdf(self, *args, **kwargs): """This function is deprecated, use TCTracks.from_netcdf instead.""" - LOGGER.warning("The use of TCTracks.read_netcdf is deprecated. " - "Use TCTracks.from_netcdf instead.") + LOGGER.warning( + "The use of TCTracks.read_netcdf is deprecated. " + "Use TCTracks.from_netcdf instead." + ) self.__dict__ = TCTracks.from_netcdf(*args, **kwargs).__dict__ @classmethod @@ -1338,20 +1597,25 @@ def from_netcdf(cls, folder_name): TCTracks with data from the given directory of NetCDF files. """ file_tr = get_file_names(folder_name) - LOGGER.info('Reading %s files.', len(file_tr)) + LOGGER.info("Reading %s files.", len(file_tr)) data = [] for file in file_tr: - if Path(file).suffix != '.nc': + if Path(file).suffix != ".nc": continue with xr.open_dataset(file) as track: - track.attrs['orig_event_flag'] = bool(track.orig_event_flag) + track.attrs["orig_event_flag"] = bool(track.orig_event_flag) if "basin" in track.attrs: - LOGGER.warning("Track data comes with legacy basin attribute. " - "We assume that the track remains in that basin during its " - "whole life time.") + LOGGER.warning( + "Track data comes with legacy basin attribute. " + "We assume that the track remains in that basin during its " + "whole life time." 
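
The plot method above colours every track segment by its maximum sustained wind, binned into Saffir-Simpson categories through a ListedColormap and a BoundaryNorm, and draws each track as a single LineCollection. A minimal standalone sketch of that colouring idea, outside the patched module, with invented track data, placeholder colours and assumed category thresholds in knots (the module constant SAFFIR_SIM_CAT is not shown in this patch):

# Sketch only: per-segment colouring of a toy track by wind speed.
# Thresholds, colours and coordinates are assumptions for illustration.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from matplotlib.colors import BoundaryNorm, ListedColormap

lon = np.array([-80.0, -79.0, -78.0, -77.5])
lat = np.array([24.0, 25.0, 26.5, 28.0])
wind = np.array([40.0, 70.0, 100.0, 120.0])  # kn, made up

thresholds = [0, 34, 64, 83, 96, 113, 137]   # assumed Saffir-Simpson bins (kn)
cmap = ListedColormap(["b", "g", "y", "orange", "r", "m"])
norm = BoundaryNorm(thresholds, cmap.N)

points = np.stack([lon, lat], axis=-1)
segments = np.stack([points[:-1], points[1:]], axis=1)  # one segment per time step
track_lc = LineCollection(segments, cmap=cmap, norm=norm, lw=2)
track_lc.set_array(wind[:-1])                            # one colour per segment

_, axis = plt.subplots()
axis.add_collection(track_lc)
axis.set_xlim(lon.min() - 1, lon.max() + 1)
axis.set_ylim(lat.min() - 1, lat.max() + 1)
plt.show()

Drawing one LineCollection per track keeps the per-segment colouring cheap compared to plotting every segment as an individual line.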
+ ) basin = track.basin - del track.attrs['basin'] - track['basin'] = ("time", np.full(track['time'].size, basin, dtype=" 1 - else Point(lons, lats) - for lons, lats in zip(t_lons, t_lats) - ]) + gdf.geometry = gpd.GeoSeries( + [ + ( + LineString(np.c_[lons, lats]) + if lons.size > 1 + else Point(lons, lats) + ) + for lons, lats in zip(t_lons, t_lats) + ] + ) gdf.crs = DEF_CRS # for splitting, restrict to tracks that come close to the antimeridian - t_split_mask = np.asarray([ - (lon > 170).any() and (lon < -170).any() and lon.size > 1 - for lon in t_lons]) + t_split_mask = np.asarray( + [ + (lon > 170).any() and (lon < -170).any() and lon.size > 1 + for lon in t_lons + ] + ) # note that tracks might be splitted at self-intersections as well: # https://github.com/Toblerity/Shapely/issues/572 antimeridian = LineString([(180, -90), (180, 90)]) - gdf.loc[t_split_mask, "geometry"] = gdf.geometry[t_split_mask] \ - .to_crs({"proj": "longlat", "lon_wrap": 180}) \ - .apply(lambda line: MultiLineString([ - LineString([(x - 360, y) for x, y in segment.coords]) - if any(x > 180 for x, y in segment.coords) else segment - for segment in shapely.ops.split(line, antimeridian).geoms - ])) + gdf.loc[t_split_mask, "geometry"] = ( + gdf.geometry[t_split_mask] + .to_crs({"proj": "longlat", "lon_wrap": 180}) + .apply( + lambda line: MultiLineString( + [ + ( + LineString([(x - 360, y) for x, y in segment.coords]) + if any(x > 180 for x, y in segment.coords) + else segment + ) + for segment in shapely.ops.split(line, antimeridian).geoms + ] + ) + ) + ) else: # LineString only works with more than one lat/lon pair - gdf.geometry = gpd.GeoSeries([ - LineString(np.c_[track['lon'], track['lat']]) if track['lon'].size > 1 - else Point(track['lon'].data, track['lat'].data) - for track in self.data - ]) + gdf.geometry = gpd.GeoSeries( + [ + ( + LineString(np.c_[track["lon"], track["lat"]]) + if track["lon"].size > 1 + else Point(track["lon"].data, track["lat"].data) + ) + for track in self.data + ] + ) gdf.crs = DEF_CRS return gdf @@ -1528,43 +1819,52 @@ def _one_interp_data(track, time_step_h, land_geom=None): """ if time_step_h is None: return track - if track['time'].size < 2: - LOGGER.warning('Track interpolation not done. ' - 'Not enough elements for %s', track.name) + if track["time"].size < 2: + LOGGER.warning( + "Track interpolation not done. 
" "Not enough elements for %s", + track.name, + ) track_int = track else: - method = ['linear', 'quadratic', 'cubic'][min(2, track['time'].size - 2)] + method = ["linear", "quadratic", "cubic"][min(2, track["time"].size - 2)] # handle change of sign in longitude - lon = u_coord.lon_normalize(track['lon'].copy(), center=0) + lon = u_coord.lon_normalize(track["lon"].copy(), center=0) if (lon < -170).any() and (lon > 170).any(): # crosses 180 degrees east/west -> use positive degrees east lon[lon < 0] += 360 - time_step = pd.tseries.frequencies.to_offset(pd.Timedelta(hours=time_step_h)).freqstr - track_int = track.resample(time=time_step, skipna=True)\ - .interpolate('linear') + time_step = pd.tseries.frequencies.to_offset( + pd.Timedelta(hours=time_step_h) + ).freqstr + track_int = track.resample(time=time_step, skipna=True).interpolate( + "linear" + ) for var in track.data_vars: if "time" in track[var].dims and track[var].dtype.kind != "f": track_int[var] = track[var].resample(time=time_step).nearest() - track_int['time_step'][:] = time_step_h + track_int["time_step"][:] = time_step_h lon_int = lon.resample(time=time_step).interpolate(method) lon_int[lon_int > 180] -= 360 - track_int.coords['lon'] = lon_int - track_int.coords['lat'] = track['lat'].resample(time=time_step)\ - .interpolate(method) - track_int.attrs['category'] = set_category( - track_int['max_sustained_wind'].values, - track_int.attrs['max_sustained_wind_unit']) + track_int.coords["lon"] = lon_int + track_int.coords["lat"] = ( + track["lat"].resample(time=time_step).interpolate(method) + ) + track_int.attrs["category"] = set_category( + track_int["max_sustained_wind"].values, + track_int.attrs["max_sustained_wind_unit"], + ) # restrict to time steps within original bounds track_int = track_int.sel( - time=(track['time'][0] <= track_int['time']) & - (track_int['time'] <= track['time'][-1])) + time=(track["time"][0] <= track_int["time"]) + & (track_int["time"] <= track["time"][-1]) + ) if land_geom: track_land_params(track_int, land_geom) return track_int + def _raise_if_legacy_or_unknown_hdf5_format(file_name): """Raise an exception if the HDF5 format of the file is not supported @@ -1610,11 +1910,12 @@ def _raise_if_legacy_or_unknown_hdf5_format(file_name): " supported by CLIMADA. Please store the data again using" " TCTracks.write_hdf5. If you struggle to convert the data, please open an" " issue on GitHub." - ) if is_legacy else ( - f"Unknown HDF5/NetCDF file format: {file_name}" ) + if is_legacy + else (f"Unknown HDF5/NetCDF file format: {file_name}") ) + def _read_one_gettelman(nc_data, i_track): """Read a single track from Andrew Gettelman's NetCDF dataset @@ -1629,36 +1930,45 @@ def _read_one_gettelman(nc_data, i_track): ------- xr.Dataset """ - scale_to_10m = (10. 
/ 60.)**.11 + scale_to_10m = (10.0 / 60.0) ** 0.11 mps2kts = 1.94384 - basin_dict = {0: 'NA - North Atlantic', - 1: 'SA - South Atlantic', - 2: 'WP - West Pacific', - 3: 'EP - East Pacific', - 4: 'SP - South Pacific', - 5: 'NI - North Indian', - 6: 'SI - South Indian', - 7: 'AS - Arabian Sea', - 8: 'BB - Bay of Bengal', - 9: 'EA - Eastern Australia', - 10: 'WA - Western Australia', - 11: 'CP - Central Pacific', - 12: 'CS - Carribbean Sea', - 13: 'GM - Gulf of Mexico', - 14: 'MM - Missing'} - - val_len = nc_data.variables['numObs'][i_track] + basin_dict = { + 0: "NA - North Atlantic", + 1: "SA - South Atlantic", + 2: "WP - West Pacific", + 3: "EP - East Pacific", + 4: "SP - South Pacific", + 5: "NI - North Indian", + 6: "SI - South Indian", + 7: "AS - Arabian Sea", + 8: "BB - Bay of Bengal", + 9: "EA - Eastern Australia", + 10: "WA - Western Australia", + 11: "CP - Central Pacific", + 12: "CS - Carribbean Sea", + 13: "GM - Gulf of Mexico", + 14: "MM - Missing", + } + + val_len = nc_data.variables["numObs"][i_track] sid = str(i_track) - times = nc_data.variables['source_time'][i_track, :][:val_len] + times = nc_data.variables["source_time"][i_track, :][:val_len] datetimes = list() for time in times: try: datetimes.append( dt.datetime.strptime( - str(nc.num2date(time, 'days since {}'.format('1858-11-17'), - calendar='standard')), - '%Y-%m-%d %H:%M:%S')) + str( + nc.num2date( + time, + "days since {}".format("1858-11-17"), + calendar="standard", + ) + ), + "%Y-%m-%d %H:%M:%S", + ) + ) except ValueError: # If wrong t, set t to previous t plus 3 hours if datetimes: @@ -1668,52 +1978,71 @@ def _read_one_gettelman(nc_data, i_track): time = times[pos + 1] - 1 / 24 * 3 datetimes.append( dt.datetime.strptime( - str(nc.num2date(time, 'days since {}'.format('1858-11-17'), - calendar='standard')), - '%Y-%m-%d %H:%M:%S')) + str( + nc.num2date( + time, + "days since {}".format("1858-11-17"), + calendar="standard", + ) + ), + "%Y-%m-%d %H:%M:%S", + ) + ) time_step = [] for i_time, time in enumerate(datetimes[1:], 1): time_step.append((time - datetimes[i_time - 1]).total_seconds() / 3600) time_step.append(time_step[-1]) - basins_numeric = nc_data.variables['basin'][i_track, :val_len] - basins = [basin_dict[b] if b in basin_dict else basin_dict[14] for b in basins_numeric] + basins_numeric = nc_data.variables["basin"][i_track, :val_len] + basins = [ + basin_dict[b] if b in basin_dict else basin_dict[14] for b in basins_numeric + ] - lon = nc_data.variables['lon'][i_track, :][:val_len] + lon = nc_data.variables["lon"][i_track, :][:val_len] lon[lon > 180] = lon[lon > 180] - 360 # change lon format to -180 to 180 - lat = nc_data.variables['lat'][i_track, :][:val_len] - cen_pres = nc_data.variables['pres'][i_track, :][:val_len] - av_prec = nc_data.variables['precavg'][i_track, :][:val_len] - max_prec = nc_data.variables['precmax'][i_track, :][:val_len] + lat = nc_data.variables["lat"][i_track, :][:val_len] + cen_pres = nc_data.variables["pres"][i_track, :][:val_len] + av_prec = nc_data.variables["precavg"][i_track, :][:val_len] + max_prec = nc_data.variables["precmax"][i_track, :][:val_len] # m/s to kn - wind = nc_data.variables['wind'][i_track, :][:val_len] * mps2kts * scale_to_10m + wind = nc_data.variables["wind"][i_track, :][:val_len] * mps2kts * scale_to_10m if not all(wind.data): # if wind is empty wind = np.ones(wind.size) * -999.9 - tr_df = pd.DataFrame({'time': datetimes, 'lat': lat, 'lon': lon, - 'max_sustained_wind': wind, - 'central_pressure': cen_pres, - 'environmental_pressure': np.ones(lat.size) * 
1015., - 'radius_max_wind': np.ones(lat.size) * 65., - 'maximum_precipitation': max_prec, - 'average_precipitation': av_prec, - 'basin': [b[:2] for b in basins], - 'time_step': time_step}) + tr_df = pd.DataFrame( + { + "time": datetimes, + "lat": lat, + "lon": lon, + "max_sustained_wind": wind, + "central_pressure": cen_pres, + "environmental_pressure": np.ones(lat.size) * 1015.0, + "radius_max_wind": np.ones(lat.size) * 65.0, + "maximum_precipitation": max_prec, + "average_precipitation": av_prec, + "basin": [b[:2] for b in basins], + "time_step": time_step, + } + ) # construct xarray - tr_ds = xr.Dataset.from_dataframe(tr_df.set_index('time')) - tr_ds.coords['lat'] = ('time', tr_ds['lat'].values) - tr_ds.coords['lon'] = ('time', tr_ds['lon'].values) - tr_ds['basin'] = tr_ds['basin'].astype('= hem_min) & (lat <= hem_max) | (lat == 0) hem_idx = np.all(hem_mask, axis=1).nonzero()[0] - data_hem = lambda keys: [data_mat[f'{k}store'][hem_idx] for k in keys] + data_hem = lambda keys: [data_mat[f"{k}store"][hem_idx] for k in keys] - lat, lon = data_hem(['lat', 'long']) - months, days, hours = data_hem(['month', 'day', 'hour']) + lat, lon = data_hem(["lat", "long"]) + months, days, hours = data_hem(["month", "day", "hour"]) months, days, hours = [np.int8(ar) for ar in [months, days, hours]] - tc_rmw, tc_maxwind, tc_pressure = data_hem(['rm', 'v', 'p']) - years = data_mat['yearstore'][0, hem_idx] + tc_rmw, tc_maxwind, tc_pressure = data_hem(["rm", "v", "p"]) + years = data_mat["yearstore"][0, hem_idx] ntracks, nnodes = lat.shape - LOGGER.info("Loading %s tracks%s.", ntracks, - f" on {hemisphere} hemisphere" if hemisphere in ['N', 'S'] else "") + LOGGER.info( + "Loading %s tracks%s.", + ntracks, + f" on {hemisphere} hemisphere" if hemisphere in ["N", "S"] else "", + ) # change lon format to -180 to 180 lon[lon > 180] = lon[lon > 180] - 360 @@ -1793,21 +2133,25 @@ def _read_file_emanuel(path, hemisphere=None, rmw_corr=False, subset=None): # deal with change of year year = np.full(valid_idx.size, years[i_track]) - year_change = (np.diff(months[i_track, valid_idx]) < 0) + year_change = np.diff(months[i_track, valid_idx]) < 0 year_change = year_change.nonzero()[0] if year_change.size > 0: - year[year_change[0] + 1:] += 1 + year[year_change[0] + 1 :] += 1 try: - datetimes = map(dt.datetime, year, - months[i_track, valid_idx], - days[i_track, valid_idx], - hours[i_track, valid_idx]) + datetimes = map( + dt.datetime, + year, + months[i_track, valid_idx], + days[i_track, valid_idx], + hours[i_track, valid_idx], + ) datetimes = list(datetimes) except ValueError as err: # dates are known to contain invalid February 30 - date_feb = (months[i_track, valid_idx] == 2) \ - & (days[i_track, valid_idx] > 28) + date_feb = (months[i_track, valid_idx] == 2) & ( + days[i_track, valid_idx] > 28 + ) if np.count_nonzero(date_feb) == 0: # unknown invalid date issue raise err @@ -1817,42 +2161,52 @@ def _read_file_emanuel(path, hemisphere=None, rmw_corr=False, subset=None): year[reference_idx], months[i_track, valid_idx[reference_idx]], days[i_track, valid_idx[reference_idx]], - hours[i_track, valid_idx[reference_idx]],) - datetimes = [reference_date + dt.timedelta(hours=int(step * i)) - for i in range(nnodes)] - datetimes = [cftime.DatetimeProlepticGregorian(d.year, d.month, d.day, d.hour) - for d in datetimes] + hours[i_track, valid_idx[reference_idx]], + ) + datetimes = [ + reference_date + dt.timedelta(hours=int(step * i)) + for i in range(nnodes) + ] + datetimes = [ + cftime.DatetimeProlepticGregorian(d.year, d.month, 
d.day, d.hour) + for d in datetimes + ] max_sustained_wind = tc_maxwind[i_track, valid_idx] - max_sustained_wind_unit = 'kn' + max_sustained_wind_unit = "kn" env_pressure = np.full(nnodes, DEF_ENV_PRESSURE) - category = set_category(max_sustained_wind, - max_sustained_wind_unit, - SAFFIR_SIM_CAT) - tr_ds = xr.Dataset({ - 'time_step': ('time', np.full(nnodes, time_step)), - 'radius_max_wind': ('time', tc_rmw[i_track, valid_idx]), - 'max_sustained_wind': ('time', max_sustained_wind), - 'central_pressure': ('time', tc_pressure[i_track, valid_idx]), - 'environmental_pressure': ('time', env_pressure), - 'basin': ('time', np.full(nnodes, basin, dtype=" 0: # Assume the landfall started between this and the previous point - orig_lf[i_lf][0] = track['lat'][lf_point - 1] + \ - (track['lat'][lf_point] - track['lat'][lf_point - 1]) / 2 - orig_lf[i_lf][1] = track['lon'][lf_point - 1] + \ - (track['lon'][lf_point] - track['lon'][lf_point - 1]) / 2 + orig_lf[i_lf][0] = ( + track["lat"][lf_point - 1] + + (track["lat"][lf_point] - track["lat"][lf_point - 1]) / 2 + ) + orig_lf[i_lf][1] = ( + track["lon"][lf_point - 1] + + (track["lon"][lf_point] - track["lon"][lf_point - 1]) / 2 + ) else: # track starts over land, assume first 'landfall' starts here - orig_lf[i_lf][0] = track['lat'][lf_point] - orig_lf[i_lf][1] = track['lon'][lf_point] - + orig_lf[i_lf][0] = track["lat"][lf_point] + orig_lf[i_lf][1] = track["lon"][lf_point] - dist = DistanceMetric.get_metric('haversine') - nodes1 = np.radians(np.array([track['lat'].values[1:], - track['lon'].values[1:]]).transpose()) - nodes0 = np.radians(np.array([track['lat'].values[:-1], - track['lon'].values[:-1]]).transpose()) + dist = DistanceMetric.get_metric("haversine") + nodes1 = np.radians( + np.array([track["lat"].values[1:], track["lon"].values[1:]]).transpose() + ) + nodes0 = np.radians( + np.array([track["lat"].values[:-1], track["lon"].values[:-1]]).transpose() + ) dist_since_lf[1:] = dist.pairwise(nodes1, nodes0).diagonal() - dist_since_lf[~track['on_land'].values] = 0.0 - nodes1 = np.array([track['lat'].values[sea_land_idx], - track['lon'].values[sea_land_idx]]).transpose() / 180 * np.pi - dist_since_lf[sea_land_idx] = \ - dist.pairwise(nodes1, orig_lf / 180 * np.pi).diagonal() + dist_since_lf[~track["on_land"].values] = 0.0 + nodes1 = ( + np.array( + [track["lat"].values[sea_land_idx], track["lon"].values[sea_land_idx]] + ).transpose() + / 180 + * np.pi + ) + dist_since_lf[sea_land_idx] = dist.pairwise( + nodes1, orig_lf / 180 * np.pi + ).diagonal() for sea_land, land_sea in zip(sea_land_idx, land_sea_idx): - dist_since_lf[sea_land:land_sea] = \ - np.cumsum(dist_since_lf[sea_land:land_sea]) + dist_since_lf[sea_land:land_sea] = np.cumsum(dist_since_lf[sea_land:land_sea]) dist_since_lf *= EARTH_RADIUS_KM - dist_since_lf[~track['on_land'].values] = np.nan + dist_since_lf[~track["on_land"].values] = np.nan return dist_since_lf + def _get_landfall_idx(track, include_starting_landfall=False): """Get the position of the start and end of landfalls for a TC track. @@ -2005,21 +2374,22 @@ def _get_landfall_idx(track, include_starting_landfall=False): ends over land, the last value is set to track.time.size. 
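
_dist_since_lf above accumulates great-circle distance only while the storm is over land and leaves NaN over open water, restarting the sum at every landfall. A toy numpy sketch of that bookkeeping for a track with a single landfall, using made-up coordinates and an assumed Earth radius (the module additionally measures the first land point from an interpolated landfall location and restarts the sum per landfall segment, both omitted here):

# Sketch only: cumulative distance since landfall for an invented 5-point track.
import numpy as np

EARTH_RADIUS_KM = 6371.0  # assumed value for this sketch

def haversine_km(lat0, lon0, lat1, lon1):
    """Great-circle distance between consecutive points, in km."""
    lat0, lon0, lat1, lon1 = map(np.radians, (lat0, lon0, lat1, lon1))
    a = (np.sin((lat1 - lat0) / 2) ** 2
         + np.cos(lat0) * np.cos(lat1) * np.sin((lon1 - lon0) / 2) ** 2)
    return 2 * EARTH_RADIUS_KM * np.arcsin(np.sqrt(a))

lat = np.array([24.0, 24.5, 25.0, 25.5, 26.0])
lon = np.array([-82.0, -81.5, -81.0, -80.5, -80.0])
on_land = np.array([False, False, True, True, False])

step_km = np.zeros(lat.size)
step_km[1:] = haversine_km(lat[:-1], lon[:-1], lat[1:], lon[1:])
step_km[~on_land] = 0.0                      # no accumulation over open water
# with a single landfall a plain cumsum is enough; the module restarts it
# at every sea-to-land transition
dist_since_lf = np.where(on_land, np.cumsum(step_km), np.nan)
print(dist_since_lf)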
""" # Index in land that comes from previous sea index - sea_land_idx = np.where(np.diff(track['on_land'].astype(int)) == 1)[0] + 1 + sea_land_idx = np.where(np.diff(track["on_land"].astype(int)) == 1)[0] + 1 # Index in sea that comes from previous land index - land_sea_idx = np.where(np.diff(track['on_land'].astype(int)) == -1)[0] + 1 - if track['on_land'][-1]: + land_sea_idx = np.where(np.diff(track["on_land"].astype(int)) == -1)[0] + 1 + if track["on_land"][-1]: # track ends over land: add last track point as the end of that landfall - land_sea_idx = np.append(land_sea_idx, track['time'].size) - if track['on_land'][0]: + land_sea_idx = np.append(land_sea_idx, track["time"].size) + if track["on_land"][0]: # track starts over land: remove first land-to-sea transition (not a landfall)? if include_starting_landfall: sea_land_idx = np.append(0, sea_land_idx) else: land_sea_idx = land_sea_idx[1:] if land_sea_idx.size != sea_land_idx.size: - raise ValueError('Mismatch') - return sea_land_idx,land_sea_idx + raise ValueError("Mismatch") + return sea_land_idx, land_sea_idx + def _estimate_pressure(cen_pres, lat, lon, v_max): """Replace missing pressure values with statistical estimate. @@ -2052,11 +2422,10 @@ def _estimate_pressure(cen_pres, lat, lon, v_max): lat, lon = [np.where(np.isnan(ar), -999, ar) for ar in [lat, lon]] msk = (cen_pres <= 0) & (v_max > 0) & (lat > -999) & (lon > -999) c_const, c_lat, c_lon, c_vmax = 1026.3401, -0.05504, -0.03536, -0.7357 - cen_pres[msk] = c_const + c_lat * lat[msk] \ - + c_lon * lon[msk] \ - + c_vmax * v_max[msk] + cen_pres[msk] = c_const + c_lat * lat[msk] + c_lon * lon[msk] + c_vmax * v_max[msk] return np.where(cen_pres <= 0, np.nan, cen_pres) + def _estimate_vmax(v_max, lat, lon, cen_pres): """Replace missing wind speed values with a statistical estimate. @@ -2088,11 +2457,10 @@ def _estimate_vmax(v_max, lat, lon, cen_pres): lat, lon = [np.where(np.isnan(ar), -999, ar) for ar in [lat, lon]] msk = (v_max <= 0) & (cen_pres > 0) & (lat > -999) & (lon > -999) c_const, c_lat, c_lon, c_pres = 1216.5223, -0.04086, -0.04190, -1.1797 - v_max[msk] = c_const + c_lat * lat[msk] \ - + c_lon * lon[msk] \ - + c_pres * cen_pres[msk] + v_max[msk] = c_const + c_lat * lat[msk] + c_lon * lon[msk] + c_pres * cen_pres[msk] return np.where(v_max <= 0, np.nan, v_max) + def estimate_roci(roci, cen_pres): """Replace missing radius (ROCI) values with statistical estimate. @@ -2124,12 +2492,19 @@ def estimate_roci(roci, cen_pres): roci_l = [210.711487, 215.897110, 198.261520, 159.589508, 90.900116] roci[msk] = 0 for i, pres_l_i in enumerate(pres_l): - slope_0 = 1. / (pres_l_i - pres_l[i - 1]) if i > 0 else 0 - slope_1 = 1. / (pres_l[i + 1] - pres_l_i) if i + 1 < len(pres_l) else 0 - roci[msk] += roci_l[i] * np.fmax(0, (1 - slope_0 * np.fmax(0, pres_l_i - cen_pres[msk]) - - slope_1 * np.fmax(0, cen_pres[msk] - pres_l_i))) + slope_0 = 1.0 / (pres_l_i - pres_l[i - 1]) if i > 0 else 0 + slope_1 = 1.0 / (pres_l[i + 1] - pres_l_i) if i + 1 < len(pres_l) else 0 + roci[msk] += roci_l[i] * np.fmax( + 0, + ( + 1 + - slope_0 * np.fmax(0, pres_l_i - cen_pres[msk]) + - slope_1 * np.fmax(0, cen_pres[msk] - pres_l_i) + ), + ) return np.where(roci <= 0, np.nan, roci) + def estimate_rmw(rmw, cen_pres): """Replace missing radius (RMW) values with statistical estimate. @@ -2159,12 +2534,19 @@ def estimate_rmw(rmw, cen_pres): rmw_l = [14.907318, 15.726927, 25.742142, 56.856522] rmw[msk] = 0 for i, pres_l_i in enumerate(pres_l): - slope_0 = 1. / (pres_l_i - pres_l[i - 1]) if i > 0 else 0 - slope_1 = 1. 
/ (pres_l[i + 1] - pres_l_i) if i + 1 < len(pres_l) else 0 - rmw[msk] += rmw_l[i] * np.fmax(0, (1 - slope_0 * np.fmax(0, pres_l_i - cen_pres[msk]) - - slope_1 * np.fmax(0, cen_pres[msk] - pres_l_i))) + slope_0 = 1.0 / (pres_l_i - pres_l[i - 1]) if i > 0 else 0 + slope_1 = 1.0 / (pres_l[i + 1] - pres_l_i) if i + 1 < len(pres_l) else 0 + rmw[msk] += rmw_l[i] * np.fmax( + 0, + ( + 1 + - slope_0 * np.fmax(0, pres_l_i - cen_pres[msk]) + - slope_1 * np.fmax(0, cen_pres[msk] - pres_l_i) + ), + ) return np.where(rmw <= 0, np.nan, rmw) + def ibtracs_fit_param(explained, explanatory, year_range=(1980, 2019), order=1): """Statistically fit an ibtracs parameter to other ibtracs variables. @@ -2185,8 +2567,8 @@ def ibtracs_fit_param(explained, explanatory, year_range=(1980, 2019), order=1): ------- result : OLSResults """ - wmo_vars = ['wind', 'pres', 'rmw', 'roci', 'poci'] - all_vars = ['lat', 'lon'] + wmo_vars + wmo_vars = ["wind", "pres", "rmw", "roci", "poci"] + all_vars = ["lat", "lon"] + wmo_vars explanatory = list(explanatory) variables = explanatory + [explained] for var in variables: @@ -2194,7 +2576,7 @@ def ibtracs_fit_param(explained, explanatory, year_range=(1980, 2019), order=1): raise KeyError("Unknown ibtracs variable: %s" % var) # load ibtracs dataset - fn_nc = SYSTEM_DIR.joinpath('IBTrACS.ALL.v04r00.nc') + fn_nc = SYSTEM_DIR.joinpath("IBTrACS.ALL.v04r00.nc") with xr.open_dataset(fn_nc) as ibtracs_ds: # choose specified year range years = ibtracs_ds.sid.str.slice(0, 4).astype(int) @@ -2204,8 +2586,8 @@ def ibtracs_fit_param(explained, explanatory, year_range=(1980, 2019), order=1): if "wind" in variables: for agency in IBTRACS_AGENCIES: scale, shift = IBTRACS_AGENCY_1MIN_WIND_FACTOR[agency] - ibtracs_ds[f'{agency}_wind'] -= shift - ibtracs_ds[f'{agency}_wind'] /= scale + ibtracs_ds[f"{agency}_wind"] -= shift + ibtracs_ds[f"{agency}_wind"] /= scale # fill values agency_pref, track_agency_ix = ibtracs_track_agency(ibtracs_ds) @@ -2213,21 +2595,25 @@ def ibtracs_fit_param(explained, explanatory, year_range=(1980, 2019), order=1): if var not in variables: continue # array of values in order of preference - cols = [f'{a}_{var}' for a in agency_pref] + cols = [f"{a}_{var}" for a in agency_pref] cols = [col for col in cols if col in ibtracs_ds.data_vars.keys()] - all_vals = ibtracs_ds[cols].to_array(dim='agency') - preferred_ix = all_vals.notnull().argmax(dim='agency') - if var in ['wind', 'pres']: + all_vals = ibtracs_ds[cols].to_array(dim="agency") + preferred_ix = all_vals.notnull().argmax(dim="agency") + if var in ["wind", "pres"]: # choice: wmo -> wmo_agency/usa_agency -> preferred - ibtracs_ds[var] = ibtracs_ds['wmo_' + var] \ - .fillna(all_vals.isel(agency=track_agency_ix)) \ + ibtracs_ds[var] = ( + ibtracs_ds["wmo_" + var] + .fillna(all_vals.isel(agency=track_agency_ix)) .fillna(all_vals.isel(agency=preferred_ix)) + ) else: ibtracs_ds[var] = all_vals.isel(agency=preferred_ix) - fit_df = pd.DataFrame({var: ibtracs_ds[var].values.ravel() for var in variables}) - fit_df = fit_df.dropna(axis=0, how='any').reset_index(drop=True) - if 'lat' in explanatory: - fit_df['lat'] = fit_df['lat'].abs() + fit_df = pd.DataFrame( + {var: ibtracs_ds[var].values.ravel() for var in variables} + ) + fit_df = fit_df.dropna(axis=0, how="any").reset_index(drop=True) + if "lat" in explanatory: + fit_df["lat"] = fit_df["lat"].abs() # prepare explanatory variables d_explanatory = fit_df[explanatory] @@ -2243,23 +2629,31 @@ def ibtracs_fit_param(explained, explanatory, year_range=(1980, 2019), order=1): # 
piecewise linear with given break points d_explanatory = d_explanatory.drop(labels=[ex], axis=1) for i, max_o_i in enumerate(max_o): - col = f'{ex}{max_o_i}' - slope_0 = 1. / (max_o_i - max_o[i - 1]) if i > 0 else 0 - slope_1 = 1. / (max_o[i + 1] - max_o_i) if i + 1 < len(max_o) else 0 - d_explanatory[col] = np.fmax(0, (1 - slope_0 * np.fmax(0, max_o_i - fit_df[ex]) - - slope_1 * np.fmax(0, fit_df[ex] - max_o_i))) + col = f"{ex}{max_o_i}" + slope_0 = 1.0 / (max_o_i - max_o[i - 1]) if i > 0 else 0 + slope_1 = ( + 1.0 / (max_o[i + 1] - max_o_i) if i + 1 < len(max_o) else 0 + ) + d_explanatory[col] = np.fmax( + 0, + ( + 1 + - slope_0 * np.fmax(0, max_o_i - fit_df[ex]) + - slope_1 * np.fmax(0, fit_df[ex] - max_o_i) + ), + ) elif max_o < 0: d_explanatory = d_explanatory.drop(labels=[ex], axis=1) for order in range(1, abs(max_o) + 1): - d_explanatory[f'{ex}^{-order}'] = fit_df[ex]**(-order) + d_explanatory[f"{ex}^{-order}"] = fit_df[ex] ** (-order) add_const = True else: for order in range(2, max_o + 1): - d_explanatory[f'{ex}^{order}'] = fit_df[ex]**order + d_explanatory[f"{ex}^{order}"] = fit_df[ex] ** order add_const = True d_explained = fit_df[[explained]] if add_const: - d_explanatory['const'] = 1.0 + d_explanatory["const"] = 1.0 # run statistical fit sm_results = sm.OLS(d_explained, d_explanatory).fit() @@ -2270,6 +2664,7 @@ def ibtracs_fit_param(explained, explanatory, year_range=(1980, 2019), order=1): return sm_results + def ibtracs_track_agency(ds_sel): """Get preferred IBTrACS agency for each entry in the dataset. @@ -2286,18 +2681,20 @@ def ibtracs_track_agency(ds_sel): For each entry in `ds_sel`, the agency to use, given as an index into `agency_pref`. """ agency_pref = ["wmo"] + IBTRACS_AGENCIES - agency_map = {a.encode('utf-8'): i for i, a in enumerate(agency_pref)} - agency_map.update({ - a.encode('utf-8'): agency_map[b'usa'] for a in IBTRACS_USA_AGENCIES - }) - agency_map[b''] = agency_map[b'wmo'] + agency_map = {a.encode("utf-8"): i for i, a in enumerate(agency_pref)} + agency_map.update( + {a.encode("utf-8"): agency_map[b"usa"] for a in IBTRACS_USA_AGENCIES} + ) + agency_map[b""] = agency_map[b"wmo"] agency_fun = lambda x: agency_map[x] if "track_agency" not in ds_sel.data_vars.keys(): - ds_sel['track_agency'] = ds_sel['wmo_agency'].where(ds_sel['wmo_agency'] != b'', - ds_sel['usa_agency']) - track_agency_ix = xr.apply_ufunc(agency_fun, ds_sel['track_agency'], vectorize=True) + ds_sel["track_agency"] = ds_sel["wmo_agency"].where( + ds_sel["wmo_agency"] != b"", ds_sel["usa_agency"] + ) + track_agency_ix = xr.apply_ufunc(agency_fun, ds_sel["track_agency"], vectorize=True) return agency_pref, track_agency_ix + def ibtracs_add_official_variable(ibtracs_ds, tc_var, add_3h=False): """Add variables for the officially responsible agencies to an IBTrACS dataset @@ -2318,52 +2715,68 @@ def ibtracs_add_official_variable(ibtracs_ds, tc_var, add_3h=False): """ if "nan_var" not in ibtracs_ds.data_vars.keys(): # add an array full of NaN as a fallback value in the procedure - ibtracs_ds['nan_var'] = xr.full_like(ibtracs_ds['lat'], np.nan) + ibtracs_ds["nan_var"] = xr.full_like(ibtracs_ds["lat"], np.nan) # determine which of the official agencies report this variable at all - available_agencies = [a for a in IBTRACS_AGENCIES - if f'{a}_{tc_var}' in ibtracs_ds.data_vars.keys()] + available_agencies = [ + a for a in IBTRACS_AGENCIES if f"{a}_{tc_var}" in ibtracs_ds.data_vars.keys() + ] # map all non-reporting agency variables to the 'nan_var' (0) agency_map = { - a.encode("utf-8"): 
available_agencies.index(a) + 1 if a in available_agencies else 0 - for a in [''] + IBTRACS_AGENCIES + a.encode("utf-8"): ( + available_agencies.index(a) + 1 if a in available_agencies else 0 + ) + for a in [""] + IBTRACS_AGENCIES } - agency_map.update({ - a.encode('utf-8'): agency_map[b'usa'] for a in IBTRACS_USA_AGENCIES - }) + agency_map.update( + {a.encode("utf-8"): agency_map[b"usa"] for a in IBTRACS_USA_AGENCIES} + ) # read from officially responsible agencies that report this variable, but only # at official reporting times (usually 6-hourly) official_agency_ix = xr.apply_ufunc( - lambda x: agency_map[x], ibtracs_ds['wmo_agency'], vectorize=True) - available_cols = ['nan_var'] + [f'{a}_{tc_var}' for a in available_agencies] - all_vals = ibtracs_ds[available_cols].to_array(dim='agency') - ibtracs_ds[f'official_{tc_var}'] = all_vals.isel(agency=official_agency_ix) + lambda x: agency_map[x], ibtracs_ds["wmo_agency"], vectorize=True + ) + available_cols = ["nan_var"] + [f"{a}_{tc_var}" for a in available_agencies] + all_vals = ibtracs_ds[available_cols].to_array(dim="agency") + ibtracs_ds[f"official_{tc_var}"] = all_vals.isel(agency=official_agency_ix) if add_3h: # create a copy in float for NaN interpolation official_agency_ix_interp = official_agency_ix.astype(np.float16) # extrapolate track agency for tracks with only a single record - mask_singular = ((official_agency_ix_interp > 0).sum(dim="date_time") == 1).values - official_agency_ix_interp.values[mask_singular,:] = \ - official_agency_ix_interp.sel(storm=mask_singular).max(dim="date_time").values[:,None] + mask_singular = ( + (official_agency_ix_interp > 0).sum(dim="date_time") == 1 + ).values + official_agency_ix_interp.values[mask_singular, :] = ( + official_agency_ix_interp.sel(storm=mask_singular) + .max(dim="date_time") + .values[:, None] + ) with warnings.catch_warnings(): # See https://github.com/pydata/xarray/issues/4167 warnings.simplefilter(action="ignore", category=FutureWarning) # interpolate responsible agencies using nearest neighbor interpolation - official_agency_ix_interp.values[official_agency_ix_interp.values == 0.0] = np.nan + official_agency_ix_interp.values[ + official_agency_ix_interp.values == 0.0 + ] = np.nan official_agency_ix_interp = official_agency_ix_interp.interpolate_na( - dim="date_time", method="nearest", fill_value="extrapolate") + dim="date_time", method="nearest", fill_value="extrapolate" + ) # read from officially responsible agencies that report this variable, including # 3-hour time steps if available - official_agency_ix_interp.values[official_agency_ix_interp.isnull().values] = 0.0 - ibtracs_ds[f'official_3h_{tc_var}'] = all_vals.isel( - agency=official_agency_ix_interp.astype(int)) + official_agency_ix_interp.values[official_agency_ix_interp.isnull().values] = ( + 0.0 + ) + ibtracs_ds[f"official_3h_{tc_var}"] = all_vals.isel( + agency=official_agency_ix_interp.astype(int) + ) + def _change_max_wind_unit(wind, unit_orig, unit_dest): """Compute maximum wind speed in unit_dest. @@ -2382,29 +2795,30 @@ def _change_max_wind_unit(wind, unit_orig, unit_dest): maxwind : double Maximum wind speed in specified wind speed units. 
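
ibtracs_track_agency and ibtracs_add_official_variable above share one xarray pattern: stack the per-agency variables along a new "agency" dimension, find the first agency reporting a value, and select it pointwise. A minimal sketch of that pattern with two example agencies and made-up wind values:

# Sketch only: pick the first non-missing value across agencies in preference order.
import numpy as np
import xarray as xr

ds = xr.Dataset(
    {
        "usa_wind": ("storm", [np.nan, 55.0, np.nan]),
        "tokyo_wind": ("storm", [40.0, 50.0, np.nan]),
    }
)
agency_pref = ["usa", "tokyo"]                              # order of preference
all_vals = ds[[f"{a}_wind" for a in agency_pref]].to_array(dim="agency")
preferred_ix = all_vals.notnull().argmax(dim="agency")      # first agency with data
ds["wind"] = all_vals.isel(agency=preferred_ix)
print(ds["wind"].values)                                    # [40. 55. nan]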
""" - if unit_orig in ('kn', 'kt'): + if unit_orig in ("kn", "kt"): ur_orig = ureg.knot - elif unit_orig == 'mph': + elif unit_orig == "mph": ur_orig = ureg.mile / ureg.hour - elif unit_orig == 'm/s': + elif unit_orig == "m/s": ur_orig = ureg.meter / ureg.second - elif unit_orig == 'km/h': + elif unit_orig == "km/h": ur_orig = ureg.kilometer / ureg.hour else: - raise ValueError('Unit not recognised %s.' % unit_orig) - if unit_dest in ('kn', 'kt'): + raise ValueError("Unit not recognised %s." % unit_orig) + if unit_dest in ("kn", "kt"): ur_dest = ureg.knot - elif unit_dest == 'mph': + elif unit_dest == "mph": ur_dest = ureg.mile / ureg.hour - elif unit_dest == 'm/s': + elif unit_dest == "m/s": ur_dest = ureg.meter / ureg.second - elif unit_dest == 'km/h': + elif unit_dest == "km/h": ur_dest = ureg.kilometer / ureg.hour else: - raise ValueError('Unit not recognised %s.' % unit_dest) + raise ValueError("Unit not recognised %s." % unit_dest) return (np.nanmax(wind) * ur_orig).to(ur_dest).magnitude -def set_category(max_sus_wind, wind_unit='kn', saffir_scale=None): + +def set_category(max_sus_wind, wind_unit="kn", saffir_scale=None): """Add storm category according to Saffir-Simpson hurricane scale. Parameters @@ -2430,14 +2844,15 @@ def set_category(max_sus_wind, wind_unit='kn', saffir_scale=None): """ if saffir_scale is None: saffir_scale = SAFFIR_SIM_CAT - if wind_unit != 'kn': - max_sus_wind = _change_max_wind_unit(max_sus_wind, wind_unit, 'kn') + if wind_unit != "kn": + max_sus_wind = _change_max_wind_unit(max_sus_wind, wind_unit, "kn") max_wind = np.nanmax(max_sus_wind) try: return (np.argwhere(max_wind < saffir_scale) - 1)[0][0] except IndexError: return -1 + def _zlib_from_dataarray(data_var: xr.DataArray) -> bool: """Return true if data_var is of numerical type, return False otherwise diff --git a/climada/hazard/tc_tracks_synth.py b/climada/hazard/tc_tracks_synth.py index 759245010..57be3dc2a 100644 --- a/climada/hazard/tc_tracks_synth.py +++ b/climada/hazard/tc_tracks_synth.py @@ -23,15 +23,16 @@ import itertools import logging import warnings + import matplotlib.cm as cm_mp -from matplotlib.lines import Line2D import matplotlib.pyplot as plt import numba import numpy as np +from matplotlib.lines import Line2D -from climada import CONFIG -import climada.util.coordinates import climada.hazard.tc_tracks +import climada.util.coordinates +from climada import CONFIG LOGGER = logging.getLogger(__name__) @@ -42,7 +43,7 @@ 2: 0.0025968221565522698, 3: 0.002626252944053856, 4: 0.002550639312763181, - 5: 0.003788695795963695 + 5: 0.003788695795963695, } """Global landfall decay parameters for wind speed by TC category. @@ -65,7 +66,8 @@ 2: (1.0468630800617038, 0.004067381088015585), 3: (1.0639055205005432, 0.003708174876364079), 4: (1.0828373148889825, 0.003997492773076179), - 5: (1.1088615145002092, 0.005224331234796362)} + 5: (1.1088615145002092, 0.005224331234796362), +} """Global landfall decay parameters for pressure by TC category. Keys are TC categories with -1='TD', 0='TS', 1='Cat 1', ..., 5='Cat 5'. 
@@ -80,17 +82,20 @@ >>> v_rel, p_rel = _calc_land_decay(tracks.data, land_geom, pool=tracks.pool) """ -def calc_perturbed_trajectories(tracks, - nb_synth_tracks=9, - max_shift_ini=0.75, - max_dspeed_rel=0.3, - max_ddirection=np.pi / 360, - autocorr_dspeed=0.85, - autocorr_ddirection=0.5, - seed=CONFIG.hazard.trop_cyclone.random_seed.int(), - decay=True, - use_global_decay_params=True, - pool=None): + +def calc_perturbed_trajectories( + tracks, + nb_synth_tracks=9, + max_shift_ini=0.75, + max_dspeed_rel=0.3, + max_ddirection=np.pi / 360, + autocorr_dspeed=0.85, + autocorr_ddirection=0.5, + seed=CONFIG.hazard.trop_cyclone.random_seed.int(), + decay=True, + use_global_decay_params=True, + pool=None, +): """ Generate synthetic tracks based on directed random walk. An ensemble of nb_synth_tracks synthetic tracks is computed for every track contained in self. @@ -161,7 +166,7 @@ def calc_perturbed_trajectories(tracks, Pool that will be used for parallel computation when applicable. If not given, the pool attribute of `tracks` will be used. Default: None """ - LOGGER.info('Computing %s synthetic tracks.', nb_synth_tracks * tracks.size) + LOGGER.info("Computing %s synthetic tracks.", nb_synth_tracks * tracks.size) pool = tracks.pool if pool is None else pool @@ -169,10 +174,14 @@ def calc_perturbed_trajectories(tracks, np.random.seed(seed) # ensure tracks have constant time steps - time_step_h = np.unique(np.concatenate([np.unique(x['time_step']) for x in tracks.data])) + time_step_h = np.unique( + np.concatenate([np.unique(x["time_step"]) for x in tracks.data]) + ) if not np.allclose(time_step_h, time_step_h[0]): - raise ValueError('Tracks have different temporal resolution. ' - 'Please ensure constant time steps by applying equal_timestep beforehand') + raise ValueError( + "Tracks have different temporal resolution. 
" + "Please ensure constant time steps by applying equal_timestep beforehand" + ) time_step_h = time_step_h[0] # number of random value per synthetic track: @@ -181,44 +190,77 @@ def calc_perturbed_trajectories(tracks, # hence sum is nb_synth_tracks * (2 + 2*(size-1)) = nb_synth_tracks * 2 * size # https://stats.stackexchange.com/questions/48086/algorithm-to-produce-autocorrelated-uniformly-distributed-number if autocorr_ddirection == 0 and autocorr_dspeed == 0: - random_vec = [np.random.uniform(size=nb_synth_tracks * (2 * track['time'].size)) - for track in tracks.data] + random_vec = [ + np.random.uniform(size=nb_synth_tracks * (2 * track["time"].size)) + for track in tracks.data + ] else: - random_vec = [np.concatenate((np.random.uniform(size=nb_synth_tracks * 2), - _random_uniform_ac(nb_synth_tracks * (track['time'].size - 1), - autocorr_ddirection, time_step_h), - _random_uniform_ac(nb_synth_tracks * (track['time'].size - 1), - autocorr_dspeed, time_step_h))) - if track['time'].size > 1 else np.random.uniform(size=nb_synth_tracks * 2) - for track in tracks.data] + random_vec = [ + ( + np.concatenate( + ( + np.random.uniform(size=nb_synth_tracks * 2), + _random_uniform_ac( + nb_synth_tracks * (track["time"].size - 1), + autocorr_ddirection, + time_step_h, + ), + _random_uniform_ac( + nb_synth_tracks * (track["time"].size - 1), + autocorr_dspeed, + time_step_h, + ), + ) + ) + if track["time"].size > 1 + else np.random.uniform(size=nb_synth_tracks * 2) + ) + for track in tracks.data + ] if pool: chunksize = max(min(tracks.size // pool.ncpus, 1000), 1) - new_ens = pool.map(_one_rnd_walk, tracks.data, - itertools.repeat(nb_synth_tracks, tracks.size), - itertools.repeat(max_shift_ini, tracks.size), - itertools.repeat(max_dspeed_rel, tracks.size), - itertools.repeat(max_ddirection, tracks.size), - random_vec, chunksize=chunksize) + new_ens = pool.map( + _one_rnd_walk, + tracks.data, + itertools.repeat(nb_synth_tracks, tracks.size), + itertools.repeat(max_shift_ini, tracks.size), + itertools.repeat(max_dspeed_rel, tracks.size), + itertools.repeat(max_ddirection, tracks.size), + random_vec, + chunksize=chunksize, + ) else: - new_ens = [_one_rnd_walk(track, nb_synth_tracks, max_shift_ini, - max_dspeed_rel, max_ddirection, rand) - for track, rand in zip(tracks.data, random_vec)] + new_ens = [ + _one_rnd_walk( + track, + nb_synth_tracks, + max_shift_ini, + max_dspeed_rel, + max_ddirection, + rand, + ) + for track, rand in zip(tracks.data, random_vec) + ] cutoff_track_ids_tc = [x[1] for x in new_ens] cutoff_track_ids_tc = sum(cutoff_track_ids_tc, []) cutoff_track_ids_ts = [x[2] for x in new_ens] cutoff_track_ids_ts = sum(cutoff_track_ids_ts, []) if len(cutoff_track_ids_tc) > 0: - LOGGER.info('The following generated synthetic tracks moved beyond ' - 'the range of [-70, 70] degrees latitude. Cut out ' - 'at TC category >1: %s.', - ', '.join(cutoff_track_ids_tc)) + LOGGER.info( + "The following generated synthetic tracks moved beyond " + "the range of [-70, 70] degrees latitude. Cut out " + "at TC category >1: %s.", + ", ".join(cutoff_track_ids_tc), + ) if len(cutoff_track_ids_ts) > 0: - LOGGER.debug('The following generated synthetic tracks moved beyond ' - 'the range of [-70, 70] degrees latitude. Cut out ' - 'at TC category <= 1: %s.', - ', '.join(cutoff_track_ids_ts)) + LOGGER.debug( + "The following generated synthetic tracks moved beyond " + "the range of [-70, 70] degrees latitude. 
Cut out " + "at TC category <= 1: %s.", + ", ".join(cutoff_track_ids_ts), + ) new_ens = [x[0] for x in new_ens] tracks.data = sum(new_ens, []) @@ -228,8 +270,9 @@ def calc_perturbed_trajectories(tracks, extent=extent, resolution=10 ) if use_global_decay_params: - tracks.data = _apply_land_decay(tracks.data, LANDFALL_DECAY_V, - LANDFALL_DECAY_P, land_geom, pool=pool) + tracks.data = _apply_land_decay( + tracks.data, LANDFALL_DECAY_V, LANDFALL_DECAY_P, land_geom, pool=pool + ) else: # fit land decay coefficients based on historical tracks hist_tracks = [track for track in tracks.data if track.orig_event_flag] @@ -237,16 +280,21 @@ def calc_perturbed_trajectories(tracks, try: v_rel, p_rel = _calc_land_decay(hist_tracks, land_geom, pool=pool) tracks.data = _apply_land_decay( - tracks.data, v_rel, p_rel, land_geom, pool=pool) + tracks.data, v_rel, p_rel, land_geom, pool=pool + ) except ValueError as verr: - raise ValueError('Landfall decay could not be applied.') from verr + raise ValueError("Landfall decay could not be applied.") from verr else: - raise ValueError('No historical tracks found. Historical' - ' tracks are needed for land decay calibration' - ' if use_global_decay_params=False.') + raise ValueError( + "No historical tracks found. Historical" + " tracks are needed for land decay calibration" + " if use_global_decay_params=False." + ) -def _one_rnd_walk(track, nb_synth_tracks, max_shift_ini, max_dspeed_rel, max_ddirection, rnd_vec): +def _one_rnd_walk( + track, nb_synth_tracks, max_shift_ini, max_dspeed_rel, max_ddirection, rnd_vec +): """ Apply random walk to one track. @@ -280,10 +328,12 @@ def _one_rnd_walk(track, nb_synth_tracks, max_shift_ini, max_dspeed_rel, max_ddi latitudes with a wind speed up to TC category 1. """ ens_track = list() - n_dat = track['time'].size + n_dat = track["time"].size n_seg = n_dat - 1 - xy_ini = max_shift_ini * (2 * rnd_vec[:2 * nb_synth_tracks].reshape((2, nb_synth_tracks)) - 1) - [dt] = np.unique(track['time_step']) + xy_ini = max_shift_ini * ( + 2 * rnd_vec[: 2 * nb_synth_tracks].reshape((2, nb_synth_tracks)) - 1 + ) + [dt] = np.unique(track["time_step"]) ens_track.append(track) cutoff_track_ids_ts = [] @@ -293,49 +343,58 @@ def _one_rnd_walk(track, nb_synth_tracks, max_shift_ini, max_dspeed_rel, max_ddi # select angular perturbation for that synthetic track i_start_ang = 2 * nb_synth_tracks + i_ens * n_seg - i_end_ang = i_start_ang + track['time'].size - 1 + i_end_ang = i_start_ang + track["time"].size - 1 # scale by maximum perturbation and time step in hour (temporal-resolution independent) - ang_pert = dt * np.degrees(max_ddirection * (2 * rnd_vec[i_start_ang:i_end_ang] - 1)) + ang_pert = dt * np.degrees( + max_ddirection * (2 * rnd_vec[i_start_ang:i_end_ang] - 1) + ) ang_pert_cum = np.cumsum(ang_pert) # select translational speed perturbation for that synthetic track i_start_trans = 2 * nb_synth_tracks + nb_synth_tracks * n_seg + i_ens * n_seg - i_end_trans = i_start_trans + track['time'].size - 1 + i_end_trans = i_start_trans + track["time"].size - 1 # scale by maximum perturbation and time step in hour (temporal-resolution independent) trans_pert = 1 + max_dspeed_rel * (2 * rnd_vec[i_start_trans:i_end_trans] - 1) # get bearings and angular distance for the original track - bearings = _get_bearing_angle(i_track['lon'].values, i_track['lat'].values) - angular_dist = climada.util.coordinates.dist_approx(i_track['lat'].values[:-1, None], - i_track['lon'].values[:-1, None], - i_track['lat'].values[1:, None], - i_track['lon'].values[1:, None], - 
method="geosphere", - units="degree")[:, 0, 0] + bearings = _get_bearing_angle(i_track["lon"].values, i_track["lat"].values) + angular_dist = climada.util.coordinates.dist_approx( + i_track["lat"].values[:-1, None], + i_track["lon"].values[:-1, None], + i_track["lat"].values[1:, None], + i_track["lon"].values[1:, None], + method="geosphere", + units="degree", + )[:, 0, 0] # apply perturbation to lon / lat - new_lon = np.zeros_like(i_track['lon'].values) - new_lat = np.zeros_like(i_track['lat'].values) - new_lon[0] = i_track['lon'].values[0] + xy_ini[0, i_ens] - new_lat[0] = i_track['lat'].values[0] + xy_ini[1, i_ens] - last_idx = i_track['time'].size + new_lon = np.zeros_like(i_track["lon"].values) + new_lat = np.zeros_like(i_track["lat"].values) + new_lon[0] = i_track["lon"].values[0] + xy_ini[0, i_ens] + new_lat[0] = i_track["lat"].values[0] + xy_ini[1, i_ens] + last_idx = i_track["time"].size for i in range(0, len(new_lon) - 1): - new_lon[i + 1], new_lat[i + 1] = \ - _get_destination_points(new_lon[i], new_lat[i], - bearings[i] + ang_pert_cum[i], - trans_pert[i] * angular_dist[i]) + new_lon[i + 1], new_lat[i + 1] = _get_destination_points( + new_lon[i], + new_lat[i], + bearings[i] + ang_pert_cum[i], + trans_pert[i] * angular_dist[i], + ) # if track crosses latitudinal thresholds (+-70°), # keep up to this segment (i+1), set i+2 as last point, # and discard all further points > i+2. - if i+2 < last_idx and (new_lat[i + 1] > 70 or new_lat[i + 1] < -70): + if i + 2 < last_idx and (new_lat[i + 1] > 70 or new_lat[i + 1] < -70): last_idx = i + 2 # end the track here - max_wind_end = i_track['max_sustained_wind'].values[last_idx] - ss_scale_end = climada.hazard.tc_tracks.set_category(max_wind_end, - i_track.attrs['max_sustained_wind_unit']) + max_wind_end = i_track["max_sustained_wind"].values[last_idx] + ss_scale_end = climada.hazard.tc_tracks.set_category( + max_wind_end, i_track.attrs["max_sustained_wind_unit"] + ) # TC category at ending point should not be higher than 1 - cutoff_txt = (f"{i_track.attrs['name']}_gen{i_ens + 1}" - f" ({climada.hazard.tc_tracks.CAT_NAMES[ss_scale_end]})") + cutoff_txt = ( + f"{i_track.attrs['name']}_gen{i_ens + 1}" + f" ({climada.hazard.tc_tracks.CAT_NAMES[ss_scale_end]})" + ) if ss_scale_end > 1: cutoff_track_ids_tc = cutoff_track_ids_tc + [cutoff_txt] else: @@ -344,12 +403,12 @@ def _one_rnd_walk(track, nb_synth_tracks, max_shift_ini, max_dspeed_rel, max_ddi # make sure longitude values are within (-180, 180) climada.util.coordinates.lon_normalize(new_lon, center=0.0) - i_track['lon'].values = new_lon - i_track['lat'].values = new_lat - i_track.attrs['orig_event_flag'] = False - i_track.attrs['name'] = f"{i_track.attrs['name']}_gen{i_ens + 1}" - i_track.attrs['sid'] = f"{i_track.attrs['sid']}_gen{i_ens + 1}" - i_track.attrs['id_no'] = i_track.attrs['id_no'] + (i_ens + 1) / 100 + i_track["lon"].values = new_lon + i_track["lat"].values = new_lat + i_track.attrs["orig_event_flag"] = False + i_track.attrs["name"] = f"{i_track.attrs['name']}_gen{i_ens + 1}" + i_track.attrs["sid"] = f"{i_track.attrs['sid']}_gen{i_ens + 1}" + i_track.attrs["id_no"] = i_track.attrs["id_no"] + (i_ens + 1) / 100 i_track = i_track.isel(time=slice(None, last_idx)) ens_track.append(i_track) @@ -395,8 +454,11 @@ def _random_uniform_ac(n_ts, autocorr, time_step_h): # scale x to have magnitude [0,1] x = (x + np.sqrt(3)) / (2 * np.sqrt(3)) # resample at target time step - x_ts = np.interp(np.arange(start=0, stop=n_ts_hourly_exact, step=time_step_h), - np.arange(n_ts_hourly), x) + x_ts = 
np.interp( + np.arange(start=0, stop=n_ts_hourly_exact, step=time_step_h), + np.arange(n_ts_hourly), + x, + ) return x_ts @@ -423,9 +485,13 @@ def _h_ac(x, y, theta): x_next : float Next value in the series. """ - gamma = np.abs(np.mod(theta, np.pi) - \ - np.floor((np.mod(theta, np.pi) / (np.pi / 2)) + 0.5) * np.pi / 2) - x_next = 2 * np.sqrt(3) * (_f_ac(np.cos(theta) * x + np.sin(theta) * y, gamma) - 1 / 2) + gamma = np.abs( + np.mod(theta, np.pi) + - np.floor((np.mod(theta, np.pi) / (np.pi / 2)) + 0.5) * np.pi / 2 + ) + x_next = ( + 2 * np.sqrt(3) * (_f_ac(np.cos(theta) * x + np.sin(theta) * y, gamma) - 1 / 2) + ) return x_next @@ -456,13 +522,21 @@ def _f_ac(z, theta): if z >= np.sqrt(3) * (c + s): res = 1 elif z > np.sqrt(3) * (c - s): - res = 1 / 12 / np.sin(2 * theta) * \ - (-3 - z ** 2 + 2 * np.sqrt(3) * z * (c + s) + 9 * np.sin(2 * theta)) + res = ( + 1 + / 12 + / np.sin(2 * theta) + * (-3 - z**2 + 2 * np.sqrt(3) * z * (c + s) + 9 * np.sin(2 * theta)) + ) elif z > np.sqrt(3) * (-c + s): res = 1 / 6 * (3 + np.sqrt(3) * z / c) elif z > -np.sqrt(3) * (c + s): - res = 1 / 12 / np.sin(2 * theta) * \ - (z ** 2 + 2 * np.sqrt(3) * z * (c + s) + 3 * (1 + np.sin(2 * theta))) + res = ( + 1 + / 12 + / np.sin(2 * theta) + * (z**2 + 2 * np.sqrt(3) * z * (c + s) + 3 * (1 + np.sin(2 * theta))) + ) else: res = 0 return res @@ -504,9 +578,11 @@ def _get_bearing_angle(lon, lat): # what to do with the points that don't move? # i.e. where lat_2=lat_1 and lon_2=lon_1? The angle does not matter in # that case because angular distance will be 0. - earth_ang_fix = np.arctan2(np.sin(delta_lon) * np.cos(lat_2), - np.cos(lat_1) * np.sin(lat_2) - \ - np.sin(lat_1) * np.cos(lat_2) * np.cos(delta_lon)) + earth_ang_fix = np.arctan2( + np.sin(delta_lon) * np.cos(lat_2), + np.cos(lat_1) * np.sin(lat_2) + - np.sin(lat_1) * np.cos(lat_2) * np.cos(delta_lon), + ) return np.degrees(earth_ang_fix) @@ -536,15 +612,18 @@ def _get_destination_points(lon, lat, bearing, angular_distance): lon, lat = map(np.radians, [lon, lat]) bearing = np.radians(bearing) angular_distance = np.radians(angular_distance) - lat_2 = np.arcsin(np.sin(lat) * np.cos(angular_distance) + np.cos(lat) * \ - np.sin(angular_distance) * np.cos(bearing)) - lon_2 = lon + np.arctan2(np.sin(bearing) * np.sin(angular_distance) * np.cos(lat), - np.cos(angular_distance) - np.sin(lat) * np.sin(lat_2)) + lat_2 = np.arcsin( + np.sin(lat) * np.cos(angular_distance) + + np.cos(lat) * np.sin(angular_distance) * np.cos(bearing) + ) + lon_2 = lon + np.arctan2( + np.sin(bearing) * np.sin(angular_distance) * np.cos(lat), + np.cos(angular_distance) - np.sin(lat) * np.sin(lat_2), + ) return np.degrees(lon_2), np.degrees(lat_2) -def _calc_land_decay(hist_tracks, land_geom, s_rel=True, check_plot=False, - pool=None): +def _calc_land_decay(hist_tracks, land_geom, s_rel=True, check_plot=False, pool=None): """Compute wind and pressure decay coefficients from historical events Decay is calculated for every TC category according to the formulas: @@ -572,13 +651,16 @@ def _calc_land_decay(hist_tracks, land_geom, s_rel=True, check_plot=False, """ if len(hist_tracks) < 100: - LOGGER.warning('For the calibration of the landfall decay ' - 'it is recommended to provide as many historical ' - 'tracks as possible, but only %s historical tracks ' - 'were provided. 
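
_get_destination_points above is the standard spherical destination-point formula: given a start point, a bearing and an angular distance, it returns the end point of that great-circle step, and the synthetic-track walk above applies it with perturbed bearings and perturbed step lengths. A standalone sketch for one made-up step:

# Sketch only: one great-circle step from an invented track point.
import numpy as np

def destination(lon, lat, bearing_deg, ang_dist_deg):
    lon, lat = np.radians(lon), np.radians(lat)
    bearing, delta = np.radians(bearing_deg), np.radians(ang_dist_deg)
    lat2 = np.arcsin(np.sin(lat) * np.cos(delta)
                     + np.cos(lat) * np.sin(delta) * np.cos(bearing))
    lon2 = lon + np.arctan2(np.sin(bearing) * np.sin(delta) * np.cos(lat),
                            np.cos(delta) - np.sin(lat) * np.sin(lat2))
    return np.degrees(lon2), np.degrees(lat2)

# half a degree of arc towards the north-east
print(destination(-80.0, 25.0, 45.0, 0.5))   # roughly (-79.61, 25.35)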
' - 'For a more robust calculation consider using ' - 'a larger number of tracks or set ' - '`use_global_decay_params` to True', len(hist_tracks)) + LOGGER.warning( + "For the calibration of the landfall decay " + "it is recommended to provide as many historical " + "tracks as possible, but only %s historical tracks " + "were provided. " + "For a more robust calculation consider using " + "a larger number of tracks or set " + "`use_global_decay_params` to True", + len(hist_tracks), + ) # Key is Saffir-Simpson scale # values are lists of wind/wind at landfall @@ -590,13 +672,17 @@ def _calc_land_decay(hist_tracks, land_geom, s_rel=True, check_plot=False, x_val = dict() if pool: - dec_val = pool.map(_decay_values, hist_tracks, itertools.repeat(land_geom), - itertools.repeat(s_rel), - chunksize=max(min(len(hist_tracks) // pool.ncpus, 1000), 1)) + dec_val = pool.map( + _decay_values, + hist_tracks, + itertools.repeat(land_geom), + itertools.repeat(s_rel), + chunksize=max(min(len(hist_tracks) // pool.ncpus, 1000), 1), + ) else: dec_val = [_decay_values(track, land_geom, s_rel) for track in hist_tracks] - for (tv_lf, tp_lf, tx_val) in dec_val: + for tv_lf, tp_lf, tx_val in dec_val: for key in tv_lf.keys(): v_lf.setdefault(key, []).extend(tv_lf[key]) p_lf.setdefault(key, ([], [])) @@ -611,8 +697,9 @@ def _calc_land_decay(hist_tracks, land_geom, s_rel=True, check_plot=False, return v_rel, p_rel -def _apply_land_decay(tracks, v_rel, p_rel, land_geom, s_rel=True, - check_plot=False, pool=None): +def _apply_land_decay( + tracks, v_rel, p_rel, land_geom, s_rel=True, check_plot=False, pool=None +): """Compute wind and pressure decay due to landfall in synthetic tracks. Parameters @@ -632,28 +719,36 @@ def _apply_land_decay(tracks, v_rel, p_rel, land_geom, s_rel=True, """ sy_tracks = [track for track in tracks if not track.orig_event_flag] if not sy_tracks: - raise ValueError('No synthetic tracks contained. Synthetic tracks' - ' are needed.') + raise ValueError( + "No synthetic tracks contained. Synthetic tracks" " are needed." 
+ ) if not v_rel or not p_rel: - LOGGER.info('No decay coefficients.') + LOGGER.info("No decay coefficients.") return if check_plot: orig_wind, orig_pres = [], [] for track in sy_tracks: - orig_wind.append(np.copy(track['max_sustained_wind'].values)) - orig_pres.append(np.copy(track['central_pressure'].values)) + orig_wind.append(np.copy(track["max_sustained_wind"].values)) + orig_pres.append(np.copy(track["central_pressure"].values)) if pool: chunksize = max(min(len(tracks) // pool.ncpus, 1000), 1) - tracks = pool.map(_apply_decay_coeffs, tracks, - itertools.repeat(v_rel), itertools.repeat(p_rel), - itertools.repeat(land_geom), itertools.repeat(s_rel), - chunksize=chunksize) + tracks = pool.map( + _apply_decay_coeffs, + tracks, + itertools.repeat(v_rel), + itertools.repeat(p_rel), + itertools.repeat(land_geom), + itertools.repeat(s_rel), + chunksize=chunksize, + ) else: - tracks = [_apply_decay_coeffs(track, v_rel, p_rel, land_geom, s_rel) - for track in tracks] + tracks = [ + _apply_decay_coeffs(track, v_rel, p_rel, land_geom, s_rel) + for track in tracks + ] for track in tracks: if track.orig_event_flag: @@ -696,35 +791,35 @@ def _decay_values(track, land_geom, s_rel): sea_land_idx, land_sea_idx = climada.hazard.tc_tracks._get_landfall_idx(track) if sea_land_idx.size: for sea_land, land_sea in zip(sea_land_idx, land_sea_idx): - v_landfall = track['max_sustained_wind'][sea_land - 1].values - ss_scale = climada.hazard.tc_tracks.set_category(v_landfall, - track.attrs['max_sustained_wind_unit']) + v_landfall = track["max_sustained_wind"][sea_land - 1].values + ss_scale = climada.hazard.tc_tracks.set_category( + v_landfall, track.attrs["max_sustained_wind_unit"] + ) - v_land = track['max_sustained_wind'][sea_land - 1:land_sea].values + v_land = track["max_sustained_wind"][sea_land - 1 : land_sea].values if v_land[0] > 0: v_land = (v_land[1:] / v_land[0]).tolist() else: v_land = v_land[1:].tolist() - p_landfall = float(track['central_pressure'][sea_land - 1].values) - p_land = track['central_pressure'][sea_land - 1:land_sea].values + p_landfall = float(track["central_pressure"][sea_land - 1].values) + p_land = track["central_pressure"][sea_land - 1 : land_sea].values p_land = (p_land[1:] / p_land[0]).tolist() - p_land_s = _calc_decay_ps_value( - track, p_landfall, land_sea - 1, s_rel) + p_land_s = _calc_decay_ps_value(track, p_landfall, land_sea - 1, s_rel) p_land_s = len(p_land) * [p_land_s] if ss_scale not in v_lf: - v_lf[ss_scale] = array.array('f', v_land) - p_lf[ss_scale] = (array.array('f', p_land_s), - array.array('f', p_land)) - x_val[ss_scale] = array.array('f', - track['dist_since_lf'][sea_land:land_sea]) + v_lf[ss_scale] = array.array("f", v_land) + p_lf[ss_scale] = (array.array("f", p_land_s), array.array("f", p_land)) + x_val[ss_scale] = array.array( + "f", track["dist_since_lf"][sea_land:land_sea] + ) else: v_lf[ss_scale].extend(v_land) p_lf[ss_scale][0].extend(p_land_s) p_lf[ss_scale][1].extend(p_land) - x_val[ss_scale].extend(track['dist_since_lf'][sea_land:land_sea]) + x_val[ss_scale].extend(track["dist_since_lf"][sea_land:land_sea]) return v_lf, p_lf, x_val @@ -753,7 +848,7 @@ def _decay_calc_coeff(x_val, v_lf, p_lf): v_rel : dict p_rel : dict """ - warnings.filterwarnings('ignore') + warnings.filterwarnings("ignore") v_rel = dict() p_rel = dict() for ss_scale, val_lf in v_lf.items(): @@ -781,29 +876,36 @@ def _decay_calc_coeff(x_val, v_lf, p_lf): scale_fill = np.array(list(p_rel.keys())) if not scale_fill.size: - LOGGER.info('No historical track with landfall.') + 
LOGGER.info("No historical track with landfall.") return v_rel, p_rel for ss_scale, ss_name in climada.hazard.tc_tracks.CAT_NAMES.items(): if ss_scale not in p_rel: close_scale = scale_fill[np.argmin(np.abs(scale_fill - ss_scale))] close_name = climada.hazard.tc_tracks.CAT_NAMES[close_scale] - LOGGER.debug('No historical track of category %s with landfall. ' - 'Decay parameters from category %s taken.', - ss_name, close_name) + LOGGER.debug( + "No historical track of category %s with landfall. " + "Decay parameters from category %s taken.", + ss_name, + close_name, + ) v_rel[ss_scale] = v_rel[close_scale] p_rel[ss_scale] = p_rel[close_scale] elif v_rel[ss_scale] < 0: - raise ValueError('The calibration of landfall decay for wind speed resulted in' - f' a wind speed increase for TC category {ss_name}.' - ' This behaviour is unphysical. Please use a larger number of tracks' - ' or use global paramaters by setting `use_global_decay_params` to' - ' `True`') + raise ValueError( + "The calibration of landfall decay for wind speed resulted in" + f" a wind speed increase for TC category {ss_name}." + " This behaviour is unphysical. Please use a larger number of tracks" + " or use global paramaters by setting `use_global_decay_params` to" + " `True`" + ) elif p_rel[ss_scale][0] < 0 or p_rel[ss_scale][1] < 0: - raise ValueError('The calibration of landfall decay for central pressure resulted in' - f' a pressure decrease for TC category {ss_name}.' - ' This behaviour is unphysical. Please use a larger number of tracks' - ' or use global paramaters by setting `use_global_decay_params` to' - ' `True`') + raise ValueError( + "The calibration of landfall decay for central pressure resulted in" + f" a pressure decrease for TC category {ss_name}." + " This behaviour is unphysical. 
Please use a larger number of tracks" + " or use global paramaters by setting `use_global_decay_params` to" + " `True`" + ) return v_rel, p_rel @@ -812,28 +914,44 @@ def _check_decay_values_plot(x_val, v_lf, p_lf, v_rel, p_rel): """Generate one graph with wind decay and an other with central pressure decay, true and approximated.""" # One graph per TC category - for track_cat, color in zip(v_lf.keys(), - cm_mp.rainbow(np.linspace(0, 1, len(v_lf)))): + for track_cat, color in zip( + v_lf.keys(), cm_mp.rainbow(np.linspace(0, 1, len(v_lf))) + ): _, axes = plt.subplots(2, 1) x_eval = np.linspace(0, np.max(x_val[track_cat]), 20) - axes[0].set_xlabel('Distance from landfall (km)') - axes[0].set_ylabel('Max sustained wind\nrelative to landfall') - axes[0].set_title(f'Wind, TC cat {climada.hazard.tc_tracks.CAT_NAMES[track_cat]}') - axes[0].plot(x_val[track_cat], v_lf[track_cat], '*', c=color, - label=climada.hazard.tc_tracks.CAT_NAMES[track_cat]) - axes[0].plot(x_eval, _decay_v_function(v_rel[track_cat], x_eval), - '-', c=color) - - axes[1].set_xlabel('Distance from landfall (km)') - axes[1].set_ylabel('Central pressure\nrelative to landfall') - axes[1].set_title(f'Pressure, TC cat {climada.hazard.tc_tracks.CAT_NAMES[track_cat]}') - axes[1].plot(x_val[track_cat], p_lf[track_cat][1], '*', c=color, - label=climada.hazard.tc_tracks.CAT_NAMES[track_cat]) + axes[0].set_xlabel("Distance from landfall (km)") + axes[0].set_ylabel("Max sustained wind\nrelative to landfall") + axes[0].set_title( + f"Wind, TC cat {climada.hazard.tc_tracks.CAT_NAMES[track_cat]}" + ) + axes[0].plot( + x_val[track_cat], + v_lf[track_cat], + "*", + c=color, + label=climada.hazard.tc_tracks.CAT_NAMES[track_cat], + ) + axes[0].plot(x_eval, _decay_v_function(v_rel[track_cat], x_eval), "-", c=color) + + axes[1].set_xlabel("Distance from landfall (km)") + axes[1].set_ylabel("Central pressure\nrelative to landfall") + axes[1].set_title( + f"Pressure, TC cat {climada.hazard.tc_tracks.CAT_NAMES[track_cat]}" + ) + axes[1].plot( + x_val[track_cat], + p_lf[track_cat][1], + "*", + c=color, + label=climada.hazard.tc_tracks.CAT_NAMES[track_cat], + ) axes[1].plot( x_eval, _decay_p_function(p_rel[track_cat][0], p_rel[track_cat][1], x_eval), - '-', c=color) + "-", + c=color, + ) def _apply_decay_coeffs(track, v_rel, p_rel, land_geom, s_rel): @@ -868,51 +986,61 @@ def _apply_decay_coeffs(track, v_rel, p_rel, land_geom, s_rel): sea_land_idx, land_sea_idx = climada.hazard.tc_tracks._get_landfall_idx(track) if not sea_land_idx.size: return track - for idx, (sea_land, land_sea) \ - in enumerate(zip(sea_land_idx, land_sea_idx)): - v_landfall = track['max_sustained_wind'][sea_land - 1].values - p_landfall = float(track['central_pressure'][sea_land - 1].values) - ss_scale = climada.hazard.tc_tracks.set_category(v_landfall, - track.attrs['max_sustained_wind_unit']) + for idx, (sea_land, land_sea) in enumerate(zip(sea_land_idx, land_sea_idx)): + v_landfall = track["max_sustained_wind"][sea_land - 1].values + p_landfall = float(track["central_pressure"][sea_land - 1].values) + ss_scale = climada.hazard.tc_tracks.set_category( + v_landfall, track.attrs["max_sustained_wind_unit"] + ) if land_sea - sea_land == 1: continue S = _calc_decay_ps_value(track, p_landfall, land_sea - 1, s_rel) if S <= 1: # central_pressure at start of landfall > env_pres after landfall: # set central_pressure to environmental pressure during whole lf - track['central_pressure'][sea_land:land_sea] = \ - track['environmental_pressure'][sea_land:land_sea] + 
track["central_pressure"][sea_land:land_sea] = track[ + "environmental_pressure" + ][sea_land:land_sea] else: - p_decay = _decay_p_function(S, p_rel[ss_scale][1], - track['dist_since_lf'][sea_land:land_sea].values) + p_decay = _decay_p_function( + S, p_rel[ss_scale][1], track["dist_since_lf"][sea_land:land_sea].values + ) # dont apply decay if it would decrease central pressure if np.any(p_decay < 1): - LOGGER.info('Landfall decay would decrease pressure for ' - 'track id %s, leading to an intensification ' - 'of the Tropical Cyclone. This behaviour is ' - 'unphysical and therefore landfall decay is not ' - 'applied in this case.', - track.sid) - p_decay[p_decay < 1] = (track['central_pressure'][sea_land:land_sea][p_decay < 1] - / p_landfall) - track['central_pressure'][sea_land:land_sea] = p_landfall * p_decay - - v_decay = _decay_v_function(v_rel[ss_scale], - track['dist_since_lf'][sea_land:land_sea].values) + LOGGER.info( + "Landfall decay would decrease pressure for " + "track id %s, leading to an intensification " + "of the Tropical Cyclone. This behaviour is " + "unphysical and therefore landfall decay is not " + "applied in this case.", + track.sid, + ) + p_decay[p_decay < 1] = ( + track["central_pressure"][sea_land:land_sea][p_decay < 1] + / p_landfall + ) + track["central_pressure"][sea_land:land_sea] = p_landfall * p_decay + + v_decay = _decay_v_function( + v_rel[ss_scale], track["dist_since_lf"][sea_land:land_sea].values + ) # dont apply decay if it would increase wind speeds if np.any(v_decay > 1): # should not happen unless v_rel is negative - LOGGER.info('Landfall decay would increase wind speed for ' - 'track id %s. This behavious in unphysical and ' - 'therefore landfall decay is not applied in this ' - 'case.', - track['sid']) - v_decay[v_decay > 1] = (track['max_sustained_wind'][sea_land:land_sea][v_decay > 1] - / v_landfall) - track['max_sustained_wind'][sea_land:land_sea] = v_landfall * v_decay + LOGGER.info( + "Landfall decay would increase wind speed for " + "track id %s. 
This behavious in unphysical and " + "therefore landfall decay is not applied in this " + "case.", + track["sid"], + ) + v_decay[v_decay > 1] = ( + track["max_sustained_wind"][sea_land:land_sea][v_decay > 1] / v_landfall + ) + track["max_sustained_wind"][sea_land:land_sea] = v_landfall * v_decay # correct values of sea after a landfall (until next landfall, if any) - if land_sea < track['time'].size: + if land_sea < track["time"].size: if idx + 1 < sea_land_idx.size: # if there is a next landfall, correct until last point before # reaching land again @@ -920,25 +1048,34 @@ def _apply_decay_coeffs(track, v_rel, p_rel, land_geom, s_rel): else: # if there is no further landfall, correct until the end of # the track - end_cor = track['time'].size + end_cor = track["time"].size rndn = 0.1 * float(np.abs(np.random.normal(size=1) * 5) + 6) - r_diff = track['central_pressure'][land_sea].values - \ - track['central_pressure'][land_sea - 1].values + rndn - track['central_pressure'][land_sea:end_cor] += - r_diff + r_diff = ( + track["central_pressure"][land_sea].values + - track["central_pressure"][land_sea - 1].values + + rndn + ) + track["central_pressure"][land_sea:end_cor] += -r_diff rndn = rndn * 10 # mean value 10 - r_diff = track['max_sustained_wind'][land_sea].values - \ - track['max_sustained_wind'][land_sea - 1].values - rndn - track['max_sustained_wind'][land_sea:end_cor] += - r_diff + r_diff = ( + track["max_sustained_wind"][land_sea].values + - track["max_sustained_wind"][land_sea - 1].values + - rndn + ) + track["max_sustained_wind"][land_sea:end_cor] += -r_diff # correct limits - warnings.filterwarnings('ignore') - cor_p = track['central_pressure'].values > track['environmental_pressure'].values - track['central_pressure'][cor_p] = track['environmental_pressure'][cor_p] - track['max_sustained_wind'][track['max_sustained_wind'] < 0] = 0 + warnings.filterwarnings("ignore") + cor_p = ( + track["central_pressure"].values > track["environmental_pressure"].values + ) + track["central_pressure"][cor_p] = track["environmental_pressure"][cor_p] + track["max_sustained_wind"][track["max_sustained_wind"] < 0] = 0 - track.attrs['category'] = climada.hazard.tc_tracks.set_category( - track['max_sustained_wind'].values, track.attrs['max_sustained_wind_unit']) + track.attrs["category"] = climada.hazard.tc_tracks.set_category( + track["max_sustained_wind"].values, track.attrs["max_sustained_wind_unit"] + ) return track @@ -947,25 +1084,40 @@ def _check_apply_decay_plot(all_tracks, syn_orig_wind, syn_orig_pres): Plot wind and presure for unchanged historical tracks.""" # Plot synthetic tracks sy_tracks = [track for track in all_tracks if not track.orig_event_flag] - graph_v_b, graph_v_a, graph_p_b, graph_p_a, graph_pd_a, graph_ped_a = \ - _check_apply_decay_syn_plot(sy_tracks, syn_orig_wind, - syn_orig_pres) + graph_v_b, graph_v_a, graph_p_b, graph_p_a, graph_pd_a, graph_ped_a = ( + _check_apply_decay_syn_plot(sy_tracks, syn_orig_wind, syn_orig_pres) + ) # Plot historic tracks hist_tracks = [track for track in all_tracks if track.orig_event_flag] - graph_hv, graph_hp, graph_hpd_a, graph_hped_a = \ - _check_apply_decay_hist_plot(hist_tracks) + graph_hv, graph_hp, graph_hpd_a, graph_hped_a = _check_apply_decay_hist_plot( + hist_tracks + ) # Put legend and fix size scale_thresholds = climada.hazard.tc_tracks.SAFFIR_SIM_CAT - leg_lines = [Line2D([0], [0], color=climada.hazard.tc_tracks.CAT_COLORS[i_col], lw=2) - for i_col in range(len(scale_thresholds))] - leg_lines.append(Line2D([0], [0], color='k', lw=2)) 
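Editor's note (not part of the patch): the hunks above only reformat _apply_decay_coeffs, so the underlying landfall-decay model is easy to lose in the whitespace noise. The sketch below restates it. The closed forms of _decay_v_function and _decay_p_function are not shown in this diff; they are inferred here from _solve_decay_p_function and from how their results are rescaled by the landfall values, so treat the exact expressions as an assumption and check climada/hazard/tc_tracks_synth.py. Coefficients and landfall values are made up for illustration.

    import numpy as np

    def decay_v(a_coef, dist_since_lf):
        # assumed form: max sustained wind relative to its landfall value,
        # decaying exponentially with distance travelled over land (km)
        return np.exp(-a_coef * dist_since_lf)

    def decay_p(s_coef, b_coef, dist_since_lf):
        # assumed form: central pressure relative to its landfall value,
        # relaxing towards s_coef = p_env / p_landfall (cf. _calc_decay_ps_value);
        # _solve_decay_p_function is the inverse of this expression solved for b_coef
        return s_coef - (s_coef - 1.0) * np.exp(-b_coef * dist_since_lf)

    # applying (hypothetical) per-category coefficients to one landfall segment
    dist = np.array([0.0, 50.0, 150.0, 300.0])               # km since landfall
    v_landfall, p_landfall = 85.0, 950.0                     # kn, mb at landfall
    s_val = 1010.0 / p_landfall                              # env. / landfall pressure
    v_over_land = v_landfall * decay_v(0.004, dist)          # decreases over land
    p_over_land = p_landfall * decay_p(s_val, 0.005, dist)   # rises towards p_env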
- leg_names = [climada.hazard.tc_tracks.CAT_NAMES[i_col] - for i_col in sorted(climada.hazard.tc_tracks.CAT_NAMES.keys())] - leg_names.append('Sea') - all_gr = [graph_v_a, graph_v_b, graph_p_a, graph_p_b, graph_ped_a, - graph_pd_a, graph_hv, graph_hp, graph_hpd_a, graph_hped_a] + leg_lines = [ + Line2D([0], [0], color=climada.hazard.tc_tracks.CAT_COLORS[i_col], lw=2) + for i_col in range(len(scale_thresholds)) + ] + leg_lines.append(Line2D([0], [0], color="k", lw=2)) + leg_names = [ + climada.hazard.tc_tracks.CAT_NAMES[i_col] + for i_col in sorted(climada.hazard.tc_tracks.CAT_NAMES.keys()) + ] + leg_names.append("Sea") + all_gr = [ + graph_v_a, + graph_v_b, + graph_p_a, + graph_p_b, + graph_ped_a, + graph_pd_a, + graph_hv, + graph_hp, + graph_hpd_a, + graph_hped_a, + ] for graph in all_gr: graph.axs[0].legend(leg_lines, leg_names) fig, _ = graph.get_elems() @@ -974,9 +1126,9 @@ def _check_apply_decay_plot(all_tracks, syn_orig_wind, syn_orig_pres): def _calc_decay_ps_value(track, p_landfall, pos, s_rel): if s_rel: - p_land_s = track['environmental_pressure'][pos].values + p_land_s = track["environmental_pressure"][pos].values else: - p_land_s = track['central_pressure'][pos].values + p_land_s = track["central_pressure"][pos].values return float(p_land_s / p_landfall) @@ -1001,78 +1153,99 @@ def _solve_decay_p_function(ps_y, p_y, x_val): return -np.log((ps_y - p_y) / (ps_y - 1.0)) / x_val -def _check_apply_decay_syn_plot(sy_tracks, syn_orig_wind, - syn_orig_pres): +def _check_apply_decay_syn_plot(sy_tracks, syn_orig_wind, syn_orig_pres): """Plot winds and pressures of synthetic tracks before and after correction.""" # pylint: disable=protected-access _, graph_v_b = plt.subplots() - graph_v_b.set_title('Wind before land decay correction') - graph_v_b.set_xlabel('Node number') - graph_v_b.set_ylabel('Max sustained wind (kn)') + graph_v_b.set_title("Wind before land decay correction") + graph_v_b.set_xlabel("Node number") + graph_v_b.set_ylabel("Max sustained wind (kn)") _, graph_v_a = plt.subplots() - graph_v_a.set_title('Wind after land decay correction') - graph_v_a.set_xlabel('Node number') - graph_v_a.set_ylabel('Max sustained wind (kn)') + graph_v_a.set_title("Wind after land decay correction") + graph_v_a.set_xlabel("Node number") + graph_v_a.set_ylabel("Max sustained wind (kn)") _, graph_p_b = plt.subplots() - graph_p_b.set_title('Pressure before land decay correctionn') - graph_p_b.set_xlabel('Node number') - graph_p_b.set_ylabel('Central pressure (mb)') + graph_p_b.set_title("Pressure before land decay correctionn") + graph_p_b.set_xlabel("Node number") + graph_p_b.set_ylabel("Central pressure (mb)") _, graph_p_a = plt.subplots() - graph_p_a.set_title('Pressure after land decay correctionn') - graph_p_a.set_xlabel('Node number') - graph_p_a.set_ylabel('Central pressure (mb)') + graph_p_a.set_title("Pressure after land decay correctionn") + graph_p_a.set_xlabel("Node number") + graph_p_a.set_ylabel("Central pressure (mb)") _, graph_pd_a = plt.subplots() - graph_pd_a.set_title('Relative pressure after land decay correction') - graph_pd_a.set_xlabel('Distance from landfall (km)') - graph_pd_a.set_ylabel('Central pressure relative to landfall') + graph_pd_a.set_title("Relative pressure after land decay correction") + graph_pd_a.set_xlabel("Distance from landfall (km)") + graph_pd_a.set_ylabel("Central pressure relative to landfall") _, graph_ped_a = plt.subplots() graph_ped_a.set_title( - 'Environmental - central pressure after land decay correction') - graph_ped_a.set_xlabel('Distance 
from landfall (km)') - graph_ped_a.set_ylabel('Environmental pressure - Central pressure (mb)') + "Environmental - central pressure after land decay correction" + ) + graph_ped_a.set_xlabel("Distance from landfall (km)") + graph_ped_a.set_ylabel("Environmental pressure - Central pressure (mb)") - for track, orig_wind, orig_pres in \ - zip(sy_tracks, syn_orig_wind, syn_orig_pres): + for track, orig_wind, orig_pres in zip(sy_tracks, syn_orig_wind, syn_orig_pres): sea_land_idx, land_sea_idx = climada.hazard.tc_tracks._get_landfall_idx(track) if sea_land_idx.size: for sea_land, land_sea in zip(sea_land_idx, land_sea_idx): - v_lf = track['max_sustained_wind'][sea_land - 1].values - p_lf = track['central_pressure'][sea_land - 1].values + v_lf = track["max_sustained_wind"][sea_land - 1].values + p_lf = track["central_pressure"][sea_land - 1].values scale_thresholds = climada.hazard.tc_tracks.SAFFIR_SIM_CAT ss_scale_idx = np.where(v_lf < scale_thresholds)[0][0] - on_land = np.arange(track['time'].size)[sea_land:land_sea] - - graph_v_a.plot(on_land, track['max_sustained_wind'][on_land], - 'o', c=climada.hazard.tc_tracks.CAT_COLORS[ss_scale_idx]) - graph_v_b.plot(on_land, orig_wind[on_land], - 'o', c=climada.hazard.tc_tracks.CAT_COLORS[ss_scale_idx]) - graph_p_a.plot(on_land, track['central_pressure'][on_land], - 'o', c=climada.hazard.tc_tracks.CAT_COLORS[ss_scale_idx]) - graph_p_b.plot(on_land, orig_pres[on_land], - 'o', c=climada.hazard.tc_tracks.CAT_COLORS[ss_scale_idx]) - graph_pd_a.plot(track['dist_since_lf'][on_land], - track['central_pressure'][on_land] / p_lf, - 'o', c=climada.hazard.tc_tracks.CAT_COLORS[ss_scale_idx]) - graph_ped_a.plot(track['dist_since_lf'][on_land], - track['environmental_pressure'][on_land] - - track['central_pressure'][on_land], - 'o', c=climada.hazard.tc_tracks.CAT_COLORS[ss_scale_idx]) - - on_sea = np.arange(track['time'].size)[~track['on_land']] - graph_v_a.plot(on_sea, track['max_sustained_wind'][on_sea], - 'o', c='k', markersize=5) - graph_v_b.plot(on_sea, orig_wind[on_sea], - 'o', c='k', markersize=5) - graph_p_a.plot(on_sea, track['central_pressure'][on_sea], - 'o', c='k', markersize=5) - graph_p_b.plot(on_sea, orig_pres[on_sea], - 'o', c='k', markersize=5) + on_land = np.arange(track["time"].size)[sea_land:land_sea] + + graph_v_a.plot( + on_land, + track["max_sustained_wind"][on_land], + "o", + c=climada.hazard.tc_tracks.CAT_COLORS[ss_scale_idx], + ) + graph_v_b.plot( + on_land, + orig_wind[on_land], + "o", + c=climada.hazard.tc_tracks.CAT_COLORS[ss_scale_idx], + ) + graph_p_a.plot( + on_land, + track["central_pressure"][on_land], + "o", + c=climada.hazard.tc_tracks.CAT_COLORS[ss_scale_idx], + ) + graph_p_b.plot( + on_land, + orig_pres[on_land], + "o", + c=climada.hazard.tc_tracks.CAT_COLORS[ss_scale_idx], + ) + graph_pd_a.plot( + track["dist_since_lf"][on_land], + track["central_pressure"][on_land] / p_lf, + "o", + c=climada.hazard.tc_tracks.CAT_COLORS[ss_scale_idx], + ) + graph_ped_a.plot( + track["dist_since_lf"][on_land], + track["environmental_pressure"][on_land] + - track["central_pressure"][on_land], + "o", + c=climada.hazard.tc_tracks.CAT_COLORS[ss_scale_idx], + ) + + on_sea = np.arange(track["time"].size)[~track["on_land"]] + graph_v_a.plot( + on_sea, track["max_sustained_wind"][on_sea], "o", c="k", markersize=5 + ) + graph_v_b.plot(on_sea, orig_wind[on_sea], "o", c="k", markersize=5) + graph_p_a.plot( + on_sea, track["central_pressure"][on_sea], "o", c="k", markersize=5 + ) + graph_p_b.plot(on_sea, orig_pres[on_sea], "o", c="k", markersize=5) return 
graph_v_b, graph_v_a, graph_p_b, graph_p_a, graph_pd_a, graph_ped_a @@ -1081,51 +1254,68 @@ def _check_apply_decay_hist_plot(hist_tracks): """Plot winds and pressures of historical tracks.""" # pylint: disable=protected-access _, graph_hv = plt.subplots() - graph_hv.set_title('Historical wind') - graph_hv.set_xlabel('Node number') - graph_hv.set_ylabel('Max sustained wind (kn)') + graph_hv.set_title("Historical wind") + graph_hv.set_xlabel("Node number") + graph_hv.set_ylabel("Max sustained wind (kn)") _, graph_hp = plt.subplots() - graph_hp.set_title('Historical pressure') - graph_hp.set_xlabel('Node number') - graph_hp.set_ylabel('Central pressure (mb)') + graph_hp.set_title("Historical pressure") + graph_hp.set_xlabel("Node number") + graph_hp.set_ylabel("Central pressure (mb)") _, graph_hpd_a = plt.subplots() - graph_hpd_a.set_title('Historical relative pressure') - graph_hpd_a.set_xlabel('Distance from landfall (km)') - graph_hpd_a.set_ylabel('Central pressure relative to landfall') + graph_hpd_a.set_title("Historical relative pressure") + graph_hpd_a.set_xlabel("Distance from landfall (km)") + graph_hpd_a.set_ylabel("Central pressure relative to landfall") _, graph_hped_a = plt.subplots() - graph_hped_a.set_title('Historical environmental - central pressure') - graph_hped_a.set_xlabel('Distance from landfall (km)') - graph_hped_a.set_ylabel('Environmental pressure - Central pressure (mb)') + graph_hped_a.set_title("Historical environmental - central pressure") + graph_hped_a.set_xlabel("Distance from landfall (km)") + graph_hped_a.set_ylabel("Environmental pressure - Central pressure (mb)") for track in hist_tracks: sea_land_idx, land_sea_idx = climada.hazard.tc_tracks._get_landfall_idx(track) if sea_land_idx.size: for sea_land, land_sea in zip(sea_land_idx, land_sea_idx): scale_thresholds = climada.hazard.tc_tracks.SAFFIR_SIM_CAT - ss_scale_idx = np.where(track['max_sustained_wind'][sea_land - 1].values - < scale_thresholds)[0][0] - on_land = np.arange(track['time'].size)[sea_land:land_sea] - - graph_hv.add_curve(on_land, track['max_sustained_wind'][on_land], - 'o', c=climada.hazard.tc_tracks.CAT_COLORS[ss_scale_idx]) - graph_hp.add_curve(on_land, track['central_pressure'][on_land], - 'o', c=climada.hazard.tc_tracks.CAT_COLORS[ss_scale_idx]) - graph_hpd_a.plot(track['dist_since_lf'][on_land], - track['central_pressure'][on_land] - / track['central_pressure'][sea_land - 1].values, - 'o', c=climada.hazard.tc_tracks.CAT_COLORS[ss_scale_idx]) - graph_hped_a.plot(track['dist_since_lf'][on_land], - track['environmental_pressure'][on_land] - - track['central_pressure'][on_land], - 'o', c=climada.hazard.tc_tracks.CAT_COLORS[ss_scale_idx]) - - on_sea = np.arange(track['time'].size)[~track.on_land] - graph_hp.plot(on_sea, track['central_pressure'][on_sea], - 'o', c='k', markersize=5) - graph_hv.plot(on_sea, track['max_sustained_wind'][on_sea], - 'o', c='k', markersize=5) + ss_scale_idx = np.where( + track["max_sustained_wind"][sea_land - 1].values < scale_thresholds + )[0][0] + on_land = np.arange(track["time"].size)[sea_land:land_sea] + + graph_hv.add_curve( + on_land, + track["max_sustained_wind"][on_land], + "o", + c=climada.hazard.tc_tracks.CAT_COLORS[ss_scale_idx], + ) + graph_hp.add_curve( + on_land, + track["central_pressure"][on_land], + "o", + c=climada.hazard.tc_tracks.CAT_COLORS[ss_scale_idx], + ) + graph_hpd_a.plot( + track["dist_since_lf"][on_land], + track["central_pressure"][on_land] + / track["central_pressure"][sea_land - 1].values, + "o", + 
c=climada.hazard.tc_tracks.CAT_COLORS[ss_scale_idx], + ) + graph_hped_a.plot( + track["dist_since_lf"][on_land], + track["environmental_pressure"][on_land] + - track["central_pressure"][on_land], + "o", + c=climada.hazard.tc_tracks.CAT_COLORS[ss_scale_idx], + ) + + on_sea = np.arange(track["time"].size)[~track.on_land] + graph_hp.plot( + on_sea, track["central_pressure"][on_sea], "o", c="k", markersize=5 + ) + graph_hv.plot( + on_sea, track["max_sustained_wind"][on_sea], "o", c="k", markersize=5 + ) return graph_hv, graph_hp, graph_hpd_a, graph_hped_a diff --git a/climada/hazard/test/__init__.py b/climada/hazard/test/__init__.py index 7bc33d61f..10a572415 100755 --- a/climada/hazard/test/__init__.py +++ b/climada/hazard/test/__init__.py @@ -21,10 +21,10 @@ import shutil -from climada.util.constants import SYSTEM_DIR +from climada.hazard.tc_tracks import IBTRACS_FILE, IBTRACS_URL from climada.util.api_client import Client +from climada.util.constants import SYSTEM_DIR from climada.util.files_handler import download_ftp -from climada.hazard.tc_tracks import IBTRACS_FILE, IBTRACS_URL def download_ibtracs(): @@ -36,11 +36,17 @@ def download_ibtracs(): return # Nothing to do try: - download_ftp(f'{IBTRACS_URL}/{IBTRACS_FILE}', IBTRACS_FILE) + download_ftp(f"{IBTRACS_URL}/{IBTRACS_FILE}", IBTRACS_FILE) shutil.move(IBTRACS_FILE, SYSTEM_DIR) - except ValueError: # plan b: download an old version of that file from the climada api + except ( + ValueError + ): # plan b: download an old version of that file from the climada api client = Client() - dsinfo = client.get_dataset_info(name="IBTrACS", version="v04r00", status="external") - [fileinfo] = [fi for fi in dsinfo.files if fi.file_name == 'IBTrACS.ALL.v04r00.nc'] + dsinfo = client.get_dataset_info( + name="IBTrACS", version="v04r00", status="external" + ) + [fileinfo] = [ + fi for fi in dsinfo.files if fi.file_name == "IBTrACS.ALL.v04r00.nc" + ] client._download_file(local_path=SYSTEM_DIR, fileinfo=fileinfo) diff --git a/climada/hazard/test/data/trac_short_test.csv b/climada/hazard/test/data/trac_short_test.csv index 79defb690..bacbd8c99 100644 --- a/climada/hazard/test/data/trac_short_test.csv +++ b/climada/hazard/test/data/trac_short_test.csv @@ -7,4 +7,4 @@ cgps_lat,cgps_lon,data_provider,gen_basin,ibtracsID,isotime,model,msize,ngps_lat 12.3,-31,hurdat_atl,NA,1951239N12334,1951082812,H08,101,12.3,-32.3,1,-999,1010,-999,0.1,0,6,25 12.3,-32.3,hurdat_atl,NA,1951239N12334,1951082818,H08,101,12.3,-33.6,1,-999,1010,-999,0.1,0,6,25 12.3,-33.6,hurdat_atl,NA,1951239N12334,1951082900,H08,101,12.3,-34.9,1,-999,1010,-999,0.1,0,6,25 -12.3,-34.9,hurdat_atl,NA,1951239N12334,1951082906,H08,101,12.3,-36.3,1,-999,1010,-999,0.1,0,6,25 \ No newline at end of file +12.3,-34.9,hurdat_atl,NA,1951239N12334,1951082906,H08,101,12.3,-36.3,1,-999,1010,-999,0.1,0,6,25 diff --git a/climada/hazard/test/test_base.py b/climada/hazard/test/test_base.py index 585832219..cf4c8e99b 100644 --- a/climada/hazard/test/test_base.py +++ b/climada/hazard/test/test_base.py @@ -20,28 +20,26 @@ """ import unittest - from pathlib import Path + import numpy as np -from scipy import sparse from pathos.pools import ProcessPool as Pool +from scipy import sparse +import climada.util.coordinates as u_coord +import climada.util.dates_times as u_dt from climada import CONFIG from climada.hazard.base import Hazard from climada.hazard.centroids.centr import Centroids -import climada.util.dates_times as u_dt -from climada.util.constants import HAZ_TEMPLATE_XLS -import climada.util.coordinates as u_coord 
- from climada.test import get_test_file +from climada.util.constants import HAZ_TEMPLATE_XLS - -DATA_DIR :Path = CONFIG.hazard.test_data.dir() +DATA_DIR: Path = CONFIG.hazard.test_data.dir() """ Directory for writing (and subsequent reading) of temporary files created during tests. """ -HAZ_TEST_TC :Path = get_test_file('test_tc_florida') +HAZ_TEST_TC: Path = get_test_file("test_tc_florida") """ Hazard test file from Data API: Hurricanes from 1851 to 2011 over Florida with 100 centroids. Fraction is empty. Format: HDF5. @@ -49,30 +47,28 @@ def dummy_hazard(): - fraction = sparse.csr_matrix([[0.02, 0.03, 0.04], - [0.01, 0.01, 0.01], - [0.3, 0.1, 0.0], - [0.3, 0.2, 0.0]]) - intensity = sparse.csr_matrix([[0.2, 0.3, 0.4], - [0.1, 0.1, 0.01], - [4.3, 2.1, 1.0], - [5.3, 0.2, 0.0]]) + fraction = sparse.csr_matrix( + [[0.02, 0.03, 0.04], [0.01, 0.01, 0.01], [0.3, 0.1, 0.0], [0.3, 0.2, 0.0]] + ) + intensity = sparse.csr_matrix( + [[0.2, 0.3, 0.4], [0.1, 0.1, 0.01], [4.3, 2.1, 1.0], [5.3, 0.2, 0.0]] + ) return Hazard( "TC", intensity=intensity, fraction=fraction, - centroids=Centroids( - lat=np.array([1, 3, 5]), lon=np.array([2, 4, 6])), + centroids=Centroids(lat=np.array([1, 3, 5]), lon=np.array([2, 4, 6])), event_id=np.array([1, 2, 3, 4]), - event_name=['ev1', 'ev2', 'ev3', 'ev4'], + event_name=["ev1", "ev2", "ev3", "ev4"], date=np.array([1, 2, 3, 4]), orig=np.array([True, False, False, True]), frequency=np.array([0.1, 0.5, 0.5, 0.2]), - frequency_unit='1/week', - units='m/s', + frequency_unit="1/week", + units="m/s", ) + class TestLoader(unittest.TestCase): """Test loading functions from the Hazard class""" @@ -87,7 +83,7 @@ def setUp(self): "TC", centroids=centroids, event_id=np.array([1, 2, 3]), - event_name=['A', 'B', 'C'], + event_name=["A", "B", "C"], frequency=np.array([1, 2, 3]), # events x centroids intensity=sparse.csr_matrix([[1, 2], [1, 2], [1, 2]]), @@ -107,7 +103,7 @@ def test_init_empty_fraction(self): event_id=self.hazard.event_id, event_name=self.hazard.event_name, frequency=self.hazard.frequency, - intensity=self.hazard.intensity + intensity=self.hazard.intensity, ) hazard.check() np.testing.assert_array_equal(hazard.fraction.shape, hazard.intensity.shape) @@ -119,7 +115,7 @@ def test_check_wrongFreq_fail(self): with self.assertRaises(ValueError) as cm: self.hazard.check() - self.assertIn('Invalid Hazard.frequency size: 3 != 2.', str(cm.exception)) + self.assertIn("Invalid Hazard.frequency size: 3 != 2.", str(cm.exception)) def test_check_wrongInten_fail(self): """Wrong hazard definition""" @@ -139,11 +135,11 @@ def test_check_wrongFrac_fail(self): def test_check_wrongEvName_fail(self): """Wrong hazard definition""" - self.hazard.event_name = ['M'] + self.hazard.event_name = ["M"] with self.assertRaises(ValueError) as cm: self.hazard.check() - self.assertIn('Invalid Hazard.event_name size: 3 != 1.', str(cm.exception)) + self.assertIn("Invalid Hazard.event_name size: 3 != 1.", str(cm.exception)) def test_check_wrongId_fail(self): """Wrong hazard definition""" @@ -151,7 +147,7 @@ def test_check_wrongId_fail(self): with self.assertRaises(ValueError) as cm: self.hazard.check() - self.assertIn('There are events with the same identifier.', str(cm.exception)) + self.assertIn("There are events with the same identifier.", str(cm.exception)) def test_check_wrong_date_fail(self): """Wrong hazard definition""" @@ -159,7 +155,7 @@ def test_check_wrong_date_fail(self): with self.assertRaises(ValueError) as cm: self.hazard.check() - self.assertIn('Invalid Hazard.date size: 3 != 2.', 
str(cm.exception)) + self.assertIn("Invalid Hazard.date size: 3 != 2.", str(cm.exception)) def test_check_wrong_orig_fail(self): """Wrong hazard definition""" @@ -167,50 +163,47 @@ def test_check_wrong_orig_fail(self): with self.assertRaises(ValueError) as cm: self.hazard.check() - self.assertIn('Invalid Hazard.orig size: 3 != 4.', str(cm.exception)) + self.assertIn("Invalid Hazard.orig size: 3 != 4.", str(cm.exception)) def test_event_name_to_id_pass(self): """Test event_name_to_id function.""" - haz = Hazard.from_excel(HAZ_TEMPLATE_XLS, haz_type='TC') - self.assertEqual(haz.get_event_id('event001')[0], 1) - self.assertEqual(haz.get_event_id('event084')[0], 84) + haz = Hazard.from_excel(HAZ_TEMPLATE_XLS, haz_type="TC") + self.assertEqual(haz.get_event_id("event001")[0], 1) + self.assertEqual(haz.get_event_id("event084")[0], 84) def test_event_name_to_id_fail(self): """Test event_name_to_id function.""" - haz = Hazard.from_excel(HAZ_TEMPLATE_XLS, haz_type='TC') + haz = Hazard.from_excel(HAZ_TEMPLATE_XLS, haz_type="TC") with self.assertRaises(ValueError) as cm: - haz.get_event_id('1050') - self.assertIn('No event with name: 1050', str(cm.exception)) + haz.get_event_id("1050") + self.assertIn("No event with name: 1050", str(cm.exception)) def test_event_id_to_name_pass(self): """Test event_id_to_name function.""" - haz = Hazard.from_excel(HAZ_TEMPLATE_XLS, haz_type='TC') - self.assertEqual(haz.get_event_name(2), 'event002') - self.assertEqual(haz.get_event_name(48), 'event048') + haz = Hazard.from_excel(HAZ_TEMPLATE_XLS, haz_type="TC") + self.assertEqual(haz.get_event_name(2), "event002") + self.assertEqual(haz.get_event_name(48), "event048") def test_event_id_to_name_fail(self): """Test event_id_to_name function.""" - haz = Hazard.from_excel(HAZ_TEMPLATE_XLS, haz_type='TC') + haz = Hazard.from_excel(HAZ_TEMPLATE_XLS, haz_type="TC") with self.assertRaises(ValueError) as cm: haz.get_event_name(1050) - self.assertIn('No event with id: 1050', str(cm.exception)) + self.assertIn("No event with id: 1050", str(cm.exception)) def test_get_date_strings_pass(self): haz = Hazard.from_hdf5(HAZ_TEST_TC) - haz.event_name[5] = 'HAZEL' - haz.event_name[10] = 'HAZEL' + haz.event_name[5] = "HAZEL" + haz.event_name[10] = "HAZEL" - self.assertEqual(len(haz.get_event_date('HAZEL')), 2) - self.assertEqual(haz.get_event_date('HAZEL')[0], - u_dt.date_to_str(haz.date[5])) - self.assertEqual(haz.get_event_date('HAZEL')[1], - u_dt.date_to_str(haz.date[10])) + self.assertEqual(len(haz.get_event_date("HAZEL")), 2) + self.assertEqual(haz.get_event_date("HAZEL")[0], u_dt.date_to_str(haz.date[5])) + self.assertEqual(haz.get_event_date("HAZEL")[1], u_dt.date_to_str(haz.date[10])) self.assertEqual(haz.get_event_date(2)[0], u_dt.date_to_str(haz.date[1])) self.assertEqual(len(haz.get_event_date()), haz.date.size) - self.assertEqual(haz.get_event_date()[560], - u_dt.date_to_str(haz.date[560])) + self.assertEqual(haz.get_event_date()[560], u_dt.date_to_str(haz.date[560])) def test_check_matrices(self): """Test the check_matrices method""" @@ -238,13 +231,14 @@ def test_check_matrices(self): self.assertEqual(matrix.nnz, 0) self.assertTrue(matrix.has_canonical_format) + class TestRemoveDupl(unittest.TestCase): """Test remove_duplicates method.""" def test_equal_same(self): """Append the same hazard and remove duplicates, obtain initial hazard.""" - haz1 = Hazard.from_excel(HAZ_TEMPLATE_XLS, haz_type='TC') - haz2 = Hazard.from_excel(HAZ_TEMPLATE_XLS, haz_type='TC') + haz1 = Hazard.from_excel(HAZ_TEMPLATE_XLS, haz_type="TC") + haz2 
= Hazard.from_excel(HAZ_TEMPLATE_XLS, haz_type="TC") haz1.append(haz2) haz1.remove_duplicates() haz1.check() @@ -254,8 +248,12 @@ def test_equal_same(self): self.assertEqual(haz1.frequency_unit, haz2.frequency_unit) self.assertTrue(np.array_equal(haz1.date, haz2.date)) self.assertTrue(np.array_equal(haz1.orig, haz2.orig)) - self.assertTrue(np.array_equal(haz1.intensity.toarray(), haz2.intensity.toarray())) - self.assertTrue(np.array_equal(haz1.fraction.toarray(), haz2.fraction.toarray())) + self.assertTrue( + np.array_equal(haz1.intensity.toarray(), haz2.intensity.toarray()) + ) + self.assertTrue( + np.array_equal(haz1.fraction.toarray(), haz2.fraction.toarray()) + ) self.assertTrue((haz1.intensity != haz2.intensity).nnz == 0) self.assertTrue((haz1.fraction != haz2.fraction).nnz == 0) self.assertEqual(haz1.units, haz2.units) @@ -267,22 +265,30 @@ def test_same_events_same(self): fraction in new appended centroids.""" haz1 = dummy_hazard() centroids = Centroids(lat=np.array([7, 9, 11]), lon=np.array([8, 10, 12])) - fraction = sparse.csr_matrix([[0.22, 0.32, 0.44], - [0.11, 0.11, 0.11], - [0.32, 0.11, 0.99], - [0.32, 0.22, 0.88]]) - intensity = sparse.csr_matrix([[0.22, 3.33, 6.44], - [1.11, 0.11, 1.11], - [8.33, 4.11, 4.4], - [9.33, 9.22, 1.77]]) + fraction = sparse.csr_matrix( + [ + [0.22, 0.32, 0.44], + [0.11, 0.11, 0.11], + [0.32, 0.11, 0.99], + [0.32, 0.22, 0.88], + ] + ) + intensity = sparse.csr_matrix( + [ + [0.22, 3.33, 6.44], + [1.11, 0.11, 1.11], + [8.33, 4.11, 4.4], + [9.33, 9.22, 1.77], + ] + ) haz2 = Hazard( "TC", centroids=centroids, event_id=haz1.event_id, event_name=haz1.event_name, frequency=haz1.frequency, - frequency_unit = "1/week", - date = haz1.date, + frequency_unit="1/week", + date=haz1.date, fraction=fraction, intensity=intensity, units="m/s", @@ -295,32 +301,38 @@ def test_same_events_same(self): # expected values haz_res = dummy_hazard() haz_res.intensity = sparse.hstack( - [haz_res.intensity, sparse.csr_matrix((haz_res.intensity.shape[0], 3))], format='csr') + [haz_res.intensity, sparse.csr_matrix((haz_res.intensity.shape[0], 3))], + format="csr", + ) haz_res.fraction = sparse.hstack( - [haz_res.fraction, sparse.csr_matrix((haz_res.fraction.shape[0], 3))], format='csr') - self.assertTrue(np.array_equal(haz_res.intensity.toarray(), - haz1.intensity.toarray())) + [haz_res.fraction, sparse.csr_matrix((haz_res.fraction.shape[0], 3))], + format="csr", + ) + self.assertTrue( + np.array_equal(haz_res.intensity.toarray(), haz1.intensity.toarray()) + ) self.assertTrue(sparse.isspmatrix_csr(haz1.intensity)) - self.assertTrue(np.array_equal(haz_res.fraction.toarray(), - haz1.fraction.toarray())) + self.assertTrue( + np.array_equal(haz_res.fraction.toarray(), haz1.fraction.toarray()) + ) self.assertTrue(sparse.isspmatrix_csr(haz1.fraction)) self.assertEqual(haz1.event_name, haz_res.event_name) self.assertTrue(np.array_equal(haz1.date, haz_res.date)) self.assertTrue(np.array_equal(haz1.orig, haz_res.orig)) - self.assertTrue(np.array_equal(haz1.event_id, - haz_res.event_id)) + self.assertTrue(np.array_equal(haz1.event_id, haz_res.event_id)) self.assertTrue(np.array_equal(haz1.frequency, haz_res.frequency)) self.assertEqual(haz1.frequency_unit, haz_res.frequency_unit) self.assertEqual(haz_res.units, haz1.units) self.assertEqual(haz1.haz_type, haz_res.haz_type) + class TestSelect(unittest.TestCase): """Test select method.""" def test_select_event_name(self): """Test select historical events.""" haz = dummy_hazard() - sel_haz = haz.select(event_names=['ev4', 'ev1']) + sel_haz = 
haz.select(event_names=["ev4", "ev1"]) self.assertTrue(np.array_equal(sel_haz.centroids.coord, haz.centroids.coord)) self.assertEqual(sel_haz.units, haz.units) @@ -329,13 +341,19 @@ def test_select_event_name(self): self.assertTrue(np.array_equal(sel_haz.orig, np.array([True, True]))) self.assertTrue(np.array_equal(sel_haz.frequency, np.array([0.2, 0.1]))) self.assertEqual(sel_haz.frequency_unit, haz.frequency_unit) - self.assertTrue(np.array_equal(sel_haz.fraction.toarray(), - np.array([[0.3, 0.2, 0.0], - [0.02, 0.03, 0.04]]))) - self.assertTrue(np.array_equal(sel_haz.intensity.toarray(), - np.array([[5.3, 0.2, 0.0], - [0.2, 0.3, 0.4]]))) - self.assertEqual(sel_haz.event_name, ['ev4', 'ev1']) + self.assertTrue( + np.array_equal( + sel_haz.fraction.toarray(), + np.array([[0.3, 0.2, 0.0], [0.02, 0.03, 0.04]]), + ) + ) + self.assertTrue( + np.array_equal( + sel_haz.intensity.toarray(), + np.array([[5.3, 0.2, 0.0], [0.2, 0.3, 0.4]]), + ) + ) + self.assertEqual(sel_haz.event_name, ["ev4", "ev1"]) self.assertIsInstance(sel_haz, Hazard) self.assertIsInstance(sel_haz.intensity, sparse.csr_matrix) self.assertIsInstance(sel_haz.fraction, sparse.csr_matrix) @@ -352,13 +370,19 @@ def test_select_event_id(self): self.assertTrue(np.array_equal(sel_haz.orig, np.array([True, True]))) self.assertTrue(np.array_equal(sel_haz.frequency, np.array([0.2, 0.1]))) self.assertEqual(sel_haz.frequency_unit, haz.frequency_unit) - self.assertTrue(np.array_equal(sel_haz.fraction.toarray(), - np.array([[0.3, 0.2, 0.0], - [0.02, 0.03, 0.04]]))) - self.assertTrue(np.array_equal(sel_haz.intensity.toarray(), - np.array([[5.3, 0.2, 0.0], - [0.2, 0.3, 0.4]]))) - self.assertEqual(sel_haz.event_name, ['ev4', 'ev1']) + self.assertTrue( + np.array_equal( + sel_haz.fraction.toarray(), + np.array([[0.3, 0.2, 0.0], [0.02, 0.03, 0.04]]), + ) + ) + self.assertTrue( + np.array_equal( + sel_haz.intensity.toarray(), + np.array([[5.3, 0.2, 0.0], [0.2, 0.3, 0.4]]), + ) + ) + self.assertEqual(sel_haz.event_name, ["ev4", "ev1"]) self.assertIsInstance(sel_haz, Hazard) self.assertIsInstance(sel_haz.intensity, sparse.csr_matrix) self.assertIsInstance(sel_haz.fraction, sparse.csr_matrix) @@ -375,13 +399,19 @@ def test_select_event_id(self): self.assertTrue(np.array_equal(sel_haz.orig, np.array([True, True]))) self.assertTrue(np.array_equal(sel_haz.frequency, np.array([0.2, 0.1]))) self.assertEqual(sel_haz.frequency_unit, haz.frequency_unit) - self.assertTrue(np.array_equal(sel_haz.fraction.toarray(), - np.array([[0.3, 0.2, 0.0], - [0.02, 0.03, 0.04]]))) - self.assertTrue(np.array_equal(sel_haz.intensity.toarray(), - np.array([[5.3, 0.2, 0.0], - [0.2, 0.3, 0.4]]))) - self.assertEqual(sel_haz.event_name, ['ev4', 'ev1']) + self.assertTrue( + np.array_equal( + sel_haz.fraction.toarray(), + np.array([[0.3, 0.2, 0.0], [0.02, 0.03, 0.04]]), + ) + ) + self.assertTrue( + np.array_equal( + sel_haz.intensity.toarray(), + np.array([[5.3, 0.2, 0.0], [0.2, 0.3, 0.4]]), + ) + ) + self.assertEqual(sel_haz.event_name, ["ev4", "ev1"]) self.assertIsInstance(sel_haz, Hazard) self.assertIsInstance(sel_haz.intensity, sparse.csr_matrix) self.assertIsInstance(sel_haz.fraction, sparse.csr_matrix) @@ -398,11 +428,19 @@ def test_select_orig_pass(self): self.assertTrue(np.array_equal(sel_haz.orig, np.array([True, True]))) self.assertTrue(np.array_equal(sel_haz.frequency, np.array([0.1, 0.2]))) self.assertEqual(sel_haz.frequency_unit, haz.frequency_unit) - self.assertTrue(np.array_equal( - sel_haz.fraction.toarray(), np.array([[0.02, 0.03, 0.04], [0.3, 0.2, 0.0]]))) - 
self.assertTrue(np.array_equal( - sel_haz.intensity.toarray(), np.array([[0.2, 0.3, 0.4], [5.3, 0.2, 0.0]]))) - self.assertEqual(sel_haz.event_name, ['ev1', 'ev4']) + self.assertTrue( + np.array_equal( + sel_haz.fraction.toarray(), + np.array([[0.02, 0.03, 0.04], [0.3, 0.2, 0.0]]), + ) + ) + self.assertTrue( + np.array_equal( + sel_haz.intensity.toarray(), + np.array([[0.2, 0.3, 0.4], [5.3, 0.2, 0.0]]), + ) + ) + self.assertEqual(sel_haz.event_name, ["ev1", "ev4"]) self.assertIsInstance(sel_haz, Hazard) self.assertIsInstance(sel_haz.intensity, sparse.csr_matrix) self.assertIsInstance(sel_haz.fraction, sparse.csr_matrix) @@ -419,11 +457,19 @@ def test_select_syn_pass(self): self.assertTrue(np.array_equal(sel_haz.orig, np.array([False, False]))) self.assertTrue(np.array_equal(sel_haz.frequency, np.array([0.5, 0.5]))) self.assertEqual(sel_haz.frequency_unit, haz.frequency_unit) - self.assertTrue(np.array_equal( - sel_haz.fraction.toarray(), np.array([[0.01, 0.01, 0.01], [0.3, 0.1, 0.0]]))) - self.assertTrue(np.array_equal( - sel_haz.intensity.toarray(), np.array([[0.1, 0.1, 0.01], [4.3, 2.1, 1.0]]))) - self.assertEqual(sel_haz.event_name, ['ev2', 'ev3']) + self.assertTrue( + np.array_equal( + sel_haz.fraction.toarray(), + np.array([[0.01, 0.01, 0.01], [0.3, 0.1, 0.0]]), + ) + ) + self.assertTrue( + np.array_equal( + sel_haz.intensity.toarray(), + np.array([[0.1, 0.1, 0.01], [4.3, 2.1, 1.0]]), + ) + ) + self.assertEqual(sel_haz.event_name, ["ev2", "ev3"]) self.assertIsInstance(sel_haz, Hazard) self.assertIsInstance(sel_haz.intensity, sparse.csr_matrix) self.assertIsInstance(sel_haz.fraction, sparse.csr_matrix) @@ -440,15 +486,19 @@ def test_select_date_pass(self): self.assertTrue(np.array_equal(sel_haz.orig, np.array([False, False, True]))) self.assertTrue(np.array_equal(sel_haz.frequency, np.array([0.5, 0.5, 0.2]))) self.assertEqual(sel_haz.frequency_unit, haz.frequency_unit) - self.assertTrue(np.array_equal( - sel_haz.fraction.toarray(), np.array([[0.01, 0.01, 0.01], - [0.3, 0.1, 0.0], - [0.3, 0.2, 0.0]]))) - self.assertTrue(np.array_equal( - sel_haz.intensity.toarray(), np.array([[0.1, 0.1, 0.01], - [4.3, 2.1, 1.0], - [5.3, 0.2, 0.0]]))) - self.assertEqual(sel_haz.event_name, ['ev2', 'ev3', 'ev4']) + self.assertTrue( + np.array_equal( + sel_haz.fraction.toarray(), + np.array([[0.01, 0.01, 0.01], [0.3, 0.1, 0.0], [0.3, 0.2, 0.0]]), + ) + ) + self.assertTrue( + np.array_equal( + sel_haz.intensity.toarray(), + np.array([[0.1, 0.1, 0.01], [4.3, 2.1, 1.0], [5.3, 0.2, 0.0]]), + ) + ) + self.assertEqual(sel_haz.event_name, ["ev2", "ev3", "ev4"]) self.assertIsInstance(sel_haz, Hazard) self.assertIsInstance(sel_haz.intensity, sparse.csr_matrix) self.assertIsInstance(sel_haz.fraction, sparse.csr_matrix) @@ -456,7 +506,7 @@ def test_select_date_pass(self): def test_select_date_str_pass(self): """Test select historical events.""" haz = dummy_hazard() - sel_haz = haz.select(date=('0001-01-02', '0001-01-03')) + sel_haz = haz.select(date=("0001-01-02", "0001-01-03")) self.assertTrue(np.array_equal(sel_haz.centroids.coord, haz.centroids.coord)) self.assertEqual(sel_haz.units, haz.units) @@ -465,11 +515,19 @@ def test_select_date_str_pass(self): self.assertTrue(np.array_equal(sel_haz.orig, np.array([False, False]))) self.assertTrue(np.array_equal(sel_haz.frequency, np.array([0.5, 0.5]))) self.assertEqual(sel_haz.frequency_unit, haz.frequency_unit) - self.assertTrue(np.array_equal( - sel_haz.fraction.toarray(), np.array([[0.01, 0.01, 0.01], [0.3, 0.1, 0.0]]))) - self.assertTrue(np.array_equal( - 
sel_haz.intensity.toarray(), np.array([[0.1, 0.1, 0.01], [4.3, 2.1, 1.0]]))) - self.assertEqual(sel_haz.event_name, ['ev2', 'ev3']) + self.assertTrue( + np.array_equal( + sel_haz.fraction.toarray(), + np.array([[0.01, 0.01, 0.01], [0.3, 0.1, 0.0]]), + ) + ) + self.assertTrue( + np.array_equal( + sel_haz.intensity.toarray(), + np.array([[0.1, 0.1, 0.01], [4.3, 2.1, 1.0]]), + ) + ) + self.assertEqual(sel_haz.event_name, ["ev2", "ev3"]) self.assertIsInstance(sel_haz, Hazard) self.assertIsInstance(sel_haz.intensity, sparse.csr_matrix) self.assertIsInstance(sel_haz.fraction, sparse.csr_matrix) @@ -486,11 +544,19 @@ def test_select_date_and_orig_pass(self): self.assertTrue(np.array_equal(sel_haz.orig, np.array([False, False]))) self.assertTrue(np.array_equal(sel_haz.frequency, np.array([0.5, 0.5]))) self.assertEqual(sel_haz.frequency_unit, haz.frequency_unit) - self.assertTrue(np.array_equal( - sel_haz.fraction.toarray(), np.array([[0.01, 0.01, 0.01], [0.3, 0.1, 0.0]]))) - self.assertTrue(np.array_equal( - sel_haz.intensity.toarray(), np.array([[0.1, 0.1, 0.01], [4.3, 2.1, 1.0]]))) - self.assertEqual(sel_haz.event_name, ['ev2', 'ev3']) + self.assertTrue( + np.array_equal( + sel_haz.fraction.toarray(), + np.array([[0.01, 0.01, 0.01], [0.3, 0.1, 0.0]]), + ) + ) + self.assertTrue( + np.array_equal( + sel_haz.intensity.toarray(), + np.array([[0.1, 0.1, 0.01], [4.3, 2.1, 1.0]]), + ) + ) + self.assertEqual(sel_haz.event_name, ["ev2", "ev3"]) self.assertIsInstance(sel_haz, Hazard) self.assertIsInstance(sel_haz.intensity, sparse.csr_matrix) self.assertIsInstance(sel_haz.fraction, sparse.csr_matrix) @@ -506,7 +572,7 @@ def test_select_date_invalid_pass(self): haz = dummy_hazard() # lists and numpy arrays should work just like tuples - sel_haz = haz.select(date=['0001-01-02', '0001-01-03']) + sel_haz = haz.select(date=["0001-01-02", "0001-01-03"]) self.assertTrue(np.array_equal(sel_haz.date, np.array([2, 3]))) sel_haz = haz.select(date=np.array([2, 4])) self.assertTrue(np.array_equal(sel_haz.date, np.array([2, 3, 4]))) @@ -526,20 +592,25 @@ def test_select_date_invalid_pass(self): def test_select_reg_id_pass(self): """Test select region of centroids.""" haz = dummy_hazard() - haz.centroids.gdf['region_id'] = np.array([5, 7, 9]) + haz.centroids.gdf["region_id"] = np.array([5, 7, 9]) sel_haz = haz.select(date=(2, 4), orig=False, reg_id=9) - self.assertTrue(np.array_equal(sel_haz.centroids.coord.squeeze(), - haz.centroids.coord[2, :])) + self.assertTrue( + np.array_equal(sel_haz.centroids.coord.squeeze(), haz.centroids.coord[2, :]) + ) self.assertEqual(sel_haz.units, haz.units) self.assertTrue(np.array_equal(sel_haz.event_id, np.array([2, 3]))) self.assertTrue(np.array_equal(sel_haz.date, np.array([2, 3]))) self.assertTrue(np.array_equal(sel_haz.orig, np.array([False, False]))) self.assertTrue(np.array_equal(sel_haz.frequency, np.array([0.5, 0.5]))) self.assertEqual(sel_haz.frequency_unit, haz.frequency_unit) - self.assertTrue(np.array_equal(sel_haz.fraction.toarray(), np.array([[0.01], [0.0]]))) - self.assertTrue(np.array_equal(sel_haz.intensity.toarray(), np.array([[0.01], [1.0]]))) - self.assertEqual(sel_haz.event_name, ['ev2', 'ev3']) + self.assertTrue( + np.array_equal(sel_haz.fraction.toarray(), np.array([[0.01], [0.0]])) + ) + self.assertTrue( + np.array_equal(sel_haz.intensity.toarray(), np.array([[0.01], [1.0]])) + ) + self.assertEqual(sel_haz.event_name, ["ev2", "ev3"]) self.assertIsInstance(sel_haz, Hazard) self.assertIsInstance(sel_haz.intensity, sparse.csr_matrix) 
self.assertIsInstance(sel_haz.fraction, sparse.csr_matrix) @@ -547,74 +618,89 @@ def test_select_reg_id_pass(self): def test_select_tight_pass(self): """Test select tight box around hazard""" - #intensity select + # intensity select haz = dummy_hazard() haz.intensity[:, -1] = 0.0 sel_haz = haz.select_tight() - self.assertTrue(np.array_equal(sel_haz.centroids.coord.squeeze(), - haz.centroids.coord[:-1, :])) + self.assertTrue( + np.array_equal( + sel_haz.centroids.coord.squeeze(), haz.centroids.coord[:-1, :] + ) + ) self.assertEqual(sel_haz.units, haz.units) self.assertTrue(np.array_equal(sel_haz.event_id, haz.event_id)) self.assertTrue(np.array_equal(sel_haz.date, haz.date)) self.assertTrue(np.array_equal(sel_haz.orig, haz.orig)) self.assertTrue(np.array_equal(sel_haz.frequency, haz.frequency)) self.assertEqual(sel_haz.frequency_unit, haz.frequency_unit) - self.assertTrue(np.array_equal(sel_haz.fraction.toarray(), - haz.fraction[:,:-1].toarray())) - self.assertTrue(np.array_equal(sel_haz.intensity.toarray(), - haz.intensity[:,:-1].toarray())) + self.assertTrue( + np.array_equal(sel_haz.fraction.toarray(), haz.fraction[:, :-1].toarray()) + ) + self.assertTrue( + np.array_equal(sel_haz.intensity.toarray(), haz.intensity[:, :-1].toarray()) + ) self.assertEqual(sel_haz.event_name, haz.event_name) self.assertIsInstance(sel_haz, Hazard) self.assertIsInstance(sel_haz.intensity, sparse.csr_matrix) self.assertIsInstance(sel_haz.fraction, sparse.csr_matrix) - #fraction select + # fraction select haz = dummy_hazard() haz.fraction[:, -1] = 0.0 - sel_haz = haz.select_tight(val='fraction') + sel_haz = haz.select_tight(val="fraction") - self.assertTrue(np.array_equal(sel_haz.centroids.coord.squeeze(), - haz.centroids.coord[:-1, :])) + self.assertTrue( + np.array_equal( + sel_haz.centroids.coord.squeeze(), haz.centroids.coord[:-1, :] + ) + ) self.assertEqual(sel_haz.units, haz.units) self.assertTrue(np.array_equal(sel_haz.event_id, haz.event_id)) self.assertTrue(np.array_equal(sel_haz.date, haz.date)) self.assertTrue(np.array_equal(sel_haz.orig, haz.orig)) self.assertTrue(np.array_equal(sel_haz.frequency, haz.frequency)) self.assertEqual(sel_haz.frequency_unit, haz.frequency_unit) - self.assertTrue(np.array_equal(sel_haz.fraction.toarray(), - haz.fraction[:,:-1].toarray())) - self.assertTrue(np.array_equal(sel_haz.intensity.toarray(), - haz.intensity[:,:-1].toarray())) + self.assertTrue( + np.array_equal(sel_haz.fraction.toarray(), haz.fraction[:, :-1].toarray()) + ) + self.assertTrue( + np.array_equal(sel_haz.intensity.toarray(), haz.intensity[:, :-1].toarray()) + ) self.assertEqual(sel_haz.event_name, haz.event_name) self.assertIsInstance(sel_haz, Hazard) self.assertIsInstance(sel_haz.intensity, sparse.csr_matrix) self.assertIsInstance(sel_haz.fraction, sparse.csr_matrix) - haz = dummy_hazard() haz.intensity[:, -1] = 0.0 # small buffer: zero field is discarded sel_haz = haz.select_tight(buffer=0.1) - self.assertTrue(np.array_equal(sel_haz.centroids.coord.squeeze(), - haz.centroids.coord[:-1, :])) + self.assertTrue( + np.array_equal( + sel_haz.centroids.coord.squeeze(), haz.centroids.coord[:-1, :] + ) + ) # large buffer: zero field is retained sel_haz = haz.select_tight(buffer=10) - self.assertTrue(np.array_equal(sel_haz.centroids.coord.squeeze(), - haz.centroids.coord)) + self.assertTrue( + np.array_equal(sel_haz.centroids.coord.squeeze(), haz.centroids.coord) + ) self.assertEqual(sel_haz.units, haz.units) self.assertTrue(np.array_equal(sel_haz.event_id, haz.event_id)) 
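Editor's note (not part of the patch): the surrounding hunks only re-wrap the Hazard.select and Hazard.select_tight tests. For orientation, the selection API those tests exercise is summarised below; the calls mirror the tests in this file (dummy_hazard is the module-level fixture defined above) and the concrete values are illustrative only.

    import numpy as np

    haz = dummy_hazard()

    # subset by event name or id; the result follows the requested order
    by_name = haz.select(event_names=["ev4", "ev1"])
    by_id = haz.select(event_id=[4, 1])

    # subset by date range (ordinals or ISO strings) and by historic/synthetic flag
    by_date = haz.select(date=("0001-01-02", "0001-01-03"))
    synthetic = haz.select(orig=False)

    # filters can be combined; reg_id matches the centroids' region_id column
    haz.centroids.gdf["region_id"] = np.array([5, 7, 9])
    regional = haz.select(date=(2, 4), orig=False, reg_id=9)

    # drop centroids whose intensity (or fraction, via val="fraction") is all zero,
    # optionally keeping a buffer around the remaining extent
    trimmed = haz.select_tight(buffer=0.1)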
self.assertTrue(np.array_equal(sel_haz.date, haz.date)) self.assertTrue(np.array_equal(sel_haz.orig, haz.orig)) self.assertTrue(np.array_equal(sel_haz.frequency, haz.frequency)) self.assertEqual(sel_haz.frequency_unit, haz.frequency_unit) - self.assertTrue(np.array_equal(sel_haz.fraction.toarray(), - haz.fraction.toarray())) - self.assertTrue(np.array_equal(sel_haz.intensity.toarray(), - haz.intensity.toarray())) + self.assertTrue( + np.array_equal(sel_haz.fraction.toarray(), haz.fraction.toarray()) + ) + self.assertTrue( + np.array_equal(sel_haz.intensity.toarray(), haz.intensity.toarray()) + ) self.assertEqual(sel_haz.event_name, haz.event_name) self.assertIsInstance(sel_haz, Hazard) self.assertIsInstance(sel_haz.intensity, sparse.csr_matrix) @@ -629,7 +715,7 @@ def test_select_new_fraction_zero(self): with self.assertRaisesRegex( RuntimeError, "Your selection created a Hazard object where the fraction matrix is zero " - "everywhere" + "everywhere", ): hazard.select(event_id=[3, 4], reg_id=[2]) @@ -641,14 +727,16 @@ def test_select_new_fraction_zero(self): selection = hazard.select(event_id=[3, 4], reg_id=[2]) np.testing.assert_array_equal(selection.fraction.toarray(), [[0], [0]]) + class TestAppend(unittest.TestCase): """Test append method.""" def test_append_empty_fill(self): """Append an empty. Obtain initial hazard.""" + def _check_hazard(hazard): # expected values - haz1_orig = Hazard.from_excel(HAZ_TEMPLATE_XLS, haz_type='TC') + haz1_orig = Hazard.from_excel(HAZ_TEMPLATE_XLS, haz_type="TC") self.assertEqual(hazard.event_name, haz1_orig.event_name) self.assertTrue(np.array_equal(hazard.event_id, haz1_orig.event_id)) self.assertTrue(np.array_equal(hazard.date, haz1_orig.date)) @@ -660,16 +748,16 @@ def _check_hazard(hazard): self.assertEqual(hazard.units, haz1_orig.units) self.assertEqual(hazard.haz_type, haz1_orig.haz_type) - haz1 = Hazard.from_excel(HAZ_TEMPLATE_XLS, haz_type='TC') - haz2 = Hazard('TC') - haz2.centroids.geometry.crs = 'epsg:4326' + haz1 = Hazard.from_excel(HAZ_TEMPLATE_XLS, haz_type="TC") + haz2 = Hazard("TC") + haz2.centroids.geometry.crs = "epsg:4326" haz1.append(haz2) haz1.check() _check_hazard(haz1) - haz1 = Hazard.from_excel(HAZ_TEMPLATE_XLS, haz_type='TC') - haz2 = Hazard('TC') - haz2.centroids.geometry.crs = 'epsg:4326' + haz1 = Hazard.from_excel(HAZ_TEMPLATE_XLS, haz_type="TC") + haz2 = Hazard("TC") + haz2.centroids.geometry.crs = "epsg:4326" haz2.append(haz1) haz2.check() _check_hazard(haz2) @@ -677,24 +765,23 @@ def _check_hazard(hazard): def test_same_centroids_extend(self): """Append hazard with same centroids, different events.""" haz1 = dummy_hazard() - fraction = sparse.csr_matrix([[0.2, 0.3, 0.4], - [0.1, 0.1, 0.1], - [0.3, 0.1, 0.9], - [0.3, 0.2, 0.8]]) - intensity = sparse.csr_matrix([[0.2, 3.3, 6.4], - [1.1, 0.1, 1.01], - [8.3, 4.1, 4.0], - [9.3, 9.2, 1.7]]) - haz2 = Hazard('TC', - centroids=haz1.centroids, - event_id=np.array([5, 6, 7, 8]), - event_name=['ev5', 'ev6', 'ev7', 'ev8'], - frequency=np.array([0.9, 0.75, 0.75, 0.22]), - frequency_unit='1/week', - units="m/s", - fraction=fraction, - intensity=intensity, - ) + fraction = sparse.csr_matrix( + [[0.2, 0.3, 0.4], [0.1, 0.1, 0.1], [0.3, 0.1, 0.9], [0.3, 0.2, 0.8]] + ) + intensity = sparse.csr_matrix( + [[0.2, 3.3, 6.4], [1.1, 0.1, 1.01], [8.3, 4.1, 4.0], [9.3, 9.2, 1.7]] + ) + haz2 = Hazard( + "TC", + centroids=haz1.centroids, + event_id=np.array([5, 6, 7, 8]), + event_name=["ev5", "ev6", "ev7", "ev8"], + frequency=np.array([0.9, 0.75, 0.75, 0.22]), + frequency_unit="1/week", + units="m/s", 
+ fraction=fraction, + intensity=intensity, + ) haz1.append(haz2) haz1.check() @@ -714,11 +801,17 @@ def test_same_centroids_extend(self): for i_ev in range(haz1.event_id.size): self.assertTrue(any((haz1.intensity[i_ev].toarray() == exp_inten).all(1))) self.assertTrue(any((haz1.fraction[i_ev].toarray() == exp_frac).all(1))) - self.assertTrue(haz1.event_name[i_ev] in haz1_orig.event_name + haz2.event_name) + self.assertTrue( + haz1.event_name[i_ev] in haz1_orig.event_name + haz2.event_name + ) self.assertTrue(haz1.date[i_ev] in np.append(haz1_orig.date, haz2.date)) self.assertTrue(haz1.orig[i_ev] in np.append(haz1_orig.orig, haz2.orig)) - self.assertTrue(haz1.event_id[i_ev] in np.append(haz1_orig.event_id, haz2.event_id)) - self.assertTrue(haz1.frequency[i_ev] in np.append(haz1_orig.frequency, haz2.frequency)) + self.assertTrue( + haz1.event_id[i_ev] in np.append(haz1_orig.event_id, haz2.event_id) + ) + self.assertTrue( + haz1.frequency[i_ev] in np.append(haz1_orig.frequency, haz2.frequency) + ) self.assertEqual(haz1.centroids.size, 3) self.assertTrue(np.array_equal(haz1.centroids.coord, haz2.centroids.coord)) @@ -728,7 +821,7 @@ def test_incompatible_type_fail(self): """Raise error when append two incompatible hazards.""" haz1 = dummy_hazard() haz2 = dummy_hazard() - haz2.haz_type = 'WS' + haz2.haz_type = "WS" with self.assertRaises(ValueError) as cm: haz1.append(haz2) @@ -736,7 +829,7 @@ def test_incompatible_units_fail(self): """Raise error when append two incompatible hazards.""" haz1 = dummy_hazard() haz2 = dummy_hazard() - haz2.units = 'km/h' + haz2.units = "km/h" with self.assertRaises(ValueError) as cm: haz1.append(haz2) @@ -744,7 +837,7 @@ def test_incompatible_freq_units_fail(self): """Raise error when append two incompatible hazards.""" haz1 = dummy_hazard() haz2 = dummy_hazard() - haz2.frequency_unit = '1/month' + haz2.frequency_unit = "1/month" with self.assertRaises(ValueError) as cm: haz1.append(haz2) @@ -752,26 +845,25 @@ def test_all_different_extend(self): """Append totally different hazard.""" haz1 = dummy_hazard() - fraction = sparse.csr_matrix([[0.2, 0.3, 0.4], - [0.1, 0.1, 0.1], - [0.3, 0.1, 0.9], - [0.3, 0.2, 0.8]]) - intensity = sparse.csr_matrix([[0.2, 3.3, 6.4], - [1.1, 0.1, 1.01], - [8.3, 4.1, 4.0], - [9.3, 9.2, 1.7]]) - haz2 = Hazard('TC', - date=np.ones((4,)), - orig=np.ones((4,)), - centroids=Centroids( - lat=np.array([7, 9, 11]), lon=np.array([8, 10, 12])), - event_id=np.array([5, 6, 7, 8]), - event_name=['ev5', 'ev6', 'ev7', 'ev8'], - frequency=np.array([0.9, 0.75, 0.75, 0.22]), - frequency_unit='1/week', - units='m/s', - intensity=intensity, - fraction=fraction) + fraction = sparse.csr_matrix( + [[0.2, 0.3, 0.4], [0.1, 0.1, 0.1], [0.3, 0.1, 0.9], [0.3, 0.2, 0.8]] + ) + intensity = sparse.csr_matrix( + [[0.2, 3.3, 6.4], [1.1, 0.1, 1.01], [8.3, 4.1, 4.0], [9.3, 9.2, 1.7]] + ) + haz2 = Hazard( + "TC", + date=np.ones((4,)), + orig=np.ones((4,)), + centroids=Centroids(lat=np.array([7, 9, 11]), lon=np.array([8, 10, 12])), + event_id=np.array([5, 6, 7, 8]), + event_name=["ev5", "ev6", "ev7", "ev8"], + frequency=np.array([0.9, 0.75, 0.75, 0.22]), + frequency_unit="1/week", + units="m/s", + intensity=intensity, + fraction=fraction, + ) haz1.append(haz2) haz1.check() @@ -790,11 +882,17 @@ def test_all_different_extend(self): for i_ev in range(haz1.event_id.size): self.assertTrue(any((haz1.intensity[i_ev].toarray() == exp_inten).all(1))) self.assertTrue(any((haz1.fraction[i_ev].toarray() == exp_frac).all(1))) - self.assertTrue(haz1.event_name[i_ev] in 
haz1_orig.event_name + haz2.event_name) + self.assertTrue( + haz1.event_name[i_ev] in haz1_orig.event_name + haz2.event_name + ) self.assertTrue(haz1.date[i_ev] in np.append(haz1_orig.date, haz2.date)) self.assertTrue(haz1.orig[i_ev] in np.append(haz1_orig.orig, haz2.orig)) - self.assertTrue(haz1.event_id[i_ev] in np.append(haz1_orig.event_id, haz2.event_id)) - self.assertTrue(haz1.frequency[i_ev] in np.append(haz1_orig.frequency, haz2.frequency)) + self.assertTrue( + haz1.event_id[i_ev] in np.append(haz1_orig.event_id, haz2.event_id) + ) + self.assertTrue( + haz1.frequency[i_ev] in np.append(haz1_orig.frequency, haz2.frequency) + ) self.assertEqual(haz1.centroids.size, 6) self.assertEqual(haz1_orig.units, haz1.units) @@ -805,25 +903,34 @@ def test_same_events_append(self): """Append hazard with same events (and diff centroids). Events are appended with all new centroids columns.""" haz1 = dummy_hazard() - fraction = sparse.csr_matrix([[0.22, 0.32, 0.44], - [0.11, 0.11, 0.11], - [0.32, 0.11, 0.99], - [0.32, 0.22, 0.88]]) - intensity = sparse.csr_matrix([[0.22, 3.33, 6.44], - [1.11, 0.11, 1.11], - [8.33, 4.11, 4.4], - [9.33, 9.22, 1.77]]) - haz2 = Hazard('TC', - centroids=Centroids( - lat=np.array([7, 9, 11]), lon=np.array([8, 10, 12])), - event_id=haz1.event_id, - event_name=haz1.event_name.copy(), - frequency=haz1.frequency, - frequency_unit=haz1.frequency_unit, - date=haz1.date, - units='m/s', - fraction=fraction, - intensity=intensity) + fraction = sparse.csr_matrix( + [ + [0.22, 0.32, 0.44], + [0.11, 0.11, 0.11], + [0.32, 0.11, 0.99], + [0.32, 0.22, 0.88], + ] + ) + intensity = sparse.csr_matrix( + [ + [0.22, 3.33, 6.44], + [1.11, 0.11, 1.11], + [8.33, 4.11, 4.4], + [9.33, 9.22, 1.77], + ] + ) + haz2 = Hazard( + "TC", + centroids=Centroids(lat=np.array([7, 9, 11]), lon=np.array([8, 10, 12])), + event_id=haz1.event_id, + event_name=haz1.event_name.copy(), + frequency=haz1.frequency, + frequency_unit=haz1.frequency_unit, + date=haz1.date, + units="m/s", + fraction=fraction, + intensity=intensity, + ) haz1.append(haz2) @@ -837,21 +944,19 @@ def test_same_events_append(self): res_frac[0:4, 0:3] = haz1_ori.fraction.toarray() res_frac[4:, 3:] = haz2.fraction.toarray() - self.assertTrue(np.array_equal(res_inten, - haz1.intensity.toarray())) + self.assertTrue(np.array_equal(res_inten, haz1.intensity.toarray())) self.assertTrue(sparse.isspmatrix_csr(haz1.intensity)) - self.assertTrue(np.array_equal(res_frac, - haz1.fraction.toarray())) + self.assertTrue(np.array_equal(res_frac, haz1.fraction.toarray())) self.assertTrue(sparse.isspmatrix_csr(haz1.fraction)) - self.assertEqual(haz1.event_name, - haz1_ori.event_name + haz2.event_name) - self.assertTrue(np.array_equal(haz1.date, - np.append(haz1_ori.date, haz2.date))) - self.assertTrue(np.array_equal(haz1.orig, - np.append(haz1_ori.orig, haz2.orig))) + self.assertEqual(haz1.event_name, haz1_ori.event_name + haz2.event_name) + self.assertTrue(np.array_equal(haz1.date, np.append(haz1_ori.date, haz2.date))) + self.assertTrue(np.array_equal(haz1.orig, np.append(haz1_ori.orig, haz2.orig))) self.assertTrue(np.array_equal(haz1.event_id, np.arange(1, 9))) - self.assertTrue(np.array_equal(haz1.frequency, - np.append(haz1_ori.frequency, haz2.frequency))) + self.assertTrue( + np.array_equal( + haz1.frequency, np.append(haz1_ori.frequency, haz2.frequency) + ) + ) self.assertEqual(haz1_ori.frequency_unit, haz1.frequency_unit) self.assertEqual(haz1_ori.units, haz1.units) @@ -860,40 +965,42 @@ def test_same_events_append(self): def test_concat_pass(self): """Test 
concatenate function.""" - haz_1 = Hazard("TC", - centroids=Centroids( - lat=np.array([1, 3, 5]), lon=np.array([2, 4, 6]), - crs="epsg:4326"), - event_id=np.array([1]), - event_name=['ev1'], - date=np.array([1]), - orig=np.array([True]), - frequency=np.array([1.0]), - frequency_unit='1/week', - fraction=sparse.csr_matrix([[0.02, 0.03, 0.04]]), - intensity=sparse.csr_matrix([[0.2, 0.3, 0.4]]), - units='m/s') - - haz_2 = Hazard("TC", - centroids=Centroids( - lat=np.array([1, 3, 5]), lon=np.array([2, 4, 6]), - crs="epsg:4326"), - event_id=np.array([1]), - event_name=['ev2'], - date=np.array([2]), - orig=np.array([False]), - frequency=np.array([1.0]), - frequency_unit='1/week', - fraction=sparse.csr_matrix([[1.02, 1.03, 1.04]]), - intensity=sparse.csr_matrix([[1.2, 1.3, 1.4]]), - units='m/s') + haz_1 = Hazard( + "TC", + centroids=Centroids( + lat=np.array([1, 3, 5]), lon=np.array([2, 4, 6]), crs="epsg:4326" + ), + event_id=np.array([1]), + event_name=["ev1"], + date=np.array([1]), + orig=np.array([True]), + frequency=np.array([1.0]), + frequency_unit="1/week", + fraction=sparse.csr_matrix([[0.02, 0.03, 0.04]]), + intensity=sparse.csr_matrix([[0.2, 0.3, 0.4]]), + units="m/s", + ) + + haz_2 = Hazard( + "TC", + centroids=Centroids( + lat=np.array([1, 3, 5]), lon=np.array([2, 4, 6]), crs="epsg:4326" + ), + event_id=np.array([1]), + event_name=["ev2"], + date=np.array([2]), + orig=np.array([False]), + frequency=np.array([1.0]), + frequency_unit="1/week", + fraction=sparse.csr_matrix([[1.02, 1.03, 1.04]]), + intensity=sparse.csr_matrix([[1.2, 1.3, 1.4]]), + units="m/s", + ) haz = Hazard.concat([haz_1, haz_2]) - hres_frac = sparse.csr_matrix([[0.02, 0.03, 0.04], - [1.02, 1.03, 1.04]]) - hres_inten = sparse.csr_matrix([[0.2, 0.3, 0.4], - [1.2, 1.3, 1.4]]) + hres_frac = sparse.csr_matrix([[0.02, 0.03, 0.04], [1.02, 1.03, 1.04]]) + hres_inten = sparse.csr_matrix([[0.2, 0.3, 0.4], [1.2, 1.3, 1.4]]) self.assertIsInstance(haz, Hazard) self.assertTrue(sparse.isspmatrix_csr(haz.intensity)) @@ -906,7 +1013,7 @@ def test_concat_pass(self): self.assertTrue(np.array_equal(haz.orig, np.array([True, False]))) self.assertTrue(np.array_equal(haz.date, np.array([1, 2]))) self.assertTrue(np.array_equal(haz.event_id, np.array([1, 2]))) - self.assertEqual(haz.event_name, ['ev1', 'ev2']) + self.assertEqual(haz.event_name, ["ev1", "ev2"]) self.assertTrue(np.array_equal(haz.centroids.coord, haz_1.centroids.coord)) self.assertTrue(np.array_equal(haz.centroids.coord, haz_2.centroids.coord)) self.assertEqual(haz.centroids.crs, haz_1.centroids.crs) @@ -914,38 +1021,37 @@ def test_concat_pass(self): def test_append_new_var_pass(self): """New variable appears if hazard to append is empty.""" haz = dummy_hazard() - haz.frequency_unit = haz.get_default('frequency_unit') + haz.frequency_unit = haz.get_default("frequency_unit") haz.new_var = np.ones(haz.size) - app_haz = Hazard('TC') + app_haz = Hazard("TC") app_haz.append(haz) - self.assertIn('new_var', app_haz.__dict__) + self.assertIn("new_var", app_haz.__dict__) def test_append_raise_type_error(self): """Raise error if hazards of different class""" - haz1 = Hazard('TC', units='m/s') + haz1 = Hazard("TC", units="m/s") from climada.hazard import TropCyclone + haz2 = TropCyclone() with self.assertRaises(TypeError): haz1.append(haz2) def test_concat_raise_value_error(self): """Raise error if hazards with different units, type or crs""" - haz1 = Hazard('TC', units='m/s', - centroids=Centroids(lat=[],lon=[], crs="epsg:4326")) - haz3 = Hazard('EQ') - with 
self.assertRaisesRegex(ValueError, - "different types"): + haz1 = Hazard( + "TC", units="m/s", centroids=Centroids(lat=[], lon=[], crs="epsg:4326") + ) + haz3 = Hazard("EQ") + with self.assertRaisesRegex(ValueError, "different types"): Hazard.concat([haz1, haz3]) - haz4 = Hazard('TC', units='cm') - with self.assertRaisesRegex(ValueError, - "different units"): + haz4 = Hazard("TC", units="cm") + with self.assertRaisesRegex(ValueError, "different units"): Hazard.concat([haz1, haz4]) - - haz5 = Hazard('TC', centroids=Centroids(lat=[],lon=[], crs="epsg:7777")) - with self.assertRaisesRegex(ValueError, - "different CRS"): + + haz5 = Hazard("TC", centroids=Centroids(lat=[], lon=[], crs="epsg:7777")) + with self.assertRaisesRegex(ValueError, "different CRS"): Hazard.concat([haz1, haz5]) def test_change_centroids(self): @@ -954,17 +1060,19 @@ def test_change_centroids(self): on_land = np.array([True, True]) cent1 = Centroids(lat=lat, lon=lon, on_land=on_land) - haz_1 = Hazard('TC', - centroids=cent1, - event_id=np.array([1]), - event_name=['ev1'], - date=np.array([1]), - orig=np.array([True]), - frequency=np.array([1.0]), - frequency_unit='1/week', - fraction=sparse.csr_matrix([[0.02, 0.03]]), - intensity=sparse.csr_matrix([[0.2, 0.3]]), - units='m/s',) + haz_1 = Hazard( + "TC", + centroids=cent1, + event_id=np.array([1]), + event_name=["ev1"], + date=np.array([1]), + orig=np.array([True]), + frequency=np.array([1.0]), + frequency_unit="1/week", + fraction=sparse.csr_matrix([[0.02, 0.03]]), + intensity=sparse.csr_matrix([[0.2, 0.3]]), + units="m/s", + ) lat2, lon2 = np.array([0, 1, 3]), np.array([0, -1, 3]) on_land2 = np.array([True, True, False]) @@ -972,12 +1080,14 @@ def test_change_centroids(self): haz_2 = haz_1.change_centroids(cent2) - self.assertTrue(np.array_equal(haz_2.intensity.toarray(), - np.array([[0.2, 0.3, 0.]]))) - self.assertTrue(np.array_equal(haz_2.fraction.toarray(), - np.array([[0.02, 0.03, 0.]]))) + self.assertTrue( + np.array_equal(haz_2.intensity.toarray(), np.array([[0.2, 0.3, 0.0]])) + ) + self.assertTrue( + np.array_equal(haz_2.fraction.toarray(), np.array([[0.02, 0.03, 0.0]])) + ) self.assertTrue(np.array_equal(haz_2.event_id, np.array([1]))) - self.assertTrue(np.array_equal(haz_2.event_name, ['ev1'])) + self.assertTrue(np.array_equal(haz_2.event_name, ["ev1"])) self.assertTrue(np.array_equal(haz_2.orig, [True])) """Test error for projection""" @@ -987,8 +1097,9 @@ def test_change_centroids(self): with self.assertRaises(ValueError) as cm: haz_1.change_centroids(cent3, threshold=100) - self.assertIn('two hazard centroids are mapped to the same centroids', str(cm.exception)) - + self.assertIn( + "two hazard centroids are mapped to the same centroids", str(cm.exception) + ) def test_change_centroids_raster(self): """Set new centroids for hazard""" @@ -996,30 +1107,33 @@ def test_change_centroids_raster(self): on_land = np.array([True, True]) cent1 = Centroids(lat=lat, lon=lon, on_land=on_land) - haz_1 = Hazard('TC', - centroids=cent1, - event_id=np.array([1]), - event_name=['ev1'], - date=np.array([1]), - orig=np.array([True]), - frequency=np.array([1.0]), - frequency_unit='1/week', - fraction=sparse.csr_matrix([[0.02, 0.03]]), - intensity=sparse.csr_matrix([[0.2, 0.3]]), - units='m/s',) - + haz_1 = Hazard( + "TC", + centroids=cent1, + event_id=np.array([1]), + event_name=["ev1"], + date=np.array([1]), + orig=np.array([True]), + frequency=np.array([1.0]), + frequency_unit="1/week", + fraction=sparse.csr_matrix([[0.02, 0.03]]), + intensity=sparse.csr_matrix([[0.2, 0.3]]), + 
units="m/s", + ) """Test with raster centroids""" cent4 = Centroids.from_pnt_bounds(points_bounds=(-1, 0, 0, 1), res=1) haz_4 = haz_1.change_centroids(cent4) - self.assertTrue(np.array_equal(haz_4.intensity.toarray(), - np.array([[0.3, 0.0, 0.0, 0.2]]))) - self.assertTrue(np.array_equal(haz_4.fraction.toarray(), - np.array([[0.03, 0.0, 0.0, 0.02]]))) + self.assertTrue( + np.array_equal(haz_4.intensity.toarray(), np.array([[0.3, 0.0, 0.0, 0.2]])) + ) + self.assertTrue( + np.array_equal(haz_4.fraction.toarray(), np.array([[0.03, 0.0, 0.0, 0.02]])) + ) self.assertTrue(np.array_equal(haz_4.event_id, np.array([1]))) - self.assertTrue(np.array_equal(haz_4.event_name, ['ev1'])) + self.assertTrue(np.array_equal(haz_4.event_name, ["ev1"])) self.assertTrue(np.array_equal(haz_4.orig, [True])) @@ -1048,24 +1162,17 @@ def test_ref_all_pass(self): self.assertAlmostEqual(inten_stats[1][66], 70.608592953031405) self.assertAlmostEqual(inten_stats[3][33], 88.510983305123631) self.assertAlmostEqual(inten_stats[2][99], 79.717518054203623) - + def test_local_return_period(self): """Compare local return periods against reference.""" haz = dummy_hazard() - haz.intensity = sparse.csr_matrix([ - [1., 5., 1.], - [2., 2., 0.] - ]) - haz.frequency = np.full(4, 1.) - threshold_intensities = np.array([1., 2., 3.]) + haz.intensity = sparse.csr_matrix([[1.0, 5.0, 1.0], [2.0, 2.0, 0.0]]) + haz.frequency = np.full(4, 1.0) + threshold_intensities = np.array([1.0, 2.0, 3.0]) return_stats, _, _ = haz.local_return_period(threshold_intensities) np.testing.assert_allclose( return_stats[return_stats.columns[1:]].values.T, - np.array([ - [0.5, 0.5, 1.], - [1., 0.5, np.nan], - [np.nan, 1., np.nan] - ]) + np.array([[0.5, 0.5, 1.0], [1.0, 0.5, np.nan], [np.nan, 1.0, np.nan]]), ) @@ -1077,26 +1184,75 @@ def test_ref_pass(self): haz = Hazard.from_hdf5(HAZ_TEST_TC) orig_year_set = haz.calc_year_set() - self.assertTrue(np.array_equal(np.array(list(orig_year_set.keys())), - np.arange(1851, 2012))) - self.assertTrue(np.array_equal(orig_year_set[1851], - np.array([1, 11, 21, 31]))) - self.assertTrue(np.array_equal(orig_year_set[1958], - np.array([8421, 8431, 8441, 8451, 8461, 8471, 8481, - 8491, 8501, 8511]))) - self.assertTrue(np.array_equal(orig_year_set[1986], - np.array([11101, 11111, 11121, 11131, 11141, 11151]))) - self.assertTrue(np.array_equal(orig_year_set[1997], - np.array([12221, 12231, 12241, 12251, 12261, 12271, - 12281, 12291]))) - self.assertTrue(np.array_equal(orig_year_set[2006], - np.array([13571, 13581, 13591, 13601, 13611, 13621, - 13631, 13641, 13651, 13661]))) - self.assertTrue(np.array_equal(orig_year_set[2010], - np.array([14071, 14081, 14091, 14101, 14111, 14121, - 14131, 14141, 14151, 14161, 14171, 14181, - 14191, 14201, 14211, 14221, 14231, 14241, - 14251]))) + self.assertTrue( + np.array_equal(np.array(list(orig_year_set.keys())), np.arange(1851, 2012)) + ) + self.assertTrue(np.array_equal(orig_year_set[1851], np.array([1, 11, 21, 31]))) + self.assertTrue( + np.array_equal( + orig_year_set[1958], + np.array([8421, 8431, 8441, 8451, 8461, 8471, 8481, 8491, 8501, 8511]), + ) + ) + self.assertTrue( + np.array_equal( + orig_year_set[1986], + np.array([11101, 11111, 11121, 11131, 11141, 11151]), + ) + ) + self.assertTrue( + np.array_equal( + orig_year_set[1997], + np.array([12221, 12231, 12241, 12251, 12261, 12271, 12281, 12291]), + ) + ) + self.assertTrue( + np.array_equal( + orig_year_set[2006], + np.array( + [ + 13571, + 13581, + 13591, + 13601, + 13611, + 13621, + 13631, + 13641, + 13651, + 13661, + ] + ), + ) + ) 
+ self.assertTrue( + np.array_equal( + orig_year_set[2010], + np.array( + [ + 14071, + 14081, + 14091, + 14101, + 14111, + 14121, + 14131, + 14141, + 14151, + 14161, + 14171, + 14181, + 14191, + 14201, + 14211, + 14221, + 14231, + 14241, + 14251, + ] + ), + ) + ) class TestCentroids(unittest.TestCase): @@ -1104,72 +1260,88 @@ class TestCentroids(unittest.TestCase): def test_reproject_vector_pass(self): """Test reproject_vector""" - haz_fl = Hazard('FL', - event_id=np.array([1]), - date=np.array([1]), - frequency=np.array([1]), - orig=np.array([1]), - event_name=['1'], - intensity=sparse.csr_matrix(np.array([0.5, 0.2, 0.1])), - fraction=sparse.csr_matrix(np.array([0.5, 0.2, 0.1]) / 2), - centroids=Centroids( - lat=np.array([1, 2, 3]), lon=np.array([1, 2, 3])),) + haz_fl = Hazard( + "FL", + event_id=np.array([1]), + date=np.array([1]), + frequency=np.array([1]), + orig=np.array([1]), + event_name=["1"], + intensity=sparse.csr_matrix(np.array([0.5, 0.2, 0.1])), + fraction=sparse.csr_matrix(np.array([0.5, 0.2, 0.1]) / 2), + centroids=Centroids(lat=np.array([1, 2, 3]), lon=np.array([1, 2, 3])), + ) haz_fl.check() - haz_fl.reproject_vector(dst_crs='epsg:2202') - self.assertTrue(np.allclose(haz_fl.centroids.lat, - np.array([331585.4099637291, 696803.88, 1098649.44]))) - self.assertTrue(np.allclose(haz_fl.centroids.lon, - np.array([11625664.37925186, 11939560.43, 12244857.13]))) - self.assertTrue(u_coord.equal_crs(haz_fl.centroids.crs, 'epsg:2202')) - self.assertTrue(np.allclose(haz_fl.intensity.toarray(), np.array([0.5, 0.2, 0.1]))) - self.assertTrue(np.allclose(haz_fl.fraction.toarray(), np.array([0.5, 0.2, 0.1]) / 2)) + haz_fl.reproject_vector(dst_crs="epsg:2202") + self.assertTrue( + np.allclose( + haz_fl.centroids.lat, + np.array([331585.4099637291, 696803.88, 1098649.44]), + ) + ) + self.assertTrue( + np.allclose( + haz_fl.centroids.lon, + np.array([11625664.37925186, 11939560.43, 12244857.13]), + ) + ) + self.assertTrue(u_coord.equal_crs(haz_fl.centroids.crs, "epsg:2202")) + self.assertTrue( + np.allclose(haz_fl.intensity.toarray(), np.array([0.5, 0.2, 0.1])) + ) + self.assertTrue( + np.allclose(haz_fl.fraction.toarray(), np.array([0.5, 0.2, 0.1]) / 2) + ) def dummy_step_impf(haz): from climada.entity import ImpactFunc + intensity = (0, 1, haz.intensity.max()) impf = ImpactFunc.from_step_impf(intensity, haz_type=haz.haz_type) return impf + class TestImpactFuncs(unittest.TestCase): """Test methods mainly for computing impacts""" + def test_haz_type(self): """Test haz_type property""" haz = dummy_hazard() - self.assertEqual(haz.haz_type, 'TC') - haz.haz_type = 'random' - self.assertEqual(haz.haz_type, 'random') + self.assertEqual(haz.haz_type, "TC") + haz.haz_type = "random" + self.assertEqual(haz.haz_type, "random") def test_cent_exp_col(self): """Test return of centroid exposures column""" haz = dummy_hazard() - self.assertEqual(haz.centr_exp_col, 'centr_TC') - haz.haz_type = 'random' - self.assertEqual(haz.centr_exp_col, 'centr_random') + self.assertEqual(haz.centr_exp_col, "centr_TC") + haz.haz_type = "random" + self.assertEqual(haz.centr_exp_col, "centr_random") haz = Hazard() - self.assertEqual(haz.centr_exp_col, 'centr_') + self.assertEqual(haz.centr_exp_col, "centr_") def test_get_mdr(self): haz = dummy_hazard() impf = dummy_step_impf(haz) - #single index + # single index for idx in range(3): cent_idx = np.array([idx]) mdr = haz.get_mdr(cent_idx, impf) true_mdr = np.digitize(haz.intensity[:, idx].toarray(), [0, 1]) - 1 np.testing.assert_array_almost_equal(mdr.toarray(), true_mdr) - 
#repeated index + # repeated index cent_idx = np.array([0, 0, 1]) mdr = haz.get_mdr(cent_idx, impf) true_mdr = np.digitize(haz.intensity[:, cent_idx].toarray(), [0, 1]) - 1 np.testing.assert_array_almost_equal(mdr.toarray(), true_mdr) - #mdr is not zero at 0 + # mdr is not zero at 0 impf.mdd += 1 - #single index + # single index for idx in range(3): cent_idx = np.array([idx]) mdr = haz.get_mdr(cent_idx, impf) @@ -1178,7 +1350,7 @@ def test_get_mdr(self): # #case with zeros everywhere cent_idx = np.array([0, 0, 1]) - impf.mdd=np.array([0,0,0,1]) + impf.mdd = np.array([0, 0, 0, 1]) # how many non-zeros values are expected num_nz_values = 5 mdr = haz.get_mdr(cent_idx, impf) @@ -1194,16 +1366,16 @@ def test_get_paa(self): true_paa = np.ones(haz.intensity[:, idx].shape) np.testing.assert_array_almost_equal(paa.toarray(), true_paa) - #repeated index + # repeated index idx = [0, 0] cent_idx = np.array(idx) paa = haz.get_paa(cent_idx, impf) true_paa = np.ones(haz.intensity[:, idx].shape) np.testing.assert_array_almost_equal(paa.toarray(), true_paa) - #paa is not zero at 0 + # paa is not zero at 0 impf.paa += 1 - #repeated index + # repeated index idx = [0, 0, 1] cent_idx = np.array(idx) paa = haz.get_paa(cent_idx, impf) @@ -1213,27 +1385,27 @@ def test_get_paa(self): def test_get_fraction(self): haz = dummy_hazard() - #standard index + # standard index idx = [0, 1] cent_idx = np.array(idx) frac = haz._get_fraction(cent_idx) true_frac = haz.fraction[:, idx] np.testing.assert_array_equal(frac.toarray(), true_frac.toarray()) - #repeated index + # repeated index idx = [0, 0] cent_idx = np.array(idx) frac = haz._get_fraction(cent_idx) true_frac = haz.fraction[:, idx] np.testing.assert_array_equal(frac.toarray(), true_frac.toarray()) - #index is None + # index is None cent_idx = None frac = haz._get_fraction(cent_idx) true_frac = haz.fraction np.testing.assert_array_equal(frac.toarray(), true_frac.toarray()) - #test empty fraction + # test empty fraction haz.fraction = sparse.csr_matrix(haz.fraction.shape) frac = haz._get_fraction() self.assertIsNone(frac) diff --git a/climada/hazard/test/test_io.py b/climada/hazard/test/test_io.py index be9e2829a..63e35291f 100644 --- a/climada/hazard/test/test_io.py +++ b/climada/hazard/test/test_io.py @@ -18,20 +18,21 @@ Test Hazard base class. 
""" -import unittest -from unittest.mock import patch + import datetime as dt +import unittest from pathlib import Path from tempfile import TemporaryDirectory +from unittest.mock import patch -from pyproj import CRS import numpy as np -from scipy.sparse import csr_matrix import xarray as xr +from pyproj import CRS +from scipy.sparse import csr_matrix from climada.hazard.base import Hazard -from climada.util.constants import DEF_FREQ_UNIT, HAZ_TEMPLATE_XLS, HAZ_DEMO_FL, DEF_CRS from climada.hazard.test.test_base import DATA_DIR, dummy_hazard +from climada.util.constants import DEF_CRS, DEF_FREQ_UNIT, HAZ_DEMO_FL, HAZ_TEMPLATE_XLS class TestReadDefaultNetCDF(unittest.TestCase): @@ -605,13 +606,13 @@ def test_hazard_pass(self): """Read an hazard excel file correctly.""" # Read demo excel file - hazard = Hazard.from_excel(HAZ_TEMPLATE_XLS, haz_type='TC') + hazard = Hazard.from_excel(HAZ_TEMPLATE_XLS, haz_type="TC") # Check results n_events = 100 n_centroids = 45 - self.assertEqual(hazard.units, '') + self.assertEqual(hazard.units, "") self.assertEqual(hazard.centroids.coord.shape, (n_centroids, 2)) self.assertEqual(hazard.centroids.coord[0][0], -25.95) @@ -620,7 +621,7 @@ def test_hazard_pass(self): self.assertEqual(hazard.centroids.coord[n_centroids - 1][1], 33.88) self.assertEqual(len(hazard.event_name), 100) - self.assertEqual(hazard.event_name[12], 'event013') + self.assertEqual(hazard.event_name[12], "event013") self.assertEqual(hazard.event_id.dtype, int) self.assertEqual(hazard.event_id.shape, (n_events,)) @@ -632,9 +633,9 @@ def test_hazard_pass(self): self.assertEqual(hazard.date[0], 675874) self.assertEqual(hazard.date[n_events - 1], 676329) - self.assertEqual(hazard.event_name[0], 'event001') - self.assertEqual(hazard.event_name[50], 'event051') - self.assertEqual(hazard.event_name[-1], 'event100') + self.assertEqual(hazard.event_name[0], "event001") + self.assertEqual(hazard.event_name[50], "event051") + self.assertEqual(hazard.event_name[-1], "event100") self.assertEqual(hazard.frequency.dtype, float) self.assertEqual(hazard.frequency.shape, (n_events,)) @@ -654,7 +655,7 @@ def test_hazard_pass(self): self.assertTrue(np.all(hazard.orig)) - self.assertEqual(hazard.haz_type, 'TC') + self.assertEqual(hazard.haz_type, "TC") class TestHDF5(unittest.TestCase): @@ -662,7 +663,7 @@ class TestHDF5(unittest.TestCase): def test_write_read_unsupported_type(self): """Check if the write command correctly handles unsupported types""" - file_name = str(DATA_DIR.joinpath('test_unsupported.h5')) + file_name = str(DATA_DIR.joinpath("test_unsupported.h5")) # Define an unsupported type class CustomID: @@ -680,13 +681,17 @@ class CustomID: # Load the file again and compare to previous instance hazard_read = Hazard.from_hdf5(file_name) self.assertTrue(np.array_equal(hazard.date, hazard_read.date)) - self.assertTrue(np.array_equal(hazard_read.event_id, np.array([]))) # Empty array + self.assertTrue( + np.array_equal(hazard_read.event_id, np.array([])) + ) # Empty array # Execute Tests if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestReadDefaultNetCDF) - TESTS.addTests(unittest.TestLoader().loadTestsFromTestCase(TestReadDimsCoordsNetCDF)) + TESTS.addTests( + unittest.TestLoader().loadTestsFromTestCase(TestReadDimsCoordsNetCDF) + ) TESTS.addTests(unittest.TestLoader().loadTestsFromTestCase(TestReaderExcel)) TESTS.addTests(unittest.TestLoader().loadTestsFromTestCase(TestHDF5)) unittest.TextTestRunner(verbosity=2).run(TESTS) diff --git 
a/climada/hazard/test/test_storm_europe.py b/climada/hazard/test/test_storm_europe.py index f919cbaa4..6e4fff3b2 100644 --- a/climada/hazard/test/test_storm_europe.py +++ b/climada/hazard/test/test_storm_europe.py @@ -20,19 +20,20 @@ """ import copy -import unittest import datetime as dt +import unittest + import numpy as np from scipy import sparse from climada import CONFIG -from climada.hazard.storm_europe import StormEurope, generate_WS_forecast_hazard from climada.hazard.centroids.centr import Centroids +from climada.hazard.storm_europe import StormEurope, generate_WS_forecast_hazard from climada.util.constants import WS_DEMO_NC - DATA_DIR = CONFIG.hazard.test_data.dir() + class TestReader(unittest.TestCase): """Test loading functions from the StormEurope class""" @@ -47,15 +48,15 @@ def test_read_with_ref(self): """Test from_footprints while passing in a reference raster.""" storms = StormEurope.from_footprints(WS_DEMO_NC, ref_raster=WS_DEMO_NC[1]) - self.assertEqual(storms.haz_type, 'WS') - self.assertEqual(storms.units, 'm/s') + self.assertEqual(storms.haz_type, "WS") + self.assertEqual(storms.units, "m/s") self.assertEqual(storms.event_id.size, 2) self.assertEqual(storms.date.size, 2) self.assertEqual(dt.datetime.fromordinal(storms.date[0]).year, 1999) self.assertEqual(dt.datetime.fromordinal(storms.date[0]).month, 12) self.assertEqual(dt.datetime.fromordinal(storms.date[0]).day, 26) self.assertEqual(storms.event_id[0], 1) - self.assertEqual(storms.event_name[0], 'Lothar') + self.assertEqual(storms.event_name[0], "Lothar") self.assertTrue(isinstance(storms.intensity, sparse.csr_matrix)) self.assertTrue(isinstance(storms.fraction, sparse.csr_matrix)) self.assertEqual(storms.intensity.shape, (2, 9944)) @@ -64,40 +65,29 @@ def test_read_with_ref(self): def test_read_with_cent(self): """Test from_footprints while passing in a Centroids object""" test_centroids = Centroids.from_excel( - file_path=DATA_DIR.joinpath('fp_centroids-test.xls'), - sheet_name='fp_centroids-test' - ) + file_path=DATA_DIR.joinpath("fp_centroids-test.xls"), + sheet_name="fp_centroids-test", + ) storms = StormEurope.from_footprints(WS_DEMO_NC, centroids=test_centroids) self.assertEqual(storms.intensity.shape, (2, 9944)) - self.assertEqual( - np.count_nonzero( - ~np.isnan(storms.centroids.region_id) - ), - 6401 - ) + self.assertEqual(np.count_nonzero(~np.isnan(storms.centroids.region_id)), 6401) def test_set_ssi(self): """Test set_ssi with both dawkins and wisc_gust methodology.""" storms = StormEurope.from_footprints(WS_DEMO_NC) - storms.set_ssi(method='dawkins') - ssi_dawg = np.asarray([1.44573572e+09, 6.16173724e+08]) - self.assertTrue( - np.allclose(storms.ssi, ssi_dawg) - ) + storms.set_ssi(method="dawkins") + ssi_dawg = np.asarray([1.44573572e09, 6.16173724e08]) + self.assertTrue(np.allclose(storms.ssi, ssi_dawg)) - storms.set_ssi(method='wisc_gust') - ssi_gusty = np.asarray([1.42124571e+09, 5.86870673e+08]) - self.assertTrue( - np.allclose(storms.ssi, ssi_gusty) - ) + storms.set_ssi(method="wisc_gust") + ssi_gusty = np.asarray([1.42124571e09, 5.86870673e08]) + self.assertTrue(np.allclose(storms.ssi, ssi_gusty)) storms.set_ssi(threshold=20, on_land=False) - ssi_special = np.asarray([2.96582030e+09, 1.23980294e+09]) - self.assertTrue( - np.allclose(storms.ssi, ssi_special) - ) + ssi_special = np.asarray([2.96582030e09, 1.23980294e09]) + self.assertTrue(np.allclose(storms.ssi, ssi_special)) def test_generate_prob_storms(self): """Test the probabilistic storm generator; calls _hist2prob as well as @@ -107,59 
+97,58 @@ def test_generate_prob_storms(self): self.assertEqual( np.count_nonzero(storms.centroids.region_id), - 6402 + 6402, # here, we don't rasterise; we check if the centroids lie in a # polygon. that is to say, it's not the majority of a raster pixel, # but the centroid's location that is decisive ) self.assertEqual(storms_prob.size, 60) self.assertTrue(np.allclose((1 / storms_prob.frequency).astype(int), 330)) - self.assertAlmostEqual(storms.frequency.sum(), - storms_prob.frequency.sum()) + self.assertAlmostEqual(storms.frequency.sum(), storms_prob.frequency.sum()) self.assertEqual(np.count_nonzero(storms_prob.orig), 2) self.assertEqual(storms_prob.centroids.size, 3054) - self.assertIsInstance(storms_prob.intensity, - sparse.csr_matrix) + self.assertIsInstance(storms_prob.intensity, sparse.csr_matrix) def test_cosmoe_read(self): """test reading from cosmo-e netcdf""" haz = StormEurope.from_cosmoe_file( - DATA_DIR.joinpath('storm_europe_cosmoe_forecast_vmax_testfile.nc'), - run_datetime=dt.datetime(2018,1,1), - event_date=dt.datetime(2018,1,3)) - self.assertEqual(haz.haz_type, 'WS') - self.assertEqual(haz.units, 'm/s') + DATA_DIR.joinpath("storm_europe_cosmoe_forecast_vmax_testfile.nc"), + run_datetime=dt.datetime(2018, 1, 1), + event_date=dt.datetime(2018, 1, 3), + ) + self.assertEqual(haz.haz_type, "WS") + self.assertEqual(haz.units, "m/s") self.assertEqual(haz.event_id.size, 21) self.assertEqual(haz.date.size, 21) self.assertEqual(dt.datetime.fromordinal(haz.date[0]).year, 2018) self.assertEqual(dt.datetime.fromordinal(haz.date[0]).month, 1) self.assertEqual(dt.datetime.fromordinal(haz.date[0]).day, 3) self.assertEqual(haz.event_id[-1], 21) - self.assertEqual(haz.event_name[-1], '2018-01-03_ens21') - self.assertIsInstance(haz.intensity, - sparse.csr_matrix) - self.assertIsInstance(haz.fraction, - sparse.csr_matrix) + self.assertEqual(haz.event_name[-1], "2018-01-03_ens21") + self.assertIsInstance(haz.intensity, sparse.csr_matrix) + self.assertIsInstance(haz.fraction, sparse.csr_matrix) self.assertEqual(haz.intensity.shape, (21, 25)) - self.assertAlmostEqual(haz.intensity.max(), 36.426735,places=3) + self.assertAlmostEqual(haz.intensity.max(), 36.426735, places=3) self.assertEqual(haz.fraction.shape, (21, 25)) def test_generate_forecast(self): - """ testing generating a forecast """ + """testing generating a forecast""" hazard, haz_model, run_datetime, event_date = generate_WS_forecast_hazard( - run_datetime=dt.datetime(2018,1,1), - event_date=dt.datetime(2018,1,3), - haz_model='cosmo2e_file', - haz_raw_storage=DATA_DIR.joinpath('storm_europe_cosmoe_forecast' + - '_vmax_testfile.nc'), + run_datetime=dt.datetime(2018, 1, 1), + event_date=dt.datetime(2018, 1, 3), + haz_model="cosmo2e_file", + haz_raw_storage=DATA_DIR.joinpath( + "storm_europe_cosmoe_forecast" + "_vmax_testfile.nc" + ), save_haz=False, - ) + ) self.assertEqual(run_datetime.year, 2018) self.assertEqual(run_datetime.month, 1) self.assertEqual(run_datetime.day, 1) self.assertEqual(event_date.day, 3) - self.assertEqual(hazard.event_name[-1], '2018-01-03_ens21') - self.assertEqual(haz_model, 'C2E') + self.assertEqual(hazard.event_name[-1], "2018-01-03_ens21") + self.assertEqual(haz_model, "C2E") + # Execute Tests if __name__ == "__main__": diff --git a/climada/hazard/test/test_tc_cc.py b/climada/hazard/test/test_tc_cc.py index 4014ac2cb..0ec281510 100644 --- a/climada/hazard/test/test_tc_cc.py +++ b/climada/hazard/test/test_tc_cc.py @@ -19,85 +19,102 @@ Test tc_clim_change module """ -import unittest - import unittest from 
math import log -import pandas as pd + import numpy as np +import pandas as pd + import climada.hazard.tc_clim_change as tc_cc + class TestKnutson(unittest.TestCase): def test_get_knutson_scaling_calculations(self): - basin = 'NA' - variable = 'cat05' - percentile = '5/10' + basin = "NA" + variable = "cat05" + percentile = "5/10" base_start, base_end = 1950, 2018 yearly_steps = 5 target_predicted_changes = tc_cc.get_knutson_scaling_factor( - percentile=percentile, - variable=variable, - basin=basin, - baseline=(base_start, base_end), - yearly_steps=yearly_steps + percentile=percentile, + variable=variable, + basin=basin, + baseline=(base_start, base_end), + yearly_steps=yearly_steps, ) ## Test computations of future changes # Load data gmst_info = tc_cc.get_gmst_info() - var_id, basin_id, perc_id = (tc_cc.MAP_VARS_NAMES[variable], - tc_cc.MAP_BASINS_NAMES[basin], - tc_cc.MAP_PERC_NAMES[percentile]) + var_id, basin_id, perc_id = ( + tc_cc.MAP_VARS_NAMES[variable], + tc_cc.MAP_BASINS_NAMES[basin], + tc_cc.MAP_PERC_NAMES[percentile], + ) knutson_data = tc_cc.get_knutson_data() knutson_value = knutson_data[var_id, basin_id, perc_id] - - start_ind = base_start - gmst_info['gmst_start_year'] - end_ind = base_end - gmst_info['gmst_start_year'] + + start_ind = base_start - gmst_info["gmst_start_year"] + end_ind = base_end - gmst_info["gmst_start_year"] # Apply model beta = 0.5 * log(0.01 * knutson_value + 1) - tc_properties = np.exp(beta * gmst_info['gmst_data']) + tc_properties = np.exp(beta * gmst_info["gmst_data"]) # Assess baseline value - baseline = np.mean(tc_properties[:, start_ind:end_ind + 1], 1) + baseline = np.mean(tc_properties[:, start_ind : end_ind + 1], 1) # Assess future value and test predicted change from baseline is # the same as given by function smoothing = 5 for target_year in [2030, 2050, 2070, 2090]: - target_year_ind = target_year - gmst_info['gmst_start_year'] + target_year_ind = target_year - gmst_info["gmst_start_year"] ind1 = target_year_ind - smoothing ind2 = target_year_ind + smoothing + 1 prediction = np.mean(tc_properties[:, ind1:ind2], 1) calculated_predicted_change = ((prediction - baseline) / baseline) * 100 - np.testing.assert_array_almost_equal(target_predicted_changes.loc[target_year, '2.6'], - calculated_predicted_change[0]) - np.testing.assert_array_almost_equal(target_predicted_changes.loc[target_year, '4.5'], - calculated_predicted_change[1]) - np.testing.assert_array_almost_equal(target_predicted_changes.loc[target_year, '6.0'], - calculated_predicted_change[2]) - np.testing.assert_array_almost_equal(target_predicted_changes.loc[target_year, '8.5'], - calculated_predicted_change[3]) + np.testing.assert_array_almost_equal( + target_predicted_changes.loc[target_year, "2.6"], + calculated_predicted_change[0], + ) + np.testing.assert_array_almost_equal( + target_predicted_changes.loc[target_year, "4.5"], + calculated_predicted_change[1], + ) + np.testing.assert_array_almost_equal( + target_predicted_changes.loc[target_year, "6.0"], + calculated_predicted_change[2], + ) + np.testing.assert_array_almost_equal( + target_predicted_changes.loc[target_year, "8.5"], + calculated_predicted_change[3], + ) def test_get_knutson_scaling_structure(self): """Test get_knutson_criterion function.""" - + yearly_steps = 8 - target_predicted_changes = tc_cc.get_knutson_scaling_factor(yearly_steps=yearly_steps) + target_predicted_changes = tc_cc.get_knutson_scaling_factor( + yearly_steps=yearly_steps + ) - np.testing.assert_equal(target_predicted_changes.columns, 
np.array(['2.6', '4.5', '6.0', '8.5'])) + np.testing.assert_equal( + target_predicted_changes.columns, np.array(["2.6", "4.5", "6.0", "8.5"]) + ) - simulated_years = np.arange(tc_cc.YEAR_WINDOWS_PROPS['start'], - tc_cc.YEAR_WINDOWS_PROPS['end']+1, - yearly_steps) + simulated_years = np.arange( + tc_cc.YEAR_WINDOWS_PROPS["start"], + tc_cc.YEAR_WINDOWS_PROPS["end"] + 1, + yearly_steps, + ) np.testing.assert_equal(target_predicted_changes.index, simulated_years) def test_get_knutson_scaling_valid_inputs(self): @@ -114,7 +131,7 @@ def test_get_knutson_scaling_invalid_baseline_end_year(self): tc_cc.get_knutson_scaling_factor(baseline=(1982, 2110)) def test_get_knutson_scaling_no_scaling_factors_for_unknonw_basin(self): - df = tc_cc.get_knutson_scaling_factor(basin='ZZZZZ') + df = tc_cc.get_knutson_scaling_factor(basin="ZZZZZ") self.assertIsInstance(df, pd.DataFrame) np.testing.assert_equal(df.values, np.ones_like(df.values)) @@ -122,30 +139,35 @@ def test_get_gmst(self): """Test get_gmst_info function.""" gmst_info = tc_cc.get_gmst_info() - self.assertAlmostEqual(gmst_info['gmst_start_year'], 1880) - self.assertAlmostEqual(gmst_info['gmst_end_year'], 2100) - self.assertAlmostEqual(len(gmst_info['rcps']), 4) + self.assertAlmostEqual(gmst_info["gmst_start_year"], 1880) + self.assertAlmostEqual(gmst_info["gmst_end_year"], 2100) + self.assertAlmostEqual(len(gmst_info["rcps"]), 4) - self.assertAlmostEqual(gmst_info['gmst_data'].shape, - (len(gmst_info['rcps']), - gmst_info['gmst_end_year']-gmst_info['gmst_start_year']+1)) - self.assertAlmostEqual(gmst_info['gmst_data'][0,0], -0.16) - self.assertAlmostEqual(gmst_info['gmst_data'][0,-1], 1.27641, 4) - self.assertAlmostEqual(gmst_info['gmst_data'][-1,0], -0.16) - self.assertAlmostEqual(gmst_info['gmst_data'][-1,-1], 4.477764, 4) + self.assertAlmostEqual( + gmst_info["gmst_data"].shape, + ( + len(gmst_info["rcps"]), + gmst_info["gmst_end_year"] - gmst_info["gmst_start_year"] + 1, + ), + ) + self.assertAlmostEqual(gmst_info["gmst_data"][0, 0], -0.16) + self.assertAlmostEqual(gmst_info["gmst_data"][0, -1], 1.27641, 4) + self.assertAlmostEqual(gmst_info["gmst_data"][-1, 0], -0.16) + self.assertAlmostEqual(gmst_info["gmst_data"][-1, -1], 4.477764, 4) def test_get_knutson_data_pass(self): """Test get_knutson_data function.""" data_knutson = tc_cc.get_knutson_data() - self.assertAlmostEqual(data_knutson.shape, (4,6,5)) - self.assertAlmostEqual(data_knutson[0,0,0], -34.49) - self.assertAlmostEqual(data_knutson[-1,-1,-1], 15.419) - self.assertAlmostEqual(data_knutson[0,-1,-1], 4.689) - self.assertAlmostEqual(data_knutson[-1,0,0], 5.848) - self.assertAlmostEqual(data_knutson[-1,0,-1], 22.803) - self.assertAlmostEqual(data_knutson[2,3,2], 4.324) + self.assertAlmostEqual(data_knutson.shape, (4, 6, 5)) + self.assertAlmostEqual(data_knutson[0, 0, 0], -34.49) + self.assertAlmostEqual(data_knutson[-1, -1, -1], 15.419) + self.assertAlmostEqual(data_knutson[0, -1, -1], 4.689) + self.assertAlmostEqual(data_knutson[-1, 0, 0], 5.848) + self.assertAlmostEqual(data_knutson[-1, 0, -1], 22.803) + self.assertAlmostEqual(data_knutson[2, 3, 2], 4.324) + if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestKnutson) diff --git a/climada/hazard/test/test_tc_tracks.py b/climada/hazard/test/test_tc_tracks.py index f5c64e22b..df60bc83e 100644 --- a/climada/hazard/test/test_tc_tracks.py +++ b/climada/hazard/test/test_tc_tracks.py @@ -19,35 +19,34 @@ Test tc_tracks module. 
""" -from datetime import datetime as dt import unittest +from datetime import datetime as dt -import xarray as xr +import geopandas as gpd import numpy as np import pandas as pd -import geopandas as gpd -from shapely.geometry import Point, LineString, MultiLineString +import xarray as xr +from shapely.geometry import LineString, MultiLineString, Point import climada.hazard.tc_tracks as tc -from climada import CONFIG -from climada.util import ureg -from climada.util.constants import TC_ANDREW_FL import climada.util.coordinates as u_coord +from climada import CONFIG from climada.entity import Exposures from climada.hazard.test import download_ibtracs - +from climada.util import ureg +from climada.util.constants import TC_ANDREW_FL DATA_DIR = CONFIG.hazard.test_data.dir() TEST_TRACK = DATA_DIR.joinpath("trac_brb_test.csv") TEST_TRACK_SHORT = DATA_DIR.joinpath("trac_short_test.csv") -TEST_RAW_TRACK = DATA_DIR.joinpath('Storm.2016075S11087.ibtracs_all.v03r10.csv') -TEST_TRACK_GETTELMAN = DATA_DIR.joinpath('gettelman_test_tracks.nc') -TEST_TRACK_EMANUEL = DATA_DIR.joinpath('emanuel_test_tracks.mat') -TEST_TRACK_EMANUEL_CORR = DATA_DIR.joinpath('temp_mpircp85cal_full.mat') -TEST_TRACK_CHAZ = DATA_DIR.joinpath('chaz_test_tracks.nc') -TEST_TRACK_STORM = DATA_DIR.joinpath('storm_test_tracks.txt') -TEST_TRACKS_ANTIMERIDIAN = DATA_DIR.joinpath('tracks-antimeridian') -TEST_TRACKS_LEGACY_HDF5 = DATA_DIR.joinpath('tctracks_hdf5_legacy.nc') +TEST_RAW_TRACK = DATA_DIR.joinpath("Storm.2016075S11087.ibtracs_all.v03r10.csv") +TEST_TRACK_GETTELMAN = DATA_DIR.joinpath("gettelman_test_tracks.nc") +TEST_TRACK_EMANUEL = DATA_DIR.joinpath("emanuel_test_tracks.mat") +TEST_TRACK_EMANUEL_CORR = DATA_DIR.joinpath("temp_mpircp85cal_full.mat") +TEST_TRACK_CHAZ = DATA_DIR.joinpath("chaz_test_tracks.nc") +TEST_TRACK_STORM = DATA_DIR.joinpath("storm_test_tracks.txt") +TEST_TRACKS_ANTIMERIDIAN = DATA_DIR.joinpath("tracks-antimeridian") +TEST_TRACKS_LEGACY_HDF5 = DATA_DIR.joinpath("tctracks_hdf5_legacy.nc") class TestIbtracs(unittest.TestCase): @@ -60,170 +59,195 @@ def setUpClass(cls): def test_raw_ibtracs_empty_pass(self): """Test reading empty TC from IBTrACS files""" tc_track = tc.TCTracks.from_ibtracs_netcdf( - provider='usa', storm_id='1988234N13299') + provider="usa", storm_id="1988234N13299" + ) self.assertEqual(tc_track.size, 0) self.assertEqual(tc_track.get_track(), []) def test_raw_ibtracs_invalid_pass(self): """Test reading invalid/non-existing TC from IBTrACS files""" with self.assertRaises(ValueError) as cm: - tc_track = tc.TCTracks.from_ibtracs_netcdf(storm_id='INVALID') + tc_track = tc.TCTracks.from_ibtracs_netcdf(storm_id="INVALID") self.assertIn("IDs are invalid", str(cm.exception)) self.assertIn("INVALID", str(cm.exception)) with self.assertRaises(ValueError) as cm: - tc_track = tc.TCTracks.from_ibtracs_netcdf(storm_id='1988234N13298') + tc_track = tc.TCTracks.from_ibtracs_netcdf(storm_id="1988234N13298") self.assertIn("IDs are not in IBTrACS", str(cm.exception)) self.assertIn("1988234N13298", str(cm.exception)) def test_penv_rmax_penv_pass(self): """from_ibtracs_netcdf""" - tc_track = tc.TCTracks.from_ibtracs_netcdf(provider='usa', storm_id='1992230N11325') + tc_track = tc.TCTracks.from_ibtracs_netcdf( + provider="usa", storm_id="1992230N11325" + ) penv_ref = np.ones(97) * 1010 penv_ref[26:36] = [1011, 1012, 1013, 1014, 1015, 1014, 1014, 1014, 1014, 1012] - self.assertTrue(np.allclose( - tc_track.get_track()['environmental_pressure'].values, penv_ref)) - self.assertTrue(np.allclose( - 
tc_track.get_track()['radius_max_wind'].values, np.zeros(97))) + self.assertTrue( + np.allclose(tc_track.get_track()["environmental_pressure"].values, penv_ref) + ) + self.assertTrue( + np.allclose(tc_track.get_track()["radius_max_wind"].values, np.zeros(97)) + ) def test_ibtracs_raw_pass(self): """Read a tropical cyclone.""" # read without specified provider or estimation of missing values - tc_track = tc.TCTracks.from_ibtracs_netcdf(storm_id='2017242N16333') + tc_track = tc.TCTracks.from_ibtracs_netcdf(storm_id="2017242N16333") track_ds = tc_track.get_track() self.assertEqual(len(tc_track.data), 1) - self.assertEqual(track_ds['time'].dt.year.values[0], 2017) - self.assertEqual(track_ds['time'].dt.month.values[0], 8) - self.assertEqual(track_ds['time'].dt.day.values[0], 30) - self.assertEqual(track_ds['time'].dt.hour.values[0], 0) - self.assertAlmostEqual(track_ds['lat'].values[0], 16.1, places=5) - self.assertAlmostEqual(track_ds['lon'].values[0], -26.9, places=5) - self.assertAlmostEqual(track_ds['max_sustained_wind'].values[0], 30) - self.assertAlmostEqual(track_ds['central_pressure'].values[0], 1008) - self.assertAlmostEqual(track_ds['environmental_pressure'].values[0], 1012) - self.assertAlmostEqual(track_ds['radius_max_wind'].values[0], 60) - self.assertEqual(track_ds['time'].size, 123) - - self.assertAlmostEqual(track_ds['lat'].values[-1], 36.8, places=5) - self.assertAlmostEqual(track_ds['lon'].values[-1], -90.1, places=4) - self.assertAlmostEqual(track_ds['central_pressure'].values[-1], 1005) - self.assertAlmostEqual(track_ds['max_sustained_wind'].values[-1], 15) - self.assertAlmostEqual(track_ds['environmental_pressure'].values[-1], 1008) - self.assertAlmostEqual(track_ds['radius_max_wind'].values[-1], 60) - - self.assertFalse(np.isnan(track_ds['radius_max_wind'].values).any()) - self.assertFalse(np.isnan(track_ds['environmental_pressure'].values).any()) - self.assertFalse(np.isnan(track_ds['max_sustained_wind'].values).any()) - self.assertFalse(np.isnan(track_ds['central_pressure'].values).any()) - self.assertFalse(np.isnan(track_ds['lat'].values).any()) - self.assertFalse(np.isnan(track_ds['lon'].values).any()) - - np.testing.assert_array_equal(track_ds['basin'], 'NA') - self.assertEqual(track_ds.attrs['max_sustained_wind_unit'], 'kn') - self.assertEqual(track_ds.attrs['central_pressure_unit'], 'mb') - self.assertEqual(track_ds.attrs['sid'], '2017242N16333') - self.assertEqual(track_ds.attrs['name'], 'IRMA') - self.assertEqual(track_ds.attrs['orig_event_flag'], True) - self.assertEqual(track_ds.attrs['data_provider'], - 'ibtracs_mixed:lat(official_3h),lon(official_3h),wind(official_3h),' - 'pres(official_3h),rmw(official_3h),poci(official_3h),roci(official_3h)') - self.assertEqual(track_ds.attrs['category'], 5) + self.assertEqual(track_ds["time"].dt.year.values[0], 2017) + self.assertEqual(track_ds["time"].dt.month.values[0], 8) + self.assertEqual(track_ds["time"].dt.day.values[0], 30) + self.assertEqual(track_ds["time"].dt.hour.values[0], 0) + self.assertAlmostEqual(track_ds["lat"].values[0], 16.1, places=5) + self.assertAlmostEqual(track_ds["lon"].values[0], -26.9, places=5) + self.assertAlmostEqual(track_ds["max_sustained_wind"].values[0], 30) + self.assertAlmostEqual(track_ds["central_pressure"].values[0], 1008) + self.assertAlmostEqual(track_ds["environmental_pressure"].values[0], 1012) + self.assertAlmostEqual(track_ds["radius_max_wind"].values[0], 60) + self.assertEqual(track_ds["time"].size, 123) + + self.assertAlmostEqual(track_ds["lat"].values[-1], 36.8, places=5) + 
self.assertAlmostEqual(track_ds["lon"].values[-1], -90.1, places=4) + self.assertAlmostEqual(track_ds["central_pressure"].values[-1], 1005) + self.assertAlmostEqual(track_ds["max_sustained_wind"].values[-1], 15) + self.assertAlmostEqual(track_ds["environmental_pressure"].values[-1], 1008) + self.assertAlmostEqual(track_ds["radius_max_wind"].values[-1], 60) + + self.assertFalse(np.isnan(track_ds["radius_max_wind"].values).any()) + self.assertFalse(np.isnan(track_ds["environmental_pressure"].values).any()) + self.assertFalse(np.isnan(track_ds["max_sustained_wind"].values).any()) + self.assertFalse(np.isnan(track_ds["central_pressure"].values).any()) + self.assertFalse(np.isnan(track_ds["lat"].values).any()) + self.assertFalse(np.isnan(track_ds["lon"].values).any()) + + np.testing.assert_array_equal(track_ds["basin"], "NA") + self.assertEqual(track_ds.attrs["max_sustained_wind_unit"], "kn") + self.assertEqual(track_ds.attrs["central_pressure_unit"], "mb") + self.assertEqual(track_ds.attrs["sid"], "2017242N16333") + self.assertEqual(track_ds.attrs["name"], "IRMA") + self.assertEqual(track_ds.attrs["orig_event_flag"], True) + self.assertEqual( + track_ds.attrs["data_provider"], + "ibtracs_mixed:lat(official_3h),lon(official_3h),wind(official_3h)," + "pres(official_3h),rmw(official_3h),poci(official_3h),roci(official_3h)", + ) + self.assertEqual(track_ds.attrs["category"], 5) def test_ibtracs_with_provider(self): """Read a tropical cyclone with and without explicit provider.""" - storm_id = '2012152N12130' - tc_track = tc.TCTracks.from_ibtracs_netcdf(storm_id=storm_id, provider='usa') + storm_id = "2012152N12130" + tc_track = tc.TCTracks.from_ibtracs_netcdf(storm_id=storm_id, provider="usa") track_ds = tc_track.get_track() - self.assertEqual(track_ds['time'].size, 51) - self.assertEqual(track_ds.attrs['data_provider'], 'ibtracs_usa') - self.assertAlmostEqual(track_ds['lat'].values[50], 34.3, places=5) - self.assertAlmostEqual(track_ds['central_pressure'].values[50], 989, places=5) - self.assertAlmostEqual(track_ds['radius_max_wind'].values[46], 20, places=5) + self.assertEqual(track_ds["time"].size, 51) + self.assertEqual(track_ds.attrs["data_provider"], "ibtracs_usa") + self.assertAlmostEqual(track_ds["lat"].values[50], 34.3, places=5) + self.assertAlmostEqual(track_ds["central_pressure"].values[50], 989, places=5) + self.assertAlmostEqual(track_ds["radius_max_wind"].values[46], 20, places=5) tc_track = tc.TCTracks.from_ibtracs_netcdf(storm_id=storm_id) track_ds = tc_track.get_track() - self.assertEqual(track_ds['time'].size, 35) - self.assertEqual(track_ds.attrs['data_provider'], - 'ibtracs_mixed:lat(official_3h),lon(official_3h),wind(official_3h),' - 'pres(official_3h),rmw(usa),poci(usa),roci(usa)') - self.assertAlmostEqual(track_ds['lat'].values[-1], 31.40, places=5) - self.assertAlmostEqual(track_ds['central_pressure'].values[-1], 980, places=5) + self.assertEqual(track_ds["time"].size, 35) + self.assertEqual( + track_ds.attrs["data_provider"], + "ibtracs_mixed:lat(official_3h),lon(official_3h),wind(official_3h)," + "pres(official_3h),rmw(usa),poci(usa),roci(usa)", + ) + self.assertAlmostEqual(track_ds["lat"].values[-1], 31.40, places=5) + self.assertAlmostEqual(track_ds["central_pressure"].values[-1], 980, places=5) def test_ibtracs_antimeridian(self): """Read a track that crosses the antimeridian and make sure that lon is consistent""" - storm_id = '2013224N12220' + storm_id = "2013224N12220" # the officially responsible agencies 'usa' and 'tokyo' use different signs in lon, but we # have 
to `estimate_missing` because both have gaps in reported values - tc_track = tc.TCTracks.from_ibtracs_netcdf(storm_id=storm_id, provider=['official_3h'], - estimate_missing=True) + tc_track = tc.TCTracks.from_ibtracs_netcdf( + storm_id=storm_id, provider=["official_3h"], estimate_missing=True + ) track_ds = tc_track.get_track() - np.testing.assert_array_less(0, track_ds['lon']) + np.testing.assert_array_less(0, track_ds["lon"]) def test_ibtracs_estimate_missing(self): """Read a tropical cyclone and estimate missing values.""" - storm_id = '2012152N12130' + storm_id = "2012152N12130" - tc_track = tc.TCTracks.from_ibtracs_netcdf(storm_id=storm_id, estimate_missing=True) + tc_track = tc.TCTracks.from_ibtracs_netcdf( + storm_id=storm_id, estimate_missing=True + ) track_ds = tc_track.get_track() # less time steps are discarded, leading to a larger total size - self.assertEqual(track_ds['time'].size, 99) - self.assertEqual(track_ds.attrs['data_provider'], - 'ibtracs_mixed:lat(official_3h),lon(official_3h),wind(official_3h),' - 'pres(official_3h),rmw(usa),poci(usa),roci(usa)') - self.assertAlmostEqual(track_ds['lat'].values[44], 33.30, places=5) - self.assertAlmostEqual(track_ds['central_pressure'].values[44], 976, places=5) - self.assertAlmostEqual(track_ds['central_pressure'].values[42], 980, places=5) + self.assertEqual(track_ds["time"].size, 99) + self.assertEqual( + track_ds.attrs["data_provider"], + "ibtracs_mixed:lat(official_3h),lon(official_3h),wind(official_3h)," + "pres(official_3h),rmw(usa),poci(usa),roci(usa)", + ) + self.assertAlmostEqual(track_ds["lat"].values[44], 33.30, places=5) + self.assertAlmostEqual(track_ds["central_pressure"].values[44], 976, places=5) + self.assertAlmostEqual(track_ds["central_pressure"].values[42], 980, places=5) # the wind speed at position 44 is missing in the original data - self.assertAlmostEqual(track_ds['max_sustained_wind'].values[44], 58, places=0) - self.assertAlmostEqual(track_ds['radius_oci'].values[40], 160, places=0) + self.assertAlmostEqual(track_ds["max_sustained_wind"].values[44], 58, places=0) + self.assertAlmostEqual(track_ds["radius_oci"].values[40], 160, places=0) # after position 42, ROCI is missing in the original data - self.assertAlmostEqual(track_ds['radius_oci'].values[42], 200, places=-1) - self.assertAlmostEqual(track_ds['radius_oci'].values[85], 165, places=-1) - self.assertAlmostEqual(track_ds['radius_oci'].values[95], 155, places=-1) + self.assertAlmostEqual(track_ds["radius_oci"].values[42], 200, places=-1) + self.assertAlmostEqual(track_ds["radius_oci"].values[85], 165, places=-1) + self.assertAlmostEqual(track_ds["radius_oci"].values[95], 155, places=-1) def test_ibtracs_official(self): """Read a tropical cyclone, only officially reported values.""" - storm_id = '2012152N12130' + storm_id = "2012152N12130" tc_track = tc.TCTracks.from_ibtracs_netcdf( - storm_id=storm_id, interpolate_missing=False, provider='official') + storm_id=storm_id, interpolate_missing=False, provider="official" + ) track_ds = tc_track.get_track() - self.assertEqual(track_ds['time'].size, 21) - self.assertEqual(track_ds.attrs['data_provider'], 'ibtracs_official') - self.assertAlmostEqual(track_ds['lon'].values[19], 137.6, places=4) - self.assertAlmostEqual(track_ds['central_pressure'].values[19], 980, places=5) - np.testing.assert_array_equal(track_ds['radius_max_wind'].values, 0) + self.assertEqual(track_ds["time"].size, 21) + self.assertEqual(track_ds.attrs["data_provider"], "ibtracs_official") + self.assertAlmostEqual(track_ds["lon"].values[19], 
137.6, places=4) + self.assertAlmostEqual(track_ds["central_pressure"].values[19], 980, places=5) + np.testing.assert_array_equal(track_ds["radius_max_wind"].values, 0) def test_ibtracs_scale_wind(self): """Read a tropical cyclone and scale wind speed according to agency.""" - storm_id = '2012152N12130' + storm_id = "2012152N12130" - tc_track = tc.TCTracks.from_ibtracs_netcdf(storm_id=storm_id, rescale_windspeeds=True) + tc_track = tc.TCTracks.from_ibtracs_netcdf( + storm_id=storm_id, rescale_windspeeds=True + ) track_ds = tc_track.get_track() - self.assertAlmostEqual(track_ds['max_sustained_wind'].values[34], (55 - 23.3) / 0.6, places=5) + self.assertAlmostEqual( + track_ds["max_sustained_wind"].values[34], (55 - 23.3) / 0.6, places=5 + ) - tc_track = tc.TCTracks.from_ibtracs_netcdf(storm_id=storm_id, rescale_windspeeds=False) + tc_track = tc.TCTracks.from_ibtracs_netcdf( + storm_id=storm_id, rescale_windspeeds=False + ) track_ds = tc_track.get_track() - self.assertAlmostEqual(track_ds['max_sustained_wind'].values[34], 55, places=5) + self.assertAlmostEqual(track_ds["max_sustained_wind"].values[34], 55, places=5) def test_ibtracs_interpolate_missing(self): """Read a tropical cyclone with and without interpolating missing values.""" - storm_id = '2010066S19050' + storm_id = "2010066S19050" - tc_track = tc.TCTracks.from_ibtracs_netcdf(storm_id=storm_id, interpolate_missing=False) + tc_track = tc.TCTracks.from_ibtracs_netcdf( + storm_id=storm_id, interpolate_missing=False + ) track_ds = tc_track.get_track() self.assertEqual(track_ds.time.size, 50) - self.assertAlmostEqual(track_ds['central_pressure'].values[30], 992, places=5) - self.assertAlmostEqual(track_ds['central_pressure'].values[31], 1006, places=5) + self.assertAlmostEqual(track_ds["central_pressure"].values[30], 992, places=5) + self.assertAlmostEqual(track_ds["central_pressure"].values[31], 1006, places=5) - tc_track = tc.TCTracks.from_ibtracs_netcdf(storm_id=storm_id, interpolate_missing=True) + tc_track = tc.TCTracks.from_ibtracs_netcdf( + storm_id=storm_id, interpolate_missing=True + ) track_ds = tc_track.get_track() - self.assertEqual(track_ds['time'].size, 65) - self.assertAlmostEqual(track_ds['central_pressure'].values[30], 992, places=5) - self.assertAlmostEqual(track_ds['central_pressure'].values[38], 999, places=5) - self.assertAlmostEqual(track_ds['central_pressure'].values[46], 1006, places=5) + self.assertEqual(track_ds["time"].size, 65) + self.assertAlmostEqual(track_ds["central_pressure"].values[30], 992, places=5) + self.assertAlmostEqual(track_ds["central_pressure"].values[38], 999, places=5) + self.assertAlmostEqual(track_ds["central_pressure"].values[46], 1006, places=5) def test_ibtracs_range(self): """Read several TCs.""" @@ -231,34 +255,47 @@ def test_ibtracs_range(self): self.assertEqual(tc_track.size, 0) tc_track = tc.TCTracks.from_ibtracs_netcdf( - provider='usa', storm_id=None, year_range=(1915, 1916), basin='WP') + provider="usa", storm_id=None, year_range=(1915, 1916), basin="WP" + ) self.assertEqual(tc_track.size, 0) tc_track = tc.TCTracks.from_ibtracs_netcdf( - provider='usa', year_range=(1993, 1994), basin='EP', estimate_missing=False) + provider="usa", year_range=(1993, 1994), basin="EP", estimate_missing=False + ) self.assertEqual(tc_track.size, 33) tc_track = tc.TCTracks.from_ibtracs_netcdf( - provider='usa', year_range=(1993, 1994), basin='EP', estimate_missing=True) + provider="usa", year_range=(1993, 1994), basin="EP", estimate_missing=True + ) self.assertEqual(tc_track.size, 45) def 
test_ibtracs_correct_pass(self): """Check estimate_missing option""" tc_try = tc.TCTracks.from_ibtracs_netcdf( - provider='usa', storm_id='1982267N25289', estimate_missing=True) - self.assertAlmostEqual(tc_try.data[0]['central_pressure'].values[0], 1013, places=0) - self.assertAlmostEqual(tc_try.data[0]['central_pressure'].values[5], 1008, places=0) - self.assertAlmostEqual(tc_try.data[0]['central_pressure'].values[-1], 1012, places=0) + provider="usa", storm_id="1982267N25289", estimate_missing=True + ) + self.assertAlmostEqual( + tc_try.data[0]["central_pressure"].values[0], 1013, places=0 + ) + self.assertAlmostEqual( + tc_try.data[0]["central_pressure"].values[5], 1008, places=0 + ) + self.assertAlmostEqual( + tc_try.data[0]["central_pressure"].values[-1], 1012, places=0 + ) def test_ibtracs_discard_single_points(self): """Check discard_single_points option""" passed = False for year in range(1863, 1981): tc_track_singlept = tc.TCTracks.from_ibtracs_netcdf( - provider='usa', year_range=(year,year), discard_single_points=False) - n_singlepts = np.sum([x['time'].size == 1 for x in tc_track_singlept.data]) + provider="usa", year_range=(year, year), discard_single_points=False + ) + n_singlepts = np.sum([x["time"].size == 1 for x in tc_track_singlept.data]) if n_singlepts > 0: - tc_track = tc.TCTracks.from_ibtracs_netcdf(provider='usa', year_range=(year,year)) + tc_track = tc.TCTracks.from_ibtracs_netcdf( + provider="usa", year_range=(year, year) + ) if tc_track.size == tc_track_singlept.size - n_singlepts: passed = True break @@ -270,14 +307,40 @@ def test_ibtracs_additional_variables(self): # agency-specific and that are not already considered by other parts of # `from_ibtracs_netcdf`: addtl_vars = [ - 'numobs', 'season', 'number', 'subbasin', 'name', 'source_usa', 'source_jma', - 'source_cma', 'source_hko', 'source_new', 'source_reu', 'source_bom', 'source_nad', - 'source_wel', 'source_td5', 'source_td6', 'source_ds8', 'source_neu', 'source_mlc', - 'iso_time', 'nature', 'wmo_wind', 'wmo_pres', 'wmo_agency', 'track_type', - 'main_track_sid', 'dist2land', 'landfall', 'iflag', 'storm_speed', 'storm_dir', + "numobs", + "season", + "number", + "subbasin", + "name", + "source_usa", + "source_jma", + "source_cma", + "source_hko", + "source_new", + "source_reu", + "source_bom", + "source_nad", + "source_wel", + "source_td5", + "source_td6", + "source_ds8", + "source_neu", + "source_mlc", + "iso_time", + "nature", + "wmo_wind", + "wmo_pres", + "wmo_agency", + "track_type", + "main_track_sid", + "dist2land", + "landfall", + "iflag", + "storm_speed", + "storm_dir", ] tc_track = tc.TCTracks.from_ibtracs_netcdf( - storm_id='2017242N16333', + storm_id="2017242N16333", additional_variables=addtl_vars, ) track_ds = tc_track.get_track() @@ -295,6 +358,7 @@ def test_ibtracs_additional_variables(self): self.assertEqual(track_ds["storm_speed"].values[5], 11.0) self.assertEqual(track_ds["storm_speed"].values[-1], 8.0) + class TestIO(unittest.TestCase): """Test reading of tracks from files of different formats""" @@ -307,7 +371,9 @@ def test_netcdf_io(self): path = DATA_DIR.joinpath("tc_tracks_nc") path.mkdir(exist_ok=True) tc_track = tc.TCTracks.from_ibtracs_netcdf( - provider='usa', storm_id='1988234N13299', estimate_missing=True, + provider="usa", + storm_id="1988234N13299", + estimate_missing=True, additional_variables=["numobs", "storm_speed", "nature"], ) tc_track.write_netcdf(path) @@ -328,14 +394,15 @@ def test_read_legacy_netcdf(self): anti_track = tc.TCTracks.from_netcdf(TEST_TRACKS_ANTIMERIDIAN) 
for tr in anti_track.data: - self.assertEqual(tr['basin'].shape, tr['time'].shape) - np.testing.assert_array_equal(tr['basin'], "SP") + self.assertEqual(tr["basin"].shape, tr["time"].shape) + np.testing.assert_array_equal(tr["basin"], "SP") def test_hdf5_io(self): """Test writing and reading hdf5 TCTracks instances""" path = DATA_DIR.joinpath("tc_tracks.h5") tc_track = tc.TCTracks.from_ibtracs_netcdf( - provider='usa', year_range=(1993, 1994), basin='EP', estimate_missing=True) + provider="usa", year_range=(1993, 1994), basin="EP", estimate_missing=True + ) tc_track.write_hdf5(path) tc_read = tc.TCTracks.from_hdf5(path) path.unlink() @@ -360,72 +427,82 @@ def test_hdf5_io(self): def test_from_processed_ibtracs_csv(self): tc_track = tc.TCTracks.from_processed_ibtracs_csv(TEST_TRACK) - self.assertEqual(tc_track.data[0]['time'].size, 38) - self.assertEqual(tc_track.data[0]['lon'][11], -39.60) - self.assertEqual(tc_track.data[0]['lat'][23], 14.10) - self.assertEqual(tc_track.data[0]['time_step'][7], 6) - self.assertEqual(np.max(tc_track.data[0]['radius_max_wind']), 0) - self.assertEqual(np.min(tc_track.data[0]['radius_max_wind']), 0) - self.assertEqual(tc_track.data[0]['max_sustained_wind'][21], 55) - self.assertAlmostEqual(tc_track.data[0]['central_pressure'].values[29], 976, places=0) - self.assertEqual(np.max(tc_track.data[0]['environmental_pressure']), 1010) - self.assertEqual(np.min(tc_track.data[0]['environmental_pressure']), 1010) - self.assertEqual(tc_track.data[0]['time'].dt.year[13], 1951) - self.assertEqual(tc_track.data[0]['time'].dt.month[26], 9) - self.assertEqual(tc_track.data[0]['time'].dt.day[7], 29) - self.assertEqual(tc_track.data[0].attrs['max_sustained_wind_unit'], 'kn') - self.assertEqual(tc_track.data[0].attrs['central_pressure_unit'], 'mb') - self.assertEqual(tc_track.data[0].attrs['orig_event_flag'], 1) - self.assertEqual(tc_track.data[0].attrs['name'], '1951239N12334') - self.assertEqual(tc_track.data[0].attrs['sid'], '1951239N12334') - self.assertEqual(tc_track.data[0].attrs['id_no'], 1951239012334) - self.assertEqual(tc_track.data[0].attrs['data_provider'], 'hurdat_atl') - np.testing.assert_array_equal(tc_track.data[0]['basin'], 'NA') - self.assertEqual(tc_track.data[0].attrs['id_no'], 1951239012334) - self.assertEqual(tc_track.data[0].attrs['category'], 1) + self.assertEqual(tc_track.data[0]["time"].size, 38) + self.assertEqual(tc_track.data[0]["lon"][11], -39.60) + self.assertEqual(tc_track.data[0]["lat"][23], 14.10) + self.assertEqual(tc_track.data[0]["time_step"][7], 6) + self.assertEqual(np.max(tc_track.data[0]["radius_max_wind"]), 0) + self.assertEqual(np.min(tc_track.data[0]["radius_max_wind"]), 0) + self.assertEqual(tc_track.data[0]["max_sustained_wind"][21], 55) + self.assertAlmostEqual( + tc_track.data[0]["central_pressure"].values[29], 976, places=0 + ) + self.assertEqual(np.max(tc_track.data[0]["environmental_pressure"]), 1010) + self.assertEqual(np.min(tc_track.data[0]["environmental_pressure"]), 1010) + self.assertEqual(tc_track.data[0]["time"].dt.year[13], 1951) + self.assertEqual(tc_track.data[0]["time"].dt.month[26], 9) + self.assertEqual(tc_track.data[0]["time"].dt.day[7], 29) + self.assertEqual(tc_track.data[0].attrs["max_sustained_wind_unit"], "kn") + self.assertEqual(tc_track.data[0].attrs["central_pressure_unit"], "mb") + self.assertEqual(tc_track.data[0].attrs["orig_event_flag"], 1) + self.assertEqual(tc_track.data[0].attrs["name"], "1951239N12334") + self.assertEqual(tc_track.data[0].attrs["sid"], "1951239N12334") + 
self.assertEqual(tc_track.data[0].attrs["id_no"], 1951239012334) + self.assertEqual(tc_track.data[0].attrs["data_provider"], "hurdat_atl") + np.testing.assert_array_equal(tc_track.data[0]["basin"], "NA") + self.assertEqual(tc_track.data[0].attrs["id_no"], 1951239012334) + self.assertEqual(tc_track.data[0].attrs["category"], 1) def test_from_simulations_emanuel(self): - tc_track = tc.TCTracks.from_simulations_emanuel(TEST_TRACK_EMANUEL, hemisphere='N') + tc_track = tc.TCTracks.from_simulations_emanuel( + TEST_TRACK_EMANUEL, hemisphere="N" + ) self.assertEqual(len(tc_track.data), 4) - self.assertEqual(tc_track.data[0]['time'].size, 93) - self.assertEqual(tc_track.data[0]['lon'][11], -115.57) - self.assertEqual(tc_track.data[0]['lat'][23], 10.758) - self.assertEqual(tc_track.data[0]['time_step'][7], 2.0) - self.assertEqual(tc_track.data[0]['time_step'].dtype, float) - self.assertAlmostEqual(tc_track.data[0]['radius_max_wind'][15], 44.27645788336934) - self.assertEqual(tc_track.data[0]['max_sustained_wind'][21], 27.1) - self.assertEqual(tc_track.data[0]['central_pressure'][29], 995.31) - self.assertTrue(np.all(tc_track.data[0]['environmental_pressure'] == 1010)) - self.assertTrue(np.all(tc_track.data[0]['time'].dt.year == 1950)) - self.assertEqual(tc_track.data[0]['time'].dt.month[26], 10) - self.assertEqual(tc_track.data[0]['time'].dt.day[7], 26) - self.assertEqual(tc_track.data[0].attrs['max_sustained_wind_unit'], 'kn') - self.assertEqual(tc_track.data[0].attrs['central_pressure_unit'], 'mb') - self.assertEqual(tc_track.data[0].attrs['sid'], '1') - self.assertEqual(tc_track.data[0].attrs['name'], '1') - self.assertEqual(tc_track.data[0]['basin'].dtype, ' 0)) def test_category_pass(self): """Test category computation.""" max_sus_wind = np.array([25, 30, 35, 40, 45, 45, 45, 45, 35, 25]) - max_sus_wind_unit = 'kn' + max_sus_wind_unit = "kn" cat = tc.set_category(max_sus_wind, max_sus_wind_unit) self.assertEqual(0, cat) max_sus_wind = np.array([25, 25, 25, 30, 30, 30, 30, 30, 25, 25, 20]) - max_sus_wind_unit = 'kn' + max_sus_wind_unit = "kn" cat = tc.set_category(max_sus_wind, max_sus_wind_unit) self.assertEqual(-1, cat) - max_sus_wind = np.array([80, 90, 100, 115, 120, 125, 130, - 120, 110, 80, 75, 80, 65]) - max_sus_wind_unit = 'kn' + max_sus_wind = np.array( + [80, 90, 100, 115, 120, 125, 130, 120, 110, 80, 75, 80, 65] + ) + max_sus_wind_unit = "kn" cat = tc.set_category(max_sus_wind, max_sus_wind_unit) self.assertEqual(4, cat) - max_sus_wind = np.array([ - 28.769475, 34.52337, 40.277265, 46.03116, 51.785055, 51.785055, - 51.785055, 51.785055, 40.277265, 28.769475 - ]) - max_sus_wind_unit = 'mph' + max_sus_wind = np.array( + [ + 28.769475, + 34.52337, + 40.277265, + 46.03116, + 51.785055, + 51.785055, + 51.785055, + 51.785055, + 40.277265, + 28.769475, + ] + ) + max_sus_wind_unit = "mph" cat = tc.set_category(max_sus_wind, max_sus_wind_unit) self.assertEqual(0, cat) - max_sus_wind = np.array([ - 12.86111437, 12.86111437, 12.86111437, 15.43333724, 15.43333724, - 15.43333724, 15.43333724, 15.43333724, 12.86111437, 12.86111437, - 10.2888915 - ]) - max_sus_wind_unit = 'm/s' + max_sus_wind = np.array( + [ + 12.86111437, + 12.86111437, + 12.86111437, + 15.43333724, + 15.43333724, + 15.43333724, + 15.43333724, + 15.43333724, + 12.86111437, + 12.86111437, + 10.2888915, + ] + ) + max_sus_wind_unit = "m/s" cat = tc.set_category(max_sus_wind, max_sus_wind_unit) self.assertEqual(-1, cat) - max_sus_wind = np.array([ - 148.16, 166.68, 185.2, 212.98, 222.24, 231.5, 240.76, 222.24, - 203.72, 148.16, 138.9, 
148.16, 120.38 - ]) - max_sus_wind_unit = 'km/h' + max_sus_wind = np.array( + [ + 148.16, + 166.68, + 185.2, + 212.98, + 222.24, + 231.5, + 240.76, + 222.24, + 203.72, + 148.16, + 138.9, + 148.16, + 120.38, + ] + ) + max_sus_wind_unit = "km/h" cat = tc.set_category(max_sus_wind, max_sus_wind_unit) self.assertEqual(4, cat) @@ -891,9 +1189,13 @@ def test_estimate_rmw_pass(self): tc_track = tc.TCTracks.from_processed_ibtracs_csv(TEST_TRACK) tc_track.equal_timestep() - rad_max_wind = tc.estimate_rmw( - tc_track.data[0]['radius_max_wind'].values, - tc_track.data[0]['central_pressure'].values) * NM_TO_KM + rad_max_wind = ( + tc.estimate_rmw( + tc_track.data[0]["radius_max_wind"].values, + tc_track.data[0]["central_pressure"].values, + ) + * NM_TO_KM + ) self.assertAlmostEqual(rad_max_wind[0], 87, places=0) self.assertAlmostEqual(rad_max_wind[10], 87, places=0) @@ -910,19 +1212,19 @@ def test_tracks_in_exp_pass(self): """Check if tracks in exp are filtered correctly""" # Load two tracks from ibtracks - storms = {'in': '2000233N12316', 'out': '2000160N21267'} + storms = {"in": "2000233N12316", "out": "2000160N21267"} tc_track = tc.TCTracks.from_ibtracs_netcdf(storm_id=list(storms.values())) # Define exposure from geopandas - world = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres')) + world = gpd.read_file(gpd.datasets.get_path("naturalearth_lowres")) exp_world = Exposures(world) - exp = Exposures(exp_world.gdf[exp_world.gdf['name']=='Cuba']) + exp = Exposures(exp_world.gdf[exp_world.gdf["name"] == "Cuba"]) # Compute tracks in exp tracks_in_exp = tc_track.tracks_in_exp(exp, buffer=1.0) - self.assertTrue(tracks_in_exp.get_track(storms['in'])) - self.assertFalse(tracks_in_exp.get_track(storms['out'])) + self.assertTrue(tracks_in_exp.get_track(storms["in"])) + self.assertFalse(tracks_in_exp.get_track(storms["out"])) def test_get_landfall_idx(self): """Test identification of landfalls""" @@ -930,35 +1232,45 @@ def test_get_landfall_idx(self): datetimes = list() for h in range(0, 24, 3): datetimes.append(dt(2000, 1, 1, h)) - tr_ds.coords['time'] = ('time', datetimes) + tr_ds.coords["time"] = ("time", datetimes) # no landfall - tr_ds['on_land'] = np.repeat(np.array([False]), 8) + tr_ds["on_land"] = np.repeat(np.array([False]), 8) sea_land_idx, land_sea_idx = tc._get_landfall_idx(tr_ds) - self.assertEqual([len(sea_land_idx), len(land_sea_idx)], [0,0]) + self.assertEqual([len(sea_land_idx), len(land_sea_idx)], [0, 0]) # single landfall - tr_ds['on_land'] = np.array([False, False, True, True, True, False, False, False]) + tr_ds["on_land"] = np.array( + [False, False, True, True, True, False, False, False] + ) sea_land_idx, land_sea_idx = tc._get_landfall_idx(tr_ds) - self.assertEqual([len(sea_land_idx), len(land_sea_idx)], [1,1]) + self.assertEqual([len(sea_land_idx), len(land_sea_idx)], [1, 1]) self.assertEqual([sea_land_idx, land_sea_idx], [2, 5]) # single landfall from starting point - tr_ds['on_land'] = np.array([True, True, True, True, True, False, False, False]) + tr_ds["on_land"] = np.array([True, True, True, True, True, False, False, False]) sea_land_idx, land_sea_idx = tc._get_landfall_idx(tr_ds) - self.assertEqual([len(sea_land_idx), len(land_sea_idx)], [0,0]) - sea_land_idx, land_sea_idx = tc._get_landfall_idx(tr_ds, include_starting_landfall=True) + self.assertEqual([len(sea_land_idx), len(land_sea_idx)], [0, 0]) + sea_land_idx, land_sea_idx = tc._get_landfall_idx( + tr_ds, include_starting_landfall=True + ) self.assertEqual([sea_land_idx, land_sea_idx], [0, 5]) # two landfalls - 
tr_ds['on_land'] = np.array([False, True, True, False, False, False, True, True]) + tr_ds["on_land"] = np.array( + [False, True, True, False, False, False, True, True] + ) sea_land_idx, land_sea_idx = tc._get_landfall_idx(tr_ds) - self.assertEqual([len(sea_land_idx), len(land_sea_idx)], [2,2]) - self.assertEqual(sea_land_idx.tolist(), [1,6]) - self.assertEqual(land_sea_idx.tolist(), [3,8]) + self.assertEqual([len(sea_land_idx), len(land_sea_idx)], [2, 2]) + self.assertEqual(sea_land_idx.tolist(), [1, 6]) + self.assertEqual(land_sea_idx.tolist(), [3, 8]) # two landfalls, starting on land - tr_ds['on_land'] = np.array([True, True, False, False, True, True, False, False]) + tr_ds["on_land"] = np.array( + [True, True, False, False, True, True, False, False] + ) sea_land_idx, land_sea_idx = tc._get_landfall_idx(tr_ds) self.assertEqual([sea_land_idx, land_sea_idx], [4, 6]) - sea_land_idx, land_sea_idx = tc._get_landfall_idx(tr_ds, include_starting_landfall=True) - self.assertEqual(sea_land_idx.tolist(), [0,4]) - self.assertEqual(land_sea_idx.tolist(), [2,6]) + sea_land_idx, land_sea_idx = tc._get_landfall_idx( + tr_ds, include_starting_landfall=True + ) + self.assertEqual(sea_land_idx.tolist(), [0, 4]) + self.assertEqual(land_sea_idx.tolist(), [2, 6]) def test_track_land_params(self): """Test identification of points on land and distance since landfall""" @@ -969,24 +1281,22 @@ def test_track_land_params(self): lon_shift = np.array([-360, 0, 360]) # ensure both points are considered on land as is np.testing.assert_array_equal( - u_coord.coord_on_land(lat = lat_test, lon = lon_test), - on_land + u_coord.coord_on_land(lat=lat_test, lon=lon_test), on_land ) # independently on shifts by 360 degrees in longitude np.testing.assert_array_equal( - u_coord.coord_on_land(lat = lat_test, lon = lon_test + lon_shift), - on_land + u_coord.coord_on_land(lat=lat_test, lon=lon_test + lon_shift), on_land ) np.testing.assert_array_equal( - u_coord.coord_on_land(lat = lat_test, lon = lon_test - lon_shift), - on_land + u_coord.coord_on_land(lat=lat_test, lon=lon_test - lon_shift), on_land ) # also when longitude is within correct range np.testing.assert_array_equal( - u_coord.coord_on_land(lat = lat_test, lon = u_coord.lon_normalize(lon_test)), - on_land + u_coord.coord_on_land(lat=lat_test, lon=u_coord.lon_normalize(lon_test)), + on_land, ) + # Execute Tests if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestFuncs) diff --git a/climada/hazard/test/test_tc_tracks_synth.py b/climada/hazard/test/test_tc_tracks_synth.py index 1b2cca2c6..f0b5c0b44 100644 --- a/climada/hazard/test/test_tc_tracks_synth.py +++ b/climada/hazard/test/test_tc_tracks_synth.py @@ -29,17 +29,16 @@ import climada.hazard.tc_tracks as tc import climada.hazard.tc_tracks_synth as tc_synth import climada.util.coordinates -from climada.util.constants import TC_ANDREW_FL from climada.hazard.test import download_ibtracs +from climada.util.constants import TC_ANDREW_FL - -DATA_DIR = Path(__file__).parent.joinpath('data') +DATA_DIR = Path(__file__).parent.joinpath("data") TEST_TRACK = DATA_DIR.joinpath("trac_brb_test.csv") TEST_TRACK_SHORT = DATA_DIR.joinpath("trac_short_test.csv") -TEST_TRACK_DECAY_END_OCEAN = DATA_DIR.joinpath('1997018S11059_gen3.nc') -TEST_TRACK_DECAY_END_OCEAN_HIST = DATA_DIR.joinpath('1997018S11059.nc') -TEST_TRACK_DECAY_PENV_GT_PCEN = DATA_DIR.joinpath('1988021S12080_gen2.nc') -TEST_TRACK_DECAY_PENV_GT_PCEN_HIST = DATA_DIR.joinpath('1988021S12080.nc') +TEST_TRACK_DECAY_END_OCEAN = 
DATA_DIR.joinpath("1997018S11059_gen3.nc") +TEST_TRACK_DECAY_END_OCEAN_HIST = DATA_DIR.joinpath("1997018S11059.nc") +TEST_TRACK_DECAY_PENV_GT_PCEN = DATA_DIR.joinpath("1988021S12080_gen2.nc") +TEST_TRACK_DECAY_PENV_GT_PCEN_HIST = DATA_DIR.joinpath("1988021S12080.nc") class TestDecay(unittest.TestCase): @@ -56,17 +55,29 @@ def test_apply_decay_no_landfall_pass(self): extent=extent, resolution=10 ) tc.track_land_params(tc_track.data[0], land_geom) - tc_track.data[0]['orig_event_flag'] = False + tc_track.data[0]["orig_event_flag"] = False tc_ref = tc_track.data[0].copy() tc_synth._apply_land_decay(tc_track.data, dict(), dict(), land_geom) - self.assertTrue(np.allclose(tc_track.data[0]['max_sustained_wind'].values, - tc_ref['max_sustained_wind'].values)) - self.assertTrue(np.allclose(tc_track.data[0]['central_pressure'].values, - tc_ref['central_pressure'].values)) - self.assertTrue(np.allclose(tc_track.data[0]['environmental_pressure'].values, - tc_ref['environmental_pressure'].values)) - self.assertTrue(np.all(np.isnan(tc_track.data[0]['dist_since_lf'].values))) + self.assertTrue( + np.allclose( + tc_track.data[0]["max_sustained_wind"].values, + tc_ref["max_sustained_wind"].values, + ) + ) + self.assertTrue( + np.allclose( + tc_track.data[0]["central_pressure"].values, + tc_ref["central_pressure"].values, + ) + ) + self.assertTrue( + np.allclose( + tc_track.data[0]["environmental_pressure"].values, + tc_ref["environmental_pressure"].values, + ) + ) + self.assertTrue(np.all(np.isnan(tc_track.data[0]["dist_since_lf"].values))) def test_apply_decay_pass(self): """Test _apply_land_decay against MATLAB reference.""" @@ -77,7 +88,7 @@ def test_apply_decay_pass(self): 1: 0.0038950967656296597, 2: 0.0038950967656296597, 3: 0.0038950967656296597, - 5: 0.0038950967656296597 + 5: 0.0038950967656296597, } p_rel = { @@ -87,62 +98,141 @@ def test_apply_decay_pass(self): 1: (1.0499941, 0.007978940084158488), 2: (1.0499941, 0.007978940084158488), 3: (1.0499941, 0.007978940084158488), - 5: (1.0499941, 0.007978940084158488) + 5: (1.0499941, 0.007978940084158488), } tc_track = tc.TCTracks.from_processed_ibtracs_csv(TC_ANDREW_FL) - tc_track.data[0]['orig_event_flag'] = False + tc_track.data[0]["orig_event_flag"] = False extent = tc_track.get_extent() land_geom = climada.util.coordinates.get_land_geometry( extent=extent, resolution=10 ) tc.track_land_params(tc_track.data[0], land_geom) - tc_synth._apply_land_decay(tc_track.data, v_rel, p_rel, land_geom, - s_rel=True, check_plot=False) - - p_ref = np.array([ - 1.010000000000000, 1.009000000000000, 1.008000000000000, - 1.006000000000000, 1.003000000000000, 1.002000000000000, - 1.001000000000000, 1.000000000000000, 1.000000000000000, - 1.001000000000000, 1.002000000000000, 1.005000000000000, - 1.007000000000000, 1.010000000000000, 1.010000000000000, - 1.010000000000000, 1.010000000000000, 1.010000000000000, - 1.010000000000000, 1.007000000000000, 1.004000000000000, - 1.000000000000000, 0.994000000000000, 0.981000000000000, - 0.969000000000000, 0.961000000000000, 0.947000000000000, - 0.933000000000000, 0.922000000000000, 0.930000000000000, - 0.937000000000000, 0.951000000000000, 0.947000000000000, - 0.943000000000000, 0.948000000000000, 0.946000000000000, - 0.941000000000000, 0.937000000000000, 0.955000000000000, - 0.9741457117, 0.99244068917, 1.00086729492, 1.00545853355, - 1.00818354609, 1.00941850023, 1.00986192053, 1.00998400565 - ]) * 1e3 - - self.assertTrue(np.allclose(p_ref, tc_track.data[0]['central_pressure'].values)) - - v_ref = np.array([ - 
0.250000000000000, 0.300000000000000, 0.300000000000000, - 0.350000000000000, 0.350000000000000, 0.400000000000000, - 0.450000000000000, 0.450000000000000, 0.450000000000000, - 0.450000000000000, 0.450000000000000, 0.450000000000000, - 0.450000000000000, 0.400000000000000, 0.400000000000000, - 0.400000000000000, 0.400000000000000, 0.450000000000000, - 0.450000000000000, 0.500000000000000, 0.500000000000000, - 0.550000000000000, 0.650000000000000, 0.800000000000000, - 0.950000000000000, 1.100000000000000, 1.300000000000000, - 1.450000000000000, 1.500000000000000, 1.250000000000000, - 1.300000000000000, 1.150000000000000, 1.150000000000000, - 1.150000000000000, 1.150000000000000, 1.200000000000000, - 1.250000000000000, 1.250000000000000, 1.200000000000000, - 0.9737967353, 0.687255951, 0.4994850556, 0.3551480462, 0.2270548036, - 0.1302099557, 0.0645385918, 0.0225325851 - ]) * 1e2 - - self.assertTrue(np.allclose(v_ref, tc_track.data[0]['max_sustained_wind'].values)) - - cat_ref = tc.set_category(tc_track.data[0]['max_sustained_wind'].values, - tc_track.data[0].attrs['max_sustained_wind_unit']) - self.assertEqual(cat_ref, tc_track.data[0].attrs['category']) + tc_synth._apply_land_decay( + tc_track.data, v_rel, p_rel, land_geom, s_rel=True, check_plot=False + ) + + p_ref = ( + np.array( + [ + 1.010000000000000, + 1.009000000000000, + 1.008000000000000, + 1.006000000000000, + 1.003000000000000, + 1.002000000000000, + 1.001000000000000, + 1.000000000000000, + 1.000000000000000, + 1.001000000000000, + 1.002000000000000, + 1.005000000000000, + 1.007000000000000, + 1.010000000000000, + 1.010000000000000, + 1.010000000000000, + 1.010000000000000, + 1.010000000000000, + 1.010000000000000, + 1.007000000000000, + 1.004000000000000, + 1.000000000000000, + 0.994000000000000, + 0.981000000000000, + 0.969000000000000, + 0.961000000000000, + 0.947000000000000, + 0.933000000000000, + 0.922000000000000, + 0.930000000000000, + 0.937000000000000, + 0.951000000000000, + 0.947000000000000, + 0.943000000000000, + 0.948000000000000, + 0.946000000000000, + 0.941000000000000, + 0.937000000000000, + 0.955000000000000, + 0.9741457117, + 0.99244068917, + 1.00086729492, + 1.00545853355, + 1.00818354609, + 1.00941850023, + 1.00986192053, + 1.00998400565, + ] + ) + * 1e3 + ) + + self.assertTrue(np.allclose(p_ref, tc_track.data[0]["central_pressure"].values)) + + v_ref = ( + np.array( + [ + 0.250000000000000, + 0.300000000000000, + 0.300000000000000, + 0.350000000000000, + 0.350000000000000, + 0.400000000000000, + 0.450000000000000, + 0.450000000000000, + 0.450000000000000, + 0.450000000000000, + 0.450000000000000, + 0.450000000000000, + 0.450000000000000, + 0.400000000000000, + 0.400000000000000, + 0.400000000000000, + 0.400000000000000, + 0.450000000000000, + 0.450000000000000, + 0.500000000000000, + 0.500000000000000, + 0.550000000000000, + 0.650000000000000, + 0.800000000000000, + 0.950000000000000, + 1.100000000000000, + 1.300000000000000, + 1.450000000000000, + 1.500000000000000, + 1.250000000000000, + 1.300000000000000, + 1.150000000000000, + 1.150000000000000, + 1.150000000000000, + 1.150000000000000, + 1.200000000000000, + 1.250000000000000, + 1.250000000000000, + 1.200000000000000, + 0.9737967353, + 0.687255951, + 0.4994850556, + 0.3551480462, + 0.2270548036, + 0.1302099557, + 0.0645385918, + 0.0225325851, + ] + ) + * 1e2 + ) + + self.assertTrue( + np.allclose(v_ref, tc_track.data[0]["max_sustained_wind"].values) + ) + + cat_ref = tc.set_category( + tc_track.data[0]["max_sustained_wind"].values, + 
tc_track.data[0].attrs["max_sustained_wind_unit"], + ) + self.assertEqual(cat_ref, tc_track.data[0].attrs["category"]) def test_func_decay_p_pass(self): """Test decay function for pressure with its inverse.""" @@ -152,7 +242,9 @@ def test_func_decay_p_pass(self): res = tc_synth._decay_p_function(s_coef, b_coef, x_val) b_coef_res = tc_synth._solve_decay_p_function(s_coef, res, x_val) - self.assertTrue(np.allclose(b_coef_res[1:], np.ones((x_val.size - 1,)) * b_coef)) + self.assertTrue( + np.allclose(b_coef_res[1:], np.ones((x_val.size - 1,)) * b_coef) + ) self.assertTrue(np.isnan(b_coef_res[0])) def test_func_decay_v_pass(self): @@ -162,38 +254,46 @@ def test_func_decay_v_pass(self): res = tc_synth._decay_v_function(a_coef, x_val) a_coef_res = tc_synth._solve_decay_v_function(res, x_val) - self.assertTrue(np.allclose(a_coef_res[1:], np.ones((x_val.size - 1,)) * a_coef)) + self.assertTrue( + np.allclose(a_coef_res[1:], np.ones((x_val.size - 1,)) * a_coef) + ) self.assertTrue(np.isnan(a_coef_res[0])) def test_decay_ps_value(self): """Test the calculation of S in pressure decay.""" on_land_idx = 5 tr_ds = xr.Dataset() - tr_ds.coords['time'] = ('time', np.arange(10)) - tr_ds['central_pressure'] = ('time', np.arange(10, 20)) - tr_ds['environmental_pressure'] = ('time', np.arange(20, 30)) - tr_ds['on_land'] = ('time', np.zeros((10,)).astype(bool)) + tr_ds.coords["time"] = ("time", np.arange(10)) + tr_ds["central_pressure"] = ("time", np.arange(10, 20)) + tr_ds["environmental_pressure"] = ("time", np.arange(20, 30)) + tr_ds["on_land"] = ("time", np.zeros((10,)).astype(bool)) tr_ds.on_land[on_land_idx] = True p_landfall = 100 res = tc_synth._calc_decay_ps_value(tr_ds, p_landfall, on_land_idx, s_rel=True) - self.assertEqual(res, float(tr_ds['environmental_pressure'][on_land_idx] / p_landfall)) + self.assertEqual( + res, float(tr_ds["environmental_pressure"][on_land_idx] / p_landfall) + ) res = tc_synth._calc_decay_ps_value(tr_ds, p_landfall, on_land_idx, s_rel=False) - self.assertEqual(res, float(tr_ds['central_pressure'][on_land_idx] / p_landfall)) + self.assertEqual( + res, float(tr_ds["central_pressure"][on_land_idx] / p_landfall) + ) def test_calc_decay_no_landfall_pass(self): """Test _calc_land_decay with no historical tracks with landfall""" tc_track = tc.TCTracks.from_processed_ibtracs_csv(TEST_TRACK_SHORT) - expected_warning = 'only %s historical tracks were provided. ' % len(tc_track.data) + expected_warning = "only %s historical tracks were provided. 
" % len( + tc_track.data + ) extent = tc_track.get_extent() land_geom = climada.util.coordinates.get_land_geometry( extent=extent, resolution=10 ) tc.track_land_params(tc_track.data[0], land_geom) - with self.assertLogs('climada.hazard.tc_tracks_synth', level='INFO') as cm: + with self.assertLogs("climada.hazard.tc_tracks_synth", level="INFO") as cm: tc_synth._calc_land_decay(tc_track.data, land_geom) self.assertIn(expected_warning, cm.output[0]) - self.assertIn('No historical track with landfall.', cm.output[1]) + self.assertIn("No historical track with landfall.", cm.output[1]) def test_calc_land_decay_pass(self): """Test _calc_land_decay with environmental pressure function.""" @@ -232,56 +332,98 @@ def test_decay_values_andrew_pass(self): s_cell_2 = 8 * [1.047120451927185] s_cell = s_cell_1 + s_cell_2 p_vs_lf_time_relative = [ - 1.0149413020277482, 1.018848167539267, 1.037696335078534, - 1.0418848167539267, 1.043979057591623, 1.0450261780104713, - 1.0460732984293193, 1.0471204188481675, 1.0471204188481675 + 1.0149413020277482, + 1.018848167539267, + 1.037696335078534, + 1.0418848167539267, + 1.043979057591623, + 1.0450261780104713, + 1.0460732984293193, + 1.0471204188481675, + 1.0471204188481675, ] self.assertEqual(list(p_lf.keys()), [ss_category]) - self.assertEqual(p_lf[ss_category][0], array.array('f', s_cell)) - self.assertEqual(p_lf[ss_category][1], array.array('f', p_vs_lf_time_relative)) + self.assertEqual(p_lf[ss_category][0], array.array("f", s_cell)) + self.assertEqual(p_lf[ss_category][1], array.array("f", p_vs_lf_time_relative)) v_vs_lf_time_relative = [ - 0.8846153846153846, 0.6666666666666666, 0.4166666666666667, - 0.2916666666666667, 0.250000000000000, 0.250000000000000, - 0.20833333333333334, 0.16666666666666666, 0.16666666666666666 + 0.8846153846153846, + 0.6666666666666666, + 0.4166666666666667, + 0.2916666666666667, + 0.250000000000000, + 0.250000000000000, + 0.20833333333333334, + 0.16666666666666666, + 0.16666666666666666, ] self.assertEqual(list(v_lf.keys()), [ss_category]) - self.assertEqual(v_lf[ss_category], array.array('f', v_vs_lf_time_relative)) - - x_val_ref = np.array([ - 95.9512939453125, 53.624916076660156, 143.09530639648438, - 225.0262908935547, 312.5832824707031, 427.43109130859375, - 570.1857299804688, 750.3827514648438, 1020.5431518554688 - ]) + self.assertEqual(v_lf[ss_category], array.array("f", v_vs_lf_time_relative)) + + x_val_ref = np.array( + [ + 95.9512939453125, + 53.624916076660156, + 143.09530639648438, + 225.0262908935547, + 312.5832824707031, + 427.43109130859375, + 570.1857299804688, + 750.3827514648438, + 1020.5431518554688, + ] + ) self.assertEqual(list(x_val.keys()), [ss_category]) self.assertTrue(np.allclose(x_val[ss_category], x_val_ref)) def test_decay_calc_coeff(self): """Test _decay_calc_coeff against MATLAB""" x_val = { - 4: np.array([ - 53.57314960249573, 142.97903059281566, 224.76733726289183, - 312.14621544207563, 426.6757021862584, 568.9358305779094, - 748.3713215157885, 1016.9904230811956 - ]) + 4: np.array( + [ + 53.57314960249573, + 142.97903059281566, + 224.76733726289183, + 312.14621544207563, + 426.6757021862584, + 568.9358305779094, + 748.3713215157885, + 1016.9904230811956, + ] + ) } v_lf = { - 4: np.array([ - 0.6666666666666666, 0.4166666666666667, 0.2916666666666667, - 0.250000000000000, 0.250000000000000, 0.20833333333333334, - 0.16666666666666666, 0.16666666666666666 - ]) + 4: np.array( + [ + 0.6666666666666666, + 0.4166666666666667, + 0.2916666666666667, + 0.250000000000000, + 0.250000000000000, + 
0.20833333333333334, + 0.16666666666666666, + 0.16666666666666666, + ] + ) } p_lf = { - 4: (8 * [1.0471204188481675], - np.array([ - 1.018848167539267, 1.037696335078534, 1.0418848167539267, - 1.043979057591623, 1.0450261780104713, 1.0460732984293193, - 1.0471204188481675, 1.0471204188481675 - ]) + 4: ( + 8 * [1.0471204188481675], + np.array( + [ + 1.018848167539267, + 1.037696335078534, + 1.0418848167539267, + 1.043979057591623, + 1.0450261780104713, + 1.0460732984293193, + 1.0471204188481675, + 1.0471204188481675, + ] + ), ) } @@ -298,41 +440,141 @@ def test_decay_calc_coeff(self): def test_wrong_decay_pass(self): """Test decay not implemented when coefficient < 1""" - track = tc.TCTracks.from_ibtracs_netcdf(provider='usa', storm_id='1975178N28281') + track = tc.TCTracks.from_ibtracs_netcdf( + provider="usa", storm_id="1975178N28281" + ) track_gen = track.data[0] - track_gen['lat'] = np.array([ - 28.20340431, 28.7915261, 29.38642458, 29.97836984, 30.56844404, - 31.16265292, 31.74820301, 32.34449825, 32.92261894, 33.47430891, - 34.01492525, 34.56789399, 35.08810845, 35.55965893, 35.94835174, - 36.29355848, 36.45379561, 36.32473812, 36.07552209, 35.92224784, - 35.84144186, 35.78298537, 35.86090718, 36.02440372, 36.37555559, - 37.06207765, 37.73197352, 37.97524273, 38.05560287, 38.21901208, - 38.31486156, 38.30813367, 38.28481808, 38.28410366, 38.25894812, - 38.20583372, 38.22741099, 38.39970022, 38.68367797, 39.08329904, - 39.41434629, 39.424984, 39.31327716, 39.30336335, 39.31714429, - 39.27031932, 39.30848775, 39.48759833, 39.73326595, 39.96187967, - 40.26954226, 40.76882202, 41.40398607, 41.93809726, 42.60395785, - 43.57074792, 44.63816143, 45.61450458, 46.68528511, 47.89209365, - 49.15580502 - ]) - track_gen['lon'] = np.array([ - -79.20514075, -79.25243311, -79.28393082, -79.32324646, - -79.36668585, -79.41495519, -79.45198688, -79.40580325, - -79.34965443, -79.36938122, -79.30294825, -79.06809546, - -78.70281969, -78.29418936, -77.82170609, -77.30034709, - -76.79004969, -76.37038827, -75.98641014, -75.58383356, - -75.18310414, -74.7974524, -74.3797645, -73.86393572, -73.37910948, - -73.01059003, -72.77051313, -72.68011328, -72.66864779, - -72.62579773, -72.56307717, -72.46607618, -72.35871353, - -72.31120649, -72.15537583, -71.75577051, -71.25287498, - -70.75527907, -70.34788946, -70.17518421, -70.04446577, - -69.76582749, -69.44372386, -69.15881376, -68.84351922, - -68.47890287, -68.04184565, -67.53541437, -66.94008642, - -66.25596075, -65.53496635, -64.83491802, -64.12962685, - -63.54118808, -62.72934383, -61.34915091, -59.72580755, - -58.24404252, -56.71972992, -55.0809336, -53.31524758 - ]) + track_gen["lat"] = np.array( + [ + 28.20340431, + 28.7915261, + 29.38642458, + 29.97836984, + 30.56844404, + 31.16265292, + 31.74820301, + 32.34449825, + 32.92261894, + 33.47430891, + 34.01492525, + 34.56789399, + 35.08810845, + 35.55965893, + 35.94835174, + 36.29355848, + 36.45379561, + 36.32473812, + 36.07552209, + 35.92224784, + 35.84144186, + 35.78298537, + 35.86090718, + 36.02440372, + 36.37555559, + 37.06207765, + 37.73197352, + 37.97524273, + 38.05560287, + 38.21901208, + 38.31486156, + 38.30813367, + 38.28481808, + 38.28410366, + 38.25894812, + 38.20583372, + 38.22741099, + 38.39970022, + 38.68367797, + 39.08329904, + 39.41434629, + 39.424984, + 39.31327716, + 39.30336335, + 39.31714429, + 39.27031932, + 39.30848775, + 39.48759833, + 39.73326595, + 39.96187967, + 40.26954226, + 40.76882202, + 41.40398607, + 41.93809726, + 42.60395785, + 43.57074792, + 44.63816143, + 45.61450458, + 
46.68528511, + 47.89209365, + 49.15580502, + ] + ) + track_gen["lon"] = np.array( + [ + -79.20514075, + -79.25243311, + -79.28393082, + -79.32324646, + -79.36668585, + -79.41495519, + -79.45198688, + -79.40580325, + -79.34965443, + -79.36938122, + -79.30294825, + -79.06809546, + -78.70281969, + -78.29418936, + -77.82170609, + -77.30034709, + -76.79004969, + -76.37038827, + -75.98641014, + -75.58383356, + -75.18310414, + -74.7974524, + -74.3797645, + -73.86393572, + -73.37910948, + -73.01059003, + -72.77051313, + -72.68011328, + -72.66864779, + -72.62579773, + -72.56307717, + -72.46607618, + -72.35871353, + -72.31120649, + -72.15537583, + -71.75577051, + -71.25287498, + -70.75527907, + -70.34788946, + -70.17518421, + -70.04446577, + -69.76582749, + -69.44372386, + -69.15881376, + -68.84351922, + -68.47890287, + -68.04184565, + -67.53541437, + -66.94008642, + -66.25596075, + -65.53496635, + -64.83491802, + -64.12962685, + -63.54118808, + -62.72934383, + -61.34915091, + -59.72580755, + -58.24404252, + -56.71972992, + -55.0809336, + -53.31524758, + ] + ) v_rel = { 1: 0.002249541544102336, @@ -352,22 +594,26 @@ def test_wrong_decay_pass(self): 5: (1.0894914184297835, 0.004315034379018768), 4: (1.0714354641894077, 0.002783787561718677), } - track_gen.attrs['orig_event_flag'] = False + track_gen.attrs["orig_event_flag"] = False - cp_ref = np.array([1012., 1012.]) + cp_ref = np.array([1012.0, 1012.0]) single_track = tc.TCTracks([track_gen]) extent = single_track.get_extent() land_geom = climada.util.coordinates.get_land_geometry( extent=extent, resolution=10 ) - track_res = tc_synth._apply_decay_coeffs(track_gen, v_rel, p_rel, land_geom, True) - self.assertTrue(np.array_equal(cp_ref, track_res['central_pressure'][9:11])) + track_res = tc_synth._apply_decay_coeffs( + track_gen, v_rel, p_rel, land_geom, True + ) + self.assertTrue(np.array_equal(cp_ref, track_res["central_pressure"][9:11])) def test_decay_end_ocean(self): """Test decay is applied after landfall if the track ends over the ocean""" # this track was generated without applying landfall decay # (i.e. 
with decay=False in tc_synth.calc_perturbed_trajectories) - tracks_synth_nodecay_example = tc.TCTracks.from_netcdf(TEST_TRACK_DECAY_END_OCEAN) + tracks_synth_nodecay_example = tc.TCTracks.from_netcdf( + TEST_TRACK_DECAY_END_OCEAN + ) # apply landfall decay extent = tracks_synth_nodecay_example.get_extent() @@ -378,7 +624,8 @@ def test_decay_end_ocean(self): tracks_synth_nodecay_example.data, tc_synth.LANDFALL_DECAY_V, tc_synth.LANDFALL_DECAY_P, - land_geom) + land_geom, + ) track = tracks_synth_nodecay_example.data[0] # read its corresponding historical track @@ -390,34 +637,53 @@ def test_decay_end_ocean(self): lf_idx = tc._get_landfall_idx(track) last_lf_idx = lf_idx[-1][1] # only suitable if track ends over the ocean - self.assertTrue(last_lf_idx < track['time'].size-2, - 'This test should be re-written, data not suitable') + self.assertTrue( + last_lf_idx < track["time"].size - 2, + "This test should be re-written, data not suitable", + ) # check pressure and wind values - p_hist_end = track_hist['central_pressure'].values[last_lf_idx:] - p_synth_end = track['central_pressure'].values[last_lf_idx:] + p_hist_end = track_hist["central_pressure"].values[last_lf_idx:] + p_synth_end = track["central_pressure"].values[last_lf_idx:] self.assertTrue(np.all(p_synth_end > p_hist_end)) - v_hist_end = track_hist['max_sustained_wind'].values[last_lf_idx:] - v_synth_end = track['max_sustained_wind'].values[last_lf_idx:] + v_hist_end = track_hist["max_sustained_wind"].values[last_lf_idx:] + v_synth_end = track["max_sustained_wind"].values[last_lf_idx:] self.assertTrue(np.all(v_synth_end < v_hist_end)) # Part 2: is landfall applied in all landfalls? - p_hist_lf = np.concatenate([track_hist['central_pressure'].values[lfs:lfe] - for lfs,lfe in zip(*lf_idx)]) - p_synth_lf = np.concatenate([track['central_pressure'].values[lfs:lfe] - for lfs,lfe in zip(*lf_idx)]) - v_hist_lf = np.concatenate([track_hist['max_sustained_wind'].values[lfs:lfe] - for lfs,lfe in zip(*lf_idx)]) - v_synth_lf = np.concatenate([track['max_sustained_wind'].values[lfs:lfe] - for lfs,lfe in zip(*lf_idx)]) + p_hist_lf = np.concatenate( + [ + track_hist["central_pressure"].values[lfs:lfe] + for lfs, lfe in zip(*lf_idx) + ] + ) + p_synth_lf = np.concatenate( + [track["central_pressure"].values[lfs:lfe] for lfs, lfe in zip(*lf_idx)] + ) + v_hist_lf = np.concatenate( + [ + track_hist["max_sustained_wind"].values[lfs:lfe] + for lfs, lfe in zip(*lf_idx) + ] + ) + v_synth_lf = np.concatenate( + [track["max_sustained_wind"].values[lfs:lfe] for lfs, lfe in zip(*lf_idx)] + ) self.assertTrue(np.all(p_synth_lf > p_hist_lf)) self.assertTrue(np.all(v_synth_lf < v_hist_lf)) - self.assertTrue(np.all(track['central_pressure'].values <= track['environmental_pressure'].values)) + self.assertTrue( + np.all( + track["central_pressure"].values + <= track["environmental_pressure"].values + ) + ) def test_decay_penv_gt_pcen(self): """Test decay is applied if penv at end of landfall < pcen just before landfall""" # this track was generated without applying landfall decay # (i.e. 
with decay=False in tc_synth.calc_perturbed_trajectories) - tracks_synth_nodecay_example = tc.TCTracks.from_netcdf(TEST_TRACK_DECAY_PENV_GT_PCEN) + tracks_synth_nodecay_example = tc.TCTracks.from_netcdf( + TEST_TRACK_DECAY_PENV_GT_PCEN + ) # apply landfall decay extent = tracks_synth_nodecay_example.get_extent() @@ -428,7 +694,8 @@ def test_decay_penv_gt_pcen(self): tracks_synth_nodecay_example.data, tc_synth.LANDFALL_DECAY_V, tc_synth.LANDFALL_DECAY_P, - land_geom) + land_geom, + ) track = tracks_synth_nodecay_example.data[0] # read its corresponding historical track @@ -441,41 +708,50 @@ def test_decay_penv_gt_pcen(self): start_lf_idx, end_lf_idx = lf_idx[0][0], lf_idx[1][0] # check pressure and wind values - p_hist_end = track_hist['central_pressure'].values[end_lf_idx:] - p_synth_end = track['central_pressure'].values[end_lf_idx:] + p_hist_end = track_hist["central_pressure"].values[end_lf_idx:] + p_synth_end = track["central_pressure"].values[end_lf_idx:] self.assertTrue(np.all(p_synth_end > p_hist_end)) - v_hist_end = track_hist['max_sustained_wind'].values[end_lf_idx:] - v_synth_end = track['max_sustained_wind'].values[end_lf_idx:] + v_hist_end = track_hist["max_sustained_wind"].values[end_lf_idx:] + v_synth_end = track["max_sustained_wind"].values[end_lf_idx:] self.assertTrue(np.all(v_synth_end < v_hist_end)) # Part 2: is landfall applied in all landfalls? # central pressure - p_hist_lf = track_hist['central_pressure'].values[start_lf_idx:end_lf_idx] - p_synth_lf = track['central_pressure'].values[start_lf_idx:end_lf_idx] + p_hist_lf = track_hist["central_pressure"].values[start_lf_idx:end_lf_idx] + p_synth_lf = track["central_pressure"].values[start_lf_idx:end_lf_idx] # central pressure should be higher in synth than hist; unless it was set to p_env - self.assertTrue(np.all( - np.logical_or(p_synth_lf > p_hist_lf, - p_synth_lf == track['environmental_pressure'].values[start_lf_idx:end_lf_idx]) - )) + self.assertTrue( + np.all( + np.logical_or( + p_synth_lf > p_hist_lf, + p_synth_lf + == track["environmental_pressure"].values[start_lf_idx:end_lf_idx], + ) + ) + ) # but for this track is should be higher towards the end self.assertTrue(np.any(p_synth_lf > p_hist_lf)) self.assertTrue(np.all(p_synth_lf >= p_hist_lf)) # wind speed - v_hist_lf = track_hist['max_sustained_wind'].values[start_lf_idx:end_lf_idx] - v_synth_lf = track['max_sustained_wind'].values[start_lf_idx:end_lf_idx] + v_hist_lf = track_hist["max_sustained_wind"].values[start_lf_idx:end_lf_idx] + v_synth_lf = track["max_sustained_wind"].values[start_lf_idx:end_lf_idx] # wind should decrease over time for that landfall - v_before_lf = track_hist['max_sustained_wind'].values[start_lf_idx-1] + v_before_lf = track_hist["max_sustained_wind"].values[start_lf_idx - 1] self.assertTrue(np.all(v_synth_lf[1:] < v_before_lf)) # and wind speed should be lower in synth than hist at the end of and after this landfall - self.assertTrue(np.all( - track['max_sustained_wind'].values[end_lf_idx:] < track_hist['max_sustained_wind'].values[end_lf_idx:] - )) + self.assertTrue( + np.all( + track["max_sustained_wind"].values[end_lf_idx:] + < track_hist["max_sustained_wind"].values[end_lf_idx:] + ) + ) # finally, central minus env pressure cannot increase during this landfall - p_env_lf = track['central_pressure'].values[start_lf_idx:end_lf_idx] + p_env_lf = track["central_pressure"].values[start_lf_idx:end_lf_idx] self.assertTrue(np.all(np.diff(p_env_lf - p_synth_lf) <= 0)) + class TestSynth(unittest.TestCase): @classmethod @@ -484,69 +760,80 @@ 
def setUpClass(cls): def test_angle_funs_pass(self): """Test functions used by random walk code""" - self.assertAlmostEqual(tc_synth._get_bearing_angle(np.array([15, 20]), - np.array([0, 0]))[0], 90.0) - self.assertAlmostEqual(tc_synth._get_bearing_angle(np.array([20, 20]), - np.array([0, 5]))[0], 0.0) - self.assertAlmostEqual(tc_synth._get_bearing_angle(np.array([0, 0.00001]), - np.array([0, 0.00001]))[0], 45) + self.assertAlmostEqual( + tc_synth._get_bearing_angle(np.array([15, 20]), np.array([0, 0]))[0], 90.0 + ) + self.assertAlmostEqual( + tc_synth._get_bearing_angle(np.array([20, 20]), np.array([0, 5]))[0], 0.0 + ) + self.assertAlmostEqual( + tc_synth._get_bearing_angle(np.array([0, 0.00001]), np.array([0, 0.00001]))[ + 0 + ], + 45, + ) pt_north = tc_synth._get_destination_points(0, 0, 0, 1) self.assertAlmostEqual(pt_north[0], 0.0) self.assertAlmostEqual(pt_north[1], 1.0) pt_west = tc_synth._get_destination_points(0, 0, -90, 3) self.assertAlmostEqual(pt_west[0], -3.0) self.assertAlmostEqual(pt_west[1], 0.0) - pt_test = tc_synth._get_destination_points(8.523224, 47.371102, - 151.14161003, 52.80812463) + pt_test = tc_synth._get_destination_points( + 8.523224, 47.371102, 151.14161003, 52.80812463 + ) self.assertAlmostEqual(pt_test[0], 31.144113) self.assertAlmostEqual(pt_test[1], -1.590347) def test_random_no_landfall_pass(self): """Test calc_perturbed_trajectories with decay and no historical tracks with landfall""" tc_track = tc.TCTracks.from_processed_ibtracs_csv(TEST_TRACK_SHORT) - expected_warning = 'only %s historical tracks were provided. ' % len(tc_track.data) - with self.assertLogs('climada.hazard.tc_tracks_synth', level='INFO') as cm: + expected_warning = "only %s historical tracks were provided. " % len( + tc_track.data + ) + with self.assertLogs("climada.hazard.tc_tracks_synth", level="INFO") as cm: tc_track.calc_perturbed_trajectories(use_global_decay_params=False) self.assertIn(expected_warning, cm.output[1]) - self.assertIn('No historical track with landfall.', cm.output[2]) + self.assertIn("No historical track with landfall.", cm.output[2]) def test_random_walk_ref_pass(self): """Test against MATLAB reference.""" tc_track = tc.TCTracks.from_processed_ibtracs_csv(TEST_TRACK_SHORT) nb_synth_tracks = 2 - tc_track.calc_perturbed_trajectories(nb_synth_tracks=nb_synth_tracks, seed=25, decay=False) + tc_track.calc_perturbed_trajectories( + nb_synth_tracks=nb_synth_tracks, seed=25, decay=False + ) self.assertEqual(len(tc_track.data), nb_synth_tracks + 1) - self.assertFalse(tc_track.data[1].attrs['orig_event_flag']) - self.assertEqual(tc_track.data[1].attrs['name'], '1951239N12334_gen1') - self.assertEqual(tc_track.data[1].attrs['id_no'], 1.951239012334010e+12) - self.assertAlmostEqual(tc_track.data[1]['lon'][0].values, -25.0448138) - self.assertAlmostEqual(tc_track.data[1]['lon'][1].values, -25.74439739) - self.assertAlmostEqual(tc_track.data[1]['lon'][2].values, -26.54491644) - self.assertAlmostEqual(tc_track.data[1]['lon'][3].values, -27.73156829) - self.assertAlmostEqual(tc_track.data[1]['lon'][4].values, -28.63175987) - self.assertAlmostEqual(tc_track.data[1]['lon'][8].values, -34.05293373) - - self.assertAlmostEqual(tc_track.data[1]['lat'][0].values, 11.96825841) - self.assertAlmostEqual(tc_track.data[1]['lat'][4].values, 11.86769405) - self.assertAlmostEqual(tc_track.data[1]['lat'][5].values, 11.84378139) - self.assertAlmostEqual(tc_track.data[1]['lat'][6].values, 11.85957282) - self.assertAlmostEqual(tc_track.data[1]['lat'][7].values, 11.84555291) - 
self.assertAlmostEqual(tc_track.data[1]['lat'][8].values, 11.8065998) - - self.assertFalse(tc_track.data[2].attrs['orig_event_flag']) - self.assertEqual(tc_track.data[2].attrs['name'], '1951239N12334_gen2') - self.assertAlmostEqual(tc_track.data[2].attrs['id_no'], 1.951239012334020e+12) - self.assertAlmostEqual(tc_track.data[2]['lon'][0].values, -25.47658461) - self.assertAlmostEqual(tc_track.data[2]['lon'][3].values, -28.08465841) - self.assertAlmostEqual(tc_track.data[2]['lon'][4].values, -28.85901852) - self.assertAlmostEqual(tc_track.data[2]['lon'][8].values, -33.62144837) - - self.assertAlmostEqual(tc_track.data[2]['lat'][0].values, 11.82886685) - self.assertAlmostEqual(tc_track.data[2]['lat'][6].values, 11.71068012) - self.assertAlmostEqual(tc_track.data[2]['lat'][7].values, 11.69832976) - self.assertAlmostEqual(tc_track.data[2]['lat'][8].values, 11.64145734) + self.assertFalse(tc_track.data[1].attrs["orig_event_flag"]) + self.assertEqual(tc_track.data[1].attrs["name"], "1951239N12334_gen1") + self.assertEqual(tc_track.data[1].attrs["id_no"], 1.951239012334010e12) + self.assertAlmostEqual(tc_track.data[1]["lon"][0].values, -25.0448138) + self.assertAlmostEqual(tc_track.data[1]["lon"][1].values, -25.74439739) + self.assertAlmostEqual(tc_track.data[1]["lon"][2].values, -26.54491644) + self.assertAlmostEqual(tc_track.data[1]["lon"][3].values, -27.73156829) + self.assertAlmostEqual(tc_track.data[1]["lon"][4].values, -28.63175987) + self.assertAlmostEqual(tc_track.data[1]["lon"][8].values, -34.05293373) + + self.assertAlmostEqual(tc_track.data[1]["lat"][0].values, 11.96825841) + self.assertAlmostEqual(tc_track.data[1]["lat"][4].values, 11.86769405) + self.assertAlmostEqual(tc_track.data[1]["lat"][5].values, 11.84378139) + self.assertAlmostEqual(tc_track.data[1]["lat"][6].values, 11.85957282) + self.assertAlmostEqual(tc_track.data[1]["lat"][7].values, 11.84555291) + self.assertAlmostEqual(tc_track.data[1]["lat"][8].values, 11.8065998) + + self.assertFalse(tc_track.data[2].attrs["orig_event_flag"]) + self.assertEqual(tc_track.data[2].attrs["name"], "1951239N12334_gen2") + self.assertAlmostEqual(tc_track.data[2].attrs["id_no"], 1.951239012334020e12) + self.assertAlmostEqual(tc_track.data[2]["lon"][0].values, -25.47658461) + self.assertAlmostEqual(tc_track.data[2]["lon"][3].values, -28.08465841) + self.assertAlmostEqual(tc_track.data[2]["lon"][4].values, -28.85901852) + self.assertAlmostEqual(tc_track.data[2]["lon"][8].values, -33.62144837) + + self.assertAlmostEqual(tc_track.data[2]["lat"][0].values, 11.82886685) + self.assertAlmostEqual(tc_track.data[2]["lat"][6].values, 11.71068012) + self.assertAlmostEqual(tc_track.data[2]["lat"][7].values, 11.69832976) + self.assertAlmostEqual(tc_track.data[2]["lat"][8].values, 11.64145734) def test_random_walk_decay_pass(self): """Test land decay is called from calc_perturbed_trajectories.""" @@ -554,63 +841,99 @@ def test_random_walk_decay_pass(self): tc_track = tc.TCTracks.from_processed_ibtracs_csv(TC_ANDREW_FL) nb_synth_tracks = 2 # should work if using global parameters - with self.assertLogs('climada.hazard.tc_tracks_synth', level='DEBUG') as cm0: - tc_track.calc_perturbed_trajectories(nb_synth_tracks=nb_synth_tracks, seed=25, decay=True, - use_global_decay_params=True) + with self.assertLogs("climada.hazard.tc_tracks_synth", level="DEBUG") as cm0: + tc_track.calc_perturbed_trajectories( + nb_synth_tracks=nb_synth_tracks, + seed=25, + decay=True, + use_global_decay_params=True, + ) self.assertEqual(len(cm0), 2) self.assertEqual(tc_track.size, 3) # 
but alert the user otherwise tc_track = tc.TCTracks.from_processed_ibtracs_csv(TC_ANDREW_FL) - with self.assertLogs('climada.hazard.tc_tracks_synth', level='DEBUG') as cm: - tc_track.calc_perturbed_trajectories(nb_synth_tracks=nb_synth_tracks, seed=25, decay=True, - use_global_decay_params=False) - self.assertIn('No historical track of category Tropical Depression ' - 'with landfall.', cm.output[2]) - self.assertIn('Decay parameters from category Hurricane Cat. 4 taken.', - cm.output[3]) - self.assertIn('No historical track of category Hurricane Cat. 1 with ' - 'landfall.', cm.output[4]) - self.assertIn('Decay parameters from category Hurricane Cat. 4 taken.', - cm.output[5]) - self.assertIn('No historical track of category Hurricane Cat. 3 with ' - 'landfall. Decay parameters from category Hurricane Cat. ' - '4 taken.', cm.output[6]) - self.assertIn('No historical track of category Hurricane Cat. 5 with ' - 'landfall.', cm.output[7]) + with self.assertLogs("climada.hazard.tc_tracks_synth", level="DEBUG") as cm: + tc_track.calc_perturbed_trajectories( + nb_synth_tracks=nb_synth_tracks, + seed=25, + decay=True, + use_global_decay_params=False, + ) + self.assertIn( + "No historical track of category Tropical Depression " "with landfall.", + cm.output[2], + ) + self.assertIn( + "Decay parameters from category Hurricane Cat. 4 taken.", cm.output[3] + ) + self.assertIn( + "No historical track of category Hurricane Cat. 1 with " "landfall.", + cm.output[4], + ) + self.assertIn( + "Decay parameters from category Hurricane Cat. 4 taken.", cm.output[5] + ) + self.assertIn( + "No historical track of category Hurricane Cat. 3 with " + "landfall. Decay parameters from category Hurricane Cat. " + "4 taken.", + cm.output[6], + ) + self.assertIn( + "No historical track of category Hurricane Cat. 
5 with " "landfall.", + cm.output[7], + ) def test_random_walk_identical_pass(self): """Test 0 perturbation leads to identical tracks.""" tc_track = tc.TCTracks.from_processed_ibtracs_csv(TC_ANDREW_FL) nb_synth_tracks = 2 - tc_track.calc_perturbed_trajectories(nb_synth_tracks=nb_synth_tracks, - max_shift_ini=0, max_dspeed_rel=0, max_ddirection=0, decay=False) + tc_track.calc_perturbed_trajectories( + nb_synth_tracks=nb_synth_tracks, + max_shift_ini=0, + max_dspeed_rel=0, + max_ddirection=0, + decay=False, + ) orig_track = tc_track.data[0] for syn_track in tc_track.data[1:]: - np.testing.assert_allclose(orig_track['lon'].values, syn_track['lon'].values, atol=1e-4) - np.testing.assert_allclose(orig_track['lat'].values, syn_track['lat'].values, atol=1e-4) - for varname in ["time", "time_step", "radius_max_wind", "max_sustained_wind", - "central_pressure", "environmental_pressure"]: - np.testing.assert_array_equal(orig_track[varname].values, - syn_track[varname].values) + np.testing.assert_allclose( + orig_track["lon"].values, syn_track["lon"].values, atol=1e-4 + ) + np.testing.assert_allclose( + orig_track["lat"].values, syn_track["lat"].values, atol=1e-4 + ) + for varname in [ + "time", + "time_step", + "radius_max_wind", + "max_sustained_wind", + "central_pressure", + "environmental_pressure", + ]: + np.testing.assert_array_equal( + orig_track[varname].values, syn_track[varname].values + ) def test_random_walk_single_point(self): found = False for year in range(1951, 1981): - tc_track = tc.TCTracks.from_ibtracs_netcdf(provider='usa', - year_range=(year,year), - discard_single_points=False) - singlept = np.where([x['time'].size == 1 for x in tc_track.data])[0] + tc_track = tc.TCTracks.from_ibtracs_netcdf( + provider="usa", year_range=(year, year), discard_single_points=False + ) + singlept = np.where([x["time"].size == 1 for x in tc_track.data])[0] found = len(singlept) > 0 if found: # found a case with a single-point track, keep max three tracks for efficiency - tc_track.data = tc_track.data[max(0, singlept[0]-1):singlept[0]+2] + tc_track.data = tc_track.data[max(0, singlept[0] - 1) : singlept[0] + 2] n_tr = tc_track.size tc_track.equal_timestep() tc_track.calc_perturbed_trajectories(nb_synth_tracks=2) - self.assertEqual((2+1)*n_tr, tc_track.size) + self.assertEqual((2 + 1) * n_tr, tc_track.size) break self.assertTrue(found) + # Execute Tests if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestDecay) diff --git a/climada/hazard/test/test_trop_cyclone.py b/climada/hazard/test/test_trop_cyclone.py index 9141778ae..9996becc3 100644 --- a/climada/hazard/test/test_trop_cyclone.py +++ b/climada/hazard/test/test_trop_cyclone.py @@ -20,30 +20,30 @@ """ import datetime as dt +import unittest from pathlib import Path from tempfile import TemporaryDirectory -import unittest import numpy as np from scipy import sparse import climada.hazard.test as hazard_test -from climada.util import ureg -from climada.test import get_test_file -from climada.hazard.tc_tracks import TCTracks -from climada.hazard.tc_clim_change import get_knutson_scaling_factor -from climada.hazard.trop_cyclone.trop_cyclone import ( - TropCyclone, ) from climada.hazard.centroids.centr import Centroids +from climada.hazard.tc_clim_change import get_knutson_scaling_factor +from climada.hazard.tc_tracks import TCTracks from climada.hazard.test import download_ibtracs +from climada.hazard.trop_cyclone.trop_cyclone import TropCyclone +from climada.test import get_test_file +from climada.util import ureg - 
-DATA_DIR = Path(hazard_test.__file__).parent.joinpath('data') +DATA_DIR = Path(hazard_test.__file__).parent.joinpath("data") TEST_TRACK = DATA_DIR.joinpath("trac_brb_test.csv") TEST_TRACK_SHORT = DATA_DIR.joinpath("trac_short_test.csv") -CENTR_TEST_BRB = Centroids.from_hdf5(get_test_file('centr_test_brb', file_format='hdf5')) +CENTR_TEST_BRB = Centroids.from_hdf5( + get_test_file("centr_test_brb", file_format="hdf5") +) class TestReader(unittest.TestCase): @@ -62,11 +62,23 @@ def test_memory_limit(self): # This should not affect the results. In practice, chunking is not applied due to limited # memory, but due to very high spatial/temporal resolution of the centroids/tracks. We # simulate this situation by artificially reducing the available memory. - tc_haz = TropCyclone.from_tracks(tc_track, centroids=CENTR_TEST_BRB, max_memory_gb=0.001) - intensity_idx = [0, 1, 2, 3, 80, 100, 120, 200, 220, 250, 260, 295] + tc_haz = TropCyclone.from_tracks( + tc_track, centroids=CENTR_TEST_BRB, max_memory_gb=0.001 + ) + intensity_idx = [0, 1, 2, 3, 80, 100, 120, 200, 220, 250, 260, 295] intensity_values = [ - 22.74903, 23.784691, 24.82255, 22.67403, 27.218706, 30.593959, - 18.980878, 24.540069, 27.826407, 26.846293, 0., 34.568898, + 22.74903, + 23.784691, + 24.82255, + 22.67403, + 27.218706, + 30.593959, + 18.980878, + 24.540069, + 27.826407, + 26.846293, + 0.0, + 34.568898, ] np.testing.assert_array_almost_equal( @@ -76,16 +88,36 @@ def test_memory_limit(self): def test_set_one_pass(self): """Test _tc_from_track function.""" - intensity_idx = [0, 1, 2, 3, 80, 100, 120, 200, 220, 250, 260, 295] + intensity_idx = [0, 1, 2, 3, 80, 100, 120, 200, 220, 250, 260, 295] intensity_values = { "geosphere": [ - 22.74927, 23.78498, 24.822908, 22.674202, 27.220042, 30.602122, - 18.981022, 24.540138, 27.830925, 26.8489, 0., 34.572391, + 22.74927, + 23.78498, + 24.822908, + 22.674202, + 27.220042, + 30.602122, + 18.981022, + 24.540138, + 27.830925, + 26.8489, + 0.0, + 34.572391, ], "equirect": [ - 22.74903, 23.784691, 24.82255, 22.67403, 27.218706, 30.593959, - 18.980878, 24.540069, 27.826407, 26.846293, 0., 34.568898, - ] + 22.74903, + 23.784691, + 24.82255, + 22.67403, + 27.218706, + 30.593959, + 18.980878, + 24.540069, + 27.826407, + 26.846293, + 0.0, + 34.568898, + ], } # the values for the two metrics should agree up to first digit at least for i, val in enumerate(intensity_values["geosphere"]): @@ -96,11 +128,16 @@ def test_set_one_pass(self): tc_track.data = tc_track.data[:1] for metric in ["equirect", "geosphere"]: - tc_haz = TropCyclone.from_tracks(tc_track, centroids=CENTR_TEST_BRB, model='H08', - store_windfields=True, metric=metric) + tc_haz = TropCyclone.from_tracks( + tc_track, + centroids=CENTR_TEST_BRB, + model="H08", + store_windfields=True, + metric=metric, + ) - self.assertEqual(tc_haz.haz_type, 'TC') - self.assertEqual(tc_haz.units, 'm/s') + self.assertEqual(tc_haz.haz_type, "TC") + self.assertEqual(tc_haz.units, "m/s") self.assertEqual(tc_haz.centroids.size, 296) self.assertEqual(tc_haz.event_id.size, 1) self.assertEqual(tc_haz.date.size, 1) @@ -108,7 +145,7 @@ def test_set_one_pass(self): self.assertEqual(dt.datetime.fromordinal(tc_haz.date[0]).month, 8) self.assertEqual(dt.datetime.fromordinal(tc_haz.date[0]).day, 27) self.assertEqual(tc_haz.event_id[0], 1) - self.assertEqual(tc_haz.event_name, ['1951239N12334']) + self.assertEqual(tc_haz.event_name, ["1951239N12334"]) self.assertTrue(np.array_equal(tc_haz.frequency, np.array([1]))) self.assertTrue(isinstance(tc_haz.fraction, 
sparse.csr_matrix)) self.assertEqual(tc_haz.fraction.shape, (1, 296)) @@ -119,7 +156,9 @@ def test_set_one_pass(self): self.assertEqual(np.nonzero(tc_haz.intensity)[0].size, 255) np.testing.assert_array_almost_equal( - tc_haz.intensity[0, intensity_idx].toarray()[0], intensity_values[metric]) + tc_haz.intensity[0, intensity_idx].toarray()[0], + intensity_values[metric], + ) for idx, val in zip(intensity_idx, intensity_values[metric]): if val == 0: self.assertEqual(tc_haz.intensity[0, idx], 0) @@ -128,7 +167,7 @@ def test_set_one_pass(self): windfields = windfields.reshape(windfields.shape[0], -1, 2) windfield_norms = np.linalg.norm(windfields, axis=-1).max(axis=0) intensity = tc_haz.intensity.toarray()[0, :] - msk = (intensity > 0) + msk = intensity > 0 np.testing.assert_array_equal(windfield_norms[msk], intensity[msk]) def test_cross_antimeridian(self): @@ -152,38 +191,136 @@ def test_cross_antimeridian(self): def test_windfield_models(self): """Test _tc_from_track function with different wind field models.""" - intensity_idx = [0, 1, 2, 3, 80, 100, 120, 200, 220, 250, 260, 295] + intensity_idx = [0, 1, 2, 3, 80, 100, 120, 200, 220, 250, 260, 295] intensity_values = [ - ("H08", None, [ - 22.74903, 23.784691, 24.82255, 22.67403, 27.218706, 30.593959, - 18.980878, 24.540069, 27.826407, 26.846293, 0., 34.568898, - ]), - ("H10", None, [ - 24.745521, 25.596484, 26.475329, 24.690914, 28.650107, 31.584395, - 21.723546, 26.140293, 28.94964, 28.051915, 18.49378, 35.312152, - ]), - # The following model configurations use recorded wind speeds, while the above use - # pressure values only. That's why some of the values are so different. - ("H10", dict(vmax_from_cen=False, rho_air_const=1.2), [ - 23.702232, 24.327615, 24.947161, 23.589233, 26.616085, 29.389295, - 21.338178, 24.257067, 26.472543, 25.662313, 18.535842, 31.886041, - ]), - ("H10", dict(vmax_from_cen=False, rho_air_const=None), [ - 24.244162, 24.835561, 25.432454, 24.139294, 27.127457, 29.719196, - 21.910658, 24.692637, 26.783575, 25.971516, 19.005555, 31.904048, - ]), - ("H10", dict(vmax_from_cen=False, rho_air_const=None, vmax_in_brackets=True), [ - 23.592924, 24.208169, 24.817104, 23.483053, 26.468975, 29.221715, - 21.260867, 24.150879, 26.34288 , 25.543635, 18.487385, 31.904048 - ]), - ("H1980", None, [ - 21.376807, 21.957217, 22.569568, 21.284351, 24.254226, 26.971303, - 19.220149, 21.984516, 24.196388, 23.449116, 0, 31.550207, - ]), - ("ER11", None, [ - 23.565332, 24.931413, 26.360758, 23.490333, 29.601171, 34.522795, - 18.996389, 26.102109, 30.780737, 29.498453, 0, 38.368805, - ]), + ( + "H08", + None, + [ + 22.74903, + 23.784691, + 24.82255, + 22.67403, + 27.218706, + 30.593959, + 18.980878, + 24.540069, + 27.826407, + 26.846293, + 0.0, + 34.568898, + ], + ), + ( + "H10", + None, + [ + 24.745521, + 25.596484, + 26.475329, + 24.690914, + 28.650107, + 31.584395, + 21.723546, + 26.140293, + 28.94964, + 28.051915, + 18.49378, + 35.312152, + ], + ), + # The following model configurations use recorded wind speeds, while the above use + # pressure values only. That's why some of the values are so different. 
+ ( + "H10", + dict(vmax_from_cen=False, rho_air_const=1.2), + [ + 23.702232, + 24.327615, + 24.947161, + 23.589233, + 26.616085, + 29.389295, + 21.338178, + 24.257067, + 26.472543, + 25.662313, + 18.535842, + 31.886041, + ], + ), + ( + "H10", + dict(vmax_from_cen=False, rho_air_const=None), + [ + 24.244162, + 24.835561, + 25.432454, + 24.139294, + 27.127457, + 29.719196, + 21.910658, + 24.692637, + 26.783575, + 25.971516, + 19.005555, + 31.904048, + ], + ), + ( + "H10", + dict(vmax_from_cen=False, rho_air_const=None, vmax_in_brackets=True), + [ + 23.592924, + 24.208169, + 24.817104, + 23.483053, + 26.468975, + 29.221715, + 21.260867, + 24.150879, + 26.34288, + 25.543635, + 18.487385, + 31.904048, + ], + ), + ( + "H1980", + None, + [ + 21.376807, + 21.957217, + 22.569568, + 21.284351, + 24.254226, + 26.971303, + 19.220149, + 21.984516, + 24.196388, + 23.449116, + 0, + 31.550207, + ], + ), + ( + "ER11", + None, + [ + 23.565332, + 24.931413, + 26.360758, + 23.490333, + 29.601171, + 34.522795, + 18.996389, + 26.102109, + 30.780737, + 29.498453, + 0, + 38.368805, + ], + ), ] tc_track = TCTracks.from_processed_ibtracs_csv(TEST_TRACK) @@ -192,10 +329,14 @@ def test_windfield_models(self): for model, model_kwargs, inten_ref in intensity_values: tc_haz = TropCyclone.from_tracks( - tc_track, centroids=CENTR_TEST_BRB, model=model, model_kwargs=model_kwargs, + tc_track, + centroids=CENTR_TEST_BRB, + model=model, + model_kwargs=model_kwargs, ) np.testing.assert_array_almost_equal( - tc_haz.intensity[0, intensity_idx].toarray()[0], inten_ref, + tc_haz.intensity[0, intensity_idx].toarray()[0], + inten_ref, ) for idx, val in zip(intensity_idx, inten_ref): if val == 0: @@ -205,18 +346,38 @@ def test_windfield_models_different_windunits(self): """ Test _tc_from_track function should calculate the same results or raise ValueError with different windspeed units. 
- """ - intensity_idx = [0, 1, 2, 3, 80, 100, 120, 200, 220, 250, 260, 295] + """ + intensity_idx = [0, 1, 2, 3, 80, 100, 120, 200, 220, 250, 260, 295] intensity_values = { # Holland 1980 and Emanuel & Rotunno 2011 use recorded wind speeds, that is why checking them for different # windspeed units is so important: "H1980": [ - 21.376807, 21.957217, 22.569568, 21.284351, 24.254226, 26.971303, - 19.220149, 21.984516, 24.196388, 23.449116, 0, 31.550207, + 21.376807, + 21.957217, + 22.569568, + 21.284351, + 24.254226, + 26.971303, + 19.220149, + 21.984516, + 24.196388, + 23.449116, + 0, + 31.550207, ], "ER11": [ - 23.565332, 24.931413, 26.360758, 23.490333, 29.601171, 34.522795, - 18.996389, 26.102109, 30.780737, 29.498453, 0, 38.368805, + 23.565332, + 24.931413, + 26.360758, + 23.490333, + 29.601171, + 34.522795, + 18.996389, + 26.102109, + 30.780737, + 29.498453, + 0, + 38.368805, ], } @@ -225,27 +386,31 @@ def test_windfield_models_different_windunits(self): tc_track.data = tc_track.data[:1] tc_track_kmph = TCTracks(data=[ds.copy(deep=True) for ds in tc_track.data]) - tc_track_kmph.data[0]['max_sustained_wind'] *= ( + tc_track_kmph.data[0]["max_sustained_wind"] *= ( (1.0 * ureg.knot).to(ureg.km / ureg.hour).magnitude ) - tc_track_kmph.data[0].attrs['max_sustained_wind_unit'] = 'km/h' + tc_track_kmph.data[0].attrs["max_sustained_wind_unit"] = "km/h" tc_track_mps = TCTracks(data=[ds.copy(deep=True) for ds in tc_track.data]) - tc_track_mps.data[0]['max_sustained_wind'] *= ( + tc_track_mps.data[0]["max_sustained_wind"] *= ( (1.0 * ureg.knot).to(ureg.meter / ureg.second).magnitude ) - tc_track_mps.data[0].attrs['max_sustained_wind_unit'] = 'm/s' + tc_track_mps.data[0].attrs["max_sustained_wind_unit"] = "m/s" for model in ["H1980", "ER11"]: for tc_track_i in [tc_track_kmph, tc_track_mps]: - tc_haz = TropCyclone.from_tracks(tc_track_i, centroids=CENTR_TEST_BRB, model=model) + tc_haz = TropCyclone.from_tracks( + tc_track_i, centroids=CENTR_TEST_BRB, model=model + ) np.testing.assert_array_almost_equal( - tc_haz.intensity[0, intensity_idx].toarray()[0], intensity_values[model]) + tc_haz.intensity[0, intensity_idx].toarray()[0], + intensity_values[model], + ) for idx, val in zip(intensity_idx, intensity_values[model]): if val == 0: self.assertEqual(tc_haz.intensity[0, idx], 0) - tc_track.data[0].attrs['max_sustained_wind_unit'] = 'elbows/fortnight' + tc_track.data[0].attrs["max_sustained_wind_unit"] = "elbows/fortnight" with self.assertRaises(ValueError): TropCyclone.from_tracks(tc_track, centroids=CENTR_TEST_BRB, model=model) @@ -255,14 +420,14 @@ def test_set_one_file_pass(self): tc_haz = TropCyclone.from_tracks(tc_track, centroids=CENTR_TEST_BRB) tc_haz.check() - self.assertEqual(tc_haz.haz_type, 'TC') - self.assertEqual(tc_haz.units, 'm/s') + self.assertEqual(tc_haz.haz_type, "TC") + self.assertEqual(tc_haz.units, "m/s") self.assertEqual(tc_haz.centroids.size, 296) self.assertEqual(tc_haz.event_id.size, 1) self.assertEqual(tc_haz.event_id[0], 1) - self.assertEqual(tc_haz.event_name, ['1951239N12334']) + self.assertEqual(tc_haz.event_name, ["1951239N12334"]) self.assertEqual(tc_haz.category, tc_track.data[0].category) - self.assertEqual(tc_haz.basin[0], 'NA') + self.assertEqual(tc_haz.basin[0], "NA") self.assertIsInstance(tc_haz.basin, list) self.assertIsInstance(tc_haz.category, np.ndarray) self.assertTrue(np.array_equal(tc_haz.frequency, np.array([1]))) @@ -276,17 +441,19 @@ def test_set_one_file_pass(self): def test_two_files_pass(self): """Test from_tracks with two ibtracs.""" - tc_track = 
TCTracks.from_processed_ibtracs_csv([TEST_TRACK_SHORT, TEST_TRACK_SHORT]) + tc_track = TCTracks.from_processed_ibtracs_csv( + [TEST_TRACK_SHORT, TEST_TRACK_SHORT] + ) tc_haz = TropCyclone.from_tracks(tc_track, centroids=CENTR_TEST_BRB) tc_haz.remove_duplicates() tc_haz.check() - self.assertEqual(tc_haz.haz_type, 'TC') - self.assertEqual(tc_haz.units, 'm/s') + self.assertEqual(tc_haz.haz_type, "TC") + self.assertEqual(tc_haz.units, "m/s") self.assertEqual(tc_haz.centroids.size, 296) self.assertEqual(tc_haz.event_id.size, 1) self.assertEqual(tc_haz.event_id[0], 1) - self.assertEqual(tc_haz.event_name, ['1951239N12334']) + self.assertEqual(tc_haz.event_name, ["1951239N12334"]) self.assertTrue(np.array_equal(tc_haz.frequency, np.array([1]))) self.assertTrue(np.array_equal(tc_haz.orig, np.array([True]))) self.assertTrue(isinstance(tc_haz.intensity, sparse.csr_matrix)) @@ -310,11 +477,11 @@ def create_tc(self): self.tc = TropCyclone( intensity=sparse.csr_matrix(intensity), - basin=['NA', 'NA', 'NA', 'WP'], + basin=["NA", "NA", "NA", "WP"], category=np.array([2, 0, 4, 1]), event_id=np.arange(intensity.shape[0]), - frequency=np.repeat(1./intensity.shape[0], intensity.shape[0]), - date=np.array([723795, 728395, 738395, 724395]) + frequency=np.repeat(1.0 / intensity.shape[0], intensity.shape[0]), + date=np.array([723795, 728395, 738395, 724395]), ) def test_apply_climate_scenario_knu_calculations(self): @@ -324,51 +491,58 @@ def test_apply_climate_scenario_knu_calculations(self): self.create_tc() cat05_sel = np.repeat(True, self.tc.category.shape[0]) - cat03_sel = np.array([cat in [0,1,2,3] for cat in self.tc.category]) - cat45_sel = np.array([cat in [4,5] for cat in self.tc.category]) + cat03_sel = np.array([cat in [0, 1, 2, 3] for cat in self.tc.category]) + cat45_sel = np.array([cat in [4, 5] for cat in self.tc.category]) ## Retrieve scaling factors for cat 4 to 5 and 0 to 5 - percentile = '50' + percentile = "50" target_year = 2035 - rcp = '8.5' + rcp = "8.5" - future_tc = self.tc.apply_climate_scenario_knu(percentile=percentile, - scenario=rcp, - target_year=target_year) + future_tc = self.tc.apply_climate_scenario_knu( + percentile=percentile, scenario=rcp, target_year=target_year + ) for basin in np.unique(self.tc.basin): - basin_sel = np.array(self.tc.basin)==basin + basin_sel = np.array(self.tc.basin) == basin scaling_05, scaling_45 = [ - get_knutson_scaling_factor(percentile=percentile, - variable=variable, - basin=basin).loc[target_year, rcp] - for variable in ['cat05', 'cat45'] - ] + get_knutson_scaling_factor( + percentile=percentile, variable=variable, basin=basin + ).loc[target_year, rcp] + for variable in ["cat05", "cat45"] + ] ## Calulate scaling factors for cat 0 to 3 - freq_weighted_scaling_05 = scaling_05 * np.sum(self.tc.frequency[cat05_sel & basin_sel]) - freq_weighted_scaling_45 = scaling_45 * np.sum(self.tc.frequency[cat45_sel & basin_sel]) + freq_weighted_scaling_05 = scaling_05 * np.sum( + self.tc.frequency[cat05_sel & basin_sel] + ) + freq_weighted_scaling_45 = scaling_45 * np.sum( + self.tc.frequency[cat45_sel & basin_sel] + ) freq_sum_03 = np.sum(self.tc.frequency[cat03_sel & basin_sel]) - scaling_03 = (freq_weighted_scaling_05 - freq_weighted_scaling_45) / freq_sum_03 + scaling_03 = ( + freq_weighted_scaling_05 - freq_weighted_scaling_45 + ) / freq_sum_03 ## Check that frequencies obtained by function are the same as those obtained by scaling ## historic frequencies with retrieved scaling factors np.testing.assert_array_equal( - self.tc.frequency[cat03_sel & basin_sel] 
* (1 + scaling_03/100), - future_tc.frequency[cat03_sel & basin_sel] - ) + self.tc.frequency[cat03_sel & basin_sel] * (1 + scaling_03 / 100), + future_tc.frequency[cat03_sel & basin_sel], + ) np.testing.assert_array_equal( - self.tc.frequency[cat45_sel & basin_sel] * (1 + scaling_45/100), - future_tc.frequency[cat45_sel & basin_sel] - ) + self.tc.frequency[cat45_sel & basin_sel] * (1 + scaling_45 / 100), + future_tc.frequency[cat45_sel & basin_sel], + ) def test_apply_climate_scenario_knu_target_year_out_of_range(self): self.create_tc() with self.assertRaises(KeyError): self.tc.apply_climate_scenario_knu(target_year=2200) + class TestDumpReloadCycle(unittest.TestCase): def setUp(self): """Create a TropCyclone object and a temporary directory""" diff --git a/climada/hazard/test/test_trop_cyclone_windfields.py b/climada/hazard/test/test_trop_cyclone_windfields.py index f91ac075a..418e52867 100644 --- a/climada/hazard/test/test_trop_cyclone_windfields.py +++ b/climada/hazard/test/test_trop_cyclone_windfields.py @@ -5,9 +5,21 @@ from climada.hazard import TCTracks from climada.hazard.test.test_trop_cyclone import TEST_TRACK, TEST_TRACK_SHORT -from climada.hazard.trop_cyclone.trop_cyclone_windfields import get_close_centroids, MBAR_TO_PA, _B_holland_1980, H_TO_S, \ - _bs_holland_2008, _v_max_s_holland_2008, KM_TO_M, _x_holland_2010, _stat_holland_2010, _stat_holland_1980, \ - _stat_er_2011, tctrack_to_si, _vtrans +from climada.hazard.trop_cyclone.trop_cyclone_windfields import ( + H_TO_S, + KM_TO_M, + MBAR_TO_PA, + _B_holland_1980, + _bs_holland_2008, + _stat_er_2011, + _stat_holland_1980, + _stat_holland_2010, + _v_max_s_holland_2008, + _vtrans, + _x_holland_2010, + get_close_centroids, + tctrack_to_si, +) from climada.util import ureg @@ -16,89 +28,129 @@ class TestWindfieldHelpers(unittest.TestCase): def test_get_close_centroids_pass(self): """Test get_close_centroids function.""" - si_track = xr.Dataset({ - "lat": ("time", np.array([0, -0.5, 0])), - "lon": ("time", np.array([0.9, 2, 3.2])), - }, attrs={"mid_lon": 0.0}) - centroids = np.array([ - [0, -0.2], [0, 0.9], [-1.1, 1.2], [1, 2.1], [0, 4.3], [0.6, 3.8], [0.9, 4.1], - ]) - centroids_close, mask_close, mask_close_alongtrack = ( - get_close_centroids(si_track, centroids, 112.0) + si_track = xr.Dataset( + { + "lat": ("time", np.array([0, -0.5, 0])), + "lon": ("time", np.array([0.9, 2, 3.2])), + }, + attrs={"mid_lon": 0.0}, + ) + centroids = np.array( + [ + [0, -0.2], + [0, 0.9], + [-1.1, 1.2], + [1, 2.1], + [0, 4.3], + [0.6, 3.8], + [0.9, 4.1], + ] + ) + centroids_close, mask_close, mask_close_alongtrack = get_close_centroids( + si_track, centroids, 112.0 ) self.assertEqual(centroids_close.shape[0], mask_close.sum()) self.assertEqual(mask_close_alongtrack.shape[0], si_track.sizes["time"]) self.assertEqual(mask_close_alongtrack.shape[1], centroids_close.shape[0]) np.testing.assert_equal(mask_close_alongtrack.any(axis=0), True) - np.testing.assert_equal(mask_close, np.array( - [False, True, True, False, False, True, False] - )) - np.testing.assert_equal(mask_close_alongtrack, np.array([ - [True, False, False], - [False, True, False], - [False, False, True], - ])) + np.testing.assert_equal( + mask_close, np.array([False, True, True, False, False, True, False]) + ) + np.testing.assert_equal( + mask_close_alongtrack, + np.array( + [ + [True, False, False], + [False, True, False], + [False, False, True], + ] + ), + ) np.testing.assert_equal(centroids_close, centroids[mask_close]) # example where antimeridian is crossed - si_track = 
xr.Dataset({ - "lat": ("time", np.linspace(-10, 10, 11)), - "lon": ("time", np.linspace(170, 200, 11)), - }, attrs={"mid_lon": 180.0}) + si_track = xr.Dataset( + { + "lat": ("time", np.linspace(-10, 10, 11)), + "lon": ("time", np.linspace(170, 200, 11)), + }, + attrs={"mid_lon": 180.0}, + ) centroids = np.array([[-11, 169], [-7, 176], [4, -170], [10, 170], [-10, -160]]) - centroids_close, mask_close, mask_close_alongtrack = ( - get_close_centroids(si_track, centroids, 600.0) + centroids_close, mask_close, mask_close_alongtrack = get_close_centroids( + si_track, centroids, 600.0 ) self.assertEqual(centroids_close.shape[0], mask_close.sum()) self.assertEqual(mask_close_alongtrack.shape[0], si_track.sizes["time"]) self.assertEqual(mask_close_alongtrack.shape[1], centroids_close.shape[0]) np.testing.assert_equal(mask_close_alongtrack.any(axis=0), True) np.testing.assert_equal(mask_close, np.array([True, True, True, False, False])) - np.testing.assert_equal(centroids_close, np.array([ - # the longitudinal coordinate of the third centroid is normalized - [-11, 169], [-7, 176], [4, 190], - ])) + np.testing.assert_equal( + centroids_close, + np.array( + [ + # the longitudinal coordinate of the third centroid is normalized + [-11, 169], + [-7, 176], + [4, 190], + ] + ), + ) def test_B_holland_1980_pass(self): """Test _B_holland_1980 function.""" - si_track = xr.Dataset({ - "pdelta": ("time", MBAR_TO_PA * np.array([15, 30])), - "vgrad": ("time", [35, 40]), - "rho_air": ("time", [1.15, 1.15]) - }) + si_track = xr.Dataset( + { + "pdelta": ("time", MBAR_TO_PA * np.array([15, 30])), + "vgrad": ("time", [35, 40]), + "rho_air": ("time", [1.15, 1.15]), + } + ) _B_holland_1980(si_track) np.testing.assert_array_almost_equal(si_track["hol_b"], [2.5, 1.667213]) - si_track = xr.Dataset({ - "pdelta": ("time", MBAR_TO_PA * np.array([4.74, 15, 30, 40])), - "vmax": ("time", [np.nan, 22.5, 25.4, 42.5]), - "rho_air": ("time", [1.2, 1.2, 1.2, 1.2]) - }) + si_track = xr.Dataset( + { + "pdelta": ("time", MBAR_TO_PA * np.array([4.74, 15, 30, 40])), + "vmax": ("time", [np.nan, 22.5, 25.4, 42.5]), + "rho_air": ("time", [1.2, 1.2, 1.2, 1.2]), + } + ) _B_holland_1980(si_track, gradient_to_surface_winds=0.9) - np.testing.assert_allclose(si_track["hol_b"], [np.nan, 1.101, 0.810, 1.473], atol=1e-3) + np.testing.assert_allclose( + si_track["hol_b"], [np.nan, 1.101, 0.810, 1.473], atol=1e-3 + ) def test_bs_holland_2008_pass(self): """Test _bs_holland_2008 function. 
Compare to MATLAB reference.""" - si_track = xr.Dataset({ - "tstep": ("time", H_TO_S * np.array([1.0, 1.0, 1.0])), - "lat": ("time", [12.299999504631234, 12.299999504631343, 12.299999279463769]), - "pdelta": ("time", MBAR_TO_PA * np.array([4.74, 4.73, 4.73])), - "cen": ("time", MBAR_TO_PA * np.array([1005.2585, 1005.2633, 1005.2682])), - "vtrans_norm": ("time", [np.nan, 5.241999541820597, 5.123882725120426]), - }) - _bs_holland_2008(si_track) - np.testing.assert_allclose( - si_track["hol_b"], [np.nan, 1.27, 1.27], atol=1e-2 + si_track = xr.Dataset( + { + "tstep": ("time", H_TO_S * np.array([1.0, 1.0, 1.0])), + "lat": ( + "time", + [12.299999504631234, 12.299999504631343, 12.299999279463769], + ), + "pdelta": ("time", MBAR_TO_PA * np.array([4.74, 4.73, 4.73])), + "cen": ( + "time", + MBAR_TO_PA * np.array([1005.2585, 1005.2633, 1005.2682]), + ), + "vtrans_norm": ("time", [np.nan, 5.241999541820597, 5.123882725120426]), + } ) + _bs_holland_2008(si_track) + np.testing.assert_allclose(si_track["hol_b"], [np.nan, 1.27, 1.27], atol=1e-2) def test_v_max_s_holland_2008_pass(self): """Test _v_max_s_holland_2008 function.""" # Numbers analogous to test_B_holland_1980_pass - si_track = xr.Dataset({ - "pdelta": ("time", MBAR_TO_PA * np.array([15, 30])), - "hol_b": ("time", [2.5, 1.67]), - "rho_air": ("time", [1.15, 1.15]), - }) + si_track = xr.Dataset( + { + "pdelta": ("time", MBAR_TO_PA * np.array([15, 30])), + "hol_b": ("time", [2.5, 1.67]), + "rho_air": ("time", [1.15, 1.15]), + } + ) _v_max_s_holland_2008(si_track) np.testing.assert_array_almost_equal(si_track["vmax"], [34.635341, 40.033421]) @@ -113,77 +165,110 @@ def test_holland_2010_pass(self): # peripheral wind speeds. # # The "hol_b" parameter tunes the meaning of a "comparably" large or small RMW. - si_track = xr.Dataset({ - # four test cases: - # - low vmax, moderate RMW: x decreases moderately - # - large hol_b: x decreases sharply - # - very low vmax: x decreases so much, it needs to be clipped at 0 - # - large vmax, large RMW: x increases - "rad": ("time", KM_TO_M * np.array([75, 75, 75, 90])), - "vmax": ("time", [35.0, 35.0, 16.0, 90.0]), - "hol_b": ("time", [1.75, 2.5, 1.9, 1.6]), - }) - d_centr = KM_TO_M * np.array([ - # first column is for locations within the storm eye - # second column is for locations at or close to the radius of max wind - # third column is for locations outside the storm eye - # fourth column is for locations exactly at the peripheral radius - # fifth column is for locations outside the peripheral radius - [0., 75, 220, 300, 490], - [30, 74, 170, 300, 501], - [21, 76, 230, 300, 431], - [32, 91, 270, 300, 452], - ], dtype=float) - close_centr = np.array([ - # note that we set one of these to "False" for testing - [True, True, True, True, True], - [True, True, True, True, False], - [True, True, True, True, True], - [True, True, True, True, True], - ], dtype=bool) + si_track = xr.Dataset( + { + # four test cases: + # - low vmax, moderate RMW: x decreases moderately + # - large hol_b: x decreases sharply + # - very low vmax: x decreases so much, it needs to be clipped at 0 + # - large vmax, large RMW: x increases + "rad": ("time", KM_TO_M * np.array([75, 75, 75, 90])), + "vmax": ("time", [35.0, 35.0, 16.0, 90.0]), + "hol_b": ("time", [1.75, 2.5, 1.9, 1.6]), + } + ) + d_centr = KM_TO_M * np.array( + [ + # first column is for locations within the storm eye + # second column is for locations at or close to the radius of max wind + # third column is for locations outside the storm eye + # fourth column is for locations 
exactly at the peripheral radius + # fifth column is for locations outside the peripheral radius + [0.0, 75, 220, 300, 490], + [30, 74, 170, 300, 501], + [21, 76, 230, 300, 431], + [32, 91, 270, 300, 452], + ], + dtype=float, + ) + close_centr = np.array( + [ + # note that we set one of these to "False" for testing + [True, True, True, True, True], + [True, True, True, True, False], + [True, True, True, True, True], + [True, True, True, True, True], + ], + dtype=bool, + ) hol_x = _x_holland_2010(si_track, d_centr, close_centr) - np.testing.assert_array_almost_equal(hol_x, [ - [0.5, 0.500000, 0.485077, 0.476844, 0.457291], - [0.5, 0.500000, 0.410997, 0.400000, 0.000000], - [0.5, 0.497620, 0.400000, 0.400000, 0.400000], - [0.5, 0.505022, 1.403952, 1.554611, 2.317948], - ]) + np.testing.assert_array_almost_equal( + hol_x, + [ + [0.5, 0.500000, 0.485077, 0.476844, 0.457291], + [0.5, 0.500000, 0.410997, 0.400000, 0.000000], + [0.5, 0.497620, 0.400000, 0.400000, 0.400000], + [0.5, 0.505022, 1.403952, 1.554611, 2.317948], + ], + ) v_ang_norm = _stat_holland_2010(si_track, d_centr, close_centr, hol_x) - np.testing.assert_allclose(v_ang_norm, [ - # first column: converge to 0 when approaching storm eye - # second column: vmax at RMW - # fourth column: peripheral speed (17 m/s) at peripheral radius (unless x is clipped!) - [ 0.000000, 35.000000, 21.181497, 17.000000, 12.1034610], - [ 1.296480, 34.990037, 21.593755, 12.891313, 0.0000000], - [ 0.321952, 15.997500, 9.712006, 8.087240, 6.2289690], - [24.823469, 89.992938, 24.381965, 17.000000, 1.9292020], - ], atol=1e-6) + np.testing.assert_allclose( + v_ang_norm, + [ + # first column: converge to 0 when approaching storm eye + # second column: vmax at RMW + # fourth column: peripheral speed (17 m/s) at peripheral radius (unless x is clipped!) + [0.000000, 35.000000, 21.181497, 17.000000, 12.1034610], + [1.296480, 34.990037, 21.593755, 12.891313, 0.0000000], + [0.321952, 15.997500, 9.712006, 8.087240, 6.2289690], + [24.823469, 89.992938, 24.381965, 17.000000, 1.9292020], + ], + atol=1e-6, + ) def test_stat_holland_1980(self): """Test _stat_holland_1980 function. 
Compare to MATLAB reference.""" - d_centr = KM_TO_M * np.array([ - [299.4501244109841, 291.0737897183741, 292.5441003235722, 40.665454622610511], - [293.6067129546862, 1000.0, 298.2652319413182, 70.0], - ]) - si_track = xr.Dataset({ - "rad": ("time", KM_TO_M * np.array([40.665454622610511, 75.547902916671745])), - "hol_b": ("time", [1.486076257880692, 1.265551666104679]), - "pdelta": ("time", MBAR_TO_PA * np.array([39.12, 4.73])), - "lat": ("time", [-14.089110370469488, 12.299999279463769]), - "cp": ("time", [3.54921922e-05, 3.10598285e-05]), - "rho_air": ("time", [1.15, 1.15]), - }) - mask = np.array([[True, True, True, True], [True, False, True, True]], dtype=bool) + d_centr = KM_TO_M * np.array( + [ + [ + 299.4501244109841, + 291.0737897183741, + 292.5441003235722, + 40.665454622610511, + ], + [293.6067129546862, 1000.0, 298.2652319413182, 70.0], + ] + ) + si_track = xr.Dataset( + { + "rad": ( + "time", + KM_TO_M * np.array([40.665454622610511, 75.547902916671745]), + ), + "hol_b": ("time", [1.486076257880692, 1.265551666104679]), + "pdelta": ("time", MBAR_TO_PA * np.array([39.12, 4.73])), + "lat": ("time", [-14.089110370469488, 12.299999279463769]), + "cp": ("time", [3.54921922e-05, 3.10598285e-05]), + "rho_air": ("time", [1.15, 1.15]), + } + ) + mask = np.array( + [[True, True, True, True], [True, False, True, True]], dtype=bool + ) v_ang_norm = _stat_holland_1980(si_track, d_centr, mask) np.testing.assert_allclose( - v_ang_norm, [[11.28, 11.68, 11.61, 42.41], [5.38, 0, 5.28, 12.76]], atol=1e-2, + v_ang_norm, + [[11.28, 11.68, 11.61, 42.41], [5.38, 0, 5.28, 12.76]], + atol=1e-2, ) # without Coriolis force, values are higher, esp. far away from the center: v_ang_norm = _stat_holland_1980(si_track, d_centr, mask, cyclostrophic=True) np.testing.assert_allclose( - v_ang_norm, [[15.72, 16.04, 15.98, 43.13], [8.84, 0, 8.76, 13.81]], atol=1e-2, + v_ang_norm, + [[15.72, 16.04, 15.98, 43.13], [8.84, 0, 8.76, 13.81]], + atol=1e-2, ) d_centr = np.array([[], []]) @@ -194,18 +279,28 @@ def test_stat_holland_1980(self): def test_er_2011_pass(self): """Test Emanuel and Rotunno 2011 wind field model.""" # test at centroids within and outside of radius of max wind - d_centr = KM_TO_M * np.array([[35, 70, 75, 220], [30, 150, 1000, 300]], dtype=float) - si_track = xr.Dataset({ - "rad": ("time", KM_TO_M * np.array([75.0, 40.0])), - "vmax": ("time", [35.0, 40.0]), - "lat": ("time", [20.0, 27.0]), - "cp": ("time", [4.98665369e-05, 6.61918149e-05]), - }) - mask = np.array([[True, True, True, True], [True, False, True, True]], dtype=bool) + d_centr = KM_TO_M * np.array( + [[35, 70, 75, 220], [30, 150, 1000, 300]], dtype=float + ) + si_track = xr.Dataset( + { + "rad": ("time", KM_TO_M * np.array([75.0, 40.0])), + "vmax": ("time", [35.0, 40.0]), + "lat": ("time", [20.0, 27.0]), + "cp": ("time", [4.98665369e-05, 6.61918149e-05]), + } + ) + mask = np.array( + [[True, True, True, True], [True, False, True, True]], dtype=bool + ) v_ang_norm = _stat_er_2011(si_track, d_centr, mask) - np.testing.assert_array_almost_equal(v_ang_norm, - [[28.258025, 36.782418, 36.869995, 22.521237], - [39.670883, 0, 3.300626, 10.827206]]) + np.testing.assert_array_almost_equal( + v_ang_norm, + [ + [28.258025, 36.782418, 36.869995, 22.521237], + [39.670883, 0, 3.300626, 10.827206], + ], + ) def test_vtrans_pass(self): """Test _vtrans function. 
Compare to MATLAB reference.""" @@ -223,21 +318,23 @@ def test_vtrans_pass(self): self.assertAlmostEqual(si_track["vtrans_norm"].values[1] * to_kn, 10.191466246) def testtctrack_to_si(self): - """ Test tctrack_to_si should create the same vmax output independent of the input unit """ + """Test tctrack_to_si should create the same vmax output independent of the input unit""" tc_track = TCTracks.from_processed_ibtracs_csv(TEST_TRACK_SHORT).data[0] tc_track_kmph = tc_track.copy(deep=True) - tc_track_kmph['max_sustained_wind'] *= ( + tc_track_kmph["max_sustained_wind"] *= ( (1.0 * ureg.knot).to(ureg.km / ureg.hour).magnitude ) - tc_track_kmph.attrs['max_sustained_wind_unit'] = 'km/h' + tc_track_kmph.attrs["max_sustained_wind_unit"] = "km/h" si_track = tctrack_to_si(tc_track) si_track_from_kmph = tctrack_to_si(tc_track_kmph) - np.testing.assert_array_almost_equal(si_track["vmax"], si_track_from_kmph["vmax"]) + np.testing.assert_array_almost_equal( + si_track["vmax"], si_track_from_kmph["vmax"] + ) - tc_track.attrs['max_sustained_wind_unit'] = 'elbows/fortnight' + tc_track.attrs["max_sustained_wind_unit"] = "elbows/fortnight" with self.assertRaises(ValueError): tctrack_to_si(tc_track) diff --git a/climada/hazard/trop_cyclone/__init__.py b/climada/hazard/trop_cyclone/__init__.py index 452bf4364..d7f4d6c6e 100644 --- a/climada/hazard/trop_cyclone/__init__.py +++ b/climada/hazard/trop_cyclone/__init__.py @@ -1,5 +1,21 @@ from climada.hazard.trop_cyclone.trop_cyclone import * -from climada.hazard.trop_cyclone.trop_cyclone_windfields import compute_windfields_sparse, compute_angular_windspeeds, tctrack_to_si, \ - get_close_centroids, KN_TO_MS, KM_TO_M, KM_TO_M, H_TO_S, NM_TO_KM, KMH_TO_MS, MBAR_TO_PA, \ - DEF_MAX_DIST_EYE_KM, DEF_INTENSITY_THRES, DEF_MAX_MEMORY_GB, MODEL_VANG, DEF_RHO_AIR, DEF_GRADIENT_TO_SURFACE_WINDS, \ - T_ICE_K, V_ANG_EARTH +from climada.hazard.trop_cyclone.trop_cyclone_windfields import ( + DEF_GRADIENT_TO_SURFACE_WINDS, + DEF_INTENSITY_THRES, + DEF_MAX_DIST_EYE_KM, + DEF_MAX_MEMORY_GB, + DEF_RHO_AIR, + H_TO_S, + KM_TO_M, + KMH_TO_MS, + KN_TO_MS, + MBAR_TO_PA, + MODEL_VANG, + NM_TO_KM, + T_ICE_K, + V_ANG_EARTH, + compute_angular_windspeeds, + compute_windfields_sparse, + get_close_centroids, + tctrack_to_si, +) diff --git a/climada/hazard/trop_cyclone/trop_cyclone.py b/climada/hazard/trop_cyclone/trop_cyclone.py index 6dacb1b7d..ae01332ca 100644 --- a/climada/hazard/trop_cyclone/trop_cyclone.py +++ b/climada/hazard/trop_cyclone/trop_cyclone.py @@ -19,36 +19,40 @@ Define TC wind hazard (TropCyclone class). 
""" -__all__ = ['TropCyclone'] +__all__ = ["TropCyclone"] import copy import datetime as dt import itertools import logging import time -from typing import Optional, Tuple, List +from typing import List, Optional, Tuple -import numpy as np -from scipy import sparse import matplotlib.animation as animation -from tqdm import tqdm +import numpy as np import pathos.pools import xarray as xr +from scipy import sparse +from tqdm import tqdm -from climada.hazard.base import Hazard -from climada.hazard.tc_tracks import TCTracks -from climada.hazard.tc_clim_change import get_knutson_scaling_factor -from climada.hazard.centroids.centr import Centroids import climada.util.constants as u_const import climada.util.coordinates as u_coord import climada.util.plot as u_plot +from climada.hazard.base import Hazard +from climada.hazard.centroids.centr import Centroids +from climada.hazard.tc_clim_change import get_knutson_scaling_factor +from climada.hazard.tc_tracks import TCTracks -from .trop_cyclone_windfields import DEF_MAX_DIST_EYE_KM, DEF_INTENSITY_THRES, \ - DEF_MAX_MEMORY_GB, compute_windfields_sparse +from .trop_cyclone_windfields import ( + DEF_INTENSITY_THRES, + DEF_MAX_DIST_EYE_KM, + DEF_MAX_MEMORY_GB, + compute_windfields_sparse, +) LOGGER = logging.getLogger(__name__) -HAZ_TYPE = 'TC' +HAZ_TYPE = "TC" """Hazard type acronym for Tropical Cyclone""" @@ -83,10 +87,11 @@ class TropCyclone(Hazard): matrix of shape (npositions, ncentroids * 2) that can be reshaped to a full ndarray of shape (npositions, ncentroids, 2). """ + intensity_thres = DEF_INTENSITY_THRES """intensity threshold for storage in m/s""" - vars_opt = Hazard.vars_opt.union({'category'}) + vars_opt = Hazard.vars_opt.union({"category"}) """Name of the variables that are not needed to compute the impact.""" def __init__( @@ -125,7 +130,7 @@ def __init__( **kwargs : Hazard properties, optional All other keyword arguments are passed to the Hazard constructor. """ - kwargs.setdefault('haz_type', HAZ_TYPE) + kwargs.setdefault("haz_type", HAZ_TYPE) Hazard.__init__(self, **kwargs) self.category = category if category is not None else np.array([], int) self.basin = basin if basin is not None else [] @@ -133,13 +138,15 @@ def __init__( def set_from_tracks(self, *args, **kwargs): """This function is deprecated, use TropCyclone.from_tracks instead.""" - LOGGER.warning("The use of TropCyclone.set_from_tracks is deprecated." - "Use TropCyclone.from_tracks instead.") + LOGGER.warning( + "The use of TropCyclone.set_from_tracks is deprecated." + "Use TropCyclone.from_tracks instead." 
+ ) if "intensity_thres" not in kwargs: # some users modify the threshold attribute before calling `set_from_tracks` kwargs["intensity_thres"] = self.intensity_thres - if self.pool is not None and 'pool' not in kwargs: - kwargs['pool'] = self.pool + if self.pool is not None and "pool" not in kwargs: + kwargs["pool"] = self.pool self.__dict__ = TropCyclone.from_tracks(*args, **kwargs).__dict__ @classmethod @@ -148,7 +155,7 @@ def from_tracks( tracks: TCTracks, centroids: Centroids, pool: Optional[pathos.pools.ProcessPool] = None, - model: str = 'H08', + model: str = "H08", model_kwargs: Optional[dict] = None, ignore_distance_to_coast: bool = False, store_windfields: bool = False, @@ -287,10 +294,10 @@ def from_tracks( [idx_centr_filter] = (np.abs(centroids.lat) <= max_latitude).nonzero() else: # Select centroids which are inside max_dist_inland_km and lat <= max_latitude - if 'dist_coast' not in centroids.gdf.columns: + if "dist_coast" not in centroids.gdf.columns: dist_coast = centroids.get_dist_coast() else: - dist_coast = centroids.gdf['dist_coast'].values + dist_coast = centroids.gdf["dist_coast"].values [idx_centr_filter] = ( (dist_coast <= max_dist_inland_km * 1000) & (np.abs(centroids.lat) <= max_latitude) @@ -303,7 +310,9 @@ def from_tracks( ) # Restrict to coastal centroids within reach of any of the tracks - t_lon_min, t_lat_min, t_lon_max, t_lat_max = tracks.get_bounds(deg_buffer=max_dist_eye_deg) + t_lon_min, t_lat_min, t_lon_max, t_lat_max = tracks.get_bounds( + deg_buffer=max_dist_eye_deg + ) t_mid_lon = 0.5 * (t_lon_min + t_lon_max) filtered_centroids = centroids.coord[idx_centr_filter] u_coord.lon_normalize(filtered_centroids[:, 1], center=t_mid_lon) @@ -328,7 +337,9 @@ def from_tracks( ) LOGGER.info( - 'Mapping %d tracks to %d coastal centroids.', num_tracks, idx_centr_filter.size, + "Mapping %d tracks to %d coastal centroids.", + num_tracks, + idx_centr_filter.size, ) if pool: chunksize = max(min(num_tracks // pool.ncpus, 1000), 1) @@ -356,20 +367,20 @@ def from_tracks( if last_perc < 100: LOGGER.info("Progress: 100%") - LOGGER.debug('Concatenate events.') + LOGGER.debug("Concatenate events.") haz = cls.concat(tc_haz_list) haz.pool = pool haz.intensity_thres = intensity_thres - LOGGER.debug('Compute frequency.') + LOGGER.debug("Compute frequency.") haz.frequency_from_tracks(tracks.data) return haz def apply_climate_scenario_knu( self, - percentile: str='50', - scenario: str='4.5', - target_year: int=2050, - **kwargs + percentile: str = "50", + scenario: str = "4.5", + target_year: int = 2050, + **kwargs, ): """ From current TC hazard instance, return new hazard set with future events @@ -421,9 +432,11 @@ def apply_climate_scenario_knu( """ if self.category.size == 0: - LOGGER.warning("Tropical cyclone categories are missing and" - "no effect of climate change can be modelled." - "The original event set is returned") + LOGGER.warning( + "Tropical cyclone categories are missing and" + "no effect of climate change can be modelled." 
+ "The original event set is returned" + ) return self tc_cc = copy.deepcopy(self) @@ -436,40 +449,48 @@ def apply_climate_scenario_knu( for basin in np.unique(tc_cc.basin): scale_year_rcp_05, scale_year_rcp_45 = [ - get_knutson_scaling_factor( - percentile=percentile, - variable=variable, - basin=basin, - baseline=(np.min(years), np.max(years)), - **kwargs - ).loc[target_year, scenario] - for variable in ['cat05', 'cat45'] - ] + get_knutson_scaling_factor( + percentile=percentile, + variable=variable, + basin=basin, + baseline=(np.min(years), np.max(years)), + **kwargs, + ).loc[target_year, scenario] + for variable in ["cat05", "cat45"] + ] bas_sel = np.array(tc_cc.basin) == basin - cat_05_freqs_change = scale_year_rcp_05 * np.sum(tc_cc.frequency[sel_cat05 & bas_sel]) - cat_45_freqs_change = scale_year_rcp_45 * np.sum(tc_cc.frequency[sel_cat45 & bas_sel]) + cat_05_freqs_change = scale_year_rcp_05 * np.sum( + tc_cc.frequency[sel_cat05 & bas_sel] + ) + cat_45_freqs_change = scale_year_rcp_45 * np.sum( + tc_cc.frequency[sel_cat45 & bas_sel] + ) cat_03_freqs = np.sum(tc_cc.frequency[sel_cat03 & bas_sel]) - scale_year_rcp_03 = (cat_05_freqs_change-cat_45_freqs_change) / cat_03_freqs + scale_year_rcp_03 = ( + cat_05_freqs_change - cat_45_freqs_change + ) / cat_03_freqs - tc_cc.frequency[sel_cat03 & bas_sel] *= 1 + scale_year_rcp_03/100 - tc_cc.frequency[sel_cat45 & bas_sel] *= 1 + scale_year_rcp_45/100 + tc_cc.frequency[sel_cat03 & bas_sel] *= 1 + scale_year_rcp_03 / 100 + tc_cc.frequency[sel_cat45 & bas_sel] *= 1 + scale_year_rcp_45 / 100 if any(tc_cc.frequency) < 0: raise ValueError( " The application of the climate scenario leads to " " negative frequencies. One solution - if appropriate -" " could be to use a less extreme percentile." - ) + ) return tc_cc def set_climate_scenario_knu(self, *args, **kwargs): """This function is deprecated, use TropCyclone.apply_climate_scenario_knu instead.""" - LOGGER.warning("The use of TropCyclone.set_climate_scenario_knu is deprecated." - "Use TropCyclone.apply_climate_scenario_knu instead.") + LOGGER.warning( + "The use of TropCyclone.set_climate_scenario_knu is deprecated." + "Use TropCyclone.apply_climate_scenario_knu instead." 
+ ) return self.apply_climate_scenario_knu(*args, **kwargs) @classmethod @@ -482,7 +503,7 @@ def video_intensity( writer: animation = animation.PillowWriter(bitrate=500), figsize: Tuple[float, float] = (9, 13), adapt_fontsize: bool = True, - **kwargs + **kwargs, ): """ Generate video of TC wind fields node by node and returns its @@ -520,55 +541,64 @@ def video_intensity( # initialization track = tracks.get_track(track_name) if not track: - raise ValueError(f'{track_name} not found in track data.') + raise ValueError(f"{track_name} not found in track data.") idx_plt = np.argwhere( - (track['lon'].values < centroids.total_bounds[2] + 1) - & (centroids.total_bounds[0] - 1 < track['lon'].values) - & (track['lat'].values < centroids.total_bounds[3] + 1) - & (centroids.total_bounds[1] - 1 < track['lat'].values) + (track["lon"].values < centroids.total_bounds[2] + 1) + & (centroids.total_bounds[0] - 1 < track["lon"].values) + & (track["lat"].values < centroids.total_bounds[3] + 1) + & (centroids.total_bounds[1] - 1 < track["lat"].values) ).reshape(-1) tc_list = [] - tr_coord = {'lat': [], 'lon': []} + tr_coord = {"lat": [], "lon": []} for node in range(idx_plt.size - 2): tr_piece = track.sel( - time=slice(track['time'].values[idx_plt[node]], - track['time'].values[idx_plt[node + 2]])) - tr_piece.attrs['n_nodes'] = 2 # plot only one node + time=slice( + track["time"].values[idx_plt[node]], + track["time"].values[idx_plt[node + 2]], + ) + ) + tr_piece.attrs["n_nodes"] = 2 # plot only one node tr_sel = TCTracks() tr_sel.append(tr_piece) - tr_coord['lat'].append(tr_sel.data[0]['lat'].values[:-1]) - tr_coord['lon'].append(tr_sel.data[0]['lon'].values[:-1]) + tr_coord["lat"].append(tr_sel.data[0]["lat"].values[:-1]) + tr_coord["lon"].append(tr_sel.data[0]["lon"].values[:-1]) tc_tmp = cls.from_tracks(tr_sel, centroids=centroids) tc_tmp.event_name = [ - track['name'] + ' ' + time.strftime( + track["name"] + + " " + + time.strftime( "%d %h %Y %H:%M", - time.gmtime(tr_sel.data[0]['time'][1].values.astype(int) - / 1000000000) + time.gmtime( + tr_sel.data[0]["time"][1].values.astype(int) / 1000000000 + ), ) ] tc_list.append(tc_tmp) - if 'cmap' not in kwargs: - kwargs['cmap'] = 'Greys' - if 'vmin' not in kwargs: - kwargs['vmin'] = np.array([tc_.intensity.min() for tc_ in tc_list]).min() - if 'vmax' not in kwargs: - kwargs['vmax'] = np.array([tc_.intensity.max() for tc_ in tc_list]).max() + if "cmap" not in kwargs: + kwargs["cmap"] = "Greys" + if "vmin" not in kwargs: + kwargs["vmin"] = np.array([tc_.intensity.min() for tc_ in tc_list]).min() + if "vmax" not in kwargs: + kwargs["vmax"] = np.array([tc_.intensity.max() for tc_ in tc_list]).max() def run(node): tc_list[node].plot_intensity(1, axis=axis, **kwargs) - axis.plot(tr_coord['lon'][node], tr_coord['lat'][node], 'k') + axis.plot(tr_coord["lon"][node], tr_coord["lat"][node], "k") axis.set_title(tc_list[node].event_name[0]) pbar.update() if file_name: - LOGGER.info('Generating video %s', file_name) - fig, axis, _fontsize = u_plot.make_map(figsize=figsize, adapt_fontsize=adapt_fontsize) + LOGGER.info("Generating video %s", file_name) + fig, axis, _fontsize = u_plot.make_map( + figsize=figsize, adapt_fontsize=adapt_fontsize + ) pbar = tqdm(total=idx_plt.size - 2) - ani = animation.FuncAnimation(fig, run, frames=idx_plt.size - 2, - interval=500, blit=False) + ani = animation.FuncAnimation( + fig, run, frames=idx_plt.size - 2, interval=500, blit=False + ) fig.tight_layout() ani.save(file_name, writer=writer) pbar.close() @@ -584,8 +614,8 @@ def 
frequency_from_tracks(self, tracks: List): """ if not tracks: return - year_max = np.amax([t['time'].dt.year.values.max() for t in tracks]) - year_min = np.amin([t['time'].dt.year.values.min() for t in tracks]) + year_max = np.amax([t["time"].dt.year.values.max() for t in tracks]) + year_min = np.amin([t["time"].dt.year.values.min() for t in tracks]) year_delta = year_max - year_min + 1 num_orig = np.count_nonzero(self.orig) ens_size = (self.event_id.size / num_orig) if num_orig > 0 else 1 @@ -597,7 +627,7 @@ def from_single_track( track: xr.Dataset, centroids: Centroids, idx_centr_filter: np.ndarray, - model: str = 'H08', + model: str = "H08", model_kwargs: Optional[dict] = None, store_windfields: bool = False, metric: str = "equirect", @@ -665,31 +695,36 @@ def from_single_track( new_haz.intensity = intensity_sparse if store_windfields: new_haz.windfields = [windfields_sparse] - new_haz.units = 'm/s' + new_haz.units = "m/s" new_haz.centroids = centroids new_haz.event_id = np.array([1]) new_haz.frequency = np.array([1]) - new_haz.event_name = [track.attrs['sid']] + new_haz.event_name = [track.attrs["sid"]] new_haz.fraction = sparse.csr_matrix(new_haz.intensity.shape) # store first day of track as date - new_haz.date = np.array([ - dt.datetime(track['time'].dt.year.values[0], - track['time'].dt.month.values[0], - track['time'].dt.day.values[0]).toordinal() - ]) - new_haz.orig = np.array([track.attrs['orig_event_flag']]) - new_haz.category = np.array([track.attrs['category']]) + new_haz.date = np.array( + [ + dt.datetime( + track["time"].dt.year.values[0], + track["time"].dt.month.values[0], + track["time"].dt.day.values[0], + ).toordinal() + ] + ) + new_haz.orig = np.array([track.attrs["orig_event_flag"]]) + new_haz.category = np.array([track.attrs["category"]]) # users that pickle TCTracks objects might still have data with the legacy basin attribute, # so we have to deal with it here - new_haz.basin = [track['basin'] if isinstance(track['basin'], str) - else str(track['basin'].values[0])] + new_haz.basin = [ + ( + track["basin"] + if isinstance(track["basin"], str) + else str(track["basin"].values[0]) + ) + ] return new_haz - def _apply_knutson_criterion( - self, - chg_int_freq: List, - scaling_rcp_year: float - ): + def _apply_knutson_criterion(self, chg_int_freq: List, scaling_rcp_year: float): """ Apply changes to intensities and cumulative frequencies. 
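Illustrative sketch (not part of the patch, all numbers invented): the `apply_climate_scenario_knu` hunks above only re-wrap the Knutson frequency rescaling, so for readers of the diff, the arithmetic is: per basin, the cat 0-3 change is whatever remains of the frequency-weighted cat 0-5 change once the cat 4-5 change is accounted for. A minimal NumPy version, assuming every event falls in categories 0-5:

    import numpy as np

    frequency = np.array([0.020, 0.030, 0.010, 0.005])  # invented per-event annual frequencies
    category = np.array([1, 3, 4, 5])                    # invented Saffir-Simpson categories
    scale_05, scale_45 = 10.0, 25.0                       # invented percent changes (cat 0-5, cat 4-5)

    sel_45 = np.isin(category, [4, 5])
    sel_03 = ~sel_45

    # frequency-weighted changes; the residual is attributed to categories 0-3
    change_05 = scale_05 * frequency.sum()
    change_45 = scale_45 * frequency[sel_45].sum()
    scale_03 = (change_05 - change_45) / frequency[sel_03].sum()

    future_frequency = frequency.copy()
    future_frequency[sel_03] *= 1 + scale_03 / 100
    future_frequency[sel_45] *= 1 + scale_45 / 100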
@@ -715,41 +750,42 @@ def _apply_knutson_criterion( bas_sel = np.array(tc_cc.basin) == basin # Apply intensity change - inten_chg = [chg - for chg in chg_int_freq - if (chg['variable'] == 'intensity' and - chg['basin'] == basin) - ] + inten_chg = [ + chg + for chg in chg_int_freq + if (chg["variable"] == "intensity" and chg["basin"] == basin) + ] for chg in inten_chg: - sel_cat_chg = np.isin(tc_cc.category, chg['category']) & bas_sel - inten_scaling = 1 + (chg['change'] - 1) * scaling_rcp_year + sel_cat_chg = np.isin(tc_cc.category, chg["category"]) & bas_sel + inten_scaling = 1 + (chg["change"] - 1) * scaling_rcp_year tc_cc.intensity = sparse.diags( np.where(sel_cat_chg, inten_scaling, 1) - ).dot(tc_cc.intensity) + ).dot(tc_cc.intensity) # Apply frequency change - freq_chg = [chg - for chg in chg_int_freq - if (chg['variable'] == 'frequency' and - chg['basin'] == basin) - ] - freq_chg.sort(reverse=False, key=lambda x: len(x['category'])) + freq_chg = [ + chg + for chg in chg_int_freq + if (chg["variable"] == "frequency" and chg["basin"] == basin) + ] + freq_chg.sort(reverse=False, key=lambda x: len(x["category"])) # Scale frequencies by category cat_larger_list = [] for chg in freq_chg: - cat_chg_list = [cat - for cat in chg['category'] - if cat not in cat_larger_list - ] + cat_chg_list = [ + cat for cat in chg["category"] if cat not in cat_larger_list + ] sel_cat_chg = np.isin(tc_cc.category, cat_chg_list) & bas_sel if sel_cat_chg.any(): - freq_scaling = 1 + (chg['change'] - 1) * scaling_rcp_year + freq_scaling = 1 + (chg["change"] - 1) * scaling_rcp_year tc_cc.frequency[sel_cat_chg] *= freq_scaling cat_larger_list += cat_chg_list if (tc_cc.frequency < 0).any(): - raise ValueError("The application of the given climate scenario" - "resulted in at least one negative frequency.") + raise ValueError( + "The application of the given climate scenario" + "resulted in at least one negative frequency." + ) return tc_cc diff --git a/climada/hazard/trop_cyclone/trop_cyclone_windfields.py b/climada/hazard/trop_cyclone/trop_cyclone_windfields.py index e82c0b11e..eba194bc9 100644 --- a/climada/hazard/trop_cyclone/trop_cyclone_windfields.py +++ b/climada/hazard/trop_cyclone/trop_cyclone_windfields.py @@ -20,7 +20,7 @@ """ import logging -from typing import Optional, Union, Tuple +from typing import Optional, Tuple, Union import numpy as np import xarray as xr @@ -28,7 +28,9 @@ from climada.hazard import Centroids from climada.hazard.tc_tracks import estimate_rmw -from climada.util import ureg, coordinates as u_coord, constants as u_const +from climada.util import constants as u_const +from climada.util import coordinates as u_coord +from climada.util import ureg LOGGER = logging.getLogger(__name__) @@ -49,7 +51,7 @@ DEF_MAX_MEMORY_GB = 8 """Default value of the memory limit (in GB) for windfield computations (in each thread).""" -MODEL_VANG = {'H08': 0, 'H1980': 1, 'H10': 2, 'ER11': 3} +MODEL_VANG = {"H08": 0, "H1980": 1, "H10": 2, "ER11": 3} """Enumerate different symmetric wind field models.""" DEF_RHO_AIR = 1.15 @@ -72,6 +74,7 @@ V_ANG_EARTH = 7.29e-5 """Earth angular velocity (in radians per second)""" + def _vgrad(si_track, gradient_to_surface_winds): """Gradient wind speeds (in m/s) without translational influence at each track node @@ -86,7 +89,8 @@ def _vgrad(si_track, gradient_to_surface_winds): The gradient-to-surface wind reduction factor to use. 
""" si_track["vgrad"] = ( - np.fmax(0, si_track["vmax"] - si_track["vtrans_norm"]) / gradient_to_surface_winds + np.fmax(0, si_track["vmax"] - si_track["vtrans_norm"]) + / gradient_to_surface_winds ) @@ -124,10 +128,10 @@ def compute_angular_windspeeds( """ model_kwargs = {} if model_kwargs is None else model_kwargs compute_funs = { - MODEL_VANG['H1980']: _compute_angular_windspeeds_h1980, - MODEL_VANG['H08']: _compute_angular_windspeeds_h08, - MODEL_VANG['H10']: _compute_angular_windspeeds_h10, - MODEL_VANG['ER11']: _stat_er_2011, + MODEL_VANG["H1980"]: _compute_angular_windspeeds_h1980, + MODEL_VANG["H08"]: _compute_angular_windspeeds_h08, + MODEL_VANG["H10"]: _compute_angular_windspeeds_h10, + MODEL_VANG["ER11"]: _stat_er_2011, } if model not in compute_funs: raise NotImplementedError(f"The specified wind model is not supported: {model}") @@ -182,7 +186,9 @@ def _compute_angular_windspeeds_h1980( _vgrad(si_track, gradient_to_surface_winds) _rho_air(si_track, rho_air_const) _B_holland_1980(si_track) - result = _stat_holland_1980(si_track, d_centr, close_centr_msk, cyclostrophic=cyclostrophic) + result = _stat_holland_1980( + si_track, d_centr, close_centr_msk, cyclostrophic=cyclostrophic + ) result *= gradient_to_surface_winds return result @@ -226,7 +232,9 @@ def _compute_angular_windspeeds_h08( """ _rho_air(si_track, rho_air_const) _bs_holland_2008(si_track, gradient_to_surface_winds=gradient_to_surface_winds) - return _stat_holland_1980(si_track, d_centr, close_centr_msk, cyclostrophic=cyclostrophic) + return _stat_holland_1980( + si_track, d_centr, close_centr_msk, cyclostrophic=cyclostrophic + ) def _compute_angular_windspeeds_h10( @@ -279,18 +287,25 @@ def _compute_angular_windspeeds_h10( """ if not cyclostrophic: LOGGER.debug( - 'The function _compute_angular_windspeeds_h10 was called with parameter ' + "The function _compute_angular_windspeeds_h10 was called with parameter " '"cyclostrophic" equal to false. Please be aware that this setting is ignored as the' - ' Holland et al. 2010 model is always cyclostrophic.') + " Holland et al. 2010 model is always cyclostrophic." + ) _rho_air(si_track, rho_air_const) if vmax_from_cen: _bs_holland_2008(si_track, gradient_to_surface_winds=gradient_to_surface_winds) _v_max_s_holland_2008(si_track) else: _B_holland_1980(si_track, gradient_to_surface_winds=gradient_to_surface_winds) - hol_x = _x_holland_2010(si_track, d_centr, close_centr_msk, vmax_in_brackets=vmax_in_brackets) + hol_x = _x_holland_2010( + si_track, d_centr, close_centr_msk, vmax_in_brackets=vmax_in_brackets + ) return _stat_holland_2010( - si_track, d_centr, close_centr_msk, hol_x, vmax_in_brackets=vmax_in_brackets, + si_track, + d_centr, + close_centr_msk, + hol_x, + vmax_in_brackets=vmax_in_brackets, ) @@ -334,7 +349,7 @@ def _rho_air(si_track: xr.Dataset, const: Optional[float]): r_dry_air = 286.9 # density of air (in kg/m³); when checking the units, note that J/Pa = m³ - si_track["rho_air"] = pres_eyewall / (r_dry_air * temp_vs) + si_track["rho_air"] = pres_eyewall / (r_dry_air * temp_vs) def _bs_holland_2008( @@ -394,12 +409,17 @@ def _bs_holland_2008( # and time steps are in hours instead of seconds, but translational wind speed is still # expected to be in m/s. pdelta = si_track["pdelta"] / MBAR_TO_PA - hol_xx = 0.6 * (1. 
- pdelta / 215) + hol_xx = 0.6 * (1.0 - pdelta / 215) si_track["hol_b"] = ( - -4.4e-5 * pdelta**2 + 0.01 * pdelta - + 0.03 * (si_track["cen"] - prev_cen) / si_track["tstep"] * (H_TO_S / MBAR_TO_PA) + -4.4e-5 * pdelta**2 + + 0.01 * pdelta + + 0.03 + * (si_track["cen"] - prev_cen) + / si_track["tstep"] + * (H_TO_S / MBAR_TO_PA) - 0.014 * abs(si_track["lat"]) - + 0.15 * si_track["vtrans_norm"]**hol_xx + 1.0 + + 0.15 * si_track["vtrans_norm"] ** hol_xx + + 1.0 ) clip_interval = _b_holland_clip_interval(gradient_to_surface_winds) si_track["hol_b"] = np.clip(si_track["hol_b"], *clip_interval) @@ -472,7 +492,7 @@ def _B_holland_1980( # pylint: disable=invalid-name windvar = "vgrad" if gradient_to_surface_winds is None else "vmax" si_track["hol_b"] = ( - si_track[windvar]**2 * np.exp(1) * si_track["rho_air"] / si_track["pdelta"] + si_track[windvar] ** 2 * np.exp(1) * si_track["rho_air"] / si_track["pdelta"] ) clip_interval = _b_holland_clip_interval(gradient_to_surface_winds) @@ -572,7 +592,7 @@ def _x_holland_2010( # compute peripheral exponent from second measurement # (equation (6) from Holland et al. 2010 solved for x) - r_max_norm = (r_max / r_n)**hol_b + r_max_norm = (r_max / r_n) ** hol_b if vmax_in_brackets: x_n = np.log(v_n) / np.log(v_max_s**2 * r_max_norm * np.exp(1 - r_max_norm)) @@ -586,7 +606,9 @@ def _x_holland_2010( # linearly interpolate between max exponent and peripheral exponent x_max = 0.5 - hol_x[mask_centr_close] = x_max + np.fmax(0, d_centr - r_max) * (x_n - x_max) / (r_n - r_max) + hol_x[mask_centr_close] = x_max + np.fmax(0, d_centr - r_max) * (x_n - x_max) / ( + r_n - r_max + ) # Truncate to prevent wind speed from increasing again towards the peripheral radius (which is # unphysical). A value of 0.4 has been found to be reasonable by manual testing of thresholds. @@ -656,11 +678,15 @@ def _stat_holland_2010( ] ] - r_max_norm = (r_max / np.fmax(1, d_centr))**hol_b + r_max_norm = (r_max / np.fmax(1, d_centr)) ** hol_b if vmax_in_brackets: - v_ang[mask_centr_close] = (v_max_s**2 * r_max_norm * np.exp(1 - r_max_norm))**hol_x + v_ang[mask_centr_close] = ( + v_max_s**2 * r_max_norm * np.exp(1 - r_max_norm) + ) ** hol_x else: - v_ang[mask_centr_close] = v_max_s * (r_max_norm * np.exp(1 - r_max_norm))**hol_x + v_ang[mask_centr_close] = ( + v_max_s * (r_max_norm * np.exp(1 - r_max_norm)) ** hol_x + ) return v_ang @@ -668,7 +694,7 @@ def _stat_holland_1980( si_track: xr.Dataset, d_centr: np.ndarray, mask_centr_close: np.ndarray, - cyclostrophic: bool = False + cyclostrophic: bool = False, ) -> np.ndarray: """Symmetric and static wind fields (in m/s) according to Holland 1980. 
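Illustrative sketch (not part of the patch, all values invented): the hunk below only re-wraps the Holland (1980) gradient-wind profile that `_stat_holland_1980` evaluates. Written out for a single track node, with a throwaway helper in which `hol_b`, `pdelta`, `rho_air` and the Coriolis parameter `cp` play the same roles as the like-named `si_track` variables:

    import numpy as np

    r_max, hol_b, pdelta, rho_air, cp = 40e3, 1.5, 4000.0, 1.15, 3.5e-5  # SI units, invented values

    def holland_1980_wind(d_centr, cyclostrophic=False):
        """Angular wind speed (in m/s) at distances d_centr (in m) from the eye."""
        r_coriolis = 0.0 if cyclostrophic else 0.5 * d_centr * cp
        r_max_norm = (r_max / np.fmax(1, d_centr)) ** hol_b
        sqrt_term = hol_b / rho_air * r_max_norm * pdelta * np.exp(-r_max_norm) + r_coriolis**2
        return np.sqrt(np.fmax(0, sqrt_term)) - r_coriolis

    print(holland_1980_wind(np.array([10e3, 40e3, 300e3])))  # wind speed peaks near r_max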
@@ -725,8 +751,10 @@ def _stat_holland_1980( if not cyclostrophic: r_coriolis = 0.5 * d_centr * coriolis_p - r_max_norm = (r_max / np.fmax(1, d_centr))**hol_b - sqrt_term = hol_b / rho_air * r_max_norm * pdelta * np.exp(-r_max_norm) + r_coriolis**2 + r_max_norm = (r_max / np.fmax(1, d_centr)) ** hol_b + sqrt_term = ( + hol_b / rho_air * r_max_norm * pdelta * np.exp(-r_max_norm) + r_coriolis**2 + ) v_ang[mask_centr_close] = np.sqrt(np.fmax(0, sqrt_term)) - r_coriolis return v_ang @@ -793,7 +821,7 @@ def _stat_er_2011( momentum_max += 0.5 * coriolis_p * r_max**2 # rescale the momentum using formula (36) in Emanuel and Rotunno 2011 with Ck == Cd - r_max_norm = (d_centr / r_max)**2 + r_max_norm = (d_centr / r_max) ** 2 momentum = momentum_max * 2 * r_max_norm / (1 + r_max_norm) # extract the velocity from the rescaled momentum through division by r @@ -832,9 +860,16 @@ def _vtrans(si_track: xr.Dataset, metric: str = "equirect"): si_track["component"] = ("component", ["v", "u"]) t_lat, t_lon = si_track["lat"].values, si_track["lon"].values - norm, vec = u_coord.dist_approx(t_lat[:-1, None], t_lon[:-1, None], - t_lat[1:, None], t_lon[1:, None], - log=True, normalize=False, method=metric, units="m") + norm, vec = u_coord.dist_approx( + t_lat[:-1, None], + t_lon[:-1, None], + t_lat[1:, None], + t_lon[1:, None], + log=True, + normalize=False, + method=metric, + units="m", + ) si_track["vtrans"].values[1:, :] = vec[:, 0, 0] / si_track["tstep"].values[1:, None] si_track["vtrans_norm"].values[1:] = norm[:, 0, 0] / si_track["tstep"].values[1:] @@ -860,11 +895,12 @@ def _coriolis_parameter(lat: np.ndarray) -> np.ndarray: """ return 2 * V_ANG_EARTH * np.sin(np.radians(np.abs(lat))) + def compute_windfields_sparse( track: xr.Dataset, centroids: Centroids, idx_centr_filter: np.ndarray, - model: str = 'H08', + model: str = "H08", model_kwargs: Optional[dict] = None, store_windfields: bool = False, metric: str = "equirect", @@ -921,7 +957,7 @@ def compute_windfields_sparse( try: mod_id = MODEL_VANG[model] except KeyError as err: - raise ValueError(f'Model not implemented: {model}.') from err + raise ValueError(f"Model not implemented: {model}.") from err ncentroids = centroids.coord.shape[0] npositions = track.sizes["time"] @@ -931,7 +967,8 @@ def compute_windfields_sparse( # initialise arrays for the assumption that no centroids are within reach windfields_sparse = ( sparse.csr_matrix(([], ([], [])), shape=windfields_shape) - if store_windfields else None + if store_windfields + else None ) intensity_sparse = sparse.csr_matrix(([], ([], [])), shape=intensity_shape) @@ -948,7 +985,10 @@ def compute_windfields_sparse( # returned by `get_close_centroids` are normalized to be consistent with the coordinates in # `si_track`. 
centroids_close, mask_centr, mask_centr_alongtrack = get_close_centroids( - si_track, centroids.coord[idx_centr_filter], max_dist_eye_km, metric=metric, + si_track, + centroids.coord[idx_centr_filter], + max_dist_eye_km, + metric=metric, ) idx_centr_filter = idx_centr_filter[mask_centr] n_centr_close = centroids_close.shape[0] @@ -992,8 +1032,8 @@ def compute_windfields_sparse( intensity = np.linalg.norm(windfields, axis=-1).max(axis=0) intensity[intensity < intensity_thres] = 0 intensity_sparse = sparse.csr_matrix( - (intensity, idx_centr_filter, [0, intensity.size]), - shape=intensity_shape) + (intensity, idx_centr_filter, [0, intensity.size]), shape=intensity_shape + ) intensity_sparse.eliminate_zeros() windfields_sparse = None @@ -1004,8 +1044,9 @@ def compute_windfields_sparse( indices[:, :, 1] = 2 * idx_centr_filter[None] + 1 indices = indices.ravel() indptr = np.arange(npositions + 1) * n_centr_filter * 2 - windfields_sparse = sparse.csr_matrix((windfields.ravel(), indices, indptr), - shape=windfields_shape) + windfields_sparse = sparse.csr_matrix( + (windfields.ravel(), indices, indptr), shape=windfields_shape + ) windfields_sparse.eliminate_zeros() return intensity_sparse, windfields_sparse @@ -1060,8 +1101,10 @@ def _compute_windfields_sparse_chunked( for prev_chunk_end, chunk_end in zip(split_pos[:-1], split_pos[1:]): chunk_start = max(0, prev_chunk_end - 1) inten, win = compute_windfields_sparse( - track.isel(time=slice(chunk_start, chunk_end)), *args, - max_memory_gb=max_memory_gb, **kwargs, + track.isel(time=slice(chunk_start, chunk_end)), + *args, + max_memory_gb=max_memory_gb, + **kwargs, ) intensity.append(inten) windfields.append(win) @@ -1128,9 +1171,15 @@ def _compute_windfields( # compute distances (in m) and vectors to all centroids [d_centr], [v_centr_normed] = u_coord.dist_approx( - si_track["lat"].values[None], si_track["lon"].values[None], - centroids[None, :, 0], centroids[None, :, 1], - log=True, normalize=False, method=metric, units="m") + si_track["lat"].values[None], + si_track["lon"].values[None], + centroids[None, :, 0], + centroids[None, :, 1], + log=True, + normalize=False, + method=metric, + units="m", + ) # exclude centroids that are too far from or too close to the eye mask_centr_close = (d_centr <= max_dist_eye_km * KM_TO_M) & (d_centr > 1) @@ -1149,7 +1198,12 @@ def _compute_windfields( # derive (absolute) angular velocity from parametric wind profile v_ang_norm = compute_angular_windspeeds( - si_track, d_centr, mask_centr_close, model, model_kwargs=model_kwargs, cyclostrophic=False, + si_track, + d_centr, + mask_centr_close, + model, + model_kwargs=model_kwargs, + cyclostrophic=False, ) # Influence of translational speed decreases with distance from eye. @@ -1163,20 +1217,25 @@ def _compute_windfields( t_rad_bc = np.broadcast_to(si_track["rad"].values[:, None], d_centr.shape) v_trans_corr = np.zeros_like(d_centr) v_trans_corr[mask_centr_close] = np.fmin( - 1, t_rad_bc[mask_centr_close] / d_centr[mask_centr_close]) + 1, t_rad_bc[mask_centr_close] / d_centr[mask_centr_close] + ) - if model in [MODEL_VANG['H08'], MODEL_VANG['H10']]: + if model in [MODEL_VANG["H08"], MODEL_VANG["H10"]]: # In these models, v_ang_norm already contains vtrans_norm, so subtract it first, before # converting to vectors and then adding (vectorial) vtrans again. 
Make sure to apply the # "absorbing factor" in both steps: - vtrans_norm_bc = np.broadcast_to(si_track["vtrans_norm"].values[:, None], d_centr.shape) + vtrans_norm_bc = np.broadcast_to( + si_track["vtrans_norm"].values[:, None], d_centr.shape + ) v_ang_norm[mask_centr_close] -= ( - vtrans_norm_bc[mask_centr_close] * v_trans_corr[mask_centr_close] + vtrans_norm_bc[mask_centr_close] * v_trans_corr[mask_centr_close] ) # vectorial angular velocity windfields = ( - si_track.attrs["latsign"] * np.array([1.0, -1.0])[..., :] * v_centr_normed[:, :, ::-1] + si_track.attrs["latsign"] + * np.array([1.0, -1.0])[..., :] + * v_centr_normed[:, :, ::-1] ) windfields[mask_centr_close] *= v_ang_norm[mask_centr_close, None] @@ -1243,7 +1302,7 @@ def tctrack_to_si( except Exception as ex: raise ValueError( f"The {long_name}_unit '{unit}' in the provided track is not supported." - ) from ex + ) from ex si_track[var_name] = track[long_name] * conv_factor # normalize longitudinal coordinates @@ -1257,14 +1316,15 @@ def tctrack_to_si( # extrapolate radius of max wind from pressure if not given si_track["rad"] = track["radius_max_wind"].copy() si_track["rad"].values[:] = estimate_rmw( - si_track["rad"].values, si_track["cen"].values / MBAR_TO_PA, + si_track["rad"].values, + si_track["cen"].values / MBAR_TO_PA, ) si_track["rad"] *= NM_TO_KM * KM_TO_M - hemisphere = 'N' + hemisphere = "N" if np.count_nonzero(si_track["lat"] < 0) > np.count_nonzero(si_track["lat"] > 0): - hemisphere = 'S' - si_track.attrs["latsign"] = 1.0 if hemisphere == 'N' else -1.0 + hemisphere = "S" + si_track.attrs["latsign"] = 1.0 if hemisphere == "N" else -1.0 # add translational speed of track at every node (in m/s) _vtrans(si_track, metric=metric) @@ -1333,9 +1393,8 @@ def get_close_centroids( # centroids that are considered by a factor larger than 30). buffer_lat = buffer_km / u_const.ONE_LAT_KM buffer_lon = buffer_km / ( - u_const.ONE_LAT_KM * np.cos(np.radians( - np.fmin(89.999, np.abs(centr_lat) + buffer_lat) - )) + u_const.ONE_LAT_KM + * np.cos(np.radians(np.fmin(89.999, np.abs(centr_lat) + buffer_lat))) ) [idx_close] = ( (t_lat.min() - centr_lat <= buffer_lat) @@ -1348,15 +1407,20 @@ def get_close_centroids( # Restrict to bounding boxes of each track position. buffer_lat = buffer_km / u_const.ONE_LAT_KM - buffer_lon = buffer_km / (u_const.ONE_LAT_KM * np.cos(np.radians( - np.fmin(89.999, np.abs(t_lat[:, None]) + buffer_lat) - ))) + buffer_lon = buffer_km / ( + u_const.ONE_LAT_KM + * np.cos(np.radians(np.fmin(89.999, np.abs(t_lat[:, None]) + buffer_lat))) + ) [idx_close_sub] = ( - (t_lat[:, None] - buffer_lat <= centr_lat[None]) - & (t_lat[:, None] + buffer_lat >= centr_lat[None]) - & (t_lon[:, None] - buffer_lon <= centr_lon[None]) - & (t_lon[:, None] + buffer_lon >= centr_lon[None]) - ).any(axis=0).nonzero() + ( + (t_lat[:, None] - buffer_lat <= centr_lat[None]) + & (t_lat[:, None] + buffer_lat >= centr_lat[None]) + & (t_lon[:, None] - buffer_lon <= centr_lon[None]) + & (t_lon[:, None] + buffer_lon >= centr_lon[None]) + ) + .any(axis=0) + .nonzero() + ) idx_close = idx_close[idx_close_sub] centr_lat = centr_lat[idx_close_sub] centr_lon = centr_lon[idx_close_sub] @@ -1369,16 +1433,27 @@ def get_close_centroids( # FAITH. With a chunk size of 10, this figure is down to 240 MB. The final along-track mask # will require 1.0 GB of memory. 
chunk_size = 10 - chunks = np.split(np.arange(npositions), np.arange(chunk_size, npositions, chunk_size)) - mask_centr_alongtrack = np.concatenate([ - ( - u_coord.dist_approx( - t_lat[None, chunk], t_lon[None, chunk], - centr_lat[None], centr_lon[None], - normalize=False, method=metric, units="km", - )[0] <= buffer_km - ) for chunk in chunks - ], axis=0) + chunks = np.split( + np.arange(npositions), np.arange(chunk_size, npositions, chunk_size) + ) + mask_centr_alongtrack = np.concatenate( + [ + ( + u_coord.dist_approx( + t_lat[None, chunk], + t_lon[None, chunk], + centr_lat[None], + centr_lon[None], + normalize=False, + method=metric, + units="km", + )[0] + <= buffer_km + ) + for chunk in chunks + ], + axis=0, + ) [idx_close_sub] = mask_centr_alongtrack.any(axis=0).nonzero() idx_close = idx_close[idx_close_sub] centr_lat = centr_lat[idx_close_sub] diff --git a/climada/test/__init__.py b/climada/test/__init__.py index 34ef38092..83aa75857 100755 --- a/climada/test/__init__.py +++ b/climada/test/__init__.py @@ -19,8 +19,8 @@ init test """ -from climada.util.api_client import Client from climada._version import __version__ as climada_version +from climada.util.api_client import Client def get_test_file(ds_name, file_format=None): @@ -46,13 +46,25 @@ def get_test_file(ds_name, file_format=None): # get the dataset with the highest version below (or equal to) the current climada version # in this way a test dataset can be updated without breaking tests on former versions # just make sure that the new dataset has a higher version than any previous version - test_ds = [ds for ds in sorted( - client.list_dataset_infos(name=ds_name, status='test_dataset', version='ANY'), - key=lambda ds: ds.version - ) if ds.version.strip('v') <= climada_version.strip('v')][-1] + test_ds = [ + ds + for ds in sorted( + client.list_dataset_infos( + name=ds_name, status="test_dataset", version="ANY" + ), + key=lambda ds: ds.version, + ) + if ds.version.strip("v") <= climada_version.strip("v") + ][-1] _, files = client.download_dataset(test_ds) - [test_file] = [fil for fil in files if fil.name in [ - dsf.file_name - for dsf in test_ds.files - if file_format is None or dsf.file_format == file_format]] + [test_file] = [ + fil + for fil in files + if fil.name + in [ + dsf.file_name + for dsf in test_ds.files + if file_format is None or dsf.file_format == file_format + ] + ] return test_file diff --git a/climada/test/test_api_client.py b/climada/test/test_api_client.py index 3b60a016e..6bd86ed4f 100644 --- a/climada/test/test_api_client.py +++ b/climada/test/test_api_client.py @@ -18,9 +18,10 @@ Test save module. 
""" -from pathlib import Path + import tempfile import unittest +from pathlib import Path import numpy as np @@ -36,30 +37,40 @@ class TestClient(unittest.TestCase): def test_data_type(self): """""" lpdt = Client().get_data_type_info("tropical_cyclone") - self.assertEqual(lpdt.data_type, 'tropical_cyclone') - self.assertEqual(lpdt.data_type_group, 'hazard') - self.assertTrue('res_arcsec' in [p['property'] for p in lpdt.properties if p['mandatory']]) - self.assertTrue('ref_year' in [p['property'] for p in lpdt.properties if not p['mandatory']]) + self.assertEqual(lpdt.data_type, "tropical_cyclone") + self.assertEqual(lpdt.data_type_group, "hazard") + self.assertTrue( + "res_arcsec" in [p["property"] for p in lpdt.properties if p["mandatory"]] + ) + self.assertTrue( + "ref_year" in [p["property"] for p in lpdt.properties if not p["mandatory"]] + ) def test_data_types(self): """""" exdts = Client().list_data_type_infos("exposures") - self.assertTrue('litpop' in [exdt.data_type for exdt in exdts]) + self.assertTrue("litpop" in [exdt.data_type for exdt in exdts]) def test_datasets(self): """""" - datasets = Client().list_dataset_infos(status=None, name='FAOSTAT_data_producer_prices') + datasets = Client().list_dataset_infos( + status=None, name="FAOSTAT_data_producer_prices" + ) self.assertEqual(len(datasets), 1) def test_dataset(self): """""" client = Client() - dataset = client.get_dataset_info(name='FAOSTAT_data_producer_prices', status='test_dataset') - self.assertEqual(dataset.version, 'v1') + dataset = client.get_dataset_info( + name="FAOSTAT_data_producer_prices", status="test_dataset" + ) + self.assertEqual(dataset.version, "v1") self.assertEqual(len(dataset.files), 1) self.assertEqual(dataset.files[0].file_size, 26481) - self.assertEqual(dataset.data_type, DataTypeShortInfo('crop_production', 'exposures')) + self.assertEqual( + dataset.data_type, DataTypeShortInfo("crop_production", "exposures") + ) dataset2 = client.get_dataset_info_by_uuid(dataset.uuid) self.assertEqual(dataset, dataset2) @@ -68,49 +79,64 @@ def test_search_for_property_not_set(self): """""" client = Client() - nocountry = client.list_dataset_infos(data_type="earthquake", - properties={'country_name': None})[0] - self.assertNotIn('country_name', nocountry.properties) - self.assertIn('spatial_coverage', nocountry.properties) + nocountry = client.list_dataset_infos( + data_type="earthquake", properties={"country_name": None} + )[0] + self.assertNotIn("country_name", nocountry.properties) + self.assertIn("spatial_coverage", nocountry.properties) def test_dataset_offline(self): """""" client = Client() client.online = False - with self.assertLogs('climada.util.api_client', level='WARNING') as cm: - dataset = client.get_dataset_info(name='FAOSTAT_data_producer_prices', - status='test_dataset') - self.assertIn("there is no internet connection but the client has stored ", cm.output[0]) + with self.assertLogs("climada.util.api_client", level="WARNING") as cm: + dataset = client.get_dataset_info( + name="FAOSTAT_data_producer_prices", status="test_dataset" + ) + self.assertIn( + "there is no internet connection but the client has stored ", cm.output[0] + ) - self.assertEqual(dataset.version, 'v1') + self.assertEqual(dataset.version, "v1") self.assertEqual(len(dataset.files), 1) self.assertEqual(dataset.files[0].file_size, 26481) - self.assertEqual(dataset.data_type, DataTypeShortInfo('crop_production', 'exposures')) + self.assertEqual( + dataset.data_type, DataTypeShortInfo("crop_production", "exposures") + ) with 
self.assertRaises(AssertionError) as ar: - with self.assertLogs('climada.util.api_client', level='WARNING') as cm: + with self.assertLogs("climada.util.api_client", level="WARNING") as cm: dataset2 = Client().get_dataset_info_by_uuid(dataset.uuid) self.assertIn("no logs of level WARNING or higher triggered", str(ar.exception)) self.assertEqual(dataset, dataset2) - with self.assertLogs('climada.util.api_client', level='WARNING') as cm: + with self.assertLogs("climada.util.api_client", level="WARNING") as cm: dataset2 = client.get_dataset_info_by_uuid(dataset.uuid) - self.assertIn("there is no internet connection but the client has stored ", cm.output[0]) + self.assertIn( + "there is no internet connection but the client has stored ", cm.output[0] + ) self.assertEqual(dataset, dataset2) def test_download_file(self): """""" client = Client() client.MAX_WAITING_PERIOD = 0.1 - dataset = client.get_dataset_info(name='FAOSTAT_data_producer_prices', - status='test_dataset') + dataset = client.get_dataset_info( + name="FAOSTAT_data_producer_prices", status="test_dataset" + ) # test failure def fail(x, y): raise Download.Failed("on purpose") - self.assertRaises(Download.Failed, - client._download_file, DATA_DIR, dataset.files[0], check=fail) + + self.assertRaises( + Download.Failed, + client._download_file, + DATA_DIR, + dataset.files[0], + check=fail, + ) self.assertFalse(DATA_DIR.joinpath(dataset.files[0].file_name).is_file()) # test success @@ -126,7 +152,9 @@ def test_download_dataset(self): client = Client() client.MAX_WAITING_PERIOD = 0.1 - dataset = client.get_dataset_info(name='test_write_raster', status='test_dataset') + dataset = client.get_dataset_info( + name="test_write_raster", status="test_dataset" + ) download_dir, downloads = client.download_dataset(dataset, target_dir=DATA_DIR) self.assertEqual(download_dir.name, dataset.version) self.assertEqual(download_dir.parent.name, dataset.name) @@ -142,94 +170,136 @@ def test_download_dataset(self): def test_get_exposures(self): client = Client() - exposures = client.get_exposures(exposures_type='litpop', - properties={'country_iso3alpha': 'AUT', - 'fin_mode': 'pop', 'exponents': '(0,1)'}, - version='v1', - dump_dir=DATA_DIR) + exposures = client.get_exposures( + exposures_type="litpop", + properties={ + "country_iso3alpha": "AUT", + "fin_mode": "pop", + "exponents": "(0,1)", + }, + version="v1", + dump_dir=DATA_DIR, + ) self.assertEqual(len(exposures.gdf), 5782) - self.assertEqual(np.unique(exposures.gdf['region_id']), 40) - self.assertEqual(exposures.description, - "LitPop Exposure for ['AUT'] at 150 as, year: 2018, financial mode: pop, exp: [0, 1], admin1_calc: False") + self.assertEqual(np.unique(exposures.gdf["region_id"]), 40) + self.assertEqual( + exposures.description, + "LitPop Exposure for ['AUT'] at 150 as, year: 2018, financial mode: pop, exp: [0, 1], admin1_calc: False", + ) def test_get_exposures_fails(self): client = Client() with self.assertRaises(ValueError) as cm: - client.get_exposures(exposures_type='river_flood', - properties={'country_iso3alpha': 'AUT', - 'fin_mode': 'pop', 'exponents': '(0,1)'}, - dump_dir=DATA_DIR) - self.assertIn('Valid exposures types are a subset of CLIMADA exposures types. Currently', - str(cm.exception)) + client.get_exposures( + exposures_type="river_flood", + properties={ + "country_iso3alpha": "AUT", + "fin_mode": "pop", + "exponents": "(0,1)", + }, + dump_dir=DATA_DIR, + ) + self.assertIn( + "Valid exposures types are a subset of CLIMADA exposures types. 
Currently", + str(cm.exception), + ) with self.assertRaises(Client.AmbiguousResult) as cm: - client.get_exposures(exposures_type='litpop', - properties={'country_iso3alpha': 'AUT'}, - dump_dir=DATA_DIR) - self.assertIn('there are 3 datasets meeting the requirements', - str(cm.exception)) + client.get_exposures( + exposures_type="litpop", + properties={"country_iso3alpha": "AUT"}, + dump_dir=DATA_DIR, + ) + self.assertIn( + "there are 3 datasets meeting the requirements", str(cm.exception) + ) def test_get_hazard(self): client = Client() - hazard = client.get_hazard(hazard_type='river_flood', - properties={'country_name': 'Austria', - 'year_range': '2010_2030', 'climate_scenario': 'rcp26'}, - version='v1', - dump_dir=DATA_DIR) + hazard = client.get_hazard( + hazard_type="river_flood", + properties={ + "country_name": "Austria", + "year_range": "2010_2030", + "climate_scenario": "rcp26", + }, + version="v1", + dump_dir=DATA_DIR, + ) self.assertEqual(np.shape(hazard.intensity), (480, 5784)) self.assertEqual(np.unique(hazard.centroids.region_id), 40) self.assertEqual(np.unique(hazard.date).size, 20) - self.assertEqual(hazard.haz_type, 'RF') + self.assertEqual(hazard.haz_type, "RF") def test_get_hazard_fails(self): client = Client() with self.assertRaises(ValueError) as cm: - client.get_hazard(hazard_type='litpop', - properties={'country_name': 'Austria', - 'year_range': '2010_2030', 'climate_scenario': 'rcp26'}, - dump_dir=DATA_DIR) - self.assertIn('Valid hazard types are a subset of CLIMADA hazard types. Currently', - str(cm.exception)) + client.get_hazard( + hazard_type="litpop", + properties={ + "country_name": "Austria", + "year_range": "2010_2030", + "climate_scenario": "rcp26", + }, + dump_dir=DATA_DIR, + ) + self.assertIn( + "Valid hazard types are a subset of CLIMADA hazard types. Currently", + str(cm.exception), + ) with self.assertRaises(Client.AmbiguousResult) as cm: - client.get_hazard(hazard_type='river_flood', - properties={'country_name': ['Switzerland', 'Austria'], - 'year_range': '2010_2030', 'climate_scenario': ['rcp26', 'rcp85']}, - dump_dir=DATA_DIR) - self.assertIn('there are 4 datasets meeting the requirements:', str(cm.exception)) + client.get_hazard( + hazard_type="river_flood", + properties={ + "country_name": ["Switzerland", "Austria"], + "year_range": "2010_2030", + "climate_scenario": ["rcp26", "rcp85"], + }, + dump_dir=DATA_DIR, + ) + self.assertIn( + "there are 4 datasets meeting the requirements:", str(cm.exception) + ) def test_get_litpop(self): client = Client() - litpop = client.get_litpop(country='LUX', version='v1', dump_dir=DATA_DIR) + litpop = client.get_litpop(country="LUX", version="v1", dump_dir=DATA_DIR) self.assertEqual(len(litpop.gdf), 188) - self.assertEqual(np.unique(litpop.gdf['region_id']), 442) - self.assertEqual(litpop.description, - "LitPop Exposure for ['LUX'] at 150 as, year: 2018, financial mode: pc, exp: [1, 1], admin1_calc: False") + self.assertEqual(np.unique(litpop.gdf["region_id"]), 442) + self.assertEqual( + litpop.description, + "LitPop Exposure for ['LUX'] at 150 as, year: 2018, financial mode: pc, exp: [1, 1], admin1_calc: False", + ) def test_get_litpop_fail(self): client = Client() with self.assertRaises(ValueError) as cm: - client.get_litpop(['AUT', 'CHE']) - self.assertIn(" can only query single countries. Download the data for multiple countries individually and concatenate ", - str(cm.exception)) + client.get_litpop(["AUT", "CHE"]) + self.assertIn( + " can only query single countries. 
Download the data for multiple countries individually and concatenate ", + str(cm.exception), + ) def test_get_centroids_plot(self): client = Client() - client.get_centroids(country='COM').plot() + client.get_centroids(country="COM").plot() def test_get_dataset_file(self): client = Client() with tempfile.TemporaryDirectory() as temp_dir: single_file = client.get_dataset_file( - name='test_imp_mat', status='test_dataset', # get_dataset_info arguments - target_dir=Path(temp_dir), organize_path=False, # download_dataset arguments + name="test_imp_mat", + status="test_dataset", # get_dataset_info arguments + target_dir=Path(temp_dir), + organize_path=False, # download_dataset arguments ) self.assertTrue(single_file.is_file()) self.assertEqual(list(Path(temp_dir).iterdir()), [single_file]) def test_multi_filter(self): client = Client() - testds = client.list_dataset_infos(data_type='storm_europe') + testds = client.list_dataset_infos(data_type="storm_europe") # assert no systemic loss in filtering still = client._filter_datasets(testds, dict()) @@ -237,62 +307,101 @@ def test_multi_filter(self): self.assertEqual(o, r) # assert filter is effective - p = 'country_name' - a, b = 'Germany', 'Netherlands' - less = client._filter_datasets(testds, {p:[a, b]}) + p = "country_name" + a, b = "Germany", "Netherlands" + less = client._filter_datasets(testds, {p: [a, b]}) self.assertLess(len(less), len(testds)) - only = client._filter_datasets(testds, {p:[a]}) + only = client._filter_datasets(testds, {p: [a]}) self.assertLess(len(only), len(less)) self.assertLess(0, len(only)) def test_multiplicity_split(self): - properties = { - 'country_name': ['x', 'y', 'z'], - 'b': '1' - } + properties = {"country_name": ["x", "y", "z"], "b": "1"} # assert split matches expectations straight, multi = Client._divide_straight_from_multi(properties) - self.assertEqual(straight, {'b': '1'}) - self.assertEqual(multi, {'country_name': ['x', 'y', 'z']}) + self.assertEqual(straight, {"b": "1"}) + self.assertEqual(multi, {"country_name": ["x", "y", "z"]}) def test_purge_cache(self): client = Client() - active_ds = client.get_dataset_info(data_type="litpop", name="LitPop_150arcsec_ABW", version="v3") - outdated_ds = client.get_dataset_info(data_type="litpop", name="LitPop_150arcsec_ABW", version="v1") - test_ds = client.get_dataset_info(data_type="storm_europe", name="test_storm_europe_icon_2021012800", version="v1", status="test_dataset") - expired_ds = client.get_dataset_info(data_type="tropical_cyclone", name="rename_files2", version="v1", status="expired") + active_ds = client.get_dataset_info( + data_type="litpop", name="LitPop_150arcsec_ABW", version="v3" + ) + outdated_ds = client.get_dataset_info( + data_type="litpop", name="LitPop_150arcsec_ABW", version="v1" + ) + test_ds = client.get_dataset_info( + data_type="storm_europe", + name="test_storm_europe_icon_2021012800", + version="v1", + status="test_dataset", + ) + expired_ds = client.get_dataset_info( + data_type="tropical_cyclone", + name="rename_files2", + version="v1", + status="expired", + ) with tempfile.TemporaryDirectory() as temp_dir: for ds in [active_ds, outdated_ds, test_ds, expired_ds]: client.download_dataset(dataset=ds, target_dir=Path(temp_dir)) self.assertEqual( # outdated dataset present 1, - len(list(Path(temp_dir).joinpath('exposures/litpop/LitPop_150arcsec_ABW/v1').iterdir())) + len( + list( + Path(temp_dir) + .joinpath("exposures/litpop/LitPop_150arcsec_ABW/v1") + .iterdir() + ) + ), ) self.assertEqual( # expired data set present 1, - 
len(list(Path(temp_dir).joinpath('hazard/tropical_cyclone/rename_files2/v1').iterdir())) + len( + list( + Path(temp_dir) + .joinpath("hazard/tropical_cyclone/rename_files2/v1") + .iterdir() + ) + ), ) client.purge_cache(target_dir=temp_dir) self.assertFalse( # outdated data set removed - Path(temp_dir).joinpath('exposures/litpop/LitPop_150arcsec_ABW/v1').is_dir() + Path(temp_dir) + .joinpath("exposures/litpop/LitPop_150arcsec_ABW/v1") + .is_dir() ) self.assertFalse( # expired data set removed - Path(temp_dir).joinpath('hazard/tropical_cyclone/rename_files2/v1').is_dir() + Path(temp_dir) + .joinpath("hazard/tropical_cyclone/rename_files2/v1") + .is_dir() ) self.assertEqual( # test files are still there 3, - len(list(Path(temp_dir).joinpath('hazard/storm_europe/test_storm_europe_icon_2021012800/v1').iterdir())) + len( + list( + Path(temp_dir) + .joinpath( + "hazard/storm_europe/test_storm_europe_icon_2021012800/v1" + ) + .iterdir() + ) + ), ) client.purge_cache(target_dir=temp_dir, keep_testfiles=False) self.assertTrue( # uptodate active dataset file still there - Path(temp_dir).joinpath('exposures/litpop/LitPop_150arcsec_ABW/v3/LitPop_150arcsec_ABW.hdf5').exists() + Path(temp_dir) + .joinpath( + "exposures/litpop/LitPop_150arcsec_ABW/v3/LitPop_150arcsec_ABW.hdf5" + ) + .exists() ) self.assertFalse( # test data removed, empty directories removed - Path(temp_dir).joinpath('hazard/').exists() + Path(temp_dir).joinpath("hazard/").exists() ) diff --git a/climada/test/test_calibration.py b/climada/test/test_calibration.py index 72dcca3a4..5b83b3a3f 100644 --- a/climada/test/test_calibration.py +++ b/climada/test/test_calibration.py @@ -18,21 +18,22 @@ Test Calibration class. """ + import unittest from pathlib import Path + import pandas as pd +import climada.hazard.test as hazard_test from climada import CONFIG -from climada.entity.entity_def import Entity -from climada.hazard.base import Hazard from climada.engine import ImpactCalc from climada.engine.calibration_opt import calib_instance -from climada.util.constants import ENT_DEMO_TODAY -import climada.hazard.test as hazard_test +from climada.entity.entity_def import Entity +from climada.hazard.base import Hazard from climada.test import get_test_file +from climada.util.constants import ENT_DEMO_TODAY - -HAZ_TEST_TC = get_test_file('test_tc_florida', file_format='hdf5') +HAZ_TEST_TC = get_test_file("test_tc_florida", file_format="hdf5") DATA_FOLDER = CONFIG.test_data.dir() @@ -42,7 +43,7 @@ class TestCalib(unittest.TestCase): def test_calib_instance(self): """Test save calib instance""" - # Read default entity values + # Read default entity values ent = Entity.from_excel(ENT_DEMO_TODAY) ent.check() @@ -50,27 +51,30 @@ def test_calib_instance(self): hazard = Hazard.from_hdf5(HAZ_TEST_TC) # get impact function from set - imp_func = ent.impact_funcs.get_func(hazard.haz_type, - ent.exposures.gdf['impf_TC'].median()) + imp_func = ent.impact_funcs.get_func( + hazard.haz_type, ent.exposures.gdf["impf_TC"].median() + ) # Assign centroids to exposures ent.exposures.assign_centroids(hazard) # create input frame - df_in = pd.DataFrame.from_dict({'v_threshold': [25.7], - 'other_param': [2], - 'hazard': [HAZ_TEST_TC]}) - df_in_yearly = pd.DataFrame.from_dict({'v_threshold': [25.7], - 'other_param': [2], - 'hazard': [HAZ_TEST_TC]}) + df_in = pd.DataFrame.from_dict( + {"v_threshold": [25.7], "other_param": [2], "hazard": [HAZ_TEST_TC]} + ) + df_in_yearly = pd.DataFrame.from_dict( + {"v_threshold": [25.7], "other_param": [2], "hazard": [HAZ_TEST_TC]} + ) # 
Compute the impact over the whole exposures df_out = calib_instance(hazard, ent.exposures, imp_func, df_in) - df_out_yearly = calib_instance(hazard, ent.exposures, imp_func, - df_in_yearly, - yearly_impact=True) + df_out_yearly = calib_instance( + hazard, ent.exposures, imp_func, df_in_yearly, yearly_impact=True + ) # calc Impact as comparison - impact = ImpactCalc(ent.exposures, ent.impact_funcs, hazard).impact(assign_centroids=False) + impact = ImpactCalc(ent.exposures, ent.impact_funcs, hazard).impact( + assign_centroids=False + ) IYS = impact.impact_per_year(all_years=True) # do the tests @@ -78,17 +82,16 @@ def test_calib_instance(self): self.assertTrue(isinstance(df_out_yearly, pd.DataFrame)) self.assertEqual(df_out.shape[0], hazard.event_id.size) self.assertEqual(df_out_yearly.shape[0], 161) - self.assertTrue(all(df_out['event_id'] == - hazard.event_id)) - self.assertTrue(all(df_out[df_in.columns[0]].isin( - df_in[df_in.columns[0]]))) - self.assertTrue(all(df_out_yearly[df_in.columns[1]].isin( - df_in[df_in.columns[1]]))) - self.assertTrue(all(df_out_yearly[df_in.columns[2]].isin( - df_in[df_in.columns[2]]))) - self.assertTrue(all(df_out['impact_CLIMADA'].values == - impact.at_event)) - self.assertTrue(all(df_out_yearly['impact_CLIMADA'].values == [*IYS.values()])) + self.assertTrue(all(df_out["event_id"] == hazard.event_id)) + self.assertTrue(all(df_out[df_in.columns[0]].isin(df_in[df_in.columns[0]]))) + self.assertTrue( + all(df_out_yearly[df_in.columns[1]].isin(df_in[df_in.columns[1]])) + ) + self.assertTrue( + all(df_out_yearly[df_in.columns[2]].isin(df_in[df_in.columns[2]])) + ) + self.assertTrue(all(df_out["impact_CLIMADA"].values == impact.at_event)) + self.assertTrue(all(df_out_yearly["impact_CLIMADA"].values == [*IYS.values()])) # Execute Tests diff --git a/climada/test/test_engine.py b/climada/test/test_engine.py index ab078b29f..ce9ee2445 100644 --- a/climada/test/test_engine.py +++ b/climada/test/test_engine.py @@ -19,27 +19,27 @@ """ -import unittest -import numpy as np import copy import time +import unittest + +import numpy as np import scipy as sp +from tables.exceptions import HDF5ExtError +from climada import CONFIG from climada.engine import impact_data as im_d -from climada.engine.unsequa import InputVar, CalcCostBenefit -from climada.entity.entity_def import Entity +from climada.engine.unsequa import CalcCostBenefit, InputVar from climada.entity import Exposures, ImpactFunc, ImpactFuncSet +from climada.entity.entity_def import Entity from climada.hazard import Hazard -from climada import CONFIG from climada.util.constants import ( + ENT_DEMO_FUTURE, + ENT_DEMO_TODAY, EXP_DEMO_H5, HAZ_DEMO_H5, - ENT_DEMO_TODAY, - ENT_DEMO_FUTURE, ) -from tables.exceptions import HDF5ExtError - DATA_DIR = CONFIG.engine.test_data.dir() EMDAT_TEST_CSV = DATA_DIR.joinpath("emdat_testdata_BGD_USA_1970-2017.csv") @@ -66,7 +66,7 @@ def exp_dem(x_exp=1, exp=None): except HDF5ExtError: time.sleep(0.1) exp_tmp = exp.copy(deep=True) - exp_tmp.gdf['value'] *= x_exp + exp_tmp.gdf["value"] *= x_exp return exp_tmp @@ -152,8 +152,8 @@ def test_emdat_damage_yearlysum(self): ) self.assertEqual(36, df.size) - self.assertAlmostEqual(df['impact'].max(), 15150000000.0) - self.assertAlmostEqual(df['impact_scaled'].min(), 10939000.0) + self.assertAlmostEqual(df["impact"].max(), 15150000000.0) + self.assertAlmostEqual(df["impact_scaled"].min(), 10939000.0) self.assertEqual(df["year"][5], 2017) self.assertEqual(df["reference_year"].max(), 2000) self.assertIn("USA", list(df["ISO"])) diff --git 
a/climada/test/test_hazard.py b/climada/test/test_hazard.py index 6ae8dbfb4..0be423bcb 100644 --- a/climada/test/test_hazard.py +++ b/climada/test/test_hazard.py @@ -19,10 +19,11 @@ Test Hazard base class. """ -import unittest -import numpy as np import datetime as dt +import unittest from pathlib import Path + +import numpy as np from scipy import sparse from climada import CONFIG @@ -30,44 +31,50 @@ from climada.hazard.base import Hazard from climada.hazard.centroids import Centroids from climada.hazard.storm_europe import StormEurope -from climada.util.constants import (HAZ_DEMO_FL, WS_DEMO_NC, DEF_CRS) -from climada.util.api_client import Client -from climada.util import coordinates as u_coord from climada.test import get_test_file +from climada.util import coordinates as u_coord +from climada.util.api_client import Client +from climada.util.constants import DEF_CRS, HAZ_DEMO_FL, WS_DEMO_NC DATA_DIR = CONFIG.test_data.dir() -HAZ_TEST_TC :Path = get_test_file('test_tc_florida', file_format='hdf5') +HAZ_TEST_TC: Path = get_test_file("test_tc_florida", file_format="hdf5") """ Hazard test file from Data API: Hurricanes from 1851 to 2011 over Florida with 100 centroids. Fraction is empty. Format: HDF5. """ + class TestCentroids(unittest.TestCase): """Test centroids functionalities""" def test_read_write_raster_pass(self): """Test write_raster: Hazard from raster data""" haz_fl = Hazard.from_raster([HAZ_DEMO_FL]) - haz_fl.haz_type = 'FL' + haz_fl.haz_type = "FL" self.assertEqual(haz_fl.intensity.shape, (1, 1032226)) self.assertEqual(haz_fl.intensity.min(), -9999.0) self.assertAlmostEqual(haz_fl.intensity.max(), 4.662774085998535) - haz_fl.write_raster(DATA_DIR.joinpath('test_write_hazard.tif'), variable='intensity') + haz_fl.write_raster( + DATA_DIR.joinpath("test_write_hazard.tif"), variable="intensity" + ) - haz_read = Hazard.from_raster([DATA_DIR.joinpath('test_write_hazard.tif')]) - haz_fl.haz_type = 'FL' - self.assertTrue(np.allclose(haz_fl.intensity.toarray(), haz_read.intensity.toarray())) + haz_read = Hazard.from_raster([DATA_DIR.joinpath("test_write_hazard.tif")]) + haz_fl.haz_type = "FL" + self.assertTrue( + np.allclose(haz_fl.intensity.toarray(), haz_read.intensity.toarray()) + ) self.assertEqual(np.unique(np.array(haz_fl.fraction.toarray())).size, 2) - DATA_DIR.joinpath('test_write_hazard.tif').unlink() + DATA_DIR.joinpath("test_write_hazard.tif").unlink() def test_read_raster_pool_pass(self): """Test from_raster constructor with pool""" from pathos.pools import ProcessPool as Pool + pool = Pool() - haz_fl = Hazard.from_raster([HAZ_DEMO_FL], haz_type='FL', pool=pool) + haz_fl = Hazard.from_raster([HAZ_DEMO_FL], haz_type="FL", pool=pool) haz_fl.check() self.assertEqual(haz_fl.intensity.shape, (1, 1032226)) @@ -79,118 +86,87 @@ def test_read_raster_pool_pass(self): def test_read_write_vector_pass(self): """Test write_raster: Rasterize intensity from vector data""" haz_fl = Hazard( - 'FL', + "FL", event_id=np.array([1]), date=np.array([1]), frequency=np.array([1]), orig=np.array([1]), - event_name=['1'], + event_name=["1"], intensity=sparse.csr_matrix(np.array([0.11, 0.22, 0.33, 0.31])), - fraction=sparse.csr_matrix(np.array([0, 1, 2, 3]) ), + fraction=sparse.csr_matrix(np.array([0, 1, 2, 3])), centroids=Centroids( lon=np.array([1, 2, 3, 3]), lat=np.array([1, 2, 3, 1]), crs=DEF_CRS - ) + ), ) - haz_fl.write_raster(DATA_DIR.joinpath('test_write_hazard.tif'), variable='intensity') + haz_fl.write_raster( + DATA_DIR.joinpath("test_write_hazard.tif"), variable="intensity" + ) - haz_read 
= Hazard.from_raster([DATA_DIR.joinpath('test_write_hazard.tif')], haz_type='FL') + haz_read = Hazard.from_raster( + [DATA_DIR.joinpath("test_write_hazard.tif")], haz_type="FL" + ) self.assertEqual(haz_read.intensity.shape, (1, 9)) - output_raster = np.array([ - [1, 3], [2, 3], [3, 3], - [1, 2], [2, 2], [3, 2], - [1, 1], [2, 1], [3, 1] - ]) - output_instensity = np.array([ - 0, 0, 0.33, - 0, 0.22, 0, - 0.11, 0, 0.31 - ]) - - np.testing.assert_array_equal( - haz_read.centroids.lon, - output_raster[:, 0] - ) - np.testing.assert_array_equal( - haz_read.centroids.lat, - output_raster[:, 1] + output_raster = np.array( + [[1, 3], [2, 3], [3, 3], [1, 2], [2, 2], [3, 2], [1, 1], [2, 1], [3, 1]] ) + output_instensity = np.array([0, 0, 0.33, 0, 0.22, 0, 0.11, 0, 0.31]) + + np.testing.assert_array_equal(haz_read.centroids.lon, output_raster[:, 0]) + np.testing.assert_array_equal(haz_read.centroids.lat, output_raster[:, 1]) np.testing.assert_array_almost_equal( - haz_read.intensity.toarray().flatten(), - output_instensity + haz_read.intensity.toarray().flatten(), output_instensity ) - DATA_DIR.joinpath('test_write_hazard.tif').unlink() + DATA_DIR.joinpath("test_write_hazard.tif").unlink() def test_read_write_vector_fraction_pass(self): """Test write_raster: Rasterize fraction from vector data""" haz_fl = Hazard( - 'FL', + "FL", event_id=np.array([1]), date=np.array([1]), frequency=np.array([1]), orig=np.array([1]), - event_name=['1'], + event_name=["1"], intensity=sparse.csr_matrix(np.array([-0.11, -0.22, -0.33, -0.31])), fraction=sparse.csr_matrix(np.array([0.11, 0.22, 0.33, 0.31])), centroids=Centroids( lon=np.array([1, 2, 3, 3]), lat=np.array([1, 2, 3, 1]), crs=DEF_CRS - ) + ), ) - intensity_file = DATA_DIR.joinpath('test_write_hazard_intensity.tif') - fraction_file = DATA_DIR.joinpath('test_write_hazard_fraction.tif') + intensity_file = DATA_DIR.joinpath("test_write_hazard_intensity.tif") + fraction_file = DATA_DIR.joinpath("test_write_hazard_fraction.tif") - haz_fl.write_raster(fraction_file, variable='fraction') - haz_fl.write_raster(intensity_file, variable='intensity') + haz_fl.write_raster(fraction_file, variable="fraction") + haz_fl.write_raster(intensity_file, variable="intensity") - haz_read = Hazard.from_raster( - [intensity_file], [fraction_file], haz_type='FL' - ) + haz_read = Hazard.from_raster([intensity_file], [fraction_file], haz_type="FL") self.assertEqual(haz_read.fraction.shape, (1, 9)) self.assertEqual(haz_read.intensity.shape, (1, 9)) - - output_raster = np.array([ - [1, 3], [2, 3], [3, 3], - [1, 2], [2, 2], [3, 2], - [1, 1], [2, 1], [3, 1] - ]) - output_fraction = np.array([ - 0, 0, 0.33, - 0, 0.22, 0, - 0.11, 0, 0.31 - ]) - - output_intensity = np.array([ - 0, 0, -0.33, - 0, -0.22, 0, - -0.11, 0, -0.31 - ]) - - np.testing.assert_array_equal( - haz_read.centroids.lon, - output_raster[:, 0] - ) - np.testing.assert_array_equal( - haz_read.centroids.lat, - output_raster[:, 1] + output_raster = np.array( + [[1, 3], [2, 3], [3, 3], [1, 2], [2, 2], [3, 2], [1, 1], [2, 1], [3, 1]] ) + output_fraction = np.array([0, 0, 0.33, 0, 0.22, 0, 0.11, 0, 0.31]) + + output_intensity = np.array([0, 0, -0.33, 0, -0.22, 0, -0.11, 0, -0.31]) + + np.testing.assert_array_equal(haz_read.centroids.lon, output_raster[:, 0]) + np.testing.assert_array_equal(haz_read.centroids.lat, output_raster[:, 1]) np.testing.assert_array_almost_equal( - haz_read.fraction.toarray().flatten(), - output_fraction + haz_read.fraction.toarray().flatten(), output_fraction ) np.testing.assert_array_almost_equal( - 
haz_read.intensity.toarray().flatten(), - output_intensity + haz_read.intensity.toarray().flatten(), output_intensity ) DATA_DIR.joinpath(intensity_file).unlink() DATA_DIR.joinpath(fraction_file).unlink() - class TestStormEurope(unittest.TestCase): """Test methods to create StormEurope object""" @@ -215,9 +191,7 @@ def _test_first(haz): self.assertEqual(haz.frequency[0], 1.0) # Load first entry - storms = StormEurope.from_footprints( - WS_DEMO_NC[0] - ) + storms = StormEurope.from_footprints(WS_DEMO_NC[0]) _test_first(storms) # Omit the second file, should be the same result @@ -299,8 +273,8 @@ def test_ibtracs_with_basin(self): year_range=(1995, 1995), basin="SP", estimate_missing=True ) self.assertEqual(tc_track.size, 6) - self.assertEqual(tc_track.data[0]['basin'][0], "SP") - self.assertEqual(tc_track.data[5]['basin'][0], "SI") + self.assertEqual(tc_track.data[0]["basin"][0], "SP") + self.assertEqual(tc_track.data[5]["basin"][0], "SI") # genesis in NI tc_track = tc.TCTracks.from_ibtracs_netcdf( @@ -308,7 +282,7 @@ def test_ibtracs_with_basin(self): ) self.assertEqual(tc_track.size, 5) for tr in tc_track.data: - self.assertEqual(tr['basin'][0], "NI") + self.assertEqual(tr["basin"][0], "NI") # genesis in EP, but crosses WP at some point tc_track = tc.TCTracks.from_ibtracs_netcdf( @@ -316,8 +290,8 @@ def test_ibtracs_with_basin(self): ) self.assertEqual(tc_track.size, 3) for tr in tc_track.data: - self.assertEqual(tr['basin'][0], "EP") - self.assertIn("WP", tr['basin']) + self.assertEqual(tr["basin"][0], "EP") + self.assertIn("WP", tr["basin"]) def test_cutoff_tracks(self): tc_track = tc.TCTracks.from_ibtracs_netcdf(storm_id="1986226N30276") diff --git a/climada/test/test_litpop_integr.py b/climada/test/test_litpop_integr.py index 3a963d0df..0390a4538 100644 --- a/climada/test/test_litpop_integr.py +++ b/climada/test/test_litpop_integr.py @@ -18,207 +18,280 @@ Tests on LitPop exposures. 
""" + import unittest + import numpy as np from shapely.geometry import Polygon -from climada.entity.exposures.litpop import litpop as lp -from climada.entity.exposures.litpop import gpw_population -from climada.util.finance import world_bank_wealth_account, gdp, income_group import climada.util.coordinates as u_coord -from climada.util.constants import SYSTEM_DIR from climada import CONFIG +from climada.entity.exposures.litpop import gpw_population +from climada.entity.exposures.litpop import litpop as lp +from climada.util.constants import SYSTEM_DIR +from climada.util.finance import gdp, income_group, world_bank_wealth_account + +bounds = (8.41, 47.2, 8.70, 47.45) # (min_lon, max_lon, min_lat, max_lat) +shape = Polygon( + [ + (bounds[0], bounds[3]), + (bounds[2], bounds[3]), + (bounds[2], bounds[1]), + (bounds[0], bounds[1]), + ] +) -bounds = (8.41, 47.2, 8.70, 47.45) # (min_lon, max_lon, min_lat, max_lat) -shape = Polygon([ - (bounds[0], bounds[3]), - (bounds[2], bounds[3]), - (bounds[2], bounds[1]), - (bounds[0], bounds[1]) - ]) class TestLitPopExposure(unittest.TestCase): """Test LitPop exposure data model:""" def test_netherlands150_pass(self): """Test from_countries for Netherlands at 150 arcsec, first shape is empty""" - ent = lp.LitPop.from_countries('Netherlands', res_arcsec=150, reference_year=2016) + ent = lp.LitPop.from_countries( + "Netherlands", res_arcsec=150, reference_year=2016 + ) self.assertEqual(ent.gdf.shape[0], 2829) def test_BLM150_pass(self): """Test from_countries for BLM at 150 arcsec, 2 data points""" - ent = lp.LitPop.from_countries('BLM', res_arcsec=150, reference_year=2016) + ent = lp.LitPop.from_countries("BLM", res_arcsec=150, reference_year=2016) self.assertEqual(ent.gdf.shape[0], 2) def test_Monaco150_pass(self): """Test from_countries for Moncao at 150 arcsec, 1 data point""" - ent = lp.LitPop.from_countries('Monaco', res_arcsec=150, reference_year=2016) + ent = lp.LitPop.from_countries("Monaco", res_arcsec=150, reference_year=2016) self.assertEqual(ent.gdf.shape[0], 1) def test_switzerland300_pass(self): """Create LitPop entity for Switzerland on 300 arcsec:""" - country_name = ['CHE'] + country_name = ["CHE"] resolution = 300 - fin_mode = 'income_group' - with self.assertLogs('climada.entity.exposures.litpop', level='INFO') as cm: - ent = lp.LitPop.from_countries(country_name, res_arcsec=resolution, fin_mode=fin_mode, - reference_year=2016) - - self.assertIn('LitPop: Init Exposure for country: CHE', cm.output[0]) - self.assertEqual(ent.gdf['region_id'].min(), 756) - self.assertEqual(ent.gdf['region_id'].max(), 756) + fin_mode = "income_group" + with self.assertLogs("climada.entity.exposures.litpop", level="INFO") as cm: + ent = lp.LitPop.from_countries( + country_name, + res_arcsec=resolution, + fin_mode=fin_mode, + reference_year=2016, + ) + + self.assertIn("LitPop: Init Exposure for country: CHE", cm.output[0]) + self.assertEqual(ent.gdf["region_id"].min(), 756) + self.assertEqual(ent.gdf["region_id"].max(), 756) # confirm that the total value is equal to GDP * (income_group+1): - self.assertAlmostEqual(ent.gdf['value'].sum()/gdp('CHE', 2016)[1], - (income_group('CHE', 2016)[1] + 1)) - self.assertIn("LitPop Exposure for ['CHE'] at 300 as, year: 2016", ent.description) - self.assertIn('income_group', ent.description) - self.assertIn('1, 1', ent.description) - self.assertTrue(u_coord.equal_crs(ent.crs, 'epsg:4326')) - self.assertEqual(ent.meta['width'], 54) - self.assertEqual(ent.meta['height'], 23) - self.assertTrue(u_coord.equal_crs(ent.meta['crs'], 
'epsg:4326')) - self.assertAlmostEqual(ent.meta['transform'][0], 0.08333333333333333) - self.assertAlmostEqual(ent.meta['transform'][1], 0) - self.assertAlmostEqual(ent.meta['transform'][2], 5.9166666666666) - self.assertAlmostEqual(ent.meta['transform'][3], 0) - self.assertAlmostEqual(ent.meta['transform'][4], -0.08333333333333333) - self.assertAlmostEqual(ent.meta['transform'][5], 47.75) + self.assertAlmostEqual( + ent.gdf["value"].sum() / gdp("CHE", 2016)[1], + (income_group("CHE", 2016)[1] + 1), + ) + self.assertIn( + "LitPop Exposure for ['CHE'] at 300 as, year: 2016", ent.description + ) + self.assertIn("income_group", ent.description) + self.assertIn("1, 1", ent.description) + self.assertTrue(u_coord.equal_crs(ent.crs, "epsg:4326")) + self.assertEqual(ent.meta["width"], 54) + self.assertEqual(ent.meta["height"], 23) + self.assertTrue(u_coord.equal_crs(ent.meta["crs"], "epsg:4326")) + self.assertAlmostEqual(ent.meta["transform"][0], 0.08333333333333333) + self.assertAlmostEqual(ent.meta["transform"][1], 0) + self.assertAlmostEqual(ent.meta["transform"][2], 5.9166666666666) + self.assertAlmostEqual(ent.meta["transform"][3], 0) + self.assertAlmostEqual(ent.meta["transform"][4], -0.08333333333333333) + self.assertAlmostEqual(ent.meta["transform"][5], 47.75) def test_switzerland30normPop_pass(self): """Create LitPop entity for Switzerland on 30 arcsec:""" - country_name = ['CHE'] + country_name = ["CHE"] resolution = 30 exp = [0, 1] - fin_mode = 'norm' - with self.assertLogs('climada.entity.exposures.litpop', level='INFO') as cm: - ent = lp.LitPop.from_countries(country_name, res_arcsec=resolution, exponents=exp, - fin_mode=fin_mode, reference_year=2015) + fin_mode = "norm" + with self.assertLogs("climada.entity.exposures.litpop", level="INFO") as cm: + ent = lp.LitPop.from_countries( + country_name, + res_arcsec=resolution, + exponents=exp, + fin_mode=fin_mode, + reference_year=2015, + ) # print(cm) - self.assertIn('LitPop: Init Exposure for country: CHE', cm.output[0]) - self.assertEqual(ent.gdf['region_id'].min(), 756) - self.assertEqual(ent.gdf['region_id'].max(), 756) - self.assertEqual(ent.gdf['value'].sum(), 1.0) + self.assertIn("LitPop: Init Exposure for country: CHE", cm.output[0]) + self.assertEqual(ent.gdf["region_id"].min(), 756) + self.assertEqual(ent.gdf["region_id"].max(), 756) + self.assertEqual(ent.gdf["value"].sum(), 1.0) self.assertEqual(ent.ref_year, 2015) def test_suriname30_nfw_pass(self): """Create LitPop entity for Suriname for non-finanical wealth in 2016:""" - country_name = ['SUR'] - fin_mode = 'nfw' - ent = lp.LitPop.from_countries(country_name, reference_year=2016, fin_mode=fin_mode) - - self.assertEqual(ent.gdf['region_id'].min(), 740) - self.assertEqual(ent.gdf['region_id'].max(), 740) + country_name = ["SUR"] + fin_mode = "nfw" + ent = lp.LitPop.from_countries( + country_name, reference_year=2016, fin_mode=fin_mode + ) + + self.assertEqual(ent.gdf["region_id"].min(), 740) + self.assertEqual(ent.gdf["region_id"].max(), 740) self.assertEqual(ent.ref_year, 2016) def test_switzerland300_admin1_pc2016_pass(self): """Create LitPop entity for Switzerland 2016 with admin1 and produced capital:""" - country_name = ['CHE'] - fin_mode = 'pc' + country_name = ["CHE"] + fin_mode = "pc" resolution = 300 ref_year = 2016 adm1 = True - comparison_total_val = world_bank_wealth_account(country_name[0], ref_year, no_land=1)[1] - ent = lp.LitPop.from_countries(country_name, res_arcsec=resolution, - reference_year=ref_year, fin_mode=fin_mode, - admin1_calc=adm1) - - 
self.assertAlmostEqual(np.around(ent.gdf['value'].sum()*1e-9, 0), - np.around(comparison_total_val*1e-9, 0), places=0) - self.assertEqual(ent.value_unit, 'USD') + comparison_total_val = world_bank_wealth_account( + country_name[0], ref_year, no_land=1 + )[1] + ent = lp.LitPop.from_countries( + country_name, + res_arcsec=resolution, + reference_year=ref_year, + fin_mode=fin_mode, + admin1_calc=adm1, + ) + + self.assertAlmostEqual( + np.around(ent.gdf["value"].sum() * 1e-9, 0), + np.around(comparison_total_val * 1e-9, 0), + places=0, + ) + self.assertEqual(ent.value_unit, "USD") def test_from_shape_zurich_pass(self): """test initiating LitPop for custom shape (square around Zurich City) Distributing an imaginary total value of 1000 USD""" - total_value=1000 - ent = lp.LitPop.from_shape(shape, total_value, res_arcsec=30, reference_year=2016) - self.assertEqual(ent.gdf['value'].sum(), 1000.0) - self.assertEqual(ent.gdf['value'].min(), 0.0) - self.assertEqual(ent.gdf['region_id'].min(), 756) - self.assertEqual(ent.gdf['region_id'].max(), 756) - self.assertAlmostEqual(ent.gdf['latitude'].min(), 47.20416666666661) + total_value = 1000 + ent = lp.LitPop.from_shape( + shape, total_value, res_arcsec=30, reference_year=2016 + ) + self.assertEqual(ent.gdf["value"].sum(), 1000.0) + self.assertEqual(ent.gdf["value"].min(), 0.0) + self.assertEqual(ent.gdf["region_id"].min(), 756) + self.assertEqual(ent.gdf["region_id"].max(), 756) + self.assertAlmostEqual(ent.gdf["latitude"].min(), 47.20416666666661) # index and coord. of largest value: - self.assertEqual(ent.gdf.loc[ent.gdf['value'] == ent.gdf['value'].max()].index[0], 482) - self.assertAlmostEqual(ent.gdf.loc[ent.gdf['value'] == ent.gdf['value'].max()]['latitude'].values[0], 47.34583333333325) - self.assertAlmostEqual(ent.gdf.loc[ent.gdf['value'] == ent.gdf['value'].max()]['longitude'].values[0], 8.529166666666658) + self.assertEqual( + ent.gdf.loc[ent.gdf["value"] == ent.gdf["value"].max()].index[0], 482 + ) + self.assertAlmostEqual( + ent.gdf.loc[ent.gdf["value"] == ent.gdf["value"].max()]["latitude"].values[ + 0 + ], + 47.34583333333325, + ) + self.assertAlmostEqual( + ent.gdf.loc[ent.gdf["value"] == ent.gdf["value"].max()]["longitude"].values[ + 0 + ], + 8.529166666666658, + ) def test_from_shape_and_countries_zurich_pass(self): """test initiating LitPop for custom shape (square around Zurich City) with from_shape_and_countries()""" ent = lp.LitPop.from_shape_and_countries( - shape, 'Switzerland', res_arcsec=30, reference_year=2016) - self.assertEqual(ent.gdf['value'].min(), 0.0) - self.assertEqual(ent.gdf['region_id'].min(), 756) - self.assertEqual(ent.gdf['region_id'].max(), 756) - self.assertAlmostEqual(ent.gdf['latitude'].min(), 47.20416666666661) + shape, "Switzerland", res_arcsec=30, reference_year=2016 + ) + self.assertEqual(ent.gdf["value"].min(), 0.0) + self.assertEqual(ent.gdf["region_id"].min(), 756) + self.assertEqual(ent.gdf["region_id"].max(), 756) + self.assertAlmostEqual(ent.gdf["latitude"].min(), 47.20416666666661) # coord of largest value: - self.assertEqual(ent.gdf.loc[ent.gdf['value'] == ent.gdf['value'].max()].index[0], 434) - self.assertAlmostEqual(ent.gdf.loc[ent.gdf['value'] == ent.gdf['value'].max()]['latitude'].values[0], 47.34583333333325) - self.assertAlmostEqual(ent.gdf.loc[ent.gdf['value'] == ent.gdf['value'].max()]['longitude'].values[0], 8.529166666666658) + self.assertEqual( + ent.gdf.loc[ent.gdf["value"] == ent.gdf["value"].max()].index[0], 434 + ) + self.assertAlmostEqual( + ent.gdf.loc[ent.gdf["value"] == 
ent.gdf["value"].max()]["latitude"].values[ + 0 + ], + 47.34583333333325, + ) + self.assertAlmostEqual( + ent.gdf.loc[ent.gdf["value"] == ent.gdf["value"].max()]["longitude"].values[ + 0 + ], + 8.529166666666658, + ) def test_Liechtenstein_15_lit_pass(self): """Create Nightlights entity for Liechtenstein 2016:""" - country_name = 'Liechtenstein' + country_name = "Liechtenstein" ref_year = 2016 ent = lp.LitPop.from_nightlight_intensity(country_name, reference_year=ref_year) - self.assertEqual(ent.gdf['value'].sum(), 36469.0) - self.assertEqual(ent.gdf['region_id'][1], 438) - self.assertEqual(ent.value_unit, '') - self.assertAlmostEqual(ent.gdf['latitude'].max(), 47.260416666666664) - self.assertAlmostEqual(ent.meta['transform'][4], -15/3600) + self.assertEqual(ent.gdf["value"].sum(), 36469.0) + self.assertEqual(ent.gdf["region_id"][1], 438) + self.assertEqual(ent.value_unit, "") + self.assertAlmostEqual(ent.gdf["latitude"].max(), 47.260416666666664) + self.assertAlmostEqual(ent.meta["transform"][4], -15 / 3600) def test_Liechtenstein_30_pop_pass(self): """Create population count entity for Liechtenstein 2015:""" - country_name = 'Liechtenstein' + country_name = "Liechtenstein" ref_year = 2015 ent = lp.LitPop.from_population(country_name, reference_year=ref_year) - self.assertEqual(ent.gdf['value'].sum(), 30068.970703125) - self.assertEqual(ent.gdf['region_id'][1], 438) - self.assertEqual(ent.value_unit, 'people') - self.assertAlmostEqual(ent.gdf['latitude'].max(), 47.2541666666666) - self.assertAlmostEqual(ent.meta['transform'][0], 30/3600) + self.assertEqual(ent.gdf["value"].sum(), 30068.970703125) + self.assertEqual(ent.gdf["region_id"][1], 438) + self.assertEqual(ent.value_unit, "people") + self.assertAlmostEqual(ent.gdf["latitude"].max(), 47.2541666666666) + self.assertAlmostEqual(ent.meta["transform"][0], 30 / 3600) def test_from_nightlight_intensity(self): - """ Test raises, logger and if methods from_countries and from_shape are - are used.""" + """Test raises, logger and if methods from_countries and from_shape are + are used.""" with self.assertRaises(ValueError) as cm: lp.LitPop.from_nightlight_intensity() - self.assertEqual('Either `countries` or `shape` required. Aborting.', str(cm.exception)) + self.assertEqual( + "Either `countries` or `shape` required. Aborting.", str(cm.exception) + ) with self.assertRaises(ValueError) as cm: - lp.LitPop.from_nightlight_intensity(countries = 'Liechtenstein', shape = shape) - self.assertEqual('Not allowed to set both `countries` and `shape`. Aborting.', str(cm.exception)) - - exp = lp.LitPop.from_nightlight_intensity(countries = 'Liechtenstein') - self.assertEqual(exp.fin_mode, 'none') - - exp = lp.LitPop.from_nightlight_intensity(shape = shape) - self.assertEqual(exp.value_unit, '') - - with self.assertLogs('climada.entity.exposures.litpop.litpop', level = 'WARNING') as cm: - lp.LitPop.from_nightlight_intensity(shape = shape) - self.assertIn('Note: set_nightlight_intensity sets values to raw nightlight intensity,', cm.output[0]) + lp.LitPop.from_nightlight_intensity(countries="Liechtenstein", shape=shape) + self.assertEqual( + "Not allowed to set both `countries` and `shape`. 
Aborting.", + str(cm.exception), + ) + + exp = lp.LitPop.from_nightlight_intensity(countries="Liechtenstein") + self.assertEqual(exp.fin_mode, "none") + + exp = lp.LitPop.from_nightlight_intensity(shape=shape) + self.assertEqual(exp.value_unit, "") + + with self.assertLogs( + "climada.entity.exposures.litpop.litpop", level="WARNING" + ) as cm: + lp.LitPop.from_nightlight_intensity(shape=shape) + self.assertIn( + "Note: set_nightlight_intensity sets values to raw nightlight intensity,", + cm.output[0], + ) def test_from_population(self): - """ Test raises, logger and if methods from_countries and from_shape are - are used.""" + """Test raises, logger and if methods from_countries and from_shape are + are used.""" with self.assertRaises(ValueError) as cm: lp.LitPop.from_population() - self.assertEqual('Either `countries` or `shape` required. Aborting.', str(cm.exception)) + self.assertEqual( + "Either `countries` or `shape` required. Aborting.", str(cm.exception) + ) - exp = lp.LitPop.from_population(countries = 'Liechtenstein') - self.assertEqual(exp.fin_mode, 'pop') + exp = lp.LitPop.from_population(countries="Liechtenstein") + self.assertEqual(exp.fin_mode, "pop") - exp = lp.LitPop.from_population(shape = shape) - self.assertEqual(exp.value_unit, 'people') + exp = lp.LitPop.from_population(shape=shape) + self.assertEqual(exp.value_unit, "people") with self.assertRaises(ValueError) as cm: - lp.LitPop.from_population(countries = 'Liechtenstein', shape = shape) - self.assertEqual('Not allowed to set both `countries` and `shape`. Aborting.', str(cm.exception)) + lp.LitPop.from_population(countries="Liechtenstein", shape=shape) + self.assertEqual( + "Not allowed to set both `countries` and `shape`. Aborting.", + str(cm.exception), + ) class TestAdmin1(unittest.TestCase): @@ -228,12 +301,22 @@ def test_from_countries_calc_admin1_pass(self): """test method from_countries with admin1_calc=True for Switzerland""" country = "Switzerland" resolution = 90 - fin_mode = 'gdp' - - ent = lp.LitPop.from_countries(country, res_arcsec=resolution, fin_mode=fin_mode, - reference_year=2016, admin1_calc=True) - ent_adm0 = lp.LitPop.from_countries(country, res_arcsec=resolution, fin_mode=fin_mode, - reference_year=2016, admin1_calc=False) + fin_mode = "gdp" + + ent = lp.LitPop.from_countries( + country, + res_arcsec=resolution, + fin_mode=fin_mode, + reference_year=2016, + admin1_calc=True, + ) + ent_adm0 = lp.LitPop.from_countries( + country, + res_arcsec=resolution, + fin_mode=fin_mode, + reference_year=2016, + admin1_calc=False, + ) # shape must be same as with admin1_calc = False, otherwise there # is a problem with handling of the admin1 shapes: self.assertEqual(ent.gdf.shape[0], 7800) @@ -242,41 +325,52 @@ def test_from_countries_calc_admin1_pass(self): def test_calc_admin1(self): """test function _calc_admin1_one_country for Switzerland.""" resolution = 300 - country = 'CHE' - ent = lp._calc_admin1_one_country(country, resolution, (2,1), 'pc', None, - 2016, lp.GPW_VERSION, SYSTEM_DIR) + country = "CHE" + ent = lp._calc_admin1_one_country( + country, resolution, (2, 1), "pc", None, 2016, lp.GPW_VERSION, SYSTEM_DIR + ) self.assertEqual(ent.gdf.shape[0], 699) - self.assertEqual(ent.gdf['region_id'][88], 756) - self.assertAlmostEqual(ent.gdf['latitude'].max(), 47.708333333333336) + self.assertEqual(ent.gdf["region_id"][88], 756) + self.assertAlmostEqual(ent.gdf["latitude"].max(), 47.708333333333336) # shape must be same as with admin1_calc = False, otherwise there # is a problem with handling of the admin1 
shapes: - ent_adm0 = lp.LitPop.from_countries(country, res_arcsec=resolution, fin_mode='pc', - reference_year=2016, admin1_calc=False) + ent_adm0 = lp.LitPop.from_countries( + country, + res_arcsec=resolution, + fin_mode="pc", + reference_year=2016, + admin1_calc=False, + ) self.assertEqual(ent.gdf.shape[0], ent_adm0.gdf.shape[0]) def test_brandenburg(self): """test functions from_shape_and_countries and from_shape for admin1 shape of Brandenburg""" reslution_arcsec = 120 - country = 'DEU' - state_name = 'Brandenburg' + country = "DEU" + state_name = "Brandenburg" # get the shape of Brandenburg: admin1_info, admin1_shapes = u_coord.get_admin1_info(country) admin1_info = admin1_info[country] admin1_shapes = admin1_shapes[country] - admin1_names = [record['name'] for record in admin1_info] + admin1_names = [record["name"] for record in admin1_info] print(admin1_names) for idx, name in enumerate(admin1_names): - if admin1_names[idx]==state_name: + if admin1_names[idx] == state_name: break # init LitPop for Brandenburg exp_bra2 = lp.LitPop.from_shape_and_countries( - admin1_shapes[idx], country, res_arcsec=reslution_arcsec, reference_year=2016) + admin1_shapes[idx], + country, + res_arcsec=reslution_arcsec, + reference_year=2016, + ) exp_bra = lp.LitPop.from_shape( - admin1_shapes[idx], 1000, res_arcsec=reslution_arcsec, reference_year=2016) - self.assertAlmostEqual(exp_bra.gdf['value'].sum(), 1000) + admin1_shapes[idx], 1000, res_arcsec=reslution_arcsec, reference_year=2016 + ) + self.assertAlmostEqual(exp_bra.gdf["value"].sum(), 1000) # compare number of data points: self.assertEqual(exp_bra.gdf.shape[0], exp_bra2.gdf.shape[0]) self.assertEqual(exp_bra.gdf.shape[0], 3566) @@ -284,6 +378,7 @@ def test_brandenburg(self): self.assertEqual(len(exp_bra.gdf.geometry.unique()), len(exp_bra.gdf.geometry)) self.assertEqual(len(exp_bra.gdf.geometry.unique()), 3566) + class TestGPWPopulation(unittest.TestCase): """Test gpw_population submodule""" @@ -292,28 +387,30 @@ def test_get_gpw_file_path_pass(self): gpw_version = CONFIG.exposures.litpop.gpw_population.gpw_version.int() try: path = gpw_population.get_gpw_file_path(gpw_version, 2020, verbose=False) - self.assertIn('gpw_v4_population', str(path)) + self.assertIn("gpw_v4_population", str(path)) except FileExistsError as err: - self.assertIn('lease download', err.args[0]) - self.skipTest('GPW input data for GPW v4.%i not found.' %(gpw_version)) + self.assertIn("lease download", err.args[0]) + self.skipTest("GPW input data for GPW v4.%i not found." 
% (gpw_version)) def test_load_gpw_pop_shape_pass(self): """test method gpw_population.load_gpw_pop_shape""" gpw_version = CONFIG.exposures.litpop.gpw_population.gpw_version.int() try: - data, meta, glb_transform = \ - gpw_population.load_gpw_pop_shape(shape, 2020, gpw_version, verbose=False) + data, meta, glb_transform = gpw_population.load_gpw_pop_shape( + shape, 2020, gpw_version, verbose=False + ) self.assertEqual(data.shape, (31, 36)) - self.assertAlmostEqual(meta['transform'][0], 0.00833333333333333) - self.assertAlmostEqual(meta['transform'][0], glb_transform[0]) - self.assertEqual(meta['driver'], 'GTiff') - self.assertEqual(meta['height'], data.shape[0]) - self.assertEqual(meta['width'], data.shape[1]) + self.assertAlmostEqual(meta["transform"][0], 0.00833333333333333) + self.assertAlmostEqual(meta["transform"][0], glb_transform[0]) + self.assertEqual(meta["driver"], "GTiff") + self.assertEqual(meta["height"], data.shape[0]) + self.assertEqual(meta["width"], data.shape[1]) self.assertIsInstance(data, np.ndarray) self.assertEqual(len(data.shape), 2) except FileExistsError as err: - self.assertIn('lease download', err.args[0]) - self.skipTest('GPW input data for GPW v4.%i not found.' %(gpw_version)) + self.assertIn("lease download", err.args[0]) + self.skipTest("GPW input data for GPW v4.%i not found." % (gpw_version)) + # Execute Tests if __name__ == "__main__": diff --git a/climada/test/test_nightlight.py b/climada/test/test_nightlight.py index fff3cc633..cb463fcd7 100644 --- a/climada/test/test_nightlight.py +++ b/climada/test/test_nightlight.py @@ -21,85 +21,113 @@ import gzip import io -from pathlib import Path import tarfile -from tempfile import TemporaryDirectory import unittest +from pathlib import Path +from tempfile import TemporaryDirectory import affine import numpy as np import scipy.sparse as sparse +from osgeo import gdal from PIL import Image from shapely.geometry import Polygon -from osgeo import gdal from climada.entity.exposures.litpop import nightlight -from climada.util.constants import (SYSTEM_DIR, CONFIG) -from climada.util import (files_handler, ureg) +from climada.util import files_handler, ureg +from climada.util.constants import CONFIG, SYSTEM_DIR BM_FILENAMES = nightlight.BM_FILENAMES NOAA_RESOLUTION_DEG = (30 * ureg.arc_second).to(ureg.deg).magnitude + def init_test_shape(): """provide a rectangular shape""" bounds = (14.18, 35.78, 14.58, 36.09) # (min_lon, max_lon, min_lat, max_lat) - return bounds, Polygon([ - (bounds[0], bounds[3]), - (bounds[2], bounds[3]), - (bounds[2], bounds[1]), - (bounds[0], bounds[1]) - ]) + return bounds, Polygon( + [ + (bounds[0], bounds[3]), + (bounds[2], bounds[3]), + (bounds[2], bounds[1]), + (bounds[0], bounds[1]), + ] + ) + class TestNightlight(unittest.TestCase): """Test litpop.nightlight""" def test_load_nasa_nl_shape_single_tile(self): - """ Test that the function returns a np.ndarray containing - the cropped .tif image values. Test that - just one layer is returned. """ + """Test that the function returns a np.ndarray containing + the cropped .tif image values. 
Test that + just one layer is returned.""" # Initialization - path = Path(SYSTEM_DIR, 'BlackMarble_2016_C1_geo_gray.tif') + path = Path(SYSTEM_DIR, "BlackMarble_2016_C1_geo_gray.tif") _, shape = init_test_shape() # Test cropped output - out_image, meta = nightlight.load_nasa_nl_shape_single_tile(geometry = shape, path = path) + out_image, meta = nightlight.load_nasa_nl_shape_single_tile( + geometry=shape, path=path + ) self.assertIsInstance(out_image, np.ndarray) self.assertEqual(len(out_image.shape), 2) # Test meta ouput - self.assertEqual(meta['height'],out_image.shape[0]) - self.assertEqual(meta['width'],out_image.shape[1]) - self.assertEqual(meta['driver'], 'GTiff') - self.assertEqual(meta['transform'], affine.Affine(0.004166666666666667, 0.0, - 14.179166666666667, 0.0, -0.004166666666666667, 36.09166666666667)) + self.assertEqual(meta["height"], out_image.shape[0]) + self.assertEqual(meta["width"], out_image.shape[1]) + self.assertEqual(meta["driver"], "GTiff") + self.assertEqual( + meta["transform"], + affine.Affine( + 0.004166666666666667, + 0.0, + 14.179166666666667, + 0.0, + -0.004166666666666667, + 36.09166666666667, + ), + ) # Test raises with self.assertRaises(IndexError) as cm: - nightlight.load_nasa_nl_shape_single_tile(geometry = shape, path = path, layer = 4) - self.assertEqual("BlackMarble_2016_C1_geo_gray.tif has only 3 layers," - " layer 4 can't be accessed.", str(cm.exception)) + nightlight.load_nasa_nl_shape_single_tile( + geometry=shape, path=path, layer=4 + ) + self.assertEqual( + "BlackMarble_2016_C1_geo_gray.tif has only 3 layers," + " layer 4 can't be accessed.", + str(cm.exception), + ) # Test logger - with self.assertLogs('climada.entity.exposures.litpop.nightlight', level='DEBUG') as cm: - nightlight.load_nasa_nl_shape_single_tile(geometry = shape, path = path) - self.assertIn('Read cropped BlackMarble_2016_C1_geo_gray.tif as np.ndarray.', cm.output[0]) + with self.assertLogs( + "climada.entity.exposures.litpop.nightlight", level="DEBUG" + ) as cm: + nightlight.load_nasa_nl_shape_single_tile(geometry=shape, path=path) + self.assertIn( + "Read cropped BlackMarble_2016_C1_geo_gray.tif as np.ndarray.", cm.output[0] + ) def test_read_bm_files(self): - """" Test that read_bm_files function read NASA BlackMarble GeoTiff and output - an array and a gdal DataSet.""" + """ " Test that read_bm_files function read NASA BlackMarble GeoTiff and output + an array and a gdal DataSet.""" # Download 'BlackMarble_2016_A1_geo_gray.tif' in the temporary directory and create a path temp_dir = TemporaryDirectory() urls = CONFIG.exposures.litpop.nightlights.nasa_sites.list() - url = str(urls[0]) + 'BlackMarble_2016_A1_geo_gray.tif' - files_handler.download_file(url = url, download_dir = temp_dir.name) - filename = 'BlackMarble_2016_A1_geo_gray.tif' + url = str(urls[0]) + "BlackMarble_2016_A1_geo_gray.tif" + files_handler.download_file(url=url, download_dir=temp_dir.name) + filename = "BlackMarble_2016_A1_geo_gray.tif" # Test logger - with self.assertLogs('climada.entity.exposures.litpop.nightlight', level='DEBUG') as cm: - arr1, curr_file = nightlight.read_bm_file(bm_path=temp_dir.name, filename=filename) - self.assertIn('Importing' + temp_dir.name, cm.output[0]) + with self.assertLogs( + "climada.entity.exposures.litpop.nightlight", level="DEBUG" + ) as cm: + arr1, curr_file = nightlight.read_bm_file( + bm_path=temp_dir.name, filename=filename + ) + self.assertIn("Importing" + temp_dir.name, cm.output[0]) # Check outputs are a np.array and a gdal DataSet and band 1 is selected 
self.assertIsInstance(arr1, np.ndarray) @@ -111,64 +139,94 @@ def test_read_bm_files(self): # Check that the right exception is raised with self.assertRaises(FileNotFoundError) as cm: - nightlight.read_bm_file(bm_path='/Wrong/path/file.tif', filename='file.tif') - self.assertEqual('Invalid path: check that the path to BlackMarble file is correct.', - str(cm.exception)) + nightlight.read_bm_file(bm_path="/Wrong/path/file.tif", filename="file.tif") + self.assertEqual( + "Invalid path: check that the path to BlackMarble file is correct.", + str(cm.exception), + ) temp_dir.cleanup() def test_download_nl_files(self): - """ Test that BlackMarble GeoTiff files are downloaded. """ + """Test that BlackMarble GeoTiff files are downloaded.""" # Test Raises temp_dir = TemporaryDirectory() with self.assertRaises(ValueError) as cm: - nightlight.download_nl_files(req_files=np.ones(5), - files_exist=np.zeros(4), - dwnl_path=temp_dir.name) - self.assertEqual('The given arguments are invalid. req_files and ' - 'files_exist must both be as long as there are files to download ' - '(8).', str(cm.exception)) + nightlight.download_nl_files( + req_files=np.ones(5), files_exist=np.zeros(4), dwnl_path=temp_dir.name + ) + self.assertEqual( + "The given arguments are invalid. req_files and " + "files_exist must both be as long as there are files to download " + "(8).", + str(cm.exception), + ) with self.assertRaises(ValueError) as cm: - nightlight.download_nl_files(dwnl_path='not a folder') - self.assertEqual('The folder not a folder does not exist. Operation aborted.', - str(cm.exception)) + nightlight.download_nl_files(dwnl_path="not a folder") + self.assertEqual( + "The folder not a folder does not exist. Operation aborted.", + str(cm.exception), + ) # Test logger - with self.assertLogs('climada.entity.exposures.litpop.nightlight', level='DEBUG') as cm: - dwl_path = nightlight.download_nl_files(req_files=np.ones(len(BM_FILENAMES),), - files_exist=np.ones(len(BM_FILENAMES),), - dwnl_path=temp_dir.name, year=2016) - self.assertIn('All required files already exist. No downloads necessary.', cm.output[0]) + with self.assertLogs( + "climada.entity.exposures.litpop.nightlight", level="DEBUG" + ) as cm: + dwl_path = nightlight.download_nl_files( + req_files=np.ones( + len(BM_FILENAMES), + ), + files_exist=np.ones( + len(BM_FILENAMES), + ), + dwnl_path=temp_dir.name, + year=2016, + ) + self.assertIn( + "All required files already exist. 
No downloads necessary.", + cm.output[0], + ) # Test download - with self.assertLogs('climada.entity.exposures.litpop.nightlight', level='DEBUG') as cm: - dwl_path = nightlight.download_nl_files(req_files=np.array([1, 0, 0, 0, 0, 0, 0, 0]), - files_exist=np.array([0, 1, 1, 1, 1, 1, 1, 1]), - dwnl_path=temp_dir.name) - self.assertIn('Attempting to download file from ' - 'https://eoimages.gsfc.nasa.gov/images/imagerecords/' - '144000/144897/BlackMarble_2016_A1_geo_gray.tif', cm.output[0]) + with self.assertLogs( + "climada.entity.exposures.litpop.nightlight", level="DEBUG" + ) as cm: + dwl_path = nightlight.download_nl_files( + req_files=np.array([1, 0, 0, 0, 0, 0, 0, 0]), + files_exist=np.array([0, 1, 1, 1, 1, 1, 1, 1]), + dwnl_path=temp_dir.name, + ) + self.assertIn( + "Attempting to download file from " + "https://eoimages.gsfc.nasa.gov/images/imagerecords/" + "144000/144897/BlackMarble_2016_A1_geo_gray.tif", + cm.output[0], + ) # Test if dwl_path has been returned self.assertEqual(temp_dir.name, dwl_path) temp_dir.cleanup() def test_unzip_tif_to_py(self): - """ Test that .gz files are unzipped and read as a sparse matrix, - file_name is correct and logger message recorded. """ + """Test that .gz files are unzipped and read as a sparse matrix, + file_name is correct and logger message recorded.""" - path_file_tif_gz = str(SYSTEM_DIR.joinpath('F182013.v4c_web.stable_lights.avg_vis.tif.gz')) - with self.assertLogs('climada.entity.exposures.litpop.nightlight', level='INFO') as cm: + path_file_tif_gz = str( + SYSTEM_DIR.joinpath("F182013.v4c_web.stable_lights.avg_vis.tif.gz") + ) + with self.assertLogs( + "climada.entity.exposures.litpop.nightlight", level="INFO" + ) as cm: file_name, night = nightlight.unzip_tif_to_py(path_file_tif_gz) - self.assertIn(f'Unzipping file {path_file_tif_gz}', cm.output[0]) - self.assertEqual(str(file_name), 'F182013.v4c_web.stable_lights.avg_vis.tif') + self.assertIn(f"Unzipping file {path_file_tif_gz}", cm.output[0]) + self.assertEqual(str(file_name), "F182013.v4c_web.stable_lights.avg_vis.tif") self.assertIsInstance(night, sparse._csr.csr_matrix) - SYSTEM_DIR.joinpath('F182013.v4c_web.stable_lights.avg_vis.p').unlink() + SYSTEM_DIR.joinpath("F182013.v4c_web.stable_lights.avg_vis.p").unlink() def test_load_nightlight_noaa(self): - """ Test that data is not downloaded if a .tif.gz file is present - in SYSTEM_DIR. 
""" + """Test that data is not downloaded if a .tif.gz file is present + in SYSTEM_DIR.""" # initialization - sat_name = 'E99' + sat_name = "E99" year = 2013 pattern = f"{sat_name}{year}.v4c_web.stable_lights.avg_vis" gzfile = f"{pattern}.tif.gz" @@ -183,12 +241,14 @@ def test_load_nightlight_noaa(self): with io.BytesIO() as mem: pilim.save(mem, "tiff") # compressed image to a gzip file - with gzip.GzipFile(SYSTEM_DIR.joinpath(gzfile), 'wb') as f: + with gzip.GzipFile(SYSTEM_DIR.joinpath(gzfile), "wb") as f: f.write(mem.getvalue()) try: # with arguments - night, coord_nl, fn_light = nightlight.load_nightlight_noaa(ref_year=year, sat_name=sat_name) + night, coord_nl, fn_light = nightlight.load_nightlight_noaa( + ref_year=year, sat_name=sat_name + ) self.assertIsInstance(night, sparse._csr.csr_matrix) self.assertIn(tiffile, str(fn_light)) @@ -196,94 +256,125 @@ def test_load_nightlight_noaa(self): night, coord_nl, fn_light = nightlight.load_nightlight_noaa() self.assertIsInstance(night, sparse._csr.csr_matrix) self.assertIn(pfile, str(fn_light)) - self.assertTrue(np.array_equal(np.array([[-65, NOAA_RESOLUTION_DEG], - [-180, NOAA_RESOLUTION_DEG]]),coord_nl)) + self.assertTrue( + np.array_equal( + np.array([[-65, NOAA_RESOLUTION_DEG], [-180, NOAA_RESOLUTION_DEG]]), + coord_nl, + ) + ) # test raises from wrong input agruments with self.assertRaises(ValueError) as cm: night, coord_nl, fn_light = nightlight.load_nightlight_noaa( - ref_year=2050, sat_name='F150') - self.assertEqual('Nightlight intensities for year 2050 and satellite F150 do not exist.', - str(cm.exception)) + ref_year=2050, sat_name="F150" + ) + self.assertEqual( + "Nightlight intensities for year 2050 and satellite F150 do not exist.", + str(cm.exception), + ) finally: # clean up SYSTEM_DIR.joinpath(pfile).unlink(missing_ok=True) SYSTEM_DIR.joinpath(gzfile).unlink(missing_ok=True) def test_untar_noaa_stable_nighlight(self): - """ Testing that input .tar file is moved into SYSTEM_DIR, - tif.gz file is extracted from .tar file and moved into SYSTEM_DIR, - exception are raised when no .tif.gz file is present in the tar file, - and the logger message is recorded if more then one .tif.gz is present in - .tar file. 
""" + """Testing that input .tar file is moved into SYSTEM_DIR, + tif.gz file is extracted from .tar file and moved into SYSTEM_DIR, + exception are raised when no .tif.gz file is present in the tar file, + and the logger message is recorded if more then one .tif.gz is present in + .tar file.""" # Create path to .tif.gz and .csv files already existing in SYSTEM_DIR - path_tif_gz_1 = Path(SYSTEM_DIR, 'F182013.v4c_web.stable_lights.avg_vis.tif.gz') - path_csv = Path(SYSTEM_DIR, 'GDP_TWN_IMF_WEO_data.csv') - path_tar = Path(SYSTEM_DIR, 'sample.tar') + path_tif_gz_1 = Path(SYSTEM_DIR, "F182013.v4c_web.stable_lights.avg_vis.tif.gz") + path_csv = Path(SYSTEM_DIR, "GDP_TWN_IMF_WEO_data.csv") + path_tar = Path(SYSTEM_DIR, "sample.tar") # Create .tar file and add .tif.gz and .csv - file_tar = tarfile.open(path_tar, "w") #create the tar file - file_tar.add(name = path_tif_gz_1, recursive = False, arcname = 'F182013.v4c_web.stable_lights.avg_vis.tif.gz') + file_tar = tarfile.open(path_tar, "w") # create the tar file + file_tar.add( + name=path_tif_gz_1, + recursive=False, + arcname="F182013.v4c_web.stable_lights.avg_vis.tif.gz", + ) file_tar.close() # Test that the files has been moved path_to_test = nightlight.untar_noaa_stable_nightlight(path_tar) self.assertTrue(path_to_test.exists()) - self.assertTrue(path_tar .exists()) + self.assertTrue(path_tar.exists()) path_tar.unlink() # Put no .tif.gz file in .tar file and check raises - path_tar = Path(SYSTEM_DIR, 'sample.tar') - file_tar = tarfile.open(path_tar, "w") #create the tar file - file_tar.add(name = path_csv, recursive = False, arcname ='GDP_TWN_IMF_WEO_data.csv' ) + path_tar = Path(SYSTEM_DIR, "sample.tar") + file_tar = tarfile.open(path_tar, "w") # create the tar file + file_tar.add(name=path_csv, recursive=False, arcname="GDP_TWN_IMF_WEO_data.csv") file_tar.close() with self.assertRaises(ValueError) as cm: nightlight.untar_noaa_stable_nightlight(path_tar) - self.assertEqual('No stable light intensities for selected year and satellite ' - f'in file {path_tar}',str(cm.exception)) + self.assertEqual( + "No stable light intensities for selected year and satellite " + f"in file {path_tar}", + str(cm.exception), + ) path_tar.unlink() # Test logger with having two .tif.gz file in .tar file - file_tar = tarfile.open(path_tar, "w") #create the tar file - file_tar.add(name = path_tif_gz_1, recursive = False, arcname = 'F182013.v4c_web.stable_lights.avg_vis.tif.gz' ) - file_tar.add(name = path_tif_gz_1, recursive = False, arcname = 'F182013.v4c_web.stable_lights.avg_vis.tif.gz' ) + file_tar = tarfile.open(path_tar, "w") # create the tar file + file_tar.add( + name=path_tif_gz_1, + recursive=False, + arcname="F182013.v4c_web.stable_lights.avg_vis.tif.gz", + ) + file_tar.add( + name=path_tif_gz_1, + recursive=False, + arcname="F182013.v4c_web.stable_lights.avg_vis.tif.gz", + ) file_tar.close() - with self.assertLogs('climada.entity.exposures.litpop.nightlight', level = 'WARNING') as cm: + with self.assertLogs( + "climada.entity.exposures.litpop.nightlight", level="WARNING" + ) as cm: nightlight.untar_noaa_stable_nightlight(path_tar) - self.assertIn('found more than one potential intensity file in', cm.output[0]) + self.assertIn("found more than one potential intensity file in", cm.output[0]) path_tar.unlink() def test_check_nl_local_file_exists(self): - """ Test that an array with the correct number of already existing files - is produced, the LOGGER messages logged and the ValueError raised. 
""" + """Test that an array with the correct number of already existing files + is produced, the LOGGER messages logged and the ValueError raised.""" # check logger messages by giving a to short req_file - with self.assertLogs('climada.entity.exposures.litpop.nightlight', level='WARNING') as cm: - nightlight.check_nl_local_file_exists(required_files = np.array([0, 0, 1, 1])) - self.assertIn('The parameter \'required_files\' was too short and is ignored', - cm.output[0]) + with self.assertLogs( + "climada.entity.exposures.litpop.nightlight", level="WARNING" + ) as cm: + nightlight.check_nl_local_file_exists(required_files=np.array([0, 0, 1, 1])) + self.assertIn( + "The parameter 'required_files' was too short and is ignored", cm.output[0] + ) # check logger message: not all files are available - with self.assertLogs('climada.entity.exposures.litpop.nightlight', level='DEBUG') as cm: + with self.assertLogs( + "climada.entity.exposures.litpop.nightlight", level="DEBUG" + ) as cm: nightlight.check_nl_local_file_exists() - self.assertIn('Not all satellite files available. Found ', cm.output[0]) - self.assertIn(f' out of 8 required files in {Path(SYSTEM_DIR)}', cm.output[0]) + self.assertIn("Not all satellite files available. Found ", cm.output[0]) + self.assertIn(f" out of 8 required files in {Path(SYSTEM_DIR)}", cm.output[0]) # check logger message: no files found in checkpath - check_path = Path('climada/entity/exposures') - with self.assertLogs('climada.entity.exposures.litpop.nightlight', level='INFO') as cm: + check_path = Path("climada/entity/exposures") + with self.assertLogs( + "climada.entity.exposures.litpop.nightlight", level="INFO" + ) as cm: # using a random path where no files are stored nightlight.check_nl_local_file_exists(check_path=check_path) - self.assertIn(f'No satellite files found locally in {check_path}', - cm.output[0]) + self.assertIn(f"No satellite files found locally in {check_path}", cm.output[0]) # test raises with wrong path - check_path = Path('/random/wrong/path') + check_path = Path("/random/wrong/path") with self.assertRaises(ValueError) as cm: nightlight.check_nl_local_file_exists(check_path=check_path) - self.assertEqual(f'The given path does not exist: {check_path}', - str(cm.exception)) + self.assertEqual( + f"The given path does not exist: {check_path}", str(cm.exception) + ) # test that files_exist is correct files_exist = nightlight.check_nl_local_file_exists() @@ -295,16 +386,18 @@ def test_check_files_exist(self): # If invalid directory is supplied it has to fail try: nightlight.check_nl_local_file_exists( - np.ones(np.count_nonzero(BM_FILENAMES)), 'Invalid/path')[0] - raise Exception("if the path is not valid, check_nl_local_file_exists should fail") + np.ones(np.count_nonzero(BM_FILENAMES)), "Invalid/path" + )[0] + raise Exception( + "if the path is not valid, check_nl_local_file_exists should fail" + ) except ValueError: pass files_exist = nightlight.check_nl_local_file_exists( - np.ones(np.count_nonzero(BM_FILENAMES)), SYSTEM_DIR) - self.assertTrue( - files_exist.sum() > 0, - f'{files_exist} {BM_FILENAMES}' + np.ones(np.count_nonzero(BM_FILENAMES)), SYSTEM_DIR ) + self.assertTrue(files_exist.sum() > 0, f"{files_exist} {BM_FILENAMES}") + # Execute Tests if __name__ == "__main__": diff --git a/climada/test/test_plot.py b/climada/test/test_plot.py index dcfb608f9..082f38e1b 100644 --- a/climada/test/test_plot.py +++ b/climada/test/test_plot.py @@ -18,36 +18,45 @@ test plots """ + import copy import unittest import urllib +from pathlib import Path 
-import numpy as np +import contextily as ctx import matplotlib.pyplot as plt +import numpy as np import pandas as pd -import contextily as ctx -from pathlib import Path -from climada.engine.unsequa import UncOutput -from climada.engine import ImpactCalc, ImpactFreqCurve, CostBenefit -from climada.entity import (Entity, ImpactFuncSet, Exposures, DiscRates, ImpfTropCyclone, Measure, - MeasureSet) -from climada.hazard import Hazard, Centroids -from climada.util.constants import ENT_DEMO_TODAY, TEST_UNC_OUTPUT_COSTBEN, HAZ_DEMO_FL -from climada.util.api_client import Client +from climada.engine import CostBenefit, ImpactCalc, ImpactFreqCurve +from climada.engine.unsequa import UncOutput +from climada.entity import ( + DiscRates, + Entity, + Exposures, + ImpactFuncSet, + ImpfTropCyclone, + Measure, + MeasureSet, +) +from climada.hazard import Centroids, Hazard from climada.test import get_test_file +from climada.util.api_client import Client +from climada.util.constants import ENT_DEMO_TODAY, HAZ_DEMO_FL, TEST_UNC_OUTPUT_COSTBEN - -test_unc_output_costben = Client().get_dataset_file(name=TEST_UNC_OUTPUT_COSTBEN, status='test_dataset') - +test_unc_output_costben = Client().get_dataset_file( + name=TEST_UNC_OUTPUT_COSTBEN, status="test_dataset" +) -HAZ_TEST_TC :Path = get_test_file('test_tc_florida') +HAZ_TEST_TC: Path = get_test_file("test_tc_florida") """ Hazard test file from Data API: Hurricanes from 1851 to 2011 over Florida with 100 centroids. Fraction is empty. Format: HDF5. """ + class TestPlotter(unittest.TestCase): """Test plot functions.""" @@ -62,31 +71,31 @@ def test_hazard_intensity_pass(self): hazard.event_name[3898] = "NNN_1190604_gen8" hazard.event_name[5488] = "NNN_1192804_gen8" myax = hazard.plot_intensity(event=36) - self.assertIn('Event ID 36: NNN_1185106_gen5', myax.get_title()) + self.assertIn("Event ID 36: NNN_1185106_gen5", myax.get_title()) myax = hazard.plot_intensity(event=-1) - self.assertIn('1-largest Event. ID 3899: NNN_1190604_gen8', myax.get_title()) + self.assertIn("1-largest Event. ID 3899: NNN_1190604_gen8", myax.get_title()) myax = hazard.plot_intensity(event=-4) - self.assertIn('4-largest Event. ID 5489: NNN_1192804_gen8', myax.get_title()) + self.assertIn("4-largest Event. ID 5489: NNN_1192804_gen8", myax.get_title()) myax = hazard.plot_intensity(event=0) - self.assertIn('TC max intensity at each point', myax.get_title()) + self.assertIn("TC max intensity at each point", myax.get_title()) myax = hazard.plot_intensity(centr=59) - self.assertIn('Centroid 59: (30.0, -79.0)', myax.get_title()) + self.assertIn("Centroid 59: (30.0, -79.0)", myax.get_title()) myax = hazard.plot_intensity(centr=-1) - self.assertIn('1-largest Centroid. 99: (30.0, -75.0)', myax.get_title()) + self.assertIn("1-largest Centroid. 99: (30.0, -75.0)", myax.get_title()) myax = hazard.plot_intensity(centr=-4) - self.assertIn('4-largest Centroid. 69: (30.0, -78.0)', myax.get_title()) + self.assertIn("4-largest Centroid. 
69: (30.0, -78.0)", myax.get_title())
         myax = hazard.plot_intensity(centr=0)
-        self.assertIn('TC max intensity at each point', myax.get_title())
+        self.assertIn("TC max intensity at each event", myax.get_title())
-        myax = hazard.plot_intensity(event='NNN_1192804_gen8')
-        self.assertIn('NNN_1192804_gen8', myax.get_title())
+        myax = hazard.plot_intensity(event="NNN_1192804_gen8")
+        self.assertIn("NNN_1192804_gen8", myax.get_title())

     def test_hazard_fraction_pass(self):
         """Generate all possible plots of the hazard fraction."""
@@ -94,30 +103,30 @@ def test_hazard_fraction_pass(self):
         hazard.event_name = [""] * hazard.event_id.size
         hazard.event_name[0] = "NNN_1185106_gen5"
         myax = hazard.plot_fraction(event=1)
-        self.assertIn('Event ID 1: NNN_1185106_gen5', myax.get_title())
+        self.assertIn("Event ID 1: NNN_1185106_gen5", myax.get_title())
         myax = hazard.plot_fraction(centr=1)
-        self.assertIn('Centroid 1: (10.424, -69.324)', myax.get_title())
+        self.assertIn("Centroid 1: (10.424, -69.324)", myax.get_title())

     def test_hazard_rp_intensity(self):
-        """"Plot exceedance intensity maps for different return periods"""
+        """Plot exceedance intensity maps for different return periods"""
         hazard = Hazard.from_hdf5(HAZ_TEST_TC)
         (axis1, axis2), _ = hazard.plot_rp_intensity([25, 50])
-        self.assertEqual('Return period: 25 years', axis1.get_title())
-        self.assertEqual('Return period: 50 years', axis2.get_title())
+        self.assertEqual("Return period: 25 years", axis1.get_title())
+        self.assertEqual("Return period: 50 years", axis2.get_title())

     def test_exposures_value_pass(self):
         """Plot exposures values."""
         myexp = pd.read_excel(ENT_DEMO_TODAY)
         myexp = Exposures(myexp)
         myexp.check()
-        myexp.description = 'demo_today'
+        myexp.description = "demo_today"
         myax = myexp.plot_hexbin()
-        self.assertEqual('demo_today', myax.get_title())
+        self.assertEqual("demo_today", myax.get_title())
         myexp.description = None
         myax = myexp.plot_hexbin()
-        self.assertEqual('', myax.get_title())
+        self.assertEqual("", myax.get_title())
         myexp.plot_scatter()
         myexp.plot_basemap()
@@ -129,9 +138,8 @@ def test_impact_funcs_pass(self):
         myfuncs = ImpactFuncSet.from_excel(ENT_DEMO_TODAY)
         myax = myfuncs.plot()
         self.assertEqual(2, len(myax))
-        self.assertIn('TC 1: Tropical cyclone default',
-                      myax[0].title.get_text())
-        self.assertIn('TC 3: TC Building code', myax[1].title.get_text())
+        self.assertIn("TC 1: Tropical cyclone default", myax[0].title.get_text())
+        self.assertIn("TC 3: TC Building code", myax[1].title.get_text())

     def test_impact_pass(self):
         """Plot impact exceedence frequency curves."""
@@ -142,21 +150,21 @@ def test_impact_pass(self):
         myimp = ImpactCalc(myent.exposures, myent.impact_funcs, myhaz).impact()
         ifc = myimp.calc_freq_curve()
         myax = ifc.plot()
-        self.assertIn('Exceedance frequency curve', myax.get_title())
+        self.assertIn("Exceedance frequency curve", myax.get_title())
         ifc2 = ImpactFreqCurve(
             return_per=ifc.return_per,
             impact=1.5e11 * np.ones(ifc.return_per.size),
-            label='prove'
+            label="prove",
         )
         ifc2.plot(axis=myax)

     def test_ctx_osm_pass(self):
         """Test basemap function using osm images"""
         myexp = Exposures()
-        myexp.gdf['latitude'] = np.array([30, 40, 50])
-        myexp.gdf['longitude'] = np.array([0, 0, 0])
-        myexp.gdf['value'] = np.array([1, 1, 1])
+        myexp.gdf["latitude"] = np.array([30, 40, 50])
+        myexp.gdf["longitude"] = np.array([0, 0, 0])
+        myexp.gdf["value"] = np.array([1, 1, 1])
         myexp.check()
         myexp.plot_basemap(url=ctx.providers.OpenStreetMap.Mapnik)
@@ -171,36 +179,44 @@ def test_disc_rates(self):
         disc.plot()

     def 
test_cost_benefit(self): - """ Test plot functions of cost benefit""" + """Test plot functions of cost benefit""" # Load hazard from the data API client = Client() future_year = 2080 - haz_present = client.get_hazard('tropical_cyclone', - properties={'country_name': 'Haiti', - 'climate_scenario': 'historical', - 'nb_synth_tracks':'10'}) - haz_future = client.get_hazard('tropical_cyclone', - properties={'country_name': 'Haiti', - 'climate_scenario': 'rcp60', - 'ref_year': str(future_year), - 'nb_synth_tracks':'10'}) + haz_present = client.get_hazard( + "tropical_cyclone", + properties={ + "country_name": "Haiti", + "climate_scenario": "historical", + "nb_synth_tracks": "10", + }, + ) + haz_future = client.get_hazard( + "tropical_cyclone", + properties={ + "country_name": "Haiti", + "climate_scenario": "rcp60", + "ref_year": str(future_year), + "nb_synth_tracks": "10", + }, + ) # Create an exposure - exp_present = client.get_litpop(country='Haiti') + exp_present = client.get_litpop(country="Haiti") exp_future = copy.deepcopy(exp_present) exp_future.ref_year = future_year n_years = exp_future.ref_year - exp_present.ref_year + 1 - growth = 1.02 ** n_years - exp_future.gdf['value'] = exp_future.gdf['value'] * growth + growth = 1.02**n_years + exp_future.gdf["value"] = exp_future.gdf["value"] * growth # Create an impact function impf_tc = ImpfTropCyclone.from_emanuel_usa() impf_set = ImpactFuncSet([impf_tc]) # Create adaptation measures meas_1 = Measure( - haz_type='TC', - name='Measure A', + haz_type="TC", + name="Measure A", color_rgb=np.array([0.8, 0.1, 0.1]), cost=5000000000, hazard_inten_imp=(1, -5), @@ -208,8 +224,8 @@ def test_cost_benefit(self): ) meas_2 = Measure( - haz_type='TC', - name='Measure B', + haz_type="TC", + name="Measure B", color_rgb=np.array([0.1, 0.1, 0.8]), cost=220000000, paa_impact=(1, -0.10), @@ -221,25 +237,41 @@ def test_cost_benefit(self): annual_discount_zero = np.zeros(n_years) discount_zero = DiscRates(year_range, annual_discount_zero) # Wrap the entity together - entity_present = Entity(exposures=exp_present, disc_rates=discount_zero, - impact_func_set=impf_set, measure_set=meas_set) - entity_future = Entity(exposures=exp_future, disc_rates=discount_zero, - impact_func_set=impf_set, measure_set=meas_set) + entity_present = Entity( + exposures=exp_present, + disc_rates=discount_zero, + impact_func_set=impf_set, + measure_set=meas_set, + ) + entity_future = Entity( + exposures=exp_future, + disc_rates=discount_zero, + impact_func_set=impf_set, + measure_set=meas_set, + ) # Create a cost benefit object costben = CostBenefit() - costben.calc(haz_present, entity_present, haz_future=haz_future, - ent_future=entity_future, future_year=future_year, - imp_time_depen=1, save_imp=True) + costben.calc( + haz_present, + entity_present, + haz_future=haz_future, + ent_future=entity_future, + future_year=future_year, + imp_time_depen=1, + save_imp=True, + ) # Call the plotting functions costben.plot_cost_benefit() costben.plot_event_view((25, 50, 100, 250)) costben.plot_waterfall_accumulated(haz_present, entity_present, entity_future) - ax = costben.plot_waterfall(haz_present, entity_present, - haz_future, entity_future) - costben.plot_arrow_averted(axis = ax, in_meas_names=['Measure A', 'Measure B'], - accumulate=True) - CostBenefit._plot_list_cost_ben(cb_list = [costben]) + ax = costben.plot_waterfall( + haz_present, entity_present, haz_future, entity_future + ) + costben.plot_arrow_averted( + axis=ax, in_meas_names=["Measure A", "Measure B"], accumulate=True + ) + 
CostBenefit._plot_list_cost_ben(cb_list=[costben]) def test_plot_unc_cb(self): """Test all cost benefit plots""" @@ -255,10 +287,11 @@ def test_plot_unc_cb(self): plt_sens = unc_output.plot_sensitivity() self.assertIsNotNone(plt_sens) plt.close() - plt_sens_2 = unc_output.plot_sensitivity_second_order(salib_si='S1') + plt_sens_2 = unc_output.plot_sensitivity_second_order(salib_si="S1") self.assertIsNotNone(plt_sens_2) plt.close() + # Execute Tests if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestPlotter) diff --git a/climada/test/test_util.py b/climada/test/test_util.py index f7edaa9c5..d6ccdbf7e 100644 --- a/climada/test/test_util.py +++ b/climada/test/test_util.py @@ -24,14 +24,13 @@ import numpy as np -from climada.entity import Exposures import climada.util.lines_polys_handler as u_lp from climada.engine import Impact - +from climada.entity import Exposures from climada.util.test.test_lines_polys_handler import ( - HAZ, EXP_POLY, GDF_POLY, + HAZ, IMPF_SET, check_impact, ) diff --git a/climada/test/test_util_calibrate.py b/climada/test/test_util_calibrate.py index 5432f62cd..8fc6929ff 100644 --- a/climada/test/test_util_calibrate.py +++ b/climada/test/test_util_calibrate.py @@ -20,25 +20,23 @@ import unittest -import pandas as pd import numpy as np import numpy.testing as npt +import pandas as pd +from matplotlib.axes import Axes from scipy.optimize import NonlinearConstraint from sklearn.metrics import mean_squared_error -from matplotlib.axes import Axes - -from climada.entity import ImpactFuncSet, ImpactFunc +from climada.entity import ImpactFunc, ImpactFuncSet from climada.util.calibrate import ( - Input, - ScipyMinimizeOptimizer, BayesianOptimizer, - OutputEvaluator, - BayesianOptimizerOutputEvaluator, BayesianOptimizerController, + BayesianOptimizerOutputEvaluator, + Input, + OutputEvaluator, + ScipyMinimizeOptimizer, ) - -from climada.util.calibrate.test.test_base import hazard, exposure +from climada.util.calibrate.test.test_base import exposure, hazard class TestScipyMinimizeOptimizer(unittest.TestCase): diff --git a/climada/util/__init__.py b/climada/util/__init__.py index 6a497f64d..a4cf8a450 100755 --- a/climada/util/__init__.py +++ b/climada/util/__init__.py @@ -18,7 +18,9 @@ init util """ + import logging + from pint import UnitRegistry from .config import * @@ -28,6 +30,7 @@ ureg = UnitRegistry() + class log_level: """Context manager that sets all loggers with names starting with name_prefix (default is "") to a given specified level. 
@@ -50,7 +53,7 @@ def __init__(self, level, name_prefix=""): name: (logger, logger.level) for name, logger in logging.root.manager.loggerDict.items() if isinstance(logger, logging.Logger) and name.startswith(name_prefix) - } + } if name_prefix == "": self.loggers[""] = (logging.getLogger(), logging.getLogger().level) diff --git a/climada/util/api_client.py b/climada/util/api_client.py index c6afca7a0..3857cf0d8 100644 --- a/climada/util/api_client.py +++ b/climada/util/api_client.py @@ -18,24 +18,25 @@ Data API client """ -from dataclasses import dataclass -from datetime import datetime + import hashlib import json import logging +import time +from dataclasses import dataclass +from datetime import datetime from os.path import commonprefix from pathlib import Path from urllib.parse import quote, unquote, urlsplit, urlunsplit -import time import pandas as pd -from peewee import CharField, DateTimeField, IntegrityError, Model, SqliteDatabase -import requests import pycountry +import requests +from peewee import CharField, DateTimeField, IntegrityError, Model, SqliteDatabase from climada import CONFIG from climada.entity import Exposures -from climada.hazard import Hazard, Centroids +from climada.hazard import Centroids, Hazard from climada.util.constants import SYSTEM_DIR LOGGER = logging.getLogger(__name__) diff --git a/climada/util/calibrate/__init__.py b/climada/util/calibrate/__init__.py index 2e947ee04..53d753aab 100644 --- a/climada/util/calibrate/__init__.py +++ b/climada/util/calibrate/__init__.py @@ -24,6 +24,6 @@ BayesianOptimizerController, BayesianOptimizerOutput, BayesianOptimizerOutputEvaluator, - select_best + select_best, ) from .scipy_optimizer import ScipyMinimizeOptimizer diff --git a/climada/util/calibrate/base.py b/climada/util/calibrate/base.py index d61644dc7..4e3fc21f6 100644 --- a/climada/util/calibrate/base.py +++ b/climada/util/calibrate/base.py @@ -19,20 +19,20 @@ """ from abc import ABC, abstractmethod -from dataclasses import dataclass, field, InitVar -from typing import Callable, Mapping, Optional, Tuple, Union, Any, Dict +from dataclasses import InitVar, dataclass, field from numbers import Number from pathlib import Path +from typing import Any, Callable, Dict, Mapping, Optional, Tuple, Union -import pandas as pd +import h5py import numpy as np -from scipy.optimize import Bounds, LinearConstraint, NonlinearConstraint +import pandas as pd import seaborn as sns -import h5py +from scipy.optimize import Bounds, LinearConstraint, NonlinearConstraint -from climada.hazard import Hazard -from climada.entity import Exposures, ImpactFuncSet from climada.engine import Impact, ImpactCalc +from climada.entity import Exposures, ImpactFuncSet +from climada.hazard import Hazard ConstraintType = Union[LinearConstraint, NonlinearConstraint, Mapping] @@ -187,7 +187,7 @@ class Output: params: Mapping[str, Number] target: Number - def to_hdf5(self, filepath: Union[Path, str], mode:str = "x"): + def to_hdf5(self, filepath: Union[Path, str], mode: str = "x"): """Write the output into an H5 file This stores the data as attributes because we only store single numbers, not @@ -219,6 +219,7 @@ def from_hdf5(cls, filepath: Union[Path, str]): params = dict(file["base"]["params"].attrs.items()) return cls(params=params, target=target) + @dataclass class OutputEvaluator: """Evaluate the output of a calibration task diff --git a/climada/util/calibrate/bayesian_optimizer.py b/climada/util/calibrate/bayesian_optimizer.py index 98fe302c0..b34688050 100644 --- 
a/climada/util/calibrate/bayesian_optimizer.py +++ b/climada/util/calibrate/bayesian_optimizer.py @@ -18,26 +18,25 @@ Calibration with Bayesian Optimization """ -from dataclasses import dataclass, InitVar, field -from typing import Mapping, Optional, Any, Union, List, Tuple -from numbers import Number -from itertools import combinations, repeat -from collections import deque, namedtuple import logging +from collections import deque, namedtuple +from dataclasses import InitVar, dataclass, field +from itertools import combinations, repeat +from numbers import Number from pathlib import Path +from typing import Any, List, Mapping, Optional, Tuple, Union -import pandas as pd -import numpy as np import matplotlib as mpl -import matplotlib.pyplot as plt import matplotlib.axes as maxes import matplotlib.patches as mpatches +import matplotlib.pyplot as plt import matplotlib.ticker as mticker -from bayes_opt import BayesianOptimization, Events, UtilityFunction, ScreenLogger +import numpy as np +import pandas as pd +from bayes_opt import BayesianOptimization, Events, ScreenLogger, UtilityFunction from bayes_opt.target_space import TargetSpace -from .base import Input, Output, Optimizer, OutputEvaluator - +from .base import Input, Optimizer, Output, OutputEvaluator LOGGER = logging.getLogger(__name__) diff --git a/climada/util/calibrate/scipy_optimizer.py b/climada/util/calibrate/scipy_optimizer.py index 12d46b661..2962d8fd7 100644 --- a/climada/util/calibrate/scipy_optimizer.py +++ b/climada/util/calibrate/scipy_optimizer.py @@ -19,12 +19,12 @@ """ from dataclasses import dataclass -from typing import Mapping, Any, Dict, List +from typing import Any, Dict, List, Mapping import numpy as np -from scipy.optimize import minimize, OptimizeResult +from scipy.optimize import OptimizeResult, minimize -from .base import Output, Optimizer +from .base import Optimizer, Output @dataclass diff --git a/climada/util/calibrate/test/test_base.py b/climada/util/calibrate/test/test_base.py index f7b5fb69f..e2ef72bab 100644 --- a/climada/util/calibrate/test/test_base.py +++ b/climada/util/calibrate/test/test_base.py @@ -19,19 +19,18 @@ """ import unittest -from unittest.mock import patch, create_autospec, MagicMock -from tempfile import TemporaryDirectory from pathlib import Path +from tempfile import TemporaryDirectory +from unittest.mock import MagicMock, create_autospec, patch import numpy as np import numpy.testing as npt import pandas as pd from scipy.sparse import csr_matrix -from climada.entity import Exposures, ImpactFunc, ImpactFuncSet -from climada.hazard import Hazard, Centroids from climada.engine import ImpactCalc - +from climada.entity import Exposures, ImpactFunc, ImpactFuncSet +from climada.hazard import Centroids, Hazard from climada.util.calibrate import Input, OutputEvaluator from climada.util.calibrate.base import Optimizer, Output @@ -222,6 +221,7 @@ def test_cycle(self): self.assertEqual(output.target, output_2.target) self.assertDictEqual(output.params, output_2.params) + class TestOutputEvaluator(unittest.TestCase): """Test the output evaluator""" diff --git a/climada/util/calibrate/test/test_bayesian_optimizer.py b/climada/util/calibrate/test/test_bayesian_optimizer.py index 71af9b354..e80960fda 100644 --- a/climada/util/calibrate/test/test_bayesian_optimizer.py +++ b/climada/util/calibrate/test/test_bayesian_optimizer.py @@ -19,25 +19,25 @@ """ import unittest -from unittest.mock import patch, MagicMock -from tempfile import TemporaryDirectory from pathlib import Path +from tempfile import 
TemporaryDirectory +from unittest.mock import MagicMock, patch import numpy as np import numpy.testing as npt import pandas as pd from bayes_opt import BayesianOptimization, Events -from scipy.optimize import NonlinearConstraint from matplotlib.axes import Axes +from scipy.optimize import NonlinearConstraint -from climada.util.calibrate import Input, BayesianOptimizer, BayesianOptimizerController +from climada.util.calibrate import BayesianOptimizer, BayesianOptimizerController, Input from climada.util.calibrate.bayesian_optimizer import ( + BayesianOptimizerOutput, Improvement, StopEarly, - BayesianOptimizerOutput, ) -from .test_base import hazard, exposure +from .test_base import exposure, hazard def input(): diff --git a/climada/util/calibrate/test/test_scipy_optimizer.py b/climada/util/calibrate/test/test_scipy_optimizer.py index 01b04ea5f..06bf4e595 100644 --- a/climada/util/calibrate/test/test_scipy_optimizer.py +++ b/climada/util/calibrate/test/test_scipy_optimizer.py @@ -19,8 +19,8 @@ """ import unittest -from unittest.mock import patch, MagicMock, call -from typing import Optional, List +from typing import List, Optional +from unittest.mock import MagicMock, call, patch import numpy as np import numpy.testing as npt @@ -29,7 +29,7 @@ from climada.util.calibrate import Input, ScipyMinimizeOptimizer -from .test_base import hazard, exposure +from .test_base import exposure, hazard class TestScipyMinimizeOptimizer(unittest.TestCase): diff --git a/climada/util/checker.py b/climada/util/checker.py index 17e9fa76d..0d17b6036 100644 --- a/climada/util/checker.py +++ b/climada/util/checker.py @@ -20,14 +20,15 @@ """ __all__ = [ - 'size', - 'shape', - 'array_optional', - 'array_default', - 'prune_csr_matrix', + "size", + "shape", + "array_optional", + "array_default", + "prune_csr_matrix", ] import logging + import numpy as np import scipy.sparse as sparse @@ -58,12 +59,16 @@ def check_obligatories(var_dict, var_obl, name_prefix, n_size, n_row, n_col): """ for var_name, var_val in var_dict.items(): if var_name in var_obl: - if (isinstance(var_val, np.ndarray) and var_val.ndim == 1) \ - or isinstance(var_val, list): + if (isinstance(var_val, np.ndarray) and var_val.ndim == 1) or isinstance( + var_val, list + ): size(n_size, var_val, name_prefix + var_name) - elif (isinstance(var_val, np.ndarray) and var_val.ndim == 2): + elif isinstance(var_val, np.ndarray) and var_val.ndim == 2: shape(n_row, n_col, var_val, name_prefix + var_name) - elif isinstance(var_val, (np.ndarray, sparse.csr_matrix)) and var_val.ndim == 2: + elif ( + isinstance(var_val, (np.ndarray, sparse.csr_matrix)) + and var_val.ndim == 2 + ): shape(n_row, n_col, var_val, name_prefix + var_name) @@ -107,9 +112,13 @@ def size(exp_len, var, var_name): try: if isinstance(exp_len, int): if exp_len != len(var): - raise ValueError(f"Invalid {var_name} size: {str(exp_len)} != {len(var)}.") + raise ValueError( + f"Invalid {var_name} size: {str(exp_len)} != {len(var)}." + ) elif len(var) not in exp_len: - raise ValueError(f"Invalid {var_name} size: {len(var)} not in {str(exp_len)}.") + raise ValueError( + f"Invalid {var_name} size: {len(var)} not in {str(exp_len)}." + ) except TypeError as err: raise ValueError(f"{var_name} has wrong size.") from err @@ -123,9 +132,13 @@ def shape(exp_row, exp_col, var, var_name): """ try: if exp_row != var.shape[0]: - raise ValueError(f"Invalid {var_name} row size: {exp_row} != {var.shape[0]}.") + raise ValueError( + f"Invalid {var_name} row size: {exp_row} != {var.shape[0]}." 
+ ) if exp_col != var.shape[1]: - raise ValueError(f"Invalid {var_name} column size: {exp_col} != {var.shape[1]}.") + raise ValueError( + f"Invalid {var_name} column size: {exp_col} != {var.shape[1]}." + ) except TypeError as err: raise ValueError("%s has wrong dimensions." % var_name) from err @@ -182,6 +195,7 @@ def array_default(exp_len, var, var_name, def_val): size(exp_len, var, var_name) return res + def prune_csr_matrix(matrix: sparse.csr_matrix): """Ensure that the matrix is in the "canonical format". diff --git a/climada/util/config.py b/climada/util/config.py index 7e607f7d4..17975f09a 100644 --- a/climada/util/config.py +++ b/climada/util/config.py @@ -20,17 +20,17 @@ """ __all__ = [ - 'CONFIG', + "CONFIG", ] -import sys -import re import json import logging +import re +import sys from pathlib import Path -class Config(): +class Config: """Convenience Class. A Config object is a slow JSON object like nested dictonary who's values can be accessed by their names right away. E.g.: `a.b.c.str()` instead of `a['b']['c']` """ @@ -47,10 +47,14 @@ def __getattribute__(self, __name): try: return super().__getattribute__(__name) except AttributeError: - conf_files = [Path(_find_in_parents(conf_dir, CONFIG_NAME)) - if _find_in_parents(conf_dir, CONFIG_NAME) - else conf_dir / CONFIG_NAME - for conf_dir in CONFIG_DIRS[::-1]] + conf_files = [ + ( + Path(_find_in_parents(conf_dir, CONFIG_NAME)) + if _find_in_parents(conf_dir, CONFIG_NAME) + else conf_dir / CONFIG_NAME + ) + for conf_dir in CONFIG_DIRS[::-1] + ] raise AttributeError( # pylint: disable=raise-missing-from f"there is no '{__name}' configured for '{super().__getattribute__('_name')}'." f" check your config files: {conf_files}" @@ -58,19 +62,35 @@ def __getattribute__(self, __name): def __str__(self): # pylint: disable=bare-except,multiple-statements,too-complex - try: return self.str() - except: pass - try: return str(self.int()) - except: pass - try: return str(self.float()) - except: pass - try: return str(self.bool()) - except: pass - try: return str(self.list()) - except: pass - return '{{{}}}'.format(", ".join([ - f'{k}: {v}' for (k, v) in self.__dict__.items() if not k in {'_name', '_root'} - ])) + try: + return self.str() + except: + pass + try: + return str(self.int()) + except: + pass + try: + return str(self.float()) + except: + pass + try: + return str(self.bool()) + except: + pass + try: + return str(self.list()) + except: + pass + return "{{{}}}".format( + ", ".join( + [ + f"{k}: {v}" + for (k, v) in self.__dict__.items() + if not k in {"_name", "_root"} + ] + ) + ) def __repr__(self): return self.__str__() @@ -111,15 +131,18 @@ def str(self, index=None): Exception if it is not a string """ + def feval(root, cstr): def expand(dct, lst): if len(lst) == 1: return dct.__getattribute__(lst[0]).str() return expand(dct.__getattribute__(lst[0]), lst[1:]) + def msub(match): - cpath = match.group(1).split('.') + cpath = match.group(1).split(".") return expand(root, cpath) - return re.sub(r'{([\w\.]+)}', msub, cstr) + + return re.sub(r"{([\w\.]+)}", msub, cstr) if index is None: if self._val.__class__ is str: @@ -261,7 +284,7 @@ def dir(self, index=None, create=True): @classmethod def _expand_source_dir(cls, path): parts = path.parts - if parts[0] == '...': + if parts[0] == "...": return Path(cls.SOURCE_DIR, *parts[1:]) return Path(*parts) @@ -271,9 +294,13 @@ def _objectify_dict(cls, name, dct, root): obj = Config(name=name, root=root) for key, val in dct.items(): if val.__class__ is dict: - obj.__setattr__(key, 
cls._objectify_dict(name=key, dct=val, root=obj._root)) + obj.__setattr__( + key, cls._objectify_dict(name=key, dct=val, root=obj._root) + ) elif val.__class__ is list: - obj.__setattr__(key, cls._objectify_list(name=key, lst=val, root=obj._root)) + obj.__setattr__( + key, cls._objectify_list(name=key, lst=val, root=obj._root) + ) else: obj.__setattr__(key, Config(name=key, val=val, root=obj._root)) return obj @@ -303,7 +330,7 @@ def from_dict(cls, dct): Config contaning the same data as the input parameter `dct` """ - return cls._objectify_dict('climada.CONFIG', dct, root=None) + return cls._objectify_dict("climada.CONFIG", dct, root=None) def _supersede(nested, addendum): @@ -328,15 +355,12 @@ def _find_in_parents(directory, filename): def _fetch_conf(directories, config_name): - superseding_configs = [ - _find_in_parents(path, config_name) - for path in directories - ] + superseding_configs = [_find_in_parents(path, config_name) for path in directories] conf_dct = dict() for conf_path in superseding_configs: if conf_path is None: continue - with open(conf_path, encoding='utf-8') as conf: + with open(conf_path, encoding="utf-8") as conf: dct = json.load(conf) conf_dct = _supersede(conf_dct, dct) @@ -344,11 +368,11 @@ def _fetch_conf(directories, config_name): SOURCE_DIR = Path(__file__).absolute().parent.parent.parent -CONFIG_NAME = 'climada.conf' +CONFIG_NAME = "climada.conf" CONFIG_DIRS = [ - Path(SOURCE_DIR, 'climada', 'conf'), # default config from the climada repository - Path(Path.home(), 'climada', 'conf'), # ~/climada/conf directory - Path(Path.home(), '.config'), # ~/.config directory + Path(SOURCE_DIR, "climada", "conf"), # default config from the climada repository + Path(Path.home(), "climada", "conf"), # ~/climada/conf directory + Path(Path.home(), ".config"), # ~/.config directory Path.cwd(), # current working directory ] @@ -358,10 +382,11 @@ def _fetch_conf(directories, config_name): # set climada style logging if CONFIG.logging.managed.bool(): - LOGGER = logging.getLogger('climada') + LOGGER = logging.getLogger("climada") LOGGER.propagate = False FORMATTER = logging.Formatter( - "%(asctime)s - %(name)s - %(levelname)s - %(message)s") + "%(asctime)s - %(name)s - %(levelname)s - %(message)s" + ) CONSOLE = logging.StreamHandler(stream=sys.stdout) CONSOLE.setFormatter(FORMATTER) LOGGER.addHandler(CONSOLE) diff --git a/climada/util/constants.py b/climada/util/constants.py index 85569f5fc..a4e595aaa 100644 --- a/climada/util/constants.py +++ b/climada/util/constants.py @@ -19,33 +19,37 @@ Define constants. 
""" -__all__ = ['SYSTEM_DIR', - 'DEMO_DIR', - 'ENT_DEMO_TODAY', - 'ENT_DEMO_FUTURE', - 'HAZ_DEMO_MAT', - 'HAZ_DEMO_FL', - 'ENT_TEMPLATE_XLS', - 'HAZ_TEMPLATE_XLS', - 'ONE_LAT_KM', - 'EARTH_RADIUS_KM', - 'GLB_CENTROIDS_MAT', - 'GLB_CENTROIDS_NC', - 'ISIMIP_GPWV3_NATID_150AS', - 'NATEARTH_CENTROIDS', - 'RIVER_FLOOD_REGIONS_CSV', - 'TC_ANDREW_FL', - 'HAZ_DEMO_H5', - 'EXP_DEMO_H5', - 'WS_DEMO_NC', - 'TEST_UNC_OUTPUT_IMPACT', - 'TEST_UNC_OUTPUT_COSTBEN'] +__all__ = [ + "SYSTEM_DIR", + "DEMO_DIR", + "ENT_DEMO_TODAY", + "ENT_DEMO_FUTURE", + "HAZ_DEMO_MAT", + "HAZ_DEMO_FL", + "ENT_TEMPLATE_XLS", + "HAZ_TEMPLATE_XLS", + "ONE_LAT_KM", + "EARTH_RADIUS_KM", + "GLB_CENTROIDS_MAT", + "GLB_CENTROIDS_NC", + "ISIMIP_GPWV3_NATID_150AS", + "NATEARTH_CENTROIDS", + "RIVER_FLOOD_REGIONS_CSV", + "TC_ANDREW_FL", + "HAZ_DEMO_H5", + "EXP_DEMO_H5", + "WS_DEMO_NC", + "TEST_UNC_OUTPUT_IMPACT", + "TEST_UNC_OUTPUT_COSTBEN", +] + +import matplotlib as mpl # pylint: disable=unused-import # without importing numpy ahead of fiona the debugger may run into an error import numpy from fiona.crs import from_epsg -import matplotlib as mpl + from .config import CONFIG SYSTEM_DIR = CONFIG.local_data.system.dir(create=False) @@ -54,7 +58,7 @@ DEMO_DIR = CONFIG.local_data.demo.dir(create=False) """Folder containing the data used for tutorials""" -ISIMIP_GPWV3_NATID_150AS = SYSTEM_DIR.joinpath('NatID_grid_0150as.nc') +ISIMIP_GPWV3_NATID_150AS = SYSTEM_DIR.joinpath("NatID_grid_0150as.nc") """ Compressed version of National Identifier Grid in 150 arc-seconds from ISIMIP project, based on GPWv3. Location in ISIMIP repository: @@ -70,12 +74,12 @@ GLB_CENTROIDS_NC = ISIMIP_GPWV3_NATID_150AS """For backwards compatibility, it remains available under its old name.""" -GLB_CENTROIDS_MAT = SYSTEM_DIR.joinpath('GLB_NatID_grid_0360as_adv_2.mat') +GLB_CENTROIDS_MAT = SYSTEM_DIR.joinpath("GLB_NatID_grid_0360as_adv_2.mat") """Global centroids""" NATEARTH_CENTROIDS = { - 150: SYSTEM_DIR.joinpath('NatEarth_Centroids_150as.hdf5'), - 360: SYSTEM_DIR.joinpath('NatEarth_Centroids_360as.hdf5'), + 150: SYSTEM_DIR.joinpath("NatEarth_Centroids_150as.hdf5"), + 360: SYSTEM_DIR.joinpath("NatEarth_Centroids_360as.hdf5"), } """ Global centroids at XXX arc-seconds resolution, @@ -83,30 +87,32 @@ coast from NASA. """ -ENT_TEMPLATE_XLS = SYSTEM_DIR.joinpath('entity_template.xlsx') +ENT_TEMPLATE_XLS = SYSTEM_DIR.joinpath("entity_template.xlsx") """Entity template in xls format.""" -HAZ_TEMPLATE_XLS = SYSTEM_DIR.joinpath('hazard_template.xlsx') +HAZ_TEMPLATE_XLS = SYSTEM_DIR.joinpath("hazard_template.xlsx") """Hazard template in xls format.""" -RIVER_FLOOD_REGIONS_CSV = SYSTEM_DIR.joinpath('NatRegIDs.csv') +RIVER_FLOOD_REGIONS_CSV = SYSTEM_DIR.joinpath("NatRegIDs.csv") """Look-up table for river flood module""" -HAZ_DEMO_FL = DEMO_DIR.joinpath('SC22000_VE__M1.grd.gz') +HAZ_DEMO_FL = DEMO_DIR.joinpath("SC22000_VE__M1.grd.gz") """Raster file of flood over Venezuela. Model from GAR2015""" -HAZ_DEMO_MAT = DEMO_DIR.joinpath('atl_prob_nonames.mat') +HAZ_DEMO_MAT = DEMO_DIR.joinpath("atl_prob_nonames.mat") """ Hazard demo from climada in MATLAB: hurricanes from 1851 to 2011 over Florida with 100 centroids. """ -HAZ_DEMO_H5 = DEMO_DIR.joinpath('tc_fl_1990_2004.h5') +HAZ_DEMO_H5 = DEMO_DIR.joinpath("tc_fl_1990_2004.h5") """ Hazard demo in hdf5 format: IBTrACS from 1990 to 2004 over Florida with 2500 centroids. 
""" -WS_DEMO_NC = [DEMO_DIR.joinpath('fp_lothar_crop-test.nc'), - DEMO_DIR.joinpath('fp_xynthia_crop-test.nc')] +WS_DEMO_NC = [ + DEMO_DIR.joinpath("fp_lothar_crop-test.nc"), + DEMO_DIR.joinpath("fp_xynthia_crop-test.nc"), +] """ Winter storm in Europe files. These test files have been generated using the netCDF kitchen sink: @@ -115,46 +121,256 @@ """ -ENT_DEMO_TODAY = DEMO_DIR.joinpath('demo_today.xlsx') +ENT_DEMO_TODAY = DEMO_DIR.joinpath("demo_today.xlsx") """Entity demo present in xslx format.""" -ENT_DEMO_FUTURE = DEMO_DIR.joinpath('demo_future_TEST.xlsx') +ENT_DEMO_FUTURE = DEMO_DIR.joinpath("demo_future_TEST.xlsx") """Entity demo future in xslx format.""" -EXP_DEMO_H5 = DEMO_DIR.joinpath('exp_demo_today.h5') +EXP_DEMO_H5 = DEMO_DIR.joinpath("exp_demo_today.h5") """Exposures over Florida""" -TC_ANDREW_FL = DEMO_DIR.joinpath('ibtracs_global_intp-None_1992230N11325.csv') +TC_ANDREW_FL = DEMO_DIR.joinpath("ibtracs_global_intp-None_1992230N11325.csv") """Tropical cyclone Andrew in Florida""" -TEST_UNC_OUTPUT_IMPACT = 'test_unc_output_impact' +TEST_UNC_OUTPUT_IMPACT = "test_unc_output_impact" """Demo uncertainty impact output""" -TEST_UNC_OUTPUT_COSTBEN = 'test_unc_output_costben' +TEST_UNC_OUTPUT_COSTBEN = "test_unc_output_costben" """Demo uncertainty costben output""" ISIMIP_NATID_TO_ISO = [ - '', 'ABW', 'AFG', 'AGO', 'AIA', 'ALB', 'AND', 'ANT', 'ARE', 'ARG', 'ARM', - 'ASM', 'ATG', 'AUS', 'AUT', 'AZE', 'BDI', 'BEL', 'BEN', 'BFA', 'BGD', 'BGR', - 'BHR', 'BHS', 'BIH', 'BLR', 'BLZ', 'BMU', 'BOL', 'BRA', 'BRB', 'BRN', 'BTN', - 'BWA', 'CAF', 'CAN', 'CHE', 'CHL', 'CHN', 'CIV', 'CMR', 'COD', 'COG', 'COK', - 'COL', 'COM', 'CPV', 'CRI', 'CUB', 'CYM', 'CYP', 'CZE', 'DEU', 'DJI', 'DMA', - 'DNK', 'DOM', 'DZA', 'ECU', 'EGY', 'ERI', 'ESP', 'EST', 'ETH', 'FIN', 'FJI', - 'FLK', 'FRA', 'FRO', 'FSM', 'GAB', 'GBR', 'GEO', 'GGY', 'GHA', 'GIB', 'GIN', - 'GLP', 'GMB', 'GNB', 'GNQ', 'GRC', 'GRD', 'GTM', 'GUF', 'GUM', 'GUY', 'HKG', - 'HND', 'HRV', 'HTI', 'HUN', 'IDN', 'IMN', 'IND', 'IRL', 'IRN', 'IRQ', 'ISL', - 'ISR', 'ITA', 'JAM', 'JEY', 'JOR', 'JPN', 'KAZ', 'KEN', 'KGZ', 'KHM', 'KIR', - 'KNA', 'KOR', 'KWT', 'LAO', 'LBN', 'LBR', 'LBY', 'LCA', 'LIE', 'LKA', 'LSO', - 'LTU', 'LUX', 'LVA', 'MAC', 'MAR', 'MCO', 'MDA', 'MDG', 'MDV', 'MEX', 'MHL', - 'MKD', 'MLI', 'MLT', 'MMR', 'MNG', 'MNP', 'MOZ', 'MRT', 'MSR', 'MTQ', 'MUS', - 'MWI', 'MYS', 'MYT', 'NAM', 'NCL', 'NER', 'NFK', 'NGA', 'NIC', 'NIU', 'NLD', - 'NOR', 'NPL', 'NRU', 'NZL', 'OMN', 'PAK', 'PAN', 'PCN', 'PER', 'PHL', 'PLW', - 'PNG', 'POL', 'PRI', 'PRK', 'PRT', 'PRY', 'PSE', 'PYF', 'QAT', 'REU', 'ROU', - 'RUS', 'RWA', 'SAU', 'SCG', 'SDN', 'SEN', 'SGP', 'SHN', 'SJM', 'SLB', 'SLE', - 'SLV', 'SMR', 'SOM', 'SPM', 'STP', 'SUR', 'SVK', 'SVN', 'SWE', 'SWZ', 'SYC', - 'SYR', 'TCA', 'TCD', 'TGO', 'THA', 'TJK', 'TKL', 'TKM', 'TLS', 'TON', 'TTO', - 'TUN', 'TUR', 'TUV', 'TWN', 'TZA', 'UGA', 'UKR', 'URY', 'USA', 'UZB', 'VCT', - 'VEN', 'VGB', 'VIR', 'VNM', 'VUT', 'WLF', 'WSM', 'YEM', 'ZAF', 'ZMB', 'ZWE', + "", + "ABW", + "AFG", + "AGO", + "AIA", + "ALB", + "AND", + "ANT", + "ARE", + "ARG", + "ARM", + "ASM", + "ATG", + "AUS", + "AUT", + "AZE", + "BDI", + "BEL", + "BEN", + "BFA", + "BGD", + "BGR", + "BHR", + "BHS", + "BIH", + "BLR", + "BLZ", + "BMU", + "BOL", + "BRA", + "BRB", + "BRN", + "BTN", + "BWA", + "CAF", + "CAN", + "CHE", + "CHL", + "CHN", + "CIV", + "CMR", + "COD", + "COG", + "COK", + "COL", + "COM", + "CPV", + "CRI", + "CUB", + "CYM", + "CYP", + "CZE", + "DEU", + "DJI", + "DMA", + "DNK", + "DOM", + "DZA", + "ECU", + "EGY", + "ERI", + "ESP", + "EST", + "ETH", + "FIN", 
+ "FJI", + "FLK", + "FRA", + "FRO", + "FSM", + "GAB", + "GBR", + "GEO", + "GGY", + "GHA", + "GIB", + "GIN", + "GLP", + "GMB", + "GNB", + "GNQ", + "GRC", + "GRD", + "GTM", + "GUF", + "GUM", + "GUY", + "HKG", + "HND", + "HRV", + "HTI", + "HUN", + "IDN", + "IMN", + "IND", + "IRL", + "IRN", + "IRQ", + "ISL", + "ISR", + "ITA", + "JAM", + "JEY", + "JOR", + "JPN", + "KAZ", + "KEN", + "KGZ", + "KHM", + "KIR", + "KNA", + "KOR", + "KWT", + "LAO", + "LBN", + "LBR", + "LBY", + "LCA", + "LIE", + "LKA", + "LSO", + "LTU", + "LUX", + "LVA", + "MAC", + "MAR", + "MCO", + "MDA", + "MDG", + "MDV", + "MEX", + "MHL", + "MKD", + "MLI", + "MLT", + "MMR", + "MNG", + "MNP", + "MOZ", + "MRT", + "MSR", + "MTQ", + "MUS", + "MWI", + "MYS", + "MYT", + "NAM", + "NCL", + "NER", + "NFK", + "NGA", + "NIC", + "NIU", + "NLD", + "NOR", + "NPL", + "NRU", + "NZL", + "OMN", + "PAK", + "PAN", + "PCN", + "PER", + "PHL", + "PLW", + "PNG", + "POL", + "PRI", + "PRK", + "PRT", + "PRY", + "PSE", + "PYF", + "QAT", + "REU", + "ROU", + "RUS", + "RWA", + "SAU", + "SCG", + "SDN", + "SEN", + "SGP", + "SHN", + "SJM", + "SLB", + "SLE", + "SLV", + "SMR", + "SOM", + "SPM", + "STP", + "SUR", + "SVK", + "SVN", + "SWE", + "SWZ", + "SYC", + "SYR", + "TCA", + "TCD", + "TGO", + "THA", + "TJK", + "TKL", + "TKM", + "TLS", + "TON", + "TTO", + "TUN", + "TUR", + "TUV", + "TWN", + "TZA", + "UGA", + "UKR", + "URY", + "USA", + "UZB", + "VCT", + "VEN", + "VGB", + "VIR", + "VNM", + "VUT", + "WLF", + "WSM", + "YEM", + "ZAF", + "ZMB", + "ZWE", ] """ISO 3166 alpha-3 codes of countries used in ISIMIP_GPWV3_NATID_150AS""" @@ -179,7 +395,9 @@ dict(name="Serranilla Bank", alpha_2="XR", alpha_3="XXR", numeric="913"), dict(name="Siachen Glacier", alpha_2="XH", alpha_3="XXH", numeric="914"), dict(name="Somaliland", alpha_2="XM", alpha_3="XXM", numeric="915"), - dict(name="Southern Patagonian Ice Field", alpha_2="XN", alpha_3="XXN", numeric="918"), + dict( + name="Southern Patagonian Ice Field", alpha_2="XN", alpha_3="XXN", numeric="918" + ), dict(name="Spratly Is.", alpha_2="XP", alpha_3="XXP", numeric="916"), dict(name="USNB Guantanamo Bay", alpha_2="XG", alpha_3="XXG", numeric="917"), ] @@ -199,513 +417,517 @@ DEF_EPSG = 4326 """Default EPSG code""" -DEF_CRS = f'EPSG:{DEF_EPSG}' +DEF_CRS = f"EPSG:{DEF_EPSG}" """Default coordinate reference system WGS 84, str, for pyproj and rasterio CRS.from_string()""" DEF_CRS_FIONA = from_epsg(DEF_EPSG) """Default coordinate reference system WGS 84, dict, for fiona interface""" -cm_data1 = [[0.00000000, 0.00000000, 0.00000000], - [0.00032031, 0.00020876, 0.00015576], - [0.00115213, 0.00071222, 0.00050933], - [0.00246632, 0.00145292, 0.00099932], - [0.00426111, 0.00240248, 0.00159470], - [0.00654129, 0.00354149, 0.00227479], - [0.00931453, 0.00485497, 0.00302435], - [0.01259008, 0.00633067, 0.00383153], - [0.01637810, 0.00795809, 0.00468676], - [0.02068947, 0.00972796, 0.00558214], - [0.02553552, 0.01163194, 0.00651101], - [0.03092793, 0.01366243, 0.00746771], - [0.03687870, 0.01581232, 0.00844736], - [0.04329108, 0.01807499, 0.00944575], - [0.04970018, 0.02044415, 0.01045917], - [0.05607744, 0.02291381, 0.01148441], - [0.06242826, 0.02547822, 0.01251862], - [0.06875727, 0.02813185, 0.01355932], - [0.07506844, 0.03086930, 0.01460431], - [0.08136524, 0.03368535, 0.01565167], - [0.08765071, 0.03657489, 0.01669973], - [0.09392754, 0.03953289, 0.01774700], - [0.10019812, 0.04248851, 0.01879222], - [0.10646459, 0.04536893, 0.01983431], - [0.11272888, 0.04818555, 0.02087234], - [0.11899272, 0.05094021, 0.02190555], - [0.12525770, 0.05363453, 
0.02293331], - [0.13152527, 0.05626994, 0.02395516], - [0.13779673, 0.05884770, 0.02497073], - [0.14407332, 0.06136894, 0.02597979], - [0.15035614, 0.06383462, 0.02698225], - [0.15664624, 0.06624561, 0.02797810], - [0.16294457, 0.06860266, 0.02896747], - [0.16925203, 0.07090640, 0.02995057], - [0.17556946, 0.07315739, 0.03092776], - [0.18189762, 0.07535608, 0.03189947], - [0.18823726, 0.07750287, 0.03286623], - [0.19458905, 0.07959805, 0.03382870], - [0.20095364, 0.08164185, 0.03478764], - [0.20733163, 0.08363445, 0.03574389], - [0.21372359, 0.08557593, 0.03669841], - [0.22013006, 0.08746634, 0.03765228], - [0.22655154, 0.08930565, 0.03860667], - [0.23298852, 0.09109380, 0.03956286], - [0.23944144, 0.09283065, 0.04052097], - [0.24591073, 0.09451600, 0.04146142], - [0.25239679, 0.09614964, 0.04239527], - [0.25890000, 0.09773126, 0.04332440], - [0.26542072, 0.09926052, 0.04425071], - [0.27195929, 0.10073705, 0.04517610], - [0.27851612, 0.10216029, 0.04610242], - [0.28509144, 0.10352983, 0.04703172], - [0.29168551, 0.10484515, 0.04796603], - [0.29829858, 0.10610566, 0.04890741], - [0.30493089, 0.10731073, 0.04985793], - [0.31158270, 0.10845962, 0.05081968], - [0.31825437, 0.10955144, 0.05179469], - [0.32494588, 0.11058558, 0.05278533], - [0.33165741, 0.11156121, 0.05379388], - [0.33838918, 0.11247734, 0.05482253], - [0.34514146, 0.11333282, 0.05587349], - [0.35191413, 0.11412692, 0.05694939], - [0.35870733, 0.11485850, 0.05805261], - [0.36552140, 0.11552606, 0.05918537], - [0.37235602, 0.11612887, 0.06035055], - [0.37921149, 0.11666531, 0.06155047], - [0.38608774, 0.11713411, 0.06278785], - [0.39298465, 0.11753398, 0.06406542], - [0.39990243, 0.11786308, 0.06538571], - [0.40684070, 0.11812026, 0.06675174], - [0.41379968, 0.11830340, 0.06816610], - [0.42077900, 0.11841110, 0.06963182], - [0.42777857, 0.11844140, 0.07115178], - [0.43479835, 0.11839213, 0.07272887], - [0.44183779, 0.11826176, 0.07436631], - [0.44889692, 0.11804763, 0.07606698], - [0.45597537, 0.11774759, 0.07783407], - [0.46307262, 0.11735955, 0.07967086], - [0.47018828, 0.11688094, 0.08158056], - [0.47732206, 0.11630887, 0.08356643], - [0.48447342, 0.11564059, 0.08563184], - [0.49164167, 0.11487339, 0.08778027], - [0.49882616, 0.11400421, 0.09001524], - [0.50602619, 0.11302981, 0.09234030], - [0.51324096, 0.11194681, 0.09475911], - [0.52046957, 0.11075165, 0.09727541], - [0.52771103, 0.10944063, 0.09989300], - [0.53496423, 0.10800987, 0.10261578], - [0.54222828, 0.10645458, 0.10544773], - [0.54950158, 0.10477099, 0.10839295], - [0.55678265, 0.10295467, 0.11145561], - [0.56407005, 0.10100050, 0.11463998], - [0.57136221, 0.09890294, 0.11795046], - [0.57865683, 0.09665778, 0.12139144], - [0.58595251, 0.09425758, 0.12496762], - [0.59324637, 0.09169820, 0.12868351], - [0.60053647, 0.08897198, 0.13254399], - [0.60781996, 0.08607290, 0.13655381], - [0.61509391, 0.08299424, 0.14071783], - [0.62235528, 0.07972847, 0.14504098], - [0.62960086, 0.07626735, 0.14952833], - [0.63682690, 0.07260321, 0.15418475], - [0.64402945, 0.06872768, 0.15901515], - [0.65120429, 0.06463189, 0.16402435], - [0.65834703, 0.06030595, 0.16921717], - [0.66545273, 0.05574060, 0.17459807], - [0.67251615, 0.05092618, 0.18017123], - [0.67953179, 0.04585268, 0.18594053], - [0.68649408, 0.04050791, 0.19190990], - [0.69339656, 0.03501827, 0.19808181], - [0.70023310, 0.02974032, 0.20445918], - [0.70699677, 0.02473108, 0.21104325], - [0.71368081, 0.02004735, 0.21783521], - [0.72027805, 0.01575128, 0.22483488], - [0.72678121, 0.01190847, 0.23204104], - [0.73318299, 
0.00858729, 0.23945145], - [0.73947609, 0.00585900, 0.24706262], - [0.74565328, 0.00379723, 0.25486974], - [0.75170751, 0.00247734, 0.26286660], - [0.75763201, 0.00197573, 0.27104565], - [0.76342035, 0.00236912, 0.27939796], - [0.76906659, 0.00373375, 0.28791328], - [0.77456531, 0.00614457, 0.29658016], - [0.77991170, 0.00967453, 0.30538600], - [0.78510166, 0.01439382, 0.31431727], - [0.79013176, 0.02036922, 0.32335963], - [0.79499936, 0.02766356, 0.33249813], - [0.79970258, 0.03633527, 0.34171740], - [0.80424028, 0.04610137, 0.35100187], - [0.80861206, 0.05593074, 0.36033595], - [0.81281824, 0.06575513, 0.36970423], - [0.81685977, 0.07556701, 0.37909164], - [0.82073820, 0.08536045, 0.38848361], - [0.82445563, 0.09513050, 0.39786621], - [0.82801462, 0.10487292, 0.40722623], - [0.83141814, 0.11458394, 0.41655122], - [0.83466964, 0.12426002, 0.42582926], - [0.83777258, 0.13389850, 0.43505012], - [0.84073089, 0.14349659, 0.44420371], - [0.84354864, 0.15305194, 0.45328109], - [0.84622995, 0.16256264, 0.46227431], - [0.84877908, 0.17202698, 0.47117623], - [0.85120054, 0.18144313, 0.47998013], - [0.85349849, 0.19081025, 0.48868085], - [0.85567734, 0.20012720, 0.49727347], - [0.85774150, 0.20939307, 0.50575378], - [0.85969539, 0.21860703, 0.51411817], - [0.86154321, 0.22776871, 0.52236389], - [0.86328918, 0.23687774, 0.53048865], - [0.86493759, 0.24593368, 0.53849050], - [0.86649243, 0.25493655, 0.54636825], - [0.86795766, 0.26388635, 0.55412108], - [0.86933714, 0.27278325, 0.56174857], - [0.87063488, 0.28162708, 0.56925039], - [0.87185473, 0.29041795, 0.57662667], - [0.87299987, 0.29915672, 0.58387836], - [0.87407470, 0.30784267, 0.59100548], - [0.87508176, 0.31647731, 0.59800984], - [0.87602545, 0.32505984, 0.60489185], - [0.87690829, 0.33359164, 0.61165350], - [0.87773379, 0.34207284, 0.61829617], - [0.87850545, 0.35050356, 0.62482133], - [0.87922592, 0.35888478, 0.63123109], - [0.87989827, 0.36721697, 0.63752735], - [0.88052548, 0.37550059, 0.64371209], - [0.88111058, 0.38373605, 0.64978738], - [0.88165635, 0.39192396, 0.65575540], - [0.88216538, 0.40006502, 0.66161845], - [0.88264034, 0.40815983, 0.66737883], - [0.88308383, 0.41620898, 0.67303885], - [0.88349837, 0.42421311, 0.67860087], - [0.88388658, 0.43217272, 0.68406723], - [0.88425089, 0.44008842, 0.68944031], - [0.88459352, 0.44796098, 0.69472256], - [0.88491674, 0.45579107, 0.69991638], - [0.88522277, 0.46357936, 0.70502418], - [0.88551386, 0.47132645, 0.71004831], - [0.88579260, 0.47903263, 0.71499109], - [0.88606054, 0.48669904, 0.71985498], - [0.88631967, 0.49432634, 0.72464230], - [0.88657273, 0.50191463, 0.72935531], - [0.88682100, 0.50946512, 0.73399636], - [0.88706656, 0.51697833, 0.73856771], - [0.88731166, 0.52445464, 0.74307157], - [0.88755748, 0.53189523, 0.74751019], - [0.88780677, 0.53930002, 0.75188571], - [0.88806029, 0.54667042, 0.75620029], - [0.88832077, 0.55400637, 0.76045604], - [0.88858898, 0.56130917, 0.76465503], - [0.88886751, 0.56857881, 0.76879932], - [0.88915723, 0.57581648, 0.77289087], - [0.88946027, 0.58302245, 0.77693169], - [0.88977801, 0.59019749, 0.78092369], - [0.89011184, 0.59734231, 0.78486874], - [0.89046385, 0.60445719, 0.78876876], - [0.89083498, 0.61154309, 0.79262552], - [0.89122688, 0.61860051, 0.79644080], - [0.89164127, 0.62562987, 0.80021639], - [0.89207922, 0.63263202, 0.80395396], - [0.89254218, 0.63960749, 0.80765517], - [0.89303193, 0.64655664, 0.81132175], - [0.89354946, 0.65348027, 0.81495521], - [0.89409613, 0.66037894, 0.81855714], - [0.89467341, 0.66725312, 0.82212908], - 
[0.89528268, 0.67410333, 0.82567258], - [0.89592507, 0.68093022, 0.82918904], - [0.89660188, 0.68773430, 0.83267991], - [0.89731440, 0.69451609, 0.83614660], - [0.89806405, 0.70127602, 0.83959053], - [0.89885189, 0.70801470, 0.84301299], - [0.89967918, 0.71473262, 0.84641529], - [0.90054714, 0.72143026, 0.84979872], - [0.90145701, 0.72810810, 0.85316454], - [0.90241007, 0.73476657, 0.85651399], - [0.90340743, 0.74140617, 0.85984825], - [0.90445031, 0.74802735, 0.86316849], - [0.90553992, 0.75463054, 0.86647585], - [0.90667746, 0.76121615, 0.86977146], - [0.90786415, 0.76778459, 0.87305641], - [0.90910120, 0.77433626, 0.87633178], - [0.91038981, 0.78087154, 0.87959861], - [0.91173124, 0.78739078, 0.88285793], - [0.91312673, 0.79389433, 0.88611074], - [0.91457758, 0.80038249, 0.88935803], - [0.91608500, 0.80685562, 0.89260074], - [0.91765039, 0.81331396, 0.89583983], - [0.91927511, 0.81975775, 0.89907623], - [0.92096059, 0.82618722, 0.90231088], - [0.92270830, 0.83260254, 0.90554466], - [0.92451964, 0.83900395, 0.90877841], - [0.92639632, 0.84539150, 0.91201305], - [0.92834008, 0.85176524, 0.91524947], - [0.93035272, 0.85812518, 0.91848857], - [0.93243609, 0.86447132, 0.92173117], - [0.93459223, 0.87080356, 0.92497815], - [0.93682359, 0.87712161, 0.92823055], - [0.93913266, 0.88342515, 0.93148937], - [0.94152187, 0.88971391, 0.93475546], - [0.94399458, 0.89598719, 0.93803021], - [0.94655427, 0.90224421, 0.94131502], - [0.94920436, 0.90848425, 0.94461125], - ] - -cm_data2 = [[0.00000000, 0.00000000, 0.00000000], - [0.00028691, 0.00020835, 0.00028279], - [0.00102421, 0.00070903, 0.00101021], - [0.00218033, 0.00144242, 0.00214845], - [0.00375280, 0.00237790, 0.00368891], - [0.00574727, 0.00349371, 0.00562841], - [0.00817359, 0.00477242, 0.00796563], - [0.01104432, 0.00619914, 0.01069976], - [0.01437378, 0.00776073, 0.01382970], - [0.01817764, 0.00944524, 0.01735364], - [0.02247277, 0.01124162, 0.02126897], - [0.02727694, 0.01313949, 0.02557207], - [0.03260869, 0.01512908, 0.03025819], - [0.03848721, 0.01720107, 0.03532137], - [0.04472223, 0.01934661, 0.04074862], - [0.05095008, 0.02155723, 0.04620189], - [0.05718085, 0.02382484, 0.05156892], - [0.06341877, 0.02614168, 0.05685075], - [0.06966727, 0.02850036, 0.06204782], - [0.07592916, 0.03089381, 0.06716019], - [0.08220666, 0.03331529, 0.07218757], - [0.08850155, 0.03575837, 0.07712945], - [0.09481532, 0.03821687, 0.08198520], - [0.10114895, 0.04068063, 0.08675399], - [0.10750319, 0.04306161, 0.09143498], - [0.11387855, 0.04536332, 0.09602729], - [0.12027537, 0.04758808, 0.10053004], - [0.12669388, 0.04973801, 0.10494242], - [0.13313410, 0.05181515, 0.10926361], - [0.13959587, 0.05382147, 0.11349284], - [0.14607903, 0.05575879, 0.11762946], - [0.15258333, 0.05762879, 0.12167284], - [0.15910850, 0.05943303, 0.12562246], - [0.16565413, 0.06117310, 0.12947786], - [0.17221981, 0.06285040, 0.13323866], - [0.17880518, 0.06446624, 0.13690456], - [0.18540980, 0.06602187, 0.14047531], - [0.19203321, 0.06751848, 0.14395075], - [0.19867499, 0.06895715, 0.14733079], - [0.20533472, 0.07033887, 0.15061537], - [0.21201197, 0.07166460, 0.15380450], - [0.21870632, 0.07293518, 0.15689824], - [0.22541736, 0.07415142, 0.15989669], - [0.23214472, 0.07531401, 0.16279996], - [0.23888802, 0.07642364, 0.16560823], - [0.24564687, 0.07748088, 0.16832171], - [0.25242097, 0.07848626, 0.17094058], - [0.25920996, 0.07944023, 0.17346508], - [0.26601352, 0.08034324, 0.17589547], - [0.27283134, 0.08119562, 0.17823199], - [0.27966317, 0.08199764, 0.18047489], - [0.28650868, 
0.08274959, 0.18262446], - [0.29336760, 0.08345167, 0.18468096], - [0.30023971, 0.08410396, 0.18664460], - [0.30712474, 0.08470663, 0.18851568], - [0.31402240, 0.08525975, 0.19029445], - [0.32093251, 0.08576327, 0.19198110], - [0.32785482, 0.08621717, 0.19357587], - [0.33478905, 0.08662148, 0.19507899], - [0.34173503, 0.08697601, 0.19649062], - [0.34869254, 0.08728060, 0.19781092], - [0.35566125, 0.08753522, 0.19904011], - [0.36264104, 0.08773953, 0.20017823], - [0.36963165, 0.08789334, 0.20122542], - [0.37663272, 0.08799656, 0.20218186], - [0.38364424, 0.08804859, 0.20304740], - [0.39066574, 0.08804944, 0.20382227], - [0.39769703, 0.08799872, 0.20450641], - [0.40473792, 0.08789596, 0.20509971], - [0.41178790, 0.08774121, 0.20560237], - [0.41884704, 0.08753353, 0.20601388], - [0.42591463, 0.08727325, 0.20633459], - [0.43299069, 0.08695948, 0.20656394], - [0.44007455, 0.08659242, 0.20670212], - [0.44716616, 0.08617128, 0.20674851], - [0.45426479, 0.08569637, 0.20670331], - [0.46137042, 0.08516677, 0.20656566], - [0.46848219, 0.08458313, 0.20633582], - [0.47560004, 0.08394454, 0.20601280], - [0.48272316, 0.08325159, 0.20559662], - [0.48985104, 0.08250434, 0.20508677], - [0.49698340, 0.08170242, 0.20448225], - [0.50411927, 0.08084690, 0.20378304], - [0.51125803, 0.07993830, 0.20298844], - [0.51839929, 0.07897664, 0.20209721], - [0.52554202, 0.07796358, 0.20110904], - [0.53268538, 0.07690049, 0.20002312], - [0.53982852, 0.07578902, 0.19883855], - [0.54697049, 0.07463129, 0.19755431], - [0.55411028, 0.07342990, 0.19616934], - [0.56124678, 0.07218810, 0.19468248], - [0.56837880, 0.07090985, 0.19309253], - [0.57550502, 0.06959997, 0.19139818], - [0.58262400, 0.06826431, 0.18959809], - [0.58973418, 0.06690989, 0.18769083], - [0.59683382, 0.06554515, 0.18567490], - [0.60392106, 0.06418012, 0.18354875], - [0.61099403, 0.06282598, 0.18131023], - [0.61805061, 0.06149625, 0.17895730], - [0.62508803, 0.06020822, 0.17648890], - [0.63210426, 0.05897851, 0.17390136], - [0.63909578, 0.05783082, 0.17119418], - [0.64606007, 0.05678752, 0.16836327], - [0.65299326, 0.05587785, 0.16540731], - [0.65989160, 0.05513269, 0.16232365], - [0.66675096, 0.05458598, 0.15910942], - [0.67356680, 0.05427454, 0.15576179], - [0.68033403, 0.05423761, 0.15227799], - [0.68704706, 0.05451589, 0.14865546], - [0.69369969, 0.05515040, 0.14489185], - [0.70028509, 0.05618108, 0.14098519], - [0.70679624, 0.05764355, 0.13693176], - [0.71322465, 0.05957213, 0.13273203], - [0.71956187, 0.06199294, 0.12838347], - [0.72579832, 0.06492701, 0.12388673], - [0.73192387, 0.06838759, 0.11924309], - [0.73792785, 0.07238015, 0.11445523], - [0.74379911, 0.07690258, 0.10952793], - [0.74952631, 0.08194530, 0.10446780], - [0.75509807, 0.08749192, 0.09928513], - [0.76050344, 0.09351949, 0.09399345], - [0.76573234, 0.09999923, 0.08860931], - [0.77077595, 0.10689714, 0.08315390], - [0.77562724, 0.11417469, 0.07765262], - [0.78028137, 0.12178994, 0.07213493], - [0.78473594, 0.12969861, 0.06663478], - [0.78899120, 0.13785534, 0.06119075], - [0.79304987, 0.14621526, 0.05584590], - [0.79691698, 0.15473527, 0.05064835], - [0.80059949, 0.16337512, 0.04565234], - [0.80410578, 0.17209842, 0.04091877], - [0.80744502, 0.18087354, 0.03656330], - [0.81062721, 0.18967261, 0.03284897], - [0.81366202, 0.19847328, 0.02978095], - [0.81655911, 0.20725703, 0.02735425], - [0.81932773, 0.21600901, 0.02556368], - [0.82197656, 0.22471783, 0.02440445], - [0.82451354, 0.23337504, 0.02387282], - [0.82694588, 0.24197470, 0.02396658], - [0.82928000, 0.25051291, 0.02468537], - 
[0.83152234, 0.25898625, 0.02603161], - [0.83367755, 0.26739445, 0.02800850], - [0.83575119, 0.27573587, 0.03062270], - [0.83774693, 0.28401176, 0.03388176], - [0.83966871, 0.29222281, 0.03779577], - [0.84152000, 0.30037020, 0.04231855], - [0.84330390, 0.30845547, 0.04718171], - [0.84502314, 0.31648042, 0.05232334], - [0.84668012, 0.32444703, 0.05769850], - [0.84827700, 0.33235739, 0.06327080], - [0.84981598, 0.34021329, 0.06901096], - [0.85129899, 0.34801660, 0.07489554], - [0.85272715, 0.35576999, 0.08090629], - [0.85410285, 0.36347441, 0.08702799], - [0.85542653, 0.37113285, 0.09324952], - [0.85670046, 0.37874607, 0.09956104], - [0.85792511, 0.38631664, 0.10595570], - [0.85910167, 0.39384615, 0.11242769], - [0.86023184, 0.40133560, 0.11897200], - [0.86131603, 0.40878710, 0.12558544], - [0.86235527, 0.41620202, 0.13226519], - [0.86335049, 0.42358173, 0.13900904], - [0.86430261, 0.43092748, 0.14581530], - [0.86521249, 0.43824051, 0.15268270], - [0.86608094, 0.44552198, 0.15961030], - [0.86690878, 0.45277298, 0.16659744], - [0.86769678, 0.45999455, 0.17364368], - [0.86844571, 0.46718767, 0.18074877], - [0.86915633, 0.47435325, 0.18791261], - [0.86982940, 0.48149217, 0.19513520], - [0.87046566, 0.48860521, 0.20241667], - [0.87106589, 0.49569313, 0.20975721], - [0.87163086, 0.50275663, 0.21715708], - [0.87216162, 0.50979614, 0.22461634], - [0.87265881, 0.51681240, 0.23213553], - [0.87312317, 0.52380600, 0.23971510], - [0.87355555, 0.53077744, 0.24735548], - [0.87395712, 0.53772697, 0.25505684], - [0.87432861, 0.54465512, 0.26281981], - [0.87467085, 0.55156232, 0.27064498], - [0.87498503, 0.55844876, 0.27853263], - [0.87527217, 0.56531471, 0.28648326], - [0.87553313, 0.57216055, 0.29449756], - [0.87576930, 0.57898630, 0.30257577], - [0.87598171, 0.58579221, 0.31071851], - [0.87617147, 0.59257844, 0.31892638], - [0.87634020, 0.59934489, 0.32719953], - [0.87648888, 0.60609181, 0.33553878], - [0.87661914, 0.61281908, 0.34394439], - [0.87673240, 0.61952670, 0.35241687], - [0.87683016, 0.62621463, 0.36095669], - [0.87691421, 0.63288268, 0.36956410], - [0.87698607, 0.63953083, 0.37823972], - [0.87704779, 0.64615877, 0.38698363], - [0.87710104, 0.65276640, 0.39579639], - [0.87714801, 0.65935338, 0.40467811], - [0.87719069, 0.66591948, 0.41362916], - [0.87723137, 0.67246435, 0.42264965], - [0.87727233, 0.67898764, 0.43173978], - [0.87731605, 0.68548896, 0.44089961], - [0.87736509, 0.69196788, 0.45012917], - [0.87742214, 0.69842394, 0.45942844], - [0.87749005, 0.70485663, 0.46879727], - [0.87757175, 0.71126545, 0.47823549], - [0.87767038, 0.71764981, 0.48774277], - [0.87778914, 0.72400915, 0.49731878], - [0.87793145, 0.73034282, 0.50696296], - [0.87810081, 0.73665020, 0.51667477], - [0.87830092, 0.74293060, 0.52645341], - [0.87853556, 0.74918334, 0.53629808], - [0.87880873, 0.75540769, 0.54620771], - [0.87912449, 0.76160293, 0.55618122], - [0.87948712, 0.76776830, 0.56621720], - [0.87990092, 0.77390307, 0.57631429], - [0.88037047, 0.78000643, 0.58647070], - [0.88090027, 0.78607767, 0.59668473], - [0.88149514, 0.79211598, 0.60695418], - [0.88215974, 0.79812065, 0.61727700], - [0.88289909, 0.80409090, 0.62765056], - [0.88371798, 0.81002606, 0.63807240], - [0.88462153, 0.81592540, 0.64853946], - [0.88561459, 0.82178829, 0.65904886], - [0.88670229, 0.82761408, 0.66959711], - [0.88788952, 0.83340224, 0.68018083], - [0.88918122, 0.83915225, 0.69079625], - [0.89058234, 0.84486362, 0.70143930], - [0.89209744, 0.85053601, 0.71210615], - [0.89373153, 0.85616903, 0.72279183], - [0.89548875, 0.86176252, 
0.73349245], - [0.89737373, 0.86731625, 0.74420272], - [0.89939058, 0.87283016, 0.75491787], - [0.90154313, 0.87830429, 0.76563309], - [0.90383561, 0.88373862, 0.77634217], - [0.90627132, 0.88913338, 0.78704028], - [0.90885368, 0.89448881, 0.79772179], - [0.91158625, 0.89980515, 0.80838000], - [0.91447204, 0.90508277, 0.81900898], - [0.91751403, 0.91032207, 0.82960244], - [0.92071527, 0.91552347, 0.84015333], - [0.92407894, 0.92068737, 0.85065379], - [0.92760832, 0.92581419, 0.86109531], - [0.93130674, 0.93090430, 0.87146916], - [0.93517804, 0.93595804, 0.88176475], - [0.93922654, 0.94097572, 0.89196965], - [0.94345707, 0.94595767, 0.90206897], - [0.94787482, 0.95090438, 0.91204440], - ] - -CMAP_EXPOSURES = mpl.colors.LinearSegmentedColormap.from_list('cmr.sunburst', cm_data1, N=256).\ - reversed() +cm_data1 = [ + [0.00000000, 0.00000000, 0.00000000], + [0.00032031, 0.00020876, 0.00015576], + [0.00115213, 0.00071222, 0.00050933], + [0.00246632, 0.00145292, 0.00099932], + [0.00426111, 0.00240248, 0.00159470], + [0.00654129, 0.00354149, 0.00227479], + [0.00931453, 0.00485497, 0.00302435], + [0.01259008, 0.00633067, 0.00383153], + [0.01637810, 0.00795809, 0.00468676], + [0.02068947, 0.00972796, 0.00558214], + [0.02553552, 0.01163194, 0.00651101], + [0.03092793, 0.01366243, 0.00746771], + [0.03687870, 0.01581232, 0.00844736], + [0.04329108, 0.01807499, 0.00944575], + [0.04970018, 0.02044415, 0.01045917], + [0.05607744, 0.02291381, 0.01148441], + [0.06242826, 0.02547822, 0.01251862], + [0.06875727, 0.02813185, 0.01355932], + [0.07506844, 0.03086930, 0.01460431], + [0.08136524, 0.03368535, 0.01565167], + [0.08765071, 0.03657489, 0.01669973], + [0.09392754, 0.03953289, 0.01774700], + [0.10019812, 0.04248851, 0.01879222], + [0.10646459, 0.04536893, 0.01983431], + [0.11272888, 0.04818555, 0.02087234], + [0.11899272, 0.05094021, 0.02190555], + [0.12525770, 0.05363453, 0.02293331], + [0.13152527, 0.05626994, 0.02395516], + [0.13779673, 0.05884770, 0.02497073], + [0.14407332, 0.06136894, 0.02597979], + [0.15035614, 0.06383462, 0.02698225], + [0.15664624, 0.06624561, 0.02797810], + [0.16294457, 0.06860266, 0.02896747], + [0.16925203, 0.07090640, 0.02995057], + [0.17556946, 0.07315739, 0.03092776], + [0.18189762, 0.07535608, 0.03189947], + [0.18823726, 0.07750287, 0.03286623], + [0.19458905, 0.07959805, 0.03382870], + [0.20095364, 0.08164185, 0.03478764], + [0.20733163, 0.08363445, 0.03574389], + [0.21372359, 0.08557593, 0.03669841], + [0.22013006, 0.08746634, 0.03765228], + [0.22655154, 0.08930565, 0.03860667], + [0.23298852, 0.09109380, 0.03956286], + [0.23944144, 0.09283065, 0.04052097], + [0.24591073, 0.09451600, 0.04146142], + [0.25239679, 0.09614964, 0.04239527], + [0.25890000, 0.09773126, 0.04332440], + [0.26542072, 0.09926052, 0.04425071], + [0.27195929, 0.10073705, 0.04517610], + [0.27851612, 0.10216029, 0.04610242], + [0.28509144, 0.10352983, 0.04703172], + [0.29168551, 0.10484515, 0.04796603], + [0.29829858, 0.10610566, 0.04890741], + [0.30493089, 0.10731073, 0.04985793], + [0.31158270, 0.10845962, 0.05081968], + [0.31825437, 0.10955144, 0.05179469], + [0.32494588, 0.11058558, 0.05278533], + [0.33165741, 0.11156121, 0.05379388], + [0.33838918, 0.11247734, 0.05482253], + [0.34514146, 0.11333282, 0.05587349], + [0.35191413, 0.11412692, 0.05694939], + [0.35870733, 0.11485850, 0.05805261], + [0.36552140, 0.11552606, 0.05918537], + [0.37235602, 0.11612887, 0.06035055], + [0.37921149, 0.11666531, 0.06155047], + [0.38608774, 0.11713411, 0.06278785], + [0.39298465, 0.11753398, 0.06406542], + 
[0.39990243, 0.11786308, 0.06538571], + [0.40684070, 0.11812026, 0.06675174], + [0.41379968, 0.11830340, 0.06816610], + [0.42077900, 0.11841110, 0.06963182], + [0.42777857, 0.11844140, 0.07115178], + [0.43479835, 0.11839213, 0.07272887], + [0.44183779, 0.11826176, 0.07436631], + [0.44889692, 0.11804763, 0.07606698], + [0.45597537, 0.11774759, 0.07783407], + [0.46307262, 0.11735955, 0.07967086], + [0.47018828, 0.11688094, 0.08158056], + [0.47732206, 0.11630887, 0.08356643], + [0.48447342, 0.11564059, 0.08563184], + [0.49164167, 0.11487339, 0.08778027], + [0.49882616, 0.11400421, 0.09001524], + [0.50602619, 0.11302981, 0.09234030], + [0.51324096, 0.11194681, 0.09475911], + [0.52046957, 0.11075165, 0.09727541], + [0.52771103, 0.10944063, 0.09989300], + [0.53496423, 0.10800987, 0.10261578], + [0.54222828, 0.10645458, 0.10544773], + [0.54950158, 0.10477099, 0.10839295], + [0.55678265, 0.10295467, 0.11145561], + [0.56407005, 0.10100050, 0.11463998], + [0.57136221, 0.09890294, 0.11795046], + [0.57865683, 0.09665778, 0.12139144], + [0.58595251, 0.09425758, 0.12496762], + [0.59324637, 0.09169820, 0.12868351], + [0.60053647, 0.08897198, 0.13254399], + [0.60781996, 0.08607290, 0.13655381], + [0.61509391, 0.08299424, 0.14071783], + [0.62235528, 0.07972847, 0.14504098], + [0.62960086, 0.07626735, 0.14952833], + [0.63682690, 0.07260321, 0.15418475], + [0.64402945, 0.06872768, 0.15901515], + [0.65120429, 0.06463189, 0.16402435], + [0.65834703, 0.06030595, 0.16921717], + [0.66545273, 0.05574060, 0.17459807], + [0.67251615, 0.05092618, 0.18017123], + [0.67953179, 0.04585268, 0.18594053], + [0.68649408, 0.04050791, 0.19190990], + [0.69339656, 0.03501827, 0.19808181], + [0.70023310, 0.02974032, 0.20445918], + [0.70699677, 0.02473108, 0.21104325], + [0.71368081, 0.02004735, 0.21783521], + [0.72027805, 0.01575128, 0.22483488], + [0.72678121, 0.01190847, 0.23204104], + [0.73318299, 0.00858729, 0.23945145], + [0.73947609, 0.00585900, 0.24706262], + [0.74565328, 0.00379723, 0.25486974], + [0.75170751, 0.00247734, 0.26286660], + [0.75763201, 0.00197573, 0.27104565], + [0.76342035, 0.00236912, 0.27939796], + [0.76906659, 0.00373375, 0.28791328], + [0.77456531, 0.00614457, 0.29658016], + [0.77991170, 0.00967453, 0.30538600], + [0.78510166, 0.01439382, 0.31431727], + [0.79013176, 0.02036922, 0.32335963], + [0.79499936, 0.02766356, 0.33249813], + [0.79970258, 0.03633527, 0.34171740], + [0.80424028, 0.04610137, 0.35100187], + [0.80861206, 0.05593074, 0.36033595], + [0.81281824, 0.06575513, 0.36970423], + [0.81685977, 0.07556701, 0.37909164], + [0.82073820, 0.08536045, 0.38848361], + [0.82445563, 0.09513050, 0.39786621], + [0.82801462, 0.10487292, 0.40722623], + [0.83141814, 0.11458394, 0.41655122], + [0.83466964, 0.12426002, 0.42582926], + [0.83777258, 0.13389850, 0.43505012], + [0.84073089, 0.14349659, 0.44420371], + [0.84354864, 0.15305194, 0.45328109], + [0.84622995, 0.16256264, 0.46227431], + [0.84877908, 0.17202698, 0.47117623], + [0.85120054, 0.18144313, 0.47998013], + [0.85349849, 0.19081025, 0.48868085], + [0.85567734, 0.20012720, 0.49727347], + [0.85774150, 0.20939307, 0.50575378], + [0.85969539, 0.21860703, 0.51411817], + [0.86154321, 0.22776871, 0.52236389], + [0.86328918, 0.23687774, 0.53048865], + [0.86493759, 0.24593368, 0.53849050], + [0.86649243, 0.25493655, 0.54636825], + [0.86795766, 0.26388635, 0.55412108], + [0.86933714, 0.27278325, 0.56174857], + [0.87063488, 0.28162708, 0.56925039], + [0.87185473, 0.29041795, 0.57662667], + [0.87299987, 0.29915672, 0.58387836], + [0.87407470, 0.30784267, 
0.59100548], + [0.87508176, 0.31647731, 0.59800984], + [0.87602545, 0.32505984, 0.60489185], + [0.87690829, 0.33359164, 0.61165350], + [0.87773379, 0.34207284, 0.61829617], + [0.87850545, 0.35050356, 0.62482133], + [0.87922592, 0.35888478, 0.63123109], + [0.87989827, 0.36721697, 0.63752735], + [0.88052548, 0.37550059, 0.64371209], + [0.88111058, 0.38373605, 0.64978738], + [0.88165635, 0.39192396, 0.65575540], + [0.88216538, 0.40006502, 0.66161845], + [0.88264034, 0.40815983, 0.66737883], + [0.88308383, 0.41620898, 0.67303885], + [0.88349837, 0.42421311, 0.67860087], + [0.88388658, 0.43217272, 0.68406723], + [0.88425089, 0.44008842, 0.68944031], + [0.88459352, 0.44796098, 0.69472256], + [0.88491674, 0.45579107, 0.69991638], + [0.88522277, 0.46357936, 0.70502418], + [0.88551386, 0.47132645, 0.71004831], + [0.88579260, 0.47903263, 0.71499109], + [0.88606054, 0.48669904, 0.71985498], + [0.88631967, 0.49432634, 0.72464230], + [0.88657273, 0.50191463, 0.72935531], + [0.88682100, 0.50946512, 0.73399636], + [0.88706656, 0.51697833, 0.73856771], + [0.88731166, 0.52445464, 0.74307157], + [0.88755748, 0.53189523, 0.74751019], + [0.88780677, 0.53930002, 0.75188571], + [0.88806029, 0.54667042, 0.75620029], + [0.88832077, 0.55400637, 0.76045604], + [0.88858898, 0.56130917, 0.76465503], + [0.88886751, 0.56857881, 0.76879932], + [0.88915723, 0.57581648, 0.77289087], + [0.88946027, 0.58302245, 0.77693169], + [0.88977801, 0.59019749, 0.78092369], + [0.89011184, 0.59734231, 0.78486874], + [0.89046385, 0.60445719, 0.78876876], + [0.89083498, 0.61154309, 0.79262552], + [0.89122688, 0.61860051, 0.79644080], + [0.89164127, 0.62562987, 0.80021639], + [0.89207922, 0.63263202, 0.80395396], + [0.89254218, 0.63960749, 0.80765517], + [0.89303193, 0.64655664, 0.81132175], + [0.89354946, 0.65348027, 0.81495521], + [0.89409613, 0.66037894, 0.81855714], + [0.89467341, 0.66725312, 0.82212908], + [0.89528268, 0.67410333, 0.82567258], + [0.89592507, 0.68093022, 0.82918904], + [0.89660188, 0.68773430, 0.83267991], + [0.89731440, 0.69451609, 0.83614660], + [0.89806405, 0.70127602, 0.83959053], + [0.89885189, 0.70801470, 0.84301299], + [0.89967918, 0.71473262, 0.84641529], + [0.90054714, 0.72143026, 0.84979872], + [0.90145701, 0.72810810, 0.85316454], + [0.90241007, 0.73476657, 0.85651399], + [0.90340743, 0.74140617, 0.85984825], + [0.90445031, 0.74802735, 0.86316849], + [0.90553992, 0.75463054, 0.86647585], + [0.90667746, 0.76121615, 0.86977146], + [0.90786415, 0.76778459, 0.87305641], + [0.90910120, 0.77433626, 0.87633178], + [0.91038981, 0.78087154, 0.87959861], + [0.91173124, 0.78739078, 0.88285793], + [0.91312673, 0.79389433, 0.88611074], + [0.91457758, 0.80038249, 0.88935803], + [0.91608500, 0.80685562, 0.89260074], + [0.91765039, 0.81331396, 0.89583983], + [0.91927511, 0.81975775, 0.89907623], + [0.92096059, 0.82618722, 0.90231088], + [0.92270830, 0.83260254, 0.90554466], + [0.92451964, 0.83900395, 0.90877841], + [0.92639632, 0.84539150, 0.91201305], + [0.92834008, 0.85176524, 0.91524947], + [0.93035272, 0.85812518, 0.91848857], + [0.93243609, 0.86447132, 0.92173117], + [0.93459223, 0.87080356, 0.92497815], + [0.93682359, 0.87712161, 0.92823055], + [0.93913266, 0.88342515, 0.93148937], + [0.94152187, 0.88971391, 0.93475546], + [0.94399458, 0.89598719, 0.93803021], + [0.94655427, 0.90224421, 0.94131502], + [0.94920436, 0.90848425, 0.94461125], +] + +cm_data2 = [ + [0.00000000, 0.00000000, 0.00000000], + [0.00028691, 0.00020835, 0.00028279], + [0.00102421, 0.00070903, 0.00101021], + [0.00218033, 0.00144242, 0.00214845], + 
[0.00375280, 0.00237790, 0.00368891], + [0.00574727, 0.00349371, 0.00562841], + [0.00817359, 0.00477242, 0.00796563], + [0.01104432, 0.00619914, 0.01069976], + [0.01437378, 0.00776073, 0.01382970], + [0.01817764, 0.00944524, 0.01735364], + [0.02247277, 0.01124162, 0.02126897], + [0.02727694, 0.01313949, 0.02557207], + [0.03260869, 0.01512908, 0.03025819], + [0.03848721, 0.01720107, 0.03532137], + [0.04472223, 0.01934661, 0.04074862], + [0.05095008, 0.02155723, 0.04620189], + [0.05718085, 0.02382484, 0.05156892], + [0.06341877, 0.02614168, 0.05685075], + [0.06966727, 0.02850036, 0.06204782], + [0.07592916, 0.03089381, 0.06716019], + [0.08220666, 0.03331529, 0.07218757], + [0.08850155, 0.03575837, 0.07712945], + [0.09481532, 0.03821687, 0.08198520], + [0.10114895, 0.04068063, 0.08675399], + [0.10750319, 0.04306161, 0.09143498], + [0.11387855, 0.04536332, 0.09602729], + [0.12027537, 0.04758808, 0.10053004], + [0.12669388, 0.04973801, 0.10494242], + [0.13313410, 0.05181515, 0.10926361], + [0.13959587, 0.05382147, 0.11349284], + [0.14607903, 0.05575879, 0.11762946], + [0.15258333, 0.05762879, 0.12167284], + [0.15910850, 0.05943303, 0.12562246], + [0.16565413, 0.06117310, 0.12947786], + [0.17221981, 0.06285040, 0.13323866], + [0.17880518, 0.06446624, 0.13690456], + [0.18540980, 0.06602187, 0.14047531], + [0.19203321, 0.06751848, 0.14395075], + [0.19867499, 0.06895715, 0.14733079], + [0.20533472, 0.07033887, 0.15061537], + [0.21201197, 0.07166460, 0.15380450], + [0.21870632, 0.07293518, 0.15689824], + [0.22541736, 0.07415142, 0.15989669], + [0.23214472, 0.07531401, 0.16279996], + [0.23888802, 0.07642364, 0.16560823], + [0.24564687, 0.07748088, 0.16832171], + [0.25242097, 0.07848626, 0.17094058], + [0.25920996, 0.07944023, 0.17346508], + [0.26601352, 0.08034324, 0.17589547], + [0.27283134, 0.08119562, 0.17823199], + [0.27966317, 0.08199764, 0.18047489], + [0.28650868, 0.08274959, 0.18262446], + [0.29336760, 0.08345167, 0.18468096], + [0.30023971, 0.08410396, 0.18664460], + [0.30712474, 0.08470663, 0.18851568], + [0.31402240, 0.08525975, 0.19029445], + [0.32093251, 0.08576327, 0.19198110], + [0.32785482, 0.08621717, 0.19357587], + [0.33478905, 0.08662148, 0.19507899], + [0.34173503, 0.08697601, 0.19649062], + [0.34869254, 0.08728060, 0.19781092], + [0.35566125, 0.08753522, 0.19904011], + [0.36264104, 0.08773953, 0.20017823], + [0.36963165, 0.08789334, 0.20122542], + [0.37663272, 0.08799656, 0.20218186], + [0.38364424, 0.08804859, 0.20304740], + [0.39066574, 0.08804944, 0.20382227], + [0.39769703, 0.08799872, 0.20450641], + [0.40473792, 0.08789596, 0.20509971], + [0.41178790, 0.08774121, 0.20560237], + [0.41884704, 0.08753353, 0.20601388], + [0.42591463, 0.08727325, 0.20633459], + [0.43299069, 0.08695948, 0.20656394], + [0.44007455, 0.08659242, 0.20670212], + [0.44716616, 0.08617128, 0.20674851], + [0.45426479, 0.08569637, 0.20670331], + [0.46137042, 0.08516677, 0.20656566], + [0.46848219, 0.08458313, 0.20633582], + [0.47560004, 0.08394454, 0.20601280], + [0.48272316, 0.08325159, 0.20559662], + [0.48985104, 0.08250434, 0.20508677], + [0.49698340, 0.08170242, 0.20448225], + [0.50411927, 0.08084690, 0.20378304], + [0.51125803, 0.07993830, 0.20298844], + [0.51839929, 0.07897664, 0.20209721], + [0.52554202, 0.07796358, 0.20110904], + [0.53268538, 0.07690049, 0.20002312], + [0.53982852, 0.07578902, 0.19883855], + [0.54697049, 0.07463129, 0.19755431], + [0.55411028, 0.07342990, 0.19616934], + [0.56124678, 0.07218810, 0.19468248], + [0.56837880, 0.07090985, 0.19309253], + [0.57550502, 0.06959997, 
0.19139818], + [0.58262400, 0.06826431, 0.18959809], + [0.58973418, 0.06690989, 0.18769083], + [0.59683382, 0.06554515, 0.18567490], + [0.60392106, 0.06418012, 0.18354875], + [0.61099403, 0.06282598, 0.18131023], + [0.61805061, 0.06149625, 0.17895730], + [0.62508803, 0.06020822, 0.17648890], + [0.63210426, 0.05897851, 0.17390136], + [0.63909578, 0.05783082, 0.17119418], + [0.64606007, 0.05678752, 0.16836327], + [0.65299326, 0.05587785, 0.16540731], + [0.65989160, 0.05513269, 0.16232365], + [0.66675096, 0.05458598, 0.15910942], + [0.67356680, 0.05427454, 0.15576179], + [0.68033403, 0.05423761, 0.15227799], + [0.68704706, 0.05451589, 0.14865546], + [0.69369969, 0.05515040, 0.14489185], + [0.70028509, 0.05618108, 0.14098519], + [0.70679624, 0.05764355, 0.13693176], + [0.71322465, 0.05957213, 0.13273203], + [0.71956187, 0.06199294, 0.12838347], + [0.72579832, 0.06492701, 0.12388673], + [0.73192387, 0.06838759, 0.11924309], + [0.73792785, 0.07238015, 0.11445523], + [0.74379911, 0.07690258, 0.10952793], + [0.74952631, 0.08194530, 0.10446780], + [0.75509807, 0.08749192, 0.09928513], + [0.76050344, 0.09351949, 0.09399345], + [0.76573234, 0.09999923, 0.08860931], + [0.77077595, 0.10689714, 0.08315390], + [0.77562724, 0.11417469, 0.07765262], + [0.78028137, 0.12178994, 0.07213493], + [0.78473594, 0.12969861, 0.06663478], + [0.78899120, 0.13785534, 0.06119075], + [0.79304987, 0.14621526, 0.05584590], + [0.79691698, 0.15473527, 0.05064835], + [0.80059949, 0.16337512, 0.04565234], + [0.80410578, 0.17209842, 0.04091877], + [0.80744502, 0.18087354, 0.03656330], + [0.81062721, 0.18967261, 0.03284897], + [0.81366202, 0.19847328, 0.02978095], + [0.81655911, 0.20725703, 0.02735425], + [0.81932773, 0.21600901, 0.02556368], + [0.82197656, 0.22471783, 0.02440445], + [0.82451354, 0.23337504, 0.02387282], + [0.82694588, 0.24197470, 0.02396658], + [0.82928000, 0.25051291, 0.02468537], + [0.83152234, 0.25898625, 0.02603161], + [0.83367755, 0.26739445, 0.02800850], + [0.83575119, 0.27573587, 0.03062270], + [0.83774693, 0.28401176, 0.03388176], + [0.83966871, 0.29222281, 0.03779577], + [0.84152000, 0.30037020, 0.04231855], + [0.84330390, 0.30845547, 0.04718171], + [0.84502314, 0.31648042, 0.05232334], + [0.84668012, 0.32444703, 0.05769850], + [0.84827700, 0.33235739, 0.06327080], + [0.84981598, 0.34021329, 0.06901096], + [0.85129899, 0.34801660, 0.07489554], + [0.85272715, 0.35576999, 0.08090629], + [0.85410285, 0.36347441, 0.08702799], + [0.85542653, 0.37113285, 0.09324952], + [0.85670046, 0.37874607, 0.09956104], + [0.85792511, 0.38631664, 0.10595570], + [0.85910167, 0.39384615, 0.11242769], + [0.86023184, 0.40133560, 0.11897200], + [0.86131603, 0.40878710, 0.12558544], + [0.86235527, 0.41620202, 0.13226519], + [0.86335049, 0.42358173, 0.13900904], + [0.86430261, 0.43092748, 0.14581530], + [0.86521249, 0.43824051, 0.15268270], + [0.86608094, 0.44552198, 0.15961030], + [0.86690878, 0.45277298, 0.16659744], + [0.86769678, 0.45999455, 0.17364368], + [0.86844571, 0.46718767, 0.18074877], + [0.86915633, 0.47435325, 0.18791261], + [0.86982940, 0.48149217, 0.19513520], + [0.87046566, 0.48860521, 0.20241667], + [0.87106589, 0.49569313, 0.20975721], + [0.87163086, 0.50275663, 0.21715708], + [0.87216162, 0.50979614, 0.22461634], + [0.87265881, 0.51681240, 0.23213553], + [0.87312317, 0.52380600, 0.23971510], + [0.87355555, 0.53077744, 0.24735548], + [0.87395712, 0.53772697, 0.25505684], + [0.87432861, 0.54465512, 0.26281981], + [0.87467085, 0.55156232, 0.27064498], + [0.87498503, 0.55844876, 0.27853263], + [0.87527217, 
0.56531471, 0.28648326], + [0.87553313, 0.57216055, 0.29449756], + [0.87576930, 0.57898630, 0.30257577], + [0.87598171, 0.58579221, 0.31071851], + [0.87617147, 0.59257844, 0.31892638], + [0.87634020, 0.59934489, 0.32719953], + [0.87648888, 0.60609181, 0.33553878], + [0.87661914, 0.61281908, 0.34394439], + [0.87673240, 0.61952670, 0.35241687], + [0.87683016, 0.62621463, 0.36095669], + [0.87691421, 0.63288268, 0.36956410], + [0.87698607, 0.63953083, 0.37823972], + [0.87704779, 0.64615877, 0.38698363], + [0.87710104, 0.65276640, 0.39579639], + [0.87714801, 0.65935338, 0.40467811], + [0.87719069, 0.66591948, 0.41362916], + [0.87723137, 0.67246435, 0.42264965], + [0.87727233, 0.67898764, 0.43173978], + [0.87731605, 0.68548896, 0.44089961], + [0.87736509, 0.69196788, 0.45012917], + [0.87742214, 0.69842394, 0.45942844], + [0.87749005, 0.70485663, 0.46879727], + [0.87757175, 0.71126545, 0.47823549], + [0.87767038, 0.71764981, 0.48774277], + [0.87778914, 0.72400915, 0.49731878], + [0.87793145, 0.73034282, 0.50696296], + [0.87810081, 0.73665020, 0.51667477], + [0.87830092, 0.74293060, 0.52645341], + [0.87853556, 0.74918334, 0.53629808], + [0.87880873, 0.75540769, 0.54620771], + [0.87912449, 0.76160293, 0.55618122], + [0.87948712, 0.76776830, 0.56621720], + [0.87990092, 0.77390307, 0.57631429], + [0.88037047, 0.78000643, 0.58647070], + [0.88090027, 0.78607767, 0.59668473], + [0.88149514, 0.79211598, 0.60695418], + [0.88215974, 0.79812065, 0.61727700], + [0.88289909, 0.80409090, 0.62765056], + [0.88371798, 0.81002606, 0.63807240], + [0.88462153, 0.81592540, 0.64853946], + [0.88561459, 0.82178829, 0.65904886], + [0.88670229, 0.82761408, 0.66959711], + [0.88788952, 0.83340224, 0.68018083], + [0.88918122, 0.83915225, 0.69079625], + [0.89058234, 0.84486362, 0.70143930], + [0.89209744, 0.85053601, 0.71210615], + [0.89373153, 0.85616903, 0.72279183], + [0.89548875, 0.86176252, 0.73349245], + [0.89737373, 0.86731625, 0.74420272], + [0.89939058, 0.87283016, 0.75491787], + [0.90154313, 0.87830429, 0.76563309], + [0.90383561, 0.88373862, 0.77634217], + [0.90627132, 0.88913338, 0.78704028], + [0.90885368, 0.89448881, 0.79772179], + [0.91158625, 0.89980515, 0.80838000], + [0.91447204, 0.90508277, 0.81900898], + [0.91751403, 0.91032207, 0.82960244], + [0.92071527, 0.91552347, 0.84015333], + [0.92407894, 0.92068737, 0.85065379], + [0.92760832, 0.92581419, 0.86109531], + [0.93130674, 0.93090430, 0.87146916], + [0.93517804, 0.93595804, 0.88176475], + [0.93922654, 0.94097572, 0.89196965], + [0.94345707, 0.94595767, 0.90206897], + [0.94787482, 0.95090438, 0.91204440], +] + +CMAP_EXPOSURES = mpl.colors.LinearSegmentedColormap.from_list( + "cmr.sunburst", cm_data1, N=256 +).reversed() """Default sequential colormaps, taken from https://cmasher.readthedocs.io/index.html""" -CMAP_EXPOSURES.set_under('lightgray') +CMAP_EXPOSURES.set_under("lightgray") -CMAP_IMPACT = mpl.colors.LinearSegmentedColormap.from_list('cmr.flamingo', cm_data2, N=256).\ - reversed() +CMAP_IMPACT = mpl.colors.LinearSegmentedColormap.from_list( + "cmr.flamingo", cm_data2, N=256 +).reversed() """Default sequential colormaps, taken from https://cmasher.readthedocs.io/index.html""" -CMAP_IMPACT.set_under('lightgray') +CMAP_IMPACT.set_under("lightgray") -CMAP_RASTER = 'viridis' +CMAP_RASTER = "viridis" -CMAP_CAT = 'Dark2' +CMAP_CAT = "Dark2" diff --git a/climada/util/coordinates.py b/climada/util/coordinates.py index b26c1f8d0..e160965b1 100644 --- a/climada/util/coordinates.py +++ b/climada/util/coordinates.py @@ -23,13 +23,12 @@ import copy import 
logging import math -from multiprocessing import cpu_count -from pathlib import Path import re import warnings import zipfile +from multiprocessing import cpu_count +from pathlib import Path -from cartopy.io import shapereader import dask.dataframe as dd import geopandas as gpd import numba @@ -41,23 +40,29 @@ import rasterio.features import rasterio.mask import rasterio.warp -import scipy.spatial import scipy.interpolate -from shapely.geometry import Polygon, MultiPolygon, Point, box +import scipy.spatial import shapely.ops import shapely.vectorized import shapely.wkt +from cartopy.io import shapereader +from shapely.geometry import MultiPolygon, Point, Polygon, box from sklearn.neighbors import BallTree +import climada.util.hdf5_handler as u_hdf5 from climada.util.config import CONFIG -from climada.util.constants import (DEF_CRS, EARTH_RADIUS_KM, SYSTEM_DIR, ONE_LAT_KM, - NATEARTH_CENTROIDS, - ISIMIP_GPWV3_NATID_150AS, - ISIMIP_NATID_TO_ISO, - NONISO_REGIONS, - RIVER_FLOOD_REGIONS_CSV) +from climada.util.constants import ( + DEF_CRS, + EARTH_RADIUS_KM, + ISIMIP_GPWV3_NATID_150AS, + ISIMIP_NATID_TO_ISO, + NATEARTH_CENTROIDS, + NONISO_REGIONS, + ONE_LAT_KM, + RIVER_FLOOD_REGIONS_CSV, + SYSTEM_DIR, +) from climada.util.files_handler import download_file -import climada.util.hdf5_handler as u_hdf5 pd.options.mode.chained_assignment = None @@ -79,6 +84,7 @@ """Distance threshold in km for coordinate assignment. Nearest neighbors with greater distances are not considered.""" + def latlon_to_geosph_vector(lat, lon, rad=False, basis=False): """Convert lat/lon coodinates to radial vectors (on geosphere) @@ -110,15 +116,23 @@ def latlon_to_geosph_vector(lat, lon, rad=False, basis=False): sin_lon, cos_lon = np.sin(rad_lon), np.cos(rad_lon) vecn = np.stack((sin_lat * cos_lon, sin_lat * sin_lon, cos_lat), axis=-1) if basis: - vbasis = np.stack(( - cos_lat * cos_lon, cos_lat * sin_lon, -sin_lat, - -sin_lon, cos_lon, np.zeros_like(cos_lat), - ), axis=-1).reshape(lat.shape + (2, 3)) + vbasis = np.stack( + ( + cos_lat * cos_lon, + cos_lat * sin_lon, + -sin_lat, + -sin_lon, + cos_lon, + np.zeros_like(cos_lat), + ), + axis=-1, + ).reshape(lat.shape + (2, 3)) return vecn, vbasis return vecn + def lon_normalize(lon, center=0.0): - """ Normalizes degrees such that always -180 < lon - center <= 180 + """Normalizes degrees such that always -180 < lon - center <= 180 The input data is modified in place! @@ -149,6 +163,7 @@ def lon_normalize(lon, center=0.0): lon[lon <= bounds[0]] += 360 return lon + def lon_bounds(lon, buffer=0.0): """Bounds of a set of degree values, respecting the periodicity in longitude @@ -265,11 +280,17 @@ def toggle_extent_bounds(bounds_or_extent): extent_or_bounds : tuple (a, c, b, d) Bounding box of the given points in "extent" (or "bounds") convention. """ - return (bounds_or_extent[0], bounds_or_extent[2], bounds_or_extent[1], bounds_or_extent[3]) + return ( + bounds_or_extent[0], + bounds_or_extent[2], + bounds_or_extent[1], + bounds_or_extent[3], + ) -def dist_approx(lat1, lon1, lat2, lon2, log=False, normalize=True, - method="equirect", units='km'): +def dist_approx( + lat1, lon1, lat2, lon2, log=False, normalize=True, method="equirect", units="km" +): """Compute approximation of geodistance in specified units Several batches of points can be processed at once for improved performance. 
The distances of @@ -334,15 +355,19 @@ def dist_approx(lat1, lon1, lat2, lon2, log=False, normalize=True, elif units == "degree": unit_factor = 1 else: - raise KeyError('Unknown distance unit: %s' % units) + raise KeyError("Unknown distance unit: %s" % units) if method == "equirect": if normalize: - mid_lon = 0.5 * sum(lon_bounds(np.concatenate([lon1.ravel(), lon2.ravel()]))) + mid_lon = 0.5 * sum( + lon_bounds(np.concatenate([lon1.ravel(), lon2.ravel()])) + ) lon_normalize(lon1, center=mid_lon) lon_normalize(lon2, center=mid_lon) - vtan = np.stack([lat2[:, None, :] - lat1[:, :, None], - lon2[:, None, :] - lon1[:, :, None]], axis=-1) + vtan = np.stack( + [lat2[:, None, :] - lat1[:, :, None], lon2[:, None, :] - lon1[:, :, None]], + axis=-1, + ) fact1 = np.heaviside(vtan[..., 1] - 180, 0) fact2 = np.heaviside(-vtan[..., 1] - 180, 0) vtan[..., 1] -= (fact1 - fact2) * 360 @@ -355,14 +380,16 @@ def dist_approx(lat1, lon1, lat2, lon2, log=False, normalize=True, dlat = 0.5 * (lat2[:, None, :] - lat1[:, :, None]) dlon = 0.5 * (lon2[:, None, :] - lon1[:, :, None]) # haversine formula: - hav = np.sin(dlat)**2 \ - + np.cos(lat1[:, :, None]) * np.cos(lat2[:, None, :]) * np.sin(dlon)**2 + hav = ( + np.sin(dlat) ** 2 + + np.cos(lat1[:, :, None]) * np.cos(lat2[:, None, :]) * np.sin(dlon) ** 2 + ) dist = np.degrees(2 * np.arcsin(np.sqrt(hav))) * unit_factor if log: vec1, vbasis = latlon_to_geosph_vector(lat1, lon1, rad=True, basis=True) vec2 = latlon_to_geosph_vector(lat2, lon2, rad=True) vtan = vec2[:, None, :] - (1 - 2 * hav[..., None]) * vec1[:, :, None] - vtan = np.einsum('nkli,nkji->nklj', vtan, vbasis) + vtan = np.einsum("nkli,nkji->nklj", vtan, vbasis) # faster version of `vtan_norm = np.linalg.norm(vtan, axis=-1)` vtan_norm = np.sqrt(np.einsum("...l,...l->...", vtan, vtan)) # for consistency, set dist to 0 if vtan is 0 @@ -372,6 +399,7 @@ def dist_approx(lat1, lon1, lat2, lon2, log=False, normalize=True, raise KeyError("Unknown distance approximation method: %s" % method) return (dist, vtan) if log else dist + def compute_geodesic_lengths(gdf): """Calculate the great circle (geodesic / spherical) lengths along any (complicated) line geometry object, based on the pyproj.Geod implementation. @@ -403,7 +431,7 @@ def compute_geodesic_lengths(gdf): return gdf_tmp.apply(lambda row: geod.geometry_length(row.geometry), axis=1) -def get_gridcellarea(lat, resolution=0.5, unit='ha'): +def get_gridcellarea(lat, resolution=0.5, unit="ha"): """The area covered by a grid cell is calculated depending on the latitude * 1 degree = ONE_LAT_KM (111.12km at the equator) @@ -421,15 +449,16 @@ def get_gridcellarea(lat, resolution=0.5, unit='ha'): unit of the output area (default: ha, alternatives: m2, km2) """ - if unit == 'm2': - area = (ONE_LAT_KM * resolution)**2 * np.cos(np.deg2rad(lat)) * 1000000 - elif unit == 'km2': - area = (ONE_LAT_KM * resolution)**2 * np.cos(np.deg2rad(lat)) + if unit == "m2": + area = (ONE_LAT_KM * resolution) ** 2 * np.cos(np.deg2rad(lat)) * 1000000 + elif unit == "km2": + area = (ONE_LAT_KM * resolution) ** 2 * np.cos(np.deg2rad(lat)) else: - area = (ONE_LAT_KM * resolution)**2 * np.cos(np.deg2rad(lat))*100 + area = (ONE_LAT_KM * resolution) ** 2 * np.cos(np.deg2rad(lat)) * 100 return area + def grid_is_regular(coord): """Return True if grid is regular. If True, returns height and width. 
@@ -453,11 +482,16 @@ def grid_is_regular(coord): _, count_lon = np.unique(coord[:, 1], return_counts=True) uni_lat_size = np.unique(count_lat).size uni_lon_size = np.unique(count_lon).size - if uni_lat_size == uni_lon_size and uni_lat_size == 1 \ - and count_lat[0] > 1 and count_lon[0] > 1: + if ( + uni_lat_size == uni_lon_size + and uni_lat_size == 1 + and count_lat[0] > 1 + and count_lon[0] > 1 + ): regular = True return regular, count_lat[0], count_lon[0] + def convert_wgs_to_utm(lon, lat): """Get EPSG code of UTM projection for input point in EPSG 4326 @@ -476,6 +510,7 @@ def convert_wgs_to_utm(lon, lat): epsg_utm_base = 32601 + (0 if lat >= 0 else 100) return epsg_utm_base + (math.floor((lon + 180) / 6) % 60) + def dist_to_coast(coord_lat, lon=None, highres=False, signed=False): """Read interpolated (signed) distance to coast (in m) from NASA data @@ -512,19 +547,26 @@ def dist_to_coast(coord_lat, lon=None, highres=False, signed=False): if lon is None: if isinstance(coord_lat, (gpd.GeoDataFrame, gpd.GeoSeries)): if not equal_crs(coord_lat.crs, DEF_CRS): - raise ValueError('Input CRS is not %s' % str(DEF_CRS)) - geom = coord_lat if isinstance(coord_lat, gpd.GeoSeries) else coord_lat["geometry"] + raise ValueError("Input CRS is not %s" % str(DEF_CRS)) + geom = ( + coord_lat + if isinstance(coord_lat, gpd.GeoSeries) + else coord_lat["geometry"] + ) lon, lat = geom.x.values, geom.y.values elif isinstance(coord_lat, np.ndarray) and coord_lat.shape[1] == 2: lat, lon = coord_lat[:, 0], coord_lat[:, 1] else: - raise ValueError('Missing longitude values.') + raise ValueError("Missing longitude values.") else: lat, lon = [np.asarray(v).reshape(-1) for v in [coord_lat, lon]] if lat.size != lon.size: - raise ValueError(f'Mismatching input coordinates size: {lat.size} != {lon.size}') + raise ValueError( + f"Mismatching input coordinates size: {lat.size} != {lon.size}" + ) return dist_to_coast_nasa(lat, lon, highres=highres, signed=signed) + def _get_dist_to_coast_nasa_tif(): """Get the path to the NASA raster file for distance to coast. If the file (300 MB) is missing it will be automatically downloaded. @@ -541,11 +583,12 @@ def _get_dist_to_coast_nasa_tif(): if not path.is_file(): url = CONFIG.util.coordinates.dist_to_coast_nasa_url.str() path_dwn = download_file(url, download_dir=SYSTEM_DIR) - zip_ref = zipfile.ZipFile(path_dwn, 'r') + zip_ref = zipfile.ZipFile(path_dwn, "r") zip_ref.extractall(SYSTEM_DIR) zip_ref.close() return path + def dist_to_coast_nasa(lat, lon, highres=False, signed=False): """Read interpolated (signed) distance to coast (in m) from NASA data @@ -572,16 +615,22 @@ def dist_to_coast_nasa(lat, lon, highres=False, signed=False): lat, lon = [np.asarray(ar).ravel() for ar in [lat, lon]] lon = lon_normalize(lon.copy()) intermediate_res = None if highres else 0.1 - west_msk = (lon < 0) + west_msk = lon < 0 dist = np.zeros_like(lat) for msk in [west_msk, ~west_msk]: if np.count_nonzero(msk) > 0: dist[msk] = read_raster_sample( - path, lat[msk], lon[msk], intermediate_res=intermediate_res, fill_value=0) + path, + lat[msk], + lon[msk], + intermediate_res=intermediate_res, + fill_value=0, + ) if not signed: dist = np.abs(dist) return 1000 * dist + def get_land_geometry(country_names=None, extent=None, resolution=10): """Get union of the specified (or all) countries or the points inside the extent. 
@@ -606,6 +655,7 @@ def get_land_geometry(country_names=None, extent=None, resolution=10): geom = MultiPolygon([geom]) return geom + def coord_on_land(lat, lon, land_geom=None): """Check if points are on land. @@ -624,8 +674,9 @@ def coord_on_land(lat, lon, land_geom=None): Entries are True if corresponding coordinate is on land and False otherwise. """ if lat.size != lon.size: - raise ValueError('Wrong size input coordinates: %s != %s.' - % (lat.size, lon.size)) + raise ValueError( + "Wrong size input coordinates: %s != %s." % (lat.size, lon.size) + ) if lat.size == 0: return np.empty((0,), dtype=bool) delta_deg = 1 @@ -639,8 +690,8 @@ def coord_on_land(lat, lon, land_geom=None): bounds = latlon_bounds(lat, lons, buffer=delta_deg) # load land geometry with appropriate same extent land_geom = get_land_geometry( - extent=toggle_extent_bounds(bounds), - resolution=10) + extent=toggle_extent_bounds(bounds), resolution=10 + ) elif not land_geom.is_empty: # ensure lon values are within extent of provided land_geom land_bounds = land_geom.bounds @@ -651,6 +702,7 @@ def coord_on_land(lat, lon, land_geom=None): return shapely.vectorized.contains(land_geom, lons, lat) + def nat_earth_resolution(resolution): """Check if resolution is available in Natural Earth. Build string. @@ -670,10 +722,13 @@ def nat_earth_resolution(resolution): """ avail_res = [10, 50, 110] if resolution not in avail_res: - raise ValueError('Natural Earth does not accept resolution %s m.' % resolution) - return str(resolution) + 'm' + raise ValueError("Natural Earth does not accept resolution %s m." % resolution) + return str(resolution) + "m" + -def get_country_geometries(country_names=None, extent=None, resolution=10, center_crs=True): +def get_country_geometries( + country_names=None, extent=None, resolution=10, center_crs=True +): """Natural Earth country boundaries within given extent If no arguments are given, simply returns the whole natural earth dataset. @@ -708,19 +763,19 @@ def get_country_geometries(country_names=None, extent=None, resolution=10, cente within the specified extent. 
""" resolution = nat_earth_resolution(resolution) - shp_file = shapereader.natural_earth(resolution=resolution, - category='cultural', - name='admin_0_countries') - nat_earth = gpd.read_file(shp_file, encoding='UTF-8') + shp_file = shapereader.natural_earth( + resolution=resolution, category="cultural", name="admin_0_countries" + ) + nat_earth = gpd.read_file(shp_file, encoding="UTF-8") if not nat_earth.crs: nat_earth.crs = NE_CRS # fill gaps in nat_earth - gap_mask = (nat_earth['ISO_A3'] == '-99') - nat_earth.loc[gap_mask, 'ISO_A3'] = nat_earth.loc[gap_mask, 'ADM0_A3'] + gap_mask = nat_earth["ISO_A3"] == "-99" + nat_earth.loc[gap_mask, "ISO_A3"] = nat_earth.loc[gap_mask, "ADM0_A3"] - gap_mask = (nat_earth['ISO_N3'] == '-99') + gap_mask = nat_earth["ISO_N3"] == "-99" for idx, country in nat_earth[gap_mask].iterrows(): nat_earth.loc[idx, "ISO_N3"] = f"{natearth_country_to_int(country):03d}" @@ -729,7 +784,7 @@ def get_country_geometries(country_names=None, extent=None, resolution=10, cente if isinstance(country_names, str): country_names = [country_names] country_mask = np.isin( - nat_earth[['ISO_A3', 'WB_A3', 'ADM0_A3']].values, + nat_earth[["ISO_A3", "WB_A3", "ADM0_A3"]].values, country_names, ).any(axis=1) out = out[country_mask] @@ -759,21 +814,26 @@ def get_country_geometries(country_names=None, extent=None, resolution=10, cente [box(*toggle_extent_bounds(e)) for e in [extent_left, extent_right]] ) bbox = gpd.GeoSeries(bbox, crs=DEF_CRS) - bbox = gpd.GeoDataFrame({'geometry': bbox}, crs=DEF_CRS) + bbox = gpd.GeoDataFrame({"geometry": bbox}, crs=DEF_CRS) out = gpd.overlay(out, bbox, how="intersection") if ~lon_normalized and center_crs: lon_mid = 0.5 * (extent[0] + extent[1]) # reset the CRS attribute after rewrapping (we don't really change the CRS) - out = ( - out - .to_crs({"proj": "longlat", "lon_wrap": lon_mid}) - .set_crs(DEF_CRS, allow_override=True) + out = out.to_crs({"proj": "longlat", "lon_wrap": lon_mid}).set_crs( + DEF_CRS, allow_override=True ) return out -def get_region_gridpoints(countries=None, regions=None, resolution=150, - iso=True, rect=False, basemap="natearth"): + +def get_region_gridpoints( + countries=None, + regions=None, + resolution=150, + iso=True, + rect=False, + basemap="natearth", +): """Get coordinates of gridpoints in specified countries or regions Parameters @@ -809,31 +869,38 @@ def get_region_gridpoints(countries=None, regions=None, resolution=150, if basemap == "natearth": base_file = NATEARTH_CENTROIDS[resolution] hdf5_f = u_hdf5.read(base_file) - meta = hdf5_f['meta'] - grid_shape = (meta['height'][0], meta['width'][0]) - transform = rasterio.Affine(*meta['transform']) - region_id = hdf5_f['region_id'].reshape(grid_shape) + meta = hdf5_f["meta"] + grid_shape = (meta["height"][0], meta["width"][0]) + transform = rasterio.Affine(*meta["transform"]) + region_id = hdf5_f["region_id"].reshape(grid_shape) lon, lat = raster_to_meshgrid(transform, grid_shape[1], grid_shape[0]) elif basemap == "isimip": hdf5_f = u_hdf5.read(ISIMIP_GPWV3_NATID_150AS) - dim_lon, dim_lat = hdf5_f['lon'], hdf5_f['lat'] + dim_lon, dim_lat = hdf5_f["lon"], hdf5_f["lat"] bounds = dim_lon.min(), dim_lat.min(), dim_lon.max(), dim_lat.max() orig_res = get_resolution(dim_lon, dim_lat) _, _, transform = pts_to_raster_meta(bounds, orig_res) grid_shape = (dim_lat.size, dim_lon.size) - region_id = hdf5_f['NatIdGrid'].reshape(grid_shape).astype(int) + region_id = hdf5_f["NatIdGrid"].reshape(grid_shape).astype(int) region_id[region_id < 0] = 0 - natid2iso_numeric = 
np.array(country_natid2iso(list(range(231)), "numeric"), dtype=int) + natid2iso_numeric = np.array( + country_natid2iso(list(range(231)), "numeric"), dtype=int + ) region_id = natid2iso_numeric[region_id] lon, lat = np.meshgrid(dim_lon, dim_lat) else: raise ValueError(f"Unknown basemap: {basemap}") - if basemap == "natearth" and resolution not in [150, 360] \ - or basemap == "isimip" and resolution != 150: + if ( + basemap == "natearth" + and resolution not in [150, 360] + or basemap == "isimip" + and resolution != 150 + ): resolution /= 3600 region_id, transform = refine_raster_data( - region_id, transform, resolution, method='nearest', fill_value=0) + region_id, transform, resolution, method="nearest", fill_value=0 + ) grid_shape = region_id.shape lon, lat = raster_to_meshgrid(transform, grid_shape[1], grid_shape[0]) @@ -858,11 +925,13 @@ def get_region_gridpoints(countries=None, regions=None, resolution=150, lat, lon = [ar.ravel() for ar in [lat, lon]] return lat, lon + def assign_grid_points(*args, **kwargs): """This function has been renamed, use ``match_grid_points`` instead.""" LOGGER.warning("This function has been renamed, use match_grid_points instead.") return match_grid_points(*args, **kwargs) + def match_grid_points(x, y, grid_width, grid_height, grid_transform): """To each coordinate in `x` and `y`, assign the closest centroid in the given raster grid @@ -900,13 +969,20 @@ def match_grid_points(x, y, grid_width, grid_height, grid_transform): assigned[(y_i < 0) | (y_i >= grid_height)] = -1 return assigned + def assign_coordinates(*args, **kwargs): """This function has been renamed, use ``match_coordinates`` instead.""" LOGGER.warning("This function has been renamed, use match_coordinates instead.") return match_coordinates(*args, **kwargs) -def match_coordinates(coords, coords_to_assign, distance="euclidean", - threshold=NEAREST_NEIGHBOR_THRESHOLD, **kwargs): + +def match_coordinates( + coords, + coords_to_assign, + distance="euclidean", + threshold=NEAREST_NEIGHBOR_THRESHOLD, + **kwargs, +): """To each coordinate in `coords`, assign a matching coordinate in `coords_to_assign` If there is no exact match for some entry, an attempt is made to assign the geographically @@ -969,40 +1045,54 @@ def match_coordinates(coords, coords_to_assign, distance="euclidean", } if distance not in nearest_neighbor_funcs: raise ValueError( - f'Coordinate assignment with "{distance}" distance is not supported.') + f'Coordinate assignment with "{distance}" distance is not supported.' + ) - coords = coords.astype('float64') - coords_to_assign = coords_to_assign.astype('float64') + coords = coords.astype("float64") + coords_to_assign = coords_to_assign.astype("float64") if np.array_equal(coords, coords_to_assign): assigned_idx = np.arange(coords.shape[0]) else: - LOGGER.info("No exact centroid match found. Reprojecting coordinates " - "to nearest neighbor closer than the threshold = %s", - threshold) + LOGGER.info( + "No exact centroid match found. 
Reprojecting coordinates " + "to nearest neighbor closer than the threshold = %s", + threshold, + ) # pairs of floats can be sorted (lexicographically) in NumPy - coords_view = coords.view(dtype='float64,float64').reshape(-1) - coords_to_assign_view = coords_to_assign.view(dtype='float64,float64').reshape(-1) + coords_view = coords.view(dtype="float64,float64").reshape(-1) + coords_to_assign_view = coords_to_assign.view(dtype="float64,float64").reshape( + -1 + ) # assign each hazard coordsinate to an element in coords using searchsorted coords_sorter = np.argsort(coords_view) - sort_assign_idx = np.fmin(coords_sorter.size - 1, np.searchsorted( - coords_view, coords_to_assign_view, side="left", sorter=coords_sorter)) + sort_assign_idx = np.fmin( + coords_sorter.size - 1, + np.searchsorted( + coords_view, coords_to_assign_view, side="left", sorter=coords_sorter + ), + ) sort_assign_idx = coords_sorter[sort_assign_idx] # determine which of the assignements match exactly - exact_assign_idx = (coords_view[sort_assign_idx] == coords_to_assign_view).nonzero()[0] + exact_assign_idx = ( + coords_view[sort_assign_idx] == coords_to_assign_view + ).nonzero()[0] assigned_idx = np.full_like(coords_sorter, -1) assigned_idx[sort_assign_idx[exact_assign_idx]] = exact_assign_idx # assign remaining coordinates to their geographically nearest neighbor if threshold > 0 and exact_assign_idx.size != coords_view.size: - not_assigned_idx_mask = (assigned_idx == -1) + not_assigned_idx_mask = assigned_idx == -1 assigned_idx[not_assigned_idx_mask] = nearest_neighbor_funcs[distance]( - coords_to_assign, coords[not_assigned_idx_mask], threshold, **kwargs) + coords_to_assign, coords[not_assigned_idx_mask], threshold, **kwargs + ) return assigned_idx -def match_centroids(coord_gdf, centroids, distance='euclidean', - threshold=NEAREST_NEIGHBOR_THRESHOLD): + +def match_centroids( + coord_gdf, centroids, distance="euclidean", threshold=NEAREST_NEIGHBOR_THRESHOLD +): """Assign to each gdf coordinate point its closest centroids's coordinate. If distances > threshold in points' distances, -1 is returned. If centroids are in a raster and coordinate point is outside of it ``-1`` is assigned @@ -1049,18 +1139,21 @@ def match_centroids(coord_gdf, centroids, distance='euclidean', try: if not equal_crs(coord_gdf.crs, centroids.crs): - raise ValueError('Set hazard and GeoDataFrame to same CRS first!') + raise ValueError("Set hazard and GeoDataFrame to same CRS first!") except AttributeError: # If the coord_gdf has no crs defined (or no valid geometry column), # no error is raised and it is assumed that the user set the crs correctly pass assigned = match_coordinates( - np.stack([coord_gdf['latitude'].values, coord_gdf['longitude'].values], axis=1), - centroids.coord, distance=distance, threshold=threshold, + np.stack([coord_gdf["latitude"].values, coord_gdf["longitude"].values], axis=1), + centroids.coord, + distance=distance, + threshold=threshold, ) return assigned + @numba.njit def _dist_sqr_approx(lats1, lons1, cos_lats1, lats2, lons2): """Compute squared equirectangular approximation distance. 
Values need @@ -1069,7 +1162,10 @@ def _dist_sqr_approx(lats1, lons1, cos_lats1, lats2, lons2): d_lat = lats1 - lats2 return d_lon * d_lon * cos_lats1 * cos_lats1 + d_lat * d_lat -def _nearest_neighbor_approx(centroids, coordinates, threshold, check_antimeridian=True): + +def _nearest_neighbor_approx( + centroids, coordinates, threshold, check_antimeridian=True +): """Compute the nearest centroid for each coordinate using the euclidean distance d = ((dlon)cos(lat))^2+(dlat)^2. For distant points (e.g. more than 100km apart) use the haversine distance. @@ -1099,16 +1195,19 @@ def _nearest_neighbor_approx(centroids, coordinates, threshold, check_antimeridi # Compute only for the unique coordinates. Copy the results for the # not unique coordinates - _, idx, inv = np.unique(coordinates, axis=0, return_index=True, - return_inverse=True) + _, idx, inv = np.unique(coordinates, axis=0, return_index=True, return_inverse=True) # Compute cos(lat) for all centroids centr_cos_lat = np.cos(np.radians(centroids[:, 0])) assigned = np.zeros(coordinates.shape[0], int) num_warn = 0 for icoord, iidx in enumerate(idx): - dist = _dist_sqr_approx(centroids[:, 0], centroids[:, 1], - centr_cos_lat, coordinates[iidx, 0], - coordinates[iidx, 1]) + dist = _dist_sqr_approx( + centroids[:, 0], + centroids[:, 1], + centr_cos_lat, + coordinates[iidx, 0], + coordinates[iidx, 1], + ) min_idx = dist.argmin() # Raise a warning if the minimum distance is greater than the # threshold and set an unvalid index -1 @@ -1120,15 +1219,20 @@ def _nearest_neighbor_approx(centroids, coordinates, threshold, check_antimeridi assigned[inv == icoord] = min_idx if num_warn: - LOGGER.warning('Distance to closest centroid is greater than %s' - 'km for %s coordinates.', threshold, num_warn) + LOGGER.warning( + "Distance to closest centroid is greater than %s" "km for %s coordinates.", + threshold, + num_warn, + ) if check_antimeridian: assigned = _nearest_neighbor_antimeridian( - centroids, coordinates, threshold, assigned) + centroids, coordinates, threshold, assigned + ) return assigned + def _nearest_neighbor_haversine(centroids, coordinates, threshold): """Compute the neareast centroid for each coordinate using a Ball tree with haversine distance. 
@@ -1150,15 +1254,18 @@ def _nearest_neighbor_haversine(centroids, coordinates, threshold): with as many rows as coordinates containing the centroids indexes """ # Construct tree from centroids - tree = BallTree(np.radians(centroids), metric='haversine') + tree = BallTree(np.radians(centroids), metric="haversine") # Select unique exposures coordinates - _, idx, inv = np.unique(coordinates, axis=0, return_index=True, - return_inverse=True) + _, idx, inv = np.unique(coordinates, axis=0, return_index=True, return_inverse=True) # query the k closest points of the n_points using dual tree - dist, assigned = tree.query(np.radians(coordinates[idx]), k=1, - return_distance=True, dualtree=True, - breadth_first=False) + dist, assigned = tree.query( + np.radians(coordinates[idx]), + k=1, + return_distance=True, + dualtree=True, + breadth_first=False, + ) # `BallTree.query` returns a row for each entry, even if k=1 (number of nearest neighbors) dist = dist[:, 0] @@ -1168,15 +1275,20 @@ def _nearest_neighbor_haversine(centroids, coordinates, threshold): # threshold and set an unvalid index -1 num_warn = np.sum(dist * EARTH_RADIUS_KM > threshold) if num_warn: - LOGGER.warning('Distance to closest centroid is greater than %s' - 'km for %s coordinates.', threshold, num_warn) + LOGGER.warning( + "Distance to closest centroid is greater than %s" "km for %s coordinates.", + threshold, + num_warn, + ) assigned[dist * EARTH_RADIUS_KM > threshold] = -1 # Copy result to all exposures and return value return assigned[inv] -def _nearest_neighbor_euclidean(centroids, coordinates, threshold, check_antimeridian=True): +def _nearest_neighbor_euclidean( + centroids, coordinates, threshold, check_antimeridian=True +): """Compute the neareast centroid for each coordinate using a k-d tree. 
Parameters @@ -1204,8 +1316,7 @@ def _nearest_neighbor_euclidean(centroids, coordinates, threshold, check_antimer # Construct tree from centroids tree = scipy.spatial.KDTree(np.radians(centroids)) # Select unique exposures coordinates - _, idx, inv = np.unique(coordinates, axis=0, return_index=True, - return_inverse=True) + _, idx, inv = np.unique(coordinates, axis=0, return_index=True, return_inverse=True) # query the k closest points of the n_points using dual tree dist, assigned = tree.query(np.radians(coordinates[idx]), k=1, p=2, workers=-1) @@ -1214,17 +1325,22 @@ def _nearest_neighbor_euclidean(centroids, coordinates, threshold, check_antimer # threshold and set an unvalid index -1 num_warn = np.sum(dist * EARTH_RADIUS_KM > threshold) if num_warn: - LOGGER.warning('Distance to closest centroid is greater than %s' - 'km for %s coordinates.', threshold, num_warn) + LOGGER.warning( + "Distance to closest centroid is greater than %s" "km for %s coordinates.", + threshold, + num_warn, + ) assigned[dist * EARTH_RADIUS_KM > threshold] = -1 if check_antimeridian: assigned = _nearest_neighbor_antimeridian( - centroids, coordinates[idx], threshold, assigned) + centroids, coordinates[idx], threshold, assigned + ) # Copy result to all exposures and return value return assigned[inv] + def _nearest_neighbor_antimeridian(centroids, coordinates, threshold, assigned): """Recompute nearest neighbors close to the anti-meridian with the Haversine distance @@ -1250,26 +1366,31 @@ def _nearest_neighbor_antimeridian(centroids, coordinates, threshold, assigned): lon_min = min(centroids[:, 1].min(), coordinates[:, 1].min()) lon_max = max(centroids[:, 1].max(), coordinates[:, 1].max()) if lon_max - lon_min > 360: - raise ValueError("Longitudinal coordinates need to be normalized" - "to a common 360 degree range") + raise ValueError( + "Longitudinal coordinates need to be normalized" + "to a common 360 degree range" + ) mid_lon = 0.5 * (lon_max + lon_min) antimeridian = mid_lon + 180 thres_deg = np.degrees(threshold / EARTH_RADIUS_KM) coord_strip_bool = coordinates[:, 1] + antimeridian < 1.5 * thres_deg - coord_strip_bool |= coordinates[:, 1] - antimeridian > -1.5 * thres_deg + coord_strip_bool |= coordinates[:, 1] - antimeridian > -1.5 * thres_deg if np.any(coord_strip_bool): coord_strip = coordinates[coord_strip_bool] cent_strip_bool = centroids[:, 1] + antimeridian < 2.5 * thres_deg - cent_strip_bool |= centroids[:, 1] - antimeridian > -2.5 * thres_deg + cent_strip_bool |= centroids[:, 1] - antimeridian > -2.5 * thres_deg if np.any(cent_strip_bool): cent_strip = centroids[cent_strip_bool] - strip_assigned = _nearest_neighbor_haversine(cent_strip, coord_strip, threshold) + strip_assigned = _nearest_neighbor_haversine( + cent_strip, coord_strip, threshold + ) new_coords = cent_strip_bool.nonzero()[0][strip_assigned] new_coords[strip_assigned == -1] = -1 assigned[coord_strip_bool] = new_coords return assigned + def region2isos(regions): """Convert region names to ISO 3166 alpha-3 codes of countries @@ -1287,12 +1408,13 @@ def region2isos(regions): reg_info = pd.read_csv(RIVER_FLOOD_REGIONS_CSV) isos = [] for region in regions: - region_msk = (reg_info['Reg_name'] == region) + region_msk = reg_info["Reg_name"] == region if not any(region_msk): - raise KeyError('Unknown region name: %s' % region) - isos += list(reg_info['ISO'][region_msk].values) + raise KeyError("Unknown region name: %s" % region) + isos += list(reg_info["ISO"][region_msk].values) return list(set(isos)) + def country_to_iso(countries, 
representation="alpha3", fillvalue=None): """Determine ISO 3166 representation of countries @@ -1345,24 +1467,32 @@ def country_to_iso(countries, representation="alpha3", fillvalue=None): try: match = pycountry.historic_countries.lookup(country) except LookupError: - match = next(filter(lambda c: country in c.values(), NONISO_REGIONS), None) + match = next( + filter(lambda c: country in c.values(), NONISO_REGIONS), None + ) if match is not None: match = pycountry.db.Data(**match) elif fillvalue is not None: match = pycountry.db.Data(**{representation: fillvalue}) else: - raise LookupError(f'Unknown country identifier: {country}') from None + raise LookupError( + f"Unknown country identifier: {country}" + ) from None iso = getattr(match, representation) if representation == "numeric": iso = int(iso) iso_list.append(iso) return iso_list[0] if return_single else iso_list + def country_iso_alpha2numeric(iso_alpha): """Deprecated: Use `country_to_iso` with `representation="numeric"` instead""" - LOGGER.warning("country_iso_alpha2numeric is deprecated, use country_to_iso instead.") + LOGGER.warning( + "country_iso_alpha2numeric is deprecated, use country_to_iso instead." + ) return country_to_iso(iso_alpha, "numeric") + def country_natid2iso(natids, representation="alpha3"): """Convert internal NatIDs to ISO 3166-1 alpha-3 codes @@ -1386,12 +1516,13 @@ def country_natid2iso(natids, representation="alpha3"): iso_list = [] for natid in natids: if natid < 0 or natid >= len(ISIMIP_NATID_TO_ISO): - raise LookupError('Unknown country NatID: %s' % natid) + raise LookupError("Unknown country NatID: %s" % natid) iso_list.append(ISIMIP_NATID_TO_ISO[natid]) if representation != "alpha3": iso_list = country_to_iso(iso_list, representation) return iso_list[0] if return_str else iso_list + def country_iso2natid(isos): """Convert ISO 3166-1 alpha-3 codes to internal NatIDs @@ -1412,9 +1543,10 @@ def country_iso2natid(isos): try: natids.append(ISIMIP_NATID_TO_ISO.index(iso)) except ValueError as ver: - raise LookupError(f'Unknown country ISO: {iso}') from ver + raise LookupError(f"Unknown country ISO: {iso}") from ver return natids[0] if return_int else natids + def natearth_country_to_int(country): """Integer representation (ISO 3166, if possible) of Natural Earth GeoPandas country row @@ -1428,10 +1560,11 @@ def natearth_country_to_int(country): iso_numeric : int Integer representation of given country. """ - if country.ISO_N3 != '-99': + if country.ISO_N3 != "-99": return int(country.ISO_N3) return country_to_iso(str(country.NAME), representation="numeric") + def get_country_code(lat, lon, gridded=False): """Provide numeric (ISO 3166) code for every point. 
@@ -1455,40 +1588,47 @@ def get_country_code(lat, lon, gridded=False): lat, lon = [np.asarray(ar).ravel() for ar in [lat, lon]] if lat.size == 0: return np.empty((0,), dtype=int) - LOGGER.info('Setting region_id %s points.', str(lat.size)) + LOGGER.info("Setting region_id %s points.", str(lat.size)) if gridded: base_file = u_hdf5.read(NATEARTH_CENTROIDS[150]) - meta, region_id = base_file['meta'], base_file['region_id'] - transform = rasterio.Affine(*meta['transform']) - region_id = region_id.reshape(meta['height'][0], meta['width'][0]) - region_id = interp_raster_data(region_id, lat, lon, transform, - method='nearest', fill_value=0) + meta, region_id = base_file["meta"], base_file["region_id"] + transform = rasterio.Affine(*meta["transform"]) + region_id = region_id.reshape(meta["height"][0], meta["width"][0]) + region_id = interp_raster_data( + region_id, lat, lon, transform, method="nearest", fill_value=0 + ) region_id = region_id.astype(int) else: (lon_min, lat_min, lon_max, lat_max) = latlon_bounds(lat, lon, 0.001) countries = get_country_geometries( - extent=(lon_min, lon_max, lat_min, lat_max), center_crs=False) + extent=(lon_min, lon_max, lat_min, lat_max), center_crs=False + ) with warnings.catch_warnings(): # in order to suppress the following # UserWarning: Geometry is in a geographic CRS. Results from 'area' are likely # incorrect. Use 'GeoSeries.to_crs()' to re-project geometries to a projected CRS # before this operation. - warnings.simplefilter('ignore', UserWarning) - countries['area'] = countries.geometry.area - countries = countries.sort_values(by=['area'], ascending=False) + warnings.simplefilter("ignore", UserWarning) + countries["area"] = countries.geometry.area + countries = countries.sort_values(by=["area"], ascending=False) region_id = np.full((lon.size,), -1, dtype=int) total_land = countries.geometry.unary_union - ocean_mask = (region_id.all() if total_land is None - else ~shapely.vectorized.contains(total_land, lon, lat)) + ocean_mask = ( + region_id.all() + if total_land is None + else ~shapely.vectorized.contains(total_land, lon, lat) + ) region_id[ocean_mask] = 0 for country in countries.itertuples(): unset = (region_id == -1).nonzero()[0] - select = shapely.vectorized.contains(country.geometry, - lon[unset], lat[unset]) + select = shapely.vectorized.contains( + country.geometry, lon[unset], lat[unset] + ) region_id[unset[select]] = natearth_country_to_int(country) region_id[region_id == -1] = 0 return region_id + def get_admin1_info(country_names): """Provide Natural Earth registry info and shape files for admin1 regions @@ -1506,6 +1646,7 @@ def get_admin1_info(country_names): admin1_shapes : dict Shape according to Natural Earth. 
""" + def _ensure_utf8(val): # Without the `*.cpg` file present, the shape reader wrongly assumes latin-1 encoding: # https://github.com/SciTools/cartopy/issues/1282 @@ -1513,7 +1654,7 @@ def _ensure_utf8(val): # As a workaround, we encode and decode again, unless this fails which means # that the `*.cpg` is present and the encoding is correct: try: - return val.encode('latin-1').decode('utf-8') + return val.encode("latin-1").decode("utf-8") except (AttributeError, UnicodeDecodeError, UnicodeEncodeError): return val @@ -1522,29 +1663,30 @@ def _ensure_utf8(val): if not isinstance(country_names, list): LOGGER.error("country_names needs to be of type list, str, int or float") raise TypeError("Invalid type for input parameter 'country_names'") - admin1_file = shapereader.natural_earth(resolution='10m', - category='cultural', - name='admin_1_states_provinces') + admin1_file = shapereader.natural_earth( + resolution="10m", category="cultural", name="admin_1_states_provinces" + ) admin1_recs = shapereader.Reader(admin1_file) admin1_info = dict() admin1_shapes = dict() for country in country_names: if isinstance(country, (int, float)): # transform numerric code to str - country = f'{int(country):03d}' + country = f"{int(country):03d}" # get alpha-3 code according to ISO 3166 country = pycountry.countries.lookup(country).alpha_3 admin1_info[country] = list() admin1_shapes[country] = list() for rec in admin1_recs.records(): - if rec.attributes['adm0_a3'] == country: + if rec.attributes["adm0_a3"] == country: rec_attributes = {k: _ensure_utf8(v) for k, v in rec.attributes.items()} admin1_info[country].append(rec_attributes) admin1_shapes[country].append(rec.geometry) if len(admin1_info[country]) == 0: - raise LookupError(f'natural_earth records are empty for country {country}') + raise LookupError(f"natural_earth records are empty for country {country}") return admin1_info, admin1_shapes + def get_admin1_geometries(countries): """ return geometries, names and codes of admin 1 regions in given countries @@ -1575,26 +1717,31 @@ def get_admin1_geometries(countries): """ # init empty GeoDataFrame: gdf = gpd.GeoDataFrame( - columns = ("admin1_name", "iso_3166_2", "geometry", "iso_3n", "iso_3a")) + columns=("admin1_name", "iso_3166_2", "geometry", "iso_3n", "iso_3a") + ) # extract admin 1 infos and shapes for each country: admin1_info, admin1_shapes = get_admin1_info(countries) for country in admin1_info: # fill admin 1 region names and codes to GDF for single country: gdf_tmp = gpd.GeoDataFrame(columns=gdf.columns) - gdf_tmp['admin1_name'] = [record['name'] for record in admin1_info[country]] - gdf_tmp['iso_3166_2'] = [record['iso_3166_2'] for record in admin1_info[country]] + gdf_tmp["admin1_name"] = [record["name"] for record in admin1_info[country]] + gdf_tmp["iso_3166_2"] = [ + record["iso_3166_2"] for record in admin1_info[country] + ] # With this initiation of GeoSeries in a list comprehension, # the ability of geopandas to convert shapereader.Shape to (Multi)Polygon is exploited: - geoseries = gpd.GeoSeries([gpd.GeoSeries(shape).values[0] - for shape in admin1_shapes[country]]) + geoseries = gpd.GeoSeries( + [gpd.GeoSeries(shape).values[0] for shape in admin1_shapes[country]] + ) gdf_tmp.geometry = list(geoseries) # fill columns with country identifiers (admin 0): - gdf_tmp['iso_3n'] = pycountry.countries.lookup(country).numeric - gdf_tmp['iso_3a'] = country + gdf_tmp["iso_3n"] = pycountry.countries.lookup(country).numeric + gdf_tmp["iso_3a"] = country gdf = pd.concat([gdf, gdf_tmp], 
ignore_index=True) return gdf + def get_resolution_1d(coords, min_resol=1.0e-8): """Compute resolution of scalar grid @@ -1668,6 +1815,7 @@ def pts_to_raster_meta(points_bounds, res): ras_trans = rasterio.Affine.translation(*origin) * rasterio.Affine.scale(*res) return int(nsteps[1]), int(nsteps[0]), ras_trans + def raster_to_meshgrid(transform, width, height): """Get coordinates of grid points in raster @@ -1690,8 +1838,9 @@ def raster_to_meshgrid(transform, width, height): xres, _, xmin, _, yres, ymin = transform[:6] xmax = xmin + width * xres ymax = ymin + height * yres - return np.meshgrid(np.arange(xmin + xres / 2, xmax, xres), - np.arange(ymin + yres / 2, ymax, yres)) + return np.meshgrid( + np.arange(xmin + xres / 2, xmax, xres), np.arange(ymin + yres / 2, ymax, yres) + ) def to_crs_user_input(crs_obj): @@ -1717,15 +1866,18 @@ def to_crs_user_input(crs_obj): ValueError if type(crs_obj) has the wrong type """ + def _is_deprecated_init_crs(crs_dict): - return (isinstance(crs_dict, dict) - and "init" in crs_dict - and all(k in ["init", "no_defs"] for k in crs_dict.keys()) - and crs_dict.get("no_defs", True) is True) + return ( + isinstance(crs_dict, dict) + and "init" in crs_dict + and all(k in ["init", "no_defs"] for k in crs_dict.keys()) + and crs_dict.get("no_defs", True) is True + ) if isinstance(crs_obj, (dict, int)): if _is_deprecated_init_crs(crs_obj): - return crs_obj['init'] + return crs_obj["init"] return crs_obj crs_string = crs_obj.decode() if isinstance(crs_obj, bytes) else crs_obj @@ -1733,10 +1885,10 @@ def _is_deprecated_init_crs(crs_dict): if not isinstance(crs_string, str): raise ValueError(f"crs has unhandled data set type: {type(crs_string)}") - if crs_string[0] == '{': + if crs_string[0] == "{": crs_dict = ast.literal_eval(crs_string) if _is_deprecated_init_crs(crs_dict): - return crs_dict['init'] + return crs_dict["init"] return crs_dict return crs_string @@ -1759,10 +1911,21 @@ def equal_crs(crs_one, crs_two): """ if crs_one is None: return crs_two is None - return rasterio.crs.CRS.from_user_input(crs_one) == rasterio.crs.CRS.from_user_input(crs_two) - -def _read_raster_reproject(src, src_crs, dst_meta, band=None, geometry=None, dst_crs=None, - transform=None, resampling="nearest"): + return rasterio.crs.CRS.from_user_input( + crs_one + ) == rasterio.crs.CRS.from_user_input(crs_two) + + +def _read_raster_reproject( + src, + src_crs, + dst_meta, + band=None, + geometry=None, + dst_crs=None, + transform=None, + resampling="nearest", +): """Helper function for `read_raster`.""" if isinstance(resampling, str): resampling = getattr(rasterio.warp.Resampling, resampling) @@ -1772,19 +1935,22 @@ def _read_raster_reproject(src, src_crs, dst_meta, band=None, geometry=None, dst dst_crs = src_crs if not transform: transform, width, height = rasterio.warp.calculate_default_transform( - src_crs, dst_crs, src.width, src.height, *src.bounds) + src_crs, dst_crs, src.width, src.height, *src.bounds + ) else: transform, width, height = transform - dst_meta.update({ - 'crs': dst_crs, - 'transform': transform, - 'width': width, - 'height': height, - }) + dst_meta.update( + { + "crs": dst_crs, + "transform": transform, + "width": width, + "height": height, + } + ) kwargs = {} - if src.meta['nodata']: - kwargs['src_nodata'] = src.meta['nodata'] - kwargs['dst_nodata'] = src.meta['nodata'] + if src.meta["nodata"]: + kwargs["src_nodata"] = src.meta["nodata"] + kwargs["dst_nodata"] = src.meta["nodata"] intensity = np.zeros((len(band), height, width)) for idx_band, i_band in enumerate(band): 
@@ -1796,40 +1962,46 @@ def _read_raster_reproject(src, src_crs, dst_meta, band=None, geometry=None, dst dst_transform=transform, dst_crs=dst_crs, resampling=resampling, - **kwargs) + **kwargs, + ) - if dst_meta['nodata'] and np.isnan(dst_meta['nodata']): + if dst_meta["nodata"] and np.isnan(dst_meta["nodata"]): nodata_mask = np.isnan(intensity[idx_band, :]) else: - nodata_mask = (intensity[idx_band, :] == dst_meta['nodata']) + nodata_mask = intensity[idx_band, :] == dst_meta["nodata"] intensity[idx_band, :][nodata_mask] = 0 if geometry: - intensity = intensity.astype('float32') + intensity = intensity.astype("float32") # update driver to GTiff as netcdf does not work reliably - dst_meta.update(driver='GTiff') + dst_meta.update(driver="GTiff") with rasterio.MemoryFile() as memfile: with memfile.open(**dst_meta) as dst: dst.write(intensity) with memfile.open() as dst: - inten, mask_trans = rasterio.mask.mask(dst, geometry, crop=True, indexes=band) - dst_meta.update({ - "height": inten.shape[1], - "width": inten.shape[2], - "transform": mask_trans, - }) + inten, mask_trans = rasterio.mask.mask( + dst, geometry, crop=True, indexes=band + ) + dst_meta.update( + { + "height": inten.shape[1], + "width": inten.shape[2], + "transform": mask_trans, + } + ) intensity = inten[range(len(band)), :] - intensity = intensity.astype('float64') + intensity = intensity.astype("float64") # reset nodata values again as driver Gtiff resets them again - if dst_meta['nodata'] and np.isnan(dst_meta['nodata']): + if dst_meta["nodata"] and np.isnan(dst_meta["nodata"]): intensity[np.isnan(intensity)] = 0 else: - intensity[intensity == dst_meta['nodata']] = 0 + intensity[intensity == dst_meta["nodata"]] = 0 return intensity + def _add_gdal_vsi_prefix(path): """Add one of GDAL's virtual file system prefixes if applicable @@ -1863,8 +2035,19 @@ def _add_gdal_vsi_prefix(path): path = f"/vsi{supported_suffixes[suffix]}/{path}" return str(path) -def read_raster(file_name, band=None, src_crs=None, window=None, geometry=None, - dst_crs=None, transform=None, width=None, height=None, resampling="nearest"): + +def read_raster( + file_name, + band=None, + src_crs=None, + window=None, + geometry=None, + dst_crs=None, + transform=None, + width=None, + height=None, + resampling="nearest", +): """Read raster of bands and set 0-values to the masked ones. 
Parameters @@ -1900,29 +2083,38 @@ def read_raster(file_name, band=None, src_crs=None, window=None, geometry=None, """ if not band: band = [1] - LOGGER.info('Reading %s', file_name) + LOGGER.info("Reading %s", file_name) with rasterio.Env(): - with rasterio.open(_add_gdal_vsi_prefix(file_name), 'r') as src: + with rasterio.open(_add_gdal_vsi_prefix(file_name), "r") as src: dst_meta = src.meta.copy() if dst_crs or transform: - LOGGER.debug('Reprojecting ...') + LOGGER.debug("Reprojecting ...") src_crs = src.crs if src_crs is None else src_crs if not src_crs: src_crs = rasterio.crs.CRS.from_user_input(DEF_CRS) transform = (transform, width, height) if transform else None - inten = _read_raster_reproject(src, src_crs, dst_meta, band=band, - geometry=geometry, dst_crs=dst_crs, - transform=transform, resampling=resampling) + inten = _read_raster_reproject( + src, + src_crs, + dst_meta, + band=band, + geometry=geometry, + dst_crs=dst_crs, + transform=transform, + resampling=resampling, + ) else: if geometry: - inten, trans = rasterio.mask.mask(src, geometry, crop=True, indexes=band) - if dst_meta['nodata'] and np.isnan(dst_meta['nodata']): + inten, trans = rasterio.mask.mask( + src, geometry, crop=True, indexes=band + ) + if dst_meta["nodata"] and np.isnan(dst_meta["nodata"]): inten[np.isnan(inten)] = 0 else: - inten[inten == dst_meta['nodata']] = 0 + inten[inten == dst_meta["nodata"]] = 0 else: masked_array = src.read(band, window=window, masked=True) @@ -1932,24 +2124,34 @@ def read_raster(file_name, band=None, src_crs=None, window=None, geometry=None, if window: trans = rasterio.windows.transform(window, src.transform) else: - trans = dst_meta['transform'] + trans = dst_meta["transform"] - dst_meta.update({ - "height": inten.shape[1], - "width": inten.shape[2], - "transform": trans, - }) + dst_meta.update( + { + "height": inten.shape[1], + "width": inten.shape[2], + "transform": trans, + } + ) - if not dst_meta['crs']: - dst_meta['crs'] = rasterio.crs.CRS.from_user_input(DEF_CRS) + if not dst_meta["crs"]: + dst_meta["crs"] = rasterio.crs.CRS.from_user_input(DEF_CRS) intensity = inten[range(len(band)), :] - dst_shape = (len(band), dst_meta['height'] * dst_meta['width']) + dst_shape = (len(band), dst_meta["height"] * dst_meta["width"]) return dst_meta, intensity.reshape(dst_shape) -def read_raster_bounds(path, bounds, res=None, bands=None, resampling="nearest", - global_origin=None, pad_cells=1.0): + +def read_raster_bounds( + path, + bounds, + res=None, + bands=None, + resampling="nearest", + global_origin=None, + pad_cells=1.0, +): """Read raster file within given bounds at given resolution By default, not only the grid cells of the destination raster whose cell centers fall within @@ -1996,7 +2198,7 @@ def read_raster_bounds(path, bounds, res=None, bands=None, resampling="nearest", resampling = getattr(rasterio.warp.Resampling, resampling) if not bands: bands = [1] - with rasterio.open(_add_gdal_vsi_prefix(path), 'r') as src: + with rasterio.open(_add_gdal_vsi_prefix(path), "r") as src: if res: if not isinstance(res, tuple): res = (res, res) @@ -2005,8 +2207,12 @@ def read_raster_bounds(path, bounds, res=None, bands=None, resampling="nearest", res = (np.abs(res[0]), np.abs(res[1])) # make sure that the extent of pixel centers covers the specified region - bounds = (bounds[0] - pad_cells * res[0], bounds[1] - pad_cells * res[1], - bounds[2] + pad_cells * res[0], bounds[3] + pad_cells * res[1]) + bounds = ( + bounds[0] - pad_cells * res[0], + bounds[1] - pad_cells * res[1], + bounds[2] + 
pad_cells * res[0], + bounds[3] + pad_cells * res[1], + ) if src.crs is not None and src.crs.to_epsg() == 4326: # We consider WGS84 (EPSG:4326) as a special case just because it's so common. @@ -2015,9 +2221,13 @@ def read_raster_bounds(path, bounds, res=None, bands=None, resampling="nearest", if global_origin is None: global_origin = (src.transform[2], src.transform[5]) - res_signed = (np.sign(src.transform[0]) * res[0], np.sign(src.transform[4]) * res[1]) + res_signed = ( + np.sign(src.transform[0]) * res[0], + np.sign(src.transform[4]) * res[1], + ) global_transform = rasterio.transform.from_origin( - *global_origin, res_signed[0], -res_signed[1]) + *global_origin, res_signed[0], -res_signed[1] + ) transform, shape = subraster_from_bounds(global_transform, bounds) data = np.zeros((len(bands),) + shape, dtype=src.dtypes[0]) @@ -2030,9 +2240,11 @@ def read_raster_bounds(path, bounds, res=None, bands=None, resampling="nearest", src_crs=src.crs, dst_transform=transform, dst_crs=crs, - resampling=resampling) + resampling=resampling, + ) return data, transform + def _raster_gradient(data, transform, latlon_to_m=False): """Compute the gradient of raster data using finite differences @@ -2059,7 +2271,7 @@ def _raster_gradient(data, transform, latlon_to_m=False): Affine transformation defining the output raster. """ xres, _, _, _, yres = transform[:5] - gradient_transform = rasterio.Affine.translation(0.5 * xres, 0.5 * yres) * transform + gradient_transform = rasterio.Affine.translation(0.5 * xres, 0.5 * yres) * transform if latlon_to_m: height, width = [s - 1 for s in data.shape] @@ -2075,6 +2287,7 @@ def _raster_gradient(data, transform, latlon_to_m=False): return gradient_data, gradient_transform + def _prepare_raster_sample(path, lat, lon, intermediate_res, fill_value): """Helper function for the sampling of points from a raster file. @@ -2105,16 +2318,18 @@ def _prepare_raster_sample(path, lat, lon, intermediate_res, fill_value): crs : CRS The CRS of the raster file. """ - LOGGER.info('Sampling from %s', path) + LOGGER.info("Sampling from %s", path) with rasterio.open(_add_gdal_vsi_prefix(path), "r") as src: if intermediate_res is None: intermediate_res = (np.abs(src.transform[0]), np.abs(src.transform[4])) - meta_nodata = src.meta['nodata'] + meta_nodata = src.meta["nodata"] crs = src.crs bounds = (lon.min(), lat.min(), lon.max(), lat.max()) - data, transform = read_raster_bounds(path, bounds, res=intermediate_res, pad_cells=2) + data, transform = read_raster_bounds( + path, bounds, res=intermediate_res, pad_cells=2 + ) data = data[0, :, :] if fill_value is not None: @@ -2125,7 +2340,10 @@ def _prepare_raster_sample(path, lat, lon, intermediate_res, fill_value): return data, transform, fill_value, crs -def read_raster_sample(path, lat, lon, intermediate_res=None, method='linear', fill_value=None): + +def read_raster_sample( + path, lat, lon, intermediate_res=None, method="linear", fill_value=None +): """Read point samples from raster file. 
Parameters @@ -2153,13 +2371,17 @@ def read_raster_sample(path, lat, lon, intermediate_res=None, method='linear', f return np.zeros_like(lat) data, transform, fill_value, _ = _prepare_raster_sample( - path, lat, lon, intermediate_res, fill_value) + path, lat, lon, intermediate_res, fill_value + ) return interp_raster_data( - data, lat, lon, transform, method=method, fill_value=fill_value) + data, lat, lon, transform, method=method, fill_value=fill_value + ) + -def read_raster_sample_with_gradients(path, lat, lon, intermediate_res=None, - method=('linear', 'nearest'), fill_value=None): +def read_raster_sample_with_gradients( + path, lat, lon, intermediate_res=None, method=("linear", "nearest"), fill_value=None +): """Read point samples with computed gradients from raster file. For convenience, and because this is the most common use case, the step sizes in the gradient @@ -2205,19 +2427,25 @@ def read_raster_sample_with_gradients(path, lat, lon, intermediate_res=None, method = (method, method) data, transform, fill_value, crs = _prepare_raster_sample( - path, lat, lon, intermediate_res, fill_value) + path, lat, lon, intermediate_res, fill_value + ) interp_data = interp_raster_data( - data, lat, lon, transform, method=method[0], fill_value=fill_value) + data, lat, lon, transform, method=method[0], fill_value=fill_value + ) is_latlon = crs is not None and crs.to_epsg() == 4326 grad_data, grad_transform = _raster_gradient(data, transform, latlon_to_m=is_latlon) interp_grad = interp_raster_data( - grad_data, lat, lon, grad_transform, method=method[1], fill_value=fill_value) + grad_data, lat, lon, grad_transform, method=method[1], fill_value=fill_value + ) return interp_data, interp_grad -def interp_raster_data(data, interp_y, interp_x, transform, method='linear', fill_value=0): + +def interp_raster_data( + data, interp_y, interp_x, transform, method="linear", fill_value=0 +): """Interpolate raster data, given as array and affine transform Parameters @@ -2247,7 +2475,9 @@ def interp_raster_data(data, interp_y, interp_x, transform, method='linear', fil xres, _, xmin, _, yres, ymin = transform[:6] xmax = xmin + data.shape[1] * xres ymax = ymin + data.shape[0] * yres - data = np.pad(data, [(1, 1) if i < 2 else (0, 0) for i in range(data.ndim)], mode='edge') + data = np.pad( + data, [(1, 1) if i < 2 else (0, 0) for i in range(data.ndim)], mode="edge" + ) if yres < 0: yres = -yres @@ -2262,10 +2492,17 @@ def interp_raster_data(data, interp_y, interp_x, transform, method='linear', fil data = np.array(data, dtype=np.float64) data[np.isnan(data)] = fill_value - return scipy.interpolate.interpn((y_dim, x_dim), data, np.vstack([interp_y, interp_x]).T, - method=method, bounds_error=False, fill_value=fill_value) + return scipy.interpolate.interpn( + (y_dim, x_dim), + data, + np.vstack([interp_y, interp_x]).T, + method=method, + bounds_error=False, + fill_value=fill_value, + ) + -def refine_raster_data(data, transform, res, method='linear', fill_value=0): +def refine_raster_data(data, transform, res, method="linear", fill_value=0): """Refine raster data, given as array and affine transform Parameters @@ -2297,11 +2534,13 @@ def refine_raster_data(data, transform, res, method='linear', fill_value=0): new_shape = (new_dimy.size, new_dimx.size) new_x, new_y = [ar.ravel() for ar in np.meshgrid(new_dimx, new_dimy)] new_transform = rasterio.Affine(res[0], 0, xmin, 0, res[1], ymin) - new_data = interp_raster_data(data, new_y, new_x, transform, method=method, - fill_value=fill_value) + new_data = interp_raster_data( 
+ data, new_y, new_x, transform, method=method, fill_value=fill_value + ) new_data = new_data.reshape(new_shape) return new_data, new_transform + def read_vector(file_name, field_name, dst_crs=None): """Read vector file format supported by fiona. @@ -2325,7 +2564,7 @@ def read_vector(file_name, field_name, dst_crs=None): value : np.array Values associated to each shape. """ - LOGGER.info('Reading %s', file_name) + LOGGER.info("Reading %s", file_name) data_frame = gpd.read_file(file_name) if not data_frame.crs: data_frame.crs = DEF_CRS @@ -2339,6 +2578,7 @@ def read_vector(file_name, field_name, dst_crs=None): value[i_inten, :] = data_frame[inten].values return lat, lon, geometry, value + def write_raster(file_name, data_matrix, meta, dtype=np.float32): """Write raster in GeoTiff format. @@ -2355,20 +2595,22 @@ def write_raster(file_name, data_matrix, meta, dtype=np.float32): dtype : numpy dtype, optional A numpy dtype. Default: np.float32 """ - LOGGER.info('Writting %s', file_name) - if data_matrix.shape != (meta['height'], meta['width']): + LOGGER.info("Writting %s", file_name) + if data_matrix.shape != (meta["height"], meta["width"]): # every row is an event (from hazard intensity or fraction) == band - shape = (data_matrix.shape[0], meta['height'], meta['width']) + shape = (data_matrix.shape[0], meta["height"], meta["width"]) else: - shape = (1, meta['height'], meta['width']) + shape = (1, meta["height"], meta["width"]) dst_meta = copy.deepcopy(meta) - dst_meta.update(driver='GTiff', dtype=dtype, count=shape[0]) + dst_meta.update(driver="GTiff", dtype=dtype, count=shape[0]) data_matrix = np.asarray(data_matrix, dtype=dtype).reshape(shape) - with rasterio.open(file_name, 'w', **dst_meta) as dst: + with rasterio.open(file_name, "w", **dst_meta) as dst: dst.write(data_matrix, indexes=np.arange(1, shape[0] + 1)) -def points_to_raster(points_df, val_names=None, res=0.0, raster_res=0.0, crs=DEF_CRS, - scheduler=None): + +def points_to_raster( + points_df, val_names=None, res=0.0, raster_res=0.0, crs=DEF_CRS, scheduler=None +): """Compute raster (as data and transform) from GeoDataFrame. Parameters @@ -2399,52 +2641,65 @@ def points_to_raster(points_df, val_names=None, res=0.0, raster_res=0.0, crs=DEF Dictionary with 'crs', 'height', 'width' and 'transform' attributes. 
""" if not val_names: - val_names = ['value'] + val_names = ["value"] if not res: - res = np.abs(get_resolution(points_df['latitude'].values, - points_df['longitude'].values)).min() + res = np.abs( + get_resolution(points_df["latitude"].values, points_df["longitude"].values) + ).min() if not raster_res: raster_res = res def apply_box(df_exp): - fun = lambda r: Point(r['longitude'], r['latitude']).buffer(res / 2).envelope + fun = lambda r: Point(r["longitude"], r["latitude"]).buffer(res / 2).envelope return df_exp.apply(fun, axis=1) - LOGGER.info('Raster from resolution %s to %s.', res, raster_res) + LOGGER.info("Raster from resolution %s to %s.", res, raster_res) df_poly = gpd.GeoDataFrame(points_df[val_names]) if not scheduler: - df_poly['_-geometry-prov'] = apply_box(points_df) + df_poly["_-geometry-prov"] = apply_box(points_df) else: - ddata = dd.from_pandas(points_df[['latitude', 'longitude']], - npartitions=cpu_count()) - df_poly['_-geometry-prov'] = ddata.map_partitions( - apply_box).compute(scheduler=scheduler) + ddata = dd.from_pandas( + points_df[["latitude", "longitude"]], npartitions=cpu_count() + ) + df_poly["_-geometry-prov"] = ddata.map_partitions(apply_box).compute( + scheduler=scheduler + ) # depending on the dask/pandas version setting `meta=Polygon` in map_partitions # would just raise a warning and returns a string, so we have to convert explicitly - if isinstance(df_poly.loc[0, '_-geometry-prov'], str): # fails for empty `points_df` - df_poly['_-geometry-prov'] = shapely.wkt.loads(df_poly['_-geometry-prov']) - - df_poly.set_geometry('_-geometry-prov', - crs=crs if crs else points_df.crs if points_df.crs else DEF_CRS, - inplace=True, - drop=True) + if isinstance( + df_poly.loc[0, "_-geometry-prov"], str + ): # fails for empty `points_df` + df_poly["_-geometry-prov"] = shapely.wkt.loads(df_poly["_-geometry-prov"]) + + df_poly.set_geometry( + "_-geometry-prov", + crs=crs if crs else points_df.crs if points_df.crs else DEF_CRS, + inplace=True, + drop=True, + ) # renormalize longitude if necessary if equal_crs(df_poly.crs, DEF_CRS): - xmin, ymin, xmax, ymax = latlon_bounds(points_df['latitude'].values, - points_df['longitude'].values) + xmin, ymin, xmax, ymax = latlon_bounds( + points_df["latitude"].values, points_df["longitude"].values + ) x_mid = 0.5 * (xmin + xmax) # we don't really change the CRS when rewrapping, so we reset the CRS attribute afterwards - df_poly = df_poly \ - .to_crs({"proj": "longlat", "lon_wrap": x_mid}) \ - .set_crs(DEF_CRS, allow_override=True) + df_poly = df_poly.to_crs({"proj": "longlat", "lon_wrap": x_mid}).set_crs( + DEF_CRS, allow_override=True + ) else: - xmin, ymin, xmax, ymax = (points_df['longitude'].min(), points_df['latitude'].min(), - points_df['longitude'].max(), points_df['latitude'].max()) + xmin, ymin, xmax, ymax = ( + points_df["longitude"].min(), + points_df["latitude"].min(), + points_df["longitude"].max(), + points_df["latitude"].max(), + ) # construct raster - rows, cols, ras_trans = pts_to_raster_meta((xmin, ymin, xmax, ymax), - (raster_res, -raster_res)) + rows, cols, ras_trans = pts_to_raster_meta( + (xmin, ymin, xmax, ymax), (raster_res, -raster_res) + ) raster_out = np.zeros((len(val_names), rows, cols)) # TODO: parallel rasterize @@ -2455,16 +2710,18 @@ def apply_box(df_exp): transform=ras_trans, fill=0, all_touched=True, - dtype=rasterio.float32) + dtype=rasterio.float32, + ) meta = { - 'crs': df_poly.crs, - 'height': rows, - 'width': cols, - 'transform': ras_trans, + "crs": df_poly.crs, + "height": rows, + "width": cols, 
+ "transform": ras_trans, } return raster_out, meta + def subraster_from_bounds(transform, bounds): """Compute a subraster definition from a given reference transform and bounds. @@ -2494,16 +2751,30 @@ def subraster_from_bounds(transform, bounds): # align the window bounds to the raster by rounding col_min, col_max = np.round(window.col_off), np.round(window.col_off + window.width) - row_min, row_max = np.round(window.row_off), np.round(window.row_off + window.height) - window = rasterio.windows.Window(col_min, row_min, col_max - col_min, row_max - row_min) + row_min, row_max = np.round(window.row_off), np.round( + window.row_off + window.height + ) + window = rasterio.windows.Window( + col_min, row_min, col_max - col_min, row_max - row_min + ) dst_transform = rasterio.windows.transform(window, transform) dst_shape = (int(window.height), int(window.width)) return dst_transform, dst_shape -def align_raster_data(source, src_crs, src_transform, dst_crs=None, dst_resolution=None, - dst_bounds=None, global_origin=(-180, 90), resampling="nearest", - conserve=None, **kwargs): + +def align_raster_data( + source, + src_crs, + src_transform, + dst_crs=None, + dst_resolution=None, + dst_bounds=None, + global_origin=(-180, 90), + resampling="nearest", + conserve=None, + **kwargs, +): """Reproject 2D np.ndarray to be aligned to a reference grid. This function ensures that reprojected data with the same dst_resolution and global_origins are @@ -2577,14 +2848,16 @@ def align_raster_data(source, src_crs, src_transform, dst_crs=None, dst_resoluti destination = np.zeros(dst_shape, dtype=source.dtype) try: - rasterio.warp.reproject(source=source, - destination=destination, - src_transform=src_transform, - src_crs=src_crs, - dst_transform=dst_transform, - dst_crs=dst_crs, - resampling=resampling, - **kwargs) + rasterio.warp.reproject( + source=source, + destination=destination, + src_transform=src_transform, + src_crs=src_crs, + dst_transform=dst_transform, + dst_crs=dst_crs, + resampling=resampling, + **kwargs, + ) except Exception as raster_exc: # rasterio doesn't expose all of their error classes # in particular: rasterio._err.CPLE_AppDefinedError @@ -2592,16 +2865,17 @@ def align_raster_data(source, src_crs, src_transform, dst_crs=None, dst_resoluti # e.g. in litpop._get_litpop_single_polygon raise ValueError(raster_exc) from raster_exc - if conserve == 'mean': + if conserve == "mean": destination *= source.mean() / destination.mean() - elif conserve == 'sum': + elif conserve == "sum": destination *= source.sum() / destination.sum() - elif conserve == 'norm': + elif conserve == "norm": destination *= 1.0 / destination.sum() elif conserve is not None: raise ValueError(f"Invalid value for conserve: {conserve}") return destination, dst_transform + def mask_raster_with_geometry(raster, transform, shapes, nodata=None, **kwargs): """ Change values in `raster` that are outside of given `shapes` to `nodata`. @@ -2633,7 +2907,7 @@ def mask_raster_with_geometry(raster, transform, shapes, nodata=None, **kwargs): """ with rasterio.io.MemoryFile() as memfile: with memfile.open( - driver='GTiff', + driver="GTiff", height=raster.shape[0], width=raster.shape[1], count=1, @@ -2645,6 +2919,7 @@ def mask_raster_with_geometry(raster, transform, shapes, nodata=None, **kwargs): output, _ = rasterio.mask.mask(dataset, shapes, nodata=nodata, **kwargs) return output.squeeze(0) + def set_df_geometry_points(df_val, scheduler=None, crs=None): """Set given geometry to given dataframe using dask if scheduler. 
@@ -2660,16 +2935,22 @@ def set_df_geometry_points(df_val, scheduler=None, crs=None): crs : object (anything readable by pyproj4.CRS.from_user_input), optional Coordinate Reference System, if omitted or None: df_val.geometry.crs """ - LOGGER.info('Setting geometry points.') + LOGGER.info("Setting geometry points.") if scheduler is not None: - warnings.warn("This function does not use dask features anymore. The parameter has no" - " effect and will be removed in a future version.", DeprecationWarning) + warnings.warn( + "This function does not use dask features anymore. The parameter has no" + " effect and will be removed in a future version.", + DeprecationWarning, + ) # keep the original crs if any - crs = df_val['crs'] if crs is None else crs # crs might now still be None + crs = df_val["crs"] if crs is None else crs # crs might now still be None - df_val.set_geometry(gpd.points_from_xy(df_val['longitude'], df_val['latitude']), - inplace=True, crs=crs) + df_val.set_geometry( + gpd.points_from_xy(df_val["longitude"], df_val["latitude"]), + inplace=True, + crs=crs, + ) def fao_code_def(): @@ -2685,8 +2966,8 @@ def fao_code_def(): # FAO_FILE2: contains FAO country codes and correstponding ISO3 Code # (http://www.fao.org/faostat/en/#definitions) fao_file = pd.read_csv(SYSTEM_DIR.joinpath("FAOSTAT_data_country_codes.csv")) - fao_code = getattr(fao_file, 'Country Code').values - fao_iso = (getattr(fao_file, 'ISO3 Code').values).tolist() + fao_code = getattr(fao_file, "Country Code").values + fao_iso = (getattr(fao_file, "ISO3 Code").values).tolist() # create a list of ISO3 codes and corresponding fao country codes iso_list = list() @@ -2698,6 +2979,7 @@ def fao_code_def(): return iso_list, faocode_list + def country_faocode2iso(input_fao): """Convert FAO country code to ISO numeric-3 codes. @@ -2724,6 +3006,7 @@ def country_faocode2iso(input_fao): return output_iso + def country_iso2faocode(input_iso): """Convert ISO numeric-3 codes to FAO country code. diff --git a/climada/util/dates_times.py b/climada/util/dates_times.py index 31de094a9..07882bf11 100644 --- a/climada/util/dates_times.py +++ b/climada/util/dates_times.py @@ -18,13 +18,16 @@ Define functions to handle dates and times in climada """ -import logging + import datetime as dt +import logging + import numpy as np import pandas as pd LOGGER = logging.getLogger(__name__) + def date_to_str(date): """Compute date string in ISO format from input datetime ordinal int. Parameters @@ -55,15 +58,16 @@ def str_to_date(date): int """ if isinstance(date, str): - year, mounth, day = (int(val) for val in date.split('-')) + year, mounth, day = (int(val) for val in date.split("-")) return dt.date(year, mounth, day).toordinal() all_date = [] for i_date in date: - year, mounth, day = (int(val) for val in i_date.split('-')) + year, mounth, day = (int(val) for val in i_date.split("-")) all_date.append(dt.date(year, mounth, day).toordinal()) return all_date + def datetime64_to_ordinal(datetime): """Converts from a numpy datetime64 object to an ordinal date. See https://stackoverflow.com/a/21916253 for the horrible details. 
@@ -81,6 +85,7 @@ def datetime64_to_ordinal(datetime): return [pd.to_datetime(i_dt.tolist()).toordinal() for i_dt in datetime] + def last_year(ordinal_vector): """Extract first year from ordinal date @@ -95,6 +100,7 @@ def last_year(ordinal_vector): """ return dt.date.fromordinal(np.max(ordinal_vector)).year + def first_year(ordinal_vector): """Extract first year from ordinal date diff --git a/climada/util/dwd_icon_loader.py b/climada/util/dwd_icon_loader.py index 8878f63e7..ae03712c8 100644 --- a/climada/util/dwd_icon_loader.py +++ b/climada/util/dwd_icon_loader.py @@ -23,16 +23,17 @@ """ __all__ = [ - 'download_icon_grib', - 'delete_icon_grib', - 'download_icon_centroids_file', + "download_icon_grib", + "delete_icon_grib", + "download_icon_centroids_file", ] -import logging -from pathlib import Path import bz2 import datetime as dt +import logging +from pathlib import Path + import numpy as np from climada.util.config import CONFIG @@ -41,12 +42,13 @@ LOGGER = logging.getLogger(__name__) - -def download_icon_grib(run_datetime, - model_name='icon-eu-eps', - parameter_name='vmax_10m', - max_lead_time=None, - download_dir=None): +def download_icon_grib( + run_datetime, + model_name="icon-eu-eps", + parameter_name="vmax_10m", + max_lead_time=None, + download_dir=None, +): """download the gribfiles of a weather forecast run for a certain weather parameter from opendata.dwd.de/weather/nwp/. @@ -76,16 +78,21 @@ def download_icon_grib(run_datetime, the input parameters """ - LOGGER.info('Downloading icon grib files of model %s for parameter %s with starting date %s.', - model_name, parameter_name, run_datetime.strftime('%Y%m%d%H')) - - url, file_name, lead_times = _create_icon_grib_name(run_datetime, - model_name, - parameter_name, - max_lead_time) - download_path = CONFIG.local_data.save_dir.dir() if download_dir is None else Path(download_dir) - - #download all files + LOGGER.info( + "Downloading icon grib files of model %s for parameter %s with starting date %s.", + model_name, + parameter_name, + run_datetime.strftime("%Y%m%d%H"), + ) + + url, file_name, lead_times = _create_icon_grib_name( + run_datetime, model_name, parameter_name, max_lead_time + ) + download_path = ( + CONFIG.local_data.save_dir.dir() if download_dir is None else Path(download_dir) + ) + + # download all files file_names = [] for lead_i in lead_times: file_name_i = file_name.format(lead_i=lead_i) @@ -94,29 +101,32 @@ def download_icon_grib(run_datetime, # download file if it does not exist already if not bz2_pathfile_i.exists(): try: - download_file(url + file_name_i, - download_dir=download_dir) + download_file(url + file_name_i, download_dir=download_dir) except Exception as err: err_msg = "" - if run_datetime > (dt.datetime.utcnow()-dt.timedelta(hours=6)): - err_msg += (f'Forecast file {file_name_i} might not yet be available ' - f'on {url}. Wait a few hours. ') - elif run_datetime < (dt.datetime.utcnow() - -dt.timedelta(hours=24)): - err_msg += (f'Forecast file {file_name_i} might no longer be available ' - f'on {url}. Files are only openly available for 24 hours. ') + if run_datetime > (dt.datetime.utcnow() - dt.timedelta(hours=6)): + err_msg += ( + f"Forecast file {file_name_i} might not yet be available " + f"on {url}. Wait a few hours. " + ) + elif run_datetime < (dt.datetime.utcnow() - dt.timedelta(hours=24)): + err_msg += ( + f"Forecast file {file_name_i} might no longer be available " + f"on {url}. Files are only openly available for 24 hours. 
" + ) err_msg += f"Error while downloading {url + file_name_i}: " raise type(err)(err_msg + str(err)) from err file_names.append(str(bz2_pathfile_i)) return file_names - -def delete_icon_grib(run_datetime, - model_name='icon-eu-eps', - parameter_name='vmax_10m', - max_lead_time=None, - download_dir=None): +def delete_icon_grib( + run_datetime, + model_name="icon-eu-eps", + parameter_name="vmax_10m", + max_lead_time=None, + download_dir=None, +): """delete the downloaded gribfiles of a weather forecast run for a certain weather parameter from opendata.dwd.de/weather/nwp/. @@ -139,26 +149,30 @@ def delete_icon_grib(run_datetime, are stored at the moment """ - _, file_name, lead_times = _create_icon_grib_name(run_datetime, - model_name, - parameter_name, - max_lead_time) - download_path = CONFIG.local_data.save_dir.dir() if download_dir is None else Path(download_dir) - #delete all files + _, file_name, lead_times = _create_icon_grib_name( + run_datetime, model_name, parameter_name, max_lead_time + ) + download_path = ( + CONFIG.local_data.save_dir.dir() if download_dir is None else Path(download_dir) + ) + # delete all files for lead_i in lead_times: file_name_i = file_name.format(lead_i=lead_i) full_path_name_i = download_path.absolute().joinpath(file_name_i) if full_path_name_i.exists(): full_path_name_i.unlink() else: - LOGGER.warning('File %s does not exist and could not be deleted.', - full_path_name_i) + LOGGER.warning( + "File %s does not exist and could not be deleted.", full_path_name_i + ) -def _create_icon_grib_name(run_datetime, - model_name='icon-eu-eps', - parameter_name='vmax_10m', - max_lead_time=None): +def _create_icon_grib_name( + run_datetime, + model_name="icon-eu-eps", + parameter_name="vmax_10m", + max_lead_time=None, +): """create all parameters to download or delete gribfiles of a weather forecast run for a certain weather parameter from opendata.dwd.de/weather/nwp/. 
@@ -189,70 +203,76 @@ def _create_icon_grib_name(run_datetime, in hours, which are available for download """ # define defaults of the url for each model and parameter combination - if (model_name == 'icon-eu-eps') & (parameter_name == 'vmax_10m'): - file_extension = '_europe_icosahedral_single-level_' - #this string completes the filename on the server - file_extension_2 = '' #this string completes the filename on the server - max_lead_time_default = 120 # maximum available data - lead_times = np.concatenate((np.arange(1, 49), - np.arange(51, 73, 3), - np.arange(78, 121, 6) - )) - elif (model_name == 'icon-d2-eps') & (parameter_name == 'vmax_10m'): - file_extension = '_germany_icosahedral_single-level_' - #this string completes the filename on the server - file_extension_2 = '_2d' #this string completes the filename on the server - max_lead_time_default = 48 # maximum available data - lead_times = np.concatenate((np.arange(1, 49), - )) - elif model_name == 'test': - file_extension = '_storm_europe_icon_' #this string completes the filename on the server - file_extension_2 = '' #this string completes the filename on the server - max_lead_time_default = 2 # maximum available data - lead_times = np.concatenate((np.arange(1, 49), - np.arange(51, 73, 3), - np.arange(78, 121, 6) - )) + if (model_name == "icon-eu-eps") & (parameter_name == "vmax_10m"): + file_extension = "_europe_icosahedral_single-level_" + # this string completes the filename on the server + file_extension_2 = "" # this string completes the filename on the server + max_lead_time_default = 120 # maximum available data + lead_times = np.concatenate( + (np.arange(1, 49), np.arange(51, 73, 3), np.arange(78, 121, 6)) + ) + elif (model_name == "icon-d2-eps") & (parameter_name == "vmax_10m"): + file_extension = "_germany_icosahedral_single-level_" + # this string completes the filename on the server + file_extension_2 = "_2d" # this string completes the filename on the server + max_lead_time_default = 48 # maximum available data + lead_times = np.concatenate((np.arange(1, 49),)) + elif model_name == "test": + file_extension = ( + "_storm_europe_icon_" # this string completes the filename on the server + ) + file_extension_2 = "" # this string completes the filename on the server + max_lead_time_default = 2 # maximum available data + lead_times = np.concatenate( + (np.arange(1, 49), np.arange(51, 73, 3), np.arange(78, 121, 6)) + ) else: - raise ValueError(f'Download for model {model_name} and parameter {parameter_name} ' - 'is not yet implemented. ' - 'Please define the default values in the code first.') + raise ValueError( + f"Download for model {model_name} and parameter {parameter_name} " + "is not yet implemented. " + "Please define the default values in the code first." 
+ ) # create the url for download - url = ('https://opendata.dwd.de/weather/nwp/' + - model_name + - '/grib/' + - run_datetime.strftime('%H') + - '/' + - parameter_name + - '/') - file_name = (model_name + - file_extension + - run_datetime.strftime('%Y%m%d%H') + - '_' + - '{lead_i:03}' + - file_extension_2 + - '_' + - parameter_name + - '.grib2.bz2') - + url = ( + "https://opendata.dwd.de/weather/nwp/" + + model_name + + "/grib/" + + run_datetime.strftime("%H") + + "/" + + parameter_name + + "/" + ) + file_name = ( + model_name + + file_extension + + run_datetime.strftime("%Y%m%d%H") + + "_" + + "{lead_i:03}" + + file_extension_2 + + "_" + + parameter_name + + ".grib2.bz2" + ) # define the leadtimes - if not max_lead_time: + if not max_lead_time: max_lead_time = max_lead_time_default elif max_lead_time > max_lead_time_default: - LOGGER.warning('Parameter max_lead_time %s is bigger than maximum ' - 'available files. max_lead_time is adjusted to %s.', - max_lead_time, max_lead_time_default) + LOGGER.warning( + "Parameter max_lead_time %s is bigger than maximum " + "available files. max_lead_time is adjusted to %s.", + max_lead_time, + max_lead_time_default, + ) max_lead_time = max_lead_time_default - lead_times = lead_times[lead_times<=max_lead_time] + lead_times = lead_times[lead_times <= max_lead_time] return url, file_name, lead_times -def download_icon_centroids_file(model_name='icon-eu-eps', - download_dir = None): - """ create centroids based on netcdf files provided by dwd, links +def download_icon_centroids_file(model_name="icon-eu-eps", download_dir=None): + """create centroids based on netcdf files provided by dwd, links found here: https://www.dwd.de/DE/leistungen/opendata/neuigkeiten/opendata_dez2018_02.html https://www.dwd.de/DE/leistungen/opendata/neuigkeiten/opendata_aug2020_01.html @@ -274,32 +294,35 @@ def download_icon_centroids_file(model_name='icon-eu-eps', """ # define url and filename - url = 'https://opendata.dwd.de/weather/lib/cdo/' - if model_name == 'icon-eu-eps': - file_name = 'icon_grid_0028_R02B07_N02.nc.bz2' - elif model_name == 'icon-eu': - file_name = 'icon_grid_0024_R02B06_G.nc.bz2' - elif model_name in ('icon-d2-eps', 'icon-d2'): - file_name = 'icon_grid_0047_R19B07_L.nc.bz2' - elif model_name == 'test': - file_name = 'test_storm_europe_icon_grid.nc.bz2' + url = "https://opendata.dwd.de/weather/lib/cdo/" + if model_name == "icon-eu-eps": + file_name = "icon_grid_0028_R02B07_N02.nc.bz2" + elif model_name == "icon-eu": + file_name = "icon_grid_0024_R02B06_G.nc.bz2" + elif model_name in ("icon-d2-eps", "icon-d2"): + file_name = "icon_grid_0047_R19B07_L.nc.bz2" + elif model_name == "test": + file_name = "test_storm_europe_icon_grid.nc.bz2" else: - raise ValueError(f'Creation of centroids for the icon model {model_name} ' - 'is not yet implemented. Please define ' - 'the default values in the code first.') - download_path = CONFIG.local_data.save_dir.dir() if download_dir is None else Path(download_dir) + raise ValueError( + f"Creation of centroids for the icon model {model_name} " + "is not yet implemented. Please define " + "the default values in the code first." 
+ ) + download_path = ( + CONFIG.local_data.save_dir.dir() if download_dir is None else Path(download_dir) + ) bz2_pathfile = download_path.absolute().joinpath(file_name) - nc_pathfile = bz2_pathfile.with_suffix('') + nc_pathfile = bz2_pathfile.with_suffix("") # download and unzip file if not nc_pathfile.exists(): if not bz2_pathfile.exists(): try: - download_file(url + file_name, - download_dir=download_path) + download_file(url + file_name, download_dir=download_path) except ValueError as err: - raise ValueError(f'Error while downloading {url + file_name}.') from err - with open(bz2_pathfile, 'rb') as source, open(nc_pathfile, 'wb') as dest: + raise ValueError(f"Error while downloading {url + file_name}.") from err + with open(bz2_pathfile, "rb") as source, open(nc_pathfile, "wb") as dest: dest.write(bz2.decompress(source.read())) bz2_pathfile.unlink() diff --git a/climada/util/earth_engine.py b/climada/util/earth_engine.py index fdc136d4d..2a35755e5 100644 --- a/climada/util/earth_engine.py +++ b/climada/util/earth_engine.py @@ -48,7 +48,7 @@ def obtain_image_landsat_composite(landsat_collection, time_range, area): Returns ------- image_composite : ee.image.Image - """ + """ collection = ee.ImageCollection(landsat_collection) # Filter by time range and location @@ -57,6 +57,7 @@ def obtain_image_landsat_composite(landsat_collection, time_range, area): image_composite = ee.Algorithms.Landsat.simpleComposite(image_area, 75, 3) return image_composite + def obtain_image_median(collection, time_range, area): """Selection of median from a collection of images in the Earth Engine library See also: https://developers.google.com/earth-engine/reducers_image_collection @@ -73,7 +74,7 @@ def obtain_image_median(collection, time_range, area): Returns ------- image_median : ee.image.Image - """ + """ collection = ee.ImageCollection(collection) # Filter by time range and location @@ -82,6 +83,7 @@ def obtain_image_median(collection, time_range, area): image_median = image_area.median() return image_median + def obtain_image_sentinel(sentinel_collection, time_range, area): """Selection of median, cloud-free image from a collection of images in the Sentinel 2 dataset See also: https://developers.google.com/earth-engine/datasets/catalog/COPERNICUS_S2 @@ -98,20 +100,25 @@ def obtain_image_sentinel(sentinel_collection, time_range, area): Returns ------- sentinel_median : ee.image.Image - """ -# First, method to remove cloud from the image + """ + + # First, method to remove cloud from the image def maskclouds(image): - band_qa = image.select('QA60') + band_qa = image.select("QA60") cloud_mask = ee.Number(2).pow(10).int() cirrus_mask = ee.Number(2).pow(11).int() - mask = band_qa.bitwiseAnd(cloud_mask).eq(0) and (band_qa.bitwiseAnd(cirrus_mask).eq(0)) + mask = band_qa.bitwiseAnd(cloud_mask).eq(0) and ( + band_qa.bitwiseAnd(cirrus_mask).eq(0) + ) return image.updateMask(mask).divide(10000) - sentinel_filtered = (ee.ImageCollection(sentinel_collection). - filterBounds(area). - filterDate(time_range[0], time_range[1]). - filter(ee.Filter.lt('CLOUDY_PIXEL_PERCENTAGE', 20)). 
- map(maskclouds)) + sentinel_filtered = ( + ee.ImageCollection(sentinel_collection) + .filterBounds(area) + .filterDate(time_range[0], time_range[1]) + .filter(ee.Filter.lt("CLOUDY_PIXEL_PERCENTAGE", 20)) + .map(maskclouds) + ) sentinel_median = sentinel_filtered.median() return sentinel_median @@ -139,6 +146,7 @@ def get_region(geom): region = geom return region + def get_url(name, image, scale, region): """It will open and download automatically a zip folder containing Geotiff data of 'image'. If additional parameters are needed, see also: @@ -158,12 +166,8 @@ def get_url(name, image, scale, region): Returns ------- path : str - """ - path = image.getDownloadURL({ - 'name': (name), - 'scale': scale, - 'region': (region) - }) + """ + path = image.getDownloadURL({"name": (name), "scale": scale, "region": (region)}) webbrowser.open_new_tab(path) return path diff --git a/climada/util/files_handler.py b/climada/util/files_handler.py index 03b49a0a5..7f45762e7 100644 --- a/climada/util/files_handler.py +++ b/climada/util/files_handler.py @@ -20,8 +20,8 @@ """ __all__ = [ - 'to_list', - 'get_file_names', + "to_list", + "get_file_names", ] import glob @@ -40,6 +40,7 @@ class DownloadProgressBar(tqdm): """Class to use progress bar during dowloading""" + def update_to(self, blocks=1, bsize=1, tsize=None): """Update progress bar @@ -77,10 +78,12 @@ def download_file(url, download_dir=None, overwrite=True): str the full path to the eventually downloaded file """ - file_name = url.split('/')[-1] - if file_name.strip() == '': + file_name = url.split("/")[-1] + if file_name.strip() == "": raise ValueError(f"cannot download {url} as a file") - download_path = CONFIG.local_data.save_dir.dir() if download_dir is None else Path(download_dir) + download_path = ( + CONFIG.local_data.save_dir.dir() if download_dir is None else Path(download_dir) + ) file_path = download_path.absolute().joinpath(file_name) if file_path.exists(): if not file_path.is_file() or not overwrite: @@ -89,20 +92,25 @@ def download_file(url, download_dir=None, overwrite=True): try: req_file = requests.get(url, stream=True) except IOError as ioe: - raise type(ioe)('Check URL and internet connection: ' + str(ioe)) from ioe + raise type(ioe)("Check URL and internet connection: " + str(ioe)) from ioe if req_file.status_code < 200 or req_file.status_code > 299: - raise ValueError(f'Error loading page {url}\n' - f' Status: {req_file.status_code}\n' - f' Content: {req_file.content}') + raise ValueError( + f"Error loading page {url}\n" + f" Status: {req_file.status_code}\n" + f" Content: {req_file.content}" + ) - total_size = int(req_file.headers.get('content-length', 0)) + total_size = int(req_file.headers.get("content-length", 0)) block_size = 1024 - LOGGER.info('Downloading %s to file %s', url, file_path) - with file_path.open('wb') as file: - for data in tqdm(req_file.iter_content(block_size), - total=math.ceil(total_size // block_size), - unit='KB', unit_scale=True): + LOGGER.info("Downloading %s to file %s", url, file_path) + with file_path.open("wb") as file: + for data in tqdm( + req_file.iter_content(block_size), + total=math.ceil(total_size // block_size), + unit="KB", + unit_scale=True, + ): file.write(data) return str(file_path) @@ -122,10 +130,11 @@ def download_ftp(url, file_name): ------ ValueError """ - LOGGER.info('Downloading file %s', file_name) + LOGGER.info("Downloading file %s", file_name) try: - with DownloadProgressBar(unit='B', unit_scale=True, miniters=1, - desc=url.split('/')[-1]) as prog_bar: + with 
DownloadProgressBar( + unit="B", unit_scale=True, miniters=1, desc=url.split("/")[-1] + ) as prog_bar: urllib.request.urlretrieve(url, file_name, reporthook=prog_bar.update_to) except Exception as exc: raise ValueError( @@ -156,7 +165,7 @@ def to_list(num_exp, values, val_name): return values if len(values) == 1: return num_exp * [values[0]] - raise ValueError(f'Provide one or {num_exp} {val_name}.') + raise ValueError(f"Provide one or {num_exp} {val_name}.") def get_file_names(file_name): @@ -208,5 +217,5 @@ def get_extension(file_name): str, str """ file_path = Path(file_name) - cuts = file_path.name.split('.') + cuts = file_path.name.split(".") return str(file_path.parent.joinpath(cuts[0])), "".join(file_path.suffixes) diff --git a/climada/util/finance.py b/climada/util/finance.py index f12b5e000..58be8b227 100644 --- a/climada/util/finance.py +++ b/climada/util/finance.py @@ -18,65 +18,70 @@ Finance functionalities. """ -__all__ = ['net_present_value', 'income_group', 'gdp'] -import shutil +__all__ = ["net_present_value", "income_group", "gdp"] + import logging +import shutil import warnings import zipfile - from pathlib import Path -import requests import numpy as np import pandas as pd -from pandas_datareader import wb +import requests from cartopy.io import shapereader +from pandas_datareader import wb -from climada.util.files_handler import download_file from climada.util.constants import SYSTEM_DIR - +from climada.util.files_handler import download_file LOGGER = logging.getLogger(__name__) -WORLD_BANK_WEALTH_ACC = \ -"https://databank.worldbank.org/data/download/Wealth-Accounts_CSV.zip" +WORLD_BANK_WEALTH_ACC = ( + "https://databank.worldbank.org/data/download/Wealth-Accounts_CSV.zip" +) """Wealth historical data (1995, 2000, 2005, 2010, 2014) from World Bank (ZIP). 
https://datacatalog.worldbank.org/dataset/wealth-accounting Includes variable Produced Capital (NW.PCA.TO)""" FILE_WORLD_BANK_WEALTH_ACC = "Wealth-AccountsData.csv" -WORLD_BANK_INC_GRP = \ -"http://databank.worldbank.org/data/download/site-content/OGHIST.xls" +WORLD_BANK_INC_GRP = ( + "http://databank.worldbank.org/data/download/site-content/OGHIST.xls" +) """Income group historical data from World bank.""" -INCOME_GRP_WB_TABLE = {'L': 1, # low income - 'LM': 2, # lower middle income - 'UM': 3, # upper middle income - 'H': 4, # high income - '..': np.nan # no data - } +INCOME_GRP_WB_TABLE = { + "L": 1, # low income + "LM": 2, # lower middle income + "UM": 3, # upper middle income + "H": 4, # high income + "..": np.nan, # no data +} """Meaning of values of world banks' historical table on income groups.""" -INCOME_GRP_NE_TABLE = {5: 1, # Low income - 4: 2, # Lower middle income - 3: 3, # Upper middle income - 2: 4, # High income: nonOECD - 1: 4 # High income: OECD - } +INCOME_GRP_NE_TABLE = { + 5: 1, # Low income + 4: 2, # Lower middle income + 3: 3, # Upper middle income + 2: 4, # High income: nonOECD + 1: 4, # High income: OECD +} """Meaning of values of natural earth's income groups.""" -FILE_GWP_WEALTH2GDP_FACTORS = 'WEALTH2GDP_factors_CRI_2016.csv' +FILE_GWP_WEALTH2GDP_FACTORS = "WEALTH2GDP_factors_CRI_2016.csv" """File with wealth-to-GDP factors from the Credit Suisse's Global Wealth Report 2017 (household wealth)""" -def _nat_earth_shp(resolution='10m', category='cultural', - name='admin_0_countries'): - shp_file = shapereader.natural_earth(resolution=resolution, - category=category, name=name) + +def _nat_earth_shp(resolution="10m", category="cultural", name="admin_0_countries"): + shp_file = shapereader.natural_earth( + resolution=resolution, category=category, name=name + ) return shapereader.Reader(shp_file) + def net_present_value(years, disc_rates, val_years): """Compute net present value. @@ -94,7 +99,9 @@ def net_present_value(years, disc_rates, val_years): float """ if years.size != disc_rates.size or years.size != val_years.size: - raise ValueError(f'Wrong input sizes {years.size}, {disc_rates.size}, {val_years.size}.') + raise ValueError( + f"Wrong input sizes {years.size}, {disc_rates.size}, {val_years.size}." + ) npv = val_years[-1] for val, disc in zip(val_years[-2::-1], disc_rates[-2::-1]): @@ -102,6 +109,7 @@ def net_present_value(years, disc_rates, val_years): return npv + def income_group(cntry_iso, ref_year, shp_file=None): """Get country's income group from World Bank's data at a given year, or closest year value. If no data, get the natural earth's approximation. @@ -118,15 +126,17 @@ def income_group(cntry_iso, ref_year, shp_file=None): if not provided. """ try: - close_year, close_val = world_bank(cntry_iso, ref_year, 'INC_GRP') + close_year, close_val = world_bank(cntry_iso, ref_year, "INC_GRP") except (KeyError, IndexError): # take value from natural earth repository - close_year, close_val = nat_earth_adm0(cntry_iso, 'INCOME_GRP', - shp_file=shp_file) + close_year, close_val = nat_earth_adm0( + cntry_iso, "INCOME_GRP", shp_file=shp_file + ) - LOGGER.info('Income group %s %s: %s.', cntry_iso, close_year, close_val) + LOGGER.info("Income group %s %s: %s.", cntry_iso, close_year, close_val) return close_year, close_val + def gdp(cntry_iso, ref_year, shp_file=None, per_capita=False): """Get country's (current value) GDP from World Bank's data at a given year, or closest year value. If no data, get the natural earth's approximation. 
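For orientation, the backward-discounting loop in net_present_value() above can be traced with a small, purely hypothetical series (numbers chosen for illustration only, not part of this patch):

import numpy as np

# three hypothetical yearly values of 100, discounted at a constant 5 %
years = np.array([2025, 2026, 2027])
disc_rates = np.array([0.05, 0.05, 0.05])
val_years = np.array([100.0, 100.0, 100.0])

# same recursion as in net_present_value(): start from the last year and
# discount each earlier year's value by 1 / (1 + disc)
npv = val_years[-1]
for val, disc in zip(val_years[-2::-1], disc_rates[-2::-1]):
    npv = val + npv / (1 + disc)
print(round(npv, 2))  # 285.94, i.e. 100 + (100 + 100/1.05) / 1.05

The same number is returned by climada.util.finance.net_present_value(years, disc_rates, val_years).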
@@ -148,24 +158,29 @@ def gdp(cntry_iso, ref_year, shp_file=None, per_capita=False): ------- float """ - if cntry_iso == 'TWN': - LOGGER.warning('GDP data for TWN is not provided by World Bank. \ - Instead, IMF data is returned here.') + if cntry_iso == "TWN": + LOGGER.warning( + "GDP data for TWN is not provided by World Bank. \ + Instead, IMF data is returned here." + ) close_year, close_val = _gdp_twn(ref_year, per_capita=per_capita) return close_year, close_val try: if per_capita: - close_year, close_val = world_bank(cntry_iso, ref_year, 'NY.GDP.PCAP.CD') + close_year, close_val = world_bank(cntry_iso, ref_year, "NY.GDP.PCAP.CD") else: - close_year, close_val = world_bank(cntry_iso, ref_year, 'NY.GDP.MKTP.CD') + close_year, close_val = world_bank(cntry_iso, ref_year, "NY.GDP.MKTP.CD") except (ValueError, IndexError, requests.exceptions.ConnectionError) as err: if isinstance(err, requests.exceptions.ConnectionError): - LOGGER.warning('Internet connection failed while retrieving GDPs.') - close_year, close_val = nat_earth_adm0(cntry_iso, 'GDP_MD', 'GDP_YEAR', shp_file) + LOGGER.warning("Internet connection failed while retrieving GDPs.") + close_year, close_val = nat_earth_adm0( + cntry_iso, "GDP_MD", "GDP_YEAR", shp_file + ) LOGGER.info("GDP {} {:d}: {:.3e}.".format(cntry_iso, close_year, close_val)) return close_year, close_val + def world_bank(cntry_iso, ref_year, info_ind): """Get country's GDP from World Bank's data at a given year, or closest year value. If no data, get the natural earth's approximation. @@ -188,38 +203,47 @@ def world_bank(cntry_iso, ref_year, info_ind): ------ IOError, KeyError, IndexError """ - if info_ind != 'INC_GRP': + if info_ind != "INC_GRP": with warnings.catch_warnings(): warnings.simplefilter("ignore") - cntry_gdp = wb.download(indicator=info_ind, country=cntry_iso, start=1960, end=2030) - years = np.array([int(year) for year in cntry_gdp.index.get_level_values('year')]) + cntry_gdp = wb.download( + indicator=info_ind, country=cntry_iso, start=1960, end=2030 + ) + years = np.array( + [int(year) for year in cntry_gdp.index.get_level_values("year")] + ) sort_years = np.abs(years - ref_year).argsort() close_val = cntry_gdp.iloc[sort_years].dropna() close_year = int(close_val.iloc[0].name[1]) close_val = float(close_val.iloc[0].values) else: # income group level - fn_ig = SYSTEM_DIR.joinpath('OGHIST.xls') + fn_ig = SYSTEM_DIR.joinpath("OGHIST.xls") dfr_wb = pd.DataFrame() try: if not fn_ig.is_file(): file_down = download_file(WORLD_BANK_INC_GRP) shutil.move(file_down, fn_ig) - dfr_wb = pd.read_excel(fn_ig, 'Country Analytical History', skiprows=5) - dfr_wb = dfr_wb.drop(dfr_wb.index[0:5]).set_index('Unnamed: 0') - dfr_wb = dfr_wb.replace(INCOME_GRP_WB_TABLE.keys(), - INCOME_GRP_WB_TABLE.values()) + dfr_wb = pd.read_excel(fn_ig, "Country Analytical History", skiprows=5) + dfr_wb = dfr_wb.drop(dfr_wb.index[0:5]).set_index("Unnamed: 0") + dfr_wb = dfr_wb.replace( + INCOME_GRP_WB_TABLE.keys(), INCOME_GRP_WB_TABLE.values() + ) except (IOError, requests.exceptions.ConnectionError) as err: - raise type(err)('Internet connection failed while downloading ' - 'historical income groups: ' + str(err)) from err + raise type(err)( + "Internet connection failed while downloading " + "historical income groups: " + str(err) + ) from err cntry_dfr = dfr_wb.loc[cntry_iso] - close_val = cntry_dfr.iloc[np.abs( - np.array(cntry_dfr.index[1:]) - ref_year).argsort() + 1].dropna() + close_val = cntry_dfr.iloc[ + np.abs(np.array(cntry_dfr.index[1:]) - ref_year).argsort() + 1 + 
].dropna() close_year = close_val.index[0] close_val = int(close_val.iloc[0]) return close_year, close_val + def nat_earth_adm0(cntry_iso, info_name, year_name=None, shp_file=None): """Get country's parameter from natural earth's admin0 shape file. @@ -246,12 +270,12 @@ def nat_earth_adm0(cntry_iso, info_name, year_name=None, shp_file=None): ValueError """ if not shp_file: - shp_file = _nat_earth_shp('10m', 'cultural', 'admin_0_countries') + shp_file = _nat_earth_shp("10m", "cultural", "admin_0_countries") close_val = 0 close_year = 0 for info in shp_file.records(): - if info.attributes['ADM0_A3'] == cntry_iso: + if info.attributes["ADM0_A3"] == cntry_iso: close_val = info.attributes[info_name] if year_name: close_year = int(info.attributes[year_name]) @@ -261,15 +285,17 @@ def nat_earth_adm0(cntry_iso, info_name, year_name=None, shp_file=None): raise ValueError("No GDP for country %s found." % cntry_iso) # the variable name changed in Natural Earth v5.0.0 - if info_name in ['GDP_MD', 'GDP_MD_EST']: + if info_name in ["GDP_MD", "GDP_MD_EST"]: close_val *= 1e6 - elif info_name == 'INCOME_GRP': + elif info_name == "INCOME_GRP": close_val = INCOME_GRP_NE_TABLE.get(int(close_val[0])) return close_year, close_val -def wealth2gdp(cntry_iso, non_financial=True, ref_year=2016, - file_name=FILE_GWP_WEALTH2GDP_FACTORS): + +def wealth2gdp( + cntry_iso, non_financial=True, ref_year=2016, file_name=FILE_GWP_WEALTH2GDP_FACTORS +): """Get country's wealth-to-GDP factor from the Credit Suisse's Global Wealth Report 2017 (household wealth). Missing value: returns NaN. @@ -289,32 +315,39 @@ def wealth2gdp(cntry_iso, non_financial=True, ref_year=2016, float """ fname = SYSTEM_DIR.joinpath(file_name) - factors_all_countries = pd.read_csv(fname, sep=',', index_col=None, - header=0, encoding='ISO-8859-1') + factors_all_countries = pd.read_csv( + fname, sep=",", index_col=None, header=0, encoding="ISO-8859-1" + ) if ref_year != 2016: - LOGGER.warning('Reference year for the factor to convert GDP to ' - 'wealth was set to 2016 because other years have not ' - 'been implemented yet.') + LOGGER.warning( + "Reference year for the factor to convert GDP to " + "wealth was set to 2016 because other years have not " + "been implemented yet." + ) ref_year = 2016 if non_financial: try: val = factors_all_countries[ - factors_all_countries['country_iso3'] == cntry_iso]['NFW-to-GDP-ratio'].values[0] + factors_all_countries["country_iso3"] == cntry_iso + ]["NFW-to-GDP-ratio"].values[0] except (AttributeError, KeyError, IndexError): - LOGGER.warning('No data for country, using mean factor.') + LOGGER.warning("No data for country, using mean factor.") val = factors_all_countries["NFW-to-GDP-ratio"].mean() else: try: val = factors_all_countries[ - factors_all_countries['country_iso3'] == cntry_iso]['TW-to-GDP-ratio'].values[0] + factors_all_countries["country_iso3"] == cntry_iso + ]["TW-to-GDP-ratio"].values[0] except (AttributeError, KeyError, IndexError): - LOGGER.warning('No data for country, using mean factor.') + LOGGER.warning("No data for country, using mean factor.") val = factors_all_countries["TW-to-GDP-ratio"].mean() val = np.around(val, 5) return ref_year, val -def world_bank_wealth_account(cntry_iso, ref_year, variable_name="NW.PCA.TO", - no_land=True): + +def world_bank_wealth_account( + cntry_iso, ref_year, variable_name="NW.PCA.TO", no_land=True +): """ Download and unzip wealth accounting historical data (1995, 2000, 2005, 2010, 2014) from World Bank (https://datacatalog.worldbank.org/dataset/wealth-accounting). 
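As a rough usage sketch for the two helpers reformatted above (hypothetical country and year; gdp() queries the World Bank API, so an internet connection is needed, and wealth2gdp() expects the factors CSV to be present in SYSTEM_DIR):

from climada.util.finance import gdp, wealth2gdp

# nominal GDP of Switzerland closest to 2015, and the 2016 non-financial
# wealth-to-GDP factor; multiplying the two mirrors the fallback branch of
# world_bank_wealth_account() further down in this file
gdp_year, gdp_val = gdp("CHE", 2015)
_, factor = wealth2gdp("CHE")
print(gdp_year, round(factor * gdp_val, 1))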
@@ -357,28 +390,36 @@ def world_bank_wealth_account(cntry_iso, ref_year, variable_name="NW.PCA.TO", try: data_file = SYSTEM_DIR.joinpath(FILE_WORLD_BANK_WEALTH_ACC) if not data_file.is_file(): - data_file = SYSTEM_DIR.joinpath('Wealth-Accounts_CSV', FILE_WORLD_BANK_WEALTH_ACC) + data_file = SYSTEM_DIR.joinpath( + "Wealth-Accounts_CSV", FILE_WORLD_BANK_WEALTH_ACC + ) if not data_file.is_file(): - if not SYSTEM_DIR.joinpath('Wealth-Accounts_CSV').is_dir(): - SYSTEM_DIR.joinpath('Wealth-Accounts_CSV').mkdir() + if not SYSTEM_DIR.joinpath("Wealth-Accounts_CSV").is_dir(): + SYSTEM_DIR.joinpath("Wealth-Accounts_CSV").mkdir() file_down = download_file(WORLD_BANK_WEALTH_ACC) - zip_ref = zipfile.ZipFile(file_down, 'r') - zip_ref.extractall(SYSTEM_DIR.joinpath('Wealth-Accounts_CSV')) + zip_ref = zipfile.ZipFile(file_down, "r") + zip_ref.extractall(SYSTEM_DIR.joinpath("Wealth-Accounts_CSV")) zip_ref.close() Path(file_down).unlink() - LOGGER.debug('Download and unzip complete. Unzipping %s', str(data_file)) + LOGGER.debug("Download and unzip complete. Unzipping %s", str(data_file)) - data_wealth = pd.read_csv(data_file, sep=',', index_col=None, header=0) + data_wealth = pd.read_csv(data_file, sep=",", index_col=None, header=0) except Exception as err: raise type(err)( - 'Downloading World Bank Wealth Accounting Data failed: ' + str(err)) from err + "Downloading World Bank Wealth Accounting Data failed: " + str(err) + ) from err - data_wealth = data_wealth[data_wealth['Country Code'].str.contains(cntry_iso) - & data_wealth['Indicator Code'].str.contains(variable_name) - ].loc[:, '1995':'2014'] + data_wealth = data_wealth[ + data_wealth["Country Code"].str.contains(cntry_iso) + & data_wealth["Indicator Code"].str.contains(variable_name) + ].loc[:, "1995":"2014"] years = list(map(int, list(data_wealth))) - if data_wealth.size == 0 and 'NW.PCA.TO' in variable_name: # if country is not found in data - LOGGER.warning('No data available for country. Using non-financial wealth instead') + if ( + data_wealth.size == 0 and "NW.PCA.TO" in variable_name + ): # if country is not found in data + LOGGER.warning( + "No data available for country. Using non-financial wealth instead" + ) gdp_year, gdp_val = gdp(cntry_iso, ref_year) fac = wealth2gdp(cntry_iso)[1] return gdp_year, np.around((fac * gdp_val), 1), 0 @@ -396,11 +437,12 @@ def world_bank_wealth_account(cntry_iso, ref_year, variable_name="NW.PCA.TO", gdp_year, gdp_val = gdp(cntry_iso, ref_year) result = data_wealth.values[0, -1] * gdp_val / gdp0_val ref_year = gdp_year - if 'NW.PCA.' in variable_name and no_land: + if "NW.PCA." 
in variable_name and no_land: # remove value of built-up land from produced capital result = result / 1.24 return ref_year, np.around(result, 1), 1 + def _gdp_twn(ref_year, per_capita=False): """returns GDP for TWN (Republic of China / Taiwan Province of China) based on a CSV sheet downloaded from the @@ -424,23 +466,26 @@ def _gdp_twn(ref_year, per_capita=False): ------- float """ - fname = 'GDP_TWN_IMF_WEO_data.csv' + fname = "GDP_TWN_IMF_WEO_data.csv" if not SYSTEM_DIR.joinpath(fname).is_file(): - raise FileNotFoundError(f'File {fname} not found in SYSTEM_DIR') + raise FileNotFoundError(f"File {fname} not found in SYSTEM_DIR") if per_capita: - var_name = 'Gross domestic product per capita, current prices' + var_name = "Gross domestic product per capita, current prices" else: - var_name = 'Gross domestic product, current prices' + var_name = "Gross domestic product, current prices" if ref_year < 1980: close_year = 1980 elif ref_year > 2024: close_year = 2024 else: close_year = ref_year - data = pd.read_csv(SYSTEM_DIR.joinpath('GDP_TWN_IMF_WEO_data.csv'), - index_col=None, header=0) - close_val = data.loc[data['Subject Descriptor'] == var_name, str(close_year)].values[0] - close_val = float(close_val.replace(',', '')) + data = pd.read_csv( + SYSTEM_DIR.joinpath("GDP_TWN_IMF_WEO_data.csv"), index_col=None, header=0 + ) + close_val = data.loc[ + data["Subject Descriptor"] == var_name, str(close_year) + ].values[0] + close_val = float(close_val.replace(",", "")) if not per_capita: close_val = close_val * 1e9 return close_year, close_val diff --git a/climada/util/hdf5_handler.py b/climada/util/hdf5_handler.py index 08d0a4970..8408972bd 100644 --- a/climada/util/hdf5_handler.py +++ b/climada/util/hdf5_handler.py @@ -19,44 +19,47 @@ Functionalities to handle HDF5 files. Used for MATLAB files as well. """ -__all__ = ['read', - 'get_string', - 'get_str_from_ref', - 'get_list_str_from_ref', - 'get_sparse_csr_mat' - ] +__all__ = [ + "read", + "get_string", + "get_str_from_ref", + "get_list_str_from_ref", + "get_sparse_csr_mat", +] -from scipy import sparse -import numpy as np import h5py +import numpy as np +from scipy import sparse + def read(file_name, with_refs=False): """Load a hdf5 data structure from a file. - Parameters - ---------- - file_name : - file to load - with_refs : - enable loading of the references. Default is unset, - since it increments the execution time considerably. - - Returns - ------- - contents : - dictionary structure containing all the variables. - - Examples - -------- - >>> # Contents contains the Matlab data in a dictionary. - >>> contents = read("/pathto/dummy.mat") - >>> # Contents contains the Matlab data and its reference in a dictionary. - >>> contents = read("/pathto/dummy.mat", True) - - Raises - ------ - Exception while reading - """ + Parameters + ---------- + file_name : + file to load + with_refs : + enable loading of the references. Default is unset, + since it increments the execution time considerably. + + Returns + ------- + contents : + dictionary structure containing all the variables. + + Examples + -------- + >>> # Contents contains the Matlab data in a dictionary. + >>> contents = read("/pathto/dummy.mat") + >>> # Contents contains the Matlab data and its reference in a dictionary. 
+ >>> contents = read("/pathto/dummy.mat", True) + + Raises + ------ + Exception while reading + """ + def get_group(group): """Recursive function to get variables from a group.""" contents = {} @@ -70,81 +73,86 @@ def get_group(group): # other objects such as links are ignored return contents - with h5py.File(file_name, 'r') as file: + with h5py.File(file_name, "r") as file: return get_group(file) + def get_string(array): """Form string from input array of unisgned integers. - Parameters - ---------- - array : - array of integers + Parameters + ---------- + array : + array of integers - Returns - ------- - string + Returns + ------- + string """ - return ''.join(chr(int(c)) for c in array) + return "".join(chr(int(c)) for c in array) + def get_str_from_ref(file_name, var): """Form string from a reference HDF5 variable of the given file. - Parameters - ---------- - file_name : - matlab file name - var : - HDF5 reference variable + Parameters + ---------- + file_name : + matlab file name + var : + HDF5 reference variable - Returns - ------- - string + Returns + ------- + string """ - with h5py.File(file_name, 'r') as file: + with h5py.File(file_name, "r") as file: return get_string(file[var]) + def get_list_str_from_ref(file_name, var): """Form list of strings from a reference HDF5 variable of the given file. - Parameters - ---------- - file_name : - matlab file name - var : - array of HDF5 reference variable + Parameters + ---------- + file_name : + matlab file name + var : + array of HDF5 reference variable - Returns - ------- - string + Returns + ------- + string """ name_list = [] - with h5py.File(file_name, 'r') as file: + with h5py.File(file_name, "r") as file: for name in var: name_list.append(get_string(file[name[0]][:]).strip()) return name_list + def get_sparse_csr_mat(mat_dict, shape): """Form sparse matrix from input hdf5 sparse matrix data type. - Parameters - ---------- - mat_dict : - dictionary containing the sparse matrix information. - shape : - tuple describing output matrix shape. + Parameters + ---------- + mat_dict : + dictionary containing the sparse matrix information. + shape : + tuple describing output matrix shape. 
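The MATLAB-style sparse layout handled by get_sparse_csr_mat() can be illustrated with a tiny hand-built example (hypothetical values, scipy only):

import numpy as np
from scipy import sparse

# CSC fields as stored in MATLAB HDF5 files: non-zero values ("data"),
# row indices ("ir") and column pointers ("jc") for the matrix [[1, 0], [0, 2]]
mat_dict = {
    "data": np.array([1.0, 2.0]),
    "ir": np.array([0, 1]),
    "jc": np.array([0, 1, 2]),
}
mat = sparse.csc_matrix(
    (mat_dict["data"], mat_dict["ir"], mat_dict["jc"]), shape=(2, 2)
).tocsr()
print(mat.toarray())  # [[1. 0.] [0. 2.]]

This is exactly the conversion get_sparse_csr_mat(mat_dict, (2, 2)) performs after checking that all three keys are present.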
- Returns - ------- - sparse csr matrix + Returns + ------- + sparse csr matrix """ # Check if input has all the necessary data of a sparse matrix - if ('data' not in mat_dict) or ('ir' not in mat_dict) or \ - ('jc' not in mat_dict): - raise ValueError('Input data is not a sparse matrix.') + if ("data" not in mat_dict) or ("ir" not in mat_dict) or ("jc" not in mat_dict): + raise ValueError("Input data is not a sparse matrix.") + + return sparse.csc_matrix( + (mat_dict["data"], mat_dict["ir"], mat_dict["jc"]), shape + ).tocsr() - return sparse.csc_matrix((mat_dict['data'], mat_dict['ir'], - mat_dict['jc']), shape).tocsr() def to_string(str_or_bytes): """converts a bytes object into a string if necessary diff --git a/climada/util/interpolation.py b/climada/util/interpolation.py index c2e514797..4a230f9af 100644 --- a/climada/util/interpolation.py +++ b/climada/util/interpolation.py @@ -19,7 +19,6 @@ Define interpolation and extrapolation functions for calculating (local) exceedance frequencies and return periods """ - import logging import numpy as np @@ -29,17 +28,18 @@ LOGGER = logging.getLogger(__name__) + def interpolate_ev( - x_test, - x_train, - y_train, - logx = False, - logy = False, - x_threshold = None, - y_threshold = None, - extrapolation = False, - y_asymptotic = np.nan - ): + x_test, + x_train, + y_train, + logx=False, + logy=False, + x_threshold=None, + y_threshold=None, + extrapolation=False, + y_asymptotic=np.nan, +): """ Util function to interpolate (and extrapolate) training data (x_train, y_train) to new points x_test with several options (log scale, thresholds) @@ -80,38 +80,35 @@ def interpolate_ev( x_test, x_train, y_train, logx, logy, x_threshold, y_threshold ) - # handle case of small training data sizes + # handle case of small training data sizes if x_train.size < 2: - LOGGER.warning('Data is being extrapolated.') + LOGGER.warning("Data is being extrapolated.") return _interpolate_small_input(x_test, x_train, y_train, logy, y_asymptotic) # calculate fill values if extrapolation: - fill_value = 'extrapolate' + fill_value = "extrapolate" if np.min(x_test) < np.min(x_train) or np.max(x_test) > np.max(x_train): - LOGGER.warning('Data is being extrapolated.') + LOGGER.warning("Data is being extrapolated.") else: if not all(sorted(x_train) == x_train): - raise ValueError('x_train array must be sorted in ascending order.') - fill_value = (y_train[0], np.log10(y_asymptotic) if logy else y_asymptotic) + raise ValueError("x_train array must be sorted in ascending order.") + fill_value = (y_train[0], np.log10(y_asymptotic) if logy else y_asymptotic) interpolation = interpolate.interp1d( - x_train, y_train, fill_value=fill_value, bounds_error=False) + x_train, y_train, fill_value=fill_value, bounds_error=False + ) y_test = interpolation(x_test) # adapt output scale if logy: - y_test = np.power(10., y_test) + y_test = np.power(10.0, y_test) return y_test + def stepfunction_ev( - x_test, - x_train, - y_train, - x_threshold = None, - y_threshold = None, - y_asymptotic = np.nan - ): + x_test, x_train, y_train, x_threshold=None, y_threshold=None, y_asymptotic=np.nan +): """ Util function to interpolate and extrapolate training data (x_train, y_train) to new points x_test using a step function @@ -148,43 +145,42 @@ def stepfunction_ev( # find indices of x_test if sorted into x_train if not all(sorted(x_train) == x_train): - raise ValueError('Input array x_train must be sorted in ascending order.') + raise ValueError("Input array x_train must be sorted in ascending order.") indx = 
np.searchsorted(x_train, x_test) - y_test = y_train[indx.clip(max = len(x_train) - 1)] + y_test = y_train[indx.clip(max=len(x_train) - 1)] y_test[indx == len(x_train)] = y_asymptotic return y_test + def _preprocess_interpolation_data( - x_test, - x_train, - y_train, - logx, - logy, - x_threshold, - y_threshold - ): + x_test, x_train, y_train, logx, logy, x_threshold, y_threshold +): """ helper function to preprocess interpolation training and test data by filtering data below thresholds and converting to log scale if required """ if x_train.shape != y_train.shape: - raise ValueError(f'Incompatible shapes of input data, x_train {x_train.shape} ' - f'and y_train {y_train.shape}. Should be the same') + raise ValueError( + f"Incompatible shapes of input data, x_train {x_train.shape} " + f"and y_train {y_train.shape}. Should be the same" + ) # transform input to float arrays - x_test, x_train, y_train = (np.array(x_test).astype(float), - np.array(x_train).astype(float), - np.array(y_train).astype(float)) + x_test, x_train, y_train = ( + np.array(x_test).astype(float), + np.array(x_train).astype(float), + np.array(y_train).astype(float), + ) # cut x and y above threshold - if x_threshold or x_threshold==0: + if x_threshold or x_threshold == 0: x_th = np.asarray(x_train > x_threshold).squeeze() x_train = x_train[x_th] y_train = y_train[x_th] - if y_threshold or y_threshold==0: + if y_threshold or y_threshold == 0: y_th = np.asarray(y_train > y_threshold).squeeze() x_train = x_train[y_th] y_train = y_train[y_th] @@ -197,6 +193,7 @@ def _preprocess_interpolation_data( return (x_test, x_train, y_train) + def _interpolate_small_input(x_test, x_train, y_train, logy, y_asymptotic): """ helper function to handle if interpolation data is small (empty or one point) @@ -207,7 +204,7 @@ def _interpolate_small_input(x_test, x_train, y_train, logy, y_asymptotic): # reconvert logarithmic y_train to original y_train if logy: - y_train = np.power(10., y_train) + y_train = np.power(10.0, y_train) # if only one (x_train, y_train), return stepfunction with # y_train if x_test < x_train and y_asymtotic if x_test > x_train @@ -215,6 +212,7 @@ def _interpolate_small_input(x_test, x_train, y_train, logy, y_asymptotic): y_test[np.squeeze(x_test) > np.squeeze(x_train)] = y_asymptotic return y_test + def group_frequency(frequency, value, n_sig_dig=2): """ Util function to aggregate (add) frequencies for equal values @@ -240,15 +238,18 @@ def group_frequency(frequency, value, n_sig_dig=2): return ([], []) if len(value) != len(np.unique(sig_dig_list(value, n_sig_dig=n_sig_dig))): - #check ordering of value + # check ordering of value if not all(sorted(value) == value): - raise ValueError('Value array must be sorted in ascending order.') + raise ValueError("Value array must be sorted in ascending order.") # add frequency for equal value value, start_indices = np.unique( - sig_dig_list(value, n_sig_dig=n_sig_dig), return_index=True) + sig_dig_list(value, n_sig_dig=n_sig_dig), return_index=True + ) start_indices = np.insert(start_indices, len(value), len(frequency)) - frequency = np.array([ - sum(frequency[start_indices[i]:start_indices[i+1]]) - for i in range(len(value)) - ]) + frequency = np.array( + [ + sum(frequency[start_indices[i] : start_indices[i + 1]]) + for i in range(len(value)) + ] + ) return frequency, value diff --git a/climada/util/lines_polys_handler.py b/climada/util/lines_polys_handler.py index 22d27793e..244658b18 100755 --- a/climada/util/lines_polys_handler.py +++ b/climada/util/lines_polys_handler.py @@ 
-15,8 +15,9 @@ with CLIMADA. If not, see . """ -import logging + import copy +import logging from enum import Enum import cartopy.crs as ccrs @@ -41,8 +42,9 @@ class DisaggMethod(Enum): DIV : the geometry's distributed to equal parts over all its interpolated points FIX : the geometry's value is replicated over all its interpolated points """ - DIV = 'div' - FIX = 'fix' + + DIV = "div" + FIX = "fix" class AggMethod(Enum): @@ -51,12 +53,20 @@ class AggMethod(Enum): SUM : the impact is summed over all points in the polygon/line """ - SUM = 'sum' + + SUM = "sum" def calc_geom_impact( - exp, impf_set, haz, res, to_meters=False, disagg_met=DisaggMethod.DIV, - disagg_val=None, agg_met=AggMethod.SUM): + exp, + impf_set, + haz, + res, + to_meters=False, + disagg_met=DisaggMethod.DIV, + disagg_val=None, + agg_met=AggMethod.SUM, +): """ Compute impact for exposure with (multi-)polygons and/or (multi-)lines. Lat/Lon values in exp.gdf are ignored, only exp.gdf.geometry is considered. @@ -116,10 +126,12 @@ def calc_geom_impact( # disaggregate exposure exp_pnt = exp_geom_to_pnt( - exp=exp, res=res, - to_meters=to_meters, disagg_met=disagg_met, - disagg_val=disagg_val - ) + exp=exp, + res=res, + to_meters=to_meters, + disagg_met=disagg_met, + disagg_val=disagg_val, + ) exp_pnt.assign_centroids(haz) # compute point impact @@ -174,14 +186,16 @@ def impact_pnt_agg(impact_pnt, exp_pnt_gdf, agg_met): # add exposure representation points as coordinates repr_pnts = gpd.GeoSeries( - exp_pnt_gdf['geometry_orig'][:,0].apply( - lambda x: x.representative_point())) + exp_pnt_gdf["geometry_orig"][:, 0].apply(lambda x: x.representative_point()) + ) impact_agg.coord_exp = np.array([repr_pnts.y, repr_pnts.x]).transpose() # Add original geometries for plotting - impact_agg.geom_exp = exp_pnt_gdf.xs(0, level=1)\ - .set_geometry('geometry_orig')\ - .geometry.rename('geometry') + impact_agg.geom_exp = ( + exp_pnt_gdf.xs(0, level=1) + .set_geometry("geometry_orig") + .geometry.rename("geometry") + ) return impact_agg @@ -221,18 +235,24 @@ def _aggregate_impact_mat(imp_pnt, gdf_pnt, agg_met): mask = np.ones(len(col_geom)) else: raise NotImplementedError( - f'The available aggregation methods are {AggMethod._member_names_}') # pylint: disable=no-member, protected-access + f"The available aggregation methods are {AggMethod._member_names_}" + ) # pylint: disable=no-member, protected-access csr_mask = sp.sparse.csr_matrix( - (mask, (row_pnt, col_geom)), - shape=(len(row_pnt), len(np.unique(col_geom))) - ) + (mask, (row_pnt, col_geom)), shape=(len(row_pnt), len(np.unique(col_geom))) + ) return imp_pnt.imp_mat.dot(csr_mask) def calc_grid_impact( - exp, impf_set, haz, grid, disagg_met=DisaggMethod.DIV, disagg_val=None, - agg_met=AggMethod.SUM): + exp, + impf_set, + haz, + grid, + disagg_met=DisaggMethod.DIV, + disagg_val=None, + agg_met=AggMethod.SUM, +): """ Compute impact for exposure with (multi-)polygons and/or (multi-)lines. Lat/Lon values in exp.gdf are ignored, only exp.gdf.geometry is considered. 
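The point-to-geometry aggregation in _aggregate_impact_mat() above boils down to a sparse matrix product; a self-contained sketch with hypothetical numbers (two events, four interpolated points, two original geometries):

import numpy as np
from scipy import sparse

# hypothetical per-point impact matrix: 2 events x 4 interpolated points
imp_mat = sparse.csr_matrix(np.array([[1.0, 2.0, 3.0, 4.0],
                                      [0.0, 1.0, 0.0, 2.0]]))
row_pnt = np.arange(4)                # point index
col_geom = np.array([0, 0, 0, 1])     # original geometry of each point
mask = np.ones(len(col_geom))         # AggMethod.SUM: weight 1 per point
csr_mask = sparse.csr_matrix(
    (mask, (row_pnt, col_geom)), shape=(len(row_pnt), len(np.unique(col_geom)))
)
print(imp_mat.dot(csr_mask).toarray())  # [[6. 4.] [1. 2.]]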
@@ -288,13 +308,14 @@ def calc_grid_impact( # disaggregate exposure exp_pnt = exp_geom_to_grid( - exp=exp, grid= grid, disagg_met=disagg_met, - disagg_val=disagg_val - ) + exp=exp, grid=grid, disagg_met=disagg_met, disagg_val=disagg_val + ) exp_pnt.assign_centroids(haz) # compute point impact - impact_pnt = ImpactCalc(exp_pnt, impf_set, haz).impact(save_mat=True, assign_centroids=False) + impact_pnt = ImpactCalc(exp_pnt, impf_set, haz).impact( + save_mat=True, assign_centroids=False + ) # re-aggregate impact to original exposure geometry impact_agg = impact_pnt_agg(impact_pnt, exp_pnt.gdf, agg_met) @@ -324,22 +345,22 @@ def plot_eai_exp_geom(imp_geom, centered=False, figsize=(9, 13), **kwargs): matplotlib axes instance """ - kwargs['figsize'] = figsize - if 'legend_kwds' not in kwargs: - kwargs['legend_kwds'] = { - 'label': f"Impact [{imp_geom.unit}]", - 'orientation': "horizontal" - } - if 'legend' not in kwargs: - kwargs['legend'] = True + kwargs["figsize"] = figsize + if "legend_kwds" not in kwargs: + kwargs["legend_kwds"] = { + "label": f"Impact [{imp_geom.unit}]", + "orientation": "horizontal", + } + if "legend" not in kwargs: + kwargs["legend"] = True gdf_plot = gpd.GeoDataFrame(imp_geom.geom_exp) - gdf_plot['impact'] = imp_geom.eai_exp + gdf_plot["impact"] = imp_geom.eai_exp if centered: # pylint: disable=abstract-class-instantiated - xmin, xmax = u_coord.lon_bounds(imp_geom.coord_exp[:,1]) + xmin, xmax = u_coord.lon_bounds(imp_geom.coord_exp[:, 1]) proj_plot = ccrs.PlateCarree(central_longitude=0.5 * (xmin + xmax)) gdf_plot = gdf_plot.to_crs(proj_plot) - return gdf_plot.plot(column = 'impact', **kwargs) + return gdf_plot.plot(column="impact", **kwargs) def exp_geom_to_pnt(exp, res, to_meters, disagg_met, disagg_val): @@ -385,11 +406,13 @@ def exp_geom_to_pnt(exp, res, to_meters, disagg_met, disagg_val): if disagg_val is not None: exp = exp.copy() - exp.gdf['value'] = disagg_val + exp.gdf["value"] = disagg_val - if ((disagg_val is None) and ('value' not in exp.gdf.columns)): - raise ValueError('There is no value column in the exposure gdf to'+ - ' disaggregate from. Please set disagg_val explicitly.') + if (disagg_val is None) and ("value" not in exp.gdf.columns): + raise ValueError( + "There is no value column in the exposure gdf to" + + " disaggregate from. Please set disagg_val explicitly." + ) gdf_pnt = gdf_to_pnts(exp.gdf, res, to_meters) @@ -445,11 +468,13 @@ def exp_geom_to_grid(exp, grid, disagg_met, disagg_val): if disagg_val is not None: exp = exp.copy() - exp.gdf['value'] = disagg_val + exp.gdf["value"] = disagg_val - if ((disagg_val is None) and ('value' not in exp.gdf.columns)): - raise ValueError('There is no value column in the exposure gdf to'+ - ' disaggregate from. Please set disagg_val explicitly.') + if (disagg_val is None) and ("value" not in exp.gdf.columns): + raise ValueError( + "There is no value column in the exposure gdf to" + + " disaggregate from. Please set disagg_val explicitly." 
+ ) gdf_pnt = gdf_to_grid(exp.gdf, grid) @@ -479,13 +504,13 @@ def _pnt_line_poly_mask(gdf): ------- pnt_mask, line_mask, poly_mask : """ - pnt_mask = gdf.geometry.apply(lambda x: isinstance(x, shgeom.Point)) + pnt_mask = gdf.geometry.apply(lambda x: isinstance(x, shgeom.Point)) - line_mask = gdf.geometry.apply(lambda x: isinstance(x, shgeom.LineString)) - line_mask |= gdf.geometry.apply(lambda x: isinstance(x, shgeom.MultiLineString)) + line_mask = gdf.geometry.apply(lambda x: isinstance(x, shgeom.LineString)) + line_mask |= gdf.geometry.apply(lambda x: isinstance(x, shgeom.MultiLineString)) - poly_mask = gdf.geometry.apply(lambda x: isinstance(x, shgeom.Polygon)) - poly_mask |= gdf.geometry.apply(lambda x: isinstance(x, shgeom.MultiPolygon)) + poly_mask = gdf.geometry.apply(lambda x: isinstance(x, shgeom.Polygon)) + poly_mask |= gdf.geometry.apply(lambda x: isinstance(x, shgeom.MultiPolygon)) return pnt_mask, line_mask, poly_mask @@ -525,23 +550,18 @@ def gdf_to_pnts(gdf, res, to_meters): gdf_pnt = gpd.GeoDataFrame([]) if pnt_mask.any(): gdf_pnt_only = gdf[pnt_mask] - gdf_pnt_only['geometry_orig'] = gdf_pnt_only['geometry'].copy() + gdf_pnt_only["geometry_orig"] = gdf_pnt_only["geometry"].copy() index = gdf_pnt_only.index.values gdf_pnt_only.index = pd.MultiIndex.from_arrays([index, np.zeros(len(index))]) - gdf_pnt = gpd.GeoDataFrame(pd.concat([ - gdf_pnt, - gdf_pnt_only - ])) + gdf_pnt = gpd.GeoDataFrame(pd.concat([gdf_pnt, gdf_pnt_only])) if line_mask.any(): - gdf_pnt = gpd.GeoDataFrame(pd.concat([ - gdf_pnt, - _line_to_pnts(gdf[line_mask], res, to_meters) - ])) + gdf_pnt = gpd.GeoDataFrame( + pd.concat([gdf_pnt, _line_to_pnts(gdf[line_mask], res, to_meters)]) + ) if poly_mask.any(): - gdf_pnt = gpd.GeoDataFrame(pd.concat([ - gdf_pnt, - _poly_to_pnts(gdf[poly_mask], res, to_meters) - ])) + gdf_pnt = gpd.GeoDataFrame( + pd.concat([gdf_pnt, _poly_to_pnts(gdf[poly_mask], res, to_meters)]) + ) return gdf_pnt @@ -583,10 +603,12 @@ def gdf_to_grid(gdf, grid): # Concatenating an empty dataframe with an index together with # a dataframe with a multi-index breaks the multi-index - if (line_mask.any() or pnt_mask.any()): - raise AttributeError("The dataframe contains lines and/or polygons." - "Currently only polygon dataframes can be " - "disaggregated onto a fixed grid.") + if line_mask.any() or pnt_mask.any(): + raise AttributeError( + "The dataframe contains lines and/or polygons." + "Currently only polygon dataframes can be " + "disaggregated onto a fixed grid." 
+ ) if poly_mask.any(): return _poly_to_grid(gdf[poly_mask], grid) @@ -615,10 +637,10 @@ def _disagg_values_div(gdf_pnts): gdf_disagg = gdf_pnts.copy(deep=False) group = gdf_pnts.groupby(axis=0, level=0) - vals = group['value'].mean() / group['value'].count() + vals = group["value"].mean() / group["value"].count() vals = vals.reindex(gdf_pnts.index, level=0) - gdf_disagg['value'] = vals + gdf_disagg["value"] = vals return gdf_disagg @@ -652,20 +674,23 @@ def _poly_to_pnts(gdf, res, to_meters): return gdf # Needed because gdf.explode(index_parts=True) requires numeric index - idx = gdf.index.to_list() #To restore the naming of the index + idx = gdf.index.to_list() # To restore the naming of the index gdf_points = gdf.copy().reset_index(drop=True) # Check if we need to reproject if to_meters and not gdf.geometry.crs.is_projected: - gdf_points['geometry_pnt'] = gdf_points.apply( - lambda row: _interp_one_poly_m(row.geometry, res, gdf.crs), axis=1) + gdf_points["geometry_pnt"] = gdf_points.apply( + lambda row: _interp_one_poly_m(row.geometry, res, gdf.crs), axis=1 + ) else: - gdf_points['geometry_pnt'] = gdf_points.apply( - lambda row: _interp_one_poly(row.geometry, res), axis=1) + gdf_points["geometry_pnt"] = gdf_points.apply( + lambda row: _interp_one_poly(row.geometry, res), axis=1 + ) gdf_points = _swap_geom_cols( - gdf_points, geom_to='geometry_orig', new_geom='geometry_pnt') + gdf_points, geom_to="geometry_orig", new_geom="geometry_pnt" + ) gdf_points = gdf_points.explode(index_parts=True) gdf_points.index = gdf_points.index.set_levels(idx, level=0) @@ -699,16 +724,18 @@ def _poly_to_grid(gdf, grid): return gdf # Needed because gdf.explode(index_parts=True) requires numeric index - idx = gdf.index.to_list() #To restore the naming of the index + idx = gdf.index.to_list() # To restore the naming of the index gdf_points = gdf.copy().reset_index(drop=True) x_grid, y_grid = grid - gdf_points['geometry_pnt'] = gdf_points.apply( - lambda row: _interp_one_poly_grid(row.geometry, x_grid, y_grid), axis=1) + gdf_points["geometry_pnt"] = gdf_points.apply( + lambda row: _interp_one_poly_grid(row.geometry, x_grid, y_grid), axis=1 + ) gdf_points = _swap_geom_cols( - gdf_points, geom_to='geometry_orig', new_geom='geometry_pnt') + gdf_points, geom_to="geometry_orig", new_geom="geometry_pnt" + ) gdf_points = gdf_points.explode(index_parts=True) gdf_points.index = gdf_points.index.set_levels(idx, level=0) @@ -743,7 +770,7 @@ def _interp_one_poly_grid(poly, x_grid, y_grid): if sum(in_geom.flatten()) > 1: return shgeom.MultiPoint(list(zip(x_grid[in_geom], y_grid[in_geom]))) - LOGGER.warning('Polygon smaller than resolution. Setting a representative point.') + LOGGER.warning("Polygon smaller than resolution. Setting a representative point.") return shgeom.MultiPoint([poly.representative_point()]) @@ -775,7 +802,7 @@ def _interp_one_poly(poly, res): if sum(in_geom.flatten()) > 1: return shgeom.MultiPoint(list(zip(x_grid[in_geom], y_grid[in_geom]))) - LOGGER.warning('Polygon smaller than resolution. Setting a representative point.') + LOGGER.warning("Polygon smaller than resolution. 
Setting a representative point.") return shgeom.MultiPoint([poly.representative_point()]) @@ -812,10 +839,11 @@ def _interp_one_poly_m(poly, res, orig_crs): in_geom = sh.vectorized.contains(poly_m, x_grid, y_grid) if sum(in_geom.flatten()) > 1: x_poly, y_poly = reproject_grid( - x_grid[in_geom], y_grid[in_geom], m_crs, orig_crs) + x_grid[in_geom], y_grid[in_geom], m_crs, orig_crs + ) return shgeom.MultiPoint(list(zip(x_poly, y_poly))) - LOGGER.warning('Polygon smaller than resolution. Setting a representative point.') + LOGGER.warning("Polygon smaller than resolution. Setting a representative point.") return shgeom.MultiPoint([poly.representative_point()]) @@ -835,9 +863,9 @@ def _get_pyproj_trafo(orig_crs, dest_crs): """ Get pyproj projection from orig_crs to dest_crs """ - return pyproj.Transformer.from_proj(pyproj.Proj(orig_crs), - pyproj.Proj(dest_crs), - always_xy=True) + return pyproj.Transformer.from_proj( + pyproj.Proj(orig_crs), pyproj.Proj(dest_crs), always_xy=True + ) def reproject_grid(x_grid, y_grid, orig_crs, dest_crs): @@ -889,7 +917,6 @@ def reproject_poly(poly, orig_crs, dest_crs): def _line_to_pnts(gdf_lines, res, to_meters): - """ Convert a GeoDataFrame with LineString geometries to Point geometries, where Points are placed at a specified distance @@ -922,7 +949,7 @@ def _line_to_pnts(gdf_lines, res, to_meters): return gdf_lines # Needed because gdf.explode(index_parts=True) requires numeric index - idx = gdf_lines.index.to_list() #To restore the naming of the index + idx = gdf_lines.index.to_list() # To restore the naming of the index gdf_points = gdf_lines.copy().reset_index(drop=True) if to_meters: @@ -931,31 +958,28 @@ def _line_to_pnts(gdf_lines, res, to_meters): line_lengths = gdf_lines.length # Add warning if lines are too short w.r.t. resolution - failing_res_check_count = len(line_lengths[line_lengths > 10*res]) + failing_res_check_count = len(line_lengths[line_lengths > 10 * res]) if failing_res_check_count > 0: LOGGER.warning( "%d lines with a length < 10*resolution were found. " "Each of these lines is disaggregate to one point. " "Reaggregatint values will thus likely lead to overestimattion. " "Consider chosing a smaller resolution or filter out the short lines. ", - failing_res_check_count - ) - - line_fractions = [ - _line_fraction(length, res) - for length in line_lengths - ] - - gdf_points['geometry_pnt'] = [ - shgeom.MultiPoint([ - line.interpolate(dist, normalized=True) - for dist in fractions - ]) + failing_res_check_count, + ) + + line_fractions = [_line_fraction(length, res) for length in line_lengths] + + gdf_points["geometry_pnt"] = [ + shgeom.MultiPoint( + [line.interpolate(dist, normalized=True) for dist in fractions] + ) for line, fractions in zip(gdf_points.geometry, line_fractions) - ] + ] gdf_points = _swap_geom_cols( - gdf_points, geom_to='geometry_orig', new_geom='geometry_pnt') + gdf_points, geom_to="geometry_orig", new_geom="geometry_pnt" + ) gdf_points = gdf_points.explode(index_parts=True) gdf_points.index = gdf_points.index.set_levels(idx, level=0) @@ -985,6 +1009,7 @@ def _line_fraction(length, res): start = eff_res / 2 return np.arange(start, 1, eff_res) + def _pnts_per_line(length, res): """Calculate number of points fitting along a line, given a certain resolution (spacing) res between points. 
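The point placement used in _line_to_pnts() above reduces to shapely's normalized interpolate(); a minimal sketch with a hypothetical line and hand-picked fractions (here the mid-points of four equal segments, in the spirit of what _line_fraction() produces):

import shapely.geometry as shgeom

# hypothetical 10-unit line; fractions are the mid-points of four equal segments
line = shgeom.LineString([(0, 0), (10, 0)])
fractions = [0.125, 0.375, 0.625, 0.875]
points = shgeom.MultiPoint(
    [line.interpolate(dist, normalized=True) for dist in fractions]
)
print([pnt.x for pnt in points.geoms])  # [1.25, 3.75, 6.25, 8.75]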
@@ -1021,9 +1046,9 @@ def _swap_geom_cols(gdf, geom_to, new_geom): gdf_swap : gpd.GeoDataFrame Copy of gdf with the new geometry column """ - gdf_swap = gdf.rename(columns = {'geometry': geom_to}) - gdf_swap.rename(columns = {new_geom: 'geometry'}, inplace=True) - gdf_swap.set_geometry('geometry', inplace=True, crs=gdf.crs) + gdf_swap = gdf.rename(columns={"geometry": geom_to}) + gdf_swap.rename(columns={new_geom: "geometry"}, inplace=True) + gdf_swap.set_geometry("geometry", inplace=True, crs=gdf.crs) return gdf_swap diff --git a/climada/util/plot.py b/climada/util/plot.py index e404da2de..92c97ad36 100644 --- a/climada/util/plot.py +++ b/climada/util/plot.py @@ -18,37 +18,39 @@ Define auxiliary functions for plots. """ + # pylint: disable=abstract-class-instantiated -__all__ = ['geo_bin_from_array', - 'geo_im_from_array', - 'make_map', - 'add_shapes', - 'add_populated_places', - 'add_cntry_names' - ] +__all__ = [ + "geo_bin_from_array", + "geo_im_from_array", + "make_map", + "add_shapes", + "add_populated_places", + "add_cntry_names", +] import logging -from textwrap import wrap import warnings +from textwrap import wrap -from scipy.interpolate import griddata -import numpy as np -import matplotlib.pyplot as plt +import cartopy.crs as ccrs +import geopandas as gpd import matplotlib as mpl +import matplotlib.pyplot as plt +import numpy as np +import requests +from cartopy.io import shapereader +from cartopy.mpl.gridliner import LATITUDE_FORMATTER, LONGITUDE_FORMATTER from matplotlib import colormaps as cm from mpl_toolkits.axes_grid1 import make_axes_locatable -from shapely.geometry import box -import cartopy.crs as ccrs -from cartopy.io import shapereader -from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER from rasterio.crs import CRS -import requests -import geopandas as gpd +from scipy.interpolate import griddata +from shapely.geometry import box -from climada.util.constants import CMAP_EXPOSURES, CMAP_CAT, CMAP_RASTER -from climada.util.files_handler import to_list import climada.util.coordinates as u_coord +from climada.util.constants import CMAP_CAT, CMAP_EXPOSURES, CMAP_RASTER +from climada.util.files_handler import to_list LOGGER = logging.getLogger(__name__) @@ -62,10 +64,21 @@ """Maximum number of bins in geo_bin_from_array""" -def geo_bin_from_array(array_sub, geo_coord, var_name, title, - pop_name=True, buffer=BUFFER, extend='neither', - proj=ccrs.PlateCarree(), shapes=True, axes=None, - figsize=(9, 13), adapt_fontsize=True, **kwargs): +def geo_bin_from_array( + array_sub, + geo_coord, + var_name, + title, + pop_name=True, + buffer=BUFFER, + extend="neither", + proj=ccrs.PlateCarree(), + shapes=True, + axes=None, + figsize=(9, 13), + adapt_fontsize=True, + **kwargs, +): """Plot array values binned over input coordinates. 
Parameters @@ -114,16 +127,39 @@ def geo_bin_from_array(array_sub, geo_coord, var_name, title, ValueError: Input array size missmatch """ - return _plot_scattered_data("hexbin", array_sub, geo_coord, var_name, title, - pop_name=pop_name, buffer=buffer, extend=extend, - proj=proj, shapes=shapes, axes=axes, - figsize=figsize, adapt_fontsize=adapt_fontsize, **kwargs) + return _plot_scattered_data( + "hexbin", + array_sub, + geo_coord, + var_name, + title, + pop_name=pop_name, + buffer=buffer, + extend=extend, + proj=proj, + shapes=shapes, + axes=axes, + figsize=figsize, + adapt_fontsize=adapt_fontsize, + **kwargs, + ) -def geo_scatter_from_array(array_sub, geo_coord, var_name, title, - pop_name=False, buffer=BUFFER, extend='neither', - proj=ccrs.PlateCarree(), shapes=True, axes=None, - figsize=(9, 13), adapt_fontsize=True, **kwargs): +def geo_scatter_from_array( + array_sub, + geo_coord, + var_name, + title, + pop_name=False, + buffer=BUFFER, + extend="neither", + proj=ccrs.PlateCarree(), + shapes=True, + axes=None, + figsize=(9, 13), + adapt_fontsize=True, + **kwargs, +): """Plot array values at input coordinates. Parameters @@ -171,38 +207,65 @@ def geo_scatter_from_array(array_sub, geo_coord, var_name, title, ValueError: Input array size missmatch """ - return _plot_scattered_data("scatter", array_sub, geo_coord, var_name, title, - pop_name=pop_name, buffer=buffer, extend=extend, - proj=proj, shapes=shapes, axes=axes, - figsize=figsize, adapt_fontsize=adapt_fontsize, **kwargs) + return _plot_scattered_data( + "scatter", + array_sub, + geo_coord, + var_name, + title, + pop_name=pop_name, + buffer=buffer, + extend=extend, + proj=proj, + shapes=shapes, + axes=axes, + figsize=figsize, + adapt_fontsize=adapt_fontsize, + **kwargs, + ) -def _plot_scattered_data(method, array_sub, geo_coord, var_name, title, - pop_name=False, buffer=BUFFER, extend='neither', - proj=ccrs.PlateCarree(), shapes=True, axes=None, - figsize=(9, 13), adapt_fontsize=True, **kwargs): +def _plot_scattered_data( + method, + array_sub, + geo_coord, + var_name, + title, + pop_name=False, + buffer=BUFFER, + extend="neither", + proj=ccrs.PlateCarree(), + shapes=True, + axes=None, + figsize=(9, 13), + adapt_fontsize=True, + **kwargs, +): """Function for internal use in `geo_scatter_from_array` (when called with method="scatter") and `geo_bin_from_array` (when called with method="hexbin"). 
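As a quick smoke test of the scatter variant (hypothetical coordinates and values; plotting needs cartopy and, on first use, an internet connection to fetch the Natural Earth shapes):

import numpy as np
from climada.util.plot import geo_scatter_from_array

# three hypothetical values at (lat, lon) positions; rows of geo_coord are
# interpreted as [latitude, longitude], as in _plot_scattered_data()
values = np.array([0.2, 0.5, 0.9])
coords = np.array([[47.4, 8.5], [46.2, 6.1], [46.0, 8.9]])
axes = geo_scatter_from_array(values, coords, "value [-]", "toy scatter map")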
See the docstrings of the respective functions for more information on the parameters.""" # Generate array of values used in each subplot num_im, list_arr = _get_collection_arrays(array_sub) - list_tit = to_list(num_im, title, 'title') - list_name = to_list(num_im, var_name, 'var_name') - list_coord = to_list(num_im, geo_coord, 'geo_coord') + list_tit = to_list(num_im, title, "title") + list_name = to_list(num_im, var_name, "var_name") + list_coord = to_list(num_im, geo_coord, "geo_coord") - if 'cmap' not in kwargs: - kwargs['cmap'] = CMAP_EXPOSURES + if "cmap" not in kwargs: + kwargs["cmap"] = CMAP_EXPOSURES if axes is None: proj_plot = proj if isinstance(proj, ccrs.PlateCarree): # for PlateCarree, center plot around data's central lon # without overwriting the data's original projection info - xmin, xmax = u_coord.lon_bounds(np.concatenate([c[:, 1] for c in list_coord])) + xmin, xmax = u_coord.lon_bounds( + np.concatenate([c[:, 1] for c in list_coord]) + ) proj_plot = ccrs.PlateCarree(central_longitude=0.5 * (xmin + xmax)) - _, axes, fontsize = make_map(num_im, proj=proj_plot, figsize=figsize, - adapt_fontsize=adapt_fontsize) + _, axes, fontsize = make_map( + num_im, proj=proj_plot, figsize=figsize, adapt_fontsize=adapt_fontsize + ) else: fontsize = None axes_iter = axes @@ -210,17 +273,24 @@ def _plot_scattered_data(method, array_sub, geo_coord, var_name, title, axes_iter = np.array([[axes]]) # Generate each subplot - for array_im, axis, tit, name, coord in \ - zip(list_arr, axes_iter.flatten(), list_tit, list_name, list_coord): + for array_im, axis, tit, name, coord in zip( + list_arr, axes_iter.flatten(), list_tit, list_name, list_coord + ): if coord.shape[0] != array_im.size: - raise ValueError(f"Size mismatch in input array: {coord.shape[0]} != {array_im.size}.") + raise ValueError( + f"Size mismatch in input array: {coord.shape[0]} != {array_im.size}." 
+ ) # Binned image with coastlines if isinstance(proj, ccrs.PlateCarree): - xmin, ymin, xmax, ymax = u_coord.latlon_bounds(coord[:, 0], coord[:, 1], buffer=buffer) + xmin, ymin, xmax, ymax = u_coord.latlon_bounds( + coord[:, 0], coord[:, 1], buffer=buffer + ) extent = (xmin, xmax, ymin, ymax) else: - extent = _get_borders(coord, buffer=buffer, proj_limits=proj.x_limits + proj.y_limits) + extent = _get_borders( + coord, buffer=buffer, proj_limits=proj.x_limits + proj.y_limits + ) axis.set_extent((extent), proj) if shapes: @@ -229,18 +299,21 @@ def _plot_scattered_data(method, array_sub, geo_coord, var_name, title, add_populated_places(axis, extent, proj, fontsize) if method == "hexbin": - if 'gridsize' not in kwargs: - kwargs['gridsize'] = min(int(array_im.size / 2), MAX_BINS) - mappable = axis.hexbin(coord[:, 1], coord[:, 0], C=array_im, - transform=proj, **kwargs) + if "gridsize" not in kwargs: + kwargs["gridsize"] = min(int(array_im.size / 2), MAX_BINS) + mappable = axis.hexbin( + coord[:, 1], coord[:, 0], C=array_im, transform=proj, **kwargs + ) else: - mappable = axis.scatter(coord[:, 1], coord[:, 0], c=array_im, - transform=proj, **kwargs) + mappable = axis.scatter( + coord[:, 1], coord[:, 0], c=array_im, transform=proj, **kwargs + ) # Create colorbar in this axis cbax = make_axes_locatable(axis).append_axes( - 'right', size="6.5%", pad=0.1, axes_class=plt.Axes) - cbar = plt.colorbar(mappable, cax=cbax, orientation='vertical', extend=extend) + "right", size="6.5%", pad=0.1, axes_class=plt.Axes + ) + cbar = plt.colorbar(mappable, cax=cbax, orientation="vertical", extend=extend) cbar.set_label(name) axis.set_title("\n".join(wrap(tit))) if fontsize: @@ -252,9 +325,19 @@ def _plot_scattered_data(method, array_sub, geo_coord, var_name, title, return axes -def geo_im_from_array(array_sub, coord, var_name, title, - proj=None, smooth=True, shapes=True, axes=None, figsize=(9, 13), adapt_fontsize=True, - **kwargs): +def geo_im_from_array( + array_sub, + coord, + var_name, + title, + proj=None, + smooth=True, + shapes=True, + axes=None, + figsize=(9, 13), + adapt_fontsize=True, + **kwargs, +): """Image(s) plot defined in array(s) over input coordinates. 
Parameters @@ -298,10 +381,9 @@ def geo_im_from_array(array_sub, coord, var_name, title, # Generate array of values used in each subplot num_im, list_arr = _get_collection_arrays(array_sub) - list_tit = to_list(num_im, title, 'title') - list_name = to_list(num_im, var_name, 'var_name') - list_coord = to_list(num_im, coord, 'geo_coord') - + list_tit = to_list(num_im, title, "title") + list_name = to_list(num_im, var_name, "var_name") + list_coord = to_list(num_im, coord, "geo_coord") is_reg, height, width = u_coord.grid_is_regular(coord) extent = _get_borders(coord, proj_limits=(-360, 360, -90, 90)) @@ -313,20 +395,23 @@ def geo_im_from_array(array_sub, coord, var_name, title, if "norm" in kwargs: min_value = kwargs["norm"].vmin else: - kwargs['vmin'] = kwargs.get("vmin", np.nanmin(array_sub)) - min_value = kwargs['vmin'] - kwargs['vmax'] = kwargs.get("vmax", np.nanmax(array_sub)) - min_value = min_value/2 if min_value > 0 else min_value-1 + kwargs["vmin"] = kwargs.get("vmin", np.nanmin(array_sub)) + min_value = kwargs["vmin"] + kwargs["vmax"] = kwargs.get("vmax", np.nanmax(array_sub)) + min_value = min_value / 2 if min_value > 0 else min_value - 1 if axes is None: proj_plot = proj if isinstance(proj, ccrs.PlateCarree): # for PlateCarree, center plot around data's central lon # without overwriting the data's original projection info - xmin, xmax = u_coord.lon_bounds(np.concatenate([c[:, 1] for c in list_coord])) + xmin, xmax = u_coord.lon_bounds( + np.concatenate([c[:, 1] for c in list_coord]) + ) proj_plot = ccrs.PlateCarree(central_longitude=0.5 * (xmin + xmax)) - _, axes, fontsize = make_map(num_im, proj=proj_plot, figsize=figsize, - adapt_fontsize=adapt_fontsize) + _, axes, fontsize = make_map( + num_im, proj=proj_plot, figsize=figsize, adapt_fontsize=adapt_fontsize + ) else: fontsize = None axes_iter = axes @@ -339,19 +424,25 @@ def geo_im_from_array(array_sub, coord, var_name, title, cmap.set_under("white", alpha=0) # For values below vmin # Generate each subplot - for array_im, axis, tit, name in zip(list_arr, axes_iter.flatten(), list_tit, list_name): + for array_im, axis, tit, name in zip( + list_arr, axes_iter.flatten(), list_tit, list_name + ): if coord.shape[0] != array_im.size: - raise ValueError(f"Size mismatch in input array: {coord.shape[0]} != {array_im.size}.") + raise ValueError( + f"Size mismatch in input array: {coord.shape[0]} != {array_im.size}." 
+ ) if smooth or not is_reg: # Create regular grid where to interpolate the array grid_x, grid_y = np.mgrid[ - extent[0]: extent[1]: complex(0, RESOLUTION), - extent[2]: extent[3]: complex(0, RESOLUTION)] + extent[0] : extent[1] : complex(0, RESOLUTION), + extent[2] : extent[3] : complex(0, RESOLUTION), + ] grid_im = griddata( (coord[:, 1], coord[:, 0]), array_im, (grid_x, grid_y), - fill_value=min_value) + fill_value=min_value, + ) else: grid_x = coord[:, 1].reshape((width, height)).transpose() grid_y = coord[:, 0].reshape((width, height)).transpose() @@ -360,24 +451,26 @@ def geo_im_from_array(array_sub, coord, var_name, title, grid_y = np.flip(grid_y) grid_im = np.flip(grid_im, 1) grid_im = np.resize(grid_im, (height, width, 1)) - axis.set_extent((extent[0] - mid_lon, extent[1] - mid_lon, - extent[2], extent[3]), crs=proj) + axis.set_extent( + (extent[0] - mid_lon, extent[1] - mid_lon, extent[2], extent[3]), crs=proj + ) # Add coastline to axis if shapes: add_shapes(axis) # Create colormesh, colorbar and labels in axis - cbax = make_axes_locatable(axis).append_axes('right', size="6.5%", - pad=0.1, axes_class=plt.Axes) + cbax = make_axes_locatable(axis).append_axes( + "right", size="6.5%", pad=0.1, axes_class=plt.Axes + ) img = axis.pcolormesh( grid_x - mid_lon, grid_y, np.squeeze(grid_im), transform=proj, cmap=cmap, - **kwargs + **kwargs, ) - cbar = plt.colorbar(img, cax=cbax, orientation='vertical') + cbar = plt.colorbar(img, cax=cbax, orientation="vertical") cbar.set_label(name) axis.set_title("\n".join(wrap(tit))) if fontsize: @@ -390,8 +483,9 @@ def geo_im_from_array(array_sub, coord, var_name, title, return axes -def geo_scatter_categorical(array_sub, geo_coord, var_name, title, - cat_name=None, adapt_fontsize=True, **kwargs): +def geo_scatter_categorical( + array_sub, geo_coord, var_name, title, cat_name=None, adapt_fontsize=True, **kwargs +): """ Map plots for categorical data defined in array(s) over input coordinates. The categories must be a finite set of unique values @@ -444,19 +538,33 @@ def geo_scatter_categorical(array_sub, geo_coord, var_name, title, # convert sorted categories to numeric array [0, 1, ...] 
array_sub = np.array(array_sub) - array_sub_unique, array_sub_cat = np.unique(array_sub, return_inverse=True) #flattens array + array_sub_unique, array_sub_cat = np.unique( + array_sub, return_inverse=True + ) # flattens array array_sub_cat = array_sub_cat.reshape(array_sub.shape) array_sub_n = array_sub_unique.size - if 'cmap' in kwargs: + if "cmap" in kwargs: # optional user defined colormap (can be continuous) - cmap_arg = kwargs['cmap'] + cmap_arg = kwargs["cmap"] if isinstance(cmap_arg, str): cmap_name = cmap_arg # for qualitative colormaps taking the first few colors is preferable # over jumping equal distances - if cmap_name in ['Pastel1', 'Pastel2', 'Paired', 'Accent', 'Dark2', - 'Set1', 'Set2', 'Set3', 'tab10', 'tab20', 'tab20b', 'tab20c']: + if cmap_name in [ + "Pastel1", + "Pastel2", + "Paired", + "Accent", + "Dark2", + "Set1", + "Set2", + "Set3", + "tab10", + "tab20", + "tab20b", + "tab20c", + ]: cmap = mpl.colors.ListedColormap( cm.get_cmap(cmap_name).colors[:array_sub_n] ) @@ -464,38 +572,45 @@ def geo_scatter_categorical(array_sub, geo_coord, var_name, title, cmap = cm.get_cmap(cmap_arg).resampled(array_sub_n) elif isinstance(cmap_arg, mpl.colors.ListedColormap): # If a user brings their own colormap it's probably qualitative - cmap_name = 'defined by the user' - cmap = mpl.colors.ListedColormap( - cmap_arg.colors[:array_sub_n] - ) + cmap_name = "defined by the user" + cmap = mpl.colors.ListedColormap(cmap_arg.colors[:array_sub_n]) else: - raise TypeError("if cmap is given it must be either a str or a ListedColormap") + raise TypeError( + "if cmap is given it must be either a str or a ListedColormap" + ) else: # default qualitative colormap cmap_name = CMAP_CAT - cmap = mpl.colors.ListedColormap( - cm.get_cmap(cmap_name).colors[:array_sub_n] - ) + cmap = mpl.colors.ListedColormap(cm.get_cmap(cmap_name).colors[:array_sub_n]) if array_sub_n > cmap.N: - LOGGER.warning("More than %d categories cannot be plotted accurately " - "using the colormap %s. Please specify " - "a different qualitative colormap using the `cmap` " - "attribute. For Matplotlib's built-in colormaps, see " - "https://matplotlib.org/stable/tutorials/colors/colormaps.html", - cmap.N, cmap_name) + LOGGER.warning( + "More than %d categories cannot be plotted accurately " + "using the colormap %s. Please specify " + "a different qualitative colormap using the `cmap` " + "attribute. 
For Matplotlib's built-in colormaps, see " + "https://matplotlib.org/stable/tutorials/colors/colormaps.html", + cmap.N, + cmap_name, + ) # define the discrete colormap kwargs - kwargs['cmap'] = cmap - kwargs['vmin'] = -0.5 - kwargs['vmax'] = array_sub_n - 0.5 + kwargs["cmap"] = cmap + kwargs["vmin"] = -0.5 + kwargs["vmax"] = array_sub_n - 0.5 # #create the axes axes = _plot_scattered_data( - "scatter", array_sub_cat, geo_coord, var_name, title, - adapt_fontsize=adapt_fontsize, **kwargs) + "scatter", + array_sub_cat, + geo_coord, + var_name, + title, + adapt_fontsize=adapt_fontsize, + **kwargs, + ) - #add colorbar labels + # add colorbar labels if cat_name is None: cat_name = array_sub_unique.astype(str) if not isinstance(cat_name, dict): @@ -540,8 +655,9 @@ def make_map(num_sub=1, figsize=(9, 13), proj=ccrs.PlateCarree(), adapt_fontsize else: num_row, num_col = num_sub - fig, axis_sub = plt.subplots(num_row, num_col, figsize=figsize, - subplot_kw=dict(projection=proj)) + fig, axis_sub = plt.subplots( + num_row, num_col, figsize=figsize, subplot_kw=dict(projection=proj) + ) axes_iter = axis_sub if not isinstance(axis_sub, np.ndarray): axes_iter = np.array([[axis_sub]]) @@ -553,11 +669,11 @@ def make_map(num_sub=1, figsize=(9, 13), proj=ccrs.PlateCarree(), adapt_fontsize grid.xformatter = LONGITUDE_FORMATTER grid.yformatter = LATITUDE_FORMATTER if adapt_fontsize: - fontsize = axis.bbox.width/35 + fontsize = axis.bbox.width / 35 if fontsize < 10: fontsize = 10 - grid.xlabel_style = {'size': fontsize} - grid.ylabel_style = {'size': fontsize} + grid.xlabel_style = {"size": fontsize} + grid.ylabel_style = {"size": fontsize} else: fontsize = None except TypeError: @@ -572,6 +688,7 @@ def make_map(num_sub=1, figsize=(9, 13), proj=ccrs.PlateCarree(), adapt_fontsize return fig, axis_sub, fontsize + def add_shapes(axis): """ Overlay Earth's countries coastlines to matplotlib.pyplot axis. @@ -584,12 +701,15 @@ def add_shapes(axis): Geographical projection. The default is PlateCarree. """ - shp_file = shapereader.natural_earth(resolution='10m', category='cultural', - name='admin_0_countries') + shp_file = shapereader.natural_earth( + resolution="10m", category="cultural", name="admin_0_countries" + ) shp = shapereader.Reader(shp_file) for geometry in shp.geometries(): - axis.add_geometries([geometry], crs=ccrs.PlateCarree(), facecolor='none', - edgecolor='dimgray') + axis.add_geometries( + [geometry], crs=ccrs.PlateCarree(), facecolor="none", edgecolor="dimgray" + ) + def _ensure_utf8(val): # Without the `*.cpg` file present, the shape reader wrongly assumes latin-1 encoding: @@ -598,10 +718,11 @@ def _ensure_utf8(val): # As a workaround, we encode and decode again, unless this fails which means # that the `*.cpg` is present and the encoding is correct: try: - return val.encode('latin-1').decode('utf-8') + return val.encode("latin-1").decode("utf-8") except (AttributeError, UnicodeDecodeError, UnicodeEncodeError): return val + def add_populated_places(axis, extent, proj=ccrs.PlateCarree(), fontsize=None): """ Add city names. @@ -620,21 +741,37 @@ def add_populated_places(axis, extent, proj=ccrs.PlateCarree(), fontsize=None): are used. 
""" - shp_file = shapereader.natural_earth(resolution='50m', category='cultural', - name='populated_places_simple') + shp_file = shapereader.natural_earth( + resolution="50m", category="cultural", name="populated_places_simple" + ) shp = shapereader.Reader(shp_file) ext_pts = list(box(*u_coord.toggle_extent_bounds(extent)).exterior.coords) - ext_trans = [ccrs.PlateCarree().transform_point(pts[0], pts[1], proj) - for pts in ext_pts] + ext_trans = [ + ccrs.PlateCarree().transform_point(pts[0], pts[1], proj) for pts in ext_pts + ] for rec, point in zip(shp.records(), shp.geometries()): if ext_trans[2][0] < point.x <= ext_trans[0][0]: if ext_trans[0][1] < point.y <= ext_trans[1][1]: - axis.plot(point.x, point.y, color='navy', marker='o', - transform=ccrs.PlateCarree(), markerfacecolor='None') - axis.text(point.x, point.y, _ensure_utf8(rec.attributes['name']), - horizontalalignment='right', verticalalignment='bottom', - transform=ccrs.PlateCarree(), color='navy', fontsize=fontsize) + axis.plot( + point.x, + point.y, + color="navy", + marker="o", + transform=ccrs.PlateCarree(), + markerfacecolor="None", + ) + axis.text( + point.x, + point.y, + _ensure_utf8(rec.attributes["name"]), + horizontalalignment="right", + verticalalignment="bottom", + transform=ccrs.PlateCarree(), + color="navy", + fontsize=fontsize, + ) + def add_cntry_names(axis, extent, proj=ccrs.PlateCarree(), fontsize=None): """ @@ -653,21 +790,31 @@ def add_cntry_names(axis, extent, proj=ccrs.PlateCarree(), fontsize=None): Size of the fonts. If set to None, the default matplotlib settings are used. """ - shp_file = shapereader.natural_earth(resolution='10m', category='cultural', - name='admin_0_countries') + shp_file = shapereader.natural_earth( + resolution="10m", category="cultural", name="admin_0_countries" + ) shp = shapereader.Reader(shp_file) ext_pts = list(box(*u_coord.toggle_extent_bounds(extent)).exterior.coords) - ext_trans = [ccrs.PlateCarree().transform_point(pts[0], pts[1], proj) - for pts in ext_pts] + ext_trans = [ + ccrs.PlateCarree().transform_point(pts[0], pts[1], proj) for pts in ext_pts + ] for rec, point in zip(shp.records(), shp.geometries()): point_x = point.centroid.xy[0][0] point_y = point.centroid.xy[1][0] if ext_trans[2][0] < point_x <= ext_trans[0][0]: if ext_trans[0][1] < point_y <= ext_trans[1][1]: - axis.text(point_x, point_y, _ensure_utf8(rec.attributes['NAME']), - horizontalalignment='center', verticalalignment='center', - transform=ccrs.PlateCarree(), fontsize=fontsize, color='navy') + axis.text( + point_x, + point_y, + _ensure_utf8(rec.attributes["NAME"]), + horizontalalignment="center", + verticalalignment="center", + transform=ccrs.PlateCarree(), + fontsize=fontsize, + color="navy", + ) + def _get_collection_arrays(array_sub): """ @@ -698,6 +845,7 @@ def _get_collection_arrays(array_sub): return num_im, list_arr + def _get_row_col_size(num_sub): """ Compute number of rows and columns of subplots in figure. @@ -724,6 +872,7 @@ def _get_row_col_size(num_sub): num_row = int(num_sub / 2) + num_sub % 2 return num_row, num_col + def _get_borders(geo_coord, buffer=0, proj_limits=(-180, 180, -90, 90)): """ Get min and max longitude and min and max latitude (in this order). @@ -748,6 +897,7 @@ def _get_borders(geo_coord, buffer=0, proj_limits=(-180, 180, -90, 90)): max_lat = min(np.max(geo_coord[:, 0]) + buffer, proj_limits[3]) return [min_lon, max_lon, min_lat, max_lat] + def get_transformation(crs_in): """ Get projection and its units to use in cartopy transforamtions from current crs. 
@@ -774,11 +924,12 @@ def get_transformation(crs_in): crs = ccrs.epsg(epsg) except ValueError: LOGGER.warning( - "Error parsing coordinate system '%s'. Using projection PlateCarree in plot.", crs_in + "Error parsing coordinate system '%s'. Using projection PlateCarree in plot.", + crs_in, ) crs = ccrs.PlateCarree() except requests.exceptions.ConnectionError: - LOGGER.warning('No internet connection. Using projection PlateCarree in plot.') + LOGGER.warning("No internet connection. Using projection PlateCarree in plot.") crs = ccrs.PlateCarree() # units @@ -788,23 +939,33 @@ def get_transformation(crs_in): # we may safely ignore it. warnings.simplefilter(action="ignore", category=UserWarning) try: - units = (crs.proj4_params.get('units') - # As of cartopy 0.20 the proj4_params attribute is {} for CRS from an EPSG number - # (see issue raised https://github.com/SciTools/cartopy/issues/1974 - # and longterm discussion on https://github.com/SciTools/cartopy/issues/813). - # In these cases the units can be fetched through the method `to_dict`. - or crs.to_dict().get('units', '°')) + units = ( + crs.proj4_params.get("units") + # As of cartopy 0.20 the proj4_params attribute is {} for CRS from an EPSG number + # (see issue raised https://github.com/SciTools/cartopy/issues/1974 + # and longterm discussion on https://github.com/SciTools/cartopy/issues/813). + # In these cases the units can be fetched through the method `to_dict`. + or crs.to_dict().get("units", "°") + ) except AttributeError: # This happens in setups with cartopy<0.20, where `to_dict` is not defined. # Officially, we require cartopy>=0.20, but there are still users around that # can't upgrade due to https://github.com/SciTools/iris/issues/4468 - units = '°' + units = "°" return crs, units -def multibar_plot(ax, data, colors=None, total_width=0.8, single_width=1, - legend=True, ticklabels=None, invert_axis=False): +def multibar_plot( + ax, + data, + colors=None, + total_width=0.8, + single_width=1, + legend=True, + ticklabels=None, + invert_axis=False, +): """ Draws a bar plot with multiple bars per data point. 
https://stackoverflow.com/questions/14270391/python-matplotlib-multiple-bars @@ -854,7 +1015,7 @@ def multibar_plot(ax, data, colors=None, total_width=0.8, single_width=1, # Check if colors where provided, otherwhise use the default color cycle if colors is None: - colors = plt.rcParams['axes.prop_cycle'].by_key()['color'] + colors = plt.rcParams["axes.prop_cycle"].by_key()["color"] # Number of bars per group n_bars = len(data) @@ -873,11 +1034,19 @@ def multibar_plot(ax, data, colors=None, total_width=0.8, single_width=1, # Draw a bar for every value of that type for x, y in enumerate(values): if invert_axis: - lbar = ax.barh(x + x_offset, width=y, height=bar_width * single_width, - color=colors[i % len(colors)]) + lbar = ax.barh( + x + x_offset, + width=y, + height=bar_width * single_width, + color=colors[i % len(colors)], + ) else: - lbar = ax.bar(x + x_offset, y, width=bar_width * single_width, - color=colors[i % len(colors)]) + lbar = ax.bar( + x + x_offset, + y, + width=bar_width * single_width, + color=colors[i % len(colors)], + ) # Add a handle to the last drawn bar, which we'll need for the legend bars.append(lbar[0]) @@ -892,17 +1061,18 @@ def multibar_plot(ax, data, colors=None, total_width=0.8, single_width=1, if legend: ax.legend(bars, data.keys()) + def plot_from_gdf( - gdf: gpd.GeoDataFrame, - colorbar_name: str = None, - title_subplots: callable = None, - smooth=True, - axis=None, - figsize=(9, 13), - adapt_fontsize=True, - **kwargs + gdf: gpd.GeoDataFrame, + colorbar_name: str = None, + title_subplots: callable = None, + smooth=True, + axis=None, + figsize=(9, 13), + adapt_fontsize=True, + **kwargs, ): - """Plot several subplots from different columns of a GeoDataFrame, e.g., for + """Plot several subplots from different columns of a GeoDataFrame, e.g., for plotting local return periods or local exceedance intensities. Parameters @@ -933,47 +1103,56 @@ def plot_from_gdf( # check if inputs are correct types if not isinstance(gdf, gpd.GeoDataFrame): raise ValueError("gdf is not a GeoDataFrame") - gdf_values = gdf.drop(columns='geometry').values.T + gdf_values = gdf.drop(columns="geometry").values.T # read meta data for fig and axis labels if not isinstance(colorbar_name, str): print("Unknown colorbar name. Colorbar label will be missing.") - colorbar_name = '' + colorbar_name = "" if not callable(title_subplots): - print("Unknown subplot-title-generation function. Subplot titles will be column names.") + print( + "Unknown subplot-title-generation function. Subplot titles will be column names." 
+ ) title_subplots = lambda cols: [f"{col}" for col in cols] # use log colorbar for return periods and impact if ( - colorbar_name.strip().startswith(('Return Period', 'Impact')) and - 'norm' not in kwargs.keys() and + colorbar_name.strip().startswith(("Return Period", "Impact")) + and "norm" not in kwargs.keys() + and # check if there are no zeros values in gdf - not np.any(gdf_values == 0) and + not np.any(gdf_values == 0) + and # check if value range too small for logarithmic colorscale (np.log10(np.nanmax(gdf_values)) - np.log10(np.nanmin(gdf_values))) > 2 ): kwargs.update( - {'norm': mpl.colors.LogNorm( - vmin=np.nanmin(gdf_values), vmax=np.nanmax(gdf_values) + { + "norm": mpl.colors.LogNorm( + vmin=np.nanmin(gdf_values), vmax=np.nanmax(gdf_values) ), - 'vmin': None, 'vmax': None} + "vmin": None, + "vmax": None, + } ) # use inverted color bar for return periods - if (colorbar_name.strip().startswith('Return Period') and - 'cmap' not in kwargs.keys()): - kwargs.update({'cmap': 'viridis_r'}) + if ( + colorbar_name.strip().startswith("Return Period") + and "cmap" not in kwargs.keys() + ): + kwargs.update({"cmap": "viridis_r"}) axis = geo_im_from_array( gdf_values, - gdf.geometry.get_coordinates().values[:,::-1], + gdf.geometry.get_coordinates().values[:, ::-1], colorbar_name, - title_subplots(np.delete(gdf.columns, np.where(gdf.columns == 'geometry'))), + title_subplots(np.delete(gdf.columns, np.where(gdf.columns == "geometry"))), smooth=smooth, axes=axis, figsize=figsize, adapt_fontsize=adapt_fontsize, - **kwargs + **kwargs, ) return axis diff --git a/climada/util/save.py b/climada/util/save.py index 5d871b6f7..a1a74dde3 100644 --- a/climada/util/save.py +++ b/climada/util/save.py @@ -19,12 +19,11 @@ define save functionalities """ -__all__ = ['save', - 'load'] +__all__ = ["save", "load"] -from pathlib import Path -import pickle import logging +import pickle +from pathlib import Path from climada.util.config import CONFIG @@ -42,21 +41,26 @@ def save(out_file_name, var): var : object variable to save in pickle format """ - out_file = Path(out_file_name) if Path(out_file_name).is_absolute() \ + out_file = ( + Path(out_file_name) + if Path(out_file_name).is_absolute() else CONFIG.local_data.save_dir.dir().joinpath(out_file_name) + ) target_dir = out_file.parent try: # Generate folder if it doesn't exists if not target_dir.is_dir(): target_dir.mkdir() - LOGGER.info('Created folder %s.', target_dir) - with out_file.open('wb') as flh: + LOGGER.info("Created folder %s.", target_dir) + with out_file.open("wb") as flh: pickle.dump(var, flh, pickle.HIGHEST_PROTOCOL) - LOGGER.info('Written file %s', out_file) + LOGGER.info("Written file %s", out_file) except FileNotFoundError as err: - raise FileNotFoundError(f'Folder {target_dir} not found: ' + str(err)) from err + raise FileNotFoundError(f"Folder {target_dir} not found: " + str(err)) from err except OSError as ose: - raise ValueError('Data is probably too big. Try splitting it: ' + str(ose)) from ose + raise ValueError( + "Data is probably too big. 
Try splitting it: " + str(ose) + ) from ose def load(in_file_name): @@ -72,8 +76,11 @@ def load(in_file_name): ------- object """ - in_file = Path(in_file_name) if Path(in_file_name).is_absolute() \ + in_file = ( + Path(in_file_name) + if Path(in_file_name).is_absolute() else CONFIG.local_data.save_dir.dir().joinpath(in_file_name) - with in_file.open('rb') as flh: + ) + with in_file.open("rb") as flh: data = pickle.load(flh) return data diff --git a/climada/util/scalebar_plot.py b/climada/util/scalebar_plot.py index 11d16b9d0..e30f515a6 100644 --- a/climada/util/scalebar_plot.py +++ b/climada/util/scalebar_plot.py @@ -20,9 +20,10 @@ https://stackoverflow.com/questions/32333870/how-can-i-show-a-km-ruler-on-a-cartopy-matplotlib-plot/50674451#50674451 """ -import numpy as np import cartopy.crs as ccrs import cartopy.geodesic as cgeo +import numpy as np + def _axes_to_lonlat(ax, coords): """(lon, lat) from axes coordinates.""" @@ -32,6 +33,7 @@ def _axes_to_lonlat(ax, coords): return lonlat + def _upper_bound(start, direction, distance, dist_func): """A point farther than distance from start, in the given direction. @@ -97,8 +99,10 @@ def _distance_along_line(start, end, distance, dist_func, tol): """ initial_distance = dist_func(start, end) if initial_distance < distance: - raise ValueError(f"End is closer to start ({initial_distance}) than " - f"given distance ({distance}).") + raise ValueError( + f"End is closer to start ({initial_distance}) than " + f"given distance ({distance})." + ) if tol <= 0: raise ValueError(f"Tolerance is not positive: {tol}") @@ -159,10 +163,23 @@ def dist_func(a_axes, b_axes): return _distance_along_line(start, end, distance, dist_func, tol) -def scale_bar(ax, location, length, metres_per_unit=1000, unit_name='km', - tol=0.01, angle=0, color='black', linewidth=3, text_offset=0.005, - ha='center', va='bottom', plot_kwargs=None, text_kwargs=None, - **kwargs): +def scale_bar( + ax, + location, + length, + metres_per_unit=1000, + unit_name="km", + tol=0.01, + angle=0, + color="black", + linewidth=3, + text_offset=0.005, + ha="center", + va="bottom", + plot_kwargs=None, + text_kwargs=None, + **kwargs, +): """Add a scale bar to CartoPy axes. For angles between 0 and 90 the text and line may be plotted at @@ -209,10 +226,15 @@ def scale_bar(ax, location, length, metres_per_unit=1000, unit_name='km', if text_kwargs is None: text_kwargs = {} - plot_kwargs = {'linewidth': linewidth, 'color': color, **plot_kwargs, - **kwargs} - text_kwargs = {'ha': ha, 'va': va, 'rotation': angle, 'color': color, - **text_kwargs, **kwargs} + plot_kwargs = {"linewidth": linewidth, "color": color, **plot_kwargs, **kwargs} + text_kwargs = { + "ha": ha, + "va": va, + "rotation": angle, + "color": color, + **text_kwargs, + **kwargs, + } # Convert all units and types. location = np.asarray(location) # For vector addition. @@ -220,8 +242,7 @@ def scale_bar(ax, location, length, metres_per_unit=1000, unit_name='km', angle_rad = angle * np.pi / 180 # End-point of bar. - end = _point_along_line(ax, location, length_metres, angle=angle_rad, - tol=tol) + end = _point_along_line(ax, location, length_metres, angle=angle_rad, tol=tol) # Coordinates are currently in axes coordinates, so use transAxes to # put into data coordinates. *zip(a, b) produces a list of x-coords, @@ -234,5 +255,10 @@ def scale_bar(ax, location, length, metres_per_unit=1000, unit_name='km', text_location = midpoint + offset # 'rotation' keyword argument is in text_kwargs. 
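A short usage sketch for the scale_bar function reworked above; the projection, extent and bar placement are assumed example values, not taken from CLIMADA code.

    import cartopy.crs as ccrs
    import matplotlib.pyplot as plt

    from climada.util.scalebar_plot import scale_bar

    fig, ax = plt.subplots(subplot_kw={"projection": ccrs.PlateCarree()})
    ax.set_extent((5, 12, 45, 48))  # lon_min, lon_max, lat_min, lat_max
    # location is given in axes coordinates, length in unit_name units (km here)
    scale_bar(ax, location=(0.1, 0.05), length=100)
    plt.show()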
- ax.text(*text_location, f"{length} {unit_name}", rotation_mode='anchor', - transform=ax.transAxes, **text_kwargs) + ax.text( + *text_location, + f"{length} {unit_name}", + rotation_mode="anchor", + transform=ax.transAxes, + **text_kwargs, + ) diff --git a/climada/util/select.py b/climada/util/select.py index 4b62a1b34..aaee63890 100755 --- a/climada/util/select.py +++ b/climada/util/select.py @@ -19,13 +19,13 @@ module containing functions to support various select methods. """ - - import logging + import numpy as np LOGGER = logging.getLogger(__name__) + def get_attributes_with_matching_dimension(obj, dims): """ Get the attributes of an object that have len(dims) number diff --git a/climada/util/test/test__init__.py b/climada/util/test/test__init__.py index cef6e8e48..aebd1b017 100755 --- a/climada/util/test/test__init__.py +++ b/climada/util/test/test__init__.py @@ -18,28 +18,31 @@ Test config module. """ -import unittest + import logging +import unittest from climada.util import log_level + class TestUtilInit(unittest.TestCase): """Test util __init__ methods""" def test_log_level_pass(self): """Test log level context manager passes""" - #Check loggers are set to level - with self.assertLogs('climada', level='INFO') as cm: - with log_level('WARNING'): - logging.getLogger('climada').info('info') - logging.getLogger('climada').error('error') - self.assertEqual(cm.output, ['ERROR:climada:error']) - #Check if only climada loggers level change - with self.assertLogs('matplotlib', level='DEBUG') as cm: - with log_level('ERROR', name_prefix='climada'): - logging.getLogger('climada').info('info') - logging.getLogger('matplotlib').debug('debug') - self.assertEqual(cm.output, ['DEBUG:matplotlib:debug']) + # Check loggers are set to level + with self.assertLogs("climada", level="INFO") as cm: + with log_level("WARNING"): + logging.getLogger("climada").info("info") + logging.getLogger("climada").error("error") + self.assertEqual(cm.output, ["ERROR:climada:error"]) + # Check if only climada loggers level change + with self.assertLogs("matplotlib", level="DEBUG") as cm: + with log_level("ERROR", name_prefix="climada"): + logging.getLogger("climada").info("info") + logging.getLogger("matplotlib").debug("debug") + self.assertEqual(cm.output, ["DEBUG:matplotlib:debug"]) + # Execute Tests if __name__ == "__main__": diff --git a/climada/util/test/test_checker.py b/climada/util/test/test_checker.py index 391191a7e..493f394e4 100644 --- a/climada/util/test/test_checker.py +++ b/climada/util/test/test_checker.py @@ -20,15 +20,17 @@ """ import unittest + import numpy as np import scipy.sparse as sparse import climada.util.checker as u_check + class DummyClass(object): - vars_oblig = {'id', 'array', 'sparse_arr'} - vars_opt = {'list', 'array_opt'} + vars_oblig = {"id", "array", "sparse_arr"} + vars_opt = {"list", "array_opt"} def __init__(self): self.id = np.arange(25) @@ -36,7 +38,8 @@ def __init__(self): self.array_opt = np.arange(25) self.list = np.arange(25).tolist() self.sparse_arr = sparse.csr_matrix(np.zeros((25, 2))) - self.name = 'name class' + self.name = "name class" + class TestChecks(unittest.TestCase): """Test loading funcions from the Hazard class""" @@ -44,48 +47,76 @@ class TestChecks(unittest.TestCase): def test_check_obligatories_pass(self): """Correct DummyClass definition""" dummy = DummyClass() - u_check.check_obligatories(dummy.__dict__, dummy.vars_oblig, "DummyClass.", - dummy.id.size, dummy.id.size, 2) + u_check.check_obligatories( + dummy.__dict__, + dummy.vars_oblig, + 
"DummyClass.", + dummy.id.size, + dummy.id.size, + 2, + ) def test_check_obligatories_fail(self): """Wrong DummyClass definition""" dummy = DummyClass() dummy.array = np.arange(3) with self.assertRaises(ValueError) as cm: - u_check.check_obligatories(dummy.__dict__, dummy.vars_oblig, "DummyClass.", - dummy.id.size, dummy.id.size, 2) - self.assertIn('Invalid DummyClass.array size: 25 != 3.', str(cm.exception)) + u_check.check_obligatories( + dummy.__dict__, + dummy.vars_oblig, + "DummyClass.", + dummy.id.size, + dummy.id.size, + 2, + ) + self.assertIn("Invalid DummyClass.array size: 25 != 3.", str(cm.exception)) dummy = DummyClass() dummy.sparse_arr = sparse.csr_matrix(np.zeros((25, 1))) with self.assertRaises(ValueError) as cm: - u_check.check_obligatories(dummy.__dict__, dummy.vars_oblig, "DummyClass.", - dummy.id.size, dummy.id.size, 2) - self.assertIn('Invalid DummyClass.sparse_arr column size: 2 != 1.', str(cm.exception)) + u_check.check_obligatories( + dummy.__dict__, + dummy.vars_oblig, + "DummyClass.", + dummy.id.size, + dummy.id.size, + 2, + ) + self.assertIn( + "Invalid DummyClass.sparse_arr column size: 2 != 1.", str(cm.exception) + ) def test_check_optionals_pass(self): """Correct DummyClass definition""" dummy = DummyClass() - u_check.check_optionals(dummy.__dict__, dummy.vars_opt, "DummyClass.", dummy.id.size) + u_check.check_optionals( + dummy.__dict__, dummy.vars_opt, "DummyClass.", dummy.id.size + ) def test_check_optionals_fail(self): """Correct DummyClass definition""" dummy = DummyClass() dummy.array_opt = np.arange(3) with self.assertRaises(ValueError) as cm: - u_check.check_optionals(dummy.__dict__, dummy.vars_opt, "DummyClass.", dummy.id.size) - self.assertIn('Invalid DummyClass.array_opt size: 25 != 3.', str(cm.exception)) + u_check.check_optionals( + dummy.__dict__, dummy.vars_opt, "DummyClass.", dummy.id.size + ) + self.assertIn("Invalid DummyClass.array_opt size: 25 != 3.", str(cm.exception)) dummy.array_opt = np.array([], int) - with self.assertLogs('climada.util.checker', level='DEBUG') as cm: - u_check.check_optionals(dummy.__dict__, dummy.vars_opt, "DummyClass.", dummy.id.size) - self.assertIn('DummyClass.array_opt not set.', cm.output[0]) + with self.assertLogs("climada.util.checker", level="DEBUG") as cm: + u_check.check_optionals( + dummy.__dict__, dummy.vars_opt, "DummyClass.", dummy.id.size + ) + self.assertIn("DummyClass.array_opt not set.", cm.output[0]) dummy = DummyClass() dummy.list = np.arange(3).tolist() with self.assertRaises(ValueError) as cm: - u_check.check_optionals(dummy.__dict__, dummy.vars_opt, "DummyClass.", dummy.id.size) - self.assertIn('Invalid DummyClass.list size: 25 != 3.', str(cm.exception)) + u_check.check_optionals( + dummy.__dict__, dummy.vars_opt, "DummyClass.", dummy.id.size + ) + self.assertIn("Invalid DummyClass.list size: 25 != 3.", str(cm.exception)) def test_prune_csr_matrix(self): """Check that csr matrices are brought into canonical format""" @@ -109,6 +140,7 @@ def test_prune_csr_matrix(self): np.testing.assert_array_equal(matrix.data, [3]) self.assertEqual(matrix.nnz, 1) + # Execute Tests if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestChecks) diff --git a/climada/util/test/test_config.py b/climada/util/test/test_config.py index 58bdce0e6..6579927f2 100644 --- a/climada/util/test/test_config.py +++ b/climada/util/test/test_config.py @@ -18,9 +18,11 @@ Test config module. 
""" + import unittest -from climada.util.config import Config, CONFIG +from climada.util.config import CONFIG, Config + class TestConfig(unittest.TestCase): """Test Config methods""" @@ -33,11 +35,12 @@ def tearDown(self): def test_from_dict(self): """Check the creation and use of a Config object.""" - dct = {'a': 4., - 'b': [0, 1., '2', {'c': 'c'}, [[11, 12], [21, 22]]]} + dct = {"a": 4.0, "b": [0, 1.0, "2", {"c": "c"}, [[11, 12], [21, 22]]]} conf = Config.from_dict(dct) self.assertEqual(conf.a.float(), 4.0) - self.assertEqual(str(conf), '{a: 4.0, b: [0, 1.0, 2, {c: c}, [[11, 12], [21, 22]]]}') + self.assertEqual( + str(conf), "{a: 4.0, b: [0, 1.0, 2, {c: c}, [[11, 12], [21, 22]]]}" + ) try: conf.a.int() self.fail("this should have failed with `, not int`") @@ -45,9 +48,9 @@ def test_from_dict(self): pass self.assertEqual(conf.b.get(0).int(), 0) self.assertEqual(conf.b.int(0), 0) - self.assertEqual(conf.b.float(1), 1.) - self.assertEqual(conf.b.str(2), '2') - self.assertEqual(conf.b.get(3).c.str(), 'c') + self.assertEqual(conf.b.float(1), 1.0) + self.assertEqual(conf.b.str(2), "2") + self.assertEqual(conf.b.get(3).c.str(), "c") self.assertEqual(conf.b.get(4, 1, 0).int(), 21) self.assertEqual(conf.b.get(4, 1).int(1), 22) self.assertEqual(conf.b.get(4).list(0)[1].int(), 12) @@ -56,20 +59,28 @@ def test_from_dict(self): def test_substitute(self): global CONFIG """Check the substitution of references.""" - dct = {'a': 'https://{b.c}/{b.d}.{b.e}', 'b': {'c': 'host', 'd': 'page', 'e': 'domain'}} + dct = { + "a": "https://{b.c}/{b.d}.{b.e}", + "b": {"c": "host", "d": "page", "e": "domain"}, + } conf = Config.from_dict(dct) self.assertEqual(conf.a._root, conf._root) - self.assertEqual(conf.a.str(), 'https://host/page.domain') + self.assertEqual(conf.a.str(), "https://host/page.domain") def test_missing(self): with self.assertRaises(AttributeError) as ve: CONFIG.hazard.fire_fly.population.str() - self.assertIn("there is no 'fire_fly' configured for 'hazard'", str(ve.exception)) + self.assertIn( + "there is no 'fire_fly' configured for 'hazard'", str(ve.exception) + ) self.assertIn("check your config files: [", str(ve.exception)) with self.assertRaises(AttributeError) as ve: CONFIG.some_module.str() - self.assertIn("there is no 'some_module' configured for 'climada.CONFIG'", str(ve.exception)) + self.assertIn( + "there is no 'some_module' configured for 'climada.CONFIG'", + str(ve.exception), + ) # Execute Tests diff --git a/climada/util/test/test_coordinates.py b/climada/util/test/test_coordinates.py index aea4d5e6b..50d5a8073 100644 --- a/climada/util/test/test_coordinates.py +++ b/climada/util/test/test_coordinates.py @@ -22,80 +22,84 @@ import unittest from pathlib import Path -from cartopy.io import shapereader -import pandas as pd import geopandas as gpd import numpy as np -from pyproj.crs import CRS as PCRS +import pandas as pd +import rasterio.transform import shapely -from shapely.geometry import box -from rasterio.windows import Window -from rasterio.warp import Resampling +from cartopy.io import shapereader +from pyproj.crs import CRS as PCRS from rasterio import Affine from rasterio.crs import CRS as RCRS -import rasterio.transform +from rasterio.warp import Resampling +from rasterio.windows import Window +from shapely.geometry import box +import climada.util.coordinates as u_coord from climada import CONFIG -from climada.util.constants import HAZ_DEMO_FL, DEF_CRS, ONE_LAT_KM, DEMO_DIR from climada.hazard.base import Centroids -import climada.util.coordinates as u_coord +from 
climada.util.constants import DEF_CRS, DEMO_DIR, HAZ_DEMO_FL, ONE_LAT_KM DATA_DIR = CONFIG.util.test_data.dir() + + def def_input_values(): """Default input coordinates and centroids values""" # Load exposures coordinates from demo entity file - exposures = np.array([ - [26.933899, -80.128799], - [26.957203, -80.098284], - [26.783846, -80.748947], - [26.645524, -80.550704], - [26.897796, -80.596929], - [26.925359, -80.220966], - [26.914768, -80.07466], - [26.853491, -80.190281], - [26.845099, -80.083904], - [26.82651, -80.213493], - [26.842772, -80.0591], - [26.825905, -80.630096], - [26.80465, -80.075301], - [26.788649, -80.069885], - [26.704277, -80.656841], - [26.71005, -80.190085], - [26.755412, -80.08955], - [26.678449, -80.041179], - [26.725649, -80.1324], - [26.720599, -80.091746], - [26.71255, -80.068579], - [26.6649, -80.090698], - [26.664699, -80.1254], - [26.663149, -80.151401], - [26.66875, -80.058749], - [26.638517, -80.283371], - [26.59309, -80.206901], - [26.617449, -80.090649], - [26.620079, -80.055001], - [26.596795, -80.128711], - [26.577049, -80.076435], - [26.524585, -80.080105], - [26.524158, -80.06398], - [26.523737, -80.178973], - [26.520284, -80.110519], - [26.547349, -80.057701], - [26.463399, -80.064251], - [26.45905, -80.07875], - [26.45558, -80.139247], - [26.453699, -80.104316], - [26.449999, -80.188545], - [26.397299, -80.21902], - [26.4084, -80.092391], - [26.40875, -80.1575], - [26.379113, -80.102028], - [26.3809, -80.16885], - [26.349068, -80.116401], - [26.346349, -80.08385], - [26.348015, -80.241305], - [26.347957, -80.158855] - ]) + exposures = np.array( + [ + [26.933899, -80.128799], + [26.957203, -80.098284], + [26.783846, -80.748947], + [26.645524, -80.550704], + [26.897796, -80.596929], + [26.925359, -80.220966], + [26.914768, -80.07466], + [26.853491, -80.190281], + [26.845099, -80.083904], + [26.82651, -80.213493], + [26.842772, -80.0591], + [26.825905, -80.630096], + [26.80465, -80.075301], + [26.788649, -80.069885], + [26.704277, -80.656841], + [26.71005, -80.190085], + [26.755412, -80.08955], + [26.678449, -80.041179], + [26.725649, -80.1324], + [26.720599, -80.091746], + [26.71255, -80.068579], + [26.6649, -80.090698], + [26.664699, -80.1254], + [26.663149, -80.151401], + [26.66875, -80.058749], + [26.638517, -80.283371], + [26.59309, -80.206901], + [26.617449, -80.090649], + [26.620079, -80.055001], + [26.596795, -80.128711], + [26.577049, -80.076435], + [26.524585, -80.080105], + [26.524158, -80.06398], + [26.523737, -80.178973], + [26.520284, -80.110519], + [26.547349, -80.057701], + [26.463399, -80.064251], + [26.45905, -80.07875], + [26.45558, -80.139247], + [26.453699, -80.104316], + [26.449999, -80.188545], + [26.397299, -80.21902], + [26.4084, -80.092391], + [26.40875, -80.1575], + [26.379113, -80.102028], + [26.3809, -80.16885], + [26.349068, -80.116401], + [26.346349, -80.08385], + [26.348015, -80.241305], + [26.347957, -80.158855], + ] + ) # Define centroids centroids = np.zeros((100, 2)) @@ -109,20 +113,122 @@ def def_input_values(): return exposures, centroids + def def_ref(): """Default output reference""" - return np.array([46, 46, 36, 36, 36, 46, 46, 46, 46, 46, 46, - 36, 46, 46, 36, 46, 46, 46, 46, 46, 46, 46, - 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, - 46, 46, 46, 45, 45, 45, 45, 45, 45, 45, 45, - 45, 45, 45, 45, 45, 45]) + return np.array( + [ + 46, + 46, + 36, + 36, + 36, + 46, + 46, + 46, + 46, + 46, + 46, + 36, + 46, + 46, + 36, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 
46, + 46, + 46, + 46, + 46, + 46, + 45, + 45, + 45, + 45, + 45, + 45, + 45, + 45, + 45, + 45, + 45, + 45, + 45, + 45, + ] + ) + def def_ref_50(): """Default output reference for maximum distance threshold 50km""" - return np.array([46, 46, 36, -1, 36, 46, 46, 46, 46, 46, 46, 36, 46, 46, - 36, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, - 46, 46, 46, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 45, - 45, 45, 45, 45, 45, 45, 45, 45]) + return np.array( + [ + 46, + 46, + 36, + -1, + 36, + 46, + 46, + 46, + 46, + 46, + 46, + 36, + 46, + 46, + 36, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + 45, + 45, + 45, + 45, + 45, + 45, + 45, + 45, + 45, + ] + ) + class TestDistance(unittest.TestCase): """Test distance functions.""" @@ -136,89 +242,106 @@ def test_dist_sqr_approx_pass(self): lons2 = 56 self.assertAlmostEqual( 7709.827814738594, - np.sqrt(u_coord._dist_sqr_approx(lats1, lons1, cos_lats1, lats2, lons2)) * ONE_LAT_KM) + np.sqrt(u_coord._dist_sqr_approx(lats1, lons1, cos_lats1, lats2, lons2)) + * ONE_LAT_KM, + ) def test_geodesic_length_geog(self): """Test compute_geodesic_lengths for geographic input crs""" - LINE_PATH = DEMO_DIR.joinpath('nl_rails.gpkg') - gdf_rails = gpd.read_file(LINE_PATH).to_crs('epsg:4326') + LINE_PATH = DEMO_DIR.joinpath("nl_rails.gpkg") + gdf_rails = gpd.read_file(LINE_PATH).to_crs("epsg:4326") lengths_geom = u_coord.compute_geodesic_lengths(gdf_rails) self.assertEqual(len(lengths_geom), len(gdf_rails)) self.assertTrue( np.all( - (abs(lengths_geom - gdf_rails['distance'])/lengths_geom < 0.1) | - (lengths_geom - gdf_rails['distance'] < 10) - ) + (abs(lengths_geom - gdf_rails["distance"]) / lengths_geom < 0.1) + | (lengths_geom - gdf_rails["distance"] < 10) ) + ) def test_geodesic_length_proj(self): """Test compute_geodesic_lengths for projected input crs""" - LINE_PATH = DEMO_DIR.joinpath('nl_rails.gpkg') - gdf_rails = gpd.read_file(LINE_PATH).to_crs('epsg:4326') - gdf_rails_proj = gpd.read_file(LINE_PATH).to_crs('epsg:4326').to_crs('EPSG:28992') + LINE_PATH = DEMO_DIR.joinpath("nl_rails.gpkg") + gdf_rails = gpd.read_file(LINE_PATH).to_crs("epsg:4326") + gdf_rails_proj = ( + gpd.read_file(LINE_PATH).to_crs("epsg:4326").to_crs("EPSG:28992") + ) lengths_geom = u_coord.compute_geodesic_lengths(gdf_rails) lengths_proj = u_coord.compute_geodesic_lengths(gdf_rails_proj) - for len_proj, len_geom in zip(lengths_proj,lengths_geom): + for len_proj, len_geom in zip(lengths_proj, lengths_geom): self.assertAlmostEqual(len_proj, len_geom, 1) self.assertTrue( np.all( - (abs(lengths_proj - gdf_rails_proj['distance'])/lengths_proj < 0.1) | - (lengths_proj - gdf_rails_proj['distance'] < 10) - ) + (abs(lengths_proj - gdf_rails_proj["distance"]) / lengths_proj < 0.1) + | (lengths_proj - gdf_rails_proj["distance"] < 10) ) + ) + def data_arrays_resampling_demo(): """init demo data arrays (2d) and meta data for resampling""" data_arrays = [ # demo pop: - np.array([[0, 1, 2], [3, 4, 5]], dtype='float32'), - np.array([[0, 1, 2], [3, 4, 5]], dtype='float32'), + np.array([[0, 1, 2], [3, 4, 5]], dtype="float32"), + np.array([[0, 1, 2], [3, 4, 5]], dtype="float32"), # demo nightlight: - np.array([[2, 10, 0, 0, 0, 0], - [10, 2, 10, 0, 0, 0], - [0, 0, 0, 0, 1, 1], - [1, 0, 0, 0, 1, 1]], dtype='float32'), + np.array( + [ + [2, 10, 0, 0, 0, 0], + [10, 2, 10, 0, 0, 0], + [0, 0, 0, 0, 1, 1], + [1, 0, 0, 0, 1, 1], + ], + dtype="float32", + ), ] - meta_list = [{'driver': 'GTiff', - 'dtype': 
'float32', - 'nodata': -3.4028230607370965e+38, - 'width': 3, - 'height': 2, - 'count': 1, - 'crs': RCRS.from_epsg(4326), - 'transform': Affine(1, 0.0, -10, 0.0, -1, 40), - }, - {'driver': 'GTiff', - 'dtype': 'float32', - 'nodata': -3.4028230607370965e+38, - 'width': 3, - 'height': 2, - 'count': 1, - 'crs': RCRS.from_epsg(4326), - # shifted by 1 degree latitude to the north: - 'transform': Affine(1, 0.0, -10, 0.0, -1, 41), - }, - {'driver': 'GTiff', - 'dtype': 'float32', - 'nodata': None, - 'width': 6, - 'height': 4, - 'count': 1, - 'crs': RCRS.from_epsg(4326), - # higher resolution: - 'transform': Affine(.5, 0.0, -10, 0.0, -.5, 40), - }] + meta_list = [ + { + "driver": "GTiff", + "dtype": "float32", + "nodata": -3.4028230607370965e38, + "width": 3, + "height": 2, + "count": 1, + "crs": RCRS.from_epsg(4326), + "transform": Affine(1, 0.0, -10, 0.0, -1, 40), + }, + { + "driver": "GTiff", + "dtype": "float32", + "nodata": -3.4028230607370965e38, + "width": 3, + "height": 2, + "count": 1, + "crs": RCRS.from_epsg(4326), + # shifted by 1 degree latitude to the north: + "transform": Affine(1, 0.0, -10, 0.0, -1, 41), + }, + { + "driver": "GTiff", + "dtype": "float32", + "nodata": None, + "width": 6, + "height": 4, + "count": 1, + "crs": RCRS.from_epsg(4326), + # higher resolution: + "transform": Affine(0.5, 0.0, -10, 0.0, -0.5, 40), + }, + ] return data_arrays, meta_list + class TestFunc(unittest.TestCase): """Test auxiliary functions""" + def test_lon_normalize(self): """Test the longitude normalization function""" data = np.array([-180, 20.1, -30, 190, -350]) @@ -287,14 +410,16 @@ def test_geosph_vector(self): def test_dist_approx_pass(self): """Test approximate distance functions""" - data = np.array([ - # lat1, lon1, lat2, lon2, dist_equirect, dist_geosphere - [45.5, -32.1, 14, 56, 7702.88906574, 8750.64119051], - [45.5, 147.8, 14, -124, 7709.82781473, 8758.34146833], - [45.5, 507.9, 14, -124, 7702.88906574, 8750.64119051], - [45.5, -212.2, 14, -124, 7709.82781473, 8758.34146833], - [-3, -130.1, 4, -30.5, 11079.7217421, 11087.0352544], - ]) + data = np.array( + [ + # lat1, lon1, lat2, lon2, dist_equirect, dist_geosphere + [45.5, -32.1, 14, 56, 7702.88906574, 8750.64119051], + [45.5, 147.8, 14, -124, 7709.82781473, 8758.34146833], + [45.5, 507.9, 14, -124, 7702.88906574, 8750.64119051], + [45.5, -212.2, 14, -124, 7709.82781473, 8758.34146833], + [-3, -130.1, 4, -30.5, 11079.7217421, 11087.0352544], + ] + ) # conversion factors from reference data (in km, see above) to other units factors_km_to_x = { "m": 1e3, @@ -302,28 +427,52 @@ def test_dist_approx_pass(self): "degree": 1.0 / u_coord.ONE_LAT_KM, "km": 1.0, } - compute_dist = np.stack([ - u_coord.dist_approx(data[:, None, 0], data[:, None, 1], - data[:, None, 2], data[:, None, 3], - method="equirect")[:, 0, 0], - u_coord.dist_approx(data[:, None, 0], data[:, None, 1], - data[:, None, 2], data[:, None, 3], - method="geosphere")[:, 0, 0], - ], axis=-1) + compute_dist = np.stack( + [ + u_coord.dist_approx( + data[:, None, 0], + data[:, None, 1], + data[:, None, 2], + data[:, None, 3], + method="equirect", + )[:, 0, 0], + u_coord.dist_approx( + data[:, None, 0], + data[:, None, 1], + data[:, None, 2], + data[:, None, 3], + method="geosphere", + )[:, 0, 0], + ], + axis=-1, + ) self.assertEqual(compute_dist.shape[0], data.shape[0]) for d, cd in zip(data[:, 4:], compute_dist): self.assertAlmostEqual(d[0], cd[0]) self.assertAlmostEqual(d[1], cd[1]) for units, factor in factors_km_to_x.items(): - compute_dist = np.stack([ - u_coord.dist_approx(data[:, 
None, 0], data[:, None, 1], - data[:, None, 2], data[:, None, 3], - method="equirect", units=units)[:, 0, 0], - u_coord.dist_approx(data[:, None, 0], data[:, None, 1], - data[:, None, 2], data[:, None, 3], - method="geosphere", units=units)[:, 0, 0], - ], axis=-1) + compute_dist = np.stack( + [ + u_coord.dist_approx( + data[:, None, 0], + data[:, None, 1], + data[:, None, 2], + data[:, None, 3], + method="equirect", + units=units, + )[:, 0, 0], + u_coord.dist_approx( + data[:, None, 0], + data[:, None, 1], + data[:, None, 2], + data[:, None, 3], + method="geosphere", + units=units, + )[:, 0, 0], + ], + axis=-1, + ) self.assertEqual(compute_dist.shape[0], data.shape[0]) places = 4 if units == "m" else 7 for d, cd in zip(data[:, 4:], compute_dist): @@ -332,13 +481,15 @@ def test_dist_approx_pass(self): def test_dist_approx_log_pass(self): """Test log-functionality of approximate distance functions""" - data = np.array([ - # lat1, lon1, lat2, lon2, dist_equirect, dist_geosphere - [0, 0, 0, 1, 111.12, 111.12], - [-13, 179, 5, -179, 2011.84774049, 2012.30698122], - [24., 85., 23.99999967, 85., 3.666960e-5, 3.666960e-5], - [24., 85., 24., 85., 0, 0], - ]) + data = np.array( + [ + # lat1, lon1, lat2, lon2, dist_equirect, dist_geosphere + [0, 0, 0, 1, 111.12, 111.12], + [-13, 179, 5, -179, 2011.84774049, 2012.30698122], + [24.0, 85.0, 23.99999967, 85.0, 3.666960e-5, 3.666960e-5], + [24.0, 85.0, 24.0, 85.0, 0, 0], + ] + ) # conversion factors from reference data (in km, see above) to other units factors_km_to_x = { "m": 1e3, @@ -348,9 +499,15 @@ def test_dist_approx_log_pass(self): } for i, method in enumerate(["equirect", "geosphere"]): for units, factor in factors_km_to_x.items(): - dist, vec = u_coord.dist_approx(data[:, None, 0], data[:, None, 1], - data[:, None, 2], data[:, None, 3], - log=True, method=method, units=units) + dist, vec = u_coord.dist_approx( + data[:, None, 0], + data[:, None, 1], + data[:, None, 2], + data[:, None, 3], + log=True, + method=method, + units=units, + ) dist, vec = dist[:, 0, 0], vec[:, 0, 0] np.testing.assert_allclose(np.linalg.norm(vec, axis=-1), dist) np.testing.assert_allclose(dist, data[:, 4 + i] * factor) @@ -369,20 +526,22 @@ def test_dist_approx_batch_pass(self): # The distance of each of 4 points (lat1, lon1) to each of 3 points (lat2, lon2) is # computed for each of 2 batches (first dimension) of data. 
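A compact shape sketch for the batched dist_approx call tested here; the all-zero coordinates are placeholder values chosen only to show the (nbatch, npts1, npts2) output shape.

    import numpy as np
    import climada.util.coordinates as u_coord

    lat1 = np.zeros((2, 4))  # 2 batches of 4 points
    lon1 = np.zeros((2, 4))
    lat2 = np.zeros((2, 3))  # 2 batches of 3 points
    lon2 = np.zeros((2, 3))
    dist = u_coord.dist_approx(lat1, lon1, lat2, lon2)  # km by default
    print(dist.shape)  # (2, 4, 3): one distance per point pair and batch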
- test_data = np.array([ - [ - [7702.88906574, 7967.66578334, 4613.1634431], - [19389.5254652, 2006.65638992, 11079.7217421], - [7960.66983129, 7709.82781473, 14632.55958021], - [7967.66578334, 7702.88906574, 14639.95139706], - ], + test_data = np.array( [ - [14632.55958021, 7709.82781473, 7960.66983129], - [14639.95139706, 7702.88906574, 7967.66578334], - [4613.1634431, 7967.66578334, 7702.88906574], - [11079.7217421, 2006.65638992, 19389.5254652], - ], - ]) + [ + [7702.88906574, 7967.66578334, 4613.1634431], + [19389.5254652, 2006.65638992, 11079.7217421], + [7960.66983129, 7709.82781473, 14632.55958021], + [7967.66578334, 7702.88906574, 14639.95139706], + ], + [ + [14632.55958021, 7709.82781473, 7960.66983129], + [14639.95139706, 7702.88906574, 7967.66578334], + [4613.1634431, 7967.66578334, 7702.88906574], + [11079.7217421, 2006.65638992, 19389.5254652], + ], + ] + ) dist = u_coord.dist_approx(lat1, lon1, lat2, lon2) np.testing.assert_array_almost_equal(dist, test_data) @@ -402,15 +561,18 @@ def test_get_gridcellarea(self): self.assertAlmostEqual(area[1], 180352.82386516) self.assertEqual(lat.shape, area.shape) - area2 = u_coord.get_gridcellarea(lat, resolution, unit='km2') + area2 = u_coord.get_gridcellarea(lat, resolution, unit="km2") self.assertAlmostEqual(area2[0], 1781.5973363005) self.assertTrue(area2[0] <= 2500) def test_read_vector_pass(self): """Test one columns data""" - shp_file = shapereader.natural_earth(resolution='110m', category='cultural', - name='populated_places_simple') - lat, lon, geometry, intensity = u_coord.read_vector(shp_file, ['pop_min', 'pop_max']) + shp_file = shapereader.natural_earth( + resolution="110m", category="cultural", name="populated_places_simple" + ) + lat, lon, geometry, intensity = u_coord.read_vector( + shp_file, ["pop_min", "pop_max"] + ) self.assertTrue(u_coord.equal_crs(geometry.crs, u_coord.NE_EPSG)) self.assertEqual(geometry.size, lat.size) @@ -429,8 +591,8 @@ def test_read_vector_pass(self): def test_compare_crs(self): """Compare two crs""" - crs_one = 'epsg:4326' - crs_two = {'init': 'epsg:4326', 'no_defs': True} + crs_one = "epsg:4326" + crs_two = {"init": "epsg:4326", "no_defs": True} self.assertTrue(u_coord.equal_crs(crs_one, crs_two)) def test_set_df_geometry_points_pass(self): @@ -440,12 +602,12 @@ def test_set_df_geometry_points_pass(self): climada.test.test_multi_processing.TestCoordinates.test_set_df_geometry_points_scheduled_pass """ df_val = gpd.GeoDataFrame() - df_val['latitude'] = np.ones(10) * 40.0 - df_val['longitude'] = np.ones(10) * 0.50 + df_val["latitude"] = np.ones(10) * 40.0 + df_val["longitude"] = np.ones(10) * 0.50 - u_coord.set_df_geometry_points(df_val, crs='epsg:2202') + u_coord.set_df_geometry_points(df_val, crs="epsg:2202") np.testing.assert_allclose(df_val.geometry.x.values, np.ones(10) * 0.5) - np.testing.assert_allclose(df_val.geometry.y.values, np.ones(10) * 40.) + np.testing.assert_allclose(df_val.geometry.y.values, np.ones(10) * 40.0) def test_convert_wgs_to_utm_pass(self): """Test convert_wgs_to_utm""" @@ -466,9 +628,14 @@ def test_to_crs_user_input(self): self.assertEqual(rcrs, RCRS.from_user_input(u_coord.to_crs_user_input(DEF_CRS))) # can they be understood from the provider? 
- for arg in ['epsg:4326', b'epsg:4326', DEF_CRS, 4326, - {'init': 'epsg:4326', 'no_defs': True}, - b'{"init": "epsg:4326", "no_defs": True}']: + for arg in [ + "epsg:4326", + b"epsg:4326", + DEF_CRS, + 4326, + {"init": "epsg:4326", "no_defs": True}, + b'{"init": "epsg:4326", "no_defs": True}', + ]: self.assertEqual(pcrs, PCRS.from_user_input(u_coord.to_crs_user_input(arg))) self.assertEqual(rcrs, RCRS.from_user_input(u_coord.to_crs_user_input(arg))) @@ -477,27 +644,42 @@ def test_to_crs_user_input(self): with self.assertRaises(ValueError): u_coord.to_crs_user_input(arg) with self.assertRaises(SyntaxError): - u_coord.to_crs_user_input('{init: epsg:4326, no_defs: True}') + u_coord.to_crs_user_input("{init: epsg:4326, no_defs: True}") def test_country_to_iso(self): name_list = [ - '', 'United States', 'Argentina', 'Japan', 'Australia', 'Norway', 'Madagascar'] - al2_list = ['', 'US', 'AR', 'JP', 'AU', 'NO', 'MG'] - al3_list = ['', 'USA', 'ARG', 'JPN', 'AUS', 'NOR', 'MDG'] + "", + "United States", + "Argentina", + "Japan", + "Australia", + "Norway", + "Madagascar", + ] + al2_list = ["", "US", "AR", "JP", "AU", "NO", "MG"] + al3_list = ["", "USA", "ARG", "JPN", "AUS", "NOR", "MDG"] num_list = [0, 840, 32, 392, 36, 578, 450] natid_list = [0, 217, 9, 104, 13, 154, 128] # examples from docstring: self.assertEqual(u_coord.country_to_iso(840), "USA") - self.assertEqual(u_coord.country_to_iso("United States", representation="alpha2"), "US") - self.assertEqual(u_coord.country_to_iso(["United States of America", "SU"], "numeric"), - [840, 810]) - self.assertEqual(u_coord.country_to_iso(["XK", "Dhekelia"], "numeric"), [983, 907]) + self.assertEqual( + u_coord.country_to_iso("United States", representation="alpha2"), "US" + ) + self.assertEqual( + u_coord.country_to_iso(["United States of America", "SU"], "numeric"), + [840, 810], + ) + self.assertEqual( + u_coord.country_to_iso(["XK", "Dhekelia"], "numeric"), [983, 907] + ) # test cases: iso_lists = [name_list, al2_list, al3_list, num_list] for l1 in iso_lists: - for l2, representation in zip(iso_lists, ["name", "alpha2", "alpha3", "numeric"]): + for l2, representation in zip( + iso_lists, ["name", "alpha2", "alpha3", "numeric"] + ): self.assertEqual(u_coord.country_to_iso(l1, representation), l2) # deprecated API `country_iso_alpha2numeric` @@ -544,61 +726,120 @@ def test_match_grid_points(self): def test_match_centroids(self): """Test match_centroids function.""" - #Test 1: Raster data + # Test 1: Raster data meta = { - 'count': 1, 'crs': DEF_CRS, - 'width': 20, 'height': 10, - 'transform': rasterio.Affine(1.5, 0.0, -20, 0.0, -1.4, 8) + "count": 1, + "crs": DEF_CRS, + "width": 20, + "height": 10, + "transform": rasterio.Affine(1.5, 0.0, -20, 0.0, -1.4, 8), } centroids = Centroids.from_meta(meta=meta) - df = pd.DataFrame({ - 'longitude': np.array([ - -20.1, -20.0, -19.8, -19.0, -18.6, -18.4, - -19.0, -19.0, -19.0, -19.0, - -20.1, 0.0, 10.1, 10.1, 10.1, 0.0, -20.2, -20.3, - -6.4, 9.8, 0.0, - ]), - 'latitude': np.array([ - 7.3, 7.3, 7.3, 7.3, 7.3, 7.3, - 8.1, 7.9, 6.7, 6.5, - 8.1, 8.2, 8.3, 0.0, -6.1, -6.2, -6.3, 0.0, - -1.9, -1.7, 0.0, - ]), - }) + df = pd.DataFrame( + { + "longitude": np.array( + [ + -20.1, + -20.0, + -19.8, + -19.0, + -18.6, + -18.4, + -19.0, + -19.0, + -19.0, + -19.0, + -20.1, + 0.0, + 10.1, + 10.1, + 10.1, + 0.0, + -20.2, + -20.3, + -6.4, + 9.8, + 0.0, + ] + ), + "latitude": np.array( + [ + 7.3, + 7.3, + 7.3, + 7.3, + 7.3, + 7.3, + 8.1, + 7.9, + 6.7, + 6.5, + 8.1, + 8.2, + 8.3, + 0.0, + -6.1, + -6.2, + -6.3, + 0.0, + -1.9, + -1.7, + 
0.0, + ] + ), + } + ) gdf = gpd.GeoDataFrame( df, - geometry=gpd.points_from_xy(df['longitude'], df['latitude']), + geometry=gpd.points_from_xy(df["longitude"], df["latitude"]), crs=DEF_CRS, ) assigned = u_coord.match_centroids(gdf, centroids) expected_result = [ # constant y-value, varying x-value - 0, 0, 0, 0, 0, 1, + 0, + 0, + 0, + 0, + 0, + 1, # constant x-value, varying y-value - 0, 0, 0, 20, + 0, + 0, + 0, + 20, # out of bounds: topleft, top, topright, right, bottomright, bottom, bottomleft, left - -1, -1, -1, -1, -1, -1, -1, -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, # some explicit points within the raster - 149, 139, 113, + 149, + 139, + 113, ] - np.testing.assert_array_equal(assigned,expected_result) + np.testing.assert_array_equal(assigned, expected_result) # Test 2: Vector data (copied from test_match_coordinates) # note that the coordinates are in lat/lon - gdf_coords = np.array([(0.2, 2), (0, 0), (0, 2), (2.1, 3), (1, 1), (-1, 1), (0, 179.9)]) - df = pd.DataFrame({ - 'longitude': gdf_coords[:, 1], - 'latitude': gdf_coords[:, 0] - }) - gdf = gpd.GeoDataFrame(df,geometry=gpd.points_from_xy(df['longitude'], df['latitude']), - crs=DEF_CRS) + gdf_coords = np.array( + [(0.2, 2), (0, 0), (0, 2), (2.1, 3), (1, 1), (-1, 1), (0, 179.9)] + ) + df = pd.DataFrame({"longitude": gdf_coords[:, 1], "latitude": gdf_coords[:, 0]}) + gdf = gpd.GeoDataFrame( + df, + geometry=gpd.points_from_xy(df["longitude"], df["latitude"]), + crs=DEF_CRS, + ) coords_to_assign = np.array([(2.1, 3), (0, 0), (0, 2), (0.9, 1.0), (0, -179.9)]) centroids = Centroids( - lat=coords_to_assign[:, 0], - lon=coords_to_assign[:, 1], - crs=DEF_CRS + lat=coords_to_assign[:, 0], lon=coords_to_assign[:, 1], crs=DEF_CRS ) centroids_empty = Centroids(lat=np.array([]), lon=np.array([])) @@ -612,34 +853,35 @@ def test_match_centroids(self): for distance in ["euclidean", "haversine", "approx"]: for thresh, result in expected_results: assigned = u_coord.match_centroids( - gdf, centroids, distance=distance, threshold=thresh) + gdf, centroids, distance=distance, threshold=thresh + ) np.testing.assert_array_equal(assigned, result) - #test empty centroids + # test empty centroids result = [-1, -1, -1, -1, -1, -1, -1] assigned_idx = u_coord.match_centroids( - gdf, centroids_empty, distance=distance, threshold=thresh) + gdf, centroids_empty, distance=distance, threshold=thresh + ) np.testing.assert_array_equal(assigned_idx, result) # Test 3: non matching crs - df = pd.DataFrame({ - 'longitude': [10, 20, 30], - 'latitude': [50, 60, 70] - }) - gdf = gpd.GeoDataFrame(df,geometry=gpd.points_from_xy(df['longitude'], df['latitude']), - crs = 'EPSG:4326') + df = pd.DataFrame({"longitude": [10, 20, 30], "latitude": [50, 60, 70]}) + gdf = gpd.GeoDataFrame( + df, + geometry=gpd.points_from_xy(df["longitude"], df["latitude"]), + crs="EPSG:4326", + ) coords_to_assign = np.array([(2.1, 3), (0, 0), (0, 2), (0.9, 1.0), (0, -179.9)]) centroids = Centroids( - lat=[1100000,1200000], - lon=[2500000,2600000], - crs='EPSG:2056' + lat=[1100000, 1200000], lon=[2500000, 2600000], crs="EPSG:2056" ) with self.assertRaises(ValueError) as cm: u_coord.match_centroids(gdf, centroids) - self.assertIn('Set hazard and GeoDataFrame to same CRS first!', - str(cm.exception)) + self.assertIn( + "Set hazard and GeoDataFrame to same CRS first!", str(cm.exception) + ) def test_dist_sqr_approx_pass(self): """Test approximate distance helper function.""" @@ -650,70 +892,78 @@ def test_dist_sqr_approx_pass(self): lons2 = 56 self.assertAlmostEqual( 7709.827814738594, - 
np.sqrt(u_coord._dist_sqr_approx(lats1, lons1, cos_lats1, lats2, lons2)) * ONE_LAT_KM) + np.sqrt(u_coord._dist_sqr_approx(lats1, lons1, cos_lats1, lats2, lons2)) + * ONE_LAT_KM, + ) def test_wrong_distance_fail(self): """Check exception is thrown when wrong distance is given""" with self.assertRaises(ValueError) as cm: - u_coord.match_coordinates(np.ones((10, 2)), np.ones((7, 2)), distance='distance') - self.assertIn('Coordinate assignment with "distance" distance is not supported.', - str(cm.exception)) + u_coord.match_coordinates( + np.ones((10, 2)), np.ones((7, 2)), distance="distance" + ) + self.assertIn( + 'Coordinate assignment with "distance" distance is not supported.', + str(cm.exception), + ) def data_input_values(self): """Default input coordinates and centroids values""" # Load exposures coordinates from demo entity file - exposures = np.array([ - [26.933899, -80.128799], - [26.957203, -80.098284], - [26.783846, -80.748947], - [26.645524, -80.550704], - [26.897796, -80.596929], - [26.925359, -80.220966], - [26.914768, -80.07466], - [26.853491, -80.190281], - [26.845099, -80.083904], - [26.82651, -80.213493], - [26.842772, -80.0591], - [26.825905, -80.630096], - [26.80465, -80.075301], - [26.788649, -80.069885], - [26.704277, -80.656841], - [26.71005, -80.190085], - [26.755412, -80.08955], - [26.678449, -80.041179], - [26.725649, -80.1324], - [26.720599, -80.091746], - [26.71255, -80.068579], - [26.6649, -80.090698], - [26.664699, -80.1254], - [26.663149, -80.151401], - [26.66875, -80.058749], - [26.638517, -80.283371], - [26.59309, -80.206901], - [26.617449, -80.090649], - [26.620079, -80.055001], - [26.596795, -80.128711], - [26.577049, -80.076435], - [26.524585, -80.080105], - [26.524158, -80.06398], - [26.523737, -80.178973], - [26.520284, -80.110519], - [26.547349, -80.057701], - [26.463399, -80.064251], - [26.45905, -80.07875], - [26.45558, -80.139247], - [26.453699, -80.104316], - [26.449999, -80.188545], - [26.397299, -80.21902], - [26.4084, -80.092391], - [26.40875, -80.1575], - [26.379113, -80.102028], - [26.3809, -80.16885], - [26.349068, -80.116401], - [26.346349, -80.08385], - [26.348015, -80.241305], - [26.347957, -80.158855] - ]) + exposures = np.array( + [ + [26.933899, -80.128799], + [26.957203, -80.098284], + [26.783846, -80.748947], + [26.645524, -80.550704], + [26.897796, -80.596929], + [26.925359, -80.220966], + [26.914768, -80.07466], + [26.853491, -80.190281], + [26.845099, -80.083904], + [26.82651, -80.213493], + [26.842772, -80.0591], + [26.825905, -80.630096], + [26.80465, -80.075301], + [26.788649, -80.069885], + [26.704277, -80.656841], + [26.71005, -80.190085], + [26.755412, -80.08955], + [26.678449, -80.041179], + [26.725649, -80.1324], + [26.720599, -80.091746], + [26.71255, -80.068579], + [26.6649, -80.090698], + [26.664699, -80.1254], + [26.663149, -80.151401], + [26.66875, -80.058749], + [26.638517, -80.283371], + [26.59309, -80.206901], + [26.617449, -80.090649], + [26.620079, -80.055001], + [26.596795, -80.128711], + [26.577049, -80.076435], + [26.524585, -80.080105], + [26.524158, -80.06398], + [26.523737, -80.178973], + [26.520284, -80.110519], + [26.547349, -80.057701], + [26.463399, -80.064251], + [26.45905, -80.07875], + [26.45558, -80.139247], + [26.453699, -80.104316], + [26.449999, -80.188545], + [26.397299, -80.21902], + [26.4084, -80.092391], + [26.40875, -80.1575], + [26.379113, -80.102028], + [26.3809, -80.16885], + [26.349068, -80.116401], + [26.346349, -80.08385], + [26.348015, -80.241305], + [26.347957, -80.158855], + ] + ) # 
Define centroids centroids = np.zeros((100, 2)) @@ -729,29 +979,123 @@ def data_input_values(self): def data_ref(self): """Default output reference""" - return np.array([46, 46, 36, 36, 36, 46, 46, 46, 46, 46, 46, - 36, 46, 46, 36, 46, 46, 46, 46, 46, 46, 46, - 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, - 46, 46, 46, 45, 45, 45, 45, 45, 45, 45, 45, - 45, 45, 45, 45, 45, 45]) + return np.array( + [ + 46, + 46, + 36, + 36, + 36, + 46, + 46, + 46, + 46, + 46, + 46, + 36, + 46, + 46, + 36, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 45, + 45, + 45, + 45, + 45, + 45, + 45, + 45, + 45, + 45, + 45, + 45, + 45, + 45, + ] + ) def data_ref_40(self): """Default output reference for maximum distance threshold 40km""" - return np.array([46, 46, 36, -1, -1, 46, 46, 46, 46, 46, 46, -1, 46, 46, - -1, 46, 46, 46, 46, 46, 46, 46, 46, -1, 46, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, 45, -1, -1]) + return np.array( + [ + 46, + 46, + 36, + -1, + -1, + 46, + 46, + 46, + 46, + 46, + 46, + -1, + 46, + 46, + -1, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + 46, + -1, + 46, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + -1, + 45, + -1, + -1, + ] + ) def data_antimeridian_values(self): """Default input coordinates and centroids value crossing antimerdian""" - exposures = np.array([ - [0, -179.99], - [0, 179.99], - [5, -179.09], - [-5, 179.09], - [0, 130], - [0, -130] - ]) + exposures = np.array( + [[0, -179.99], [0, 179.99], [5, -179.09], [-5, 179.09], [0, 130], [0, -130]] + ) # Define centroids centroids = np.zeros((100, 2)) @@ -759,7 +1103,7 @@ def data_antimeridian_values(self): for ilon in range(10): for ilat in range(10): centroids[inext][0] = -5 + ilat - if ilat -5 <= 0: + if ilat - 5 <= 0: centroids[inext][1] = 170 + ilon + 1 else: centroids[inext][1] = -170 - ilon @@ -793,9 +1137,10 @@ def normal_warning(self, dist): # Interpolate with lower threshold to raise warnings threshold = 40 - with self.assertLogs('climada.util.coordinates', level='INFO') as cm: + with self.assertLogs("climada.util.coordinates", level="INFO") as cm: neighbors = u_coord.match_coordinates( - exposures, centroids, distance=dist, threshold=threshold) + exposures, centroids, distance=dist, threshold=threshold + ) self.assertIn("Distance to closest centroid", cm.output[1]) ref_neighbors = self.data_ref_40() @@ -825,101 +1170,97 @@ def antimeridian_warning(self, dist): # Interpolate with lower threshold to raise warnings threshold = 100 - with self.assertLogs('climada.util.coordinates', level='INFO') as cm: + with self.assertLogs("climada.util.coordinates", level="INFO") as cm: neighbors = u_coord.match_coordinates( - exposures, centroids, distance=dist, threshold=threshold) + exposures, centroids, distance=dist, threshold=threshold + ) self.assertIn("Distance to closest centroid", cm.output[1]) np.testing.assert_array_equal(neighbors, self.data_ref_antimeridian()) def test_approx_normal_pass(self): """Call normal_pass test for approxiamte distance""" - self.normal_pass('approx') + self.normal_pass("approx") def test_approx_normal_warning(self): """Call normal_warning test for approxiamte distance""" - self.normal_warning('approx') + self.normal_warning("approx") def test_approx_repeat_coord_pass(self): """Call repeat_coord_pass test for approxiamte distance""" - self.repeat_coord_pass('approx') + 
self.repeat_coord_pass("approx") def test_approx_antimeridian_warning(self): """Call normal_warning test for approximate distance""" - self.antimeridian_warning('approx') + self.antimeridian_warning("approx") def test_haver_normal_pass(self): """Call normal_pass test for haversine distance""" - self.normal_pass('haversine') + self.normal_pass("haversine") def test_haver_normal_warning(self): """Call normal_warning test for haversine distance""" - self.normal_warning('haversine') + self.normal_warning("haversine") def test_haver_repeat_coord_pass(self): """Call repeat_coord_pass test for haversine distance""" - self.repeat_coord_pass('haversine') + self.repeat_coord_pass("haversine") def test_haver_antimeridian_warning(self): """Call normal_warning test for haversine distance""" - self.antimeridian_warning('haversine') + self.antimeridian_warning("haversine") def test_euc_normal_pass(self): """Call normal_pass test for euclidean distance""" - self.normal_pass('euclidean') + self.normal_pass("euclidean") def test_euc_normal_warning(self): """Call normal_warning test for euclidean distance""" - self.normal_warning('euclidean') + self.normal_warning("euclidean") def test_euc_repeat_coord_pass(self): """Call repeat_coord_pass test for euclidean distance""" - self.repeat_coord_pass('euclidean') + self.repeat_coord_pass("euclidean") def test_euc_antimeridian_warning(self): """Call normal_warning test for euclidean distance""" - self.antimeridian_warning('euclidean') + self.antimeridian_warning("euclidean") def test_diff_outcomes(self): """Different NN interpolation outcomes""" threshold = 100000 # Define centroids - lons = np.arange(-160, 180+1, 20) - lats = np.arange(-60, 60+1, 20) + lons = np.arange(-160, 180 + 1, 20) + lats = np.arange(-60, 60 + 1, 20) lats, lons = [arr.ravel() for arr in np.meshgrid(lats, lons)] centroids = np.transpose([lats, lons]).copy() # `copy()` makes it F-contiguous # Define exposures - exposures = np.array([ - [49.9, 9], - [49.5, 9], - [0, -175] - ]) + exposures = np.array([[49.9, 9], [49.5, 9], [0, -175]]) # Neighbors ref_neighbors = [ [62, 62, 3], [62, 61, 122], [61, 61, 3], - ] + ] - dist_list = ['approx', 'haversine', 'euclidean'] - kwargs_list = [ - {'check_antimeridian':False}, - {}, - {'check_antimeridian':False} - ] + dist_list = ["approx", "haversine", "euclidean"] + kwargs_list = [{"check_antimeridian": False}, {}, {"check_antimeridian": False}] for dist, ref, kwargs in zip(dist_list, ref_neighbors, kwargs_list): neighbors = u_coord.match_coordinates( - exposures, centroids, distance=dist, threshold=threshold, **kwargs) + exposures, centroids, distance=dist, threshold=threshold, **kwargs + ) np.testing.assert_array_equal(neighbors, ref) def test_match_coordinates(self): """Test match_coordinates function""" # note that the coordinates are in lat/lon - coords = np.array([(0.2, 2), (0, 0), (0, 2), (2.1, 3), (1, 1), (-1, 1), (0, 179.9)]) + coords = np.array( + [(0.2, 2), (0, 0), (0, 2), (2.1, 3), (1, 1), (-1, 1), (0, 179.9)] + ) coords_to_assign = np.array([(2.1, 3), (0, 0), (0, 2), (0.9, 1.0), (0, -179.9)]) expected_results = [ # test with different thresholds (in km) @@ -935,30 +1276,36 @@ def test_match_coordinates(self): coords_to_assign_typed = coords_to_assign.astype(test_dtype) for thresh, result in expected_results: assigned_idx = u_coord.match_coordinates( - coords_typed, coords_to_assign_typed, - distance=distance, threshold=thresh) + coords_typed, + coords_to_assign_typed, + distance=distance, + threshold=thresh, + ) 
np.testing.assert_array_equal(assigned_idx, result) - #test empty coords_to_assign + # test empty coords_to_assign coords_to_assign_empty = np.array([]) result = [-1, -1, -1, -1, -1, -1, -1] assigned_idx = u_coord.match_coordinates( - coords, coords_to_assign_empty, distance=distance, threshold=thresh) + coords, coords_to_assign_empty, distance=distance, threshold=thresh + ) np.testing.assert_array_equal(assigned_idx, result) - #test empty coords + # test empty coords coords_empty = np.array([]) result = np.array([]) assigned_idx = u_coord.match_coordinates( - coords_empty, coords_to_assign, distance=distance, threshold=thresh) + coords_empty, coords_to_assign, distance=distance, threshold=thresh + ) np.testing.assert_array_equal(assigned_idx, result) + class TestGetGeodata(unittest.TestCase): def test_nat_earth_resolution_pass(self): """Correct resolution.""" - self.assertEqual(u_coord.nat_earth_resolution(10), '10m') - self.assertEqual(u_coord.nat_earth_resolution(50), '50m') - self.assertEqual(u_coord.nat_earth_resolution(110), '110m') + self.assertEqual(u_coord.nat_earth_resolution(10), "10m") + self.assertEqual(u_coord.nat_earth_resolution(50), "50m") + self.assertEqual(u_coord.nat_earth_resolution(110), "110m") def test_nat_earth_resolution_fail(self): """Wrong resolution.""" @@ -971,33 +1318,44 @@ def test_nat_earth_resolution_fail(self): def test_get_land_geometry_country_pass(self): """get_land_geometry with selected countries.""" - iso_countries = ['DEU', 'VNM'] + iso_countries = ["DEU", "VNM"] res = u_coord.get_land_geometry(country_names=iso_countries, resolution=10) self.assertIsInstance(res, shapely.geometry.multipolygon.MultiPolygon) - for res, ref in zip(res.bounds, (5.85248986800, 8.56557851800, - 109.47242272200, 55.065334377000)): + for res, ref in zip( + res.bounds, (5.85248986800, 8.56557851800, 109.47242272200, 55.065334377000) + ): self.assertAlmostEqual(res, ref) - iso_countries = ['ESP'] + iso_countries = ["ESP"] res = u_coord.get_land_geometry(country_names=iso_countries, resolution=10) self.assertIsInstance(res, shapely.geometry.multipolygon.MultiPolygon) - for res, ref in zip(res.bounds, (-18.16722571499986, 27.642238674000, - 4.337087436000, 43.793443101)): + for res, ref in zip( + res.bounds, + (-18.16722571499986, 27.642238674000, 4.337087436000, 43.793443101), + ): self.assertAlmostEqual(res, ref) - iso_countries = ['FRA'] + iso_countries = ["FRA"] res = u_coord.get_land_geometry(country_names=iso_countries, resolution=10) self.assertIsInstance(res, shapely.geometry.multipolygon.MultiPolygon) - for res, ref in zip(res.bounds, (-61.79784094999991, -21.37078215899993, - 55.854502800000034, 51.08754088371883)): + for res, ref in zip( + res.bounds, + ( + -61.79784094999991, + -21.37078215899993, + 55.854502800000034, + 51.08754088371883, + ), + ): self.assertAlmostEqual(res, ref) def test_get_land_geometry_extent_pass(self): """get_land_geometry with selected countries.""" lat = np.array([28.203216, 28.555994, 28.860875]) lon = np.array([-16.567489, -18.554130, -9.532476]) - res = u_coord.get_land_geometry(extent=(np.min(lon), np.max(lon), - np.min(lat), np.max(lat)), resolution=10) + res = u_coord.get_land_geometry( + extent=(np.min(lon), np.max(lon), np.min(lat), np.max(lat)), resolution=10 + ) self.assertIsInstance(res, shapely.geometry.multipolygon.MultiPolygon) self.assertAlmostEqual(res.bounds[0], -18.002186653) self.assertAlmostEqual(res.bounds[1], lat[0]) @@ -1012,7 +1370,9 @@ def test_get_land_geometry_all_pass(self): def test_on_land_pass(self): 
"""check point on land with 1:50.000.000 resolution.""" - rows, cols, trans = u_coord.pts_to_raster_meta((-179.5, -60, 179.5, 60), (1, -1)) + rows, cols, trans = u_coord.pts_to_raster_meta( + (-179.5, -60, 179.5, 60), (1, -1) + ) xgrid, ygrid = u_coord.raster_to_meshgrid(trans, cols, rows) lat = np.concatenate([[28.203216, 28.555994, 28.860875], ygrid.ravel()]) lon = np.concatenate([[-16.567489, -18.554130, -9.532476], xgrid.ravel()]) @@ -1022,18 +1382,22 @@ def test_on_land_pass(self): def test_dist_to_coast_nasa(self): """Test point in coast and point not in coast""" - points = np.array([ - # Caribbean Sea: - [13.208333333333329, -59.625000000000014], - # South America: - [-12.497529, -58.849505], - # Very close to coast of Somalia: - [1.96475615, 45.23249055], - ]) + points = np.array( + [ + # Caribbean Sea: + [13.208333333333329, -59.625000000000014], + # South America: + [-12.497529, -58.849505], + # Very close to coast of Somalia: + [1.96475615, 45.23249055], + ] + ) dists = [-3000, -1393549.5, 48.77] dists_lowres = [729.1666667, 1393670.6973145, 945.73129294] # Warning: This will download more than 300 MB of data if not already present! - result = u_coord.dist_to_coast_nasa(points[:, 0], points[:, 1], highres=True, signed=True) + result = u_coord.dist_to_coast_nasa( + points[:, 0], points[:, 1], highres=True, signed=True + ) result_lowres = u_coord.dist_to_coast_nasa(points[:, 0], points[:, 1]) np.testing.assert_array_almost_equal(dists, result) np.testing.assert_array_almost_equal(dists_lowres, result_lowres) @@ -1042,49 +1406,46 @@ def test_get_country_geometries_country_pass(self): """get_country_geometries with selected countries. issues with the natural earth data should be caught by test_get_land_geometry_* since it's very similar""" - iso_countries = ['NLD', 'VNM'] + iso_countries = ["NLD", "VNM"] res = u_coord.get_country_geometries(iso_countries, resolution=110) self.assertIsInstance(res, gpd.geodataframe.GeoDataFrame) self.assertEqual(res.shape[0], 2) def test_get_country_geometries_country_norway_pass(self): """test correct numeric ISO3 for country Norway""" - iso_countries = 'NOR' + iso_countries = "NOR" extent = [10, 11, 55, 60] res1 = u_coord.get_country_geometries(iso_countries) res2 = u_coord.get_country_geometries(extent=extent) - self.assertEqual(res1.ISO_N3.values[0], '578') - self.assertIn('578', res2.ISO_N3.values) - self.assertIn('NOR', res2.ISO_A3.values) - self.assertIn('Denmark', res2.NAME.values) - self.assertIn('Norway', res2.NAME.values) - self.assertNotIn('Sweden', res2.NAME.values) + self.assertEqual(res1.ISO_N3.values[0], "578") + self.assertIn("578", res2.ISO_N3.values) + self.assertIn("NOR", res2.ISO_A3.values) + self.assertIn("Denmark", res2.NAME.values) + self.assertIn("Norway", res2.NAME.values) + self.assertNotIn("Sweden", res2.NAME.values) def test_get_country_geometries_extent_pass(self): """get_country_geometries by selecting by extent""" lat = np.array([28.203216, 28.555994, 28.860875]) lon = np.array([-16.567489, -18.554130, -9.532476]) - res = u_coord.get_country_geometries(extent=( - np.min(lon), np.max(lon), - np.min(lat), np.max(lat) - )) + res = u_coord.get_country_geometries( + extent=(np.min(lon), np.max(lon), np.min(lat), np.max(lat)) + ) self.assertIsInstance(res, gpd.geodataframe.GeoDataFrame) + self.assertTrue(np.allclose(res.bounds.iloc[1, 1], lat[0])) self.assertTrue( - np.allclose(res.bounds.iloc[1, 1], lat[0]) + np.allclose(res.bounds.iloc[0, 0], -11.800084333105298) + or np.allclose(res.bounds.iloc[1, 0], -11.800084333105298) 
) self.assertTrue( - np.allclose(res.bounds.iloc[0, 0], -11.800084333105298) or - np.allclose(res.bounds.iloc[1, 0], -11.800084333105298) + np.allclose(res.bounds.iloc[0, 2], np.max(lon)) + or np.allclose(res.bounds.iloc[1, 2], np.max(lon)) ) self.assertTrue( - np.allclose(res.bounds.iloc[0, 2], np.max(lon)) or - np.allclose(res.bounds.iloc[1, 2], np.max(lon)) - ) - self.assertTrue( - np.allclose(res.bounds.iloc[0, 3], np.max(lat)) or - np.allclose(res.bounds.iloc[1, 3], np.max(lat)) + np.allclose(res.bounds.iloc[0, 3], np.max(lat)) + or np.allclose(res.bounds.iloc[1, 3], np.max(lat)) ) def test_get_country_geometries_all_pass(self): @@ -1097,23 +1458,45 @@ def test_get_country_geometries_all_pass(self): def test_get_country_geometries_fail(self): """get_country_geometries with offensive parameters""" with self.assertRaises(ValueError) as cm: - u_coord.get_country_geometries(extent=(-20,350,0,0)) - self.assertIn("longitude extent range is greater than 360: -20 to 350", - str(cm.exception)) + u_coord.get_country_geometries(extent=(-20, 350, 0, 0)) + self.assertIn( + "longitude extent range is greater than 360: -20 to 350", str(cm.exception) + ) with self.assertRaises(ValueError) as cm: - u_coord.get_country_geometries(extent=(350,-20,0,0)) - self.assertIn("longitude extent at the left (350) is larger " - "than longitude extent at the right (-20)", - str(cm.exception)) + u_coord.get_country_geometries(extent=(350, -20, 0, 0)) + self.assertIn( + "longitude extent at the left (350) is larger " + "than longitude extent at the right (-20)", + str(cm.exception), + ) def test_country_code_pass(self): """Test set_region_id""" - lon = np.array([-59.6250000000000, -59.6250000000000, -59.6250000000000, - -59.5416666666667, -59.5416666666667, -59.4583333333333, - -60.2083333333333, -60.2083333333333]) - lat = np.array([13.125, 13.20833333, 13.29166667, 13.125, 13.20833333, - 13.125, 12.625, 12.70833333]) + lon = np.array( + [ + -59.6250000000000, + -59.6250000000000, + -59.6250000000000, + -59.5416666666667, + -59.5416666666667, + -59.4583333333333, + -60.2083333333333, + -60.2083333333333, + ] + ) + lat = np.array( + [ + 13.125, + 13.20833333, + 13.29166667, + 13.125, + 13.20833333, + 13.125, + 12.625, + 12.70833333, + ] + ) for gridded in [True, False]: region_id = u_coord.get_country_code(lat, lon, gridded=gridded) region_id_OSLO = u_coord.get_country_code(59.91, 10.75, gridded=gridded) @@ -1125,50 +1508,63 @@ def test_country_code_pass(self): def test_all_points_on_sea(self): """Test country codes for unassignable coordinates (i.e., on sea)""" - lon = [-24.1 , -24.32634711, -24.55751498, -24.79698392] - lat = [87.3 , 87.23261237, 87.14440587, 87.04121094] + lon = [-24.1, -24.32634711, -24.55751498, -24.79698392] + lat = [87.3, 87.23261237, 87.14440587, 87.04121094] for gridded in [True, False]: country_codes = u_coord.get_country_code(lat, lon, gridded=gridded) self.assertTrue(np.all(country_codes == np.array([0, 0, 0, 0]))) def test_get_admin1_info_pass(self): """test get_admin1_info()""" - country_names = ['CHE', 'Indonesia', '840', 51] - admin1_info, admin1_shapes = u_coord.get_admin1_info(country_names=country_names) + country_names = ["CHE", "Indonesia", "840", 51] + admin1_info, admin1_shapes = u_coord.get_admin1_info( + country_names=country_names + ) self.assertEqual(len(admin1_info), 4) - self.assertListEqual(list(admin1_info.keys()), ['CHE', 'IDN', 'USA', 'ARM']) - self.assertEqual(len(admin1_info['CHE']), len(admin1_shapes['CHE'])) - self.assertEqual(len(admin1_info['CHE']), 26) - 
self.assertEqual(len(admin1_shapes['IDN']), 33) - self.assertEqual(len(admin1_info['USA']), 51) + self.assertListEqual(list(admin1_info.keys()), ["CHE", "IDN", "USA", "ARM"]) + self.assertEqual(len(admin1_info["CHE"]), len(admin1_shapes["CHE"])) + self.assertEqual(len(admin1_info["CHE"]), 26) + self.assertEqual(len(admin1_shapes["IDN"]), 33) + self.assertEqual(len(admin1_info["USA"]), 51) # depending on the version of Natural Earth, this is Washington or Idaho: - self.assertIn(admin1_info['USA'][1]['iso_3166_2'], ['US-WA', 'US-ID']) + self.assertIn(admin1_info["USA"][1]["iso_3166_2"], ["US-WA", "US-ID"]) def test_get_admin1_geometries_pass(self): """test get_admin1_geometries""" - countries = ['CHE', 'Indonesia', '840', 51] + countries = ["CHE", "Indonesia", "840", 51] gdf = u_coord.get_admin1_geometries(countries=countries) self.assertIsInstance(gdf, gpd.GeoDataFrame) - self.assertEqual(len(gdf.iso_3a.unique()), 4) # 4 countries - self.assertEqual(gdf.loc[gdf.iso_3a=='CHE'].shape[0], 26) # 26 cantons in CHE - self.assertEqual(gdf.shape[0], 121) # 121 admin 1 regions in the 4 countries - self.assertIn('ARM', gdf['iso_3a'].values) # Armenia (region_id 051) - self.assertIn('756', gdf['iso_3n'].values) # Switzerland (region_id 756) - self.assertIn('CH-AI', gdf['iso_3166_2'].values) # canton in CHE - self.assertIn('Sulawesi Tengah', gdf['admin1_name'].values) # region in Indonesia - self.assertIsInstance(gdf.loc[gdf['iso_3166_2'] == 'CH-AI'].geometry.values[0], - shapely.geometry.MultiPolygon) - self.assertIsInstance(gdf.loc[gdf.admin1_name == 'Sulawesi Tengah'].geometry.values[0], - shapely.geometry.MultiPolygon) - self.assertIsInstance(gdf.loc[gdf.admin1_name == 'Valais'].geometry.values[0], - shapely.geometry.Polygon) + self.assertEqual(len(gdf.iso_3a.unique()), 4) # 4 countries + self.assertEqual(gdf.loc[gdf.iso_3a == "CHE"].shape[0], 26) # 26 cantons in CHE + self.assertEqual(gdf.shape[0], 121) # 121 admin 1 regions in the 4 countries + self.assertIn("ARM", gdf["iso_3a"].values) # Armenia (region_id 051) + self.assertIn("756", gdf["iso_3n"].values) # Switzerland (region_id 756) + self.assertIn("CH-AI", gdf["iso_3166_2"].values) # canton in CHE + self.assertIn( + "Sulawesi Tengah", gdf["admin1_name"].values + ) # region in Indonesia + self.assertIsInstance( + gdf.loc[gdf["iso_3166_2"] == "CH-AI"].geometry.values[0], + shapely.geometry.MultiPolygon, + ) + self.assertIsInstance( + gdf.loc[gdf.admin1_name == "Sulawesi Tengah"].geometry.values[0], + shapely.geometry.MultiPolygon, + ) + self.assertIsInstance( + gdf.loc[gdf.admin1_name == "Valais"].geometry.values[0], + shapely.geometry.Polygon, + ) def test_get_admin1_geometries_fail(self): """test get_admin1_geometries wrong input""" # non existing country: self.assertRaises(LookupError, u_coord.get_admin1_geometries, ["FantasyLand"]) # wrong variable type for 'countries', e.g. 
Polygon: - self.assertRaises(TypeError, u_coord.get_admin1_geometries, shapely.geometry.Polygon()) + self.assertRaises( + TypeError, u_coord.get_admin1_geometries, shapely.geometry.Polygon() + ) + class TestRasterMeta(unittest.TestCase): def test_is_regular_pass(self): @@ -1203,20 +1599,26 @@ def test_is_regular_pass(self): self.assertEqual(hei, 2) self.assertEqual(wid, 2) - grid_x, grid_y = np.mgrid[10: 100: complex(0, 5), - 0: 10: complex(0, 5)] - grid_x = grid_x.reshape(-1,) - grid_y = grid_y.reshape(-1,) + grid_x, grid_y = np.mgrid[10 : 100 : complex(0, 5), 0 : 10 : complex(0, 5)] + grid_x = grid_x.reshape( + -1, + ) + grid_y = grid_y.reshape( + -1, + ) coord = np.array([grid_x, grid_y]).transpose() reg, hei, wid = u_coord.grid_is_regular(coord) self.assertTrue(reg) self.assertEqual(hei, 5) self.assertEqual(wid, 5) - grid_x, grid_y = np.mgrid[10: 100: complex(0, 4), - 0: 10: complex(0, 5)] - grid_x = grid_x.reshape(-1,) - grid_y = grid_y.reshape(-1,) + grid_x, grid_y = np.mgrid[10 : 100 : complex(0, 4), 0 : 10 : complex(0, 5)] + grid_x = grid_x.reshape( + -1, + ) + grid_y = grid_y.reshape( + -1, + ) coord = np.array([grid_x, grid_y]).transpose() reg, hei, wid = u_coord.grid_is_regular(coord) self.assertTrue(reg) @@ -1225,14 +1627,38 @@ def test_is_regular_pass(self): def test_get_resolution_pass(self): """Test _get_resolution method""" - lat = np.array([13.125, 13.20833333, 13.29166667, 13.125, - 13.20833333, 13.125, 12.625, 12.70833333, - 12.79166667, 12.875, 12.95833333, 13.04166667]) - lon = np.array([ - -59.6250000000000, -59.6250000000000, -59.6250000000000, -59.5416666666667, - -59.5416666666667, -59.4583333333333, -60.2083333333333, -60.2083333333333, - -60.2083333333333, -60.2083333333333, -60.2083333333333, -60.2083333333333 - ]) + lat = np.array( + [ + 13.125, + 13.20833333, + 13.29166667, + 13.125, + 13.20833333, + 13.125, + 12.625, + 12.70833333, + 12.79166667, + 12.875, + 12.95833333, + 13.04166667, + ] + ) + lon = np.array( + [ + -59.6250000000000, + -59.6250000000000, + -59.6250000000000, + -59.5416666666667, + -59.5416666666667, + -59.4583333333333, + -60.2083333333333, + -60.2083333333333, + -60.2083333333333, + -60.2083333333333, + -60.2083333333333, + -60.2083333333333, + ] + ) res_lat, res_lon = u_coord.get_resolution(lat, lon) self.assertAlmostEqual(res_lat, 0.0833333333333) self.assertAlmostEqual(res_lon, 0.0833333333333) @@ -1276,179 +1702,212 @@ def test_points_to_raster_pass(self): df_val = gpd.GeoDataFrame() x, y = np.meshgrid(np.linspace(0, 2, 5), np.linspace(40, 50, 10)) - df_val['latitude'] = y.flatten() - df_val['longitude'] = x.flatten() - df_val['value'] = np.ones(len(df_val)) * 10 - crs = 'epsg:2202' - _raster, meta = u_coord.points_to_raster(df_val, val_names=['value'], crs=crs, - scheduler=scheduler) - self.assertFalse(hasattr(df_val, "crs")) # points_to_raster must not modify df_val - self.assertTrue(u_coord.equal_crs(meta['crs'], crs)) - self.assertAlmostEqual(meta['transform'][0], 0.5) - self.assertAlmostEqual(meta['transform'][1], 0) - self.assertAlmostEqual(meta['transform'][2], -0.25) - self.assertAlmostEqual(meta['transform'][3], 0) - self.assertAlmostEqual(meta['transform'][4], -0.5) - self.assertAlmostEqual(meta['transform'][5], 50.25) - self.assertEqual(meta['height'], 21) - self.assertEqual(meta['width'], 5) + df_val["latitude"] = y.flatten() + df_val["longitude"] = x.flatten() + df_val["value"] = np.ones(len(df_val)) * 10 + crs = "epsg:2202" + _raster, meta = u_coord.points_to_raster( + df_val, val_names=["value"], crs=crs, 
scheduler=scheduler + ) + self.assertFalse( + hasattr(df_val, "crs") + ) # points_to_raster must not modify df_val + self.assertTrue(u_coord.equal_crs(meta["crs"], crs)) + self.assertAlmostEqual(meta["transform"][0], 0.5) + self.assertAlmostEqual(meta["transform"][1], 0) + self.assertAlmostEqual(meta["transform"][2], -0.25) + self.assertAlmostEqual(meta["transform"][3], 0) + self.assertAlmostEqual(meta["transform"][4], -0.5) + self.assertAlmostEqual(meta["transform"][5], 50.25) + self.assertEqual(meta["height"], 21) + self.assertEqual(meta["width"], 5) # test for values crossing antimeridian df_val = gpd.GeoDataFrame() - df_val['latitude'] = [1, 0, 1, 0] - df_val['longitude'] = [178, -179.0, 181, -180] - df_val['value'] = np.arange(4) + df_val["latitude"] = [1, 0, 1, 0] + df_val["longitude"] = [178, -179.0, 181, -180] + df_val["value"] = np.arange(4) r_data, meta = u_coord.points_to_raster( - df_val, val_names=['value'], res=0.5, raster_res=1.0, scheduler=scheduler) - self.assertTrue(u_coord.equal_crs(meta['crs'], DEF_CRS)) - self.assertAlmostEqual(meta['transform'][0], 1.0) - self.assertAlmostEqual(meta['transform'][1], 0) - self.assertAlmostEqual(meta['transform'][2], 177.5) - self.assertAlmostEqual(meta['transform'][3], 0) - self.assertAlmostEqual(meta['transform'][4], -1.0) - self.assertAlmostEqual(meta['transform'][5], 1.5) - self.assertEqual(meta['height'], 2) - self.assertEqual(meta['width'], 4) + df_val, + val_names=["value"], + res=0.5, + raster_res=1.0, + scheduler=scheduler, + ) + self.assertTrue(u_coord.equal_crs(meta["crs"], DEF_CRS)) + self.assertAlmostEqual(meta["transform"][0], 1.0) + self.assertAlmostEqual(meta["transform"][1], 0) + self.assertAlmostEqual(meta["transform"][2], 177.5) + self.assertAlmostEqual(meta["transform"][3], 0) + self.assertAlmostEqual(meta["transform"][4], -1.0) + self.assertAlmostEqual(meta["transform"][5], 1.5) + self.assertEqual(meta["height"], 2) + self.assertEqual(meta["width"], 4) np.testing.assert_array_equal(r_data[0], [[0, 0, 0, 2], [0, 0, 3, 1]]) + class TestRasterIO(unittest.TestCase): def test_write_raster_pass(self): """Test write_raster function.""" test_file = Path(DATA_DIR, "test_write_raster.tif") data = np.arange(24).reshape(6, 4).astype(np.float32) meta = { - 'transform': Affine(0.1, 0, 0, 0, 1, 0), - 'width': data.shape[1], - 'height': data.shape[0], - 'crs': 'epsg:2202', - 'compress': 'deflate', + "transform": Affine(0.1, 0, 0, 0, 1, 0), + "width": data.shape[1], + "height": data.shape[0], + "crs": "epsg:2202", + "compress": "deflate", } u_coord.write_raster(test_file, data, meta) read_meta, read_data = u_coord.read_raster(test_file) - self.assertEqual(read_meta['transform'], meta['transform']) - self.assertEqual(read_meta['width'], meta['width']) - self.assertEqual(read_meta['height'], meta['height']) - self.assertTrue(u_coord.equal_crs(read_meta['crs'], meta['crs'])) + self.assertEqual(read_meta["transform"], meta["transform"]) + self.assertEqual(read_meta["width"], meta["width"]) + self.assertEqual(read_meta["height"], meta["height"]) + self.assertTrue(u_coord.equal_crs(read_meta["crs"], meta["crs"])) self.assertEqual(read_data.shape, (1, np.prod(data.shape))) np.testing.assert_array_equal(read_data, data.reshape(read_data.shape)) def test_window_raster_pass(self): """Test window""" - meta, inten_ras = u_coord.read_raster(HAZ_DEMO_FL, window=Window(10, 20, 50.1, 60)) - self.assertAlmostEqual(meta['crs'], DEF_CRS) - self.assertAlmostEqual(meta['transform'].c, -69.2471495969998) - self.assertAlmostEqual(meta['transform'].a, 
0.009000000000000341) - self.assertAlmostEqual(meta['transform'].b, 0.0) - self.assertAlmostEqual(meta['transform'].f, 10.248220966978932) - self.assertAlmostEqual(meta['transform'].d, 0.0) - self.assertAlmostEqual(meta['transform'].e, -0.009000000000000341) - self.assertEqual(meta['height'], 60) - self.assertEqual(meta['width'], 50) + meta, inten_ras = u_coord.read_raster( + HAZ_DEMO_FL, window=Window(10, 20, 50.1, 60) + ) + self.assertAlmostEqual(meta["crs"], DEF_CRS) + self.assertAlmostEqual(meta["transform"].c, -69.2471495969998) + self.assertAlmostEqual(meta["transform"].a, 0.009000000000000341) + self.assertAlmostEqual(meta["transform"].b, 0.0) + self.assertAlmostEqual(meta["transform"].f, 10.248220966978932) + self.assertAlmostEqual(meta["transform"].d, 0.0) + self.assertAlmostEqual(meta["transform"].e, -0.009000000000000341) + self.assertEqual(meta["height"], 60) + self.assertEqual(meta["width"], 50) self.assertEqual(inten_ras.shape, (1, 60 * 50)) self.assertAlmostEqual(inten_ras.reshape((60, 50))[25, 12], 0.056825936) def test_poly_raster_pass(self): """Test geometry""" - poly = box(-69.2471495969998, 9.708220966978912, -68.79714959699979, 10.248220966978932) + poly = box( + -69.2471495969998, 9.708220966978912, -68.79714959699979, 10.248220966978932 + ) meta, inten_ras = u_coord.read_raster(HAZ_DEMO_FL, geometry=[poly]) - self.assertAlmostEqual(meta['crs'], DEF_CRS) - self.assertAlmostEqual(meta['transform'].c, -69.2471495969998) - self.assertAlmostEqual(meta['transform'].a, 0.009000000000000341) - self.assertAlmostEqual(meta['transform'].b, 0.0) - self.assertAlmostEqual(meta['transform'].f, 10.248220966978932) - self.assertAlmostEqual(meta['transform'].d, 0.0) - self.assertAlmostEqual(meta['transform'].e, -0.009000000000000341) - self.assertEqual(meta['height'], 60) - self.assertEqual(meta['width'], 50) + self.assertAlmostEqual(meta["crs"], DEF_CRS) + self.assertAlmostEqual(meta["transform"].c, -69.2471495969998) + self.assertAlmostEqual(meta["transform"].a, 0.009000000000000341) + self.assertAlmostEqual(meta["transform"].b, 0.0) + self.assertAlmostEqual(meta["transform"].f, 10.248220966978932) + self.assertAlmostEqual(meta["transform"].d, 0.0) + self.assertAlmostEqual(meta["transform"].e, -0.009000000000000341) + self.assertEqual(meta["height"], 60) + self.assertEqual(meta["width"], 50) self.assertEqual(inten_ras.shape, (1, 60 * 50)) def test_crs_raster_pass(self): """Test change projection""" meta, inten_ras = u_coord.read_raster( - HAZ_DEMO_FL, dst_crs='epsg:2202', resampling=Resampling.nearest) - self.assertAlmostEqual(meta['crs'], 'epsg:2202') - self.assertAlmostEqual(meta['transform'].c, 462486.8490210658) - self.assertAlmostEqual(meta['transform'].a, 998.576177833903) - self.assertAlmostEqual(meta['transform'].b, 0.0) - self.assertAlmostEqual(meta['transform'].f, 1164831.4772731226) - self.assertAlmostEqual(meta['transform'].d, 0.0) - self.assertAlmostEqual(meta['transform'].e, -998.576177833903) - self.assertEqual(meta['height'], 1081) - self.assertEqual(meta['width'], 968) + HAZ_DEMO_FL, dst_crs="epsg:2202", resampling=Resampling.nearest + ) + self.assertAlmostEqual(meta["crs"], "epsg:2202") + self.assertAlmostEqual(meta["transform"].c, 462486.8490210658) + self.assertAlmostEqual(meta["transform"].a, 998.576177833903) + self.assertAlmostEqual(meta["transform"].b, 0.0) + self.assertAlmostEqual(meta["transform"].f, 1164831.4772731226) + self.assertAlmostEqual(meta["transform"].d, 0.0) + self.assertAlmostEqual(meta["transform"].e, -998.576177833903) + 
self.assertEqual(meta["height"], 1081) + self.assertEqual(meta["width"], 968) self.assertEqual(inten_ras.shape, (1, 1081 * 968)) # TODO: NOT RESAMPLING WELL in this case!? self.assertAlmostEqual(inten_ras.reshape((1081, 968))[45, 22], 0) def test_crs_and_geometry_raster_pass(self): """Test change projection and crop to geometry""" - ply = shapely.geometry.Polygon([ - (478080.8562247154, 1105419.13439131), - (478087.5912452241, 1116475.583523723), - (500000, 1116468.876713805), - (500000, 1105412.49126517), - (478080.8562247154, 1105419.13439131) - ]) + ply = shapely.geometry.Polygon( + [ + (478080.8562247154, 1105419.13439131), + (478087.5912452241, 1116475.583523723), + (500000, 1116468.876713805), + (500000, 1105412.49126517), + (478080.8562247154, 1105419.13439131), + ] + ) meta, inten_ras = u_coord.read_raster( - HAZ_DEMO_FL, dst_crs='epsg:2202', geometry=[ply], - resampling=Resampling.nearest) - self.assertAlmostEqual(meta['crs'], 'epsg:2202') - self.assertEqual(meta['height'], 12) - self.assertEqual(meta['width'], 23) + HAZ_DEMO_FL, + dst_crs="epsg:2202", + geometry=[ply], + resampling=Resampling.nearest, + ) + self.assertAlmostEqual(meta["crs"], "epsg:2202") + self.assertEqual(meta["height"], 12) + self.assertEqual(meta["width"], 23) self.assertEqual(inten_ras.shape, (1, 12 * 23)) # TODO: NOT RESAMPLING WELL in this case!? self.assertAlmostEqual(inten_ras.reshape((12, 23))[11, 12], 0.10063865780830383) def test_transform_raster_pass(self): - transform = Affine(0.009000000000000341, 0.0, -69.33714959699981, - 0.0, -0.009000000000000341, 10.42822096697894) + transform = Affine( + 0.009000000000000341, + 0.0, + -69.33714959699981, + 0.0, + -0.009000000000000341, + 10.42822096697894, + ) meta, inten_ras = u_coord.read_raster( - HAZ_DEMO_FL, transform=transform, height=500, width=501) + HAZ_DEMO_FL, transform=transform, height=500, width=501 + ) - left = meta['transform'].xoff - top = meta['transform'].yoff - bottom = top + meta['transform'][4] * meta['height'] - right = left + meta['transform'][0] * meta['width'] + left = meta["transform"].xoff + top = meta["transform"].yoff + bottom = top + meta["transform"][4] * meta["height"] + right = left + meta["transform"][0] * meta["width"] self.assertAlmostEqual(left, -69.33714959699981) self.assertAlmostEqual(bottom, 5.928220966978939) self.assertAlmostEqual(right, -64.82814959699981) self.assertAlmostEqual(top, 10.42822096697894) - self.assertEqual(meta['width'], 501) - self.assertEqual(meta['height'], 500) - self.assertTrue(u_coord.equal_crs(meta['crs'].to_epsg(), 4326)) + self.assertEqual(meta["width"], 501) + self.assertEqual(meta["height"], 500) + self.assertTrue(u_coord.equal_crs(meta["crs"].to_epsg(), 4326)) self.assertEqual(inten_ras.shape, (1, 500 * 501)) - meta, inten_all = u_coord.read_raster(HAZ_DEMO_FL, window=Window(0, 0, 501, 500)) + meta, inten_all = u_coord.read_raster( + HAZ_DEMO_FL, window=Window(0, 0, 501, 500) + ) self.assertTrue(np.array_equal(inten_all, inten_ras)) def test_sample_raster(self): """Test sampling points from raster file""" val_1, val_2, fill_value = 0.056825936, 0.10389626, -999 - i_j_vals = np.array([ - [44, 21, 0], - [44, 22, 0], - [44, 23, 0], - [45, 21, 0], - [45, 22, val_1], - [45, 23, val_2], - [46, 21, 0], - [46, 22, 0], - [46, 23, 0], - [45, 22.2, 0.8 * val_1 + 0.2 * val_2], - [45.3, 21.4, 0.7 * 0.4 * val_1], - [-20, 0, fill_value], - ]) + i_j_vals = np.array( + [ + [44, 21, 0], + [44, 22, 0], + [44, 23, 0], + [45, 21, 0], + [45, 22, val_1], + [45, 23, val_2], + [46, 21, 0], + [46, 22, 0], + [46, 
23, 0], + [45, 22.2, 0.8 * val_1 + 0.2 * val_2], + [45.3, 21.4, 0.7 * 0.4 * val_1], + [-20, 0, fill_value], + ] + ) res = 0.009000000000000341 lat = 10.42822096697894 - res / 2 - i_j_vals[:, 0] * res lon = -69.33714959699981 + res / 2 + i_j_vals[:, 1] * res - values = u_coord.read_raster_sample(HAZ_DEMO_FL, lat, lon, fill_value=fill_value) + values = u_coord.read_raster_sample( + HAZ_DEMO_FL, lat, lon, fill_value=fill_value + ) self.assertEqual(values.size, lat.size) for i, val in enumerate(i_j_vals[:, 2]): self.assertAlmostEqual(values[i], val) # with explicit intermediate resolution values = u_coord.read_raster_sample( - HAZ_DEMO_FL, lat, lon, fill_value=fill_value, intermediate_res=res) + HAZ_DEMO_FL, lat, lon, fill_value=fill_value, intermediate_res=res + ) self.assertEqual(values.size, lat.size) for i, val in enumerate(i_j_vals[:, 2]): self.assertAlmostEqual(values[i], val) @@ -1469,10 +1928,13 @@ def test_sample_raster(self): self.assertEqual(z_both.size, lat_both.size) self.assertEqual(z_both_neg.size, lat_both.size) - np.testing.assert_array_almost_equal(z_left, z_both[:z_left.size], ) - np.testing.assert_array_almost_equal(z_right, z_both[-z_right.size:]) - np.testing.assert_array_almost_equal(z_left, z_both_neg[:z_left.size]) - np.testing.assert_array_almost_equal(z_right, z_both_neg[-z_right.size:]) + np.testing.assert_array_almost_equal( + z_left, + z_both[: z_left.size], + ) + np.testing.assert_array_almost_equal(z_right, z_both[-z_right.size :]) + np.testing.assert_array_almost_equal(z_left, z_both_neg[: z_left.size]) + np.testing.assert_array_almost_equal(z_right, z_both_neg[-z_right.size :]) def test_sample_raster_gradient(self): """Test sampling gradients from a raster file""" @@ -1500,10 +1962,12 @@ def test_sample_raster_gradient(self): def test_refine_raster(self): """Test refinement of given raster data""" - data = np.array([ - [0.25, 0.75], - [0.5, 1], - ]) + data = np.array( + [ + [0.25, 0.75], + [0.5, 1], + ] + ) transform = Affine(0.5, 0, 0, 0, 0.5, 0) new_res = 0.1 new_data, new_transform = u_coord.refine_raster_data(data, transform, new_res) @@ -1524,7 +1988,8 @@ def test_bounded_refined_raster(self): res = 0.004 global_origin = (-180, 90) z, transform = u_coord.read_raster_bounds( - HAZ_DEMO_FL, bounds, res=res, global_origin=global_origin) + HAZ_DEMO_FL, bounds, res=res, global_origin=global_origin + ) # the first dimension corresponds to the raster bands: self.assertEqual(z.shape[0], 1) @@ -1548,13 +2013,17 @@ def test_bounded_refined_raster(self): # check along x-axis self.assertLessEqual(transform[2] + 0.5 * transform[0], bounds[0]) self.assertGreater(transform[2] + 1.5 * transform[0], bounds[0]) - self.assertGreaterEqual(transform[2] + (z.shape[1] - 0.5) * transform[0], bounds[2]) + self.assertGreaterEqual( + transform[2] + (z.shape[1] - 0.5) * transform[0], bounds[2] + ) self.assertLess(transform[2] + (z.shape[1] - 1.5) * transform[0], bounds[2]) # check along y-axis (note that the orientation is reversed) self.assertGreaterEqual(transform[5] + 0.5 * transform[4], bounds[3]) self.assertLess(transform[5] + 1.5 * transform[4], bounds[3]) - self.assertLessEqual(transform[5] + (z.shape[0] - 0.5) * transform[4], bounds[1]) + self.assertLessEqual( + transform[5] + (z.shape[0] - 0.5) * transform[4], bounds[1] + ) self.assertGreater(transform[5] + (z.shape[0] - 1.5) * transform[4], bounds[1]) # trigger downloading of dist-to-coast dataset (if not already present) @@ -1562,27 +2031,39 @@ def test_bounded_refined_raster(self): # make sure the buffering doesn't go 
beyond ±90 degrees latitude: z, transform = u_coord.read_raster_bounds( - path, (0, -90, 10, -80), res=1.0, global_origin=(-180, 90)) + path, (0, -90, 10, -80), res=1.0, global_origin=(-180, 90) + ) self.assertEqual(z.shape, (1, 11, 12)) self.assertEqual(transform[5], -79.0) z, transform = u_coord.read_raster_bounds( - path, (0, 80, 10, 90), res=1.0, global_origin=(-180, 90)) + path, (0, 80, 10, 90), res=1.0, global_origin=(-180, 90) + ) self.assertEqual(z.shape, (1, 11, 12)) self.assertEqual(transform[5], 90.0) # make sure crossing the antimeridian works fine: z_right, transform = u_coord.read_raster_bounds( - path, (-175, 0, -170, 10), res=1.0, global_origin=(-180, 90)) + path, (-175, 0, -170, 10), res=1.0, global_origin=(-180, 90) + ) z_left, transform = u_coord.read_raster_bounds( - path, (170, 0, 175, 10), res=1.0, global_origin=(-180, 90)) + path, (170, 0, 175, 10), res=1.0, global_origin=(-180, 90) + ) z_both, transform = u_coord.read_raster_bounds( - path, (170, 0, 190, 10), res=1.0, global_origin=(-180, 90)) + path, (170, 0, 190, 10), res=1.0, global_origin=(-180, 90) + ) z_both_neg, transform = u_coord.read_raster_bounds( - path, (-190, 0, -170, 10), res=1.0, global_origin=(-180, 90)) - np.testing.assert_array_equal(z_left[0,:,:], z_both[0,:,:z_left.shape[2]]) - np.testing.assert_array_equal(z_right[0,:,:], z_both[0,:,-z_right.shape[2]:]) - np.testing.assert_array_equal(z_left[0,:,:], z_both_neg[0,:,:z_left.shape[2]]) - np.testing.assert_array_equal(z_right[0,:,:], z_both_neg[0,:,-z_right.shape[2]:]) + path, (-190, 0, -170, 10), res=1.0, global_origin=(-180, 90) + ) + np.testing.assert_array_equal(z_left[0, :, :], z_both[0, :, : z_left.shape[2]]) + np.testing.assert_array_equal( + z_right[0, :, :], z_both[0, :, -z_right.shape[2] :] + ) + np.testing.assert_array_equal( + z_left[0, :, :], z_both_neg[0, :, : z_left.shape[2]] + ) + np.testing.assert_array_equal( + z_right[0, :, :], z_both_neg[0, :, -z_right.shape[2] :] + ) def test_subraster_from_bounds(self): """test subraster_from_bounds function""" @@ -1601,24 +2082,31 @@ def test_subraster_from_bounds(self): # test for more complicated input data: _, meta_list = data_arrays_resampling_demo() i = 2 - dst_resolution = (1., .2) + dst_resolution = (1.0, 0.2) bounds = rasterio.transform.array_bounds( - meta_list[i]['height'], meta_list[i]['width'], meta_list[i]['transform']) + meta_list[i]["height"], meta_list[i]["width"], meta_list[i]["transform"] + ) transform = rasterio.transform.from_origin(-180, 90, *dst_resolution) dst_transform, dst_shape = u_coord.subraster_from_bounds(transform, bounds) - self.assertEqual(dst_shape, (meta_list[0]['height'] / dst_resolution[1], - meta_list[0]['width'] / dst_resolution[0])) + self.assertEqual( + dst_shape, + ( + meta_list[0]["height"] / dst_resolution[1], + meta_list[0]["width"] / dst_resolution[0], + ), + ) self.assertEqual(dst_resolution, (dst_transform[0], -dst_transform[4])) - self.assertEqual(meta_list[i]['transform'][1], dst_transform[1]) - self.assertEqual(meta_list[i]['transform'][2], dst_transform[2]) - self.assertEqual(meta_list[i]['transform'][3], dst_transform[3]) - self.assertEqual(meta_list[i]['transform'][5], dst_transform[5]) + self.assertEqual(meta_list[i]["transform"][1], dst_transform[1]) + self.assertEqual(meta_list[i]["transform"][2], dst_transform[2]) + self.assertEqual(meta_list[i]["transform"][3], dst_transform[3]) + self.assertEqual(meta_list[i]["transform"][5], dst_transform[5]) # test for odd resolution change: i = 0 - dst_resolution = (.15, .15) + dst_resolution = 
(0.15, 0.15) bounds = rasterio.transform.array_bounds( - meta_list[i]['height'], meta_list[i]['width'], meta_list[i]['transform']) + meta_list[i]["height"], meta_list[i]["width"], meta_list[i]["transform"] + ) transform = rasterio.transform.from_origin(-180, 90, *dst_resolution) dst_transform, dst_shape = u_coord.subraster_from_bounds(transform, bounds) self.assertEqual(dst_shape, (14, 20)) @@ -1633,18 +2121,26 @@ def test_align_raster_data_shift(self): i = 0 # dst j = 1 # src - dst_resolution=meta_list[i]['transform'][0] + dst_resolution = meta_list[i]["transform"][0] dst_bounds = rasterio.transform.array_bounds( - meta_list[i]['height'], meta_list[i]['width'], meta_list[i]['transform']) + meta_list[i]["height"], meta_list[i]["width"], meta_list[i]["transform"] + ) data_out, dst_transform = u_coord.align_raster_data( - data_in[j], meta_list[j]['crs'], meta_list[j]['transform'], - dst_crs=meta_list[i]['crs'], dst_resolution=dst_resolution, dst_bounds=dst_bounds, - resampling='bilinear') + data_in[j], + meta_list[j]["crs"], + meta_list[j]["transform"], + dst_crs=meta_list[i]["crs"], + dst_resolution=dst_resolution, + dst_bounds=dst_bounds, + resampling="bilinear", + ) # test northward shift of box: - np.testing.assert_array_equal(data_in[1][1,:], data_out[0,:]) - np.testing.assert_array_equal(np.array([0., 0., 0.], dtype='float32'), data_out[1,:]) - self.assertEqual(meta_list[i]['transform'][5], dst_transform[5]) + np.testing.assert_array_equal(data_in[1][1, :], data_out[0, :]) + np.testing.assert_array_equal( + np.array([0.0, 0.0, 0.0], dtype="float32"), data_out[1, :] + ) + self.assertEqual(meta_list[i]["transform"][5], dst_transform[5]) def test_align_raster_data_downsampling(self): """test function align_raster_data for downsampling""" @@ -1652,17 +2148,25 @@ def test_align_raster_data_downsampling(self): i = 0 # dst j = 2 # src - dst_resolution=meta_list[i]['transform'][0] + dst_resolution = meta_list[i]["transform"][0] dst_bounds = rasterio.transform.array_bounds( - meta_list[i]['height'], meta_list[i]['width'], meta_list[i]['transform']) + meta_list[i]["height"], meta_list[i]["width"], meta_list[i]["transform"] + ) data_out, dst_transform = u_coord.align_raster_data( - data_in[j], meta_list[j]['crs'], meta_list[j]['transform'], - dst_crs=meta_list[i]['crs'], dst_resolution=dst_resolution, dst_bounds=dst_bounds, - resampling='bilinear') + data_in[j], + meta_list[j]["crs"], + meta_list[j]["transform"], + dst_crs=meta_list[i]["crs"], + dst_resolution=dst_resolution, + dst_bounds=dst_bounds, + resampling="bilinear", + ) # test downsampled data: - reference_array = np.array([[5.0204080, 2.2678570, 0.12244898], - [1.1224489, 0.6785714, 0.73469390]], dtype='float32') + reference_array = np.array( + [[5.0204080, 2.2678570, 0.12244898], [1.1224489, 0.6785714, 0.73469390]], + dtype="float32", + ) np.testing.assert_array_almost_equal_nulp(reference_array, data_out) self.assertEqual(dst_resolution, dst_transform[0]) @@ -1672,24 +2176,37 @@ def test_align_raster_data_downsample_conserve(self): data_in, meta_list = data_arrays_resampling_demo() i = 0 # dst - dst_resolution=meta_list[i]['transform'][0] + dst_resolution = meta_list[i]["transform"][0] dst_bounds = rasterio.transform.array_bounds( - meta_list[i]['height'], meta_list[i]['width'], meta_list[i]['transform']) + meta_list[i]["height"], meta_list[i]["width"], meta_list[i]["transform"] + ) # test conserve sum: - for j, data in enumerate(data_in): # src + for j, data in enumerate(data_in): # src data_out, _ = u_coord.align_raster_data( - 
data_in[j], meta_list[j]['crs'], meta_list[j]['transform'], - dst_crs=meta_list[i]['crs'], dst_resolution=dst_resolution, dst_bounds=dst_bounds, - resampling='bilinear', conserve='sum') + data_in[j], + meta_list[j]["crs"], + meta_list[j]["transform"], + dst_crs=meta_list[i]["crs"], + dst_resolution=dst_resolution, + dst_bounds=dst_bounds, + resampling="bilinear", + conserve="sum", + ) self.assertAlmostEqual(data_in[j].sum(), data_out.sum(), places=4) # test conserve mean: for j, data in enumerate(data_in): data_out, _ = u_coord.align_raster_data( - data_in[j], meta_list[j]['crs'], meta_list[j]['transform'], - dst_crs=meta_list[i]['crs'], dst_resolution=dst_resolution, dst_bounds=dst_bounds, - resampling='bilinear', conserve='mean') + data_in[j], + meta_list[j]["crs"], + meta_list[j]["transform"], + dst_crs=meta_list[i]["crs"], + dst_resolution=dst_resolution, + dst_bounds=dst_bounds, + resampling="bilinear", + conserve="mean", + ) self.assertAlmostEqual(data_in[j].mean(), data_out.mean(), places=4) def test_align_raster_data_upsample(self): @@ -1698,25 +2215,38 @@ def test_align_raster_data_upsample(self): data_out = list() i = 2 # dst - dst_resolution = meta_list[i]['transform'][0] + dst_resolution = meta_list[i]["transform"][0] dst_bounds = rasterio.transform.array_bounds( - meta_list[i]['height'], meta_list[i]['width'], meta_list[i]['transform']) - for j in [0,1,2]: - data_out.append(u_coord.align_raster_data( - data_in[j], meta_list[j]['crs'], meta_list[j]['transform'], - dst_crs=meta_list[i]['crs'], dst_resolution=dst_resolution, - dst_bounds=dst_bounds, resampling='bilinear')[0]) + meta_list[i]["height"], meta_list[i]["width"], meta_list[i]["transform"] + ) + for j in [0, 1, 2]: + data_out.append( + u_coord.align_raster_data( + data_in[j], + meta_list[j]["crs"], + meta_list[j]["transform"], + dst_crs=meta_list[i]["crs"], + dst_resolution=dst_resolution, + dst_bounds=dst_bounds, + resampling="bilinear", + )[0] + ) # test reference data unchanged: np.testing.assert_array_equal(data_in[2], data_out[2]) # test northward shift: - np.testing.assert_array_equal(data_out[0][2,:], data_out[1][0,:]) - np.testing.assert_array_equal(data_out[0][3,:], data_out[1][1,:]) + np.testing.assert_array_equal(data_out[0][2, :], data_out[1][0, :]) + np.testing.assert_array_equal(data_out[0][3, :], data_out[1][1, :]) # test upsampled data: - reference_array = np.array([[0.00, 0.25, 0.75, 1.25, 1.75, 2.00], - [0.75, 1.00, 1.50, 2.00, 2.50, 2.75], - [2.25, 2.50, 3.00, 3.50, 4.00, 4.25], - [3.00, 3.25, 3.75, 4.25, 4.75, 5.00]], dtype='float32') + reference_array = np.array( + [ + [0.00, 0.25, 0.75, 1.25, 1.75, 2.00], + [0.75, 1.00, 1.50, 2.00, 2.50, 2.75], + [2.25, 2.50, 3.00, 3.50, 4.00, 4.25], + [3.00, 3.25, 3.75, 4.25, 4.75, 5.00], + ], + dtype="float32", + ) np.testing.assert_array_equal(reference_array, data_out[0]) def test_align_raster_data_odd_downsample(self): @@ -1727,15 +2257,22 @@ def test_align_raster_data_odd_downsample(self): dst_resolution = 1.7 dst_bounds = rasterio.transform.array_bounds( - meta_list[i]['height'], meta_list[i]['width'], meta_list[i]['transform']) + meta_list[i]["height"], meta_list[i]["width"], meta_list[i]["transform"] + ) data_out, dst_transform = u_coord.align_raster_data( - data_in[j], meta_list[j]['crs'], meta_list[j]['transform'], - dst_crs=meta_list[i]['crs'], dst_resolution=dst_resolution, dst_bounds=dst_bounds, - resampling='bilinear') + data_in[j], + meta_list[j]["crs"], + meta_list[j]["transform"], + dst_crs=meta_list[i]["crs"], + dst_resolution=dst_resolution, + 
dst_bounds=dst_bounds, + resampling="bilinear", + ) self.assertEqual(dst_resolution, dst_transform[0]) - reference_array = np.array([[0.425, 1.7631578], - [3.425, 4.763158 ]], dtype='float32') + reference_array = np.array( + [[0.425, 1.7631578], [3.425, 4.763158]], dtype="float32" + ) np.testing.assert_array_equal(reference_array, data_out) def test_mask_raster_with_geometry(self): @@ -1743,14 +2280,18 @@ def test_mask_raster_with_geometry(self): raster = np.ones((4, 3), dtype=np.float32) transform = rasterio.transform.Affine(1, 0, 5, 0, -1, -10) shapes = [shapely.geometry.box(6.1, -12.9, 6.9, -11.1)] - expected = np.array([ - [0, 0, 0], - [0, 1, 0], - [0, 1, 0], - [0, 0, 0], - ], dtype=np.float32) + expected = np.array( + [ + [0, 0, 0], + [0, 1, 0], + [0, 1, 0], + [0, 0, 0], + ], + dtype=np.float32, + ) np.testing.assert_array_equal( - u_coord.mask_raster_with_geometry(raster, transform, shapes), expected) + u_coord.mask_raster_with_geometry(raster, transform, shapes), expected + ) # Execute Tests diff --git a/climada/util/test/test_dates_times.py b/climada/util/test/test_dates_times.py index 2e1d019c4..3cc9d25bc 100644 --- a/climada/util/test/test_dates_times.py +++ b/climada/util/test/test_dates_times.py @@ -18,22 +18,28 @@ Test of dates_times module """ + import datetime as dt import unittest + import numpy as np import climada.util.dates_times as u_dt + class TestDateString(unittest.TestCase): """Test date functions""" + def test_date_to_str_pass(self): """Test _date_to_str function""" ordinal_date = dt.datetime.toordinal(dt.datetime(2018, 4, 6)) - self.assertEqual('2018-04-06', u_dt.date_to_str(ordinal_date)) + self.assertEqual("2018-04-06", u_dt.date_to_str(ordinal_date)) - ordinal_date = [dt.datetime.toordinal(dt.datetime(2018, 4, 6)), - dt.datetime.toordinal(dt.datetime(2019, 1, 1))] - self.assertEqual(['2018-04-06', '2019-01-01'], u_dt.date_to_str(ordinal_date)) + ordinal_date = [ + dt.datetime.toordinal(dt.datetime(2018, 4, 6)), + dt.datetime.toordinal(dt.datetime(2019, 1, 1)), + ] + self.assertEqual(["2018-04-06", "2019-01-01"], u_dt.date_to_str(ordinal_date)) def test_str_to_date_pass(self): """Test _date_to_str function""" @@ -43,36 +49,45 @@ def test_str_to_date_pass(self): date = [640000, 730000] self.assertEqual(u_dt.str_to_date(u_dt.date_to_str(date)), date) + class TestDateNumpy(unittest.TestCase): """Test date functions for numpy datetime64 type""" + def test_datetime64_to_ordinal(self): """Test _datetime64_to_ordinal""" - date = np.datetime64('1999-12-26T06:00:00.000000000') + date = np.datetime64("1999-12-26T06:00:00.000000000") ordinal = u_dt.datetime64_to_ordinal(date) - self.assertEqual(u_dt.date_to_str(ordinal), '1999-12-26') + self.assertEqual(u_dt.date_to_str(ordinal), "1999-12-26") - date = [np.datetime64('1999-12-26T06:00:00.000000000'), - np.datetime64('2000-12-26T06:00:00.000000000')] + date = [ + np.datetime64("1999-12-26T06:00:00.000000000"), + np.datetime64("2000-12-26T06:00:00.000000000"), + ] ordinal = u_dt.datetime64_to_ordinal(date) - self.assertEqual(u_dt.date_to_str(ordinal[0]), '1999-12-26') - self.assertEqual(u_dt.date_to_str(ordinal[1]), '2000-12-26') + self.assertEqual(u_dt.date_to_str(ordinal[0]), "1999-12-26") + self.assertEqual(u_dt.date_to_str(ordinal[1]), "2000-12-26") def test_last_year_pass(self): """Test last_year""" - ordinal_date = [dt.datetime.toordinal(dt.datetime(2018, 4, 6)), - dt.datetime.toordinal(dt.datetime(1918, 4, 6)), - dt.datetime.toordinal(dt.datetime(2019, 1, 1))] + ordinal_date = [ + 
dt.datetime.toordinal(dt.datetime(2018, 4, 6)), + dt.datetime.toordinal(dt.datetime(1918, 4, 6)), + dt.datetime.toordinal(dt.datetime(2019, 1, 1)), + ] self.assertEqual(u_dt.last_year(ordinal_date), 2019) self.assertEqual(u_dt.last_year(np.array(ordinal_date)), 2019) def test_first_year_pass(self): """Test last_year""" - ordinal_date = [dt.datetime.toordinal(dt.datetime(2018, 4, 6)), - dt.datetime.toordinal(dt.datetime(1918, 4, 6)), - dt.datetime.toordinal(dt.datetime(2019, 1, 1))] + ordinal_date = [ + dt.datetime.toordinal(dt.datetime(2018, 4, 6)), + dt.datetime.toordinal(dt.datetime(1918, 4, 6)), + dt.datetime.toordinal(dt.datetime(2019, 1, 1)), + ] self.assertEqual(u_dt.first_year(ordinal_date), 1918) self.assertEqual(u_dt.first_year(np.array(ordinal_date)), 1918) + # Execute Tests if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestDateString) diff --git a/climada/util/test/test_dwd_icon.py b/climada/util/test/test_dwd_icon.py index 477f54a32..db1161189 100644 --- a/climada/util/test/test_dwd_icon.py +++ b/climada/util/test/test_dwd_icon.py @@ -19,65 +19,82 @@ Test files_handler module. """ +import datetime as dt import unittest from pathlib import Path -import datetime as dt -import numpy as np -from climada.util.dwd_icon_loader import (download_icon_grib, - delete_icon_grib, - _create_icon_grib_name, - download_icon_centroids_file) +import numpy as np from climada.util.constants import SYSTEM_DIR +from climada.util.dwd_icon_loader import ( + _create_icon_grib_name, + delete_icon_grib, + download_icon_centroids_file, + download_icon_grib, +) class TestCreateIconName(unittest.TestCase): """Test _create_icon_grib_name function""" + def test_icon_name(self): """Correct strings created""" url, file_name, lead_times = _create_icon_grib_name( dt.datetime(2021, 2, 2), max_lead_time=56, - ) - self.assertEqual(url, ('https://opendata.dwd.de/weather/nwp/'+ - 'icon-eu-eps/grib/00/vmax_10m/') - ) - self.assertEqual(file_name, - ('icon-eu-eps_europe_icosahedral_single-level_'+ - '2021020200_{lead_i:03}_vmax_10m.grib2.bz2') - ) - np.testing.assert_array_equal(lead_times, - np.concatenate([np.arange(1,49), - [51,54,]]) - ) + ) + self.assertEqual( + url, + ("https://opendata.dwd.de/weather/nwp/" + "icon-eu-eps/grib/00/vmax_10m/"), + ) + self.assertEqual( + file_name, + ( + "icon-eu-eps_europe_icosahedral_single-level_" + + "2021020200_{lead_i:03}_vmax_10m.grib2.bz2" + ), + ) + np.testing.assert_array_equal( + lead_times, + np.concatenate( + [ + np.arange(1, 49), + [ + 51, + 54, + ], + ] + ), + ) def test_leadtime_warning(self): """Adjustment for wrong leadtime""" url, file_name, lead_times = _create_icon_grib_name( dt.datetime(2021, 2, 2), max_lead_time=240, - ) - self.assertEqual(lead_times.max(),120) + ) + self.assertEqual(lead_times.max(), 120) class TestDownloadIcon(unittest.TestCase): """Test download_icon_grib function""" + def test_download_icon(self): """Value Error if date to old""" try: with self.assertRaises(ValueError): - download_icon_grib(dt.datetime(2020,1,1)) + download_icon_grib(dt.datetime(2020, 1, 1)) except IOError: pass class TestDownloadIconCentroids(unittest.TestCase): """Test download_icon_centroids_file function""" + def test_download_icon(self): """Value Error if model unknown""" with self.assertRaises(ValueError): - download_icon_centroids_file(model_name='icon') + download_icon_centroids_file(model_name="icon") class TestDeleteIcon(unittest.TestCase): @@ -86,27 +103,27 @@ class TestDeleteIcon(unittest.TestCase): def 
test_file_not_exist_warning(self): """test warning if file does not exist""" - with self.assertLogs('climada.util.dwd_icon_loader', 'WARNING') as cm: - delete_icon_grib(dt.datetime(1908, 2, 2), - max_lead_time=1, - ) + with self.assertLogs("climada.util.dwd_icon_loader", "WARNING") as cm: + delete_icon_grib( + dt.datetime(1908, 2, 2), + max_lead_time=1, + ) self.assertEqual(len(cm.output), 1) - self.assertIn('does not exist and could not be deleted', cm.output[0]) + self.assertIn("does not exist and could not be deleted", cm.output[0]) def test_rm_file(self): """test if file is removed""" url, file_name, lead_times = _create_icon_grib_name( - dt.datetime(1908, 2, 2), - max_lead_time=1, - ) + dt.datetime(1908, 2, 2), + max_lead_time=1, + ) file_name_i = SYSTEM_DIR.absolute().joinpath( file_name.format(lead_i=lead_times[0]) - ) + ) Path(file_name_i).touch() - delete_icon_grib(dt.datetime(1908, 2, 2), - max_lead_time=1, - download_dir=SYSTEM_DIR - ) + delete_icon_grib( + dt.datetime(1908, 2, 2), max_lead_time=1, download_dir=SYSTEM_DIR + ) self.assertFalse(Path(file_name_i).exists()) @@ -114,6 +131,8 @@ def test_rm_file(self): if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestCreateIconName) TESTS.addTests(unittest.TestLoader().loadTestsFromTestCase(TestDownloadIcon)) - TESTS.addTests(unittest.TestLoader().loadTestsFromTestCase(TestDownloadIconCentroids)) + TESTS.addTests( + unittest.TestLoader().loadTestsFromTestCase(TestDownloadIconCentroids) + ) TESTS.addTests(unittest.TestLoader().loadTestsFromTestCase(TestDeleteIcon)) unittest.TextTestRunner(verbosity=2).run(TESTS) diff --git a/climada/util/test/test_files.py b/climada/util/test/test_files.py index f7df2fbc4..75b3b8cb2 100644 --- a/climada/util/test/test_files.py +++ b/climada/util/test/test_files.py @@ -22,16 +22,21 @@ import unittest from pathlib import Path -from climada.util.files_handler import to_list, get_file_names, download_file, \ -get_extension -from climada.util.constants import DEMO_DIR, GLB_CENTROIDS_MAT, ENT_TEMPLATE_XLS +from climada.util.constants import DEMO_DIR, ENT_TEMPLATE_XLS, GLB_CENTROIDS_MAT +from climada.util.files_handler import ( + download_file, + get_extension, + get_file_names, + to_list, +) class TestDownloadUrl(unittest.TestCase): """Test download_file function""" + def test_wrong_url_fail(self): """Error raised when wrong url.""" - url = 'https://ngdc.noaa.gov/eog/data/web_data/v4composites/F172012.v4.tar' + url = "https://ngdc.noaa.gov/eog/data/web_data/v4composites/F172012.v4.tar" try: with self.assertRaises(ValueError): download_file(url) @@ -41,11 +46,12 @@ def test_wrong_url_fail(self): class TestToStrList(unittest.TestCase): """Test to_list function""" + def test_identity_pass(self): """Returns the same list if its length is correct.""" num_exp = 3 - values = ['hi', 'ho', 'ha'] - val_name = 'values' + values = ["hi", "ho", "ha"] + val_name = "values" out = to_list(num_exp, values, val_name) self.assertEqual(values, out) @@ -53,20 +59,20 @@ def test_one_to_list(self): """When input is a string or list with one element, it returns a list with the expected number of elments repeated""" num_exp = 3 - values = 'hi' - val_name = 'values' + values = "hi" + val_name = "values" out = to_list(num_exp, values, val_name) - self.assertEqual(['hi', 'hi', 'hi'], out) + self.assertEqual(["hi", "hi", "hi"], out) - values = ['ha'] + values = ["ha"] out = to_list(num_exp, values, val_name) - self.assertEqual(['ha', 'ha', 'ha'], out) + self.assertEqual(["ha", "ha", "ha"], out) def 
test_list_wrong_length_fail(self): """When input is list of neither expected size nor one, fail.""" num_exp = 3 - values = ['1', '2'] - val_name = 'values' + values = ["1", "2"] + val_name = "values" with self.assertRaises(ValueError) as cm: to_list(num_exp, values, val_name) @@ -75,7 +81,8 @@ def test_list_wrong_length_fail(self): class TestGetFileNames(unittest.TestCase): """Test get_file_names function. Only works with actually existing - files and directories.""" + files and directories.""" + def test_one_file_copy(self): """If input is one file name, return a list with this file name""" file_name = GLB_CENTROIDS_MAT @@ -105,21 +112,23 @@ def test_wrong_argument(self): get_file_names(str(empty_dir)) self.assertIn("no files", str(ve.exception)) - no_file = 'this is not a file' + no_file = "this is not a file" with self.assertRaises(ValueError) as ve: get_file_names(no_file) self.assertIn("cannot find", str(ve.exception)) def test_globbing(self): """If input is a glob pattern, return a list of matching visible - files; omit folders. + files; omit folders. """ file_name = DEMO_DIR - out = get_file_names(f'{file_name}/*') + out = get_file_names(f"{file_name}/*") - tmp_files = [str(f) - for f in Path(file_name).iterdir() - if f.is_file() and not f.name.startswith('.')] + tmp_files = [ + str(f) + for f in Path(file_name).iterdir() + if f.is_file() and not f.name.startswith(".") + ] self.assertListEqual(sorted(tmp_files), sorted(out)) @@ -129,26 +138,40 @@ class TestExtension(unittest.TestCase): def test_get_extension_no_pass(self): """Test no extension""" - file_name = '/Users/aznarsig/Documents/Python/climada_python/data/demo/SC22000_VE__M1' - self.assertEqual('', get_extension(file_name)[1]) + file_name = ( + "/Users/aznarsig/Documents/Python/climada_python/data/demo/SC22000_VE__M1" + ) + self.assertEqual("", get_extension(file_name)[1]) self.assertEqual(str(Path(file_name)), get_extension(file_name)[0]) def test_get_extension_one_pass(self): """Test not compressed""" - file_name = '/Users/aznarsig/Documents/Python/climada_python/data/demo/SC22000_VE__M1.grd' - self.assertEqual('.grd', get_extension(file_name)[1]) + file_name = "/Users/aznarsig/Documents/Python/climada_python/data/demo/SC22000_VE__M1.grd" + self.assertEqual(".grd", get_extension(file_name)[1]) self.assertEqual( - str(Path('/Users/aznarsig/Documents/Python/climada_python/data/demo/SC22000_VE__M1')), - get_extension(file_name)[0]) + str( + Path( + "/Users/aznarsig/Documents/Python/climada_python/data/demo/SC22000_VE__M1" + ) + ), + get_extension(file_name)[0], + ) def test_get_extension_two_pass(self): """Test compressed""" - file_name = '/Users/aznarsig/Documents/Python/climada_python' \ - '/data/demo/SC22000_VE__M1.grd.gz' - self.assertEqual('.grd.gz', get_extension(file_name)[1]) + file_name = ( + "/Users/aznarsig/Documents/Python/climada_python" + "/data/demo/SC22000_VE__M1.grd.gz" + ) + self.assertEqual(".grd.gz", get_extension(file_name)[1]) self.assertEqual( - str(Path('/Users/aznarsig/Documents/Python/climada_python/data/demo/SC22000_VE__M1')), - get_extension(file_name)[0]) + str( + Path( + "/Users/aznarsig/Documents/Python/climada_python/data/demo/SC22000_VE__M1" + ) + ), + get_extension(file_name)[0], + ) # Execute Tests diff --git a/climada/util/test/test_finance.py b/climada/util/test/test_finance.py index 50edf9a15..69c8ddd23 100644 --- a/climada/util/test/test_finance.py +++ b/climada/util/test/test_finance.py @@ -18,19 +18,32 @@ Test of finance module """ + import unittest + import numpy as np from cartopy.io 
import shapereader -from climada.util.finance import net_present_value, gdp, income_group, \ -nat_earth_adm0, world_bank, wealth2gdp, world_bank_wealth_account, _gdp_twn - -SHP_FN = shapereader.natural_earth(resolution='10m', category='cultural', - name='admin_0_countries') +from climada.util.finance import ( + _gdp_twn, + gdp, + income_group, + nat_earth_adm0, + net_present_value, + wealth2gdp, + world_bank, + world_bank_wealth_account, +) + +SHP_FN = shapereader.natural_earth( + resolution="10m", category="cultural", name="admin_0_countries" +) SHP_FILE = shapereader.Reader(SHP_FN) + class TestNetpresValue(unittest.TestCase): """Test date functions""" + def test_net_pres_val_pass(self): """Test net_present_value against MATLAB reference""" years = np.arange(2018, 2041) @@ -38,15 +51,16 @@ def test_net_pres_val_pass(self): val_years = np.ones(years.size) * 6.512201157564418e9 res = net_present_value(years, disc_rates, val_years) - self.assertEqual(1.215049630691397e+11, res) + self.assertEqual(1.215049630691397e11, res) + class TestWBData(unittest.TestCase): """Test World Bank data""" + def test_ne_income_grp_aia_pass(self): """Test nat_earth_adm0 function Anguilla.""" ref_year = 2012 - ne_year, ne_val = nat_earth_adm0('AIA', 'INCOME_GRP', - shp_file=SHP_FILE) + ne_year, ne_val = nat_earth_adm0("AIA", "INCOME_GRP", shp_file=SHP_FILE) ref_year = 0 ref_val = 3 @@ -56,7 +70,7 @@ def test_ne_income_grp_aia_pass(self): def test_wb_income_grp_sxm_pass(self): """Test world_bank function Sint Maarten.""" ref_year = 2012 - wb_year, wb_val = world_bank('SXM', ref_year, 'INC_GRP') + wb_year, wb_val = world_bank("SXM", ref_year, "INC_GRP") ref_year = 2012 ref_val = 4 @@ -66,23 +80,22 @@ def test_wb_income_grp_sxm_pass(self): def test_income_grp_sxm_1999_pass(self): """Test income_group function Sint Maarten.""" ref_year = 1999 - with self.assertLogs('climada.util.finance', level='INFO') as cm: - ig_year, ig_val = income_group('SXM', ref_year, SHP_FILE) + with self.assertLogs("climada.util.finance", level="INFO") as cm: + ig_year, ig_val = income_group("SXM", ref_year, SHP_FILE) ref_year = 2010 ref_val = 4 - self.assertIn('Income group SXM 2010: 4.', cm.output[0]) + self.assertIn("Income group SXM 2010: 4.", cm.output[0]) self.assertEqual(ig_year, ref_year) self.assertEqual(ig_val, ref_val) def test_ne_gdp_aia_2012_pass(self): """Test nat_earth_adm0 function Anguilla.""" ref_year = 2012 - ne_year, ne_val = nat_earth_adm0('AIA', 'GDP_MD', - 'GDP_YEAR', SHP_FILE) + ne_year, ne_val = nat_earth_adm0("AIA", "GDP_MD", "GDP_YEAR", SHP_FILE) ref_year = 2009 - ref_val = 1.75e+08 + ref_val = 1.75e08 self.assertEqual(ne_year, ref_year) self.assertEqual(ne_val, ref_val) @@ -91,22 +104,22 @@ def test_gdp_sxm_2010_pass(self): # If World Bank input data changes, make sure to set ref_year to a year where # no data is available so that the next available data point has to be selected. 
ref_year = 2010 - with self.assertLogs('climada.util.finance', level='INFO') as cm: - gdp_year, gdp_val = gdp('SXM', ref_year) + with self.assertLogs("climada.util.finance", level="INFO") as cm: + gdp_year, gdp_val = gdp("SXM", ref_year) - ref_val = 936089385.47486 # reference GDP value - ref_year = 2011 # nearest year with data available (might change) + ref_val = 936089385.47486 # reference GDP value + ref_year = 2011 # nearest year with data available (might change) # GDP and years with data available might change if worldbank input # data changes, check magnitude and adjust ref_val and/or ref_year # if test fails: - self.assertIn('GDP SXM %i: %1.3e' % (ref_year, ref_val), cm.output[0]) + self.assertIn("GDP SXM %i: %1.3e" % (ref_year, ref_val), cm.output[0]) self.assertEqual(gdp_year, ref_year) self.assertAlmostEqual(gdp_val, ref_val, places=0) def test_gdp_twn_2012_pass(self): """Test gdp function TWN.""" ref_year = 2014 - gdp_year, gdp_val = gdp('TWN', ref_year) + gdp_year, gdp_val = gdp("TWN", ref_year) _, gdp_val_direct = _gdp_twn(ref_year) ref_val = 530515000000.0 ref_year = 2014 @@ -114,22 +127,23 @@ def test_gdp_twn_2012_pass(self): self.assertEqual(gdp_val, ref_val) self.assertEqual(gdp_val_direct, ref_val) - def test_wb_esp_1950_pass(self): """Test world_bank function Sint Maarten.""" ref_year = 1950 - wb_year, wb_val = world_bank('ESP', ref_year, 'NY.GDP.MKTP.CD') + wb_year, wb_val = world_bank("ESP", ref_year, "NY.GDP.MKTP.CD") ref_year = 1960 ref_val = 12433394725.2159 self.assertEqual(wb_year, ref_year) self.assertAlmostEqual(wb_val, ref_val) + class TestWealth2GDP(unittest.TestCase): """Test Wealth to GDP factor extraction""" + def test_nfw_SUR_pass(self): """Test non-financial wealth-to-gdp factor with Suriname.""" - w2g_year, w2g_val = wealth2gdp('SUR') + w2g_year, w2g_val = wealth2gdp("SUR") ref_year = 2016 ref_val = 0.73656 @@ -138,7 +152,7 @@ def test_nfw_SUR_pass(self): def test_nfw_BEL_pass(self): """Test total wealth-to-gdp factor with Belgium.""" - w2g_year, w2g_val = wealth2gdp('BEL', False) + w2g_year, w2g_val = wealth2gdp("BEL", False) ref_year = 2016 ref_val = 4.88758 @@ -147,21 +161,27 @@ def test_nfw_BEL_pass(self): def test_nfw_LBY_pass(self): """Test missing factor with Libya.""" - _, w2g_val = wealth2gdp('LBY') + _, w2g_val = wealth2gdp("LBY") self.assertTrue(np.isnan(w2g_val)) + class TestWBWealthAccount(unittest.TestCase): """Test Wealth Indicator extraction from World Bank provided CSV""" + def test_pca_DEU_2010_pass(self): """Test Processed Capital value Germany 2010.""" ref_year = 2010 - cntry_iso = 'DEU' + cntry_iso = "DEU" wb_year, wb_val, q = world_bank_wealth_account(cntry_iso, ref_year, no_land=0) - wb_year_noland, wb_val_noland, q = world_bank_wealth_account(cntry_iso, ref_year, - no_land=1) - ref_val = [17675048450284.9, 19767982562092.2] # second value as updated by worldbank on - # October 27 2021 + wb_year_noland, wb_val_noland, q = world_bank_wealth_account( + cntry_iso, ref_year, no_land=1 + ) + ref_val = [ + 17675048450284.9, + 19767982562092.2, + ] # second value as updated by worldbank on + # October 27 2021 ref_val_noland = [14254071330874.9, 15941921421042.1] # dito self.assertEqual(wb_year, ref_year) self.assertEqual(q, 1) @@ -172,42 +192,49 @@ def test_pca_DEU_2010_pass(self): def test_pca_CHE_2008_pass(self): """Test Prcoessed Capital per capita Switzerland 2008 (interp.).""" ref_year = 2008 - cntry_iso = 'CHE' - var_name = 'NW.PCA.PC' - wb_year, wb_val, _ = world_bank_wealth_account(cntry_iso, ref_year, - variable_name=var_name, 
no_land=0) - ref_val = [328398.7, # values sporadically updated by worldbank - 369081.0] # <- October 27 2021 + cntry_iso = "CHE" + var_name = "NW.PCA.PC" + wb_year, wb_val, _ = world_bank_wealth_account( + cntry_iso, ref_year, variable_name=var_name, no_land=0 + ) + ref_val = [ + 328398.7, # values sporadically updated by worldbank + 369081.0, + ] # <- October 27 2021 self.assertEqual(wb_year, ref_year) self.assertIn(wb_val, ref_val) def test_tow_IND_1985_pass(self): """Test Total Wealth value India 1985 (outside year range).""" ref_year = 1985 - cntry_iso = 'IND' - var_name = 'NW.TOW.TO' - wb_year, wb_val, _ = world_bank_wealth_account(cntry_iso, ref_year, - variable_name=var_name) - ref_val = [5415188681934.5, # values sporadically updated by worldbank - 5861193808779.6, # <- October 27 2021 - 5861186556152.8, # <- June 29 2023 - 5861186367245.2, # <- December 20 2023 - ] + cntry_iso = "IND" + var_name = "NW.TOW.TO" + wb_year, wb_val, _ = world_bank_wealth_account( + cntry_iso, ref_year, variable_name=var_name + ) + ref_val = [ + 5415188681934.5, # values sporadically updated by worldbank + 5861193808779.6, # <- October 27 2021 + 5861186556152.8, # <- June 29 2023 + 5861186367245.2, # <- December 20 2023 + ] self.assertEqual(wb_year, ref_year) self.assertIn(wb_val, ref_val) def test_pca_CUB_2015_pass(self): """Test Processed Capital value Cuba 2015 (missing value).""" ref_year = 2015 - cntry_iso = 'CUB' + cntry_iso = "CUB" wb_year, wb_val, q = world_bank_wealth_account(cntry_iso, ref_year, no_land=1) - ref_val = [108675762920.0, # values sporadically updated by worldbank - 108675513472.0, # <- Dezember 20 2023 - ] + ref_val = [ + 108675762920.0, # values sporadically updated by worldbank + 108675513472.0, # <- Dezember 20 2023 + ] self.assertEqual(q, 0) self.assertEqual(wb_year, ref_year) self.assertIn(wb_val, ref_val) + # Execute Tests if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestNetpresValue) diff --git a/climada/util/test/test_hdf5.py b/climada/util/test/test_hdf5.py index 32ad7a7bc..ae85003e7 100644 --- a/climada/util/test/test_hdf5.py +++ b/climada/util/test/test_hdf5.py @@ -20,11 +20,13 @@ """ import unittest -import numpy as np + import h5py +import numpy as np -from climada.util.constants import HAZ_DEMO_MAT import climada.util.hdf5_handler as u_hdf5 +from climada.util.constants import HAZ_DEMO_MAT + class TestFunc(unittest.TestCase): """Test the auxiliary functions used to retrieve variables from HDF5""" @@ -36,21 +38,23 @@ def test_get_string_pass(self): contents = u_hdf5.read(HAZ_DEMO_MAT) # Convert several strings - str_date = u_hdf5.get_string(contents['hazard']['date']) - str_comment = u_hdf5.get_string(contents['hazard']['comment']) - str_wf = u_hdf5.get_string(contents['hazard']['windfield_comment']) - str_fn = u_hdf5.get_string(contents['hazard']['filename']) + str_date = u_hdf5.get_string(contents["hazard"]["date"]) + str_comment = u_hdf5.get_string(contents["hazard"]["comment"]) + str_wf = u_hdf5.get_string(contents["hazard"]["windfield_comment"]) + str_fn = u_hdf5.get_string(contents["hazard"]["filename"]) # Check results - self.assertEqual('14-Nov-2017 10:09:05', str_date) + self.assertEqual("14-Nov-2017 10:09:05", str_date) + self.assertEqual( + "TC hazard event set, generated 14-Nov-2017 10:09:05", str_comment + ) self.assertEqual( - 'TC hazard event set, generated 14-Nov-2017 10:09:05', - str_comment) + "generating 14450 windfields took 0.25 min " + "(0.0010 sec/event)", str_wf + ) self.assertEqual( - 'generating 14450 
windfields took 0.25 min ' + - '(0.0010 sec/event)', str_wf) - self.assertEqual('/Users/aznarsig/Documents/MATLAB/climada_data/' + - 'hazards/atl_prob.mat', str_fn) + "/Users/aznarsig/Documents/MATLAB/climada_data/" + "hazards/atl_prob.mat", + str_fn, + ) def test_get_sparse_mat_pass(self): """Check contents of imported sparse matrix, using the function \ @@ -60,10 +64,11 @@ def test_get_sparse_mat_pass(self): contents = u_hdf5.read(HAZ_DEMO_MAT) # get matrix size - mat_shape = (len(contents['hazard']['event_ID']), - len(contents['hazard']['centroid_ID'])) - spr_mat = u_hdf5.get_sparse_csr_mat( - contents['hazard']['intensity'], mat_shape) + mat_shape = ( + len(contents["hazard"]["event_ID"]), + len(contents["hazard"]["centroid_ID"]), + ) + spr_mat = u_hdf5.get_sparse_csr_mat(contents["hazard"]["intensity"], mat_shape) self.assertEqual(mat_shape[0], spr_mat.shape[0]) self.assertEqual(mat_shape[1], spr_mat.shape[1]) @@ -79,19 +84,20 @@ def test_get_sparse_mat_pass(self): def test_get_str_from_ref(self): """Check import string from a HDF5 object reference""" - with h5py.File(HAZ_DEMO_MAT, 'r') as file: - var = file['hazard']['name'][0][0] + with h5py.File(HAZ_DEMO_MAT, "r") as file: + var = file["hazard"]["name"][0][0] res = u_hdf5.get_str_from_ref(HAZ_DEMO_MAT, var) - self.assertEqual('NNN_1185101', res) + self.assertEqual("NNN_1185101", res) def test_get_list_str_from_ref(self): """Check import string from a HDF5 object reference""" - with h5py.File(HAZ_DEMO_MAT, 'r') as file: - var = file['hazard']['name'] + with h5py.File(HAZ_DEMO_MAT, "r") as file: + var = file["hazard"]["name"] var_list = u_hdf5.get_list_str_from_ref(HAZ_DEMO_MAT, var) - self.assertEqual('NNN_1185101', var_list[0]) - self.assertEqual('NNN_1185101_gen1', var_list[1]) - self.assertEqual('NNN_1185101_gen2', var_list[2]) + self.assertEqual("NNN_1185101", var_list[0]) + self.assertEqual("NNN_1185101_gen1", var_list[1]) + self.assertEqual("NNN_1185101_gen2", var_list[2]) + class TestReader(unittest.TestCase): """Test HDF5 reader""" @@ -104,50 +110,51 @@ def test_hazard_pass(self): # Check read contents self.assertEqual(1, len(contents)) - self.assertTrue('hazard' in contents.keys()) - self.assertEqual(False, '#refs#' in contents.keys()) - - hazard = contents['hazard'] - self.assertTrue('reference_year' in hazard.keys()) - self.assertTrue('lon' in hazard.keys()) - self.assertTrue('lat' in hazard.keys()) - self.assertTrue('centroid_ID' in hazard.keys()) - self.assertTrue('orig_years' in hazard.keys()) - self.assertTrue('orig_event_count' in hazard.keys()) - self.assertTrue('event_count' in hazard.keys()) - self.assertTrue('event_ID' in hazard.keys()) - self.assertTrue('category' in hazard.keys()) - self.assertTrue('orig_event_flag' in hazard.keys()) - self.assertTrue('yyyy' in hazard.keys()) - self.assertTrue('mm' in hazard.keys()) - self.assertTrue('dd' in hazard.keys()) - self.assertTrue('datenum' in hazard.keys()) - self.assertTrue('scenario' in hazard.keys()) - self.assertTrue('intensity' in hazard.keys()) - self.assertTrue('name' in hazard.keys()) - self.assertTrue('frequency' in hazard.keys()) - self.assertTrue('matrix_density' in hazard.keys()) - self.assertTrue('windfield_comment' in hazard.keys()) - self.assertTrue('peril_ID' in hazard.keys()) - self.assertTrue('filename' in hazard.keys()) - self.assertTrue('comment' in hazard.keys()) - self.assertTrue('date' in hazard.keys()) - self.assertTrue('units' in hazard.keys()) - self.assertTrue('orig_yearset' in hazard.keys()) - self.assertTrue('fraction' in hazard.keys()) + 
self.assertTrue("hazard" in contents.keys()) + self.assertEqual(False, "#refs#" in contents.keys()) + + hazard = contents["hazard"] + self.assertTrue("reference_year" in hazard.keys()) + self.assertTrue("lon" in hazard.keys()) + self.assertTrue("lat" in hazard.keys()) + self.assertTrue("centroid_ID" in hazard.keys()) + self.assertTrue("orig_years" in hazard.keys()) + self.assertTrue("orig_event_count" in hazard.keys()) + self.assertTrue("event_count" in hazard.keys()) + self.assertTrue("event_ID" in hazard.keys()) + self.assertTrue("category" in hazard.keys()) + self.assertTrue("orig_event_flag" in hazard.keys()) + self.assertTrue("yyyy" in hazard.keys()) + self.assertTrue("mm" in hazard.keys()) + self.assertTrue("dd" in hazard.keys()) + self.assertTrue("datenum" in hazard.keys()) + self.assertTrue("scenario" in hazard.keys()) + self.assertTrue("intensity" in hazard.keys()) + self.assertTrue("name" in hazard.keys()) + self.assertTrue("frequency" in hazard.keys()) + self.assertTrue("matrix_density" in hazard.keys()) + self.assertTrue("windfield_comment" in hazard.keys()) + self.assertTrue("peril_ID" in hazard.keys()) + self.assertTrue("filename" in hazard.keys()) + self.assertTrue("comment" in hazard.keys()) + self.assertTrue("date" in hazard.keys()) + self.assertTrue("units" in hazard.keys()) + self.assertTrue("orig_yearset" in hazard.keys()) + self.assertTrue("fraction" in hazard.keys()) self.assertEqual(27, len(hazard.keys())) # Check some random values - mat_shape = (len(contents['hazard']['event_ID']), - len(contents['hazard']['centroid_ID'])) - sp_mat = u_hdf5.get_sparse_csr_mat(hazard['intensity'], mat_shape) + mat_shape = ( + len(contents["hazard"]["event_ID"]), + len(contents["hazard"]["centroid_ID"]), + ) + sp_mat = u_hdf5.get_sparse_csr_mat(hazard["intensity"], mat_shape) - self.assertTrue(np.array_equal(np.array([[84], [67]]), - hazard['peril_ID'])) + self.assertTrue(np.array_equal(np.array([[84], [67]]), hazard["peril_ID"])) self.assertEqual(34.537289477809473, sp_mat[2862, 97]) - self.assertEqual(-80, hazard['lon'][46]) - self.assertEqual(28, hazard['lat'][87]) - self.assertEqual(2016, hazard['reference_year']) + self.assertEqual(-80, hazard["lon"][46]) + self.assertEqual(28, hazard["lat"][87]) + self.assertEqual(2016, hazard["reference_year"]) def test_with_refs_pass(self): """Allow to load references of the matlab file""" @@ -158,8 +165,9 @@ def test_with_refs_pass(self): # Check read contents self.assertEqual(2, len(contents)) - self.assertTrue('hazard' in contents.keys()) - self.assertTrue('#refs#' in contents.keys()) + self.assertTrue("hazard" in contents.keys()) + self.assertTrue("#refs#" in contents.keys()) + # Execute Tests if __name__ == "__main__": diff --git a/climada/util/test/test_interpolation.py b/climada/util/test/test_interpolation.py index 1c780fcce..8000ace9e 100644 --- a/climada/util/test/test_interpolation.py +++ b/climada/util/test/test_interpolation.py @@ -20,9 +20,10 @@ """ import unittest + import numpy as np -from climada.util.interpolation import interpolate_ev, stepfunction_ev, group_frequency +from climada.util.interpolation import group_frequency, interpolate_ev, stepfunction_ev class TestFitMethods(unittest.TestCase): @@ -30,153 +31,141 @@ class TestFitMethods(unittest.TestCase): def test_interpolate_ev_linear_interp(self): """Test linear interpolation""" - x_train = np.array([1., 3., 5.]) - y_train = np.array([8., 4., 2.]) - x_test = np.array([0., 3., 4., 6.]) + x_train = np.array([1.0, 3.0, 5.0]) + y_train = np.array([8.0, 4.0, 2.0]) + x_test = 
np.array([0.0, 3.0, 4.0, 6.0]) np.testing.assert_allclose( - interpolate_ev(x_test, x_train, y_train), - np.array([8., 4., 3., np.nan]) + interpolate_ev(x_test, x_train, y_train), np.array([8.0, 4.0, 3.0, np.nan]) ) np.testing.assert_allclose( - interpolate_ev(x_test, x_train, y_train, y_asymptotic = 0), - np.array([8., 4., 3., 0.]) + interpolate_ev(x_test, x_train, y_train, y_asymptotic=0), + np.array([8.0, 4.0, 3.0, 0.0]), ) def test_interpolate_ev_threshold_parameters(self): """Test input threshold parameters""" - x_train = np.array([0., 3., 6.]) - y_train = np.array([4., 1., 4.]) - x_test = np.array([-1., 3., 4.]) + x_train = np.array([0.0, 3.0, 6.0]) + y_train = np.array([4.0, 1.0, 4.0]) + x_test = np.array([-1.0, 3.0, 4.0]) np.testing.assert_allclose( - interpolate_ev(x_test, x_train, y_train), - np.array([4., 1., 2.]) + interpolate_ev(x_test, x_train, y_train), np.array([4.0, 1.0, 2.0]) ) np.testing.assert_allclose( - interpolate_ev(x_test, x_train, y_train, x_threshold=1.), - np.array([1., 1., 2.]) + interpolate_ev(x_test, x_train, y_train, x_threshold=1.0), + np.array([1.0, 1.0, 2.0]), ) np.testing.assert_allclose( - interpolate_ev(x_test, x_train, y_train, y_threshold=2.), - np.array([4., 4., 4.]) + interpolate_ev(x_test, x_train, y_train, y_threshold=2.0), + np.array([4.0, 4.0, 4.0]), ) - + def test_interpolate_ev_scale_parameters(self): """Test log scale parameters""" x_train = np.array([1e1, 1e3]) - y_train = np.array([1., 3.]) + y_train = np.array([1.0, 3.0]) x_test = np.array([1e0, 1e2]) np.testing.assert_allclose( interpolate_ev(x_test, x_train, y_train, logx=True, extrapolation=True), - np.array([0., 2.]) + np.array([0.0, 2.0]), ) np.testing.assert_allclose( - interpolate_ev(x_test, x_train, y_train, logx=True), - np.array([1., 2.]) + interpolate_ev(x_test, x_train, y_train, logx=True), np.array([1.0, 2.0]) ) - x_train = np.array([1., 3.]) + x_train = np.array([1.0, 3.0]) y_train = np.array([1e1, 1e3]) - x_test = np.array([0., 2.]) + x_test = np.array([0.0, 2.0]) np.testing.assert_allclose( interpolate_ev(x_test, x_train, y_train, logy=True, extrapolation=True), - np.array([1e0, 1e2]) + np.array([1e0, 1e2]), ) x_train = np.array([1e1, 1e3]) y_train = np.array([1e1, 1e5]) x_test = np.array([1e0, 1e2]) np.testing.assert_allclose( - interpolate_ev(x_test, x_train, y_train, logx=True, logy=True, extrapolation=True), - np.array([1e-1, 1e3]) + interpolate_ev( + x_test, x_train, y_train, logx=True, logy=True, extrapolation=True + ), + np.array([1e-1, 1e3]), ) def test_interpolate_ev_degenerate_input(self): """Test interp to constant zeros""" - x_train = np.array([1., 3., 5.]) - x_test = np.array([0., 2., 4.]) + x_train = np.array([1.0, 3.0, 5.0]) + x_test = np.array([0.0, 2.0, 4.0]) y_train = np.zeros(3) np.testing.assert_allclose( - interpolate_ev(x_test, x_train, y_train), - np.array([0., 0., 0.]) + interpolate_ev(x_test, x_train, y_train), np.array([0.0, 0.0, 0.0]) ) def test_interpolate_ev_small_input(self): """Test small input""" - x_train = np.array([1.]) - y_train = np.array([2.]) - x_test = np.array([0., 1., 2.]) + x_train = np.array([1.0]) + y_train = np.array([2.0]) + x_test = np.array([0.0, 1.0, 2.0]) np.testing.assert_allclose( - interpolate_ev(x_test, x_train, y_train), - np.array([2., 2., np.nan]) + interpolate_ev(x_test, x_train, y_train), np.array([2.0, 2.0, np.nan]) ) np.testing.assert_allclose( interpolate_ev(x_test, x_train, y_train, y_asymptotic=0), - np.array([2., 2., 0.]) + np.array([2.0, 2.0, 0.0]), ) x_train = np.array([]) y_train = np.array([]) - x_test = 
np.array([0., 1., 2.]) + x_test = np.array([0.0, 1.0, 2.0]) np.testing.assert_allclose( - interpolate_ev(x_test, x_train, y_train), - np.full(3, np.nan) + interpolate_ev(x_test, x_train, y_train), np.full(3, np.nan) ) np.testing.assert_allclose( - interpolate_ev(x_test, x_train, y_train, y_asymptotic=0), - np.zeros(3) + interpolate_ev(x_test, x_train, y_train, y_asymptotic=0), np.zeros(3) ) def test_stepfunction_ev(self): """Test stepfunction method""" - x_train = np.array([1., 3., 5.]) - y_train = np.array([8., 4., 2.]) - x_test = np.array([0., 3., 4., 6.]) + x_train = np.array([1.0, 3.0, 5.0]) + y_train = np.array([8.0, 4.0, 2.0]) + x_test = np.array([0.0, 3.0, 4.0, 6.0]) np.testing.assert_allclose( - stepfunction_ev(x_test, x_train, y_train), - np.array([8., 4., 2., np.nan]) + stepfunction_ev(x_test, x_train, y_train), np.array([8.0, 4.0, 2.0, np.nan]) ) np.testing.assert_allclose( - stepfunction_ev(x_test, x_train, y_train, y_asymptotic=0.), - np.array([8., 4., 2., 0.]) + stepfunction_ev(x_test, x_train, y_train, y_asymptotic=0.0), + np.array([8.0, 4.0, 2.0, 0.0]), ) def test_stepfunction_ev_small_input(self): """Test small input""" - x_train = np.array([1.]) - y_train = np.array([2.]) - x_test = np.array([0., 1., 2.]) + x_train = np.array([1.0]) + y_train = np.array([2.0]) + x_test = np.array([0.0, 1.0, 2.0]) np.testing.assert_allclose( - stepfunction_ev(x_test, x_train, y_train), - np.array([2., 2., np.nan]) + stepfunction_ev(x_test, x_train, y_train), np.array([2.0, 2.0, np.nan]) ) np.testing.assert_allclose( stepfunction_ev(x_test, x_train, y_train, y_asymptotic=0), - np.array([2., 2., 0.]) + np.array([2.0, 2.0, 0.0]), ) x_train = np.array([]) y_train = np.array([]) - x_test = np.array([0., 1., 2.]) + x_test = np.array([0.0, 1.0, 2.0]) np.testing.assert_allclose( - stepfunction_ev(x_test, x_train, y_train), - np.full(3, np.nan) + stepfunction_ev(x_test, x_train, y_train), np.full(3, np.nan) ) np.testing.assert_allclose( - stepfunction_ev(x_test, x_train, y_train, y_asymptotic=0), - np.zeros(3) + stepfunction_ev(x_test, x_train, y_train, y_asymptotic=0), np.zeros(3) ) - + def test_frequency_group(self): """Test frequency grouping method""" frequency = np.ones(6) - intensity = np.array([1., 1., 1., 2., 3., 3]) - np.testing.assert_allclose( - group_frequency(frequency, intensity), - ([3, 1, 2], [1, 2, 3]) - ) + intensity = np.array([1.0, 1.0, 1.0, 2.0, 3.0, 3]) np.testing.assert_allclose( - group_frequency([], []), - ([], []) + group_frequency(frequency, intensity), ([3, 1, 2], [1, 2, 3]) ) + np.testing.assert_allclose(group_frequency([], []), ([], [])) with self.assertRaises(ValueError): group_frequency(frequency, intensity[::-1]) + # Execute Tests if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestFitMethods) diff --git a/climada/util/test/test_lines_polys_handler.py b/climada/util/test/test_lines_polys_handler.py index b9275d548..8800d6d06 100644 --- a/climada/util/test/test_lines_polys_handler.py +++ b/climada/util/test/test_lines_polys_handler.py @@ -19,39 +19,39 @@ Test of lines_polys_handler """ +import copy import unittest -from unittest.mock import patch, DEFAULT +from unittest.mock import DEFAULT, patch -import numpy as np import geopandas as gpd +import numpy as np import pandas as pd -import copy - -from shapely.geometry import Point -from shapely.geometry import LineString +from shapely.geometry import LineString, Point -from climada.entity import Exposures -import climada.util.lines_polys_handler as u_lp import climada.util.coordinates as 
u_coord -from climada.util.api_client import Client +import climada.util.lines_polys_handler as u_lp from climada.engine import Impact, ImpactCalc +from climada.entity import Exposures from climada.entity.impact_funcs import ImpactFuncSet from climada.entity.impact_funcs.storm_europe import ImpfStormEurope +from climada.util.api_client import Client -#TODO: add tests for the private methods +# TODO: add tests for the private methods # Load gdfs and hazard and impact functions for tests -HAZ = Client().get_hazard('storm_europe', name='test_haz_WS_nl', status='test_dataset') +HAZ = Client().get_hazard("storm_europe", name="test_haz_WS_nl", status="test_dataset") -EXP_POLY = Client().get_exposures('base', name='test_polygon_exp', status='test_dataset') -EXP_POLY.gdf['impf_WS'] = 2 +EXP_POLY = Client().get_exposures( + "base", name="test_polygon_exp", status="test_dataset" +) +EXP_POLY.gdf["impf_WS"] = 2 GDF_POLY = EXP_POLY.gdf -EXP_LINE = Client().get_exposures('base', name='test_line_exp', status='test_dataset') +EXP_LINE = Client().get_exposures("base", name="test_line_exp", status="test_dataset") GDF_LINE = EXP_LINE.gdf -EXP_POINT = Client().get_exposures('base', name='test_point_exp', status='test_dataset') +EXP_POINT = Client().get_exposures("base", name="test_point_exp", status="test_dataset") GDF_POINT = EXP_POINT.gdf IMPF = ImpfStormEurope.from_welker() @@ -59,7 +59,7 @@ IMPF2.id = 2 IMPF_SET = ImpactFuncSet([IMPF, IMPF2]) -COL_CHANGING = ['value', 'latitude', 'longitude', 'geometry', 'geometry_orig'] +COL_CHANGING = ["value", "latitude", "longitude", "geometry", "geometry_orig"] def check_unchanged_geom_gdf(self, gdf_geom, gdf_pnt): @@ -68,22 +68,26 @@ def check_unchanged_geom_gdf(self, gdf_geom, gdf_pnt): sub_gdf_pnt = gdf_pnt.xs(n, level=1) rows_sel = sub_gdf_pnt.index.to_numpy() sub_gdf = gdf_geom.loc[rows_sel] - self.assertTrue(np.alltrue(sub_gdf.geometry.geom_equals(sub_gdf_pnt.geometry_orig))) + self.assertTrue( + np.alltrue(sub_gdf.geometry.geom_equals(sub_gdf_pnt.geometry_orig)) + ) for col in gdf_pnt.columns: if col not in COL_CHANGING: np.testing.assert_allclose(gdf_pnt[col].unique(), gdf_geom[col].unique()) + def check_impact(self, imp, haz, exp, aai_agg, eai_exp): """Test properties of imapcts""" self.assertEqual(len(haz.event_id), len(imp.at_event)) self.assertIsInstance(imp, Impact) - self.assertTrue(hasattr(imp, 'geom_exp')) - self.assertTrue(hasattr(imp, 'coord_exp')) - self.assertTrue(np.all(imp.geom_exp.sort_index()==exp.gdf.geometry.sort_index())) + self.assertTrue(hasattr(imp, "geom_exp")) + self.assertTrue(hasattr(imp, "coord_exp")) + self.assertTrue(np.all(imp.geom_exp.sort_index() == exp.gdf.geometry.sort_index())) self.assertEqual(len(imp.coord_exp), len(exp.gdf)) self.assertAlmostEqual(imp.aai_agg, aai_agg, 3) np.testing.assert_allclose(imp.eai_exp, eai_exp, rtol=1e-5) + class TestExposureGeomToPnt(unittest.TestCase): """Test Exposures to points functions""" @@ -95,73 +99,174 @@ def check_unchanged_exp(self, exp_geom, exp_pnt): def test_point_exposure_from_polygons(self): """Test disaggregation of polygons to points""" - #test low res - one point per poly + # test low res - one point per poly exp_pnt = u_lp.exp_geom_to_pnt( - EXP_POLY, res=1, to_meters=False, - disagg_met=u_lp.DisaggMethod.FIX, disagg_val=None - ) - np.testing.assert_array_equal(exp_pnt.gdf['value'], EXP_POLY.gdf['value']) + EXP_POLY, + res=1, + to_meters=False, + disagg_met=u_lp.DisaggMethod.FIX, + disagg_val=None, + ) + np.testing.assert_array_equal(exp_pnt.gdf["value"], 
EXP_POLY.gdf["value"]) self.check_unchanged_exp(EXP_POLY, exp_pnt) - #to_meters=False, DIV + # to_meters=False, DIV exp_pnt = u_lp.exp_geom_to_pnt( - EXP_POLY, res=0.5, to_meters=False, - disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None - ) + EXP_POLY, + res=0.5, + to_meters=False, + disagg_met=u_lp.DisaggMethod.DIV, + disagg_val=None, + ) self.check_unchanged_exp(EXP_POLY, exp_pnt) - val_avg = np.array([ - 4.93449000e+10, 4.22202000e+10, 6.49988000e+10, 1.04223900e+11, - 1.04223900e+11, 5.85881000e+10, 1.11822300e+11, 8.54188667e+10, - 8.54188667e+10, 8.54188667e+10, 1.43895450e+11, 1.43895450e+11, - 1.16221500e+11, 3.70562500e+11, 1.35359600e+11, 3.83689000e+10 - ]) - np.testing.assert_allclose(exp_pnt.gdf['value'], val_avg) - lat = np.array([ - 53.15019278, 52.90814037, 52.48232657, 52.23482697, 52.23482697, - 51.26574748, 51.30438894, 51.71676713, 51.71676713, 51.71676713, - 52.13772724, 52.13772724, 52.61538869, 53.10328543, 52.54974468, - 52.11286591 - ]) - np.testing.assert_allclose(exp_pnt.gdf['latitude'], lat) - - #to_meters=TRUE, FIX, dissag_val + val_avg = np.array( + [ + 4.93449000e10, + 4.22202000e10, + 6.49988000e10, + 1.04223900e11, + 1.04223900e11, + 5.85881000e10, + 1.11822300e11, + 8.54188667e10, + 8.54188667e10, + 8.54188667e10, + 1.43895450e11, + 1.43895450e11, + 1.16221500e11, + 3.70562500e11, + 1.35359600e11, + 3.83689000e10, + ] + ) + np.testing.assert_allclose(exp_pnt.gdf["value"], val_avg) + lat = np.array( + [ + 53.15019278, + 52.90814037, + 52.48232657, + 52.23482697, + 52.23482697, + 51.26574748, + 51.30438894, + 51.71676713, + 51.71676713, + 51.71676713, + 52.13772724, + 52.13772724, + 52.61538869, + 53.10328543, + 52.54974468, + 52.11286591, + ] + ) + np.testing.assert_allclose(exp_pnt.gdf["latitude"], lat) + + # to_meters=TRUE, FIX, dissag_val res = 20000 exp_pnt = u_lp.exp_geom_to_pnt( - EXP_POLY, res=res, to_meters=True, - disagg_met=u_lp.DisaggMethod.FIX, disagg_val=res**2 - ) + EXP_POLY, + res=res, + to_meters=True, + disagg_met=u_lp.DisaggMethod.FIX, + disagg_val=res**2, + ) self.check_unchanged_exp(EXP_POLY, exp_pnt) val = res**2 - self.assertEqual(np.unique(exp_pnt.gdf['value'])[0], val) - lat = np.array([ - 53.13923671, 53.13923671, 53.13923671, 53.13923671, 53.43921725, - 53.43921725, 52.90782155, 52.90782155, 52.90782155, 52.90782155, - 52.90782155, 52.40180033, 52.40180033, 52.40180033, 52.40180033, - 52.40180033, 52.69674738, 52.69674738, 52.02540815, 52.02540815, - 52.02540815, 52.02540815, 52.02540815, 52.02540815, 52.31787025, - 52.31787025, 51.31813586, 51.31813586, 51.31813586, 51.49256036, - 51.49256036, 51.49256036, 51.49256036, 51.50407349, 51.50407349, - 51.50407349, 51.50407349, 51.50407349, 51.50407349, 51.50407349, - 51.50407349, 51.50407349, 51.79318374, 51.79318374, 51.79318374, - 51.92768703, 51.92768703, 51.92768703, 51.92768703, 51.92768703, - 51.92768703, 51.92768703, 52.46150801, 52.46150801, 52.46150801, - 52.75685438, 52.75685438, 52.75685438, 52.75685438, 53.05419711, - 53.08688006, 53.08688006, 53.08688006, 53.08688006, 53.08688006, - 53.38649582, 53.38649582, 53.38649582, 52.55795685, 52.55795685, - 52.55795685, 52.55795685, 52.23308448, 52.23308448 - ]) - np.testing.assert_allclose(exp_pnt.gdf['latitude'], lat) - - #projected crs, to_meters=TRUE, FIX, dissag_val + self.assertEqual(np.unique(exp_pnt.gdf["value"])[0], val) + lat = np.array( + [ + 53.13923671, + 53.13923671, + 53.13923671, + 53.13923671, + 53.43921725, + 53.43921725, + 52.90782155, + 52.90782155, + 52.90782155, + 52.90782155, + 52.90782155, + 52.40180033, + 
52.40180033, + 52.40180033, + 52.40180033, + 52.40180033, + 52.69674738, + 52.69674738, + 52.02540815, + 52.02540815, + 52.02540815, + 52.02540815, + 52.02540815, + 52.02540815, + 52.31787025, + 52.31787025, + 51.31813586, + 51.31813586, + 51.31813586, + 51.49256036, + 51.49256036, + 51.49256036, + 51.49256036, + 51.50407349, + 51.50407349, + 51.50407349, + 51.50407349, + 51.50407349, + 51.50407349, + 51.50407349, + 51.50407349, + 51.50407349, + 51.79318374, + 51.79318374, + 51.79318374, + 51.92768703, + 51.92768703, + 51.92768703, + 51.92768703, + 51.92768703, + 51.92768703, + 51.92768703, + 52.46150801, + 52.46150801, + 52.46150801, + 52.75685438, + 52.75685438, + 52.75685438, + 52.75685438, + 53.05419711, + 53.08688006, + 53.08688006, + 53.08688006, + 53.08688006, + 53.08688006, + 53.38649582, + 53.38649582, + 53.38649582, + 52.55795685, + 52.55795685, + 52.55795685, + 52.55795685, + 52.23308448, + 52.23308448, + ] + ) + np.testing.assert_allclose(exp_pnt.gdf["latitude"], lat) + + # projected crs, to_meters=TRUE, FIX, dissag_val res = 20000 EXP_POLY_PROJ = Exposures(GDF_POLY.to_crs(epsg=28992)) exp_pnt = u_lp.exp_geom_to_pnt( - EXP_POLY_PROJ, res=res, to_meters=True, - disagg_met=u_lp.DisaggMethod.FIX, disagg_val=res**2 - ) + EXP_POLY_PROJ, + res=res, + to_meters=True, + disagg_met=u_lp.DisaggMethod.FIX, + disagg_val=res**2, + ) self.check_unchanged_exp(EXP_POLY_PROJ, exp_pnt) val = res**2 - self.assertEqual(np.unique(exp_pnt.gdf['value'])[0], val) + self.assertEqual(np.unique(exp_pnt.gdf["value"])[0], val) self.assertEqual(exp_pnt.gdf.crs, EXP_POLY_PROJ.gdf.crs) @patch.multiple( @@ -210,143 +315,257 @@ def test_point_exposure_from_polygons_on_grid(self): """Test disaggregation of polygons to points on grid""" exp_poly = EXP_POLY.copy() res = 0.1 - exp_poly.set_gdf(exp_poly.gdf[exp_poly.gdf['population']<400000]) + exp_poly.set_gdf(exp_poly.gdf[exp_poly.gdf["population"] < 400000]) height, width, trafo = u_coord.pts_to_raster_meta( exp_poly.gdf.geometry.bounds, (res, res) - ) + ) x_grid, y_grid = u_coord.raster_to_meshgrid(trafo, width, height) - #to_meters=False, DIV + # to_meters=False, DIV exp_pnt = u_lp.exp_geom_to_pnt( - exp_poly, res=0.1, to_meters=False, - disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None - ) + exp_poly, + res=0.1, + to_meters=False, + disagg_met=u_lp.DisaggMethod.DIV, + disagg_val=None, + ) exp_pnt_grid = u_lp.exp_geom_to_grid( - exp_poly, (x_grid, y_grid), - disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None - ) + exp_poly, + (x_grid, y_grid), + disagg_met=u_lp.DisaggMethod.DIV, + disagg_val=None, + ) self.check_unchanged_exp(exp_poly, exp_pnt_grid) - for col in ['value', 'latitude', 'longitude']: + for col in ["value", "latitude", "longitude"]: np.testing.assert_allclose(exp_pnt.gdf[col], exp_pnt_grid.gdf[col]) - x_grid = np.append(x_grid, x_grid+10) - y_grid = np.append(y_grid, y_grid+10) - #to_meters=False, DIV + x_grid = np.append(x_grid, x_grid + 10) + y_grid = np.append(y_grid, y_grid + 10) + # to_meters=False, DIV exp_pnt = u_lp.exp_geom_to_pnt( - exp_poly, res=0.1, to_meters=False, - disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None - ) + exp_poly, + res=0.1, + to_meters=False, + disagg_met=u_lp.DisaggMethod.DIV, + disagg_val=None, + ) exp_pnt_grid = u_lp.exp_geom_to_grid( - exp_poly, (x_grid, y_grid), - disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None - ) + exp_poly, + (x_grid, y_grid), + disagg_met=u_lp.DisaggMethod.DIV, + disagg_val=None, + ) self.check_unchanged_exp(exp_poly, exp_pnt_grid) - for col in ['value', 'latitude', 'longitude']: + for col 
in ["value", "latitude", "longitude"]: np.testing.assert_allclose(exp_pnt.gdf[col], exp_pnt_grid.gdf[col]) - def test_point_exposure_from_lines(self): """Test disaggregation of lines to points""" - #to_meters=False, FIX + # to_meters=False, FIX exp_pnt = u_lp.exp_geom_to_pnt( - EXP_LINE, res=1, to_meters=False, - disagg_met=u_lp.DisaggMethod.FIX, disagg_val=None - ) - np.testing.assert_array_equal(exp_pnt.gdf['value'][:,0], EXP_LINE.gdf['value']) + EXP_LINE, + res=1, + to_meters=False, + disagg_met=u_lp.DisaggMethod.FIX, + disagg_val=None, + ) + np.testing.assert_array_equal(exp_pnt.gdf["value"][:, 0], EXP_LINE.gdf["value"]) self.check_unchanged_exp(EXP_LINE, exp_pnt) - #to_meters=False, DIV + # to_meters=False, DIV exp_pnt = u_lp.exp_geom_to_pnt( - EXP_LINE, res=1, to_meters=False, - disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None - ) - np.testing.assert_array_equal(exp_pnt.gdf['value'][:,0], EXP_LINE.gdf['value']) + EXP_LINE, + res=1, + to_meters=False, + disagg_met=u_lp.DisaggMethod.DIV, + disagg_val=None, + ) + np.testing.assert_array_equal(exp_pnt.gdf["value"][:, 0], EXP_LINE.gdf["value"]) self.check_unchanged_exp(EXP_LINE, exp_pnt) - #to_meters=TRUE, FIX, dissag_val + # to_meters=TRUE, FIX, dissag_val res = 20000 exp_pnt = u_lp.exp_geom_to_pnt( - EXP_LINE, res=res, to_meters=True, - disagg_met=u_lp.DisaggMethod.FIX, disagg_val=res**2 - ) + EXP_LINE, + res=res, + to_meters=True, + disagg_met=u_lp.DisaggMethod.FIX, + disagg_val=res**2, + ) self.check_unchanged_exp(EXP_LINE, exp_pnt) val = res**2 - self.assertEqual(np.unique(exp_pnt.gdf['value'])[0], val) - lat = np.array([ - 50.83944191, 50.94706532, 51.85008694, 51.7524172 , 52.07732906, - 50.889641 , 51.90287148, 51.53858598, 52.30223675, 53.15931081, - 51.61111058, 52.05191342, 52.3893 , 52.14520761, 52.47715845, - 52.68641293, 52.11355 , 51.90503849, 52.49610201, 51.8418 , - 51.93188219, 51.10694216, 52.48596301, 50.87543042, 51.0801347 , - 50.82145186, 50.81341953, 51.07235498, 50.9105503 - ]) - np.testing.assert_allclose(exp_pnt.gdf['latitude'], lat) + self.assertEqual(np.unique(exp_pnt.gdf["value"])[0], val) + lat = np.array( + [ + 50.83944191, + 50.94706532, + 51.85008694, + 51.7524172, + 52.07732906, + 50.889641, + 51.90287148, + 51.53858598, + 52.30223675, + 53.15931081, + 51.61111058, + 52.05191342, + 52.3893, + 52.14520761, + 52.47715845, + 52.68641293, + 52.11355, + 51.90503849, + 52.49610201, + 51.8418, + 51.93188219, + 51.10694216, + 52.48596301, + 50.87543042, + 51.0801347, + 50.82145186, + 50.81341953, + 51.07235498, + 50.9105503, + ] + ) + np.testing.assert_allclose(exp_pnt.gdf["latitude"], lat) + class TestGeomImpactCalcs(unittest.TestCase): """Test main functions on impact calculation and impact aggregation""" def test_calc_geom_impact_lines(self): - """ test calc_geom_impact() with lines""" + """test calc_geom_impact() with lines""" # line exposures only - exp_line_novals = Exposures(GDF_LINE.drop(columns='value')) + exp_line_novals = Exposures(GDF_LINE.drop(columns="value")) imp1 = u_lp.calc_geom_impact( - EXP_LINE, IMPF_SET, HAZ, - res=0.05, to_meters=False, disagg_met=u_lp.DisaggMethod.DIV, - disagg_val=None, agg_met=u_lp.AggMethod.SUM - ) + EXP_LINE, + IMPF_SET, + HAZ, + res=0.05, + to_meters=False, + disagg_met=u_lp.DisaggMethod.DIV, + disagg_val=None, + agg_met=u_lp.AggMethod.SUM, + ) aai_agg1 = 2.114365936716238 - eai_exp1 = np.array([ - 8.58546479e-02, 4.57753040e-02, 1.07081794e-01, 1.27160538e-02, - 8.60984331e-02, 1.57751547e-01, 2.32808488e-02, 2.95520878e-02, - 4.06902083e-03, 2.27553509e-01, 
5.29133033e-03, 2.72705887e-03, - 8.48207692e-03, 2.95633263e-02, 4.88225543e-01, 1.33011693e-03, - 1.03018186e-01, 7.72573773e-02, 5.48322256e-03, 1.61239410e-02, - 1.13181160e-01, 8.32840521e-02, 2.99243546e-01, 4.88901364e-02, - 1.71930351e-02, 2.49435540e-02, 2.96121155e-05, 1.03654148e-02 - ]) + eai_exp1 = np.array( + [ + 8.58546479e-02, + 4.57753040e-02, + 1.07081794e-01, + 1.27160538e-02, + 8.60984331e-02, + 1.57751547e-01, + 2.32808488e-02, + 2.95520878e-02, + 4.06902083e-03, + 2.27553509e-01, + 5.29133033e-03, + 2.72705887e-03, + 8.48207692e-03, + 2.95633263e-02, + 4.88225543e-01, + 1.33011693e-03, + 1.03018186e-01, + 7.72573773e-02, + 5.48322256e-03, + 1.61239410e-02, + 1.13181160e-01, + 8.32840521e-02, + 2.99243546e-01, + 4.88901364e-02, + 1.71930351e-02, + 2.49435540e-02, + 2.96121155e-05, + 1.03654148e-02, + ] + ) check_impact(self, imp1, HAZ, EXP_LINE, aai_agg1, eai_exp1) - imp2 = u_lp.calc_geom_impact( - EXP_LINE, IMPF_SET, HAZ, - res=300, to_meters=True, disagg_met=u_lp.DisaggMethod.DIV, - disagg_val=None, agg_met=u_lp.AggMethod.SUM - ) + EXP_LINE, + IMPF_SET, + HAZ, + res=300, + to_meters=True, + disagg_met=u_lp.DisaggMethod.DIV, + disagg_val=None, + agg_met=u_lp.AggMethod.SUM, + ) np.testing.assert_allclose(imp2.eai_exp, imp1.eai_exp, rtol=0.2) imp3 = u_lp.calc_geom_impact( - exp_line_novals, IMPF_SET, HAZ, - res=300, to_meters=True, disagg_met=u_lp.DisaggMethod.FIX, - disagg_val=5000, agg_met=u_lp.AggMethod.SUM - ) + exp_line_novals, + IMPF_SET, + HAZ, + res=300, + to_meters=True, + disagg_met=u_lp.DisaggMethod.FIX, + disagg_val=5000, + agg_met=u_lp.AggMethod.SUM, + ) aai_agg3 = 2.626753478142696 - eai_exp3 = np.array([ - 0.10307851, 0.05544964, 0.12810739, 0.01736701, 0.1092617 , - 0.19785227, 0.02959709, 0.03617366, 0.00464554, 0.27378204, - 0.00670862, 0.00329956, 0.01030654, 0.03324303, 0.61571791, - 0.00215879, 0.12245651, 0.10379203, 0.00536503, 0.01881487, - 0.14592603, 0.12312706, 0.35965216, 0.05581585, 0.01968975, - 0.02843223, 0.00241899, 0.01451368 - ]) + eai_exp3 = np.array( + [ + 0.10307851, + 0.05544964, + 0.12810739, + 0.01736701, + 0.1092617, + 0.19785227, + 0.02959709, + 0.03617366, + 0.00464554, + 0.27378204, + 0.00670862, + 0.00329956, + 0.01030654, + 0.03324303, + 0.61571791, + 0.00215879, + 0.12245651, + 0.10379203, + 0.00536503, + 0.01881487, + 0.14592603, + 0.12312706, + 0.35965216, + 0.05581585, + 0.01968975, + 0.02843223, + 0.00241899, + 0.01451368, + ] + ) check_impact(self, imp3, HAZ, exp_line_novals, aai_agg3, eai_exp3) imp4 = u_lp.calc_geom_impact( - EXP_LINE, IMPF_SET, HAZ, - res=300, to_meters=True, disagg_met=u_lp.DisaggMethod.FIX, - disagg_val=5000, agg_met=u_lp.AggMethod.SUM - ) + EXP_LINE, + IMPF_SET, + HAZ, + res=300, + to_meters=True, + disagg_met=u_lp.DisaggMethod.FIX, + disagg_val=5000, + agg_met=u_lp.AggMethod.SUM, + ) np.testing.assert_array_equal(imp3.eai_exp, imp4.eai_exp) - def test_calc_geom_impact_points(self): - """ test calc_geom_impact() with points""" + """test calc_geom_impact() with points""" imp1 = u_lp.calc_geom_impact( - EXP_POINT, IMPF_SET, HAZ, - res=0.05, to_meters=False, disagg_met=u_lp.DisaggMethod.DIV, - disagg_val=None, agg_met=u_lp.AggMethod.SUM - ) - aai_agg1 = 0.0470814 + EXP_POINT, + IMPF_SET, + HAZ, + res=0.05, + to_meters=False, + disagg_met=u_lp.DisaggMethod.DIV, + disagg_val=None, + agg_met=u_lp.AggMethod.SUM, + ) + aai_agg1 = 0.0470814 exp = EXP_POINT.copy() exp.set_lat_lon() @@ -354,66 +573,163 @@ def test_calc_geom_impact_points(self): check_impact(self, imp1, HAZ, EXP_POINT, aai_agg1, 
imp11.eai_exp) imp2 = u_lp.calc_geom_impact( - EXP_POINT, IMPF_SET, HAZ, - res=500, to_meters=True, disagg_met=u_lp.DisaggMethod.FIX, - disagg_val=1.0, agg_met=u_lp.AggMethod.SUM - ) + EXP_POINT, + IMPF_SET, + HAZ, + res=500, + to_meters=True, + disagg_met=u_lp.DisaggMethod.FIX, + disagg_val=1.0, + agg_met=u_lp.AggMethod.SUM, + ) - exp.gdf['value'] = 1.0 + exp.gdf["value"] = 1.0 imp22 = ImpactCalc(exp, IMPF_SET, HAZ).impact() aai_agg2 = 6.5454249333e-06 check_impact(self, imp2, HAZ, EXP_POINT, aai_agg2, imp22.eai_exp) def test_calc_geom_impact_mixed(self): - """ test calc_geom_impact() with a mixed exp (points, lines and polygons) """ + """test calc_geom_impact() with a mixed exp (points, lines and polygons)""" # mixed exposures gdf_mix = pd.concat([GDF_LINE, GDF_POLY, GDF_POINT]).reset_index(drop=True) exp_mix = Exposures(gdf_mix) imp1 = u_lp.calc_geom_impact( - exp_mix, IMPF_SET, HAZ, - res=0.05, to_meters=False, disagg_met=u_lp.DisaggMethod.DIV, - disagg_val=None, agg_met=u_lp.AggMethod.SUM - ) + exp_mix, + IMPF_SET, + HAZ, + res=0.05, + to_meters=False, + disagg_met=u_lp.DisaggMethod.DIV, + disagg_val=None, + agg_met=u_lp.AggMethod.SUM, + ) aai_agg1 = 2354303.3196003754 - eai_exp1 = np.array([ - 1.73069928e-04, 8.80741357e-04, 1.77657635e-01, 1.06413744e-02, - 1.15405492e-02, 3.40097761e-02, 8.91658032e-03, 4.19735141e-02, - 1.27160538e-02, 2.43849980e-01, 2.32808488e-02, 5.47043065e-03, - 5.44984095e-03, 5.80779958e-03, 1.06361040e-01, 4.67335812e-02, - 9.93703142e-02, 8.48207692e-03, 2.95633263e-02, 1.30223646e-01, - 3.84600393e-01, 2.05709279e-02, 1.39919480e-01, 1.61239410e-02, - 4.46991386e-02, 1.30045513e-02, 1.30045513e-02, 6.91177788e-04, - 1.61063727e+04, 1.07420484e+04, 1.44746070e+04, 7.18796281e+04, - 2.58806206e+04, 2.01316315e+05, 1.76071458e+05, 3.92482129e+05, - 2.90364327e+05, 9.05399356e+05, 1.94728210e+05, 5.11729689e+04, - 2.84224294e+02, 2.45938137e+02, 1.90644327e+02, 1.73925079e+02, - 1.76091839e+02, 4.43054173e+02, 4.41378151e+02, 4.74316805e+02, - 4.83873464e+02, 2.59001795e+02, 2.48200400e+02, 2.62995792e+02 - ]) + eai_exp1 = np.array( + [ + 1.73069928e-04, + 8.80741357e-04, + 1.77657635e-01, + 1.06413744e-02, + 1.15405492e-02, + 3.40097761e-02, + 8.91658032e-03, + 4.19735141e-02, + 1.27160538e-02, + 2.43849980e-01, + 2.32808488e-02, + 5.47043065e-03, + 5.44984095e-03, + 5.80779958e-03, + 1.06361040e-01, + 4.67335812e-02, + 9.93703142e-02, + 8.48207692e-03, + 2.95633263e-02, + 1.30223646e-01, + 3.84600393e-01, + 2.05709279e-02, + 1.39919480e-01, + 1.61239410e-02, + 4.46991386e-02, + 1.30045513e-02, + 1.30045513e-02, + 6.91177788e-04, + 1.61063727e04, + 1.07420484e04, + 1.44746070e04, + 7.18796281e04, + 2.58806206e04, + 2.01316315e05, + 1.76071458e05, + 3.92482129e05, + 2.90364327e05, + 9.05399356e05, + 1.94728210e05, + 5.11729689e04, + 2.84224294e02, + 2.45938137e02, + 1.90644327e02, + 1.73925079e02, + 1.76091839e02, + 4.43054173e02, + 4.41378151e02, + 4.74316805e02, + 4.83873464e02, + 2.59001795e02, + 2.48200400e02, + 2.62995792e02, + ] + ) check_impact(self, imp1, HAZ, exp_mix, aai_agg1, eai_exp1) imp2 = u_lp.calc_geom_impact( - exp_mix, IMPF_SET, HAZ, - res=5000, to_meters=True, disagg_met=u_lp.DisaggMethod.FIX, - disagg_val=None, agg_met=u_lp.AggMethod.SUM - ) + exp_mix, + IMPF_SET, + HAZ, + res=5000, + to_meters=True, + disagg_met=u_lp.DisaggMethod.FIX, + disagg_val=None, + agg_met=u_lp.AggMethod.SUM, + ) aai_agg2 = 321653479.4607434 - eai_exp2 = np.array([ - 1.73069928e-04, 8.80741357e-04, 2.17736979e-01, 6.48243461e-02, - 2.67262620e-02, 3.55078893e-01, 
8.14081011e-02, 4.36578022e-01, - 1.02605091e-01, 3.45121722e-01, 1.62144669e-01, 1.45008544e-01, - 2.32808488e-02, 2.73521532e-02, 9.51399554e-02, 2.25921717e-01, - 6.90427531e-01, 5.29133033e-03, 2.72705887e-03, 8.48207692e-03, - 2.10403881e+00, 1.33011693e-03, 3.14644100e-01, 7.72573773e-02, - 5.48322256e-03, 1.61239410e-02, 2.68194832e-01, 7.80273077e-02, - 1.48411299e+06, 1.09137411e+06, 1.62477251e+06, 1.43455724e+07, - 2.94783633e+06, 1.06950486e+07, 3.17592949e+07, 4.58152749e+07, - 3.94173129e+07, 1.48016265e+08, 1.87811203e+07, 5.41509882e+06, - 1.24792652e+04, 1.20008305e+04, 1.43296472e+04, 3.15280802e+04, - 3.32644558e+04, 3.19325625e+04, 3.11256252e+04, 3.20372742e+04, - 1.67623417e+04, 1.64528393e+04, 1.47050883e+04, 1.37721978e+04 - ]) + eai_exp2 = np.array( + [ + 1.73069928e-04, + 8.80741357e-04, + 2.17736979e-01, + 6.48243461e-02, + 2.67262620e-02, + 3.55078893e-01, + 8.14081011e-02, + 4.36578022e-01, + 1.02605091e-01, + 3.45121722e-01, + 1.62144669e-01, + 1.45008544e-01, + 2.32808488e-02, + 2.73521532e-02, + 9.51399554e-02, + 2.25921717e-01, + 6.90427531e-01, + 5.29133033e-03, + 2.72705887e-03, + 8.48207692e-03, + 2.10403881e00, + 1.33011693e-03, + 3.14644100e-01, + 7.72573773e-02, + 5.48322256e-03, + 1.61239410e-02, + 2.68194832e-01, + 7.80273077e-02, + 1.48411299e06, + 1.09137411e06, + 1.62477251e06, + 1.43455724e07, + 2.94783633e06, + 1.06950486e07, + 3.17592949e07, + 4.58152749e07, + 3.94173129e07, + 1.48016265e08, + 1.87811203e07, + 5.41509882e06, + 1.24792652e04, + 1.20008305e04, + 1.43296472e04, + 3.15280802e04, + 3.32644558e04, + 3.19325625e04, + 3.11256252e04, + 3.20372742e04, + 1.67623417e04, + 1.64528393e04, + 1.47050883e04, + 1.37721978e04, + ] + ) check_impact(self, imp2, HAZ, exp_mix, aai_agg2, eai_exp2) # Check non-default impact function id @@ -421,26 +737,72 @@ def test_calc_geom_impact_mixed(self): impfdouble.mdd *= 2 impf_set = ImpactFuncSet([IMPF, impfdouble]) imp3 = u_lp.calc_geom_impact( - exp_mix, impf_set, HAZ, - res=0.05, to_meters=False, disagg_met=u_lp.DisaggMethod.DIV, - disagg_val=None, agg_met=u_lp.AggMethod.SUM - ) + exp_mix, + impf_set, + HAZ, + res=0.05, + to_meters=False, + disagg_met=u_lp.DisaggMethod.DIV, + disagg_val=None, + agg_met=u_lp.AggMethod.SUM, + ) aai_agg3 = 4708604.47775332 - eai_exp3 = np.array([ - 1.73069928e-04, 8.80741357e-04, 1.77657635e-01, 1.06413744e-02, - 1.15405492e-02, 3.40097761e-02, 8.91658032e-03, 4.19735141e-02, - 1.27160538e-02, 2.43849980e-01, 2.32808488e-02, 5.47043065e-03, - 5.44984095e-03, 5.80779958e-03, 1.06361040e-01, 4.67335812e-02, - 9.93703142e-02, 8.48207692e-03, 2.95633263e-02, 1.30223646e-01, - 3.84600393e-01, 2.05709279e-02, 1.39919480e-01, 1.61239410e-02, - 4.46991386e-02, 1.30045513e-02, 1.30045513e-02, 6.91177788e-04, - 3.22122197e+04, 2.14840968e+04, 2.89492139e+04, 1.43759256e+05, - 5.17612411e+04, 4.02632630e+05, 3.52142916e+05, 7.84964258e+05, - 5.80728653e+05, 1.81079871e+06, 3.89456421e+05, 1.02345938e+05, - 5.68448588e+02, 4.91876274e+02, 3.81288655e+02, 3.47850159e+02, - 3.52183678e+02, 8.86108346e+02, 8.82756302e+02, 9.48633609e+02, - 9.67746928e+02, 5.18003590e+02, 4.96400801e+02, 5.25991584e+02 - ]) + eai_exp3 = np.array( + [ + 1.73069928e-04, + 8.80741357e-04, + 1.77657635e-01, + 1.06413744e-02, + 1.15405492e-02, + 3.40097761e-02, + 8.91658032e-03, + 4.19735141e-02, + 1.27160538e-02, + 2.43849980e-01, + 2.32808488e-02, + 5.47043065e-03, + 5.44984095e-03, + 5.80779958e-03, + 1.06361040e-01, + 4.67335812e-02, + 9.93703142e-02, + 8.48207692e-03, + 2.95633263e-02, + 1.30223646e-01, + 
3.84600393e-01, + 2.05709279e-02, + 1.39919480e-01, + 1.61239410e-02, + 4.46991386e-02, + 1.30045513e-02, + 1.30045513e-02, + 6.91177788e-04, + 3.22122197e04, + 2.14840968e04, + 2.89492139e04, + 1.43759256e05, + 5.17612411e04, + 4.02632630e05, + 3.52142916e05, + 7.84964258e05, + 5.80728653e05, + 1.81079871e06, + 3.89456421e05, + 1.02345938e05, + 5.68448588e02, + 4.91876274e02, + 3.81288655e02, + 3.47850159e02, + 3.52183678e02, + 8.86108346e02, + 8.82756302e02, + 9.48633609e02, + 9.67746928e02, + 5.18003590e02, + 4.96400801e02, + 5.25991584e02, + ] + ) check_impact(self, imp3, HAZ, exp_mix, aai_agg3, eai_exp3) def test_impact_pnt_agg(self): @@ -449,59 +811,117 @@ def test_impact_pnt_agg(self): exp_mix = Exposures(gdf_mix) exp_pnt = u_lp.exp_geom_to_pnt( - exp_mix, res=1, to_meters=False, disagg_met=u_lp.DisaggMethod.DIV, - disagg_val=None - ) + exp_mix, + res=1, + to_meters=False, + disagg_met=u_lp.DisaggMethod.DIV, + disagg_val=None, + ) imp_pnt = ImpactCalc(exp_pnt, IMPF_SET, HAZ).impact(save_mat=True) imp_agg = u_lp.impact_pnt_agg(imp_pnt, exp_pnt.gdf, u_lp.AggMethod.SUM) aai_agg = 1282901.0114188215 - eai_exp = np.array([ - 0.00000000e+00, 1.73069928e-04, 3.71172778e-04, 5.09568579e-04, - 8.43340681e-04, 3.47906751e-03, 3.00385618e-03, 5.62430455e-03, - 9.07998787e-03, 1.30641275e-02, 6.18365411e-03, 4.74934473e-03, - 8.34810476e-02, 5.07280880e-02, 1.02690634e-01, 1.27160538e-02, - 8.60984331e-02, 1.62144669e-01, 2.32808488e-02, 2.90389979e-02, - 4.06902083e-03, 2.33667906e-01, 5.29133033e-03, 2.72705887e-03, - 8.48207692e-03, 2.95633263e-02, 4.01271600e-01, 1.33011693e-03, - 9.94596852e-02, 7.72573773e-02, 5.48322256e-03, 1.61239410e-02, - 4.14706673e-03, 8.32840521e-02, 2.87509619e-01, 4.88901364e-02, - 1.71930351e-02, 2.49435540e-02, 2.96121155e-05, 1.03654148e-02, - 8.36178802e+03, 7.30704698e+03, 1.20628926e+04, 3.54061498e+04, - 1.23524320e+04, 7.78074661e+04, 1.28292995e+05, 2.31231953e+05, - 1.31911226e+05, 5.37897306e+05, 8.37016948e+04, 1.65661030e+04 - ]) + eai_exp = np.array( + [ + 0.00000000e00, + 1.73069928e-04, + 3.71172778e-04, + 5.09568579e-04, + 8.43340681e-04, + 3.47906751e-03, + 3.00385618e-03, + 5.62430455e-03, + 9.07998787e-03, + 1.30641275e-02, + 6.18365411e-03, + 4.74934473e-03, + 8.34810476e-02, + 5.07280880e-02, + 1.02690634e-01, + 1.27160538e-02, + 8.60984331e-02, + 1.62144669e-01, + 2.32808488e-02, + 2.90389979e-02, + 4.06902083e-03, + 2.33667906e-01, + 5.29133033e-03, + 2.72705887e-03, + 8.48207692e-03, + 2.95633263e-02, + 4.01271600e-01, + 1.33011693e-03, + 9.94596852e-02, + 7.72573773e-02, + 5.48322256e-03, + 1.61239410e-02, + 4.14706673e-03, + 8.32840521e-02, + 2.87509619e-01, + 4.88901364e-02, + 1.71930351e-02, + 2.49435540e-02, + 2.96121155e-05, + 1.03654148e-02, + 8.36178802e03, + 7.30704698e03, + 1.20628926e04, + 3.54061498e04, + 1.23524320e04, + 7.78074661e04, + 1.28292995e05, + 2.31231953e05, + 1.31911226e05, + 5.37897306e05, + 8.37016948e04, + 1.65661030e04, + ] + ) check_impact(self, imp_agg, HAZ, exp_mix, aai_agg, eai_exp) def test_calc_grid_impact_polys(self): """Test impact on grid for polygons""" import climada.util.coordinates as u_coord + res = 0.1 (_, _, xmax, ymax) = EXP_POLY.gdf.geometry.bounds.max() (xmin, ymin, _, _) = EXP_POLY.gdf.geometry.bounds.min() bounds = (xmin, ymin, xmax, ymax) - height, width, trafo = u_coord.pts_to_raster_meta( - bounds, (res, res) - ) + height, width, trafo = u_coord.pts_to_raster_meta(bounds, (res, res)) x_grid, y_grid = u_coord.raster_to_meshgrid(trafo, width, height) imp_g = u_lp.calc_grid_impact( - 
exp=EXP_POLY, impf_set=IMPF_SET, haz=HAZ, - grid=(x_grid, y_grid), disagg_met=u_lp.DisaggMethod.DIV, - disagg_val=None, agg_met=u_lp.AggMethod.SUM - ) + exp=EXP_POLY, + impf_set=IMPF_SET, + haz=HAZ, + grid=(x_grid, y_grid), + disagg_met=u_lp.DisaggMethod.DIV, + disagg_val=None, + agg_met=u_lp.AggMethod.SUM, + ) aai_agg = 2319608.54202 - eai_exp = np.array([ - 17230.22051525, 10974.85453081, 14423.77523209, 77906.29609785, - 22490.08925927, 147937.83580832, 132329.78961234, 375082.82348148, - 514527.07490518, 460185.19291995, 265875.77587879, 280644.81378238 - ]) + eai_exp = np.array( + [ + 17230.22051525, + 10974.85453081, + 14423.77523209, + 77906.29609785, + 22490.08925927, + 147937.83580832, + 132329.78961234, + 375082.82348148, + 514527.07490518, + 460185.19291995, + 265875.77587879, + 280644.81378238, + ] + ) check_impact(self, imp_g, HAZ, EXP_POLY, aai_agg, eai_exp) - def test_aggregate_impact_mat(self): """Private method""" pass + class TestGdfGeomToPnt(unittest.TestCase): """Test Geodataframes to points and vice-versa functions""" @@ -510,84 +930,138 @@ def test_gdf_line_to_pnt(self): gdf_pnt = u_lp._line_to_pnts(GDF_LINE, 1, False) check_unchanged_geom_gdf(self, GDF_LINE, gdf_pnt) np.testing.assert_array_equal( - np.unique(GDF_LINE['value']), np.unique(gdf_pnt['value']) - ) + np.unique(GDF_LINE["value"]), np.unique(gdf_pnt["value"]) + ) gdf_pnt = u_lp._line_to_pnts(GDF_LINE, 1000, True) check_unchanged_geom_gdf(self, GDF_LINE, gdf_pnt) np.testing.assert_array_equal( - np.unique(GDF_LINE['value']), np.unique(gdf_pnt['value']) - ) + np.unique(GDF_LINE["value"]), np.unique(gdf_pnt["value"]) + ) gdf_pnt_d = u_lp._line_to_pnts(GDF_LINE.iloc[0:1], 0.01, False) np.testing.assert_allclose( gdf_pnt_d.geometry.x.values, - np.array([ - 6.092507, 6.092895, 6.088363, 6.083726, 6.079199, 6.074582, - 6.068896, 6.061939, 6.061839 - ]) - ) + np.array( + [ + 6.092507, + 6.092895, + 6.088363, + 6.083726, + 6.079199, + 6.074582, + 6.068896, + 6.061939, + 6.061839, + ] + ), + ) np.testing.assert_allclose( gdf_pnt_d.geometry.y.values, - np.array([ - 50.876242, 50.866888, 50.857725, 50.84861 , 50.839442, 50.830321, - 50.82186 , 50.814366, 50.80475 - ]) - ) + np.array( + [ + 50.876242, + 50.866888, + 50.857725, + 50.84861, + 50.839442, + 50.830321, + 50.82186, + 50.814366, + 50.80475, + ] + ), + ) - #disaggregation in degrees and approximately same value in meters + # disaggregation in degrees and approximately same value in meters gdf_pnt_m = u_lp._line_to_pnts(GDF_LINE.iloc[0:1], 1000, True) np.testing.assert_allclose( - gdf_pnt_m.geometry.x, - gdf_pnt_d.geometry.x, rtol=1e-2) + gdf_pnt_m.geometry.x, gdf_pnt_d.geometry.x, rtol=1e-2 + ) np.testing.assert_allclose( - gdf_pnt_m.geometry.y, - gdf_pnt_d.geometry.y,rtol=1e-2) + gdf_pnt_m.geometry.y, gdf_pnt_d.geometry.y, rtol=1e-2 + ) def test_gdf_poly_to_pnts(self): """Test polygon to points disaggregation""" gdf_pnt = u_lp._poly_to_pnts(GDF_POLY, 1, False) check_unchanged_geom_gdf(self, GDF_POLY, gdf_pnt) np.testing.assert_array_equal( - np.unique(GDF_POLY['value']), np.unique(gdf_pnt['value']) - ) + np.unique(GDF_POLY["value"]), np.unique(gdf_pnt["value"]) + ) gdf_pnt = u_lp._poly_to_pnts(GDF_POLY, 5000, True) check_unchanged_geom_gdf(self, GDF_POLY, gdf_pnt) np.testing.assert_array_equal( - np.unique(GDF_POLY['value']), np.unique(gdf_pnt['value']) - ) + np.unique(GDF_POLY["value"]), np.unique(gdf_pnt["value"]) + ) gdf_pnt_d = u_lp._poly_to_pnts(GDF_POLY.iloc[0:1], 0.2, False) np.testing.assert_allclose( gdf_pnt_d.geometry.x.values, - np.array([ - 
6.9690605, 7.1690605, 6.3690605, 6.5690605, 6.7690605, 6.9690605, - 7.1690605, 6.5690605, 6.7690605 - ]) - ) + np.array( + [ + 6.9690605, + 7.1690605, + 6.3690605, + 6.5690605, + 6.7690605, + 6.9690605, + 7.1690605, + 6.5690605, + 6.7690605, + ] + ), + ) np.testing.assert_allclose( gdf_pnt_d.geometry.y.values, - np.array([ - 53.04131655, 53.04131655, 53.24131655, 53.24131655, 53.24131655, - 53.24131655, 53.24131655, 53.44131655, 53.44131655 - ]) - ) + np.array( + [ + 53.04131655, + 53.04131655, + 53.24131655, + 53.24131655, + 53.24131655, + 53.24131655, + 53.24131655, + 53.44131655, + 53.44131655, + ] + ), + ) gdf_pnt_m = u_lp._poly_to_pnts(GDF_POLY.iloc[0:1], 15000, True) np.testing.assert_allclose( gdf_pnt_m.geometry.x.values, - np.array([ - 6.84279696, 6.97754426, 7.11229155, 6.30380779, 6.43855509, - 6.57330238, 6.70804967, 6.84279696, 6.97754426 - ]) - ) + np.array( + [ + 6.84279696, + 6.97754426, + 7.11229155, + 6.30380779, + 6.43855509, + 6.57330238, + 6.70804967, + 6.84279696, + 6.97754426, + ] + ), + ) np.testing.assert_allclose( gdf_pnt_m.geometry.y.values, - np.array([ - 53.0645655 , 53.0645655 , 53.0645655 , 53.28896623, 53.28896623, - 53.28896623, 53.28896623, 53.28896623, 53.28896623 - ]) - ) + np.array( + [ + 53.0645655, + 53.0645655, + 53.0645655, + 53.28896623, + 53.28896623, + 53.28896623, + 53.28896623, + 53.28896623, + 53.28896623, + ] + ), + ) def test_pnts_per_line(self): """Test number of points per line for give resolution""" @@ -603,8 +1077,8 @@ def test_line_fractions(self): 2: np.array([0.5]), 0.8: np.array([0.5]), 0.6: np.array([0.25, 0.75]), - 0.4: np.array([0.25, 0.75]) - } + 0.4: np.array([0.25, 0.75]), + } for res, fraction in res_fractions.items(): np.testing.assert_allclose(u_lp._line_fraction(length, res), fraction) @@ -613,26 +1087,31 @@ def test_line_fractions(self): 2: np.array([0.5]), 0.8: np.array([0.25, 0.75]), 0.6: np.array([0.166667, 0.5, 0.833333]), - 0.4: np.array([0.1, 0.3, 0.5, 0.7, 0.9]) - } + 0.4: np.array([0.1, 0.3, 0.5, 0.7, 0.9]), + } for res, fraction in res_fractions.items(): - np.testing.assert_allclose(u_lp._line_fraction(length, res), fraction, rtol=1e-04 ) + np.testing.assert_allclose( + u_lp._line_fraction(length, res), fraction, rtol=1e-04 + ) def test_resolution_warning(self): lines = [ LineString([[0, 0], [0, 2]]), LineString([[0, 0], [0, 12]]), - LineString([[0, 0], [0, 20]]) - ] + LineString([[0, 0], [0, 20]]), + ] gdf_lines = gpd.GeoDataFrame(geometry=lines) - with self.assertLogs('climada.util.lines_polys_handler', level='WARNING') as ctx: + with self.assertLogs( + "climada.util.lines_polys_handler", level="WARNING" + ) as ctx: u_lp._line_to_pnts(gdf_lines, 1, False) - self.assertEqual(ctx.records[0].message, + self.assertEqual( + ctx.records[0].message, f"{2} lines with a length < 10*resolution were found. " "Each of these lines is disaggregate to one point. " "Reaggregatint values will thus likely lead to overestimattion. " - "Consider chosing a smaller resolution or filter out the short lines. ") - + "Consider chosing a smaller resolution or filter out the short lines. 
", + ) def test_gdf_to_grid(self): """""" @@ -658,36 +1137,35 @@ def test_pnt_line_poly_mask(self): """""" pnt, lines, poly = u_lp._pnt_line_poly_mask(GDF_POLY) self.assertTrue(np.all(poly)) - self.assertTrue(np.all(lines==False)) - self.assertTrue(np.all(pnt==False)) + self.assertTrue(np.all(lines == False)) + self.assertTrue(np.all(pnt == False)) pnt, lines, poly = u_lp._pnt_line_poly_mask(GDF_LINE) - self.assertTrue(np.all(poly==False)) + self.assertTrue(np.all(poly == False)) self.assertTrue(np.all(lines)) - self.assertTrue(np.all(pnt==False)) + self.assertTrue(np.all(pnt == False)) pnt, lines, poly = u_lp._pnt_line_poly_mask(GDF_POINT) - self.assertTrue(np.all(poly==False)) - self.assertTrue(np.all(lines==False)) + self.assertTrue(np.all(poly == False)) + self.assertTrue(np.all(lines == False)) self.assertTrue(np.all(pnt)) - def test_get_equalarea_proj(self): """Test pass get locally cylindrical equalarea projection""" poly = EXP_POLY.gdf.geometry[0] proj = u_lp._get_equalarea_proj(poly) - self.assertEqual(proj, '+proj=cea +lat_0=53.150193 +lon_0=6.881223 +units=m') + self.assertEqual(proj, "+proj=cea +lat_0=53.150193 +lon_0=6.881223 +units=m") def test_get_pyproj_trafo(self): """""" - dest_crs = '+proj=cea +lat_0=52.112866 +lon_0=5.150162 +units=m' + dest_crs = "+proj=cea +lat_0=52.112866 +lon_0=5.150162 +units=m" orig_crs = EXP_POLY.gdf.crs trafo = u_lp._get_pyproj_trafo(orig_crs, dest_crs) self.assertEqual( trafo.definition, - 'proj=pipeline step proj=unitconvert xy_in=deg' + - ' xy_out=rad step proj=cea lat_0=52.112866 lon_0=5.150162 units=m' - ) + "proj=pipeline step proj=unitconvert xy_in=deg" + + " xy_out=rad step proj=cea lat_0=52.112866 lon_0=5.150162 units=m", + ) def test_reproject_grid(self): """""" @@ -698,10 +1176,10 @@ def test_reproject_poly(self): pass def test_swap_geom_cols(self): - """Test swap of geometry columns """ + """Test swap of geometry columns""" gdf_orig = GDF_POLY.copy() - gdf_orig['new_geom'] = gdf_orig.geometry - swap_gdf = u_lp._swap_geom_cols(gdf_orig, 'old_geom', 'new_geom') + gdf_orig["new_geom"] = gdf_orig.geometry + swap_gdf = u_lp._swap_geom_cols(gdf_orig, "old_geom", "new_geom") self.assertTrue(np.alltrue(swap_gdf.geometry.geom_equals(gdf_orig.new_geom))) diff --git a/climada/util/test/test_plot.py b/climada/util/test/test_plot.py index 9588e3821..351010afb 100644 --- a/climada/util/test/test_plot.py +++ b/climada/util/test/test_plot.py @@ -20,87 +20,107 @@ """ import unittest + import cartopy -import numpy as np -import matplotlib.pyplot as plt -from matplotlib import colormaps as cm import cartopy.crs as ccrs import geopandas as gpd +import matplotlib.pyplot as plt +import numpy as np +from matplotlib import colormaps as cm from shapely import Point import climada.util.plot as u_plot + class TestFuncs(unittest.TestCase): def test_get_transform_4326_pass(self): """Check _get_transformation for 4326 epsg.""" - res, unit = u_plot.get_transformation('epsg:4326') + res, unit = u_plot.get_transformation("epsg:4326") self.assertIsInstance(res, cartopy.crs.PlateCarree) - self.assertEqual(unit, '°') + self.assertEqual(unit, "°") def test_get_transform_3395_pass(self): """Check that assigned attribute is correctly set.""" - res, unit = u_plot.get_transformation('epsg:3395') + res, unit = u_plot.get_transformation("epsg:3395") self.assertIsInstance(res, cartopy.crs.Mercator) - self.assertEqual(unit, 'm') + self.assertEqual(unit, "m") def test_get_transform_3035_pass(self): """Check that assigned attribute is correctly set.""" - res, unit = 
u_plot.get_transformation('epsg:3035') + res, unit = u_plot.get_transformation("epsg:3035") self.assertIsInstance(res, cartopy.crs.Projection) self.assertEqual(res.epsg_code, 3035) - self.assertEqual(unit, 'm') + self.assertEqual(unit, "m") + class TestPlots(unittest.TestCase): def test_geo_scatter_categorical(self): """Plots ones with geo_scatteR_categorical""" # test default with one plot - values = np.array([1, 2.0, 1, 'a']) + values = np.array([1, 2.0, 1, "a"]) coord = np.array([[26, 0], [26, 1], [28, 0], [29, 1]]) - u_plot.geo_scatter_categorical(values, coord, 'value', 'test plot', - pop_name=True) + u_plot.geo_scatter_categorical( + values, coord, "value", "test plot", pop_name=True + ) plt.close() - #test multiple plots with non default kwargs - values = np.array([[1, 2.0, 1, 'a'], [0, 0, 0, 0]]) + # test multiple plots with non default kwargs + values = np.array([[1, 2.0, 1, "a"], [0, 0, 0, 0]]) coord = np.array([[26, 0], [26, 1], [28, 0], [29, 1]]) - u_plot.geo_scatter_categorical(values, coord, 'value', 'test plot', - cat_name={0: 'zero', - 1: 'int', - 2.0: 'float', - 'a': 'string'}, - pop_name=False, cmap=cm.get_cmap('Set1')) + u_plot.geo_scatter_categorical( + values, + coord, + "value", + "test plot", + cat_name={0: "zero", 1: "int", 2.0: "float", "a": "string"}, + pop_name=False, + cmap=cm.get_cmap("Set1"), + ) plt.close() - #test colormap warning - values = np.array([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], - [12, 13, 14, 15]]) + # test colormap warning + values = np.array( + [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]] + ) coord = np.array([[26, 0], [26, 4], [28, 0], [29, 1]]) - u_plot.geo_scatter_categorical(values, coord, 'value', 'test plot', - pop_name=False, cmap='viridis') + u_plot.geo_scatter_categorical( + values, coord, "value", "test plot", pop_name=False, cmap="viridis" + ) plt.close() - #test colormap warning with 256 colors - values = np.array([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], - [12, 13, 14, 15]]) + # test colormap warning with 256 colors + values = np.array( + [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]] + ) coord = np.array([[26, 0], [26, 1], [28, 0], [29, 1]]) - u_plot.geo_scatter_categorical(values, coord, 'value', 'test plot', - pop_name=False, cmap='tab20c') + u_plot.geo_scatter_categorical( + values, coord, "value", "test plot", pop_name=False, cmap="tab20c" + ) plt.close() def test_geo_scatter_from_array(self): values = np.array([1, 2.0, 1, 1]) coord = np.array([[-17, 178], [-10, 180], [-27, 175], [-16, 186]]) - var_name = 'test' - title = 'test' + var_name = "test" + title = "test" projection = ccrs.PlateCarree() - cmap = 'viridis' - ax = u_plot.geo_scatter_from_array(values, coord, var_name, title, - pop_name=True, extend='neither', - shapes=True, axes=None, proj=projection, - figsize=(9, 13), cmap=cmap) + cmap = "viridis" + ax = u_plot.geo_scatter_from_array( + values, + coord, + var_name, + title, + pop_name=True, + extend="neither", + shapes=True, + axes=None, + proj=projection, + figsize=(9, 13), + cmap=cmap, + ) self.assertEqual(var_name, ax.get_title()) colorbar = next(x.colorbar for x in ax.collections if x.colorbar) self.assertAlmostEqual(np.max(values), colorbar.vmax) @@ -111,14 +131,23 @@ def test_geo_scatter_from_array(self): def test_geo_bin_from_array(self): values = np.array([1, 2.0, 5, 1]) coord = np.array([[-10, 17], [-30, 20], [5, 75], [-16, 20]]) - var_name = 'test' - title = 'test' + var_name = "test" + title = "test" projection = ccrs.PlateCarree() - cmap = 'viridis' - ax = 
u_plot.geo_bin_from_array(values, coord, var_name, title, - pop_name=True, extend='neither', - shapes=True, axes=None, proj=projection, - figsize=(9, 13), cmap=cmap) + cmap = "viridis" + ax = u_plot.geo_bin_from_array( + values, + coord, + var_name, + title, + pop_name=True, + extend="neither", + shapes=True, + axes=None, + proj=projection, + figsize=(9, 13), + cmap=cmap, + ) self.assertEqual(var_name, ax.get_title()) colorbar = next(x.colorbar for x in ax.collections if x.colorbar) self.assertAlmostEqual(np.max(values), colorbar.vmax) @@ -129,12 +158,21 @@ def test_geo_bin_from_array(self): def test_geo_im_from_array(self): values = np.array([1, 2.0, 5, np.nan]) coord = np.array([[-17, 178], [-10, 180], [-27, 175], [-16, 186]]) - var_name = 'test' - title = 'test' + var_name = "test" + title = "test" projection = ccrs.PlateCarree() - cmap = 'viridis' - ax = u_plot.geo_im_from_array(values, coord, var_name, title, - proj=projection, smooth=True, axes=None, figsize=(9, 13), cmap=cmap) + cmap = "viridis" + ax = u_plot.geo_im_from_array( + values, + coord, + var_name, + title, + proj=projection, + smooth=True, + axes=None, + figsize=(9, 13), + cmap=cmap, + ) self.assertEqual(var_name, ax.get_title()) colorbar = next(x.colorbar for x in ax.collections if x.colorbar) self.assertAlmostEqual(np.nanmax(values), colorbar.vmax) @@ -143,8 +181,17 @@ def test_geo_im_from_array(self): plt.close() projection = ccrs.AzimuthalEquidistant() - ax = u_plot.geo_im_from_array(values, coord, var_name, title, - proj=projection, smooth=True, axes=None, figsize=(9, 13), cmap=cmap) + ax = u_plot.geo_im_from_array( + values, + coord, + var_name, + title, + proj=projection, + smooth=True, + axes=None, + figsize=(9, 13), + cmap=cmap, + ) self.assertEqual(var_name, ax.get_title()) colorbar = next(x.colorbar for x in ax.collections if x.colorbar) self.assertAlmostEqual(np.nanmax(values), colorbar.vmax) @@ -155,37 +202,50 @@ def test_geo_im_from_array(self): def test_plot_from_gdf_no_log(self): """test plot_from_gdf() with linear color bar (because there is a 0 in data)""" return_periods = gpd.GeoDataFrame( - data = ((2., 5.), (0., 6.), (None, 2.), (1., 1000.)), - columns = ('10.0', '20.0') + data=((2.0, 5.0), (0.0, 6.0), (None, 2.0), (1.0, 1000.0)), + columns=("10.0", "20.0"), + ) + return_periods["geometry"] = ( + Point(45.0, 26.0), + Point(46.0, 26.0), + Point(45.0, 27.0), + Point(46.0, 27.0), ) - return_periods['geometry'] = (Point(45., 26.), Point(46., 26.), Point(45., 27.), Point(46., 27.)) - colorbar_name = 'Return Periods (Years)' - title_subplots = lambda cols: [f'Threshold Intensity: {col} m/s' for col in cols] + colorbar_name = "Return Periods (Years)" + title_subplots = lambda cols: [ + f"Threshold Intensity: {col} m/s" for col in cols + ] (axis1, axis2) = u_plot.plot_from_gdf( - return_periods, - colorbar_name=colorbar_name, - title_subplots=title_subplots) - self.assertEqual('Threshold Intensity: 10.0 m/s', axis1.get_title()) - self.assertEqual('Threshold Intensity: 20.0 m/s', axis2.get_title()) + return_periods, colorbar_name=colorbar_name, title_subplots=title_subplots + ) + self.assertEqual("Threshold Intensity: 10.0 m/s", axis1.get_title()) + self.assertEqual("Threshold Intensity: 20.0 m/s", axis2.get_title()) plt.close() def test_plot_from_gdf_log(self): """test plot_from_gdf() with log color bar)""" return_periods = gpd.GeoDataFrame( - data = ((2., 5.), (3., 6.), (None, 2.), (1., 1000.)), - columns = ('10.0', '20.0') + data=((2.0, 5.0), (3.0, 6.0), (None, 2.0), (1.0, 1000.0)), + columns=("10.0", 
"20.0"), ) - return_periods['geometry'] = (Point(45., 26.), Point(46., 26.), Point(45., 27.), Point(46., 27.)) - colorbar_name = 'Return Periods (Years)' - title_subplots = lambda cols: [f'Threshold Intensity: {col} m/s' for col in cols] + return_periods["geometry"] = ( + Point(45.0, 26.0), + Point(46.0, 26.0), + Point(45.0, 27.0), + Point(46.0, 27.0), + ) + colorbar_name = "Return Periods (Years)" + title_subplots = lambda cols: [ + f"Threshold Intensity: {col} m/s" for col in cols + ] (axis1, axis2) = u_plot.plot_from_gdf( - return_periods, - colorbar_name=colorbar_name, - title_subplots=title_subplots) - self.assertEqual('Threshold Intensity: 10.0 m/s', axis1.get_title()) - self.assertEqual('Threshold Intensity: 20.0 m/s', axis2.get_title()) + return_periods, colorbar_name=colorbar_name, title_subplots=title_subplots + ) + self.assertEqual("Threshold Intensity: 10.0 m/s", axis1.get_title()) + self.assertEqual("Threshold Intensity: 20.0 m/s", axis2.get_title()) plt.close() + # Execute Tests if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestFuncs) diff --git a/climada/util/test/test_save.py b/climada/util/test/test_save.py index 24ea298d1..6a43e58dd 100644 --- a/climada/util/test/test_save.py +++ b/climada/util/test/test_save.py @@ -18,15 +18,17 @@ Test save module. """ + import unittest from pathlib import Path from climada import CONFIG -from climada.util.save import save, load +from climada.util.save import load, save DATA_DIR = CONFIG.util.test_data.str() IN_CONFIG = CONFIG.local_data.save_dir.str() + class TestSave(unittest.TestCase): """Test save function""" @@ -38,22 +40,22 @@ def tearDown(self): def test_entity_in_save_dir(self): """Returns the same list if its length is correct.""" - file_name = 'save_test.pkl' - ent = {'value': [1, 2, 3]} - with self.assertLogs('climada.util.save', level='INFO') as cm: + file_name = "save_test.pkl" + ent = {"value": [1, 2, 3]} + with self.assertLogs("climada.util.save", level="INFO") as cm: save(file_name, ent) self.assertTrue(CONFIG.local_data.save_dir.dir().joinpath(file_name).is_file()) - self.assertTrue((file_name in cm.output[0]) or - (file_name in cm.output[1])) + self.assertTrue((file_name in cm.output[0]) or (file_name in cm.output[1])) def test_load_pass(self): """Load previously saved variable""" - file_name = 'save_test.pkl' - ent = {'value': [1, 2, 3]} + file_name = "save_test.pkl" + ent = {"value": [1, 2, 3]} save(file_name, ent) res = load(file_name) - self.assertTrue('value' in res) - self.assertTrue(res['value'] == ent['value']) + self.assertTrue("value" in res) + self.assertTrue(res["value"] == ent["value"]) + # Execute Tests if __name__ == "__main__": diff --git a/climada/util/test/test_select.py b/climada/util/test/test_select.py index 7a7097735..184a7b0c9 100755 --- a/climada/util/test/test_select.py +++ b/climada/util/test/test_select.py @@ -19,22 +19,24 @@ Test select module. 
""" - import unittest + import numpy as np from climada.util.select import get_attributes_with_matching_dimension -class Dummy(): + +class Dummy: def __init__(self): self.oneD3 = [1, 2, 3] self.oneD4 = [1, 2, 3, 4] self.twoD2 = [[1, 2, 3], [1, 2, 3, 4]] self.twoD3 = [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]] - self.twoD4 = [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3 ,4]] + self.twoD4 = [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]] self.twonp = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) + class TestGetAttributesDimension_pass(unittest.TestCase): """Test get_attributes_with_matching_dimension function""" @@ -43,18 +45,17 @@ def test_select_pass(self): dummy = Dummy() list_attrs = get_attributes_with_matching_dimension(dummy, [3]) - self.assertTrue(np.array_equal(list_attrs, ['oneD3', 'twoD3', 'twonp'])) + self.assertTrue(np.array_equal(list_attrs, ["oneD3", "twoD3", "twonp"])) list_attrs = get_attributes_with_matching_dimension(dummy, [4, 4]) - self.assertTrue(np.array_equal(list_attrs, ['twoD4'])) + self.assertTrue(np.array_equal(list_attrs, ["twoD4"])) list_attrs = get_attributes_with_matching_dimension(dummy, [3, 4]) - self.assertTrue(np.array_equal(list_attrs, ['twoD3', 'twonp'])) + self.assertTrue(np.array_equal(list_attrs, ["twoD3", "twonp"])) list_attrs = get_attributes_with_matching_dimension(dummy, [5]) self.assertTrue(np.array_equal(list_attrs, [])) - # Execute Tests if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestGetAttributesDimension_pass) diff --git a/climada/util/test/test_value_representation.py b/climada/util/test/test_value_representation.py index 61cf6a932..dfd662e5b 100644 --- a/climada/util/test/test_value_representation.py +++ b/climada/util/test/test_value_representation.py @@ -19,12 +19,18 @@ Test of util.math module """ - -from climada.util.value_representation import sig_dig, sig_dig_list, ABBREV -from climada.util.value_representation import value_to_monetary_unit, safe_divide +import math import unittest + import numpy as np -import math + +from climada.util.value_representation import ( + ABBREV, + safe_divide, + sig_dig, + sig_dig_list, + value_to_monetary_unit, +) class TestDigits(unittest.TestCase): @@ -37,9 +43,7 @@ def test_sig_dig_pass(self): nbs_out = [1.23, 12300, -12300, -12.3] for nb_in, nb_out in zip(nbs_in, nbs_out): self.assertEqual(sig_dig(nb_in, n_sig_dig), nb_out) - self.assertTrue( - np.array_equal(sig_dig_list(nbs_in, n_sig_dig), nbs_out) - ) + self.assertTrue(np.array_equal(sig_dig_list(nbs_in, n_sig_dig), nbs_out)) def test_sig_dig_fail(self): """Test sig_dig function""" @@ -48,16 +52,21 @@ def test_sig_dig_fail(self): nbs_out = [1.23, 12300, -12300, -12.3] for nb_in, nb_out in zip(nbs_in, nbs_out): self.assertNotEqual(sig_dig(nb_in, n_sig_dig_wrong), nb_out) - self.assertFalse( - np.array_equal(sig_dig_list(nbs_in, n_sig_dig_wrong), nbs_out) - ) + self.assertFalse(np.array_equal(sig_dig_list(nbs_in, n_sig_dig_wrong), nbs_out)) def test_value_to_monetary_unit_pass(self): """Test money_unit function""" nbs_in = [-1e10, -1e6, -1e2, 0, 1e3, 1e7, 1e11] nbs_out = [-10, -1, -100, 0, 1, 10, 100] - names_out = [ABBREV[1e9], ABBREV[1e6], ABBREV[1], ABBREV[1], - ABBREV[1e3], ABBREV[1e6], ABBREV[1e9]] + names_out = [ + ABBREV[1e9], + ABBREV[1e6], + ABBREV[1], + ABBREV[1], + ABBREV[1e3], + ABBREV[1e6], + ABBREV[1e9], + ] for j, (nb_in, nb_out) in enumerate(zip(nbs_in, nbs_out)): money, names = value_to_monetary_unit(nb_in) self.assertEqual(money[0], nb_out) @@ -67,8 +76,15 @@ def 
test_value_to_monetary_unit_0inf_pass(self): """Test money_unit function""" nbs_in = [-math.inf, 0, 1e-10, 1e-5, math.inf] nbs_out = [-math.inf, 0, 1e-10, 1e-5, math.inf] - names_out = [ABBREV[1], ABBREV[1], ABBREV[1], ABBREV[1], - ABBREV[1], ABBREV[1], ABBREV[1]] + names_out = [ + ABBREV[1], + ABBREV[1], + ABBREV[1], + ABBREV[1], + ABBREV[1], + ABBREV[1], + ABBREV[1], + ] for j, (nb_in, nb_out) in enumerate(zip(nbs_in, nbs_out)): money, names = value_to_monetary_unit(nb_in) self.assertEqual(money[0], nb_out) @@ -79,16 +95,29 @@ def test_value_to_monetary_unit_nan_pass(self): nb_in = math.nan money, name = value_to_monetary_unit(nb_in) self.assertTrue(math.isnan(money[0])) - self.assertEqual(name, '') - + self.assertEqual(name, "") def test_value_to_monetary_unit_sigdig_pass(self): """Test money_unit function with significant digits""" - nbs_in = [-1e10*1.2345, -1e6*1.2345, -1e2*1.2345, 0, 1e3*1.2345, - 1e7*1.2345, 1e11*1.2345] + nbs_in = [ + -1e10 * 1.2345, + -1e6 * 1.2345, + -1e2 * 1.2345, + 0, + 1e3 * 1.2345, + 1e7 * 1.2345, + 1e11 * 1.2345, + ] nbs_out = [-12.3, -1.23, -123, 0, 1.23, 12.3, 123] - names_out = [ABBREV[1e9], ABBREV[1e6], ABBREV[1], ABBREV[1], - ABBREV[1e3], ABBREV[1e6], ABBREV[1e9]] + names_out = [ + ABBREV[1e9], + ABBREV[1e6], + ABBREV[1], + ABBREV[1], + ABBREV[1e3], + ABBREV[1e6], + ABBREV[1e9], + ] for j, (nb_in, nb_out) in enumerate(zip(nbs_in, nbs_out)): money, names = value_to_monetary_unit(nb_in, n_sig_dig=3) self.assertEqual(money[0], nb_out) @@ -96,13 +125,17 @@ def test_value_to_monetary_unit_sigdig_pass(self): def test_value_to_monetary_unit_list_pass(self): """Test money_unit function with list of numbers""" - nbs_in = [-1e10*1.2345, -1e9*1.2345] + nbs_in = [-1e10 * 1.2345, -1e9 * 1.2345] nbs_out = [-12.3, -1.23] name_out = ABBREV[1e9] money, name = value_to_monetary_unit(nbs_in, n_sig_dig=3) self.assertTrue(np.array_equal(money, nbs_out)) self.assertEqual(name, name_out) - nbs_in = [1e4*1.2345, 1e3*1.2345, 1e2*1.2345,] + nbs_in = [ + 1e4 * 1.2345, + 1e3 * 1.2345, + 1e2 * 1.2345, + ] nbs_out = [12.3, 1.23, 0.123] name_out = ABBREV[1e3] money, name = value_to_monetary_unit(nbs_in, n_sig_dig=3) @@ -111,13 +144,13 @@ def test_value_to_monetary_unit_list_pass(self): def test_value_to_monetary_unit_list_0inf_pass(self): """Test money_unit function with list of numbers""" - nbs_in = [-1e10*1.2345, -1e9*1.2345, 0, math.inf] + nbs_in = [-1e10 * 1.2345, -1e9 * 1.2345, 0, math.inf] nbs_out = [-12.3, -1.23, 0, math.inf] name_out = ABBREV[1e9] money, name = value_to_monetary_unit(nbs_in, n_sig_dig=3) self.assertTrue(np.array_equal(money, nbs_out)) self.assertEqual(name, name_out) - nbs_in = [1e4*1.2345, 1e3*1.2345, 1e2*1.2345, 0, math.inf] + nbs_in = [1e4 * 1.2345, 1e3 * 1.2345, 1e2 * 1.2345, 0, math.inf] nbs_out = [12.3, 1.23, 0.123, 0, math.inf] name_out = ABBREV[1e3] money, name = value_to_monetary_unit(nbs_in, n_sig_dig=3) @@ -126,7 +159,7 @@ def test_value_to_monetary_unit_list_0inf_pass(self): def test_value_to_monetary_unit_list_nan_pass(self): """Test money_unit function with list of numbers""" - nbs_in = [-1e10*1.2345, -1e9*1.2345, math.nan] + nbs_in = [-1e10 * 1.2345, -1e9 * 1.2345, math.nan] nbs_out = [-12.3, -1.23, math.nan] name_out = ABBREV[1e9] money, name = value_to_monetary_unit(nbs_in, n_sig_dig=3) @@ -134,8 +167,9 @@ def test_value_to_monetary_unit_list_nan_pass(self): self.assertTrue(np.array_equal(money[:-1], nbs_out[:-1])) self.assertEqual(name, name_out) + class TestSafeDivide(unittest.TestCase): - + def test_scalar_division(self): 
self.assertEqual(safe_divide(10, 2), 5) self.assertEqual(safe_divide(-10, 5), -2) @@ -145,34 +179,47 @@ def test_scalar_division_by_zero(self): self.assertEqual(safe_divide(1, 0, replace_with=0), 0) def test_array_division(self): - np.testing.assert_array_equal(safe_divide(np.array([10, 20, 30]), np.array([2, 5, 10])), np.array([5, 4, 3])) + np.testing.assert_array_equal( + safe_divide(np.array([10, 20, 30]), np.array([2, 5, 10])), + np.array([5, 4, 3]), + ) def test_array_division_by_zero(self): - np.testing.assert_array_equal(safe_divide(np.array([1, 0, 3]), np.array([0, 0, 1])), np.array([np.nan, np.nan, 3])) - np.testing.assert_array_equal(safe_divide(np.array([1, 0, 3]), np.array([0, 0, 1]), replace_with=0), np.array([0, 0, 3])) + np.testing.assert_array_equal( + safe_divide(np.array([1, 0, 3]), np.array([0, 0, 1])), + np.array([np.nan, np.nan, 3]), + ) + np.testing.assert_array_equal( + safe_divide(np.array([1, 0, 3]), np.array([0, 0, 1]), replace_with=0), + np.array([0, 0, 3]), + ) def test_list_division_by_zero(self): list_num = [10, 0, 30] list_denom = [2, 0, 10] - expected_result = [5.0, np.nan, 3.0] - np.testing.assert_array_almost_equal(safe_divide(list_num, list_denom), expected_result) + expected_result = [5.0, np.nan, 3.0] + np.testing.assert_array_almost_equal( + safe_divide(list_num, list_denom), expected_result + ) def test_list_division(self): list_num = [10, 20, 30] list_denom = [2, 5, 10] expected_result = [5.0, 4.0, 3.0] - np.testing.assert_array_almost_equal(safe_divide(list_num, list_denom), expected_result) + np.testing.assert_array_almost_equal( + safe_divide(list_num, list_denom), expected_result + ) def test_nan_handling(self): self.assertTrue(np.isnan(safe_divide(np.nan, 1))) self.assertTrue(np.isnan(safe_divide(1, np.nan))) self.assertEqual(safe_divide(np.nan, 1, replace_with=0), 0) self.assertEqual(safe_divide(1, np.nan, replace_with=0), 0) - + def test_nan_handling_in_arrays(self): np.testing.assert_array_equal( safe_divide(np.array([1, np.nan, 3]), np.array([3, 2, 0])), - np.array([1/3, np.nan, np.nan]) + np.array([1 / 3, np.nan, np.nan]), ) def test_nan_handling_in_scalars(self): @@ -181,6 +228,7 @@ def test_nan_handling_in_scalars(self): self.assertEqual(safe_divide(np.nan, 1, replace_with=0), 0) self.assertEqual(safe_divide(1, np.nan, replace_with=0), 0) + # Execute Tests if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestDigits) diff --git a/climada/util/test/test_yearsets.py b/climada/util/test/test_yearsets.py index 2adb5e0e6..b5fae036d 100755 --- a/climada/util/test/test_yearsets.py +++ b/climada/util/test/test_yearsets.py @@ -19,42 +19,56 @@ Test of dates_times module """ +import collections import unittest + import numpy as np -import collections +import climada.util.dates_times as u_dt import climada.util.yearsets as yearsets from climada.engine import Impact -import climada.util.dates_times as u_dt - IMP = Impact() -IMP.at_event = np.arange(10,110,10) -IMP.frequency = np.array(np.ones(10)*0.2) - -SAMPLING_VECT = [np.array([0]), np.array([4]), np.array([1]), np.array([2, 5, 7, 9, 6]), - np.array([8]), np.array([3]), np.array([2, 6]), np.array([1]), - np.array([3,5]), np.array([])] +IMP.at_event = np.arange(10, 110, 10) +IMP.frequency = np.array(np.ones(10) * 0.2) + +SAMPLING_VECT = [ + np.array([0]), + np.array([4]), + np.array([1]), + np.array([2, 5, 7, 9, 6]), + np.array([8]), + np.array([3]), + np.array([2, 6]), + np.array([1]), + np.array([3, 5]), + np.array([]), +] YEAR_LIST = list(range(2000, 2010)) + class 
TestYearSets(unittest.TestCase): """Test yearset functions""" def test_impact_yearset(self): """Test computing a yearly impact (yimp) for a given list of years (YEAR_LIST) from an impact (IMP) and a sampling vector (SAMPLING_VECT)""" - yimp, sampling_vect = yearsets.impact_yearset(IMP, YEAR_LIST, correction_fac=False) + yimp, sampling_vect = yearsets.impact_yearset( + IMP, YEAR_LIST, correction_fac=False + ) self.assertAlmostEqual(len(sampling_vect), len(YEAR_LIST)) def test_impact_yearset_sampling_vect(self): """Test computing a yearly impact (yimp) for a given list of years (YEAR_LIST) from an impact (IMP) and a sampling vector (SAMPLING_VECT)""" - yimp = yearsets.impact_yearset_from_sampling_vect(IMP, YEAR_LIST, SAMPLING_VECT, False) + yimp = yearsets.impact_yearset_from_sampling_vect( + IMP, YEAR_LIST, SAMPLING_VECT, False + ) self.assertAlmostEqual(yimp.at_event[3], 340) - self.assertEqual(u_dt.date_to_str(yimp.date)[0], '2000-01-01') + self.assertEqual(u_dt.date_to_str(yimp.date)[0], "2000-01-01") self.assertAlmostEqual(np.sum(yimp.at_event), 770) def test_sample_from_poisson(self): @@ -71,14 +85,29 @@ def test_sample_from_poisson(self): def test_sample_events(self): """Test the sampling of 34 events out of a pool of 20 events.""" - events_per_year = np.array([0, 2, 2, 2, 1, 2, 3, 2, 2, 0, 2, 1, 2, 2, 2, 3, 5, 0, 1, 0]) - frequencies = np.array(np.ones(20)*0.2) + events_per_year = np.array( + [0, 2, 2, 2, 1, 2, 3, 2, 2, 0, 2, 1, 2, 2, 2, 3, 5, 0, 1, 0] + ) + frequencies = np.array(np.ones(20) * 0.2) sampling_vect = yearsets.sample_events(events_per_year, frequencies) self.assertEqual(len(sampling_vect), len(events_per_year)) - self.assertEqual(len(np.concatenate(sampling_vect).ravel()), np.sum(events_per_year)) - self.assertEqual(len(np.unique(list(collections.Counter(np.concatenate(sampling_vect).ravel()).values()))), 2) + self.assertEqual( + len(np.concatenate(sampling_vect).ravel()), np.sum(events_per_year) + ) + self.assertEqual( + len( + np.unique( + list( + collections.Counter( + np.concatenate(sampling_vect).ravel() + ).values() + ) + ) + ), + 2, + ) def test_computing_imp_per_year(self): """Test the calculation of impacts per year from a given sampling dictionary.""" @@ -93,6 +122,7 @@ def test_correction_fac(self): self.assertAlmostEqual(correction_factor, 1.42857143) + # Execute Tests if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestYearSets) diff --git a/climada/util/value_representation.py b/climada/util/value_representation.py index 9af0d258e..2590efa13 100755 --- a/climada/util/value_representation.py +++ b/climada/util/value_representation.py @@ -21,11 +21,11 @@ @author: ckropf """ +import decimal import logging import math -import decimal -import numpy as np +import numpy as np LOGGER = logging.getLogger(__name__) diff --git a/climada/util/yearsets.py b/climada/util/yearsets.py index 0e30102f8..1e1582719 100755 --- a/climada/util/yearsets.py +++ b/climada/util/yearsets.py @@ -15,6 +15,7 @@ import copy import logging + import numpy as np from numpy.random import default_rng @@ -22,6 +23,7 @@ LOGGER = logging.getLogger(__name__) + def impact_yearset(imp, sampled_years, lam=None, correction_fac=True, seed=None): """Create a yearset of impacts (yimp) containing a probabilistic impact for each year in the sampled_years list by sampling events from the impact received as input with a @@ -58,37 +60,43 @@ def impact_yearset(imp, sampled_years, lam=None, correction_fac=True, seed=None) sub-array per sampled_year, which contains the event_ids 
of the events used to calculate the annual impacts. Can be used to re-create the exact same yimp. - """ + """ n_sampled_years = len(sampled_years) - #create sampling vector + # create sampling vector if not lam: lam = np.sum(imp.frequency) events_per_year = sample_from_poisson(n_sampled_years, lam, seed=seed) sampling_vect = sample_events(events_per_year, imp.frequency, seed=seed) - #compute impact per sampled_year + # compute impact per sampled_year imp_per_year = compute_imp_per_year(imp, sampling_vect) - #copy imp object as basis for the yimp object + # copy imp object as basis for the yimp object yimp = copy.deepcopy(imp) - #save imp_per_year in yimp - if correction_fac: #adjust for sampling error + # save imp_per_year in yimp + if correction_fac: # adjust for sampling error yimp.at_event = imp_per_year / calculate_correction_fac(imp_per_year, imp) else: yimp.at_event = imp_per_year - #save calculations in yimp - yimp.event_id = np.arange(1, n_sampled_years+1) - yimp.date = u_dt.str_to_date([str(date) + '-01-01' for date in sampled_years]) - yimp.frequency = np.ones(n_sampled_years)*sum(len(row) for row in sampling_vect - )/n_sampled_years + # save calculations in yimp + yimp.event_id = np.arange(1, n_sampled_years + 1) + yimp.date = u_dt.str_to_date([str(date) + "-01-01" for date in sampled_years]) + yimp.frequency = ( + np.ones(n_sampled_years) + * sum(len(row) for row in sampling_vect) + / n_sampled_years + ) return yimp, sampling_vect -def impact_yearset_from_sampling_vect(imp, sampled_years, sampling_vect, correction_fac=True): + +def impact_yearset_from_sampling_vect( + imp, sampled_years, sampling_vect, correction_fac=True +): """Create a yearset of impacts (yimp) containing a probabilistic impact for each year in the sampled_years list by sampling events from the impact received as input following the sampling vector provided. 
@@ -124,23 +132,25 @@ def impact_yearset_from_sampling_vect(imp, sampled_years, sampling_vect, correct """ - #compute impact per sampled_year + # compute impact per sampled_year imp_per_year = compute_imp_per_year(imp, sampling_vect) - #copy imp object as basis for the yimp object + # copy imp object as basis for the yimp object yimp = copy.deepcopy(imp) - - if correction_fac: #adjust for sampling error + if correction_fac: # adjust for sampling error imp_per_year = imp_per_year / calculate_correction_fac(imp_per_year, imp) - #save calculations in yimp + # save calculations in yimp yimp.at_event = imp_per_year n_sampled_years = len(sampled_years) - yimp.event_id = np.arange(1, n_sampled_years+1) - yimp.date = u_dt.str_to_date([str(date) + '-01-01' for date in sampled_years]) - yimp.frequency = np.ones(n_sampled_years)*sum(len(row) for row in sampling_vect - )/n_sampled_years + yimp.event_id = np.arange(1, n_sampled_years + 1) + yimp.date = u_dt.str_to_date([str(date) + "-01-01" for date in sampled_years]) + yimp.frequency = ( + np.ones(n_sampled_years) + * sum(len(row) for row in sampling_vect) + / n_sampled_years + ) return yimp @@ -165,7 +175,7 @@ def sample_from_poisson(n_sampled_years, lam, seed=None): """ if seed is not None: np.random.seed(seed) - return np.round(np.random.poisson(lam=lam, size=n_sampled_years)).astype('int') + return np.round(np.random.poisson(lam=lam, size=n_sampled_years)).astype("int") def sample_events(events_per_year, freqs_orig, seed=None): @@ -199,40 +209,45 @@ def sample_events(events_per_year, freqs_orig, seed=None): freqs = freqs_orig indices = indices_orig - #sample events for each sampled year + # sample events for each sampled year for amount_events in events_per_year: - #if there are not enough input events, choice with no replace will fail + # if there are not enough input events, choice with no replace will fail if amount_events > len(freqs_orig): - raise ValueError(f"cannot sample {amount_events} distinct events for a single year" - f" when there are only {len(freqs_orig)} input events") - - #add the original indices and frequencies to the pool if there are less events - #in the pool than needed to fill the year one is sampling for - #or if the pool is empty (not covered in case amount_events is 0) + raise ValueError( + f"cannot sample {amount_events} distinct events for a single year" + f" when there are only {len(freqs_orig)} input events" + ) + + # add the original indices and frequencies to the pool if there are less events + # in the pool than needed to fill the year one is sampling for + # or if the pool is empty (not covered in case amount_events is 0) if len(np.unique(indices)) < amount_events or len(indices) == 0: indices = np.append(indices, indices_orig) freqs = np.append(freqs, freqs_orig) - #ensure that each event only occurs once per sampled year + # ensure that each event only occurs once per sampled year unique_events = np.unique(indices, return_index=True)[0] - probab_dis = freqs[np.unique(indices, return_index=True)[1]]/( - np.sum(freqs[np.unique(indices, return_index=True)[1]])) + probab_dis = freqs[np.unique(indices, return_index=True)[1]] / ( + np.sum(freqs[np.unique(indices, return_index=True)[1]]) + ) - #sample events + # sample events rng = default_rng(seed) - selected_events = rng.choice(unique_events, size=amount_events, replace=False, - p=probab_dis).astype('int') + selected_events = rng.choice( + unique_events, size=amount_events, replace=False, p=probab_dis + ).astype("int") - #determine used events to remove them from 
sampling pool + # determine used events to remove them from sampling pool idx_to_remove = [np.where(indices == event)[0][0] for event in selected_events] indices = np.delete(indices, idx_to_remove) freqs = np.delete(freqs, idx_to_remove) - #save sampled events in sampling vector + # save sampled events in sampling vector sampling_vect.append(selected_events) return sampling_vect + def compute_imp_per_year(imp, sampling_vect): """Sample annual impacts from the given event_impacts according to the sampling dictionary @@ -251,11 +266,13 @@ def compute_imp_per_year(imp, sampling_vect): Sampled impact per year (length = sampled_years) """ - imp_per_year = [np.sum(imp.at_event[list(sampled_events)]) for sampled_events in - sampling_vect] + imp_per_year = [ + np.sum(imp.at_event[list(sampled_events)]) for sampled_events in sampling_vect + ] return np.array(imp_per_year) + def calculate_correction_fac(imp_per_year, imp): """Calculate a correction factor that can be used to scale the yimp in such a way that the expected annual impact (eai) of the yimp amounts to the eai @@ -274,10 +291,10 @@ def calculate_correction_fac(imp_per_year, imp): The correction factor is calculated as imp_eai/yimp_eai """ - yimp_eai = np.sum(imp_per_year)/len(imp_per_year) - imp_eai = np.sum(imp.frequency*imp.at_event) - correction_factor = imp_eai/yimp_eai - LOGGER.info("The correction factor amounts to %s", (correction_factor-1)*100) + yimp_eai = np.sum(imp_per_year) / len(imp_per_year) + imp_eai = np.sum(imp.frequency * imp.at_event) + correction_factor = imp_eai / yimp_eai + LOGGER.info("The correction factor amounts to %s", (correction_factor - 1) * 100) # if correction_factor > 0.1: # tex = raw_input("Do you want to exclude small events?") diff --git a/doc/Makefile b/doc/Makefile index 0a8a51eba..41c2d07bf 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -9,7 +9,7 @@ PAPER = # Internal variables. 
PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -d _build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) ./ +ALLSPHINXOPTS = -d _build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) ./ .PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest diff --git a/doc/climada/climada.engine.rst b/doc/climada/climada.engine.rst index 91274418f..f21024fde 100644 --- a/doc/climada/climada.engine.rst +++ b/doc/climada/climada.engine.rst @@ -52,4 +52,3 @@ climada\.engine\.impact\_data module :members: :undoc-members: :show-inheritance: - diff --git a/doc/climada/climada.entity.disc_rates.rst b/doc/climada/climada.entity.disc_rates.rst index bc17051c6..4089561f0 100644 --- a/doc/climada/climada.entity.disc_rates.rst +++ b/doc/climada/climada.entity.disc_rates.rst @@ -8,4 +8,3 @@ climada\.entity\.disc\_rates\.base module :members: :undoc-members: :show-inheritance: - diff --git a/doc/climada/climada.entity.exposures.litpop.rst b/doc/climada/climada.entity.exposures.litpop.rst index 9e65391b0..62e233a06 100644 --- a/doc/climada/climada.entity.exposures.litpop.rst +++ b/doc/climada/climada.entity.exposures.litpop.rst @@ -24,4 +24,3 @@ climada\.entity\.exposures\.litpop\.nightlight module :members: :undoc-members: :show-inheritance: - diff --git a/doc/climada/climada.entity.exposures.rst b/doc/climada/climada.entity.exposures.rst index 30f175d10..952af75e8 100644 --- a/doc/climada/climada.entity.exposures.rst +++ b/doc/climada/climada.entity.exposures.rst @@ -12,4 +12,3 @@ climada\.entity\.exposures\.base module :members: :undoc-members: :show-inheritance: - diff --git a/doc/climada/climada.entity.impact_funcs.rst b/doc/climada/climada.entity.impact_funcs.rst index 91f88ff77..90ad9441b 100644 --- a/doc/climada/climada.entity.impact_funcs.rst +++ b/doc/climada/climada.entity.impact_funcs.rst @@ -32,4 +32,3 @@ climada\.entity\.impact\_funcs\.trop\_cyclone module :members: :undoc-members: :show-inheritance: - diff --git a/doc/climada/climada.entity.measures.rst b/doc/climada/climada.entity.measures.rst index a7d16c650..8e63a2082 100644 --- a/doc/climada/climada.entity.measures.rst +++ b/doc/climada/climada.entity.measures.rst @@ -16,4 +16,3 @@ climada\.entity\.measures\.measure\_set module :members: :undoc-members: :show-inheritance: - diff --git a/doc/climada/climada.hazard.centroids.rst b/doc/climada/climada.hazard.centroids.rst index 8038d406e..7a9c65a90 100644 --- a/doc/climada/climada.hazard.centroids.rst +++ b/doc/climada/climada.hazard.centroids.rst @@ -8,4 +8,3 @@ climada\.hazard\.centroids\.centr module :members: :undoc-members: :show-inheritance: - diff --git a/doc/climada/climada.hazard.rst b/doc/climada/climada.hazard.rst index 8e4767ae6..3b3bef00b 100644 --- a/doc/climada/climada.hazard.rst +++ b/doc/climada/climada.hazard.rst @@ -69,4 +69,3 @@ climada\.hazard\.tc\_tracks\_synth module :members: :undoc-members: :show-inheritance: - diff --git a/doc/climada/climada.hazard.trop_cyclone.rst b/doc/climada/climada.hazard.trop_cyclone.rst index c703126ec..caafdcd93 100644 --- a/doc/climada/climada.hazard.trop_cyclone.rst +++ b/doc/climada/climada.hazard.trop_cyclone.rst @@ -16,4 +16,3 @@ climada\.hazard\.trop\_cyclone\.trop\_cyclone\_windfields module :members: :undoc-members: :show-inheritance: - diff --git a/doc/climada/climada.rst b/doc/climada/climada.rst index e248812bc..557532912 100644 --- a/doc/climada/climada.rst +++ b/doc/climada/climada.rst @@ -8,4 +8,3 @@ Software documentation per package climada.entity 
climada.hazard climada.util - diff --git a/doc/climada/climada.util.rst b/doc/climada/climada.util.rst index 820fd43f7..98df93aec 100644 --- a/doc/climada/climada.util.rst +++ b/doc/climada/climada.util.rst @@ -152,4 +152,3 @@ climada\.util\.yearsets module :members: :undoc-members: :show-inheritance: - diff --git a/doc/conf.py b/doc/conf.py index 02e19ecc0..b4ef1dc69 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -18,49 +18,52 @@ # is relative to the documentation root, use os.path.abspath to make it # absolute, like shown here. # sys.path.append(os.path.abspath('sphinxext')) -sys.path.insert(0, os.path.abspath('../')) +sys.path.insert(0, os.path.abspath("../")) # set version from climada import _version + __version__ = _version.__version__ # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['matplotlib.sphinxext.plot_directive', - 'IPython.sphinxext.ipython_directive', - 'IPython.sphinxext.ipython_console_highlighting', - 'sphinx.ext.mathjax', - 'sphinx.ext.autodoc', - 'sphinx.ext.doctest', - 'sphinx.ext.inheritance_diagram', - 'sphinx.ext.viewcode', - 'sphinx.ext.napoleon', - 'sphinx.ext.ifconfig', - 'myst_nb', - 'sphinx_markdown_tables', - 'readthedocs_ext.readthedocs',] +extensions = [ + "matplotlib.sphinxext.plot_directive", + "IPython.sphinxext.ipython_directive", + "IPython.sphinxext.ipython_console_highlighting", + "sphinx.ext.mathjax", + "sphinx.ext.autodoc", + "sphinx.ext.doctest", + "sphinx.ext.inheritance_diagram", + "sphinx.ext.viewcode", + "sphinx.ext.napoleon", + "sphinx.ext.ifconfig", + "myst_nb", + "sphinx_markdown_tables", + "readthedocs_ext.readthedocs", +] # read the docs version used for links -if 'dev' in __version__: - read_docs_url = 'en/latest/' +if "dev" in __version__: + read_docs_url = "en/latest/" else: - read_docs_url = 'en/v{}/'.format(__version__) + read_docs_url = "en/v{}/".format(__version__) # Add any paths that contain templates here, relative to this directory. templates_path = [] # The encoding of source files. -#source_encoding = 'utf-8' +# source_encoding = 'utf-8' # The master toctree document. -master_doc = 'index' +master_doc = "index" # General information about the project. -project = 'CLIMADA' -copyright = '2017, ETH Zurich' -author = 'CLIMADA contributors' +project = "CLIMADA" +copyright = "2017, ETH Zurich" +author = "CLIMADA contributors" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -73,45 +76,45 @@ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. -language = 'en' +language = "en" # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: -#today = '' +# today = '' # Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' +# today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. -#unused_docs = [] +# unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. # exclude_trees = [] # The reST default role (used for this markup: `text`) to use for all documents. -#default_role = None +# default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. 
-#add_function_parentheses = True +# add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). -#add_module_names = True +# add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. -#show_authors = False +# show_authors = False # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = "sphinx" # A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] +# modindex_common_prefix = [] # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path -exclude_patterns = ['_build', 'test', 'Thumbs.db', '.DS_Store'] +exclude_patterns = ["_build", "test", "Thumbs.db", ".DS_Store"] # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True @@ -125,17 +128,17 @@ # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. -#html_theme_options = {} +# html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = [] +# html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". -#html_title = None +# html_title = None # A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None +# html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. @@ -149,45 +152,45 @@ # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +html_static_path = ["_static"] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' +# html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. -#html_use_smartypants = True +# html_use_smartypants = True # Custom sidebar templates, maps document names to template names. -#html_sidebars = {} +# html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. -#html_additional_pages = {} +# html_additional_pages = {} # If false, no module index is generated. -#html_use_modindex = True +# html_use_modindex = True # If false, no index is generated. -#html_use_index = True +# html_use_index = True # If true, the index is split into individual pages for each letter. -#html_split_index = False +# html_split_index = False # If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True +# html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. -#html_use_opensearch = '' +# html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). 
-#html_file_suffix = '' +# html_file_suffix = '' # Output file base name for HTML help builder. -htmlhelp_basename = 'climadadoc' +htmlhelp_basename = "climadadoc" # -- Options for LaTeX output -------------------------------------------------- @@ -195,47 +198,55 @@ latex_engine = "xelatex" # The paper size ('letter' or 'a4'). -#latex_paper_size = 'letter' +# latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). -#latex_font_size = '10pt' +# latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ - (master_doc, 'climada.tex', u'CLIMADA documentation', - u'CLIMADA contributors', 'manual'), + ( + master_doc, + "climada.tex", + "CLIMADA documentation", + "CLIMADA contributors", + "manual", + ), ] # The name of an image file (relative to this directory) to place at the top of # the title page. -#latex_logo = None +# latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. -#latex_use_parts = False +# latex_use_parts = False # Additional stuff for the LaTeX preamble. -#latex_preamble = '' +# latex_preamble = '' # Documents to append as an appendix to all manuals. -#latex_appendices = [] +# latex_appendices = [] # If false, no module index is generated. -#latex_use_modindex = True +# latex_use_modindex = True + # ----------------------------------------------------------------------------- # show __init__ documentation def skip(app, what, name, obj, skip, options): - if (name == "__init__"): + if name == "__init__": return False return skip + # remove docstrings modules def remove_module_docstring(app, what, name, obj, options, lines): if what == "module": del lines[:] + autodoc_member_order = "bysource" # --- MYST Parser settings ---- @@ -260,13 +271,15 @@ def remove_module_docstring(app, what, name, obj, options, lines): # --- + def setup(app): app.connect("autodoc-skip-member", skip) app.connect("autodoc-process-docstring", remove_module_docstring) # Pass to the app if we are building this on ReadTheDocs - on_rtd = True if (os.environ.get('READTHEDOCS') == 'True') else False - app.add_config_value('readthedocs', on_rtd, 'env') + on_rtd = True if (os.environ.get("READTHEDOCS") == "True") else False + app.add_config_value("readthedocs", on_rtd, "env") + # improve parameters description napoleon_use_param = False diff --git a/doc/guide/Guide_Configuration.ipynb b/doc/guide/Guide_Configuration.ipynb index 50ffc35f2..69056eba6 100644 --- a/doc/guide/Guide_Configuration.ipynb +++ b/doc/guide/Guide_Configuration.ipynb @@ -54,9 +54,9 @@ ], "source": [ "# suboptimal\n", - "my_dict = {'x': 4}\n", - "if my_dict['x'] > 3:\n", - " msg = 'well, arh, ...'\n", + "my_dict = {\"x\": 4}\n", + "if my_dict[\"x\"] > 3:\n", + " msg = \"well, arh, ...\"\n", "msg" ] }, @@ -78,10 +78,10 @@ ], "source": [ "# good\n", - "X = 'x'\n", + "X = \"x\"\n", "my_dict = {X: 4}\n", "if my_dict[X] > 3:\n", - " msg = 'yeah!'\n", + " msg = \"yeah!\"\n", "msg" ] }, @@ -103,7 +103,7 @@ ], "source": [ "# possibly overdoing it\n", - "X = 'x'\n", + "X = \"x\"\n", "Y = \"this doesn't mean that every string must be a constant\"\n", "my_dict = {X: 4}\n", "if my_dict[X] > 3:\n", @@ -139,13 +139,16 @@ ], "source": [ "import pandas as pd\n", - "X = 'x'\n", - "df = pd.DataFrame({'x':[1,2,3], 'y':[4,5,6]})\n", + "\n", + "X = \"x\"\n", + "df = pd.DataFrame({\"x\": [1, 2, 3], \"y\": [4, 5, 6]})\n", "try:\n", " df.X\n", "except:\n", - " from sys 
import stderr; stderr.write(\"this does not work\\n\")\n", - "df[X] # this does work but it's less pretty\n", + " from sys import stderr\n", + "\n", + " stderr.write(\"this does not work\\n\")\n", + "df[X] # this does work but it's less pretty\n", "df.x" ] }, @@ -357,7 +360,9 @@ "try:\n", " CONFIG.hazard.trop_cyclone.random_seed.str()\n", "except Exception as e:\n", - " from sys import stderr; stderr.write(f\"cannot convert random_seed to str: {e}\\n\")" + " from sys import stderr\n", + "\n", + " stderr.write(f\"cannot convert random_seed to str: {e}\\n\")" ] }, { diff --git a/doc/guide/Guide_Exception_Logging.ipynb b/doc/guide/Guide_Exception_Logging.ipynb index 55341f434..b4f776aa9 100644 --- a/doc/guide/Guide_Exception_Logging.ipynb +++ b/doc/guide/Guide_Exception_Logging.ipynb @@ -44,7 +44,7 @@ "metadata": {}, "outputs": [], "source": [ - "#Bad (1)\n", + "# Bad (1)\n", "x = 1\n", "try:\n", " l = len(events)\n", @@ -60,7 +60,7 @@ "metadata": {}, "outputs": [], "source": [ - "#Still bad (2)\n", + "# Still bad (2)\n", "try:\n", " l = len(events)\n", " if l < 1:\n", @@ -75,7 +75,7 @@ "metadata": {}, "outputs": [], "source": [ - "#Better, but still unsufficient (3)\n", + "# Better, but still unsufficient (3)\n", "try:\n", " l = len(events)\n", " if l < 1:\n", @@ -90,7 +90,7 @@ "metadata": {}, "outputs": [], "source": [ - "#Even better (4)\n", + "# Even better (4)\n", "try:\n", " l = len(events)\n", "except TypeError:\n", @@ -105,13 +105,13 @@ "metadata": {}, "outputs": [], "source": [ - "#Even better (5)\n", + "# Even better (5)\n", "try:\n", " l = len(events)\n", "except TypeError as tper:\n", " raise TypeError(\"The provided variable events is not a list\") from tper\n", "if l < 1:\n", - " raise ValueError(\"To compute an impact there must be at least one event.\")\n" + " raise ValueError(\"To compute an impact there must be at least one event.\")" ] }, { @@ -172,6 +172,7 @@ "source": [ "import logging\n", "from climada.util.config import LOGGER\n", + "\n", "LOGGER.setLevel(logging.ERROR)" ] }, diff --git a/doc/guide/Guide_Py_Performance.ipynb b/doc/guide/Guide_Py_Performance.ipynb index bb3cf209f..21f81313d 100644 --- a/doc/guide/Guide_Py_Performance.ipynb +++ b/doc/guide/Guide_Py_Performance.ipynb @@ -188,6 +188,7 @@ ], "source": [ "import numpy as np\n", + "\n", "%timeit np.sum(list_of_numbers)" ] }, @@ -947,6 +948,7 @@ "source": [ "from numba import njit\n", "\n", + "\n", "@njit\n", "def sum_array(arr):\n", " result = 0.0\n", diff --git a/doc/guide/Guide_PythonDos-n-Donts.ipynb b/doc/guide/Guide_PythonDos-n-Donts.ipynb index 85295356a..222ffd0ab 100644 --- a/doc/guide/Guide_PythonDos-n-Donts.ipynb +++ b/doc/guide/Guide_PythonDos-n-Donts.ipynb @@ -147,14 +147,12 @@ "outputs": [], "source": [ "# Vertically aligned with opening delimiter.\n", - "foo = long_function_name(var_one, var_two,\n", - " var_three, var_four)\n", + "foo = long_function_name(var_one, var_two, var_three, var_four)\n", + "\n", "\n", "# Hanging indentation (4 additonal spaces)\n", - "def very_very_long_function_name(\n", - " var_one, var_two, var_three,\n", - " var_four):\n", - " print(var_one)\n" + "def very_very_long_function_name(var_one, var_two, var_three, var_four):\n", + " print(var_one)" ] }, { @@ -303,6 +301,8 @@ " return math.sqrt(x)\n", " else:\n", " return None\n", + "\n", + "\n", "# Wrong\n", "def foo(x):\n", " if x >= 0:\n", @@ -601,7 +601,7 @@ "source": [ "@uppercase_decorator\n", "def say_hi():\n", - " return 'hello there'" + " return \"hello there\"" ] }, { diff --git a/doc/guide/Guide_Testing.ipynb 
b/doc/guide/Guide_Testing.ipynb index f1876080c..319d8ada5 100644 --- a/doc/guide/Guide_Testing.ipynb +++ b/doc/guide/Guide_Testing.ipynb @@ -209,7 +209,9 @@ "source": [ "from climada.test import get_test_file\n", "\n", - "my_test_file = get_test_file(ds_name='my-test-file', file_format='hdf5') # returns a pathlib.Path object" + "my_test_file = get_test_file(\n", + " ds_name=\"my-test-file\", file_format=\"hdf5\"\n", + ") # returns a pathlib.Path object" ] }, { @@ -240,11 +242,16 @@ "outputs": [], "source": [ "import climada\n", + "\n", + "\n", "def x(download_file=climada.util.files_handler.download_file):\n", - " filepath = download_file('http://real_data.ch')\n", + " filepath = download_file(\"http://real_data.ch\")\n", " return Path(filepath).stat().st_size\n", "\n", + "\n", "import unittest\n", + "\n", + "\n", "class TestX(unittest.TestCase):\n", " def download_file_dummy(url):\n", " return \"phony_data.ch\"\n", diff --git a/doc/guide/Guide_continuous_integration_GitHub_actions.ipynb b/doc/guide/Guide_continuous_integration_GitHub_actions.ipynb index d9b1d9053..f800f8eda 100644 --- a/doc/guide/Guide_continuous_integration_GitHub_actions.ipynb +++ b/doc/guide/Guide_continuous_integration_GitHub_actions.ipynb @@ -95,20 +95,23 @@ } ], "source": [ - "def x(b:bool):\n", + "def x(b: bool):\n", " if b:\n", - " print('been here')\n", + " print(\"been here\")\n", " return 4\n", " else:\n", - " print('been there')\n", + " print(\"been there\")\n", " return 0\n", "\n", - "def y(b:bool):\n", - " print('been everywhere')\n", - " return 1/x(b)\n", + "\n", + "def y(b: bool):\n", + " print(\"been everywhere\")\n", + " return 1 / x(b)\n", "\n", "\n", "import unittest\n", + "\n", + "\n", "class TestXY(unittest.TestCase):\n", " def test_x(self):\n", " self.assertEqual(x(True), 4)\n", @@ -117,6 +120,7 @@ " def test_y(self):\n", " self.assertEqual(y(True), 0.25)\n", "\n", + "\n", "unittest.TextTestRunner().run(unittest.TestLoader().loadTestsFromTestCase(TestXY));" ] }, diff --git a/doc/index.rst b/doc/index.rst index 732290eee..4ad14dd78 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -69,7 +69,7 @@ Jump right in: .. 
toctree:: :caption: API Reference :hidden: - + Python Modules diff --git a/doc/tutorial/0_intro_python.ipynb b/doc/tutorial/0_intro_python.ipynb index 43df82d5b..831898602 100644 --- a/doc/tutorial/0_intro_python.ipynb +++ b/doc/tutorial/0_intro_python.ipynb @@ -27,15 +27,15 @@ "metadata": {}, "outputs": [], "source": [ - "print('Addition: 2 + 2 =', 2 + 2)\n", - "print('Substraction: 50 - 5*6 =', 50 - 5*6)\n", - "print('Use of parenthesis: (50 - 5*6) / 4 =', (50 - 5*6) / 4)\n", - "print('Classic division returns a float: 17 / 3 =', 17 / 3)\n", - "print('Floor division discards the fractional part: 17 // 3 =', 17 // 3)\n", - "print('The % operator returns the remainder of the division: 17 % 3 =', 17 % 3)\n", - "print('Result * divisor + remainder: 5 * 3 + 2 =', 5 * 3 + 2)\n", - "print('5 squared: 5 ** 2 =', 5 ** 2)\n", - "print('2 to the power of 7: 2 ** 7 =', 2 ** 7)" + "print(\"Addition: 2 + 2 =\", 2 + 2)\n", + "print(\"Substraction: 50 - 5*6 =\", 50 - 5 * 6)\n", + "print(\"Use of parenthesis: (50 - 5*6) / 4 =\", (50 - 5 * 6) / 4)\n", + "print(\"Classic division returns a float: 17 / 3 =\", 17 / 3)\n", + "print(\"Floor division discards the fractional part: 17 // 3 =\", 17 // 3)\n", + "print(\"The % operator returns the remainder of the division: 17 % 3 =\", 17 % 3)\n", + "print(\"Result * divisor + remainder: 5 * 3 + 2 =\", 5 * 3 + 2)\n", + "print(\"5 squared: 5 ** 2 =\", 5**2)\n", + "print(\"2 to the power of 7: 2 ** 7 =\", 2**7)" ] }, { @@ -72,11 +72,11 @@ "metadata": {}, "outputs": [], "source": [ - "print('spam eggs') # single quotes\n", - "print('doesn\\'t') # use \\' to escape the single quote...\n", - "print(\"doesn't\") # ...or use double quotes instead\n", + "print(\"spam eggs\") # single quotes\n", + "print(\"doesn't\") # use \\' to escape the single quote...\n", + "print(\"doesn't\") # ...or use double quotes instead\n", + "print('\"Yes,\" he said.')\n", "print('\"Yes,\" he said.')\n", - "print(\"\\\"Yes,\\\" he said.\")\n", "print('\"Isn\\'t,\" she said.')" ] }, @@ -96,13 +96,13 @@ "metadata": {}, "outputs": [], "source": [ - "word = 'Python'\n", - "print('word = ', word)\n", - "print('Character in position 0: word[0] =', word[0])\n", - "print('Character in position 5: word[5] =', word[5])\n", - "print('Last character: word[-1] =', word[-1])\n", - "print('Second-last character: word[-2] =', word[-2])\n", - "print('word[-6] =', word[-6])" + "word = \"Python\"\n", + "print(\"word = \", word)\n", + "print(\"Character in position 0: word[0] =\", word[0])\n", + "print(\"Character in position 5: word[5] =\", word[5])\n", + "print(\"Last character: word[-1] =\", word[-1])\n", + "print(\"Second-last character: word[-2] =\", word[-2])\n", + "print(\"word[-6] =\", word[-6])" ] }, { @@ -118,8 +118,8 @@ "metadata": {}, "outputs": [], "source": [ - "print('Characters from position 0 (included) to 2 (excluded): word[0:2] =', word[0:2])\n", - "print('Characters from position 2 (included) to 5 (excluded): word[2:5] =', word[2:5])" + "print(\"Characters from position 0 (included) to 2 (excluded): word[0:2] =\", word[0:2])\n", + "print(\"Characters from position 2 (included) to 5 (excluded): word[2:5] =\", word[2:5])" ] }, { @@ -145,11 +145,11 @@ "outputs": [], "source": [ "squares = [1, 4, 9, 16, 25]\n", - "print('squares: ', squares)\n", - "print('Indexing returns the item: squares[0]:', squares[0])\n", - "print('squares[-1]:', squares[-1])\n", - "print('Slicing returns a new list: squares[-3:]:', squares[-3:])\n", - "print('squares[:]:', squares[:])" + "print(\"squares: \", squares)\n", + 
"print(\"Indexing returns the item: squares[0]:\", squares[0])\n", + "print(\"squares[-1]:\", squares[-1])\n", + "print(\"Slicing returns a new list: squares[-3:]:\", squares[-3:])\n", + "print(\"squares[:]:\", squares[:])" ] }, { @@ -184,7 +184,7 @@ "cubes = [1, 8, 27, 65, 125] # something's wrong here\n", "cubes[3] = 64 # replace the wrong value\n", "cubes.append(216) # add the cube of 6\n", - "cubes.append(7 ** 3) # and the cube of 7\n", + "cubes.append(7**3) # and the cube of 7\n", "cubes" ] }, @@ -197,8 +197,8 @@ "# Note: execution of this cell will fail\n", "\n", "# Try to modify a character of a string\n", - "word = 'Python'\n", - "word[0] = 'p'" + "word = \"Python\"\n", + "word[0] = \"p\"" ] }, { @@ -262,7 +262,7 @@ "metadata": {}, "outputs": [], "source": [ - "t = 12345, 54321, 'hello!'\n", + "t = 12345, 54321, \"hello!\"\n", "t[0]" ] }, @@ -322,8 +322,8 @@ "metadata": {}, "outputs": [], "source": [ - "t = 12345, 54321, 'hello!' # tuple packing\n", - "x, y, z = t # tuple unpacking\n", + "t = 12345, 54321, \"hello!\" # tuple packing\n", + "x, y, z = t # tuple unpacking\n", "x, y, z" ] }, @@ -344,8 +344,8 @@ "metadata": {}, "outputs": [], "source": [ - "basket = {'apple', 'orange', 'apple', 'pear', 'orange', 'banana'}\n", - "basket # show that duplicates have been removed" + "basket = {\"apple\", \"orange\", \"apple\", \"pear\", \"orange\", \"banana\"}\n", + "basket # show that duplicates have been removed" ] }, { @@ -354,7 +354,7 @@ "metadata": {}, "outputs": [], "source": [ - "'orange' in basket # fast membership testing" + "\"orange\" in basket # fast membership testing" ] }, { @@ -363,7 +363,7 @@ "metadata": {}, "outputs": [], "source": [ - "'crabgrass' in basket" + "\"crabgrass\" in basket" ] }, { @@ -373,9 +373,9 @@ "outputs": [], "source": [ "# Demonstrate set operations on unique letters from two words\n", - "a = set('abracadabra')\n", - "b = set('alacazam')\n", - "a # unique letters in a" + "a = set(\"abracadabra\")\n", + "b = set(\"alacazam\")\n", + "a # unique letters in a" ] }, { @@ -384,7 +384,7 @@ "metadata": {}, "outputs": [], "source": [ - "a - b # letters in a but not in b" + "a - b # letters in a but not in b" ] }, { @@ -393,7 +393,7 @@ "metadata": {}, "outputs": [], "source": [ - "a | b # letters in a or b or both" + "a | b # letters in a or b or both" ] }, { @@ -402,7 +402,7 @@ "metadata": {}, "outputs": [], "source": [ - "a & b # letters in both a and b" + "a & b # letters in both a and b" ] }, { @@ -411,7 +411,7 @@ "metadata": {}, "outputs": [], "source": [ - "a ^ b # letters in a or b but not both" + "a ^ b # letters in a or b but not both" ] }, { @@ -440,7 +440,7 @@ "metadata": {}, "outputs": [], "source": [ - "# Define a new set and try some set methods (freestyle)\n" + "# Define a new set and try some set methods (freestyle)" ] }, { @@ -465,8 +465,8 @@ "metadata": {}, "outputs": [], "source": [ - "tel = {'jack': 4098, 'sape': 4139}\n", - "tel['guido'] = 4127\n", + "tel = {\"jack\": 4098, \"sape\": 4139}\n", + "tel[\"guido\"] = 4127\n", "tel" ] }, @@ -476,7 +476,7 @@ "metadata": {}, "outputs": [], "source": [ - "tel['jack']" + "tel[\"jack\"]" ] }, { @@ -485,7 +485,7 @@ "metadata": {}, "outputs": [], "source": [ - "del tel['sape']" + "del tel[\"sape\"]" ] }, { @@ -494,7 +494,7 @@ "metadata": {}, "outputs": [], "source": [ - "tel['irv'] = 4127\n", + "tel[\"irv\"] = 4127\n", "tel" ] }, @@ -522,7 +522,7 @@ "metadata": {}, "outputs": [], "source": [ - "'guido' in tel" + "\"guido\" in tel" ] }, { @@ -531,7 +531,7 @@ "metadata": {}, "outputs": [], "source": [ - 
"'jack' not in tel" + "\"jack\" not in tel" ] }, { @@ -554,13 +554,13 @@ "metadata": {}, "outputs": [], "source": [ - "def fib(n): # write Fibonacci series up to n\n", - " \"\"\"Print a Fibonacci series up to n.\"\"\"\n", - " a, b = 0, 1 # two assignments in one line\n", - " while a < n:\n", - " print(a, end=' ')\n", - " a, b = b, a+b # two assignments in one line\n", - " print()" + "def fib(n): # write Fibonacci series up to n\n", + " \"\"\"Print a Fibonacci series up to n.\"\"\"\n", + " a, b = 0, 1 # two assignments in one line\n", + " while a < n:\n", + " print(a, end=\" \")\n", + " a, b = b, a + b # two assignments in one line\n", + " print()" ] }, { @@ -587,7 +587,7 @@ "outputs": [], "source": [ "print(fib)\n", - "print(type(fib)) # function type\n", + "print(type(fib)) # function type\n", "f = fib\n", "f(100)" ] @@ -608,15 +608,16 @@ "def dummy(x):\n", " x += x\n", "\n", + "\n", "xx = 5\n", - "print('xx before function call: ', xx)\n", + "print(\"xx before function call: \", xx)\n", "dummy(xx)\n", - "print('xx after function call: ', xx)\n", + "print(\"xx after function call: \", xx)\n", "\n", "yy = [5]\n", - "print('yy before function call: ', yy)\n", + "print(\"yy before function call: \", yy)\n", "dummy(yy)\n", - "print('yy after function call: ', yy)" + "print(\"yy after function call: \", yy)" ] }, { @@ -634,16 +635,16 @@ "metadata": {}, "outputs": [], "source": [ - "def ask_ok(prompt, retries=4, reminder='Please try again!'):\n", + "def ask_ok(prompt, retries=4, reminder=\"Please try again!\"):\n", " while True:\n", " ok = input(prompt)\n", - " if ok in ('y', 'ye', 'yes'):\n", + " if ok in (\"y\", \"ye\", \"yes\"):\n", " return True\n", - " if ok in ('n', 'no', 'nop', 'nope'):\n", + " if ok in (\"n\", \"no\", \"nop\", \"nope\"):\n", " return False\n", " retries = retries - 1\n", " if retries < 0:\n", - " raise ValueError('invalid user response')\n", + " raise ValueError(\"invalid user response\")\n", " print(reminder)" ] }, @@ -653,10 +654,10 @@ "metadata": {}, "outputs": [], "source": [ - "#This function can be called in several ways:\n", + "# This function can be called in several ways:\n", "\n", - "#giving only the mandatory argument:\n", - "ask_ok('Do you really want to quit?')\n" + "# giving only the mandatory argument:\n", + "ask_ok(\"Do you really want to quit?\")" ] }, { @@ -666,7 +667,7 @@ "outputs": [], "source": [ "# giving one of the optional arguments:\n", - "ask_ok('OK to overwrite the file?', 2)\n" + "ask_ok(\"OK to overwrite the file?\", 2)" ] }, { @@ -676,7 +677,7 @@ "outputs": [], "source": [ "# or even giving all arguments:\n", - "ask_ok('OK to overwrite the file?', 2, 'Come on, only yes or no!')" + "ask_ok(\"OK to overwrite the file?\", 2, \"Come on, only yes or no!\")" ] }, { @@ -692,7 +693,7 @@ "metadata": {}, "outputs": [], "source": [ - "ask_ok('OK to overwrite the file?', reminder='Come on, only yes or no!')" + "ask_ok(\"OK to overwrite the file?\", reminder=\"Come on, only yes or no!\")" ] }, { @@ -710,9 +711,11 @@ "source": [ "def test(x=None):\n", " if x is None:\n", - " print('no x here')\n", + " print(\"no x here\")\n", " else:\n", " print(x)\n", + "\n", + "\n", "test()" ] }, @@ -736,15 +739,15 @@ "metadata": {}, "outputs": [], "source": [ - "class Dog: # same as \"class Dog(object)\"\n", + "class Dog: # same as \"class Dog(object)\"\n", "\n", - " kind = 'canine' # class variable shared by all instances\n", + " kind = \"canine\" # class variable shared by all instances\n", "\n", - " def __init__(self, name): # initialization method\n", - " 
self.name = name # instance variable unique to each instance\n", - " self.tricks = [] # creates a new empty list for each dog\n", + " def __init__(self, name): # initialization method\n", + " self.name = name # instance variable unique to each instance\n", + " self.tricks = [] # creates a new empty list for each dog\n", "\n", - " def add_trick(self, trick): # class method\n", + " def add_trick(self, trick): # class method\n", " self.tricks.append(trick)" ] }, @@ -761,7 +764,9 @@ "metadata": {}, "outputs": [], "source": [ - "d = Dog('Fido') # creates a new instance of the class and assigns this object to the local variable d\n", + "d = Dog(\n", + " \"Fido\"\n", + ") # creates a new instance of the class and assigns this object to the local variable d\n", "d.name" ] }, @@ -771,9 +776,11 @@ "metadata": {}, "outputs": [], "source": [ - "e = Dog('Buddy') # creates a new instance of the class and assigns this object to the local variable e\n", - "d.add_trick('roll over')\n", - "e.add_trick('play dead')" + "e = Dog(\n", + " \"Buddy\"\n", + ") # creates a new instance of the class and assigns this object to the local variable e\n", + "d.add_trick(\"roll over\")\n", + "e.add_trick(\"play dead\")" ] }, { @@ -782,7 +789,7 @@ "metadata": {}, "outputs": [], "source": [ - "d.tricks # unique to d" + "d.tricks # unique to d" ] }, { @@ -791,7 +798,7 @@ "metadata": {}, "outputs": [], "source": [ - "e.tricks # unique to e" + "e.tricks # unique to e" ] }, { @@ -800,7 +807,7 @@ "metadata": {}, "outputs": [], "source": [ - "d.kind # shared by all dogs" + "d.kind # shared by all dogs" ] }, { @@ -809,7 +816,7 @@ "metadata": {}, "outputs": [], "source": [ - "e.kind # shared by all dogs" + "e.kind # shared by all dogs" ] }, { @@ -831,19 +838,22 @@ "metadata": {}, "outputs": [], "source": [ - "class Animal: # base class\n", + "class Animal: # base class\n", "\n", " def __init__(self, kind):\n", " self.kind = kind\n", " self.tricks = []\n", "\n", - " def add_trick(self, trick): # class method\n", + " def add_trick(self, trick): # class method\n", " self.tricks.append(trick)\n", "\n", - "class Dog(Animal): # derived class\n", "\n", - " def __init__(self): # override of __init__ base method\n", - " super(Dog, self).__init__('canine') # call Animal __init__ method with input string" + "class Dog(Animal): # derived class\n", + "\n", + " def __init__(self): # override of __init__ base method\n", + " super(Dog, self).__init__(\n", + " \"canine\"\n", + " ) # call Animal __init__ method with input string" ] }, { @@ -852,9 +862,9 @@ "metadata": {}, "outputs": [], "source": [ - "fido = Dog() # fido is automatically an animal of kind 'canine'\n", + "fido = Dog() # fido is automatically an animal of kind 'canine'\n", "print(fido.kind)\n", - "fido.add_trick('play dead') # Dog class can use Animal class\n", + "fido.add_trick(\"play dead\") # Dog class can use Animal class\n", "print(fido.tricks)" ] }, @@ -893,7 +903,8 @@ " for item in iterable:\n", " self.items_list.append(item)\n", "\n", - " __update = update # private copy of original update() method\n", + " __update = update # private copy of original update() method\n", + "\n", "\n", "class MappingSubclass(Mapping):\n", "\n", diff --git a/doc/tutorial/1_main_climada.ipynb b/doc/tutorial/1_main_climada.ipynb index 730d5e5ed..36ce87bb2 100644 --- a/doc/tutorial/1_main_climada.ipynb +++ b/doc/tutorial/1_main_climada.ipynb @@ -182,10 +182,13 @@ "source": [ "import numpy as np\n", "from climada.hazard import TCTracks\n", - "import warnings # To hide the warnings\n", - 
"warnings.filterwarnings('ignore')\n", + "import warnings # To hide the warnings\n", "\n", - "tracks = TCTracks.from_ibtracs_netcdf(provider='usa', basin='NA') # Here we download the full dataset for the analysis\n", + "warnings.filterwarnings(\"ignore\")\n", + "\n", + "tracks = TCTracks.from_ibtracs_netcdf(\n", + " provider=\"usa\", basin=\"NA\"\n", + ") # Here we download the full dataset for the analysis\n", "# afterwards (e.g. return period), but you can also use \"year_range\" to adjust the range of the dataset to be downloaded.\n", "# While doing that, you need to make sure that the year 2017 is included if you want to run the blocks with the codes\n", "# subsetting a specific tropic cyclone, which happened in 2017. (Of course, you can also change the subsetting codes.)" @@ -220,8 +223,10 @@ ], "source": [ "# plotting tracks can be very time consuming, depending on the number of tracks. So we choose only a few here, by limiting the time range to one year\n", - "tracks_2017 = TCTracks.from_ibtracs_netcdf(provider='usa', basin='NA', year_range = (2017, 2017))\n", - "tracks_2017 .plot(); # This may take a very long time" + "tracks_2017 = TCTracks.from_ibtracs_netcdf(\n", + " provider=\"usa\", basin=\"NA\", year_range=(2017, 2017)\n", + ")\n", + "tracks_2017.plot(); # This may take a very long time" ] }, { @@ -368,7 +373,9 @@ } ], "source": [ - "tracks.subset({\"sid\": \"2017260N12310\"}).plot(); # This is how we subset a TCTracks object" + "tracks.subset(\n", + " {\"sid\": \"2017260N12310\"}\n", + ").plot(); # This is how we subset a TCTracks object" ] }, { @@ -397,7 +404,7 @@ } ], "source": [ - "haz.plot_intensity(event='2017260N12310');" + "haz.plot_intensity(event=\"2017260N12310\");" ] }, { @@ -433,7 +440,7 @@ } ], "source": [ - "haz.plot_rp_intensity(return_periods=(5,10,20,40));" + "haz.plot_rp_intensity(return_periods=(5, 10, 20, 40));" ] }, { @@ -553,8 +560,10 @@ "source": [ "from climada.entity.exposures import LitPop\n", "\n", - "exp_litpop = LitPop.from_countries('Puerto Rico', res_arcsec = 120) # We'll go lower resolution than default to keep it simple\n", - "exp_litpop.set_geometry_points() # Set geodataframe geometries from lat lon data\n", + "exp_litpop = LitPop.from_countries(\n", + " \"Puerto Rico\", res_arcsec=120\n", + ") # We'll go lower resolution than default to keep it simple\n", + "exp_litpop.set_geometry_points() # Set geodataframe geometries from lat lon data\n", "\n", "exp_litpop.plot_hexbin(pop_name=True, linewidth=4, buffer=0.1);" ] @@ -647,7 +656,7 @@ } ], "source": [ - "exp_litpop.gdf['impf_TC'] = 1" + "exp_litpop.gdf[\"impf_TC\"] = 1" ] }, { @@ -688,8 +697,8 @@ "from climada.entity import Measure, MeasureSet\n", "\n", "meas_mangrove = Measure(\n", - " name='Mangrove',\n", - " haz_type='TC',\n", + " name=\"Mangrove\",\n", + " haz_type=\"TC\",\n", " color_rgb=np.array([0.2, 0.2, 0.7]),\n", " cost=500000000,\n", " mdd_impact=(1, 0),\n", @@ -762,11 +771,13 @@ } ], "source": [ - "mangrove_exp, mangrove_imp_fun_set, mangrove_haz = meas_mangrove.apply(exp_litpop, imp_fun_set, haz)\n", + "mangrove_exp, mangrove_imp_fun_set, mangrove_haz = meas_mangrove.apply(\n", + " exp_litpop, imp_fun_set, haz\n", + ")\n", "axes1 = imp_fun_set.plot()\n", - "axes1.set_title('TC: Emanuel (2011) impact function')\n", + "axes1.set_title(\"TC: Emanuel (2011) impact function\")\n", "axes2 = mangrove_imp_fun_set.plot()\n", - "axes2.set_title('TC: Modified impact function')" + "axes2.set_title(\"TC: Modified impact function\")" ] }, { @@ -792,8 +803,8 @@ ], "source": [ "meas_buildings 
= Measure(\n", - " name='Building code',\n", - " haz_type='TC',\n", + " name=\"Building code\",\n", + " haz_type=\"TC\",\n", " color_rgb=np.array([0.2, 0.7, 0.5]),\n", " cost=100000000,\n", " hazard_freq_cutoff=0.1,\n", @@ -802,7 +813,9 @@ "meas_set.append(meas_buildings)\n", "meas_set.check()\n", "\n", - "buildings_exp, buildings_imp_fun_set, buildings_haz = meas_buildings.apply(exp_litpop, imp_fun_set, haz)" + "buildings_exp, buildings_imp_fun_set, buildings_haz = meas_buildings.apply(\n", + " exp_litpop, imp_fun_set, haz\n", + ")" ] }, { @@ -861,7 +874,7 @@ } ], "source": [ - "haz.plot_rp_intensity(return_periods=(5, 20));\n", + "haz.plot_rp_intensity(return_periods=(5, 20))\n", "buildings_haz.plot_rp_intensity(return_periods=(5, 20));" ] }, @@ -906,8 +919,8 @@ "source": [ "from climada.entity import DiscRates\n", "\n", - "years=np.arange(1950, 2101)\n", - "rates=np.ones(years.size) * 0.02\n", + "years = np.arange(1950, 2101)\n", + "rates = np.ones(years.size) * 0.02\n", "disc = DiscRates(years=years, rates=rates)\n", "disc.check()\n", "disc.plot()" @@ -941,7 +954,7 @@ " exposures=exp_litpop,\n", " disc_rates=disc,\n", " impact_func_set=imp_fun_set,\n", - " measure_set=meas_set\n", + " measure_set=meas_set,\n", ")" ] }, @@ -1030,10 +1043,10 @@ } ], "source": [ - "freq_curve = imp.calc_freq_curve() # impact exceedance frequency curve\n", - "freq_curve.plot();\n", + "freq_curve = imp.calc_freq_curve() # impact exceedance frequency curve\n", + "freq_curve.plot()\n", "\n", - "print('Expected average annual impact: {:.3e} USD'.format(imp.aai_agg))" + "print(\"Expected average annual impact: {:.3e} USD\".format(imp.aai_agg))" ] }, { @@ -1071,7 +1084,7 @@ } ], "source": [ - "imp.plot_basemap_eai_exposure(buffer=0.1); # average annual impact at each exposure" + "imp.plot_basemap_eai_exposure(buffer=0.1); # average annual impact at each exposure" ] }, { @@ -1186,9 +1199,12 @@ "from climada.engine import CostBenefit\n", "\n", "cost_ben = CostBenefit()\n", - "cost_ben.calc(haz, ent, future_year=2040) # prints costs and benefits\n", - "cost_ben.plot_cost_benefit(); # plot cost benefit ratio and averted damage of every exposure\n", - "cost_ben.plot_event_view(return_per=(10, 20, 40)); # plot averted damage of each measure for every return period" + "cost_ben.calc(haz, ent, future_year=2040) # prints costs and benefits\n", + "cost_ben.plot_cost_benefit()\n", + "# plot cost benefit ratio and averted damage of every exposure\n", + "cost_ben.plot_event_view(\n", + " return_per=(10, 20, 40)\n", + "); # plot averted damage of each measure for every return period" ] }, { diff --git a/doc/tutorial/climada_engine_CostBenefit.ipynb b/doc/tutorial/climada_engine_CostBenefit.ipynb index 514bceb9e..de98c7926 100644 --- a/doc/tutorial/climada_engine_CostBenefit.ipynb +++ b/doc/tutorial/climada_engine_CostBenefit.ipynb @@ -257,15 +257,23 @@ "\n", "client = Client()\n", "future_year = 2080\n", - "haz_present = client.get_hazard('tropical_cyclone',\n", - " properties={'country_name': 'Haiti',\n", - " 'climate_scenario': 'historical',\n", - " 'nb_synth_tracks':'10'})\n", - "haz_future = client.get_hazard('tropical_cyclone',\n", - " properties={'country_name': 'Haiti',\n", - " 'climate_scenario': 'rcp60',\n", - " 'ref_year': str(future_year),\n", - " 'nb_synth_tracks':'10'})\n" + "haz_present = client.get_hazard(\n", + " \"tropical_cyclone\",\n", + " properties={\n", + " \"country_name\": \"Haiti\",\n", + " \"climate_scenario\": \"historical\",\n", + " \"nb_synth_tracks\": \"10\",\n", + " },\n", + ")\n", + "haz_future 
= client.get_hazard(\n", + " \"tropical_cyclone\",\n", + " properties={\n", + " \"country_name\": \"Haiti\",\n", + " \"climate_scenario\": \"rcp60\",\n", + " \"ref_year\": str(future_year),\n", + " \"nb_synth_tracks\": \"10\",\n", + " },\n", + ")" ] }, { @@ -366,7 +374,7 @@ } ], "source": [ - "exp_present = client.get_litpop(country='Haiti')" + "exp_present = client.get_litpop(country=\"Haiti\")" ] }, { @@ -388,8 +396,8 @@ "exp_future.ref_year = future_year\n", "n_years = exp_future.ref_year - exp_present.ref_year + 1\n", "growth_rate = 1.02\n", - "growth = growth_rate ** n_years\n", - "exp_future.gdf['value'] = exp_future.gdf['value'] * growth" + "growth = growth_rate**n_years\n", + "exp_future.gdf[\"value\"] = exp_future.gdf[\"value\"] * growth" ] }, { @@ -517,8 +525,8 @@ "source": [ "# This would be done automatically in Impact calculations\n", "# but it's better to do it explicitly before the calculation\n", - "exp_present.assign_centroids(haz_present, distance='approx')\n", - "exp_future.assign_centroids(haz_future, distance='approx')" + "exp_present.assign_centroids(haz_present, distance=\"approx\")\n", + "exp_future.assign_centroids(haz_future, distance=\"approx\")" ] }, { @@ -592,9 +600,9 @@ "# This is more out of politeness, since if there's only one impact function\n", "# and one `impf_` column, CLIMADA can figure it out\n", "exp_present.gdf.rename(columns={\"impf_\": \"impf_TC\"}, inplace=True)\n", - "exp_present.gdf['impf_TC'] = 1\n", + "exp_present.gdf[\"impf_TC\"] = 1\n", "exp_future.gdf.rename(columns={\"impf_\": \"impf_TC\"}, inplace=True)\n", - "exp_future.gdf['impf_TC'] = 1" + "exp_future.gdf[\"impf_TC\"] = 1" ] }, { @@ -619,20 +627,20 @@ "from climada.entity.measures import Measure, MeasureSet\n", "\n", "meas_1 = Measure(\n", - " haz_type='TC',\n", - " name='Measure A',\n", + " haz_type=\"TC\",\n", + " name=\"Measure A\",\n", " color_rgb=np.array([0.8, 0.1, 0.1]),\n", " cost=5000000000,\n", - " hazard_inten_imp=(1, -5), # Decrease wind speeds by 5 m/s\n", + " hazard_inten_imp=(1, -5), # Decrease wind speeds by 5 m/s\n", " risk_transf_cover=0,\n", ")\n", "\n", "meas_2 = Measure(\n", - " haz_type='TC',\n", - " name='Measure B',\n", + " haz_type=\"TC\",\n", + " name=\"Measure B\",\n", " color_rgb=np.array([0.1, 0.1, 0.8]),\n", " cost=220000000,\n", - " paa_impact=(1, -0.10), # 10% fewer assets affected\n", + " paa_impact=(1, -0.10), # 10% fewer assets affected\n", ")\n", "\n", "# gather all measures\n", @@ -684,10 +692,18 @@ "source": [ "from climada.entity import Entity\n", "\n", - "entity_present = Entity(exposures=exp_present, disc_rates=discount_zero,\n", - " impact_func_set=impf_set, measure_set=meas_set)\n", - "entity_future = Entity(exposures=exp_future, disc_rates=discount_zero,\n", - " impact_func_set=impf_set, measure_set=meas_set)" + "entity_present = Entity(\n", + " exposures=exp_present,\n", + " disc_rates=discount_zero,\n", + " impact_func_set=impf_set,\n", + " measure_set=meas_set,\n", + ")\n", + "entity_future = Entity(\n", + " exposures=exp_future,\n", + " disc_rates=discount_zero,\n", + " impact_func_set=impf_set,\n", + " measure_set=meas_set,\n", + ")" ] }, { @@ -737,8 +753,16 @@ "from climada.engine.cost_benefit import risk_aai_agg\n", "\n", "costben_measures_only = CostBenefit()\n", - "costben_measures_only.calc(haz_present, entity_present, haz_future=None, ent_future=None,\n", - " future_year=future_year, risk_func=risk_aai_agg, imp_time_depen=None, save_imp=True)" + "costben_measures_only.calc(\n", + " haz_present,\n", + " entity_present,\n", + " 
haz_future=None,\n", + " ent_future=None,\n", + " future_year=future_year,\n", + " risk_func=risk_aai_agg,\n", + " imp_time_depen=None,\n", + " save_imp=True,\n", + ")" ] }, { @@ -783,10 +807,12 @@ } ], "source": [ - "combined_costben = costben_measures_only.combine_measures(['Measure A', 'Measure B'],\n", - " 'Combined measures',\n", - " new_color=np.array([0.1, 0.8, 0.8]),\n", - " disc_rates=discount_zero)" + "combined_costben = costben_measures_only.combine_measures(\n", + " [\"Measure A\", \"Measure B\"],\n", + " \"Combined measures\",\n", + " new_color=np.array([0.1, 0.8, 0.8]),\n", + " disc_rates=discount_zero,\n", + ")" ] }, { @@ -877,8 +903,16 @@ ], "source": [ "costben = CostBenefit()\n", - "costben.calc(haz_present, entity_present, haz_future=haz_future, ent_future=entity_future,\n", - " future_year=future_year, risk_func=risk_aai_agg, imp_time_depen=1, save_imp=True)" + "costben.calc(\n", + " haz_present,\n", + " entity_present,\n", + " haz_future=haz_future,\n", + " ent_future=entity_future,\n", + " future_year=future_year,\n", + " risk_func=risk_aai_agg,\n", + " imp_time_depen=1,\n", + " save_imp=True,\n", + ")" ] }, { @@ -939,8 +973,10 @@ "source": [ "# define this as a function because we'll use it again later\n", "def waterfall():\n", - " return costben.plot_waterfall(haz_present, entity_present, haz_future, entity_future,\n", - " risk_func=risk_aai_agg)\n", + " return costben.plot_waterfall(\n", + " haz_present, entity_present, haz_future, entity_future, risk_func=risk_aai_agg\n", + " )\n", + "\n", "\n", "ax = waterfall()" ] @@ -992,8 +1028,15 @@ } ], "source": [ - "costben.plot_arrow_averted(axis = waterfall(), in_meas_names=['Measure A', 'Measure B'], accumulate=True, combine=False,\n", - " risk_func=risk_aai_agg, disc_rates=None, imp_time_depen=1)" + "costben.plot_arrow_averted(\n", + " axis=waterfall(),\n", + " in_meas_names=[\"Measure A\", \"Measure B\"],\n", + " accumulate=True,\n", + " combine=False,\n", + " risk_func=risk_aai_agg,\n", + " disc_rates=None,\n", + " imp_time_depen=1,\n", + ")" ] }, { @@ -1025,10 +1068,18 @@ }, "outputs": [], "source": [ - "entity_present_disc = Entity(exposures=exp_present, disc_rates=discount_stern,\n", - " impact_func_set=impf_set, measure_set=meas_set)\n", - "entity_future_disc = Entity(exposures=exp_future, disc_rates=discount_stern,\n", - " impact_func_set=impf_set, measure_set=meas_set)" + "entity_present_disc = Entity(\n", + " exposures=exp_present,\n", + " disc_rates=discount_stern,\n", + " impact_func_set=impf_set,\n", + " measure_set=meas_set,\n", + ")\n", + "entity_future_disc = Entity(\n", + " exposures=exp_future,\n", + " disc_rates=discount_stern,\n", + " impact_func_set=impf_set,\n", + " measure_set=meas_set,\n", + ")" ] }, { @@ -1083,9 +1134,17 @@ ], "source": [ "costben_disc = CostBenefit()\n", - "costben_disc.calc(haz_present, entity_present_disc, haz_future=haz_future, ent_future=entity_future_disc,\n", - " future_year=future_year, risk_func=risk_aai_agg, imp_time_depen=1, save_imp=True)\n", - "print(costben_disc.imp_meas_future['no measure']['impact'].imp_mat.shape)" + "costben_disc.calc(\n", + " haz_present,\n", + " entity_present_disc,\n", + " haz_future=haz_future,\n", + " ent_future=entity_future_disc,\n", + " future_year=future_year,\n", + " risk_func=risk_aai_agg,\n", + " imp_time_depen=1,\n", + " save_imp=True,\n", + ")\n", + "print(costben_disc.imp_meas_future[\"no measure\"][\"impact\"].imp_mat.shape)" ] }, { @@ -1194,18 +1253,22 @@ } ], "source": [ - "combined_costben_disc = 
costben_disc.combine_measures(['Measure A', 'Measure B'],\n", - " 'Combined measures',\n", - " new_color=np.array([0.1, 0.8, 0.8]),\n", - " disc_rates=discount_stern)\n", - "efc_present = costben_disc.imp_meas_present['no measure']['efc']\n", - "efc_future = costben_disc.imp_meas_future['no measure']['efc']\n", - "efc_combined_measures = combined_costben_disc.imp_meas_future['Combined measures']['efc']\n", + "combined_costben_disc = costben_disc.combine_measures(\n", + " [\"Measure A\", \"Measure B\"],\n", + " \"Combined measures\",\n", + " new_color=np.array([0.1, 0.8, 0.8]),\n", + " disc_rates=discount_stern,\n", + ")\n", + "efc_present = costben_disc.imp_meas_present[\"no measure\"][\"efc\"]\n", + "efc_future = costben_disc.imp_meas_future[\"no measure\"][\"efc\"]\n", + "efc_combined_measures = combined_costben_disc.imp_meas_future[\"Combined measures\"][\n", + " \"efc\"\n", + "]\n", "\n", "ax = plt.subplot(1, 1, 1)\n", - "efc_present.plot(axis=ax, color='blue', label='Present')\n", - "efc_future.plot(axis=ax, color='orange', label='Future, unadapted')\n", - "efc_combined_measures.plot(axis=ax, color='green', label='Future, adapted')\n", + "efc_present.plot(axis=ax, color=\"blue\", label=\"Present\")\n", + "efc_future.plot(axis=ax, color=\"orange\", label=\"Future, unadapted\")\n", + "efc_combined_measures.plot(axis=ax, color=\"green\", label=\"Future, adapted\")\n", "leg = ax.legend()" ] }, diff --git a/doc/tutorial/climada_engine_Forecast.ipynb b/doc/tutorial/climada_engine_Forecast.ipynb index 74cbd00f8..29c9a5930 100644 --- a/doc/tutorial/climada_engine_Forecast.ipynb +++ b/doc/tutorial/climada_engine_Forecast.ipynb @@ -42,12 +42,12 @@ "metadata": {}, "outputs": [], "source": [ - "#generate hazard\n", + "# generate hazard\n", "hazard, haz_model, run_datetime, event_date = generate_WS_forecast_hazard()\n", "# generate hazard with forecasts from past dates (works only if the files have already been downloaded)\n", "# hazard, haz_model, run_datetime, event_date = generate_WS_forecast_hazard(\n", "# run_datetime=datetime(2022,5,17),\n", - "# event_date=datetime(2022,5,19)) " + "# event_date=datetime(2022,5,19))" ] }, { @@ -56,7 +56,7 @@ "metadata": {}, "outputs": [], "source": [ - "#generate vulnerability\n", + "# generate vulnerability\n", "impact_function = ImpfStormEurope.from_welker()\n", "impact_function_set = ImpactFuncSet([impact_function])" ] @@ -67,12 +67,12 @@ "metadata": {}, "outputs": [], "source": [ - "#generate exposure and save to file\n", - "filename_exp = CONFIG.local_data.save_dir.dir() / ('exp_litpop_Switzerland.hdf5')\n", + "# generate exposure and save to file\n", + "filename_exp = CONFIG.local_data.save_dir.dir() / (\"exp_litpop_Switzerland.hdf5\")\n", "if filename_exp.exists():\n", " exposure = LitPop.from_hdf5(filename_exp)\n", "else:\n", - " exposure = LitPop.from_countries('Switzerland', reference_year=2020)\n", + " exposure = LitPop.from_countries(\"Switzerland\", reference_year=2020)\n", " exposure.write_hdf5(filename_exp)" ] }, @@ -82,7 +82,7 @@ "metadata": {}, "outputs": [], "source": [ - "#create and calculate Forecast\n", + "# create and calculate Forecast\n", "CH_WS_forecast = Forecast({run_datetime: hazard}, exposure, impact_function_set)\n", "CH_WS_forecast.calc()" ] @@ -106,7 +106,7 @@ } ], "source": [ - "CH_WS_forecast.plot_imp_map(save_fig=False,close_fig=False,proj=ccrs.epsg(2056));" + "CH_WS_forecast.plot_imp_map(save_fig=False, close_fig=False, proj=ccrs.epsg(2056));" ] }, { @@ -135,7 +135,7 @@ } ], "source": [ - 
"CH_WS_forecast.plot_hist(save_fig=False,close_fig=False);" + "CH_WS_forecast.plot_hist(save_fig=False, close_fig=False);" ] }, { @@ -164,7 +164,9 @@ } ], "source": [ - "CH_WS_forecast.plot_exceedence_prob(threshold=5000, save_fig=False, close_fig=False,proj=ccrs.epsg(2056));" + "CH_WS_forecast.plot_exceedence_prob(\n", + " threshold=5000, save_fig=False, close_fig=False, proj=ccrs.epsg(2056)\n", + ");" ] }, { @@ -198,31 +200,30 @@ "from climada.util.config import CONFIG\n", "\n", "\n", - "#create a file containing the polygons of Swiss cantons using natural earth\n", - "cantons_file = CONFIG.local_data.save_dir.dir() / 'cantons.shp'\n", - "adm1_shape_file = shapereader.natural_earth(resolution='10m',\n", - " category='cultural',\n", - " name='admin_1_states_provinces')\n", + "# create a file containing the polygons of Swiss cantons using natural earth\n", + "cantons_file = CONFIG.local_data.save_dir.dir() / \"cantons.shp\"\n", + "adm1_shape_file = shapereader.natural_earth(\n", + " resolution=\"10m\", category=\"cultural\", name=\"admin_1_states_provinces\"\n", + ")\n", "if not cantons_file.exists():\n", - " with fiona.open(adm1_shape_file, 'r') as source:\n", - " with fiona.open(\n", - " cantons_file, 'w',\n", - " **source.meta) as sink:\n", + " with fiona.open(adm1_shape_file, \"r\") as source:\n", + " with fiona.open(cantons_file, \"w\", **source.meta) as sink:\n", "\n", " for f in source:\n", - " if f['properties']['adm0_a3'] == 'CHE':\n", + " if f[\"properties\"][\"adm0_a3\"] == \"CHE\":\n", " sink.write(f)\n", - "CH_WS_forecast.plot_warn_map(str(cantons_file),\n", - " decision_level = 'polygon',\n", - " thresholds=[100000,500000,\n", - " 1000000,5000000],\n", - " probability_aggregation='mean',\n", - " area_aggregation='sum',\n", - " title=\"Building damage warning\",\n", - " explain_text=\"warn level based on aggregated damages\",\n", - " save_fig=False,\n", - " close_fig=False,\n", - " proj=ccrs.epsg(2056));" + "CH_WS_forecast.plot_warn_map(\n", + " str(cantons_file),\n", + " decision_level=\"polygon\",\n", + " thresholds=[100000, 500000, 1000000, 5000000],\n", + " probability_aggregation=\"mean\",\n", + " area_aggregation=\"sum\",\n", + " title=\"Building damage warning\",\n", + " explain_text=\"warn level based on aggregated damages\",\n", + " save_fig=False,\n", + " close_fig=False,\n", + " proj=ccrs.epsg(2056),\n", + ");" ] }, { @@ -255,43 +256,43 @@ "\n", "### generate exposure\n", "# find out which hazard coord to consider\n", - "CHE_borders = u_plot._get_borders(np.stack([exposure.gdf['latitude'].values,\n", - " exposure.gdf['longitude'].values],\n", - " axis=1)\n", - " )\n", - "centroid_selection = np.logical_and(np.logical_and(hazard.centroids.lat >= CHE_borders[2],\n", - " hazard.centroids.lat <= CHE_borders[3]),\n", - " np.logical_and(hazard.centroids.lon >= CHE_borders[0],\n", - " hazard.centroids.lon <= CHE_borders[1])\n", - " )\n", + "CHE_borders = u_plot._get_borders(\n", + " np.stack(\n", + " [exposure.gdf[\"latitude\"].values, exposure.gdf[\"longitude\"].values], axis=1\n", + " )\n", + ")\n", + "centroid_selection = np.logical_and(\n", + " np.logical_and(\n", + " hazard.centroids.lat >= CHE_borders[2], hazard.centroids.lat <= CHE_borders[3]\n", + " ),\n", + " np.logical_and(\n", + " hazard.centroids.lon >= CHE_borders[0], hazard.centroids.lon <= CHE_borders[1]\n", + " ),\n", + ")\n", "# Fill DataFrame with values for a \"neutral\" exposure (value = 1)\n", "\n", "exp_df = DataFrame()\n", - "exp_df['value'] = np.ones_like(hazard.centroids.lat[centroid_selection]) # 
provide value\n", - "exp_df['latitude'] = hazard.centroids.lat[centroid_selection]\n", - "exp_df['longitude'] = hazard.centroids.lon[centroid_selection]\n", - "exp_df['impf_WS'] = np.ones_like(hazard.centroids.lat[centroid_selection], int)\n", + "exp_df[\"value\"] = np.ones_like(\n", + " hazard.centroids.lat[centroid_selection]\n", + ") # provide value\n", + "exp_df[\"latitude\"] = hazard.centroids.lat[centroid_selection]\n", + "exp_df[\"longitude\"] = hazard.centroids.lon[centroid_selection]\n", + "exp_df[\"impf_WS\"] = np.ones_like(hazard.centroids.lat[centroid_selection], int)\n", "# Generate Exposures\n", "exp = Exposures(exp_df)\n", "exp.check()\n", - "exp.value_unit = 'warn_level'\n", + "exp.value_unit = \"warn_level\"\n", "\n", "### generate impact functions\n", "## impact functions for hazard based warnings\n", - "haz_type = 'WS'\n", + "haz_type = \"WS\"\n", "idx = 1\n", - "name = 'warn_level_low_elevation'\n", - "intensity_unit = 'm/s'\n", - "intensity = np.array([0.0, 19.439, \n", - " 19.44, 24.999, \n", - " 25.0, 30.549, \n", - " 30.55, 38.879, \n", - " 38.88, 100.0])\n", - "mdd = np.array([1.0, 1.0, \n", - " 2.0, 2.0, \n", - " 3.0, 3.0, \n", - " 4.0, 4.0, \n", - " 5.0, 5.0])\n", + "name = \"warn_level_low_elevation\"\n", + "intensity_unit = \"m/s\"\n", + "intensity = np.array(\n", + " [0.0, 19.439, 19.44, 24.999, 25.0, 30.549, 30.55, 38.879, 38.88, 100.0]\n", + ")\n", + "mdd = np.array([1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0, 5.0, 5.0])\n", "paa = np.ones_like(mdd)\n", "imp_fun_low = ImpactFunc(haz_type, idx, intensity, mdd, paa, intensity_unit, name)\n", "imp_fun_low.check()\n", @@ -305,7 +306,7 @@ "metadata": {}, "outputs": [], "source": [ - "#create and calculate Forecast\n", + "# create and calculate Forecast\n", "warn_forecast = Forecast({run_datetime: hazard}, exp, impf_set)\n", "warn_forecast.calc()" ] @@ -336,16 +337,18 @@ } ], "source": [ - "warn_forecast.plot_warn_map(cantons_file,\n", - " thresholds=[2,3,4,5],\n", - " decision_level = 'exposure_point',\n", - " probability_aggregation=0.5,\n", - " area_aggregation=0.5,\n", - " title=\"DWD ICON METEOROLOGICAL WARNING\",\n", - " explain_text=\"warn level based on wind gust thresholds\",\n", - " save_fig=False,\n", - " close_fig=False,\n", - " proj=ccrs.epsg(2056));" + "warn_forecast.plot_warn_map(\n", + " cantons_file,\n", + " thresholds=[2, 3, 4, 5],\n", + " decision_level=\"exposure_point\",\n", + " probability_aggregation=0.5,\n", + " area_aggregation=0.5,\n", + " title=\"DWD ICON METEOROLOGICAL WARNING\",\n", + " explain_text=\"warn level based on wind gust thresholds\",\n", + " save_fig=False,\n", + " close_fig=False,\n", + " proj=ccrs.epsg(2056),\n", + ");" ] }, { @@ -390,4 +393,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} \ No newline at end of file +} diff --git a/doc/tutorial/climada_engine_Impact.ipynb b/doc/tutorial/climada_engine_Impact.ipynb index bbe55afd6..b6ea21cd8 100644 --- a/doc/tutorial/climada_engine_Impact.ipynb +++ b/doc/tutorial/climada_engine_Impact.ipynb @@ -329,7 +329,9 @@ "from climada.entity import LitPop\n", "\n", "# Cuba with resolution 10km and financial_mode = income group.\n", - "exp_lp = LitPop.from_countries(countries=['CUB'], res_arcsec=300, fin_mode='income_group')\n", + "exp_lp = LitPop.from_countries(\n", + " countries=[\"CUB\"], res_arcsec=300, fin_mode=\"income_group\"\n", + ")\n", "exp_lp.check()" ] }, @@ -492,7 +494,7 @@ "# not needed for impact calculations\n", "# visualize the define exposure\n", "exp_lp.plot_raster()\n", - "print('\\n Raster properties exposures:', 
exp_lp.meta)" + "print(\"\\n Raster properties exposures:\", exp_lp.meta)" ] }, { @@ -540,13 +542,17 @@ "from climada.hazard import TCTracks, TropCyclone, Centroids\n", "\n", "# Load histrocial tropical cyclone tracks from ibtracs over the North Atlantic basin between 2010-2012\n", - "ibtracks_na = TCTracks.from_ibtracs_netcdf(provider='usa', basin='NA', year_range=(2010, 2012), correct_pres=True)\n", - "print('num tracks hist:', ibtracks_na.size)\n", + "ibtracks_na = TCTracks.from_ibtracs_netcdf(\n", + " provider=\"usa\", basin=\"NA\", year_range=(2010, 2012), correct_pres=True\n", + ")\n", + "print(\"num tracks hist:\", ibtracks_na.size)\n", "\n", - "ibtracks_na.equal_timestep(0.5) # Interpolation to make the track smooth and to allow applying calc_perturbed_trajectories\n", + "ibtracks_na.equal_timestep(\n", + " 0.5\n", + ") # Interpolation to make the track smooth and to allow applying calc_perturbed_trajectories\n", "# Add randomly generated tracks using the calc_perturbed_trajectories method (1 per historical track)\n", "ibtracks_na.calc_perturbed_trajectories(nb_synth_tracks=1)\n", - "print('num tracks hist+syn:', ibtracks_na.size)" + "print(\"num tracks hist+syn:\", ibtracks_na.size)" ] }, { @@ -620,8 +626,8 @@ "outputs": [], "source": [ "# Define the centroids from the exposures position\n", - "lat = exp_lp.gdf['latitude'].values\n", - "lon = exp_lp.gdf['longitude'].values\n", + "lat = exp_lp.gdf[\"latitude\"].values\n", + "lon = exp_lp.gdf[\"longitude\"].values\n", "centrs = Centroids.from_lat_lon(lat, lon)\n", "centrs.check()" ] @@ -702,6 +708,7 @@ "outputs": [], "source": [ "from climada.entity import ImpactFuncSet, ImpfTropCyclone\n", + "\n", "# impact function TC\n", "impf_tc = ImpfTropCyclone.from_emanuel_usa()\n", "\n", @@ -865,7 +872,7 @@ "source": [ "# Exposures: rename column and assign id\n", "exp_lp.gdf.rename(columns={\"impf_\": \"impf_\" + haz_type}, inplace=True)\n", - "exp_lp.gdf['impf_' + haz_type] = haz_id\n", + "exp_lp.gdf[\"impf_\" + haz_type] = haz_id\n", "exp_lp.check()\n", "exp_lp.gdf.head()" ] @@ -910,7 +917,10 @@ "source": [ "# Compute impact\n", "from climada.engine import ImpactCalc\n", - "imp = ImpactCalc(exp_lp, impf_set, tc).impact(save_mat=False) # Do not save the results geographically resolved (only aggregate values)" + "\n", + "imp = ImpactCalc(exp_lp, impf_set, tc).impact(\n", + " save_mat=False\n", + ") # Do not save the results geographically resolved (only aggregate values)" ] }, { @@ -1215,25 +1225,27 @@ "from datetime import datetime, date\n", "import pandas as pd\n", "\n", - "#set a harvest date\n", - "harvest_DOY=290 #17 October\n", + "# set a harvest date\n", + "harvest_DOY = 290 # 17 October\n", "\n", - "#loop over all events an check if they happened before or after harvest\n", - "event_ids_post_harvest=[]\n", - "event_ids_pre_harvest=[]\n", + "# loop over all events an check if they happened before or after harvest\n", + "event_ids_post_harvest = []\n", + "event_ids_pre_harvest = []\n", "for event_id in tc.event_id:\n", - " event_date = tc.date[np.where(tc.event_id==event_id)[0][0]]\n", - " day_of_year = event_date - date(datetime.fromordinal(event_date).year, 1, 1).toordinal() + 1\n", + " event_date = tc.date[np.where(tc.event_id == event_id)[0][0]]\n", + " day_of_year = (\n", + " event_date - date(datetime.fromordinal(event_date).year, 1, 1).toordinal() + 1\n", + " )\n", "\n", - " if day_of_year > harvest_DOY:\n", - " event_ids_post_harvest.append(event_id)\n", - " else:\n", - " event_ids_pre_harvest.append(event_id)\n", + " if 
day_of_year > harvest_DOY:\n", + " event_ids_post_harvest.append(event_id)\n", + " else:\n", + " event_ids_pre_harvest.append(event_id)\n", "\n", - "tc_post_harvest=tc.select(event_id=event_ids_post_harvest)\n", - "tc_pre_harvest=tc.select(event_id=event_ids_pre_harvest)\n", - "#print('pre-harvest:', tc_pre_harvest.event_name)\n", - "#print('post-harvest:', tc_post_harvest.event_name)" + "tc_post_harvest = tc.select(event_id=event_ids_post_harvest)\n", + "tc_pre_harvest = tc.select(event_id=event_ids_pre_harvest)\n", + "# print('pre-harvest:', tc_pre_harvest.event_name)\n", + "# print('post-harvest:', tc_post_harvest.event_name)" ] }, { @@ -1285,18 +1297,19 @@ ], "source": [ "from climada.engine import Impact\n", + "\n", "# impact function TC\n", "impf_tc = ImpfTropCyclone.from_emanuel_usa()\n", "# impact function TC after harvest is by factor 0.5 smaller\n", "impf_tc_posth = ImpfTropCyclone.from_emanuel_usa()\n", - "impf_tc_posth.mdd = impf_tc.mdd*0.1\n", + "impf_tc_posth.mdd = impf_tc.mdd * 0.1\n", "# add the impact function to an Impact function set\n", "impf_set = ImpactFuncSet([impf_tc])\n", "impf_set_posth = ImpactFuncSet([impf_tc_posth])\n", "impf_set.check()\n", "impf_set_posth.check()\n", "\n", - "#plot\n", + "# plot\n", "impf_set.plot()\n", "impf_set_posth.plot()\n", "\n", @@ -1360,16 +1373,17 @@ ], "source": [ "# Concatenate impacts again\n", - "imp_tot = Impact.concat([imp_preh,imp_posth])\n", + "imp_tot = Impact.concat([imp_preh, imp_posth])\n", "\n", - "#plot result\n", + "# plot result\n", "import matplotlib.pyplot as plt\n", - "ax=imp_preh.plot_hexbin_eai_exposure(gridsize=100,adapt_fontsize=False)\n", - "ax.set_title('Expected annual impact: Pre-Harvest')\n", - "ax=imp_posth.plot_hexbin_eai_exposure(gridsize=100,adapt_fontsize=False)\n", - "ax.set_title('Expected annual impact: Post-Harvest')\n", - "ax=imp_tot.plot_hexbin_eai_exposure(gridsize=100,adapt_fontsize=False)\n", - "ax.set_title('Expected annual impact: Total')\n" + "\n", + "ax = imp_preh.plot_hexbin_eai_exposure(gridsize=100, adapt_fontsize=False)\n", + "ax.set_title(\"Expected annual impact: Pre-Harvest\")\n", + "ax = imp_posth.plot_hexbin_eai_exposure(gridsize=100, adapt_fontsize=False)\n", + "ax.set_title(\"Expected annual impact: Post-Harvest\")\n", + "ax = imp_tot.plot_hexbin_eai_exposure(gridsize=100, adapt_fontsize=False)\n", + "ax.set_title(\"Expected annual impact: Total\")" ] }, { @@ -1459,22 +1473,34 @@ "from climada.engine import ImpactCalc\n", "\n", "# Set Exposures in points\n", - "exp_pnt = Exposures(crs='epsg:4326') #set coordinate system\n", - "exp_pnt.gdf['latitude'] = np.array([21.899326, 21.960728, 22.220574, 22.298390, 21.787977, 21.787977, 21.981732])\n", - "exp_pnt.gdf['longitude'] = np.array([88.307422, 88.565362, 88.378337, 87.806356, 88.348835, 88.348835, 89.246521])\n", - "exp_pnt.gdf['value'] = np.array([1.0e5, 1.2e5, 1.1e5, 1.1e5, 2.0e5, 2.5e5, 0.5e5])\n", + "exp_pnt = Exposures(crs=\"epsg:4326\") # set coordinate system\n", + "exp_pnt.gdf[\"latitude\"] = np.array(\n", + " [21.899326, 21.960728, 22.220574, 22.298390, 21.787977, 21.787977, 21.981732]\n", + ")\n", + "exp_pnt.gdf[\"longitude\"] = np.array(\n", + " [88.307422, 88.565362, 88.378337, 87.806356, 88.348835, 88.348835, 89.246521]\n", + ")\n", + "exp_pnt.gdf[\"value\"] = np.array([1.0e5, 1.2e5, 1.1e5, 1.1e5, 2.0e5, 2.5e5, 0.5e5])\n", "exp_pnt.check()\n", "exp_pnt.plot_scatter(buffer=0.05)\n", "\n", "# Set Hazard in Exposures points\n", "# set centroids from exposures coordinates\n", - "centr_pnt = 
Centroids.from_lat_lon(exp_pnt.gdf['latitude'].values, exp_pnt.gdf['longitude'].values, exp_pnt.crs)\n", + "centr_pnt = Centroids.from_lat_lon(\n", + " exp_pnt.gdf[\"latitude\"].values, exp_pnt.gdf[\"longitude\"].values, exp_pnt.crs\n", + ")\n", "# compute Hazard in that centroids\n", - "tr_pnt = TCTracks.from_ibtracs_netcdf(storm_id='2007314N10093')\n", + "tr_pnt = TCTracks.from_ibtracs_netcdf(storm_id=\"2007314N10093\")\n", "tc_pnt = TropCyclone.from_tracks(tr_pnt, centroids=centr_pnt)\n", "tc_pnt.check()\n", - "ax_pnt = tc_pnt.centroids.plot(c=np.array(tc_pnt.intensity[0,:].todense()).squeeze()) # plot intensity per point\n", - "ax_pnt.get_figure().colorbar(ax_pnt.collections[0], fraction=0.0175, pad=0.02).set_label('Intensity (m/s)') # add colorbar\n", + "ax_pnt = tc_pnt.centroids.plot(\n", + " c=np.array(tc_pnt.intensity[0, :].todense()).squeeze()\n", + ") # plot intensity per point\n", + "ax_pnt.get_figure().colorbar(\n", + " ax_pnt.collections[0], fraction=0.0175, pad=0.02\n", + ").set_label(\n", + " \"Intensity (m/s)\"\n", + ") # add colorbar\n", "\n", "# Set impact function\n", "impf_tc = ImpfTropCyclone.from_emanuel_usa()\n", @@ -1486,13 +1512,16 @@ "[haz_id] = impf_set.get_ids()[haz_type]\n", "# Exposures: rename column and assign id\n", "exp_lp.gdf.rename(columns={\"impf_\": \"impf_\" + haz_type}, inplace=True)\n", - "exp_lp.gdf['impf_' + haz_type] = haz_id\n", + "exp_lp.gdf[\"impf_\" + haz_type] = haz_id\n", "exp_lp.gdf.head()\n", "\n", "# Compute Impact\n", "imp_pnt = ImpactCalc(exp_pnt, impf_pnt, tc_pnt).impact()\n", "# nearest neighbor of exposures to centroids gives identity\n", - "print('Nearest neighbor hazard.centroids indexes for each exposure:', exp_pnt.gdf['centr_TC'].values)\n", + "print(\n", + " \"Nearest neighbor hazard.centroids indexes for each exposure:\",\n", + " exp_pnt.gdf[\"centr_TC\"].values,\n", + ")\n", "imp_pnt.plot_scatter_eai_exposure(ignore_zero=False, buffer=0.05);" ] }, @@ -1680,24 +1709,32 @@ "from climada.util.constants import HAZ_DEMO_FL\n", "\n", "# Exposures belonging to a raster (the raser information is contained in the meta attribute)\n", - "exp_ras = LitPop.from_countries(countries=['VEN'], res_arcsec=300, fin_mode='income_group')\n", + "exp_ras = LitPop.from_countries(\n", + " countries=[\"VEN\"], res_arcsec=300, fin_mode=\"income_group\"\n", + ")\n", "exp_ras.gdf.reset_index()\n", "exp_ras.check()\n", "exp_ras.plot_raster()\n", - "print('\\n Raster properties exposures:', exp_ras.meta)\n", + "print(\"\\n Raster properties exposures:\", exp_ras.meta)\n", "\n", "# Initialize hazard object with haz_type = 'FL' (for Flood)\n", - "hazard_type='FL'\n", + "hazard_type = \"FL\"\n", "# Load a previously generated (either with CLIMADA or other means) hazard\n", "# from file (HAZ_DEMO_FL) and resample the hazard raster to the exposures' ones\n", "# Hint: check how other resampling methods affect to final impact\n", - "haz_ras = Hazard.from_raster([HAZ_DEMO_FL], haz_type=hazard_type, dst_crs=exp_ras.meta['crs'], transform=exp_ras.meta['transform'],\n", - " width=exp_ras.meta['width'], height=exp_ras.meta['height'],\n", - " resampling=Resampling.nearest)\n", - "haz_ras.intensity[haz_ras.intensity==-9999] = 0 # correct no data values\n", + "haz_ras = Hazard.from_raster(\n", + " [HAZ_DEMO_FL],\n", + " haz_type=hazard_type,\n", + " dst_crs=exp_ras.meta[\"crs\"],\n", + " transform=exp_ras.meta[\"transform\"],\n", + " width=exp_ras.meta[\"width\"],\n", + " height=exp_ras.meta[\"height\"],\n", + " resampling=Resampling.nearest,\n", + ")\n", + 
"haz_ras.intensity[haz_ras.intensity == -9999] = 0 # correct no data values\n", "haz_ras.check()\n", "haz_ras.plot_intensity(1)\n", - "print('Raster properties centroids:', haz_ras.centroids.meta)\n", + "print(\"Raster properties centroids:\", haz_ras.centroids.meta)\n", "\n", "# Set dummy impact function\n", "intensity = np.linspace(0, 10, 100)\n", @@ -1710,13 +1747,16 @@ "\n", "# Exposures: rename column and assign id\n", "exp_lp.gdf.rename(columns={\"impf_\": \"impf_\" + hazard_type}, inplace=True)\n", - "exp_lp.gdf['impf_' + haz_type] = haz_id\n", + "exp_lp.gdf[\"impf_\" + haz_type] = haz_id\n", "exp_lp.gdf.head()\n", "\n", "# Compute impact\n", "imp_ras = ImpactCalc(exp_ras, impf_ras, haz_ras).impact(save_mat=False)\n", "# nearest neighbor of exposures to centroids is not identity because litpop does not contain data outside the country polygon\n", - "print('\\n Nearest neighbor hazard.centroids indexes for each exposure:', exp_ras.gdf['centr_FL'].values)\n", + "print(\n", + " \"\\n Nearest neighbor hazard.centroids indexes for each exposure:\",\n", + " exp_ras.gdf[\"centr_FL\"].values,\n", + ")\n", "imp_ras.plot_raster_eai_exposure();" ] }, @@ -1957,7 +1997,7 @@ "from climada_petals.entity import BlackMarble\n", "\n", "exp_video = BlackMarble()\n", - "exp_video.set_countries(['Cuba'], 2016, res_km=2.5)\n", + "exp_video.set_countries([\"Cuba\"], 2016, res_km=2.5)\n", "exp_video.check()\n", "\n", "# impact function\n", @@ -1967,19 +2007,23 @@ "\n", "# compute sequence of hazards using TropCyclone video_intensity method\n", "exp_sea = add_sea(exp_video, (100, 5))\n", - "centr_video = Centroids.from_lat_lon(exp_sea.gdf['latitude'].values, exp_sea.gdf['longitude'].values)\n", + "centr_video = Centroids.from_lat_lon(\n", + " exp_sea.gdf[\"latitude\"].values, exp_sea.gdf[\"longitude\"].values\n", + ")\n", "centr_video.check()\n", "\n", - "track_name = '2017242N16333'\n", - "tr_irma = TCTracks.from_ibtracs_netcdf(provider='usa', storm_id=track_name) # IRMA 2017\n", + "track_name = \"2017242N16333\"\n", + "tr_irma = TCTracks.from_ibtracs_netcdf(provider=\"usa\", storm_id=track_name) # IRMA 2017\n", "\n", "tc_video = TropCyclone()\n", - "tc_list, _ = tc_video.video_intensity(track_name, tr_irma, centr_video) # empty file name to not to write the video\n", + "tc_list, _ = tc_video.video_intensity(\n", + " track_name, tr_irma, centr_video\n", + ") # empty file name to not to write the video\n", "\n", "# generate video of impacts\n", - "file_name='./results/irma_imp_fl.gif'\n", + "file_name = \"./results/irma_imp_fl.gif\"\n", "imp_video = Impact()\n", - "imp_list = imp_video.video_direct_impact(exp_video, impfs_video, tc_list, file_name)\n" + "imp_list = imp_video.video_direct_impact(exp_video, impfs_video, tc_list, file_name)" ] } ], diff --git a/doc/tutorial/climada_engine_impact_data.ipynb b/doc/tutorial/climada_engine_impact_data.ipynb index 443a6f414..40ead3d80 100644 --- a/doc/tutorial/climada_engine_impact_data.ipynb +++ b/doc/tutorial/climada_engine_impact_data.ipynb @@ -46,11 +46,15 @@ "from matplotlib import pyplot as plt\n", "\n", "from climada.util.constants import DEMO_DIR\n", - "from climada.engine.impact_data import emdat_countries_by_hazard, \\\n", - " emdat_impact_yearlysum, emdat_to_impact, clean_emdat_df\n", + "from climada.engine.impact_data import (\n", + " emdat_countries_by_hazard,\n", + " emdat_impact_yearlysum,\n", + " emdat_to_impact,\n", + " clean_emdat_df,\n", + ")\n", "\n", "# set path to CSV file downloaded from https://public.emdat.be :\n", - "emdat_file_path = 
DEMO_DIR.joinpath('demo_emdat_impact_data_2020.csv')" + "emdat_file_path = DEMO_DIR.joinpath(\"demo_emdat_impact_data_2020.csv\")" ] }, { @@ -129,8 +133,12 @@ "source": [ "\"\"\"Create DataFrame df with EM-DAT entries of tropical cyclones in Thailand and Viet Nam in the years 2005 and 2006\"\"\"\n", "\n", - "df = clean_emdat_df(emdat_file_path, countries=['THA', 'Viet Nam'], hazard=['TC'], \\\n", - " year_range=[2005, 2006])\n", + "df = clean_emdat_df(\n", + " emdat_file_path,\n", + " countries=[\"THA\", \"Viet Nam\"],\n", + " hazard=[\"TC\"],\n", + " year_range=[2005, 2006],\n", + ")\n", "print(df)" ] }, @@ -160,7 +168,9 @@ "source": [ "\"\"\"emdat_countries_by_hazard: get lists of countries impacted by tropical cyclones from 2010 to 2019\"\"\"\n", "\n", - "iso3_codes, country_names = emdat_countries_by_hazard(emdat_file_path, hazard='TC', year_range=(2010, 2019))\n", + "iso3_codes, country_names = emdat_countries_by_hazard(\n", + " emdat_file_path, hazard=\"TC\", year_range=(2010, 2019)\n", + ")\n", "\n", "print(country_names)\n", "\n", @@ -214,11 +224,18 @@ "source": [ "\"\"\"Global TC damages 2000 to 2009\"\"\"\n", "\n", - "impact_emdat, countries = emdat_to_impact(emdat_file_path, 'TC', year_range=(2000,2009))\n", - "\n", - "print('Number of TC events in EM-DAT 2000 to 2009 globally: %i' %(impact_emdat.event_id.size))\n", - "print('Global annual average monetary damage (AAI) from TCs as reported in EM-DAT 2000 to 2009: USD billion %2.2f' \\\n", - " %(impact_emdat.aai_agg/1e9))\n" + "impact_emdat, countries = emdat_to_impact(\n", + " emdat_file_path, \"TC\", year_range=(2000, 2009)\n", + ")\n", + "\n", + "print(\n", + " \"Number of TC events in EM-DAT 2000 to 2009 globally: %i\"\n", + " % (impact_emdat.event_id.size)\n", + ")\n", + "print(\n", + " \"Global annual average monetary damage (AAI) from TCs as reported in EM-DAT 2000 to 2009: USD billion %2.2f\"\n", + " % (impact_emdat.aai_agg / 1e9)\n", + ")" ] }, { @@ -267,26 +284,34 @@ "\"\"\"Total people affected by TCs in the Philippines in 2013:\"\"\"\n", "\n", "# People affected\n", - "impact_emdat_PHL, countries = emdat_to_impact(emdat_file_path, 'TC', countries='PHL', \\\n", - " year_range=(2013,2013), imp_str=\"Total Affected\")\n", - "\n", - "print('Number of TC events in EM-DAT in the Philipppines, 2013: %i' \\\n", - " %(impact_emdat_PHL.event_id.size))\n", - "print('\\nPeople affected by TC events in the Philippines in 2013 (per event):')\n", + "impact_emdat_PHL, countries = emdat_to_impact(\n", + " emdat_file_path,\n", + " \"TC\",\n", + " countries=\"PHL\",\n", + " year_range=(2013, 2013),\n", + " imp_str=\"Total Affected\",\n", + ")\n", + "\n", + "print(\n", + " \"Number of TC events in EM-DAT in the Philipppines, 2013: %i\"\n", + " % (impact_emdat_PHL.event_id.size)\n", + ")\n", + "print(\"\\nPeople affected by TC events in the Philippines in 2013 (per event):\")\n", "print(impact_emdat_PHL.at_event)\n", - "print('\\nPeople affected by TC events in the Philippines in 2013 (total):')\n", + "print(\"\\nPeople affected by TC events in the Philippines in 2013 (total):\")\n", "print(int(impact_emdat_PHL.aai_agg))\n", "\n", "# Comparison to monetary damages:\n", - "impact_emdat_PHL_USD, _ = emdat_to_impact(emdat_file_path, 'TC', countries='PHL', \\\n", - " year_range=(2013,2013))\n", + "impact_emdat_PHL_USD, _ = emdat_to_impact(\n", + " emdat_file_path, \"TC\", countries=\"PHL\", year_range=(2013, 2013)\n", + ")\n", "\n", "ax = plt.scatter(impact_emdat_PHL_USD.at_event, impact_emdat_PHL.at_event)\n", - "plt.title('Typhoon impacts 
in the Philippines, 2013')\n", - "plt.xlabel('Total Damage [USD]')\n", - "plt.ylabel('People Affected');\n", - "#plt.xscale('log')\n", - "#plt.yscale('log')" + "plt.title(\"Typhoon impacts in the Philippines, 2013\")\n", + "plt.xlabel(\"Total Damage [USD]\")\n", + "plt.ylabel(\"People Affected\");\n", + "# plt.xscale('log')\n", + "# plt.yscale('log')" ] }, { @@ -352,23 +377,40 @@ "source": [ "\"\"\"Yearly TC damages in the USA, normalized and current\"\"\"\n", "\n", - "yearly_damage_normalized_to_2019 = emdat_impact_yearlysum(emdat_file_path, countries='USA', \\\n", - " hazard='Tropical cyclone', year_range=None, \\\n", - " reference_year=2019)\n", + "yearly_damage_normalized_to_2019 = emdat_impact_yearlysum(\n", + " emdat_file_path,\n", + " countries=\"USA\",\n", + " hazard=\"Tropical cyclone\",\n", + " year_range=None,\n", + " reference_year=2019,\n", + ")\n", "\n", - "yearly_damage_current = emdat_impact_yearlysum(emdat_file_path, countries=['USA'], hazard='TC',)\n", + "yearly_damage_current = emdat_impact_yearlysum(\n", + " emdat_file_path,\n", + " countries=[\"USA\"],\n", + " hazard=\"TC\",\n", + ")\n", "\n", "import matplotlib.pyplot as plt\n", "\n", "fig, axis = plt.subplots(1, 1)\n", - "axis.plot(yearly_damage_current.year, yearly_damage_current.impact, 'b', label='USD current value')\n", - "axis.plot(yearly_damage_normalized_to_2019.year, yearly_damage_normalized_to_2019.impact_scaled, \\\n", - " 'r--', label='USD normalized to 2019')\n", + "axis.plot(\n", + " yearly_damage_current.year,\n", + " yearly_damage_current.impact,\n", + " \"b\",\n", + " label=\"USD current value\",\n", + ")\n", + "axis.plot(\n", + " yearly_damage_normalized_to_2019.year,\n", + " yearly_damage_normalized_to_2019.impact_scaled,\n", + " \"r--\",\n", + " label=\"USD normalized to 2019\",\n", + ")\n", "plt.legend()\n", - "axis.set_title('TC damage reported in EM-DAT in the USA')\n", + "axis.set_title(\"TC damage reported in EM-DAT in the USA\")\n", "axis.set_xticks([2000, 2004, 2008, 2012, 2016])\n", - "axis.set_xlabel('year')\n", - "axis.set_ylabel('Total Damage [USD]');\n" + "axis.set_xlabel(\"year\")\n", + "axis.set_ylabel(\"Total Damage [USD]\");" ] } ], diff --git a/doc/tutorial/climada_engine_unsequa.ipynb b/doc/tutorial/climada_engine_unsequa.ipynb index 08558632e..a7f6fabd6 100644 --- a/doc/tutorial/climada_engine_unsequa.ipynb +++ b/doc/tutorial/climada_engine_unsequa.ipynb @@ -154,11 +154,13 @@ "outputs": [], "source": [ "import warnings\n", - "warnings.filterwarnings('ignore') #Ignore warnings for making the tutorial's pdf.\n", "\n", - "#Define the base exposure\n", + "warnings.filterwarnings(\"ignore\") # Ignore warnings for making the tutorial's pdf.\n", + "\n", + "# Define the base exposure\n", "from climada.util.constants import EXP_DEMO_H5\n", "from climada.entity import Exposures\n", + "\n", "exp_base = Exposures.from_hdf5(EXP_DEMO_H5)" ] }, @@ -177,7 +179,7 @@ "# Here x_exp is the input uncertainty parameter and exp_func the inputvar.func.\n", "def exp_func(x_exp, exp_base=exp_base):\n", " exp = exp_base.copy()\n", - " exp.gdf['value'] *= x_exp\n", + " exp.gdf[\"value\"] *= x_exp\n", " return exp" ] }, @@ -197,8 +199,9 @@ "from climada.engine.unsequa import InputVar\n", "import scipy as sp\n", "\n", - "exp_distr = {\"x_exp\": sp.stats.uniform(0.9, 0.2),\n", - " }\n", + "exp_distr = {\n", + " \"x_exp\": sp.stats.uniform(0.9, 0.2),\n", + "}\n", "exp_iv = InputVar(exp_func, exp_distr)" ] }, @@ -249,8 +252,10 @@ ], "source": [ "# Evaluate for a given value of the uncertainty parameters\n", - 
"exp095 = exp_iv.func(x_exp = 0.95)\n", - "print(f\"Base value is {exp_base.gdf['value'].sum()}, and the value for x_exp=0.95 is {exp095.gdf['value'].sum()}\")" + "exp095 = exp_iv.func(x_exp=0.95)\n", + "print(\n", + " f\"Base value is {exp_base.gdf['value'].sum()}, and the value for x_exp=0.95 is {exp095.gdf['value'].sum()}\"\n", + ")" ] }, { @@ -315,11 +320,12 @@ "m_min, m_max = (1, 2)\n", "n_min, n_max = (1, 2)\n", "\n", + "\n", "# Define the function\n", "# Note that this here works, but might be slow because the method LitPop is called everytime the the function\n", "# is evaluated, and LitPop is relatively slow.\n", "def litpop_cat(m, n):\n", - " exp = Litpop.from_countries('CHE', res_arcsec=150, exponent=[m, n])\n", + " exp = Litpop.from_countries(\"CHE\", res_arcsec=150, exponent=[m, n])\n", " return exp" ] }, @@ -341,9 +347,10 @@ "litpop_dict = {}\n", "for m in range(m_min, m_max + 1):\n", " for n in range(n_min, n_max + 1):\n", - " exp_mn = LitPop.from_countries('CHE', res_arcsec=150, exponents=[m, n]);\n", + " exp_mn = LitPop.from_countries(\"CHE\", res_arcsec=150, exponents=[m, n])\n", " litpop_dict[(m, n)] = exp_mn\n", "\n", + "\n", "def litpop_cat(m, n, litpop_dict=litpop_dict):\n", " return litpop_dict[(m, n)]" ] @@ -360,16 +367,18 @@ }, "outputs": [], "source": [ - "#Define the distribution dictionnary\n", + "# Define the distribution dictionnary\n", "import scipy as sp\n", "from climada.engine.unsequa import InputVar\n", "\n", "distr_dict = {\n", - " 'm': sp.stats.randint(low=m_min, high=m_max+1),\n", - " 'n': sp.stats.randint(low=n_min, high=n_max+1)\n", - " }\n", + " \"m\": sp.stats.randint(low=m_min, high=m_max + 1),\n", + " \"n\": sp.stats.randint(low=n_min, high=n_max + 1),\n", + "}\n", "\n", - "cat_iv = InputVar(litpop_cat, distr_dict) # One can use either of the above definitions of litpop_cat" + "cat_iv = InputVar(\n", + " litpop_cat, distr_dict\n", + ") # One can use either of the above definitions of litpop_cat" ] }, { @@ -578,8 +587,9 @@ "# Requires internet connection\n", "from climada.util.constants import TEST_UNC_OUTPUT_IMPACT\n", "from climada.util.api_client import Client\n", + "\n", "apiclient = Client()\n", - "ds = apiclient.get_dataset_info(name=TEST_UNC_OUTPUT_IMPACT, status='test_dataset')\n", + "ds = apiclient.get_dataset_info(name=TEST_UNC_OUTPUT_IMPACT, status=\"test_dataset\")\n", "_target_dir, [filename] = apiclient.download_dataset(ds)" ] }, @@ -597,6 +607,7 @@ "source": [ "# If you produced your own data, you do not need the API. Just replace 'filename' with the path to your file.\n", "from climada.engine.unsequa import UncOutput\n", + "\n", "unc_imp = UncOutput.from_hdf5(filename)" ] }, @@ -623,7 +634,7 @@ } ], "source": [ - "unc_imp.plot_uncertainty(metric_list=['aai_agg'], figsize=(12,5));" + "unc_imp.plot_uncertainty(metric_list=[\"aai_agg\"], figsize=(12, 5));" ] }, { @@ -642,8 +653,9 @@ "# Requires internet connection\n", "from climada.util.constants import TEST_UNC_OUTPUT_COSTBEN\n", "from climada.util.api_client import Client\n", + "\n", "apiclient = Client()\n", - "ds = apiclient.get_dataset_info(name=TEST_UNC_OUTPUT_COSTBEN, status='test_dataset')\n", + "ds = apiclient.get_dataset_info(name=TEST_UNC_OUTPUT_COSTBEN, status=\"test_dataset\")\n", "_target_dir, [filename] = apiclient.download_dataset(ds)" ] }, @@ -661,6 +673,7 @@ "source": [ "# If you produced your own data, you do not need the API. 
Just replace 'filename' with the path to your file.\n", "from climada.engine.unsequa import UncOutput\n", + "\n", "unc_cb = UncOutput.from_hdf5(filename)" ] }, @@ -955,25 +968,27 @@ }, "outputs": [], "source": [ - "#Define the input variable functions\n", + "# Define the input variable functions\n", "import numpy as np\n", "\n", "from climada.entity import ImpactFunc, ImpactFuncSet, Exposures\n", "from climada.util.constants import EXP_DEMO_H5, HAZ_DEMO_H5\n", "from climada.hazard import Hazard\n", "\n", + "\n", "def impf_func(G=1, v_half=84.7, vmin=25.7, k=3, _id=1):\n", "\n", " def xhi(v, v_half, vmin):\n", " return max([(v - vmin), 0]) / (v_half - vmin)\n", "\n", " def sigmoid_func(v, G, v_half, vmin, k):\n", - " return G * xhi(v, v_half, vmin)**k / (1 + xhi(v, v_half, vmin)**k)\n", + " return G * xhi(v, v_half, vmin) ** k / (1 + xhi(v, v_half, vmin) ** k)\n", "\n", - " #In-function imports needed only for parallel computing on Windows\n", + " # In-function imports needed only for parallel computing on Windows\n", " import numpy as np\n", " from climada.entity import ImpactFunc, ImpactFuncSet\n", - " intensity_unit = 'm/s'\n", + "\n", + " intensity_unit = \"m/s\"\n", " intensity = np.linspace(0, 150, num=100)\n", " mdd = np.repeat(1, len(intensity))\n", " paa = np.array([sigmoid_func(v, G, v_half, vmin, k) for v in intensity])\n", @@ -982,16 +997,22 @@ " impf_set = ImpactFuncSet([imp_fun])\n", " return impf_set\n", "\n", + "\n", "haz = Hazard.from_hdf5(HAZ_DEMO_H5)\n", "exp_base = Exposures.from_hdf5(EXP_DEMO_H5)\n", - "#It is a good idea to assign the centroids to the base exposures in order to avoid repeating this\n", + "# It is a good idea to assign the centroids to the base exposures in order to avoid repeating this\n", "# potentially costly operation for each sample.\n", "exp_base.assign_centroids(haz)\n", + "\n", + "\n", "def exp_base_func(x_exp, exp_base):\n", " exp = exp_base.copy()\n", - " exp.gdf['value'] *= x_exp\n", + " exp.gdf[\"value\"] *= x_exp\n", " return exp\n", + "\n", + "\n", "from functools import partial\n", + "\n", "exp_func = partial(exp_base_func, exp_base=exp_base)" ] }, @@ -1018,7 +1039,7 @@ ], "source": [ "# Visualization of the parametrized impact function\n", - "impf_func(G=0.8, v_half=80, vmin=30,k=5).plot();" + "impf_func(G=0.8, v_half=80, vmin=30, k=5).plot();" ] }, { @@ -1032,13 +1053,15 @@ }, "outputs": [], "source": [ - "#Define the InputVars\n", + "# Define the InputVars\n", "\n", "import scipy as sp\n", "from climada.engine.unsequa import InputVar\n", "\n", - "exp_distr = {\"x_exp\": sp.stats.beta(10, 1.1)} #This is not really a reasonable distribution but is used\n", - " #here to show that you can use any scipy distribution.\n", + "exp_distr = {\n", + " \"x_exp\": sp.stats.beta(10, 1.1)\n", + "} # This is not really a reasonable distribution but is used\n", + "# here to show that you can use any scipy distribution.\n", "\n", "exp_iv = InputVar(exp_func, exp_distr)\n", "\n", @@ -1046,8 +1069,8 @@ " \"G\": sp.stats.truncnorm(0.5, 1.5),\n", " \"v_half\": sp.stats.uniform(35, 65),\n", " \"vmin\": sp.stats.uniform(0, 15),\n", - " \"k\": sp.stats.uniform(1, 4)\n", - " }\n", + " \"k\": sp.stats.uniform(1, 4),\n", + "}\n", "impf_iv = InputVar(impf_func, impf_distr)" ] }, @@ -1074,8 +1097,9 @@ ], "source": [ "import matplotlib.pyplot as plt\n", - "ax = exp_iv.plot(figsize=(6,4));\n", - "plt.yticks(fontsize=16);\n", + "\n", + "ax = exp_iv.plot(figsize=(6, 4))\n", + "plt.yticks(fontsize=16)\n", "plt.xticks(fontsize=16);" ] }, @@ -1215,7 +1239,7 @@ } ], 
"source": [ - "output_imp = calc_imp.make_sample(N=2**7, sampling_kwargs={'skip_values': 2**8})\n", + "output_imp = calc_imp.make_sample(N=2**7, sampling_kwargs={\"skip_values\": 2**8})\n", "output_imp.get_samples_df().tail()" ] }, @@ -1248,7 +1272,7 @@ } ], "source": [ - "output_imp.plot_sample(figsize=(15,8));" + "output_imp.plot_sample(figsize=(15, 8));" ] }, { @@ -1269,7 +1293,7 @@ }, "outputs": [], "source": [ - "output_imp = calc_imp.uncertainty(output_imp, rp = [50, 100, 250])" + "output_imp = calc_imp.uncertainty(output_imp, rp=[50, 100, 250])" ] }, { @@ -1306,7 +1330,7 @@ } ], "source": [ - "#All the computed uncertainty metrics attribute\n", + "# All the computed uncertainty metrics attribute\n", "output_imp.uncertainty_metrics" ] }, @@ -1384,8 +1408,8 @@ } ], "source": [ - "#One uncertainty dataframe\n", - "output_imp.get_unc_df('aai_agg').tail()" + "# One uncertainty dataframe\n", + "output_imp.get_unc_df(\"aai_agg\").tail()" ] }, { @@ -1519,7 +1543,7 @@ } ], "source": [ - "output_imp.plot_uncertainty(figsize=(12,12));" + "output_imp.plot_uncertainty(figsize=(12, 12));" ] }, { @@ -1552,7 +1576,7 @@ ], "source": [ "# Specific plot for the return period distributions\n", - "output_imp.plot_rp_uncertainty(figsize=(14.3,8));" + "output_imp.plot_rp_uncertainty(figsize=(14.3, 8));" ] }, { @@ -1704,7 +1728,7 @@ } ], "source": [ - "output_imp.get_sens_df('aai_agg').tail()" + "output_imp.get_sens_df(\"aai_agg\").tail()" ] }, { @@ -1824,7 +1848,7 @@ } ], "source": [ - "output_imp.get_sensitivity('S1')" + "output_imp.get_sensitivity(\"S1\")" ] }, { @@ -1918,7 +1942,7 @@ } ], "source": [ - "output_imp.get_largest_si(salib_si='S1')" + "output_imp.get_largest_si(salib_si=\"S1\")" ] }, { @@ -1953,7 +1977,7 @@ ], "source": [ "# Default for 'sobol' is to plot 'S1' sensitivity index.\n", - "output_imp.plot_sensitivity(figsize=(12,8));" + "output_imp.plot_sensitivity(figsize=(12, 8));" ] }, { @@ -1985,7 +2009,7 @@ } ], "source": [ - "output_imp.plot_sensitivity(salib_si = 'ST', figsize=(12,8));" + "output_imp.plot_sensitivity(salib_si=\"ST\", figsize=(12, 8));" ] }, { @@ -2017,7 +2041,7 @@ } ], "source": [ - "output_imp.plot_sensitivity_second_order(figsize=(12,8));" + "output_imp.plot_sensitivity_second_order(figsize=(12, 8));" ] }, { @@ -2050,7 +2074,7 @@ "from climada.engine.unsequa import CalcImpact\n", "\n", "calc_imp2 = CalcImpact(exp_iv, impf_iv, haz)\n", - "output_imp2 = calc_imp2.make_sample(N=1000, sampling_method='latin')" + "output_imp2 = calc_imp2.make_sample(N=1000, sampling_method=\"latin\")" ] }, { @@ -2075,7 +2099,7 @@ } ], "source": [ - "output_imp2.plot_sample(figsize=(15,8));" + "output_imp2.plot_sample(figsize=(15, 8));" ] }, { @@ -2104,13 +2128,15 @@ "import time\n", "\n", "calc_imp2 = CalcImpact(exp_iv, impf_iv, haz)\n", - "output_imp2 = calc_imp2.make_sample(N=1000, sampling_method='latin')\n", + "output_imp2 = calc_imp2.make_sample(N=1000, sampling_method=\"latin\")\n", "\n", "start = time.time()\n", - "output_imp2 = calc_imp2.uncertainty(output_imp2, rp = [50, 100, 250], calc_eai_exp=True, calc_at_event=True, processes=4)\n", + "output_imp2 = calc_imp2.uncertainty(\n", + " output_imp2, rp=[50, 100, 250], calc_eai_exp=True, calc_at_event=True, processes=4\n", + ")\n", "end = time.time()\n", - "time_passed = end-start\n", - "print(f'Time passed with pool: {time_passed}')" + "time_passed = end - start\n", + "print(f\"Time passed with pool: {time_passed}\")" ] }, { @@ -2148,13 +2174,15 @@ ], "source": [ "calc_imp2 = CalcImpact(exp_iv, impf_iv, haz)\n", - "output_imp2 = 
calc_imp2.make_sample(N=1000, sampling_method='latin')\n", + "output_imp2 = calc_imp2.make_sample(N=1000, sampling_method=\"latin\")\n", "\n", "start2 = time.time()\n", - "output_imp2 = calc_imp2.uncertainty(output_imp2, rp = [50, 100, 250], calc_eai_exp=True, calc_at_event=True)\n", + "output_imp2 = calc_imp2.uncertainty(\n", + " output_imp2, rp=[50, 100, 250], calc_eai_exp=True, calc_at_event=True\n", + ")\n", "end2 = time.time()\n", - "time_passed_nopool = end2-start2\n", - "print(f'Time passed without pool: {time_passed_nopool}')" + "time_passed_nopool = end2 - start2\n", + "print(f\"Time passed without pool: {time_passed_nopool}\")" ] }, { @@ -2170,10 +2198,11 @@ "source": [ "# Add the original value of the impacts (without uncertainty) to the uncertainty plot\n", "from climada.engine import ImpactCalc\n", + "\n", "imp = ImpactCalc(exp_base, impf_func(), haz).impact(assign_centroids=False)\n", "aai_agg_o = imp.aai_agg\n", "freq_curve_o = imp.calc_freq_curve([50, 100, 250]).impact\n", - "orig_list = [aai_agg_o] + list(freq_curve_o) +[1]" + "orig_list = [aai_agg_o] + list(freq_curve_o) + [1]" ] }, { @@ -2201,7 +2230,12 @@ "source": [ "# plot the aai_agg and freq_curve uncertainty only\n", "# use logarithmic x-scale\n", - "output_imp2.plot_uncertainty(metric_list=['aai_agg', 'freq_curve'], orig_list=orig_list, log=True, figsize=(12,8));" + "output_imp2.plot_uncertainty(\n", + " metric_list=[\"aai_agg\", \"freq_curve\"],\n", + " orig_list=orig_list,\n", + " log=True,\n", + " figsize=(12, 8),\n", + ");" ] }, { @@ -2217,7 +2251,9 @@ "source": [ "# Use the method 'rbd_fast' which is recommend in pair with 'latin'. In addition, change one of the kwargs\n", "# (M=15) of the salib sampling method.\n", - "output_imp2 = calc_imp2.sensitivity(output_imp2, sensitivity_method='rbd_fast', sensitivity_kwargs = {'M': 15})" + "output_imp2 = calc_imp2.sensitivity(\n", + " output_imp2, sensitivity_method=\"rbd_fast\", sensitivity_kwargs={\"M\": 15}\n", + ")" ] }, { @@ -2345,7 +2381,7 @@ } ], "source": [ - "output_imp2.get_largest_si(salib_si='S1', metric_list=['eai_exp']).tail()" + "output_imp2.get_largest_si(salib_si=\"S1\", metric_list=[\"eai_exp\"]).tail()" ] }, { @@ -2401,16 +2437,17 @@ "from climada.util.constants import EXP_DEMO_H5, HAZ_DEMO_H5\n", "from climada.hazard import Centroids, TCTracks, Hazard, TropCyclone\n", "\n", + "\n", "def impf_func(G=1, v_half=84.7, vmin=25.7, k=3, _id=1):\n", "\n", " def xhi(v, v_half, vmin):\n", " return max([(v - vmin), 0]) / (v_half - vmin)\n", "\n", " def sigmoid_func(v, G, v_half, vmin, k):\n", - " return G * xhi(v, v_half, vmin)**k / (1 + xhi(v, v_half, vmin)**k)\n", + " return G * xhi(v, v_half, vmin) ** k / (1 + xhi(v, v_half, vmin) ** k)\n", "\n", - " #In-function imports needed only for parallel computing on Windows\n", - " intensity_unit = 'm/s'\n", + " # In-function imports needed only for parallel computing on Windows\n", + " intensity_unit = \"m/s\"\n", " intensity = np.linspace(0, 150, num=100)\n", " mdd = np.repeat(1, len(intensity))\n", " paa = np.array([sigmoid_func(v, G, v_half, vmin, k) for v in intensity])\n", @@ -2446,7 +2483,7 @@ "# pack future hazard sets into dictionary - we want to sample from this dictionary later\n", "haz_fut_list = [haz_26, haz_45, haz_60, haz_85]\n", "tc_haz_fut_dict = {}\n", - "for r, rcp in enumerate(['26', '45', '60', '85']):\n", + "for r, rcp in enumerate([\"26\", \"45\", \"60\", \"85\"]):\n", " tc_haz_fut_dict[rcp] = haz_fut_list[r]" ] }, @@ -2457,14 +2494,19 @@ "outputs": [], "source": [ "exp_base = 
Exposures.from_hdf5(EXP_DEMO_H5)\n", - "#It is a good idea to assign the centroids to the base exposures in order to avoid repeating this\n", + "# It is a good idea to assign the centroids to the base exposures in order to avoid repeating this\n", "# potentially costly operation for each sample.\n", "exp_base.assign_centroids(haz)\n", + "\n", + "\n", "def exp_base_func(x_exp, exp_base):\n", " exp = exp_base.copy()\n", - " exp.gdf['value'] *= x_exp\n", + " exp.gdf[\"value\"] *= x_exp\n", " return exp\n", + "\n", + "\n", "from functools import partial\n", + "\n", "exp_func = partial(exp_base_func, exp_base=exp_base)" ] }, @@ -2477,8 +2519,10 @@ "import scipy as sp\n", "from climada.engine.unsequa import InputVar\n", "\n", - "exp_distr = {\"x_exp\": sp.stats.beta(10, 1.1)} #This is not really a reasonable distribution but is used\n", - " #here to show that you can use any scipy distribution.\n", + "exp_distr = {\n", + " \"x_exp\": sp.stats.beta(10, 1.1)\n", + "} # This is not really a reasonable distribution but is used\n", + "# here to show that you can use any scipy distribution.\n", "\n", "exp_iv = InputVar(exp_func, exp_distr)\n", "\n", @@ -2486,8 +2530,8 @@ " \"G\": sp.stats.truncnorm(0.5, 1.5),\n", " \"v_half\": sp.stats.uniform(35, 65),\n", " \"vmin\": sp.stats.uniform(0, 15),\n", - " \"k\": sp.stats.uniform(1, 4)\n", - " }\n", + " \"k\": sp.stats.uniform(1, 4),\n", + "}\n", "impf_iv = InputVar(impf_func, impf_distr)" ] }, @@ -2504,16 +2548,15 @@ "metadata": {}, "outputs": [], "source": [ - "rcp_key = {0: '26',\n", - " 1: '45',\n", - " 2: '60',\n", - " 3: '85'}\n", + "rcp_key = {0: \"26\", 1: \"45\", 2: \"60\", 3: \"85\"}\n", + "\n", "\n", "# future\n", "def haz_fut_func(rcp_scenario):\n", " haz_fut = tc_haz_fut_dict[rcp_key[rcp_scenario]]\n", " return haz_fut\n", "\n", + "\n", "haz_fut_distr = {\"rcp_scenario\": sp.stats.randint(0, 4)}\n", "\n", "haz_fut_iv = InputVar(haz_fut_func, haz_fut_distr)" @@ -2573,8 +2616,8 @@ ], "source": [ "from climada.engine.unsequa import CalcDeltaImpact\n", - "calc_imp = CalcDeltaImpact(exp_iv, impf_iv, haz,\n", - " exp_iv, impf_iv, haz_fut_iv)" + "\n", + "calc_imp = CalcDeltaImpact(exp_iv, impf_iv, haz, exp_iv, impf_iv, haz_fut_iv)" ] }, { @@ -2639,6 +2682,7 @@ ], "source": [ "from climada.engine.unsequa import UncOutput\n", + "\n", "output_imp.plot_uncertainty(calc_delta=True)" ] }, @@ -2687,6 +2731,7 @@ ], "source": [ "from climada.engine.unsequa import UncOutput\n", + "\n", "output_imp.plot_rp_uncertainty(calc_delta=True)" ] }, @@ -2790,39 +2835,50 @@ "from climada.entity import Entity\n", "from climada.hazard import Hazard\n", "\n", + "\n", "# Entity today has an uncertainty in the total asset value\n", "def ent_today_func(x_ent):\n", - " #In-function imports needed only for parallel computing on Windows\n", + " # In-function imports needed only for parallel computing on Windows\n", " from climada.entity import Entity\n", " from climada.util.constants import ENT_DEMO_TODAY\n", + "\n", " entity = Entity.from_excel(ENT_DEMO_TODAY)\n", " entity.exposures.ref_year = 2018\n", - " entity.exposures.gdf['value'] *= x_ent\n", + " entity.exposures.gdf[\"value\"] *= x_ent\n", " return entity\n", "\n", + "\n", "# Entity in the future has a +- 10% uncertainty in the cost of all the adapatation measures\n", "def ent_fut_func(m_fut_cost):\n", - " #In-function imports needed only for parallel computing on Windows\n", + " # In-function imports needed only for parallel computing on Windows\n", " from climada.entity import Entity\n", " from climada.util.constants 
import ENT_DEMO_FUTURE\n", + "\n", " entity = Entity.from_excel(ENT_DEMO_FUTURE)\n", " entity.exposures.ref_year = 2040\n", - " for meas in entity.measures.get_measure('TC'):\n", + " for meas in entity.measures.get_measure(\"TC\"):\n", " meas.cost *= m_fut_cost\n", " return entity\n", "\n", + "\n", "haz_base = Hazard.from_hdf5(HAZ_DEMO_H5)\n", + "\n", + "\n", "# The hazard intensity in the future is also uncertainty by a multiplicative factor\n", "def haz_fut(x_haz_fut, haz_base):\n", - " #In-function imports needed only for parallel computing on Windows\n", + " # In-function imports needed only for parallel computing on Windows\n", " import copy\n", " from climada.hazard import Hazard\n", " from climada.util.constants import HAZ_DEMO_H5\n", + "\n", " haz = copy.deepcopy(haz_base)\n", " haz.intensity = haz.intensity.multiply(x_haz_fut)\n", " return haz\n", + "\n", + "\n", "from functools import partial\n", - "haz_fut_func = partial(haz_fut, haz_base=haz_base)\n" + "\n", + "haz_fut_func = partial(haz_fut, haz_base=haz_base)" ] }, { @@ -2853,10 +2909,12 @@ } ], "source": [ - "costs_1 = [meas.cost for meas in ent_fut_func(1).measures.get_measure('TC')]\n", - "costs_05 = [meas.cost for meas in ent_fut_func(0.5).measures.get_measure('TC')]\n", - "print(f\"\\nThe cost for m_fut_cost=1 are {costs_1}\\n\"\n", - " f\"The cost for m_fut_cost=0.5 are {costs_05}\");" + "costs_1 = [meas.cost for meas in ent_fut_func(1).measures.get_measure(\"TC\")]\n", + "costs_05 = [meas.cost for meas in ent_fut_func(0.5).measures.get_measure(\"TC\")]\n", + "print(\n", + " f\"\\nThe cost for m_fut_cost=1 are {costs_1}\\n\"\n", + " f\"The cost for m_fut_cost=0.5 are {costs_05}\"\n", + ");" ] }, { @@ -2882,14 +2940,15 @@ "\n", "haz_today = haz_base\n", "\n", - "haz_fut_distr = {\"x_haz_fut\": sp.stats.uniform(1, 3),\n", - " }\n", + "haz_fut_distr = {\n", + " \"x_haz_fut\": sp.stats.uniform(1, 3),\n", + "}\n", "haz_fut_iv = InputVar(haz_fut_func, haz_fut_distr)\n", "\n", - "ent_today_distr = {\"x_ent\": sp.stats.uniform(0.7, 1)}\n", + "ent_today_distr = {\"x_ent\": sp.stats.uniform(0.7, 1)}\n", "ent_today_iv = InputVar(ent_today_func, ent_today_distr)\n", "\n", - "ent_fut_distr = {\"m_fut_cost\": sp.stats.norm(1, 0.1)}\n", + "ent_fut_distr = {\"m_fut_cost\": sp.stats.norm(1, 0.1)}\n", "ent_fut_iv = InputVar(ent_fut_func, ent_fut_distr)" ] }, @@ -3042,8 +3101,12 @@ "source": [ "from climada.engine.unsequa import CalcCostBenefit\n", "\n", - "unc_cb = CalcCostBenefit(haz_input_var=haz_today, ent_input_var=ent_today_iv,\n", - " haz_fut_input_var=haz_fut_iv, ent_fut_input_var=ent_fut_iv)" + "unc_cb = CalcCostBenefit(\n", + " haz_input_var=haz_today,\n", + " ent_input_var=ent_today_iv,\n", + " haz_fut_input_var=haz_fut_iv,\n", + " ent_fut_input_var=ent_fut_iv,\n", + ")" ] }, { @@ -3132,7 +3195,7 @@ } ], "source": [ - "output_cb= unc_cb.make_sample(N=10, sampling_kwargs={'calc_second_order':False})\n", + "output_cb = unc_cb.make_sample(N=10, sampling_kwargs={\"calc_second_order\": False})\n", "output_cb.get_samples_df().tail()" ] }, @@ -4626,12 +4689,11 @@ } ], "source": [ - "\n", - "#without pool\n", + "# without pool\n", "output_cb = unc_cb.uncertainty(output_cb)\n", "\n", - "#with pool\n", - "output_cb = unc_cb.uncertainty(output_cb, processes=4)\n" + "# with pool\n", + "output_cb = unc_cb.uncertainty(output_cb, processes=4)" ] }, { @@ -4667,7 +4729,7 @@ } ], "source": [ - "#Top level metrics keys\n", + "# Top level metrics keys\n", "macro_metrics = output_cb.uncertainty_metrics\n", "macro_metrics" ] @@ -4803,7 +4865,7 @@ ], 
"source": [ "# The benefits and cost_ben_ratio are available for each measure\n", - "output_cb.get_uncertainty(metric_list=['benefit', 'cost_ben_ratio']).tail()" + "output_cb.get_uncertainty(metric_list=[\"benefit\", \"cost_ben_ratio\"]).tail()" ] }, { @@ -5073,7 +5135,7 @@ "source": [ "# The impact_meas_present and impact_meas_future provide values of the cost_meas, risk_transf, risk,\n", "# and cost_ins for each measure\n", - "output_cb.get_uncertainty(metric_list=['imp_meas_present']).tail()" + "output_cb.get_uncertainty(metric_list=[\"imp_meas_present\"]).tail()" ] }, { @@ -5106,7 +5168,7 @@ ], "source": [ "# tot_climate_risk and benefit\n", - "output_cb.plot_uncertainty(metric_list=['benefit'], figsize=(12,8));" + "output_cb.plot_uncertainty(metric_list=[\"benefit\"], figsize=(12, 8));" ] }, { @@ -5127,7 +5189,9 @@ }, "outputs": [], "source": [ - "output_cb = unc_cb.sensitivity(output_cb, sensitivity_kwargs={'calc_second_order':False})" + "output_cb = unc_cb.sensitivity(\n", + " output_cb, sensitivity_kwargs={\"calc_second_order\": False}\n", + ")" ] }, { @@ -5161,8 +5225,10 @@ } ], "source": [ - "#plot only certain metrics\n", - "axes = output_cb.plot_sensitivity(metric_list=['cost_ben_ratio','tot_climate_risk','benefit'], figsize=(12,8));" + "# plot only certain metrics\n", + "axes = output_cb.plot_sensitivity(\n", + " metric_list=[\"cost_ben_ratio\", \"tot_climate_risk\", \"benefit\"], figsize=(12, 8)\n", + ");" ] }, { @@ -5216,6 +5282,7 @@ "outputs": [], "source": [ "from climada.util.api_client import Client\n", + "\n", "client = Client()" ] }, @@ -5231,9 +5298,9 @@ "\n", "def get_ws(iso):\n", " properties = {\n", - " 'country_iso3alpha': iso,\n", + " \"country_iso3alpha\": iso,\n", " }\n", - " return client.get_hazard('storm_europe', properties=properties)\n" + " return client.get_hazard(\"storm_europe\", properties=properties)" ] }, { @@ -5242,12 +5309,12 @@ "metadata": {}, "outputs": [], "source": [ - "#Define list of exposures and/or of hazard files\n", + "# Define list of exposures and/or of hazard files\n", "\n", - "exp_list = [get_litpop(iso) for iso in ['CHE', 'DEU', 'ITA']]\n", - "haz_list = [get_ws(iso) for iso in ['CHE', 'DEU', 'ITA']]\n", + "exp_list = [get_litpop(iso) for iso in [\"CHE\", \"DEU\", \"ITA\"]]\n", + "haz_list = [get_ws(iso) for iso in [\"CHE\", \"DEU\", \"ITA\"]]\n", "for exp, haz in zip(exp_list, haz_list):\n", - " exp.gdf['impf_WS'] = 1\n", + " exp.gdf[\"impf_WS\"] = 1\n", " exp.assign_centroids(haz)" ] }, @@ -5257,7 +5324,7 @@ "metadata": {}, "outputs": [], "source": [ - "#Define the input variable\n", + "# Define the input variable\n", "from climada.entity import ImpactFuncSet, Exposures\n", "from climada.entity.impact_funcs.storm_europe import ImpfStormEurope\n", "from climada.hazard import Hazard\n", @@ -5265,31 +5332,40 @@ "import scipy as sp\n", "import copy\n", "\n", + "\n", "def exp_func(cnt, x_exp, exp_list=exp_list):\n", " exp = exp_list[int(cnt)].copy()\n", - " exp.gdf['value'] *= x_exp\n", + " exp.gdf[\"value\"] *= x_exp\n", " return exp\n", "\n", - "exp_distr = {\"x_exp\": sp.stats.uniform(0.9, 0.2),\n", - " \"cnt\": sp.stats.randint(low=0, high=len(exp_list)) #use the same parameter name accross input variables\n", - " }\n", + "\n", + "exp_distr = {\n", + " \"x_exp\": sp.stats.uniform(0.9, 0.2),\n", + " \"cnt\": sp.stats.randint(\n", + " low=0, high=len(exp_list)\n", + " ), # use the same parameter name accross input variables\n", + "}\n", "exp_iv = InputVar(exp_func, exp_distr)\n", "\n", "\n", "def haz_func(cnt, i_haz, 
haz_list=haz_list):\n", - " haz = copy.deepcopy(haz_list[int(cnt)]) #use the same parameter name accross input variables\n", + " haz = copy.deepcopy(\n", + " haz_list[int(cnt)]\n", + " ) # use the same parameter name accross input variables\n", " haz.intensity *= i_haz\n", " return haz\n", "\n", - "haz_distr = {\"i_haz\": sp.stats.norm(1, 0.2),\n", - " \"cnt\": sp.stats.randint(low=0, high=len(haz_list))\n", - " }\n", + "\n", + "haz_distr = {\n", + " \"i_haz\": sp.stats.norm(1, 0.2),\n", + " \"cnt\": sp.stats.randint(low=0, high=len(haz_list)),\n", + "}\n", "haz_iv = InputVar(haz_func, haz_distr)\n", "\n", "impf = ImpfStormEurope.from_schwierz()\n", "impf_set = ImpactFuncSet()\n", "impf_set.append(impf)\n", - "impf_iv = InputVar.impfset([impf_set], bounds_mdd = [0.9, 1.1])" + "impf_iv = InputVar.impfset([impf_set], bounds_mdd=[0.9, 1.1])" ] }, { @@ -5321,7 +5397,7 @@ "metadata": {}, "outputs": [], "source": [ - "output_imp = calc_imp.make_sample(N=2**2, sampling_kwargs={'skip_values': 2**3})\n" + "output_imp = calc_imp.make_sample(N=2**2, sampling_kwargs={\"skip_values\": 2**3})" ] }, { @@ -5457,6 +5533,7 @@ "outputs": [], "source": [ "from climada.util.api_client import Client\n", + "\n", "client = Client()" ] }, @@ -5468,21 +5545,26 @@ "source": [ "def get_litpop_path(iso):\n", " properties = {\n", - " 'country_iso3alpha': iso,\n", - " 'res_arcsec': '150',\n", - " 'exponents': '(1,1)',\n", - " 'fin_mode': 'pc'\n", + " \"country_iso3alpha\": iso,\n", + " \"res_arcsec\": \"150\",\n", + " \"exponents\": \"(1,1)\",\n", + " \"fin_mode\": \"pc\",\n", " }\n", - " litpop_datasets = client.list_dataset_infos(data_type='litpop', properties=properties)\n", + " litpop_datasets = client.list_dataset_infos(\n", + " data_type=\"litpop\", properties=properties\n", + " )\n", " ds = litpop_datasets[0]\n", " download_dir, ds_files = client.download_dataset(ds)\n", " return ds_files[0]\n", "\n", + "\n", "def get_ws_path(iso):\n", " properties = {\n", - " 'country_iso3alpha': iso,\n", + " \"country_iso3alpha\": iso,\n", " }\n", - " hazard_datasets = client.list_dataset_infos(data_type='storm_europe', properties=properties)\n", + " hazard_datasets = client.list_dataset_infos(\n", + " data_type=\"storm_europe\", properties=properties\n", + " )\n", " ds = hazard_datasets[0]\n", " download_dir, ds_files = client.download_dataset(ds)\n", " return ds_files[0]" @@ -5494,10 +5576,10 @@ "metadata": {}, "outputs": [], "source": [ - "#Define list of exposures and/or of hazard files\n", + "# Define list of exposures and/or of hazard files\n", "\n", - "f_exp_list = [get_litpop_path(iso) for iso in ['CHE', 'DEU', 'ITA']]\n", - "f_haz_list = [get_ws_path(iso) for iso in ['CHE', 'DEU', 'ITA']]" + "f_exp_list = [get_litpop_path(iso) for iso in [\"CHE\", \"DEU\", \"ITA\"]]\n", + "f_haz_list = [get_ws_path(iso) for iso in [\"CHE\", \"DEU\", \"ITA\"]]" ] }, { @@ -5506,40 +5588,43 @@ "metadata": {}, "outputs": [], "source": [ - "#Define the input variable for the loading files\n", - "#The trick is to not reload a file if it is already in memory. This is done using a global variable.\n", + "# Define the input variable for the loading files\n", + "# The trick is to not reload a file if it is already in memory. 
This is done using a global variable.\n", "from climada.entity import ImpactFunc, ImpactFuncSet, Exposures\n", "from climada.hazard import Hazard\n", "from climada.engine.unsequa import InputVar\n", "import scipy as sp\n", "import copy\n", "\n", + "\n", "def exp_func(f_exp, x_exp, filename_list=f_exp_list):\n", " filename = filename_list[int(f_exp)]\n", " global exp_base\n", - " if 'exp_base' in globals():\n", + " if \"exp_base\" in globals():\n", " if isinstance(exp_base, Exposures):\n", - " if exp_base.gdf['filename'] != str(filename):\n", + " if exp_base.gdf[\"filename\"] != str(filename):\n", " exp_base = Exposures.from_hdf5(filename)\n", - " exp_base.gdf['filename'] = str(filename)\n", + " exp_base.gdf[\"filename\"] = str(filename)\n", " else:\n", " exp_base = Exposures.from_hdf5(filename)\n", - " exp_base.gdf['filename'] = str(filename)\n", + " exp_base.gdf[\"filename\"] = str(filename)\n", "\n", " exp = exp_base.copy()\n", - " exp.gdf['value'] *= x_exp\n", + " exp.gdf[\"value\"] *= x_exp\n", " return exp\n", "\n", - "exp_distr = {\"x_exp\": sp.stats.uniform(0.9, 0.2),\n", - " \"f_exp\": sp.stats.randint(low=0, high=len(f_exp_list))\n", - " }\n", + "\n", + "exp_distr = {\n", + " \"x_exp\": sp.stats.uniform(0.9, 0.2),\n", + " \"f_exp\": sp.stats.randint(low=0, high=len(f_exp_list)),\n", + "}\n", "exp_iv = InputVar(exp_func, exp_distr)\n", "\n", "\n", "def haz_func(f_haz, i_haz, filename_list=f_haz_list):\n", " filename = filename_list[int(f_haz)]\n", " global haz_base\n", - " if 'haz_base' in globals():\n", + " if \"haz_base\" in globals():\n", " if isinstance(haz_base, Hazard):\n", " if haz_base.filename != str(filename):\n", " haz_base = Hazard.from_hdf5(filename)\n", @@ -5552,9 +5637,11 @@ " haz.intensity *= i_haz\n", " return haz\n", "\n", - "haz_distr = {\"i_haz\": sp.stats.norm(1, 0.2),\n", - " \"f_haz\": sp.stats.randint(low=0, high=len(f_haz_list))\n", - " }\n", + "\n", + "haz_distr = {\n", + " \"i_haz\": sp.stats.norm(1, 0.2),\n", + " \"f_haz\": sp.stats.randint(low=0, high=len(f_haz_list)),\n", + "}\n", "haz_iv = InputVar(haz_func, haz_distr)\n", "\n", "\n", @@ -5564,29 +5651,33 @@ " return max([(v - vmin), 0]) / (v_half - vmin)\n", "\n", " def sigmoid_func(v, G, v_half, vmin, k):\n", - " return G * xhi(v, v_half, vmin)**k / (1 + xhi(v, v_half, vmin)**k)\n", + " return G * xhi(v, v_half, vmin) ** k / (1 + xhi(v, v_half, vmin) ** k)\n", "\n", - " #In-function imports needed only for parallel computing on Windows\n", + " # In-function imports needed only for parallel computing on Windows\n", " import numpy as np\n", " from climada.entity import ImpactFunc, ImpactFuncSet\n", + "\n", " imp_fun = ImpactFunc()\n", - " imp_fun.haz_type = 'WS'\n", + " imp_fun.haz_type = \"WS\"\n", " imp_fun.id = _id\n", - " imp_fun.intensity_unit = 'm/s'\n", + " imp_fun.intensity_unit = \"m/s\"\n", " imp_fun.intensity = np.linspace(0, 150, num=100)\n", " imp_fun.mdd = np.repeat(1, len(imp_fun.intensity))\n", - " imp_fun.paa = np.array([sigmoid_func(v, G, v_half, vmin, k) for v in imp_fun.intensity])\n", + " imp_fun.paa = np.array(\n", + " [sigmoid_func(v, G, v_half, vmin, k) for v in imp_fun.intensity]\n", + " )\n", " imp_fun.check()\n", " impf_set = ImpactFuncSet()\n", " impf_set.append(imp_fun)\n", " return impf_set\n", "\n", + "\n", "impf_distr = {\n", " \"G\": sp.stats.truncnorm(0.5, 1.5),\n", " \"v_half\": sp.stats.uniform(35, 65),\n", " \"vmin\": sp.stats.uniform(0, 15),\n", - " \"k\": sp.stats.uniform(1, 4)\n", - " }\n", + " \"k\": sp.stats.uniform(1, 4),\n", + "}\n", "impf_iv = 
InputVar(impf_func, impf_distr)" ] }, @@ -5615,8 +5706,8 @@ "outputs": [], "source": [ "# Ordering of the samples by hazard first and exposures second\n", - "output_imp = calc_imp.make_sample(N=2**2, sampling_kwargs={'skip_values': 2**3})\n", - "output_imp.order_samples(by=['f_haz', 'f_exp'])" + "output_imp = calc_imp.make_sample(N=2**2, sampling_kwargs={\"skip_values\": 2**3})\n", + "output_imp.order_samples(by=[\"f_haz\", \"f_exp\"])" ] }, { @@ -5633,8 +5724,9 @@ "outputs": [], "source": [ "import matplotlib.pyplot as plt\n", - "e = output_imp.samples_df['f_exp'].values\n", - "h = output_imp.samples_df['f_haz'].values" + "\n", + "e = output_imp.samples_df[\"f_exp\"].values\n", + "h = output_imp.samples_df[\"f_haz\"].values" ] }, { @@ -5650,12 +5742,12 @@ "metadata": {}, "outputs": [], "source": [ - "plt.plot(e, label='exposures');\n", - "plt.plot(h, label='hazards');\n", - "plt.xlabel('samples');\n", - "plt.ylabel('file number');\n", - "plt.title('Order of exposures and hazards files in samples');\n", - "plt.legend(loc='upper right');" + "plt.plot(e, label=\"exposures\")\n", + "plt.plot(h, label=\"hazards\")\n", + "plt.xlabel(\"samples\")\n", + "plt.ylabel(\"file number\")\n", + "plt.title(\"Order of exposures and hazards files in samples\")\n", + "plt.legend(loc=\"upper right\");" ] }, { @@ -5727,4 +5819,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} \ No newline at end of file +} diff --git a/doc/tutorial/climada_engine_unsequa_helper.ipynb b/doc/tutorial/climada_engine_unsequa_helper.ipynb index 831f5f4bd..adad22323 100644 --- a/doc/tutorial/climada_engine_unsequa_helper.ipynb +++ b/doc/tutorial/climada_engine_unsequa_helper.ipynb @@ -37,7 +37,8 @@ "outputs": [], "source": [ "import warnings\n", - "warnings.filterwarnings('ignore') #Ignore warnings for making the tutorial's pdf." + "\n", + "warnings.filterwarnings(\"ignore\") # Ignore warnings for making the tutorial's pdf." 
] }, { @@ -101,9 +102,10 @@ } ], "source": [ - "#Define the base exposure\n", + "# Define the base exposure\n", "from climada.util.constants import EXP_DEMO_H5\n", "from climada.entity import Exposures\n", + "\n", "exp_base = Exposures.from_hdf5(EXP_DEMO_H5)" ] }, @@ -120,8 +122,9 @@ "outputs": [], "source": [ "from climada.engine.unsequa import InputVar\n", - "bounds_totval = [0.9, 1.1] #+- 10% noise on the total exposures value\n", - "bounds_noise = [0.9, 1.2] #-10% - +20% noise each exposures point\n", + "\n", + "bounds_totval = [0.9, 1.1] # +- 10% noise on the total exposures value\n", + "bounds_noise = [0.9, 1.2] # -10% - +20% noise each exposures point\n", "exp_iv = InputVar.exp([exp_base], bounds_totval, bounds_noise)" ] }, @@ -148,10 +151,10 @@ } ], "source": [ - "#The difference in total value between the base exposure and the average input uncertainty exposure\n", - "#due to the random noise on each exposures point (the average change in the total value is 1.0).\n", + "# The difference in total value between the base exposure and the average input uncertainty exposure\n", + "# due to the random noise on each exposures point (the average change in the total value is 1.0).\n", "avg_exp = exp_iv.evaluate()\n", - "(sum(avg_exp.gdf['value']) - sum(exp_base.gdf['value'])) / sum(exp_base.gdf['value'])" + "(sum(avg_exp.gdf[\"value\"]) - sum(exp_base.gdf[\"value\"])) / sum(exp_base.gdf[\"value\"])" ] }, { @@ -177,8 +180,8 @@ } ], "source": [ - "#The values for EN are seeds for the random number generator for the noise sampling and\n", - "#thus are uniformly sampled numbers between (0, 2**32-1)\n", + "# The values for EN are seeds for the random number generator for the noise sampling and\n", + "# thus are uniformly sampled numbers between (0, 2**32-1)\n", "exp_iv.plot();" ] }, @@ -208,19 +211,23 @@ }, "outputs": [], "source": [ - "#Define a generic method to make litpop instances with different exponent pairs.\n", + "# Define a generic method to make litpop instances with different exponent pairs.\n", "from climada.entity import LitPop\n", - "def generate_litpop_base(impf_id, value_unit, haz, assign_centr_kwargs,\n", - " choice_mn, **litpop_kwargs):\n", - " #In-function imports needed only for parallel computing on Windows\n", + "\n", + "\n", + "def generate_litpop_base(\n", + " impf_id, value_unit, haz, assign_centr_kwargs, choice_mn, **litpop_kwargs\n", + "):\n", + " # In-function imports needed only for parallel computing on Windows\n", " from climada.entity import LitPop\n", + "\n", " litpop_base = []\n", " for [m, n] in choice_mn:\n", - " print('\\n Computing litpop for m=%d, n=%d \\n' %(m, n))\n", - " litpop_kwargs['exponents'] = (m, n)\n", + " print(\"\\n Computing litpop for m=%d, n=%d \\n\" % (m, n))\n", + " litpop_kwargs[\"exponents\"] = (m, n)\n", " exp = LitPop.from_countries(**litpop_kwargs)\n", - " exp.gdf['impf_' + haz.haz_type] = impf_id\n", - " exp.gdf.drop('impf_', axis=1, inplace=True)\n", + " exp.gdf[\"impf_\" + haz.haz_type] = impf_id\n", + " exp.gdf.drop(\"impf_\", axis=1, inplace=True)\n", " if value_unit is not None:\n", " exp.value_unit = value_unit\n", " exp.assign_centroids(haz, **assign_centr_kwargs)\n", @@ -248,22 +255,23 @@ } ], "source": [ - "#Define the parameters of the LitPop instances\n", + "# Define the parameters of the LitPop instances\n", "tot_pop = 11.317e6\n", "impf_id = 1\n", - "value_unit = 'people'\n", + "value_unit = \"people\"\n", "litpop_kwargs = {\n", - " 'countries' : ['CUB'],\n", - " 'res_arcsec' : 150,\n", - " 'reference_year' : 2020,\n", 
- " 'fin_mode' : 'norm',\n", - " 'total_values' : [tot_pop]\n", + " \"countries\": [\"CUB\"],\n", + " \"res_arcsec\": 150,\n", + " \"reference_year\": 2020,\n", + " \"fin_mode\": \"norm\",\n", + " \"total_values\": [tot_pop],\n", "}\n", - "assign_centr_kwargs={}\n", + "assign_centr_kwargs = {}\n", "\n", "# The hazard is needed to assign centroids\n", "from climada.util.constants import HAZ_DEMO_H5\n", "from climada.hazard import Hazard\n", + "\n", "haz = Hazard.from_hdf5(HAZ_DEMO_H5)" ] }, @@ -460,11 +468,13 @@ } ], "source": [ - "#Generate the LitPop list\n", + "# Generate the LitPop list\n", "\n", - "choice_mn = [[0, 0.5], [0, 1], [0, 2]] #Choice of exponents m,n\n", + "choice_mn = [[0, 0.5], [0, 1], [0, 2]] # Choice of exponents m,n\n", "\n", - "litpop_list = generate_litpop_base(impf_id, value_unit, haz, assign_centr_kwargs, choice_mn, **litpop_kwargs)\n" + "litpop_list = generate_litpop_base(\n", + " impf_id, value_unit, haz, assign_centr_kwargs, choice_mn, **litpop_kwargs\n", + ")" ] }, { @@ -480,9 +490,9 @@ "outputs": [], "source": [ "from climada.engine.unsequa import InputVar\n", - "bounds_totval = [0.9, 1.1] #+- 10% noise on the total exposures value\n", - "litpop_iv = InputVar.exp(exp_list = litpop_list,\n", - " bounds_totval=bounds_totval)" + "\n", + "bounds_totval = [0.9, 1.1] # +- 10% noise on the total exposures value\n", + "litpop_iv = InputVar.exp(exp_list=litpop_list, bounds_totval=bounds_totval)" ] }, { @@ -848,8 +858,8 @@ } ], "source": [ - "#The values for EN are seeds for the random number generator for the noise sampling and\n", - "#thus are uniformly sampled numbers between (0, 2**32-1)\n", + "# The values for EN are seeds for the random number generator for the noise sampling and\n", + "# thus are uniformly sampled numbers between (0, 2**32-1)\n", "litpop_iv.plot();" ] }, @@ -912,9 +922,10 @@ } ], "source": [ - "#Define the base exposure\n", + "# Define the base exposure\n", "from climada.util.constants import HAZ_DEMO_H5\n", "from climada.hazard import Hazard\n", + "\n", "haz_base = Hazard.from_hdf5(HAZ_DEMO_H5)" ] }, @@ -931,10 +942,13 @@ "outputs": [], "source": [ "from climada.engine.unsequa import InputVar\n", - "bounds_freq = [0.9, 1.1] #+- 10% noise on the frequency of all events\n", - "bounds_int = None #No uncertainty on the intensity\n", + "\n", + "bounds_freq = [0.9, 1.1] # +- 10% noise on the frequency of all events\n", + "bounds_int = None # No uncertainty on the intensity\n", "n_ev = None\n", - "haz_iv = InputVar.haz([haz_base], n_ev=n_ev, bounds_freq=bounds_freq, bounds_int=bounds_int)" + "haz_iv = InputVar.haz(\n", + " [haz_base], n_ev=n_ev, bounds_freq=bounds_freq, bounds_int=bounds_int\n", + ")" ] }, { @@ -960,8 +974,8 @@ } ], "source": [ - "#The difference in frequency for HF=1.1 is indeed 10%.\n", - "haz_high_freq = haz_iv.evaluate(HE=n_ev, HI=None, HF = 1.1)\n", + "# The difference in frequency for HF=1.1 is indeed 10%.\n", + "haz_high_freq = haz_iv.evaluate(HE=n_ev, HI=None, HF=1.1)\n", "(sum(haz_high_freq.frequency) - sum(haz_base.frequency)) / sum(haz_base.frequency)" ] }, @@ -977,12 +991,18 @@ }, "outputs": [], "source": [ - "bounds_freq = [0.9, 1.1] #+- 10% noise on the frequency of all events\n", - "bounds_int = None #No uncertainty on the intensity\n", - "bounds_frac = [0.7, 1.1] #noise on the fraction of all events\n", - "n_ev = round(0.8 * haz_base.size) #sub-sample with re-draw events to obtain hazards with n=0.8*tot_number_events\n", + "bounds_freq = [0.9, 1.1] # +- 10% noise on the frequency of all events\n", + "bounds_int = None # 
No uncertainty on the intensity\n", + "bounds_frac = [0.7, 1.1] # noise on the fraction of all events\n", + "n_ev = round(\n", + " 0.8 * haz_base.size\n", + ") # sub-sample with re-draw events to obtain hazards with n=0.8*tot_number_events\n", "haz_iv = InputVar.haz(\n", - " [haz_base], n_ev=n_ev, bounds_freq=bounds_freq, bounds_int=bounds_int, bounds_frac=bounds_frac\n", + " [haz_base],\n", + " n_ev=n_ev,\n", + " bounds_freq=bounds_freq,\n", + " bounds_int=bounds_int,\n", + " bounds_frac=bounds_frac,\n", ")" ] }, @@ -1007,9 +1027,12 @@ "outputs": [], "source": [ "import numpy as np\n", - "HE = 2618981871 #The random seed (number between 0 and 2**32)\n", - "rng = np.random.RandomState(int(HE)) #Initialize a random state with the seed\n", - "chosen_ev = list(rng.choice(haz_base.event_name, int(n_ev))) #Obtain the corresponding events" + "\n", + "HE = 2618981871 # The random seed (number between 0 and 2**32)\n", + "rng = np.random.RandomState(int(HE)) # Initialize a random state with the seed\n", + "chosen_ev = list(\n", + " rng.choice(haz_base.event_name, int(n_ev))\n", + ") # Obtain the corresponding events" ] }, { @@ -1035,7 +1058,7 @@ } ], "source": [ - "#The first event is\n", + "# The first event is\n", "chosen_ev[0]" ] }, @@ -1062,8 +1085,8 @@ } ], "source": [ - "#The values for HE are seeds for the random number generator for the noise sampling and\n", - "#thus are uniformly sampled numbers between (0, 2**32-1)\n", + "# The values for HE are seeds for the random number generator for the noise sampling and\n", + "# thus are uniformly sampled numbers between (0, 2**32-1)\n", "haz_iv.plot();" ] }, @@ -1098,9 +1121,9 @@ } ], "source": [ - "#The number of events per sample is equal to n_ev\n", - "haz_sub = haz_iv.evaluate(HE=928165924, HI=None, HF = 1.1, HA=None)\n", - "#The number for HE is irrelevant, as all samples have the same n_Ev\n", + "# The number of events per sample is equal to n_ev\n", + "haz_sub = haz_iv.evaluate(HE=928165924, HI=None, HF=1.1, HA=None)\n", + "# The number for HE is irrelevant, as all samples have the same n_Ev\n", "haz_sub.size - n_ev" ] }, @@ -1149,6 +1172,7 @@ "outputs": [], "source": [ "from climada.entity import ImpactFuncSet, ImpfTropCyclone\n", + "\n", "impf = ImpfTropCyclone.from_emanuel_usa()\n", "impf_set_base = ImpactFuncSet([impf])" ] @@ -1174,14 +1198,17 @@ "outputs": [], "source": [ "from climada.engine.unsequa import InputVar\n", - "bounds_impfi = [-10, 10] #-10 m/s ; +10m/s uncertainty on the intensity\n", - "bounds_mdd = [0.7, 1.1] #-30% - +10% uncertainty on the mdd\n", - "bounds_paa = None #No uncertainty in the paa\n", - "impf_iv = InputVar.impfset(impf_set_list=[impf_set_base],\n", - " bounds_impfi=bounds_impfi,\n", - " bounds_mdd=bounds_mdd,\n", - " bounds_paa=bounds_paa,\n", - " haz_id_dict={'TC': [1]})" + "\n", + "bounds_impfi = [-10, 10] # -10 m/s ; +10m/s uncertainty on the intensity\n", + "bounds_mdd = [0.7, 1.1] # -30% - +10% uncertainty on the mdd\n", + "bounds_paa = None # No uncertainty in the paa\n", + "impf_iv = InputVar.impfset(\n", + " impf_set_list=[impf_set_base],\n", + " bounds_impfi=bounds_impfi,\n", + " bounds_mdd=bounds_mdd,\n", + " bounds_paa=bounds_paa,\n", + " haz_id_dict={\"TC\": [1]},\n", + ")" ] }, { @@ -1207,11 +1234,11 @@ } ], "source": [ - "#Plot the impact function for 50 random samples (note for the expert, these are not global)\n", + "# Plot the impact function for 50 random samples (note for the expert, these are not global)\n", "n = 50\n", "ax = impf_iv.evaluate().plot()\n", - "inten = 
impf_iv.distr_dict['IFi'].rvs(size=n)\n", - "mdd = impf_iv.distr_dict['MDD'].rvs(size=n)\n", + "inten = impf_iv.distr_dict[\"IFi\"].rvs(size=n)\n", + "mdd = impf_iv.distr_dict[\"MDD\"].rvs(size=n)\n", "for i, m in zip(inten, mdd):\n", " impf_iv.evaluate(IFi=i, MDD=m).plot(axis=ax)\n", "ax.get_legend().remove()" @@ -1286,6 +1313,7 @@ "source": [ "from climada.entity import Entity\n", "from climada.util.constants import ENT_DEMO_TODAY\n", + "\n", "ent = Entity.from_excel(ENT_DEMO_TODAY)\n", "ent.exposures.ref_year = 2018\n", "ent.check()" @@ -1304,11 +1332,12 @@ "outputs": [], "source": [ "from climada.engine.unsequa import InputVar\n", + "\n", "ent_iv = InputVar.ent(\n", - " impf_set_list = [ent.impact_funcs],\n", - " disc_rate = ent.disc_rates,\n", - " exp_list = [ent.exposures],\n", - " meas_set = ent.measures,\n", + " impf_set_list=[ent.impact_funcs],\n", + " disc_rate=ent.disc_rates,\n", + " exp_list=[ent.exposures],\n", + " meas_set=ent.measures,\n", " bounds_disc=[0, 0.08],\n", " bounds_cost=[0.5, 1.5],\n", " bounds_totval=[0.9, 1.1],\n", @@ -1316,8 +1345,8 @@ " bounds_mdd=[0.9, 1.05],\n", " bounds_paa=None,\n", " bounds_impfi=[-2, 5],\n", - " haz_id_dict={'TC': [1]}\n", - " )" + " haz_id_dict={\"TC\": [1]},\n", + ")" ] }, { @@ -1367,19 +1396,23 @@ }, "outputs": [], "source": [ - "#Define a generic method to make litpop instances with different exponent pairs.\n", + "# Define a generic method to make litpop instances with different exponent pairs.\n", "from climada.entity import LitPop\n", - "def generate_litpop_base(impf_id, value_unit, haz, assign_centr_kwargs,\n", - " choice_mn, **litpop_kwargs):\n", - " #In-function imports needed only for parallel computing on Windows\n", + "\n", + "\n", + "def generate_litpop_base(\n", + " impf_id, value_unit, haz, assign_centr_kwargs, choice_mn, **litpop_kwargs\n", + "):\n", + " # In-function imports needed only for parallel computing on Windows\n", " from climada.entity import LitPop\n", + "\n", " litpop_base = []\n", " for [m, n] in choice_mn:\n", - " print('\\n Computing litpop for m=%d, n=%d \\n' %(m, n))\n", - " litpop_kwargs['exponents'] = (m, n)\n", + " print(\"\\n Computing litpop for m=%d, n=%d \\n\" % (m, n))\n", + " litpop_kwargs[\"exponents\"] = (m, n)\n", " exp = LitPop.from_countries(**litpop_kwargs)\n", - " exp.gdf['impf_' + haz.haz_type] = impf_id\n", - " exp.gdf.drop('impf_', axis=1, inplace=True)\n", + " exp.gdf[\"impf_\" + haz.haz_type] = impf_id\n", + " exp.gdf.drop(\"impf_\", axis=1, inplace=True)\n", " if value_unit is not None:\n", " exp.value_unit = value_unit\n", " exp.assign_centroids(haz, **assign_centr_kwargs)\n", @@ -1407,19 +1440,20 @@ } ], "source": [ - "#Define the parameters of the LitPop instances\n", + "# Define the parameters of the LitPop instances\n", "impf_id = 1\n", "value_unit = None\n", "litpop_kwargs = {\n", - " 'countries' : ['CUB'],\n", - " 'res_arcsec' : 300,\n", - " 'reference_year' : 2020,\n", + " \"countries\": [\"CUB\"],\n", + " \"res_arcsec\": 300,\n", + " \"reference_year\": 2020,\n", "}\n", - "assign_centr_kwargs={}\n", + "assign_centr_kwargs = {}\n", "\n", "# The hazard is needed to assign centroids\n", "from climada.util.constants import HAZ_DEMO_H5\n", "from climada.hazard import Hazard\n", + "\n", "haz = Hazard.from_hdf5(HAZ_DEMO_H5)" ] }, @@ -1661,11 +1695,13 @@ } ], "source": [ - "#Generate the LitPop list\n", + "# Generate the LitPop list\n", "\n", - "choice_mn = [[1, 0.5], [0.5, 1], [1, 1]] #Choice of exponents m,n\n", + "choice_mn = [[1, 0.5], [0.5, 1], [1, 1]] # Choice of exponents 
m,n\n", "\n", - "litpop_list = generate_litpop_base(impf_id, value_unit, haz, assign_centr_kwargs, choice_mn, **litpop_kwargs)\n" + "litpop_list = generate_litpop_base(\n", + " impf_id, value_unit, haz, assign_centr_kwargs, choice_mn, **litpop_kwargs\n", + ")" ] }, { @@ -1693,6 +1729,7 @@ "source": [ "from climada.entity import Entity\n", "from climada.util.constants import ENT_DEMO_TODAY\n", + "\n", "ent = Entity.from_excel(ENT_DEMO_TODAY)\n", "ent.exposures.ref_year = 2020\n", "ent.check()" @@ -1711,11 +1748,12 @@ "outputs": [], "source": [ "from climada.engine.unsequa import InputVar\n", + "\n", "ent_iv = InputVar.ent(\n", - " impf_set_list = [ent.impact_funcs],\n", - " disc_rate = ent.disc_rates,\n", - " exp_list = litpop_list,\n", - " meas_set = ent.measures,\n", + " impf_set_list=[ent.impact_funcs],\n", + " disc_rate=ent.disc_rates,\n", + " exp_list=litpop_list,\n", + " meas_set=ent.measures,\n", " bounds_disc=[0, 0.08],\n", " bounds_cost=[0.5, 1.5],\n", " bounds_totval=[0.9, 1.1],\n", @@ -1723,8 +1761,8 @@ " bounds_mdd=[0.9, 1.05],\n", " bounds_paa=None,\n", " bounds_impfi=[-2, 5],\n", - " haz_id_dict={'TC': [1]}\n", - " )" + " haz_id_dict={\"TC\": [1]},\n", + ")" ] }, { @@ -1847,16 +1885,16 @@ "outputs": [], "source": [ "entfut_iv = InputVar.entfut(\n", - " impf_set_list = [ent_fut.impact_funcs],\n", - " exp_list = [ent_fut.exposures],\n", - " meas_set = ent_fut.measures,\n", + " impf_set_list=[ent_fut.impact_funcs],\n", + " exp_list=[ent_fut.exposures],\n", + " meas_set=ent_fut.measures,\n", " bounds_cost=[0.6, 1.2],\n", " bounds_eg=[0.8, 1.5],\n", " bounds_noise=None,\n", " bounds_mdd=[0.7, 0.9],\n", " bounds_paa=[1.3, 2],\n", - " haz_id_dict={'TC': [1]}\n", - " )" + " haz_id_dict={\"TC\": [1]},\n", + ")" ] }, { @@ -1879,19 +1917,23 @@ }, "outputs": [], "source": [ - "#Define a generic method to make litpop instances with different exponent pairs.\n", + "# Define a generic method to make litpop instances with different exponent pairs.\n", "from climada.entity import LitPop\n", - "def generate_litpop_base(impf_id, value_unit, haz, assign_centr_kwargs,\n", - " choice_mn, **litpop_kwargs):\n", - " #In-function imports needed only for parallel computing on Windows\n", + "\n", + "\n", + "def generate_litpop_base(\n", + " impf_id, value_unit, haz, assign_centr_kwargs, choice_mn, **litpop_kwargs\n", + "):\n", + " # In-function imports needed only for parallel computing on Windows\n", " from climada.entity import LitPop\n", + "\n", " litpop_base = []\n", " for [m, n] in choice_mn:\n", - " print('\\n Computing litpop for m=%d, n=%d \\n' %(m, n))\n", - " litpop_kwargs['exponents'] = (m, n)\n", + " print(\"\\n Computing litpop for m=%d, n=%d \\n\" % (m, n))\n", + " litpop_kwargs[\"exponents\"] = (m, n)\n", " exp = LitPop.from_countries(**litpop_kwargs)\n", - " exp.gdf['impf_' + haz.haz_type] = impf_id\n", - " exp.gdf.drop('impf_', axis=1, inplace=True)\n", + " exp.gdf[\"impf_\" + haz.haz_type] = impf_id\n", + " exp.gdf.drop(\"impf_\", axis=1, inplace=True)\n", " if value_unit is not None:\n", " exp.value_unit = value_unit\n", " exp.assign_centroids(haz, **assign_centr_kwargs)\n", @@ -1919,19 +1961,20 @@ } ], "source": [ - "#Define the parameters of the LitPop instances\n", + "# Define the parameters of the LitPop instances\n", "impf_id = 1\n", "value_unit = None\n", "litpop_kwargs = {\n", - " 'countries' : ['CUB'],\n", - " 'res_arcsec' : 300,\n", - " 'reference_year' : 2040,\n", + " \"countries\": [\"CUB\"],\n", + " \"res_arcsec\": 300,\n", + " \"reference_year\": 2040,\n", "}\n", - 
"assign_centr_kwargs={}\n", + "assign_centr_kwargs = {}\n", "\n", "# The hazard is needed to assign centroids\n", "from climada.util.constants import HAZ_DEMO_H5\n", "from climada.hazard import Hazard\n", + "\n", "haz = Hazard.from_hdf5(HAZ_DEMO_H5)" ] }, @@ -2306,11 +2349,13 @@ } ], "source": [ - "#Generate the LitPop list\n", + "# Generate the LitPop list\n", "\n", - "choice_mn = [[1, 0.5], [0.5, 1], [1, 1]] #Choice of exponents m,n\n", + "choice_mn = [[1, 0.5], [0.5, 1], [1, 1]] # Choice of exponents m,n\n", "\n", - "litpop_list = generate_litpop_base(impf_id, value_unit, haz, assign_centr_kwargs, choice_mn, **litpop_kwargs)\n" + "litpop_list = generate_litpop_base(\n", + " impf_id, value_unit, haz, assign_centr_kwargs, choice_mn, **litpop_kwargs\n", + ")" ] }, { @@ -2358,17 +2403,18 @@ "outputs": [], "source": [ "from climada.engine.unsequa import InputVar\n", + "\n", "entfut_iv = InputVar.entfut(\n", - " impf_set_list = [ent_fut.impact_funcs],\n", - " exp_list = litpop_list,\n", - " meas_set = ent_fut.measures,\n", + " impf_set_list=[ent_fut.impact_funcs],\n", + " exp_list=litpop_list,\n", + " meas_set=ent_fut.measures,\n", " bounds_cost=[0.6, 1.2],\n", " bounds_eg=[0.8, 1.5],\n", " bounds_noise=None,\n", " bounds_mdd=[0.7, 0.9],\n", " bounds_paa=[1.3, 2],\n", - " haz_id_dict={'TC': [1]}\n", - " )" + " haz_id_dict={\"TC\": [1]},\n", + ")" ] } ], diff --git a/doc/tutorial/climada_entity_DiscRates.ipynb b/doc/tutorial/climada_entity_DiscRates.ipynb index acb33de01..375e2167f 100644 --- a/doc/tutorial/climada_entity_DiscRates.ipynb +++ b/doc/tutorial/climada_entity_DiscRates.ipynb @@ -74,11 +74,11 @@ "# Compute net present value between present year and future year.\n", "ini_year = 2019\n", "end_year = 2050\n", - "val_years = np.zeros(end_year-ini_year+1)\n", - "val_years[0] = 100000000 # initial investment\n", - "val_years[10:] = 75000 # maintenance from 10th year\n", + "val_years = np.zeros(end_year - ini_year + 1)\n", + "val_years[0] = 100000000 # initial investment\n", + "val_years[10:] = 75000 # maintenance from 10th year\n", "npv = disc.net_present_value(ini_year, end_year, val_years)\n", - "print('net present value: {:.5e}'.format(npv))" + "print(\"net present value: {:.5e}\".format(npv))" ] }, { @@ -135,8 +135,8 @@ "from climada.util import ENT_TEMPLATE_XLS\n", "\n", "# Fill DataFrame from Excel file\n", - "file_name = ENT_TEMPLATE_XLS # provide absolute path of the excel file\n", - "print('Read file:', ENT_TEMPLATE_XLS)\n", + "file_name = ENT_TEMPLATE_XLS # provide absolute path of the excel file\n", + "print(\"Read file:\", ENT_TEMPLATE_XLS)\n", "disc = DiscRates.from_excel(file_name)\n", "disc.plot();" ] @@ -170,11 +170,11 @@ "from climada.util import ENT_TEMPLATE_XLS\n", "\n", "# Fill DataFrame from Excel file\n", - "file_name = ENT_TEMPLATE_XLS # provide absolute path of the excel file\n", + "file_name = ENT_TEMPLATE_XLS # provide absolute path of the excel file\n", "disc = DiscRates.from_excel(file_name)\n", "\n", "# write file\n", - "disc.write_excel('results/tutorial_disc.xlsx')" + "disc.write_excel(\"results/tutorial_disc.xlsx\")" ] }, { @@ -192,8 +192,9 @@ "outputs": [], "source": [ "from climada.util.save import save\n", + "\n", "# this generates a results folder in the current path and stores the output there\n", - "save('tutorial_disc.p', disc)" + "save(\"tutorial_disc.p\", disc)" ] } ], diff --git a/doc/tutorial/climada_entity_Exposures.ipynb b/doc/tutorial/climada_entity_Exposures.ipynb index b5db1520e..d46903e8f 100644 --- 
a/doc/tutorial/climada_entity_Exposures.ipynb +++ b/doc/tutorial/climada_entity_Exposures.ipynb @@ -112,13 +112,15 @@ "# Fill a pandas DataFrame with the 3 mandatory variables (latitude, longitude, value) for a number of assets (10'000).\n", "# We will do this with random dummy data for purely illustrative reasons:\n", "exp_df = DataFrame()\n", - "n_exp = 100*100\n", + "n_exp = 100 * 100\n", "# provide value\n", - "exp_df['value'] = np.arange(n_exp)\n", + "exp_df[\"value\"] = np.arange(n_exp)\n", "# provide latitude and longitude\n", - "lat, lon = np.mgrid[15 : 35 : complex(0, np.sqrt(n_exp)), 20 : 40 : complex(0, np.sqrt(n_exp))]\n", - "exp_df['latitude'] = lat.flatten()\n", - "exp_df['longitude'] = lon.flatten()" + "lat, lon = np.mgrid[\n", + " 15 : 35 : complex(0, np.sqrt(n_exp)), 20 : 40 : complex(0, np.sqrt(n_exp))\n", + "]\n", + "exp_df[\"latitude\"] = lat.flatten()\n", + "exp_df[\"longitude\"] = lon.flatten()" ] }, { @@ -131,7 +133,7 @@ "# In this case, we only specify the IDs for tropical cyclone (TC); here, each exposure entry will be treated with\n", "# the same impact function: the one that has ID '1':\n", "# Of course, this will only be relevant at later steps during impact calculations.\n", - "exp_df['impf_TC'] = np.ones(n_exp, int)" + "exp_df[\"impf_TC\"] = np.ones(n_exp, int)" ] }, { @@ -156,8 +158,8 @@ ], "source": [ "# Let's have a look at the pandas DataFrame\n", - "print('exp_df is a DataFrame:', str(type(exp_df)))\n", - "print('exp_df looks like:')\n", + "print(\"exp_df is a DataFrame:\", str(type(exp_df)))\n", + "print(\"exp_df looks like:\")\n", "print(exp_df.head())" ] }, @@ -195,12 +197,12 @@ "# Generate Exposures from the pandas DataFrame. This step converts the DataFrame into\n", "# a CLIMADA Exposures instance!\n", "exp = Exposures(exp_df)\n", - "print('exp has the type:', str(type(exp)))\n", - "print('and contains a GeoDataFrame exp.gdf:', str(type(exp.gdf)))\n", + "print(\"exp has the type:\", str(type(exp)))\n", + "print(\"and contains a GeoDataFrame exp.gdf:\", str(type(exp.gdf)))\n", "\n", "# set geometry attribute (shapely Points) from GeoDataFrame from latitude and longitude\n", "exp.set_geometry_points()\n", - "print('\\n' + 'check method logs:')\n", + "print(\"\\n\" + \"check method logs:\")\n", "\n", "# always apply the check() method in the end. 
It puts metadata that has not been assigned,\n", "# and points out missing mandatory data\n", @@ -243,7 +245,7 @@ ], "source": [ "# let's have a look at the Exposures instance we created!\n", - "print('\\n' + 'exp looks like:')\n", + "print(\"\\n\" + \"exp looks like:\")\n", "print(exp)" ] }, @@ -292,9 +294,9 @@ "from climada.entity import Exposures\n", "\n", "# Read spatial info from an external file into GeoDataFrame\n", - "world = gpd.read_file(gpd.datasets.get_path('naturalearth_cities'))\n", - "print('World is a GeoDataFrame:', str(type(world)))\n", - "print('World looks like:')\n", + "world = gpd.read_file(gpd.datasets.get_path(\"naturalearth_cities\"))\n", + "print(\"World is a GeoDataFrame:\", str(type(world)))\n", + "print(\"World looks like:\")\n", "print(world.head())" ] }, @@ -317,9 +319,9 @@ "# Generate Exposures: value, latitude and longitude for each exposure entry.\n", "# Convert GeoDataFrame into Exposure instance\n", "exp_gpd = Exposures(world)\n", - "print('\\n' + 'exp_gpd is an Exposures:', str(type(exp_gpd)))\n", + "print(\"\\n\" + \"exp_gpd is an Exposures:\", str(type(exp_gpd)))\n", "# add random values to entries\n", - "exp_gpd.gdf['value'] = np.arange(world.shape[0])\n", + "exp_gpd.gdf[\"value\"] = np.arange(world.shape[0])\n", "# set latitude and longitude attributes from geometry\n", "exp_gpd.set_lat_lon()" ] @@ -348,8 +350,8 @@ "# In this case, we only specify the IDs for tropical cyclone (TC); here, each exposure entry will be treated with\n", "# the same impact function: the one that has ID '1':\n", "# Of course, this will only be relevant at later steps during impact calculations.\n", - "exp_gpd.gdf['impf_TC'] = np.ones(world.shape[0], int)\n", - "print('\\n' + 'check method logs:')\n", + "exp_gpd.gdf[\"impf_TC\"] = np.ones(world.shape[0], int)\n", + "print(\"\\n\" + \"check method logs:\")\n", "\n", "# as always, run check method to assign meta-data and check for missing mandatory variables.\n", "exp_gpd.check()" @@ -414,7 +416,7 @@ ], "source": [ "# let's have a look at the Exposures instance we created!\n", - "print('\\n' + '\\x1b[1;03;30;30m' + 'exp_gpd looks like:' + '\\x1b[0m')\n", + "print(\"\\n\" + \"\\x1b[1;03;30;30m\" + \"exp_gpd looks like:\" + \"\\x1b[0m\")\n", "print(exp_gpd)" ] }, @@ -536,7 +538,7 @@ "sel_exp = exp_gpd.copy() # to keep the original exp_gpd Exposures data\n", "sel_exp.gdf = sel_exp.gdf.cx[:, -5:5]\n", "\n", - "print('\\n' + 'sel_exp contains a subset of the original data')\n", + "print(\"\\n\" + \"sel_exp contains a subset of the original data\")\n", "sel_exp.gdf.head()" ] }, @@ -659,13 +661,14 @@ "source": [ "# Example 2: extract data in a polygon\n", "from shapely.geometry import Polygon\n", + "\n", "sel_polygon = exp_gpd.copy() # to keep the original exp_gpd Exposures data\n", "\n", "poly = Polygon([(0, -10), (0, 10), (10, 5)])\n", "sel_polygon.gdf = sel_polygon.gdf[sel_polygon.gdf.intersects(poly)]\n", "\n", "# Let's have a look. 
Again, the sub-selection is a GeoDataFrame!\n", - "print('\\n' + 'sel_exp contains a subset of the original data')\n", + "print(\"\\n\" + \"sel_exp contains a subset of the original data\")\n", "sel_polygon.gdf" ] }, @@ -799,8 +802,10 @@ "# Example 3: change coordinate reference system\n", "# use help to see more options: help(sel_exp.to_crs)\n", "sel_polygon.to_crs(epsg=3395, inplace=True)\n", - "print('\\n' + 'the crs has changed to ' +str(sel_polygon.crs))\n", - "print('the values for latitude and longitude are now according to the new coordinate system: ')\n", + "print(\"\\n\" + \"the crs has changed to \" + str(sel_polygon.crs))\n", + "print(\n", + " \"the values for latitude and longitude are now according to the new coordinate system: \"\n", + ")\n", "sel_polygon.gdf" ] }, @@ -922,8 +927,8 @@ "exp_all = Exposures.concat([sel_polygon, sel_exp.to_crs(epsg=3395)])\n", "\n", "# the output is of type Exposures\n", - "print('exp_all type and number of rows:', type(exp_all), exp_all.gdf.shape[0])\n", - "print('number of unique rows:', exp_all.gdf.drop_duplicates().shape[0])\n", + "print(\"exp_all type and number of rows:\", type(exp_all), exp_all.gdf.shape[0])\n", + "print(\"number of unique rows:\", exp_all.gdf.drop_duplicates().shape[0])\n", "\n", "# NaNs will appear in the missing values\n", "exp_all.gdf.head()" @@ -1103,8 +1108,8 @@ "exp_templ = pd.read_excel(file_name)\n", "\n", "# Let's have a look at the data:\n", - "print('exp_templ is a DataFrame:', str(type(exp_templ)))\n", - "print('exp_templ looks like:')\n", + "print(\"exp_templ is a DataFrame:\", str(type(exp_templ)))\n", + "print(\"exp_templ looks like:\")\n", "exp_templ.head()" ] }, @@ -1145,14 +1150,14 @@ "source": [ "# Generate an Exposures instance from the dataframe.\n", "exp_templ = Exposures(exp_templ)\n", - "print('\\n' + 'exp_templ is now an Exposures:', str(type(exp_templ)))\n", + "print(\"\\n\" + \"exp_templ is now an Exposures:\", str(type(exp_templ)))\n", "\n", "# set geometry attribute (shapely Points) from GeoDataFrame from latitude and longitude\n", - "print('\\n' + 'set_geometry logs:')\n", + "print(\"\\n\" + \"set_geometry logs:\")\n", "exp_templ.set_geometry_points()\n", "# as always, run check method to include metadata and check for missing mandatory parameters\n", "\n", - "print('\\n' + 'check exp_templ:')\n", + "print(\"\\n\" + \"check exp_templ:\")\n", "exp_templ.check()" ] }, @@ -1314,7 +1319,7 @@ ], "source": [ "# Let's have a look at our Exposures instance!\n", - "print('\\n' + 'exp_templ.gdf looks like:')\n", + "print(\"\\n\" + \"exp_templ.gdf looks like:\")\n", "exp_templ.gdf.head()" ] }, @@ -1347,7 +1352,7 @@ "\n", "# We take an example with a dummy raster file (HAZ_DEMO_FL), running the method set_from_raster directly loads the\n", "# necessary info from the file into an Exposures instance.\n", - "exp_raster = Exposures.from_raster(HAZ_DEMO_FL, window= Window(10, 20, 50, 60))\n", + "exp_raster = Exposures.from_raster(HAZ_DEMO_FL, window=Window(10, 20, 50, 60))\n", "# There are several keyword argument options that come with the set_from_raster method (such as\n", "# specifying a window, if not the entire file should be read, or a bounding box. Check them out." 
] @@ -1376,7 +1381,7 @@ "source": [ "# As always, run the check method, such that metadata can be assigned and checked for missing mandatory parameters.\n", "exp_raster.check()\n", - "print('Meta:', exp_raster.meta)" + "print(\"Meta:\", exp_raster.meta)" ] }, { @@ -1475,7 +1480,7 @@ ], "source": [ "# Let's have a look at the Exposures instance!\n", - "print('\\n' + 'exp_raster looks like:')\n", + "print(\"\\n\" + \"exp_raster looks like:\")\n", "exp_raster.gdf.head()" ] }, @@ -1567,7 +1572,7 @@ ], "source": [ "# Example 1: plot_hexbin method\n", - "print('Plotting exp_df.')\n", + "print(\"Plotting exp_df.\")\n", "axs = exp.plot_hexbin();\n", "\n", "# further methods to check out:\n", @@ -1606,7 +1611,7 @@ "source": [ "# Example 2: plot_scatter method\n", "\n", - "exp_gpd.to_crs('epsg:3035', inplace=True)\n", + "exp_gpd.to_crs(\"epsg:3035\", inplace=True)\n", "exp_gpd.plot_scatter(pop_name=False);" ] }, @@ -1637,9 +1642,19 @@ ], "source": [ "# Example 3: plot_raster method\n", - "from climada.util.plot import add_cntry_names # use climada's plotting utilities\n", - "ax = exp.plot_raster(); # plot with same resolution as data\n", - "add_cntry_names(ax, [exp.gdf['longitude'].min(), exp.gdf['longitude'].max(), exp.gdf['latitude'].min(), exp.gdf['latitude'].max()])\n", + "from climada.util.plot import add_cntry_names # use climada's plotting utilities\n", + "\n", + "ax = exp.plot_raster()\n", + "# plot with same resolution as data\n", + "add_cntry_names(\n", + " ax,\n", + " [\n", + " exp.gdf[\"longitude\"].min(),\n", + " exp.gdf[\"longitude\"].max(),\n", + " exp.gdf[\"latitude\"].min(),\n", + " exp.gdf[\"latitude\"].max(),\n", + " ],\n", + ")\n", "\n", "# use keyword argument save_tiff='filepath.tiff' to save the corresponding raster in tiff format\n", "# use keyword argument raster_res='desired number' to change resolution of the raster." @@ -1674,11 +1689,16 @@ "source": [ "# Example 4: plot_basemap method\n", "import contextily as ctx\n", + "\n", "# select the background image from the available ctx.providers\n", - "ax = exp_templ.plot_basemap(buffer=30000, cmap='brg'); # using Positron from CartoDB\n", - "ax = exp_templ.plot_basemap(buffer=30000, cmap='brg',\n", - " url=ctx.providers.OpenStreetMap.Mapnik, # Using OpenStreetmap,\n", - " zoom=9); # select the zoom level of the map, affects the font size of labelled objects" + "ax = exp_templ.plot_basemap(buffer=30000, cmap=\"brg\")\n", + "# using Positron from CartoDB\n", + "ax = exp_templ.plot_basemap(\n", + " buffer=30000,\n", + " cmap=\"brg\",\n", + " url=ctx.providers.OpenStreetMap.Mapnik, # Using OpenStreetmap,\n", + " zoom=9,\n", + "); # select the zoom level of the map, affects the font size of labelled objects" ] }, { @@ -1718,7 +1738,7 @@ ], "source": [ "# other visualization types\n", - "exp_templ.gdf.hist(column='value');" + "exp_templ.gdf.hist(column=\"value\");" ] }, { @@ -1737,12 +1757,15 @@ "metadata": {}, "outputs": [], "source": [ - "import fiona; fiona.supported_drivers\n", + "import fiona\n", + "\n", + "fiona.supported_drivers\n", "from climada import CONFIG\n", + "\n", "results = CONFIG.local_data.save_dir.dir()\n", "\n", "# DataFrame save to csv format. 
geometry writen as string, metadata not saved!\n", - "exp_templ.gdf.to_csv(results.joinpath('exp_templ.csv'), sep='\\t')" + "exp_templ.gdf.to_csv(results.joinpath(\"exp_templ.csv\"), sep=\"\\t\")" ] }, { @@ -1752,7 +1775,7 @@ "outputs": [], "source": [ "# write as hdf5 file\n", - "exp_templ.write_hdf5(results.joinpath('exp_temp.h5'))" + "exp_templ.write_hdf5(results.joinpath(\"exp_temp.h5\"))" ] }, { @@ -1771,8 +1794,9 @@ "source": [ "# save in pickle format\n", "from climada.util.save import save\n", + "\n", "# this generates a results folder in the current path and stores the output there\n", - "save('exp_templ.pkl.p', exp_templ) # creates results folder and stores there" + "save(\"exp_templ.pkl.p\", exp_templ) # creates results folder and stores there" ] }, { @@ -1814,7 +1838,7 @@ "source": [ "# set_geometry_points is expensive for big exposures\n", "# for small amount of data, the execution time might be even greater when using dask\n", - "exp.gdf.drop(columns=['geometry'], inplace=True)\n", + "exp.gdf.drop(columns=[\"geometry\"], inplace=True)\n", "print(exp.gdf.head())\n", "%time exp.set_geometry_points(scheduler='processes')\n", "print(exp.gdf.head())" diff --git a/doc/tutorial/climada_entity_Exposures_polygons_lines.ipynb b/doc/tutorial/climada_entity_Exposures_polygons_lines.ipynb index 22c5827f2..904d00f4d 100644 --- a/doc/tutorial/climada_entity_Exposures_polygons_lines.ipynb +++ b/doc/tutorial/climada_entity_Exposures_polygons_lines.ipynb @@ -58,11 +58,13 @@ "from climada.entity.impact_funcs.storm_europe import ImpfStormEurope\n", "from climada.entity import Exposures\n", "\n", - "HAZ = Client().get_hazard('storm_europe', name='test_haz_WS_nl', status='test_dataset');\n", + "HAZ = Client().get_hazard(\"storm_europe\", name=\"test_haz_WS_nl\", status=\"test_dataset\")\n", "\n", - "EXP_POLY = Client().get_exposures('base', name='test_polygon_exp', status='test_dataset');\n", - "EXP_LINE = Client().get_exposures('base', name='test_line_exp', status='test_dataset');\n", - "EXP_POINT = Client().get_exposures('base', name='test_point_exp', status='test_dataset');\n", + "EXP_POLY = Client().get_exposures(\n", + " \"base\", name=\"test_polygon_exp\", status=\"test_dataset\"\n", + ")\n", + "EXP_LINE = Client().get_exposures(\"base\", name=\"test_line_exp\", status=\"test_dataset\")\n", + "EXP_POINT = Client().get_exposures(\"base\", name=\"test_point_exp\", status=\"test_dataset\")\n", "\n", "EXP_MIX = Exposures.concat([EXP_POLY, EXP_LINE, EXP_POINT])\n", "\n", @@ -109,15 +111,20 @@ } ], "source": [ - "#disaggregate in the same CRS as the exposures are defined (here degrees), resolution 1degree\n", - "#divide values on points\n", - "#aggregate by summing\n", + "# disaggregate in the same CRS as the exposures are defined (here degrees), resolution 1degree\n", + "# divide values on points\n", + "# aggregate by summing\n", "\n", "impact = u_lp.calc_geom_impact(\n", - " exp=EXP_MIX, impf_set=IMPF_SET, haz=HAZ,\n", - " res=0.2, to_meters=False, disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None,\n", - " agg_met=u_lp.AggMethod.SUM\n", - " )" + " exp=EXP_MIX,\n", + " impf_set=IMPF_SET,\n", + " haz=HAZ,\n", + " res=0.2,\n", + " to_meters=False,\n", + " disagg_met=u_lp.DisaggMethod.DIV,\n", + " disagg_val=None,\n", + " agg_met=u_lp.AggMethod.SUM,\n", + ")" ] }, { @@ -170,15 +177,20 @@ } ], "source": [ - "#disaggregate in meters\n", - "#same value for each point, fixed to 1 (allows to get percentages of affected surface/distance)\n", - "#aggregate by summing\n", + "# disaggregate in meters\n", + 
"# same value for each point, fixed to 1 (allows to get percentages of affected surface/distance)\n", + "# aggregate by summing\n", "\n", "impact = u_lp.calc_geom_impact(\n", - " exp=EXP_MIX, impf_set=IMPF_SET, haz=HAZ,\n", - " res=1000, to_meters=True, disagg_met=u_lp.DisaggMethod.FIX, disagg_val=1.0,\n", - " agg_met=u_lp.AggMethod.SUM\n", - " );" + " exp=EXP_MIX,\n", + " impf_set=IMPF_SET,\n", + " haz=HAZ,\n", + " res=1000,\n", + " to_meters=True,\n", + " disagg_met=u_lp.DisaggMethod.FIX,\n", + " disagg_val=1.0,\n", + " agg_met=u_lp.AggMethod.SUM,\n", + ");" ] }, { @@ -206,7 +218,10 @@ ], "source": [ "import matplotlib.pyplot as plt\n", - "ax = u_lp.plot_eai_exp_geom(impact, legend_kwds={'label': 'percentage', 'orientation': 'horizontal'})" + "\n", + "ax = u_lp.plot_eai_exp_geom(\n", + " impact, legend_kwds={\"label\": \"percentage\", \"orientation\": \"horizontal\"}\n", + ")" ] }, { @@ -282,36 +297,60 @@ " from climada_petals.entity.exposures.black_marble import country_iso_geom\n", "\n", " # open the file containing the Netherlands admin-1 polygons\n", - " shp_file = shapereader.natural_earth(resolution='10m',\n", - " category='cultural',\n", - " name='admin_0_countries')\n", + " shp_file = shapereader.natural_earth(\n", + " resolution=\"10m\", category=\"cultural\", name=\"admin_0_countries\"\n", + " )\n", " shp_file = shapereader.Reader(shp_file)\n", "\n", " # extract the NL polygons\n", - " prov_names = {'Netherlands': ['Groningen', 'Drenthe',\n", - " 'Overijssel', 'Gelderland',\n", - " 'Limburg', 'Zeeland',\n", - " 'Noord-Brabant', 'Zuid-Holland',\n", - " 'Noord-Holland', 'Friesland',\n", - " 'Flevoland', 'Utrecht']\n", - " }\n", - " polygon_Netherlands, polygons_prov_NL = country_iso_geom(prov_names,\n", - " shp_file)\n", - " prov_geom_NL = {prov: geom for prov, geom in zip(list(prov_names.values())[0], list(polygons_prov_NL.values())[0])}\n", + " prov_names = {\n", + " \"Netherlands\": [\n", + " \"Groningen\",\n", + " \"Drenthe\",\n", + " \"Overijssel\",\n", + " \"Gelderland\",\n", + " \"Limburg\",\n", + " \"Zeeland\",\n", + " \"Noord-Brabant\",\n", + " \"Zuid-Holland\",\n", + " \"Noord-Holland\",\n", + " \"Friesland\",\n", + " \"Flevoland\",\n", + " \"Utrecht\",\n", + " ]\n", + " }\n", + " polygon_Netherlands, polygons_prov_NL = country_iso_geom(prov_names, shp_file)\n", + " prov_geom_NL = {\n", + " prov: geom\n", + " for prov, geom in zip(\n", + " list(prov_names.values())[0], list(polygons_prov_NL.values())[0]\n", + " )\n", + " }\n", "\n", " # assign a value to each admin-1 area (assumption 100'000 USD per inhabitant)\n", - " population_prov_NL = {'Drenthe':493449, 'Flevoland':422202,\n", - " 'Friesland':649988, 'Gelderland':2084478,\n", - " 'Groningen':585881, 'Limburg':1118223,\n", - " 'Noord-Brabant':2562566, 'Noord-Holland':2877909,\n", - " 'Overijssel':1162215, 'Zuid-Holland':3705625,\n", - " 'Utrecht':1353596, 'Zeeland':383689}\n", - " value_prov_NL = {n: 100000 * population_prov_NL[n] for n in population_prov_NL.keys()}\n", + " population_prov_NL = {\n", + " \"Drenthe\": 493449,\n", + " \"Flevoland\": 422202,\n", + " \"Friesland\": 649988,\n", + " \"Gelderland\": 2084478,\n", + " \"Groningen\": 585881,\n", + " \"Limburg\": 1118223,\n", + " \"Noord-Brabant\": 2562566,\n", + " \"Noord-Holland\": 2877909,\n", + " \"Overijssel\": 1162215,\n", + " \"Zuid-Holland\": 3705625,\n", + " \"Utrecht\": 1353596,\n", + " \"Zeeland\": 383689,\n", + " }\n", + " value_prov_NL = {\n", + " n: 100000 * population_prov_NL[n] for n in population_prov_NL.keys()\n", + " }\n", "\n", " # combine 
into GeoDataFrame and add a coordinate reference system to it:\n", - " df1 = pd.DataFrame.from_dict(population_prov_NL, orient='index', columns=['population']).join(\n", - " pd.DataFrame.from_dict(value_prov_NL, orient='index', columns=['value']))\n", - " df1['geometry'] = [prov_geom_NL[prov] for prov in df1.index]\n", + " df1 = pd.DataFrame.from_dict(\n", + " population_prov_NL, orient=\"index\", columns=[\"population\"]\n", + " ).join(pd.DataFrame.from_dict(value_prov_NL, orient=\"index\", columns=[\"value\"]))\n", + " df1[\"geometry\"] = [prov_geom_NL[prov] for prov in df1.index]\n", " gdf_polys = gpd.GeoDataFrame(df1)\n", " gdf_polys = gdf_polys.set_crs(epsg=4326)\n", " return gdf_polys" @@ -417,7 +456,7 @@ ], "source": [ "exp_nl_poly = Exposures(gdf_poly())\n", - "exp_nl_poly.gdf['impf_WS'] = 1\n", + "exp_nl_poly.gdf[\"impf_WS\"] = 1\n", "exp_nl_poly.gdf.head()" ] }, @@ -456,7 +495,7 @@ ], "source": [ "# take a look\n", - "exp_nl_poly.gdf.plot('value', legend=True, cmap='OrRd')" + "exp_nl_poly.gdf.plot(\"value\", legend=True, cmap=\"OrRd\")" ] }, { @@ -557,9 +596,13 @@ ], "source": [ "imp_deg = u_lp.calc_geom_impact(\n", - " exp=exp_nl_poly, impf_set=impf_set, haz=storms,\n", - " res=0.005, disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None,\n", - " agg_met=u_lp.AggMethod.SUM\n", + " exp=exp_nl_poly,\n", + " impf_set=impf_set,\n", + " haz=storms,\n", + " res=0.005,\n", + " disagg_met=u_lp.DisaggMethod.DIV,\n", + " disagg_val=None,\n", + " agg_met=u_lp.AggMethod.SUM,\n", ")" ] }, @@ -621,9 +664,14 @@ ], "source": [ "imp_m = u_lp.calc_geom_impact(\n", - " exp=exp_nl_poly, impf_set=impf_set, haz=storms,\n", - " res=500, to_meters=True, disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None,\n", - " agg_met=u_lp.AggMethod.SUM\n", + " exp=exp_nl_poly,\n", + " impf_set=impf_set,\n", + " haz=storms,\n", + " res=500,\n", + " to_meters=True,\n", + " disagg_met=u_lp.DisaggMethod.DIV,\n", + " disagg_val=None,\n", + " agg_met=u_lp.AggMethod.SUM,\n", ")" ] }, @@ -711,15 +759,14 @@ }, "outputs": [], "source": [ - "#regular grid from exposures bounds\n", + "# regular grid from exposures bounds\n", "import climada.util.coordinates as u_coord\n", + "\n", "res = 0.1\n", "(_, _, xmax, ymax) = exp_nl_poly.gdf.geometry.bounds.max()\n", "(xmin, ymin, _, _) = exp_nl_poly.gdf.geometry.bounds.min()\n", "bounds = (xmin, ymin, xmax, ymax)\n", - "height, width, trafo = u_coord.pts_to_raster_meta(\n", - " bounds, (res, res)\n", - " )\n", + "height, width, trafo = u_coord.pts_to_raster_meta(bounds, (res, res))\n", "x_grid, y_grid = u_coord.raster_to_meshgrid(trafo, width, height)" ] }, @@ -747,9 +794,13 @@ ], "source": [ "imp_g = u_lp.calc_grid_impact(\n", - " exp=exp_nl_poly, impf_set=impf_set, haz=storms,\n", - " grid=(x_grid, y_grid), disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None,\n", - " agg_met=u_lp.AggMethod.SUM\n", + " exp=exp_nl_poly,\n", + " impf_set=impf_set,\n", + " haz=storms,\n", + " grid=(x_grid, y_grid),\n", + " disagg_met=u_lp.DisaggMethod.DIV,\n", + " disagg_val=None,\n", + " agg_met=u_lp.AggMethod.SUM,\n", ")" ] }, @@ -931,8 +982,11 @@ "source": [ "# Disaggregate exposure to 10'000 metre grid, each point gets average value within polygon.\n", "exp_pnt = u_lp.exp_geom_to_pnt(\n", - " exp_nl_poly, res=10000, to_meters=True,\n", - " disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None\n", + " exp_nl_poly,\n", + " res=10000,\n", + " to_meters=True,\n", + " disagg_met=u_lp.DisaggMethod.DIV,\n", + " disagg_val=None,\n", ")\n", "exp_pnt.gdf.head()" ] @@ -1073,8 +1127,12 @@ "source": [ "# Disaggregate 
exposure to 0.1° grid, no value disaggregation specified --> replicate initial value\n", "exp_pnt2 = u_lp.exp_geom_to_pnt(\n", - " exp_nl_poly, res=0.1, to_meters=False,\n", - " disagg_met=u_lp.DisaggMethod.FIX, disagg_val=None)\n", + " exp_nl_poly,\n", + " res=0.1,\n", + " to_meters=False,\n", + " disagg_met=u_lp.DisaggMethod.FIX,\n", + " disagg_val=None,\n", + ")\n", "exp_pnt2.gdf.head()" ] }, @@ -1214,8 +1272,12 @@ "# Disaggregate exposure to 1'000 metre grid, each point gets value corresponding to\n", "# its representative area (1'000^2).\n", "exp_pnt3 = u_lp.exp_geom_to_pnt(\n", - " exp_nl_poly, res=1000, to_meters=True,\n", - " disagg_met=u_lp.DisaggMethod.FIX, disagg_val=10e6)\n", + " exp_nl_poly,\n", + " res=1000,\n", + " to_meters=True,\n", + " disagg_met=u_lp.DisaggMethod.FIX,\n", + " disagg_val=10e6,\n", + ")\n", "exp_pnt3.gdf.head()" ] }, @@ -1355,8 +1417,12 @@ "# Disaggregate exposure to 1'000 metre grid, each point gets value corresponding to 1\n", "# After dissagregation, each point has a value equal to the percentage of area of the polygon\n", "exp_pnt4 = u_lp.exp_geom_to_pnt(\n", - " exp_nl_poly, res=1000, to_meters=True,\n", - " disagg_met=u_lp.DisaggMethod.DIV, disagg_val=1)\n", + " exp_nl_poly,\n", + " res=1000,\n", + " to_meters=True,\n", + " disagg_met=u_lp.DisaggMethod.DIV,\n", + " disagg_val=1,\n", + ")\n", "exp_pnt4.gdf.tail()" ] }, @@ -1494,19 +1560,18 @@ ], "source": [ "# disaggregate on pre-defined grid\n", - "#regular grid from exposures bounds\n", + "# regular grid from exposures bounds\n", "import climada.util.coordinates as u_coord\n", + "\n", "res = 0.1\n", "(_, _, xmax, ymax) = exp_nl_poly.gdf.geometry.bounds.max()\n", "(xmin, ymin, _, _) = exp_nl_poly.gdf.geometry.bounds.min()\n", "bounds = (xmin, ymin, xmax, ymax)\n", - "height, width, trafo = u_coord.pts_to_raster_meta(\n", - " bounds, (res, res)\n", - " )\n", + "height, width, trafo = u_coord.pts_to_raster_meta(bounds, (res, res))\n", "x_grid, y_grid = u_coord.raster_to_meshgrid(trafo, width, height)\n", "exp_pnt5 = u_lp.exp_geom_to_grid(\n", - " exp_nl_poly, grid=(x_grid, y_grid),\n", - " disagg_met=u_lp.DisaggMethod.DIV, disagg_val=1)\n", + " exp_nl_poly, grid=(x_grid, y_grid), disagg_met=u_lp.DisaggMethod.DIV, disagg_val=1\n", + ")\n", "exp_pnt5.gdf.tail()" ] }, @@ -1589,7 +1654,7 @@ ], "source": [ "# Plot point-impacts and aggregated impacts\n", - "imp_pnt.plot_hexbin_eai_exposure();\n", + "imp_pnt.plot_hexbin_eai_exposure()\n", "u_lp.plot_eai_exp_geom(imp_geom);" ] }, @@ -1727,7 +1792,7 @@ "outputs": [], "source": [ "def gdf_lines():\n", - " gdf_lines = gpd.read_file(Path(DEMO_DIR,'nl_rails.gpkg'))\n", + " gdf_lines = gpd.read_file(Path(DEMO_DIR, \"nl_rails.gpkg\"))\n", " gdf_lines = gdf_lines.to_crs(epsg=4326)\n", " return gdf_lines" ] @@ -1832,8 +1897,8 @@ ], "source": [ "exp_nl_lines = Exposures(gdf_lines())\n", - "exp_nl_lines.gdf['impf_WS'] = 1\n", - "exp_nl_lines.gdf['value'] = 1\n", + "exp_nl_lines.gdf[\"impf_WS\"] = 1\n", + "exp_nl_lines.gdf[\"value\"] = 1\n", "exp_nl_lines.gdf.head()" ] }, @@ -1861,7 +1926,7 @@ } ], "source": [ - "exp_nl_lines.gdf.plot('value', cmap='inferno');" + "exp_nl_lines.gdf.plot(\"value\", cmap=\"inferno\");" ] }, { @@ -1911,9 +1976,13 @@ ], "source": [ "imp_deg = u_lp.calc_geom_impact(\n", - " exp=exp_nl_lines, impf_set=impf_set, haz=storms,\n", - " res=0.005, disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None,\n", - " agg_met=u_lp.AggMethod.SUM\n", + " exp=exp_nl_lines,\n", + " impf_set=impf_set,\n", + " haz=storms,\n", + " res=0.005,\n", + " 
disagg_met=u_lp.DisaggMethod.DIV,\n", + " disagg_val=None,\n", + " agg_met=u_lp.AggMethod.SUM,\n", ")" ] }, @@ -1975,9 +2044,14 @@ ], "source": [ "imp_m = u_lp.calc_geom_impact(\n", - " exp=exp_nl_lines, impf_set=impf_set, haz=storms,\n", - " res=500, to_meters=True, disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None,\n", - " agg_met=u_lp.AggMethod.SUM\n", + " exp=exp_nl_lines,\n", + " impf_set=impf_set,\n", + " haz=storms,\n", + " res=500,\n", + " to_meters=True,\n", + " disagg_met=u_lp.DisaggMethod.DIV,\n", + " disagg_val=None,\n", + " agg_met=u_lp.AggMethod.SUM,\n", ")" ] }, @@ -2028,8 +2102,11 @@ ], "source": [ "import numpy as np\n", + "\n", "diff = np.max((imp_deg.eai_exp - imp_m.eai_exp) / imp_deg.eai_exp)\n", - "print(f\"The largest relative different between degrees and meters impact in this example is {diff}\")" + "print(\n", + " f\"The largest relative different between degrees and meters impact in this example is {diff}\"\n", + ")" ] }, { @@ -2184,7 +2261,11 @@ "source": [ "# 0.1° distance between points, average value disaggregation\n", "exp_pnt = u_lp.exp_geom_to_pnt(\n", - " exp_nl_lines, res=0.1, to_meters=False, disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None\n", + " exp_nl_lines,\n", + " res=0.1,\n", + " to_meters=False,\n", + " disagg_met=u_lp.DisaggMethod.DIV,\n", + " disagg_val=None,\n", ")\n", "exp_pnt.gdf.head()" ] @@ -2317,7 +2398,11 @@ "source": [ "# 1000m distance between points, no value disaggregation\n", "exp_pnt2 = u_lp.exp_geom_to_pnt(\n", - " exp_nl_lines, res=1000, to_meters=True, disagg_met=u_lp.DisaggMethod.FIX, disagg_val=None\n", + " exp_nl_lines,\n", + " res=1000,\n", + " to_meters=True,\n", + " disagg_met=u_lp.DisaggMethod.FIX,\n", + " disagg_val=None,\n", ")\n", "exp_pnt2.gdf.head()" ] @@ -2450,7 +2535,11 @@ "source": [ "# 1000m distance between points, equal value disaggregation\n", "exp_pnt3 = u_lp.exp_geom_to_pnt(\n", - " exp_nl_lines, res=1000, to_meters=True, disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None\n", + " exp_nl_lines,\n", + " res=1000,\n", + " to_meters=True,\n", + " disagg_met=u_lp.DisaggMethod.DIV,\n", + " disagg_val=None,\n", ")\n", "exp_pnt3.gdf.head()" ] @@ -2583,7 +2672,11 @@ "source": [ "# 1000m distance between points, disaggregation of value according to representative distance\n", "exp_pnt4 = u_lp.exp_geom_to_pnt(\n", - " exp_nl_lines, res=1000, to_meters=True, disagg_met=u_lp.DisaggMethod.FIX, disagg_val=1000\n", + " exp_nl_lines,\n", + " res=1000,\n", + " to_meters=True,\n", + " disagg_met=u_lp.DisaggMethod.FIX,\n", + " disagg_val=1000,\n", ")\n", "exp_pnt4.gdf.head()" ] diff --git a/doc/tutorial/climada_entity_ImpactFuncSet.ipynb b/doc/tutorial/climada_entity_ImpactFuncSet.ipynb index 2702aa60f..6df482925 100644 --- a/doc/tutorial/climada_entity_ImpactFuncSet.ipynb +++ b/doc/tutorial/climada_entity_ImpactFuncSet.ipynb @@ -113,7 +113,7 @@ ")\n", "\n", "# check if the all the attributes are set correctly\n", - "imp_fun.check()\n" + "imp_fun.check()" ] }, { @@ -131,7 +131,7 @@ ], "source": [ "# Calculate the mdr at hazard intensity 18.7 m/s\n", - "print('Mean damage ratio at intensity 18.7 m/s: ', imp_fun.calc_mdr(18.7))" + "print(\"Mean damage ratio at intensity 18.7 m/s: \", imp_fun.calc_mdr(18.7))" ] }, { @@ -282,7 +282,7 @@ "imp_fun_3.check()\n", "\n", "# add the 2 impact functions into ImpactFuncSet\n", - "imp_fun_set = ImpactFuncSet([imp_fun_1, imp_fun_3])\n" + "imp_fun_set = ImpactFuncSet([imp_fun_1, imp_fun_3])" ] }, { @@ -345,7 +345,7 @@ ], "source": [ "# extract the TC impact function with id 1\n", - "impf_tc_1 = 
imp_fun_set.get_func('TC', 1)\n", + "impf_tc_1 = imp_fun_set.get_func(\"TC\", 1)\n", "# plot the impact function\n", "impf_tc_1.plot();" ] @@ -404,7 +404,7 @@ ], "source": [ "# removing the TC impact function with id 3\n", - "imp_fun_set.remove_func('TC', 3)\n", + "imp_fun_set.remove_func(\"TC\", 3)\n", "# plot all the remaining impact functions in imp_fun_set\n", "imp_fun_set.plot();" ] @@ -464,7 +464,7 @@ "# plot all the impact functions from the ImpactFuncSet\n", "imp_set_xlsx.plot()\n", "# adjust the plots\n", - "plt.subplots_adjust(right=1., top=4., hspace=0.4, wspace=0.4)" + "plt.subplots_adjust(right=1.0, top=4.0, hspace=0.4, wspace=0.4)" ] }, { @@ -483,7 +483,7 @@ "outputs": [], "source": [ "# write imp_set_xlsx into an excel file\n", - "imp_set_xlsx.write_excel('tutorial_impf_set.xlsx')" + "imp_set_xlsx.write_excel(\"tutorial_impf_set.xlsx\")" ] }, { @@ -512,7 +512,7 @@ "from climada.util.save import save\n", "\n", "# this generates a results folder in the current path and stores the output there\n", - "save('tutorial_impf_set.p', imp_set_xlsx)" + "save(\"tutorial_impf_set.p\", imp_set_xlsx)" ] }, { @@ -563,7 +563,7 @@ "# plot all the impact functions\n", "imp_fun_set_TC.plot()\n", "# adjust the plots\n", - "plt.subplots_adjust(right=1., top=4., hspace=0.4, wspace=0.4)" + "plt.subplots_adjust(right=1.0, top=4.0, hspace=0.4, wspace=0.4)" ] } ], diff --git a/doc/tutorial/climada_entity_LitPop.ipynb b/doc/tutorial/climada_entity_LitPop.ipynb index 8625fe394..56c2d065a 100644 --- a/doc/tutorial/climada_entity_LitPop.ipynb +++ b/doc/tutorial/climada_entity_LitPop.ipynb @@ -155,15 +155,19 @@ "source": [ "# Initiate a default LitPop exposure entity for Switzerland and Liechtenstein (ISO3-Codes 'CHE' and 'LIE'):\n", "try:\n", - " exp = LitPop.from_countries(['CHE', 'Liechtenstein']) # you can provide either single countries or a list of countries\n", + " exp = LitPop.from_countries(\n", + " [\"CHE\", \"Liechtenstein\"]\n", + " ) # you can provide either single countries or a list of countries\n", "except FileExistsError as err:\n", - " print(\"Reason for error: The GPW population data has not been downloaded, c.f. section 'Input data' above.\")\n", + " print(\n", + " \"Reason for error: The GPW population data has not been downloaded, c.f. 
section 'Input data' above.\"\n", + " )\n", " raise err\n", - "exp.plot_scatter();\n", + "exp.plot_scatter()\n", "\n", "# Note that `exp.gdf['region_id']` is a number identifying each country:\n", - "print('\\n Region IDs (`region_id`) in this exposure:')\n", - "print(exp.gdf['region_id'].unique())" + "print(\"\\n Region IDs (`region_id`) in this exposure:\")\n", + "print(exp.gdf[\"region_id\"].unique())" ] }, { @@ -240,9 +244,12 @@ ], "source": [ "# Initiate a LitPop exposure entity for Costa Rica with varied resolution, fin_mode, and exponents:\n", - "exp = LitPop.from_countries('Costa Rica', fin_mode='income_group', res_arcsec=120, exponents=(1,1)) # change the parameters and see what happens...\n", + "exp = LitPop.from_countries(\n", + " \"Costa Rica\", fin_mode=\"income_group\", res_arcsec=120, exponents=(1, 1)\n", + ") # change the parameters and see what happens...\n", "# exp = LitPop.from_countries('Costa Rica', fin_mode='gdp', res_arcsec=90, exponents=(3,0)) # example of variation\n", - "exp.plot_raster(); # note the log scale of the colorbar\n", + "exp.plot_raster()\n", + "# note the log scale of the colorbar\n", "exp.plot_scatter();" ] }, @@ -312,12 +319,16 @@ "source": [ "# You may want to check if you have downloaded dataset Gridded Population of the World (GPW), v4: Population Count, v4.11\n", "# (2000 and 2020) first\n", - "pop_2000 = LitPop.from_countries('CHE', fin_mode='pop', res_arcsec=300, exponents=(0,1), reference_year=2000)\n", + "pop_2000 = LitPop.from_countries(\n", + " \"CHE\", fin_mode=\"pop\", res_arcsec=300, exponents=(0, 1), reference_year=2000\n", + ")\n", "# Alternatively, we ca use `from_population`:\n", - "pop_2021 = LitPop.from_population(countries='Switzerland', res_arcsec=300, reference_year=2021)\n", + "pop_2021 = LitPop.from_population(\n", + " countries=\"Switzerland\", res_arcsec=300, reference_year=2021\n", + ")\n", "# Since no population data for 2021 is available, the closest data point, 2020, is used (see LOGGER.warning)\n", - "pop_2000.plot_scatter();\n", - "pop_2021.plot_scatter();\n", + "pop_2000.plot_scatter()\n", + "pop_2021.plot_scatter()\n", "\"\"\"Note the difference in total values on the color bar.\"\"\"" ] }, @@ -398,16 +409,18 @@ } ], "source": [ - "res = 30 # If you don't get an output after a very long time with country = \"MEX\", try with res = 100\n", - "country = 'JAM' # Try different countries, i.e. 'JAM', 'CHE', 'RWA', 'MEX'\n", - "markersize = 4 # for plotting\n", - "buffer_deg=.04\n", - "\n", - "exp_nightlights = LitPop.from_nightlight_intensity(countries=country, res_arcsec=res) # nightlight intensity\n", - "exp_nightlights.plot_hexbin(linewidth=markersize, buffer=buffer_deg);\n", + "res = 30 # If you don't get an output after a very long time with country = \"MEX\", try with res = 100\n", + "country = \"JAM\" # Try different countries, i.e. 
'JAM', 'CHE', 'RWA', 'MEX'\n", + "markersize = 4 # for plotting\n", + "buffer_deg = 0.04\n", + "\n", + "exp_nightlights = LitPop.from_nightlight_intensity(\n", + " countries=country, res_arcsec=res\n", + ") # nightlight intensity\n", + "exp_nightlights.plot_hexbin(linewidth=markersize, buffer=buffer_deg)\n", "# Compare to the population map:\n", "exp_population = LitPop().from_population(countries=country, res_arcsec=res)\n", - "exp_population.plot_hexbin(linewidth=markersize, buffer=buffer_deg);\n", + "exp_population.plot_hexbin(linewidth=markersize, buffer=buffer_deg)\n", "# Compare to default LitPop exposures:\n", "exp = LitPop.from_countries(countries=country, res_arcsec=res)\n", "exp.plot_hexbin(linewidth=markersize, buffer=buffer_deg);" @@ -495,29 +508,31 @@ "import climada.util.coordinates as u_coord\n", "import climada.entity.exposures.litpop as lp\n", "\n", - "country_iso3a = 'USA'\n", - "state_name = 'Florida'\n", + "country_iso3a = \"USA\"\n", + "state_name = \"Florida\"\n", "reslution_arcsec = 600\n", "\"\"\"First, we need to get the shape of Florida:\"\"\"\n", "admin1_info, admin1_shapes = u_coord.get_admin1_info(country_iso3a)\n", "admin1_info = admin1_info[country_iso3a]\n", "admin1_shapes = admin1_shapes[country_iso3a]\n", - "admin1_names = [record['name'] for record in admin1_info]\n", + "admin1_names = [record[\"name\"] for record in admin1_info]\n", "print(admin1_names)\n", "for idx, name in enumerate(admin1_names):\n", - " if admin1_names[idx]==state_name:\n", + " if admin1_names[idx] == state_name:\n", " break\n", - "print('Florida index: ' + str(idx))\n", + "print(\"Florida index: \" + str(idx))\n", "\n", "\"\"\"Secondly, we estimate the `total_value`\"\"\"\n", "# `total_value` required user input for `from_shape`, here we assume 5% of total value of the whole USA:\n", - "total_value = 0.05 * lp._get_total_value_per_country(country_iso3a, 'pc', 2020)\n", + "total_value = 0.05 * lp._get_total_value_per_country(country_iso3a, \"pc\", 2020)\n", "\n", "\"\"\"Then, we can initiate the exposures for Florida:\"\"\"\n", "start = time.process_time()\n", - "exp = LitPop.from_shape(admin1_shapes[idx], total_value, res_arcsec=600, reference_year=2020)\n", - "print(f'\\n Runtime `from_shape` : {time.process_time() - start:1.2f} sec.\\n')\n", - "exp.plot_scatter(vmin=100, buffer=.5);\n" + "exp = LitPop.from_shape(\n", + " admin1_shapes[idx], total_value, res_arcsec=600, reference_year=2020\n", + ")\n", + "print(f\"\\n Runtime `from_shape` : {time.process_time() - start:1.2f} sec.\\n\")\n", + "exp.plot_scatter(vmin=100, buffer=0.5);" ] }, { @@ -561,9 +576,13 @@ "# `from_shape_and_countries` does not require `total_value`, but is slower to compute than `from_shape`,\n", "# because first, the exposure for the whole USA is initiated:\n", "start = time.process_time()\n", - "exp = LitPop.from_shape_and_countries(admin1_shapes[idx], country_iso3a, res_arcsec=600, reference_year=2020)\n", - "print(f'\\n Runtime `from_shape_and_countries` : {time.process_time() - start:1.2f} sec.\\n')\n", - "exp.plot_scatter(vmin=100, buffer=.5);\n", + "exp = LitPop.from_shape_and_countries(\n", + " admin1_shapes[idx], country_iso3a, res_arcsec=600, reference_year=2020\n", + ")\n", + "print(\n", + " f\"\\n Runtime `from_shape_and_countries` : {time.process_time() - start:1.2f} sec.\\n\"\n", + ")\n", + "exp.plot_scatter(vmin=100, buffer=0.5)\n", "\"\"\"Note the differences in computational speed and total value between the two approaches\"\"\"" ] }, @@ -655,31 +674,36 @@ "from shapely.geometry import 
Polygon\n", "\n", "\"\"\"initiate LitPop exposures for a geographical box around the city of Zurich:\"\"\"\n", - "bounds = (8.41, 47.25, 8.70, 47.47) # (min_lon, max_lon, min_lat, max_lat)\n", - "total_value=1000 # required user input for `from_shape`, here we just assume USD 1000 of total value\n", - "shape = Polygon([\n", - " (bounds[0], bounds[3]),\n", - " (bounds[2], bounds[3]),\n", - " (bounds[2], bounds[1]),\n", - " (bounds[0], bounds[1])\n", - " ])\n", + "bounds = (8.41, 47.25, 8.70, 47.47) # (min_lon, max_lon, min_lat, max_lat)\n", + "total_value = 1000 # required user input for `from_shape`, here we just assume USD 1000 of total value\n", + "shape = Polygon(\n", + " [\n", + " (bounds[0], bounds[3]),\n", + " (bounds[2], bounds[3]),\n", + " (bounds[2], bounds[1]),\n", + " (bounds[0], bounds[1]),\n", + " ]\n", + ")\n", "import time\n", + "\n", "start = time.process_time()\n", "exp = LitPop.from_shape(shape, total_value)\n", - "print(f'\\n Runtime `from_shape` : {time.process_time() - start:1.2f} sec.\\n')\n", - "exp.plot_scatter();\n", + "print(f\"\\n Runtime `from_shape` : {time.process_time() - start:1.2f} sec.\\n\")\n", + "exp.plot_scatter()\n", "# `from_shape_and_countries` does not require `total_value`, but is slower to compute:\n", "start = time.process_time()\n", - "exp = LitPop.from_shape_and_countries(shape, 'Switzerland')\n", - "print(f'\\n Runtime `from_shape_and_countries` : {time.process_time() - start:1.2f} sec.\\n')\n", - "exp.plot_scatter();\n", + "exp = LitPop.from_shape_and_countries(shape, \"Switzerland\")\n", + "print(\n", + " f\"\\n Runtime `from_shape_and_countries` : {time.process_time() - start:1.2f} sec.\\n\"\n", + ")\n", + "exp.plot_scatter()\n", "\"\"\"Note the difference in total value between the two exposure sets!\"\"\"\n", "\n", "\"\"\"For comparison, initiate population exposure for a geographical box around the city of Zurich:\"\"\"\n", "start = time.process_time()\n", "exp_pop = LitPop.from_population(shape=shape)\n", - "print(f'\\n Runtime `from_population` : {time.process_time() - start:1.2f} sec.\\n')\n", - "exp_pop.plot_scatter();\n", + "print(f\"\\n Runtime `from_population` : {time.process_time() - start:1.2f} sec.\\n\")\n", + "exp_pop.plot_scatter()\n", "\n", "\"\"\"Population exposure for a custom shape can be initiated directly via `set_population` without providing `total_value`\"\"\"" ] @@ -727,14 +751,18 @@ "source": [ "# Initiate GDP-Entity for Switzerland, with and without admin1_calc:\n", "\n", - "ent_adm0 = LitPop.from_countries('CHE', res_arcsec=120, fin_mode='gdp', admin1_calc=False)\n", + "ent_adm0 = LitPop.from_countries(\n", + " \"CHE\", res_arcsec=120, fin_mode=\"gdp\", admin1_calc=False\n", + ")\n", "ent_adm0.set_geometry_points()\n", "\n", - "ent_adm1 = LitPop.from_countries('CHE', res_arcsec=120, fin_mode='gdp', admin1_calc=True)\n", + "ent_adm1 = LitPop.from_countries(\n", + " \"CHE\", res_arcsec=120, fin_mode=\"gdp\", admin1_calc=True\n", + ")\n", "\n", "ent_adm0.check()\n", "ent_adm1.check()\n", - "print('Done.')" + "print(\"Done.\")" ] }, { @@ -788,14 +816,15 @@ "source": [ "# Plotting:\n", "from matplotlib import colors\n", - "norm=colors.LogNorm(vmin=1e5, vmax=1e9) # setting range for the log-normal scale\n", + "\n", + "norm = colors.LogNorm(vmin=1e5, vmax=1e9) # setting range for the log-normal scale\n", "markersize = 5\n", - "ent_adm0.plot_hexbin(buffer=.3, norm=norm, linewidth=markersize);\n", - "ent_adm1.plot_hexbin(buffer=.3, norm=norm, linewidth=markersize);\n", + "ent_adm0.plot_hexbin(buffer=0.3, norm=norm, 
linewidth=markersize)\n", + "ent_adm1.plot_hexbin(buffer=0.3, norm=norm, linewidth=markersize)\n", "\n", - "print('admin-0: First figure')\n", - "print('admin-1: Second figure')\n", - "'''Do you spot the small differences in Graubünden (eastern Switzerland)?'''" + "print(\"admin-0: First figure\")\n", + "print(\"admin-1: Second figure\")\n", + "\"\"\"Do you spot the small differences in Graubünden (eastern Switzerland)?\"\"\"" ] } ], diff --git a/doc/tutorial/climada_entity_MeasureSet.ipynb b/doc/tutorial/climada_entity_MeasureSet.ipynb index e1b93a103..812198362 100644 --- a/doc/tutorial/climada_entity_MeasureSet.ipynb +++ b/doc/tutorial/climada_entity_MeasureSet.ipynb @@ -127,28 +127,28 @@ "\n", "# define measure\n", "meas = Measure(\n", - " name='Mangrove',\n", - " haz_type='TC',\n", + " name=\"Mangrove\",\n", + " haz_type=\"TC\",\n", " color_rgb=np.array([1, 1, 1]),\n", " cost=500000000,\n", " mdd_impact=(1, 0),\n", " paa_impact=(1, -0.15),\n", - " hazard_inten_imp=(1, -10), # reduces intensity by 10\n", + " hazard_inten_imp=(1, -10), # reduces intensity by 10\n", ")\n", "\n", "# impact functions\n", "impf_tc = ImpfTropCyclone.from_emanuel_usa()\n", "impf_all = ImpactFuncSet([impf_tc])\n", - "impf_all.plot();\n", + "impf_all.plot()\n", "\n", "# dummy Hazard and Exposures\n", - "haz = Hazard('TC') # this measure does not change hazard\n", - "exp = Exposures() # this measure does not change exposures\n", + "haz = Hazard(\"TC\") # this measure does not change hazard\n", + "exp = Exposures() # this measure does not change exposures\n", "\n", "# new impact functions\n", "new_exp, new_impfs, new_haz = meas.apply(exp, impf_all, haz)\n", - "axes = new_impfs.plot();\n", - "axes.set_title('TC: Modified impact function')" + "axes = new_impfs.plot()\n", + "axes.set_title(\"TC: Modified impact function\")" ] }, { @@ -228,8 +228,8 @@ "\n", "# define measure\n", "meas = Measure(\n", - " name='Mangrove',\n", - " haz_type='TC',\n", + " name=\"Mangrove\",\n", + " haz_type=\"TC\",\n", " color_rgb=np.array([1, 1, 1]),\n", " cost=500000000,\n", " hazard_freq_cutoff=0.0255,\n", @@ -250,14 +250,16 @@ "# new hazard\n", "new_exp, new_impfs, new_haz = meas.apply(exp, impf_all, haz)\n", "# if you look at the maximum intensity per centroid: new_haz does not contain the event with smaller impact (the most frequent)\n", - "haz.plot_intensity(0);\n", - "new_haz.plot_intensity(0);\n", + "haz.plot_intensity(0)\n", + "new_haz.plot_intensity(0)\n", "# you might also compute the exceedance frequency curve of both hazard\n", "imp = ImpactCalc(exp, impf_all, haz).impact()\n", - "ax = imp.calc_freq_curve().plot(label='original');\n", + "ax = imp.calc_freq_curve().plot(label=\"original\")\n", "\n", "new_imp = ImpactCalc(new_exp, new_impfs, new_haz).impact()\n", - "new_imp.calc_freq_curve().plot(axis=ax, label='measure'); # the damages for events with return periods > 1/0.0255 ~ 40 are 0" + "new_imp.calc_freq_curve().plot(\n", + " axis=ax, label=\"measure\"\n", + "); # the damages for events with return periods > 1/0.0255 ~ 40 are 0" ] }, { @@ -361,12 +363,12 @@ "\n", "# define measure\n", "meas = Measure(\n", - " name='Building code',\n", - " haz_type='TC',\n", + " name=\"Building code\",\n", + " haz_type=\"TC\",\n", " color_rgb=np.array([1, 1, 1]),\n", " cost=500000000,\n", " hazard_freq_cutoff=0.00455,\n", - " exp_region_id=[1], # apply measure to points close to exposures with region_id=1\n", + " exp_region_id=[1], # apply measure to points close to exposures with region_id=1\n", ")\n", "\n", "# impact functions\n", @@ 
-379,7 +381,7 @@ "\n", "# Exposures\n", "exp = Exposures.from_hdf5(EXP_DEMO_H5)\n", - "#exp['region_id'] = np.ones(exp.shape[0])\n", + "# exp['region_id'] = np.ones(exp.shape[0])\n", "exp.check()\n", "# all exposures have region_id=1\n", "exp.plot_hexbin(buffer=1.0)\n", @@ -449,8 +451,8 @@ "\n", "# define measure\n", "meas = Measure(\n", - " name='Insurance',\n", - " haz_type='TC',\n", + " name=\"Insurance\",\n", + " haz_type=\"TC\",\n", " color_rgb=np.array([1, 1, 1]),\n", " cost=500000000,\n", " risk_transf_attach=5.0e8,\n", @@ -471,12 +473,12 @@ "\n", "# impact before\n", "imp = ImpactCalc(exp, impf_all, haz).impact()\n", - "ax = imp.calc_freq_curve().plot(label='original');\n", + "ax = imp.calc_freq_curve().plot(label=\"original\")\n", "\n", "# impact after. risk_transf will be added to the cost of the measure\n", "imp_new, risk_transf = meas.calc_impact(exp, impf_all, haz)\n", - "imp_new.calc_freq_curve().plot(axis=ax, label='measure');\n", - "print('risk_transfer {:.3}'.format(risk_transf.aai_agg))" + "imp_new.calc_freq_curve().plot(axis=ax, label=\"measure\")\n", + "print(\"risk_transfer {:.3}\".format(risk_transf.aai_agg))" ] }, { @@ -515,8 +517,8 @@ "from climada.entity.measures import Measure, MeasureSet\n", "\n", "meas_1 = Measure(\n", - " haz_type='TC',\n", - " name='Mangrove',\n", + " haz_type=\"TC\",\n", + " name=\"Mangrove\",\n", " color_rgb=np.array([1, 1, 1]),\n", " cost=500000000,\n", " mdd_impact=(1, 2),\n", @@ -526,8 +528,8 @@ ")\n", "\n", "meas_2 = Measure(\n", - " haz_type='TC',\n", - " name='Sandbags',\n", + " haz_type=\"TC\",\n", + " name=\"Sandbags\",\n", " color_rgb=np.array([1, 1, 1]),\n", " cost=22000000,\n", " mdd_impact=(1, 2),\n", @@ -543,7 +545,7 @@ "meas_set.check()\n", "\n", "# select one measure\n", - "meas_sel = meas_set.get_measure(name='Sandbags')\n", + "meas_sel = meas_set.get_measure(name=\"Sandbags\")\n", "print(meas_sel[0].name, meas_sel[0].cost)" ] }, @@ -582,7 +584,7 @@ "from climada.util import ENT_TEMPLATE_XLS\n", "\n", "# Fill DataFrame from Excel file\n", - "file_name = ENT_TEMPLATE_XLS # provide absolute path of the excel file\n", + "file_name = ENT_TEMPLATE_XLS # provide absolute path of the excel file\n", "meas_set = MeasureSet.from_excel(file_name)\n", "meas_set" ] @@ -611,11 +613,11 @@ "from climada.util import ENT_TEMPLATE_XLS\n", "\n", "# Fill DataFrame from Excel file\n", - "file_name = ENT_TEMPLATE_XLS # provide absolute path of the excel file\n", + "file_name = ENT_TEMPLATE_XLS # provide absolute path of the excel file\n", "meas_set = MeasureSet.from_excel(file_name)\n", "\n", "# write file\n", - "meas_set.write_excel('results/tutorial_meas_set.xlsx')" + "meas_set.write_excel(\"results/tutorial_meas_set.xlsx\")" ] }, { @@ -638,8 +640,9 @@ "outputs": [], "source": [ "from climada.util.save import save\n", + "\n", "# this generates a results folder in the current path and stores the output there\n", - "save('tutorial_meas_set.p', meas_set)" + "save(\"tutorial_meas_set.p\", meas_set)" ] } ], diff --git a/doc/tutorial/climada_hazard_Hazard.ipynb b/doc/tutorial/climada_hazard_Hazard.ipynb index 94dd517dc..19cc98a0f 100644 --- a/doc/tutorial/climada_hazard_Hazard.ipynb +++ b/doc/tutorial/climada_hazard_Hazard.ipynb @@ -95,27 +95,33 @@ "import numpy as np\n", "from climada.hazard import Hazard\n", "from climada.util.constants import HAZ_DEMO_FL\n", + "\n", "# to hide the warnings\n", "import warnings\n", - "warnings.filterwarnings('ignore')\n", "\n", - "# read intensity from raster file HAZ_DEMO_FL and set frequency for the contained 
event\n", - "haz_ven = Hazard.from_raster([HAZ_DEMO_FL], attrs={'frequency':np.ones(1)/2}, haz_type='FL')\n", + "warnings.filterwarnings(\"ignore\")\n", + "\n", + "# read intensity from raster file HAZ_DEMO_FL and set frequency for the contained event\n", + "haz_ven = Hazard.from_raster(\n", + " [HAZ_DEMO_FL], attrs={\"frequency\": np.ones(1) / 2}, haz_type=\"FL\"\n", + ")\n", "haz_ven.check()\n", "\n", "# The masked values of the raster are set to 0\n", "# Sometimes the raster file does not contain all the information, as in this case the mask value -9999\n", "# We mask it manuall and plot it using plot_intensity()\n", - "haz_ven.intensity[haz_ven.intensity==-9999] = 0\n", - "haz_ven.plot_intensity(1, smooth=False) # if smooth=True (default value) is used, the computation time might increase\n", + "haz_ven.intensity[haz_ven.intensity == -9999] = 0\n", + "haz_ven.plot_intensity(\n", + " 1, smooth=False\n", + ") # if smooth=True (default value) is used, the computation time might increase\n", "\n", "# per default the following attributes have been set\n", - "print('event_id: ', haz_ven.event_id)\n", - "print('event_name: ', haz_ven.event_name)\n", - "print('date: ', haz_ven.date)\n", - "print('frequency: ', haz_ven.frequency)\n", - "print('orig: ', haz_ven.orig)\n", - "print('min, max fraction: ', haz_ven.fraction.min(), haz_ven.fraction.max())" + "print(\"event_id: \", haz_ven.event_id)\n", + "print(\"event_name: \", haz_ven.event_name)\n", + "print(\"date: \", haz_ven.date)\n", + "print(\"frequency: \", haz_ven.frequency)\n", + "print(\"orig: \", haz_ven.orig)\n", + "print(\"min, max fraction: \", haz_ven.fraction.min(), haz_ven.fraction.max())" ] }, { @@ -135,10 +141,7 @@ "metadata": {}, "outputs": [], "source": [ - "# Put your code here\n", - "\n", - "\n", - "\n" + "# Put your code here" ] }, { @@ -212,30 +215,42 @@ "# Solution:\n", "\n", "# 1. The CRS can be reprojected using dst_crs option\n", - "haz = Hazard.from_raster([HAZ_DEMO_FL], dst_crs='epsg:2201', haz_type='FL')\n", + "haz = Hazard.from_raster([HAZ_DEMO_FL], dst_crs=\"epsg:2201\", haz_type=\"FL\")\n", "haz.check()\n", - "print('\\n Solution 1:')\n", - "print('centroids CRS:', haz.centroids.crs)\n", - "print('raster info:', haz.centroids.get_meta())\n", + "print(\"\\n Solution 1:\")\n", + "print(\"centroids CRS:\", haz.centroids.crs)\n", + "print(\"raster info:\", haz.centroids.get_meta())\n", "\n", "# 2. Transformations of the coordinates can be set using the transform option and Affine\n", "from rasterio import Affine\n", - "haz = Hazard.from_raster([HAZ_DEMO_FL], haz_type='FL',\n", - " transform=Affine(0.009000000000000341, 0.0, -69.33714959699981, \\\n", - " 0.0, -0.009000000000000341, 10.42822096697894),\n", - " height=500, width=501)\n", + "\n", + "haz = Hazard.from_raster(\n", + " [HAZ_DEMO_FL],\n", + " haz_type=\"FL\",\n", + " transform=Affine(\n", + " 0.009000000000000341,\n", + " 0.0,\n", + " -69.33714959699981,\n", + " 0.0,\n", + " -0.009000000000000341,\n", + " 10.42822096697894,\n", + " ),\n", + " height=500,\n", + " width=501,\n", + ")\n", "haz.check()\n", - "print('\\n Solution 2:')\n", - "print('raster info:', haz.centroids.get_meta())\n", - "print('intensity size:', haz.intensity.shape)\n", + "print(\"\\n Solution 2:\")\n", + "print(\"raster info:\", haz.centroids.get_meta())\n", + "print(\"intensity size:\", haz.intensity.shape)\n", "\n", "# 3. 
A partial part of the raster can be loaded using the window or geometry\n", "from rasterio.windows import Window\n", - "haz = Hazard.from_raster([HAZ_DEMO_FL], haz_type='FL', window=Window(10, 10, 20, 30))\n", + "\n", + "haz = Hazard.from_raster([HAZ_DEMO_FL], haz_type=\"FL\", window=Window(10, 10, 20, 30))\n", "haz.check()\n", - "print('\\n Solution 3:')\n", - "print('raster info:', haz.centroids.get_meta())\n", - "print('intensity size:', haz.intensity.shape)" + "print(\"\\n Solution 3:\")\n", + "print(\"raster info:\", haz.centroids.get_meta())\n", + "print(\"intensity size:\", haz.intensity.shape)" ] }, { @@ -266,10 +281,13 @@ ], "source": [ "from climada.hazard import Hazard, Centroids\n", - "from climada.util import HAZ_DEMO_H5 # CLIMADA's Python file\n", + "from climada.util import HAZ_DEMO_H5 # CLIMADA's Python file\n", + "\n", "# Hazard needs to know the acronym of the hazard type to be constructed!!! Use 'NA' if not known.\n", - "haz_tc_fl = Hazard.from_hdf5(HAZ_DEMO_H5) # Historic tropical cyclones in Florida from 1990 to 2004\n", - "haz_tc_fl.check() # Use always the check() method to see if the hazard has been loaded correctly" + "haz_tc_fl = Hazard.from_hdf5(\n", + " HAZ_DEMO_H5\n", + ") # Historic tropical cyclones in Florida from 1990 to 2004\n", + "haz_tc_fl.check() # Use always the check() method to see if the hazard has been loaded correctly" ] }, { @@ -298,50 +316,152 @@ } ], "source": [ - "# setting points\n", + "# setting points\n", "import numpy as np\n", "from scipy import sparse\n", "\n", - "lat = np.array([26.933899, 26.957203, 26.783846, 26.645524, 26.897796, 26.925359, \\\n", - " 26.914768, 26.853491, 26.845099, 26.82651 , 26.842772, 26.825905, \\\n", - " 26.80465 , 26.788649, 26.704277, 26.71005 , 26.755412, 26.678449, \\\n", - " 26.725649, 26.720599, 26.71255 , 26.6649 , 26.664699, 26.663149, \\\n", - " 26.66875 , 26.638517, 26.59309 , 26.617449, 26.620079, 26.596795, \\\n", - " 26.577049, 26.524585, 26.524158, 26.523737, 26.520284, 26.547349, \\\n", - " 26.463399, 26.45905 , 26.45558 , 26.453699, 26.449999, 26.397299, \\\n", - " 26.4084 , 26.40875 , 26.379113, 26.3809 , 26.349068, 26.346349, \\\n", - " 26.348015, 26.347957])\n", - "\n", - "lon = np.array([-80.128799, -80.098284, -80.748947, -80.550704, -80.596929, \\\n", - " -80.220966, -80.07466 , -80.190281, -80.083904, -80.213493, \\\n", - " -80.0591 , -80.630096, -80.075301, -80.069885, -80.656841, \\\n", - " -80.190085, -80.08955 , -80.041179, -80.1324 , -80.091746, \\\n", - " -80.068579, -80.090698, -80.1254 , -80.151401, -80.058749, \\\n", - " -80.283371, -80.206901, -80.090649, -80.055001, -80.128711, \\\n", - " -80.076435, -80.080105, -80.06398 , -80.178973, -80.110519, \\\n", - " -80.057701, -80.064251, -80.07875 , -80.139247, -80.104316, \\\n", - " -80.188545, -80.21902 , -80.092391, -80.1575 , -80.102028, \\\n", - " -80.16885 , -80.116401, -80.08385 , -80.241305, -80.158855])\n", - "\n", - "n_cen = lon.size # number of centroids\n", - "n_ev = 10 # number of events\n", + "lat = np.array(\n", + " [\n", + " 26.933899,\n", + " 26.957203,\n", + " 26.783846,\n", + " 26.645524,\n", + " 26.897796,\n", + " 26.925359,\n", + " 26.914768,\n", + " 26.853491,\n", + " 26.845099,\n", + " 26.82651,\n", + " 26.842772,\n", + " 26.825905,\n", + " 26.80465,\n", + " 26.788649,\n", + " 26.704277,\n", + " 26.71005,\n", + " 26.755412,\n", + " 26.678449,\n", + " 26.725649,\n", + " 26.720599,\n", + " 26.71255,\n", + " 26.6649,\n", + " 26.664699,\n", + " 26.663149,\n", + " 26.66875,\n", + " 26.638517,\n", + " 26.59309,\n", + 
" 26.617449,\n", + " 26.620079,\n", + " 26.596795,\n", + " 26.577049,\n", + " 26.524585,\n", + " 26.524158,\n", + " 26.523737,\n", + " 26.520284,\n", + " 26.547349,\n", + " 26.463399,\n", + " 26.45905,\n", + " 26.45558,\n", + " 26.453699,\n", + " 26.449999,\n", + " 26.397299,\n", + " 26.4084,\n", + " 26.40875,\n", + " 26.379113,\n", + " 26.3809,\n", + " 26.349068,\n", + " 26.346349,\n", + " 26.348015,\n", + " 26.347957,\n", + " ]\n", + ")\n", + "\n", + "lon = np.array(\n", + " [\n", + " -80.128799,\n", + " -80.098284,\n", + " -80.748947,\n", + " -80.550704,\n", + " -80.596929,\n", + " -80.220966,\n", + " -80.07466,\n", + " -80.190281,\n", + " -80.083904,\n", + " -80.213493,\n", + " -80.0591,\n", + " -80.630096,\n", + " -80.075301,\n", + " -80.069885,\n", + " -80.656841,\n", + " -80.190085,\n", + " -80.08955,\n", + " -80.041179,\n", + " -80.1324,\n", + " -80.091746,\n", + " -80.068579,\n", + " -80.090698,\n", + " -80.1254,\n", + " -80.151401,\n", + " -80.058749,\n", + " -80.283371,\n", + " -80.206901,\n", + " -80.090649,\n", + " -80.055001,\n", + " -80.128711,\n", + " -80.076435,\n", + " -80.080105,\n", + " -80.06398,\n", + " -80.178973,\n", + " -80.110519,\n", + " -80.057701,\n", + " -80.064251,\n", + " -80.07875,\n", + " -80.139247,\n", + " -80.104316,\n", + " -80.188545,\n", + " -80.21902,\n", + " -80.092391,\n", + " -80.1575,\n", + " -80.102028,\n", + " -80.16885,\n", + " -80.116401,\n", + " -80.08385,\n", + " -80.241305,\n", + " -80.158855,\n", + " ]\n", + ")\n", + "\n", + "n_cen = lon.size # number of centroids\n", + "n_ev = 10 # number of events\n", "\n", "intensity = sparse.csr_matrix(np.random.random((n_ev, n_cen)))\n", "fraction = intensity.copy()\n", "fraction.data.fill(1)\n", "\n", - "haz = Hazard(haz_type='TC',\n", - " intensity=intensity,\n", - " fraction=fraction,\n", - " centroids=Centroids(lat=lat, lon=lon), # default crs used\n", - " units='m',\n", - " event_id=np.arange(n_ev, dtype=int),\n", - " event_name=['ev_12', 'ev_21', 'Maria', 'ev_35',\n", - " 'Irma', 'ev_16', 'ev_15', 'Edgar', 'ev_1', 'ev_9'],\n", - " date=np.array([721166, 734447, 734447, 734447, 721167,\n", - " 721166, 721167, 721200, 721166, 721166]),\n", - " orig=np.zeros(n_ev, bool),\n", - " frequency=np.ones(n_ev)/n_ev,)\n", + "haz = Hazard(\n", + " haz_type=\"TC\",\n", + " intensity=intensity,\n", + " fraction=fraction,\n", + " centroids=Centroids(lat=lat, lon=lon), # default crs used\n", + " units=\"m\",\n", + " event_id=np.arange(n_ev, dtype=int),\n", + " event_name=[\n", + " \"ev_12\",\n", + " \"ev_21\",\n", + " \"Maria\",\n", + " \"ev_35\",\n", + " \"Irma\",\n", + " \"ev_16\",\n", + " \"ev_15\",\n", + " \"Edgar\",\n", + " \"ev_1\",\n", + " \"ev_9\",\n", + " ],\n", + " date=np.array(\n", + " [721166, 734447, 734447, 734447, 721167, 721166, 721167, 721200, 721166, 721166]\n", + " ),\n", + " orig=np.zeros(n_ev, bool),\n", + " frequency=np.ones(n_ev) / n_ev,\n", + ")\n", "\n", "haz.check()\n", "haz.centroids.plot();" @@ -363,10 +483,17 @@ "# using from_pnt_bounds\n", "\n", "# bounds\n", - "left, bottom, right, top = -72, -3.0, -52.0, 22 # the bounds refer to the bounds of the center of the pixel\n", + "left, bottom, right, top = (\n", + " -72,\n", + " -3.0,\n", + " -52.0,\n", + " 22,\n", + ") # the bounds refer to the bounds of the center of the pixel\n", "# resolution\n", "res = 0.5\n", - "centroids = Centroids.from_pnt_bounds((left, bottom, right, top), res) # default crs used" + "centroids = Centroids.from_pnt_bounds(\n", + " (left, bottom, right, top), res\n", + ") # default crs used" ] }, { @@ -393,26 
+520,24 @@ "\n", "# raster info:\n", "# border upper left corner (of the pixel, not of the center of the pixel)\n", - "max_lat = top + res/2\n", - "min_lon = left - res/2\n", + "max_lat = top + res / 2\n", + "min_lon = left - res / 2\n", "# resolution in lat and lon\n", - "d_lat = -res # negative because starting in upper corner\n", - "d_lon = res # same step as d_lat\n", + "d_lat = -res # negative because starting in upper corner\n", + "d_lon = res # same step as d_lat\n", "# number of points\n", "n_lat, n_lon = centroids.shape\n", "\n", "# meta: raster specification\n", "meta = {\n", - " 'dtype': 'float32',\n", - " 'width': n_lon,\n", - " 'height': n_lat,\n", - " 'crs': DEF_CRS,\n", - " 'transform': rasterio.Affine(\n", - " a=d_lon, b=0.0, c=min_lon,\n", - " d=0.0, e=d_lat, f=max_lat),\n", + " \"dtype\": \"float32\",\n", + " \"width\": n_lon,\n", + " \"height\": n_lat,\n", + " \"crs\": DEF_CRS,\n", + " \"transform\": rasterio.Affine(a=d_lon, b=0.0, c=min_lon, d=0.0, e=d_lat, f=max_lat),\n", "}\n", "\n", - "centroids_from_meta = Centroids.from_meta(meta) # default crs used\n", + "centroids_from_meta = Centroids.from_meta(meta) # default crs used\n", "\n", "centroids_from_meta == centroids" ] @@ -446,27 +571,40 @@ "import numpy as np\n", "from scipy import sparse\n", "\n", - "n_ev = 10 # number of events\n", + "n_ev = 10 # number of events\n", "\n", "intensity = sparse.csr_matrix(np.random.random((n_ev, centroids.size)))\n", "fraction = intensity.copy()\n", "fraction.data.fill(1)\n", "\n", - "haz = Hazard('TC',\n", - " centroids=centroids,\n", - " intensity=intensity,\n", - " fraction=fraction,\n", - " units='m',\n", - " event_id=np.arange(n_ev, dtype=int),\n", - " event_name=['ev_12', 'ev_21', 'Maria', 'ev_35',\n", - " 'Irma', 'ev_16', 'ev_15', 'Edgar', 'ev_1', 'ev_9'],\n", - " date=np.array([721166, 734447, 734447, 734447, 721167,\n", - " 721166, 721167, 721200, 721166, 721166]),\n", - " orig=np.zeros(n_ev, bool),\n", - " frequency=np.ones(n_ev)/n_ev,)\n", + "haz = Hazard(\n", + " \"TC\",\n", + " centroids=centroids,\n", + " intensity=intensity,\n", + " fraction=fraction,\n", + " units=\"m\",\n", + " event_id=np.arange(n_ev, dtype=int),\n", + " event_name=[\n", + " \"ev_12\",\n", + " \"ev_21\",\n", + " \"Maria\",\n", + " \"ev_35\",\n", + " \"Irma\",\n", + " \"ev_16\",\n", + " \"ev_15\",\n", + " \"Edgar\",\n", + " \"ev_1\",\n", + " \"ev_9\",\n", + " ],\n", + " date=np.array(\n", + " [721166, 734447, 734447, 734447, 721167, 721166, 721167, 721200, 721166, 721166]\n", + " ),\n", + " orig=np.zeros(n_ev, bool),\n", + " frequency=np.ones(n_ev) / n_ev,\n", + ")\n", "\n", "haz.check()\n", - "print('Check centroids borders:', haz.centroids.total_bounds)\n", + "print(\"Check centroids borders:\", haz.centroids.total_bounds)\n", "haz.centroids.plot();" ] }, @@ -512,8 +650,7 @@ "metadata": {}, "outputs": [], "source": [ - "# Put your code here:\n", - "\n" + "# Put your code here:" ] }, { @@ -522,7 +659,7 @@ "metadata": {}, "outputs": [], "source": [ - "#help(hist_tc.centroids) # If you want to run it, do it after you execute the next block" + "# help(hist_tc.centroids) # If you want to run it, do it after you execute the next block" ] }, { @@ -548,26 +685,26 @@ "# SOLUTION:\n", "\n", "# 1.How many synthetic events are contained?\n", - "print('Number of total events:', haz_tc_fl.size)\n", - "print('Number of synthetic events:', np.logical_not(haz_tc_fl.orig).astype(int).sum())\n", + "print(\"Number of total events:\", haz_tc_fl.size)\n", + "print(\"Number of synthetic events:\", 
np.logical_not(haz_tc_fl.orig).astype(int).sum())\n", "\n", "# 2. Generate a hazard with historical hurricanes ocurring between 1995 and 2001.\n", - "hist_tc = haz_tc_fl.select(date=('1995-01-01', '2001-12-31'), orig=True)\n", - "print('Number of historical events between 1995 and 2001:', hist_tc.size)\n", + "hist_tc = haz_tc_fl.select(date=(\"1995-01-01\", \"2001-12-31\"), orig=True)\n", + "print(\"Number of historical events between 1995 and 2001:\", hist_tc.size)\n", "\n", "# 3. How many historical hurricanes occured in 1999? Which was the year with most hurricanes between 1995 and 2001?\n", - "ev_per_year = hist_tc.calc_year_set() # events ids per year\n", - "print('Number of events in 1999:', ev_per_year[1999].size)\n", + "ev_per_year = hist_tc.calc_year_set() # events ids per year\n", + "print(\"Number of events in 1999:\", ev_per_year[1999].size)\n", "max_year = 1995\n", "max_ev = ev_per_year[1995].size\n", "for year, ev in ev_per_year.items():\n", " if ev.size > max_ev:\n", " max_year = year\n", - "print('Year with most hurricanes between 1995 and 2001:', max_year)\n", + "print(\"Year with most hurricanes between 1995 and 2001:\", max_year)\n", "\n", - "# 4. What is the number of centroids with distance to coast smaller than 1km?\n", + "# 4. What is the number of centroids with distance to coast smaller than 1km?\n", "num_cen_coast = np.argwhere(hist_tc.centroids.get_dist_coast() < 1000).size\n", - "print('Number of centroids close to coast: ', num_cen_coast)" + "print(\"Number of centroids close to coast: \", num_cen_coast)" ] }, { @@ -745,14 +882,16 @@ ], "source": [ "# 1. intensities of the largest event (defined as greater sum of intensities):\n", - "# all events:\n", - "haz_tc_fl.plot_intensity(event=-1) # largest historical event: 1992230N11325 hurricane ANDREW\n", + "# all events:\n", + "haz_tc_fl.plot_intensity(\n", + " event=-1\n", + ") # largest historical event: 1992230N11325 hurricane ANDREW\n", "\n", "# 2. maximum intensities at each centroid:\n", "haz_tc_fl.plot_intensity(event=0)\n", "\n", "# 3. intensities of hurricane 1998295N12284:\n", - "haz_tc_fl.plot_intensity(event='1998295N12284', cmap='BuGn') # setting color map\n", + "haz_tc_fl.plot_intensity(event=\"1998295N12284\", cmap=\"BuGn\") # setting color map\n", "\n", "# 4. tropical cyclone intensities maps for the return periods [10, 50, 75, 100]\n", "_, res = haz_tc_fl.plot_rp_intensity([10, 50, 75, 100])\n", @@ -760,6 +899,7 @@ "# 5. tropical cyclone return period maps for the threshold intensities [30, 40]\n", "return_periods, label, column_label = haz_tc_fl.local_return_period([30, 40])\n", "from climada.util.plot import plot_from_gdf\n", + "\n", "plot_from_gdf(return_periods, colorbar_name=label, title_subplots=column_label)\n", "\n", "# 6. 
intensities of all the events in centroid with id 50\n", @@ -791,9 +931,9 @@ "import matplotlib.pyplot as plt\n", "\n", "fig, ax1, fontsize = make_map(1) # map\n", - "ax2 = fig.add_subplot(2, 1, 2) # add regular axes\n", - "haz_tc_fl.plot_intensity(axis=ax1, event=0) # plot original resolution\n", - "ax1.plot(-80, 26, 'or', mfc='none', markersize=12)\n", + "ax2 = fig.add_subplot(2, 1, 2) # add regular axes\n", + "haz_tc_fl.plot_intensity(axis=ax1, event=0) # plot original resolution\n", + "ax1.plot(-80, 26, \"or\", mfc=\"none\", markersize=12)\n", "haz_tc_fl.plot_intensity(axis=ax2, centr=(26, -80))\n", "fig.subplots_adjust(hspace=6.5)" ] @@ -830,9 +970,9 @@ ], "source": [ "# If you see an error message, try to create a depository named results in the repository tutorial.\n", - "haz_tc_fl.write_hdf5('results/haz_tc_fl.h5')\n", + "haz_tc_fl.write_hdf5(\"results/haz_tc_fl.h5\")\n", "\n", - "haz = Hazard.from_hdf5('results/haz_tc_fl.h5')\n", + "haz = Hazard.from_hdf5(\"results/haz_tc_fl.h5\")\n", "haz.check()" ] }, @@ -857,7 +997,7 @@ } ], "source": [ - "haz_ven.write_raster('results/haz_ven.tif') # each event is a band of the tif file" + "haz_ven.write_raster(\"results/haz_ven.tif\") # each event is a band of the tif file" ] }, { @@ -882,8 +1022,9 @@ ], "source": [ "from climada.util.save import save\n", + "\n", "# this generates a results folder in the current path and stores the output there\n", - "save('tutorial_haz_tc_fl.p', haz_tc_fl)" + "save(\"tutorial_haz_tc_fl.p\", haz_tc_fl)" ] } ], diff --git a/doc/tutorial/climada_hazard_StormEurope.ipynb b/doc/tutorial/climada_hazard_StormEurope.ipynb index 3c0ba6865..7772d6057 100644 --- a/doc/tutorial/climada_hazard_StormEurope.ipynb +++ b/doc/tutorial/climada_hazard_StormEurope.ipynb @@ -21,7 +21,8 @@ "source": [ "%matplotlib inline\n", "import matplotlib.pyplot as plt\n", - "plt.rcParams['figure.figsize'] = [15, 10]" + "\n", + "plt.rcParams[\"figure.figsize\"] = [15, 10]" ] }, { @@ -107,7 +108,7 @@ } ], "source": [ - "storm_instance?" + "?storm_instance" ] }, { @@ -144,12 +145,12 @@ "outputs": [], "source": [ "storm_instance.set_ssi(\n", - " method = 'wind_gust',\n", - " intensity = storm_instance.intensity,\n", + " method=\"wind_gust\",\n", + " intensity=storm_instance.intensity,\n", " # the above is just a more explicit way of passing the default\n", - " on_land = True,\n", - " threshold = 25,\n", - " sel_cen = None\n", + " on_land=True,\n", + " threshold=25,\n", + " sel_cen=None,\n", " # None is default. 
sel_cen could be used to subset centroids\n", ")" ] @@ -244,16 +245,16 @@ "outputs": [], "source": [ "ssi_args = {\n", - " 'on_land': True,\n", - " 'threshold': 25,\n", + " \"on_land\": True,\n", + " \"threshold\": 25,\n", "}\n", "\n", "storm_prob_xtreme = storm_instance.generate_prob_storms(\n", - " reg_id=[56, 528], # BEL and NLD\n", + " reg_id=[56, 528], # BEL and NLD\n", " spatial_shift=2,\n", " ssi_args=ssi_args,\n", " power=1.5,\n", - " scale=0.3\n", + " scale=0.3,\n", ")" ] }, @@ -306,7 +307,7 @@ } ], "source": [ - "storm_prob_xtreme.plot_ssi(full_area=True);\n", + "storm_prob_xtreme.plot_ssi(full_area=True)\n", "storm_prob.plot_ssi(full_area=True);" ] } diff --git a/doc/tutorial/climada_hazard_TropCyclone.ipynb b/doc/tutorial/climada_hazard_TropCyclone.ipynb index 79b63981a..480d5c0b4 100644 --- a/doc/tutorial/climada_hazard_TropCyclone.ipynb +++ b/doc/tutorial/climada_hazard_TropCyclone.ipynb @@ -142,26 +142,35 @@ "%matplotlib inline\n", "from climada.hazard import TCTracks\n", "\n", - "tr_irma = TCTracks.from_ibtracs_netcdf(provider='usa', storm_id='2017242N16333') # IRMA 2017\n", - "ax = tr_irma.plot();\n", - "ax.set_title('IRMA') # set title\n", + "tr_irma = TCTracks.from_ibtracs_netcdf(\n", + " provider=\"usa\", storm_id=\"2017242N16333\"\n", + ") # IRMA 2017\n", + "ax = tr_irma.plot()\n", + "ax.set_title(\"IRMA\") # set title\n", "\n", "# other ibtracs selection options\n", "from climada.hazard import TCTracks\n", + "\n", "# years 1993 and 1994 in basin EP.\n", "# correct_pres ignores tracks with not enough data. For statistics (frequency of events), these should be considered as well\n", - "sel_ibtracs = TCTracks.from_ibtracs_netcdf(provider='usa', year_range=(1993, 1994), basin='EP', correct_pres=False)\n", - "print('Number of tracks:', sel_ibtracs.size)\n", - "ax = sel_ibtracs.plot();\n", - "ax.get_legend()._loc = 2 # correct legend location\n", - "ax.set_title('1993-1994, EP') # set title\n", + "sel_ibtracs = TCTracks.from_ibtracs_netcdf(\n", + " provider=\"usa\", year_range=(1993, 1994), basin=\"EP\", correct_pres=False\n", + ")\n", + "print(\"Number of tracks:\", sel_ibtracs.size)\n", + "ax = sel_ibtracs.plot()\n", + "ax.get_legend()._loc = 2 # correct legend location\n", + "ax.set_title(\"1993-1994, EP\") # set title\n", "\n", - "track1 = TCTracks.from_ibtracs_netcdf(provider='usa', storm_id='2007314N10093') # SIDR 2007\n", - "track2 = TCTracks.from_ibtracs_netcdf(provider='usa', storm_id='2016138N10081') # ROANU 2016\n", - "track1.append(track2.data) # put both tracks together\n", - "ax = track1.plot();\n", - "ax.get_legend()._loc = 2 # correct legend location\n", - "ax.set_title('SIDR and ROANU'); # set title" + "track1 = TCTracks.from_ibtracs_netcdf(\n", + " provider=\"usa\", storm_id=\"2007314N10093\"\n", + ") # SIDR 2007\n", + "track2 = TCTracks.from_ibtracs_netcdf(\n", + " provider=\"usa\", storm_id=\"2016138N10081\"\n", + ") # ROANU 2016\n", + "track1.append(track2.data) # put both tracks together\n", + "ax = track1.plot()\n", + "ax.get_legend()._loc = 2 # correct legend location\n", + "ax.set_title(\"SIDR and ROANU\"); # set title" ] }, { @@ -781,7 +790,7 @@ } ], "source": [ - "tr_irma.get_track('2017242N16333')" + "tr_irma.get_track(\"2017242N16333\")" ] }, { @@ -1675,7 +1684,7 @@ } ], "source": [ - "tr_irma.data[-1] # last synthetic track. notice the value of orig_event_flag and name" + "tr_irma.data[-1] # last synthetic track. 
notice the value of orig_event_flag and name" ] }, { @@ -1701,10 +1710,7 @@ }, "outputs": [], "source": [ - "# Put your code here\n", - "\n", - "\n", - "\n" + "# Put your code here" ] }, { @@ -1737,20 +1743,23 @@ "source": [ "# SOLUTION:\n", "import numpy as np\n", + "\n", "# select the track\n", - "tc_syn = tr_irma.get_track('2017242N16333_gen1')\n", + "tc_syn = tr_irma.get_track(\"2017242N16333_gen1\")\n", "\n", "# 1. Which is the time frequency of the data?\n", "# The values of a DataArray are numpy.arrays.\n", "# The nummpy.ediff1d computes the different between elements in an array\n", "diff_time_ns = np.ediff1d(tc_syn[\"time\"])\n", - "diff_time_h = diff_time_ns.astype(int)/1000/1000/1000/60/60\n", - "print('Mean time frequency in hours:', diff_time_h.mean())\n", - "print('Std time frequency in hours:', diff_time_h.std())\n", + "diff_time_h = diff_time_ns.astype(int) / 1000 / 1000 / 1000 / 60 / 60\n", + "print(\"Mean time frequency in hours:\", diff_time_h.mean())\n", + "print(\"Std time frequency in hours:\", diff_time_h.std())\n", "print()\n", "\n", "# 2. Compute the maximum sustained wind for each day.\n", - "print('Daily max sustained wind:', tc_syn[\"max_sustained_wind\"].groupby('time.day').max())" + "print(\n", + " \"Daily max sustained wind:\", tc_syn[\"max_sustained_wind\"].groupby(\"time.day\").max()\n", + ")" ] }, { @@ -1887,15 +1896,16 @@ "min_lat, max_lat, min_lon, max_lon = 16.99375, 21.95625, -72.48125, -61.66875\n", "cent = Centroids.from_pnt_bounds((min_lon, min_lat, max_lon, max_lat), res=0.12)\n", "cent.check()\n", - "cent.plot();\n", + "cent.plot()\n", "\n", "# construct tropical cyclones\n", "tc_irma = TropCyclone.from_tracks(tr_irma, centroids=cent)\n", "# tc_irma = TropCyclone.from_tracks(tr_irma) # try without given centroids. 
It might take too much space of your memory\n", "# and then the kernel will be killed: So, don't use this function without given centroids!\n", "tc_irma.check()\n", - "tc_irma.plot_intensity('2017242N16333'); # IRMA\n", - "tc_irma.plot_intensity('2017242N16333_gen2'); # IRMA's synthetic track 2" + "tc_irma.plot_intensity(\"2017242N16333\")\n", + "# IRMA\n", + "tc_irma.plot_intensity(\"2017242N16333_gen2\"); # IRMA's synthetic track 2" ] }, { @@ -1944,13 +1954,18 @@ "source": [ "# an Irma event-like in 2055 under RCP 4.5:\n", "tc_irma = TropCyclone.from_tracks(tr_irma, centroids=cent)\n", - "tc_irma_cc = tc_irma.apply_climate_scenario_knu(target_year=2055, scenario='4.5')\n", + "tc_irma_cc = tc_irma.apply_climate_scenario_knu(target_year=2055, scenario=\"4.5\")\n", "\n", "rel_freq_incr = np.round(\n", - " (np.mean(tc_irma_cc.frequency) - np.mean(tc_irma.frequency)\n", - " ) / np.mean(tc_irma.frequency)*100, 0)\n", + " (np.mean(tc_irma_cc.frequency) - np.mean(tc_irma.frequency))\n", + " / np.mean(tc_irma.frequency)\n", + " * 100,\n", + " 0,\n", + ")\n", "\n", - "print(f\"\\nA TC like Irma would undergo a frequency increase of about {rel_freq_incr} % in 2055 under RCP 45\")" + "print(\n", + " f\"\\nA TC like Irma would undergo a frequency increase of about {rel_freq_incr} % in 2055 under RCP 45\"\n", + ")" ] }, { @@ -2067,9 +2082,9 @@ "\n", "from climada.hazard import Centroids, TropCyclone, TCTracks\n", "\n", - "track_name = '2017242N16333' #'2016273N13300' #'1992230N11325'\n", + "track_name = \"2017242N16333\" #'2016273N13300' #'1992230N11325'\n", "\n", - "tr_irma = TCTracks.from_ibtracs_netcdf(provider='usa', storm_id='2017242N16333')\n", + "tr_irma = TCTracks.from_ibtracs_netcdf(provider=\"usa\", storm_id=\"2017242N16333\")\n", "\n", "lon_min, lat_min, lon_max, lat_max = -83.5, 24.4, -79.8, 29.6\n", "centr_video = Centroids.from_pnt_bounds((lon_min, lat_min, lon_max, lat_max), 0.04)\n", @@ -2077,7 +2092,9 @@ "\n", "tc_video = TropCyclone()\n", "\n", - "tc_list, tr_coord = tc_video.video_intensity(track_name, tr_irma, centr_video, file_name='results/irma_tc_fl.gif')" + "tc_list, tr_coord = tc_video.video_intensity(\n", + " track_name, tr_irma, centr_video, file_name=\"results/irma_tc_fl.gif\"\n", + ")" ] }, { @@ -2156,9 +2173,11 @@ "from matplotlib import animation\n", "from matplotlib.pyplot import rcParams\n", "\n", - "rcParams['animation.ffmpeg_path'] = shutil.which('ffmpeg')\n", + "rcParams[\"animation.ffmpeg_path\"] = shutil.which(\"ffmpeg\")\n", "writer = animation.FFMpegWriter(bitrate=500)\n", - "tc_list, tr_coord = tc_video.video_intensity(track_name, tr_irma, centr_video, file_name='results/irma_tc_fl.mp4', writer=writer)" + "tc_list, tr_coord = tc_video.video_intensity(\n", + " track_name, tr_irma, centr_video, file_name=\"results/irma_tc_fl.mp4\", writer=writer\n", + ")" ] }, { diff --git a/doc/tutorial/climada_util_api_client.ipynb b/doc/tutorial/climada_util_api_client.ipynb index 580e0b08d..215f8b6d0 100644 --- a/doc/tutorial/climada_util_api_client.ipynb +++ b/doc/tutorial/climada_util_api_client.ipynb @@ -28,6 +28,7 @@ "outputs": [], "source": [ "from climada.util.api_client import Client\n", + "\n", "client = Client()" ] }, @@ -150,10 +151,11 @@ ], "source": [ "import pandas as pd\n", + "\n", "data_types = client.list_data_type_infos()\n", "\n", "dtf = pd.DataFrame(data_types)\n", - "dtf.sort_values(['data_type_group', 'data_type'])" + "dtf.sort_values([\"data_type_group\", \"data_type\"])" ] }, { @@ -170,7 +172,7 @@ "metadata": {}, "outputs": [], "source": [ - 
"litpop_dataset_infos = client.list_dataset_infos(data_type='litpop')" + "litpop_dataset_infos = client.list_dataset_infos(data_type=\"litpop\")" ] }, { @@ -233,7 +235,9 @@ "source": [ "# as datasets are usually available per country, chosing a country or global dataset reduces the options\n", "# here we want to see which datasets are available for litpop globally:\n", - "client.get_property_values(litpop_dataset_infos, known_property_values = {'spatial_coverage':'global'})" + "client.get_property_values(\n", + " litpop_dataset_infos, known_property_values={\"spatial_coverage\": \"global\"}\n", + ")" ] }, { @@ -259,8 +263,10 @@ } ], "source": [ - "#and here for Switzerland:\n", - "client.get_property_values(litpop_dataset_infos, known_property_values = {'country_name':'Switzerland'})" + "# and here for Switzerland:\n", + "client.get_property_values(\n", + " litpop_dataset_infos, known_property_values={\"country_name\": \"Switzerland\"}\n", + ")" ] }, { @@ -313,8 +319,10 @@ } ], "source": [ - "tc_dataset_infos = client.list_dataset_infos(data_type='tropical_cyclone')\n", - "client.get_property_values(tc_dataset_infos, known_property_values = {'country_name':'Haiti'})" + "tc_dataset_infos = client.list_dataset_infos(data_type=\"tropical_cyclone\")\n", + "client.get_property_values(\n", + " tc_dataset_infos, known_property_values={\"country_name\": \"Haiti\"}\n", + ")" ] }, { @@ -347,7 +355,15 @@ ], "source": [ "client = Client()\n", - "tc_haiti = client.get_hazard('tropical_cyclone', properties={'country_name': 'Haiti', 'climate_scenario': 'rcp45', 'ref_year':'2040', 'nb_synth_tracks':'10'})\n", + "tc_haiti = client.get_hazard(\n", + " \"tropical_cyclone\",\n", + " properties={\n", + " \"country_name\": \"Haiti\",\n", + " \"climate_scenario\": \"rcp45\",\n", + " \"ref_year\": \"2040\",\n", + " \"nb_synth_tracks\": \"10\",\n", + " },\n", + ")\n", "tc_haiti.plot_intensity(0);" ] }, @@ -365,7 +381,9 @@ "metadata": {}, "outputs": [], "source": [ - "litpop_default = client.get_property_values(litpop_dataset_infos, known_property_values = {'fin_mode':'pc', 'exponents':'(1,1)'})" + "litpop_default = client.get_property_values(\n", + " litpop_dataset_infos, known_property_values={\"fin_mode\": \"pc\", \"exponents\": \"(1,1)\"}\n", + ")" ] }, { @@ -385,7 +403,7 @@ } ], "source": [ - "litpop = client.get_litpop(country='Haiti')" + "litpop = client.get_litpop(country=\"Haiti\")" ] }, { @@ -446,6 +464,7 @@ "outputs": [], "source": [ "from climada.engine import ImpactCalc\n", + "\n", "impact = ImpactCalc(litpop, imp_fun_set, tc_haiti).impact()" ] }, @@ -476,7 +495,7 @@ } ], "source": [ - "crop_dataset_infos = client.list_dataset_infos(data_type='crop_production')\n", + "crop_dataset_infos = client.list_dataset_infos(data_type=\"crop_production\")\n", "\n", "client.get_property_values(crop_dataset_infos)" ] @@ -487,7 +506,10 @@ "metadata": {}, "outputs": [], "source": [ - "rice_exposure = client.get_exposures(exposures_type='crop_production', properties = {'crop':'ric', 'unit': 'USD','irrigation_status': 'noirr'})" + "rice_exposure = client.get_exposures(\n", + " exposures_type=\"crop_production\",\n", + " properties={\"crop\": \"ric\", \"unit\": \"USD\", \"irrigation_status\": \"noirr\"},\n", + ")" ] }, { @@ -584,7 +606,7 @@ } ], "source": [ - "centroids_nopoles = client.get_centroids(extent=[-180,180,-60,50])\n", + "centroids_nopoles = client.get_centroids(extent=[-180, 180, -60, 50])\n", "centroids_nopoles.plot()" ] }, @@ -612,7 +634,7 @@ } ], "source": [ - "centroids_hti = 
client.get_centroids(country='HTI')" + "centroids_hti = client.get_centroids(country=\"HTI\")" ] }, { @@ -667,7 +689,7 @@ } ], "source": [ - "Client?" + "?Client" ] }, { @@ -741,7 +763,7 @@ } ], "source": [ - "client.get_dataset_info_by_uuid('b1c76120-4e60-4d8f-99c0-7e1e7b7860ec')" + "client.get_dataset_info_by_uuid(\"b1c76120-4e60-4d8f-99c0-7e1e7b7860ec\")" ] }, { @@ -810,7 +832,8 @@ ], "source": [ "from climada.util.api_client import DatasetInfo\n", - "DatasetInfo?" + "\n", + "?DatasetInfo" ] }, { @@ -849,7 +872,8 @@ ], "source": [ "from climada.util.api_client import FileInfo\n", - "FileInfo?" + "\n", + "?FileInfo" ] }, { @@ -890,7 +914,7 @@ } ], "source": [ - "client.into_datasets_df?" + "?client.into_datasets_df" ] }, { @@ -1059,8 +1083,12 @@ ], "source": [ "from climada.util.api_client import Client\n", + "\n", "client = Client()\n", - "litpop_datasets = client.list_dataset_infos(data_type='litpop', properties={'country_name': 'South Georgia and the South Sandwich Islands'})\n", + "litpop_datasets = client.list_dataset_infos(\n", + " data_type=\"litpop\",\n", + " properties={\"country_name\": \"South Georgia and the South Sandwich Islands\"},\n", + ")\n", "litpop_df = client.into_datasets_df(litpop_datasets)\n", "litpop_df" ] @@ -1127,7 +1155,7 @@ } ], "source": [ - "client.download_dataset?" + "?client.download_dataset" ] }, { @@ -1161,7 +1189,9 @@ ], "source": [ "# Let's have a look at an example for downloading a litpop dataset first\n", - "ds = litpop_datasets[0] # litpop_datasets is a list and download_dataset expects a single object as argument.\n", + "ds = litpop_datasets[\n", + " 0\n", + "] # litpop_datasets is a list and download_dataset expects a single object as argument.\n", "download_dir, ds_files = client.download_dataset(ds)\n", "ds_files[0], ds_files[0].is_file()" ] @@ -1214,9 +1244,14 @@ ], "source": [ "from climada.util.api_client import Client\n", + "\n", "Client().get_dataset_file(\n", - " data_type='litpop',\n", - " properties={'country_name': 'South Georgia and the South Sandwich Islands', 'fin_mode': 'pop'})" + " data_type=\"litpop\",\n", + " properties={\n", + " \"country_name\": \"South Georgia and the South Sandwich Islands\",\n", + " \"fin_mode\": \"pop\",\n", + " },\n", + ")" ] }, { diff --git a/doc/tutorial/climada_util_earth_engine.ipynb b/doc/tutorial/climada_util_earth_engine.ipynb index d6ca785ce..10811ce4d 100644 --- a/doc/tutorial/climada_util_earth_engine.ipynb +++ b/doc/tutorial/climada_util_earth_engine.ipynb @@ -53,8 +53,9 @@ "import webbrowser\n", "\n", "import ee\n", + "\n", "ee.Initialize()\n", - "image = ee.Image('srtm90_v4')\n", + "image = ee.Image(\"srtm90_v4\")\n", "print(image.getInfo())" ] }, @@ -75,10 +76,11 @@ "outputs": [], "source": [ "# Access a specific image\n", - "image = ee.Image('LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140318'); #Landsat 8 image, with Top of Atmosphere processing, on 2014/03/18 \n", + "image = ee.Image(\"LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140318\")\n", + "# Landsat 8 image, with Top of Atmosphere processing, on 2014/03/18\n", "\n", "# Access a collection\n", - "collection = 'LANDSAT/LE07/C01/T1' #Landsat 7 raw images collection" + "collection = \"LANDSAT/LE07/C01/T1\" # Landsat 7 raw images collection" ] }, { @@ -109,32 +111,38 @@ } ], "source": [ - "#Landsat_composite in Dresden area\n", - "area_dresden = list([(13.6, 50.96), (13.9, 50.96), (13.9, 51.12), (13.6, 51.12), (13.6, 50.96)])\n", - "area_dresden = ee.Geometry.Polygon(area_dresden) \n", - "time_range_dresden = ['2002-07-28', '2002-08-05']\n", - 
"\n", - "collection_dresden = ('LANDSAT/LE07/C01/T1')\n", + "# Landsat_composite in Dresden area\n", + "area_dresden = list(\n", + " [(13.6, 50.96), (13.9, 50.96), (13.9, 51.12), (13.6, 51.12), (13.6, 50.96)]\n", + ")\n", + "area_dresden = ee.Geometry.Polygon(area_dresden)\n", + "time_range_dresden = [\"2002-07-28\", \"2002-08-05\"]\n", + "\n", + "collection_dresden = \"LANDSAT/LE07/C01/T1\"\n", "print(type(area_dresden))\n", "\n", - "#Population density in Switzerland\n", - "list_swiss = list([(6.72, 47.88),(6.72, 46.55),(9.72, 46.55),(9.72, 47.88),(6.72, 47.88)]) \n", - "area_swiss = ee.Geometry.Polygon(list_swiss) \n", - "time_range_swiss=['2002-01-01', '2005-12-30']\n", + "# Population density in Switzerland\n", + "list_swiss = list(\n", + " [(6.72, 47.88), (6.72, 46.55), (9.72, 46.55), (9.72, 47.88), (6.72, 47.88)]\n", + ")\n", + "area_swiss = ee.Geometry.Polygon(list_swiss)\n", + "time_range_swiss = [\"2002-01-01\", \"2005-12-30\"]\n", "\n", - "collection_swiss = ee.ImageCollection('CIESIN/GPWv4/population-density')\n", + "collection_swiss = ee.ImageCollection(\"CIESIN/GPWv4/population-density\")\n", "print(type(collection_swiss))\n", "\n", - "#Sentinel 2 cloud-free image in Zürich\n", - "collection_zurich = ('COPERNICUS/S2')\n", - "list_zurich = list([(8.53, 47.355),(8.55, 47.355),(8.55, 47.376),(8.53, 47.376),(8.53, 47.355)]) \n", - "area_zurich = ee.Geometry.Polygon(list_swiss) \n", - "time_range_zurich = ['2018-05-01', '2018-07-30']\n", + "# Sentinel 2 cloud-free image in Zürich\n", + "collection_zurich = \"COPERNICUS/S2\"\n", + "list_zurich = list(\n", + " [(8.53, 47.355), (8.55, 47.355), (8.55, 47.376), (8.53, 47.376), (8.53, 47.355)]\n", + ")\n", + "area_zurich = ee.Geometry.Polygon(list_swiss)\n", + "time_range_zurich = [\"2018-05-01\", \"2018-07-30\"]\n", "\n", "\n", - "#Landcover in Europe with CORINE dataset\n", - "dataset_landcover = ee.Image('COPERNICUS/CORINE/V18_5_1/100m/2012')\n", - "landCover_layer = dataset_landcover.select('landcover')\n", + "# Landcover in Europe with CORINE dataset\n", + "dataset_landcover = ee.Image(\"COPERNICUS/CORINE/V18_5_1/100m/2012\")\n", + "landCover_layer = dataset_landcover.select(\"landcover\")\n", "print(type(landCover_layer))" ] }, @@ -144,9 +152,9 @@ "metadata": {}, "outputs": [], "source": [ - "#Methods from climada.util.earth_engine module\n", + "# Methods from climada.util.earth_engine module\n", "def obtain_image_landsat_composite(collection, time_range, area):\n", - " \"\"\" Selection of Landsat cloud-free composites in the Earth Engine library\n", + " \"\"\"Selection of Landsat cloud-free composites in the Earth Engine library\n", " See also: https://developers.google.com/earth-engine/landsat\n", "\n", " Parameters:\n", @@ -156,7 +164,7 @@ "\n", " Returns:\n", " image_composite (ee.image.Image)\n", - " \"\"\"\n", + " \"\"\"\n", " collection = ee.ImageCollection(collection)\n", "\n", " ## Filter by time range and location\n", @@ -165,8 +173,9 @@ " image_composite = ee.Algorithms.Landsat.simpleComposite(image_area, 75, 3)\n", " return image_composite\n", "\n", + "\n", "def obtain_image_median(collection, time_range, area):\n", - " \"\"\" Selection of median from a collection of images in the Earth Engine library\n", + " \"\"\"Selection of median from a collection of images in the Earth Engine library\n", " See also: https://developers.google.com/earth-engine/reducers_image_collection\n", "\n", " Parameters:\n", @@ -176,7 +185,7 @@ "\n", " Returns:\n", " image_median (ee.image.Image)\n", - " \"\"\"\n", + " \"\"\"\n", " collection 
= ee.ImageCollection(collection)\n", "\n", " ## Filter by time range and location\n", @@ -185,8 +194,9 @@ " image_median = image_area.median()\n", " return image_median\n", "\n", + "\n", "def obtain_image_sentinel(collection, time_range, area):\n", - " \"\"\" Selection of median, cloud-free image from a collection of images in the Sentinel 2 dataset\n", + " \"\"\"Selection of median, cloud-free image from a collection of images in the Sentinel 2 dataset\n", " See also: https://developers.google.com/earth-engine/datasets/catalog/COPERNICUS_S2\n", "\n", " Parameters:\n", @@ -196,24 +206,28 @@ "\n", " Returns:\n", " sentinel_median (ee.image.Image)\n", - " \"\"\"\n", - "#First, method to remove cloud from the image\n", + " \"\"\"\n", + "\n", + " # First, method to remove cloud from the image\n", " def maskclouds(image):\n", - " band_qa = image.select('QA60')\n", + " band_qa = image.select(\"QA60\")\n", " cloud_mask = ee.Number(2).pow(10).int()\n", " cirrus_mask = ee.Number(2).pow(11).int()\n", - " mask = band_qa.bitwiseAnd(cloud_mask).eq(0) and(\n", - " band_qa.bitwiseAnd(cirrus_mask).eq(0))\n", + " mask = band_qa.bitwiseAnd(cloud_mask).eq(0) and (\n", + " band_qa.bitwiseAnd(cirrus_mask).eq(0)\n", + " )\n", " return image.updateMask(mask).divide(10000)\n", "\n", - " sentinel_filtered = (ee.ImageCollection(collection).\n", - " filterBounds(area).\n", - " filterDate(time_range[0], time_range[1]).\n", - " filter(ee.Filter.lt('CLOUDY_PIXEL_PERCENTAGE', 20)).\n", - " map(maskclouds))\n", + " sentinel_filtered = (\n", + " ee.ImageCollection(collection)\n", + " .filterBounds(area)\n", + " .filterDate(time_range[0], time_range[1])\n", + " .filter(ee.Filter.lt(\"CLOUDY_PIXEL_PERCENTAGE\", 20))\n", + " .map(maskclouds)\n", + " )\n", "\n", " sentinel_median = sentinel_filtered.median()\n", - " return sentinel_median\n" + " return sentinel_median" ] }, { @@ -232,13 +246,15 @@ } ], "source": [ - "#Application to examples\n", - "composite_dresden = obtain_image_landsat_composite(collection_dresden, time_range_dresden, area_dresden)\n", + "# Application to examples\n", + "composite_dresden = obtain_image_landsat_composite(\n", + " collection_dresden, time_range_dresden, area_dresden\n", + ")\n", "median_swiss = obtain_image_median(collection_swiss, time_range_swiss, area_swiss)\n", "zurich_median = obtain_image_sentinel(collection_zurich, time_range_zurich, area_zurich)\n", "\n", - "#Selection of specific bands from an image\n", - "zurich_band = zurich_median.select(['B4','B3','B2']) \n", + "# Selection of specific bands from an image\n", + "zurich_band = zurich_median.select([\"B4\", \"B3\", \"B2\"])\n", "\n", "\n", "print(composite_dresden.getInfo())\n", @@ -279,7 +295,7 @@ "\n", "region_dresden = get_region(area_dresden)\n", "region_swiss = get_region(area_swiss)\n", - "region_zurich= get_region(area_zurich)" + "region_zurich = get_region(area_zurich)" ] }, { @@ -321,24 +337,19 @@ "\n", " Returns:\n", " path (str)\n", - " \"\"\"\n", - " path = image.getDownloadURL({\n", - " 'name':(name),\n", - " 'scale': scale,\n", - " 'region':(region)\n", - " })\n", + " \"\"\"\n", + " path = image.getDownloadURL({\"name\": (name), \"scale\": scale, \"region\": (region)})\n", "\n", " webbrowser.open_new_tab(path)\n", " return path\n", "\n", - " \n", - " \n", - "url_swiss = get_url('swiss_pop', median_swiss, 900, region_swiss)\n", - "url_dresden = get_url('dresden', composite_dresden, 30, region_dresden)\n", - "url_landcover = get_url('landcover_swiss', landCover_layer, 100, region_swiss)\n", "\n", - "#For the example 
of Zürich, due to size, it doesn't work on Jupyter Notebook but it works on Python\n", - "#url_zurich = get_url('sentinel', zurich_band, 10, region_zurich)\n", + "url_swiss = get_url(\"swiss_pop\", median_swiss, 900, region_swiss)\n", + "url_dresden = get_url(\"dresden\", composite_dresden, 30, region_dresden)\n", + "url_landcover = get_url(\"landcover_swiss\", landCover_layer, 100, region_swiss)\n", + "\n", + "# For the example of Zürich, due to size, it doesn't work on Jupyter Notebook but it works on Python\n", + "# url_zurich = get_url('sentinel', zurich_band, 10, region_zurich)\n", "\n", "print(url_swiss)\n", "print(url_dresden)\n", @@ -387,7 +398,7 @@ "from skimage.filters import try_all_threshold\n", "from skimage.filters import threshold_otsu, threshold_local\n", "from skimage import measure\n", - "from skimage import feature\n" + "from skimage import feature" ] }, { @@ -398,8 +409,8 @@ "source": [ "from climada.util import DEMO_DIR\n", "\n", - "swiss_pop = DEMO_DIR.joinpath('earth_engine', 'population-density_median.tif')\n", - "dresden = DEMO_DIR.joinpath('earth_engine', 'dresden.tif') #B4 of Dresden example\n" + "swiss_pop = DEMO_DIR.joinpath(\"earth_engine\", \"population-density_median.tif\")\n", + "dresden = DEMO_DIR.joinpath(\"earth_engine\", \"dresden.tif\") # B4 of Dresden example" ] }, { @@ -433,19 +444,19 @@ } ], "source": [ - "#Read a tif in python and Visualize the image\n", + "# Read a tif in python and Visualize the image\n", "image_dresden = imread(dresden)\n", "plt.figure(figsize=(10, 10))\n", - "plt.imshow(image_dresden, cmap='gray', interpolation='nearest')\n", + "plt.imshow(image_dresden, cmap=\"gray\", interpolation=\"nearest\")\n", "plt.axis()\n", "plt.show()\n", "\n", - "#Crop the image\n", - "image_dresden_crop=image_dresden[300:700,600:1400]\n", + "# Crop the image\n", + "image_dresden_crop = image_dresden[300:700, 600:1400]\n", "plt.figure(figsize=(10, 10))\n", - "plt.imshow(image_dresden_crop, cmap='gray', interpolation='nearest')\n", + "plt.imshow(image_dresden_crop, cmap=\"gray\", interpolation=\"nearest\")\n", "plt.axis()\n", - "plt.show()\n" + "plt.show()" ] }, { @@ -467,12 +478,12 @@ } ], "source": [ - "image_pop= imread(swiss_pop)\n", + "image_pop = imread(swiss_pop)\n", "plt.figure(figsize=(12, 12))\n", - "plt.imshow(image_pop, cmap='Reds', interpolation='nearest')\n", + "plt.imshow(image_pop, cmap=\"Reds\", interpolation=\"nearest\")\n", "plt.colorbar()\n", "plt.axis()\n", - "plt.show()\n" + "plt.show()" ] }, { @@ -501,7 +512,7 @@ } ], "source": [ - "#Thresholding: Selection of pixels with regards with their value\n", + "# Thresholding: Selection of pixels with regards with their value\n", "\n", "global_thresh = threshold_otsu(image_dresden_crop)\n", "binary_global = image_dresden_crop > global_thresh\n", @@ -515,19 +526,19 @@ "plt.gray()\n", "\n", "ax[0].imshow(image_dresden_crop)\n", - "ax[0].set_title('Original')\n", + "ax[0].set_title(\"Original\")\n", "\n", "ax[1].imshow(binary_global)\n", - "ax[1].set_title('Global thresholding')\n", + "ax[1].set_title(\"Global thresholding\")\n", "\n", "ax[2].imshow(binary_adaptive)\n", - "ax[2].set_title('Adaptive thresholding')\n", + "ax[2].set_title(\"Adaptive thresholding\")\n", "\n", "for a in ax:\n", - " a.axis('off')\n", + " a.axis(\"off\")\n", "plt.show()\n", "\n", - "print(np.sum(binary_global))\n" + "print(np.sum(binary_global))" ] } ], diff --git a/doc/tutorial/climada_util_yearsets.ipynb b/doc/tutorial/climada_util_yearsets.ipynb index 747d29fcf..9ead01019 100644 --- 
a/doc/tutorial/climada_util_yearsets.ipynb +++ b/doc/tutorial/climada_util_yearsets.ipynb @@ -40,11 +40,11 @@ "import climada.util.yearsets as yearsets\n", "from climada.engine import Impact\n", "\n", - "# dummy event_impacts object containing 10 event_impacts with the values 10-110 \n", + "# dummy event_impacts object containing 10 event_impacts with the values 10-110\n", "# and the frequency 0.2 (Return period of 5 years)\n", "imp = Impact()\n", - "imp.at_event = np.arange(10,110,10)\n", - "imp.frequency = np.array(np.ones(10)*0.2)\n", + "imp.at_event = np.arange(10, 110, 10)\n", + "imp.frequency = np.array(np.ones(10) * 0.2)\n", "\n", "# the number of years to sample impacts for (length(yimp.at_event) = sampled_years)\n", "sampled_years = 10\n", @@ -147,11 +147,13 @@ ], "source": [ "# compare the resulting yimp with our step-by-step computation without applying the correction factor:\n", - "yimp, sampling_vect = yearsets.impact_yearset(imp, sampled_years=list(range(1,11)), correction_fac=False)\n", + "yimp, sampling_vect = yearsets.impact_yearset(\n", + " imp, sampled_years=list(range(1, 11)), correction_fac=False\n", + ")\n", "\n", - "print('The yimp.at_event values equal our step-by-step computed imp_per_year:')\n", - "print('yimp.at_event = ', yimp.at_event)\n", - "print('imp_per_year = ', imp_per_year)" + "print(\"The yimp.at_event values equal our step-by-step computed imp_per_year:\")\n", + "print(\"yimp.at_event = \", yimp.at_event)\n", + "print(\"imp_per_year = \", imp_per_year)" ] }, { @@ -173,12 +175,14 @@ ], "source": [ "# and here the same comparison with applying the correction factor (default settings):\n", - "yimp, sampling_vect = yearsets.impact_yearset(imp, sampled_years=list(range(1,11)))\n", + "yimp, sampling_vect = yearsets.impact_yearset(imp, sampled_years=list(range(1, 11)))\n", "\n", - "print('The same can be shown for the case of applying the correction factor.' \n", - " 'The yimp.at_event values equal our step-by-step computed imp_per year:')\n", - "print('yimp.at_event = ', yimp.at_event)\n", - "print('imp_per_year = ', imp_per_year/correction_factor)" + "print(\n", + " \"The same can be shown for the case of applying the correction factor.\"\n", + " \"The yimp.at_event values equal our step-by-step computed imp_per year:\"\n", + ")\n", + "print(\"yimp.at_event = \", yimp.at_event)\n", + "print(\"imp_per_year = \", imp_per_year / correction_factor)" ] } ], diff --git a/requirements/env_climada.yml b/requirements/env_climada.yml index 52722f3d4..c3e9762c4 100644 --- a/requirements/env_climada.yml +++ b/requirements/env_climada.yml @@ -8,7 +8,7 @@ dependencies: - cfgrib>=0.9.9,<0.9.10 # 0.9.10 cannot read the icon_grib files from https://opendata.dwd.de - contextily>=1.6 - dask>=2024.5 - - eccodes>=2.27 # 2.28 changed some labels, in particular: gust -> i10fg (i20fg?) + - eccodes>=2.27 # 2.28 changed some labels, in particular: gust -> i10fg (i20fg?) - gdal>=3.6 - geopandas>=0.14,<1.0 - h5py>=3.8 diff --git a/script/applications/eca_san_salvador/README.txt b/script/applications/eca_san_salvador/README.txt index e81b3188e..7b3fa3df3 100644 --- a/script/applications/eca_san_salvador/README.txt +++ b/script/applications/eca_san_salvador/README.txt @@ -2,4 +2,4 @@ These notebooks show how to use CLIMADA in local case studies. The data shown was generated for the Economics of Climate Adaptation study developed with KfW in San Salvador, El Salvador. These represent only a partial outcome of the project. 
Execute first San_Salvador_Risk.ipynb and then San_Salvador_Adaptation.ipynb. -Contact Gabriela Aznar Siguan for any questions. +Contact Gabriela Aznar Siguan for any questions. diff --git a/script/applications/eca_san_salvador/San_Salvador_Adaptacion.ipynb b/script/applications/eca_san_salvador/San_Salvador_Adaptacion.ipynb index 21fb05cdb..5a50f09d5 100644 --- a/script/applications/eca_san_salvador/San_Salvador_Adaptacion.ipynb +++ b/script/applications/eca_san_salvador/San_Salvador_Adaptacion.ipynb @@ -128,18 +128,20 @@ "import contextily as ctx\n", "from climada.entity import Entity\n", "\n", - "ent_2015 = Entity.from_excel('FL_entity_Acelhuate_houses.xlsx')\n", - "ent_2015.exposures.ref_year = 2015 # fijamos el año de referencia\n", + "ent_2015 = Entity.from_excel(\"FL_entity_Acelhuate_houses.xlsx\")\n", + "ent_2015.exposures.ref_year = 2015 # fijamos el año de referencia\n", "ent_2015.check()\n", "\n", "# Exposures (bienes): los utilizados en el script San Salvador Risk\n", - "print('Total value in 2015: {:.3e}'.format(ent_2015.exposures.gdf.value.sum()))\n", - "ax = ent_2015.exposures.plot_basemap(s=1, zoom=15, url=ctx.providers.OpenStreetMap.Mapnik, vmax=60000, cmap='autumn')\n", - "ax.set_title('Exposure 2015')\n", + "print(\"Total value in 2015: {:.3e}\".format(ent_2015.exposures.gdf.value.sum()))\n", + "ax = ent_2015.exposures.plot_basemap(\n", + " s=1, zoom=15, url=ctx.providers.OpenStreetMap.Mapnik, vmax=60000, cmap=\"autumn\"\n", + ")\n", + "ax.set_title(\"Exposure 2015\")\n", "\n", "# Impact Functions (funciones de impacto): los utilizados en el script San Salvador Risk\n", - "ent_2015.impact_funcs.get_func('FL', 101).plot()\n", - "ent_2015.impact_funcs.get_func('FL', 102).plot()\n", + "ent_2015.impact_funcs.get_func(\"FL\", 101).plot()\n", + "ent_2015.impact_funcs.get_func(\"FL\", 102).plot()\n", "\n", "# Discount rate (tasas de descuento): 2% anual hasta 2040\n", "ent_2015.disc_rates.plot();\n", @@ -230,12 +232,16 @@ "# Exposures (bienes): crecimiento anual del 2%\n", "ent_2040.exposures.ref_year = 2040\n", "growth = 0.02\n", - "ent_2040.exposures.gdf['value'] = ent_2040.exposures.gdf.value.values*(1 + growth)**(ent_2040.exposures.ref_year - ent_2015.exposures.ref_year)\n", - "ent_2040.check() # check values are well set and assignes default values\n", + "ent_2040.exposures.gdf[\"value\"] = ent_2040.exposures.gdf.value.values * (\n", + " 1 + growth\n", + ") ** (ent_2040.exposures.ref_year - ent_2015.exposures.ref_year)\n", + "ent_2040.check() # check values are well set and assignes default values\n", "\n", - "print('Valor total en 2040: {:.3e}'.format(ent_2040.exposures.gdf.value.sum()))\n", - "ax = ent_2040.exposures.plot_basemap(s=1, zoom=15, url=ctx.providers.OpenStreetMap.Mapnik, vmax=60000, cmap='autumn')\n", - "ax.set_title('Exposure 2040');" + "print(\"Valor total en 2040: {:.3e}\".format(ent_2040.exposures.gdf.value.sum()))\n", + "ax = ent_2040.exposures.plot_basemap(\n", + " s=1, zoom=15, url=ctx.providers.OpenStreetMap.Mapnik, vmax=60000, cmap=\"autumn\"\n", + ")\n", + "ax.set_title(\"Exposure 2040\");" ] }, { @@ -286,11 +292,13 @@ "import matplotlib.patches as patches\n", "from climada.hazard import Hazard\n", "\n", - "HAZ_FILE = 'Salvador_hazard_FL_2015.mat'\n", + "HAZ_FILE = \"Salvador_hazard_FL_2015.mat\"\n", "\n", - "haz_2015 = Hazard.from_mat(HAZ_FILE) # cargamos el fichero\n", + "haz_2015 = Hazard.from_mat(HAZ_FILE) # cargamos el fichero\n", "ax = haz_2015.plot_intensity(0) # intensidad máxima alcanzada en cada punto\n", - "rect = 
patches.Rectangle((-0.0027, 13.6738), 0.0355, 0.0233, linewidth=1, edgecolor='r', facecolor='none') # add exposures focus\n", + "rect = patches.Rectangle(\n", + " (-0.0027, 13.6738), 0.0355, 0.0233, linewidth=1, edgecolor=\"r\", facecolor=\"none\"\n", + ") # add exposures focus\n", "ax.add_patch(rect);" ] }, @@ -348,11 +356,13 @@ "# inundaciones en 2040 bajo un fuerte cambio climático\n", "from climada.hazard import Hazard\n", "\n", - "HAZ_FILE = 'Salvador_hazard_FL_2040_extreme_cc.mat'\n", + "HAZ_FILE = \"Salvador_hazard_FL_2040_extreme_cc.mat\"\n", "\n", - "haz_2040 = Hazard.from_mat(HAZ_FILE) # cargamos el fichero\n", + "haz_2040 = Hazard.from_mat(HAZ_FILE) # cargamos el fichero\n", "ax = haz_2040.plot_intensity(0) # intensidad máxima alcanzada en cada punto\n", - "rect = patches.Rectangle((-0.0027, 13.6738), 0.0355, 0.0233, linewidth=1, edgecolor='r', facecolor='none') # add exposures focus\n", + "rect = patches.Rectangle(\n", + " (-0.0027, 13.6738), 0.0355, 0.0233, linewidth=1, edgecolor=\"r\", facecolor=\"none\"\n", + ") # add exposures focus\n", "ax.add_patch(rect);" ] }, @@ -410,7 +420,7 @@ "\n", "cb_acel = CostBenefit()\n", "ax = cb_acel.plot_waterfall(haz_2015, ent_2015, haz_2040, ent_2040)\n", - "ax.set_title('Expected Annual Impact');" + "ax.set_title(\"Expected Annual Impact\");" ] }, { @@ -460,8 +470,10 @@ "from climada.engine import risk_rp_100\n", "\n", "cb_acel = CostBenefit()\n", - "ax = cb_acel.plot_waterfall(haz_2015, ent_2015, haz_2040, ent_2040, risk_func=risk_rp_100)\n", - "ax.set_title('Impact Exceedance at 100 years Return Period');" + "ax = cb_acel.plot_waterfall(\n", + " haz_2015, ent_2015, haz_2040, ent_2040, risk_func=risk_rp_100\n", + ")\n", + "ax.set_title(\"Impact Exceedance at 100 years Return Period\");" ] }, { @@ -524,22 +536,25 @@ ], "source": [ "# impacto de la medida en 2015: No descargas en Lluvia\n", - "meas = ent_2015.measures.get_measure('FL', 'No descargas en Lluvia')\n", - "print('Measure cost {:.3e} USD'.format(meas.cost))\n", - "meas_exp_2015, meas_impf_2015, meas_haz_2015 = meas.apply(ent_2015.exposures, ent_2015.impact_funcs, haz_2015)\n", + "meas = ent_2015.measures.get_measure(\"FL\", \"No descargas en Lluvia\")\n", + "print(\"Measure cost {:.3e} USD\".format(meas.cost))\n", + "meas_exp_2015, meas_impf_2015, meas_haz_2015 = meas.apply(\n", + " ent_2015.exposures, ent_2015.impact_funcs, haz_2015\n", + ")\n", "\n", "# los bienes no cambian\n", "\n", "# las funciones de impacto mejoran ligeramente:\n", - "ax = meas_impf_2015.get_func('FL', 101).plot()\n", - "ax.set_title('Flooding AUP House with measure')\n", + "ax = meas_impf_2015.get_func(\"FL\", 101).plot()\n", + "ax.set_title(\"Flooding AUP House with measure\")\n", "\n", "# inundación reducida hasta 4.8 metros en los eventos más graves:\n", "import numpy as np\n", + "\n", "haz_diff = copy.deepcopy(haz_2015)\n", - "haz_diff.intensity = (haz_2015.intensity - meas_haz_2015.intensity)\n", - "ax = haz_diff.plot_intensity(0) # maximum intensity difference at each point\n", - "ax.set_title('Maximumm reduced intensity with measure');" + "haz_diff.intensity = haz_2015.intensity - meas_haz_2015.intensity\n", + "ax = haz_diff.plot_intensity(0) # maximum intensity difference at each point\n", + "ax.set_title(\"Maximumm reduced intensity with measure\");" ] }, { @@ -569,7 +584,7 @@ ], "source": [ "# nombre de cada medida considerada\n", - "for meas in ent_2040.measures.get_measure('FL'): # measures related to flood (FL)\n", + "for meas in ent_2040.measures.get_measure(\"FL\"): # measures related to 
flood (FL)\n", " print(meas.name)" ] }, @@ -668,8 +683,8 @@ ], "source": [ "cb_acel = CostBenefit()\n", - "cb_acel.calc(haz_2015, ent_2015, haz_2040, ent_2040, save_imp=True) # calcula\n", - "cb_acel.plot_cost_benefit(); # dibuja el cociente beneficio/costo por medida" + "cb_acel.calc(haz_2015, ent_2015, haz_2040, ent_2040, save_imp=True) # calcula\n", + "cb_acel.plot_cost_benefit(); # dibuja el cociente beneficio/costo por medida" ] }, { @@ -718,8 +733,13 @@ ], "source": [ "import matplotlib.colors as colors\n", - "cb_comb = cb_acel.combine_measures(['No descargas en Lluvia', 'Ahorradores en Agua en casas'],\n", - " 'Domestico', colors.to_rgb('lightcoral'), ent_2015.disc_rates)" + "\n", + "cb_comb = cb_acel.combine_measures(\n", + " [\"No descargas en Lluvia\", \"Ahorradores en Agua en casas\"],\n", + " \"Domestico\",\n", + " colors.to_rgb(\"lightcoral\"),\n", + " ent_2015.disc_rates,\n", + ")" ] }, { @@ -765,8 +785,14 @@ } ], "source": [ - "cb_comb.apply_risk_transfer('Domestico', attachment=1000, cover=22000000, \n", - " disc_rates=ent_2015.disc_rates, cost_fix=0, cost_factor=1.5)" + "cb_comb.apply_risk_transfer(\n", + " \"Domestico\",\n", + " attachment=1000,\n", + " cover=22000000,\n", + " disc_rates=ent_2015.disc_rates,\n", + " cost_fix=0,\n", + " cost_factor=1.5,\n", + ")" ] }, { @@ -859,7 +885,9 @@ ], "source": [ "ax = cb_acel.plot_waterfall_accumulated(haz_2015, ent_2015, ent_2040)\n", - "cb_acel.plot_arrow_averted(ax, accumulate=True, combine=True, disc_rates=ent_2015.disc_rates) # plot total averted damages" + "cb_acel.plot_arrow_averted(\n", + " ax, accumulate=True, combine=True, disc_rates=ent_2015.disc_rates\n", + ") # plot total averted damages" ] }, { @@ -893,6 +921,7 @@ "source": [ "# show difference between sublinear, linear and superlinear expected annual damage growth\n", "import functions_ss\n", + "\n", "functions_ss.non_linear_growth(cb_acel)" ] }, @@ -996,10 +1025,14 @@ ], "source": [ "# change growth\n", - "growth_fact = 0.5 # < 1: sublinear, >1: superlinear\n", + "growth_fact = 0.5 # < 1: sublinear, >1: superlinear\n", "cb_acel_sub = CostBenefit()\n", - "cb_acel_sub.calc(haz_2015, ent_2015, haz_2040, ent_2040, imp_time_depen=growth_fact, save_imp=True)\n", - "cb_acel_sub.plot_waterfall_accumulated(haz_2015, ent_2015, ent_2040, imp_time_depen=growth_fact);" + "cb_acel_sub.calc(\n", + " haz_2015, ent_2015, haz_2040, ent_2040, imp_time_depen=growth_fact, save_imp=True\n", + ")\n", + "cb_acel_sub.plot_waterfall_accumulated(\n", + " haz_2015, ent_2015, ent_2040, imp_time_depen=growth_fact\n", + ");" ] }, { diff --git a/script/applications/eca_san_salvador/San_Salvador_Adaptation.ipynb b/script/applications/eca_san_salvador/San_Salvador_Adaptation.ipynb index 0701e4759..98388d991 100644 --- a/script/applications/eca_san_salvador/San_Salvador_Adaptation.ipynb +++ b/script/applications/eca_san_salvador/San_Salvador_Adaptation.ipynb @@ -92,22 +92,25 @@ ], "source": [ "from warnings import simplefilter\n", - "simplefilter(action='ignore')\n", + "\n", + "simplefilter(action=\"ignore\")\n", "import contextily as ctx\n", "from climada.entity import Entity\n", "\n", - "ent_2015 = Entity.from_excel('FL_entity_Acelhuate_houses.xlsx')\n", - "ent_2015.exposures.ref_year = 2015 # fix reference year\n", + "ent_2015 = Entity.from_excel(\"FL_entity_Acelhuate_houses.xlsx\")\n", + "ent_2015.exposures.ref_year = 2015 # fix reference year\n", "ent_2015.check()\n", "\n", "# Exposures: the ones we used in San Salvador Risk script\n", - "print('Total value in 2015: 
{:.3e}'.format(ent_2015.exposures.gdf.value.sum()))\n", - "ax = ent_2015.exposures.plot_basemap(s=1, zoom=15, url=ctx.providers.OpenStreetMap.Mapnik, vmax=60000, cmap='autumn')\n", - "ax.set_title('Exposure 2015')\n", + "print(\"Total value in 2015: {:.3e}\".format(ent_2015.exposures.gdf.value.sum()))\n", + "ax = ent_2015.exposures.plot_basemap(\n", + " s=1, zoom=15, url=ctx.providers.OpenStreetMap.Mapnik, vmax=60000, cmap=\"autumn\"\n", + ")\n", + "ax.set_title(\"Exposure 2015\")\n", "\n", "# Impact Functions: the ones we used in San Salvador Risk script\n", - "ent_2015.impact_funcs.get_func('FL', 101).plot()\n", - "ent_2015.impact_funcs.get_func('FL', 102).plot()\n", + "ent_2015.impact_funcs.get_func(\"FL\", 101).plot()\n", + "ent_2015.impact_funcs.get_func(\"FL\", 102).plot()\n", "\n", "# Discount rate: 2% yearly discount year until 2040\n", "ent_2015.disc_rates.plot();\n", @@ -165,12 +168,16 @@ "# Exposures: yearl economic growth of 2% in exposures\n", "ent_2040.exposures.ref_year = 2040\n", "growth = 0.02\n", - "ent_2040.exposures.gdf['value'] = ent_2040.exposures.gdf.value.values*(1 + growth)**(ent_2040.exposures.ref_year - ent_2015.exposures.ref_year)\n", - "ent_2040.check() # check values are well set and assignes default values\n", + "ent_2040.exposures.gdf[\"value\"] = ent_2040.exposures.gdf.value.values * (\n", + " 1 + growth\n", + ") ** (ent_2040.exposures.ref_year - ent_2015.exposures.ref_year)\n", + "ent_2040.check() # check values are well set and assignes default values\n", "\n", - "print('Total value in 2040: {:.3e}'.format(ent_2040.exposures.gdf.value.sum()))\n", - "ax = ent_2040.exposures.plot_basemap(s=1, zoom=15, url=ctx.providers.OpenStreetMap.Mapnik, vmax=60000, cmap='autumn')\n", - "ax.set_title('Exposure 2040');" + "print(\"Total value in 2040: {:.3e}\".format(ent_2040.exposures.gdf.value.sum()))\n", + "ax = ent_2040.exposures.plot_basemap(\n", + " s=1, zoom=15, url=ctx.providers.OpenStreetMap.Mapnik, vmax=60000, cmap=\"autumn\"\n", + ")\n", + "ax.set_title(\"Exposure 2040\");" ] }, { @@ -212,11 +219,13 @@ "import matplotlib.patches as patches\n", "from climada.hazard import Hazard\n", "\n", - "HAZ_FILE = 'Salvador_hazard_FL_2015.mat'\n", + "HAZ_FILE = \"Salvador_hazard_FL_2015.mat\"\n", "\n", - "haz_2015 = Hazard.from_mat(HAZ_FILE) # load file\n", + "haz_2015 = Hazard.from_mat(HAZ_FILE) # load file\n", "ax = haz_2015.plot_intensity(0) # maximum intensity reached at each point\n", - "rect = patches.Rectangle((-0.0027, 13.6738), 0.0355, 0.0233, linewidth=1, edgecolor='r', facecolor='none') # add exposures focus\n", + "rect = patches.Rectangle(\n", + " (-0.0027, 13.6738), 0.0355, 0.0233, linewidth=1, edgecolor=\"r\", facecolor=\"none\"\n", + ") # add exposures focus\n", "ax.add_patch(rect);" ] }, @@ -265,11 +274,13 @@ "# flood as for 2040 with extreme climate change\n", "from climada.hazard import Hazard\n", "\n", - "HAZ_FILE = 'Salvador_hazard_FL_2040_extreme_cc.mat'\n", + "HAZ_FILE = \"Salvador_hazard_FL_2040_extreme_cc.mat\"\n", "\n", - "haz_2040 = Hazard.from_mat(HAZ_FILE) # load file\n", + "haz_2040 = Hazard.from_mat(HAZ_FILE) # load file\n", "ax = haz_2040.plot_intensity(0) # maximum intensity reached at each point\n", - "rect = patches.Rectangle((-0.0027, 13.6738), 0.0355, 0.0233, linewidth=1, edgecolor='r', facecolor='none') # add exposures focus\n", + "rect = patches.Rectangle(\n", + " (-0.0027, 13.6738), 0.0355, 0.0233, linewidth=1, edgecolor=\"r\", facecolor=\"none\"\n", + ") # add exposures focus\n", "ax.add_patch(rect);" ] }, @@ -310,7 +321,7 @@ "\n", 
"cb_acel = CostBenefit()\n", "ax = cb_acel.plot_waterfall(haz_2015, ent_2015, haz_2040, ent_2040)\n", - "ax.set_title('Expected Annual Impact');" + "ax.set_title(\"Expected Annual Impact\");" ] }, { @@ -345,8 +356,10 @@ "from climada.engine import risk_rp_100\n", "\n", "cb_acel = CostBenefit()\n", - "ax = cb_acel.plot_waterfall(haz_2015, ent_2015, haz_2040, ent_2040, risk_func=risk_rp_100)\n", - "ax.set_title('Impact Exceedance at 100 years Return Period');" + "ax = cb_acel.plot_waterfall(\n", + " haz_2015, ent_2015, haz_2040, ent_2040, risk_func=risk_rp_100\n", + ")\n", + "ax.set_title(\"Impact Exceedance at 100 years Return Period\");" ] }, { @@ -400,22 +413,25 @@ ], "source": [ "# Measure impact in 2015: No descargas en Lluvia\n", - "meas = ent_2015.measures.get_measure('FL', 'No descargas en Lluvia')\n", - "print('Measure cost {:.3e} USD'.format(meas.cost))\n", - "meas_exp_2015, meas_impf2015, meas_haz_2015 = meas.apply(ent_2015.exposures, ent_2015.impact_funcs, haz_2015)\n", + "meas = ent_2015.measures.get_measure(\"FL\", \"No descargas en Lluvia\")\n", + "print(\"Measure cost {:.3e} USD\".format(meas.cost))\n", + "meas_exp_2015, meas_impf2015, meas_haz_2015 = meas.apply(\n", + " ent_2015.exposures, ent_2015.impact_funcs, haz_2015\n", + ")\n", "\n", "# exposures stay the same\n", "\n", "# impact functions slightly improved:\n", - "ax = meas_impf2015.get_func('FL', 101).plot()\n", - "ax.set_title('Flooding AUP House with measure')\n", + "ax = meas_impf2015.get_func(\"FL\", 101).plot()\n", + "ax.set_title(\"Flooding AUP House with measure\")\n", "\n", "# flood reduced up to 4.8 meters in worst events:\n", "import numpy as np\n", + "\n", "haz_diff = copy.deepcopy(haz_2015)\n", - "haz_diff.intensity = (haz_2015.intensity - meas_haz_2015.intensity)\n", - "ax = haz_diff.plot_intensity(0) # maximum intensity difference at each point\n", - "ax.set_title('Maximumm reduced intensity with measure');" + "haz_diff.intensity = haz_2015.intensity - meas_haz_2015.intensity\n", + "ax = haz_diff.plot_intensity(0) # maximum intensity difference at each point\n", + "ax.set_title(\"Maximumm reduced intensity with measure\");" ] }, { @@ -445,7 +461,7 @@ ], "source": [ "# name of every considered measure\n", - "for meas in ent_2040.measures.get_measure('FL'): # measures related to flood (FL)\n", + "for meas in ent_2040.measures.get_measure(\"FL\"): # measures related to flood (FL)\n", " print(meas.name)" ] }, @@ -491,8 +507,8 @@ ], "source": [ "cb_acel = CostBenefit()\n", - "cb_acel.calc(haz_2015, ent_2015, haz_2040, ent_2040, save_imp=True) # compute\n", - "cb_acel.plot_cost_benefit(); # plot benefit/cost ratio per measure" + "cb_acel.calc(haz_2015, ent_2015, haz_2040, ent_2040, save_imp=True) # compute\n", + "cb_acel.plot_cost_benefit(); # plot benefit/cost ratio per measure" ] }, { @@ -541,8 +557,13 @@ ], "source": [ "import matplotlib.colors as colors\n", - "cb_comb = cb_acel.combine_measures(['No descargas en Lluvia', 'Ahorradores en Agua en casas'],\n", - " 'Domestico', colors.to_rgb('lightcoral'), ent_2015.disc_rates)" + "\n", + "cb_comb = cb_acel.combine_measures(\n", + " [\"No descargas en Lluvia\", \"Ahorradores en Agua en casas\"],\n", + " \"Domestico\",\n", + " colors.to_rgb(\"lightcoral\"),\n", + " ent_2015.disc_rates,\n", + ")" ] }, { @@ -588,8 +609,14 @@ } ], "source": [ - "cb_comb.apply_risk_transfer('Domestico', attachment=1000, cover=22000000, \n", - " disc_rates=ent_2015.disc_rates, cost_fix=0, cost_factor=1.5)" + "cb_comb.apply_risk_transfer(\n", + " \"Domestico\",\n", + " 
attachment=1000,\n", + " cover=22000000,\n", + " disc_rates=ent_2015.disc_rates,\n", + " cost_fix=0,\n", + " cost_factor=1.5,\n", + ")" ] }, { @@ -674,7 +701,9 @@ ], "source": [ "ax = cb_acel.plot_waterfall_accumulated(haz_2015, ent_2015, ent_2040)\n", - "cb_acel.plot_arrow_averted(ax, accumulate=True, combine=True, disc_rates=ent_2015.disc_rates); # plot total averted damages" + "cb_acel.plot_arrow_averted(\n", + " ax, accumulate=True, combine=True, disc_rates=ent_2015.disc_rates\n", + "); # plot total averted damages" ] }, { @@ -705,6 +734,7 @@ "source": [ "# show difference between sublinear, linear and superlinear expected annual damage growth\n", "import functions_ss\n", + "\n", "functions_ss.non_linear_growth(cb_acel)" ] }, @@ -750,10 +780,14 @@ ], "source": [ "# change growth\n", - "growth_fact = 0.5 # < 1: sublinear, >1: superlinear\n", + "growth_fact = 0.5 # < 1: sublinear, >1: superlinear\n", "cb_acel_sub = CostBenefit()\n", - "cb_acel_sub.calc(haz_2015, ent_2015, haz_2040, ent_2040, imp_time_depen=growth_fact, save_imp=True)\n", - "cb_acel_sub.plot_waterfall_accumulated(haz_2015, ent_2015, ent_2040, imp_time_depen=growth_fact);" + "cb_acel_sub.calc(\n", + " haz_2015, ent_2015, haz_2040, ent_2040, imp_time_depen=growth_fact, save_imp=True\n", + ")\n", + "cb_acel_sub.plot_waterfall_accumulated(\n", + " haz_2015, ent_2015, ent_2040, imp_time_depen=growth_fact\n", + ");" ] }, { diff --git a/script/applications/eca_san_salvador/San_Salvador_Parametric.ipynb b/script/applications/eca_san_salvador/San_Salvador_Parametric.ipynb index 3cafb8b3c..360be7511 100644 --- a/script/applications/eca_san_salvador/San_Salvador_Parametric.ipynb +++ b/script/applications/eca_san_salvador/San_Salvador_Parametric.ipynb @@ -44,13 +44,13 @@ "import contextily as ctx\n", "from climada.engine import Impact\n", "\n", - "ent_2015_param = Entity.from_excel('FL_entity_Acelhuate_parametric.xlsx')\n", - "ent_2015_param.exposures.ref_year = 2015 # fix reference year\n", + "ent_2015_param = Entity.from_excel(\"FL_entity_Acelhuate_parametric.xlsx\")\n", + "ent_2015_param.exposures.ref_year = 2015 # fix reference year\n", "ent_2015_param.check()\n", "\n", "# flood as for 2015\n", - "HAZ_FILE = 'Salvador_hazard_FL_2015.mat'\n", - "haz_2015 = Hazard.from_mat(HAZ_FILE) # load file" + "HAZ_FILE = \"Salvador_hazard_FL_2015.mat\"\n", + "haz_2015 = Hazard.from_mat(HAZ_FILE) # load file" ] }, { @@ -97,9 +97,13 @@ ], "source": [ "param_payout = Impact()\n", - "param_payout.calc(ent_2015_param.exposures, ent_2015_param.impact_funcs, haz_2015) # compute parametric payout\n", - "print('Annual expected payout: {:} USD'.format(param_payout.aai_agg)) # get average annual payout\n", - "param_payout.calc_freq_curve().plot() " + "param_payout.calc(\n", + " ent_2015_param.exposures, ent_2015_param.impact_funcs, haz_2015\n", + ") # compute parametric payout\n", + "print(\n", + " \"Annual expected payout: {:} USD\".format(param_payout.aai_agg)\n", + ") # get average annual payout\n", + "param_payout.calc_freq_curve().plot()" ] }, { @@ -163,8 +167,8 @@ } ], "source": [ - "ent_2015 = Entity.from_excel('FL_entity_Acelhuate_houses.xlsx')\n", - "ent_2015.exposures.ref_year = 2015 # fix reference year\n", + "ent_2015 = Entity.from_excel(\"FL_entity_Acelhuate_houses.xlsx\")\n", + "ent_2015.exposures.ref_year = 2015 # fix reference year\n", "ent_2015.check()\n", "\n", "ent_2040 = copy.deepcopy(ent_2015)\n", @@ -172,19 +176,25 @@ "# Exposures: yearly economic growth of 2% in exposures\n", "ent_2040.exposures.ref_year = 2040\n", "growth = 
0.02\n", - "ent_2040.exposures.gdf['value'] = ent_2040.exposures.gdf.value.values*(1 + growth)**(ent_2040.exposures.ref_year - ent_2015.exposures.ref_year)\n", - "ent_2040.check() # check values are well set and assignes default values\n", + "ent_2040.exposures.gdf[\"value\"] = ent_2040.exposures.gdf.value.values * (\n", + " 1 + growth\n", + ") ** (ent_2040.exposures.ref_year - ent_2015.exposures.ref_year)\n", + "ent_2040.check() # check values are well set and assignes default values\n", "\n", "# flood as for 2040 with extreme climate change\n", - "HAZ_FILE = 'Salvador_hazard_FL_2040_extreme_cc.mat'\n", - "haz_2040 = Hazard.from_mat(HAZ_FILE) # load file\n", + "HAZ_FILE = \"Salvador_hazard_FL_2040_extreme_cc.mat\"\n", + "haz_2040 = Hazard.from_mat(HAZ_FILE) # load file\n", "\n", "# expected annual impact\n", "cb_acel = CostBenefit()\n", - "cb_acel.calc(haz_2015, ent_2015, haz_2040, ent_2040, save_imp=True) # compute\n", + "cb_acel.calc(haz_2015, ent_2015, haz_2040, ent_2040, save_imp=True) # compute\n", "\n", - "cb_comb = cb_acel.combine_measures(['No descargas en Lluvia', 'Ahorradores en Agua en casas'],\n", - " 'Domestico', colors.to_rgb('lightcoral'), ent_2015.disc_rates)" + "cb_comb = cb_acel.combine_measures(\n", + " [\"No descargas en Lluvia\", \"Ahorradores en Agua en casas\"],\n", + " \"Domestico\",\n", + " colors.to_rgb(\"lightcoral\"),\n", + " ent_2015.disc_rates,\n", + ")" ] }, { @@ -208,10 +218,12 @@ } ], "source": [ - "damage_after_measures=cb_comb.imp_meas_present['Domestico']['impact'].at_event\n", - "paramteric_payout=param_payout.at_event\n", - "residual_damage=np.sum((damage_after_measures-paramteric_payout)*haz_2015.frequency)\n", - "print('residual damage: {:.3e} USD'.format(residual_damage))" + "damage_after_measures = cb_comb.imp_meas_present[\"Domestico\"][\"impact\"].at_event\n", + "paramteric_payout = param_payout.at_event\n", + "residual_damage = np.sum(\n", + " (damage_after_measures - paramteric_payout) * haz_2015.frequency\n", + ")\n", + "print(\"residual damage: {:.3e} USD\".format(residual_damage))" ] } ], diff --git a/script/applications/eca_san_salvador/San_Salvador_Risk.ipynb b/script/applications/eca_san_salvador/San_Salvador_Risk.ipynb index 29da95b78..b73180b38 100644 --- a/script/applications/eca_san_salvador/San_Salvador_Risk.ipynb +++ b/script/applications/eca_san_salvador/San_Salvador_Risk.ipynb @@ -16,6 +16,7 @@ "%%capture\n", "# generate plots used in this script\n", "import functions_ss\n", + "\n", "fig_ma, fig_point, fig_houses, fig_if = functions_ss.generate_plots_risk()" ] }, @@ -245,7 +246,7 @@ } ], "source": [ - "acc_df = functions_ss.load_accounting() # load accounting.xlsx\n", + "acc_df = functions_ss.load_accounting() # load accounting.xlsx\n", "acc_df.head()" ] }, @@ -424,10 +425,10 @@ "import pandas as pd\n", "from climada.entity import Exposures\n", "\n", - "ENT_FILE = 'FL_entity_Acelhuate_houses.xlsx' # entity file name\n", + "ENT_FILE = \"FL_entity_Acelhuate_houses.xlsx\" # entity file name\n", "\n", "exp_acel = Exposures(pd.read_excel(ENT_FILE))\n", - "exp_acel.check() # check values are well set and assigns default values\n", + "exp_acel.check() # check values are well set and assigns default values\n", "exp_acel.gdf.head() # show first 5 rows" ] }, @@ -459,8 +460,12 @@ ], "source": [ "# some statistics on AUPs and non AUPs\n", - "print('Number of houses, mean and total value of AUP and non AUP: \\n')\n", - "print(exp_acel.gdf[['category', 'value']].groupby('category').agg(['count', 'mean', 'sum']))" + "print(\"Number of houses, 
mean and total value of AUP and non AUP: \\n\")\n", + "print(\n", + " exp_acel.gdf[[\"category\", \"value\"]]\n", + " .groupby(\"category\")\n", + " .agg([\"count\", \"mean\", \"sum\"])\n", + ")" ] }, { @@ -488,7 +493,7 @@ } ], "source": [ - "print(exp_acel.gdf[['category', 'impf_FL']].groupby('category').agg(['unique']))" + "print(exp_acel.gdf[[\"category\", \"impf_FL\"]].groupby(\"category\").agg([\"unique\"]))" ] }, { @@ -551,9 +556,11 @@ "impf_acel = ImpactFuncSet.from_excel(ENT_FILE)\n", "impf_acel.check()\n", "\n", - "print('MDD: mean damage ratio; PAA: percentage of afected assets; MDR = PAA*MDD: mean damage ratio:')\n", - "impf_acel.get_func('FL', 101).plot() # plot flood function 101\n", - "impf_acel.get_func('FL', 102).plot(); # plot flood function 102" + "print(\n", + " \"MDD: mean damage ratio; PAA: percentage of afected assets; MDR = PAA*MDD: mean damage ratio:\"\n", + ")\n", + "impf_acel.get_func(\"FL\", 101).plot() # plot flood function 101\n", + "impf_acel.get_func(\"FL\", 102).plot(); # plot flood function 102" ] }, { @@ -573,9 +580,9 @@ "source": [ "from climada.hazard import Hazard\n", "\n", - "HAZ_FILE = 'Salvador_hazard_FL_2015.mat'\n", + "HAZ_FILE = \"Salvador_hazard_FL_2015.mat\"\n", "\n", - "haz_acel = Hazard.from_mat(HAZ_FILE) # load file" + "haz_acel = Hazard.from_mat(HAZ_FILE) # load file" ] }, { @@ -680,7 +687,7 @@ "from climada.engine import Impact\n", "\n", "imp_acel = Impact()\n", - "imp_acel.calc(exp_acel, impf_acel, haz_acel) # compute hazard's impact over exposure" + "imp_acel.calc(exp_acel, impf_acel, haz_acel) # compute hazard's impact over exposure" ] }, { @@ -719,8 +726,10 @@ } ], "source": [ - "print('Annual expected impact: {:.3e} USD'.format(imp_acel.aai_agg)) # get average annual impact\n", - "imp_acel.calc_freq_curve().plot(); # plot exceedance frequency curve" + "print(\n", + " \"Annual expected impact: {:.3e} USD\".format(imp_acel.aai_agg)\n", + ") # get average annual impact\n", + "imp_acel.calc_freq_curve().plot(); # plot exceedance frequency curve" ] }, { @@ -748,7 +757,11 @@ "point_lat = exp_acel.gdf.latitude.values[point_idx]\n", "point_lon = exp_acel.gdf.longitude.values[point_idx]\n", "point_eai = imp_acel.eai_exp[point_idx]\n", - "print('Annual expected impact in {:.4f}° N {:.4f}° W is {:.0f} USD.'.format(-point_lat, point_lon, point_eai))" + "print(\n", + " \"Annual expected impact in {:.4f}° N {:.4f}° W is {:.0f} USD.\".format(\n", + " -point_lat, point_lon, point_eai\n", + " )\n", + ")" ] }, { @@ -796,7 +809,10 @@ ], "source": [ "import contextily as ctx\n", - "imp_acel.plot_basemap_eai_exposure(url=ctx.providers.OpenStreetMap.Mapnik, zoom=15, s=2, cmap='gnuplot');" + "\n", + "imp_acel.plot_basemap_eai_exposure(\n", + " url=ctx.providers.OpenStreetMap.Mapnik, zoom=15, s=2, cmap=\"gnuplot\"\n", + ");" ] }, { @@ -837,8 +853,15 @@ ], "source": [ "import numpy as np\n", - "ax = imp_acel.plot_basemap_eai_exposure(mask=np.argwhere((exp_acel.gdf.category==2).to_numpy()).reshape(-1), url=ctx.providers.OpenStreetMap.Mapnik, zoom=15, s=2, cmap='gnuplot')\n", - "ax.set_title('Expected Annual Impact - no AUP');" + "\n", + "ax = imp_acel.plot_basemap_eai_exposure(\n", + " mask=np.argwhere((exp_acel.gdf.category == 2).to_numpy()).reshape(-1),\n", + " url=ctx.providers.OpenStreetMap.Mapnik,\n", + " zoom=15,\n", + " s=2,\n", + " cmap=\"gnuplot\",\n", + ")\n", + "ax.set_title(\"Expected Annual Impact - no AUP\");" ] }, { @@ -878,8 +901,14 @@ } ], "source": [ - "ax = 
imp_acel.plot_basemap_eai_exposure(mask=np.argwhere((exp_acel.gdf.category==1).to_numpy()).reshape(-1), url=ctx.providers.OpenStreetMap.Mapnik, zoom=15, s=2, cmap='gnuplot')\n", - "ax.set_title('Expected Annual Impact - AUP');" + "ax = imp_acel.plot_basemap_eai_exposure(\n", + " mask=np.argwhere((exp_acel.gdf.category == 1).to_numpy()).reshape(-1),\n", + " url=ctx.providers.OpenStreetMap.Mapnik,\n", + " zoom=15,\n", + " s=2,\n", + " cmap=\"gnuplot\",\n", + ")\n", + "ax.set_title(\"Expected Annual Impact - AUP\");" ] }, { @@ -906,15 +935,21 @@ } ], "source": [ - "eai_aup = imp_acel.eai_exp[exp_acel.gdf[exp_acel.gdf.category==1].index].sum()\n", - "print('Annual expected impact of AUPs: {:.3e} USD.'.format(eai_aup))\n", - "eai_per_aup = eai_aup/exp_acel.gdf[exp_acel.gdf.category==1].value.sum()*100\n", - "print('Annual expected impact of AUPs over its total value: {:.2f}%.'.format(eai_per_aup))\n", + "eai_aup = imp_acel.eai_exp[exp_acel.gdf[exp_acel.gdf.category == 1].index].sum()\n", + "print(\"Annual expected impact of AUPs: {:.3e} USD.\".format(eai_aup))\n", + "eai_per_aup = eai_aup / exp_acel.gdf[exp_acel.gdf.category == 1].value.sum() * 100\n", + "print(\n", + " \"Annual expected impact of AUPs over its total value: {:.2f}%.\".format(eai_per_aup)\n", + ")\n", "\n", - "eai_no_aup = imp_acel.eai_exp[exp_acel.gdf[exp_acel.gdf.category==2].index].sum()\n", - "print('Annual expected impact of non AUPs: {:.3e} USD.'.format(eai_no_aup))\n", - "eai_per_no_aup = eai_no_aup/exp_acel.gdf[exp_acel.gdf.category==1].value.sum()*100\n", - "print('Annual expected impact of non AUPs over its total value: {:.2f}%.'.format(eai_per_no_aup))" + "eai_no_aup = imp_acel.eai_exp[exp_acel.gdf[exp_acel.gdf.category == 2].index].sum()\n", + "print(\"Annual expected impact of non AUPs: {:.3e} USD.\".format(eai_no_aup))\n", + "eai_per_no_aup = eai_no_aup / exp_acel.gdf[exp_acel.gdf.category == 1].value.sum() * 100\n", + "print(\n", + " \"Annual expected impact of non AUPs over its total value: {:.2f}%.\".format(\n", + " eai_per_no_aup\n", + " )\n", + ")" ] } ], diff --git a/script/applications/eca_san_salvador/functions_ss.py b/script/applications/eca_san_salvador/functions_ss.py index caee8a4f5..3d0478558 100755 --- a/script/applications/eca_san_salvador/functions_ss.py +++ b/script/applications/eca_san_salvador/functions_ss.py @@ -19,47 +19,59 @@ Define WaterScarcity (WS) class. 
WORK IN PROGRESS """ + import contextily as ctx import geopandas as gpd import matplotlib.patches as patches from matplotlib import colormaps as cm from shapely import wkt + def plot_salvador_ma(): - risk_shape = 'POLYGON ((-89.25090785340315 13.671, -89.251 13.671, -89.251 13.67108933717579, -89.251 13.67117867435158, -89.251 13.67126801152738, -89.251 13.67135734870317, -89.251 13.67144668587896, -89.251 13.67153602305475, -89.251 13.67162536023055, -89.251 13.67171469740634, -89.251 13.67180403458213, -89.251 13.67189337175792, -89.251 13.67198270893372, -89.251 13.67207204610951, -89.251 13.6721613832853, -89.251 13.6722507204611, -89.251 13.67234005763689, -89.251 13.67242939481268, -89.251 13.67251873198847, -89.251 13.67260806916426, -89.251 13.67269740634006, -89.251 13.67278674351585, -89.251 13.67287608069164, -89.251 13.67296541786743, -89.251 13.67305475504323, -89.251 13.67314409221902, -89.251 13.67323342939481, -89.251 13.6733227665706, -89.251 13.6734121037464, -89.251 13.67350144092219, -89.251 13.67359077809798, -89.251 13.67368011527378, -89.251 13.67376945244957, -89.251 13.67385878962536, -89.251 13.67394812680115, -89.251 13.67403746397694, -89.251 13.67412680115274, -89.251 13.67421613832853, -89.251 13.67430547550432, -89.251 13.67439481268011, -89.251 13.67448414985591, -89.251 13.6745734870317, -89.251 13.67466282420749, -89.251 13.67475216138329, -89.251 13.67484149855908, -89.251 13.67493083573487, -89.251 13.67502017291066, -89.251 13.67510951008645, -89.251 13.67519884726225, -89.251 13.67528818443804, -89.251 13.67537752161383, -89.251 13.67546685878962, -89.251 13.67555619596542, -89.251 13.67564553314121, -89.251 13.675734870317, -89.251 13.67582420749279, -89.251 13.67591354466859, -89.251 13.67600288184438, -89.251 13.67609221902017, -89.251 13.67618155619597, -89.251 13.67627089337176, -89.251 13.67636023054755, -89.251 13.67644956772334, -89.251 13.67653890489913, -89.251 13.67662824207493, -89.251 13.67671757925072, -89.251 13.67680691642651, -89.251 13.6768962536023, -89.251 13.6769855907781, -89.251 13.67707492795389, -89.251 13.67716426512968, -89.251 13.67725360230548, -89.251 13.67734293948127, -89.251 13.67743227665706, -89.251 13.67752161383285, -89.251 13.67761095100865, -89.251 13.67770028818444, -89.251 13.67778962536023, -89.251 13.67787896253602, -89.251 13.67796829971181, -89.251 13.67805763688761, -89.25090785340315 13.67832564841498, -89.25081570680629 13.67850432276657, -89.25072356020942 13.67868299711816, -89.25063141361257 13.67886167146974, -89.250354973822 13.67921902017291, -89.25017068062827 13.67948703170029, -89.2498942408377 13.67984438040346, -89.24961780104712 13.68020172910663, -89.24934136125655 13.6805590778098, -89.24915706806283 13.68082708933717, -89.24888062827226 13.68118443804035, -89.24860418848168 13.68154178674352, -89.24832774869111 13.68189913544669, -89.24814345549738 13.68216714697406, -89.24786701570682 13.68252449567723, -89.24759057591623 13.6828818443804, -89.24740628272252 13.68314985590778, -89.24712984293194 13.68350720461095, -89.24685340314137 13.68386455331412, -89.24657696335079 13.68422190201729, -89.24639267015708 13.68448991354467, -89.24556335078535 13.68556195965418, -89.24510261780105 13.68609798270893, -89.2450104712042 13.68618731988473, -89.24491832460734 13.68627665706052, -89.24436544502618 13.68690201729107, -89.24427329842932 13.68699135446686, -89.24372041884817 13.68761671469741, -89.24362827225131 13.6877060518732, -89.24353612565446 13.68779538904899, -89.24298324607331 
13.68842074927954, -89.24289109947644 13.68851008645533, -89.24233821989529 13.68913544668588, -89.24224607329843 13.68922478386167, -89.24169319371728 13.68985014409222, -89.24160104712043 13.68993948126801, -89.24150890052357 13.6900288184438, -89.24095602094241 13.69065417867435, -89.24086387434555 13.69074351585014, -89.24077172774869 13.69083285302594, -89.24067958115184 13.69092219020173, -89.24058743455498 13.69101152737752, -89.24049528795813 13.69110086455331, -89.24040314136126 13.69119020172911, -89.2403109947644 13.6912795389049, -89.24021884816754 13.69136887608069, -89.23975811518325 13.69163688760807, -89.23929738219896 13.69190489913545, -89.23865235602095 13.69226224783862, -89.23819162303666 13.69253025936599, -89.23773089005236 13.69279827089337, -89.23708586387436 13.69315561959654, -89.23662513089006 13.69342363112392, -89.23616439790577 13.6936916426513, -89.23570366492147 13.69395965417867, -89.23505863874345 13.69431700288184, -89.23459790575916 13.69458501440922, -89.23413717277488 13.6948530259366, -89.23376858638744 13.69494236311239, -89.23321570680629 13.69503170028818, -89.23266282722513 13.69512103746398, -89.23220209424085 13.69521037463977, -89.23164921465968 13.69529971181556, -89.23109633507853 13.69538904899135, -89.23054345549738 13.69547838616715, -89.23008272251309 13.69556772334294, -89.22952984293194 13.69565706051873, -89.22897696335079 13.69574639769452, -89.22851623036649 13.69583573487032, -89.22741047120419 13.6960144092219, -89.22685759162304 13.69610374639769, -89.22621256544502 13.69619308357349, -89.22612041884817 13.69619308357349, -89.22602827225131 13.69619308357349, -89.22593612565446 13.69619308357349, -89.2258439790576 13.69619308357349, -89.22575183246073 13.69619308357349, -89.22565968586387 13.69619308357349, -89.22556753926702 13.69619308357349, -89.22547539267016 13.69619308357349, -89.22538324607331 13.69619308357349, -89.22529109947644 13.69619308357349, -89.22519895287958 13.69619308357349, -89.22510680628272 13.69619308357349, -89.22501465968587 13.69619308357349, -89.22492251308901 13.69619308357349, -89.22483036649214 13.69619308357349, -89.22473821989529 13.69619308357349, -89.22464607329843 13.69619308357349, -89.22455392670157 13.69619308357349, -89.22446178010472 13.69619308357349, -89.22436963350786 13.69619308357349, -89.22427748691099 13.69619308357349, -89.22418534031414 13.69619308357349, -89.22409319371728 13.69619308357349, -89.22400104712042 13.69619308357349, -89.22390890052357 13.69619308357349, -89.2238167539267 13.69619308357349, -89.22372460732984 13.69619308357349, -89.22363246073299 13.69619308357349, -89.22354031413613 13.69619308357349, -89.22344816753927 13.69619308357349, -89.2233560209424 13.69619308357349, -89.22326387434555 13.69619308357349, -89.22317172774869 13.69619308357349, -89.2222502617801 13.69628242074928, -89.22215811518325 13.69628242074928, -89.22206596858639 13.69628242074928, -89.22197382198954 13.69628242074928, -89.22188167539267 13.69628242074928, -89.22178952879581 13.69628242074928, -89.22169738219895 13.69628242074928, -89.2216052356021 13.69628242074928, -89.22151308900524 13.69628242074928, -89.22142094240837 13.69628242074928, -89.22132879581152 13.69628242074928, -89.22123664921466 13.69628242074928, -89.2211445026178 13.69628242074928, -89.22105235602095 13.69628242074928, -89.22096020942409 13.69628242074928, -89.22086806282722 13.69628242074928, -89.22077591623037 13.69628242074928, -89.22059162303665 13.69619308357349, -89.22031518324607 13.6960144092219, 
-89.21985445026178 13.69574639769452, -89.21957801047121 13.69556772334294, -89.21930157068063 13.69538904899135, -89.21902513089006 13.69521037463977, -89.21874869109948 13.69503170028818, -89.21828795811518 13.69476368876081, -89.21801151832462 13.69458501440922, -89.21773507853403 13.69440634005764, -89.21745863874345 13.69422766570605, -89.21699790575916 13.69395965417867, -89.21672146596859 13.69378097982709, -89.21644502617801 13.6936023054755, -89.21616858638744 13.69342363112392, -89.21589214659686 13.69324495677233, -89.21543141361256 13.69297694524496, -89.215154973822 13.69279827089337, -89.21091623036649 13.69226224783862, -89.21063979057591 13.69235158501441, -89.21036335078534 13.6924409221902, -89.21008691099476 13.69253025936599, -89.20981047120419 13.69261959654179, -89.2093497382199 13.69279827089337, -89.20907329842932 13.69288760806916, -89.20879685863875 13.69297694524496, -89.20852041884817 13.69306628242075, -89.2082439790576 13.69315561959654, -89.20815183246073 13.69315561959654, -89.20805968586387 13.69315561959654, -89.20796753926702 13.69315561959654, -89.20787539267016 13.69315561959654, -89.2077832460733 13.69315561959654, -89.20769109947643 13.69315561959654, -89.20695392670157 13.69306628242075, -89.20630890052357 13.69297694524496, -89.20557172774869 13.69288760806916, -89.20492670157068 13.69279827089337, -89.20418952879581 13.69270893371758, -89.2035445026178 13.69261959654179, -89.20280732984293 13.69253025936599, -89.19976649214659 13.69288760806916, -89.19912146596859 13.69315561959654, -89.19847643979058 13.69342363112392, -89.19829214659686 13.69351296829971, -89.19764712041885 13.69378097982709, -89.19700209424084 13.69404899135447, -89.19681780104712 13.69413832853026, -89.19617277486911 13.69440634005764, -89.19552774869111 13.69467435158501, -89.19534345549738 13.69476368876081, -89.19469842931937 13.69503170028818, -89.19405340314137 13.69529971181556, -89.19386910994764 13.69538904899135, -89.19322408376964 13.69565706051873, -89.19303979057591 13.69574639769452, -89.1923947643979 13.6960144092219, -89.1917497382199 13.69628242074928, -89.19156544502617 13.69637175792507, -89.19092041884817 13.69663976945245, -89.19027539267016 13.69690778097983, -89.19009109947643 13.69699711815562, -89.18944607329843 13.697265129683, -89.18880104712042 13.69753314121037, -89.18861675392669 13.69762247838617, -89.18797172774869 13.69789048991355, -89.18732670157068 13.69815850144092, -89.18714240837696 13.69824783861671, -89.18668167539266 13.6984265129683, -89.18658952879581 13.6984265129683, -89.18649738219895 13.6984265129683, -89.18640523560209 13.6984265129683, -89.18631308900524 13.6984265129683, -89.18566806282722 13.69815850144092, -89.18502303664921 13.69789048991355, -89.18456230366492 13.69771181556196, -89.18391727748691 13.69744380403458, -89.18327225130889 13.6971757925072, -89.18262722513089 13.69690778097983, -89.1790335078534 13.69672910662824, -89.17875706806282 13.69681844380403, -89.17848062827225 13.69690778097983, -89.17820418848167 13.69699711815562, -89.1779277486911 13.69708645533141, -89.17765130890052 13.6971757925072, -89.17700628272252 13.69735446685879, -89.17672984293193 13.69744380403458, -89.17645340314137 13.69753314121037, -89.17617696335078 13.69762247838617, -89.1759005235602 13.69771181556196, -89.17562408376963 13.69780115273775, -89.17534764397905 13.69789048991355, -89.17470261780105 13.69806916426513, -89.1725832460733 13.69931988472622, -89.17249109947643 13.69940922190202, -89.17193821989528 13.70003458213256, 
-89.17184607329843 13.70012391930836, -89.17175392670157 13.70021325648415, -89.17120104712042 13.7008386167147, -89.17110890052356 13.70092795389049, -89.17101675392669 13.70101729106628, -89.17092460732984 13.70110662824207, -89.17037172774869 13.70173198847262, -89.17027958115183 13.70182132564842, -89.17018743455498 13.70191066282421, -89.17009528795812 13.702, -89.17000314136125 13.702, -89.16991099476439 13.702, -89.16981884816754 13.702, -89.16972670157068 13.702, -89.16963455497383 13.702, -89.16954240837696 13.702, -89.1694502617801 13.702, -89.16935811518324 13.702, -89.16926596858639 13.702, -89.16917382198953 13.702, -89.16908167539266 13.702, -89.16898952879581 13.702, -89.16889738219895 13.702, -89.16880523560209 13.702, -89.16871308900524 13.702, -89.16862094240837 13.702, -89.16852879581151 13.702, -89.16843664921466 13.702, -89.1683445026178 13.702, -89.16825235602094 13.702, -89.16816020942409 13.702, -89.16806806282722 13.702, -89.16797591623036 13.702, -89.16788376963351 13.702, -89.16779162303665 13.702, -89.16769947643979 13.702, -89.16760732984292 13.702, -89.16751518324607 13.702, -89.16742303664921 13.702, -89.16733089005236 13.702, -89.1672387434555 13.702, -89.16714659685863 13.702, -89.16705445026177 13.702, -89.16696230366492 13.702, -89.16687015706806 13.702, -89.16677801047121 13.702, -89.16668586387435 13.702, -89.16659371727748 13.702, -89.16650157068062 13.702, -89.16640942408377 13.702, -89.16631727748691 13.702, -89.16622513089006 13.702, -89.16613298429318 13.702, -89.16604083769633 13.702, -89.16594869109947 13.702, -89.16585654450262 13.702, -89.16576439790576 13.702, -89.16567225130889 13.702, -89.16558010471203 13.702, -89.16548795811518 13.702, -89.16539581151832 13.702, -89.16530366492147 13.702, -89.16521151832461 13.702, -89.16511937172774 13.702, -89.16502722513088 13.702, -89.16493507853403 13.702, -89.16484293193717 13.702, -89.16475078534032 13.702, -89.16465863874345 13.702, -89.16456649214659 13.702, -89.16447434554973 13.702, -89.16438219895288 13.702, -89.16429005235602 13.702, -89.16419790575915 13.702, -89.1641057591623 13.702, -89.16401361256544 13.702, -89.16392146596858 13.702, -89.16382931937173 13.702, -89.16373717277487 13.702, -89.163645026178 13.702, -89.16355287958115 13.702, -89.16346073298429 13.702, -89.16336858638743 13.702, -89.16327643979058 13.702, -89.16318429319371 13.702, -89.16309214659685 13.702, -89.163 13.702, -89.163 13.70191066282421, -89.163 13.70182132564842, -89.163 13.70173198847262, -89.163 13.70164265129683, -89.163 13.70155331412104, -89.163 13.70146397694525, -89.163 13.70137463976945, -89.163 13.70128530259366, -89.163 13.70119596541787, -89.163 13.70110662824207, -89.163 13.70101729106628, -89.163 13.70092795389049, -89.163 13.7008386167147, -89.163 13.7007492795389, -89.163 13.70065994236311, -89.163 13.70057060518732, -89.163 13.70048126801153, -89.163 13.70039193083574, -89.163 13.70030259365994, -89.163 13.70021325648415, -89.163 13.70012391930836, -89.163 13.70003458213256, -89.163 13.69994524495677, -89.163 13.69985590778098, -89.163 13.69976657060519, -89.163 13.69967723342939, -89.163 13.6995878962536, -89.163 13.69949855907781, -89.163 13.69940922190202, -89.163 13.69931988472622, -89.163 13.69923054755043, -89.163 13.69914121037464, -89.163 13.69905187319885, -89.163 13.69896253602306, -89.163 13.69887319884726, -89.163 13.69878386167147, -89.163 13.69869452449568, -89.163 13.69860518731988, -89.163 13.69851585014409, -89.163 13.6984265129683, -89.163 13.69833717579251, -89.163 
13.69824783861671, -89.163 13.69815850144092, -89.163 13.69806916426513, -89.163 13.69797982708934, -89.163 13.69789048991355, -89.163 13.69780115273775, -89.163 13.69771181556196, -89.163 13.69762247838617, -89.163 13.69753314121037, -89.163 13.69744380403458, -89.163 13.69735446685879, -89.163 13.697265129683, -89.163 13.6971757925072, -89.163 13.69708645533141, -89.163 13.69699711815562, -89.163 13.69690778097983, -89.163 13.69681844380403, -89.163 13.69672910662824, -89.163 13.69663976945245, -89.163 13.69655043227666, -89.163 13.69646109510087, -89.163 13.69637175792507, -89.163 13.69628242074928, -89.163 13.69619308357349, -89.163 13.69610374639769, -89.163 13.6960144092219, -89.163 13.69592507204611, -89.163 13.69583573487032, -89.163 13.69574639769452, -89.163 13.69565706051873, -89.163 13.69556772334294, -89.163 13.69547838616715, -89.163 13.69538904899135, -89.163 13.69529971181556, -89.163 13.69521037463977, -89.163 13.69512103746398, -89.163 13.69503170028818, -89.163 13.69494236311239, -89.163 13.6948530259366, -89.163 13.69476368876081, -89.163 13.69467435158501, -89.163 13.69458501440922, -89.163 13.69449567723343, -89.163 13.69440634005764, -89.163 13.69431700288184, -89.163 13.69422766570605, -89.163 13.69413832853026, -89.163 13.69404899135447, -89.163 13.69395965417867, -89.163 13.69387031700288, -89.163 13.69378097982709, -89.163 13.6936916426513, -89.163 13.6936023054755, -89.163 13.69351296829971, -89.163 13.69342363112392, -89.163 13.69333429394813, -89.163 13.69324495677233, -89.16327643979058 13.69306628242075, -89.16355287958115 13.69288760806916, -89.16382931937173 13.69270893371758, -89.1641057591623 13.69253025936599, -89.16438219895288 13.69235158501441, -89.16465863874345 13.69217291066282, -89.16493507853403 13.69199423631124, -89.16521151832461 13.69181556195965, -89.16548795811518 13.69163688760807, -89.16576439790576 13.69145821325648, -89.16604083769633 13.6912795389049, -89.16631727748691 13.69110086455331, -89.16659371727748 13.69092219020173, -89.16677801047121 13.69083285302594, -89.16696230366492 13.69074351585014, -89.16714659685863 13.69065417867435, -89.16733089005236 13.69056484149856, -89.16797591623036 13.69020749279539, -89.16816020942409 13.6901181556196, -89.1683445026178 13.6900288184438, -89.16852879581151 13.68993948126801, -89.16871308900524 13.68985014409222, -89.16889738219895 13.68976080691643, -89.16908167539266 13.68967146974063, -89.16972670157068 13.68931412103746, -89.16991099476439 13.68922478386167, -89.17009528795812 13.68913544668588, -89.17027958115183 13.68904610951009, -89.17046387434554 13.68895677233429, -89.17064816753927 13.6888674351585, -89.17083246073298 13.68877809798271, -89.17147748691099 13.68842074927954, -89.17166178010471 13.68833141210375, -89.17350471204188 13.68681268011527, -89.17378115183246 13.6864553314121, -89.17405759162304 13.68609798270893, -89.17442617801046 13.68565129682997, -89.17470261780105 13.6852939481268, -89.17507120418848 13.68484726224784, -89.17534764397905 13.68448991354467, -89.17571623036649 13.68404322766571, -89.17580837696335 13.68395389048991, -89.17599267015707 13.68386455331412, -89.17617696335078 13.68377521613833, -89.1763612565445 13.68368587896254, -89.17654554973822 13.68359654178674, -89.17672984293193 13.68350720461095, -89.17691413612565 13.68341786743516, -89.17709842931937 13.68332853025936, -89.17728272251308 13.68323919308357, -89.17746701570681 13.68314985590778, -89.17765130890052 13.68306051873199, -89.17783560209423 13.6829711815562, -89.17801989528796 
13.6828818443804, -89.17820418848167 13.68279250720461, -89.17884921465968 13.68252449567723, -89.1790335078534 13.68243515850144, -89.17921780104712 13.68234582132565, -89.17940209424084 13.68225648414986, -89.17958638743455 13.68216714697406, -89.17977068062827 13.68207780979827, -89.17995497382199 13.68198847262248, -89.1801392670157 13.68189913544669, -89.18032356020942 13.68180979827089, -89.18050785340314 13.6817204610951, -89.18069214659685 13.68163112391931, -89.18087643979058 13.68154178674352, -89.18152146596859 13.68127377521614, -89.1817057591623 13.68118443804035, -89.18189005235602 13.68109510086455, -89.18198219895288 13.68109510086455, -89.18207434554974 13.68109510086455, -89.18216649214659 13.68109510086455, -89.18225863874345 13.68109510086455, -89.18235078534032 13.68109510086455, -89.18244293193717 13.68109510086455, -89.18253507853403 13.68109510086455, -89.18262722513089 13.68109510086455, -89.18271937172774 13.68109510086455, -89.18281151832461 13.68109510086455, -89.18290366492147 13.68109510086455, -89.18299581151832 13.68109510086455, -89.18308795811518 13.68109510086455, -89.18318010471204 13.68109510086455, -89.18327225130889 13.68109510086455, -89.18336439790576 13.68109510086455, -89.18345654450262 13.68109510086455, -89.18354869109947 13.68109510086455, -89.18364083769633 13.68109510086455, -89.18373298429319 13.68109510086455, -89.18382513089006 13.68109510086455, -89.18806387434554 13.67868299711816, -89.18843246073298 13.67805763688761, -89.18880104712042 13.67743227665706, -89.18889319371728 13.67725360230548, -89.18926178010472 13.67662824207493, -89.18963036649214 13.67600288184438, -89.18972251308901 13.67582420749279, -89.19009109947643 13.67519884726225, -89.19045968586387 13.6745734870317, -89.19055183246073 13.67439481268011, -89.19092041884817 13.67376945244957, -89.19119685863875 13.6733227665706, -89.19138115183246 13.67323342939481, -89.19156544502617 13.67314409221902, -89.1917497382199 13.67305475504323, -89.1923947643979 13.67278674351585, -89.19257905759163 13.67269740634006, -89.19276335078534 13.67260806916426, -89.19294764397905 13.67251873198847, -89.19313193717278 13.67242939481268, -89.19331623036649 13.67234005763689, -89.1935005235602 13.6722507204611, -89.19368481675393 13.6721613832853, -89.19386910994764 13.67207204610951, -89.19405340314137 13.67198270893372, -89.19469842931937 13.67171469740634, -89.19488272251309 13.67162536023055, -89.19506701570681 13.67153602305475, -89.19525130890052 13.67144668587896, -89.19543560209424 13.67135734870317, -89.19561989528796 13.67126801152738, -89.19580418848167 13.67117867435158, -89.19598848167539 13.67108933717579, -89.19617277486911 13.671, -89.19626492146597 13.671, -89.19635706806282 13.671, -89.19644921465968 13.671, -89.19654136125655 13.671, -89.19663350785341 13.671, -89.19672565445026 13.671, -89.19681780104712 13.671, -89.19690994764397 13.671, -89.19700209424084 13.671, -89.1970942408377 13.671, -89.19718638743456 13.671, -89.19727853403141 13.671, -89.19737068062827 13.671, -89.19746282722512 13.671, -89.19755497382199 13.671, -89.19764712041885 13.671, -89.19773926701571 13.671, -89.19783141361256 13.671, -89.19792356020942 13.671, -89.19801570680629 13.671, -89.19810785340314 13.671, -89.1982 13.671, -89.19829214659686 13.671, -89.19838429319371 13.671, -89.19847643979058 13.671, -89.19856858638744 13.671, -89.19866073298429 13.671, -89.19875287958115 13.671, -89.19884502617801 13.671, -89.19893717277488 13.671, -89.19902931937173 13.671, -89.19912146596859 13.671, 
-89.19921361256544 13.671, -89.1993057591623 13.671, -89.19939790575916 13.671, -89.19949005235603 13.671, -89.19958219895288 13.671, -89.19967434554974 13.671, -89.19976649214659 13.671, -89.19985863874345 13.671, -89.19995078534032 13.671, -89.20004293193718 13.671, -89.20013507853403 13.671, -89.20022722513089 13.671, -89.20031937172774 13.671, -89.20041151832461 13.671, -89.20050366492147 13.671, -89.20059581151833 13.671, -89.20068795811518 13.671, -89.20078010471204 13.671, -89.20087225130889 13.671, -89.20096439790576 13.671, -89.20105654450262 13.671, -89.20114869109948 13.671, -89.20124083769633 13.671, -89.20133298429319 13.671, -89.20142513089006 13.671, -89.20151727748691 13.671, -89.20160942408377 13.671, -89.20170157068063 13.671, -89.20179371727748 13.671, -89.20188586387435 13.671, -89.20197801047121 13.671, -89.20207015706806 13.671, -89.20216230366492 13.671, -89.20225445026178 13.671, -89.20234659685863 13.671, -89.2024387434555 13.671, -89.20253089005236 13.671, -89.20262303664921 13.671, -89.20271518324607 13.671, -89.20280732984293 13.671, -89.2028994764398 13.671, -89.20299162303665 13.671, -89.20308376963351 13.671, -89.20317591623036 13.671, -89.20326806282722 13.671, -89.20336020942409 13.671, -89.20345235602095 13.671, -89.2035445026178 13.671, -89.20363664921466 13.671, -89.20372879581151 13.671, -89.20382094240837 13.671, -89.20391308900524 13.671, -89.2040052356021 13.671, -89.20409738219895 13.671, -89.20418952879581 13.671, -89.20428167539266 13.671, -89.20437382198953 13.671, -89.20446596858639 13.671, -89.20455811518325 13.671, -89.2046502617801 13.671, -89.20474240837696 13.671, -89.20483455497383 13.671, -89.20492670157068 13.671, -89.20501884816754 13.671, -89.2051109947644 13.671, -89.20520314136125 13.671, -89.20529528795812 13.671, -89.20538743455498 13.671, -89.20547958115183 13.671, -89.20557172774869 13.671, -89.20566387434555 13.671, -89.2057560209424 13.671, -89.20584816753927 13.671, -89.20594031413613 13.671, -89.20603246073298 13.671, -89.20612460732984 13.671, -89.2062167539267 13.671, -89.20640104712042 13.67108933717579, -89.20658534031413 13.67117867435158, -89.20676963350786 13.67126801152738, -89.20695392670157 13.67135734870317, -89.20713821989528 13.67144668587896, -89.20723036649214 13.67153602305475, -89.20723036649214 13.67162536023055, -89.20723036649214 13.67171469740634, -89.20723036649214 13.67180403458213, -89.20704607329843 13.67242939481268, -89.20686178010472 13.67305475504323, -89.20667748691099 13.67368011527378, -89.20658534031413 13.67394812680115, -89.20640104712042 13.6745734870317, -89.21036335078534 13.68118443804035, -89.21091623036649 13.68127377521614, -89.21146910994764 13.68136311239193, -89.21211413612565 13.68145244956772, -89.21266701570681 13.68154178674352, -89.21321989528796 13.68163112391931, -89.21377277486911 13.6817204610951, -89.21441780104712 13.68180979827089, -89.2146942408377 13.68189913544669, -89.21487853403141 13.68198847262248, -89.215154973822 13.68216714697406, -89.21543141361256 13.68234582132565, -89.21589214659686 13.68261383285303, -89.21616858638744 13.68279250720461, -89.21644502617801 13.6829711815562, -89.21672146596859 13.68314985590778, -89.21699790575916 13.68332853025936, -89.21727434554974 13.68350720461095, -89.21773507853403 13.68377521613833, -89.21801151832462 13.68395389048991, -89.21828795811518 13.6841325648415, -89.21856439790577 13.68431123919308, -89.21884083769633 13.68448991354467, -89.21911727748692 13.68466858789625, -89.22031518324607 13.68520461095101, 
-89.22059162303665 13.6852939481268, -89.22086806282722 13.68538328530259, -89.2211445026178 13.68547262247839, -89.22142094240837 13.68556195965418, -89.22169738219895 13.68565129682997, -89.22197382198954 13.68574063400576, -89.2222502617801 13.68582997118156, -89.22252670157069 13.68591930835735, -89.22280314136125 13.68600864553314, -89.22307958115184 13.68609798270893, -89.2233560209424 13.68618731988473, -89.22363246073299 13.68627665706052, -89.22390890052357 13.68636599423631, -89.22768691099476 13.68618731988473, -89.22833193717278 13.68591930835735, -89.22851623036649 13.68582997118156, -89.22870052356021 13.68574063400576, -89.22888481675393 13.68565129682997, -89.22906910994764 13.68556195965418, -89.22925340314137 13.68547262247839, -89.22943769633508 13.68538328530259, -89.22962198952879 13.6852939481268, -89.22980628272252 13.68520461095101, -89.22999057591623 13.68511527377522, -89.23017486910994 13.68502593659942, -89.23035916230367 13.68493659942363, -89.23054345549738 13.68484726224784, -89.23072774869111 13.68475792507205, -89.23091204188482 13.68466858789625, -89.23109633507853 13.68457925072046, -89.23174136125655 13.68431123919308, -89.23192565445027 13.68422190201729, -89.23358429319372 13.6828818443804, -89.23413717277488 13.68225648414986, -89.23469005235603 13.68163112391931, -89.23478219895289 13.68154178674352, -89.23533507853404 13.68091642651297, -89.23588795811519 13.68029106628242, -89.23598010471204 13.68020172910663, -89.2360722513089 13.68011239193084, -89.23616439790577 13.68002305475504, -89.23625654450262 13.67993371757925, -89.23634869109948 13.67984438040346, -89.23644083769634 13.67975504322767, -89.23653298429319 13.67966570605187, -89.23662513089006 13.67957636887608, -89.23671727748692 13.67948703170029, -89.23680942408377 13.67939769452449, -89.23690157068063 13.6793083573487, -89.23699371727749 13.67921902017291, -89.23754659685864 13.67859365994236, -89.23763874345551 13.67850432276657, -89.23773089005236 13.67841498559078, -89.23782303664922 13.67832564841498, -89.23791518324607 13.67823631123919, -89.23800732984293 13.6781469740634, -89.2380994764398 13.67805763688761, -89.23819162303666 13.67796829971181, -89.23828376963351 13.67787896253602, -89.23837591623037 13.67778962536023, -89.23846806282722 13.67770028818444, -89.23856020942409 13.67761095100865, -89.23865235602095 13.67752161383285, -89.23874450261781 13.67743227665706, -89.23883664921466 13.67734293948127, -89.23938952879581 13.67671757925072, -89.23948167539267 13.67662824207493, -89.23957382198954 13.67653890489913, -89.23966596858639 13.67644956772334, -89.23975811518325 13.67636023054755, -89.23985026178011 13.67627089337176, -89.23994240837696 13.67618155619597, -89.24003455497383 13.67609221902017, -89.24012670157069 13.67600288184438, -89.24021884816754 13.67591354466859, -89.2403109947644 13.67582420749279, -89.24040314136126 13.675734870317, -89.24049528795813 13.67564553314121, -89.24058743455498 13.67555619596542, -89.24114031413613 13.67493083573487, -89.24123246073299 13.67484149855908, -89.24132460732984 13.67475216138329, -89.2414167539267 13.67466282420749, -89.24150890052357 13.6745734870317, -89.24160104712043 13.67448414985591, -89.24169319371728 13.67439481268011, -89.24178534031414 13.67430547550432, -89.24298324607331 13.67251873198847, -89.24316753926702 13.67207204610951, -89.24335183246073 13.67162536023055, -89.24353612565446 13.67117867435158, -89.24362827225131 13.671, -89.24372041884817 13.671, -89.24381256544503 13.671, -89.24390471204188 13.671, 
-89.24399685863875 13.671, -89.24408900523561 13.671, -89.24418115183246 13.671, -89.24427329842932 13.671, -89.24436544502618 13.671, -89.24445759162305 13.671, -89.2445497382199 13.671, -89.24464188481676 13.671, -89.24473403141361 13.671, -89.24482617801047 13.671, -89.24491832460734 13.671, -89.2450104712042 13.671, -89.24510261780105 13.671, -89.24519476439791 13.671, -89.24528691099476 13.671, -89.24537905759163 13.671, -89.24547120418849 13.671, -89.24556335078535 13.671, -89.2456554973822 13.671, -89.24574764397906 13.671, -89.24583979057591 13.671, -89.24593193717278 13.671, -89.24602408376964 13.671, -89.2461162303665 13.671, -89.24620837696335 13.671, -89.24630052356021 13.671, -89.24639267015708 13.671, -89.24648481675393 13.671, -89.24657696335079 13.671, -89.24666910994765 13.671, -89.2467612565445 13.671, -89.24685340314137 13.671, -89.24694554973823 13.671, -89.24703769633508 13.671, -89.24712984293194 13.671, -89.2472219895288 13.671, -89.24731413612565 13.671, -89.24740628272252 13.671, -89.24749842931938 13.671, -89.24759057591623 13.671, -89.24768272251309 13.671, -89.24777486910995 13.671, -89.24786701570682 13.671, -89.24795916230367 13.671, -89.24805130890053 13.671, -89.24814345549738 13.671, -89.24823560209424 13.671, -89.24832774869111 13.671, -89.24841989528797 13.671, -89.24851204188482 13.671, -89.24860418848168 13.671, -89.24869633507853 13.671, -89.24878848167539 13.671, -89.24888062827226 13.671, -89.24897277486912 13.671, -89.24906492146597 13.671, -89.24915706806283 13.671, -89.24924921465968 13.671, -89.24934136125655 13.671, -89.24943350785341 13.671, -89.24952565445027 13.671, -89.24961780104712 13.671, -89.24970994764398 13.671, -89.24980209424085 13.671, -89.2498942408377 13.671, -89.24998638743456 13.671, -89.25007853403142 13.671, -89.25017068062827 13.671, -89.25026282722513 13.671, -89.250354973822 13.671, -89.25044712041885 13.671, -89.25053926701571 13.671, -89.25063141361257 13.671, -89.25072356020942 13.671, -89.25081570680629 13.671, -89.25090785340315 13.671))' + risk_shape = "POLYGON ((-89.25090785340315 13.671, -89.251 13.671, -89.251 13.67108933717579, -89.251 13.67117867435158, -89.251 13.67126801152738, -89.251 13.67135734870317, -89.251 13.67144668587896, -89.251 13.67153602305475, -89.251 13.67162536023055, -89.251 13.67171469740634, -89.251 13.67180403458213, -89.251 13.67189337175792, -89.251 13.67198270893372, -89.251 13.67207204610951, -89.251 13.6721613832853, -89.251 13.6722507204611, -89.251 13.67234005763689, -89.251 13.67242939481268, -89.251 13.67251873198847, -89.251 13.67260806916426, -89.251 13.67269740634006, -89.251 13.67278674351585, -89.251 13.67287608069164, -89.251 13.67296541786743, -89.251 13.67305475504323, -89.251 13.67314409221902, -89.251 13.67323342939481, -89.251 13.6733227665706, -89.251 13.6734121037464, -89.251 13.67350144092219, -89.251 13.67359077809798, -89.251 13.67368011527378, -89.251 13.67376945244957, -89.251 13.67385878962536, -89.251 13.67394812680115, -89.251 13.67403746397694, -89.251 13.67412680115274, -89.251 13.67421613832853, -89.251 13.67430547550432, -89.251 13.67439481268011, -89.251 13.67448414985591, -89.251 13.6745734870317, -89.251 13.67466282420749, -89.251 13.67475216138329, -89.251 13.67484149855908, -89.251 13.67493083573487, -89.251 13.67502017291066, -89.251 13.67510951008645, -89.251 13.67519884726225, -89.251 13.67528818443804, -89.251 13.67537752161383, -89.251 13.67546685878962, -89.251 13.67555619596542, -89.251 13.67564553314121, -89.251 13.675734870317, -89.251 
13.67582420749279, -89.251 13.67591354466859, -89.251 13.67600288184438, -89.251 13.67609221902017, -89.251 13.67618155619597, -89.251 13.67627089337176, -89.251 13.67636023054755, -89.251 13.67644956772334, -89.251 13.67653890489913, -89.251 13.67662824207493, -89.251 13.67671757925072, -89.251 13.67680691642651, -89.251 13.6768962536023, -89.251 13.6769855907781, -89.251 13.67707492795389, -89.251 13.67716426512968, -89.251 13.67725360230548, -89.251 13.67734293948127, -89.251 13.67743227665706, -89.251 13.67752161383285, -89.251 13.67761095100865, -89.251 13.67770028818444, -89.251 13.67778962536023, -89.251 13.67787896253602, -89.251 13.67796829971181, -89.251 13.67805763688761, -89.25090785340315 13.67832564841498, -89.25081570680629 13.67850432276657, -89.25072356020942 13.67868299711816, -89.25063141361257 13.67886167146974, -89.250354973822 13.67921902017291, -89.25017068062827 13.67948703170029, -89.2498942408377 13.67984438040346, -89.24961780104712 13.68020172910663, -89.24934136125655 13.6805590778098, -89.24915706806283 13.68082708933717, -89.24888062827226 13.68118443804035, -89.24860418848168 13.68154178674352, -89.24832774869111 13.68189913544669, -89.24814345549738 13.68216714697406, -89.24786701570682 13.68252449567723, -89.24759057591623 13.6828818443804, -89.24740628272252 13.68314985590778, -89.24712984293194 13.68350720461095, -89.24685340314137 13.68386455331412, -89.24657696335079 13.68422190201729, -89.24639267015708 13.68448991354467, -89.24556335078535 13.68556195965418, -89.24510261780105 13.68609798270893, -89.2450104712042 13.68618731988473, -89.24491832460734 13.68627665706052, -89.24436544502618 13.68690201729107, -89.24427329842932 13.68699135446686, -89.24372041884817 13.68761671469741, -89.24362827225131 13.6877060518732, -89.24353612565446 13.68779538904899, -89.24298324607331 13.68842074927954, -89.24289109947644 13.68851008645533, -89.24233821989529 13.68913544668588, -89.24224607329843 13.68922478386167, -89.24169319371728 13.68985014409222, -89.24160104712043 13.68993948126801, -89.24150890052357 13.6900288184438, -89.24095602094241 13.69065417867435, -89.24086387434555 13.69074351585014, -89.24077172774869 13.69083285302594, -89.24067958115184 13.69092219020173, -89.24058743455498 13.69101152737752, -89.24049528795813 13.69110086455331, -89.24040314136126 13.69119020172911, -89.2403109947644 13.6912795389049, -89.24021884816754 13.69136887608069, -89.23975811518325 13.69163688760807, -89.23929738219896 13.69190489913545, -89.23865235602095 13.69226224783862, -89.23819162303666 13.69253025936599, -89.23773089005236 13.69279827089337, -89.23708586387436 13.69315561959654, -89.23662513089006 13.69342363112392, -89.23616439790577 13.6936916426513, -89.23570366492147 13.69395965417867, -89.23505863874345 13.69431700288184, -89.23459790575916 13.69458501440922, -89.23413717277488 13.6948530259366, -89.23376858638744 13.69494236311239, -89.23321570680629 13.69503170028818, -89.23266282722513 13.69512103746398, -89.23220209424085 13.69521037463977, -89.23164921465968 13.69529971181556, -89.23109633507853 13.69538904899135, -89.23054345549738 13.69547838616715, -89.23008272251309 13.69556772334294, -89.22952984293194 13.69565706051873, -89.22897696335079 13.69574639769452, -89.22851623036649 13.69583573487032, -89.22741047120419 13.6960144092219, -89.22685759162304 13.69610374639769, -89.22621256544502 13.69619308357349, -89.22612041884817 13.69619308357349, -89.22602827225131 13.69619308357349, -89.22593612565446 13.69619308357349, -89.2258439790576 
13.69619308357349, -89.22575183246073 13.69619308357349, -89.22565968586387 13.69619308357349, -89.22556753926702 13.69619308357349, -89.22547539267016 13.69619308357349, -89.22538324607331 13.69619308357349, -89.22529109947644 13.69619308357349, -89.22519895287958 13.69619308357349, -89.22510680628272 13.69619308357349, -89.22501465968587 13.69619308357349, -89.22492251308901 13.69619308357349, -89.22483036649214 13.69619308357349, -89.22473821989529 13.69619308357349, -89.22464607329843 13.69619308357349, -89.22455392670157 13.69619308357349, -89.22446178010472 13.69619308357349, -89.22436963350786 13.69619308357349, -89.22427748691099 13.69619308357349, -89.22418534031414 13.69619308357349, -89.22409319371728 13.69619308357349, -89.22400104712042 13.69619308357349, -89.22390890052357 13.69619308357349, -89.2238167539267 13.69619308357349, -89.22372460732984 13.69619308357349, -89.22363246073299 13.69619308357349, -89.22354031413613 13.69619308357349, -89.22344816753927 13.69619308357349, -89.2233560209424 13.69619308357349, -89.22326387434555 13.69619308357349, -89.22317172774869 13.69619308357349, -89.2222502617801 13.69628242074928, -89.22215811518325 13.69628242074928, -89.22206596858639 13.69628242074928, -89.22197382198954 13.69628242074928, -89.22188167539267 13.69628242074928, -89.22178952879581 13.69628242074928, -89.22169738219895 13.69628242074928, -89.2216052356021 13.69628242074928, -89.22151308900524 13.69628242074928, -89.22142094240837 13.69628242074928, -89.22132879581152 13.69628242074928, -89.22123664921466 13.69628242074928, -89.2211445026178 13.69628242074928, -89.22105235602095 13.69628242074928, -89.22096020942409 13.69628242074928, -89.22086806282722 13.69628242074928, -89.22077591623037 13.69628242074928, -89.22059162303665 13.69619308357349, -89.22031518324607 13.6960144092219, -89.21985445026178 13.69574639769452, -89.21957801047121 13.69556772334294, -89.21930157068063 13.69538904899135, -89.21902513089006 13.69521037463977, -89.21874869109948 13.69503170028818, -89.21828795811518 13.69476368876081, -89.21801151832462 13.69458501440922, -89.21773507853403 13.69440634005764, -89.21745863874345 13.69422766570605, -89.21699790575916 13.69395965417867, -89.21672146596859 13.69378097982709, -89.21644502617801 13.6936023054755, -89.21616858638744 13.69342363112392, -89.21589214659686 13.69324495677233, -89.21543141361256 13.69297694524496, -89.215154973822 13.69279827089337, -89.21091623036649 13.69226224783862, -89.21063979057591 13.69235158501441, -89.21036335078534 13.6924409221902, -89.21008691099476 13.69253025936599, -89.20981047120419 13.69261959654179, -89.2093497382199 13.69279827089337, -89.20907329842932 13.69288760806916, -89.20879685863875 13.69297694524496, -89.20852041884817 13.69306628242075, -89.2082439790576 13.69315561959654, -89.20815183246073 13.69315561959654, -89.20805968586387 13.69315561959654, -89.20796753926702 13.69315561959654, -89.20787539267016 13.69315561959654, -89.2077832460733 13.69315561959654, -89.20769109947643 13.69315561959654, -89.20695392670157 13.69306628242075, -89.20630890052357 13.69297694524496, -89.20557172774869 13.69288760806916, -89.20492670157068 13.69279827089337, -89.20418952879581 13.69270893371758, -89.2035445026178 13.69261959654179, -89.20280732984293 13.69253025936599, -89.19976649214659 13.69288760806916, -89.19912146596859 13.69315561959654, -89.19847643979058 13.69342363112392, -89.19829214659686 13.69351296829971, -89.19764712041885 13.69378097982709, -89.19700209424084 13.69404899135447, 
-89.19681780104712 13.69413832853026, -89.19617277486911 13.69440634005764, -89.19552774869111 13.69467435158501, -89.19534345549738 13.69476368876081, -89.19469842931937 13.69503170028818, -89.19405340314137 13.69529971181556, -89.19386910994764 13.69538904899135, -89.19322408376964 13.69565706051873, -89.19303979057591 13.69574639769452, -89.1923947643979 13.6960144092219, -89.1917497382199 13.69628242074928, -89.19156544502617 13.69637175792507, -89.19092041884817 13.69663976945245, -89.19027539267016 13.69690778097983, -89.19009109947643 13.69699711815562, -89.18944607329843 13.697265129683, -89.18880104712042 13.69753314121037, -89.18861675392669 13.69762247838617, -89.18797172774869 13.69789048991355, -89.18732670157068 13.69815850144092, -89.18714240837696 13.69824783861671, -89.18668167539266 13.6984265129683, -89.18658952879581 13.6984265129683, -89.18649738219895 13.6984265129683, -89.18640523560209 13.6984265129683, -89.18631308900524 13.6984265129683, -89.18566806282722 13.69815850144092, -89.18502303664921 13.69789048991355, -89.18456230366492 13.69771181556196, -89.18391727748691 13.69744380403458, -89.18327225130889 13.6971757925072, -89.18262722513089 13.69690778097983, -89.1790335078534 13.69672910662824, -89.17875706806282 13.69681844380403, -89.17848062827225 13.69690778097983, -89.17820418848167 13.69699711815562, -89.1779277486911 13.69708645533141, -89.17765130890052 13.6971757925072, -89.17700628272252 13.69735446685879, -89.17672984293193 13.69744380403458, -89.17645340314137 13.69753314121037, -89.17617696335078 13.69762247838617, -89.1759005235602 13.69771181556196, -89.17562408376963 13.69780115273775, -89.17534764397905 13.69789048991355, -89.17470261780105 13.69806916426513, -89.1725832460733 13.69931988472622, -89.17249109947643 13.69940922190202, -89.17193821989528 13.70003458213256, -89.17184607329843 13.70012391930836, -89.17175392670157 13.70021325648415, -89.17120104712042 13.7008386167147, -89.17110890052356 13.70092795389049, -89.17101675392669 13.70101729106628, -89.17092460732984 13.70110662824207, -89.17037172774869 13.70173198847262, -89.17027958115183 13.70182132564842, -89.17018743455498 13.70191066282421, -89.17009528795812 13.702, -89.17000314136125 13.702, -89.16991099476439 13.702, -89.16981884816754 13.702, -89.16972670157068 13.702, -89.16963455497383 13.702, -89.16954240837696 13.702, -89.1694502617801 13.702, -89.16935811518324 13.702, -89.16926596858639 13.702, -89.16917382198953 13.702, -89.16908167539266 13.702, -89.16898952879581 13.702, -89.16889738219895 13.702, -89.16880523560209 13.702, -89.16871308900524 13.702, -89.16862094240837 13.702, -89.16852879581151 13.702, -89.16843664921466 13.702, -89.1683445026178 13.702, -89.16825235602094 13.702, -89.16816020942409 13.702, -89.16806806282722 13.702, -89.16797591623036 13.702, -89.16788376963351 13.702, -89.16779162303665 13.702, -89.16769947643979 13.702, -89.16760732984292 13.702, -89.16751518324607 13.702, -89.16742303664921 13.702, -89.16733089005236 13.702, -89.1672387434555 13.702, -89.16714659685863 13.702, -89.16705445026177 13.702, -89.16696230366492 13.702, -89.16687015706806 13.702, -89.16677801047121 13.702, -89.16668586387435 13.702, -89.16659371727748 13.702, -89.16650157068062 13.702, -89.16640942408377 13.702, -89.16631727748691 13.702, -89.16622513089006 13.702, -89.16613298429318 13.702, -89.16604083769633 13.702, -89.16594869109947 13.702, -89.16585654450262 13.702, -89.16576439790576 13.702, -89.16567225130889 13.702, -89.16558010471203 13.702, -89.16548795811518 
13.702, -89.16539581151832 13.702, -89.16530366492147 13.702, -89.16521151832461 13.702, -89.16511937172774 13.702, -89.16502722513088 13.702, -89.16493507853403 13.702, -89.16484293193717 13.702, -89.16475078534032 13.702, -89.16465863874345 13.702, -89.16456649214659 13.702, -89.16447434554973 13.702, -89.16438219895288 13.702, -89.16429005235602 13.702, -89.16419790575915 13.702, -89.1641057591623 13.702, -89.16401361256544 13.702, -89.16392146596858 13.702, -89.16382931937173 13.702, -89.16373717277487 13.702, -89.163645026178 13.702, -89.16355287958115 13.702, -89.16346073298429 13.702, -89.16336858638743 13.702, -89.16327643979058 13.702, -89.16318429319371 13.702, -89.16309214659685 13.702, -89.163 13.702, -89.163 13.70191066282421, -89.163 13.70182132564842, -89.163 13.70173198847262, -89.163 13.70164265129683, -89.163 13.70155331412104, -89.163 13.70146397694525, -89.163 13.70137463976945, -89.163 13.70128530259366, -89.163 13.70119596541787, -89.163 13.70110662824207, -89.163 13.70101729106628, -89.163 13.70092795389049, -89.163 13.7008386167147, -89.163 13.7007492795389, -89.163 13.70065994236311, -89.163 13.70057060518732, -89.163 13.70048126801153, -89.163 13.70039193083574, -89.163 13.70030259365994, -89.163 13.70021325648415, -89.163 13.70012391930836, -89.163 13.70003458213256, -89.163 13.69994524495677, -89.163 13.69985590778098, -89.163 13.69976657060519, -89.163 13.69967723342939, -89.163 13.6995878962536, -89.163 13.69949855907781, -89.163 13.69940922190202, -89.163 13.69931988472622, -89.163 13.69923054755043, -89.163 13.69914121037464, -89.163 13.69905187319885, -89.163 13.69896253602306, -89.163 13.69887319884726, -89.163 13.69878386167147, -89.163 13.69869452449568, -89.163 13.69860518731988, -89.163 13.69851585014409, -89.163 13.6984265129683, -89.163 13.69833717579251, -89.163 13.69824783861671, -89.163 13.69815850144092, -89.163 13.69806916426513, -89.163 13.69797982708934, -89.163 13.69789048991355, -89.163 13.69780115273775, -89.163 13.69771181556196, -89.163 13.69762247838617, -89.163 13.69753314121037, -89.163 13.69744380403458, -89.163 13.69735446685879, -89.163 13.697265129683, -89.163 13.6971757925072, -89.163 13.69708645533141, -89.163 13.69699711815562, -89.163 13.69690778097983, -89.163 13.69681844380403, -89.163 13.69672910662824, -89.163 13.69663976945245, -89.163 13.69655043227666, -89.163 13.69646109510087, -89.163 13.69637175792507, -89.163 13.69628242074928, -89.163 13.69619308357349, -89.163 13.69610374639769, -89.163 13.6960144092219, -89.163 13.69592507204611, -89.163 13.69583573487032, -89.163 13.69574639769452, -89.163 13.69565706051873, -89.163 13.69556772334294, -89.163 13.69547838616715, -89.163 13.69538904899135, -89.163 13.69529971181556, -89.163 13.69521037463977, -89.163 13.69512103746398, -89.163 13.69503170028818, -89.163 13.69494236311239, -89.163 13.6948530259366, -89.163 13.69476368876081, -89.163 13.69467435158501, -89.163 13.69458501440922, -89.163 13.69449567723343, -89.163 13.69440634005764, -89.163 13.69431700288184, -89.163 13.69422766570605, -89.163 13.69413832853026, -89.163 13.69404899135447, -89.163 13.69395965417867, -89.163 13.69387031700288, -89.163 13.69378097982709, -89.163 13.6936916426513, -89.163 13.6936023054755, -89.163 13.69351296829971, -89.163 13.69342363112392, -89.163 13.69333429394813, -89.163 13.69324495677233, -89.16327643979058 13.69306628242075, -89.16355287958115 13.69288760806916, -89.16382931937173 13.69270893371758, -89.1641057591623 13.69253025936599, -89.16438219895288 13.69235158501441, 
-89.16465863874345 13.69217291066282, -89.16493507853403 13.69199423631124, -89.16521151832461 13.69181556195965, -89.16548795811518 13.69163688760807, -89.16576439790576 13.69145821325648, -89.16604083769633 13.6912795389049, -89.16631727748691 13.69110086455331, -89.16659371727748 13.69092219020173, -89.16677801047121 13.69083285302594, -89.16696230366492 13.69074351585014, -89.16714659685863 13.69065417867435, -89.16733089005236 13.69056484149856, -89.16797591623036 13.69020749279539, -89.16816020942409 13.6901181556196, -89.1683445026178 13.6900288184438, -89.16852879581151 13.68993948126801, -89.16871308900524 13.68985014409222, -89.16889738219895 13.68976080691643, -89.16908167539266 13.68967146974063, -89.16972670157068 13.68931412103746, -89.16991099476439 13.68922478386167, -89.17009528795812 13.68913544668588, -89.17027958115183 13.68904610951009, -89.17046387434554 13.68895677233429, -89.17064816753927 13.6888674351585, -89.17083246073298 13.68877809798271, -89.17147748691099 13.68842074927954, -89.17166178010471 13.68833141210375, -89.17350471204188 13.68681268011527, -89.17378115183246 13.6864553314121, -89.17405759162304 13.68609798270893, -89.17442617801046 13.68565129682997, -89.17470261780105 13.6852939481268, -89.17507120418848 13.68484726224784, -89.17534764397905 13.68448991354467, -89.17571623036649 13.68404322766571, -89.17580837696335 13.68395389048991, -89.17599267015707 13.68386455331412, -89.17617696335078 13.68377521613833, -89.1763612565445 13.68368587896254, -89.17654554973822 13.68359654178674, -89.17672984293193 13.68350720461095, -89.17691413612565 13.68341786743516, -89.17709842931937 13.68332853025936, -89.17728272251308 13.68323919308357, -89.17746701570681 13.68314985590778, -89.17765130890052 13.68306051873199, -89.17783560209423 13.6829711815562, -89.17801989528796 13.6828818443804, -89.17820418848167 13.68279250720461, -89.17884921465968 13.68252449567723, -89.1790335078534 13.68243515850144, -89.17921780104712 13.68234582132565, -89.17940209424084 13.68225648414986, -89.17958638743455 13.68216714697406, -89.17977068062827 13.68207780979827, -89.17995497382199 13.68198847262248, -89.1801392670157 13.68189913544669, -89.18032356020942 13.68180979827089, -89.18050785340314 13.6817204610951, -89.18069214659685 13.68163112391931, -89.18087643979058 13.68154178674352, -89.18152146596859 13.68127377521614, -89.1817057591623 13.68118443804035, -89.18189005235602 13.68109510086455, -89.18198219895288 13.68109510086455, -89.18207434554974 13.68109510086455, -89.18216649214659 13.68109510086455, -89.18225863874345 13.68109510086455, -89.18235078534032 13.68109510086455, -89.18244293193717 13.68109510086455, -89.18253507853403 13.68109510086455, -89.18262722513089 13.68109510086455, -89.18271937172774 13.68109510086455, -89.18281151832461 13.68109510086455, -89.18290366492147 13.68109510086455, -89.18299581151832 13.68109510086455, -89.18308795811518 13.68109510086455, -89.18318010471204 13.68109510086455, -89.18327225130889 13.68109510086455, -89.18336439790576 13.68109510086455, -89.18345654450262 13.68109510086455, -89.18354869109947 13.68109510086455, -89.18364083769633 13.68109510086455, -89.18373298429319 13.68109510086455, -89.18382513089006 13.68109510086455, -89.18806387434554 13.67868299711816, -89.18843246073298 13.67805763688761, -89.18880104712042 13.67743227665706, -89.18889319371728 13.67725360230548, -89.18926178010472 13.67662824207493, -89.18963036649214 13.67600288184438, -89.18972251308901 13.67582420749279, -89.19009109947643 
13.67519884726225, -89.19045968586387 13.6745734870317, -89.19055183246073 13.67439481268011, -89.19092041884817 13.67376945244957, -89.19119685863875 13.6733227665706, -89.19138115183246 13.67323342939481, -89.19156544502617 13.67314409221902, -89.1917497382199 13.67305475504323, -89.1923947643979 13.67278674351585, -89.19257905759163 13.67269740634006, -89.19276335078534 13.67260806916426, -89.19294764397905 13.67251873198847, -89.19313193717278 13.67242939481268, -89.19331623036649 13.67234005763689, -89.1935005235602 13.6722507204611, -89.19368481675393 13.6721613832853, -89.19386910994764 13.67207204610951, -89.19405340314137 13.67198270893372, -89.19469842931937 13.67171469740634, -89.19488272251309 13.67162536023055, -89.19506701570681 13.67153602305475, -89.19525130890052 13.67144668587896, -89.19543560209424 13.67135734870317, -89.19561989528796 13.67126801152738, -89.19580418848167 13.67117867435158, -89.19598848167539 13.67108933717579, -89.19617277486911 13.671, -89.19626492146597 13.671, -89.19635706806282 13.671, -89.19644921465968 13.671, -89.19654136125655 13.671, -89.19663350785341 13.671, -89.19672565445026 13.671, -89.19681780104712 13.671, -89.19690994764397 13.671, -89.19700209424084 13.671, -89.1970942408377 13.671, -89.19718638743456 13.671, -89.19727853403141 13.671, -89.19737068062827 13.671, -89.19746282722512 13.671, -89.19755497382199 13.671, -89.19764712041885 13.671, -89.19773926701571 13.671, -89.19783141361256 13.671, -89.19792356020942 13.671, -89.19801570680629 13.671, -89.19810785340314 13.671, -89.1982 13.671, -89.19829214659686 13.671, -89.19838429319371 13.671, -89.19847643979058 13.671, -89.19856858638744 13.671, -89.19866073298429 13.671, -89.19875287958115 13.671, -89.19884502617801 13.671, -89.19893717277488 13.671, -89.19902931937173 13.671, -89.19912146596859 13.671, -89.19921361256544 13.671, -89.1993057591623 13.671, -89.19939790575916 13.671, -89.19949005235603 13.671, -89.19958219895288 13.671, -89.19967434554974 13.671, -89.19976649214659 13.671, -89.19985863874345 13.671, -89.19995078534032 13.671, -89.20004293193718 13.671, -89.20013507853403 13.671, -89.20022722513089 13.671, -89.20031937172774 13.671, -89.20041151832461 13.671, -89.20050366492147 13.671, -89.20059581151833 13.671, -89.20068795811518 13.671, -89.20078010471204 13.671, -89.20087225130889 13.671, -89.20096439790576 13.671, -89.20105654450262 13.671, -89.20114869109948 13.671, -89.20124083769633 13.671, -89.20133298429319 13.671, -89.20142513089006 13.671, -89.20151727748691 13.671, -89.20160942408377 13.671, -89.20170157068063 13.671, -89.20179371727748 13.671, -89.20188586387435 13.671, -89.20197801047121 13.671, -89.20207015706806 13.671, -89.20216230366492 13.671, -89.20225445026178 13.671, -89.20234659685863 13.671, -89.2024387434555 13.671, -89.20253089005236 13.671, -89.20262303664921 13.671, -89.20271518324607 13.671, -89.20280732984293 13.671, -89.2028994764398 13.671, -89.20299162303665 13.671, -89.20308376963351 13.671, -89.20317591623036 13.671, -89.20326806282722 13.671, -89.20336020942409 13.671, -89.20345235602095 13.671, -89.2035445026178 13.671, -89.20363664921466 13.671, -89.20372879581151 13.671, -89.20382094240837 13.671, -89.20391308900524 13.671, -89.2040052356021 13.671, -89.20409738219895 13.671, -89.20418952879581 13.671, -89.20428167539266 13.671, -89.20437382198953 13.671, -89.20446596858639 13.671, -89.20455811518325 13.671, -89.2046502617801 13.671, -89.20474240837696 13.671, -89.20483455497383 13.671, -89.20492670157068 13.671, 
-89.20501884816754 13.671, -89.2051109947644 13.671, -89.20520314136125 13.671, -89.20529528795812 13.671, -89.20538743455498 13.671, -89.20547958115183 13.671, -89.20557172774869 13.671, -89.20566387434555 13.671, -89.2057560209424 13.671, -89.20584816753927 13.671, -89.20594031413613 13.671, -89.20603246073298 13.671, -89.20612460732984 13.671, -89.2062167539267 13.671, -89.20640104712042 13.67108933717579, -89.20658534031413 13.67117867435158, -89.20676963350786 13.67126801152738, -89.20695392670157 13.67135734870317, -89.20713821989528 13.67144668587896, -89.20723036649214 13.67153602305475, -89.20723036649214 13.67162536023055, -89.20723036649214 13.67171469740634, -89.20723036649214 13.67180403458213, -89.20704607329843 13.67242939481268, -89.20686178010472 13.67305475504323, -89.20667748691099 13.67368011527378, -89.20658534031413 13.67394812680115, -89.20640104712042 13.6745734870317, -89.21036335078534 13.68118443804035, -89.21091623036649 13.68127377521614, -89.21146910994764 13.68136311239193, -89.21211413612565 13.68145244956772, -89.21266701570681 13.68154178674352, -89.21321989528796 13.68163112391931, -89.21377277486911 13.6817204610951, -89.21441780104712 13.68180979827089, -89.2146942408377 13.68189913544669, -89.21487853403141 13.68198847262248, -89.215154973822 13.68216714697406, -89.21543141361256 13.68234582132565, -89.21589214659686 13.68261383285303, -89.21616858638744 13.68279250720461, -89.21644502617801 13.6829711815562, -89.21672146596859 13.68314985590778, -89.21699790575916 13.68332853025936, -89.21727434554974 13.68350720461095, -89.21773507853403 13.68377521613833, -89.21801151832462 13.68395389048991, -89.21828795811518 13.6841325648415, -89.21856439790577 13.68431123919308, -89.21884083769633 13.68448991354467, -89.21911727748692 13.68466858789625, -89.22031518324607 13.68520461095101, -89.22059162303665 13.6852939481268, -89.22086806282722 13.68538328530259, -89.2211445026178 13.68547262247839, -89.22142094240837 13.68556195965418, -89.22169738219895 13.68565129682997, -89.22197382198954 13.68574063400576, -89.2222502617801 13.68582997118156, -89.22252670157069 13.68591930835735, -89.22280314136125 13.68600864553314, -89.22307958115184 13.68609798270893, -89.2233560209424 13.68618731988473, -89.22363246073299 13.68627665706052, -89.22390890052357 13.68636599423631, -89.22768691099476 13.68618731988473, -89.22833193717278 13.68591930835735, -89.22851623036649 13.68582997118156, -89.22870052356021 13.68574063400576, -89.22888481675393 13.68565129682997, -89.22906910994764 13.68556195965418, -89.22925340314137 13.68547262247839, -89.22943769633508 13.68538328530259, -89.22962198952879 13.6852939481268, -89.22980628272252 13.68520461095101, -89.22999057591623 13.68511527377522, -89.23017486910994 13.68502593659942, -89.23035916230367 13.68493659942363, -89.23054345549738 13.68484726224784, -89.23072774869111 13.68475792507205, -89.23091204188482 13.68466858789625, -89.23109633507853 13.68457925072046, -89.23174136125655 13.68431123919308, -89.23192565445027 13.68422190201729, -89.23358429319372 13.6828818443804, -89.23413717277488 13.68225648414986, -89.23469005235603 13.68163112391931, -89.23478219895289 13.68154178674352, -89.23533507853404 13.68091642651297, -89.23588795811519 13.68029106628242, -89.23598010471204 13.68020172910663, -89.2360722513089 13.68011239193084, -89.23616439790577 13.68002305475504, -89.23625654450262 13.67993371757925, -89.23634869109948 13.67984438040346, -89.23644083769634 13.67975504322767, -89.23653298429319 13.67966570605187, 
-89.23662513089006 13.67957636887608, -89.23671727748692 13.67948703170029, -89.23680942408377 13.67939769452449, -89.23690157068063 13.6793083573487, -89.23699371727749 13.67921902017291, -89.23754659685864 13.67859365994236, -89.23763874345551 13.67850432276657, -89.23773089005236 13.67841498559078, -89.23782303664922 13.67832564841498, -89.23791518324607 13.67823631123919, -89.23800732984293 13.6781469740634, -89.2380994764398 13.67805763688761, -89.23819162303666 13.67796829971181, -89.23828376963351 13.67787896253602, -89.23837591623037 13.67778962536023, -89.23846806282722 13.67770028818444, -89.23856020942409 13.67761095100865, -89.23865235602095 13.67752161383285, -89.23874450261781 13.67743227665706, -89.23883664921466 13.67734293948127, -89.23938952879581 13.67671757925072, -89.23948167539267 13.67662824207493, -89.23957382198954 13.67653890489913, -89.23966596858639 13.67644956772334, -89.23975811518325 13.67636023054755, -89.23985026178011 13.67627089337176, -89.23994240837696 13.67618155619597, -89.24003455497383 13.67609221902017, -89.24012670157069 13.67600288184438, -89.24021884816754 13.67591354466859, -89.2403109947644 13.67582420749279, -89.24040314136126 13.675734870317, -89.24049528795813 13.67564553314121, -89.24058743455498 13.67555619596542, -89.24114031413613 13.67493083573487, -89.24123246073299 13.67484149855908, -89.24132460732984 13.67475216138329, -89.2414167539267 13.67466282420749, -89.24150890052357 13.6745734870317, -89.24160104712043 13.67448414985591, -89.24169319371728 13.67439481268011, -89.24178534031414 13.67430547550432, -89.24298324607331 13.67251873198847, -89.24316753926702 13.67207204610951, -89.24335183246073 13.67162536023055, -89.24353612565446 13.67117867435158, -89.24362827225131 13.671, -89.24372041884817 13.671, -89.24381256544503 13.671, -89.24390471204188 13.671, -89.24399685863875 13.671, -89.24408900523561 13.671, -89.24418115183246 13.671, -89.24427329842932 13.671, -89.24436544502618 13.671, -89.24445759162305 13.671, -89.2445497382199 13.671, -89.24464188481676 13.671, -89.24473403141361 13.671, -89.24482617801047 13.671, -89.24491832460734 13.671, -89.2450104712042 13.671, -89.24510261780105 13.671, -89.24519476439791 13.671, -89.24528691099476 13.671, -89.24537905759163 13.671, -89.24547120418849 13.671, -89.24556335078535 13.671, -89.2456554973822 13.671, -89.24574764397906 13.671, -89.24583979057591 13.671, -89.24593193717278 13.671, -89.24602408376964 13.671, -89.2461162303665 13.671, -89.24620837696335 13.671, -89.24630052356021 13.671, -89.24639267015708 13.671, -89.24648481675393 13.671, -89.24657696335079 13.671, -89.24666910994765 13.671, -89.2467612565445 13.671, -89.24685340314137 13.671, -89.24694554973823 13.671, -89.24703769633508 13.671, -89.24712984293194 13.671, -89.2472219895288 13.671, -89.24731413612565 13.671, -89.24740628272252 13.671, -89.24749842931938 13.671, -89.24759057591623 13.671, -89.24768272251309 13.671, -89.24777486910995 13.671, -89.24786701570682 13.671, -89.24795916230367 13.671, -89.24805130890053 13.671, -89.24814345549738 13.671, -89.24823560209424 13.671, -89.24832774869111 13.671, -89.24841989528797 13.671, -89.24851204188482 13.671, -89.24860418848168 13.671, -89.24869633507853 13.671, -89.24878848167539 13.671, -89.24888062827226 13.671, -89.24897277486912 13.671, -89.24906492146597 13.671, -89.24915706806283 13.671, -89.24924921465968 13.671, -89.24934136125655 13.671, -89.24943350785341 13.671, -89.24952565445027 13.671, -89.24961780104712 13.671, -89.24970994764398 13.671, 
-89.24980209424085 13.671, -89.2498942408377 13.671, -89.24998638743456 13.671, -89.25007853403142 13.671, -89.25017068062827 13.671, -89.25026282722513 13.671, -89.250354973822 13.671, -89.25044712041885 13.671, -89.25053926701571 13.671, -89.25063141361257 13.671, -89.25072356020942 13.671, -89.25081570680629 13.671, -89.25090785340315 13.671))"
     shape_poly = wkt.loads(risk_shape)
     shape = gpd.GeoDataFrame()
-    shape['geometry'] = [shape_poly]
-    shape.crs = 'epsg:4326'
+    shape["geometry"] = [shape_poly]
+    shape.crs = "epsg:4326"
     shape.to_crs(epsg=3857, inplace=True)
     ax = shape.plot(figsize=(10, 10), alpha=0.5)
     ax.set_xlim(-9943223.896891385, -9911000.065720687)
     ax.set_ylim(1530712.637786494, 1555600.2891258441)
     ctx.add_basemap(ax, zoom=12, url=ctx.providers.Stamen.Terrain)
-    rect = patches.Rectangle((-9931038.907412536, 1536570.51725147), 4354.653554389253,
-                             2941.9125608841423, linewidth=1, edgecolor='r', facecolor='none')
+    rect = patches.Rectangle(
+        (-9931038.907412536, 1536570.51725147),
+        4354.653554389253,
+        2941.9125608841423,
+        linewidth=1,
+        edgecolor="r",
+        facecolor="none",
+    )
     ax.add_patch(rect)
     ax.set_axis_off()
     fig = ax.get_figure()
-    ax.set_title('Metropolitan Area of San Salvador', fontsize=10)
+    ax.set_title("Metropolitan Area of San Salvador", fontsize=10)
     fig.tight_layout()
     return fig
 
-from climada.entity import Exposures, Entity
+
+from climada.entity import Entity, Exposures
 from climada.hazard import Hazard
 
+
 def load_entity():
-    ent_file = 'FL_entity_Acelhuate_houses.xlsx'
+    ent_file = "FL_entity_Acelhuate_houses.xlsx"
     ent = Entity.from_excel(ent_file)
     ent.exposures.set_geometry_points()
     ent.check()
     return ent
 
+
+import cartopy.crs as ccrs
 import matplotlib as mpl
 import matplotlib.pyplot as plt
 import numpy as np
-import cartopy.crs as ccrs
+
 
 def scale_bar(ax, length=None, location=(0.5, 0.05), linewidth=3):
     """
@@ -69,55 +81,75 @@ def scale_bar(ax, length=None, location=(0.5, 0.05), linewidth=3):
     (ie. 0.5 is the middle of the plot)
     linewidth is the thickness of the scalebar.
""" - #Get the limits of the axis in lat long + # Get the limits of the axis in lat long llx0, llx1, lly0, lly1 = ax.get_extent(ccrs.PlateCarree()) - #Make tmc horizontally centred on the middle of the map, - #vertically at scale bar location + # Make tmc horizontally centred on the middle of the map, + # vertically at scale bar location sbllx = (llx1 + llx0) / 2 sblly = lly0 + (lly1 - lly0) * location[1] tmc = ccrs.TransverseMercator(sbllx, sblly) - #Get the extent of the plotted area in coordinates in metres + # Get the extent of the plotted area in coordinates in metres x0, x1, y0, y1 = ax.get_extent(tmc) - #Turn the specified scalebar location into coordinates in metres + # Turn the specified scalebar location into coordinates in metres sbx = x0 + (x1 - x0) * location[0] sby = y0 + (y1 - y0) * location[1] - #Calculate a scale bar length if none has been given - #(Theres probably a more pythonic way of rounding the number but this works) + # Calculate a scale bar length if none has been given + # (Theres probably a more pythonic way of rounding the number but this works) if not length: - length = (x1 - x0) / 5000 #in km - ndim = int(np.floor(np.log10(length))) #number of digits in number - length = round(length, -ndim) #round to 1sf - #Returns numbers starting with the list + length = (x1 - x0) / 5000 # in km + ndim = int(np.floor(np.log10(length))) # number of digits in number + length = round(length, -ndim) # round to 1sf + + # Returns numbers starting with the list def scale_number(x): - if str(x)[0] in ['1', '2', '5']: return int(x) - else: return scale_number(x - 10 ** ndim) + if str(x)[0] in ["1", "2", "5"]: + return int(x) + else: + return scale_number(x - 10**ndim) + length = scale_number(length) - #Generate the x coordinate for the ends of the scalebar + # Generate the x coordinate for the ends of the scalebar bar_xs = [sbx - length * 500, sbx + length * 500] - #Plot the scalebar - ax.plot(bar_xs, [sby, sby], transform=tmc, color='k', linewidth=linewidth) - #Plot the scalebar label - ax.text(sbx, sby, str(int(length*1000)) + ' m', transform=tmc, - horizontalalignment='center', verticalalignment='bottom') + # Plot the scalebar + ax.plot(bar_xs, [sby, sby], transform=tmc, color="k", linewidth=linewidth) + # Plot the scalebar label + ax.text( + sbx, + sby, + str(int(length * 1000)) + " m", + transform=tmc, + horizontalalignment="center", + verticalalignment="bottom", + ) + def plot_exposure_ss(exposures, point=None): if point is not None: - fig, ax = plt.subplots(figsize=(15, 15), subplot_kw=dict(projection=ccrs.Mercator())) - ax.scatter(exposures.gdf[point:point+1].geometry[:].x, exposures.gdf[point:point+1].geometry[:].y, c='k', - marker='+', s=800) + fig, ax = plt.subplots( + figsize=(15, 15), subplot_kw=dict(projection=ccrs.Mercator()) + ) + ax.scatter( + exposures.gdf[point : point + 1].geometry[:].x, + exposures.gdf[point : point + 1].geometry[:].y, + c="k", + marker="+", + s=800, + ) ax.set_xlim(-9931038.907412536, -9926684.253858147) ax.set_ylim(1536680.51725147, 1539512.429812354) else: # create new map for viviendas - cmap_viv = cm.get_cmap('autumn').resampled(4) - cmap_viv = mpl.colors.LinearSegmentedColormap.from_list('Custom cmap', - [cmap_viv(i) for i in range(cmap_viv.N)], cmap_viv.N) + cmap_viv = cm.get_cmap("autumn").resampled(4) + cmap_viv = mpl.colors.LinearSegmentedColormap.from_list( + "Custom cmap", [cmap_viv(i) for i in range(cmap_viv.N)], cmap_viv.N + ) # create new map for aups - cmap_aup = cm.get_cmap('winter').resampled(4) - cmap_aup = 
mpl.colors.LinearSegmentedColormap.from_list('Custom cmap',
-            [cmap_aup(i) for i in range(cmap_aup.N)], cmap_aup.N)
+        cmap_aup = cm.get_cmap("winter").resampled(4)
+        cmap_aup = mpl.colors.LinearSegmentedColormap.from_list(
+            "Custom cmap", [cmap_aup(i) for i in range(cmap_aup.N)], cmap_aup.N
+        )
 
         # define the bins and normalize
         bounds_aup = np.array([6000, 8800, 10000, 12000, 14600])
@@ -125,34 +157,79 @@ def plot_exposure_ss(exposures, point=None):
         bounds_viv = np.array([7500, 11000, 16500, 33000, 56300])
         norm_viv = mpl.colors.BoundaryNorm(bounds_viv, cmap_viv.N)
 
-        exp_merc_aup = exposures.gdf[exposures.gdf.category==1]
-        exp_merc_house = exposures.gdf[exposures.gdf.category==2]
-
-        fig, ax = plt.subplots(figsize=(15, 15), subplot_kw=dict(projection=ccrs.Mercator()))
-        clr_1 = ax.scatter(exp_merc_aup.geometry[:].x, exp_merc_aup.geometry[:].y, c=exp_merc_aup.value.values,
-                           marker='+', s=25, cmap=cmap_aup, norm=norm_aup)
-        clr_2 = ax.scatter(exp_merc_house.geometry[:].x, exp_merc_house.geometry[:].y, c=exp_merc_house.value.values,
-                           marker='o', s=8, cmap=cmap_viv, norm=norm_viv)
+        exp_merc_aup = exposures.gdf[exposures.gdf.category == 1]
+        exp_merc_house = exposures.gdf[exposures.gdf.category == 2]
+
+        fig, ax = plt.subplots(
+            figsize=(15, 15), subplot_kw=dict(projection=ccrs.Mercator())
+        )
+        clr_1 = ax.scatter(
+            exp_merc_aup.geometry[:].x,
+            exp_merc_aup.geometry[:].y,
+            c=exp_merc_aup.value.values,
+            marker="+",
+            s=25,
+            cmap=cmap_aup,
+            norm=norm_aup,
+        )
+        clr_2 = ax.scatter(
+            exp_merc_house.geometry[:].x,
+            exp_merc_house.geometry[:].y,
+            c=exp_merc_house.value.values,
+            marker="o",
+            s=8,
+            cmap=cmap_viv,
+            norm=norm_viv,
+        )
 
         lines_legend = []
         text_legend = []
         for i_col, x_col in enumerate(np.linspace(0, 1, 4)):
-            lines_legend.append(mpl.lines.Line2D(range(1), range(1), color='white', marker='o', markerfacecolor=cmap_viv(x_col)))
-            text_legend.append(str(bounds_viv[i_col]) + ' - ' + str(bounds_viv[i_col+1]))
-        legend1 = plt.legend(lines_legend, text_legend, numpoints=1, loc=4, title='no AUP housing')
+            lines_legend.append(
+                mpl.lines.Line2D(
+                    range(1),
+                    range(1),
+                    color="white",
+                    marker="o",
+                    markerfacecolor=cmap_viv(x_col),
+                )
+            )
+            text_legend.append(
+                str(bounds_viv[i_col]) + " - " + str(bounds_viv[i_col + 1])
+            )
+        legend1 = plt.legend(
+            lines_legend, text_legend, numpoints=1, loc=4, title="no AUP housing"
+        )
 
         lines_legend = []
         text_legend = []
         for i_col, x_col in enumerate(np.linspace(0, 1, 4)):
-            lines_legend.append(mpl.lines.Line2D(range(1), range(1), color=cmap_aup(x_col), marker='+', markerfacecolor=cmap_aup(x_col)))
-            text_legend.append(str(bounds_aup[i_col]) + ' - ' + str(bounds_aup[i_col+1]))
-        plt.legend(lines_legend, text_legend, numpoints=1, loc=3, title='AUP housing')
+            lines_legend.append(
+                mpl.lines.Line2D(
+                    range(1),
+                    range(1),
+                    color=cmap_aup(x_col),
+                    marker="+",
+                    markerfacecolor=cmap_aup(x_col),
+                )
+            )
+            text_legend.append(
+                str(bounds_aup[i_col]) + " - " + str(bounds_aup[i_col + 1])
+            )
+        plt.legend(lines_legend, text_legend, numpoints=1, loc=3, title="AUP housing")
         plt.gca().add_artist(legend1)
 
-    ctx.add_basemap(ax, zoom=15, url=ctx.providers.OpenStreetMap.Mapnik, origin='upper')
+    ctx.add_basemap(ax, zoom=15, url=ctx.providers.OpenStreetMap.Mapnik, origin="upper")
     scale_bar(ax, 0.5, location=(0.93, 0.4), linewidth=2)
-    rect = patches.Rectangle((-9931033.307412536, 1536686.51725147), 4345.053554389253,
-                             2934.0125608841423, linewidth=2, edgecolor='r', facecolor='none', zorder=200)
+    rect = patches.Rectangle(
+        (-9931033.307412536,
1536686.51725147),
+        4345.053554389253,
+        2934.0125608841423,
+        linewidth=2,
+        edgecolor="r",
+        facecolor="none",
+        zorder=200,
+    )
     ax.add_patch(rect)
     ax.set_axis_off()
     if point is not None:
@@ -162,37 +239,41 @@ def plot_exposure_ss(exposures, point=None):
     # fig.savefig('ss_points.png', format='png', bbox_inches='tight')
     return fig
 
+
 def flooding_aup_if(impact_funcs):
-    mdd = impact_funcs.get_func('FL', 101).mdd
-    intensity = impact_funcs.get_func('FL', 101).intensity
+    mdd = impact_funcs.get_func("FL", 101).mdd
+    intensity = impact_funcs.get_func("FL", 101).intensity
     fig, ax = plt.subplots()
-    ax.set_xlabel('Intensity (m)')
-    ax.set_ylabel('Mean Damage Ratio (%)')
-    ax.set_title('Impact Function - AUP flooding')
-    ax.plot(intensity, mdd*100)
+    ax.set_xlabel("Intensity (m)")
+    ax.set_ylabel("Mean Damage Ratio (%)")
+    ax.set_title("Impact Function - AUP flooding")
+    ax.plot(intensity, mdd * 100)
     fig.set_size_inches(4.5, 4.5)
-    #fig.savefig('if_house_aup.png', format='png', bbox_inches='tight')
+    # fig.savefig('if_house_aup.png', format='png', bbox_inches='tight')
     return fig
 
+
 import pandas as pd
 
+
 def load_accounting():
     acc = pd.DataFrame()
-    acc['Return Period (year)'] = np.array([10, 25, 50, 100])
-    acc['frequency (1/year)'] = np.array([1/10, 1/25, 1/50, 1/100])
-    acc['intensity (m)'] = np.array([0.7744541, 2.820973, 4.828216, 5.742804])
-    acc['Mean Damage Ration (%)'] = np.array([51.83603012, 100, 100, 100])
-    acc['impact (USD)'] = np.array([4786.95371, 9234.8, 9234.8, 9234.8])
-    acc['frequency * impact'] = np.array([478.695371, 369.392, 184.696, 92.348])
-    acc['Expected Annual Impact'] = np.ones(4)*np.nan
-    acc['Expected Annual Impact'].values[0] = 1125.131371
-    #acc_file = 'accounting.xlsx'
-    #acc_df = pd.read_excel(acc_file)
+    acc["Return Period (year)"] = np.array([10, 25, 50, 100])
+    acc["frequency (1/year)"] = np.array([1 / 10, 1 / 25, 1 / 50, 1 / 100])
+    acc["intensity (m)"] = np.array([0.7744541, 2.820973, 4.828216, 5.742804])
+    acc["Mean Damage Ration (%)"] = np.array([51.83603012, 100, 100, 100])
+    acc["impact (USD)"] = np.array([4786.95371, 9234.8, 9234.8, 9234.8])
+    acc["frequency * impact"] = np.array([478.695371, 369.392, 184.696, 92.348])
+    acc["Expected Annual Impact"] = np.ones(4) * np.nan
+    acc["Expected Annual Impact"].values[0] = 1125.131371
+    # acc_file = 'accounting.xlsx'
+    # acc_df = pd.read_excel(acc_file)
     acc.index += 1
     return acc
 
+
 def generate_plots_risk():
     fig_ma = plot_salvador_ma()
     ent = load_entity()
@@ -205,26 +286,26 @@ def generate_plots_risk():
     return fig_ma, fig_point, fig_houses, fig_if
 
+
 def non_linear_growth(cb_acel):
-    risk_present = 3.562753447707e+06
-    risk_future = 7.578426440635e+06
+    risk_present = 3.562753447707e06
+    risk_future = 7.578426440635e06
 
-    x_var = np.arange(cb_acel.present_year, cb_acel.future_year+1)
+    x_var = np.arange(cb_acel.present_year, cb_acel.future_year + 1)
     time_dep = cb_acel._time_dependency_array(0.5)
-    y_sqr = risk_present + (risk_future-risk_present) * time_dep
+    y_sqr = risk_present + (risk_future - risk_present) * time_dep
 
     time_dep = cb_acel._time_dependency_array(1.0)
-    y_lin = risk_present + (risk_future-risk_present) * time_dep
+    y_lin = risk_present + (risk_future - risk_present) * time_dep
 
     time_dep = cb_acel._time_dependency_array(2.0)
-    y_quad = risk_present + (risk_future-risk_present) * time_dep
+    y_quad = risk_present + (risk_future - risk_present) * time_dep
 
-    plt.bar(x_var, y_sqr, color='green', label='sublinear')
-    plt.bar(x_var, y_lin, color='blue', label='linear')
-    plt.bar(x_var, y_quad, color='red',
label='superlinear')
-    plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
+    plt.bar(x_var, y_sqr, color="green", label="sublinear")
+    plt.bar(x_var, y_lin, color="blue", label="linear")
+    plt.bar(x_var, y_quad, color="red", label="superlinear")
+    plt.ticklabel_format(style="sci", axis="y", scilimits=(0, 0))
     plt.ylim(3.0e6, 7.8e6)
-    plt.xlabel('Year')
-    plt.ylabel('Expected Annual Impact')
+    plt.xlabel("Year")
+    plt.ylabel("Expected Annual Impact")
     plt.legend()
-
diff --git a/script/jenkins/set_config.py b/script/jenkins/set_config.py
index 406eabb5e..75c4a1695 100644
--- a/script/jenkins/set_config.py
+++ b/script/jenkins/set_config.py
@@ -1,12 +1,12 @@
-import sys
 import json
+import sys
 
 key = sys.argv[1]
 val = sys.argv[2]
-jsonfile = 'climada.conf'
+jsonfile = "climada.conf"
 
-with open(jsonfile, encoding='UTF-8') as inf:
+with open(jsonfile, encoding="UTF-8") as inf:
     data = json.load(inf)
 data[key] = val
 
-with open(jsonfile, 'w', encoding='UTF-8') as outf:
+with open(jsonfile, "w", encoding="UTF-8") as outf:
     json.dump(data, outf)
diff --git a/script/jenkins/test_data_api.py b/script/jenkins/test_data_api.py
index 42e910374..38eec4cd3 100644
--- a/script/jenkins/test_data_api.py
+++ b/script/jenkins/test_data_api.py
@@ -19,31 +19,36 @@
 Test files_handler module.
 """
+import datetime as dt
+import unittest
 from pathlib import Path
 from sys import dont_write_bytecode
 
-import pandas as pd
-import unittest
-import xmlrunner
-import datetime as dt
 import numpy as np
+import pandas as pd
+import xmlrunner
 from pandas_datareader import wb
 
 from climada import CONFIG
 from climada.entity.exposures.litpop.nightlight import BM_FILENAMES, download_nl_files
-from climada.hazard.tc_tracks import IBTRACS_URL, IBTRACS_FILE
-from climada.util.finance import WORLD_BANK_WEALTH_ACC, WORLD_BANK_INC_GRP
-from climada.util.dwd_icon_loader import (download_icon_grib,
-                                          delete_icon_grib,
-                                          download_icon_centroids_file)
+from climada.hazard.tc_tracks import IBTRACS_FILE, IBTRACS_URL
+from climada.util.dwd_icon_loader import (
+    delete_icon_grib,
+    download_icon_centroids_file,
+    download_icon_grib,
+)
 from climada.util.files_handler import download_file, download_ftp
+from climada.util.finance import WORLD_BANK_INC_GRP, WORLD_BANK_WEALTH_ACC
+
 
 class TestDataAvail(unittest.TestCase):
     """Test availability of data used through APIs"""
 
     def test_noaa_nl_pass(self):
         """Test NOAA nightlights used in BlackMarble."""
-        file_down = download_file(f'{CONFIG.exposures.litpop.nightlights.noaa_url.str()}/F101992.v4.tar')
+        file_down = download_file(
+            f"{CONFIG.exposures.litpop.nightlights.noaa_url.str()}/F101992.v4.tar"
+        )
         Path(file_down).unlink()
 
     def test_nasa_nl_pass(self):
@@ -72,11 +77,11 @@ def test_wb_lev_hist_pass(self):
 
     def test_wb_api_pass(self):
         """Test World Bank API"""
-        wb.download(indicator='NY.GDP.MKTP.CD', country='CHE', start=1960, end=2030)
+        wb.download(indicator="NY.GDP.MKTP.CD", country="CHE", start=1960, end=2030)
 
     def test_ne_api_pass(self):
         """Test Natural Earth API"""
-        url = 'https://naturalearth.s3.amazonaws.com/10m_cultural/ne_10m_admin_0_countries.zip'
+        url = "https://naturalearth.s3.amazonaws.com/10m_cultural/ne_10m_admin_0_countries.zip"
         file_down = download_file(url)
         Path(file_down).unlink()
 
@@ -87,41 +92,41 @@ def test_ibtracs_pass(self):
 
     def test_icon_eu_forecast_download(self):
         """Test availability of DWD icon forecast."""
         run_datetime = dt.datetime.utcnow() - dt.timedelta(hours=5)
-        run_datetime = run_datetime.replace(hour=run_datetime.hour//12*12,
-                                            minute=0,
-                                            second=0,
-
microsecond=0) - icon_file = download_icon_grib(run_datetime,max_lead_time=1) + run_datetime = run_datetime.replace( + hour=run_datetime.hour // 12 * 12, minute=0, second=0, microsecond=0 + ) + icon_file = download_icon_grib(run_datetime, max_lead_time=1) self.assertEqual(len(icon_file), 1) - delete_icon_grib(run_datetime,max_lead_time=1) #deletes icon_file + delete_icon_grib(run_datetime, max_lead_time=1) # deletes icon_file self.assertFalse(Path(icon_file[0]).exists()) def test_icon_d2_forecast_download(self): """Test availability of DWD icon forecast.""" run_datetime = dt.datetime.utcnow() - dt.timedelta(hours=5) - run_datetime = run_datetime.replace(hour=run_datetime.hour//12*12, - minute=0, - second=0, - microsecond=0) - icon_file = download_icon_grib(run_datetime, - model_name='icon-d2-eps', - max_lead_time=1) + run_datetime = run_datetime.replace( + hour=run_datetime.hour // 12 * 12, minute=0, second=0, microsecond=0 + ) + icon_file = download_icon_grib( + run_datetime, model_name="icon-d2-eps", max_lead_time=1 + ) self.assertEqual(len(icon_file), 1) - delete_icon_grib(run_datetime, - model_name='icon-d2-eps', - max_lead_time=1) #deletes icon_file + delete_icon_grib( + run_datetime, model_name="icon-d2-eps", max_lead_time=1 + ) # deletes icon_file self.assertFalse(Path(icon_file[0]).exists()) def test_icon_centroids_download(self): """Test availablility of DWD icon grid information.""" grid_file = download_icon_centroids_file() Path(grid_file).unlink() - grid_file = download_icon_centroids_file(model_name='icon-d2-eps') + grid_file = download_icon_centroids_file(model_name="icon-d2-eps") Path(grid_file).unlink() + # Execute Tests -if __name__ == '__main__': +if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestDataAvail) from sys import argv - outputdir = argv[1] if len(argv) > 1 else str(Path.cwd().joinpath('tests_xml')) + + outputdir = argv[1] if len(argv) > 1 else str(Path.cwd().joinpath("tests_xml")) xmlrunner.XMLTestRunner(output=outputdir).run(TESTS) diff --git a/script/jenkins/test_notebooks.py b/script/jenkins/test_notebooks.py index bb0420194..f2e4fcdbc 100644 --- a/script/jenkins/test_notebooks.py +++ b/script/jenkins/test_notebooks.py @@ -6,20 +6,20 @@ import sys import unittest from pathlib import Path + import nbformat import climada +BOUND_TO_FAIL = "# Note: execution of this cell will fail" +"""Cells containing this line will not be executed in the test""" -BOUND_TO_FAIL = '# Note: execution of this cell will fail' -'''Cells containing this line will not be executed in the test''' - -EXCLUDED_FROM_NOTEBOOK_TEST = ['climada_installation_step_by_step.ipynb'] -'''These notebooks are excluded from being tested''' +EXCLUDED_FROM_NOTEBOOK_TEST = ["climada_installation_step_by_step.ipynb"] +"""These notebooks are excluded from being tested""" class NotebookTest(unittest.TestCase): - '''Generic TestCase for testing the executability of notebooks + """Generic TestCase for testing the executability of notebooks Attributes ---------- @@ -28,7 +28,7 @@ class NotebookTest(unittest.TestCase): notebook : str File name of the notebook. - ''' + """ def __init__(self, methodName, wd=None, notebook=None): super(NotebookTest, self).__init__(methodName) @@ -36,64 +36,81 @@ def __init__(self, methodName, wd=None, notebook=None): self.notebook = notebook def test_notebook(self): - '''Extracts code cells from the notebook and executes them one by one, using `exec`. + """Extracts code cells from the notebook and executes them one by one, using `exec`. 
Magic lines and help/? calls are eliminated. Cells containing `BOUND_TO_FAIL` are elided. - Cells doing multiprocessing are elided.''' + Cells doing multiprocessing are elided.""" cwd = Path.cwd() try: # cd to the notebook directory os.chdir(self.wd) - print(f'start testing {self.notebook}') + print(f"start testing {self.notebook}") # read the notebook into a string - with open(self.notebook, encoding='utf8') as nb: + with open(self.notebook, encoding="utf8") as nb: content = nb.read() # parse the string with nbformat.reads - cells = nbformat.reads(content, 4)['cells'] + cells = nbformat.reads(content, 4)["cells"] # create namespace with IPython standards namespace = dict() - exec('from IPython.display import display', namespace) + exec("from IPython.display import display", namespace) # run all cells i = 0 for c in cells: # skip markdown cells - if c['cell_type'] != 'code': continue + if c["cell_type"] != "code": + continue i += 1 # skip deliberately failing cells - if BOUND_TO_FAIL in c['source']: continue + if BOUND_TO_FAIL in c["source"]: + continue # skip multiprocessing cells - if any([ tabu in c['source'].split() for tabu in [ - 'import multiprocessing', - 'from multiprocessing import', - ]]): - print('\n'.join([ - f'\nskip multiprocessing cell {i} in {self.notebook}', - '+'+'-'*68+'+', - c['source'] - ])) + if any( + [ + tabu in c["source"].split() + for tabu in [ + "import multiprocessing", + "from multiprocessing import", + ] + ] + ): + print( + "\n".join( + [ + f"\nskip multiprocessing cell {i} in {self.notebook}", + "+" + "-" * 68 + "+", + c["source"], + ] + ) + ) continue # remove non python lines and help calls which require user input # or involve pools being opened/closed - python_code = "\n".join([ - re.sub(r'pool=\w+', 'pool=None', ln) - for ln in c['source'].split("\n") - if not ln.startswith('%') - and not ln.startswith('help(') - and not ln.startswith('ask_ok(') - and not ln.startswith('ask_ok(') - and not ln.startswith('pool') # by convention Pool objects are called pool - and not ln.strip().endswith('?') - and not re.search(r'(\W|^)Pool\(', ln) # prevent Pool object creation - ]) + python_code = "\n".join( + [ + re.sub(r"pool=\w+", "pool=None", ln) + for ln in c["source"].split("\n") + if not ln.startswith("%") + and not ln.startswith("help(") + and not ln.startswith("ask_ok(") + and not ln.startswith("ask_ok(") + and not ln.startswith( + "pool" + ) # by convention Pool objects are called pool + and not ln.strip().endswith("?") + and not re.search( + r"(\W|^)Pool\(", ln + ) # prevent Pool object creation + ] + ) # execute the python code try: @@ -101,53 +118,60 @@ def test_notebook(self): # report failures except Exception as e: - failure = "\n".join([ - f"notebook {self.notebook} cell {i} failed with {e.__class__}", - f"{e}", - '+'+'-'*68+'+', - c['source'] - ]) - print(f'failed {self.notebook}') + failure = "\n".join( + [ + f"notebook {self.notebook} cell {i} failed with {e.__class__}", + f"{e}", + "+" + "-" * 68 + "+", + c["source"], + ] + ) + print(f"failed {self.notebook}") print(failure) self.fail(failure) - print(f'succeeded {self.notebook}') + print(f"succeeded {self.notebook}") finally: os.chdir(cwd) def main(install_dir): import xmlrunner - + sys.path.append(str(install_dir)) - - notebook_dir = install_dir.joinpath('doc', 'tutorial') - '''The path to the notebook directories.''' + + notebook_dir = install_dir.joinpath("doc", "tutorial") + """The path to the notebook directories.""" # list notebooks in the NOTEBOOK_DIR - notebooks = [f.absolute() - for f in 
sorted(notebook_dir.iterdir()) - if os.path.splitext(f)[1] == ('.ipynb') - and not f.name in EXCLUDED_FROM_NOTEBOOK_TEST] + notebooks = [ + f.absolute() + for f in sorted(notebook_dir.iterdir()) + if os.path.splitext(f)[1] == (".ipynb") + and not f.name in EXCLUDED_FROM_NOTEBOOK_TEST + ] # build a test suite with a test for each notebook suite = unittest.TestSuite() for notebook in notebooks: - class NBTest(NotebookTest): pass + + class NBTest(NotebookTest): + pass + test_name = "_".join(notebook.stem.split()) setattr(NBTest, test_name, NBTest.test_notebook) suite.addTest(NBTest(test_name, notebook.parent, notebook.name)) # run the tests and write xml reports to tests_xml - output_dir = install_dir.joinpath('tests_xml') + output_dir = install_dir.joinpath("tests_xml") xmlrunner.XMLTestRunner(output=str(output_dir)).run(suite) -if __name__ == '__main__': - if sys.argv[1] == 'report': +if __name__ == "__main__": + if sys.argv[1] == "report": install_dir = Path(sys.argv[2]) if len(sys.argv) > 2 else Path.cwd() main(install_dir) - + else: jd, nb = os.path.split(sys.argv[1]) - unittest.TextTestRunner(verbosity=2).run(NotebookTest('test_notebook', jd, nb)) + unittest.TextTestRunner(verbosity=2).run(NotebookTest("test_notebook", jd, nb)) diff --git a/setup.py b/setup.py index dd260d7ee..9429535c8 100644 --- a/setup.py +++ b/setup.py @@ -2,12 +2,13 @@ """ from pathlib import Path -from setuptools import setup, find_namespace_packages + +from setuptools import find_namespace_packages, setup here = Path(__file__).parent.absolute() # Get the long description from the README file -with open(here.joinpath('README.md'), encoding='utf-8') as f: +with open(here.joinpath("README.md"), encoding="utf-8") as f: long_description = f.read() # Requirements for documentation @@ -31,86 +32,77 @@ ] # Requirements for development -DEPS_DEV = DEPS_DOC + DEPS_TEST + [ - "pre-commit", -] +DEPS_DEV = ( + DEPS_DOC + + DEPS_TEST + + [ + "pre-commit", + ] +) setup( - name='climada', - - version='5.0.1-dev', - - description='CLIMADA in Python', - + name="climada", + version="5.0.1-dev", + description="CLIMADA in Python", long_description=long_description, long_description_content_type="text/markdown", - - url='https://github.com/CLIMADA-project/climada_python', - - author='ETH', - author_email='schmide@ethz.ch', - - license='OSI Approved :: GNU Lesser General Public License v3 (GPLv3)', - + url="https://github.com/CLIMADA-project/climada_python", + author="ETH", + author_email="schmide@ethz.ch", + license="OSI Approved :: GNU Lesser General Public License v3 (GPLv3)", classifiers=[ - 'Development Status :: 4 - Beta', - 'Programming Language :: Python :: 3', - 'Topic :: Scientific/Engineering :: Atmospheric Science', - 'Topic :: Scientific/Engineering :: GIS', - 'Topic :: Scientific/Engineering :: Mathematics', + "Development Status :: 4 - Beta", + "Programming Language :: Python :: 3", + "Topic :: Scientific/Engineering :: Atmospheric Science", + "Topic :: Scientific/Engineering :: GIS", + "Topic :: Scientific/Engineering :: Mathematics", ], - - keywords='climate adaptation', - + keywords="climate adaptation", python_requires=">=3.9,<3.12", - install_requires=[ - 'bayesian-optimization', - 'bottleneck', - 'cartopy', - 'cfgrib', - 'contextily', - 'dask', - 'deprecation', - 'geopandas', - 'h5py', - 'haversine', - 'matplotlib', - 'netcdf4', - 'numba', - 'openpyxl', - 'overpy', - 'pandas', - 'pandas-datareader', - 'pathos', - 'peewee', - 'pillow', - 'pint', - 'pycountry', - 'pyproj', - 'rasterio', - 'salib', - 'scikit-learn', - 
'seaborn', - 'statsmodels', - 'sparse', - 'tables', - 'tabulate', - 'tqdm', - 'xarray', - 'xlrd', - 'xlsxwriter', - 'xmlrunner' + "bayesian-optimization", + "bottleneck", + "cartopy", + "cfgrib", + "contextily", + "dask", + "deprecation", + "geopandas", + "h5py", + "haversine", + "matplotlib", + "netcdf4", + "numba", + "openpyxl", + "overpy", + "pandas", + "pandas-datareader", + "pathos", + "peewee", + "pillow", + "pint", + "pycountry", + "pyproj", + "rasterio", + "salib", + "scikit-learn", + "seaborn", + "statsmodels", + "sparse", + "tables", + "tabulate", + "tqdm", + "xarray", + "xlrd", + "xlsxwriter", + "xmlrunner", ], - extras_require={ "doc": DEPS_DOC, "test": DEPS_TEST, "dev": DEPS_DEV, }, - - packages=find_namespace_packages(include=['climada*']), - - setup_requires=['setuptools_scm'], + packages=find_namespace_packages(include=["climada*"]), + setup_requires=["setuptools_scm"], include_package_data=True, )