diff --git a/CHANGELOG.md b/CHANGELOG.md index cd08fe244..ad5c01746 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,8 @@ Code freeze date: YYYY-MM-DD ### Added +- `climada.util.interpolation` module for inter- and extrapolation util functions used in local exceedance intensity and return period functions [#930](https://github.com/CLIMADA-project/climada_python/pull/930) + ### Changed - In `climada.util.plot.geo_im_from_array`, NaNs are plotted in gray while cells with no centroid are not plotted [#929](https://github.com/CLIMADA-project/climada_python/pull/929) @@ -55,6 +57,19 @@ Updated: - GitHub actions workflow for CLIMADA Petals compatibility tests [#855](https://github.com/CLIMADA-project/climada_python/pull/855) - `climada.util.calibrate` module for calibrating impact functions [#692](https://github.com/CLIMADA-project/climada_python/pull/692) +- Method `Hazard.check_matrices` for bringing the stored CSR matrices into "canonical format" [#893](https://github.com/CLIMADA-project/climada_python/pull/893) +- Generic s-shaped impact function via `ImpactFunc.from_poly_s_shape` [#878](https://github.com/CLIMADA-project/climada_python/pull/878) +- climada.hazard.centroids.centr.Centroids.get_area_pixel +- climada.hazard.centroids.centr.Centroids.get_dist_coast +- climada.hazard.centroids.centr.Centroids.get_elevation +- climada.hazard.centroids.centr.Centroids.get_meta +- climada.hazard.centroids.centr.Centroids.get_pixel_shapes +- climada.hazard.centroids.centr.Centroids.to_crs +- climada.hazard.centroids.centr.Centroids.to_default_crs +- climada.hazard.centroids.centr.Centroids.write_csv +- climada.hazard.centroids.centr.Centroids.write_excel +- climada.hazard.local_return_period [#898](https://github.com/CLIMADA-project/climada_python/pull/898) +- climada.util.plot.subplots_from_gdf [#898](https://github.com/CLIMADA-project/climada_python/pull/898) ### Changed @@ -79,22 +94,6 @@ CLIMADA tutorials. [#872](https://github.com/CLIMADA-project/climada_python/pull - Fix broken links in `CONTRIBUTING.md` [#900](https://github.com/CLIMADA-project/climada_python/pull/900) - When writing `TCTracks` to NetCDF, only apply compression to `float` or `int` data types. 
This fixes a downstream issue, see [climada_petals#135](https://github.com/CLIMADA-project/climada_petals/issues/135) [#911](https://github.com/CLIMADA-project/climada_python/pull/911) -### Added - -- Method `Hazard.check_matrices` for bringing the stored CSR matrices into "canonical format" [#893](https://github.com/CLIMADA-project/climada_python/pull/893) -- Generic s-shaped impact function via `ImpactFunc.from_poly_s_shape` [#878](https://github.com/CLIMADA-project/climada_python/pull/878) -- climada.hazard.centroids.centr.Centroids.get_area_pixel -- climada.hazard.centroids.centr.Centroids.get_dist_coast -- climada.hazard.centroids.centr.Centroids.get_elevation -- climada.hazard.centroids.centr.Centroids.get_meta -- climada.hazard.centroids.centr.Centroids.get_pixel_shapes -- climada.hazard.centroids.centr.Centroids.to_crs -- climada.hazard.centroids.centr.Centroids.to_default_crs -- climada.hazard.centroids.centr.Centroids.write_csv -- climada.hazard.centroids.centr.Centroids.write_excel -- climada.hazard.local_return_period [#898](https://github.com/CLIMADA-project/climada_python/pull/898) -- climada.util.plot.subplots_from_gdf [#898](https://github.com/CLIMADA-project/climada_python/pull/898) - ### Deprecated - climada.hazard.centroids.centr.Centroids.from_lat_lon diff --git a/climada/util/interpolation.py b/climada/util/interpolation.py new file mode 100644 index 000000000..c2e514797 --- /dev/null +++ b/climada/util/interpolation.py @@ -0,0 +1,254 @@ +""" +This file is part of CLIMADA. + +Copyright (C) 2017 ETH Zurich, CLIMADA contributors listed in AUTHORS. + +CLIMADA is free software: you can redistribute it and/or modify it under the +terms of the GNU General Public License as published by the Free +Software Foundation, version 3. + +CLIMADA is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A +PARTICULAR PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with CLIMADA. If not, see <https://www.gnu.org/licenses/>. + +--- + +Define interpolation and extrapolation functions for calculating (local) exceedance frequencies and return periods +""" + + +import logging + +import numpy as np +from scipy import interpolate + +from climada.util.value_representation import sig_dig_list + +LOGGER = logging.getLogger(__name__) + +def interpolate_ev( + x_test, + x_train, + y_train, + logx=False, + logy=False, + x_threshold=None, + y_threshold=None, + extrapolation=False, + y_asymptotic=np.nan + ): + """ + Util function to interpolate (and extrapolate) training data (x_train, y_train) + to new points x_test with several options (log scale, thresholds) + + Parameters + ---------- + x_test : array_like + 1-D array of x-values for which training data should be interpolated + x_train : array_like + 1-D array of x-values of training data + y_train : array_like + 1-D array of y-values of training data + logx : bool, optional + If set to True, x-values are converted to log scale. Defaults to False. + logy : bool, optional + If set to True, y-values are converted to log scale. Defaults to False. + x_threshold : float, optional + Lower threshold to filter x_train. Defaults to None. + y_threshold : float, optional + Lower threshold to filter y_train. Defaults to None. + extrapolation : bool, optional + If set to True, values will be extrapolated.
If set to False, x_test values + smaller than all x_train values will be assigned y_train[0] (x_train must be sorted in + ascending order), and x_test values larger than all x_train values will be assigned + y_asymptotic. Defaults to False. + y_asymptotic : float, optional + Return value for x_test values larger than all x_train values; used if + extrapolation is False or if x_train.size < 2. Defaults to np.nan. + + Returns + ------- + np.array + interpolated values y_test for the test points x_test + """ + + # preprocess interpolation data + x_test, x_train, y_train = _preprocess_interpolation_data( + x_test, x_train, y_train, logx, logy, x_threshold, y_threshold + ) + + # handle case of small training data sizes + if x_train.size < 2: + LOGGER.warning('Data is being extrapolated.') + return _interpolate_small_input(x_test, x_train, y_train, logy, y_asymptotic) + + # calculate fill values + if extrapolation: + fill_value = 'extrapolate' + if np.min(x_test) < np.min(x_train) or np.max(x_test) > np.max(x_train): + LOGGER.warning('Data is being extrapolated.') + else: + if not all(sorted(x_train) == x_train): + raise ValueError('x_train array must be sorted in ascending order.') + fill_value = (y_train[0], np.log10(y_asymptotic) if logy else y_asymptotic) + + interpolation = interpolate.interp1d( + x_train, y_train, fill_value=fill_value, bounds_error=False) + y_test = interpolation(x_test) + + # adapt output scale + if logy: + y_test = np.power(10., y_test) + return y_test + +def stepfunction_ev( + x_test, + x_train, + y_train, + x_threshold=None, + y_threshold=None, + y_asymptotic=np.nan + ): + """ + Util function to interpolate and extrapolate training data (x_train, y_train) + to new points x_test using a step function + + Parameters + ---------- + x_test : array_like + 1-D array of x-values for which training data should be interpolated + x_train : array_like + 1-D array of x-values of training data + y_train : array_like + 1-D array of y-values of training data + x_threshold : float, optional + Lower threshold to filter x_train. Defaults to None. + y_threshold : float, optional + Lower threshold to filter y_train. Defaults to None. + y_asymptotic : float, optional + Return value for x_test values larger than all x_train values. Defaults to np.nan. + + Returns + ------- + np.array + interpolated values y_test for the test points x_test + """ + + # preprocess interpolation data + x_test, x_train, y_train = _preprocess_interpolation_data( + x_test, x_train, y_train, None, None, x_threshold, y_threshold + ) + + # handle case of small training data sizes + if x_train.size < 2: + return _interpolate_small_input(x_test, x_train, y_train, None, y_asymptotic) + + # find indices at which x_test values would be inserted into the sorted x_train + if not all(sorted(x_train) == x_train): + raise ValueError('Input array x_train must be sorted in ascending order.') + indx = np.searchsorted(x_train, x_test) + y_test = y_train[indx.clip(max=len(x_train) - 1)] + y_test[indx == len(x_train)] = y_asymptotic + + return y_test + +def _preprocess_interpolation_data( + x_test, + x_train, + y_train, + logx, + logy, + x_threshold, + y_threshold + ): + """ + helper function to preprocess interpolation training and test data by filtering data below + thresholds and converting to log scale if required + """ + + if x_train.shape != y_train.shape: + raise ValueError(f'Incompatible shapes of input data, x_train {x_train.shape} ' + f'and y_train {y_train.shape}. 
Should be the same.') + + # transform input to float arrays + x_test, x_train, y_train = (np.array(x_test).astype(float), + np.array(x_train).astype(float), + np.array(y_train).astype(float)) + + # filter out x and y values at or below the thresholds + if x_threshold or x_threshold == 0: + x_th = np.asarray(x_train > x_threshold).squeeze() + x_train = x_train[x_th] + y_train = y_train[x_th] + + if y_threshold or y_threshold == 0: + y_th = np.asarray(y_train > y_threshold).squeeze() + x_train = x_train[y_th] + y_train = y_train[y_th] + + # convert to log scale + if logx: + x_train, x_test = np.log10(x_train), np.log10(x_test) + if logy: + y_train = np.log10(y_train) + + return (x_test, x_train, y_train) + +def _interpolate_small_input(x_test, x_train, y_train, logy, y_asymptotic): + """ + helper function to handle cases where the interpolation data is small (empty or one point) + """ + # return y_asymptotic if x_train and y_train empty + if x_train.size == 0: + return np.full_like(x_test, y_asymptotic) + + # reconvert logarithmic y_train to original y_train + if logy: + y_train = np.power(10., y_train) + + # if only one (x_train, y_train), return stepfunction with + # y_train if x_test < x_train and y_asymptotic if x_test > x_train + y_test = np.full_like(x_test, y_train[0]) + y_test[np.squeeze(x_test) > np.squeeze(x_train)] = y_asymptotic + return y_test + +def group_frequency(frequency, value, n_sig_dig=2): + """ + Util function to aggregate (add) frequencies for equal values + + Parameters + ---------- + frequency : array_like + Frequency array + value : array_like + Value array in ascending order + n_sig_dig : int + Number of significant digits for value when grouping frequency. + Defaults to 2. + + Returns + ------- + tuple + (frequency array after aggregation, + unique value array in ascending order) + """ + frequency, value = np.array(frequency), np.array(value) + if frequency.size == 0 and value.size == 0: + return ([], []) + + if len(value) != len(np.unique(sig_dig_list(value, n_sig_dig=n_sig_dig))): + # check ordering of value + if not all(sorted(value) == value): + raise ValueError('Value array must be sorted in ascending order.') + # add frequency for equal value + value, start_indices = np.unique( + sig_dig_list(value, n_sig_dig=n_sig_dig), return_index=True) + start_indices = np.insert(start_indices, len(value), len(frequency)) + frequency = np.array([ + sum(frequency[start_indices[i]:start_indices[i+1]]) + for i in range(len(value)) + ]) + return frequency, value diff --git a/climada/util/test/test_interpolation.py b/climada/util/test/test_interpolation.py new file mode 100644 index 000000000..1c780fcce --- /dev/null +++ b/climada/util/test/test_interpolation.py @@ -0,0 +1,183 @@ +""" +This file is part of CLIMADA. + +Copyright (C) 2017 ETH Zurich, CLIMADA contributors listed in AUTHORS. + +CLIMADA is free software: you can redistribute it and/or modify it under the +terms of the GNU General Public License as published by the Free +Software Foundation, version 3. + +CLIMADA is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A +PARTICULAR PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with CLIMADA. If not, see <https://www.gnu.org/licenses/>. 
+ +--- + +Test of interpolation module +""" + +import unittest +import numpy as np + +from climada.util.interpolation import interpolate_ev, stepfunction_ev, group_frequency + + +class TestFitMethods(unittest.TestCase): + """Test different fit configurations""" + + def test_interpolate_ev_linear_interp(self): + """Test linear interpolation""" + x_train = np.array([1., 3., 5.]) + y_train = np.array([8., 4., 2.]) + x_test = np.array([0., 3., 4., 6.]) + np.testing.assert_allclose( + interpolate_ev(x_test, x_train, y_train), + np.array([8., 4., 3., np.nan]) + ) + np.testing.assert_allclose( + interpolate_ev(x_test, x_train, y_train, y_asymptotic=0), + np.array([8., 4., 3., 0.]) + ) + + def test_interpolate_ev_threshold_parameters(self): + """Test input threshold parameters""" + x_train = np.array([0., 3., 6.]) + y_train = np.array([4., 1., 4.]) + x_test = np.array([-1., 3., 4.]) + np.testing.assert_allclose( + interpolate_ev(x_test, x_train, y_train), + np.array([4., 1., 2.]) + ) + np.testing.assert_allclose( + interpolate_ev(x_test, x_train, y_train, x_threshold=1.), + np.array([1., 1., 2.]) + ) + np.testing.assert_allclose( + interpolate_ev(x_test, x_train, y_train, y_threshold=2.), + np.array([4., 4., 4.]) + ) + + def test_interpolate_ev_scale_parameters(self): + """Test log scale parameters""" + x_train = np.array([1e1, 1e3]) + y_train = np.array([1., 3.]) + x_test = np.array([1e0, 1e2]) + np.testing.assert_allclose( + interpolate_ev(x_test, x_train, y_train, logx=True, extrapolation=True), + np.array([0., 2.]) + ) + np.testing.assert_allclose( + interpolate_ev(x_test, x_train, y_train, logx=True), + np.array([1., 2.]) + ) + x_train = np.array([1., 3.]) + y_train = np.array([1e1, 1e3]) + x_test = np.array([0., 2.]) + np.testing.assert_allclose( + interpolate_ev(x_test, x_train, y_train, logy=True, extrapolation=True), + np.array([1e0, 1e2]) + ) + x_train = np.array([1e1, 1e3]) + y_train = np.array([1e1, 1e5]) + x_test = np.array([1e0, 1e2]) + np.testing.assert_allclose( + interpolate_ev(x_test, x_train, y_train, logx=True, logy=True, extrapolation=True), + np.array([1e-1, 1e3]) + ) + + def test_interpolate_ev_degenerate_input(self): + """Test interp to constant zeros""" + x_train = np.array([1., 3., 5.]) + x_test = np.array([0., 2., 4.]) + y_train = np.zeros(3) + np.testing.assert_allclose( + interpolate_ev(x_test, x_train, y_train), + np.array([0., 0., 0.]) + ) + + def test_interpolate_ev_small_input(self): + """Test small input""" + x_train = np.array([1.]) + y_train = np.array([2.]) + x_test = np.array([0., 1., 2.]) + np.testing.assert_allclose( + interpolate_ev(x_test, x_train, y_train), + np.array([2., 2., np.nan]) + ) + np.testing.assert_allclose( + interpolate_ev(x_test, x_train, y_train, y_asymptotic=0), + np.array([2., 2., 0.]) + ) + x_train = np.array([]) + y_train = np.array([]) + x_test = np.array([0., 1., 2.]) + np.testing.assert_allclose( + interpolate_ev(x_test, x_train, y_train), + np.full(3, np.nan) + ) + np.testing.assert_allclose( + interpolate_ev(x_test, x_train, y_train, y_asymptotic=0), + np.zeros(3) + ) + + def test_stepfunction_ev(self): + """Test stepfunction method""" + x_train = np.array([1., 3., 5.]) + y_train = np.array([8., 4., 2.]) + x_test = np.array([0., 3., 4., 6.]) + np.testing.assert_allclose( + stepfunction_ev(x_test, x_train, y_train), + np.array([8., 4., 2., np.nan]) + ) + np.testing.assert_allclose( + stepfunction_ev(x_test, x_train, y_train, y_asymptotic=0.), + np.array([8., 4., 2., 0.]) + ) + + def test_stepfunction_ev_small_input(self): + """Test
small input""" + x_train = np.array([1.]) + y_train = np.array([2.]) + x_test = np.array([0., 1., 2.]) + np.testing.assert_allclose( + stepfunction_ev(x_test, x_train, y_train), + np.array([2., 2., np.nan]) + ) + np.testing.assert_allclose( + stepfunction_ev(x_test, x_train, y_train, y_asymptotic=0), + np.array([2., 2., 0.]) + ) + x_train = np.array([]) + y_train = np.array([]) + x_test = np.array([0., 1., 2.]) + np.testing.assert_allclose( + stepfunction_ev(x_test, x_train, y_train), + np.full(3, np.nan) + ) + np.testing.assert_allclose( + stepfunction_ev(x_test, x_train, y_train, y_asymptotic=0), + np.zeros(3) + ) + + def test_frequency_group(self): + """Test frequency grouping method""" + frequency = np.ones(6) + intensity = np.array([1., 1., 1., 2., 3., 3]) + np.testing.assert_allclose( + group_frequency(frequency, intensity), + ([3, 1, 2], [1, 2, 3]) + ) + np.testing.assert_allclose( + group_frequency([], []), + ([], []) + ) + with self.assertRaises(ValueError): + group_frequency(frequency, intensity[::-1]) + +# Execute Tests +if __name__ == "__main__": + TESTS = unittest.TestLoader().loadTestsFromTestCase(TestFitMethods) + unittest.TextTestRunner(verbosity=2).run(TESTS) diff --git a/doc/tutorial/climada_engine_Forecast.ipynb b/doc/tutorial/climada_engine_Forecast.ipynb index 2718ca852..74cbd00f8 100644 --- a/doc/tutorial/climada_engine_Forecast.ipynb +++ b/doc/tutorial/climada_engine_Forecast.ipynb @@ -255,8 +255,8 @@ "\n", "### generate exposure\n", "# find out which hazard coord to consider\n", - "CHE_borders = u_plot._get_borders(np.stack([exposure.gdf.latitude.values,\n", - " exposure.gdf.longitude.values],\n", + "CHE_borders = u_plot._get_borders(np.stack([exposure.gdf['latitude'].values,\n", + " exposure.gdf['longitude'].values],\n", " axis=1)\n", " )\n", "centroid_selection = np.logical_and(np.logical_and(hazard.centroids.lat >= CHE_borders[2],\n", @@ -390,4 +390,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} +} \ No newline at end of file diff --git a/doc/tutorial/climada_engine_Impact.ipynb b/doc/tutorial/climada_engine_Impact.ipynb index d598d044f..bbe55afd6 100644 --- a/doc/tutorial/climada_engine_Impact.ipynb +++ b/doc/tutorial/climada_engine_Impact.ipynb @@ -68,7 +68,7 @@ "| event_id |list(int)| id (>0) of each hazard event (Hazard.event_id)|\n", "| event_name |(list(str))| name of each event (Hazard.event_name)|\n", "| date |np.array| date of events (Hazard.date)|\n", - "| coord_exp |np.array| exposures coordinates [lat, lon] (in degrees) (Exposure.gdf.latidues, Exposure.gdf.longitude)|\n", + "| coord_exp |np.array| exposures coordinates [lat, lon] (in degrees) (Exposure.gdf['latitude'], Exposure.gdf['longitude'])|\n", "| frequency |np.array| frequency of events (Hazard.frequency)|\n", "| frequency_unit |str| unit of event frequency, by default '1/year', i.e., annual (Hazard.frequency_unit)|\n", "| unit |str| value unit used (Exposure.value_unit)|\n", @@ -1468,7 +1468,7 @@ "\n", "# Set Hazard in Exposures points\n", "# set centroids from exposures coordinates\n", - "centr_pnt = Centroids.from_lat_lon(exp_pnt.gdf.latitude.values, exp_pnt.gdf.longitude.values, exp_pnt.crs)\n", + "centr_pnt = Centroids.from_lat_lon(exp_pnt.gdf['latitude'].values, exp_pnt.gdf['longitude'].values, exp_pnt.crs)\n", "# compute Hazard in that centroids\n", "tr_pnt = TCTracks.from_ibtracs_netcdf(storm_id='2007314N10093')\n", "tc_pnt = TropCyclone.from_tracks(tr_pnt, centroids=centr_pnt)\n", @@ -1492,7 +1492,7 @@ "# Compute Impact\n", "imp_pnt = ImpactCalc(exp_pnt, impf_pnt, tc_pnt).impact()\n",
"# nearest neighbor of exposures to centroids gives identity\n", - "print('Nearest neighbor hazard.centroids indexes for each exposure:', exp_pnt.gdf.centr_TC.values)\n", + "print('Nearest neighbor hazard.centroids indexes for each exposure:', exp_pnt.gdf['centr_TC'].values)\n", "imp_pnt.plot_scatter_eai_exposure(ignore_zero=False, buffer=0.05);" ] }, @@ -1716,7 +1716,7 @@ "# Compute impact\n", "imp_ras = ImpactCalc(exp_ras, impf_ras, haz_ras).impact(save_mat=False)\n", "# nearest neighbor of exposures to centroids is not identity because litpop does not contain data outside the country polygon\n", - "print('\\n Nearest neighbor hazard.centroids indexes for each exposure:', exp_ras.gdf.centr_FL.values)\n", + "print('\\n Nearest neighbor hazard.centroids indexes for each exposure:', exp_ras.gdf['centr_FL'].values)\n", "imp_ras.plot_raster_eai_exposure();" ] }, @@ -1967,7 +1967,7 @@ "\n", "# compute sequence of hazards using TropCyclone video_intensity method\n", "exp_sea = add_sea(exp_video, (100, 5))\n", - "centr_video = Centroids.from_lat_lon(exp_sea.gdf.latitude.values, exp_sea.gdf.longitude.values)\n", + "centr_video = Centroids.from_lat_lon(exp_sea.gdf['latitude'].values, exp_sea.gdf['longitude'].values)\n", "centr_video.check()\n", "\n", "track_name = '2017242N16333'\n", diff --git a/doc/tutorial/climada_engine_unsequa.ipynb b/doc/tutorial/climada_engine_unsequa.ipynb index 291641612..08558632e 100644 --- a/doc/tutorial/climada_engine_unsequa.ipynb +++ b/doc/tutorial/climada_engine_unsequa.ipynb @@ -177,7 +177,7 @@ "# Here x_exp is the input uncertainty parameter and exp_func the inputvar.func.\n", "def exp_func(x_exp, exp_base=exp_base):\n", " exp = exp_base.copy()\n", - " exp.gdf.value *= x_exp\n", + " exp.gdf['value'] *= x_exp\n", " return exp" ] }, @@ -989,7 +989,7 @@ "exp_base.assign_centroids(haz)\n", "def exp_base_func(x_exp, exp_base):\n", " exp = exp_base.copy()\n", - " exp.gdf.value *= x_exp\n", + " exp.gdf['value'] *= x_exp\n", " return exp\n", "from functools import partial\n", "exp_func = partial(exp_base_func, exp_base=exp_base)" @@ -2462,7 +2462,7 @@ "exp_base.assign_centroids(haz)\n", "def exp_base_func(x_exp, exp_base):\n", " exp = exp_base.copy()\n", - " exp.gdf.value *= x_exp\n", + " exp.gdf['value'] *= x_exp\n", " return exp\n", "from functools import partial\n", "exp_func = partial(exp_base_func, exp_base=exp_base)" @@ -2797,7 +2797,7 @@ " from climada.util.constants import ENT_DEMO_TODAY\n", " entity = Entity.from_excel(ENT_DEMO_TODAY)\n", " entity.exposures.ref_year = 2018\n", - " entity.exposures.gdf.value *= x_ent\n", + " entity.exposures.gdf['value'] *= x_ent\n", " return entity\n", "\n", "# Entity in the future has a +- 10% uncertainty in the cost of all the adapatation measures\n", @@ -5267,7 +5267,7 @@ "\n", "def exp_func(cnt, x_exp, exp_list=exp_list):\n", " exp = exp_list[int(cnt)].copy()\n", - " exp.gdf.value *= x_exp\n", + " exp.gdf['value'] *= x_exp\n", " return exp\n", "\n", "exp_distr = {\"x_exp\": sp.stats.uniform(0.9, 0.2),\n", @@ -5519,15 +5519,15 @@ " global exp_base\n", " if 'exp_base' in globals():\n", " if isinstance(exp_base, Exposures):\n", - " if exp_base.gdf.filename != str(filename):\n", + " if exp_base.gdf['filename'] != str(filename):\n", " exp_base = Exposures.from_hdf5(filename)\n", - " exp_base.gdf.filename = str(filename)\n", + " exp_base.gdf['filename'] = str(filename)\n", " else:\n", " exp_base = Exposures.from_hdf5(filename)\n", - " exp_base.gdf.filename = str(filename)\n", + " exp_base.gdf['filename'] = str(filename)\n", "\n", 
" exp = exp_base.copy()\n", - " exp.gdf.value *= x_exp\n", + " exp.gdf['value'] *= x_exp\n", " return exp\n", "\n", "exp_distr = {\"x_exp\": sp.stats.uniform(0.9, 0.2),\n", @@ -5727,4 +5727,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} +} \ No newline at end of file diff --git a/doc/tutorial/climada_entity_Exposures.ipynb b/doc/tutorial/climada_entity_Exposures.ipynb index 3549b6eba..bb0328994 100644 --- a/doc/tutorial/climada_entity_Exposures.ipynb +++ b/doc/tutorial/climada_entity_Exposures.ipynb @@ -1639,7 +1639,7 @@ "# Example 3: plot_raster method\n", "from climada.util.plot import add_cntry_names # use climada's plotting utilities\n", "ax = exp.plot_raster(); # plot with same resolution as data\n", - "add_cntry_names(ax, [exp.gdf.longitude.min(), exp.gdf.longitude.max(), exp.gdf.latitude.min(), exp.gdf.latitude.max()])\n", + "add_cntry_names(ax, [exp.gdf['longitude'].min(), exp.gdf['longitude'].max(), exp.gdf['latitude'].min(), exp.gdf['latitude'].max()])\n", "\n", "# use keyword argument save_tiff='filepath.tiff' to save the corresponding raster in tiff format\n", "# use keyword argument raster_res='desired number' to change resolution of the raster." diff --git a/doc/tutorial/climada_entity_LitPop.ipynb b/doc/tutorial/climada_entity_LitPop.ipynb index a6f91b83a..8625fe394 100644 --- a/doc/tutorial/climada_entity_LitPop.ipynb +++ b/doc/tutorial/climada_entity_LitPop.ipynb @@ -161,9 +161,9 @@ " raise err\n", "exp.plot_scatter();\n", "\n", - "# Note that `exp.gdf.region_id` is a number identifying each country:\n", + "# Note that `exp.gdf['region_id']` is a number identifying each country:\n", "print('\\n Region IDs (`region_id`) in this exposure:')\n", - "print(exp.gdf.region_id.unique())" + "print(exp.gdf['region_id'].unique())" ] }, { diff --git a/doc/tutorial/climada_hazard_TropCyclone.ipynb b/doc/tutorial/climada_hazard_TropCyclone.ipynb index 9c0b1d47f..b82b38184 100644 --- a/doc/tutorial/climada_hazard_TropCyclone.ipynb +++ b/doc/tutorial/climada_hazard_TropCyclone.ipynb @@ -1743,14 +1743,14 @@ "# 1. Which is the time frequency of the data?\n", "# The values of a DataArray are numpy.arrays.\n", "# The nummpy.ediff1d computes the different between elements in an array\n", - "diff_time_ns = np.ediff1d(tc_syn.time)\n", + "diff_time_ns = np.ediff1d(tc_syn[\"time\"])\n", "diff_time_h = diff_time_ns.astype(int)/1000/1000/1000/60/60\n", "print('Mean time frequency in hours:', diff_time_h.mean())\n", "print('Std time frequency in hours:', diff_time_h.std())\n", "print()\n", "\n", "# 2. 
Compute the maximum sustained wind for each day.\n", - "print('Daily max sustained wind:', tc_syn.max_sustained_wind.groupby('time.day').max())" + "print('Daily max sustained wind:', tc_syn[\"max_sustained_wind\"].groupby('time.day').max())" ] }, { diff --git a/doc/tutorial/climada_util_calibrate.ipynb b/doc/tutorial/climada_util_calibrate.ipynb index 202296c73..0efefc5a2 100644 --- a/doc/tutorial/climada_util_calibrate.ipynb +++ b/doc/tutorial/climada_util_calibrate.ipynb @@ -1039,7 +1039,7 @@ "from climada.util.constants import SYSTEM_DIR\n", "\n", "emdat = pd.read_csv(SYSTEM_DIR / \"tc_impf_cal_v01_EDR.csv\")\n", - "emdat_subset = emdat[(emdat.cal_region2 == \"NA1\") & (emdat.year >= 2010)]\n", + "emdat_subset = emdat[(emdat[\"cal_region2\"] == \"NA1\") & (emdat[\"year\"] >= 2010)]\n", "emdat_subset" ] }, @@ -1689,7 +1689,7 @@ "# tracks.plot()\n", "\n", "# # Calculate windfield for the tracks\n", - "# centroids = Centroids.from_lat_lon(exposure.gdf.latitude, exposure.gdf.longitude)\n", + "# centroids = Centroids.from_lat_lon(exposure.gdf['latitude'], exposure.gdf['longitude'])\n", "# hazard = TropCyclone.from_tracks(tracks, centroids)" ] },
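For reviewers: a minimal usage sketch of the new `climada.util.interpolation` functions, written against the signatures introduced in this diff. The sample arrays are illustrative only; the expected outputs follow the unit tests above.

```python
import numpy as np

from climada.util.interpolation import (
    group_frequency,
    interpolate_ev,
    stepfunction_ev,
)

x_train = np.array([1., 3., 5.])    # e.g. hazard intensities, sorted ascending
y_train = np.array([8., 4., 2.])    # e.g. exceedance frequencies
x_test = np.array([0., 2., 4., 6.])

# Linear interpolation: x_test values below the training range get y_train[0],
# values above it get y_asymptotic (np.nan by default, here 0.)
print(interpolate_ev(x_test, x_train, y_train, y_asymptotic=0.))
# -> [8. 6. 3. 0.]

# Log-log interpolation with extrapolation beyond the training range
print(interpolate_ev(
    np.array([1e0, 1e2]), np.array([1e1, 1e3]), np.array([1e1, 1e5]),
    logx=True, logy=True, extrapolation=True,
))
# -> [1.e-01 1.e+03]

# Step function: each x_test value maps to the y_train value of the next
# x_train point at or above it; beyond the last point, y_asymptotic is used
print(stepfunction_ev(x_test, x_train, y_train, y_asymptotic=0.))
# -> [8. 4. 2. 0.]

# Aggregate frequencies of values that coincide after rounding to
# n_sig_dig significant digits (here the default, 2)
print(group_frequency([0.1, 0.1, 0.2], [10., 10.04, 20.]))
# -> (array([0.2, 0.2]), array([10., 20.]))
```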