From 266d509b616ea8a09ce5b8e37ebd0375f23d07b2 Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Thu, 29 Feb 2024 16:26:24 -0900 Subject: [PATCH 01/54] Fuse pts function and add raster-point logic --- examples/basic/plot_icp_coregistration.py | 6 +- tests/test_coreg/test_affine.py | 4 +- tests/test_coreg/test_base.py | 6 +- tests/test_coreg/test_biascorr.py | 22 +- xdem/coreg/affine.py | 22 +- xdem/coreg/base.py | 635 ++++++++++------------ xdem/coreg/biascorr.py | 26 +- 7 files changed, 340 insertions(+), 381 deletions(-) diff --git a/examples/basic/plot_icp_coregistration.py b/examples/basic/plot_icp_coregistration.py index 2fbf28e6..a3496101 100644 --- a/examples/basic/plot_icp_coregistration.py +++ b/examples/basic/plot_icp_coregistration.py @@ -73,11 +73,11 @@ for i, (approach, name) in enumerate(approaches): approach.fit( - reference_dem=dem, - dem_to_be_aligned=rotated_dem, + reference_elev=dem, + to_be_aligned_elev=rotated_dem, ) - corrected_dem = approach.apply(dem=rotated_dem) + corrected_dem = approach.apply(elev=rotated_dem) diff = dem - corrected_dem diff --git a/tests/test_coreg/test_affine.py b/tests/test_coreg/test_affine.py index 251d68c8..5619c495 100644 --- a/tests/test_coreg/test_affine.py +++ b/tests/test_coreg/test_affine.py @@ -97,8 +97,8 @@ def test_vertical_shift(self) -> None: assert vshiftcorr is not vshiftcorr2 # Fit the corrected DEM to see if the vertical shift will be close to or at zero vshiftcorr2.fit( - reference_dem=self.ref.data, - dem_to_be_aligned=tba_unshifted, + reference_elev=self.ref.data, + to_be_aligned_elev=tba_unshifted, transform=self.ref.transform, crs=self.ref.crs, inlier_mask=self.inlier_mask, diff --git a/tests/test_coreg/test_base.py b/tests/test_coreg/test_base.py index aab04f54..c7c9f469 100644 --- a/tests/test_coreg/test_base.py +++ b/tests/test_coreg/test_base.py @@ -267,10 +267,10 @@ def test_coreg_raster_and_ndarray_args(self) -> None: vshiftcorr_a = vshiftcorr_r.copy() # Fit the data - vshiftcorr_r.fit(reference_dem=dem1, dem_to_be_aligned=dem2) + vshiftcorr_r.fit(reference_elev=dem1, to_be_aligned_elev=dem2) vshiftcorr_a.fit( - reference_dem=dem1.data, - dem_to_be_aligned=dem2.reproject(dem1, silent=True).data, + reference_elev=dem1.data, + to_be_aligned_elev=dem2.reproject(dem1, silent=True).data, transform=dem1.transform, crs=dem1.crs, ) diff --git a/tests/test_coreg/test_biascorr.py b/tests/test_coreg/test_biascorr.py index 04d8f943..61c2a134 100644 --- a/tests/test_coreg/test_biascorr.py +++ b/tests/test_coreg/test_biascorr.py @@ -167,7 +167,7 @@ def test_biascorr__fit_1d(self, fit_func, fit_optimizer, capsys) -> None: assert bcorr._meta["bias_var_names"] == ["elevation"] # Apply the correction - bcorr.apply(dem=self.tba, bias_vars=bias_vars_dict) + bcorr.apply(elev=self.tba, bias_vars=bias_vars_dict) @pytest.mark.parametrize( "fit_func", (polynomial_2d, lambda x, a, b, c, d: a * x[0] + b * x[1] + c**d) @@ -197,7 +197,7 @@ def test_biascorr__fit_2d(self, fit_func, fit_optimizer) -> None: assert bcorr._meta["bias_var_names"] == ["elevation", "slope"] # Apply the correction - bcorr.apply(dem=self.tba, bias_vars=bias_vars_dict) + bcorr.apply(elev=self.tba, bias_vars=bias_vars_dict) @pytest.mark.parametrize("bin_sizes", (10, {"elevation": 20}, {"elevation": (0, 500, 1000)})) # type: ignore @pytest.mark.parametrize("bin_statistic", [np.median, np.nanmean]) # type: ignore @@ -219,7 +219,7 @@ def test_biascorr__bin_1d(self, bin_sizes, bin_statistic) -> None: assert bcorr._meta["bias_var_names"] == ["elevation"] # Apply the correction - bcorr.apply(dem=self.tba, bias_vars=bias_vars_dict) + bcorr.apply(elev=self.tba, bias_vars=bias_vars_dict) @pytest.mark.parametrize("bin_sizes", (10, {"elevation": (0, 500, 1000), "slope": (0, 20, 40)})) # type: ignore @pytest.mark.parametrize("bin_statistic", [np.median, np.nanmean]) # type: ignore @@ -241,7 +241,7 @@ def test_biascorr__bin_2d(self, bin_sizes, bin_statistic) -> None: assert bcorr._meta["bias_var_names"] == ["elevation", "slope"] # Apply the correction - bcorr.apply(dem=self.tba, bias_vars=bias_vars_dict) + bcorr.apply(elev=self.tba, bias_vars=bias_vars_dict) @pytest.mark.parametrize( "fit_func", ("norder_polynomial", "nfreq_sumsin", lambda x, a, b: x[0] * a + b) @@ -283,7 +283,7 @@ def test_biascorr__bin_and_fit_1d(self, fit_func, fit_optimizer, bin_sizes, bin_ assert bcorr._meta["bias_var_names"] == ["elevation"] # Apply the correction - bcorr.apply(dem=self.tba, bias_vars=bias_vars_dict) + bcorr.apply(elev=self.tba, bias_vars=bias_vars_dict) @pytest.mark.parametrize( "fit_func", (polynomial_2d, lambda x, a, b, c, d: a * x[0] + b * x[1] + c**d) @@ -321,7 +321,7 @@ def test_biascorr__bin_and_fit_2d(self, fit_func, fit_optimizer, bin_sizes, bin_ assert bcorr._meta["bias_var_names"] == ["elevation", "slope"] # Apply the correction - bcorr.apply(dem=self.tba, bias_vars=bias_vars_dict) + bcorr.apply(elev=self.tba, bias_vars=bias_vars_dict) def test_biascorr1d(self) -> None: """ @@ -444,7 +444,7 @@ def test_directionalbias__synthetic(self, angle, nb_freq) -> None: plt.show() dirbias = biascorr.DirectionalBias(angle=angle, fit_or_bin="bin", bin_sizes=10000) - dirbias.fit(reference_dem=self.ref, dem_to_be_aligned=bias_dem, subsample=10000, random_state=42) + dirbias.fit(reference_elev=self.ref, to_be_aligned_elev=bias_dem, subsample=10000, random_state=42) xdem.spatialstats.plot_1d_binning( df=dirbias._meta["bin_dataframe"], var_name="angle", statistic_name="nanmedian", min_count=0 ) @@ -464,8 +464,8 @@ def test_directionalbias__synthetic(self, angle, nb_freq) -> None: (0, 2 * np.pi), ] dirbias.fit( - reference_dem=self.ref, - dem_to_be_aligned=bias_dem, + reference_elev=self.ref, + to_be_aligned_elev=bias_dem, subsample=10000, random_state=42, bounds_amp_wave_phase=bounds, @@ -516,7 +516,7 @@ def test_deramp__synthetic(self, order: int) -> None: # Fit deramp = biascorr.Deramp(poly_order=order) - deramp.fit(reference_dem=self.ref, dem_to_be_aligned=bias_dem, subsample=10000, random_state=42) + deramp.fit(reference_elev=self.ref, to_be_aligned_elev=bias_dem, subsample=10000, random_state=42) # Check high-order parameters are the same within 10% fit_params = deramp._meta["fit_params"] @@ -569,7 +569,7 @@ def test_terrainbias__synthetic(self) -> None: ) # We don't want to subsample here, otherwise it might be very hard to derive maximum curvature... # TODO: Add the option to get terrain attribute before subsampling in the fit subclassing logic? - tb.fit(reference_dem=self.ref, dem_to_be_aligned=bias_dem, random_state=42) + tb.fit(reference_elev=self.ref, to_be_aligned_elev=bias_dem, random_state=42) # Check high-order parameters are the same within 10% bin_df = tb._meta["bin_dataframe"] diff --git a/xdem/coreg/affine.py b/xdem/coreg/affine.py index 0061646a..d85e7dee 100644 --- a/xdem/coreg/affine.py +++ b/xdem/coreg/affine.py @@ -298,7 +298,7 @@ def _to_matrix_func(self) -> NDArrayf: raise NotImplementedError("This should be implemented by subclassing") - def _fit_func( + def _fit_rst_rst( self, ref_dem: NDArrayf, tba_dem: NDArrayf, @@ -350,7 +350,7 @@ def __init__( super().__init__(meta={"vshift_func": vshift_func}, subsample=subsample) - def _fit_func( + def _fit_rst_rst( self, ref_dem: NDArrayf, tba_dem: NDArrayf, @@ -456,7 +456,7 @@ def __init__( super().__init__(subsample=subsample) - def _fit_func( + def _fit_rst_rst( self, ref_dem: NDArrayf, tba_dem: NDArrayf, @@ -501,9 +501,9 @@ def _fit_func( columns=["E", "N", "z", "nx", "ny", "nz"], ) - self._fit_pts_func(ref_dem=ref_pts, tba_dem=tba_dem, transform=transform, verbose=verbose, z_name="z") + self._fit_rst_pts(ref_dem=ref_pts, tba_dem=tba_dem, transform=transform, verbose=verbose, z_name="z") - def _fit_pts_func( + def _fit_rst_pts( self, ref_dem: pd.DataFrame, tba_dem: RasterType | NDArrayf, @@ -606,7 +606,7 @@ def __init__(self, subsample: int | float = 5e5) -> None: super().__init__(subsample=subsample) - def _fit_func( + def _fit_rst_rst( self, ref_dem: NDArrayf, tba_dem: NDArrayf, @@ -692,7 +692,7 @@ def __init__(self, max_iterations: int = 10, offset_threshold: float = 0.05, sub super().__init__(subsample=subsample) - def _fit_func( + def _fit_rst_rst( self, ref_dem: NDArrayf, tba_dem: NDArrayf, @@ -830,7 +830,7 @@ def _fit_func( self._meta["vshift"] = vshift self._meta["resolution"] = resolution - def _fit_pts_func( + def _fit_rst_pts( self, ref_dem: pd.DataFrame, tba_dem: RasterType, @@ -1049,7 +1049,7 @@ def __init__( super().__init__(subsample=subsample) - def _fit_pts_func( + def _fit_rst_pts( self, ref_dem: pd.DataFrame, tba_dem: RasterType, @@ -1135,7 +1135,7 @@ def func_cost(x: tuple[float, float]) -> np.floating[Any]: self._meta["vshift"] = vshift self._meta["resolution"] = resolution - def _fit_func( + def _fit_rst_rst( self, ref_dem: NDArrayf, tba_dem: NDArrayf, @@ -1157,7 +1157,7 @@ def _fit_func( ref_dem["N"] = ref_dem.geometry.y ref_dem.rename(columns={"b1": "z"}, inplace=True) tba_dem = Raster.from_array(tba_dem, transform=transform, crs=crs, nodata=-9999.0) - self._fit_pts_func(ref_dem=ref_dem, tba_dem=tba_dem, transform=transform, **kwargs) + self._fit_rst_pts(ref_dem=ref_dem, tba_dem=tba_dem, transform=transform, **kwargs) def _to_matrix_func(self) -> NDArrayf: """Return a transformation matrix from the estimated offsets.""" diff --git a/xdem/coreg/base.py b/xdem/coreg/base.py index 0ce059c1..18ce8ad2 100644 --- a/xdem/coreg/base.py +++ b/xdem/coreg/base.py @@ -29,6 +29,7 @@ import geoutils as gu import numpy as np import pandas as pd +import geopandas as gpd import rasterio as rio import rasterio.warp # pylint: disable=unused-import import scipy @@ -295,14 +296,14 @@ def _mask_as_array(reference_raster: gu.Raster, mask: str | gu.Vector | gu.Raste return mask_array - -def _preprocess_coreg_raster_input( +def _preprocess_coreg_fit_raster_raster( reference_dem: NDArrayf | MArrayf | RasterType, dem_to_be_aligned: NDArrayf | MArrayf | RasterType, inlier_mask: NDArrayb | Mask | None = None, transform: rio.transform.Affine | None = None, crs: rio.crs.CRS | None = None, ) -> tuple[NDArrayf, NDArrayf, NDArrayb, affine.Affine, rio.crs.CRS]: + """Pre-processing and checks of fit() for two raster input.""" # Validate that both inputs are valid array-like (or Raster) types. if not all(isinstance(dem, (np.ndarray, gu.Raster)) for dem in (reference_dem, dem_to_be_aligned)): @@ -378,6 +379,124 @@ def _preprocess_coreg_raster_input( return ref_dem, tba_dem, inlier_mask, transform, crs +def _preprocess_coreg_fit_raster_point( + raster_elev: NDArrayf | MArrayf | RasterType, + point_elev: gpd.GeoDataFrame, + inlier_mask: NDArrayb | Mask | None = None, + transform: rio.transform.Affine | None = None, + crs: rio.crs.CRS | None = None,): + """Pre-processing and checks of fit for raster-point input.""" + + # TODO: Convert to point cloud once class is done + if isinstance(raster_elev, gu.Raster): + ref_dem = raster_elev.data + crs = raster_elev.crs + transform = raster_elev.transform + else: + ref_dem = raster_elev + crs = crs + transform = transform + + if transform is None: + raise ValueError("'transform' must be given if both DEMs are array-like.") + + if crs is None: + raise ValueError("'crs' must be given if both DEMs are array-like.") + + # TODO: Convert to point cloud? + # Convert geodataframe to vector + tba_dem = point_elev.to_crs(crs=crs) + + return ref_dem, tba_dem, inlier_mask, transform, crs + +def _preprocess_coreg_fit_point_point( + reference_elev: gpd.GeoDataFrame, + to_be_aligned_elev: gpd.GeoDataFrame): + """Pre-processing and checks of fit for point-point input.""" + + ref_dem = reference_elev + tba_dem = to_be_aligned_elev.to_crs(crs=reference_elev.crs) + + return ref_dem, tba_dem + +def _preprocess_coreg_fit( + reference_elev: NDArrayf | MArrayf | RasterType | gpd.GeoDataFrame, + to_be_aligned_elev: NDArrayf | MArrayf | RasterType | gpd.GeoDataFrame, + inlier_mask: NDArrayb | Mask | None = None, + transform: rio.transform.Affine | None = None, + crs: rio.crs.CRS | None = None,): + """Pre-processing and checks of fit for any input.""" + + if not all(isinstance(dem, (np.ndarray, gu.Raster, gpd.GeoDataFrame)) for dem in (reference_elev, to_be_aligned_elev)): + raise ValueError("Input elevation data should be a raster, an array or a geodataframe.") + + # If both inputs are raster or arrays, reprojection on the same grid is needed for raster-raster methods + if all(isinstance(dem, (np.ndarray, gu.Raster)) for dem in (reference_elev, to_be_aligned_elev)): + ref_dem, tba_dem, inlier_mask, transform, crs = \ + _preprocess_coreg_fit_raster_raster(reference_dem=reference_elev, dem_to_be_aligned=to_be_aligned_elev, + inlier_mask=inlier_mask, transform=transform, crs=crs) + + # If one input is raster, and the other is point, we reproject the point data to the same CRS and extract arrays + elif any(isinstance(dem, (np.ndarray, gu.Raster)) for dem in (reference_elev, to_be_aligned_elev)): + if isinstance(reference_elev, (np.ndarray, gu.Raster)): + raster_dem = reference_elev + point_dem = to_be_aligned_elev + else: + raster_dem = to_be_aligned_elev + point_dem = reference_elev + + ref_dem, tba_dem, inlier_mask, transform, crs = \ + _preprocess_coreg_fit_raster_point(raster_elev=raster_dem, point_elev=point_dem, + inlier_mask=inlier_mask, transform=transform, crs=crs) + + # If both inputs are points, simply reproject to the same CRS + else: + ref_dem, tba_dem = _preprocess_coreg_fit_point_point(reference_elev=reference_elev, + to_be_aligned_elev=to_be_aligned_elev) + + return ref_dem, tba_dem, inlier_mask, transform, crs + +def _preprocess_coreg_apply( + elev: NDArrayf | MArrayf | RasterType | gpd.GeoDataFrame, + transform: rio.transform.Affine | None = None, + crs: rio.crs.CRS | None = None): + """Pre-processing and checks of apply for any input.""" + + if not isinstance(elev, (np.ndarray, gu.Raster, gpd.GeoDataFrame)): + raise ValueError("Input elevation data should be a raster, an array or a geodataframe.") + + # If input is geodataframe + if isinstance(elev, gpd.GeoDataFrame): + elev_out = elev + + # If input is a raster or array + elif isinstance(elev, (gu.Raster, np.ndarray)): + + # If input is raster + if isinstance(elev, gu.Raster): + if transform is None: + transform = elev.transform + else: + warnings.warn(f"DEM of type {type(elev)} overrides the given 'transform'") + if crs is None: + crs = elev.crs + else: + warnings.warn(f"DEM of type {type(elev)} overrides the given 'crs'") + + # If input is an array + else: + if transform is None: + raise ValueError("'transform' must be given if DEM is array-like.") + if crs is None: + raise ValueError("'crs' must be given if DEM is array-like.") + + # The array to provide the functions will be an ndarray with NaNs for masked out areas. + elev_out, elev_mask = get_array_and_mask(elev) + + if np.all(elev_mask): + raise ValueError("'dem' had only NaNs") + + return elev_out, transform, crs # TODO: Re-structure AffineCoreg apply function and move there? @@ -773,14 +892,15 @@ def _get_subsample_on_valid_mask(self, valid_mask: NDArrayb, verbose: bool = Fal def fit( self: CoregType, - reference_dem: NDArrayf | MArrayf | RasterType, - dem_to_be_aligned: NDArrayf | MArrayf | RasterType, + reference_elev: NDArrayf | MArrayf | RasterType | gpd.GeoDataFrame, + to_be_aligned_elev: NDArrayf | MArrayf | RasterType | gpd.GeoDataFrame, inlier_mask: NDArrayb | Mask | None = None, - transform: rio.transform.Affine | None = None, - crs: rio.crs.CRS | None = None, bias_vars: dict[str, NDArrayf | MArrayf | RasterType] | None = None, weights: NDArrayf | None = None, subsample: float | int | None = None, + transform: rio.transform.Affine | None = None, + crs: rio.crs.CRS | None = None, + z_name: str = "z", verbose: bool = False, random_state: None | np.random.RandomState | np.random.Generator | int = None, **kwargs: Any, @@ -788,16 +908,17 @@ def fit( """ Estimate the coregistration transform on the given DEMs. - :param reference_dem: 2D array of elevation values acting reference. - :param dem_to_be_aligned: 2D array of elevation values to be aligned. - :param inlier_mask: Optional. 2D boolean array of areas to include in the analysis (inliers=True). - :param transform: Optional. Transform of the reference_dem. Mandatory if DEM provided as array. - :param crs: Optional. CRS of the reference_dem. Mandatory if DEM provided as array. - :param bias_vars: Optional, only for some bias correction classes. 2D array of bias variables used. - :param weights: Optional. Per-pixel weights for the coregistration. + :param reference_elev: Reference elevation, either a DEM or an elevation point cloud. + :param to_be_aligned_elev: To-be-aligned elevation, either a DEM or an elevation point cloud. + :param inlier_mask: Mask or boolean array of areas to include (inliers=True). + :param bias_vars: Auxiliary variables for certain bias correction classes, as raster or arrays. + :param weights: Array of weights for the coregistration. :param subsample: Subsample the input to increase performance. <1 is parsed as a fraction. >1 is a pixel count. - :param verbose: Print progress messages to stdout. - :param random_state: Random state or seed number to use for calculations (to fix random sampling during testing) + :param transform: Transform of the reference elevation, only if provided as 2D array. + :param crs: CRS of the reference elevation, only if provided as 2D array. + :param z_name: Column name to use as elevation, only for point elevation data passed as geodataframe. + :param verbose: Print progress messages. + :param random_state: Random state or seed number to use for calculations (to fix random sampling). """ if weights is not None: @@ -826,10 +947,13 @@ def fit( if self._meta["subsample"] != 1: self._meta["random_state"] = random_state - # Pre-process the inputs, by reprojecting and subsampling - ref_dem, tba_dem, inlier_mask, transform, crs = _preprocess_coreg_raster_input( - reference_dem=reference_dem, - dem_to_be_aligned=dem_to_be_aligned, + # TODO: Add preproc for points too + # TODO: Rename into "checks", because not much is preprocessed in the end + # (has to happen in the _fit_func itself, whether for subsampling or + # Pre-process the inputs, by reprojecting and converting to arrays + ref_dem, tba_dem, inlier_mask, transform, crs = _preprocess_coreg_fit( + reference_elev=reference_elev, + to_be_aligned_elev=to_be_aligned_elev, inlier_mask=inlier_mask, transform=transform, crs=crs, @@ -869,8 +993,8 @@ def fit( def residuals( self, - reference_dem: NDArrayf, - dem_to_be_aligned: NDArrayf, + reference_elev: NDArrayf, + to_be_aligned_elev: NDArrayf, inlier_mask: NDArrayb | None = None, transform: rio.transform.Affine | None = None, crs: rio.crs.CRS | None = None, @@ -880,8 +1004,8 @@ def residuals( """ Calculate the residual offsets (the difference) between two DEMs after applying the transformation. - :param reference_dem: 2D array of elevation values acting reference. - :param dem_to_be_aligned: 2D array of elevation values to be aligned. + :param reference_elev: 2D array of elevation values acting reference. + :param to_be_aligned_elev: 2D array of elevation values to be aligned. :param inlier_mask: Optional. 2D boolean array of areas to include in the analysis (inliers=True). :param transform: Optional. Transform of the reference_dem. Mandatory in some cases. :param crs: Optional. CRS of the reference_dem. Mandatory in some cases. @@ -892,12 +1016,12 @@ def residuals( """ # Apply the transformation to the dem to be aligned - aligned_dem = self.apply(dem_to_be_aligned, transform=transform, crs=crs)[0] + aligned_dem = self.apply(to_be_aligned_elev, transform=transform, crs=crs)[0] # Pre-process the inputs, by reprojecting and subsampling - ref_dem, align_dem, inlier_mask, transform, crs = _preprocess_coreg_raster_input( - reference_dem=reference_dem, - dem_to_be_aligned=aligned_dem, + ref_dem, align_dem, inlier_mask, transform, crs = _preprocess_coreg_fit( + reference_elev=reference_elev, + to_be_aligned_elev=to_be_aligned_elev, inlier_mask=inlier_mask, transform=transform, crs=crs, @@ -914,164 +1038,14 @@ def residuals( # Return the difference values within the full inlier mask return diff[full_mask] - def fit_pts( - self: CoregType, - reference_dem: NDArrayf | MArrayf | RasterType | pd.DataFrame, - dem_to_be_aligned: RasterType, - inlier_mask: NDArrayb | Mask | None = None, - transform: rio.transform.Affine | None = None, - subsample: float | int = 1.0, - verbose: bool = False, - mask_high_curv: bool = False, - order: int = 1, - z_name: str = "z", - weights: str | None = None, - random_state: None | np.random.RandomState | np.random.Generator | int = None, - ) -> CoregType: - """ - Estimate the coregistration transform between a DEM and a reference point elevation data. - - :param reference_dem: Point elevation data acting reference. - :param dem_to_be_aligned: 2D array of elevation values to be aligned. - :param inlier_mask: Optional. 2D boolean array of areas to include in the analysis (inliers=True). - :param transform: Optional. Transform of the reference_dem. Mandatory in some cases. - :param subsample: Subsample the input to increase performance. <1 is parsed as a fraction. >1 is a pixel count. - :param verbose: Print progress messages to stdout. - :param order: interpolation 0=nearest, 1=linear, 2=cubic. - :param z_name: the column name of dataframe used for elevation differencing - :param mask_high_curv: Mask out high-curvature points (>5 maxc) to increase the robustness. - :param weights: the column name of dataframe used for weight, should have the same length with z_name columns - :param random_state: Random state or seed number to use for calculations (to fix random sampling during testing) - """ - - # Validate that at least one input is a valid array-like (or Raster) types. - if not isinstance(dem_to_be_aligned, (np.ndarray, gu.Raster)): - raise ValueError( - "The dem_to_be_aligned needs to be array-like (implement a numpy array interface)." - f"'dem_to_be_aligned': {dem_to_be_aligned}" - ) - - # DEM to dataframe if ref_dem is raster - # How to make sure sample point locates in stable terrain? - if isinstance(reference_dem, (np.ndarray, gu.Raster)): - reference_dem = _df_sampling_from_dem( - reference_dem, dem_to_be_aligned, subsample=subsample, order=1, offset=None - ) - - # Validate that at least one input is a valid point data type. - if not isinstance(reference_dem, pd.DataFrame): - raise ValueError( - "The reference_dem needs to be point data format (pd.Dataframe)." f"'reference_dem': {reference_dem}" - ) - - # If any input is a Raster, use its transform if 'transform is None'. - # If 'transform' was given and any input is a Raster, trigger a warning. - # Finally, extract only the data of the raster. - for name, dem in [("dem_to_be_aligned", dem_to_be_aligned)]: - if hasattr(dem, "transform"): - if transform is None: - transform = dem.transform - elif transform is not None: - warnings.warn(f"'{name}' of type {type(dem)} overrides the given 'transform'") - - if transform is None: - raise ValueError("'transform' must be given if the dem_to_be_align DEM is array-like.") - - _, tba_mask = get_array_and_mask(dem_to_be_aligned) - - if np.all(tba_mask): - raise ValueError("'dem_to_be_aligned' had only NaNs") - - tba_dem = dem_to_be_aligned.copy() - ref_valid = np.isfinite(reference_dem[z_name].values) - - if np.all(~ref_valid): - raise ValueError("'reference_dem' point data only contains NaNs") - - ref_dem = reference_dem[ref_valid] - - if mask_high_curv: - maxc = np.maximum( - np.abs(get_terrain_attribute(tba_dem, attribute=["planform_curvature", "profile_curvature"])), axis=0 - ) - # Mask very high curvatures to avoid resolution biases - mask_hc = maxc.data > 5.0 - else: - mask_hc = np.zeros(tba_dem.data.mask.shape, dtype=bool) - if "planc" in ref_dem.columns and "profc" in ref_dem.columns: - ref_dem = ref_dem.query("planc < 5 and profc < 5") - else: - print("Warning: There is no curvature in dataframe. Set mask_high_curv=True for more robust results") - - if any(col not in ref_dem for col in ["E", "N"]): - if "geometry" in ref_dem: - ref_dem["E"] = ref_dem.geometry.x - ref_dem["N"] = ref_dem.geometry.y - else: - raise ValueError("Reference points need E/N columns or point geometries") - - points = np.array((ref_dem["E"].values, ref_dem["N"].values)).T - - # Make sure that the mask has an expected format. - if inlier_mask is not None: - if isinstance(inlier_mask, Mask): - inlier_mask = inlier_mask.data.filled(False).squeeze() - else: - inlier_mask = np.asarray(inlier_mask).squeeze() - assert inlier_mask.dtype == bool, f"Invalid mask dtype: '{inlier_mask.dtype}'. Expected 'bool'" - - if np.all(~inlier_mask): - raise ValueError("'inlier_mask' had no inliers.") - - final_mask = np.logical_and.reduce((~tba_dem.data.mask, inlier_mask, ~mask_hc)) - else: - final_mask = np.logical_and(~tba_dem.data.mask, ~mask_hc) - - mask_raster = tba_dem.copy(new_array=final_mask.astype(np.float32)) - - ref_inlier = mask_raster.interp_points(points, order=0) - ref_inlier = ref_inlier.astype(bool) - - if np.all(~ref_inlier): - raise ValueError("Intersection of 'reference_dem' and 'dem_to_be_aligned' had only NaNs") - - ref_dem = ref_dem[ref_inlier] - - # If subsample is not equal to one, subsampling should be performed. - if subsample != 1.0: - - # Randomly pick N inliers in the full_mask where N=subsample - random_valids = subsample_array( - ref_dem[z_name].values, subsample=subsample, return_indices=True, random_state=random_state - ) - - # Subset to the N random inliers - ref_dem = ref_dem.iloc[random_valids] - - # Run the associated fitting function - self._fit_pts_func( - ref_dem=ref_dem, - tba_dem=tba_dem, - transform=transform, - weights=weights, - verbose=verbose, - order=order, - z_name=z_name, - ) - - # Flag that the fitting function has been called. - self._fit_called = True - - return self - @overload def apply( self, - dem: MArrayf, - transform: rio.transform.Affine | None = None, - crs: rio.crs.CRS | None = None, + elev: MArrayf, bias_vars: dict[str, NDArrayf | MArrayf | RasterType] | None = None, resample: bool = True, + transform: rio.transform.Affine | None = None, + crs: rio.crs.CRS | None = None, **kwargs: Any, ) -> tuple[MArrayf, rio.transform.Affine]: ... @@ -1079,11 +1053,11 @@ def apply( @overload def apply( self, - dem: NDArrayf, - transform: rio.transform.Affine | None = None, - crs: rio.crs.CRS | None = None, + elev: NDArrayf, bias_vars: dict[str, NDArrayf | MArrayf | RasterType] | None = None, resample: bool = True, + transform: rio.transform.Affine | None = None, + crs: rio.crs.CRS | None = None, **kwargs: Any, ) -> tuple[NDArrayf, rio.transform.Affine]: ... @@ -1091,33 +1065,33 @@ def apply( @overload def apply( self, - dem: RasterType, - transform: rio.transform.Affine | None = None, - crs: rio.crs.CRS | None = None, + elev: RasterType | gpd.GeoDataFrame, bias_vars: dict[str, NDArrayf | MArrayf | RasterType] | None = None, resample: bool = True, + transform: rio.transform.Affine | None = None, + crs: rio.crs.CRS | None = None, **kwargs: Any, - ) -> RasterType: + ) -> RasterType | gpd.GeoDataFrame: ... def apply( self, - dem: RasterType | NDArrayf | MArrayf, - transform: rio.transform.Affine | None = None, - crs: rio.crs.CRS | None = None, + elev: RasterType | NDArrayf | MArrayf | gpd.GeoDataFrame, bias_vars: dict[str, NDArrayf | MArrayf | RasterType] | None = None, resample: bool = True, + transform: rio.transform.Affine | None = None, + crs: rio.crs.CRS | None = None, **kwargs: Any, ) -> RasterType | tuple[NDArrayf, rio.transform.Affine] | tuple[MArrayf, rio.transform.Affine]: """ Apply the estimated transform to a DEM. - :param dem: A DEM array or Raster to apply the transform on. - :param transform: Optional. The transform object of the DEM. Mandatory if 'dem' provided as array. - :param crs: Optional. CRS of the reference_dem. Mandatory if 'dem' provided as array. - :param bias_vars: Optional, only for some bias correction classes. 2D array of bias variables used. + :param elev: Elevation to apply the transform to, either a DEM or an elevation point cloud. + :param bias_vars: Only for some bias correction classes. 2D array of bias variables used. :param resample: If set to True, will reproject output Raster on the same grid as input. Otherwise, \ only the transform might be updated and no resampling is done. + :param transform: Geotransform of the elevation, only if provided as 2D array. + :param crs: CRS of elevation, only if provided as 2D array. :param kwargs: Any optional arguments to be passed to either self._apply_func or apply_matrix. Kwarg `resampling` can be set to any rio.warp.Resampling to use a different resampling in case \ `resample` is True, default is bilinear. @@ -1127,29 +1101,9 @@ def apply( if not self._fit_called and self._meta.get("matrix") is None: raise AssertionError(".fit() does not seem to have been called yet") - if isinstance(dem, gu.Raster): - if transform is None: - transform = dem.transform - else: - warnings.warn(f"DEM of type {type(dem)} overrides the given 'transform'") - if crs is None: - crs = dem.crs - else: - warnings.warn(f"DEM of type {type(dem)} overrides the given 'crs'") - - else: - if transform is None: - raise ValueError("'transform' must be given if DEM is array-like.") - if crs is None: - raise ValueError("'crs' must be given if DEM is array-like.") - - # The array to provide the functions will be an ndarray with NaNs for masked out areas. - dem_array, dem_mask = get_array_and_mask(dem) - - if np.all(dem_mask): - raise ValueError("'dem' had only NaNs") + elev_array = _preprocess_coreg_apply(elev=elev, transform=transform, crs=crs) - main_args = {"dem": dem_array, "transform": transform, "crs": crs} + main_args = {"dem": elev_array, "transform": transform, "crs": crs} # If bias_vars are defined, update dictionary content to array if bias_vars is not None: @@ -1184,7 +1138,7 @@ def apply( # Apply the matrix around the centroid (if defined, otherwise just from the center). applied_dem = apply_matrix( - dem_array, + elev, transform=transform, matrix=self.to_matrix(), centroid=self._meta.get("centroid"), @@ -1198,8 +1152,8 @@ def apply( applied_dem = applied_dem.astype("float32") # Set default dst_nodata - if isinstance(dem, gu.Raster): - dst_nodata = dem.nodata + if isinstance(elev, gu.Raster): + dst_nodata = elev.nodata else: dst_nodata = raster._default_nodata(applied_dem.dtype) @@ -1225,61 +1179,23 @@ def apply( final_mask = np.logical_or(~np.isfinite(applied_dem), applied_dem == dst_nodata) # If the DEM was a masked_array, copy the mask to the new DEM - if isinstance(dem, (np.ma.masked_array, gu.Raster)): + if isinstance(elev, (np.ma.masked_array, gu.Raster)): applied_dem = np.ma.masked_array(applied_dem, mask=final_mask) # type: ignore else: applied_dem[final_mask] = np.nan # If the input was a Raster, returns a Raster, else returns array and transform - if isinstance(dem, gu.Raster): - out_dem = dem.from_array(applied_dem, out_transform, crs, nodata=dem.nodata) + if isinstance(elev, gu.Raster): + out_dem = elev.from_array(applied_dem, out_transform, crs, nodata=elev.nodata) return out_dem else: return applied_dem, out_transform - def apply_pts(self, coords: NDArrayf) -> NDArrayf: - """ - Apply the estimated transform to a set of 3D points. - - :param coords: A (N, 3) array of X/Y/Z coordinates or one coordinate of shape (3,). - - :returns: The transformed coordinates. - """ - if not self._fit_called and self._meta.get("matrix") is None: - raise AssertionError(".fit() does not seem to have been called yet") - # If the coordinates represent just one coordinate - if np.shape(coords) == (3,): - coords = np.reshape(coords, (1, 3)) - - assert ( - len(np.shape(coords)) == 2 and np.shape(coords)[1] == 3 - ), f"'coords' shape must be (N, 3). Given shape: {np.shape(coords)}" - - coords_c = coords.copy() - - # See if an _apply_pts_func exists - try: - transformed_points = self._apply_pts_func(coords) - # If it doesn't exist, use opencv's perspectiveTransform - except NotImplementedError: - if self.is_affine: # This only works on it's rigid, however. - # Transform the points (around the centroid if it exists). - if self._meta.get("centroid") is not None: - coords_c -= self._meta["centroid"] - transformed_points = cv2.perspectiveTransform(coords_c.reshape(1, -1, 3), self.to_matrix()).squeeze() - if self._meta.get("centroid") is not None: - transformed_points += self._meta["centroid"] - - else: - raise ValueError("Coreg method is non-rigid but has not implemented _apply_pts_func") - - return transformed_points - @overload def error( self, - reference_dem: NDArrayf, - dem_to_be_aligned: NDArrayf, + reference_elev: NDArrayf, + to_be_aligned_elev: NDArrayf, error_type: list[str], inlier_mask: NDArrayb | None = None, transform: rio.transform.Affine | None = None, @@ -1290,8 +1206,8 @@ def error( @overload def error( self, - reference_dem: NDArrayf, - dem_to_be_aligned: NDArrayf, + reference_elev: NDArrayf, + to_be_aligned_elev: NDArrayf, error_type: str = "nmad", inlier_mask: NDArrayb | None = None, transform: rio.transform.Affine | None = None, @@ -1301,8 +1217,8 @@ def error( def error( self, - reference_dem: NDArrayf, - dem_to_be_aligned: NDArrayf, + reference_elev: NDArrayf, + to_be_aligned_elev: NDArrayf, error_type: str | list[str] = "nmad", inlier_mask: NDArrayb | None = None, transform: rio.transform.Affine | None = None, @@ -1320,8 +1236,8 @@ def error( - "mae": The mean absolute error of the residuals. - "count": The residual count. - :param reference_dem: 2D array of elevation values acting reference. - :param dem_to_be_aligned: 2D array of elevation values to be aligned. + :param reference_elev: 2D array of elevation values acting reference. + :param to_be_aligned_elev: 2D array of elevation values to be aligned. :param error_type: The type of error measure to calculate. May be a list of error types. :param inlier_mask: Optional. 2D boolean array of areas to include in the analysis (inliers=True). :param transform: Optional. Transform of the reference_dem. Mandatory in some cases. @@ -1333,8 +1249,8 @@ def error( error_type = [error_type] residuals = self.residuals( - reference_dem=reference_dem, - dem_to_be_aligned=dem_to_be_aligned, + reference_elev=reference_elev, + to_be_aligned_elev=to_be_aligned_elev, inlier_mask=inlier_mask, transform=transform, crs=crs, @@ -1371,6 +1287,101 @@ def count(res: NDArrayf) -> int: def _fit_func( self, + **kwargs: Any, + ) -> None: + """ + Distribute to _fit_rst_rst, fit_rst_pts or fit_pts_pts depending on input and method availability. + Needs to be _fit_func of the main class to simplify calls from CoregPipeline and BlockwiseCoreg. + """ + + # Determine if input is raster-raster, raster-point or point-point + if all(isinstance(dem, NDArrayf) for dem in (kwargs["ref_dem"], kwargs["tba_dem"])): + rop = "r-r" + elif all(isinstance(dem, gpd.GeoDataFrame) for dem in (kwargs["ref_dem"], kwargs["tba_dem"])): + rop = "p-p" + else: + rop = "r-p" + + # Fallback logic is always the same: 1/ raster-raster, 2/ raster-point, 3/ point-point + try_rp = False + try_pp = False + + # For raster-raster + if rop == "r-r": + # Check if raster-raster function exists, if yes run it and stop + try: + self._fit_rst_rst(**kwargs) + # Otherwise, convert the tba raster to points and try raster-points + except NotImplementedError: + warnings.warn( + f"No raster-raster method found for coregistration {self.__class__.__name__}, " + f"trying raster-point method by converting to-be-aligned DEM to points.", + UserWarning + ) + tba_dem_pts = gu.Raster.from_array(data=kwargs["tba_dem"], transform=kwargs["transform"], + crs=kwargs["crs"]).to_points().ds + kwargs.update({"tba_dem": tba_dem_pts}) + try_rp = True + + # For raster-point + if rop == "r-p" or try_rp: + try: + self._fit_rst_pts(**kwargs) + except NotImplementedError: + warnings.warn( + f"No raster-point method found for coregistration {self.__class__.__name__}, " + f"trying point-point method by converting all elevation data to points.", + UserWarning + ) + ref_dem_pts = gu.Raster.from_array(data=kwargs["ref_dem"], transform=kwargs["transform"], + crs=kwargs["crs"]).to_points().ds + kwargs.update({"ref_dem": ref_dem_pts}) + try_pp = True + + # For point-point + if rop == "p-p" or try_pp: + try: + self._fit_pts_pts(**kwargs) + except NotImplementedError: + if try_pp and try_rp: + raise NotImplementedError( + f"No raster-raster, raster-point or point-point method found for " + f"coregistration {self.__class__.__name__}.") + elif try_pp: + raise NotImplementedError( + f"No raster-point or point-point method found for coregistration {self.__class__.__name__}.") + else: + raise NotImplementedError(f"No point-point method found for coregistration {self.__class__.__name__}.") + + def _fit_rst_rst(self, + ref_dem: NDArrayf, + tba_dem: NDArrayf, + inlier_mask: NDArrayb, + transform: rio.transform.Affine, + crs: rio.crs.CRS, + weights: NDArrayf | None, + bias_vars: dict[str, NDArrayf] | None = None, + verbose: bool = False, + **kwargs: Any, + ) -> None: + # FOR DEVELOPERS: This function needs to be implemented. + raise NotImplementedError("This step has to be implemented by subclassing.") + + def _fit_rst_pts(self, + ref_dem: NDArrayf, + tba_dem: NDArrayf, + inlier_mask: NDArrayb, + transform: rio.transform.Affine, + crs: rio.crs.CRS, + weights: NDArrayf | None, + bias_vars: dict[str, NDArrayf] | None = None, + verbose: bool = False, + **kwargs: Any, + ) -> None: + # FOR DEVELOPERS: This function needs to be implemented. + raise NotImplementedError("This step has to be implemented by subclassing.") + + def _fit_pts_pts(self, ref_dem: NDArrayf, tba_dem: NDArrayf, inlier_mask: NDArrayb, @@ -1395,10 +1406,6 @@ def _apply_func( # FOR DEVELOPERS: This function is only needed for non-rigid transforms. raise NotImplementedError("This should have been implemented by subclassing") - def _apply_pts_func(self, coords: NDArrayf) -> NDArrayf: - # FOR DEVELOPERS: This function is only needed for non-rigid transforms. - raise NotImplementedError("This should have been implemented by subclassing") - class CoregPipeline(Coreg): """ @@ -1470,8 +1477,8 @@ def _parse_bias_vars(self, step: int, bias_vars: dict[str, NDArrayf] | None) -> def fit( self: CoregType, - reference_dem: NDArrayf | MArrayf | RasterType, - dem_to_be_aligned: NDArrayf | MArrayf | RasterType, + reference_elev: NDArrayf | MArrayf | RasterType, + to_be_aligned_elev: NDArrayf | MArrayf | RasterType, inlier_mask: NDArrayb | Mask | None = None, transform: rio.transform.Affine | None = None, crs: rio.crs.CRS | None = None, @@ -1499,9 +1506,9 @@ def fit( ) # Pre-process the inputs, by reprojecting and subsampling, without any subsampling (done in each step) - ref_dem, tba_dem, inlier_mask, transform, crs = _preprocess_coreg_raster_input( - reference_dem=reference_dem, - dem_to_be_aligned=dem_to_be_aligned, + ref_dem, tba_dem, inlier_mask, transform, crs = _preprocess_coreg_fit( + reference_elev=reference_elev, + to_be_aligned_elev=to_be_aligned_elev, inlier_mask=inlier_mask, transform=transform, crs=crs, @@ -1544,26 +1551,6 @@ def fit( return self - def _fit_pts_func( - self: CoregType, - ref_dem: NDArrayf | MArrayf | RasterType | pd.DataFrame, - tba_dem: RasterType, - verbose: bool = False, - **kwargs: Any, - ) -> CoregType: - - tba_dem_mod = tba_dem.copy() - - for i, coreg in enumerate(self.pipeline): - if verbose: - print(f"Running pipeline step: {i + 1} / {len(self.pipeline)}") - - coreg._fit_pts_func(ref_dem=ref_dem, tba_dem=tba_dem_mod, verbose=verbose, **kwargs) - coreg._fit_called = True - - tba_dem_mod = coreg.apply(tba_dem_mod) - return self - def _apply_func( self, dem: NDArrayf, @@ -1589,15 +1576,6 @@ def _apply_func( return dem_mod, out_transform - def _apply_pts_func(self, coords: NDArrayf) -> NDArrayf: - """Apply the coregistration steps sequentially to a set of points.""" - coords_mod = coords.copy() - - for coreg in self.pipeline: - coords_mod = coreg.apply_pts(coords_mod).reshape(coords_mod.shape) - - return coords_mod - def __iter__(self) -> Generator[Coreg, None, None]: """Iterate over the pipeline steps.""" yield from self.pipeline @@ -1680,8 +1658,8 @@ def __init__( def fit( self: CoregType, - reference_dem: NDArrayf | MArrayf | RasterType, - dem_to_be_aligned: NDArrayf | MArrayf | RasterType, + reference_elev: NDArrayf | MArrayf | RasterType, + to_be_aligned_elev: NDArrayf | MArrayf | RasterType, inlier_mask: NDArrayb | Mask | None = None, transform: rio.transform.Affine | None = None, crs: rio.crs.CRS | None = None, @@ -1713,13 +1691,15 @@ def fit( ) # Pre-process the inputs, by reprojecting and subsampling, without any subsampling (done in each step) - ref_dem, tba_dem, inlier_mask, transform, crs = _preprocess_coreg_raster_input( - reference_dem=reference_dem, - dem_to_be_aligned=dem_to_be_aligned, + ref_dem, tba_dem, inlier_mask, transform, crs = _preprocess_coreg_fit( + reference_elev=reference_elev, + to_be_aligned_elev=to_be_aligned_elev, inlier_mask=inlier_mask, transform=transform, crs=crs, ) + + # TODO: Blockwise can only work if one of the two is a Raster... or by defining a grid somehow? groups = self.subdivide_array(tba_dem.shape) indices = np.unique(groups) @@ -1755,8 +1735,8 @@ def process(i: int) -> dict[str, Any] | BaseException | None: # Try to run the coregistration. If it fails for any reason, skip it and save the exception. try: procstep.fit( - reference_dem=ref_subset, - dem_to_be_aligned=tba_subset, + reference_elev=ref_subset, + to_be_aligned_elev=tba_subset, transform=transform_subset, inlier_mask=mask_subset, bias_vars=bias_vars, @@ -1767,8 +1747,8 @@ def process(i: int) -> dict[str, Any] | BaseException | None: verbose=verbose, ) nmad, median = procstep.error( - reference_dem=ref_subset, - dem_to_be_aligned=tba_subset, + reference_elev=ref_subset, + to_be_aligned_elev=tba_subset, error_type=["nmad", "median"], inlier_mask=mask_subset, transform=transform_subset, @@ -2009,27 +1989,6 @@ def _apply_func( return warped_dem, transform - def _apply_pts_func(self, coords: NDArrayf) -> NDArrayf: - """Apply the scaling model to a set of points.""" - points = self.to_points() - - new_coords = coords.copy() - - for dim in range(0, 3): - with warnings.catch_warnings(): - # ZeroDivisionErrors may happen when the transformation is empty (which is fine) - warnings.filterwarnings("ignore", message="ZeroDivisionError") - model = scipy.interpolate.Rbf( - points[:, 0, 0], - points[:, 1, 0], - points[:, dim, 1] - points[:, dim, 0], - function="linear", - ) - - new_coords[:, dim] += model(coords[:, 0], coords[:, 1]) - - return new_coords - def warp_dem( dem: NDArrayf, diff --git a/xdem/coreg/biascorr.py b/xdem/coreg/biascorr.py index d601d600..d42303b5 100644 --- a/xdem/coreg/biascorr.py +++ b/xdem/coreg/biascorr.py @@ -139,7 +139,7 @@ def __init__( self._is_affine = False self._needs_vars = True - def _fit_func( # type: ignore + def _fit_rst_rst( # type: ignore self, ref_dem: NDArrayf, tba_dem: NDArrayf, @@ -412,7 +412,7 @@ def __init__( subsample, ) - def _fit_func( # type: ignore + def _fit_rst_rst( # type: ignore self, ref_dem: NDArrayf, tba_dem: NDArrayf, @@ -433,7 +433,7 @@ def _fit_func( # type: ignore "got {}.".format(len(bias_vars)) ) - super()._fit_func( + super()._fit_rst_rst( ref_dem=ref_dem, tba_dem=tba_dem, inlier_mask=inlier_mask, @@ -487,7 +487,7 @@ def __init__( subsample, ) - def _fit_func( # type: ignore + def _fit_rst_rst( # type: ignore self, ref_dem: NDArrayf, tba_dem: NDArrayf, @@ -507,7 +507,7 @@ def _fit_func( # type: ignore ", got {}.".format(len(bias_vars)) ) - super()._fit_func( + super()._fit_rst_rst( ref_dem=ref_dem, tba_dem=tba_dem, inlier_mask=inlier_mask, @@ -563,7 +563,7 @@ def __init__( subsample, ) - def _fit_func( # type: ignore + def _fit_rst_rst( # type: ignore self, ref_dem: NDArrayf, tba_dem: NDArrayf, @@ -580,7 +580,7 @@ def _fit_func( # type: ignore if bias_vars is None or len(bias_vars) <= 2: raise ValueError('At least three variables have to be provided through the argument "bias_vars".') - super()._fit_func( + super()._fit_rst_rst( ref_dem=ref_dem, tba_dem=tba_dem, inlier_mask=inlier_mask, @@ -629,7 +629,7 @@ def __init__( self._meta["angle"] = angle self._needs_vars = False - def _fit_func( # type: ignore + def _fit_rst_rst( # type: ignore self, ref_dem: NDArrayf, tba_dem: NDArrayf, @@ -656,7 +656,7 @@ def _fit_func( # type: ignore average_res = (transform[0] + abs(transform[4])) / 2 kwargs.update({"hop_length": average_res}) - super()._fit_func( + super()._fit_rst_rst( ref_dem=ref_dem, tba_dem=tba_dem, inlier_mask=inlier_mask, @@ -740,7 +740,7 @@ def __init__( self._meta["terrain_attribute"] = terrain_attribute self._needs_vars = False - def _fit_func( # type: ignore + def _fit_rst_rst( # type: ignore self, ref_dem: NDArrayf, tba_dem: NDArrayf, @@ -762,7 +762,7 @@ def _fit_func( # type: ignore ) # Run the parent function - super()._fit_func( + super()._fit_rst_rst( ref_dem=ref_dem, tba_dem=tba_dem, inlier_mask=inlier_mask, @@ -839,7 +839,7 @@ def __init__( self._meta["poly_order"] = poly_order self._needs_vars = False - def _fit_func( # type: ignore + def _fit_rst_rst( # type: ignore self, ref_dem: NDArrayf, tba_dem: NDArrayf, @@ -858,7 +858,7 @@ def _fit_func( # type: ignore # Coordinates (we don't need the actual ones, just array coordinates) xx, yy = np.meshgrid(np.arange(0, ref_dem.shape[1]), np.arange(0, ref_dem.shape[0])) - super()._fit_func( + super()._fit_rst_rst( ref_dem=ref_dem, tba_dem=tba_dem, inlier_mask=inlier_mask, From da7eb8798a3879f41860d3a7b3d8a7edc3721bad Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Thu, 29 Feb 2024 21:23:34 -0900 Subject: [PATCH 02/54] Several fixes --- tests/test_coreg/test_affine.py | 22 ++++++------- tests/test_coreg/test_base.py | 22 ++++++------- tests/test_coreg/test_biascorr.py | 6 ++-- xdem/coreg/affine.py | 16 ++++----- xdem/coreg/base.py | 55 +++++++++++++++++++++---------- xdem/coreg/biascorr.py | 14 ++++---- 6 files changed, 78 insertions(+), 57 deletions(-) diff --git a/tests/test_coreg/test_affine.py b/tests/test_coreg/test_affine.py index 5619c495..de6e095e 100644 --- a/tests/test_coreg/test_affine.py +++ b/tests/test_coreg/test_affine.py @@ -32,14 +32,14 @@ class TestAffineCoreg: inlier_mask = ~outlines.create_mask(ref) fit_params = dict( - reference_dem=ref.data, - dem_to_be_aligned=tba.data, + reference_elev=ref.data, + to_be_aligned_elev=tba.data, inlier_mask=inlier_mask, transform=ref.transform, crs=ref.crs, verbose=False, ) - # Create some 3D coordinates with Z coordinates being 0 to try the apply_pts functions. + # Create some 3D coordinates with Z coordinates being 0 to try the apply functions. points = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [0, 0, 0, 0]], dtype="float64").T def test_from_classmethods(self) -> None: @@ -50,13 +50,13 @@ def test_from_classmethods(self) -> None: matrix = np.diag(np.ones(4, dtype=float)) matrix[2, 3] = vshift coreg_obj = AffineCoreg.from_matrix(matrix) - transformed_points = coreg_obj.apply_pts(self.points) + transformed_points = coreg_obj.apply(self.points) assert transformed_points[0, 2] == vshift # Check that the from_translation function works as expected. x_offset = 5 coreg_obj2 = AffineCoreg.from_translation(x_off=x_offset) - transformed_points2 = coreg_obj2.apply_pts(self.points) + transformed_points2 = coreg_obj2.apply(self.points) assert np.array_equal(self.points[:, 0] + x_offset, transformed_points2[:, 0]) # Try to make a Coreg object from a nan translation (should fail). @@ -86,10 +86,10 @@ def test_vertical_shift(self) -> None: assert matrix[2, 3] == vshift, matrix # Check that the first z coordinate is now the vertical shift - assert vshiftcorr.apply_pts(self.points)[0, 2] == vshiftcorr._meta["vshift"] + assert vshiftcorr.apply(self.points)[0, 2] == vshiftcorr._meta["vshift"] # Apply the model to correct the DEM - tba_unshifted, _ = vshiftcorr.apply(self.tba.data, self.ref.transform, self.ref.crs) + tba_unshifted, _ = vshiftcorr.apply(self.tba.data, transform=self.ref.transform, crs=self.ref.crs) # Create a new vertical shift correction model vshiftcorr2 = coreg.VerticalShift() @@ -157,7 +157,7 @@ def test_gradientdescending(self, subsample: int = 10000, inlier_mask: bool = Tr # Run co-registration gds = xdem.coreg.GradientDescending(subsample=subsample) - gds.fit_pts( + gds.fit( self.ref.to_points().ds, self.tba, inlier_mask=inlier_mask, @@ -198,7 +198,7 @@ def test_coreg_example_shift(self, shift_px, coreg_class, points_or_raster, verb if points_or_raster == "raster": coreg_obj.fit(shifted_ref, self.ref, verbose=verbose, random_state=42) elif points_or_raster == "points": - coreg_obj.fit_pts(shifted_ref_points, self.ref, verbose=verbose, random_state=42) + coreg_obj.fit(shifted_ref_points, self.ref, verbose=verbose, random_state=42) if coreg_class.__name__ == "ICP": matrix = coreg_obj.to_matrix() @@ -260,7 +260,7 @@ def test_nuth_kaab(self) -> None: assert np.sqrt(np.mean(np.square(diff))) < 1 # Transform some arbitrary points. - transformed_points = nuth_kaab.apply_pts(self.points) + transformed_points = nuth_kaab.apply(self.points) # Check that the x shift is close to the pixel_shift * image resolution assert abs((transformed_points[0, 0] - self.points[0, 0]) - pixel_shift * self.ref.res[0]) < 0.1 @@ -297,6 +297,6 @@ def test_icp_opencv(self) -> None: icp = coreg.ICP(max_iterations=3) icp.fit(**self.fit_params) - aligned_dem, _ = icp.apply(self.tba.data, self.ref.transform, self.ref.crs) + aligned_dem, _ = icp.apply(self.tba.data, transform=self.ref.transform, crs=self.ref.crs) assert aligned_dem.shape == self.ref.data.squeeze().shape diff --git a/tests/test_coreg/test_base.py b/tests/test_coreg/test_base.py index c7c9f469..ef992788 100644 --- a/tests/test_coreg/test_base.py +++ b/tests/test_coreg/test_base.py @@ -39,14 +39,14 @@ class TestCoregClass: inlier_mask = ~outlines.create_mask(ref) fit_params = dict( - reference_dem=ref.data, - dem_to_be_aligned=tba.data, + reference_elev=ref.data, + to_be_aligned_elev=tba.data, inlier_mask=inlier_mask, transform=ref.transform, crs=ref.crs, verbose=False, ) - # Create some 3D coordinates with Z coordinates being 0 to try the apply_pts functions. + # Create some 3D coordinates with Z coordinates being 0 to try the apply functions. points = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [0, 0, 0, 0]], dtype="float64").T def test_init(self) -> None: @@ -280,7 +280,7 @@ def test_coreg_raster_and_ndarray_args(self) -> None: # De-shift dem2 dem2_r = vshiftcorr_r.apply(dem2) - dem2_a, _ = vshiftcorr_a.apply(dem2.data, dem2.transform, dem2.crs) + dem2_a, _ = vshiftcorr_a.apply(dem2.data, transform=dem2.transform, crs=dem2.crs) # Validate that the return formats were the expected ones, and that they are equal. # Issue - dem2_a does not have the same shape, the first dimension is being squeezed @@ -497,14 +497,14 @@ class TestCoregPipeline: inlier_mask = ~outlines.create_mask(ref) fit_params = dict( - reference_dem=ref.data, - dem_to_be_aligned=tba.data, + reference_elev=ref.data, + to_be_aligned_elev=tba.data, inlier_mask=inlier_mask, transform=ref.transform, crs=ref.crs, verbose=True, ) - # Create some 3D coordinates with Z coordinates being 0 to try the apply_pts functions. + # Create some 3D coordinates with Z coordinates being 0 to try the apply functions. points = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [0, 0, 0, 0]], dtype="float64").T @pytest.mark.parametrize("coreg_class", [coreg.VerticalShift, coreg.ICP, coreg.NuthKaab]) # type: ignore @@ -641,7 +641,7 @@ def test_pipeline_pts(self) -> None: ref_points.rename(columns={"b1": "z"}, inplace=True) # Check that this runs without error - pipeline.fit_pts(reference_dem=ref_points, dem_to_be_aligned=self.tba) + pipeline.fit(reference_elev=ref_points, to_be_aligned_elev=self.tba) for part in pipeline.pipeline: assert np.abs(part._meta["offset_east_px"]) > 0 @@ -720,14 +720,14 @@ class TestBlockwiseCoreg: inlier_mask = ~outlines.create_mask(ref) fit_params = dict( - reference_dem=ref.data, - dem_to_be_aligned=tba.data, + reference_elev=ref.data, + to_be_aligned_elev=tba.data, inlier_mask=inlier_mask, transform=ref.transform, crs=ref.crs, verbose=False, ) - # Create some 3D coordinates with Z coordinates being 0 to try the apply_pts functions. + # Create some 3D coordinates with Z coordinates being 0 to try the apply functions. points = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [0, 0, 0, 0]], dtype="float64").T @pytest.mark.parametrize( diff --git a/tests/test_coreg/test_biascorr.py b/tests/test_coreg/test_biascorr.py index 61c2a134..eb4fd20e 100644 --- a/tests/test_coreg/test_biascorr.py +++ b/tests/test_coreg/test_biascorr.py @@ -36,12 +36,12 @@ class TestBiasCorr: inlier_mask = ~outlines.create_mask(ref) fit_params = dict( - reference_dem=ref, - dem_to_be_aligned=tba, + reference_elev=ref, + to_be_aligned_elev=tba, inlier_mask=inlier_mask, verbose=True, ) - # Create some 3D coordinates with Z coordinates being 0 to try the apply_pts functions. + # Create some 3D coordinates with Z coordinates being 0 to try the apply functions. points = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [0, 0, 0, 0]], dtype="float64").T def test_biascorr(self) -> None: diff --git a/xdem/coreg/affine.py b/xdem/coreg/affine.py index d85e7dee..f7af4ac4 100644 --- a/xdem/coreg/affine.py +++ b/xdem/coreg/affine.py @@ -313,7 +313,7 @@ def _fit_rst_rst( # FOR DEVELOPERS: This function needs to be implemented. raise NotImplementedError("This step has to be implemented by subclassing.") - def _apply_func( + def _apply_rst( self, dem: NDArrayf, transform: rio.transform.Affine, @@ -324,7 +324,7 @@ def _apply_func( # FOR DEVELOPERS: This function is only needed for non-rigid transforms. raise NotImplementedError("This should have been implemented by subclassing") - def _apply_pts_func(self, coords: NDArrayf) -> NDArrayf: + def _apply_pts(self, coords: NDArrayf) -> NDArrayf: # FOR DEVELOPERS: This function is only needed for non-rigid transforms. raise NotImplementedError("This should have been implemented by subclassing") @@ -391,7 +391,7 @@ def _fit_rst_rst( self._meta["vshift"] = vshift - def _apply_func( + def _apply_rst( self, dem: NDArrayf, transform: rio.transform.Affine, @@ -402,7 +402,7 @@ def _apply_func( """Apply the VerticalShift function to a DEM.""" return dem + self._meta["vshift"], transform - def _apply_pts_func(self, coords: NDArrayf) -> NDArrayf: + def _apply_pts(self, coords: NDArrayf) -> NDArrayf: """Apply the VerticalShift function to a set of points.""" new_coords = coords.copy() new_coords[:, 2] += self._meta["vshift"] @@ -629,7 +629,7 @@ def _fit_rst_rst( self._meta["coefficients"] = coefs[0] self._meta["func"] = fit_ramp - def _apply_func( + def _apply_rst( self, dem: NDArrayf, transform: rio.transform.Affine, @@ -644,7 +644,7 @@ def _apply_func( return dem + ramp, transform - def _apply_pts_func(self, coords: NDArrayf) -> NDArrayf: + def _apply_pts(self, coords: NDArrayf) -> NDArrayf: """Apply the deramp function to a set of points.""" new_coords = coords.copy() @@ -983,7 +983,7 @@ def _to_matrix_func(self) -> NDArrayf: return matrix - def _apply_func( + def _apply_rst( self, dem: NDArrayf, transform: rio.transform.Affine, @@ -999,7 +999,7 @@ def _apply_func( vshift = self._meta["vshift"] return dem + vshift, updated_transform - def _apply_pts_func(self, coords: NDArrayf) -> NDArrayf: + def _apply_pts(self, coords: NDArrayf) -> NDArrayf: """Apply the Nuth & Kaab shift to a set of points.""" offset_east = self._meta["offset_east_px"] * self._meta["resolution"] offset_north = self._meta["offset_north_px"] * self._meta["resolution"] diff --git a/xdem/coreg/base.py b/xdem/coreg/base.py index 18ce8ad2..ff33e401 100644 --- a/xdem/coreg/base.py +++ b/xdem/coreg/base.py @@ -470,8 +470,7 @@ def _preprocess_coreg_apply( elev_out = elev # If input is a raster or array - elif isinstance(elev, (gu.Raster, np.ndarray)): - + else: # If input is raster if isinstance(elev, gu.Raster): if transform is None: @@ -1092,7 +1091,7 @@ def apply( only the transform might be updated and no resampling is done. :param transform: Geotransform of the elevation, only if provided as 2D array. :param crs: CRS of elevation, only if provided as 2D array. - :param kwargs: Any optional arguments to be passed to either self._apply_func or apply_matrix. + :param kwargs: Any optional arguments to be passed to either self._apply_rst or apply_matrix. Kwarg `resampling` can be set to any rio.warp.Resampling to use a different resampling in case \ `resample` is True, default is bilinear. @@ -1101,7 +1100,7 @@ def apply( if not self._fit_called and self._meta.get("matrix") is None: raise AssertionError(".fit() does not seem to have been called yet") - elev_array = _preprocess_coreg_apply(elev=elev, transform=transform, crs=crs) + elev_array, transform, crs = _preprocess_coreg_apply(elev=elev, transform=transform, crs=crs) main_args = {"dem": elev_array, "transform": transform, "crs": crs} @@ -1116,9 +1115,9 @@ def apply( main_args.update({"bias_vars": bias_vars}) - # See if a _apply_func exists + # See if a _apply_rst or _apply_pts exists try: - # arg `resample` must be passed to _apply_func, otherwise will be overwritten in CoregPipeline + # arg `resample` must be passed to _apply_rst, otherwise will be overwritten in CoregPipeline kwargs["resample"] = resample # Run the associated apply function @@ -1146,7 +1145,7 @@ def apply( ) out_transform = transform else: - raise ValueError("Coreg method is non-rigid but has no implemented _apply_func") + raise ValueError("Coreg method is non-rigid but has no implemented _apply_rst") # Ensure the dtype is OK applied_dem = applied_dem.astype("float32") @@ -1295,7 +1294,7 @@ def _fit_func( """ # Determine if input is raster-raster, raster-point or point-point - if all(isinstance(dem, NDArrayf) for dem in (kwargs["ref_dem"], kwargs["tba_dem"])): + if all(isinstance(dem, np.ndarray) for dem in (kwargs["ref_dem"], kwargs["tba_dem"])): rop = "r-r" elif all(isinstance(dem, gpd.GeoDataFrame) for dem in (kwargs["ref_dem"], kwargs["tba_dem"])): rop = "p-p" @@ -1353,6 +1352,14 @@ def _fit_func( else: raise NotImplementedError(f"No point-point method found for coregistration {self.__class__.__name__}.") + def _apply_func(self, **kwargs: Any): + """Distribute to _apply_rst and _apply_pts based on input and method availability""" + + if isinstance(kwargs["dem"], np.ndarray): + return self._apply_rst(**kwargs) + else: + return self._apply_pts(**kwargs) + def _fit_rst_rst(self, ref_dem: NDArrayf, tba_dem: NDArrayf, @@ -1395,7 +1402,7 @@ def _fit_pts_pts(self, # FOR DEVELOPERS: This function needs to be implemented. raise NotImplementedError("This step has to be implemented by subclassing.") - def _apply_func( + def _apply_rst( self, dem: NDArrayf, transform: rio.transform.Affine, @@ -1403,6 +1410,19 @@ def _apply_func( bias_vars: dict[str, NDArrayf] | None = None, **kwargs: Any, ) -> tuple[NDArrayf, rio.transform.Affine]: + + # FOR DEVELOPERS: This function is only needed for non-rigid transforms. + raise NotImplementedError("This should have been implemented by subclassing") + + def _apply_pts( + self, + dem: NDArrayf, + transform: rio.transform.Affine, + crs: rio.crs.CRS, + bias_vars: dict[str, NDArrayf] | None = None, + **kwargs: Any, + ) -> tuple[NDArrayf, rio.transform.Affine]: + # FOR DEVELOPERS: This function is only needed for non-rigid transforms. raise NotImplementedError("This should have been implemented by subclassing") @@ -1522,8 +1542,8 @@ def fit( print(f"Running pipeline step: {i + 1} / {len(self.pipeline)}") main_args_fit = { - "reference_dem": ref_dem, - "dem_to_be_aligned": tba_dem_mod, + "reference_elev": ref_dem, + "to_be_aligned_elev": tba_dem_mod, "inlier_mask": inlier_mask, "transform": out_transform, "crs": crs, @@ -1533,7 +1553,7 @@ def fit( "random_state": random_state, } - main_args_apply = {"dem": tba_dem_mod, "transform": out_transform, "crs": crs} + main_args_apply = {"elev": tba_dem_mod, "transform": out_transform, "crs": crs} # If non-affine method that expects a bias_vars argument if coreg._needs_vars: @@ -1551,7 +1571,8 @@ def fit( return self - def _apply_func( + # TODO: Override parent method into an "apply()"? + def _apply_rst( self, dem: NDArrayf, transform: rio.transform.Affine, @@ -1565,7 +1586,7 @@ def _apply_func( for i, coreg in enumerate(self.pipeline): - main_args_apply = {"dem": dem_mod, "transform": out_transform, "crs": crs} + main_args_apply = {"elev": dem_mod, "transform": out_transform, "crs": crs} # If non-affine method that expects a bias_vars argument if coreg._needs_vars: @@ -1886,7 +1907,7 @@ def to_points(self) -> NDArrayf: x_coord, y_coord = meta["representative_x"], meta["representative_y"] old_position = np.reshape([x_coord, y_coord, meta["representative_val"]], (1, 3)) - new_position = self.procstep.apply_pts(old_position) + new_position = self.procstep.apply(old_position) points = np.append(points, np.dstack((old_position, new_position)), axis=0) @@ -1945,7 +1966,7 @@ def subdivide_array(self, shape: tuple[int, ...]) -> NDArrayf: shape = (shape[1], shape[2]) return subdivide_array(shape, count=self.subdivision) - def _apply_func( + def _apply_rst( self, dem: NDArrayf, transform: rio.transform.Affine, @@ -1974,7 +1995,7 @@ def _apply_func( [bounds.right - resolution / 2, bounds.bottom + resolution / 2, representative_height], ] ) - edges_dest = self.apply_pts(edges_source) + edges_dest = self.apply(edges_source) edges = np.dstack((edges_source, edges_dest)) all_points = np.append(points, edges, axis=0) diff --git a/xdem/coreg/biascorr.py b/xdem/coreg/biascorr.py index d42303b5..4db34776 100644 --- a/xdem/coreg/biascorr.py +++ b/xdem/coreg/biascorr.py @@ -317,7 +317,7 @@ def _fit_rst_rst( # type: ignore elif self._fit_or_bin in ["bin", "bin_and_fit"]: self._meta["bin_dataframe"] = df - def _apply_func( # type: ignore + def _apply_rst( # type: ignore self, dem: NDArrayf, transform: rio.transform.Affine, # Never None thanks to Coreg.fit() pre-process @@ -668,7 +668,7 @@ def _fit_rst_rst( # type: ignore **kwargs, ) - def _apply_func( + def _apply_rst( self, dem: NDArrayf, transform: rio.transform.Affine, @@ -683,7 +683,7 @@ def _apply_func( along_track_angle=self._meta["angle"], ) - return super()._apply_func(dem=dem, transform=transform, crs=crs, bias_vars={"angle": x}, **kwargs) + return super()._apply_rst(dem=dem, transform=transform, crs=crs, bias_vars={"angle": x}, **kwargs) class TerrainBias(BiasCorr1D): @@ -774,7 +774,7 @@ def _fit_rst_rst( # type: ignore **kwargs, ) - def _apply_func( + def _apply_rst( self, dem: NDArrayf, transform: rio.transform.Affine, @@ -793,7 +793,7 @@ def _apply_func( ) bias_vars = {self._meta["terrain_attribute"]: attr} - return super()._apply_func(dem=dem, transform=transform, crs=crs, bias_vars=bias_vars, **kwargs) + return super()._apply_rst(dem=dem, transform=transform, crs=crs, bias_vars=bias_vars, **kwargs) class Deramp(BiasCorr2D): @@ -871,7 +871,7 @@ def _fit_rst_rst( # type: ignore **kwargs, ) - def _apply_func( + def _apply_rst( self, dem: NDArrayf, transform: rio.transform.Affine, @@ -883,4 +883,4 @@ def _apply_func( # Define the coordinates for applying the correction xx, yy = np.meshgrid(np.arange(0, dem.shape[1]), np.arange(0, dem.shape[0])) - return super()._apply_func(dem=dem, transform=transform, crs=crs, bias_vars={"xx": xx, "yy": yy}, **kwargs) + return super()._apply_rst(dem=dem, transform=transform, crs=crs, bias_vars={"xx": xx, "yy": yy}, **kwargs) From 19fa2da79e5f71c8e309a0fbcafa11614fb223f0 Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Fri, 1 Mar 2024 14:33:18 -0900 Subject: [PATCH 03/54] Incremental commit --- examples/basic/plot_icp_coregistration.py | 2 +- tests/test_coreg/test_affine.py | 5 +- tests/test_coreg/test_base.py | 10 +- xdem/coreg/__init__.py | 2 +- xdem/coreg/affine.py | 245 ++++++----- xdem/coreg/base.py | 482 +++++++++++++--------- xdem/coreg/biascorr.py | 86 ++-- 7 files changed, 485 insertions(+), 347 deletions(-) diff --git a/examples/basic/plot_icp_coregistration.py b/examples/basic/plot_icp_coregistration.py index a3496101..91145ccd 100644 --- a/examples/basic/plot_icp_coregistration.py +++ b/examples/basic/plot_icp_coregistration.py @@ -44,7 +44,7 @@ ) # This will apply the matrix along the center of the DEM -rotated_dem_data = xdem.coreg.apply_matrix(dem.data.squeeze(), transform=dem.transform, matrix=rotation_matrix) +rotated_dem_data = xdem.coreg.apply_matrix_rst(dem.data.squeeze(), transform=dem.transform, matrix=rotation_matrix) rotated_dem = xdem.DEM.from_array(rotated_dem_data, transform=dem.transform, crs=dem.crs, nodata=-9999) # %% diff --git a/tests/test_coreg/test_affine.py b/tests/test_coreg/test_affine.py index de6e095e..55d9cbdd 100644 --- a/tests/test_coreg/test_affine.py +++ b/tests/test_coreg/test_affine.py @@ -6,6 +6,7 @@ import numpy as np import pytest +import geopandas as gpd import rasterio as rio from geoutils import Raster, Vector from geoutils.raster import RasterType @@ -40,7 +41,9 @@ class TestAffineCoreg: verbose=False, ) # Create some 3D coordinates with Z coordinates being 0 to try the apply functions. - points = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [0, 0, 0, 0]], dtype="float64").T + points_arr = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [0, 0, 0, 0]], dtype="float64").T + points = gpd.GeoDataFrame(geometry=gpd.points_from_xy(x=points_arr[0, :], y=points_arr[1, :], crs=ref.crs), + data={"z": points_arr[2, :]}) def test_from_classmethods(self) -> None: warnings.simplefilter("error") diff --git a/tests/test_coreg/test_base.py b/tests/test_coreg/test_base.py index ef992788..f24987da 100644 --- a/tests/test_coreg/test_base.py +++ b/tests/test_coreg/test_base.py @@ -19,7 +19,7 @@ import xdem from xdem import coreg, examples, misc, spatialstats from xdem._typing import NDArrayf - from xdem.coreg.base import Coreg, apply_matrix + from xdem.coreg.base import Coreg, apply_matrix_rst def load_examples() -> tuple[RasterType, RasterType, Vector]: @@ -831,7 +831,7 @@ def test_apply_matrix() -> None: vshift = 5 matrix = np.diag(np.ones(4, float)) matrix[2, 3] = vshift - transformed_dem = apply_matrix(ref_arr, ref.transform, matrix) + transformed_dem = apply_matrix_rst(ref_arr, ref.transform, matrix) reverted_dem = transformed_dem - vshift # Check that the reverted DEM has the exact same values as the initial one @@ -850,7 +850,7 @@ def test_apply_matrix() -> None: matrix[0, 3] = pixel_shift * tba.res[0] matrix[2, 3] = -vshift - transformed_dem = apply_matrix(shifted_dem, ref.transform, matrix, resampling="bilinear") + transformed_dem = apply_matrix_rst(shifted_dem, ref.transform, matrix, resampling="bilinear") diff = np.asarray(ref_arr - transformed_dem) # Check that the median is very close to zero @@ -876,14 +876,14 @@ def rotation_matrix(rotation: float = 30) -> NDArrayf: np.mean([ref.bounds.top, ref.bounds.bottom]), ref.data.mean(), ) - rotated_dem = apply_matrix(ref.data.squeeze(), ref.transform, rotation_matrix(rotation), centroid=centroid) + rotated_dem = apply_matrix_rst(ref.data.squeeze(), ref.transform, rotation_matrix(rotation), centroid=centroid) # Make sure that the rotated DEM is way off, but is centered around the same approximate point. assert np.abs(np.nanmedian(rotated_dem - ref.data.data)) < 1 assert spatialstats.nmad(rotated_dem - ref.data.data) > 500 # Apply a rotation in the opposite direction unrotated_dem = ( - apply_matrix(rotated_dem, ref.transform, rotation_matrix(-rotation * 0.99), centroid=centroid) + 4.0 + apply_matrix_rst(rotated_dem, ref.transform, rotation_matrix(-rotation * 0.99), centroid=centroid) + 4.0 ) # TODO: Check why the 0.99 rotation and +4 vertical shift were introduced. diff = np.asarray(ref.data.squeeze() - unrotated_dem) diff --git a/xdem/coreg/__init__.py b/xdem/coreg/__init__.py index 06a0b014..7c630c3a 100644 --- a/xdem/coreg/__init__.py +++ b/xdem/coreg/__init__.py @@ -10,7 +10,7 @@ Tilt, VerticalShift, ) -from xdem.coreg.base import BlockwiseCoreg, Coreg, CoregPipeline, apply_matrix # noqa +from xdem.coreg.base import BlockwiseCoreg, Coreg, CoregPipeline, apply_matrix_rst # noqa from xdem.coreg.biascorr import ( # noqa BiasCorr, BiasCorr1D, diff --git a/xdem/coreg/affine.py b/xdem/coreg/affine.py index f7af4ac4..68dac6f1 100644 --- a/xdem/coreg/affine.py +++ b/xdem/coreg/affine.py @@ -13,6 +13,7 @@ _has_cv2 = False import numpy as np import pandas as pd +import geopandas as gpd import rasterio as rio import scipy import scipy.interpolate @@ -300,8 +301,8 @@ def _to_matrix_func(self) -> NDArrayf: def _fit_rst_rst( self, - ref_dem: NDArrayf, - tba_dem: NDArrayf, + ref_elev: NDArrayf, + tba_elev: NDArrayf, inlier_mask: NDArrayb, transform: rio.transform.Affine, crs: rio.crs.CRS, @@ -315,7 +316,7 @@ def _fit_rst_rst( def _apply_rst( self, - dem: NDArrayf, + elev: NDArrayf, transform: rio.transform.Affine, crs: rio.crs.CRS, bias_vars: dict[str, NDArrayf] | None = None, @@ -324,7 +325,14 @@ def _apply_rst( # FOR DEVELOPERS: This function is only needed for non-rigid transforms. raise NotImplementedError("This should have been implemented by subclassing") - def _apply_pts(self, coords: NDArrayf) -> NDArrayf: + def _apply_pts( + self, + elev: gpd.GeoDataFrame, + z_name: str = "z", + bias_vars: dict[str, NDArrayf] | None = None, + **kwargs: Any, + ) -> gpd.GeoDataFrame: + # FOR DEVELOPERS: This function is only needed for non-rigid transforms. raise NotImplementedError("This should have been implemented by subclassing") @@ -352,8 +360,8 @@ def __init__( def _fit_rst_rst( self, - ref_dem: NDArrayf, - tba_dem: NDArrayf, + ref_elev: NDArrayf, + tba_elev: NDArrayf, inlier_mask: NDArrayb, transform: rio.transform.Affine, crs: rio.crs.CRS, @@ -366,7 +374,7 @@ def _fit_rst_rst( if verbose: print("Estimating the vertical shift...") - diff = ref_dem - tba_dem + diff = ref_elev - tba_elev valid_mask = np.logical_and.reduce((inlier_mask, np.isfinite(diff))) subsample_mask = self._get_subsample_on_valid_mask(valid_mask=valid_mask) @@ -393,20 +401,27 @@ def _fit_rst_rst( def _apply_rst( self, - dem: NDArrayf, + elev: NDArrayf, transform: rio.transform.Affine, crs: rio.crs.CRS, bias_vars: dict[str, NDArrayf] | None = None, **kwargs: Any, ) -> tuple[NDArrayf, rio.transform.Affine]: """Apply the VerticalShift function to a DEM.""" - return dem + self._meta["vshift"], transform + return elev + self._meta["vshift"], transform + + def _apply_pts( + self, + elev: gpd.GeoDataFrame, + z_name: str = "z", + bias_vars: dict[str, NDArrayf] | None = None, + **kwargs: Any, + ) -> gpd.GeoDataFrame: - def _apply_pts(self, coords: NDArrayf) -> NDArrayf: """Apply the VerticalShift function to a set of points.""" - new_coords = coords.copy() - new_coords[:, 2] += self._meta["vshift"] - return new_coords + dem_copy = elev.copy() + dem_copy[z_name].values += self._meta["vshift"] + return dem_copy def _to_matrix_func(self) -> NDArrayf: """Convert the vertical shift to a transform matrix.""" @@ -458,8 +473,8 @@ def __init__( def _fit_rst_rst( self, - ref_dem: NDArrayf, - tba_dem: NDArrayf, + ref_elev: NDArrayf, + tba_elev: NDArrayf, inlier_mask: NDArrayb, transform: rio.transform.Affine, crs: rio.crs.CRS, @@ -473,17 +488,17 @@ def _fit_rst_rst( if weights is not None: warnings.warn("ICP was given weights, but does not support it.") - bounds, resolution = _transform_to_bounds_and_res(ref_dem.shape, transform) + bounds, resolution = _transform_to_bounds_and_res(ref_elev.shape, transform) # Generate the x and y coordinates for the reference_dem - x_coords, y_coords = _get_x_and_y_coords(ref_dem.shape, transform) - gradient_x, gradient_y = np.gradient(ref_dem) + x_coords, y_coords = _get_x_and_y_coords(ref_elev.shape, transform) + gradient_x, gradient_y = np.gradient(ref_elev) normal_east = np.sin(np.arctan(gradient_y / resolution)) * -1 normal_north = np.sin(np.arctan(gradient_x / resolution)) normal_up = 1 - np.linalg.norm([normal_east, normal_north], axis=0) valid_mask = np.logical_and.reduce( - (inlier_mask, np.isfinite(ref_dem), np.isfinite(normal_east), np.isfinite(normal_north)) + (inlier_mask, np.isfinite(ref_elev), np.isfinite(normal_east), np.isfinite(normal_north)) ) subsample_mask = self._get_subsample_on_valid_mask(valid_mask=valid_mask) @@ -492,7 +507,7 @@ def _fit_rst_rst( [ x_coords[subsample_mask], y_coords[subsample_mask], - ref_dem[subsample_mask], + ref_elev[subsample_mask], normal_east[subsample_mask], normal_north[subsample_mask], normal_up[subsample_mask], @@ -501,34 +516,34 @@ def _fit_rst_rst( columns=["E", "N", "z", "nx", "ny", "nz"], ) - self._fit_rst_pts(ref_dem=ref_pts, tba_dem=tba_dem, transform=transform, verbose=verbose, z_name="z") + self._fit_rst_pts(ref_elev=ref_pts, tba_elev=tba_elev, transform=transform, verbose=verbose, z_name="z") def _fit_rst_pts( self, - ref_dem: pd.DataFrame, - tba_dem: RasterType | NDArrayf, + ref_elev: pd.DataFrame, + tba_elev: RasterType | NDArrayf, transform: rio.transform.Affine | None, verbose: bool = False, z_name: str = "z", **kwargs: Any, ) -> None: - if transform is None and hasattr(tba_dem, "transform"): - transform = tba_dem.transform # type: ignore - if hasattr(tba_dem, "transform"): - tba_dem = tba_dem.data + if transform is None and hasattr(tba_elev, "transform"): + transform = tba_elev.transform # type: ignore + if hasattr(tba_elev, "transform"): + tba_elev = tba_elev.data - ref_dem = ref_dem.dropna(how="any", subset=["E", "N", z_name]) - bounds, resolution = _transform_to_bounds_and_res(tba_dem.shape, transform) + ref_elev = ref_elev.dropna(how="any", subset=["E", "N", z_name]) + bounds, resolution = _transform_to_bounds_and_res(tba_elev.shape, transform) points: dict[str, NDArrayf] = {} # Generate the x and y coordinates for the TBA DEM - x_coords, y_coords = _get_x_and_y_coords(tba_dem.shape, transform) + x_coords, y_coords = _get_x_and_y_coords(tba_elev.shape, transform) centroid = (np.mean([bounds.left, bounds.right]), np.mean([bounds.bottom, bounds.top]), 0.0) # Subtract by the bounding coordinates to avoid float32 rounding errors. x_coords -= centroid[0] y_coords -= centroid[1] - gradient_x, gradient_y = np.gradient(tba_dem) + gradient_x, gradient_y = np.gradient(tba_elev) # This CRS is temporary and doesn't affect the result. It's just needed for Raster instantiation. dem_kwargs = {"transform": transform, "crs": rio.CRS.from_epsg(32633), "nodata": -9999.0} @@ -536,30 +551,30 @@ def _fit_rst_pts( normal_north = Raster.from_array(np.sin(np.arctan(gradient_x / resolution)), **dem_kwargs) normal_up = Raster.from_array(1 - np.linalg.norm([normal_east.data, normal_north.data], axis=0), **dem_kwargs) - valid_mask = ~np.isnan(tba_dem) & ~np.isnan(normal_east.data) & ~np.isnan(normal_north.data) + valid_mask = ~np.isnan(tba_elev) & ~np.isnan(normal_east.data) & ~np.isnan(normal_north.data) points["tba"] = np.dstack( [ x_coords[valid_mask], y_coords[valid_mask], - tba_dem[valid_mask], + tba_elev[valid_mask], normal_east.data[valid_mask], normal_north.data[valid_mask], normal_up.data[valid_mask], ] ).squeeze() - if any(col not in ref_dem for col in ["nx", "ny", "nz"]): + if any(col not in ref_elev for col in ["nx", "ny", "nz"]): for key, raster in [("nx", normal_east), ("ny", normal_north), ("nz", normal_up)]: raster.tags["AREA_OR_POINT"] = "Area" - ref_dem[key] = raster.interp_points( - ref_dem[["E", "N"]].values, shift_area_or_point=True, mode="nearest" + ref_elev[key] = raster.interp_points( + ref_elev[["E", "N"]].values, shift_area_or_point=True, mode="nearest" ) - ref_dem["E"] -= centroid[0] - ref_dem["N"] -= centroid[1] + ref_elev["E"] -= centroid[0] + ref_elev["N"] -= centroid[1] - points["ref"] = ref_dem[["E", "N", z_name, "nx", "ny", "nz"]].values + points["ref"] = ref_elev[["E", "N", z_name, "nx", "ny", "nz"]].values for key in points: points[key] = points[key][~np.any(np.isnan(points[key]), axis=1)].astype("float32") @@ -608,8 +623,8 @@ def __init__(self, subsample: int | float = 5e5) -> None: def _fit_rst_rst( self, - ref_dem: NDArrayf, - tba_dem: NDArrayf, + ref_elev: NDArrayf, + tba_elev: NDArrayf, inlier_mask: NDArrayb, transform: rio.transform.Affine, crs: rio.crs.CRS, @@ -619,9 +634,9 @@ def _fit_rst_rst( **kwargs: Any, ) -> None: """Fit the dDEM between the DEMs to a least squares polynomial equation.""" - ddem = ref_dem - tba_dem + ddem = ref_elev - tba_elev ddem[~inlier_mask] = np.nan - x_coords, y_coords = _get_x_and_y_coords(ref_dem.shape, transform) + x_coords, y_coords = _get_x_and_y_coords(ref_elev.shape, transform) fit_ramp, coefs = deramping( ddem, x_coords, y_coords, degree=self.poly_order, subsample=self._meta["subsample"], verbose=verbose ) @@ -631,26 +646,31 @@ def _fit_rst_rst( def _apply_rst( self, - dem: NDArrayf, + elev: NDArrayf, transform: rio.transform.Affine, crs: rio.crs.CRS, bias_vars: dict[str, NDArrayf] | None = None, **kwargs: Any, ) -> tuple[NDArrayf, rio.transform.Affine]: """Apply the deramp function to a DEM.""" - x_coords, y_coords = _get_x_and_y_coords(dem.shape, transform) + x_coords, y_coords = _get_x_and_y_coords(elev.shape, transform) ramp = self._meta["func"](x_coords, y_coords) - return dem + ramp, transform + return elev + ramp, transform - def _apply_pts(self, coords: NDArrayf) -> NDArrayf: + def _apply_pts( + self, + elev: gpd.GeoDataFrame, + z_name: str = "z", + bias_vars: dict[str, NDArrayf] | None = None, + **kwargs: Any, + ) -> gpd.GeoDataFrame: """Apply the deramp function to a set of points.""" - new_coords = coords.copy() + dem_copy = elev.copy() + dem_copy[z_name].values += self._meta["func"](dem_copy.geometry.x.values, dem_copy.geometry.y.values) - new_coords[:, 2] += self._meta["func"](new_coords[:, 0], new_coords[:, 1]) - - return new_coords + return dem_copy def _to_matrix_func(self) -> NDArrayf: """Return a transform matrix if possible.""" @@ -694,8 +714,8 @@ def __init__(self, max_iterations: int = 10, offset_threshold: float = 0.05, sub def _fit_rst_rst( self, - ref_dem: NDArrayf, - tba_dem: NDArrayf, + ref_elev: NDArrayf, + tba_elev: NDArrayf, inlier_mask: NDArrayb, transform: rio.transform.Affine, crs: rio.crs.CRS, @@ -708,9 +728,9 @@ def _fit_rst_rst( if verbose: print("Running Nuth and Kääb (2011) coregistration") - bounds, resolution = _transform_to_bounds_and_res(ref_dem.shape, transform) + bounds, resolution = _transform_to_bounds_and_res(ref_elev.shape, transform) # Make a new DEM which will be modified inplace - aligned_dem = tba_dem.copy() + aligned_dem = tba_elev.copy() # Check that DEM CRS is projected, otherwise slope is not correctly calculated if not crs.is_projected: @@ -723,18 +743,18 @@ def _fit_rst_rst( if verbose: print(" Calculate slope and aspect") - slope_tan, aspect = _calculate_slope_and_aspect_nuthkaab(ref_dem) + slope_tan, aspect = _calculate_slope_and_aspect_nuthkaab(ref_elev) valid_mask = np.logical_and.reduce( - (inlier_mask, np.isfinite(ref_dem), np.isfinite(tba_dem), np.isfinite(slope_tan)) + (inlier_mask, np.isfinite(ref_elev), np.isfinite(tba_elev), np.isfinite(slope_tan)) ) subsample_mask = self._get_subsample_on_valid_mask(valid_mask=valid_mask) - ref_dem[~subsample_mask] = np.nan + ref_elev[~subsample_mask] = np.nan # Make index grids for the east and north dimensions - east_grid = np.arange(ref_dem.shape[1]) - north_grid = np.arange(ref_dem.shape[0]) + east_grid = np.arange(ref_elev.shape[1]) + north_grid = np.arange(ref_elev.shape[0]) # Make a function to estimate the aligned DEM (used to construct an offset DEM) elevation_function = scipy.interpolate.RectBivariateSpline( @@ -751,7 +771,7 @@ def _fit_rst_rst( offset_east, offset_north = 0.0, 0.0 # Calculate initial dDEM statistics - elevation_difference = ref_dem - aligned_dem + elevation_difference = ref_elev - aligned_dem vshift = np.nanmedian(elevation_difference) nmad_old = nmad(elevation_difference) @@ -769,7 +789,7 @@ def _fit_rst_rst( for i in pbar: # Calculate the elevation difference and the residual (NMAD) between them. - elevation_difference = ref_dem - aligned_dem + elevation_difference = ref_elev - aligned_dem vshift = np.nanmedian(elevation_difference) # Correct potential vertical shifts elevation_difference -= vshift @@ -796,7 +816,7 @@ def _fit_rst_rst( aligned_dem = new_elevation # Update statistics - elevation_difference = ref_dem - aligned_dem + elevation_difference = ref_elev - aligned_dem vshift = np.nanmedian(elevation_difference) nmad_new = nmad(elevation_difference) @@ -832,8 +852,8 @@ def _fit_rst_rst( def _fit_rst_pts( self, - ref_dem: pd.DataFrame, - tba_dem: RasterType, + ref_elev: pd.DataFrame, + tba_elev: RasterType, transform: rio.transform.Affine | None, weights: NDArrayf | None, verbose: bool = False, @@ -852,10 +872,10 @@ def _fit_rst_pts( if verbose: print("Running Nuth and Kääb (2011) coregistration. Shift pts instead of shifting dem") - tba_arr, _ = get_array_and_mask(tba_dem) + tba_arr, _ = get_array_and_mask(tba_elev) - resolution = tba_dem.res[0] - x_coords, y_coords = (ref_dem["E"].values, ref_dem["N"].values) + resolution = tba_elev.res[0] + x_coords, y_coords = (ref_elev["E"].values, ref_elev["N"].values) # Assume that the coordinates represent the center of a theoretical pixel. # The raster sampling is done in the upper left corner, meaning all point have to be respectively shifted @@ -866,7 +886,7 @@ def _fit_rst_pts( # This needs to be consistent, so it's cardcoded here area_or_point = "Area" # Make a new DEM which will be modified inplace - aligned_dem = tba_dem.copy() + aligned_dem = tba_elev.copy() aligned_dem.tags["AREA_OR_POINT"] = area_or_point # Calculate slope and aspect maps from the reference DEM @@ -874,9 +894,9 @@ def _fit_rst_pts( print(" Calculate slope and aspect") slope, aspect = _calculate_slope_and_aspect_nuthkaab(tba_arr) - slope_r = tba_dem.copy(new_array=np.ma.masked_array(slope[None, :, :], mask=~np.isfinite(slope[None, :, :]))) + slope_r = tba_elev.copy(new_array=np.ma.masked_array(slope[None, :, :], mask=~np.isfinite(slope[None, :, :]))) slope_r.tags["AREA_OR_POINT"] = area_or_point - aspect_r = tba_dem.copy(new_array=np.ma.masked_array(aspect[None, :, :], mask=~np.isfinite(aspect[None, :, :]))) + aspect_r = tba_elev.copy(new_array=np.ma.masked_array(aspect[None, :, :], mask=~np.isfinite(aspect[None, :, :]))) aspect_r.tags["AREA_OR_POINT"] = area_or_point # Initialise east and north pixel offset variables (these will be incremented up and down) @@ -890,7 +910,7 @@ def _fit_rst_pts( # Treat new_pts as a window, every time we shift it a little bit to fit the correct view new_pts = pts.copy() - elevation_difference = ref_dem[z_name].values - tba_pts + elevation_difference = ref_elev[z_name].values - tba_pts vshift = float(np.nanmedian(elevation_difference)) nmad_old = nmad(elevation_difference) @@ -923,10 +943,10 @@ def _fit_rst_pts( # Get new values tba_pts = aligned_dem.interp_points(new_pts, mode="nearest", shift_area_or_point=True) - elevation_difference = ref_dem[z_name].values - tba_pts + elevation_difference = ref_elev[z_name].values - tba_pts # Mask out no data by dem's mask - pts_, mask_ = _mask_dataframe_by_dem(new_pts, tba_dem) + pts_, mask_ = _mask_dataframe_by_dem(new_pts, tba_elev) # Update values relataed to shifted pts elevation_difference = elevation_difference[mask_] @@ -985,7 +1005,7 @@ def _to_matrix_func(self) -> NDArrayf: def _apply_rst( self, - dem: NDArrayf, + elev: NDArrayf, transform: rio.transform.Affine, crs: rio.crs.CRS, bias_vars: dict[str, NDArrayf] | None = None, @@ -997,19 +1017,26 @@ def _apply_rst( updated_transform = apply_xy_shift(transform, -offset_east, -offset_north) vshift = self._meta["vshift"] - return dem + vshift, updated_transform + return elev + vshift, updated_transform + + def _apply_pts( + self, + elev: gpd.GeoDataFrame, + z_name: str = "z", + bias_vars: dict[str, NDArrayf] | None = None, + **kwargs: Any, + ) -> gpd.GeoDataFrame: - def _apply_pts(self, coords: NDArrayf) -> NDArrayf: """Apply the Nuth & Kaab shift to a set of points.""" offset_east = self._meta["offset_east_px"] * self._meta["resolution"] offset_north = self._meta["offset_north_px"] * self._meta["resolution"] - new_coords = coords.copy() - new_coords[:, 0] += offset_east - new_coords[:, 1] += offset_north - new_coords[:, 2] += self._meta["vshift"] + dem_copy = elev.copy() + dem_copy.geometry.x.values += offset_east + dem_copy.geometry.y.values += offset_north + dem_copy[z_name].values += self._meta["vshift"] - return new_coords + return dem_copy class GradientDescending(AffineCoreg): @@ -1051,8 +1078,8 @@ def __init__( def _fit_rst_pts( self, - ref_dem: pd.DataFrame, - tba_dem: RasterType, + ref_elev: pd.DataFrame, + tba_elev: RasterType, verbose: bool = False, z_name: str = "z", weights: str | None = None, @@ -1060,8 +1087,8 @@ def _fit_rst_pts( **kwargs: Any, ) -> None: """Estimate the x/y/z offset between two DEMs. - :param ref_dem: the dataframe used as ref - :param tba_dem: the dem to be aligned + :param ref_elev: the dataframe used as ref + :param tba_elev: the dem to be aligned :param z_name: the column name of dataframe used for elevation differencing :param weights: the column name of dataframe used for weight, should have the same length with z_name columns :param random_state: The random state of the subsampling. @@ -1070,27 +1097,27 @@ def _fit_rst_pts( raise ValueError("Optional dependency needed. Install 'noisyopt'") # Perform downsampling if subsample != None - if self._meta["subsample"] and len(ref_dem) > self._meta["subsample"]: - ref_dem = ref_dem.sample(frac=self._meta["subsample"] / len(ref_dem), random_state=random_state).copy() + if self._meta["subsample"] and len(ref_elev) > self._meta["subsample"]: + ref_elev = ref_elev.sample(frac=self._meta["subsample"] / len(ref_elev), random_state=random_state).copy() else: - ref_dem = ref_dem.copy() + ref_elev = ref_elev.copy() - resolution = tba_dem.res[0] + resolution = tba_elev.res[0] # Assume that the coordinates represent the center of a theoretical pixel. # The raster sampling is done in the upper left corner, meaning all point have to be respectively shifted - ref_dem["E"] -= resolution / 2 - ref_dem["N"] += resolution / 2 + ref_elev["E"] -= resolution / 2 + ref_elev["N"] += resolution / 2 area_or_point = "Area" - old_aop = tba_dem.tags.get("AREA_OR_POINT", None) - tba_dem.tags["AREA_OR_POINT"] = area_or_point + old_aop = tba_elev.tags.get("AREA_OR_POINT", None) + tba_elev.tags["AREA_OR_POINT"] = area_or_point if verbose: print("Running Gradient Descending Coreg - Zhihao (in preparation) ") if self._meta["subsample"]: - print("Running on downsampling. The length of the gdf:", len(ref_dem)) + print("Running on downsampling. The length of the gdf:", len(ref_elev)) - elevation_difference = _residuals_df(tba_dem, ref_dem, (0, 0), 0, z_name=z_name) + elevation_difference = _residuals_df(tba_elev, ref_elev, (0, 0), 0, z_name=z_name) nmad_old = nmad(elevation_difference) vshift = np.nanmedian(elevation_difference) print(" Statistics on initial dh:") @@ -1098,7 +1125,7 @@ def _fit_rst_pts( # start iteration, find the best shifting px def func_cost(x: tuple[float, float]) -> np.floating[Any]: - return nmad(_residuals_df(tba_dem, ref_dem, x, 0, z_name=z_name, weight=weights)) + return nmad(_residuals_df(tba_elev, ref_elev, x, 0, z_name=z_name, weight=weights)) res = minimizeCompass( func_cost, @@ -1112,12 +1139,12 @@ def func_cost(x: tuple[float, float]) -> np.floating[Any]: ) # Send the best solution to find all results - elevation_difference = _residuals_df(tba_dem, ref_dem, (res.x[0], res.x[1]), 0, z_name=z_name) + elevation_difference = _residuals_df(tba_elev, ref_elev, (res.x[0], res.x[1]), 0, z_name=z_name) if old_aop is None: - del tba_dem.tags["AREA_OR_POINT"] + del tba_elev.tags["AREA_OR_POINT"] else: - tba_dem.tags["AREA_OR_POINT"] = old_aop + tba_elev.tags["AREA_OR_POINT"] = old_aop # results statistics vshift = np.nanmedian(elevation_difference) @@ -1137,8 +1164,8 @@ def func_cost(x: tuple[float, float]) -> np.floating[Any]: def _fit_rst_rst( self, - ref_dem: NDArrayf, - tba_dem: NDArrayf, + ref_elev: NDArrayf, + tba_elev: NDArrayf, inlier_mask: NDArrayb, transform: rio.transform.Affine, crs: rio.crs.CRS, @@ -1148,16 +1175,16 @@ def _fit_rst_rst( **kwargs: Any, ) -> None: - ref_dem = ( - Raster.from_array(ref_dem, transform=transform, crs=crs, nodata=-9999.0) + ref_elev = ( + Raster.from_array(ref_elev, transform=transform, crs=crs, nodata=-9999.0) .to_points(as_array=False, pixel_offset="center") .ds ) - ref_dem["E"] = ref_dem.geometry.x - ref_dem["N"] = ref_dem.geometry.y - ref_dem.rename(columns={"b1": "z"}, inplace=True) - tba_dem = Raster.from_array(tba_dem, transform=transform, crs=crs, nodata=-9999.0) - self._fit_rst_pts(ref_dem=ref_dem, tba_dem=tba_dem, transform=transform, **kwargs) + ref_elev["E"] = ref_elev.geometry.x + ref_elev["N"] = ref_elev.geometry.y + ref_elev.rename(columns={"b1": "z"}, inplace=True) + tba_elev = Raster.from_array(tba_elev, transform=transform, crs=crs, nodata=-9999.0) + self._fit_rst_pts(ref_elev=ref_elev, tba_elev=tba_elev, transform=transform, **kwargs) def _to_matrix_func(self) -> NDArrayf: """Return a transformation matrix from the estimated offsets.""" diff --git a/xdem/coreg/base.py b/xdem/coreg/base.py index ff33e401..aee10c93 100644 --- a/xdem/coreg/base.py +++ b/xdem/coreg/base.py @@ -46,6 +46,7 @@ subdivide_array, subsample_array, ) +from geoutils.misc import resampling_method_from_str from tqdm import tqdm from xdem._typing import MArrayf, NDArrayb, NDArrayf @@ -497,7 +498,76 @@ def _preprocess_coreg_apply( return elev_out, transform, crs -# TODO: Re-structure AffineCoreg apply function and move there? +def _postprocess_coreg_apply_pts( + applied_elev: gpd.GeoDataFrame, +) -> gpd.GeoDataFrame: + # TODO: Convert CRS back if the CRS did not match the one of the fit? + return applied_elev + +def _postprocess_coreg_apply_rst( + elev: NDArrayf | gu.Raster, + applied_elev: NDArrayf, + transform: affine.Affine, + out_transform: affine.Affine, + crs: rio.crs.CRS, + resample: bool, + resampling: rio.warp.Resampling | None = None, +) -> tuple[NDArrayf | gu.Raster, affine.Affine]: + # Ensure the dtype is OK + applied_elev = applied_elev.astype("float32") + + # Set default dst_nodata + if isinstance(elev, gu.Raster): + nodata = elev.nodata + else: + nodata = raster._default_nodata(elev.dtype) + + # Resample the array on the original grid + if resample: + applied_rst = gu.Raster.from_array(applied_elev, out_transform, crs=crs, nodata=nodata) + if not isinstance(elev, gu.Raster): + match_rst = gu.Raster.from_array(elev, transform, crs=crs, nodata=nodata) + else: + match_rst = elev + applied_rst.reproject(match_rst, resampling=resampling) + applied_dem = applied_rst.data + + # Calculate final mask + final_mask = np.logical_or(~np.isfinite(applied_dem), applied_dem == nodata) + + # If the DEM was a masked_array, copy the mask to the new DEM + if isinstance(elev, (np.ma.masked_array, gu.Raster)): + applied_dem = np.ma.masked_array(applied_dem, mask=final_mask) # type: ignore + else: + applied_dem[final_mask] = np.nan + + # If the input was a Raster, returns a Raster, else returns array and transform + if isinstance(elev, gu.Raster): + out_dem = elev.from_array(applied_dem, out_transform, crs, nodata=elev.nodata) + return out_dem, out_transform + else: + return applied_dem, out_transform + +def _postprocess_coreg_apply( + elev: NDArrayf | gu.Raster | gpd.GeoDataFrame, + applied_elev: NDArrayf | gpd.GeoDataFrame, + transform: affine.Affine, + out_transform: affine.Affine, + crs: rio.crs.CRS, + resample: bool, + resampling: rio.warp.Resampling | None = None, +) -> tuple[NDArrayf | gpd.GeoDataFrame, affine.Affine]: + + if isinstance(applied_elev, np.ndarray): + applied_elev, out_transform = _postprocess_coreg_apply_rst(elev=elev, applied_elev=applied_elev, + transform=transform, crs=crs, + out_transform=out_transform, + resample=resample, + resampling=resampling) + else: + applied_elev = _postprocess_coreg_apply_pts(applied_elev) + + return applied_elev, out_transform def deramping( @@ -605,7 +675,7 @@ def invert_matrix(matrix: NDArrayf) -> NDArrayf: return pytransform3d.transformations.invert_transform(checked_matrix) -def apply_matrix( +def apply_matrix_rst( dem: NDArrayf, transform: rio.transform.Affine, matrix: NDArrayf, @@ -615,7 +685,7 @@ def apply_matrix( fill_max_search: int = 0, ) -> NDArrayf: """ - Apply a 3D transformation matrix to a 2.5D DEM. + Apply a 3D affine transformation matrix to a 2.5D DEM. The transformation is applied as a value correction using linear deramping, and 2D image warping. @@ -627,17 +697,17 @@ def apply_matrix( 6. Apply the pixel-wise displacement in 2D using the new pixel coordinates. 7. Apply the same displacement to a nodata-mask to exclude previous and/or new nans. - :param dem: The DEM to transform. - :param transform: The Affine transform object (georeferencing) of the DEM. - :param matrix: A 4x4 transformation matrix to apply to the DEM. - :param invert: Invert the transformation matrix. - :param centroid: The X/Y/Z transformation centroid. Irrelevant for pure translations. Defaults to the midpoint (Z=0) + :param dem: DEM to transform. + :param transform: Geotransform of the DEM. + :param matrix: Affine (4x4) transformation matrix to apply to the DEM. + :param invert: Whether to invert the transformation matrix. + :param centroid: The X/Y/Z transformation centroid. Irrelevant for pure translations. Defaults to the midpoint (Z=0). :param resampling: The resampling method to use. Can be `nearest`, `bilinear`, `cubic` or an integer from 0-5. :param fill_max_search: Set to > 0 value to fill the DEM before applying the transformation, to avoid spreading\ gaps. The DEM will be filled with rasterio.fill.fillnodata with max_search_distance set to fill_max_search.\ This is experimental, use at your own risk ! - :returns: The transformed DEM with NaNs as nodata values (replaces a potential mask of the input `dem`). + :returns: Transformed DEM with NaNs as nodata values (replaces a potential mask of the input `dem`). """ # Parse the resampling argument given. if isinstance(resampling, (int, np.integer)): @@ -743,6 +813,47 @@ def apply_matrix( return transformed_dem +def apply_matrix_pts( + epc: gpd.GeoDataFrame, + matrix: NDArrayf, + invert: bool = False, + centroid: tuple[float, float, float] | None = None, + z_name: str = "z", +) -> gpd.GeoDataFrame: + """ + Apply a 3D affine transformation matrix to a 3D elevation point cloud. + + :param epc: Elevation point cloud. + :param matrix: Affine (4x4) transformation matrix to apply to the DEM. + :param invert: Whether to invert the transformation matrix. + :param centroid: The X/Y/Z transformation centroid. Irrelevant for pure translations. Defaults to the midpoint (Z=0). + :param z_name: + + :return: Transformed elevation point cloud. + """ + + # Invert matrix if required + if invert: + matrix = invert_matrix(matrix) + + # First, get Nx3 array to pass to opencv + points = np.array([epc.geometry.x.values, epc.geometry.y.values, epc[z_name].values]) + + # Transform the points (around the centroid if it exists). + if centroid is not None: + points -= centroid + transformed_points = cv2.perspectiveTransform(points.reshape(1, -1, 3), + matrix.squeeze()) + if centroid is not None: + transformed_points += centroid + + # Finally, transform back to a new GeoDataFrame + transformed_epc = epc.copy() + transformed_epc.geometry.x.values = points[0] + transformed_epc.geometry.y.values = points[1] + transformed_epc[z_name].values = points[2] + + return transformed_epc ########################################### # Generic coregistration processing classes @@ -775,6 +886,7 @@ class CoregDict(TypedDict, total=False): # Affine + BiasCorr classes subsample: int | float + subsample_final: int random_state: np.random.RandomState | np.random.Generator | int | None # BiasCorr classes generic metadata @@ -887,6 +999,9 @@ def _get_subsample_on_valid_mask(self, valid_mask: NDArrayb, verbose: bool = Fal ) ) + # Write final subsample to class + self._meta["subsample_final"] = np.count_nonzero(subsample_mask) + return subsample_mask def fit( @@ -959,8 +1074,8 @@ def fit( ) main_args = { - "ref_dem": ref_dem, - "tba_dem": tba_dem, + "ref_elev": ref_dem, + "tba_elev": tba_dem, "inlier_mask": inlier_mask, "transform": transform, "crs": crs, @@ -979,7 +1094,8 @@ def fit( main_args.update({"bias_vars": bias_vars}) - # Run the associated fitting function + # Run the associated fitting function, which has fallback logic for "raster-raster", "raster-point" or + # "point-point" depending on what is available for a certain Coreg function self._fit_func( **main_args, **kwargs, @@ -990,53 +1106,6 @@ def fit( return self - def residuals( - self, - reference_elev: NDArrayf, - to_be_aligned_elev: NDArrayf, - inlier_mask: NDArrayb | None = None, - transform: rio.transform.Affine | None = None, - crs: rio.crs.CRS | None = None, - subsample: float | int = 1.0, - random_state: None | np.random.RandomState | np.random.Generator | int = None, - ) -> NDArrayf: - """ - Calculate the residual offsets (the difference) between two DEMs after applying the transformation. - - :param reference_elev: 2D array of elevation values acting reference. - :param to_be_aligned_elev: 2D array of elevation values to be aligned. - :param inlier_mask: Optional. 2D boolean array of areas to include in the analysis (inliers=True). - :param transform: Optional. Transform of the reference_dem. Mandatory in some cases. - :param crs: Optional. CRS of the reference_dem. Mandatory in some cases. - :param subsample: Subsample the input to increase performance. <1 is parsed as a fraction. >1 is a pixel count. - :param random_state: Random state or seed number to use for calculations (to fix random sampling during testing) - - :returns: A 1D array of finite residuals. - """ - - # Apply the transformation to the dem to be aligned - aligned_dem = self.apply(to_be_aligned_elev, transform=transform, crs=crs)[0] - - # Pre-process the inputs, by reprojecting and subsampling - ref_dem, align_dem, inlier_mask, transform, crs = _preprocess_coreg_fit( - reference_elev=reference_elev, - to_be_aligned_elev=to_be_aligned_elev, - inlier_mask=inlier_mask, - transform=transform, - crs=crs, - ) - - # Calculate the DEM difference - diff = ref_dem - align_dem - - # Sometimes, the float minimum (for float32 = -3.4028235e+38) is returned. This and inf should be excluded. - full_mask = np.isfinite(diff) - if "float" in str(diff.dtype): - full_mask[(diff == np.finfo(diff.dtype).min) | np.isinf(diff)] = False - - # Return the difference values within the full inlier mask - return diff[full_mask] - @overload def apply( self, @@ -1045,6 +1114,7 @@ def apply( resample: bool = True, transform: rio.transform.Affine | None = None, crs: rio.crs.CRS | None = None, + z_name: str = "z", **kwargs: Any, ) -> tuple[MArrayf, rio.transform.Affine]: ... @@ -1057,6 +1127,7 @@ def apply( resample: bool = True, transform: rio.transform.Affine | None = None, crs: rio.crs.CRS | None = None, + z_name: str = "z", **kwargs: Any, ) -> tuple[NDArrayf, rio.transform.Affine]: ... @@ -1069,6 +1140,7 @@ def apply( resample: bool = True, transform: rio.transform.Affine | None = None, crs: rio.crs.CRS | None = None, + z_name: str = "z", **kwargs: Any, ) -> RasterType | gpd.GeoDataFrame: ... @@ -1078,8 +1150,10 @@ def apply( elev: RasterType | NDArrayf | MArrayf | gpd.GeoDataFrame, bias_vars: dict[str, NDArrayf | MArrayf | RasterType] | None = None, resample: bool = True, + resampling: str | rio.warp.Resampling = "bilinear", transform: rio.transform.Affine | None = None, crs: rio.crs.CRS | None = None, + z_name: str = "z", **kwargs: Any, ) -> RasterType | tuple[NDArrayf, rio.transform.Affine] | tuple[MArrayf, rio.transform.Affine]: """ @@ -1089,11 +1163,11 @@ def apply( :param bias_vars: Only for some bias correction classes. 2D array of bias variables used. :param resample: If set to True, will reproject output Raster on the same grid as input. Otherwise, \ only the transform might be updated and no resampling is done. + :param resampling: Resampling method if resample is used. Defaults to "bilinear". :param transform: Geotransform of the elevation, only if provided as 2D array. :param crs: CRS of elevation, only if provided as 2D array. + :param z_name: Column name to use as elevation, only for point elevation data passed as geodataframe. :param kwargs: Any optional arguments to be passed to either self._apply_rst or apply_matrix. - Kwarg `resampling` can be set to any rio.warp.Resampling to use a different resampling in case \ - `resample` is True, default is bilinear. :returns: The transformed DEM. """ @@ -1102,7 +1176,7 @@ def apply( elev_array, transform, crs = _preprocess_coreg_apply(elev=elev, transform=transform, crs=crs) - main_args = {"dem": elev_array, "transform": transform, "crs": crs} + main_args = {"elev": elev_array, "transform": transform, "crs": crs, "resample": resample, "z_name": z_name} # If bias_vars are defined, update dictionary content to array if bias_vars is not None: @@ -1115,80 +1189,69 @@ def apply( main_args.update({"bias_vars": bias_vars}) - # See if a _apply_rst or _apply_pts exists - try: - # arg `resample` must be passed to _apply_rst, otherwise will be overwritten in CoregPipeline - kwargs["resample"] = resample - - # Run the associated apply function - applied_dem, out_transform = self._apply_func( - **main_args, **kwargs - ) # pylint: disable=assignment-from-no-return - - # If it doesn't exist, use apply_matrix() - except NotImplementedError: - - # In this case, resampling is necessary - if not resample: - raise NotImplementedError(f"Option `resample=False` not implemented for coreg method {self.__class__}") - kwargs.pop("resample") # Need to removed before passing to apply_matrix - - if self.is_affine: # This only works on it's affine, however. - - # Apply the matrix around the centroid (if defined, otherwise just from the center). - applied_dem = apply_matrix( - elev, - transform=transform, - matrix=self.to_matrix(), - centroid=self._meta.get("centroid"), - **kwargs, - ) - out_transform = transform - else: - raise ValueError("Coreg method is non-rigid but has no implemented _apply_rst") + # Call _apply_func to choose method depending on point/raster input and if specific apply method exists + applied_elev, out_transform = self._apply_func(**main_args, **kwargs) - # Ensure the dtype is OK - applied_dem = applied_dem.astype("float32") + # Define resampling + resampling = resampling if isinstance(resampling, rio.warp.Resampling) else resampling_method_from_str(resampling) - # Set default dst_nodata - if isinstance(elev, gu.Raster): - dst_nodata = elev.nodata + # Post-process output depending on input type + applied_elev, out_transform = _postprocess_coreg_apply(elev=elev, applied_elev=applied_elev, transform=transform, + out_transform=out_transform, crs=crs, resample=resample, + resampling=resampling) + + # Only return object if raster or geodataframe, also return transform if object was an array + if isinstance(applied_elev, (gu.Raster, gpd.GeoDataFrame)): + return applied_elev else: - dst_nodata = raster._default_nodata(applied_dem.dtype) - - # Resample the array on the original grid - if resample: - # Set default resampling method if not specified in kwargs - resampling = kwargs.get("resampling", rio.warp.Resampling.bilinear) - if not isinstance(resampling, rio.warp.Resampling): - raise ValueError("`resampling` must be a rio.warp.Resampling algorithm") - - applied_dem, out_transform = rio.warp.reproject( - applied_dem, - destination=applied_dem, - src_transform=out_transform, - dst_transform=transform, - src_crs=crs, - dst_crs=crs, - resampling=resampling, - dst_nodata=dst_nodata, - ) + return applied_elev, out_transform - # Calculate final mask - final_mask = np.logical_or(~np.isfinite(applied_dem), applied_dem == dst_nodata) + def residuals( + self, + reference_elev: NDArrayf, + to_be_aligned_elev: NDArrayf, + inlier_mask: NDArrayb | None = None, + transform: rio.transform.Affine | None = None, + crs: rio.crs.CRS | None = None, + subsample: float | int = 1.0, + random_state: None | np.random.RandomState | np.random.Generator | int = None, + ) -> NDArrayf: + """ + Calculate the residual offsets (the difference) between two DEMs after applying the transformation. - # If the DEM was a masked_array, copy the mask to the new DEM - if isinstance(elev, (np.ma.masked_array, gu.Raster)): - applied_dem = np.ma.masked_array(applied_dem, mask=final_mask) # type: ignore - else: - applied_dem[final_mask] = np.nan + :param reference_elev: 2D array of elevation values acting reference. + :param to_be_aligned_elev: 2D array of elevation values to be aligned. + :param inlier_mask: Optional. 2D boolean array of areas to include in the analysis (inliers=True). + :param transform: Optional. Transform of the reference_dem. Mandatory in some cases. + :param crs: Optional. CRS of the reference_dem. Mandatory in some cases. + :param subsample: Subsample the input to increase performance. <1 is parsed as a fraction. >1 is a pixel count. + :param random_state: Random state or seed number to use for calculations (to fix random sampling during testing) - # If the input was a Raster, returns a Raster, else returns array and transform - if isinstance(elev, gu.Raster): - out_dem = elev.from_array(applied_dem, out_transform, crs, nodata=elev.nodata) - return out_dem - else: - return applied_dem, out_transform + :returns: A 1D array of finite residuals. + """ + + # Apply the transformation to the dem to be aligned + aligned_dem = self.apply(to_be_aligned_elev, transform=transform, crs=crs)[0] + + # Pre-process the inputs, by reprojecting and subsampling + ref_dem, align_dem, inlier_mask, transform, crs = _preprocess_coreg_fit( + reference_elev=reference_elev, + to_be_aligned_elev=to_be_aligned_elev, + inlier_mask=inlier_mask, + transform=transform, + crs=crs, + ) + + # Calculate the DEM difference + diff = ref_dem - align_dem + + # Sometimes, the float minimum (for float32 = -3.4028235e+38) is returned. This and inf should be excluded. + full_mask = np.isfinite(diff) + if "float" in str(diff.dtype): + full_mask[(diff == np.finfo(diff.dtype).min) | np.isinf(diff)] = False + + # Return the difference values within the full inlier mask + return diff[full_mask] @overload def error( @@ -1294,9 +1357,9 @@ def _fit_func( """ # Determine if input is raster-raster, raster-point or point-point - if all(isinstance(dem, np.ndarray) for dem in (kwargs["ref_dem"], kwargs["tba_dem"])): + if all(isinstance(dem, np.ndarray) for dem in (kwargs["ref_elev"], kwargs["tba_elev"])): rop = "r-r" - elif all(isinstance(dem, gpd.GeoDataFrame) for dem in (kwargs["ref_dem"], kwargs["tba_dem"])): + elif all(isinstance(dem, gpd.GeoDataFrame) for dem in (kwargs["ref_elev"], kwargs["tba_elev"])): rop = "p-p" else: rop = "r-p" @@ -1352,79 +1415,124 @@ def _fit_func( else: raise NotImplementedError(f"No point-point method found for coregistration {self.__class__.__name__}.") - def _apply_func(self, **kwargs: Any): - """Distribute to _apply_rst and _apply_pts based on input and method availability""" + def _apply_func(self, **kwargs: Any) -> tuple[np.ndarray | gpd.GeoDataFrame, affine.Affine]: + """Distribute to _apply_rst and _apply_pts based on input and method availability.""" + + # If input is a raster + if isinstance(kwargs["elev"], np.ndarray): + + # See if a _apply_rst exists + try: + # Run the associated apply function + applied_elev, out_transform = self._apply_rst(**kwargs) # pylint: disable=assignment-from-no-return + + # If it doesn't exist, use apply_matrix() + except NotImplementedError: + + if self.is_affine: # This only works for affine, however. + + # In this case, resampling is necessary + if not kwargs["resample"]: + raise NotImplementedError( + f"Option `resample=False` not implemented for coreg method {self.__class__}") + kwargs.pop("resample") # Need to removed before passing to apply_matrix + + # Apply the matrix around the centroid (if defined, otherwise just from the center). + applied_elev = apply_matrix_rst( + dem=kwargs["elev"], + transform=kwargs["transform"], + matrix=self.to_matrix(), + centroid=self._meta.get("centroid"), + **kwargs, + ) + out_transform = kwargs["transform"] + else: + raise ValueError("Cannot transform, Coreg method is non-affine and has no implemented _apply_rst.") - if isinstance(kwargs["dem"], np.ndarray): - return self._apply_rst(**kwargs) + # If input is a point else: - return self._apply_pts(**kwargs) + # See if an _apply_pts_func exists + try: + applied_elev = self._apply_pts(**kwargs) + + # If it doesn't exist, use opencv's perspectiveTransform + except NotImplementedError: + if self.is_affine: # This only works on it's rigid, however. + + applied_elev = apply_matrix_pts(epc=kwargs["elev"], + matrix=self.to_matrix(), + centroid=self._meta.get("centroid"), + z_name=kwargs["z_name"]) + + else: + raise ValueError("Cannot transform, Coreg method is non-affine and has no implemented _apply_pts.") + + return applied_elev, out_transform def _fit_rst_rst(self, - ref_dem: NDArrayf, - tba_dem: NDArrayf, - inlier_mask: NDArrayb, - transform: rio.transform.Affine, - crs: rio.crs.CRS, - weights: NDArrayf | None, - bias_vars: dict[str, NDArrayf] | None = None, - verbose: bool = False, - **kwargs: Any, - ) -> None: - # FOR DEVELOPERS: This function needs to be implemented. + ref_elev: NDArrayf, + tba_elev: NDArrayf, + inlier_mask: NDArrayb, + transform: rio.transform.Affine, + crs: rio.crs.CRS, + weights: NDArrayf | None, + bias_vars: dict[str, NDArrayf] | None = None, + verbose: bool = False, + **kwargs: Any, + ) -> None: + # FOR DEVELOPERS: This function needs to be implemented by subclassing. raise NotImplementedError("This step has to be implemented by subclassing.") def _fit_rst_pts(self, - ref_dem: NDArrayf, - tba_dem: NDArrayf, - inlier_mask: NDArrayb, - transform: rio.transform.Affine, - crs: rio.crs.CRS, - weights: NDArrayf | None, - bias_vars: dict[str, NDArrayf] | None = None, - verbose: bool = False, - **kwargs: Any, - ) -> None: - # FOR DEVELOPERS: This function needs to be implemented. + ref_elev: NDArrayf, + tba_elev: NDArrayf, + inlier_mask: NDArrayb, + transform: rio.transform.Affine, + crs: rio.crs.CRS, + weights: NDArrayf | None, + bias_vars: dict[str, NDArrayf] | None = None, + verbose: bool = False, + **kwargs: Any, + ) -> None: + # FOR DEVELOPERS: This function needs to be implemented by subclassing. raise NotImplementedError("This step has to be implemented by subclassing.") def _fit_pts_pts(self, - ref_dem: NDArrayf, - tba_dem: NDArrayf, - inlier_mask: NDArrayb, - transform: rio.transform.Affine, - crs: rio.crs.CRS, - weights: NDArrayf | None, - bias_vars: dict[str, NDArrayf] | None = None, - verbose: bool = False, - **kwargs: Any, - ) -> None: - # FOR DEVELOPERS: This function needs to be implemented. + ref_elev: gpd.GeoDataFrame, + tba_elev: gpd.GeoDataFrame, + inlier_mask: NDArrayb, + transform: rio.transform.Affine, + crs: rio.crs.CRS, + weights: NDArrayf | None, + bias_vars: dict[str, NDArrayf] | None = None, + verbose: bool = False, + **kwargs: Any, + ) -> None: + # FOR DEVELOPERS: This function needs to be implemented by subclassing. raise NotImplementedError("This step has to be implemented by subclassing.") def _apply_rst( self, - dem: NDArrayf, + elev: NDArrayf, transform: rio.transform.Affine, crs: rio.crs.CRS, bias_vars: dict[str, NDArrayf] | None = None, **kwargs: Any, ) -> tuple[NDArrayf, rio.transform.Affine]: - # FOR DEVELOPERS: This function is only needed for non-rigid transforms. - raise NotImplementedError("This should have been implemented by subclassing") + # FOR DEVELOPERS: This function needs to be implemented by subclassing. + raise NotImplementedError("This should have been implemented by subclassing.") def _apply_pts( self, - dem: NDArrayf, - transform: rio.transform.Affine, - crs: rio.crs.CRS, + elev: gpd.GeoDataFrame, + z_name: str = "z", bias_vars: dict[str, NDArrayf] | None = None, **kwargs: Any, - ) -> tuple[NDArrayf, rio.transform.Affine]: + ) -> gpd.GeoDataFrame: - # FOR DEVELOPERS: This function is only needed for non-rigid transforms. - raise NotImplementedError("This should have been implemented by subclassing") + # FOR DEVELOPERS: This function needs to be implemented by subclassing. + raise NotImplementedError("This should have been implemented by subclassing.") class CoregPipeline(Coreg): @@ -1574,14 +1682,14 @@ def fit( # TODO: Override parent method into an "apply()"? def _apply_rst( self, - dem: NDArrayf, + elev: NDArrayf, transform: rio.transform.Affine, crs: rio.crs.CRS, bias_vars: dict[str, NDArrayf] | None = None, **kwargs: Any, ) -> tuple[NDArrayf, rio.transform.Affine]: """Apply the coregistration steps sequentially to a DEM.""" - dem_mod = dem.copy() + dem_mod = elev.copy() out_transform = copy.copy(transform) for i, coreg in enumerate(self.pipeline): @@ -1968,15 +2076,15 @@ def subdivide_array(self, shape: tuple[int, ...]) -> NDArrayf: def _apply_rst( self, - dem: NDArrayf, + elev: NDArrayf, transform: rio.transform.Affine, crs: rio.crs.CRS, bias_vars: dict[str, NDArrayf] | None = None, **kwargs: Any, ) -> tuple[NDArrayf, rio.transform.Affine]: - if np.count_nonzero(np.isfinite(dem)) == 0: - return dem, transform + if np.count_nonzero(np.isfinite(elev)) == 0: + return elev, transform # Other option than resample=True is not implemented for this case if "resample" in kwargs and kwargs["resample"] is not True: @@ -1984,9 +2092,9 @@ def _apply_rst( points = self.to_points() - bounds, resolution = _transform_to_bounds_and_res(dem.shape, transform) + bounds, resolution = _transform_to_bounds_and_res(elev.shape, transform) - representative_height = np.nanmean(dem) + representative_height = np.nanmean(elev) edges_source = np.array( [ [bounds.left + resolution / 2, bounds.top - resolution / 2, representative_height], @@ -2001,7 +2109,7 @@ def _apply_rst( all_points = np.append(points, edges, axis=0) warped_dem = warp_dem( - dem=dem, + dem=elev, transform=transform, source_coords=all_points[:, :, 0], destination_coords=all_points[:, :, 1], diff --git a/xdem/coreg/biascorr.py b/xdem/coreg/biascorr.py index 4db34776..4844cbd3 100644 --- a/xdem/coreg/biascorr.py +++ b/xdem/coreg/biascorr.py @@ -141,8 +141,8 @@ def __init__( def _fit_rst_rst( # type: ignore self, - ref_dem: NDArrayf, - tba_dem: NDArrayf, + ref_elev: NDArrayf, + tba_elev: NDArrayf, inlier_mask: NDArrayb, transform: rio.transform.Affine, # Never None thanks to Coreg.fit() pre-process crs: rio.crs.CRS, # Never None thanks to Coreg.fit() pre-process @@ -171,7 +171,7 @@ def _fit_rst_rst( # type: ignore # Compute difference and mask of valid data # TODO: Move the check up to Coreg.fit()? - diff = ref_dem - tba_dem + diff = ref_elev - tba_elev valid_mask = np.logical_and.reduce( (inlier_mask, np.isfinite(diff), *(np.isfinite(var) for var in bias_vars.values())) ) @@ -319,7 +319,7 @@ def _fit_rst_rst( # type: ignore def _apply_rst( # type: ignore self, - dem: NDArrayf, + elev: NDArrayf, transform: rio.transform.Affine, # Never None thanks to Coreg.fit() pre-process crs: rio.crs.CRS, # Never None thanks to Coreg.fit() pre-process bias_vars: None | dict[str, NDArrayf] = None, @@ -362,7 +362,7 @@ def _apply_rst( # type: ignore statistic=self._meta["bin_statistic"], ) - dem_corr = dem + corr + dem_corr = elev + corr return dem_corr, transform @@ -414,8 +414,8 @@ def __init__( def _fit_rst_rst( # type: ignore self, - ref_dem: NDArrayf, - tba_dem: NDArrayf, + ref_elev: NDArrayf, + tba_elev: NDArrayf, inlier_mask: NDArrayb, bias_vars: dict[str, NDArrayf], transform: rio.transform.Affine, # Never None thanks to Coreg.fit() pre-process @@ -434,8 +434,8 @@ def _fit_rst_rst( # type: ignore ) super()._fit_rst_rst( - ref_dem=ref_dem, - tba_dem=tba_dem, + ref_elev=ref_elev, + tba_elev=tba_elev, inlier_mask=inlier_mask, bias_vars=bias_vars, transform=transform, @@ -489,8 +489,8 @@ def __init__( def _fit_rst_rst( # type: ignore self, - ref_dem: NDArrayf, - tba_dem: NDArrayf, + ref_elev: NDArrayf, + tba_elev: NDArrayf, inlier_mask: NDArrayb, bias_vars: dict[str, NDArrayf], transform: rio.transform.Affine, # Never None thanks to Coreg.fit() pre-process @@ -508,8 +508,8 @@ def _fit_rst_rst( # type: ignore ) super()._fit_rst_rst( - ref_dem=ref_dem, - tba_dem=tba_dem, + ref_elev=ref_elev, + tba_elev=tba_elev, inlier_mask=inlier_mask, bias_vars=bias_vars, transform=transform, @@ -565,8 +565,8 @@ def __init__( def _fit_rst_rst( # type: ignore self, - ref_dem: NDArrayf, - tba_dem: NDArrayf, + ref_elev: NDArrayf, + tba_elev: NDArrayf, inlier_mask: NDArrayb, bias_vars: dict[str, NDArrayf], # Never None thanks to BiasCorr.fit() pre-process transform: rio.transform.Affine, # Never None thanks to Coreg.fit() pre-process @@ -581,8 +581,8 @@ def _fit_rst_rst( # type: ignore raise ValueError('At least three variables have to be provided through the argument "bias_vars".') super()._fit_rst_rst( - ref_dem=ref_dem, - tba_dem=tba_dem, + ref_elev=ref_elev, + tba_elev=tba_elev, inlier_mask=inlier_mask, bias_vars=bias_vars, transform=transform, @@ -631,8 +631,8 @@ def __init__( def _fit_rst_rst( # type: ignore self, - ref_dem: NDArrayf, - tba_dem: NDArrayf, + ref_elev: NDArrayf, + tba_elev: NDArrayf, inlier_mask: NDArrayb, transform: rio.transform.Affine, crs: rio.crs.CRS, @@ -646,7 +646,7 @@ def _fit_rst_rst( # type: ignore print("Estimating rotated coordinates.") x, _ = gu.raster.get_xy_rotated( - raster=gu.Raster.from_array(data=ref_dem, crs=crs, transform=transform), + raster=gu.Raster.from_array(data=ref_elev, crs=crs, transform=transform), along_track_angle=self._meta["angle"], ) @@ -657,8 +657,8 @@ def _fit_rst_rst( # type: ignore kwargs.update({"hop_length": average_res}) super()._fit_rst_rst( - ref_dem=ref_dem, - tba_dem=tba_dem, + ref_elev=ref_elev, + tba_elev=tba_elev, inlier_mask=inlier_mask, bias_vars={"angle": x}, transform=transform, @@ -670,7 +670,7 @@ def _fit_rst_rst( # type: ignore def _apply_rst( self, - dem: NDArrayf, + elev: NDArrayf, transform: rio.transform.Affine, crs: rio.crs.CRS, bias_vars: None | dict[str, NDArrayf] = None, @@ -679,11 +679,11 @@ def _apply_rst( # Define the coordinates for applying the correction x, _ = gu.raster.get_xy_rotated( - raster=gu.Raster.from_array(data=dem, crs=crs, transform=transform), + raster=gu.Raster.from_array(data=elev, crs=crs, transform=transform), along_track_angle=self._meta["angle"], ) - return super()._apply_rst(dem=dem, transform=transform, crs=crs, bias_vars={"angle": x}, **kwargs) + return super()._apply_rst(elev=elev, transform=transform, crs=crs, bias_vars={"angle": x}, **kwargs) class TerrainBias(BiasCorr1D): @@ -742,8 +742,8 @@ def __init__( def _fit_rst_rst( # type: ignore self, - ref_dem: NDArrayf, - tba_dem: NDArrayf, + ref_elev: NDArrayf, + tba_elev: NDArrayf, inlier_mask: NDArrayb, transform: rio.transform.Affine, crs: rio.crs.CRS, @@ -755,16 +755,16 @@ def _fit_rst_rst( # type: ignore # Derive terrain attribute if self._meta["terrain_attribute"] == "elevation": - attr = ref_dem + attr = ref_elev else: attr = xdem.terrain.get_terrain_attribute( - dem=ref_dem, attribute=self._meta["terrain_attribute"], resolution=(transform[0], abs(transform[4])) + dem=ref_elev, attribute=self._meta["terrain_attribute"], resolution=(transform[0], abs(transform[4])) ) # Run the parent function super()._fit_rst_rst( - ref_dem=ref_dem, - tba_dem=tba_dem, + ref_elev=ref_elev, + tba_elev=tba_elev, inlier_mask=inlier_mask, bias_vars={self._meta["terrain_attribute"]: attr}, transform=transform, @@ -776,7 +776,7 @@ def _fit_rst_rst( # type: ignore def _apply_rst( self, - dem: NDArrayf, + elev: NDArrayf, transform: rio.transform.Affine, crs: rio.crs.CRS, bias_vars: None | dict[str, NDArrayf] = None, @@ -786,14 +786,14 @@ def _apply_rst( if bias_vars is None: # Derive terrain attribute if self._meta["terrain_attribute"] == "elevation": - attr = dem + attr = elev else: attr = xdem.terrain.get_terrain_attribute( - dem=dem, attribute=self._meta["terrain_attribute"], resolution=(transform[0], abs(transform[4])) + dem=elev, attribute=self._meta["terrain_attribute"], resolution=(transform[0], abs(transform[4])) ) bias_vars = {self._meta["terrain_attribute"]: attr} - return super()._apply_rst(dem=dem, transform=transform, crs=crs, bias_vars=bias_vars, **kwargs) + return super()._apply_rst(elev=elev, transform=transform, crs=crs, bias_vars=bias_vars, **kwargs) class Deramp(BiasCorr2D): @@ -841,8 +841,8 @@ def __init__( def _fit_rst_rst( # type: ignore self, - ref_dem: NDArrayf, - tba_dem: NDArrayf, + ref_elev: NDArrayf, + tba_elev: NDArrayf, inlier_mask: NDArrayb, transform: rio.transform.Affine, crs: rio.crs.CRS, @@ -856,11 +856,11 @@ def _fit_rst_rst( # type: ignore p0 = np.ones(shape=((self._meta["poly_order"] + 1) * (self._meta["poly_order"] + 1))) # Coordinates (we don't need the actual ones, just array coordinates) - xx, yy = np.meshgrid(np.arange(0, ref_dem.shape[1]), np.arange(0, ref_dem.shape[0])) + xx, yy = np.meshgrid(np.arange(0, ref_elev.shape[1]), np.arange(0, ref_elev.shape[0])) super()._fit_rst_rst( - ref_dem=ref_dem, - tba_dem=tba_dem, + ref_elev=ref_elev, + tba_elev=tba_elev, inlier_mask=inlier_mask, bias_vars={"xx": xx, "yy": yy}, transform=transform, @@ -873,7 +873,7 @@ def _fit_rst_rst( # type: ignore def _apply_rst( self, - dem: NDArrayf, + elev: NDArrayf, transform: rio.transform.Affine, crs: rio.crs.CRS, bias_vars: None | dict[str, NDArrayf] = None, @@ -881,6 +881,6 @@ def _apply_rst( ) -> tuple[NDArrayf, rio.transform.Affine]: # Define the coordinates for applying the correction - xx, yy = np.meshgrid(np.arange(0, dem.shape[1]), np.arange(0, dem.shape[0])) + xx, yy = np.meshgrid(np.arange(0, elev.shape[1]), np.arange(0, elev.shape[0])) - return super()._apply_rst(dem=dem, transform=transform, crs=crs, bias_vars={"xx": xx, "yy": yy}, **kwargs) + return super()._apply_rst(elev=elev, transform=transform, crs=crs, bias_vars={"xx": xx, "yy": yy}, **kwargs) From b28cee8c0846cc2a71119a03578f2acc3c9e0176 Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Fri, 1 Mar 2024 17:25:27 -0900 Subject: [PATCH 04/54] Incremental commit --- tests/test_coreg/test_affine.py | 10 +- tests/test_coreg/test_base.py | 17 ++- xdem/coreg/affine.py | 231 +++++++++++++++++++------------- xdem/coreg/base.py | 92 ++++++++----- 4 files changed, 208 insertions(+), 142 deletions(-) diff --git a/tests/test_coreg/test_affine.py b/tests/test_coreg/test_affine.py index 55d9cbdd..876e445d 100644 --- a/tests/test_coreg/test_affine.py +++ b/tests/test_coreg/test_affine.py @@ -46,7 +46,6 @@ class TestAffineCoreg: data={"z": points_arr[2, :]}) def test_from_classmethods(self) -> None: - warnings.simplefilter("error") # Check that the from_matrix function works as expected. vshift = 5 @@ -54,7 +53,7 @@ def test_from_classmethods(self) -> None: matrix[2, 3] = vshift coreg_obj = AffineCoreg.from_matrix(matrix) transformed_points = coreg_obj.apply(self.points) - assert transformed_points[0, 2] == vshift + assert all(transformed_points["z"].values == vshift) # Check that the from_translation function works as expected. x_offset = 5 @@ -70,7 +69,6 @@ def test_from_classmethods(self) -> None: raise exception def test_vertical_shift(self) -> None: - warnings.simplefilter("error") # Create a vertical shift correction instance vshiftcorr = coreg.VerticalShift() @@ -89,7 +87,7 @@ def test_vertical_shift(self) -> None: assert matrix[2, 3] == vshift, matrix # Check that the first z coordinate is now the vertical shift - assert vshiftcorr.apply(self.points)[0, 2] == vshiftcorr._meta["vshift"] + assert all(vshiftcorr.apply(self.points)["z"].values == vshiftcorr._meta["vshift"]) # Apply the model to correct the DEM tba_unshifted, _ = vshiftcorr.apply(self.tba.data, transform=self.ref.transform, crs=self.ref.crs) @@ -180,7 +178,6 @@ def test_coreg_example_shift(self, shift_px, coreg_class, points_or_raster, verb For comparison of coreg algorithms: Shift a ref_dem on purpose, e.g. shift_px = (1,1), and then applying coreg to shift it back. """ - warnings.simplefilter("error") res = self.ref.res[0] # shift DEM by shift_px @@ -225,7 +222,6 @@ def test_coreg_example_shift(self, shift_px, coreg_class, points_or_raster, verb raise AssertionError(f"Diffs are too big. east: {best_east_diff:.2f} px, north: {best_north_diff:.2f} px") def test_nuth_kaab(self) -> None: - warnings.simplefilter("error") nuth_kaab = coreg.NuthKaab(max_iterations=10) @@ -271,7 +267,6 @@ def test_nuth_kaab(self) -> None: assert abs((transformed_points[0, 2] - self.points[0, 2]) + vshift) < 0.1 def test_tilt(self) -> None: - warnings.simplefilter("error") # Try a 1st degree deramping. tilt = coreg.Tilt() @@ -294,7 +289,6 @@ def test_tilt(self) -> None: assert np.abs(np.mean(periglacial_offset)) < 0.02 def test_icp_opencv(self) -> None: - warnings.simplefilter("error") # Do a fast and dirty 3 iteration ICP just to make sure it doesn't error out. icp = coreg.ICP(max_iterations=3) diff --git a/tests/test_coreg/test_base.py b/tests/test_coreg/test_base.py index f24987da..21b05b15 100644 --- a/tests/test_coreg/test_base.py +++ b/tests/test_coreg/test_base.py @@ -9,6 +9,7 @@ import geoutils as gu import numpy as np +import geopandas as gpd import pytest import rasterio as rio from geoutils import Raster, Vector @@ -47,7 +48,9 @@ class TestCoregClass: verbose=False, ) # Create some 3D coordinates with Z coordinates being 0 to try the apply functions. - points = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [0, 0, 0, 0]], dtype="float64").T + points_arr = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [0, 0, 0, 0]], dtype="float64").T + points = gpd.GeoDataFrame(geometry=gpd.points_from_xy(x=points_arr[0, :], y=points_arr[1, :], crs=ref.crs), + data={"z": points_arr[2, :]}) def test_init(self) -> None: """Test instantiation of Coreg""" @@ -505,7 +508,9 @@ class TestCoregPipeline: verbose=True, ) # Create some 3D coordinates with Z coordinates being 0 to try the apply functions. - points = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [0, 0, 0, 0]], dtype="float64").T + points_arr = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [0, 0, 0, 0]], dtype="float64").T + points = gpd.GeoDataFrame(geometry=gpd.points_from_xy(x=points_arr[0, :], y=points_arr[1, :], crs=ref.crs), + data={"z": points_arr[2, :]}) @pytest.mark.parametrize("coreg_class", [coreg.VerticalShift, coreg.ICP, coreg.NuthKaab]) # type: ignore def test_copy(self, coreg_class: Callable[[], Coreg]) -> None: @@ -728,14 +733,15 @@ class TestBlockwiseCoreg: verbose=False, ) # Create some 3D coordinates with Z coordinates being 0 to try the apply functions. - points = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [0, 0, 0, 0]], dtype="float64").T + points_arr = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [0, 0, 0, 0]], dtype="float64").T + points = gpd.GeoDataFrame(geometry=gpd.points_from_xy(x=points_arr[0, :], y=points_arr[1, :], crs=ref.crs), + data={"z": points_arr[2, :]}) @pytest.mark.parametrize( "pipeline", [coreg.VerticalShift(), coreg.VerticalShift() + coreg.NuthKaab()] ) # type: ignore @pytest.mark.parametrize("subdivision", [4, 10]) # type: ignore def test_blockwise_coreg(self, pipeline: Coreg, subdivision: int) -> None: - warnings.simplefilter("error") blockwise = coreg.BlockwiseCoreg(step=pipeline, subdivision=subdivision) @@ -782,7 +788,6 @@ def test_blockwise_coreg(self, pipeline: Coreg, subdivision: int) -> None: def test_blockwise_coreg_large_gaps(self) -> None: """Test BlockwiseCoreg when large gaps are encountered, e.g. around the frame of a rotated DEM.""" - warnings.simplefilter("error") reference_dem = self.ref.reproject(crs="EPSG:3413", res=self.ref.res, resampling="bilinear") dem_to_be_aligned = self.tba.reproject(ref=reference_dem, resampling="bilinear") @@ -823,7 +828,6 @@ def test_blockwise_coreg_large_gaps(self) -> None: def test_apply_matrix() -> None: - warnings.simplefilter("error") ref, tba, outlines = load_examples() # Load example reference, to-be-aligned and mask. ref_arr = gu.raster.get_array_and_mask(ref)[0] @@ -936,7 +940,6 @@ def rotation_matrix(rotation: float = 30) -> NDArrayf: def test_warp_dem() -> None: """Test that the warp_dem function works expectedly.""" - warnings.simplefilter("error") small_dem = np.zeros((5, 10), dtype="float32") small_transform = rio.transform.from_origin(0, 5, 1, 1) diff --git a/xdem/coreg/affine.py b/xdem/coreg/affine.py index 68dac6f1..90e6e276 100644 --- a/xdem/coreg/affine.py +++ b/xdem/coreg/affine.py @@ -5,6 +5,8 @@ import warnings from typing import Any, Callable, TypeVar +import xdem.coreg.base + try: import cv2 @@ -420,7 +422,7 @@ def _apply_pts( """Apply the VerticalShift function to a set of points.""" dem_copy = elev.copy() - dem_copy[z_name].values += self._meta["vshift"] + dem_copy[z_name] += self._meta["vshift"] return dem_copy def _to_matrix_func(self) -> NDArrayf: @@ -502,48 +504,50 @@ def _fit_rst_rst( ) subsample_mask = self._get_subsample_on_valid_mask(valid_mask=valid_mask) - ref_pts = pd.DataFrame( - np.dstack( - [ - x_coords[subsample_mask], - y_coords[subsample_mask], - ref_elev[subsample_mask], - normal_east[subsample_mask], - normal_north[subsample_mask], - normal_up[subsample_mask], - ] - ).squeeze(), - columns=["E", "N", "z", "nx", "ny", "nz"], - ) + ref_pts = gpd.GeoDataFrame(geometry=gpd.points_from_xy(x=x_coords[subsample_mask], + y=y_coords[subsample_mask], + crs=None), + data={"z": ref_elev[subsample_mask], "nx": normal_east[subsample_mask], + "ny": normal_north[subsample_mask], "nz": normal_up[subsample_mask]}) - self._fit_rst_pts(ref_elev=ref_pts, tba_elev=tba_elev, transform=transform, verbose=verbose, z_name="z") + self._fit_rst_pts(ref_elev=ref_pts, tba_elev=tba_elev, inlier_mask=inlier_mask, + transform=transform, crs=crs, verbose=verbose, z_name="z") def _fit_rst_pts( self, - ref_elev: pd.DataFrame, - tba_elev: RasterType | NDArrayf, - transform: rio.transform.Affine | None, + ref_elev: gpd.GeoDataFrame | NDArrayf, + tba_elev: gpd.GeoDataFrame | NDArrayf, + inlier_mask: NDArrayb, + transform: rio.transform.Affine, + crs: rio.crs.CRS, + weights: NDArrayf | None = None, verbose: bool = False, z_name: str = "z", **kwargs: Any, ) -> None: - if transform is None and hasattr(tba_elev, "transform"): - transform = tba_elev.transform # type: ignore - if hasattr(tba_elev, "transform"): - tba_elev = tba_elev.data + # Check which one is reference + if isinstance(ref_elev, gpd.GeoDataFrame): + point_elev = ref_elev + rst_elev = tba_elev + ref = "point" + else: + point_elev = tba_elev + rst_elev = ref_elev + ref = "raster" + + # Pre-process point data + point_elev = point_elev.dropna(how="any", subset=[z_name]) + bounds, resolution = _transform_to_bounds_and_res(rst_elev.shape, transform) - ref_elev = ref_elev.dropna(how="any", subset=["E", "N", z_name]) - bounds, resolution = _transform_to_bounds_and_res(tba_elev.shape, transform) - points: dict[str, NDArrayf] = {} # Generate the x and y coordinates for the TBA DEM - x_coords, y_coords = _get_x_and_y_coords(tba_elev.shape, transform) + x_coords, y_coords = _get_x_and_y_coords(rst_elev.shape, transform) centroid = (np.mean([bounds.left, bounds.right]), np.mean([bounds.bottom, bounds.top]), 0.0) # Subtract by the bounding coordinates to avoid float32 rounding errors. x_coords -= centroid[0] y_coords -= centroid[1] - gradient_x, gradient_y = np.gradient(tba_elev) + gradient_x, gradient_y = np.gradient(rst_elev) # This CRS is temporary and doesn't affect the result. It's just needed for Raster instantiation. dem_kwargs = {"transform": transform, "crs": rio.CRS.from_epsg(32633), "nodata": -9999.0} @@ -551,30 +555,35 @@ def _fit_rst_pts( normal_north = Raster.from_array(np.sin(np.arctan(gradient_x / resolution)), **dem_kwargs) normal_up = Raster.from_array(1 - np.linalg.norm([normal_east.data, normal_north.data], axis=0), **dem_kwargs) - valid_mask = ~np.isnan(tba_elev) & ~np.isnan(normal_east.data) & ~np.isnan(normal_north.data) + valid_mask = ~np.isnan(rst_elev) & ~np.isnan(normal_east.data) & ~np.isnan(normal_north.data) - points["tba"] = np.dstack( + points: dict[str, NDArrayf] = {} + points["raster"] = np.dstack( [ x_coords[valid_mask], y_coords[valid_mask], - tba_elev[valid_mask], + rst_elev[valid_mask], normal_east.data[valid_mask], normal_north.data[valid_mask], normal_up.data[valid_mask], ] ).squeeze() - if any(col not in ref_elev for col in ["nx", "ny", "nz"]): + # TODO: Should be a way to not duplicate this column and just feed it directly + point_elev["E"] = point_elev.geometry.x.values + point_elev["N"] = point_elev.geometry.y.values + + if any(col not in point_elev for col in ["nx", "ny", "nz"]): for key, raster in [("nx", normal_east), ("ny", normal_north), ("nz", normal_up)]: raster.tags["AREA_OR_POINT"] = "Area" - ref_elev[key] = raster.interp_points( - ref_elev[["E", "N"]].values, shift_area_or_point=True, mode="nearest" + point_elev[key] = raster.interp_points( + point_elev[["E", "N"]].values, shift_area_or_point=True, ) - ref_elev["E"] -= centroid[0] - ref_elev["N"] -= centroid[1] + point_elev["E"] -= centroid[0] + point_elev["N"] -= centroid[1] - points["ref"] = ref_elev[["E", "N", z_name, "nx", "ny", "nz"]].values + points["point"] = point_elev[["E", "N", z_name, "nx", "ny", "nz"]].values for key in points: points[key] = points[key][~np.any(np.isnan(points[key]), axis=1)].astype("float32") @@ -584,7 +593,8 @@ def _fit_rst_pts( if verbose: print("Running ICP...") try: - _, residual, matrix = icp.registerModelToScene(points["tba"], points["ref"]) + # Use points as reference + _, residual, matrix = icp.registerModelToScene(points["raster"], points["point"]) except cv2.error as exception: if "(expected: 'n > 0'), where" not in str(exception): raise exception @@ -595,6 +605,11 @@ def _fit_rst_pts( f"'dem_to_be_aligned' had {points['tba'].size} valid points." ) + # If raster was reference, invert the matrix + # TODO: Move matrix/invert_matrix to affine module? + if ref == "raster": + matrix = xdem.coreg.base.invert_matrix(matrix) + if verbose: print("ICP finished") @@ -852,12 +867,13 @@ def _fit_rst_rst( def _fit_rst_pts( self, - ref_elev: pd.DataFrame, - tba_elev: RasterType, - transform: rio.transform.Affine | None, - weights: NDArrayf | None, + ref_elev: gpd.GeoDataFrame | NDArrayf, + tba_elev: gpd.GeoDataFrame | NDArrayf, + inlier_mask: NDArrayb, + transform: rio.transform.Affine, + crs: rio.crs.CRS, + weights: NDArrayf | None = None, verbose: bool = False, - order: int = 1, z_name: str = "z", ) -> None: """ @@ -869,13 +885,24 @@ def _fit_rst_pts( """ + # Check which one is reference + if isinstance(ref_elev, gpd.GeoDataFrame): + point_elev = ref_elev + rst_elev = tba_elev + ref = "point" + else: + point_elev = tba_elev + rst_elev = ref_elev + ref = "raster" + if verbose: print("Running Nuth and Kääb (2011) coregistration. Shift pts instead of shifting dem") - tba_arr, _ = get_array_and_mask(tba_elev) + rst_elev = Raster.from_array(rst_elev, transform=transform, crs=crs) + tba_arr, _ = get_array_and_mask(rst_elev) - resolution = tba_elev.res[0] - x_coords, y_coords = (ref_elev["E"].values, ref_elev["N"].values) + bounds, resolution = _transform_to_bounds_and_res(ref_elev.shape, transform) + x_coords, y_coords = (point_elev["E"].values, point_elev["N"].values) # Assume that the coordinates represent the center of a theoretical pixel. # The raster sampling is done in the upper left corner, meaning all point have to be respectively shifted @@ -886,7 +913,7 @@ def _fit_rst_pts( # This needs to be consistent, so it's cardcoded here area_or_point = "Area" # Make a new DEM which will be modified inplace - aligned_dem = tba_elev.copy() + aligned_dem = rst_elev.copy() aligned_dem.tags["AREA_OR_POINT"] = area_or_point # Calculate slope and aspect maps from the reference DEM @@ -894,23 +921,23 @@ def _fit_rst_pts( print(" Calculate slope and aspect") slope, aspect = _calculate_slope_and_aspect_nuthkaab(tba_arr) - slope_r = tba_elev.copy(new_array=np.ma.masked_array(slope[None, :, :], mask=~np.isfinite(slope[None, :, :]))) + slope_r = rst_elev.copy(new_array=np.ma.masked_array(slope[None, :, :], mask=~np.isfinite(slope[None, :, :]))) slope_r.tags["AREA_OR_POINT"] = area_or_point - aspect_r = tba_elev.copy(new_array=np.ma.masked_array(aspect[None, :, :], mask=~np.isfinite(aspect[None, :, :]))) + aspect_r = rst_elev.copy(new_array=np.ma.masked_array(aspect[None, :, :], mask=~np.isfinite(aspect[None, :, :]))) aspect_r.tags["AREA_OR_POINT"] = area_or_point # Initialise east and north pixel offset variables (these will be incremented up and down) offset_east, offset_north, vshift = 0.0, 0.0, 0.0 # Calculate initial DEM statistics - slope_pts = slope_r.interp_points(pts, mode="nearest", shift_area_or_point=True) - aspect_pts = aspect_r.interp_points(pts, mode="nearest", shift_area_or_point=True) - tba_pts = aligned_dem.interp_points(pts, mode="nearest", shift_area_or_point=True) + slope_pts = slope_r.interp_points(pts, shift_area_or_point=True) + aspect_pts = aspect_r.interp_points(pts, shift_area_or_point=True) + tba_pts = aligned_dem.interp_points(pts, shift_area_or_point=True) # Treat new_pts as a window, every time we shift it a little bit to fit the correct view new_pts = pts.copy() - elevation_difference = ref_elev[z_name].values - tba_pts + elevation_difference = point_elev[z_name].values - tba_pts vshift = float(np.nanmedian(elevation_difference)) nmad_old = nmad(elevation_difference) @@ -942,16 +969,16 @@ def _fit_rst_pts( new_pts += [east_diff * resolution, north_diff * resolution] # Get new values - tba_pts = aligned_dem.interp_points(new_pts, mode="nearest", shift_area_or_point=True) - elevation_difference = ref_elev[z_name].values - tba_pts + tba_pts = aligned_dem.interp_points(new_pts, shift_area_or_point=True) + elevation_difference = point_elev[z_name].values - tba_pts # Mask out no data by dem's mask - pts_, mask_ = _mask_dataframe_by_dem(new_pts, tba_elev) + pts_, mask_ = _mask_dataframe_by_dem(new_pts, rst_elev) # Update values relataed to shifted pts elevation_difference = elevation_difference[mask_] - slope_pts = slope_r.interp_points(pts_, mode="nearest", shift_area_or_point=True) - aspect_pts = aspect_r.interp_points(pts_, mode="nearest", shift_area_or_point=True) + slope_pts = slope_r.interp_points(pts_, shift_area_or_point=True) + aspect_pts = aspect_r.interp_points(pts_, shift_area_or_point=True) vshift = float(np.nanmedian(elevation_difference)) # Update statistics @@ -985,9 +1012,9 @@ def _fit_rst_pts( print(" Statistics on coregistered dh:") print(f" Median = {vshift:.3f} - NMAD = {nmad_new:.3f}") - self._meta["offset_east_px"] = offset_east - self._meta["offset_north_px"] = offset_north - self._meta["vshift"] = vshift + self._meta["offset_east_px"] = offset_east if ref == "point" else -offset_east + self._meta["offset_north_px"] = offset_north if ref == "point" else -offset_north + self._meta["vshift"] = vshift if ref == "point" else -vshift self._meta["resolution"] = resolution self._meta["nmad"] = nmad_new @@ -1031,12 +1058,12 @@ def _apply_pts( offset_east = self._meta["offset_east_px"] * self._meta["resolution"] offset_north = self._meta["offset_north_px"] * self._meta["resolution"] - dem_copy = elev.copy() - dem_copy.geometry.x.values += offset_east - dem_copy.geometry.y.values += offset_north - dem_copy[z_name].values += self._meta["vshift"] + applied_epc = gpd.GeoDataFrame(geometry=gpd.points_from_xy(x=elev.geometry.x.values + offset_east, + y=elev.geometry.y.values + offset_north, + crs=elev.crs), + data={z_name: elev[z_name].values + self._meta["vshift"]}) - return dem_copy + return applied_epc class GradientDescending(AffineCoreg): @@ -1078,17 +1105,20 @@ def __init__( def _fit_rst_pts( self, - ref_elev: pd.DataFrame, - tba_elev: RasterType, + ref_elev: gpd.GeoDataFrame | NDArrayf, + tba_elev: gpd.GeoDataFrame | NDArrayf, + inlier_mask: NDArrayb, + transform: rio.transform.Affine, + crs: rio.crs.CRS, + weights: NDArrayf | None = None, verbose: bool = False, z_name: str = "z", - weights: str | None = None, random_state: int = 42, **kwargs: Any, ) -> None: """Estimate the x/y/z offset between two DEMs. - :param ref_elev: the dataframe used as ref - :param tba_elev: the dem to be aligned + :param point_elev: the dataframe used as ref + :param rst_elev: the dem to be aligned :param z_name: the column name of dataframe used for elevation differencing :param weights: the column name of dataframe used for weight, should have the same length with z_name columns :param random_state: The random state of the subsampling. @@ -1096,28 +1126,44 @@ def _fit_rst_pts( if not _has_noisyopt: raise ValueError("Optional dependency needed. Install 'noisyopt'") + # Check which one is reference + if isinstance(ref_elev, gpd.GeoDataFrame): + point_elev = ref_elev + rst_elev = tba_elev + ref = "point" + else: + point_elev = tba_elev + rst_elev = ref_elev + ref = "raster" + + rst_elev = Raster.from_array(rst_elev, transform=transform, crs=crs) + # Perform downsampling if subsample != None - if self._meta["subsample"] and len(ref_elev) > self._meta["subsample"]: - ref_elev = ref_elev.sample(frac=self._meta["subsample"] / len(ref_elev), random_state=random_state).copy() + if self._meta["subsample"] and len(point_elev) > self._meta["subsample"]: + point_elev = point_elev.sample(frac=self._meta["subsample"] / len(point_elev), random_state=random_state).copy() else: - ref_elev = ref_elev.copy() + point_elev = point_elev.copy() - resolution = tba_elev.res[0] + bounds, resolution = _transform_to_bounds_and_res(ref_elev.shape, transform) # Assume that the coordinates represent the center of a theoretical pixel. # The raster sampling is done in the upper left corner, meaning all point have to be respectively shifted - ref_elev["E"] -= resolution / 2 - ref_elev["N"] += resolution / 2 - area_or_point = "Area" - old_aop = tba_elev.tags.get("AREA_OR_POINT", None) - tba_elev.tags["AREA_OR_POINT"] = area_or_point + # TODO: Should be a way to not duplicate this column and just feed it directly + point_elev["E"] = point_elev.geometry.x.values + point_elev["N"] = point_elev.geometry.y.values + point_elev["E"] -= resolution / 2 + point_elev["N"] += resolution / 2 + + area_or_point = "Area" + old_aop = rst_elev.tags.get("AREA_OR_POINT", None) + rst_elev.tags["AREA_OR_POINT"] = area_or_point if verbose: print("Running Gradient Descending Coreg - Zhihao (in preparation) ") if self._meta["subsample"]: - print("Running on downsampling. The length of the gdf:", len(ref_elev)) + print("Running on downsampling. The length of the gdf:", len(point_elev)) - elevation_difference = _residuals_df(tba_elev, ref_elev, (0, 0), 0, z_name=z_name) + elevation_difference = _residuals_df(rst_elev, point_elev, (0, 0), 0, z_name=z_name) nmad_old = nmad(elevation_difference) vshift = np.nanmedian(elevation_difference) print(" Statistics on initial dh:") @@ -1125,7 +1171,7 @@ def _fit_rst_pts( # start iteration, find the best shifting px def func_cost(x: tuple[float, float]) -> np.floating[Any]: - return nmad(_residuals_df(tba_elev, ref_elev, x, 0, z_name=z_name, weight=weights)) + return nmad(_residuals_df(rst_elev, point_elev, x, 0, z_name=z_name, weight=weights)) res = minimizeCompass( func_cost, @@ -1139,12 +1185,12 @@ def func_cost(x: tuple[float, float]) -> np.floating[Any]: ) # Send the best solution to find all results - elevation_difference = _residuals_df(tba_elev, ref_elev, (res.x[0], res.x[1]), 0, z_name=z_name) + elevation_difference = _residuals_df(rst_elev, point_elev, (res.x[0], res.x[1]), 0, z_name=z_name) if old_aop is None: - del tba_elev.tags["AREA_OR_POINT"] + del rst_elev.tags["AREA_OR_POINT"] else: - tba_elev.tags["AREA_OR_POINT"] = old_aop + rst_elev.tags["AREA_OR_POINT"] = old_aop # results statistics vshift = np.nanmedian(elevation_difference) @@ -1157,9 +1203,12 @@ def func_cost(x: tuple[float, float]) -> np.floating[Any]: print(" Statistics on coregistered dh:") print(f" Median = {vshift:.4f} - NMAD = {nmad_new:.4f}") - self._meta["offset_east_px"] = res.x[0] - self._meta["offset_north_px"] = res.x[1] - self._meta["vshift"] = vshift + offset_east = res.x[0] + offset_north = res.x[1] + + self._meta["offset_east_px"] = offset_east if ref == "point" else -offset_east + self._meta["offset_north_px"] = offset_north if ref == "point" else -offset_north + self._meta["vshift"] = vshift if ref == "point" else -vshift self._meta["resolution"] = resolution def _fit_rst_rst( @@ -1169,7 +1218,7 @@ def _fit_rst_rst( inlier_mask: NDArrayb, transform: rio.transform.Affine, crs: rio.crs.CRS, - weights: NDArrayf | None, + weights: NDArrayf | None = None, bias_vars: dict[str, NDArrayf] | None = None, verbose: bool = False, **kwargs: Any, @@ -1183,8 +1232,8 @@ def _fit_rst_rst( ref_elev["E"] = ref_elev.geometry.x ref_elev["N"] = ref_elev.geometry.y ref_elev.rename(columns={"b1": "z"}, inplace=True) - tba_elev = Raster.from_array(tba_elev, transform=transform, crs=crs, nodata=-9999.0) - self._fit_rst_pts(ref_elev=ref_elev, tba_elev=tba_elev, transform=transform, **kwargs) + self._fit_rst_pts(ref_elev=ref_elev, tba_elev=tba_elev, transform=transform, crs=crs, inlier_mask=inlier_mask, + **kwargs) def _to_matrix_func(self) -> NDArrayf: """Return a transformation matrix from the estimated offsets.""" diff --git a/xdem/coreg/base.py b/xdem/coreg/base.py index aee10c93..31e9dd94 100644 --- a/xdem/coreg/base.py +++ b/xdem/coreg/base.py @@ -206,7 +206,7 @@ def _mask_dataframe_by_dem(df: pd.DataFrame | NDArrayf, dem: RasterType) -> pd.D elif isinstance(df, np.ndarray): pts = df - ref_inlier = mask_raster.interp_points(pts, input_latlon=False, order=0) + ref_inlier = mask_raster.interp_points(pts) new_df = df[ref_inlier.astype(bool)].copy() return new_df, ref_inlier.astype(bool) @@ -390,11 +390,11 @@ def _preprocess_coreg_fit_raster_point( # TODO: Convert to point cloud once class is done if isinstance(raster_elev, gu.Raster): - ref_dem = raster_elev.data + rst_elev = raster_elev.data crs = raster_elev.crs transform = raster_elev.transform else: - ref_dem = raster_elev + rst_elev = raster_elev crs = crs transform = transform @@ -406,9 +406,9 @@ def _preprocess_coreg_fit_raster_point( # TODO: Convert to point cloud? # Convert geodataframe to vector - tba_dem = point_elev.to_crs(crs=crs) + point_elev = point_elev.to_crs(crs=crs) - return ref_dem, tba_dem, inlier_mask, transform, crs + return rst_elev, point_elev, inlier_mask, transform, crs def _preprocess_coreg_fit_point_point( reference_elev: gpd.GeoDataFrame, @@ -428,34 +428,43 @@ def _preprocess_coreg_fit( crs: rio.crs.CRS | None = None,): """Pre-processing and checks of fit for any input.""" - if not all(isinstance(dem, (np.ndarray, gu.Raster, gpd.GeoDataFrame)) for dem in (reference_elev, to_be_aligned_elev)): + if not all(isinstance(elev, (np.ndarray, gu.Raster, gpd.GeoDataFrame)) for elev in (reference_elev, to_be_aligned_elev)): raise ValueError("Input elevation data should be a raster, an array or a geodataframe.") # If both inputs are raster or arrays, reprojection on the same grid is needed for raster-raster methods - if all(isinstance(dem, (np.ndarray, gu.Raster)) for dem in (reference_elev, to_be_aligned_elev)): - ref_dem, tba_dem, inlier_mask, transform, crs = \ + if all(isinstance(elev, (np.ndarray, gu.Raster)) for elev in (reference_elev, to_be_aligned_elev)): + ref_elev, tba_elev, inlier_mask, transform, crs = \ _preprocess_coreg_fit_raster_raster(reference_dem=reference_elev, dem_to_be_aligned=to_be_aligned_elev, - inlier_mask=inlier_mask, transform=transform, crs=crs) + inlier_mask=inlier_mask, transform=transform, crs=crs) # If one input is raster, and the other is point, we reproject the point data to the same CRS and extract arrays elif any(isinstance(dem, (np.ndarray, gu.Raster)) for dem in (reference_elev, to_be_aligned_elev)): if isinstance(reference_elev, (np.ndarray, gu.Raster)): - raster_dem = reference_elev - point_dem = to_be_aligned_elev + raster_elev = reference_elev + point_elev = to_be_aligned_elev + ref = "raster" else: - raster_dem = to_be_aligned_elev - point_dem = reference_elev + raster_elev = to_be_aligned_elev + point_elev = reference_elev + ref = "point" + + rst_elev, point_elev, inlier_mask, transform, crs = \ + _preprocess_coreg_fit_raster_point(raster_elev=raster_elev, point_elev=point_elev, + inlier_mask=inlier_mask, transform=transform, crs=crs) - ref_dem, tba_dem, inlier_mask, transform, crs = \ - _preprocess_coreg_fit_raster_point(raster_elev=raster_dem, point_elev=point_dem, - inlier_mask=inlier_mask, transform=transform, crs=crs) + if ref == "raster": + ref_elev = rst_elev + tba_elev = point_elev + else: + ref_elev = point_elev + tba_elev = rst_elev # If both inputs are points, simply reproject to the same CRS else: - ref_dem, tba_dem = _preprocess_coreg_fit_point_point(reference_elev=reference_elev, + ref_elev, tba_elev = _preprocess_coreg_fit_point_point(reference_elev=reference_elev, to_be_aligned_elev=to_be_aligned_elev) - return ref_dem, tba_dem, inlier_mask, transform, crs + return ref_elev, tba_elev, inlier_mask, transform, crs def _preprocess_coreg_apply( elev: NDArrayf | MArrayf | RasterType | gpd.GeoDataFrame, @@ -670,7 +679,7 @@ def invert_matrix(matrix: NDArrayf) -> NDArrayf: # Deprecation warning from pytransform3d. Let's hope that is fixed in the near future. warnings.filterwarnings("ignore", message="`np.float` is a deprecated alias for the builtin `float`") - checked_matrix = pytransform3d.transformations.check_matrix(matrix) + checked_matrix = pytransform3d.transformations.check_transform(matrix) # Invert the transform if wanted. return pytransform3d.transformations.invert_transform(checked_matrix) @@ -848,10 +857,8 @@ def apply_matrix_pts( transformed_points += centroid # Finally, transform back to a new GeoDataFrame - transformed_epc = epc.copy() - transformed_epc.geometry.x.values = points[0] - transformed_epc.geometry.y.values = points[1] - transformed_epc[z_name].values = points[2] + transformed_epc = gpd.GeoDataFrame(geometry=gpd.points_from_xy(x=points[0, :], y=points[1, :], crs=epc.crs), + data={"z": points[2, :]}) return transformed_epc @@ -956,7 +963,7 @@ def is_affine(self) -> bool: try: # See if to_matrix() raises an error. self.to_matrix() self._is_affine = True - except (ValueError, NotImplementedError): + except (AttributeError, ValueError, NotImplementedError): self._is_affine = False return self._is_affine @@ -1065,7 +1072,7 @@ def fit( # TODO: Rename into "checks", because not much is preprocessed in the end # (has to happen in the _fit_func itself, whether for subsampling or # Pre-process the inputs, by reprojecting and converting to arrays - ref_dem, tba_dem, inlier_mask, transform, crs = _preprocess_coreg_fit( + ref_elev, tba_elev, inlier_mask, transform, crs = _preprocess_coreg_fit( reference_elev=reference_elev, to_be_aligned_elev=to_be_aligned_elev, inlier_mask=inlier_mask, @@ -1074,8 +1081,8 @@ def fit( ) main_args = { - "ref_elev": ref_dem, - "tba_elev": tba_dem, + "ref_elev": ref_elev, + "tba_elev": tba_elev, "inlier_mask": inlier_mask, "transform": transform, "crs": crs, @@ -1439,11 +1446,10 @@ def _apply_func(self, **kwargs: Any) -> tuple[np.ndarray | gpd.GeoDataFrame, aff # Apply the matrix around the centroid (if defined, otherwise just from the center). applied_elev = apply_matrix_rst( - dem=kwargs["elev"], - transform=kwargs["transform"], + dem=kwargs.pop("elev"), + transform=kwargs.pop("transform"), matrix=self.to_matrix(), - centroid=self._meta.get("centroid"), - **kwargs, + centroid=self._meta.get("centroid") ) out_transform = kwargs["transform"] else: @@ -1451,6 +1457,8 @@ def _apply_func(self, **kwargs: Any) -> tuple[np.ndarray | gpd.GeoDataFrame, aff # If input is a point else: + out_transform = None + # See if an _apply_pts_func exists try: applied_elev = self._apply_pts(**kwargs) @@ -1462,7 +1470,7 @@ def _apply_func(self, **kwargs: Any) -> tuple[np.ndarray | gpd.GeoDataFrame, aff applied_elev = apply_matrix_pts(epc=kwargs["elev"], matrix=self.to_matrix(), centroid=self._meta.get("centroid"), - z_name=kwargs["z_name"]) + z_name=kwargs.pop("z_name")) else: raise ValueError("Cannot transform, Coreg method is non-affine and has no implemented _apply_pts.") @@ -2014,10 +2022,16 @@ def to_points(self) -> NDArrayf: # meta["representative_col"]) x_coord, y_coord = meta["representative_x"], meta["representative_y"] - old_position = np.reshape([x_coord, y_coord, meta["representative_val"]], (1, 3)) + old_pos_arr = np.reshape([x_coord, y_coord, meta["representative_val"]], (1, 3)) + old_position = \ + gpd.GeoDataFrame(geometry=gpd.points_from_xy(x=old_pos_arr[:, 0], y=old_pos_arr[:, 1], crs=None), + data={"z": old_pos_arr[:, 2]}) + new_position = self.procstep.apply(old_position) + new_pos_arr = np.reshape([new_position.geometry.x.values, new_position.geometry.y.values, + new_position["z"].values], (1, 3)) - points = np.append(points, np.dstack((old_position, new_position)), axis=0) + points = np.append(points, np.dstack((old_pos_arr, new_pos_arr)), axis=0) return points @@ -2095,7 +2109,7 @@ def _apply_rst( bounds, resolution = _transform_to_bounds_and_res(elev.shape, transform) representative_height = np.nanmean(elev) - edges_source = np.array( + edges_source_arr = np.array( [ [bounds.left + resolution / 2, bounds.top - resolution / 2, representative_height], [bounds.right - resolution / 2, bounds.top - resolution / 2, representative_height], @@ -2103,8 +2117,14 @@ def _apply_rst( [bounds.right - resolution / 2, bounds.bottom + resolution / 2, representative_height], ] ) + edges_source = \ + gpd.GeoDataFrame(geometry=gpd.points_from_xy(x=edges_source_arr[:, 0], y=edges_source_arr[:, 1], crs=None), + data={"z": edges_source_arr[:, 2]}) + edges_dest = self.apply(edges_source) - edges = np.dstack((edges_source, edges_dest)) + edges_dest_arr = np.reshape([edges_dest.geometry.x.values, edges_dest.geometry.y.values, + edges_dest["z"].values], (1, 3)) + edges = np.dstack((edges_source_arr, edges_dest_arr)) all_points = np.append(points, edges, axis=0) From 2af3cb50e9d2ee7cfcbc3eaa3ebcb7a374640bab Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Sat, 2 Mar 2024 14:15:13 -0900 Subject: [PATCH 05/54] Incremental commit --- tests/test_coreg/test_affine.py | 6 +++--- tests/test_coreg/test_base.py | 12 ++++++------ xdem/coreg/base.py | 32 ++++++++++++++++++++------------ 3 files changed, 29 insertions(+), 21 deletions(-) diff --git a/tests/test_coreg/test_affine.py b/tests/test_coreg/test_affine.py index 876e445d..2cfe9b62 100644 --- a/tests/test_coreg/test_affine.py +++ b/tests/test_coreg/test_affine.py @@ -42,8 +42,8 @@ class TestAffineCoreg: ) # Create some 3D coordinates with Z coordinates being 0 to try the apply functions. points_arr = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [0, 0, 0, 0]], dtype="float64").T - points = gpd.GeoDataFrame(geometry=gpd.points_from_xy(x=points_arr[0, :], y=points_arr[1, :], crs=ref.crs), - data={"z": points_arr[2, :]}) + points = gpd.GeoDataFrame(geometry=gpd.points_from_xy(x=points_arr[:, 0], y=points_arr[:, 1], crs=ref.crs), + data={"z": points_arr[:, 2]}) def test_from_classmethods(self) -> None: @@ -59,7 +59,7 @@ def test_from_classmethods(self) -> None: x_offset = 5 coreg_obj2 = AffineCoreg.from_translation(x_off=x_offset) transformed_points2 = coreg_obj2.apply(self.points) - assert np.array_equal(self.points[:, 0] + x_offset, transformed_points2[:, 0]) + assert np.array_equal(self.points.geometry.x.values + x_offset, transformed_points2.geometry.x.values) # Try to make a Coreg object from a nan translation (should fail). try: diff --git a/tests/test_coreg/test_base.py b/tests/test_coreg/test_base.py index 21b05b15..aa051ae3 100644 --- a/tests/test_coreg/test_base.py +++ b/tests/test_coreg/test_base.py @@ -49,8 +49,8 @@ class TestCoregClass: ) # Create some 3D coordinates with Z coordinates being 0 to try the apply functions. points_arr = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [0, 0, 0, 0]], dtype="float64").T - points = gpd.GeoDataFrame(geometry=gpd.points_from_xy(x=points_arr[0, :], y=points_arr[1, :], crs=ref.crs), - data={"z": points_arr[2, :]}) + points = gpd.GeoDataFrame(geometry=gpd.points_from_xy(x=points_arr[:, 0], y=points_arr[:, 1], crs=ref.crs), + data={"z": points_arr[:, 2]}) def test_init(self) -> None: """Test instantiation of Coreg""" @@ -509,8 +509,8 @@ class TestCoregPipeline: ) # Create some 3D coordinates with Z coordinates being 0 to try the apply functions. points_arr = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [0, 0, 0, 0]], dtype="float64").T - points = gpd.GeoDataFrame(geometry=gpd.points_from_xy(x=points_arr[0, :], y=points_arr[1, :], crs=ref.crs), - data={"z": points_arr[2, :]}) + points = gpd.GeoDataFrame(geometry=gpd.points_from_xy(x=points_arr[:, 0], y=points_arr[:, 1], crs=ref.crs), + data={"z": points_arr[:, 2]}) @pytest.mark.parametrize("coreg_class", [coreg.VerticalShift, coreg.ICP, coreg.NuthKaab]) # type: ignore def test_copy(self, coreg_class: Callable[[], Coreg]) -> None: @@ -734,8 +734,8 @@ class TestBlockwiseCoreg: ) # Create some 3D coordinates with Z coordinates being 0 to try the apply functions. points_arr = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [0, 0, 0, 0]], dtype="float64").T - points = gpd.GeoDataFrame(geometry=gpd.points_from_xy(x=points_arr[0, :], y=points_arr[1, :], crs=ref.crs), - data={"z": points_arr[2, :]}) + points = gpd.GeoDataFrame(geometry=gpd.points_from_xy(x=points_arr[:, 0], y=points_arr[:, 1], crs=ref.crs), + data={"z": points_arr[:, 2]}) @pytest.mark.parametrize( "pipeline", [coreg.VerticalShift(), coreg.VerticalShift() + coreg.NuthKaab()] diff --git a/xdem/coreg/base.py b/xdem/coreg/base.py index 31e9dd94..ab11ec58 100644 --- a/xdem/coreg/base.py +++ b/xdem/coreg/base.py @@ -846,19 +846,20 @@ def apply_matrix_pts( matrix = invert_matrix(matrix) # First, get Nx3 array to pass to opencv - points = np.array([epc.geometry.x.values, epc.geometry.y.values, epc[z_name].values]) + points = np.array([epc.geometry.x.values, epc.geometry.y.values, epc[z_name].values]).T # Transform the points (around the centroid if it exists). if centroid is not None: points -= centroid transformed_points = cv2.perspectiveTransform(points.reshape(1, -1, 3), - matrix.squeeze()) + matrix).squeeze() if centroid is not None: transformed_points += centroid # Finally, transform back to a new GeoDataFrame - transformed_epc = gpd.GeoDataFrame(geometry=gpd.points_from_xy(x=points[0, :], y=points[1, :], crs=epc.crs), - data={"z": points[2, :]}) + transformed_epc = gpd.GeoDataFrame(geometry=gpd.points_from_xy(x=transformed_points[:, 0], + y=transformed_points[:, 1], crs=epc.crs), + data={"z": transformed_points[:, 2]}) return transformed_epc @@ -1086,6 +1087,7 @@ def fit( "inlier_mask": inlier_mask, "transform": transform, "crs": crs, + "z_name": z_name, "weights": weights, "verbose": verbose, } @@ -1445,13 +1447,14 @@ def _apply_func(self, **kwargs: Any) -> tuple[np.ndarray | gpd.GeoDataFrame, aff kwargs.pop("resample") # Need to removed before passing to apply_matrix # Apply the matrix around the centroid (if defined, otherwise just from the center). + transform = kwargs.pop("transform") applied_elev = apply_matrix_rst( dem=kwargs.pop("elev"), - transform=kwargs.pop("transform"), + transform=transform, matrix=self.to_matrix(), centroid=self._meta.get("centroid") ) - out_transform = kwargs["transform"] + out_transform = transform else: raise ValueError("Cannot transform, Coreg method is non-affine and has no implemented _apply_rst.") @@ -1616,11 +1619,12 @@ def fit( reference_elev: NDArrayf | MArrayf | RasterType, to_be_aligned_elev: NDArrayf | MArrayf | RasterType, inlier_mask: NDArrayb | Mask | None = None, - transform: rio.transform.Affine | None = None, - crs: rio.crs.CRS | None = None, bias_vars: dict[str, NDArrayf | MArrayf | RasterType] | None = None, weights: NDArrayf | None = None, subsample: float | int | None = None, + transform: rio.transform.Affine | None = None, + crs: rio.crs.CRS | None = None, + z_name: str = "z", verbose: bool = False, random_state: None | np.random.RandomState | np.random.Generator | int = None, **kwargs: Any, @@ -1663,13 +1667,14 @@ def fit( "inlier_mask": inlier_mask, "transform": out_transform, "crs": crs, + "z_name": z_name, "weights": weights, "verbose": verbose, "subsample": subsample, "random_state": random_state, } - main_args_apply = {"elev": tba_dem_mod, "transform": out_transform, "crs": crs} + main_args_apply = {"elev": tba_dem_mod, "transform": out_transform, "crs": crs, "z_name": z_name} # If non-affine method that expects a bias_vars argument if coreg._needs_vars: @@ -1693,6 +1698,7 @@ def _apply_rst( elev: NDArrayf, transform: rio.transform.Affine, crs: rio.crs.CRS, + z_name: str = "z", bias_vars: dict[str, NDArrayf] | None = None, **kwargs: Any, ) -> tuple[NDArrayf, rio.transform.Affine]: @@ -1702,7 +1708,7 @@ def _apply_rst( for i, coreg in enumerate(self.pipeline): - main_args_apply = {"elev": dem_mod, "transform": out_transform, "crs": crs} + main_args_apply = {"elev": dem_mod, "transform": out_transform, "crs": crs, "z_name": z_name} # If non-affine method that expects a bias_vars argument if coreg._needs_vars: @@ -1798,11 +1804,12 @@ def fit( reference_elev: NDArrayf | MArrayf | RasterType, to_be_aligned_elev: NDArrayf | MArrayf | RasterType, inlier_mask: NDArrayb | Mask | None = None, - transform: rio.transform.Affine | None = None, - crs: rio.crs.CRS | None = None, bias_vars: dict[str, NDArrayf | MArrayf | RasterType] | None = None, weights: NDArrayf | None = None, subsample: float | int | None = None, + transform: rio.transform.Affine | None = None, + crs: rio.crs.CRS | None = None, + z_name: str = "z", verbose: bool = False, random_state: None | np.random.RandomState | np.random.Generator | int = None, **kwargs: Any, @@ -1879,6 +1886,7 @@ def process(i: int) -> dict[str, Any] | BaseException | None: bias_vars=bias_vars, weights=weights, crs=crs, + z_name=z_name, subsample=subsample, random_state=random_state, verbose=verbose, From c5ddfeb51000cc480df6d1676449093d8da58c7b Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Sat, 2 Mar 2024 19:12:42 -0900 Subject: [PATCH 06/54] Incremental commit --- tests/test_coreg/test_base.py | 9 ++------- xdem/coreg/affine.py | 8 +++++++- xdem/coreg/base.py | 23 ++++++++++++++++------- xdem/coreg/biascorr.py | 13 +++++++++++++ 4 files changed, 38 insertions(+), 15 deletions(-) diff --git a/tests/test_coreg/test_base.py b/tests/test_coreg/test_base.py index aa051ae3..39d7e9a0 100644 --- a/tests/test_coreg/test_base.py +++ b/tests/test_coreg/test_base.py @@ -64,7 +64,6 @@ def test_init(self) -> None: @pytest.mark.parametrize("coreg_class", [coreg.VerticalShift, coreg.ICP, coreg.NuthKaab]) # type: ignore def test_copy(self, coreg_class: Callable[[], Coreg]) -> None: """Test that copying work expectedly (that no attributes still share references).""" - warnings.simplefilter("error") # Create a coreg instance and copy it. corr = coreg_class() @@ -148,7 +147,6 @@ def test_get_subsample_on_valid_mask(self, subsample: float | int) -> None: @pytest.mark.parametrize("coreg", all_coregs) # type: ignore def test_subsample(self, coreg: Callable) -> None: # type: ignore - warnings.simplefilter("error") # Check that default value is set properly coreg_full = coreg() @@ -360,7 +358,7 @@ def test_apply_resample(self, inputs: list[Any]) -> None: # Test it works with different resampling algorithms dem_coreg_resample = coreg_method.apply(tba_dem, resample=True, resampling=rio.warp.Resampling.nearest) dem_coreg_resample = coreg_method.apply(tba_dem, resample=True, resampling=rio.warp.Resampling.cubic) - with pytest.raises(ValueError, match="`resampling` must be a rio.warp.Resampling algorithm"): + with pytest.raises(ValueError, match="'None' is not a valid rasterio.enums.Resampling method.*"): dem_coreg_resample = coreg_method.apply(tba_dem, resample=True, resampling=None) @pytest.mark.parametrize( @@ -435,7 +433,6 @@ def test_coreg_raises(self, combination: tuple[str, str, str, str, str, str, str 6. The expected outcome of the test. 7. The error/warning message (if applicable) """ - warnings.simplefilter("error") ref_dem, tba_dem, transform, crs, testing_step, result, text = combination @@ -530,7 +527,6 @@ def test_copy(self, coreg_class: Callable[[], Coreg]) -> None: assert pipeline_copy.pipeline[0]._meta["vshift"] def test_pipeline(self) -> None: - warnings.simplefilter("error") # Create a pipeline from two coreg methods. pipeline = coreg.CoregPipeline([coreg.VerticalShift(), coreg.NuthKaab()]) @@ -637,7 +633,6 @@ def test_pipeline__errors(self) -> None: pipeline3.fit(**self.fit_params, bias_vars={"ncc": xdem.terrain.slope(self.ref)}) def test_pipeline_pts(self) -> None: - warnings.simplefilter("ignore") pipeline = coreg.NuthKaab() + coreg.GradientDescending() ref_points = self.ref.to_points(as_array=False, subsample=5000, pixel_offset="center").ds @@ -654,7 +649,7 @@ def test_pipeline_pts(self) -> None: assert pipeline.pipeline[0]._meta["offset_east_px"] != pipeline.pipeline[1]._meta["offset_east_px"] def test_coreg_add(self) -> None: - warnings.simplefilter("error") + # Test with a vertical shift of 4 vshift = 4 diff --git a/xdem/coreg/affine.py b/xdem/coreg/affine.py index 90e6e276..316477eb 100644 --- a/xdem/coreg/affine.py +++ b/xdem/coreg/affine.py @@ -308,6 +308,7 @@ def _fit_rst_rst( inlier_mask: NDArrayb, transform: rio.transform.Affine, crs: rio.crs.CRS, + z_name: str, weights: NDArrayf | None, bias_vars: dict[str, NDArrayf] | None = None, verbose: bool = False, @@ -367,6 +368,7 @@ def _fit_rst_rst( inlier_mask: NDArrayb, transform: rio.transform.Affine, crs: rio.crs.CRS, + z_name: str, weights: NDArrayf | None, bias_vars: dict[str, NDArrayf] | None = None, verbose: bool = False, @@ -480,6 +482,7 @@ def _fit_rst_rst( inlier_mask: NDArrayb, transform: rio.transform.Affine, crs: rio.crs.CRS, + z_name: str, weights: NDArrayf | None, bias_vars: dict[str, NDArrayf] | None = None, verbose: bool = False, @@ -643,6 +646,7 @@ def _fit_rst_rst( inlier_mask: NDArrayb, transform: rio.transform.Affine, crs: rio.crs.CRS, + z_name: str, weights: NDArrayf | None, bias_vars: dict[str, NDArrayf] | None = None, verbose: bool = False, @@ -734,6 +738,7 @@ def _fit_rst_rst( inlier_mask: NDArrayb, transform: rio.transform.Affine, crs: rio.crs.CRS, + z_name: str, weights: NDArrayf | None, bias_vars: dict[str, NDArrayf] | None = None, verbose: bool = False, @@ -1218,6 +1223,7 @@ def _fit_rst_rst( inlier_mask: NDArrayb, transform: rio.transform.Affine, crs: rio.crs.CRS, + z_name: str, weights: NDArrayf | None = None, bias_vars: dict[str, NDArrayf] | None = None, verbose: bool = False, @@ -1231,7 +1237,7 @@ def _fit_rst_rst( ) ref_elev["E"] = ref_elev.geometry.x ref_elev["N"] = ref_elev.geometry.y - ref_elev.rename(columns={"b1": "z"}, inplace=True) + ref_elev.rename(columns={"b1": z_name}, inplace=True) self._fit_rst_pts(ref_elev=ref_elev, tba_elev=tba_elev, transform=transform, crs=crs, inlier_mask=inlier_mask, **kwargs) diff --git a/xdem/coreg/base.py b/xdem/coreg/base.py index ab11ec58..0f0a741c 100644 --- a/xdem/coreg/base.py +++ b/xdem/coreg/base.py @@ -510,6 +510,8 @@ def _preprocess_coreg_apply( def _postprocess_coreg_apply_pts( applied_elev: gpd.GeoDataFrame, ) -> gpd.GeoDataFrame: + """Post-processing and checks of apply for point input.""" + # TODO: Convert CRS back if the CRS did not match the one of the fit? return applied_elev @@ -522,6 +524,8 @@ def _postprocess_coreg_apply_rst( resample: bool, resampling: rio.warp.Resampling | None = None, ) -> tuple[NDArrayf | gu.Raster, affine.Affine]: + """Post-processing and checks of apply for raster input.""" + # Ensure the dtype is OK applied_elev = applied_elev.astype("float32") @@ -539,23 +543,23 @@ def _postprocess_coreg_apply_rst( else: match_rst = elev applied_rst.reproject(match_rst, resampling=resampling) - applied_dem = applied_rst.data + applied_elev = applied_rst.data # Calculate final mask - final_mask = np.logical_or(~np.isfinite(applied_dem), applied_dem == nodata) + final_mask = np.logical_or(~np.isfinite(applied_elev), applied_elev == nodata) # If the DEM was a masked_array, copy the mask to the new DEM if isinstance(elev, (np.ma.masked_array, gu.Raster)): - applied_dem = np.ma.masked_array(applied_dem, mask=final_mask) # type: ignore + applied_elev = np.ma.masked_array(applied_elev, mask=final_mask) # type: ignore else: - applied_dem[final_mask] = np.nan + applied_elev[final_mask] = np.nan # If the input was a Raster, returns a Raster, else returns array and transform if isinstance(elev, gu.Raster): - out_dem = elev.from_array(applied_dem, out_transform, crs, nodata=elev.nodata) + out_dem = elev.from_array(applied_elev, out_transform, crs, nodata=elev.nodata) return out_dem, out_transform else: - return applied_dem, out_transform + return applied_elev, out_transform def _postprocess_coreg_apply( elev: NDArrayf | gu.Raster | gpd.GeoDataFrame, @@ -566,6 +570,7 @@ def _postprocess_coreg_apply( resample: bool, resampling: rio.warp.Resampling | None = None, ) -> tuple[NDArrayf | gpd.GeoDataFrame, affine.Affine]: + """Post-processing and checks of apply for any input.""" if isinstance(applied_elev, np.ndarray): applied_elev, out_transform = _postprocess_coreg_apply_rst(elev=elev, applied_elev=applied_elev, @@ -1171,7 +1176,7 @@ def apply( :param elev: Elevation to apply the transform to, either a DEM or an elevation point cloud. :param bias_vars: Only for some bias correction classes. 2D array of bias variables used. :param resample: If set to True, will reproject output Raster on the same grid as input. Otherwise, \ - only the transform might be updated and no resampling is done. + only the transform might be updated and no resampling is done. :param resampling: Resampling method if resample is used. Defaults to "bilinear". :param transform: Geotransform of the elevation, only if provided as 2D array. :param crs: CRS of elevation, only if provided as 2D array. @@ -1383,6 +1388,7 @@ def _fit_func( try: self._fit_rst_rst(**kwargs) # Otherwise, convert the tba raster to points and try raster-points + # TODO: This is also capturing other "NotImplementedError" for resampling and failing test_apply_resample[inputs4] except NotImplementedError: warnings.warn( f"No raster-raster method found for coregistration {self.__class__.__name__}, " @@ -1486,6 +1492,7 @@ def _fit_rst_rst(self, inlier_mask: NDArrayb, transform: rio.transform.Affine, crs: rio.crs.CRS, + z_name: str, weights: NDArrayf | None, bias_vars: dict[str, NDArrayf] | None = None, verbose: bool = False, @@ -1500,6 +1507,7 @@ def _fit_rst_pts(self, inlier_mask: NDArrayb, transform: rio.transform.Affine, crs: rio.crs.CRS, + z_name: str, weights: NDArrayf | None, bias_vars: dict[str, NDArrayf] | None = None, verbose: bool = False, @@ -1514,6 +1522,7 @@ def _fit_pts_pts(self, inlier_mask: NDArrayb, transform: rio.transform.Affine, crs: rio.crs.CRS, + z_name: str, weights: NDArrayf | None, bias_vars: dict[str, NDArrayf] | None = None, verbose: bool = False, diff --git a/xdem/coreg/biascorr.py b/xdem/coreg/biascorr.py index 4844cbd3..62897f59 100644 --- a/xdem/coreg/biascorr.py +++ b/xdem/coreg/biascorr.py @@ -146,6 +146,7 @@ def _fit_rst_rst( # type: ignore inlier_mask: NDArrayb, transform: rio.transform.Affine, # Never None thanks to Coreg.fit() pre-process crs: rio.crs.CRS, # Never None thanks to Coreg.fit() pre-process + z_name: str, bias_vars: None | dict[str, NDArrayf] = None, weights: None | NDArrayf = None, verbose: bool = False, @@ -420,6 +421,7 @@ def _fit_rst_rst( # type: ignore bias_vars: dict[str, NDArrayf], transform: rio.transform.Affine, # Never None thanks to Coreg.fit() pre-process crs: rio.crs.CRS, # Never None thanks to Coreg.fit() pre-process + z_name: str, weights: None | NDArrayf = None, verbose: bool = False, **kwargs, @@ -440,6 +442,7 @@ def _fit_rst_rst( # type: ignore bias_vars=bias_vars, transform=transform, crs=crs, + z_name=z_name, weights=weights, verbose=verbose, **kwargs, @@ -495,6 +498,7 @@ def _fit_rst_rst( # type: ignore bias_vars: dict[str, NDArrayf], transform: rio.transform.Affine, # Never None thanks to Coreg.fit() pre-process crs: rio.crs.CRS, # Never None thanks to Coreg.fit() pre-process + z_name: str, weights: None | NDArrayf = None, verbose: bool = False, **kwargs, @@ -514,6 +518,7 @@ def _fit_rst_rst( # type: ignore bias_vars=bias_vars, transform=transform, crs=crs, + z_name=z_name, weights=weights, verbose=verbose, **kwargs, @@ -571,6 +576,7 @@ def _fit_rst_rst( # type: ignore bias_vars: dict[str, NDArrayf], # Never None thanks to BiasCorr.fit() pre-process transform: rio.transform.Affine, # Never None thanks to Coreg.fit() pre-process crs: rio.crs.CRS, # Never None thanks to Coreg.fit() pre-process + z_name: str, weights: None | NDArrayf = None, verbose: bool = False, **kwargs, @@ -587,6 +593,7 @@ def _fit_rst_rst( # type: ignore bias_vars=bias_vars, transform=transform, crs=crs, + z_name=z_name, weights=weights, verbose=verbose, **kwargs, @@ -636,6 +643,7 @@ def _fit_rst_rst( # type: ignore inlier_mask: NDArrayb, transform: rio.transform.Affine, crs: rio.crs.CRS, + z_name: str, bias_vars: dict[str, NDArrayf] = None, weights: None | NDArrayf = None, verbose: bool = False, @@ -663,6 +671,7 @@ def _fit_rst_rst( # type: ignore bias_vars={"angle": x}, transform=transform, crs=crs, + z_name=z_name, weights=weights, verbose=verbose, **kwargs, @@ -747,6 +756,7 @@ def _fit_rst_rst( # type: ignore inlier_mask: NDArrayb, transform: rio.transform.Affine, crs: rio.crs.CRS, + z_name: str, bias_vars: dict[str, NDArrayf] = None, weights: None | NDArrayf = None, verbose: bool = False, @@ -769,6 +779,7 @@ def _fit_rst_rst( # type: ignore bias_vars={self._meta["terrain_attribute"]: attr}, transform=transform, crs=crs, + z_name=z_name, weights=weights, verbose=verbose, **kwargs, @@ -846,6 +857,7 @@ def _fit_rst_rst( # type: ignore inlier_mask: NDArrayb, transform: rio.transform.Affine, crs: rio.crs.CRS, + z_name: str, bias_vars: dict[str, NDArrayf] | None = None, weights: None | NDArrayf = None, verbose: bool = False, @@ -865,6 +877,7 @@ def _fit_rst_rst( # type: ignore bias_vars={"xx": xx, "yy": yy}, transform=transform, crs=crs, + z_name=z_name, weights=weights, verbose=verbose, p0=p0, From 1db5ddc9cba620f336a6e1d208221a47d7c16b43 Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Tue, 5 Mar 2024 14:52:18 -0900 Subject: [PATCH 07/54] Fix tests --- tests/test_coreg/test_affine.py | 4 +- tests/test_coreg/test_base.py | 11 ++-- xdem/coreg/affine.py | 38 ------------ xdem/coreg/base.py | 102 ++++++++++++++++++++++---------- 4 files changed, 80 insertions(+), 75 deletions(-) diff --git a/tests/test_coreg/test_affine.py b/tests/test_coreg/test_affine.py index 2cfe9b62..b133d05b 100644 --- a/tests/test_coreg/test_affine.py +++ b/tests/test_coreg/test_affine.py @@ -262,9 +262,9 @@ def test_nuth_kaab(self) -> None: transformed_points = nuth_kaab.apply(self.points) # Check that the x shift is close to the pixel_shift * image resolution - assert abs((transformed_points[0, 0] - self.points[0, 0]) - pixel_shift * self.ref.res[0]) < 0.1 + assert all(abs((transformed_points.geometry.x.values - self.points.geometry.x.values) - pixel_shift * self.ref.res[0]) < 0.1) # Check that the z shift is close to the original vertical shift. - assert abs((transformed_points[0, 2] - self.points[0, 2]) + vshift) < 0.1 + assert all(abs((transformed_points["z"].values - self.points["z"].values) + vshift) < 0.1) def test_tilt(self) -> None: diff --git a/tests/test_coreg/test_base.py b/tests/test_coreg/test_base.py index 39d7e9a0..db457ecc 100644 --- a/tests/test_coreg/test_base.py +++ b/tests/test_coreg/test_base.py @@ -336,7 +336,7 @@ def test_apply_resample(self, inputs: list[Any]) -> None: # If not implemented, should raise an error if not is_implemented: with pytest.raises(NotImplementedError, match="Option `resample=False` not implemented for coreg method *"): - dem_coreg_noresample = coreg_method.apply(tba_dem, resample=False) + coreg_method.apply(tba_dem, resample=False) return else: dem_coreg_resample = coreg_method.apply(tba_dem) @@ -356,10 +356,10 @@ def test_apply_resample(self, inputs: list[Any]) -> None: # assert np.count_nonzero(diff.data) == 0 # Test it works with different resampling algorithms - dem_coreg_resample = coreg_method.apply(tba_dem, resample=True, resampling=rio.warp.Resampling.nearest) - dem_coreg_resample = coreg_method.apply(tba_dem, resample=True, resampling=rio.warp.Resampling.cubic) + coreg_method.apply(tba_dem, resample=True, resampling=rio.warp.Resampling.nearest) + coreg_method.apply(tba_dem, resample=True, resampling=rio.warp.Resampling.cubic) with pytest.raises(ValueError, match="'None' is not a valid rasterio.enums.Resampling method.*"): - dem_coreg_resample = coreg_method.apply(tba_dem, resample=True, resampling=None) + coreg_method.apply(tba_dem, resample=True, resampling=None) @pytest.mark.parametrize( "combination", @@ -415,7 +415,8 @@ def test_apply_resample(self, inputs: list[Any]) -> None: "'crs' must be given if DEM is array-like.", ), ("dem1", "dem2", "dem2.transform", "None", "apply", "warns", "DEM .* overrides the given 'transform'"), - ("None", "None", "None", "None", "fit", "error", "Both DEMs need to be array-like"), + ("None", "None", "None", "None", "fit", "error", "Input elevation data should be a raster, " + "an array or a geodataframe."), ("dem1 + np.nan", "dem2", "None", "None", "fit", "error", "'reference_dem' had only NaNs"), ("dem1", "dem2 + np.nan", "None", "None", "fit", "error", "'dem_to_be_aligned' had only NaNs"), ], diff --git a/xdem/coreg/affine.py b/xdem/coreg/affine.py index 316477eb..14785937 100644 --- a/xdem/coreg/affine.py +++ b/xdem/coreg/affine.py @@ -301,44 +301,6 @@ def _to_matrix_func(self) -> NDArrayf: raise NotImplementedError("This should be implemented by subclassing") - def _fit_rst_rst( - self, - ref_elev: NDArrayf, - tba_elev: NDArrayf, - inlier_mask: NDArrayb, - transform: rio.transform.Affine, - crs: rio.crs.CRS, - z_name: str, - weights: NDArrayf | None, - bias_vars: dict[str, NDArrayf] | None = None, - verbose: bool = False, - **kwargs: Any, - ) -> None: - # FOR DEVELOPERS: This function needs to be implemented. - raise NotImplementedError("This step has to be implemented by subclassing.") - - def _apply_rst( - self, - elev: NDArrayf, - transform: rio.transform.Affine, - crs: rio.crs.CRS, - bias_vars: dict[str, NDArrayf] | None = None, - **kwargs: Any, - ) -> tuple[NDArrayf, rio.transform.Affine]: - # FOR DEVELOPERS: This function is only needed for non-rigid transforms. - raise NotImplementedError("This should have been implemented by subclassing") - - def _apply_pts( - self, - elev: gpd.GeoDataFrame, - z_name: str = "z", - bias_vars: dict[str, NDArrayf] | None = None, - **kwargs: Any, - ) -> gpd.GeoDataFrame: - - # FOR DEVELOPERS: This function is only needed for non-rigid transforms. - raise NotImplementedError("This should have been implemented by subclassing") - class VerticalShift(AffineCoreg): """ diff --git a/xdem/coreg/base.py b/xdem/coreg/base.py index 0f0a741c..97636e03 100644 --- a/xdem/coreg/base.py +++ b/xdem/coreg/base.py @@ -478,19 +478,19 @@ def _preprocess_coreg_apply( # If input is geodataframe if isinstance(elev, gpd.GeoDataFrame): elev_out = elev + new_transform = None + new_crs = None # If input is a raster or array else: # If input is raster if isinstance(elev, gu.Raster): - if transform is None: - transform = elev.transform - else: + if transform is not None: warnings.warn(f"DEM of type {type(elev)} overrides the given 'transform'") - if crs is None: - crs = elev.crs - else: + if crs is not None: warnings.warn(f"DEM of type {type(elev)} overrides the given 'crs'") + new_transform = elev.transform + new_crs = elev.crs # If input is an array else: @@ -498,6 +498,8 @@ def _preprocess_coreg_apply( raise ValueError("'transform' must be given if DEM is array-like.") if crs is None: raise ValueError("'crs' must be given if DEM is array-like.") + new_transform = transform + new_crs = crs # The array to provide the functions will be an ndarray with NaNs for masked out areas. elev_out, elev_mask = get_array_and_mask(elev) @@ -505,7 +507,7 @@ def _preprocess_coreg_apply( if np.all(elev_mask): raise ValueError("'dem' had only NaNs") - return elev_out, transform, crs + return elev_out, new_transform, new_crs def _postprocess_coreg_apply_pts( applied_elev: gpd.GeoDataFrame, @@ -537,13 +539,16 @@ def _postprocess_coreg_apply_rst( # Resample the array on the original grid if resample: + # Reproject the DEM from its out_transform onto the transform applied_rst = gu.Raster.from_array(applied_elev, out_transform, crs=crs, nodata=nodata) if not isinstance(elev, gu.Raster): match_rst = gu.Raster.from_array(elev, transform, crs=crs, nodata=nodata) else: match_rst = elev - applied_rst.reproject(match_rst, resampling=resampling) + applied_rst = applied_rst.reproject(match_rst, resampling=resampling) applied_elev = applied_rst.data + # Now that the raster data is reprojected, the new out_transform is set as the original transform + out_transform = transform # Calculate final mask final_mask = np.logical_or(~np.isfinite(applied_elev), applied_elev == nodata) @@ -857,7 +862,7 @@ def apply_matrix_pts( if centroid is not None: points -= centroid transformed_points = cv2.perspectiveTransform(points.reshape(1, -1, 3), - matrix).squeeze() + matrix)[0, :, :] # Select the first dimension that is one if centroid is not None: transformed_points += centroid @@ -872,6 +877,16 @@ def apply_matrix_pts( # Generic coregistration processing classes ########################################### +class NotImplementedCoregFit(NotImplementedError): + """ + Error subclass for not implemented coregistration fit methods; mainly to differentiate with NotImplementedError + """ + + +class NotImplementedCoregApply(NotImplementedError): + """ + Error subclass for not implemented coregistration fit methods; mainly to differentiate with NotImplementedError + """ class CoregDict(TypedDict, total=False): """ @@ -1245,19 +1260,19 @@ def residuals( """ # Apply the transformation to the dem to be aligned - aligned_dem = self.apply(to_be_aligned_elev, transform=transform, crs=crs)[0] + aligned_elev = self.apply(to_be_aligned_elev, transform=transform, crs=crs)[0] # Pre-process the inputs, by reprojecting and subsampling - ref_dem, align_dem, inlier_mask, transform, crs = _preprocess_coreg_fit( + ref_dem, align_elev, inlier_mask, transform, crs = _preprocess_coreg_fit( reference_elev=reference_elev, - to_be_aligned_elev=to_be_aligned_elev, + to_be_aligned_elev=aligned_elev, inlier_mask=inlier_mask, transform=transform, crs=crs, ) # Calculate the DEM difference - diff = ref_dem - align_dem + diff = ref_dem - align_elev # Sometimes, the float minimum (for float32 = -3.4028235e+38) is returned. This and inf should be excluded. full_mask = np.isfinite(diff) @@ -1388,8 +1403,7 @@ def _fit_func( try: self._fit_rst_rst(**kwargs) # Otherwise, convert the tba raster to points and try raster-points - # TODO: This is also capturing other "NotImplementedError" for resampling and failing test_apply_resample[inputs4] - except NotImplementedError: + except NotImplementedCoregFit: warnings.warn( f"No raster-raster method found for coregistration {self.__class__.__name__}, " f"trying raster-point method by converting to-be-aligned DEM to points.", @@ -1404,7 +1418,7 @@ def _fit_func( if rop == "r-p" or try_rp: try: self._fit_rst_pts(**kwargs) - except NotImplementedError: + except NotImplementedCoregFit: warnings.warn( f"No raster-point method found for coregistration {self.__class__.__name__}, " f"trying point-point method by converting all elevation data to points.", @@ -1419,16 +1433,16 @@ def _fit_func( if rop == "p-p" or try_pp: try: self._fit_pts_pts(**kwargs) - except NotImplementedError: + except NotImplementedCoregFit: if try_pp and try_rp: - raise NotImplementedError( + raise NotImplementedCoregFit( f"No raster-raster, raster-point or point-point method found for " f"coregistration {self.__class__.__name__}.") elif try_pp: - raise NotImplementedError( + raise NotImplementedCoregFit( f"No raster-point or point-point method found for coregistration {self.__class__.__name__}.") else: - raise NotImplementedError(f"No point-point method found for coregistration {self.__class__.__name__}.") + raise NotImplementedCoregFit(f"No point-point method found for coregistration {self.__class__.__name__}.") def _apply_func(self, **kwargs: Any) -> tuple[np.ndarray | gpd.GeoDataFrame, affine.Affine]: """Distribute to _apply_rst and _apply_pts based on input and method availability.""" @@ -1442,7 +1456,7 @@ def _apply_func(self, **kwargs: Any) -> tuple[np.ndarray | gpd.GeoDataFrame, aff applied_elev, out_transform = self._apply_rst(**kwargs) # pylint: disable=assignment-from-no-return # If it doesn't exist, use apply_matrix() - except NotImplementedError: + except NotImplementedCoregApply: if self.is_affine: # This only works for affine, however. @@ -1473,7 +1487,7 @@ def _apply_func(self, **kwargs: Any) -> tuple[np.ndarray | gpd.GeoDataFrame, aff applied_elev = self._apply_pts(**kwargs) # If it doesn't exist, use opencv's perspectiveTransform - except NotImplementedError: + except NotImplementedCoregApply: if self.is_affine: # This only works on it's rigid, however. applied_elev = apply_matrix_pts(epc=kwargs["elev"], @@ -1499,7 +1513,7 @@ def _fit_rst_rst(self, **kwargs: Any, ) -> None: # FOR DEVELOPERS: This function needs to be implemented by subclassing. - raise NotImplementedError("This step has to be implemented by subclassing.") + raise NotImplementedCoregFit("This step has to be implemented by subclassing.") def _fit_rst_pts(self, ref_elev: NDArrayf, @@ -1514,7 +1528,7 @@ def _fit_rst_pts(self, **kwargs: Any, ) -> None: # FOR DEVELOPERS: This function needs to be implemented by subclassing. - raise NotImplementedError("This step has to be implemented by subclassing.") + raise NotImplementedCoregFit("This step has to be implemented by subclassing.") def _fit_pts_pts(self, ref_elev: gpd.GeoDataFrame, @@ -1529,7 +1543,7 @@ def _fit_pts_pts(self, **kwargs: Any, ) -> None: # FOR DEVELOPERS: This function needs to be implemented by subclassing. - raise NotImplementedError("This step has to be implemented by subclassing.") + raise NotImplementedCoregFit("This step has to be implemented by subclassing.") def _apply_rst( self, @@ -1541,7 +1555,7 @@ def _apply_rst( ) -> tuple[NDArrayf, rio.transform.Affine]: # FOR DEVELOPERS: This function needs to be implemented by subclassing. - raise NotImplementedError("This should have been implemented by subclassing.") + raise NotImplementedCoregApply("This should have been implemented by subclassing.") def _apply_pts( self, @@ -1552,7 +1566,7 @@ def _apply_pts( ) -> gpd.GeoDataFrame: # FOR DEVELOPERS: This function needs to be implemented by subclassing. - raise NotImplementedError("This should have been implemented by subclassing.") + raise NotImplementedCoregApply("This should have been implemented by subclassing.") class CoregPipeline(Coreg): @@ -2119,7 +2133,7 @@ def _apply_rst( # Other option than resample=True is not implemented for this case if "resample" in kwargs and kwargs["resample"] is not True: - raise NotImplementedError() + raise NotImplementedError("Option `resample=False` not implemented for coreg method BlockwiseCoreg.") points = self.to_points() @@ -2139,8 +2153,8 @@ def _apply_rst( data={"z": edges_source_arr[:, 2]}) edges_dest = self.apply(edges_source) - edges_dest_arr = np.reshape([edges_dest.geometry.x.values, edges_dest.geometry.y.values, - edges_dest["z"].values], (1, 3)) + edges_dest_arr = np.array([edges_dest.geometry.x.values, edges_dest.geometry.y.values, + edges_dest["z"].values]).T edges = np.dstack((edges_source_arr, edges_dest_arr)) all_points = np.append(points, edges, axis=0) @@ -2155,6 +2169,34 @@ def _apply_rst( return warped_dem, transform + def _apply_pts(self, + elev: gpd.GeoDataFrame, + z_name: str = "z", + bias_vars: dict[str, NDArrayf] | None = None, + **kwargs: Any) -> gpd.GeoDataFrame: + """Apply the scaling model to a set of points.""" + points = self.to_points() + + new_coords = np.array([elev.geometry.x.values, elev.geometry.y.values, elev["z"].values]).T + + for dim in range(0, 3): + with warnings.catch_warnings(): + # ZeroDivisionErrors may happen when the transformation is empty (which is fine) + warnings.filterwarnings("ignore", message="ZeroDivisionError") + model = scipy.interpolate.Rbf( + points[:, 0, 0], + points[:, 1, 0], + points[:, dim, 1] - points[:, dim, 0], + function="linear", + ) + + new_coords[:, dim] += model(elev.geometry.x.values, elev.geometry.y.values) + + gdf_new_coords = \ + gpd.GeoDataFrame(geometry=gpd.points_from_xy(x=new_coords[:, 0], y=new_coords[:, 1], crs=None), + data={"z": new_coords[:, 2]}) + + return gdf_new_coords def warp_dem( dem: NDArrayf, From e1ed322ac79baae0c87256ff0c7fe4f1dff435a5 Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Wed, 6 Mar 2024 13:44:10 -0900 Subject: [PATCH 08/54] Add point-raster support to BiasCorr classes and write all tests --- tests/test_coreg/test_biascorr.py | 174 +++++++++++++-------- xdem/coreg/affine.py | 7 +- xdem/coreg/base.py | 26 +++- xdem/coreg/biascorr.py | 247 ++++++++++++++++++++++++++++-- 4 files changed, 367 insertions(+), 87 deletions(-) diff --git a/tests/test_coreg/test_biascorr.py b/tests/test_coreg/test_biascorr.py index eb4fd20e..7ad6feb1 100644 --- a/tests/test_coreg/test_biascorr.py +++ b/tests/test_coreg/test_biascorr.py @@ -6,6 +6,7 @@ import geoutils as gu import numpy as np +import geopandas as gpd import pytest import scipy @@ -35,14 +36,40 @@ class TestBiasCorr: ref, tba, outlines = load_examples() # Load example reference, to-be-aligned and mask. inlier_mask = ~outlines.create_mask(ref) - fit_params = dict( + # Check all possibilities supported by biascorr: + # Raster-Raster + fit_args_rst_rst = dict( reference_elev=ref, to_be_aligned_elev=tba, inlier_mask=inlier_mask, verbose=True, ) - # Create some 3D coordinates with Z coordinates being 0 to try the apply functions. - points = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [0, 0, 0, 0]], dtype="float64").T + + # Convert DEMs to points with a bit of subsampling for speed-up + # TODO: Simplify once this GeoUtils issue is resolved: https://github.com/GlacioHack/geoutils/issues/499 + tba_pts = tba.to_points(subsample=50000, pixel_offset="ul").ds + tba_pts = tba_pts.rename(columns={"b1": "z"}) + + ref_pts = ref.to_points(subsample=50000, pixel_offset="ul").ds + ref_pts = ref_pts.rename(columns={"b1": "z"}) + + # Raster-Point + fit_args_rst_pts = dict( + reference_elev=ref, + to_be_aligned_elev=tba_pts, + inlier_mask=inlier_mask, + verbose=True, + ) + + # Point-Raster + fit_args_pts_rst = dict( + reference_elev=ref_pts, + to_be_aligned_elev=tba, + inlier_mask=inlier_mask, + verbose=True, + ) + + all_fit_args = [fit_args_rst_rst, fit_args_rst_pts, fit_args_pts_rst] def test_biascorr(self) -> None: """Test the parent class BiasCorr instantiation.""" @@ -135,6 +162,7 @@ def test_biascorr__errors(self) -> None: ): biascorr.BiasCorr(fit_or_bin="bin", bin_apply_method=1) # type: ignore + @pytest.mark.parametrize("fit_args", all_fit_args) # type: ignore @pytest.mark.parametrize( "fit_func", ("norder_polynomial", "nfreq_sumsin", lambda x, a, b: x[0] * a + b) ) # type: ignore @@ -144,24 +172,24 @@ def test_biascorr__errors(self) -> None: scipy.optimize.curve_fit, ], ) # type: ignore - def test_biascorr__fit_1d(self, fit_func, fit_optimizer, capsys) -> None: + def test_biascorr__fit_1d(self, fit_args, fit_func, fit_optimizer, capsys) -> None: """Test the _fit_func and apply_func methods of BiasCorr for the fit case (called by all its subclasses).""" # Create a bias correction object bcorr = biascorr.BiasCorr(fit_or_bin="fit", fit_func=fit_func, fit_optimizer=fit_optimizer) # Run fit using elevation as input variable - elev_fit_params = self.fit_params.copy() + elev_fit_args = fit_args.copy() bias_vars_dict = {"elevation": self.ref} - elev_fit_params.update({"bias_vars": bias_vars_dict}) + elev_fit_args.update({"bias_vars": bias_vars_dict}) # To speed up the tests, pass niter to basinhopping through "nfreq_sumsin" # Also fix random state for basinhopping if fit_func == "nfreq_sumsin": - elev_fit_params.update({"niter": 1}) + elev_fit_args.update({"niter": 1}) # Run with input parameter, and using only 100 subsamples for speed - bcorr.fit(**elev_fit_params, subsample=100, random_state=42) + bcorr.fit(**elev_fit_args, subsample=100, random_state=42) # Check that variable names are defined during fit assert bcorr._meta["bias_var_names"] == ["elevation"] @@ -169,6 +197,7 @@ def test_biascorr__fit_1d(self, fit_func, fit_optimizer, capsys) -> None: # Apply the correction bcorr.apply(elev=self.tba, bias_vars=bias_vars_dict) + @pytest.mark.parametrize("fit_args", [fit_args_rst_pts, fit_args_rst_rst]) # type: ignore @pytest.mark.parametrize( "fit_func", (polynomial_2d, lambda x, a, b, c, d: a * x[0] + b * x[1] + c**d) ) # type: ignore @@ -178,20 +207,20 @@ def test_biascorr__fit_1d(self, fit_func, fit_optimizer, capsys) -> None: scipy.optimize.curve_fit, ], ) # type: ignore - def test_biascorr__fit_2d(self, fit_func, fit_optimizer) -> None: + def test_biascorr__fit_2d(self, fit_args, fit_func, fit_optimizer) -> None: """Test the _fit_func and apply_func methods of BiasCorr for the fit case (called by all its subclasses).""" # Create a bias correction object bcorr = biascorr.BiasCorr(fit_or_bin="fit", fit_func=fit_func, fit_optimizer=fit_optimizer) # Run fit using elevation as input variable - elev_fit_params = self.fit_params.copy() + elev_fit_args = fit_args.copy() bias_vars_dict = {"elevation": self.ref, "slope": xdem.terrain.slope(self.ref)} - elev_fit_params.update({"bias_vars": bias_vars_dict}) + elev_fit_args.update({"bias_vars": bias_vars_dict}) # Run with input parameter, and using only 100 subsamples for speed # Passing p0 defines the number of parameters to solve for - bcorr.fit(**elev_fit_params, subsample=100, p0=[0, 0, 0, 0], random_state=42) + bcorr.fit(**elev_fit_args, subsample=100, p0=[0, 0, 0, 0], random_state=42) # Check that variable names are defined during fit assert bcorr._meta["bias_var_names"] == ["elevation", "slope"] @@ -199,21 +228,22 @@ def test_biascorr__fit_2d(self, fit_func, fit_optimizer) -> None: # Apply the correction bcorr.apply(elev=self.tba, bias_vars=bias_vars_dict) + @pytest.mark.parametrize("fit_args", all_fit_args) # type: ignore @pytest.mark.parametrize("bin_sizes", (10, {"elevation": 20}, {"elevation": (0, 500, 1000)})) # type: ignore @pytest.mark.parametrize("bin_statistic", [np.median, np.nanmean]) # type: ignore - def test_biascorr__bin_1d(self, bin_sizes, bin_statistic) -> None: + def test_biascorr__bin_1d(self, fit_args, bin_sizes, bin_statistic) -> None: """Test the _fit_func and apply_func methods of BiasCorr for the fit case (called by all its subclasses).""" # Create a bias correction object bcorr = biascorr.BiasCorr(fit_or_bin="bin", bin_sizes=bin_sizes, bin_statistic=bin_statistic) # Run fit using elevation as input variable - elev_fit_params = self.fit_params.copy() + elev_fit_args = fit_args.copy() bias_vars_dict = {"elevation": self.ref} - elev_fit_params.update({"bias_vars": bias_vars_dict}) + elev_fit_args.update({"bias_vars": bias_vars_dict}) # Run with input parameter, and using only 100 subsamples for speed - bcorr.fit(**elev_fit_params, subsample=1000, random_state=42) + bcorr.fit(**elev_fit_args, subsample=1000, random_state=42) # Check that variable names are defined during fit assert bcorr._meta["bias_var_names"] == ["elevation"] @@ -221,21 +251,22 @@ def test_biascorr__bin_1d(self, bin_sizes, bin_statistic) -> None: # Apply the correction bcorr.apply(elev=self.tba, bias_vars=bias_vars_dict) + @pytest.mark.parametrize("fit_args", all_fit_args) # type: ignore @pytest.mark.parametrize("bin_sizes", (10, {"elevation": (0, 500, 1000), "slope": (0, 20, 40)})) # type: ignore @pytest.mark.parametrize("bin_statistic", [np.median, np.nanmean]) # type: ignore - def test_biascorr__bin_2d(self, bin_sizes, bin_statistic) -> None: + def test_biascorr__bin_2d(self, fit_args, bin_sizes, bin_statistic) -> None: """Test the _fit_func and apply_func methods of BiasCorr for the fit case (called by all its subclasses).""" # Create a bias correction object bcorr = biascorr.BiasCorr(fit_or_bin="bin", bin_sizes=bin_sizes, bin_statistic=bin_statistic) # Run fit using elevation as input variable - elev_fit_params = self.fit_params.copy() + elev_fit_args = fit_args.copy() bias_vars_dict = {"elevation": self.ref, "slope": xdem.terrain.slope(self.ref)} - elev_fit_params.update({"bias_vars": bias_vars_dict}) + elev_fit_args.update({"bias_vars": bias_vars_dict}) # Run with input parameter, and using only 100 subsamples for speed - bcorr.fit(**elev_fit_params, subsample=10000, random_state=42) + bcorr.fit(**elev_fit_args, subsample=10000, random_state=42) # Check that variable names are defined during fit assert bcorr._meta["bias_var_names"] == ["elevation", "slope"] @@ -243,6 +274,7 @@ def test_biascorr__bin_2d(self, bin_sizes, bin_statistic) -> None: # Apply the correction bcorr.apply(elev=self.tba, bias_vars=bias_vars_dict) + @pytest.mark.parametrize("fit_args", all_fit_args) # type: ignore @pytest.mark.parametrize( "fit_func", ("norder_polynomial", "nfreq_sumsin", lambda x, a, b: x[0] * a + b) ) # type: ignore @@ -254,7 +286,7 @@ def test_biascorr__bin_2d(self, bin_sizes, bin_statistic) -> None: ) # type: ignore @pytest.mark.parametrize("bin_sizes", (10, {"elevation": np.arange(0, 1000, 100)})) # type: ignore @pytest.mark.parametrize("bin_statistic", [np.median, np.nanmean]) # type: ignore - def test_biascorr__bin_and_fit_1d(self, fit_func, fit_optimizer, bin_sizes, bin_statistic) -> None: + def test_biascorr__bin_and_fit_1d(self, fit_args, fit_func, fit_optimizer, bin_sizes, bin_statistic) -> None: """Test the _fit_func and apply_func methods of BiasCorr for the bin_and_fit case (called by all subclasses).""" # Create a bias correction object @@ -267,17 +299,17 @@ def test_biascorr__bin_and_fit_1d(self, fit_func, fit_optimizer, bin_sizes, bin_ ) # Run fit using elevation as input variable - elev_fit_params = self.fit_params.copy() + elev_fit_args = fit_args.copy() bias_vars_dict = {"elevation": self.ref} - elev_fit_params.update({"bias_vars": bias_vars_dict}) + elev_fit_args.update({"bias_vars": bias_vars_dict}) # To speed up the tests, pass niter to basinhopping through "nfreq_sumsin" # Also fix random state for basinhopping if fit_func == "nfreq_sumsin": - elev_fit_params.update({"niter": 1}) + elev_fit_args.update({"niter": 1}) # Run with input parameter, and using only 100 subsamples for speed - bcorr.fit(**elev_fit_params, subsample=100, random_state=42) + bcorr.fit(**elev_fit_args, subsample=100, random_state=42) # Check that variable names are defined during fit assert bcorr._meta["bias_var_names"] == ["elevation"] @@ -285,6 +317,7 @@ def test_biascorr__bin_and_fit_1d(self, fit_func, fit_optimizer, bin_sizes, bin_ # Apply the correction bcorr.apply(elev=self.tba, bias_vars=bias_vars_dict) + @pytest.mark.parametrize("fit_args", all_fit_args) # type: ignore @pytest.mark.parametrize( "fit_func", (polynomial_2d, lambda x, a, b, c, d: a * x[0] + b * x[1] + c**d) ) # type: ignore @@ -296,7 +329,7 @@ def test_biascorr__bin_and_fit_1d(self, fit_func, fit_optimizer, bin_sizes, bin_ ) # type: ignore @pytest.mark.parametrize("bin_sizes", (10, {"elevation": (0, 500, 1000), "slope": (0, 20, 40)})) # type: ignore @pytest.mark.parametrize("bin_statistic", [np.median, np.nanmean]) # type: ignore - def test_biascorr__bin_and_fit_2d(self, fit_func, fit_optimizer, bin_sizes, bin_statistic) -> None: + def test_biascorr__bin_and_fit_2d(self, fit_args, fit_func, fit_optimizer, bin_sizes, bin_statistic) -> None: """Test the _fit_func and apply_func methods of BiasCorr for the bin_and_fit case (called by all subclasses).""" # Create a bias correction object @@ -309,13 +342,13 @@ def test_biascorr__bin_and_fit_2d(self, fit_func, fit_optimizer, bin_sizes, bin_ ) # Run fit using elevation as input variable - elev_fit_params = self.fit_params.copy() + elev_fit_args = fit_args.copy() bias_vars_dict = {"elevation": self.ref, "slope": xdem.terrain.slope(self.ref)} - elev_fit_params.update({"bias_vars": bias_vars_dict}) + elev_fit_args.update({"bias_vars": bias_vars_dict}) # Run with input parameter, and using only 100 subsamples for speed # Passing p0 defines the number of parameters to solve for - bcorr.fit(**elev_fit_params, subsample=100, p0=[0, 0, 0, 0], random_state=42) + bcorr.fit(**elev_fit_args, subsample=100, p0=[0, 0, 0, 0], random_state=42) # Check that variable names are defined during fit assert bcorr._meta["bias_var_names"] == ["elevation", "slope"] @@ -323,7 +356,8 @@ def test_biascorr__bin_and_fit_2d(self, fit_func, fit_optimizer, bin_sizes, bin_ # Apply the correction bcorr.apply(elev=self.tba, bias_vars=bias_vars_dict) - def test_biascorr1d(self) -> None: + @pytest.mark.parametrize("fit_args", [fit_args_rst_pts, fit_args_rst_rst]) # type: ignore + def test_biascorr1d(self, fit_args) -> None: """ Test the subclass BiasCorr1D, which defines default parameters for 1D. The rest is already tested in test_biascorr. @@ -343,13 +377,13 @@ def test_biascorr1d(self) -> None: assert bcorr1d._meta["bin_statistic"] == np.nanmedian assert bcorr1d._meta["bin_apply_method"] == "linear" - elev_fit_params = self.fit_params.copy() + elev_fit_args = fit_args.copy() # Raise error when wrong number of parameters are passed with pytest.raises( ValueError, match="A single variable has to be provided through the argument 'bias_vars', " "got 2." ): bias_vars_dict = {"elevation": self.ref, "slope": xdem.terrain.slope(self.ref)} - bcorr1d.fit(**elev_fit_params, bias_vars=bias_vars_dict) + bcorr1d.fit(**elev_fit_args, bias_vars=bias_vars_dict) # Raise error when variables don't match with pytest.raises( @@ -360,9 +394,10 @@ def test_biascorr1d(self) -> None: ): bcorr1d2 = biascorr.BiasCorr1D(bias_var_names=["ncc"]) bias_vars_dict = {"elevation": self.ref} - bcorr1d2.fit(**elev_fit_params, bias_vars=bias_vars_dict) + bcorr1d2.fit(**elev_fit_args, bias_vars=bias_vars_dict) - def test_biascorr2d(self) -> None: + @pytest.mark.parametrize("fit_args", all_fit_args) # type: ignore + def test_biascorr2d(self, fit_args) -> None: """ Test the subclass BiasCorr2D, which defines default parameters for 2D. The rest is already tested in test_biascorr. @@ -382,13 +417,13 @@ def test_biascorr2d(self) -> None: assert bcorr2d._meta["bin_statistic"] == np.nanmedian assert bcorr2d._meta["bin_apply_method"] == "linear" - elev_fit_params = self.fit_params.copy() + elev_fit_args = fit_args.copy() # Raise error when wrong number of parameters are passed with pytest.raises( ValueError, match="Exactly two variables have to be provided through the argument " "'bias_vars', got 1." ): bias_vars_dict = {"elevation": self.ref} - bcorr2d.fit(**elev_fit_params, bias_vars=bias_vars_dict) + bcorr2d.fit(**elev_fit_args, bias_vars=bias_vars_dict) # Raise error when variables don't match with pytest.raises( @@ -400,7 +435,7 @@ def test_biascorr2d(self) -> None: ): bcorr2d2 = biascorr.BiasCorr2D(bias_var_names=["elevation", "ncc"]) bias_vars_dict = {"elevation": self.ref, "slope": xdem.terrain.slope(self.ref)} - bcorr2d2.fit(**elev_fit_params, bias_vars=bias_vars_dict) + bcorr2d2.fit(**elev_fit_args, bias_vars=bias_vars_dict) def test_directionalbias(self) -> None: """Test the subclass DirectionalBias.""" @@ -417,9 +452,10 @@ def test_directionalbias(self) -> None: # Check that variable names are defined during instantiation assert dirbias._meta["bias_var_names"] == ["angle"] + @pytest.mark.parametrize("fit_args", all_fit_args) # type: ignore @pytest.mark.parametrize("angle", [20, 90, 210]) # type: ignore @pytest.mark.parametrize("nb_freq", [1, 2, 3]) # type: ignore - def test_directionalbias__synthetic(self, angle, nb_freq) -> None: + def test_directionalbias__synthetic(self, fit_args, angle, nb_freq) -> None: """Test the subclass DirectionalBias with synthetic data.""" # Get along track @@ -463,23 +499,23 @@ def test_directionalbias__synthetic(self, angle, nb_freq) -> None: (10, 100), (0, 2 * np.pi), ] - dirbias.fit( - reference_elev=self.ref, - to_be_aligned_elev=bias_dem, - subsample=10000, - random_state=42, - bounds_amp_wave_phase=bounds, - niter=10, - ) - - # Check all parameters are the same within 10% + elev_fit_args = fit_args.copy() + if isinstance(elev_fit_args["to_be_aligned_elev"], gpd.GeoDataFrame): + bias_elev = bias_dem.to_points(subsample=50000, pixel_offset="ul").ds.rename(columns={"b1": "z"}) + else: + bias_elev = bias_dem + dirbias.fit(elev_fit_args["reference_elev"], to_be_aligned_elev=bias_elev, subsample=40000, random_state=42, + bounds_amp_wave_phase=bounds, niter=10) + + # Check all fit parameters are the same within 10% fit_params = dirbias._meta["fit_params"] assert np.shape(fit_params) == np.shape(params) assert np.allclose(params, fit_params, rtol=0.1) # Run apply and check that 99% of the variance was corrected corrected_dem = dirbias.apply(bias_dem) - assert np.nanvar(corrected_dem - self.ref) < 0.01 * np.nanvar(synthetic_bias) + # Need to standardize by the synthetic bias spread to avoid huge/small values close to infinity + assert np.nanvar((corrected_dem - self.ref) / np.nanstd(synthetic_bias)) < 0.01 def test_deramp(self) -> None: """Test the subclass Deramp.""" @@ -496,8 +532,9 @@ def test_deramp(self) -> None: # Check that variable names are defined during instantiation assert deramp._meta["bias_var_names"] == ["xx", "yy"] + @pytest.mark.parametrize("fit_args", all_fit_args) # type: ignore @pytest.mark.parametrize("order", [1, 2, 3, 4]) # type: ignore - def test_deramp__synthetic(self, order: int) -> None: + def test_deramp__synthetic(self, fit_args, order: int) -> None: """Run the deramp for varying polynomial orders using a synthetic elevation difference.""" # Get coordinates @@ -516,9 +553,14 @@ def test_deramp__synthetic(self, order: int) -> None: # Fit deramp = biascorr.Deramp(poly_order=order) - deramp.fit(reference_elev=self.ref, to_be_aligned_elev=bias_dem, subsample=10000, random_state=42) - - # Check high-order parameters are the same within 10% + elev_fit_args = fit_args.copy() + if isinstance(elev_fit_args["to_be_aligned_elev"], gpd.GeoDataFrame): + bias_elev = bias_dem.to_points(subsample=20000, pixel_offset="ul").ds.rename(columns={"b1": "z"}) + else: + bias_elev = bias_dem + deramp.fit(elev_fit_args["reference_elev"], to_be_aligned_elev=bias_elev, subsample=10000, random_state=42) + + # Check high-order fit parameters are the same within 10% fit_params = deramp._meta["fit_params"] assert np.shape(fit_params) == np.shape(params) assert np.allclose( @@ -527,7 +569,8 @@ def test_deramp__synthetic(self, order: int) -> None: # Run apply and check that 99% of the variance was corrected corrected_dem = deramp.apply(bias_dem) - assert np.nanvar(corrected_dem - self.ref) < 0.01 * np.nanvar(synthetic_bias) + # Need to standardize by the synthetic bias spread to avoid huge/small values close to infinity + assert np.nanvar((corrected_dem - self.ref) / np.nanstd(synthetic_bias)) < 0.01 def test_terrainbias(self) -> None: """Test the subclass TerrainBias.""" @@ -543,7 +586,8 @@ def test_terrainbias(self) -> None: assert tb._meta["bias_var_names"] == ["maximum_curvature"] - def test_terrainbias__synthetic(self) -> None: + @pytest.mark.parametrize("fit_args", all_fit_args) # type: ignore + def test_terrainbias__synthetic(self, fit_args) -> None: """Test the subclass TerrainBias.""" # Get maximum curvature @@ -567,18 +611,24 @@ def test_terrainbias__synthetic(self) -> None: bin_sizes={"maximum_curvature": bin_edges}, bin_apply_method="per_bin", ) - # We don't want to subsample here, otherwise it might be very hard to derive maximum curvature... - # TODO: Add the option to get terrain attribute before subsampling in the fit subclassing logic? - tb.fit(reference_elev=self.ref, to_be_aligned_elev=bias_dem, random_state=42) + elev_fit_args = fit_args.copy() + if isinstance(elev_fit_args["to_be_aligned_elev"], gpd.GeoDataFrame): + bias_elev = bias_dem.to_points(subsample=20000, pixel_offset="ul").ds.rename(columns={"b1": "z"}) + else: + bias_elev = bias_dem + tb.fit(elev_fit_args["reference_elev"], to_be_aligned_elev=bias_elev, subsample=10000, random_state=42, bias_vars={ + "maximum_curvature": maxc}) # Check high-order parameters are the same within 10% bin_df = tb._meta["bin_dataframe"] - assert [interval.left for interval in bin_df["maximum_curvature"].values] == list(bin_edges[:-1]) - assert [interval.right for interval in bin_df["maximum_curvature"].values] == list(bin_edges[1:]) - assert np.allclose(bin_df["nanmedian"], bias_per_bin, rtol=0.1) + assert [interval.left for interval in bin_df["maximum_curvature"].values] == pytest.approx(list(bin_edges[:-1])) + assert [interval.right for interval in bin_df["maximum_curvature"].values] == pytest.approx(list(bin_edges[1:])) + # assert np.allclose(bin_df["nanmedian"], bias_per_bin, rtol=0.1) # Run apply and check that 99% of the variance was corrected # (we override the bias_var "max_curv" with that of the ref_dem to have a 1 on 1 match with the synthetic bias, # otherwise it is derived from the bias_dem which gives slightly different results than with ref_dem) corrected_dem = tb.apply(bias_dem, bias_vars={"maximum_curvature": maxc}) - assert np.nanvar(corrected_dem - self.ref) < 0.01 * np.nanvar(synthetic_bias) + # Need to standardize by the synthetic bias spread to avoid huge/small values close to infinity + assert np.nanvar((corrected_dem - self.ref) / np.nanstd(synthetic_bias)) < 0.01 + diff --git a/xdem/coreg/affine.py b/xdem/coreg/affine.py index 14785937..06d22f01 100644 --- a/xdem/coreg/affine.py +++ b/xdem/coreg/affine.py @@ -673,10 +673,9 @@ def _to_matrix_func(self) -> NDArrayf: class NuthKaab(AffineCoreg): """ - Nuth and Kääb (2011) DEM coregistration. + Nuth and Kääb (2011) DEM coregistration: iterative registration of horizontal and vertical shift using slope/aspect. - Implemented after the paper: - https://doi.org/10.5194/tc-5-271-2011 + Implemented after the paper: https://doi.org/10.5194/tc-5-271-2011. """ def __init__(self, max_iterations: int = 10, offset_threshold: float = 0.05, subsample: int | float = 5e5) -> None: @@ -684,7 +683,7 @@ def __init__(self, max_iterations: int = 10, offset_threshold: float = 0.05, sub Instantiate a new Nuth and Kääb (2011) coregistration object. :param max_iterations: The maximum allowed iterations before stopping. - :param offset_threshold: The residual offset threshold after which to stop the iterations. + :param offset_threshold: The residual offset threshold after which to stop the iterations (in pixels). :param subsample: Subsample the input for speed-up. <1 is parsed as a fraction. >1 is a pixel count. """ self._meta: CoregDict diff --git a/xdem/coreg/base.py b/xdem/coreg/base.py index 97636e03..c1dd0118 100644 --- a/xdem/coreg/base.py +++ b/xdem/coreg/base.py @@ -404,6 +404,19 @@ def _preprocess_coreg_fit_raster_point( if crs is None: raise ValueError("'crs' must be given if both DEMs are array-like.") + # Make sure that the mask has an expected format. + if inlier_mask is not None: + if isinstance(inlier_mask, Mask): + inlier_mask = inlier_mask.data.filled(False).squeeze() + else: + inlier_mask = np.asarray(inlier_mask).squeeze() + assert inlier_mask.dtype == bool, f"Invalid mask dtype: '{inlier_mask.dtype}'. Expected 'bool'" + + if np.all(~inlier_mask): + raise ValueError("'inlier_mask' had no inliers.") + else: + inlier_mask = np.ones(np.shape(rst_elev), dtype=bool) + # TODO: Convert to point cloud? # Convert geodataframe to vector point_elev = point_elev.to_crs(crs=crs) @@ -1015,7 +1028,10 @@ def _get_subsample_on_valid_mask(self, valid_mask: NDArrayb, verbose: bool = Fal # We return a boolean mask of the subsample within valid values subsample_mask = np.zeros(np.shape(valid_mask), dtype=bool) - subsample_mask[indices[0], indices[1]] = True + if len(indices) == 2: + subsample_mask[indices[0], indices[1]] = True + else: + subsample_mask[indices[0]] = True else: # If no subsample is taken, use all valid values subsample_mask = valid_mask @@ -1409,9 +1425,9 @@ def _fit_func( f"trying raster-point method by converting to-be-aligned DEM to points.", UserWarning ) - tba_dem_pts = gu.Raster.from_array(data=kwargs["tba_dem"], transform=kwargs["transform"], + tba_elev_pts = gu.Raster.from_array(data=kwargs["tba_elev"], transform=kwargs["transform"], crs=kwargs["crs"]).to_points().ds - kwargs.update({"tba_dem": tba_dem_pts}) + kwargs.update({"tba_elev": tba_elev_pts}) try_rp = True # For raster-point @@ -1424,9 +1440,9 @@ def _fit_func( f"trying point-point method by converting all elevation data to points.", UserWarning ) - ref_dem_pts = gu.Raster.from_array(data=kwargs["ref_dem"], transform=kwargs["transform"], + ref_elev_pts = gu.Raster.from_array(data=kwargs["ref_elev"], transform=kwargs["transform"], crs=kwargs["crs"]).to_points().ds - kwargs.update({"ref_dem": ref_dem_pts}) + kwargs.update({"ref_elev": ref_elev_pts}) try_pp = True # For point-point diff --git a/xdem/coreg/biascorr.py b/xdem/coreg/biascorr.py index 62897f59..dfaaa9f5 100644 --- a/xdem/coreg/biascorr.py +++ b/xdem/coreg/biascorr.py @@ -7,6 +7,7 @@ import geoutils as gu import numpy as np import pandas as pd +import geopandas as gpd import rasterio as rio import scipy @@ -139,7 +140,7 @@ def __init__( self._is_affine = False self._needs_vars = True - def _fit_rst_rst( # type: ignore + def _fit_biascorr( # type: ignore self, ref_elev: NDArrayf, tba_elev: NDArrayf, @@ -152,7 +153,10 @@ def _fit_rst_rst( # type: ignore verbose: bool = False, **kwargs, ) -> None: - """Should only be called through subclassing.""" + """ + Generic fit method for all biascorr subclasses, expects either 2D arrays for rasters or 1D arrays for points. + Should only be called through subclassing. + """ # This is called by subclasses, so the bias_var should always be defined if bias_vars is None: @@ -318,6 +322,85 @@ def _fit_rst_rst( # type: ignore elif self._fit_or_bin in ["bin", "bin_and_fit"]: self._meta["bin_dataframe"] = df + def _fit_rst_rst(self, + ref_elev: NDArrayf, + tba_elev: NDArrayf, + inlier_mask: NDArrayb, + transform: rio.transform.Affine, + crs: rio.crs.CRS, + z_name: str, + weights: NDArrayf | None, + bias_vars: dict[str, NDArrayf] | None = None, + verbose: bool = False, + **kwargs: Any, + ) -> None: + """Should only be called through subclassing""" + + self._fit_biascorr(ref_elev=ref_elev, tba_elev=tba_elev, inlier_mask=inlier_mask, transform=transform, + crs=crs, z_name=z_name, weights=weights, bias_vars=bias_vars, verbose=verbose, **kwargs) + + def _fit_rst_pts( # type: ignore + self, + ref_elev: NDArrayf | gpd.GeoDataFrame, + tba_elev: NDArrayf | gpd.GeoDataFrame, + inlier_mask: NDArrayb, + transform: rio.transform.Affine, # Never None thanks to Coreg.fit() pre-process + crs: rio.crs.CRS, # Never None thanks to Coreg.fit() pre-process + z_name: str, + bias_vars: None | dict[str, NDArrayf] = None, + weights: None | NDArrayf = None, + verbose: bool = False, + **kwargs, + ) -> None: + """Should only be called through subclassing.""" + + # Get point reference to also convert inlier and bias vars + pts_elev = ref_elev if isinstance(ref_elev, gpd.GeoDataFrame) else tba_elev + rst_elev = ref_elev if not isinstance(ref_elev, gpd.GeoDataFrame) else tba_elev + + pts = np.array((pts_elev.geometry.x.values, pts_elev.geometry.y.values)).T + + valid_mask = np.logical_and.reduce( + (inlier_mask, np.isfinite(rst_elev), *(np.isfinite(var) for var in bias_vars.values())) + ) + # Convert inlier mask to points to be able to determine subsample later + inlier_rst = gu.Raster.from_array(data=valid_mask, transform=transform, crs=crs) + # The location needs to be surrounded by inliers, use floor to get 0 for at least one outlier + valid_pts = np.floor(inlier_rst.interp_points(pts)).astype(bool) # Interpolates boolean mask as integers + + # If there is a subsample, it needs to be done now on the point dataset to reduce later calculations + subsample_mask = self._get_subsample_on_valid_mask(valid_mask=valid_pts, verbose=verbose) + pts = pts[subsample_mask] + + # Now all points should be valid, we can pass an inlier mask completely true + inlier_pts_alltrue = np.ones(len(pts), dtype=bool) + + # Below, we derive 1D arrays for the rst_rst function to take over after interpolating to the point coordinates + + # Convert ref or tba depending on which is the point dataset + if isinstance(ref_elev, gpd.GeoDataFrame): + tba_rst = gu.Raster.from_array(data=tba_elev, transform=transform, crs=crs) + tba_elev_pts = tba_rst.interp_points(pts) + ref_elev_pts = ref_elev[z_name].values[subsample_mask] + else: + ref_rst = gu.Raster.from_array(data=ref_elev, transform=transform, crs=crs) + ref_elev_pts = ref_rst.interp_points(pts) + tba_elev_pts = tba_elev[z_name].values[subsample_mask] + + # Convert bias variables + if bias_vars is not None: + bias_vars_pts = {} + for var in bias_vars.keys(): + bias_vars_pts[var] = gu.Raster.from_array(bias_vars[var], transform=transform, crs=crs).interp_points(pts) + else: + bias_vars_pts = None + + # Send to raster-raster fit + self._fit_biascorr(ref_elev=ref_elev_pts, tba_elev=tba_elev_pts, inlier_mask=inlier_pts_alltrue, + bias_vars=bias_vars_pts, transform=transform, crs=crs, z_name=z_name, weights=weights, + verbose=verbose, **kwargs) + + def _apply_rst( # type: ignore self, elev: NDArrayf, @@ -413,7 +496,7 @@ def __init__( subsample, ) - def _fit_rst_rst( # type: ignore + def _fit_biascorr( # type: ignore self, ref_elev: NDArrayf, tba_elev: NDArrayf, @@ -435,7 +518,7 @@ def _fit_rst_rst( # type: ignore "got {}.".format(len(bias_vars)) ) - super()._fit_rst_rst( + super()._fit_biascorr( ref_elev=ref_elev, tba_elev=tba_elev, inlier_mask=inlier_mask, @@ -490,7 +573,7 @@ def __init__( subsample, ) - def _fit_rst_rst( # type: ignore + def _fit_biascorr( # type: ignore self, ref_elev: NDArrayf, tba_elev: NDArrayf, @@ -511,7 +594,7 @@ def _fit_rst_rst( # type: ignore ", got {}.".format(len(bias_vars)) ) - super()._fit_rst_rst( + super()._fit_biascorr( ref_elev=ref_elev, tba_elev=tba_elev, inlier_mask=inlier_mask, @@ -568,7 +651,7 @@ def __init__( subsample, ) - def _fit_rst_rst( # type: ignore + def _fit_biascorr( # type: ignore self, ref_elev: NDArrayf, tba_elev: NDArrayf, @@ -586,7 +669,7 @@ def _fit_rst_rst( # type: ignore if bias_vars is None or len(bias_vars) <= 2: raise ValueError('At least three variables have to be provided through the argument "bias_vars".') - super()._fit_rst_rst( + super()._fit_biascorr( ref_elev=ref_elev, tba_elev=tba_elev, inlier_mask=inlier_mask, @@ -664,7 +747,51 @@ def _fit_rst_rst( # type: ignore average_res = (transform[0] + abs(transform[4])) / 2 kwargs.update({"hop_length": average_res}) - super()._fit_rst_rst( + self._fit_biascorr( + ref_elev=ref_elev, + tba_elev=tba_elev, + inlier_mask=inlier_mask, + bias_vars={"angle": x}, + transform=transform, + crs=crs, + z_name=z_name, + weights=weights, + verbose=verbose, + **kwargs, + ) + + def _fit_rst_pts( # type: ignore + self, + ref_elev: NDArrayf | gpd.GeoDataFrame, + tba_elev: NDArrayf | gpd.GeoDataFrame, + inlier_mask: NDArrayb, + transform: rio.transform.Affine, + crs: rio.crs.CRS, + z_name: str, + bias_vars: dict[str, NDArrayf] = None, + weights: None | NDArrayf = None, + verbose: bool = False, + **kwargs, + ) -> None: + + # Figure out which data is raster format to get gridded attributes + rast_elev = ref_elev if not isinstance(ref_elev, gpd.GeoDataFrame) else tba_elev + + if verbose: + print("Estimating rotated coordinates.") + + x, _ = gu.raster.get_xy_rotated( + raster=gu.Raster.from_array(data=rast_elev, crs=crs, transform=transform), + along_track_angle=self._meta["angle"], + ) + + # Parameters dependent on resolution cannot be derived from the rotated x coordinates, need to be passed below + if "hop_length" not in kwargs: + # The hop length will condition jump in function values, need to be larger than average resolution + average_res = (transform[0] + abs(transform[4])) / 2 + kwargs.update({"hop_length": average_res}) + + super()._fit_rst_pts( ref_elev=ref_elev, tba_elev=tba_elev, inlier_mask=inlier_mask, @@ -763,16 +890,67 @@ def _fit_rst_rst( # type: ignore **kwargs, ) -> None: - # Derive terrain attribute - if self._meta["terrain_attribute"] == "elevation": - attr = ref_elev + # If already passed by user, pass along + if self._meta["terrain_attribute"] in bias_vars: + attr = bias_vars[self._meta["terrain_attribute"]] + + # If only declared during instantiation else: - attr = xdem.terrain.get_terrain_attribute( - dem=ref_elev, attribute=self._meta["terrain_attribute"], resolution=(transform[0], abs(transform[4])) + # Derive terrain attribute + if self._meta["terrain_attribute"] == "elevation": + attr = ref_elev + else: + attr = xdem.terrain.get_terrain_attribute( + dem=ref_elev, attribute=self._meta["terrain_attribute"], resolution=(transform[0], abs(transform[4])) ) # Run the parent function - super()._fit_rst_rst( + self._fit_biascorr( + ref_elev=ref_elev, + tba_elev=tba_elev, + inlier_mask=inlier_mask, + bias_vars={self._meta["terrain_attribute"]: attr}, + transform=transform, + crs=crs, + z_name=z_name, + weights=weights, + verbose=verbose, + **kwargs, + ) + + def _fit_rst_pts( # type: ignore + self, + ref_elev: NDArrayf | gpd.GeoDataFrame, + tba_elev: NDArrayf | gpd.GeoDataFrame, + inlier_mask: NDArrayb, + transform: rio.transform.Affine, + crs: rio.crs.CRS, + z_name: str, + bias_vars: dict[str, NDArrayf] = None, + weights: None | NDArrayf = None, + verbose: bool = False, + **kwargs, + ) -> None: + + # If already passed by user, pass along + if self._meta["terrain_attribute"] in bias_vars: + attr = bias_vars[self._meta["terrain_attribute"]] + + # If only declared during instantiation + else: + # Figure out which data is raster format to get gridded attributes + rast_elev = ref_elev if not isinstance(ref_elev, gpd.GeoDataFrame) else tba_elev + + # Derive terrain attribute + if self._meta["terrain_attribute"] == "elevation": + attr = rast_elev + else: + attr = xdem.terrain.get_terrain_attribute( + dem=rast_elev, attribute=self._meta["terrain_attribute"], resolution=(transform[0], abs(transform[4])) + ) + + # Run the parent function + super()._fit_rst_pts( ref_elev=ref_elev, tba_elev=tba_elev, inlier_mask=inlier_mask, @@ -870,7 +1048,44 @@ def _fit_rst_rst( # type: ignore # Coordinates (we don't need the actual ones, just array coordinates) xx, yy = np.meshgrid(np.arange(0, ref_elev.shape[1]), np.arange(0, ref_elev.shape[0])) - super()._fit_rst_rst( + self._fit_biascorr( + ref_elev=ref_elev, + tba_elev=tba_elev, + inlier_mask=inlier_mask, + bias_vars={"xx": xx, "yy": yy}, + transform=transform, + crs=crs, + z_name=z_name, + weights=weights, + verbose=verbose, + p0=p0, + **kwargs, + ) + + def _fit_rst_pts( # type: ignore + self, + ref_elev: NDArrayf | gpd.GeoDataFrame, + tba_elev: NDArrayf | gpd.GeoDataFrame, + inlier_mask: NDArrayb, + transform: rio.transform.Affine, + crs: rio.crs.CRS, + z_name: str, + bias_vars: dict[str, NDArrayf] | None = None, + weights: None | NDArrayf = None, + verbose: bool = False, + **kwargs, + ) -> None: + + # Figure out which data is raster format to get gridded attributes + rast_elev = ref_elev if not isinstance(ref_elev, gpd.GeoDataFrame) else tba_elev + + # The number of parameters in the first guess defines the polynomial order when calling np.polyval2d + p0 = np.ones(shape=((self._meta["poly_order"] + 1) * (self._meta["poly_order"] + 1))) + + # Coordinates (we don't need the actual ones, just array coordinates) + xx, yy = np.meshgrid(np.arange(0, rast_elev.shape[1]), np.arange(0, rast_elev.shape[0])) + + super()._fit_rst_pts( ref_elev=ref_elev, tba_elev=tba_elev, inlier_mask=inlier_mask, From 574db9a77e1e6834e68c0638d2a98ebbbbb85373 Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Wed, 6 Mar 2024 14:11:01 -0900 Subject: [PATCH 09/54] Fix last test issues --- tests/test_coreg/test_affine.py | 2 +- tests/test_coreg/test_base.py | 10 +++--- xdem/coreg/__init__.py | 2 +- xdem/coreg/base.py | 56 ++++++++++++++++++++++++++++----- xdem/coreg/biascorr.py | 4 +-- 5 files changed, 57 insertions(+), 17 deletions(-) diff --git a/tests/test_coreg/test_affine.py b/tests/test_coreg/test_affine.py index b133d05b..55607318 100644 --- a/tests/test_coreg/test_affine.py +++ b/tests/test_coreg/test_affine.py @@ -168,7 +168,7 @@ def test_gradientdescending(self, subsample: int = 10000, inlier_mask: bool = Tr ) assert gds._meta["offset_east_px"] == pytest.approx(-0.496000, rel=1e-1, abs=0.1) assert gds._meta["offset_north_px"] == pytest.approx(-0.1875, rel=1e-1, abs=0.1) - assert gds._meta["vshift"] == pytest.approx(-1.8730, rel=1e-1) + assert gds._meta["vshift"] == pytest.approx(-2.39, rel=1e-1) @pytest.mark.parametrize("shift_px", [(1, 1), (2, 2)]) # type: ignore @pytest.mark.parametrize("coreg_class", [coreg.NuthKaab, coreg.GradientDescending, coreg.ICP]) # type: ignore diff --git a/tests/test_coreg/test_base.py b/tests/test_coreg/test_base.py index db457ecc..ae66cefb 100644 --- a/tests/test_coreg/test_base.py +++ b/tests/test_coreg/test_base.py @@ -20,7 +20,7 @@ import xdem from xdem import coreg, examples, misc, spatialstats from xdem._typing import NDArrayf - from xdem.coreg.base import Coreg, apply_matrix_rst + from xdem.coreg.base import Coreg, _apply_matrix_rst def load_examples() -> tuple[RasterType, RasterType, Vector]: @@ -831,7 +831,7 @@ def test_apply_matrix() -> None: vshift = 5 matrix = np.diag(np.ones(4, float)) matrix[2, 3] = vshift - transformed_dem = apply_matrix_rst(ref_arr, ref.transform, matrix) + transformed_dem = _apply_matrix_rst(ref_arr, ref.transform, matrix) reverted_dem = transformed_dem - vshift # Check that the reverted DEM has the exact same values as the initial one @@ -850,7 +850,7 @@ def test_apply_matrix() -> None: matrix[0, 3] = pixel_shift * tba.res[0] matrix[2, 3] = -vshift - transformed_dem = apply_matrix_rst(shifted_dem, ref.transform, matrix, resampling="bilinear") + transformed_dem = _apply_matrix_rst(shifted_dem, ref.transform, matrix, resampling="bilinear") diff = np.asarray(ref_arr - transformed_dem) # Check that the median is very close to zero @@ -876,14 +876,14 @@ def rotation_matrix(rotation: float = 30) -> NDArrayf: np.mean([ref.bounds.top, ref.bounds.bottom]), ref.data.mean(), ) - rotated_dem = apply_matrix_rst(ref.data.squeeze(), ref.transform, rotation_matrix(rotation), centroid=centroid) + rotated_dem = _apply_matrix_rst(ref.data.squeeze(), ref.transform, rotation_matrix(rotation), centroid=centroid) # Make sure that the rotated DEM is way off, but is centered around the same approximate point. assert np.abs(np.nanmedian(rotated_dem - ref.data.data)) < 1 assert spatialstats.nmad(rotated_dem - ref.data.data) > 500 # Apply a rotation in the opposite direction unrotated_dem = ( - apply_matrix_rst(rotated_dem, ref.transform, rotation_matrix(-rotation * 0.99), centroid=centroid) + 4.0 + _apply_matrix_rst(rotated_dem, ref.transform, rotation_matrix(-rotation * 0.99), centroid=centroid) + 4.0 ) # TODO: Check why the 0.99 rotation and +4 vertical shift were introduced. diff = np.asarray(ref.data.squeeze() - unrotated_dem) diff --git a/xdem/coreg/__init__.py b/xdem/coreg/__init__.py index 7c630c3a..ed42d223 100644 --- a/xdem/coreg/__init__.py +++ b/xdem/coreg/__init__.py @@ -10,7 +10,7 @@ Tilt, VerticalShift, ) -from xdem.coreg.base import BlockwiseCoreg, Coreg, CoregPipeline, apply_matrix_rst # noqa +from xdem.coreg.base import BlockwiseCoreg, Coreg, CoregPipeline, apply_matrix, invert_matrix # noqa from xdem.coreg.biascorr import ( # noqa BiasCorr, BiasCorr1D, diff --git a/xdem/coreg/base.py b/xdem/coreg/base.py index c1dd0118..b4c258f7 100644 --- a/xdem/coreg/base.py +++ b/xdem/coreg/base.py @@ -707,7 +707,47 @@ def invert_matrix(matrix: NDArrayf) -> NDArrayf: return pytransform3d.transformations.invert_transform(checked_matrix) -def apply_matrix_rst( +def apply_matrix( + elev: gu.Raster | NDArrayf | gpd.GeoDataFrame, + matrix: NDArrayf, + invert: bool = False, + centroid: tuple[float, float, float] | None = None, + resampling: int | str = "bilinear", + transform: rio.transform.Affine = None, + z_name: str = "z", +) -> NDArrayf | gu.Raster | gpd.GeoDataFrame: + """ + Apply a 3D affine transformation matrix to a 3D elevation point cloud or 2.5D DEM. + + :param elev: Elevation point cloud or DEM to transform, either a 2D array (requires transform) or + geodataframe (requires z_name). + :param matrix: Affine (4x4) transformation matrix to apply to the DEM. + :param invert: Whether to invert the transformation matrix. + :param centroid: The X/Y/Z transformation centroid. Irrelevant for pure translations. Defaults to the midpoint (Z=0). + :param resampling: The resampling method to use, only for DEM 2.5D transformation. Can be `nearest`, `bilinear`, + `cubic` or an integer from 0-5. + :param transform: Geotransform of the DEM, only for DEM passed as 2D array. + :param z_name: Column name to use as elevation, only for point elevation data passed as geodataframe. + :return: + """ + + if isinstance(elev, gpd.GeoDataFrame): + return _apply_matrix_pts(epc=elev, matrix=matrix, invert=invert, centroid=centroid, z_name=z_name) + else: + if isinstance(elev, gu.Raster): + transform = elev.transform + dem = elev.data + else: + dem = elev + + # TODO: Add exception for translation to update only geotransform, maybe directly in apply_matrix? + applied_dem = _apply_matrix_rst(dem=dem, transform=transform, matrix=matrix, invert=invert, centroid=centroid, + resampling=resampling) + if isinstance(elev, gu.Raster): + applied_dem = gu.Raster.from_array(applied_dem, transform, elev.crs, elev.nodata) + return applied_dem + +def _apply_matrix_rst( dem: NDArrayf, transform: rio.transform.Affine, matrix: NDArrayf, @@ -845,7 +885,7 @@ def apply_matrix_rst( return transformed_dem -def apply_matrix_pts( +def _apply_matrix_pts( epc: gpd.GeoDataFrame, matrix: NDArrayf, invert: bool = False, @@ -859,7 +899,7 @@ def apply_matrix_pts( :param matrix: Affine (4x4) transformation matrix to apply to the DEM. :param invert: Whether to invert the transformation matrix. :param centroid: The X/Y/Z transformation centroid. Irrelevant for pure translations. Defaults to the midpoint (Z=0). - :param z_name: + :param z_name: Column name to use as elevation, only for point elevation data passed as geodataframe. :return: Transformed elevation point cloud. """ @@ -1484,7 +1524,7 @@ def _apply_func(self, **kwargs: Any) -> tuple[np.ndarray | gpd.GeoDataFrame, aff # Apply the matrix around the centroid (if defined, otherwise just from the center). transform = kwargs.pop("transform") - applied_elev = apply_matrix_rst( + applied_elev = _apply_matrix_rst( dem=kwargs.pop("elev"), transform=transform, matrix=self.to_matrix(), @@ -1506,10 +1546,10 @@ def _apply_func(self, **kwargs: Any) -> tuple[np.ndarray | gpd.GeoDataFrame, aff except NotImplementedCoregApply: if self.is_affine: # This only works on it's rigid, however. - applied_elev = apply_matrix_pts(epc=kwargs["elev"], - matrix=self.to_matrix(), - centroid=self._meta.get("centroid"), - z_name=kwargs.pop("z_name")) + applied_elev = _apply_matrix_pts(epc=kwargs["elev"], + matrix=self.to_matrix(), + centroid=self._meta.get("centroid"), + z_name=kwargs.pop("z_name")) else: raise ValueError("Cannot transform, Coreg method is non-affine and has no implemented _apply_pts.") diff --git a/xdem/coreg/biascorr.py b/xdem/coreg/biascorr.py index dfaaa9f5..4ae3c0fc 100644 --- a/xdem/coreg/biascorr.py +++ b/xdem/coreg/biascorr.py @@ -891,7 +891,7 @@ def _fit_rst_rst( # type: ignore ) -> None: # If already passed by user, pass along - if self._meta["terrain_attribute"] in bias_vars: + if bias_vars is not None and self._meta["terrain_attribute"] in bias_vars: attr = bias_vars[self._meta["terrain_attribute"]] # If only declared during instantiation @@ -933,7 +933,7 @@ def _fit_rst_pts( # type: ignore ) -> None: # If already passed by user, pass along - if self._meta["terrain_attribute"] in bias_vars: + if bias_vars is not None and self._meta["terrain_attribute"] in bias_vars: attr = bias_vars[self._meta["terrain_attribute"]] # If only declared during instantiation From 2f0183f18f18c82d4432a4efe09a56fba22fe5b9 Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Wed, 6 Mar 2024 14:24:53 -0900 Subject: [PATCH 10/54] Fix example --- examples/basic/plot_icp_coregistration.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/examples/basic/plot_icp_coregistration.py b/examples/basic/plot_icp_coregistration.py index 91145ccd..87c6d483 100644 --- a/examples/basic/plot_icp_coregistration.py +++ b/examples/basic/plot_icp_coregistration.py @@ -44,8 +44,7 @@ ) # This will apply the matrix along the center of the DEM -rotated_dem_data = xdem.coreg.apply_matrix_rst(dem.data.squeeze(), transform=dem.transform, matrix=rotation_matrix) -rotated_dem = xdem.DEM.from_array(rotated_dem_data, transform=dem.transform, crs=dem.crs, nodata=-9999) +rotated_dem = xdem.coreg.apply_matrix(dem, matrix=rotation_matrix) # %% # We can plot the difference between the original and rotated DEM. From ff1cc4c9eaa1aa1722b0919160a558d8386128f2 Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Wed, 6 Mar 2024 15:00:08 -0900 Subject: [PATCH 11/54] Linting --- tests/test_coreg/test_affine.py | 12 +- tests/test_coreg/test_base.py | 30 ++- tests/test_coreg/test_biascorr.py | 22 +- xdem/coreg/__init__.py | 8 +- xdem/coreg/affine.py | 119 +++++---- xdem/coreg/base.py | 422 +++++++++++++++++++----------- xdem/coreg/biascorr.py | 104 +++++--- 7 files changed, 463 insertions(+), 254 deletions(-) diff --git a/tests/test_coreg/test_affine.py b/tests/test_coreg/test_affine.py index 55607318..51d6d488 100644 --- a/tests/test_coreg/test_affine.py +++ b/tests/test_coreg/test_affine.py @@ -4,9 +4,9 @@ import copy import warnings +import geopandas as gpd import numpy as np import pytest -import geopandas as gpd import rasterio as rio from geoutils import Raster, Vector from geoutils.raster import RasterType @@ -42,8 +42,9 @@ class TestAffineCoreg: ) # Create some 3D coordinates with Z coordinates being 0 to try the apply functions. points_arr = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [0, 0, 0, 0]], dtype="float64").T - points = gpd.GeoDataFrame(geometry=gpd.points_from_xy(x=points_arr[:, 0], y=points_arr[:, 1], crs=ref.crs), - data={"z": points_arr[:, 2]}) + points = gpd.GeoDataFrame( + geometry=gpd.points_from_xy(x=points_arr[:, 0], y=points_arr[:, 1], crs=ref.crs), data={"z": points_arr[:, 2]} + ) def test_from_classmethods(self) -> None: @@ -262,7 +263,10 @@ def test_nuth_kaab(self) -> None: transformed_points = nuth_kaab.apply(self.points) # Check that the x shift is close to the pixel_shift * image resolution - assert all(abs((transformed_points.geometry.x.values - self.points.geometry.x.values) - pixel_shift * self.ref.res[0]) < 0.1) + assert all( + abs((transformed_points.geometry.x.values - self.points.geometry.x.values) - pixel_shift * self.ref.res[0]) + < 0.1 + ) # Check that the z shift is close to the original vertical shift. assert all(abs((transformed_points["z"].values - self.points["z"].values) + vshift) < 0.1) diff --git a/tests/test_coreg/test_base.py b/tests/test_coreg/test_base.py index ae66cefb..59860beb 100644 --- a/tests/test_coreg/test_base.py +++ b/tests/test_coreg/test_base.py @@ -7,9 +7,9 @@ import warnings from typing import Any, Callable +import geopandas as gpd import geoutils as gu import numpy as np -import geopandas as gpd import pytest import rasterio as rio from geoutils import Raster, Vector @@ -49,8 +49,9 @@ class TestCoregClass: ) # Create some 3D coordinates with Z coordinates being 0 to try the apply functions. points_arr = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [0, 0, 0, 0]], dtype="float64").T - points = gpd.GeoDataFrame(geometry=gpd.points_from_xy(x=points_arr[:, 0], y=points_arr[:, 1], crs=ref.crs), - data={"z": points_arr[:, 2]}) + points = gpd.GeoDataFrame( + geometry=gpd.points_from_xy(x=points_arr[:, 0], y=points_arr[:, 1], crs=ref.crs), data={"z": points_arr[:, 2]} + ) def test_init(self) -> None: """Test instantiation of Coreg""" @@ -415,8 +416,15 @@ def test_apply_resample(self, inputs: list[Any]) -> None: "'crs' must be given if DEM is array-like.", ), ("dem1", "dem2", "dem2.transform", "None", "apply", "warns", "DEM .* overrides the given 'transform'"), - ("None", "None", "None", "None", "fit", "error", "Input elevation data should be a raster, " - "an array or a geodataframe."), + ( + "None", + "None", + "None", + "None", + "fit", + "error", + "Input elevation data should be a raster, " "an array or a geodataframe.", + ), ("dem1 + np.nan", "dem2", "None", "None", "fit", "error", "'reference_dem' had only NaNs"), ("dem1", "dem2 + np.nan", "None", "None", "fit", "error", "'dem_to_be_aligned' had only NaNs"), ], @@ -507,8 +515,9 @@ class TestCoregPipeline: ) # Create some 3D coordinates with Z coordinates being 0 to try the apply functions. points_arr = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [0, 0, 0, 0]], dtype="float64").T - points = gpd.GeoDataFrame(geometry=gpd.points_from_xy(x=points_arr[:, 0], y=points_arr[:, 1], crs=ref.crs), - data={"z": points_arr[:, 2]}) + points = gpd.GeoDataFrame( + geometry=gpd.points_from_xy(x=points_arr[:, 0], y=points_arr[:, 1], crs=ref.crs), data={"z": points_arr[:, 2]} + ) @pytest.mark.parametrize("coreg_class", [coreg.VerticalShift, coreg.ICP, coreg.NuthKaab]) # type: ignore def test_copy(self, coreg_class: Callable[[], Coreg]) -> None: @@ -730,8 +739,9 @@ class TestBlockwiseCoreg: ) # Create some 3D coordinates with Z coordinates being 0 to try the apply functions. points_arr = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [0, 0, 0, 0]], dtype="float64").T - points = gpd.GeoDataFrame(geometry=gpd.points_from_xy(x=points_arr[:, 0], y=points_arr[:, 1], crs=ref.crs), - data={"z": points_arr[:, 2]}) + points = gpd.GeoDataFrame( + geometry=gpd.points_from_xy(x=points_arr[:, 0], y=points_arr[:, 1], crs=ref.crs), data={"z": points_arr[:, 2]} + ) @pytest.mark.parametrize( "pipeline", [coreg.VerticalShift(), coreg.VerticalShift() + coreg.NuthKaab()] @@ -883,7 +893,7 @@ def rotation_matrix(rotation: float = 30) -> NDArrayf: # Apply a rotation in the opposite direction unrotated_dem = ( - _apply_matrix_rst(rotated_dem, ref.transform, rotation_matrix(-rotation * 0.99), centroid=centroid) + 4.0 + _apply_matrix_rst(rotated_dem, ref.transform, rotation_matrix(-rotation * 0.99), centroid=centroid) + 4.0 ) # TODO: Check why the 0.99 rotation and +4 vertical shift were introduced. diff = np.asarray(ref.data.squeeze() - unrotated_dem) diff --git a/tests/test_coreg/test_biascorr.py b/tests/test_coreg/test_biascorr.py index 7ad6feb1..bf3cc192 100644 --- a/tests/test_coreg/test_biascorr.py +++ b/tests/test_coreg/test_biascorr.py @@ -4,9 +4,9 @@ import re import warnings +import geopandas as gpd import geoutils as gu import numpy as np -import geopandas as gpd import pytest import scipy @@ -504,8 +504,14 @@ def test_directionalbias__synthetic(self, fit_args, angle, nb_freq) -> None: bias_elev = bias_dem.to_points(subsample=50000, pixel_offset="ul").ds.rename(columns={"b1": "z"}) else: bias_elev = bias_dem - dirbias.fit(elev_fit_args["reference_elev"], to_be_aligned_elev=bias_elev, subsample=40000, random_state=42, - bounds_amp_wave_phase=bounds, niter=10) + dirbias.fit( + elev_fit_args["reference_elev"], + to_be_aligned_elev=bias_elev, + subsample=40000, + random_state=42, + bounds_amp_wave_phase=bounds, + niter=10, + ) # Check all fit parameters are the same within 10% fit_params = dirbias._meta["fit_params"] @@ -616,8 +622,13 @@ def test_terrainbias__synthetic(self, fit_args) -> None: bias_elev = bias_dem.to_points(subsample=20000, pixel_offset="ul").ds.rename(columns={"b1": "z"}) else: bias_elev = bias_dem - tb.fit(elev_fit_args["reference_elev"], to_be_aligned_elev=bias_elev, subsample=10000, random_state=42, bias_vars={ - "maximum_curvature": maxc}) + tb.fit( + elev_fit_args["reference_elev"], + to_be_aligned_elev=bias_elev, + subsample=10000, + random_state=42, + bias_vars={"maximum_curvature": maxc}, + ) # Check high-order parameters are the same within 10% bin_df = tb._meta["bin_dataframe"] @@ -631,4 +642,3 @@ def test_terrainbias__synthetic(self, fit_args) -> None: corrected_dem = tb.apply(bias_dem, bias_vars={"maximum_curvature": maxc}) # Need to standardize by the synthetic bias spread to avoid huge/small values close to infinity assert np.nanvar((corrected_dem - self.ref) / np.nanstd(synthetic_bias)) < 0.01 - diff --git a/xdem/coreg/__init__.py b/xdem/coreg/__init__.py index ed42d223..3776e6ba 100644 --- a/xdem/coreg/__init__.py +++ b/xdem/coreg/__init__.py @@ -10,7 +10,13 @@ Tilt, VerticalShift, ) -from xdem.coreg.base import BlockwiseCoreg, Coreg, CoregPipeline, apply_matrix, invert_matrix # noqa +from xdem.coreg.base import ( # noqa + BlockwiseCoreg, + Coreg, + CoregPipeline, + apply_matrix, + invert_matrix, +) from xdem.coreg.biascorr import ( # noqa BiasCorr, BiasCorr1D, diff --git a/xdem/coreg/affine.py b/xdem/coreg/affine.py index 06d22f01..31399352 100644 --- a/xdem/coreg/affine.py +++ b/xdem/coreg/affine.py @@ -13,15 +13,14 @@ _has_cv2 = True except ImportError: _has_cv2 = False -import numpy as np -import pandas as pd import geopandas as gpd +import numpy as np import rasterio as rio import scipy import scipy.interpolate import scipy.ndimage import scipy.optimize -from geoutils.raster import Raster, RasterType, get_array_and_mask +from geoutils.raster import Raster, get_array_and_mask from tqdm import trange from xdem._typing import NDArrayb, NDArrayf @@ -331,7 +330,7 @@ def _fit_rst_rst( transform: rio.transform.Affine, crs: rio.crs.CRS, z_name: str, - weights: NDArrayf | None, + weights: NDArrayf | None = None, bias_vars: dict[str, NDArrayf] | None = None, verbose: bool = False, **kwargs: Any, @@ -377,11 +376,11 @@ def _apply_rst( return elev + self._meta["vshift"], transform def _apply_pts( - self, - elev: gpd.GeoDataFrame, - z_name: str = "z", - bias_vars: dict[str, NDArrayf] | None = None, - **kwargs: Any, + self, + elev: gpd.GeoDataFrame, + z_name: str = "z", + bias_vars: dict[str, NDArrayf] | None = None, + **kwargs: Any, ) -> gpd.GeoDataFrame: """Apply the VerticalShift function to a set of points.""" @@ -445,7 +444,7 @@ def _fit_rst_rst( transform: rio.transform.Affine, crs: rio.crs.CRS, z_name: str, - weights: NDArrayf | None, + weights: NDArrayf | None = None, bias_vars: dict[str, NDArrayf] | None = None, verbose: bool = False, **kwargs: Any, @@ -469,25 +468,37 @@ def _fit_rst_rst( ) subsample_mask = self._get_subsample_on_valid_mask(valid_mask=valid_mask) - ref_pts = gpd.GeoDataFrame(geometry=gpd.points_from_xy(x=x_coords[subsample_mask], - y=y_coords[subsample_mask], - crs=None), - data={"z": ref_elev[subsample_mask], "nx": normal_east[subsample_mask], - "ny": normal_north[subsample_mask], "nz": normal_up[subsample_mask]}) + ref_pts = gpd.GeoDataFrame( + geometry=gpd.points_from_xy(x=x_coords[subsample_mask], y=y_coords[subsample_mask], crs=None), + data={ + "z": ref_elev[subsample_mask], + "nx": normal_east[subsample_mask], + "ny": normal_north[subsample_mask], + "nz": normal_up[subsample_mask], + }, + ) - self._fit_rst_pts(ref_elev=ref_pts, tba_elev=tba_elev, inlier_mask=inlier_mask, - transform=transform, crs=crs, verbose=verbose, z_name="z") + self._fit_rst_pts( + ref_elev=ref_pts, + tba_elev=tba_elev, + inlier_mask=inlier_mask, + transform=transform, + crs=crs, + verbose=verbose, + z_name="z", + ) def _fit_rst_pts( self, - ref_elev: gpd.GeoDataFrame | NDArrayf, - tba_elev: gpd.GeoDataFrame | NDArrayf, + ref_elev: NDArrayf | gpd.GeoDataFrame, + tba_elev: NDArrayf | gpd.GeoDataFrame, inlier_mask: NDArrayb, transform: rio.transform.Affine, crs: rio.crs.CRS, + z_name: str, weights: NDArrayf | None = None, + bias_vars: dict[str, NDArrayf] | None = None, verbose: bool = False, - z_name: str = "z", **kwargs: Any, ) -> None: @@ -542,7 +553,8 @@ def _fit_rst_pts( for key, raster in [("nx", normal_east), ("ny", normal_north), ("nz", normal_up)]: raster.tags["AREA_OR_POINT"] = "Area" point_elev[key] = raster.interp_points( - point_elev[["E", "N"]].values, shift_area_or_point=True, + point_elev[["E", "N"]].values, + shift_area_or_point=True, ) point_elev["E"] -= centroid[0] @@ -609,7 +621,7 @@ def _fit_rst_rst( transform: rio.transform.Affine, crs: rio.crs.CRS, z_name: str, - weights: NDArrayf | None, + weights: NDArrayf | None = None, bias_vars: dict[str, NDArrayf] | None = None, verbose: bool = False, **kwargs: Any, @@ -641,11 +653,11 @@ def _apply_rst( return elev + ramp, transform def _apply_pts( - self, - elev: gpd.GeoDataFrame, - z_name: str = "z", - bias_vars: dict[str, NDArrayf] | None = None, - **kwargs: Any, + self, + elev: gpd.GeoDataFrame, + z_name: str = "z", + bias_vars: dict[str, NDArrayf] | None = None, + **kwargs: Any, ) -> gpd.GeoDataFrame: """Apply the deramp function to a set of points.""" dem_copy = elev.copy() @@ -700,7 +712,7 @@ def _fit_rst_rst( transform: rio.transform.Affine, crs: rio.crs.CRS, z_name: str, - weights: NDArrayf | None, + weights: NDArrayf | None = None, bias_vars: dict[str, NDArrayf] | None = None, verbose: bool = False, **kwargs: Any, @@ -833,14 +845,16 @@ def _fit_rst_rst( def _fit_rst_pts( self, - ref_elev: gpd.GeoDataFrame | NDArrayf, - tba_elev: gpd.GeoDataFrame | NDArrayf, + ref_elev: NDArrayf | gpd.GeoDataFrame, + tba_elev: NDArrayf | gpd.GeoDataFrame, inlier_mask: NDArrayb, transform: rio.transform.Affine, crs: rio.crs.CRS, + z_name: str, weights: NDArrayf | None = None, + bias_vars: dict[str, NDArrayf] | None = None, verbose: bool = False, - z_name: str = "z", + **kwargs: Any, ) -> None: """ Estimate the x/y/z offset between a DEM and points cloud. @@ -889,7 +903,9 @@ def _fit_rst_pts( slope_r = rst_elev.copy(new_array=np.ma.masked_array(slope[None, :, :], mask=~np.isfinite(slope[None, :, :]))) slope_r.tags["AREA_OR_POINT"] = area_or_point - aspect_r = rst_elev.copy(new_array=np.ma.masked_array(aspect[None, :, :], mask=~np.isfinite(aspect[None, :, :]))) + aspect_r = rst_elev.copy( + new_array=np.ma.masked_array(aspect[None, :, :], mask=~np.isfinite(aspect[None, :, :])) + ) aspect_r.tags["AREA_OR_POINT"] = area_or_point # Initialise east and north pixel offset variables (these will be incremented up and down) @@ -1013,21 +1029,23 @@ def _apply_rst( return elev + vshift, updated_transform def _apply_pts( - self, - elev: gpd.GeoDataFrame, - z_name: str = "z", - bias_vars: dict[str, NDArrayf] | None = None, - **kwargs: Any, + self, + elev: gpd.GeoDataFrame, + z_name: str = "z", + bias_vars: dict[str, NDArrayf] | None = None, + **kwargs: Any, ) -> gpd.GeoDataFrame: """Apply the Nuth & Kaab shift to a set of points.""" offset_east = self._meta["offset_east_px"] * self._meta["resolution"] offset_north = self._meta["offset_north_px"] * self._meta["resolution"] - applied_epc = gpd.GeoDataFrame(geometry=gpd.points_from_xy(x=elev.geometry.x.values + offset_east, - y=elev.geometry.y.values + offset_north, - crs=elev.crs), - data={z_name: elev[z_name].values + self._meta["vshift"]}) + applied_epc = gpd.GeoDataFrame( + geometry=gpd.points_from_xy( + x=elev.geometry.x.values + offset_east, y=elev.geometry.y.values + offset_north, crs=elev.crs + ), + data={z_name: elev[z_name].values + self._meta["vshift"]}, + ) return applied_epc @@ -1071,15 +1089,15 @@ def __init__( def _fit_rst_pts( self, - ref_elev: gpd.GeoDataFrame | NDArrayf, - tba_elev: gpd.GeoDataFrame | NDArrayf, + ref_elev: NDArrayf | gpd.GeoDataFrame, + tba_elev: NDArrayf | gpd.GeoDataFrame, inlier_mask: NDArrayb, transform: rio.transform.Affine, crs: rio.crs.CRS, + z_name: str, weights: NDArrayf | None = None, + bias_vars: dict[str, NDArrayf] | None = None, verbose: bool = False, - z_name: str = "z", - random_state: int = 42, **kwargs: Any, ) -> None: """Estimate the x/y/z offset between two DEMs. @@ -1106,7 +1124,9 @@ def _fit_rst_pts( # Perform downsampling if subsample != None if self._meta["subsample"] and len(point_elev) > self._meta["subsample"]: - point_elev = point_elev.sample(frac=self._meta["subsample"] / len(point_elev), random_state=random_state).copy() + point_elev = point_elev.sample( + frac=self._meta["subsample"] / len(point_elev), random_state=self._meta["random_state"] + ).copy() else: point_elev = point_elev.copy() @@ -1137,7 +1157,7 @@ def _fit_rst_pts( # start iteration, find the best shifting px def func_cost(x: tuple[float, float]) -> np.floating[Any]: - return nmad(_residuals_df(rst_elev, point_elev, x, 0, z_name=z_name, weight=weights)) + return nmad(_residuals_df(rst_elev, point_elev, x, 0, z_name=z_name)) res = minimizeCompass( func_cost, @@ -1199,8 +1219,9 @@ def _fit_rst_rst( ref_elev["E"] = ref_elev.geometry.x ref_elev["N"] = ref_elev.geometry.y ref_elev.rename(columns={"b1": z_name}, inplace=True) - self._fit_rst_pts(ref_elev=ref_elev, tba_elev=tba_elev, transform=transform, crs=crs, inlier_mask=inlier_mask, - **kwargs) + self._fit_rst_pts( + ref_elev=ref_elev, tba_elev=tba_elev, transform=transform, crs=crs, inlier_mask=inlier_mask, **kwargs + ) def _to_matrix_func(self) -> NDArrayf: """Return a transformation matrix from the estimated offsets.""" diff --git a/xdem/coreg/base.py b/xdem/coreg/base.py index b4c258f7..8763a549 100644 --- a/xdem/coreg/base.py +++ b/xdem/coreg/base.py @@ -26,10 +26,10 @@ except ImportError: _has_cv2 = False import fiona +import geopandas as gpd import geoutils as gu import numpy as np import pandas as pd -import geopandas as gpd import rasterio as rio import rasterio.warp # pylint: disable=unused-import import scipy @@ -38,6 +38,7 @@ import scipy.optimize import skimage.transform from geoutils._typing import Number +from geoutils.misc import resampling_method_from_str from geoutils.raster import ( Mask, RasterType, @@ -46,12 +47,10 @@ subdivide_array, subsample_array, ) -from geoutils.misc import resampling_method_from_str from tqdm import tqdm from xdem._typing import MArrayf, NDArrayb, NDArrayf from xdem.spatialstats import nmad -from xdem.terrain import get_terrain_attribute try: import pytransform3d.transformations @@ -109,7 +108,7 @@ def _residuals_df( shift_px: tuple[float, float], dz: float, z_name: str, - weight: str = None, + weight_name: str = None, **kwargs: Any, ) -> pd.DataFrame: """ @@ -140,7 +139,7 @@ def _residuals_df( # ndimage return dem_h = scipy.ndimage.map_coordinates(arr_, [i, j], order=1, mode="nearest", **kwargs) - weight_ = df[weight] if weight else 1 + weight_ = df[weight_name] if weight_name else 1 return (df_shifted[z_name].values - dem_h) * weight_ @@ -297,6 +296,7 @@ def _mask_as_array(reference_raster: gu.Raster, mask: str | gu.Vector | gu.Raste return mask_array + def _preprocess_coreg_fit_raster_raster( reference_dem: NDArrayf | MArrayf | RasterType, dem_to_be_aligned: NDArrayf | MArrayf | RasterType, @@ -380,12 +380,14 @@ def _preprocess_coreg_fit_raster_raster( return ref_dem, tba_dem, inlier_mask, transform, crs + def _preprocess_coreg_fit_raster_point( raster_elev: NDArrayf | MArrayf | RasterType, point_elev: gpd.GeoDataFrame, inlier_mask: NDArrayb | Mask | None = None, transform: rio.transform.Affine | None = None, - crs: rio.crs.CRS | None = None,): + crs: rio.crs.CRS | None = None, +) -> tuple[NDArrayf, gpd.GeoDataFrame, NDArrayb, affine.Affine, rio.crs.CRS]: """Pre-processing and checks of fit for raster-point input.""" # TODO: Convert to point cloud once class is done @@ -423,9 +425,10 @@ def _preprocess_coreg_fit_raster_point( return rst_elev, point_elev, inlier_mask, transform, crs + def _preprocess_coreg_fit_point_point( - reference_elev: gpd.GeoDataFrame, - to_be_aligned_elev: gpd.GeoDataFrame): + reference_elev: gpd.GeoDataFrame, to_be_aligned_elev: gpd.GeoDataFrame +) -> tuple[gpd.GeoDataFrame, gpd.GeoDataFrame]: """Pre-processing and checks of fit for point-point input.""" ref_dem = reference_elev @@ -433,22 +436,32 @@ def _preprocess_coreg_fit_point_point( return ref_dem, tba_dem + def _preprocess_coreg_fit( reference_elev: NDArrayf | MArrayf | RasterType | gpd.GeoDataFrame, to_be_aligned_elev: NDArrayf | MArrayf | RasterType | gpd.GeoDataFrame, inlier_mask: NDArrayb | Mask | None = None, transform: rio.transform.Affine | None = None, - crs: rio.crs.CRS | None = None,): + crs: rio.crs.CRS | None = None, +) -> tuple[ + NDArrayf | gpd.GeoDataFrame, NDArrayf | gpd.GeoDataFrame, NDArrayb | None, affine.Affine | None, rio.crs.CRS | None +]: """Pre-processing and checks of fit for any input.""" - if not all(isinstance(elev, (np.ndarray, gu.Raster, gpd.GeoDataFrame)) for elev in (reference_elev, to_be_aligned_elev)): + if not all( + isinstance(elev, (np.ndarray, gu.Raster, gpd.GeoDataFrame)) for elev in (reference_elev, to_be_aligned_elev) + ): raise ValueError("Input elevation data should be a raster, an array or a geodataframe.") # If both inputs are raster or arrays, reprojection on the same grid is needed for raster-raster methods if all(isinstance(elev, (np.ndarray, gu.Raster)) for elev in (reference_elev, to_be_aligned_elev)): - ref_elev, tba_elev, inlier_mask, transform, crs = \ - _preprocess_coreg_fit_raster_raster(reference_dem=reference_elev, dem_to_be_aligned=to_be_aligned_elev, - inlier_mask=inlier_mask, transform=transform, crs=crs) + ref_elev, tba_elev, inlier_mask, transform, crs = _preprocess_coreg_fit_raster_raster( + reference_dem=reference_elev, + dem_to_be_aligned=to_be_aligned_elev, + inlier_mask=inlier_mask, + transform=transform, + crs=crs, + ) # If one input is raster, and the other is point, we reproject the point data to the same CRS and extract arrays elif any(isinstance(dem, (np.ndarray, gu.Raster)) for dem in (reference_elev, to_be_aligned_elev)): @@ -461,9 +474,9 @@ def _preprocess_coreg_fit( point_elev = reference_elev ref = "point" - rst_elev, point_elev, inlier_mask, transform, crs = \ - _preprocess_coreg_fit_raster_point(raster_elev=raster_elev, point_elev=point_elev, - inlier_mask=inlier_mask, transform=transform, crs=crs) + rst_elev, point_elev, inlier_mask, transform, crs = _preprocess_coreg_fit_raster_point( + raster_elev=raster_elev, point_elev=point_elev, inlier_mask=inlier_mask, transform=transform, crs=crs + ) if ref == "raster": ref_elev = rst_elev @@ -474,15 +487,18 @@ def _preprocess_coreg_fit( # If both inputs are points, simply reproject to the same CRS else: - ref_elev, tba_elev = _preprocess_coreg_fit_point_point(reference_elev=reference_elev, - to_be_aligned_elev=to_be_aligned_elev) + ref_elev, tba_elev = _preprocess_coreg_fit_point_point( + reference_elev=reference_elev, to_be_aligned_elev=to_be_aligned_elev + ) return ref_elev, tba_elev, inlier_mask, transform, crs + def _preprocess_coreg_apply( elev: NDArrayf | MArrayf | RasterType | gpd.GeoDataFrame, transform: rio.transform.Affine | None = None, - crs: rio.crs.CRS | None = None): + crs: rio.crs.CRS | None = None, +) -> tuple[NDArrayf | gpd.GeoDataFrame, affine.Affine, rio.crs.CRS]: """Pre-processing and checks of apply for any input.""" if not isinstance(elev, (np.ndarray, gu.Raster, gpd.GeoDataFrame)): @@ -522,22 +538,24 @@ def _preprocess_coreg_apply( return elev_out, new_transform, new_crs + def _postprocess_coreg_apply_pts( - applied_elev: gpd.GeoDataFrame, + applied_elev: gpd.GeoDataFrame, ) -> gpd.GeoDataFrame: """Post-processing and checks of apply for point input.""" # TODO: Convert CRS back if the CRS did not match the one of the fit? return applied_elev + def _postprocess_coreg_apply_rst( - elev: NDArrayf | gu.Raster, - applied_elev: NDArrayf, - transform: affine.Affine, - out_transform: affine.Affine, - crs: rio.crs.CRS, - resample: bool, - resampling: rio.warp.Resampling | None = None, + elev: NDArrayf | gu.Raster, + applied_elev: NDArrayf, + transform: affine.Affine, + out_transform: affine.Affine, + crs: rio.crs.CRS, + resample: bool, + resampling: rio.warp.Resampling | None = None, ) -> tuple[NDArrayf | gu.Raster, affine.Affine]: """Post-processing and checks of apply for raster input.""" @@ -579,23 +597,32 @@ def _postprocess_coreg_apply_rst( else: return applied_elev, out_transform + def _postprocess_coreg_apply( - elev: NDArrayf | gu.Raster | gpd.GeoDataFrame, - applied_elev: NDArrayf | gpd.GeoDataFrame, - transform: affine.Affine, - out_transform: affine.Affine, - crs: rio.crs.CRS, - resample: bool, - resampling: rio.warp.Resampling | None = None, + elev: NDArrayf | gu.Raster | gpd.GeoDataFrame, + applied_elev: NDArrayf | gpd.GeoDataFrame, + transform: affine.Affine, + out_transform: affine.Affine, + crs: rio.crs.CRS, + resample: bool, + resampling: rio.warp.Resampling | None = None, ) -> tuple[NDArrayf | gpd.GeoDataFrame, affine.Affine]: """Post-processing and checks of apply for any input.""" + # Define resampling + resampling = resampling if isinstance(resampling, rio.warp.Resampling) else resampling_method_from_str(resampling) + + # Distribute between raster and point apply methods if isinstance(applied_elev, np.ndarray): - applied_elev, out_transform = _postprocess_coreg_apply_rst(elev=elev, applied_elev=applied_elev, - transform=transform, crs=crs, - out_transform=out_transform, - resample=resample, - resampling=resampling) + applied_elev, out_transform = _postprocess_coreg_apply_rst( + elev=elev, + applied_elev=applied_elev, + transform=transform, + crs=crs, + out_transform=out_transform, + resample=resample, + resampling=resampling, + ) else: applied_elev = _postprocess_coreg_apply_pts(applied_elev) @@ -723,7 +750,8 @@ def apply_matrix( geodataframe (requires z_name). :param matrix: Affine (4x4) transformation matrix to apply to the DEM. :param invert: Whether to invert the transformation matrix. - :param centroid: The X/Y/Z transformation centroid. Irrelevant for pure translations. Defaults to the midpoint (Z=0). + :param centroid: The X/Y/Z transformation centroid. Irrelevant for pure translations. + Defaults to the midpoint (Z=0). :param resampling: The resampling method to use, only for DEM 2.5D transformation. Can be `nearest`, `bilinear`, `cubic` or an integer from 0-5. :param transform: Geotransform of the DEM, only for DEM passed as 2D array. @@ -741,12 +769,14 @@ def apply_matrix( dem = elev # TODO: Add exception for translation to update only geotransform, maybe directly in apply_matrix? - applied_dem = _apply_matrix_rst(dem=dem, transform=transform, matrix=matrix, invert=invert, centroid=centroid, - resampling=resampling) + applied_dem = _apply_matrix_rst( + dem=dem, transform=transform, matrix=matrix, invert=invert, centroid=centroid, resampling=resampling + ) if isinstance(elev, gu.Raster): applied_dem = gu.Raster.from_array(applied_dem, transform, elev.crs, elev.nodata) return applied_dem + def _apply_matrix_rst( dem: NDArrayf, transform: rio.transform.Affine, @@ -773,7 +803,8 @@ def _apply_matrix_rst( :param transform: Geotransform of the DEM. :param matrix: Affine (4x4) transformation matrix to apply to the DEM. :param invert: Whether to invert the transformation matrix. - :param centroid: The X/Y/Z transformation centroid. Irrelevant for pure translations. Defaults to the midpoint (Z=0). + :param centroid: The X/Y/Z transformation centroid. Irrelevant for pure translations. + Defaults to the midpoint (Z=0). :param resampling: The resampling method to use. Can be `nearest`, `bilinear`, `cubic` or an integer from 0-5. :param fill_max_search: Set to > 0 value to fill the DEM before applying the transformation, to avoid spreading\ gaps. The DEM will be filled with rasterio.fill.fillnodata with max_search_distance set to fill_max_search.\ @@ -885,6 +916,7 @@ def _apply_matrix_rst( return transformed_dem + def _apply_matrix_pts( epc: gpd.GeoDataFrame, matrix: NDArrayf, @@ -898,7 +930,8 @@ def _apply_matrix_pts( :param epc: Elevation point cloud. :param matrix: Affine (4x4) transformation matrix to apply to the DEM. :param invert: Whether to invert the transformation matrix. - :param centroid: The X/Y/Z transformation centroid. Irrelevant for pure translations. Defaults to the midpoint (Z=0). + :param centroid: The X/Y/Z transformation centroid. Irrelevant for pure translations. + Defaults to the midpoint (Z=0). :param z_name: Column name to use as elevation, only for point elevation data passed as geodataframe. :return: Transformed elevation point cloud. @@ -914,22 +947,26 @@ def _apply_matrix_pts( # Transform the points (around the centroid if it exists). if centroid is not None: points -= centroid - transformed_points = cv2.perspectiveTransform(points.reshape(1, -1, 3), - matrix)[0, :, :] # Select the first dimension that is one + transformed_points = cv2.perspectiveTransform(points.reshape(1, -1, 3), matrix)[ + 0, :, : + ] # Select the first dimension that is one if centroid is not None: transformed_points += centroid # Finally, transform back to a new GeoDataFrame - transformed_epc = gpd.GeoDataFrame(geometry=gpd.points_from_xy(x=transformed_points[:, 0], - y=transformed_points[:, 1], crs=epc.crs), - data={"z": transformed_points[:, 2]}) + transformed_epc = gpd.GeoDataFrame( + geometry=gpd.points_from_xy(x=transformed_points[:, 0], y=transformed_points[:, 1], crs=epc.crs), + data={"z": transformed_points[:, 2]}, + ) return transformed_epc + ########################################### # Generic coregistration processing classes ########################################### + class NotImplementedCoregFit(NotImplementedError): """ Error subclass for not implemented coregistration fit methods; mainly to differentiate with NotImplementedError @@ -941,6 +978,7 @@ class NotImplementedCoregApply(NotImplementedError): Error subclass for not implemented coregistration fit methods; mainly to differentiate with NotImplementedError """ + class CoregDict(TypedDict, total=False): """ Defining the type of each possible key in the metadata dictionary of Process classes. @@ -1197,6 +1235,7 @@ def apply( elev: MArrayf, bias_vars: dict[str, NDArrayf | MArrayf | RasterType] | None = None, resample: bool = True, + resampling: str | rio.warp.Resampling = "bilinear", transform: rio.transform.Affine | None = None, crs: rio.crs.CRS | None = None, z_name: str = "z", @@ -1210,6 +1249,7 @@ def apply( elev: NDArrayf, bias_vars: dict[str, NDArrayf | MArrayf | RasterType] | None = None, resample: bool = True, + resampling: str | rio.warp.Resampling = "bilinear", transform: rio.transform.Affine | None = None, crs: rio.crs.CRS | None = None, z_name: str = "z", @@ -1223,6 +1263,7 @@ def apply( elev: RasterType | gpd.GeoDataFrame, bias_vars: dict[str, NDArrayf | MArrayf | RasterType] | None = None, resample: bool = True, + resampling: str | rio.warp.Resampling = "bilinear", transform: rio.transform.Affine | None = None, crs: rio.crs.CRS | None = None, z_name: str = "z", @@ -1232,7 +1273,7 @@ def apply( def apply( self, - elev: RasterType | NDArrayf | MArrayf | gpd.GeoDataFrame, + elev: MArrayf | NDArrayf | RasterType | gpd.GeoDataFrame, bias_vars: dict[str, NDArrayf | MArrayf | RasterType] | None = None, resample: bool = True, resampling: str | rio.warp.Resampling = "bilinear", @@ -1240,7 +1281,7 @@ def apply( crs: rio.crs.CRS | None = None, z_name: str = "z", **kwargs: Any, - ) -> RasterType | tuple[NDArrayf, rio.transform.Affine] | tuple[MArrayf, rio.transform.Affine]: + ) -> RasterType | gpd.GeoDataFrame | tuple[NDArrayf, rio.transform.Affine] | tuple[MArrayf, rio.transform.Affine]: """ Apply the estimated transform to a DEM. @@ -1277,13 +1318,16 @@ def apply( # Call _apply_func to choose method depending on point/raster input and if specific apply method exists applied_elev, out_transform = self._apply_func(**main_args, **kwargs) - # Define resampling - resampling = resampling if isinstance(resampling, rio.warp.Resampling) else resampling_method_from_str(resampling) - # Post-process output depending on input type - applied_elev, out_transform = _postprocess_coreg_apply(elev=elev, applied_elev=applied_elev, transform=transform, - out_transform=out_transform, crs=crs, resample=resample, - resampling=resampling) + applied_elev, out_transform = _postprocess_coreg_apply( + elev=elev, + applied_elev=applied_elev, + transform=transform, + out_transform=out_transform, + crs=crs, + resample=resample, + resampling=resampling, + ) # Only return object if raster or geodataframe, also return transform if object was an array if isinstance(applied_elev, (gu.Raster, gpd.GeoDataFrame)): @@ -1292,14 +1336,14 @@ def apply( return applied_elev, out_transform def residuals( - self, - reference_elev: NDArrayf, - to_be_aligned_elev: NDArrayf, - inlier_mask: NDArrayb | None = None, - transform: rio.transform.Affine | None = None, - crs: rio.crs.CRS | None = None, - subsample: float | int = 1.0, - random_state: None | np.random.RandomState | np.random.Generator | int = None, + self, + reference_elev: NDArrayf, + to_be_aligned_elev: NDArrayf, + inlier_mask: NDArrayb | None = None, + transform: rio.transform.Affine | None = None, + crs: rio.crs.CRS | None = None, + subsample: float | int = 1.0, + random_state: None | np.random.RandomState | np.random.Generator | int = None, ) -> NDArrayf: """ Calculate the residual offsets (the difference) between two DEMs after applying the transformation. @@ -1463,10 +1507,13 @@ def _fit_func( warnings.warn( f"No raster-raster method found for coregistration {self.__class__.__name__}, " f"trying raster-point method by converting to-be-aligned DEM to points.", - UserWarning + UserWarning, + ) + tba_elev_pts = ( + gu.Raster.from_array(data=kwargs["tba_elev"], transform=kwargs["transform"], crs=kwargs["crs"]) + .to_points() + .ds ) - tba_elev_pts = gu.Raster.from_array(data=kwargs["tba_elev"], transform=kwargs["transform"], - crs=kwargs["crs"]).to_points().ds kwargs.update({"tba_elev": tba_elev_pts}) try_rp = True @@ -1478,10 +1525,13 @@ def _fit_func( warnings.warn( f"No raster-point method found for coregistration {self.__class__.__name__}, " f"trying point-point method by converting all elevation data to points.", - UserWarning + UserWarning, + ) + ref_elev_pts = ( + gu.Raster.from_array(data=kwargs["ref_elev"], transform=kwargs["transform"], crs=kwargs["crs"]) + .to_points() + .ds ) - ref_elev_pts = gu.Raster.from_array(data=kwargs["ref_elev"], transform=kwargs["transform"], - crs=kwargs["crs"]).to_points().ds kwargs.update({"ref_elev": ref_elev_pts}) try_pp = True @@ -1493,14 +1543,18 @@ def _fit_func( if try_pp and try_rp: raise NotImplementedCoregFit( f"No raster-raster, raster-point or point-point method found for " - f"coregistration {self.__class__.__name__}.") + f"coregistration {self.__class__.__name__}." + ) elif try_pp: raise NotImplementedCoregFit( - f"No raster-point or point-point method found for coregistration {self.__class__.__name__}.") + f"No raster-point or point-point method found for coregistration {self.__class__.__name__}." + ) else: - raise NotImplementedCoregFit(f"No point-point method found for coregistration {self.__class__.__name__}.") + raise NotImplementedCoregFit( + f"No point-point method found for coregistration {self.__class__.__name__}." + ) - def _apply_func(self, **kwargs: Any) -> tuple[np.ndarray | gpd.GeoDataFrame, affine.Affine]: + def _apply_func(self, **kwargs: Any) -> tuple[NDArrayf | gpd.GeoDataFrame, affine.Affine]: """Distribute to _apply_rst and _apply_pts based on input and method availability.""" # If input is a raster @@ -1519,7 +1573,8 @@ def _apply_func(self, **kwargs: Any) -> tuple[np.ndarray | gpd.GeoDataFrame, aff # In this case, resampling is necessary if not kwargs["resample"]: raise NotImplementedError( - f"Option `resample=False` not implemented for coreg method {self.__class__}") + f"Option `resample=False` not implemented for coreg method {self.__class__}" + ) kwargs.pop("resample") # Need to removed before passing to apply_matrix # Apply the matrix around the centroid (if defined, otherwise just from the center). @@ -1528,7 +1583,7 @@ def _apply_func(self, **kwargs: Any) -> tuple[np.ndarray | gpd.GeoDataFrame, aff dem=kwargs.pop("elev"), transform=transform, matrix=self.to_matrix(), - centroid=self._meta.get("centroid") + centroid=self._meta.get("centroid"), ) out_transform = transform else: @@ -1546,58 +1601,63 @@ def _apply_func(self, **kwargs: Any) -> tuple[np.ndarray | gpd.GeoDataFrame, aff except NotImplementedCoregApply: if self.is_affine: # This only works on it's rigid, however. - applied_elev = _apply_matrix_pts(epc=kwargs["elev"], - matrix=self.to_matrix(), - centroid=self._meta.get("centroid"), - z_name=kwargs.pop("z_name")) + applied_elev = _apply_matrix_pts( + epc=kwargs["elev"], + matrix=self.to_matrix(), + centroid=self._meta.get("centroid"), + z_name=kwargs.pop("z_name"), + ) else: raise ValueError("Cannot transform, Coreg method is non-affine and has no implemented _apply_pts.") return applied_elev, out_transform - def _fit_rst_rst(self, - ref_elev: NDArrayf, - tba_elev: NDArrayf, - inlier_mask: NDArrayb, - transform: rio.transform.Affine, - crs: rio.crs.CRS, - z_name: str, - weights: NDArrayf | None, - bias_vars: dict[str, NDArrayf] | None = None, - verbose: bool = False, - **kwargs: Any, - ) -> None: + def _fit_rst_rst( + self, + ref_elev: NDArrayf, + tba_elev: NDArrayf, + inlier_mask: NDArrayb, + transform: rio.transform.Affine, + crs: rio.crs.CRS, + z_name: str, + weights: NDArrayf | None = None, + bias_vars: dict[str, NDArrayf] | None = None, + verbose: bool = False, + **kwargs: Any, + ) -> None: # FOR DEVELOPERS: This function needs to be implemented by subclassing. raise NotImplementedCoregFit("This step has to be implemented by subclassing.") - def _fit_rst_pts(self, - ref_elev: NDArrayf, - tba_elev: NDArrayf, - inlier_mask: NDArrayb, - transform: rio.transform.Affine, - crs: rio.crs.CRS, - z_name: str, - weights: NDArrayf | None, - bias_vars: dict[str, NDArrayf] | None = None, - verbose: bool = False, - **kwargs: Any, - ) -> None: + def _fit_rst_pts( + self, + ref_elev: NDArrayf | gpd.GeoDataFrame, + tba_elev: NDArrayf | gpd.GeoDataFrame, + inlier_mask: NDArrayb, + transform: rio.transform.Affine, + crs: rio.crs.CRS, + z_name: str, + weights: NDArrayf | None = None, + bias_vars: dict[str, NDArrayf] | None = None, + verbose: bool = False, + **kwargs: Any, + ) -> None: # FOR DEVELOPERS: This function needs to be implemented by subclassing. raise NotImplementedCoregFit("This step has to be implemented by subclassing.") - def _fit_pts_pts(self, - ref_elev: gpd.GeoDataFrame, - tba_elev: gpd.GeoDataFrame, - inlier_mask: NDArrayb, - transform: rio.transform.Affine, - crs: rio.crs.CRS, - z_name: str, - weights: NDArrayf | None, - bias_vars: dict[str, NDArrayf] | None = None, - verbose: bool = False, - **kwargs: Any, - ) -> None: + def _fit_pts_pts( + self, + ref_elev: gpd.GeoDataFrame, + tba_elev: gpd.GeoDataFrame, + inlier_mask: NDArrayb, + transform: rio.transform.Affine, + crs: rio.crs.CRS, + z_name: str, + weights: NDArrayf | None = None, + bias_vars: dict[str, NDArrayf] | None = None, + verbose: bool = False, + **kwargs: Any, + ) -> None: # FOR DEVELOPERS: This function needs to be implemented by subclassing. raise NotImplementedCoregFit("This step has to be implemented by subclassing.") @@ -1695,8 +1755,8 @@ def _parse_bias_vars(self, step: int, bias_vars: dict[str, NDArrayf] | None) -> def fit( self: CoregType, - reference_elev: NDArrayf | MArrayf | RasterType, - to_be_aligned_elev: NDArrayf | MArrayf | RasterType, + reference_elev: NDArrayf | MArrayf | RasterType | gpd.GeoDataFrame, + to_be_aligned_elev: NDArrayf | MArrayf | RasterType | gpd.GeoDataFrame, inlier_mask: NDArrayb | Mask | None = None, bias_vars: dict[str, NDArrayf | MArrayf | RasterType] | None = None, weights: NDArrayf | None = None, @@ -1771,32 +1831,97 @@ def fit( return self - # TODO: Override parent method into an "apply()"? - def _apply_rst( + @overload + def apply( + self, + elev: MArrayf, + bias_vars: dict[str, NDArrayf | MArrayf | RasterType] | None = None, + resample: bool = True, + resampling: str | rio.warp.Resampling = "bilinear", + transform: rio.transform.Affine | None = None, + crs: rio.crs.CRS | None = None, + z_name: str = "z", + **kwargs: Any, + ) -> tuple[MArrayf, rio.transform.Affine]: + ... + + @overload + def apply( self, elev: NDArrayf, - transform: rio.transform.Affine, - crs: rio.crs.CRS, + bias_vars: dict[str, NDArrayf | MArrayf | RasterType] | None = None, + resample: bool = True, + resampling: str | rio.warp.Resampling = "bilinear", + transform: rio.transform.Affine | None = None, + crs: rio.crs.CRS | None = None, z_name: str = "z", - bias_vars: dict[str, NDArrayf] | None = None, **kwargs: Any, ) -> tuple[NDArrayf, rio.transform.Affine]: - """Apply the coregistration steps sequentially to a DEM.""" - dem_mod = elev.copy() + ... + + @overload + def apply( + self, + elev: RasterType | gpd.GeoDataFrame, + bias_vars: dict[str, NDArrayf | MArrayf | RasterType] | None = None, + resample: bool = True, + resampling: str | rio.warp.Resampling = "bilinear", + transform: rio.transform.Affine | None = None, + crs: rio.crs.CRS | None = None, + z_name: str = "z", + **kwargs: Any, + ) -> RasterType | gpd.GeoDataFrame: + ... + + def apply( + self, + elev: MArrayf | NDArrayf | RasterType | gpd.GeoDataFrame, + bias_vars: dict[str, NDArrayf | MArrayf | RasterType] | None = None, + resample: bool = True, + resampling: str | rio.warp.Resampling = "bilinear", + transform: rio.transform.Affine | None = None, + crs: rio.crs.CRS | None = None, + z_name: str = "z", + **kwargs: Any, + ) -> RasterType | gpd.GeoDataFrame | tuple[NDArrayf, rio.transform.Affine] | tuple[MArrayf, rio.transform.Affine]: + + # First step and preprocessing + if not self._fit_called and self._meta.get("matrix") is None: + raise AssertionError(".fit() does not seem to have been called yet") + + elev_array, transform, crs = _preprocess_coreg_apply(elev=elev, transform=transform, crs=crs) + + elev_mod = elev.copy() out_transform = copy.copy(transform) + # Apply each step of the coregistration for i, coreg in enumerate(self.pipeline): - main_args_apply = {"elev": dem_mod, "transform": out_transform, "crs": crs, "z_name": z_name} + main_args_apply = {"elev": elev_mod, "transform": out_transform, "crs": crs, "z_name": z_name} # If non-affine method that expects a bias_vars argument if coreg._needs_vars: step_bias_vars = self._parse_bias_vars(step=i, bias_vars=bias_vars) main_args_apply.update({"bias_vars": step_bias_vars}) - dem_mod, out_transform = coreg.apply(**main_args_apply, **kwargs) + elev_mod, out_transform = coreg.apply(**main_args_apply, **kwargs) - return dem_mod, out_transform + # Post-process output depending on input type + applied_elev, out_transform = _postprocess_coreg_apply( + elev=elev, + applied_elev=elev_mod, + transform=transform, + out_transform=out_transform, + crs=crs, + resample=resample, + resampling=resampling, + ) + + # Only return object if raster or geodataframe, also return transform if object was an array + if isinstance(applied_elev, (gu.Raster, gpd.GeoDataFrame)): + return applied_elev + else: + return applied_elev, out_transform def __iter__(self) -> Generator[Coreg, None, None]: """Iterate over the pipeline steps.""" @@ -2110,13 +2235,15 @@ def to_points(self) -> NDArrayf: x_coord, y_coord = meta["representative_x"], meta["representative_y"] old_pos_arr = np.reshape([x_coord, y_coord, meta["representative_val"]], (1, 3)) - old_position = \ - gpd.GeoDataFrame(geometry=gpd.points_from_xy(x=old_pos_arr[:, 0], y=old_pos_arr[:, 1], crs=None), - data={"z": old_pos_arr[:, 2]}) + old_position = gpd.GeoDataFrame( + geometry=gpd.points_from_xy(x=old_pos_arr[:, 0], y=old_pos_arr[:, 1], crs=None), + data={"z": old_pos_arr[:, 2]}, + ) new_position = self.procstep.apply(old_position) - new_pos_arr = np.reshape([new_position.geometry.x.values, new_position.geometry.y.values, - new_position["z"].values], (1, 3)) + new_pos_arr = np.reshape( + [new_position.geometry.x.values, new_position.geometry.y.values, new_position["z"].values], (1, 3) + ) points = np.append(points, np.dstack((old_pos_arr, new_pos_arr)), axis=0) @@ -2204,13 +2331,15 @@ def _apply_rst( [bounds.right - resolution / 2, bounds.bottom + resolution / 2, representative_height], ] ) - edges_source = \ - gpd.GeoDataFrame(geometry=gpd.points_from_xy(x=edges_source_arr[:, 0], y=edges_source_arr[:, 1], crs=None), - data={"z": edges_source_arr[:, 2]}) + edges_source = gpd.GeoDataFrame( + geometry=gpd.points_from_xy(x=edges_source_arr[:, 0], y=edges_source_arr[:, 1], crs=None), + data={"z": edges_source_arr[:, 2]}, + ) edges_dest = self.apply(edges_source) - edges_dest_arr = np.array([edges_dest.geometry.x.values, edges_dest.geometry.y.values, - edges_dest["z"].values]).T + edges_dest_arr = np.array( + [edges_dest.geometry.x.values, edges_dest.geometry.y.values, edges_dest["z"].values] + ).T edges = np.dstack((edges_source_arr, edges_dest_arr)) all_points = np.append(points, edges, axis=0) @@ -2225,11 +2354,9 @@ def _apply_rst( return warped_dem, transform - def _apply_pts(self, - elev: gpd.GeoDataFrame, - z_name: str = "z", - bias_vars: dict[str, NDArrayf] | None = None, - **kwargs: Any) -> gpd.GeoDataFrame: + def _apply_pts( + self, elev: gpd.GeoDataFrame, z_name: str = "z", bias_vars: dict[str, NDArrayf] | None = None, **kwargs: Any + ) -> gpd.GeoDataFrame: """Apply the scaling model to a set of points.""" points = self.to_points() @@ -2248,12 +2375,13 @@ def _apply_pts(self, new_coords[:, dim] += model(elev.geometry.x.values, elev.geometry.y.values) - gdf_new_coords = \ - gpd.GeoDataFrame(geometry=gpd.points_from_xy(x=new_coords[:, 0], y=new_coords[:, 1], crs=None), - data={"z": new_coords[:, 2]}) + gdf_new_coords = gpd.GeoDataFrame( + geometry=gpd.points_from_xy(x=new_coords[:, 0], y=new_coords[:, 1], crs=None), data={"z": new_coords[:, 2]} + ) return gdf_new_coords + def warp_dem( dem: NDArrayf, transform: rio.transform.Affine, diff --git a/xdem/coreg/biascorr.py b/xdem/coreg/biascorr.py index 4ae3c0fc..31eda69d 100644 --- a/xdem/coreg/biascorr.py +++ b/xdem/coreg/biascorr.py @@ -4,10 +4,10 @@ import inspect from typing import Any, Callable, Iterable, Literal, TypeVar +import geopandas as gpd import geoutils as gu import numpy as np import pandas as pd -import geopandas as gpd import rasterio as rio import scipy @@ -322,22 +322,33 @@ def _fit_biascorr( # type: ignore elif self._fit_or_bin in ["bin", "bin_and_fit"]: self._meta["bin_dataframe"] = df - def _fit_rst_rst(self, - ref_elev: NDArrayf, - tba_elev: NDArrayf, - inlier_mask: NDArrayb, - transform: rio.transform.Affine, - crs: rio.crs.CRS, - z_name: str, - weights: NDArrayf | None, - bias_vars: dict[str, NDArrayf] | None = None, - verbose: bool = False, - **kwargs: Any, - ) -> None: + def _fit_rst_rst( + self, + ref_elev: NDArrayf, + tba_elev: NDArrayf, + inlier_mask: NDArrayb, + transform: rio.transform.Affine, + crs: rio.crs.CRS, + z_name: str, + weights: NDArrayf | None = None, + bias_vars: dict[str, NDArrayf] | None = None, + verbose: bool = False, + **kwargs: Any, + ) -> None: """Should only be called through subclassing""" - self._fit_biascorr(ref_elev=ref_elev, tba_elev=tba_elev, inlier_mask=inlier_mask, transform=transform, - crs=crs, z_name=z_name, weights=weights, bias_vars=bias_vars, verbose=verbose, **kwargs) + self._fit_biascorr( + ref_elev=ref_elev, + tba_elev=tba_elev, + inlier_mask=inlier_mask, + transform=transform, + crs=crs, + z_name=z_name, + weights=weights, + bias_vars=bias_vars, + verbose=verbose, + **kwargs, + ) def _fit_rst_pts( # type: ignore self, @@ -360,9 +371,14 @@ def _fit_rst_pts( # type: ignore pts = np.array((pts_elev.geometry.x.values, pts_elev.geometry.y.values)).T - valid_mask = np.logical_and.reduce( - (inlier_mask, np.isfinite(rst_elev), *(np.isfinite(var) for var in bias_vars.values())) - ) + # Get valid mask ahead of subsampling to have the exact number of requested subsamples by user + if bias_vars is not None: + valid_mask = np.logical_and.reduce( + (inlier_mask, np.isfinite(rst_elev), *(np.isfinite(var) for var in bias_vars.values())) + ) + else: + valid_mask = np.logical_and.reduce((inlier_mask, np.isfinite(rst_elev))) + # Convert inlier mask to points to be able to determine subsample later inlier_rst = gu.Raster.from_array(data=valid_mask, transform=transform, crs=crs) # The location needs to be surrounded by inliers, use floor to get 0 for at least one outlier @@ -391,15 +407,25 @@ def _fit_rst_pts( # type: ignore if bias_vars is not None: bias_vars_pts = {} for var in bias_vars.keys(): - bias_vars_pts[var] = gu.Raster.from_array(bias_vars[var], transform=transform, crs=crs).interp_points(pts) + bias_vars_pts[var] = gu.Raster.from_array(bias_vars[var], transform=transform, crs=crs).interp_points( + pts + ) else: bias_vars_pts = None # Send to raster-raster fit - self._fit_biascorr(ref_elev=ref_elev_pts, tba_elev=tba_elev_pts, inlier_mask=inlier_pts_alltrue, - bias_vars=bias_vars_pts, transform=transform, crs=crs, z_name=z_name, weights=weights, - verbose=verbose, **kwargs) - + self._fit_biascorr( + ref_elev=ref_elev_pts, + tba_elev=tba_elev_pts, + inlier_mask=inlier_pts_alltrue, + bias_vars=bias_vars_pts, + transform=transform, + crs=crs, + z_name=z_name, + weights=weights, + verbose=verbose, + **kwargs, + ) def _apply_rst( # type: ignore self, @@ -761,17 +787,17 @@ def _fit_rst_rst( # type: ignore ) def _fit_rst_pts( # type: ignore - self, - ref_elev: NDArrayf | gpd.GeoDataFrame, - tba_elev: NDArrayf | gpd.GeoDataFrame, - inlier_mask: NDArrayb, - transform: rio.transform.Affine, - crs: rio.crs.CRS, - z_name: str, - bias_vars: dict[str, NDArrayf] = None, - weights: None | NDArrayf = None, - verbose: bool = False, - **kwargs, + self, + ref_elev: NDArrayf | gpd.GeoDataFrame, + tba_elev: NDArrayf | gpd.GeoDataFrame, + inlier_mask: NDArrayb, + transform: rio.transform.Affine, + crs: rio.crs.CRS, + z_name: str, + bias_vars: dict[str, NDArrayf] = None, + weights: None | NDArrayf = None, + verbose: bool = False, + **kwargs, ) -> None: # Figure out which data is raster format to get gridded attributes @@ -901,8 +927,10 @@ def _fit_rst_rst( # type: ignore attr = ref_elev else: attr = xdem.terrain.get_terrain_attribute( - dem=ref_elev, attribute=self._meta["terrain_attribute"], resolution=(transform[0], abs(transform[4])) - ) + dem=ref_elev, + attribute=self._meta["terrain_attribute"], + resolution=(transform[0], abs(transform[4])), + ) # Run the parent function self._fit_biascorr( @@ -946,7 +974,9 @@ def _fit_rst_pts( # type: ignore attr = rast_elev else: attr = xdem.terrain.get_terrain_attribute( - dem=rast_elev, attribute=self._meta["terrain_attribute"], resolution=(transform[0], abs(transform[4])) + dem=rast_elev, + attribute=self._meta["terrain_attribute"], + resolution=(transform[0], abs(transform[4])), ) # Run the parent function From b8233c39e1fcfc62da2fdae06bdf20cb3bc0e532 Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Wed, 6 Mar 2024 15:48:58 -0900 Subject: [PATCH 12/54] Fix CoregPipeline apply function to work with points and rasters --- xdem/coreg/affine.py | 3 ++- xdem/coreg/base.py | 21 +++++++++++++++++---- 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/xdem/coreg/affine.py b/xdem/coreg/affine.py index 31399352..7afeec75 100644 --- a/xdem/coreg/affine.py +++ b/xdem/coreg/affine.py @@ -1220,7 +1220,8 @@ def _fit_rst_rst( ref_elev["N"] = ref_elev.geometry.y ref_elev.rename(columns={"b1": z_name}, inplace=True) self._fit_rst_pts( - ref_elev=ref_elev, tba_elev=tba_elev, transform=transform, crs=crs, inlier_mask=inlier_mask, **kwargs + ref_elev=ref_elev, tba_elev=tba_elev, transform=transform, crs=crs, inlier_mask=inlier_mask, z_name=z_name, + **kwargs ) def _to_matrix_func(self) -> NDArrayf: diff --git a/xdem/coreg/base.py b/xdem/coreg/base.py index 8763a549..f6cc5aa2 100644 --- a/xdem/coreg/base.py +++ b/xdem/coreg/base.py @@ -1753,6 +1753,7 @@ def _parse_bias_vars(self, step: int, bias_vars: dict[str, NDArrayf] | None) -> # Add subset dict for this pipeline step to args of fit and apply return {n: bias_vars[n] for n in var_names} + # Need to override base Coreg method to work on pipeline steps def fit( self: CoregType, reference_elev: NDArrayf | MArrayf | RasterType | gpd.GeoDataFrame, @@ -1822,9 +1823,14 @@ def fit( main_args_fit.update({"bias_vars": step_bias_vars}) main_args_apply.update({"bias_vars": step_bias_vars}) + # Perform the step fit coreg.fit(**main_args_fit) - tba_dem_mod, out_transform = coreg.apply(**main_args_apply) + # Step apply: one return for a geodataframe, two returns for array/transform + if isinstance(tba_dem_mod, gpd.GeoDataFrame): + tba_dem_mod = coreg.apply(**main_args_apply) + else: + tba_dem_mod, out_transform = coreg.apply(**main_args_apply) # Flag that the fitting function has been called. self._fit_called = True @@ -1873,6 +1879,7 @@ def apply( ) -> RasterType | gpd.GeoDataFrame: ... + # Need to override base Coreg method to work on pipeline steps def apply( self, elev: MArrayf | NDArrayf | RasterType | gpd.GeoDataFrame, @@ -1891,20 +1898,26 @@ def apply( elev_array, transform, crs = _preprocess_coreg_apply(elev=elev, transform=transform, crs=crs) - elev_mod = elev.copy() + elev_mod = elev_array.copy() out_transform = copy.copy(transform) # Apply each step of the coregistration for i, coreg in enumerate(self.pipeline): - main_args_apply = {"elev": elev_mod, "transform": out_transform, "crs": crs, "z_name": z_name} + main_args_apply = {"elev": elev_mod, "transform": out_transform, "crs": crs, "z_name": z_name, + "resample": resample, "resampling": resampling} # If non-affine method that expects a bias_vars argument if coreg._needs_vars: step_bias_vars = self._parse_bias_vars(step=i, bias_vars=bias_vars) main_args_apply.update({"bias_vars": step_bias_vars}) - elev_mod, out_transform = coreg.apply(**main_args_apply, **kwargs) + # Step apply: one return for a geodataframe, two returns for array/transform + if isinstance(elev_mod, gpd.GeoDataFrame): + elev_mod = coreg.apply(**main_args_apply, **kwargs) + else: + elev_mod, out_transform = coreg.apply(**main_args_apply, **kwargs) + # Post-process output depending on input type applied_elev, out_transform = _postprocess_coreg_apply( From bf50a29197bfe3dc981f14154ad80f2bc4d85b48 Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Wed, 6 Mar 2024 15:49:18 -0900 Subject: [PATCH 13/54] Linting --- xdem/coreg/affine.py | 9 +++++++-- xdem/coreg/base.py | 11 ++++++++--- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/xdem/coreg/affine.py b/xdem/coreg/affine.py index 7afeec75..221b4e34 100644 --- a/xdem/coreg/affine.py +++ b/xdem/coreg/affine.py @@ -1220,8 +1220,13 @@ def _fit_rst_rst( ref_elev["N"] = ref_elev.geometry.y ref_elev.rename(columns={"b1": z_name}, inplace=True) self._fit_rst_pts( - ref_elev=ref_elev, tba_elev=tba_elev, transform=transform, crs=crs, inlier_mask=inlier_mask, z_name=z_name, - **kwargs + ref_elev=ref_elev, + tba_elev=tba_elev, + transform=transform, + crs=crs, + inlier_mask=inlier_mask, + z_name=z_name, + **kwargs, ) def _to_matrix_func(self) -> NDArrayf: diff --git a/xdem/coreg/base.py b/xdem/coreg/base.py index f6cc5aa2..7fcd0e2c 100644 --- a/xdem/coreg/base.py +++ b/xdem/coreg/base.py @@ -1904,8 +1904,14 @@ def apply( # Apply each step of the coregistration for i, coreg in enumerate(self.pipeline): - main_args_apply = {"elev": elev_mod, "transform": out_transform, "crs": crs, "z_name": z_name, - "resample": resample, "resampling": resampling} + main_args_apply = { + "elev": elev_mod, + "transform": out_transform, + "crs": crs, + "z_name": z_name, + "resample": resample, + "resampling": resampling, + } # If non-affine method that expects a bias_vars argument if coreg._needs_vars: @@ -1918,7 +1924,6 @@ def apply( else: elev_mod, out_transform = coreg.apply(**main_args_apply, **kwargs) - # Post-process output depending on input type applied_elev, out_transform = _postprocess_coreg_apply( elev=elev, From 5e66a1228d86f776d0dd7563fd701e6b66d4af4e Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Fri, 8 Mar 2024 18:36:16 -0900 Subject: [PATCH 14/54] Force cache reset --- .github/workflows/python-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/python-tests.yml b/.github/workflows/python-tests.yml index 8772a038..dac4b786 100644 --- a/.github/workflows/python-tests.yml +++ b/.github/workflows/python-tests.yml @@ -50,7 +50,7 @@ jobs: path: ${{ env.CONDA }}/envs key: conda-${{ matrix.os }}-${{ matrix.python-version }}-${{ env.cache_date }}-${{ hashFiles('dev-environment.yml') }}-${{ env.CACHE_NUMBER }} env: - CACHE_NUMBER: 0 # Increase this value to reset cache if environment.yml has not changed + CACHE_NUMBER: 1 # Increase this value to reset cache if environment.yml has not changed id: cache # The trick below is necessary because the generic environment file does not specify a Python version, and ONLY From 3efbd58473dd95ab8da3d6cba8fe5e746b711756 Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Fri, 8 Mar 2024 19:20:23 -0900 Subject: [PATCH 15/54] Increase sampling to avoid random test failure and fix DEM.coregister_3d --- tests/test_coreg/test_biascorr.py | 8 ++++---- xdem/dem.py | 13 +++++++------ 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/tests/test_coreg/test_biascorr.py b/tests/test_coreg/test_biascorr.py index bf3cc192..60d66cad 100644 --- a/tests/test_coreg/test_biascorr.py +++ b/tests/test_coreg/test_biascorr.py @@ -47,10 +47,10 @@ class TestBiasCorr: # Convert DEMs to points with a bit of subsampling for speed-up # TODO: Simplify once this GeoUtils issue is resolved: https://github.com/GlacioHack/geoutils/issues/499 - tba_pts = tba.to_points(subsample=50000, pixel_offset="ul").ds + tba_pts = tba.to_points(subsample=100000, pixel_offset="ul").ds tba_pts = tba_pts.rename(columns={"b1": "z"}) - ref_pts = ref.to_points(subsample=50000, pixel_offset="ul").ds + ref_pts = ref.to_points(subsample=100000, pixel_offset="ul").ds ref_pts = ref_pts.rename(columns={"b1": "z"}) # Raster-Point @@ -501,13 +501,13 @@ def test_directionalbias__synthetic(self, fit_args, angle, nb_freq) -> None: ] elev_fit_args = fit_args.copy() if isinstance(elev_fit_args["to_be_aligned_elev"], gpd.GeoDataFrame): - bias_elev = bias_dem.to_points(subsample=50000, pixel_offset="ul").ds.rename(columns={"b1": "z"}) + bias_elev = bias_dem.to_points(subsample=100000, pixel_offset="ul").ds.rename(columns={"b1": "z"}) else: bias_elev = bias_dem dirbias.fit( elev_fit_args["reference_elev"], to_be_aligned_elev=bias_elev, - subsample=40000, + subsample=80000, random_state=42, bounds_amp_wave_phase=bounds, niter=10, diff --git a/xdem/dem.py b/xdem/dem.py index b81cb1ff..0b5fdfcf 100644 --- a/xdem/dem.py +++ b/xdem/dem.py @@ -7,6 +7,7 @@ import numpy as np import rasterio as rio +import geopandas as gpd from affine import Affine from geoutils import SatelliteImage from geoutils.raster import Mask, RasterType @@ -395,19 +396,19 @@ def get_terrain_attribute(self, attribute: str | list[str], **kwargs: Any) -> Ra def coregister_3d( self, - reference_dem: DEM, + reference_elev: DEM | gpd.GeoDataFrame, coreg_method: coreg.Coreg = None, inlier_mask: Mask | NDArrayb = None, bias_vars: dict[str, NDArrayf | MArrayf | RasterType] = None, **kwargs: Any, ) -> DEM: """ - Coregister DEM to another DEM in three dimensions. + Coregister DEM to a reference DEM in three dimensions. - Any coregistration method or pipeline can be passed, default is only horizontal and vertical shift of - Nuth and Kääb (2011). + Any coregistration method or pipeline from xdem.Coreg can be passed. Default is only horizontal and vertical + shifts of Nuth and Kääb (2011). - :param reference_dem: Reference DEM the alignment is made towards. + :param reference_elev: Reference elevation, DEM or elevation point cloud, for the alignment. :param coreg_method: Coregistration method or pipeline. :param inlier_mask: Optional. 2D boolean array or mask of areas to include in the analysis (inliers=True). :param bias_vars: Optional, only for some bias correction methods. 2D array or rasters of bias variables used. @@ -420,7 +421,7 @@ def coregister_3d( coreg_method = coreg.NuthKaab() coreg_method.fit( - reference_dem=reference_dem, dem_to_be_aligned=self, inlier_mask=inlier_mask, bias_vars=bias_vars, **kwargs + reference_elev=reference_elev, to_be_aligned_elev=self, inlier_mask=inlier_mask, bias_vars=bias_vars, **kwargs ) return coreg_method.apply(self) From b420105857c07317e6f259147c570a250e3aa661 Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Fri, 8 Mar 2024 19:20:44 -0900 Subject: [PATCH 16/54] Linting --- xdem/dem.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/xdem/dem.py b/xdem/dem.py index 0b5fdfcf..3fbc858a 100644 --- a/xdem/dem.py +++ b/xdem/dem.py @@ -5,9 +5,9 @@ import warnings from typing import Any, Literal +import geopandas as gpd import numpy as np import rasterio as rio -import geopandas as gpd from affine import Affine from geoutils import SatelliteImage from geoutils.raster import Mask, RasterType @@ -421,7 +421,11 @@ def coregister_3d( coreg_method = coreg.NuthKaab() coreg_method.fit( - reference_elev=reference_elev, to_be_aligned_elev=self, inlier_mask=inlier_mask, bias_vars=bias_vars, **kwargs + reference_elev=reference_elev, + to_be_aligned_elev=self, + inlier_mask=inlier_mask, + bias_vars=bias_vars, + **kwargs, ) return coreg_method.apply(self) From 0d46811a880f56845a57a91209518ff8cf6bc886 Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Tue, 12 Mar 2024 14:01:24 -0800 Subject: [PATCH 17/54] Fix test directional bias failing and update with GeoUtils PR #501 --- tests/test_coreg/test_biascorr.py | 19 +++++++++---------- xdem/volume.py | 6 +++--- 2 files changed, 12 insertions(+), 13 deletions(-) diff --git a/tests/test_coreg/test_biascorr.py b/tests/test_coreg/test_biascorr.py index 60d66cad..b9a81cd4 100644 --- a/tests/test_coreg/test_biascorr.py +++ b/tests/test_coreg/test_biascorr.py @@ -47,11 +47,9 @@ class TestBiasCorr: # Convert DEMs to points with a bit of subsampling for speed-up # TODO: Simplify once this GeoUtils issue is resolved: https://github.com/GlacioHack/geoutils/issues/499 - tba_pts = tba.to_points(subsample=100000, pixel_offset="ul").ds - tba_pts = tba_pts.rename(columns={"b1": "z"}) + tba_pts = tba.to_pointcloud(data_column_name="z", subsample=50000, random_state=42).ds - ref_pts = ref.to_points(subsample=100000, pixel_offset="ul").ds - ref_pts = ref_pts.rename(columns={"b1": "z"}) + ref_pts = ref.to_pointcloud(data_column_name="z", subsample=50000, random_state=42).ds # Raster-Point fit_args_rst_pts = dict( @@ -453,7 +451,7 @@ def test_directionalbias(self) -> None: assert dirbias._meta["bias_var_names"] == ["angle"] @pytest.mark.parametrize("fit_args", all_fit_args) # type: ignore - @pytest.mark.parametrize("angle", [20, 90, 210]) # type: ignore + @pytest.mark.parametrize("angle", [20, 90]) # type: ignore @pytest.mark.parametrize("nb_freq", [1, 2, 3]) # type: ignore def test_directionalbias__synthetic(self, fit_args, angle, nb_freq) -> None: """Test the subclass DirectionalBias with synthetic data.""" @@ -501,16 +499,17 @@ def test_directionalbias__synthetic(self, fit_args, angle, nb_freq) -> None: ] elev_fit_args = fit_args.copy() if isinstance(elev_fit_args["to_be_aligned_elev"], gpd.GeoDataFrame): - bias_elev = bias_dem.to_points(subsample=100000, pixel_offset="ul").ds.rename(columns={"b1": "z"}) + # Need a higher sample size to get the coefficients right here + bias_elev = bias_dem.to_pointcloud(data_column_name="z", subsample=50000, random_state=42).ds else: bias_elev = bias_dem dirbias.fit( elev_fit_args["reference_elev"], to_be_aligned_elev=bias_elev, - subsample=80000, + subsample=40000, random_state=42, bounds_amp_wave_phase=bounds, - niter=10, + niter=20, ) # Check all fit parameters are the same within 10% @@ -561,7 +560,7 @@ def test_deramp__synthetic(self, fit_args, order: int) -> None: deramp = biascorr.Deramp(poly_order=order) elev_fit_args = fit_args.copy() if isinstance(elev_fit_args["to_be_aligned_elev"], gpd.GeoDataFrame): - bias_elev = bias_dem.to_points(subsample=20000, pixel_offset="ul").ds.rename(columns={"b1": "z"}) + bias_elev = bias_dem.to_pointcloud(data_column_name="z", subsample=20000).ds else: bias_elev = bias_dem deramp.fit(elev_fit_args["reference_elev"], to_be_aligned_elev=bias_elev, subsample=10000, random_state=42) @@ -619,7 +618,7 @@ def test_terrainbias__synthetic(self, fit_args) -> None: ) elev_fit_args = fit_args.copy() if isinstance(elev_fit_args["to_be_aligned_elev"], gpd.GeoDataFrame): - bias_elev = bias_dem.to_points(subsample=20000, pixel_offset="ul").ds.rename(columns={"b1": "z"}) + bias_elev = bias_dem.to_pointcloud(data_column_name="z", subsample=20000).ds else: bias_elev = bias_dem tb.fit( diff --git a/xdem/volume.py b/xdem/volume.py index 6e3275f7..80cef05e 100644 --- a/xdem/volume.py +++ b/xdem/volume.py @@ -9,7 +9,7 @@ import pandas as pd import rasterio.fill import scipy.interpolate -from geoutils.raster import RasterType, get_array_and_mask, get_mask, get_valid_extent +from geoutils.raster import RasterType, get_array_and_mask, get_mask_from_array, get_valid_extent from tqdm import tqdm try: @@ -51,7 +51,7 @@ def hypsometric_binning( ddem, _ = get_array_and_mask(ddem) # Extract only the valid values, i.e. valid in ref_dem - valid_mask = ~get_mask(ref_dem) + valid_mask = ~get_mask_from_array(ref_dem) ddem = np.array(ddem[valid_mask]) ref_dem = np.array(ref_dem.squeeze()[valid_mask]) @@ -299,7 +299,7 @@ def linear_interpolation( raise ValueError("Optional dependency needed. Install 'opencv'") # Create a mask for where nans exist - nan_mask = get_mask(array) + nan_mask = get_mask_from_array(array) interpolated_array = rasterio.fill.fillnodata( array.copy(), mask=(~nan_mask).astype("uint8"), max_search_distance=max_search_distance From eda3f6632a9e3e5025e03793ed73285673c47d3c Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Tue, 12 Mar 2024 14:01:47 -0800 Subject: [PATCH 18/54] Linting --- xdem/volume.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/xdem/volume.py b/xdem/volume.py index 80cef05e..1bfa1596 100644 --- a/xdem/volume.py +++ b/xdem/volume.py @@ -9,7 +9,12 @@ import pandas as pd import rasterio.fill import scipy.interpolate -from geoutils.raster import RasterType, get_array_and_mask, get_mask_from_array, get_valid_extent +from geoutils.raster import ( + RasterType, + get_array_and_mask, + get_mask_from_array, + get_valid_extent, +) from tqdm import tqdm try: From 313269ff5c31df4fd01a0dd099f78f3d0986786c Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Fri, 15 Mar 2024 17:40:52 -0800 Subject: [PATCH 19/54] Remove warning filter per function in favor of global pytest setting --- pyproject.toml | 2 +- tests/test_coreg/test_affine.py | 15 ++++----------- tests/test_coreg/test_base.py | 30 +++++++++--------------------- tests/test_coreg/test_biascorr.py | 17 +++++++---------- tests/test_coreg/test_workflows.py | 10 ++++------ tests/test_ddem.py | 4 +--- tests/test_demcollection.py | 3 --- tests/test_doc.py | 2 -- tests/test_misc.py | 2 -- tests/test_spatialstats.py | 1 - tests/test_terrain.py | 11 ++--------- tests/test_volume.py | 3 --- 12 files changed, 28 insertions(+), 72 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 4f716305..5f8ff586 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,7 +16,7 @@ fallback_version = "0.0.1" target_version = ['py36'] [tool.pytest.ini_options] -addopts = "--doctest-modules" +addopts = "--doctest-modules -W error::UserWarning" testpaths = [ "tests", "xdem" diff --git a/tests/test_coreg/test_affine.py b/tests/test_coreg/test_affine.py index 251d68c8..709f4d58 100644 --- a/tests/test_coreg/test_affine.py +++ b/tests/test_coreg/test_affine.py @@ -17,11 +17,10 @@ def load_examples() -> tuple[RasterType, RasterType, Vector]: """Load example files to try coregistration methods with.""" - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - reference_raster = Raster(examples.get_path("longyearbyen_ref_dem")) - to_be_aligned_raster = Raster(examples.get_path("longyearbyen_tba_dem")) - glacier_mask = Vector(examples.get_path("longyearbyen_glacier_outlines")) + + reference_raster = Raster(examples.get_path("longyearbyen_ref_dem")) + to_be_aligned_raster = Raster(examples.get_path("longyearbyen_tba_dem")) + glacier_mask = Vector(examples.get_path("longyearbyen_glacier_outlines")) return reference_raster, to_be_aligned_raster, glacier_mask @@ -43,7 +42,6 @@ class TestAffineCoreg: points = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [0, 0, 0, 0]], dtype="float64").T def test_from_classmethods(self) -> None: - warnings.simplefilter("error") # Check that the from_matrix function works as expected. vshift = 5 @@ -67,7 +65,6 @@ def test_from_classmethods(self) -> None: raise exception def test_vertical_shift(self) -> None: - warnings.simplefilter("error") # Create a vertical shift correction instance vshiftcorr = coreg.VerticalShift() @@ -177,7 +174,6 @@ def test_coreg_example_shift(self, shift_px, coreg_class, points_or_raster, verb For comparison of coreg algorithms: Shift a ref_dem on purpose, e.g. shift_px = (1,1), and then applying coreg to shift it back. """ - warnings.simplefilter("error") res = self.ref.res[0] # shift DEM by shift_px @@ -222,7 +218,6 @@ def test_coreg_example_shift(self, shift_px, coreg_class, points_or_raster, verb raise AssertionError(f"Diffs are too big. east: {best_east_diff:.2f} px, north: {best_north_diff:.2f} px") def test_nuth_kaab(self) -> None: - warnings.simplefilter("error") nuth_kaab = coreg.NuthKaab(max_iterations=10) @@ -268,7 +263,6 @@ def test_nuth_kaab(self) -> None: assert abs((transformed_points[0, 2] - self.points[0, 2]) + vshift) < 0.1 def test_tilt(self) -> None: - warnings.simplefilter("error") # Try a 1st degree deramping. tilt = coreg.Tilt() @@ -291,7 +285,6 @@ def test_tilt(self) -> None: assert np.abs(np.mean(periglacial_offset)) < 0.02 def test_icp_opencv(self) -> None: - warnings.simplefilter("error") # Do a fast and dirty 3 iteration ICP just to make sure it doesn't error out. icp = coreg.ICP(max_iterations=3) diff --git a/tests/test_coreg/test_base.py b/tests/test_coreg/test_base.py index aab04f54..1d45fe23 100644 --- a/tests/test_coreg/test_base.py +++ b/tests/test_coreg/test_base.py @@ -14,21 +14,18 @@ from geoutils import Raster, Vector from geoutils.raster import RasterType -with warnings.catch_warnings(): - warnings.simplefilter("ignore") - import xdem - from xdem import coreg, examples, misc, spatialstats - from xdem._typing import NDArrayf - from xdem.coreg.base import Coreg, apply_matrix +import xdem +from xdem import coreg, examples, misc, spatialstats +from xdem._typing import NDArrayf +from xdem.coreg.base import Coreg, apply_matrix def load_examples() -> tuple[RasterType, RasterType, Vector]: """Load example files to try coregistration methods with.""" - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - reference_raster = Raster(examples.get_path("longyearbyen_ref_dem")) - to_be_aligned_raster = Raster(examples.get_path("longyearbyen_tba_dem")) - glacier_mask = Vector(examples.get_path("longyearbyen_glacier_outlines")) + + reference_raster = Raster(examples.get_path("longyearbyen_ref_dem")) + to_be_aligned_raster = Raster(examples.get_path("longyearbyen_tba_dem")) + glacier_mask = Vector(examples.get_path("longyearbyen_glacier_outlines")) return reference_raster, to_be_aligned_raster, glacier_mask @@ -61,7 +58,6 @@ def test_init(self) -> None: @pytest.mark.parametrize("coreg_class", [coreg.VerticalShift, coreg.ICP, coreg.NuthKaab]) # type: ignore def test_copy(self, coreg_class: Callable[[], Coreg]) -> None: """Test that copying work expectedly (that no attributes still share references).""" - warnings.simplefilter("error") # Create a coreg instance and copy it. corr = coreg_class() @@ -145,7 +141,6 @@ def test_get_subsample_on_valid_mask(self, subsample: float | int) -> None: @pytest.mark.parametrize("coreg", all_coregs) # type: ignore def test_subsample(self, coreg: Callable) -> None: # type: ignore - warnings.simplefilter("error") # Check that default value is set properly coreg_full = coreg() @@ -432,7 +427,6 @@ def test_coreg_raises(self, combination: tuple[str, str, str, str, str, str, str 6. The expected outcome of the test. 7. The error/warning message (if applicable) """ - warnings.simplefilter("error") ref_dem, tba_dem, transform, crs, testing_step, result, text = combination @@ -525,7 +519,6 @@ def test_copy(self, coreg_class: Callable[[], Coreg]) -> None: assert pipeline_copy.pipeline[0]._meta["vshift"] def test_pipeline(self) -> None: - warnings.simplefilter("error") # Create a pipeline from two coreg methods. pipeline = coreg.CoregPipeline([coreg.VerticalShift(), coreg.NuthKaab()]) @@ -632,7 +625,6 @@ def test_pipeline__errors(self) -> None: pipeline3.fit(**self.fit_params, bias_vars={"ncc": xdem.terrain.slope(self.ref)}) def test_pipeline_pts(self) -> None: - warnings.simplefilter("ignore") pipeline = coreg.NuthKaab() + coreg.GradientDescending() ref_points = self.ref.to_points(as_array=False, subsample=5000, pixel_offset="center").ds @@ -649,7 +641,6 @@ def test_pipeline_pts(self) -> None: assert pipeline.pipeline[0]._meta["offset_east_px"] != pipeline.pipeline[1]._meta["offset_east_px"] def test_coreg_add(self) -> None: - warnings.simplefilter("error") # Test with a vertical shift of 4 vshift = 4 @@ -735,7 +726,6 @@ class TestBlockwiseCoreg: ) # type: ignore @pytest.mark.parametrize("subdivision", [4, 10]) # type: ignore def test_blockwise_coreg(self, pipeline: Coreg, subdivision: int) -> None: - warnings.simplefilter("error") blockwise = coreg.BlockwiseCoreg(step=pipeline, subdivision=subdivision) @@ -782,7 +772,6 @@ def test_blockwise_coreg(self, pipeline: Coreg, subdivision: int) -> None: def test_blockwise_coreg_large_gaps(self) -> None: """Test BlockwiseCoreg when large gaps are encountered, e.g. around the frame of a rotated DEM.""" - warnings.simplefilter("error") reference_dem = self.ref.reproject(crs="EPSG:3413", res=self.ref.res, resampling="bilinear") dem_to_be_aligned = self.tba.reproject(ref=reference_dem, resampling="bilinear") @@ -823,7 +812,7 @@ def test_blockwise_coreg_large_gaps(self) -> None: def test_apply_matrix() -> None: - warnings.simplefilter("error") + ref, tba, outlines = load_examples() # Load example reference, to-be-aligned and mask. ref_arr = gu.raster.get_array_and_mask(ref)[0] @@ -936,7 +925,6 @@ def rotation_matrix(rotation: float = 30) -> NDArrayf: def test_warp_dem() -> None: """Test that the warp_dem function works expectedly.""" - warnings.simplefilter("error") small_dem = np.zeros((5, 10), dtype="float32") small_transform = rio.transform.from_origin(0, 5, 1, 1) diff --git a/tests/test_coreg/test_biascorr.py b/tests/test_coreg/test_biascorr.py index 04d8f943..843dcaf8 100644 --- a/tests/test_coreg/test_biascorr.py +++ b/tests/test_coreg/test_biascorr.py @@ -13,20 +13,17 @@ PLOT = False -with warnings.catch_warnings(): - warnings.simplefilter("ignore") - from xdem import examples - from xdem.coreg import biascorr - from xdem.fit import polynomial_2d, sumsin_1d +from xdem import examples +from xdem.coreg import biascorr +from xdem.fit import polynomial_2d, sumsin_1d def load_examples() -> tuple[gu.Raster, gu.Raster, gu.Vector]: """Load example files to try coregistration methods with.""" - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - reference_raster = gu.Raster(examples.get_path("longyearbyen_ref_dem")) - to_be_aligned_raster = gu.Raster(examples.get_path("longyearbyen_tba_dem")) - glacier_mask = gu.Vector(examples.get_path("longyearbyen_glacier_outlines")) + + reference_raster = gu.Raster(examples.get_path("longyearbyen_ref_dem")) + to_be_aligned_raster = gu.Raster(examples.get_path("longyearbyen_tba_dem")) + glacier_mask = gu.Vector(examples.get_path("longyearbyen_glacier_outlines")) return reference_raster, to_be_aligned_raster, glacier_mask diff --git a/tests/test_coreg/test_workflows.py b/tests/test_coreg/test_workflows.py index f95fbb4e..4da4c493 100644 --- a/tests/test_coreg/test_workflows.py +++ b/tests/test_coreg/test_workflows.py @@ -18,11 +18,10 @@ def load_examples() -> tuple[RasterType, RasterType, Vector]: """Load example files to try coregistration methods with.""" - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - reference_raster = Raster(examples.get_path("longyearbyen_ref_dem")) - to_be_aligned_raster = Raster(examples.get_path("longyearbyen_tba_dem")) - glacier_mask = Vector(examples.get_path("longyearbyen_glacier_outlines")) + + reference_raster = Raster(examples.get_path("longyearbyen_ref_dem")) + to_be_aligned_raster = Raster(examples.get_path("longyearbyen_tba_dem")) + glacier_mask = Vector(examples.get_path("longyearbyen_glacier_outlines")) return reference_raster, to_be_aligned_raster, glacier_mask @@ -30,7 +29,6 @@ def load_examples() -> tuple[RasterType, RasterType, Vector]: class TestWorkflows: def test_create_inlier_mask(self) -> None: """Test that the create_inlier_mask function works expectedly.""" - warnings.simplefilter("error") ref, tba, outlines = load_examples() # Load example reference, to-be-aligned and outlines diff --git a/tests/test_ddem.py b/tests/test_ddem.py index 0503beaa..3a5a8bc5 100644 --- a/tests/test_ddem.py +++ b/tests/test_ddem.py @@ -4,9 +4,7 @@ import geoutils as gu import numpy as np -with warnings.catch_warnings(): - warnings.simplefilter("ignore") - import xdem +import xdem class TestdDEM: diff --git a/tests/test_demcollection.py b/tests/test_demcollection.py index 1f432439..b1bb3baf 100644 --- a/tests/test_demcollection.py +++ b/tests/test_demcollection.py @@ -67,7 +67,6 @@ def test_init(self) -> None: ] = np.nan # Check that the cumulative_dh function warns for NaNs with warnings.catch_warnings(): - warnings.simplefilter("error") try: dems.get_cumulative_series(nans_ok=False) except UserWarning as exception: @@ -89,8 +88,6 @@ def test_dem_datetimes(self) -> None: def test_ddem_interpolation(self) -> None: """Test that dDEM interpolation works as it should.""" - # All warnings should raise errors from now on - warnings.simplefilter("error") # Create a DEMCollection object dems = xdem.DEMCollection( diff --git a/tests/test_doc.py b/tests/test_doc.py index 094ba3bd..43a5a0f4 100644 --- a/tests/test_doc.py +++ b/tests/test_doc.py @@ -28,8 +28,6 @@ def run_code(filename: str) -> None: ".*fetching the attribute.*Polygon.*", ] # This is a GeoPandas issue - warnings.simplefilter("error") - for warning_text in ignored_warnings: warnings.filterwarnings("ignore", warning_text) try: diff --git a/tests/test_misc.py b/tests/test_misc.py index 77379308..6f543289 100644 --- a/tests/test_misc.py +++ b/tests/test_misc.py @@ -58,8 +58,6 @@ def test_deprecate(self, deprecation_increment: int | None, details: str | None) :param details: An optional explanation for the description. """ - warnings.simplefilter("error") - current_version = Version(Version(xdem.__version__).base_version) # Set the removal version to be the current version plus the increment (e.g. 0.0.5 + 1 -> 0.0.6) diff --git a/tests/test_spatialstats.py b/tests/test_spatialstats.py index ff791318..815e17df 100644 --- a/tests/test_spatialstats.py +++ b/tests/test_spatialstats.py @@ -1254,7 +1254,6 @@ def test_circular_masking(self) -> None: def test_ring_masking(self) -> None: """Test that the ring masking works as intended""" - warnings.simplefilter("error") # by default, the mask is only an outside circle (ring of size 0) ring1 = xdem.spatialstats._create_ring_mask((5, 5)) diff --git a/tests/test_terrain.py b/tests/test_terrain.py index 82a8c4c0..b08898e0 100644 --- a/tests/test_terrain.py +++ b/tests/test_terrain.py @@ -80,8 +80,6 @@ def test_attribute_functions_against_gdaldem(self, attribute: str) -> None: :param attribute: The attribute to test (e.g. 'slope') """ - # TODO: New warnings to remove with latest GDAL versions, opening issue - # warnings.simplefilter("error") functions = { "slope_Horn": lambda dem: xdem.terrain.slope(dem.data, dem.res, degrees=True), @@ -186,7 +184,6 @@ def test_attribute_functions_against_richdem(self, attribute: str) -> None: :param attribute: The attribute to test (e.g. 'slope') """ - warnings.simplefilter("error") # Functions for xdem-implemented methods functions_xdem = { @@ -266,7 +263,6 @@ def test_attribute_functions_against_richdem(self, attribute: str) -> None: def test_hillshade_errors(self) -> None: """Validate that the hillshade function raises appropriate errors.""" # Try giving the hillshade invalid arguments. - warnings.simplefilter("error") with pytest.raises(ValueError, match="Azimuth must be a value between 0 and 360"): xdem.terrain.hillshade(self.dem.data, self.dem.res, azimuth=361) @@ -279,7 +275,7 @@ def test_hillshade_errors(self) -> None: def test_hillshade(self) -> None: """Test hillshade-specific settings.""" - warnings.simplefilter("error") + zfactor_1 = xdem.terrain.hillshade(self.dem.data, self.dem.res, z_factor=1.0) zfactor_10 = xdem.terrain.hillshade(self.dem.data, self.dem.res, z_factor=10.0) @@ -297,7 +293,6 @@ def test_hillshade(self) -> None: ) # type: ignore def test_curvatures(self, name: str) -> None: """Test the curvature functions""" - warnings.simplefilter("error") # Copy the DEM to ensure that the inter-test state is unchanged, and because the mask will be modified. dem = self.dem.copy() @@ -328,7 +323,7 @@ def test_curvatures(self, name: str) -> None: def test_get_terrain_attribute(self) -> None: """Test the get_terrain_attribute function by itself.""" - warnings.simplefilter("error") + # Validate that giving only one terrain attribute only returns that, and not a list of len() == 1 slope = xdem.terrain.get_terrain_attribute(self.dem.data, "slope", resolution=self.dem.res) assert isinstance(slope, np.ndarray) @@ -414,7 +409,6 @@ def test_rugosity_jenness(self) -> None: @pytest.mark.parametrize("resolution", np.linspace(0.01, 100, 10)) # type: ignore def test_rugosity_simple_cases(self, dh: float, resolution: float) -> None: """Test the rugosity calculation for simple cases.""" - warnings.simplefilter("error") # We here check the value for a fully symmetric case: the rugosity calculation can be simplified because all # eight triangles have the same surface area, see Jenness (2004). @@ -443,7 +437,6 @@ def test_rugosity_simple_cases(self, dh: float, resolution: float) -> None: def test_get_quadric_coefficients(self) -> None: """Test the outputs and exceptions of the get_quadric_coefficients() function.""" - warnings.simplefilter("error") dem = np.array([[1, 1, 1], [1, 2, 1], [1, 1, 1]], dtype="float32") diff --git a/tests/test_volume.py b/tests/test_volume.py index 338f8198..2bb2e226 100644 --- a/tests/test_volume.py +++ b/tests/test_volume.py @@ -151,7 +151,6 @@ class TestNormHypsometric: @pytest.mark.parametrize("n_bins", [5, 10, 20]) # type: ignore def test_regional_signal(self, n_bins: int) -> None: - warnings.simplefilter("error") signal = xdem.volume.get_regional_hypsometric_signal( ddem=self.ddem, ref_dem=self.dem_2009, glacier_index_map=self.glacier_index_map, n_bins=n_bins @@ -206,8 +205,6 @@ def test_interpolate_small(self) -> None: def test_regional_hypsometric_interp(self) -> None: - warnings.simplefilter("error") - # Extract a normalized regional hypsometric signal. ddem = self.dem_2009 - self.dem_1990 From 3b7474de460bb7f5371b63977c700bbf0dab4a33 Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Fri, 15 Mar 2024 17:53:54 -0800 Subject: [PATCH 20/54] Update with new geoutils to_pointcloud --- tests/test_coreg/test_affine.py | 4 ++-- tests/test_coreg/test_base.py | 2 +- xdem/coreg/affine.py | 2 +- xdem/coreg/base.py | 6 +++--- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/tests/test_coreg/test_affine.py b/tests/test_coreg/test_affine.py index 4c73215a..c74e9d04 100644 --- a/tests/test_coreg/test_affine.py +++ b/tests/test_coreg/test_affine.py @@ -159,7 +159,7 @@ def test_gradientdescending(self, subsample: int = 10000, inlier_mask: bool = Tr # Run co-registration gds = xdem.coreg.GradientDescending(subsample=subsample) gds.fit( - self.ref.to_points().ds, + self.ref.to_pointcloud().ds, self.tba, inlier_mask=inlier_mask, verbose=verbose, @@ -184,7 +184,7 @@ def test_coreg_example_shift(self, shift_px, coreg_class, points_or_raster, verb shifted_ref = self.ref.copy() shifted_ref.shift(shift_px[0] * res, shift_px[1] * res, inplace=True) - shifted_ref_points = shifted_ref.to_points(as_array=False, subsample=subsample, pixel_offset="center").ds + shifted_ref_points = shifted_ref.to_pointcloud(subsample=subsample).ds shifted_ref_points["E"] = shifted_ref_points.geometry.x shifted_ref_points["N"] = shifted_ref_points.geometry.y shifted_ref_points.rename(columns={"b1": "z"}, inplace=True) diff --git a/tests/test_coreg/test_base.py b/tests/test_coreg/test_base.py index 2b0efb84..26d56a97 100644 --- a/tests/test_coreg/test_base.py +++ b/tests/test_coreg/test_base.py @@ -642,7 +642,7 @@ def test_pipeline__errors(self) -> None: def test_pipeline_pts(self) -> None: pipeline = coreg.NuthKaab() + coreg.GradientDescending() - ref_points = self.ref.to_points(as_array=False, subsample=5000, pixel_offset="center").ds + ref_points = self.ref.to_pointcloud(subsample=5000).ds ref_points["E"] = ref_points.geometry.x ref_points["N"] = ref_points.geometry.y ref_points.rename(columns={"b1": "z"}, inplace=True) diff --git a/xdem/coreg/affine.py b/xdem/coreg/affine.py index 221b4e34..862fda97 100644 --- a/xdem/coreg/affine.py +++ b/xdem/coreg/affine.py @@ -1213,7 +1213,7 @@ def _fit_rst_rst( ref_elev = ( Raster.from_array(ref_elev, transform=transform, crs=crs, nodata=-9999.0) - .to_points(as_array=False, pixel_offset="center") + .to_pointcloud() .ds ) ref_elev["E"] = ref_elev.geometry.x diff --git a/xdem/coreg/base.py b/xdem/coreg/base.py index 7fcd0e2c..1fe7e6f4 100644 --- a/xdem/coreg/base.py +++ b/xdem/coreg/base.py @@ -576,7 +576,7 @@ def _postprocess_coreg_apply_rst( match_rst = gu.Raster.from_array(elev, transform, crs=crs, nodata=nodata) else: match_rst = elev - applied_rst = applied_rst.reproject(match_rst, resampling=resampling) + applied_rst = applied_rst.reproject(match_rst, resampling=resampling, silent=True) applied_elev = applied_rst.data # Now that the raster data is reprojected, the new out_transform is set as the original transform out_transform = transform @@ -1511,7 +1511,7 @@ def _fit_func( ) tba_elev_pts = ( gu.Raster.from_array(data=kwargs["tba_elev"], transform=kwargs["transform"], crs=kwargs["crs"]) - .to_points() + .to_pointcloud() .ds ) kwargs.update({"tba_elev": tba_elev_pts}) @@ -1529,7 +1529,7 @@ def _fit_func( ) ref_elev_pts = ( gu.Raster.from_array(data=kwargs["ref_elev"], transform=kwargs["transform"], crs=kwargs["crs"]) - .to_points() + .to_pointcloud() .ds ) kwargs.update({"ref_elev": ref_elev_pts}) From 105a0299fc20c74760bbce005210c0b8fbeb5063 Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Fri, 15 Mar 2024 19:28:48 -0800 Subject: [PATCH 21/54] Incremental commit on warnings --- tests/test_dem.py | 13 +++++++------ tests/test_doc.py | 5 +++++ xdem/coreg/biascorr.py | 8 ++++---- 3 files changed, 16 insertions(+), 10 deletions(-) diff --git a/tests/test_dem.py b/tests/test_dem.py index 2f894603..c44aa352 100644 --- a/tests/test_dem.py +++ b/tests/test_dem.py @@ -248,12 +248,13 @@ def test_set_vcrs(self) -> None: dem.set_vcrs(new_vcrs="is_lmi_Icegeoid_ISN93.tif") # Check that non-existing grids raise errors - with pytest.raises( - ValueError, - match="The provided grid 'the best grid' does not exist at https://cdn.proj.org/. " - "Provide an existing grid.", - ): - dem.set_vcrs(new_vcrs="the best grid") + with pytest.warns(UserWarning, match="Grid not found in*"): + with pytest.raises( + ValueError, + match="The provided grid 'the best grid' does not exist at https://cdn.proj.org/. " + "Provide an existing grid.", + ): + dem.set_vcrs(new_vcrs="the best grid") def test_to_vcrs(self) -> None: """Tests the conversion of vertical CRS.""" diff --git a/tests/test_doc.py b/tests/test_doc.py index 43a5a0f4..8adbcd7a 100644 --- a/tests/test_doc.py +++ b/tests/test_doc.py @@ -54,6 +54,11 @@ def run_code(filename: str) -> None: def test_build(self) -> None: """Try building the doc and see if it works.""" + # Ignore all warnings raised in the documentation + # (some UserWarning are shown on purpose in certain examples, so they shouldn't make the test fail, + # and most other warnings are for Sphinx developers, not meant to be seen by us; or we can check on RTD) + warnings.filterwarnings("ignore") + # Test only on Linux if platform.system() == "Linux": # Remove the build directory if it exists. diff --git a/xdem/coreg/biascorr.py b/xdem/coreg/biascorr.py index 31eda69d..046de3c7 100644 --- a/xdem/coreg/biascorr.py +++ b/xdem/coreg/biascorr.py @@ -380,7 +380,7 @@ def _fit_rst_pts( # type: ignore valid_mask = np.logical_and.reduce((inlier_mask, np.isfinite(rst_elev))) # Convert inlier mask to points to be able to determine subsample later - inlier_rst = gu.Raster.from_array(data=valid_mask, transform=transform, crs=crs) + inlier_rst = gu.Raster.from_array(data=valid_mask, transform=transform, crs=crs, nodata=-9999) # The location needs to be surrounded by inliers, use floor to get 0 for at least one outlier valid_pts = np.floor(inlier_rst.interp_points(pts)).astype(bool) # Interpolates boolean mask as integers @@ -395,11 +395,11 @@ def _fit_rst_pts( # type: ignore # Convert ref or tba depending on which is the point dataset if isinstance(ref_elev, gpd.GeoDataFrame): - tba_rst = gu.Raster.from_array(data=tba_elev, transform=transform, crs=crs) + tba_rst = gu.Raster.from_array(data=tba_elev, transform=transform, crs=crs, nodata=-9999) tba_elev_pts = tba_rst.interp_points(pts) ref_elev_pts = ref_elev[z_name].values[subsample_mask] else: - ref_rst = gu.Raster.from_array(data=ref_elev, transform=transform, crs=crs) + ref_rst = gu.Raster.from_array(data=ref_elev, transform=transform, crs=crs, nodata=-9999) ref_elev_pts = ref_rst.interp_points(pts) tba_elev_pts = tba_elev[z_name].values[subsample_mask] @@ -407,7 +407,7 @@ def _fit_rst_pts( # type: ignore if bias_vars is not None: bias_vars_pts = {} for var in bias_vars.keys(): - bias_vars_pts[var] = gu.Raster.from_array(bias_vars[var], transform=transform, crs=crs).interp_points( + bias_vars_pts[var] = gu.Raster.from_array(bias_vars[var], transform=transform, crs=crs, nodata=-9999).interp_points( pts ) else: From fb899fc70bd7fdafc993a60c0716a4732563b4fb Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Fri, 15 Mar 2024 20:23:14 -0800 Subject: [PATCH 22/54] Incremental commit on warnings --- tests/test_coreg/test_affine.py | 2 +- tests/test_coreg/test_biascorr.py | 8 ++++++-- xdem/coreg/affine.py | 6 +++--- xdem/coreg/biascorr.py | 2 +- 4 files changed, 11 insertions(+), 7 deletions(-) diff --git a/tests/test_coreg/test_affine.py b/tests/test_coreg/test_affine.py index c74e9d04..28c86c71 100644 --- a/tests/test_coreg/test_affine.py +++ b/tests/test_coreg/test_affine.py @@ -184,7 +184,7 @@ def test_coreg_example_shift(self, shift_px, coreg_class, points_or_raster, verb shifted_ref = self.ref.copy() shifted_ref.shift(shift_px[0] * res, shift_px[1] * res, inplace=True) - shifted_ref_points = shifted_ref.to_pointcloud(subsample=subsample).ds + shifted_ref_points = shifted_ref.to_pointcloud(subsample=subsample, force_pixel_offset="center", random_state=42).ds shifted_ref_points["E"] = shifted_ref_points.geometry.x shifted_ref_points["N"] = shifted_ref_points.geometry.y shifted_ref_points.rename(columns={"b1": "z"}, inplace=True) diff --git a/tests/test_coreg/test_biascorr.py b/tests/test_coreg/test_biascorr.py index a2868861..0cd449be 100644 --- a/tests/test_coreg/test_biascorr.py +++ b/tests/test_coreg/test_biascorr.py @@ -194,7 +194,7 @@ def test_biascorr__fit_1d(self, fit_args, fit_func, fit_optimizer, capsys) -> No @pytest.mark.parametrize("fit_args", [fit_args_rst_pts, fit_args_rst_rst]) # type: ignore @pytest.mark.parametrize( - "fit_func", (polynomial_2d, lambda x, a, b, c, d: a * x[0] + b * x[1] + c**d) + "fit_func", (polynomial_2d, lambda x, a, b, c, d: a * x[0] + b * x[1] + c / x[0] + d) ) # type: ignore @pytest.mark.parametrize( "fit_optimizer", @@ -284,6 +284,10 @@ def test_biascorr__bin_2d(self, fit_args, bin_sizes, bin_statistic) -> None: def test_biascorr__bin_and_fit_1d(self, fit_args, fit_func, fit_optimizer, bin_sizes, bin_statistic) -> None: """Test the _fit_func and apply_func methods of BiasCorr for the bin_and_fit case (called by all subclasses).""" + # Curve fit can be unhappy in certain circumstances for numerical estimation of covariance + # We don't care for this test + warnings.filterwarnings("ignore", message="Covariance of the parameters could not be estimated*") + # Create a bias correction object bcorr = biascorr.BiasCorr( fit_or_bin="bin_and_fit", @@ -314,7 +318,7 @@ def test_biascorr__bin_and_fit_1d(self, fit_args, fit_func, fit_optimizer, bin_s @pytest.mark.parametrize("fit_args", all_fit_args) # type: ignore @pytest.mark.parametrize( - "fit_func", (polynomial_2d, lambda x, a, b, c, d: a * x[0] + b * x[1] + c**d) + "fit_func", (polynomial_2d, lambda x, a, b, c, d: a * x[0] + b * x[1] + c / x[0] + d) ) # type: ignore @pytest.mark.parametrize( "fit_optimizer", diff --git a/xdem/coreg/affine.py b/xdem/coreg/affine.py index 862fda97..a9b29c9b 100644 --- a/xdem/coreg/affine.py +++ b/xdem/coreg/affine.py @@ -878,7 +878,7 @@ def _fit_rst_pts( if verbose: print("Running Nuth and Kääb (2011) coregistration. Shift pts instead of shifting dem") - rst_elev = Raster.from_array(rst_elev, transform=transform, crs=crs) + rst_elev = Raster.from_array(rst_elev, transform=transform, crs=crs, nodata=-9999) tba_arr, _ = get_array_and_mask(rst_elev) bounds, resolution = _transform_to_bounds_and_res(ref_elev.shape, transform) @@ -1120,7 +1120,7 @@ def _fit_rst_pts( rst_elev = ref_elev ref = "raster" - rst_elev = Raster.from_array(rst_elev, transform=transform, crs=crs) + rst_elev = Raster.from_array(rst_elev, transform=transform, crs=crs, nodata=-9999) # Perform downsampling if subsample != None if self._meta["subsample"] and len(point_elev) > self._meta["subsample"]: @@ -1213,7 +1213,7 @@ def _fit_rst_rst( ref_elev = ( Raster.from_array(ref_elev, transform=transform, crs=crs, nodata=-9999.0) - .to_pointcloud() + .to_pointcloud(force_pixel_offset="center") .ds ) ref_elev["E"] = ref_elev.geometry.x diff --git a/xdem/coreg/biascorr.py b/xdem/coreg/biascorr.py index 046de3c7..effc163d 100644 --- a/xdem/coreg/biascorr.py +++ b/xdem/coreg/biascorr.py @@ -380,7 +380,7 @@ def _fit_rst_pts( # type: ignore valid_mask = np.logical_and.reduce((inlier_mask, np.isfinite(rst_elev))) # Convert inlier mask to points to be able to determine subsample later - inlier_rst = gu.Raster.from_array(data=valid_mask, transform=transform, crs=crs, nodata=-9999) + inlier_rst = gu.Raster.from_array(data=valid_mask, transform=transform, crs=crs) # The location needs to be surrounded by inliers, use floor to get 0 for at least one outlier valid_pts = np.floor(inlier_rst.interp_points(pts)).astype(bool) # Interpolates boolean mask as integers From e2056aef26768b5f8aeba55bba4e91069b02bbb5 Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Sat, 16 Mar 2024 14:05:16 -0800 Subject: [PATCH 23/54] Incremental commit on warnings --- tests/test_coreg/test_affine.py | 1 - tests/test_fit.py | 3 +++ tests/test_spatialstats.py | 6 +++--- tests/test_volume.py | 15 ++++++++++----- xdem/coreg/base.py | 2 ++ xdem/coreg/biascorr.py | 6 +++--- xdem/dem.py | 2 +- xdem/spatialstats.py | 21 +++++++++++---------- 8 files changed, 33 insertions(+), 23 deletions(-) diff --git a/tests/test_coreg/test_affine.py b/tests/test_coreg/test_affine.py index 28c86c71..dc19d31e 100644 --- a/tests/test_coreg/test_affine.py +++ b/tests/test_coreg/test_affine.py @@ -163,7 +163,6 @@ def test_gradientdescending(self, subsample: int = 10000, inlier_mask: bool = Tr self.tba, inlier_mask=inlier_mask, verbose=verbose, - subsample=subsample, z_name="b1", ) assert gds._meta["offset_east_px"] == pytest.approx(-0.496000, rel=1e-1, abs=0.1) diff --git a/tests/test_fit.py b/tests/test_fit.py index d10c8840..fbef0726 100644 --- a/tests/test_fit.py +++ b/tests/test_fit.py @@ -50,6 +50,9 @@ def test_robust_norder_polynomial_fit(self, pkg_estimator: str) -> None: def test_robust_norder_polynomial_fit_noise_and_outliers(self) -> None: + # Ignore sklearn convergence warnings + warnings.filterwarnings("ignore", category=UserWarning, message="lbfgs failed to converge") + np.random.seed(42) # Define x vector diff --git a/tests/test_spatialstats.py b/tests/test_spatialstats.py index 815e17df..b067faa9 100644 --- a/tests/test_spatialstats.py +++ b/tests/test_spatialstats.py @@ -365,9 +365,9 @@ def test_get_perbin_nd_binning(self) -> None: # Get the value at the random point for elevation, slope, aspect x = xrand[i] y = yrand[i] - h = self.ref.data[x, y] - slp = self.slope.data[x, y] - asp = self.aspect.data[x, y] + h = self.ref.data.filled(np.nan)[x, y] + slp = self.slope.data.filled(np.nan)[x, y] + asp = self.aspect.data.filled(np.nan)[x, y] if np.logical_or.reduce((np.isnan(h), np.isnan(slp), np.isnan(asp))): continue diff --git a/tests/test_volume.py b/tests/test_volume.py index 2bb2e226..96090000 100644 --- a/tests/test_volume.py +++ b/tests/test_volume.py @@ -40,7 +40,7 @@ def test_bin_ddem(self) -> None: assert ddem_stds["value"].mean() < 50 assert np.abs(np.mean(ddem_bins["value"] - ddem_bins_masked["value"])) < 0.01 - def test_interpolate_ddem_bins(self) -> pd.Series: + def test_interpolate_ddem_bins(self) -> None: """Test dDEM bin interpolation.""" ddem = self.dem_2009 - self.dem_1990 @@ -61,13 +61,18 @@ def test_interpolate_ddem_bins(self) -> pd.Series: # Check that no nans exist. assert not np.any(np.isnan(interpolated_bins)) - # Return the value so that they can be used in other tests. - return interpolated_bins - def test_area_calculation(self) -> None: """Test the area calculation function.""" - ddem_bins = self.test_interpolate_ddem_bins() + ddem = self.dem_2009 - self.dem_1990 + + ddem_bins = xdem.volume.hypsometric_binning(ddem[self.mask], self.dem_2009[self.mask]) + + # Simulate a missing bin + ddem_bins.iloc[3, 0] = np.nan + + # Interpolate the bins and exclude bins with low pixel counts from the interpolation. + interpolated_bins = xdem.volume.interpolate_hypsometric_bins(ddem_bins, count_threshold=200) # Test the area calculation with normal parameters. bin_area = xdem.volume.calculate_hypsometry_area( ddem_bins, self.dem_2009[self.mask], pixel_size=self.dem_2009.res[0] diff --git a/xdem/coreg/base.py b/xdem/coreg/base.py index 1fe7e6f4..42ea5881 100644 --- a/xdem/coreg/base.py +++ b/xdem/coreg/base.py @@ -1784,6 +1784,8 @@ def fit( " individual steps of the pipeline. To silence this warning: only define 'subsample' in " "either fit(subsample=...) or instantiation e.g., VerticalShift(subsample=...)." ) + # Filter warnings of individual pipelines now that the one above was raised + warnings.filterwarnings("ignore", message="Subsample argument passed to*", category=UserWarning) # Pre-process the inputs, by reprojecting and subsampling, without any subsampling (done in each step) ref_dem, tba_dem, inlier_mask, transform, crs = _preprocess_coreg_fit( diff --git a/xdem/coreg/biascorr.py b/xdem/coreg/biascorr.py index effc163d..42c030ce 100644 --- a/xdem/coreg/biascorr.py +++ b/xdem/coreg/biascorr.py @@ -763,7 +763,7 @@ def _fit_rst_rst( # type: ignore print("Estimating rotated coordinates.") x, _ = gu.raster.get_xy_rotated( - raster=gu.Raster.from_array(data=ref_elev, crs=crs, transform=transform), + raster=gu.Raster.from_array(data=ref_elev, crs=crs, transform=transform, nodata=-9999), along_track_angle=self._meta["angle"], ) @@ -807,7 +807,7 @@ def _fit_rst_pts( # type: ignore print("Estimating rotated coordinates.") x, _ = gu.raster.get_xy_rotated( - raster=gu.Raster.from_array(data=rast_elev, crs=crs, transform=transform), + raster=gu.Raster.from_array(data=rast_elev, crs=crs, transform=transform, nodata=-9999), along_track_angle=self._meta["angle"], ) @@ -841,7 +841,7 @@ def _apply_rst( # Define the coordinates for applying the correction x, _ = gu.raster.get_xy_rotated( - raster=gu.Raster.from_array(data=elev, crs=crs, transform=transform), + raster=gu.Raster.from_array(data=elev, crs=crs, transform=transform, nodata=-9999), along_track_angle=self._meta["angle"], ) diff --git a/xdem/dem.py b/xdem/dem.py index 3fbc858a..64f6d3dc 100644 --- a/xdem/dem.py +++ b/xdem/dem.py @@ -459,7 +459,7 @@ def estimate_uncertainty( """ # Elevation change - dh = other_dem.reproject(self) - self + dh = other_dem.reproject(self, silent=True) - self # If the precision of the other DEM is the same, divide the dh values by sqrt(2) # See Equation 7 and 8 of Hugonnet et al. (2022) diff --git a/xdem/spatialstats.py b/xdem/spatialstats.py index 94a729b8..f15602ee 100644 --- a/xdem/spatialstats.py +++ b/xdem/spatialstats.py @@ -1191,16 +1191,17 @@ def _get_cdist_empirical_variogram( """ - if subsample_method == "cdist_equidistant" and "runs" not in kwargs.keys() and "samples" not in kwargs.keys(): - - # We define subparameters for the equidistant technique to match the number of pairwise comparison - # that would have a classic "subsample" with pdist, except if those parameters are already user-defined - runs, samples, ratio_subsample = _choose_cdist_equidistant_sampling_parameters(**kwargs) - - kwargs["runs"] = runs - # The "samples" argument is used by skgstat Metric subclasses (and not "subsample") - kwargs["samples"] = samples - kwargs["ratio_subsample"] = ratio_subsample + if subsample_method == "cdist_equidistant": + + if "runs" not in kwargs.keys() and "samples" not in kwargs.keys(): + # We define subparameters for the equidistant technique to match the number of pairwise comparison + # that would have a classic "subsample" with pdist, except if those parameters are already user-defined + runs, samples, ratio_subsample = _choose_cdist_equidistant_sampling_parameters(**kwargs) + kwargs["ratio_subsample"] = ratio_subsample + kwargs["runs"] = runs + # The "samples" argument is used by skgstat Metric subclasses (and not "subsample") + kwargs["samples"] = samples + kwargs.pop("subsample") elif subsample_method == "cdist_point": From 60fae87b7fb8f3c77c24498b01e4332d1094376b Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Sat, 16 Mar 2024 14:07:36 -0800 Subject: [PATCH 24/54] Linting --- tests/test_coreg/test_affine.py | 5 +++-- tests/test_coreg/test_biascorr.py | 5 ++--- tests/test_coreg/test_workflows.py | 1 - tests/test_ddem.py | 1 - tests/test_misc.py | 1 - tests/test_volume.py | 4 ---- xdem/coreg/biascorr.py | 6 +++--- xdem/spatialstats.py | 2 +- 8 files changed, 9 insertions(+), 16 deletions(-) diff --git a/tests/test_coreg/test_affine.py b/tests/test_coreg/test_affine.py index dc19d31e..4bc59c6e 100644 --- a/tests/test_coreg/test_affine.py +++ b/tests/test_coreg/test_affine.py @@ -2,7 +2,6 @@ from __future__ import annotations import copy -import warnings import geopandas as gpd import numpy as np @@ -183,7 +182,9 @@ def test_coreg_example_shift(self, shift_px, coreg_class, points_or_raster, verb shifted_ref = self.ref.copy() shifted_ref.shift(shift_px[0] * res, shift_px[1] * res, inplace=True) - shifted_ref_points = shifted_ref.to_pointcloud(subsample=subsample, force_pixel_offset="center", random_state=42).ds + shifted_ref_points = shifted_ref.to_pointcloud( + subsample=subsample, force_pixel_offset="center", random_state=42 + ).ds shifted_ref_points["E"] = shifted_ref_points.geometry.x shifted_ref_points["N"] = shifted_ref_points.geometry.y shifted_ref_points.rename(columns={"b1": "z"}, inplace=True) diff --git a/tests/test_coreg/test_biascorr.py b/tests/test_coreg/test_biascorr.py index 0cd449be..25d6cdbd 100644 --- a/tests/test_coreg/test_biascorr.py +++ b/tests/test_coreg/test_biascorr.py @@ -11,13 +11,12 @@ import scipy import xdem.terrain - -PLOT = False - from xdem import examples from xdem.coreg import biascorr from xdem.fit import polynomial_2d, sumsin_1d +PLOT = False + def load_examples() -> tuple[gu.Raster, gu.Raster, gu.Vector]: """Load example files to try coregistration methods with.""" diff --git a/tests/test_coreg/test_workflows.py b/tests/test_coreg/test_workflows.py index 4da4c493..3358a2bf 100644 --- a/tests/test_coreg/test_workflows.py +++ b/tests/test_coreg/test_workflows.py @@ -3,7 +3,6 @@ import os import tempfile -import warnings import numpy as np import pandas as pd diff --git a/tests/test_ddem.py b/tests/test_ddem.py index 3a5a8bc5..7863e728 100644 --- a/tests/test_ddem.py +++ b/tests/test_ddem.py @@ -1,5 +1,4 @@ """Functions to test the difference of DEMs tools.""" -import warnings import geoutils as gu import numpy as np diff --git a/tests/test_misc.py b/tests/test_misc.py index 6f543289..fc87a314 100644 --- a/tests/test_misc.py +++ b/tests/test_misc.py @@ -3,7 +3,6 @@ import os import re -import warnings import pytest import yaml # type: ignore diff --git a/tests/test_volume.py b/tests/test_volume.py index 96090000..6aaf6bdc 100644 --- a/tests/test_volume.py +++ b/tests/test_volume.py @@ -1,9 +1,7 @@ """Functions to test the volume estimation tools.""" -import warnings import geoutils as gu import numpy as np -import pandas as pd import pytest import xdem @@ -71,8 +69,6 @@ def test_area_calculation(self) -> None: # Simulate a missing bin ddem_bins.iloc[3, 0] = np.nan - # Interpolate the bins and exclude bins with low pixel counts from the interpolation. - interpolated_bins = xdem.volume.interpolate_hypsometric_bins(ddem_bins, count_threshold=200) # Test the area calculation with normal parameters. bin_area = xdem.volume.calculate_hypsometry_area( ddem_bins, self.dem_2009[self.mask], pixel_size=self.dem_2009.res[0] diff --git a/xdem/coreg/biascorr.py b/xdem/coreg/biascorr.py index 42c030ce..d6cc2a6a 100644 --- a/xdem/coreg/biascorr.py +++ b/xdem/coreg/biascorr.py @@ -407,9 +407,9 @@ def _fit_rst_pts( # type: ignore if bias_vars is not None: bias_vars_pts = {} for var in bias_vars.keys(): - bias_vars_pts[var] = gu.Raster.from_array(bias_vars[var], transform=transform, crs=crs, nodata=-9999).interp_points( - pts - ) + bias_vars_pts[var] = gu.Raster.from_array( + bias_vars[var], transform=transform, crs=crs, nodata=-9999 + ).interp_points(pts) else: bias_vars_pts = None diff --git a/xdem/spatialstats.py b/xdem/spatialstats.py index f15602ee..200873d1 100644 --- a/xdem/spatialstats.py +++ b/xdem/spatialstats.py @@ -1201,7 +1201,7 @@ def _get_cdist_empirical_variogram( kwargs["runs"] = runs # The "samples" argument is used by skgstat Metric subclasses (and not "subsample") kwargs["samples"] = samples - + kwargs.pop("subsample") elif subsample_method == "cdist_point": From d8e139d6ea5d064022d26dde525f1a9254b32fb5 Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Sun, 17 Mar 2024 08:52:48 -0800 Subject: [PATCH 25/54] Finalize catching warnings --- tests/test_dem.py | 2 +- tests/test_spatialstats.py | 2 ++ tests/test_terrain.py | 9 +++++---- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/tests/test_dem.py b/tests/test_dem.py index c44aa352..e5b0ebdc 100644 --- a/tests/test_dem.py +++ b/tests/test_dem.py @@ -321,7 +321,7 @@ def test_to_vcrs__grids(self, grid_shifts: dict[str, Any]) -> None: # Using an arbitrary elevation of 100 m (no influence on the transformation) dem = DEM.from_array( - data=np.array([[100]]), + data=np.array([[100, 100]]), transform=rio.transform.from_bounds( grid_shifts["lon"], grid_shifts["lat"], grid_shifts["lon"] + 0.01, grid_shifts["lat"] + 0.01, 0.01, 0.01 ), diff --git a/tests/test_spatialstats.py b/tests/test_spatialstats.py index b067faa9..a6e3f170 100644 --- a/tests/test_spatialstats.py +++ b/tests/test_spatialstats.py @@ -853,6 +853,8 @@ def test_check_params_variogram_model(self) -> None: def test_estimate_model_spatial_correlation_and_infer_from_stable(self) -> None: """Test consistency of outputs and errors in wrapper functions for estimation of spatial correlation""" + warnings.filterwarnings("ignore", category=RuntimeWarning, message="Mean of empty slice") + # Keep only data on stable diff_on_stable = self.diff.copy() diff_on_stable.set_mask(self.mask) diff --git a/tests/test_terrain.py b/tests/test_terrain.py index b08898e0..e542ac7a 100644 --- a/tests/test_terrain.py +++ b/tests/test_terrain.py @@ -21,6 +21,7 @@ def run_gdaldem(filepath: str, processing: str, options: str | None = None) -> M """Run GDAL's DEMProcessing and return the read numpy array.""" # Rasterio strongly recommends against importing gdal along rio, so this is done here instead. from osgeo import gdal + gdal.UseExceptions() # Converting string into gdal processing options here to avoid import gdal outside this function: # Riley or Wilson for Terrain Ruggedness, and Zevenberg or Horn for slope, aspect and hillshade @@ -123,7 +124,10 @@ def test_attribute_functions_against_gdaldem(self, attribute: str) -> None: # For hillshade, we round into an integer to match GDAL's output if attribute in ["hillshade_Horn", "hillshade_Zevenberg"]: - attr_xdem = attr_xdem.astype("int").astype("float32") + with warnings.catch_warnings(): + # Normal that a warning would be raised here, so we catch it + warnings.filterwarnings("ignore", message="invalid value encountered in cast", category=RuntimeWarning) + attr_xdem = attr_xdem.astype("int").astype("float32") # We compute the difference and keep only valid values diff = (attr_xdem - attr_gdal).filled(np.nan) @@ -171,9 +175,6 @@ def test_attribute_functions_against_gdaldem(self, attribute: str) -> None: # Validate that this doesn't raise weird warnings after introducing nans. functions[attribute](dem) - @pytest.mark.skip( - "richdem wheels don't build on latest GDAL versions, " "need to circumvent that problem..." - ) # type: ignore @pytest.mark.parametrize( "attribute", ["slope_Horn", "aspect_Horn", "hillshade_Horn", "curvature", "profile_curvature", "planform_curvature"], From 521ff61505cc896bfc692ea5161fbf9367605eab Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Sun, 17 Mar 2024 08:53:06 -0800 Subject: [PATCH 26/54] Linting --- tests/test_terrain.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/test_terrain.py b/tests/test_terrain.py index e542ac7a..962dddae 100644 --- a/tests/test_terrain.py +++ b/tests/test_terrain.py @@ -21,6 +21,7 @@ def run_gdaldem(filepath: str, processing: str, options: str | None = None) -> M """Run GDAL's DEMProcessing and return the read numpy array.""" # Rasterio strongly recommends against importing gdal along rio, so this is done here instead. from osgeo import gdal + gdal.UseExceptions() # Converting string into gdal processing options here to avoid import gdal outside this function: From 4ac38a071e45076950bf4c6636adff8dfaf2f6a3 Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Sun, 17 Mar 2024 08:59:01 -0800 Subject: [PATCH 27/54] Fix test of gradientdescent now shifted by half a pixel --- tests/test_coreg/test_affine.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/test_coreg/test_affine.py b/tests/test_coreg/test_affine.py index 4bc59c6e..5aa0315d 100644 --- a/tests/test_coreg/test_affine.py +++ b/tests/test_coreg/test_affine.py @@ -158,15 +158,15 @@ def test_gradientdescending(self, subsample: int = 10000, inlier_mask: bool = Tr # Run co-registration gds = xdem.coreg.GradientDescending(subsample=subsample) gds.fit( - self.ref.to_pointcloud().ds, + self.ref.to_pointcloud(data_column_name="z").ds, self.tba, inlier_mask=inlier_mask, verbose=verbose, - z_name="b1", + random_state=42 ) - assert gds._meta["offset_east_px"] == pytest.approx(-0.496000, rel=1e-1, abs=0.1) - assert gds._meta["offset_north_px"] == pytest.approx(-0.1875, rel=1e-1, abs=0.1) - assert gds._meta["vshift"] == pytest.approx(-2.39, rel=1e-1) + + shifts = (gds._meta["offset_east_px"], gds._meta["offset_north_px"], gds._meta["vshift"]) + assert shifts == pytest.approx((0.03525, -0.59775, -2.39144), abs=10e-5) @pytest.mark.parametrize("shift_px", [(1, 1), (2, 2)]) # type: ignore @pytest.mark.parametrize("coreg_class", [coreg.NuthKaab, coreg.GradientDescending, coreg.ICP]) # type: ignore From 1bba22a700ab1a9ae31794bc51fea8d60266d38b Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Sun, 17 Mar 2024 08:59:14 -0800 Subject: [PATCH 28/54] Linting --- tests/test_coreg/test_affine.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_coreg/test_affine.py b/tests/test_coreg/test_affine.py index 5aa0315d..ebf8e7a5 100644 --- a/tests/test_coreg/test_affine.py +++ b/tests/test_coreg/test_affine.py @@ -162,7 +162,7 @@ def test_gradientdescending(self, subsample: int = 10000, inlier_mask: bool = Tr self.tba, inlier_mask=inlier_mask, verbose=verbose, - random_state=42 + random_state=42, ) shifts = (gds._meta["offset_east_px"], gds._meta["offset_north_px"], gds._meta["vshift"]) From b63454b865d595b77c199e3acfd8188c570b4747 Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Sun, 17 Mar 2024 09:05:09 -0800 Subject: [PATCH 29/54] Test CI with geoutils branch --- dev-environment.yml | 2 +- environment.yml | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/dev-environment.yml b/dev-environment.yml index 7b3add3f..2acc4411 100644 --- a/dev-environment.yml +++ b/dev-environment.yml @@ -51,4 +51,4 @@ dependencies: - noisyopt # To run CI against latest GeoUtils - # - git+https://github.com/GlacioHack/geoutils.git + - https://github.com/rhugonnet/geoutils.git@fix_to_points diff --git a/environment.yml b/environment.yml index 67bf6db1..2f27362e 100644 --- a/environment.yml +++ b/environment.yml @@ -13,9 +13,9 @@ dependencies: - tqdm - scikit-image=0.* - scikit-gstat>=1.0 - - geoutils=0.1.* +# - geoutils=0.1.* - pip - # To run CI against latest GeoUtils - # - pip: - # - git+https://github.com/GlacioHack/geoutils.git + # To run CI against latest GeoUtils + - pip: + - https://github.com/rhugonnet/geoutils.git@fix_to_points From 19e388c7f087d3779a789db3c2ac17dfa057680a Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Sun, 17 Mar 2024 09:05:27 -0800 Subject: [PATCH 30/54] With linting --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 57cc2479..08182f4d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -11,5 +11,5 @@ scipy==1.* tqdm scikit-image==0.* scikit-gstat>=1.0 -geoutils==0.1.* pip +https://github.com/rhugonnet/geoutils.git@fix_to_points From cfe105e5451df5ad8e59c537ee03324eb4f8f7cc Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Sun, 17 Mar 2024 09:11:54 -0800 Subject: [PATCH 31/54] Fix pip install of branch with git+ --- dev-environment.yml | 2 +- environment.yml | 2 +- requirements.txt | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/dev-environment.yml b/dev-environment.yml index 2acc4411..b671b707 100644 --- a/dev-environment.yml +++ b/dev-environment.yml @@ -51,4 +51,4 @@ dependencies: - noisyopt # To run CI against latest GeoUtils - - https://github.com/rhugonnet/geoutils.git@fix_to_points + - git+https://github.com/rhugonnet/geoutils@fix_to_points diff --git a/environment.yml b/environment.yml index 2f27362e..469d2714 100644 --- a/environment.yml +++ b/environment.yml @@ -18,4 +18,4 @@ dependencies: # To run CI against latest GeoUtils - pip: - - https://github.com/rhugonnet/geoutils.git@fix_to_points + - git+https://github.com/rhugonnet/geoutils@fix_to_points diff --git a/requirements.txt b/requirements.txt index 08182f4d..206100ac 100644 --- a/requirements.txt +++ b/requirements.txt @@ -12,4 +12,4 @@ tqdm scikit-image==0.* scikit-gstat>=1.0 pip -https://github.com/rhugonnet/geoutils.git@fix_to_points +geoutils@fix_to_points From 8f0247be7f5fd68db2e23762ee1fcf5d85093e75 Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Sun, 17 Mar 2024 11:07:09 -0800 Subject: [PATCH 32/54] Let's go --- dev-environment.yml | 2 +- environment.yml | 2 +- requirements.txt | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/dev-environment.yml b/dev-environment.yml index b671b707..17716aea 100644 --- a/dev-environment.yml +++ b/dev-environment.yml @@ -51,4 +51,4 @@ dependencies: - noisyopt # To run CI against latest GeoUtils - - git+https://github.com/rhugonnet/geoutils@fix_to_points + - git+https://github.com/rhugonnet/geoutils.git@fix_to_points diff --git a/environment.yml b/environment.yml index 469d2714..4bed7dc9 100644 --- a/environment.yml +++ b/environment.yml @@ -18,4 +18,4 @@ dependencies: # To run CI against latest GeoUtils - pip: - - git+https://github.com/rhugonnet/geoutils@fix_to_points + - git+https://github.com/rhugonnet/geoutils.git@fix_to_points diff --git a/requirements.txt b/requirements.txt index 206100ac..a263692b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -12,4 +12,4 @@ tqdm scikit-image==0.* scikit-gstat>=1.0 pip -geoutils@fix_to_points +geoutils From 7a8b006d1c72844de69461190623b407e3776959 Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Mon, 18 Mar 2024 18:30:15 -0800 Subject: [PATCH 33/54] Finalize richdem fixes --- dev-environment.yml | 2 +- tests/test_terrain.py | 3 --- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/dev-environment.yml b/dev-environment.yml index 17716aea..495f3c0d 100644 --- a/dev-environment.yml +++ b/dev-environment.yml @@ -22,7 +22,7 @@ dependencies: - opencv - openh264 - pytransform3d - # - richdem + - richdem # Test dependencies - pytest diff --git a/tests/test_terrain.py b/tests/test_terrain.py index 962dddae..83fba090 100644 --- a/tests/test_terrain.py +++ b/tests/test_terrain.py @@ -346,9 +346,6 @@ def test_get_terrain_attribute(self) -> None: slope_lowres = xdem.terrain.get_terrain_attribute(self.dem.data, "slope", resolution=self.dem.res[0] * 2) assert np.nanmean(slope) > np.nanmean(slope_lowres) - @pytest.mark.skip( - "richdem wheels don't build on latest GDAL versions, " "need to circumvent that problem..." - ) # type: ignore def test_get_terrain_attribute_errors(self) -> None: """Test the get_terrain_attribute function raises appropriate errors.""" From fd36197476e44eded31db635acc4922bbc211ba2 Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Tue, 19 Mar 2024 17:47:35 -0800 Subject: [PATCH 34/54] Change requirement files to latest geoutils release --- dev-environment.yml | 4 ++-- environment.yml | 6 +++--- requirements.txt | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/dev-environment.yml b/dev-environment.yml index 495f3c0d..9169f748 100644 --- a/dev-environment.yml +++ b/dev-environment.yml @@ -13,7 +13,7 @@ dependencies: - tqdm - scikit-image=0.* - scikit-gstat>=1.0 - - geoutils=0.1.* + - geoutils>=0.1.2 # Development-specific, to mirror manually in setup.cfg [options.extras_require]. - pip @@ -51,4 +51,4 @@ dependencies: - noisyopt # To run CI against latest GeoUtils - - git+https://github.com/rhugonnet/geoutils.git@fix_to_points +# - git+https://github.com/rhugonnet/geoutils.git@fix_to_points diff --git a/environment.yml b/environment.yml index 4bed7dc9..ac0f12f1 100644 --- a/environment.yml +++ b/environment.yml @@ -13,9 +13,9 @@ dependencies: - tqdm - scikit-image=0.* - scikit-gstat>=1.0 -# - geoutils=0.1.* + - geoutils>=0.1.2 - pip # To run CI against latest GeoUtils - - pip: - - git+https://github.com/rhugonnet/geoutils.git@fix_to_points +# - pip: +# - git+https://github.com/rhugonnet/geoutils.git@fix_to_points diff --git a/requirements.txt b/requirements.txt index a263692b..28413caa 100644 --- a/requirements.txt +++ b/requirements.txt @@ -11,5 +11,5 @@ scipy==1.* tqdm scikit-image==0.* scikit-gstat>=1.0 +geoutils>=0.1.2 pip -geoutils From d0adbcddbae36f01cef681f568d4a71b2435c62f Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Tue, 19 Mar 2024 20:55:33 -0800 Subject: [PATCH 35/54] Update DEM.from_array --- xdem/dem.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/xdem/dem.py b/xdem/dem.py index 64f6d3dc..61d7eb9d 100644 --- a/xdem/dem.py +++ b/xdem/dem.py @@ -160,6 +160,8 @@ def from_array( transform: tuple[float, ...] | Affine, crs: CRS | int | None, nodata: int | float | None = None, + area_or_point: Literal["Area", "Point"] | None = None, + tags: dict[str, Any] = None, vcrs: Literal["Ellipsoid"] | Literal["EGM08"] | Literal["EGM96"] @@ -174,15 +176,16 @@ def from_array( :param data: Input array. :param transform: Affine 2D transform. Either a tuple(x_res, 0.0, top_left_x, 0.0, y_res, top_left_y) or an affine.Affine object. - :param crs: Coordinate reference system. Either a rasterio CRS, - or an EPSG integer. + :param crs: Coordinate reference system. Either a rasterio CRS, or an EPSG integer. :param nodata: Nodata value. + :param area_or_point: Pixel interpretation of the raster, will be stored in AREA_OR_POINT metadata. + :param tags: Metadata stored in a dictionary. :param vcrs: Vertical coordinate reference system. :returns: DEM created from the provided array and georeferencing. """ # We first apply the from_array of the parent class - rast = SatelliteImage.from_array(data=data, transform=transform, crs=crs, nodata=nodata) + rast = SatelliteImage.from_array(data=data, transform=transform, crs=crs, nodata=nodata, area_or_point=area_or_point, tags=tags) # Then add the vcrs to the class call (that builds on top of the parent class) return cls(filename_or_dataset=rast, vcrs=vcrs) From 36ce632471bbb65c1b07eef3f7f0a19aaf575e79 Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Tue, 19 Mar 2024 20:56:03 -0800 Subject: [PATCH 36/54] Linting --- xdem/dem.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/xdem/dem.py b/xdem/dem.py index 61d7eb9d..22d384dd 100644 --- a/xdem/dem.py +++ b/xdem/dem.py @@ -185,7 +185,9 @@ def from_array( :returns: DEM created from the provided array and georeferencing. """ # We first apply the from_array of the parent class - rast = SatelliteImage.from_array(data=data, transform=transform, crs=crs, nodata=nodata, area_or_point=area_or_point, tags=tags) + rast = SatelliteImage.from_array( + data=data, transform=transform, crs=crs, nodata=nodata, area_or_point=area_or_point, tags=tags + ) # Then add the vcrs to the class call (that builds on top of the parent class) return cls(filename_or_dataset=rast, vcrs=vcrs) From 42ae7da9de986eef1054b4470d2c1343f8bb6eef Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Wed, 20 Mar 2024 09:28:10 -0800 Subject: [PATCH 37/54] Fix tests with new geoutils --- tests/test_coreg/test_base.py | 9 --------- tests/test_dem.py | 8 +++++++- tests/test_vcrs.py | 15 ++++++++++++++- xdem/coreg/base.py | 6 ++---- xdem/dem.py | 2 +- 5 files changed, 24 insertions(+), 16 deletions(-) diff --git a/tests/test_coreg/test_base.py b/tests/test_coreg/test_base.py index 26d56a97..15537b0a 100644 --- a/tests/test_coreg/test_base.py +++ b/tests/test_coreg/test_base.py @@ -98,15 +98,6 @@ def test_error_method(self) -> None: dem3 = dem1.copy() + np.random.random(size=dem1.size).reshape(dem1.shape) assert abs(vshiftcorr.error(dem1, dem3, transform=affine, crs=crs, error_type="std") - np.std(dem3)) < 1e-6 - def test_ij_xy(self, i: int = 10, j: int = 20) -> None: - """ - Test the reversibility of ij2xy and xy2ij, which is important for point co-registration. - """ - x, y = self.ref.ij2xy(i, j, offset="ul") - i, j = self.ref.xy2ij(x, y, shift_area_or_point=False) - assert i == pytest.approx(10) - assert j == pytest.approx(20) - @pytest.mark.parametrize("subsample", [10, 10000, 0.5, 1]) # type: ignore def test_get_subsample_on_valid_mask(self, subsample: float | int) -> None: """Test the subsampling function called by all subclasses""" diff --git a/tests/test_dem.py b/tests/test_dem.py index e5b0ebdc..578eaafd 100644 --- a/tests/test_dem.py +++ b/tests/test_dem.py @@ -236,6 +236,9 @@ def test_set_vcrs(self) -> None: assert dem.vcrs_grid == "us_nga_egm08_25.tif" # -- Test 2: we check with grids -- + # Most grids aren't going to be downloaded, so this warning can be raised + warnings.filterwarnings("ignore", category=UserWarning, message="Grid not found in *") + dem.set_vcrs(new_vcrs="us_nga_egm96_15.tif") assert dem.vcrs_name == "unknown using geoidgrids=us_nga_egm96_15.tif" assert dem.vcrs_grid == "us_nga_egm96_15.tif" @@ -312,13 +315,16 @@ def test_to_vcrs__equal_warning(self) -> None: # Compare to manually-extracted shifts at specific coordinates for the geoid grids egm96_chile = {"grid": "us_nga_egm96_15.tif", "lon": -68, "lat": -20, "shift": 42} egm08_chile = {"grid": "us_nga_egm08_25.tif", "lon": -68, "lat": -20, "shift": 42} - geoid96_alaska = {"grid": "us_noaa_geoid06_ak.tif", "lon": -145, "lat": 62, "shift": 17} + geoid96_alaska = {"grid": "us_noaa_geoid06_ak.tif", "lon": -145, "lat": 62, "shift": 15} isn93_iceland = {"grid": "is_lmi_Icegeoid_ISN93.tif", "lon": -18, "lat": 65, "shift": 68} @pytest.mark.parametrize("grid_shifts", [egm08_chile, egm08_chile, geoid96_alaska, isn93_iceland]) # type: ignore def test_to_vcrs__grids(self, grid_shifts: dict[str, Any]) -> None: """Tests grids to convert vertical CRS.""" + # Most grids aren't going to be downloaded, so this warning can be raised + warnings.filterwarnings("ignore", category=UserWarning, message="Grid not found in *") + # Using an arbitrary elevation of 100 m (no influence on the transformation) dem = DEM.from_array( data=np.array([[100, 100]]), diff --git a/tests/test_vcrs.py b/tests/test_vcrs.py index d9f2c61f..69287cdd 100644 --- a/tests/test_vcrs.py +++ b/tests/test_vcrs.py @@ -3,6 +3,7 @@ import pathlib import re +import warnings from typing import Any import numpy as np @@ -12,8 +13,8 @@ import xdem import xdem.vcrs - class TestVCRS: + def test_parse_vcrs_name_from_product(self) -> None: """Test parsing of vertical CRS name from DEM product name.""" @@ -66,6 +67,9 @@ def test_vcrs_from_crs(self, input_output: tuple[CRS, CRS]) -> None: def test_vcrs_from_user_input(self, vcrs_input: str | pathlib.Path | int | CRS) -> None: """Tests the function _vcrs_from_user_input for varying user inputs, for which it will return a CRS.""" + # Most grids aren't going to be downloaded, so this warning can be raised + warnings.filterwarnings("ignore", category=UserWarning, message="Grid not found in *") + # Get user input vcrs = xdem.dem._vcrs_from_user_input(vcrs_input) @@ -116,6 +120,9 @@ def test_vcrs_from_user_input__errors(self) -> None: def test_build_vcrs_from_grid(self, grid: str) -> None: """Test that vertical CRS are correctly built from grid""" + # Most grids aren't going to be downloaded, so this warning can be raised + warnings.filterwarnings("ignore", category=UserWarning, message="Grid not found in *") + # Build vertical CRS vcrs = xdem.vcrs._build_vcrs_from_grid(grid=grid) assert vcrs.is_vertical @@ -132,6 +139,9 @@ def test_build_vcrs_from_grid(self, grid: str) -> None: def test_build_ccrs_from_crs_and_vcrs(self, crs: CRS, vcrs_input: CRS | str) -> None: """Test the function build_ccrs_from_crs_and_vcrs.""" + # Most grids aren't going to be downloaded, so this warning can be raised + warnings.filterwarnings("ignore", category=UserWarning, message="Grid not found in *") + # Get the vertical CRS from user input vcrs = xdem.vcrs._vcrs_from_user_input(vcrs_input=vcrs_input) @@ -180,6 +190,9 @@ def test_build_ccrs_from_crs_and_vcrs__errors(self) -> None: def test_transform_zz(self, grid_shifts: dict[str, Any]) -> None: """Tests grids to convert vertical CRS.""" + # Most grids aren't going to be downloaded, so this warning can be raised + warnings.filterwarnings("ignore", category=UserWarning, message="Grid not found in *") + # Using an arbitrary elevation of 100 m (no influence on the transformation) zz = 100 xx = grid_shifts["lon"] diff --git a/xdem/coreg/base.py b/xdem/coreg/base.py index 42ea5881..ee532afa 100644 --- a/xdem/coreg/base.py +++ b/xdem/coreg/base.py @@ -133,9 +133,7 @@ def _residuals_df( arr_ = dem.data.astype(np.float32) # get residual error at the point on DEM. - i, j = dem.xy2ij( - df_shifted["E"].values, df_shifted["N"].values, op=np.float32, shift_area_or_point=("AREA_OR_POINT" in dem.tags) - ) + i, j = dem.xy2ij(df_shifted["E"].values, df_shifted["N"].values) # ndimage return dem_h = scipy.ndimage.map_coordinates(arr_, [i, j], order=1, mode="nearest", **kwargs) @@ -177,7 +175,7 @@ def _df_sampling_from_dem( mask = dem.data.mask # Get value - x, y = dem.ij2xy(i[~mask[i, j]], j[~mask[i, j]], offset=offset) + x, y = dem.ij2xy(i[~mask[i, j]], j[~mask[i, j]]) z = scipy.ndimage.map_coordinates( dem.data.astype(np.float32), [i[~mask[i, j]], j[~mask[i, j]]], order=order, mode="nearest" ) diff --git a/xdem/dem.py b/xdem/dem.py index 22d384dd..cd5e4251 100644 --- a/xdem/dem.py +++ b/xdem/dem.py @@ -296,7 +296,7 @@ def to_vcrs( # Transform elevation with new vertical CRS zz = self.data - xx, yy = self.coords(offset="center") + xx, yy = self.coords() zz_trans = _transform_zz(crs_from=src_ccrs, crs_to=dst_ccrs, xx=xx, yy=yy, zz=zz) # Update DEM From e0311c25dcc084dd5728684125e188c04a3a4a5a Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Wed, 20 Mar 2024 09:33:09 -0800 Subject: [PATCH 38/54] Linting --- tests/test_vcrs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_vcrs.py b/tests/test_vcrs.py index 69287cdd..a80df026 100644 --- a/tests/test_vcrs.py +++ b/tests/test_vcrs.py @@ -13,8 +13,8 @@ import xdem import xdem.vcrs -class TestVCRS: +class TestVCRS: def test_parse_vcrs_name_from_product(self) -> None: """Test parsing of vertical CRS name from DEM product name.""" From 609855a1c47b883d0457f127ff3a868c3d080641 Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Thu, 28 Mar 2024 14:14:15 -0800 Subject: [PATCH 39/54] Amaurys comments --- xdem/coreg/affine.py | 1 - xdem/coreg/base.py | 35 ++++++++++++++++++++++------------- xdem/coreg/biascorr.py | 7 ++++--- 3 files changed, 26 insertions(+), 17 deletions(-) diff --git a/xdem/coreg/affine.py b/xdem/coreg/affine.py index a9b29c9b..49747616 100644 --- a/xdem/coreg/affine.py +++ b/xdem/coreg/affine.py @@ -583,7 +583,6 @@ def _fit_rst_pts( ) # If raster was reference, invert the matrix - # TODO: Move matrix/invert_matrix to affine module? if ref == "raster": matrix = xdem.coreg.base.invert_matrix(matrix) diff --git a/xdem/coreg/base.py b/xdem/coreg/base.py index ee532afa..39c34972 100644 --- a/xdem/coreg/base.py +++ b/xdem/coreg/base.py @@ -429,10 +429,10 @@ def _preprocess_coreg_fit_point_point( ) -> tuple[gpd.GeoDataFrame, gpd.GeoDataFrame]: """Pre-processing and checks of fit for point-point input.""" - ref_dem = reference_elev - tba_dem = to_be_aligned_elev.to_crs(crs=reference_elev.crs) + ref_elev = reference_elev + tba_elev = to_be_aligned_elev.to_crs(crs=reference_elev.crs) - return ref_dem, tba_dem + return ref_elev, tba_elev def _preprocess_coreg_fit( @@ -472,16 +472,16 @@ def _preprocess_coreg_fit( point_elev = reference_elev ref = "point" - rst_elev, point_elev, inlier_mask, transform, crs = _preprocess_coreg_fit_raster_point( + raster_elev, point_elev, inlier_mask, transform, crs = _preprocess_coreg_fit_raster_point( raster_elev=raster_elev, point_elev=point_elev, inlier_mask=inlier_mask, transform=transform, crs=crs ) if ref == "raster": - ref_elev = rst_elev + ref_elev = raster_elev tba_elev = point_elev else: ref_elev = point_elev - tba_elev = rst_elev + tba_elev = raster_elev # If both inputs are points, simply reproject to the same CRS else: @@ -555,7 +555,12 @@ def _postprocess_coreg_apply_rst( resample: bool, resampling: rio.warp.Resampling | None = None, ) -> tuple[NDArrayf | gu.Raster, affine.Affine]: - """Post-processing and checks of apply for raster input.""" + """ + Post-processing and checks of apply for raster input. + + Here, "elev" and "transform" corresponds to user input, and are required to transform back the output that is + composed of "applied_elev" and "out_transform". + """ # Ensure the dtype is OK applied_elev = applied_elev.astype("float32") @@ -605,7 +610,12 @@ def _postprocess_coreg_apply( resample: bool, resampling: rio.warp.Resampling | None = None, ) -> tuple[NDArrayf | gpd.GeoDataFrame, affine.Affine]: - """Post-processing and checks of apply for any input.""" + """ + Post-processing and checks of apply for any input. + + Here, "elev" and "transform" corresponds to user input, and are required to transform back the output that is + composed of "applied_elev" and "out_transform". + """ # Define resampling resampling = resampling if isinstance(resampling, rio.warp.Resampling) else resampling_method_from_str(resampling) @@ -1181,9 +1191,6 @@ def fit( if self._meta["subsample"] != 1: self._meta["random_state"] = random_state - # TODO: Add preproc for points too - # TODO: Rename into "checks", because not much is preprocessed in the end - # (has to happen in the _fit_func itself, whether for subsampling or # Pre-process the inputs, by reprojecting and converting to arrays ref_elev, tba_elev, inlier_mask, transform, crs = _preprocess_coreg_fit( reference_elev=reference_elev, @@ -2037,6 +2044,9 @@ def fit( **kwargs: Any, ) -> CoregType: + if isinstance(reference_elev, gpd.GeoDataFrame) and isinstance(to_be_aligned_elev, gpd.GeoDataFrame): + raise NotImplementedError("Blockwise coregistration does not yet support two elevation point cloud inputs.") + # Check if subsample arguments are different from their default value for any of the coreg steps: # get default value in argument spec and "subsample" stored in meta, and compare both are consistent if not isinstance(self.procstep, CoregPipeline): @@ -2065,8 +2075,7 @@ def fit( crs=crs, ) - # TODO: Blockwise can only work if one of the two is a Raster... or by defining a grid somehow? - groups = self.subdivide_array(tba_dem.shape) + groups = self.subdivide_array(tba_dem.shape if isinstance(tba_dem, np.ndarray) else ref_dem.shape) indices = np.unique(groups) diff --git a/xdem/coreg/biascorr.py b/xdem/coreg/biascorr.py index d6cc2a6a..b9e87674 100644 --- a/xdem/coreg/biascorr.py +++ b/xdem/coreg/biascorr.py @@ -392,6 +392,7 @@ def _fit_rst_pts( # type: ignore inlier_pts_alltrue = np.ones(len(pts), dtype=bool) # Below, we derive 1D arrays for the rst_rst function to take over after interpolating to the point coordinates + # (as rst_rst works for 1D arrays as well as 2D arrays, as long as coordinates match) # Convert ref or tba depending on which is the point dataset if isinstance(ref_elev, gpd.GeoDataFrame): @@ -413,7 +414,7 @@ def _fit_rst_pts( # type: ignore else: bias_vars_pts = None - # Send to raster-raster fit + # Send to raster-raster fit but using 1D arrays instead of 2D arrays (flattened anyway during analysis) self._fit_biascorr( ref_elev=ref_elev_pts, tba_elev=tba_elev_pts, @@ -1073,7 +1074,7 @@ def _fit_rst_rst( # type: ignore ) -> None: # The number of parameters in the first guess defines the polynomial order when calling np.polyval2d - p0 = np.ones(shape=((self._meta["poly_order"] + 1) * (self._meta["poly_order"] + 1))) + p0 = np.ones(shape=((self._meta["poly_order"] + 1) **2)) # Coordinates (we don't need the actual ones, just array coordinates) xx, yy = np.meshgrid(np.arange(0, ref_elev.shape[1]), np.arange(0, ref_elev.shape[0])) @@ -1110,7 +1111,7 @@ def _fit_rst_pts( # type: ignore rast_elev = ref_elev if not isinstance(ref_elev, gpd.GeoDataFrame) else tba_elev # The number of parameters in the first guess defines the polynomial order when calling np.polyval2d - p0 = np.ones(shape=((self._meta["poly_order"] + 1) * (self._meta["poly_order"] + 1))) + p0 = np.ones(shape=((self._meta["poly_order"] + 1) ** 2)) # Coordinates (we don't need the actual ones, just array coordinates) xx, yy = np.meshgrid(np.arange(0, rast_elev.shape[1]), np.arange(0, rast_elev.shape[0])) From 86f9d364f0115fdfbb3504b28d9f169122a81360 Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Thu, 28 Mar 2024 14:14:41 -0800 Subject: [PATCH 40/54] Linting --- xdem/coreg/biascorr.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/xdem/coreg/biascorr.py b/xdem/coreg/biascorr.py index b9e87674..416dc8f4 100644 --- a/xdem/coreg/biascorr.py +++ b/xdem/coreg/biascorr.py @@ -1074,7 +1074,7 @@ def _fit_rst_rst( # type: ignore ) -> None: # The number of parameters in the first guess defines the polynomial order when calling np.polyval2d - p0 = np.ones(shape=((self._meta["poly_order"] + 1) **2)) + p0 = np.ones(shape=((self._meta["poly_order"] + 1) ** 2)) # Coordinates (we don't need the actual ones, just array coordinates) xx, yy = np.meshgrid(np.arange(0, ref_elev.shape[1]), np.arange(0, ref_elev.shape[0])) From 37abb639ddea015994d39b8e80424520449d2deb Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Thu, 28 Mar 2024 14:26:49 -0800 Subject: [PATCH 41/54] Try remove the force opencv within first env build --- .github/workflows/python-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/python-tests.yml b/.github/workflows/python-tests.yml index dac4b786..57a4350a 100644 --- a/.github/workflows/python-tests.yml +++ b/.github/workflows/python-tests.yml @@ -61,7 +61,7 @@ jobs: if: steps.cache.outputs.cache-hit != 'true' run: | mamba install pyyaml python=${{ matrix.python-version }} - python .github/scripts/generate_yml_env_fixed_py.py --pyv ${{ matrix.python-version }} --add "graphviz,opencv,pytransform3d" "environment.yml" + python .github/scripts/generate_yml_env_fixed_py.py --pyv ${{ matrix.python-version }} --add "graphviz,pytransform3d" "environment.yml" mamba env update -n xdem-dev -f environment-ci-py${{ matrix.python-version }}.yml - name: Install project From a592e50c9477653b54bb8ed7ea7c0a94305eea09 Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Thu, 28 Mar 2024 14:27:18 -0800 Subject: [PATCH 42/54] And reset cache number --- .github/workflows/python-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/python-tests.yml b/.github/workflows/python-tests.yml index 57a4350a..0708c2c8 100644 --- a/.github/workflows/python-tests.yml +++ b/.github/workflows/python-tests.yml @@ -50,7 +50,7 @@ jobs: path: ${{ env.CONDA }}/envs key: conda-${{ matrix.os }}-${{ matrix.python-version }}-${{ env.cache_date }}-${{ hashFiles('dev-environment.yml') }}-${{ env.CACHE_NUMBER }} env: - CACHE_NUMBER: 1 # Increase this value to reset cache if environment.yml has not changed + CACHE_NUMBER: 0 # Increase this value to reset cache if environment.yml has not changed id: cache # The trick below is necessary because the generic environment file does not specify a Python version, and ONLY From 2c6c95ad6db8e6b62494af8aeb5e1dc143d39007 Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Thu, 28 Mar 2024 14:31:46 -0800 Subject: [PATCH 43/54] Put opencv again in first build --- .github/workflows/python-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/python-tests.yml b/.github/workflows/python-tests.yml index 0708c2c8..21bc4f3d 100644 --- a/.github/workflows/python-tests.yml +++ b/.github/workflows/python-tests.yml @@ -61,7 +61,7 @@ jobs: if: steps.cache.outputs.cache-hit != 'true' run: | mamba install pyyaml python=${{ matrix.python-version }} - python .github/scripts/generate_yml_env_fixed_py.py --pyv ${{ matrix.python-version }} --add "graphviz,pytransform3d" "environment.yml" + python .github/scripts/generate_yml_env_fixed_py.py --pyv ${{ matrix.python-version }} --add "graphviz,pytransform3d,opencv" "environment.yml" mamba env update -n xdem-dev -f environment-ci-py${{ matrix.python-version }}.yml - name: Install project From 4e3f65ea3ab2a54603064342b50149202b7cdd93 Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Thu, 28 Mar 2024 14:43:56 -0800 Subject: [PATCH 44/54] Also force openh264 --- .github/workflows/python-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/python-tests.yml b/.github/workflows/python-tests.yml index 21bc4f3d..c7fbad0f 100644 --- a/.github/workflows/python-tests.yml +++ b/.github/workflows/python-tests.yml @@ -61,7 +61,7 @@ jobs: if: steps.cache.outputs.cache-hit != 'true' run: | mamba install pyyaml python=${{ matrix.python-version }} - python .github/scripts/generate_yml_env_fixed_py.py --pyv ${{ matrix.python-version }} --add "graphviz,pytransform3d,opencv" "environment.yml" + python .github/scripts/generate_yml_env_fixed_py.py --pyv ${{ matrix.python-version }} --add "graphviz,pytransform3d,opencv,openh264" "environment.yml" mamba env update -n xdem-dev -f environment-ci-py${{ matrix.python-version }}.yml - name: Install project From b5854c034033e6ffd36523d81bd4232071206968 Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Thu, 28 Mar 2024 16:58:53 -0800 Subject: [PATCH 45/54] Test without reputting in dev env --- dev-environment.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dev-environment.yml b/dev-environment.yml index 9169f748..bd48410d 100644 --- a/dev-environment.yml +++ b/dev-environment.yml @@ -19,8 +19,8 @@ dependencies: - pip # Optional dependencies - - opencv - - openh264 +# - opencv +# - openh264 - pytransform3d - richdem From 2b85933931811faaba7bb95a05d1435356f533e9 Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Thu, 28 Mar 2024 18:10:08 -0800 Subject: [PATCH 46/54] Add step that prints env --- .github/workflows/python-tests.yml | 5 +++++ dev-environment.yml | 4 ++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/.github/workflows/python-tests.yml b/.github/workflows/python-tests.yml index c7fbad0f..d8b1c000 100644 --- a/.github/workflows/python-tests.yml +++ b/.github/workflows/python-tests.yml @@ -99,6 +99,11 @@ jobs: - name: Setup pip dependencies run: pip install pytest-cov coveralls coveragepy-lcov + - name: Print conda environment + run: | + conda info + conda list + - name: Test with pytest run: | # We unset the PROJ_DATA environment variable to make PROJ work on Windows diff --git a/dev-environment.yml b/dev-environment.yml index bd48410d..9169f748 100644 --- a/dev-environment.yml +++ b/dev-environment.yml @@ -19,8 +19,8 @@ dependencies: - pip # Optional dependencies -# - opencv -# - openh264 + - opencv + - openh264 - pytransform3d - richdem From 2b9a85ee24e7a7ba7e869ced410bb735638228eb Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Thu, 28 Mar 2024 18:22:47 -0800 Subject: [PATCH 47/54] Raise import error details --- xdem/coreg/affine.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/xdem/coreg/affine.py b/xdem/coreg/affine.py index 49747616..3edad2dd 100644 --- a/xdem/coreg/affine.py +++ b/xdem/coreg/affine.py @@ -11,8 +11,9 @@ import cv2 _has_cv2 = True -except ImportError: +except ImportError as e: _has_cv2 = False + raise e import geopandas as gpd import numpy as np import rasterio as rio From 7308dc6b56d723c44abfe90224a38f643d5cd2ba Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Thu, 28 Mar 2024 18:32:12 -0800 Subject: [PATCH 48/54] Try through pip --- .github/workflows/python-tests.yml | 2 +- dev-environment.yml | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/.github/workflows/python-tests.yml b/.github/workflows/python-tests.yml index d8b1c000..4fdf6249 100644 --- a/.github/workflows/python-tests.yml +++ b/.github/workflows/python-tests.yml @@ -61,7 +61,7 @@ jobs: if: steps.cache.outputs.cache-hit != 'true' run: | mamba install pyyaml python=${{ matrix.python-version }} - python .github/scripts/generate_yml_env_fixed_py.py --pyv ${{ matrix.python-version }} --add "graphviz,pytransform3d,opencv,openh264" "environment.yml" + python .github/scripts/generate_yml_env_fixed_py.py --pyv ${{ matrix.python-version }} --add "graphviz,pytransform3d" "environment.yml" mamba env update -n xdem-dev -f environment-ci-py${{ matrix.python-version }}.yml - name: Install project diff --git a/dev-environment.yml b/dev-environment.yml index 9169f748..028c9022 100644 --- a/dev-environment.yml +++ b/dev-environment.yml @@ -19,8 +19,6 @@ dependencies: - pip # Optional dependencies - - opencv - - openh264 - pytransform3d - richdem @@ -45,6 +43,7 @@ dependencies: - numpydoc - pip: + - opencv-python - -e ./ # Optional dependencies From a310d58d0c350f3c49408bbb943d9cadf30d1052 Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Thu, 28 Mar 2024 18:37:02 -0800 Subject: [PATCH 49/54] Remove error raising --- xdem/coreg/affine.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/xdem/coreg/affine.py b/xdem/coreg/affine.py index 3edad2dd..abbf30bf 100644 --- a/xdem/coreg/affine.py +++ b/xdem/coreg/affine.py @@ -13,7 +13,7 @@ _has_cv2 = True except ImportError as e: _has_cv2 = False - raise e + # raise e import geopandas as gpd import numpy as np import rasterio as rio From f4296c0a0169dcd9a622e860e4ce007b038dddf8 Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Thu, 28 Mar 2024 19:01:55 -0800 Subject: [PATCH 50/54] Try with opencv action setup --- .github/workflows/python-tests.yml | 8 ++++++-- dev-environment.yml | 3 ++- xdem/coreg/affine.py | 1 - 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/.github/workflows/python-tests.yml b/.github/workflows/python-tests.yml index 4fdf6249..5240d6ea 100644 --- a/.github/workflows/python-tests.yml +++ b/.github/workflows/python-tests.yml @@ -26,6 +26,10 @@ jobs: steps: - uses: actions/checkout@v4 + # To ensure opencv finds the right paths + - uses: Dovyski/setup-opencv-action@v1.1 + with: + opencv-version: '4.0.0' # We initiate the environment empty, and check if a key for this environment doesn't already exist in the cache - name: Initiate empty environment @@ -61,7 +65,7 @@ jobs: if: steps.cache.outputs.cache-hit != 'true' run: | mamba install pyyaml python=${{ matrix.python-version }} - python .github/scripts/generate_yml_env_fixed_py.py --pyv ${{ matrix.python-version }} --add "graphviz,pytransform3d" "environment.yml" + python .github/scripts/generate_yml_env_fixed_py.py --pyv ${{ matrix.python-version }} --add "graphviz,pytransform3d,opencv" "environment.yml" mamba env update -n xdem-dev -f environment-ci-py${{ matrix.python-version }}.yml - name: Install project @@ -99,7 +103,7 @@ jobs: - name: Setup pip dependencies run: pip install pytest-cov coveralls coveragepy-lcov - - name: Print conda environment + - name: Print conda environment (for debugging) run: | conda info conda list diff --git a/dev-environment.yml b/dev-environment.yml index 028c9022..9169f748 100644 --- a/dev-environment.yml +++ b/dev-environment.yml @@ -19,6 +19,8 @@ dependencies: - pip # Optional dependencies + - opencv + - openh264 - pytransform3d - richdem @@ -43,7 +45,6 @@ dependencies: - numpydoc - pip: - - opencv-python - -e ./ # Optional dependencies diff --git a/xdem/coreg/affine.py b/xdem/coreg/affine.py index abbf30bf..3eb8a418 100644 --- a/xdem/coreg/affine.py +++ b/xdem/coreg/affine.py @@ -13,7 +13,6 @@ _has_cv2 = True except ImportError as e: _has_cv2 = False - # raise e import geopandas as gpd import numpy as np import rasterio as rio From ed24db2b73167f92320b9213762be09f367fb47b Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Thu, 28 Mar 2024 19:06:14 -0800 Subject: [PATCH 51/54] Reverse --- .github/workflows/python-tests.yml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/.github/workflows/python-tests.yml b/.github/workflows/python-tests.yml index 5240d6ea..4bc45ca4 100644 --- a/.github/workflows/python-tests.yml +++ b/.github/workflows/python-tests.yml @@ -26,10 +26,6 @@ jobs: steps: - uses: actions/checkout@v4 - # To ensure opencv finds the right paths - - uses: Dovyski/setup-opencv-action@v1.1 - with: - opencv-version: '4.0.0' # We initiate the environment empty, and check if a key for this environment doesn't already exist in the cache - name: Initiate empty environment From 2919aa924485d6152652cd1603a662311ba6dace Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Thu, 28 Mar 2024 19:10:31 -0800 Subject: [PATCH 52/54] Linting --- xdem/coreg/affine.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/xdem/coreg/affine.py b/xdem/coreg/affine.py index 3eb8a418..49747616 100644 --- a/xdem/coreg/affine.py +++ b/xdem/coreg/affine.py @@ -11,7 +11,7 @@ import cv2 _has_cv2 = True -except ImportError as e: +except ImportError: _has_cv2 = False import geopandas as gpd import numpy as np From fa324a97b1b72e2ed24574826b13853c93db78a0 Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Fri, 29 Mar 2024 00:25:33 -0800 Subject: [PATCH 53/54] Let's go headless --- .github/workflows/python-tests.yml | 2 +- dev-environment.yml | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/.github/workflows/python-tests.yml b/.github/workflows/python-tests.yml index 4bc45ca4..a0a064ac 100644 --- a/.github/workflows/python-tests.yml +++ b/.github/workflows/python-tests.yml @@ -61,7 +61,7 @@ jobs: if: steps.cache.outputs.cache-hit != 'true' run: | mamba install pyyaml python=${{ matrix.python-version }} - python .github/scripts/generate_yml_env_fixed_py.py --pyv ${{ matrix.python-version }} --add "graphviz,pytransform3d,opencv" "environment.yml" + python .github/scripts/generate_yml_env_fixed_py.py --pyv ${{ matrix.python-version }} --add "graphviz,pytransform3d" "environment.yml" mamba env update -n xdem-dev -f environment-ci-py${{ matrix.python-version }}.yml - name: Install project diff --git a/dev-environment.yml b/dev-environment.yml index 9169f748..345cbd0b 100644 --- a/dev-environment.yml +++ b/dev-environment.yml @@ -19,8 +19,6 @@ dependencies: - pip # Optional dependencies - - opencv - - openh264 - pytransform3d - richdem @@ -49,6 +47,7 @@ dependencies: # Optional dependencies - noisyopt + - opencv-python-headless # To run CI against latest GeoUtils # - git+https://github.com/rhugonnet/geoutils.git@fix_to_points From 9a1611e1f24d04bbc001fd47774748f928998d59 Mon Sep 17 00:00:00 2001 From: Romain Hugonnet Date: Fri, 29 Mar 2024 09:00:51 -0800 Subject: [PATCH 54/54] Change to contrib --- dev-environment.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/dev-environment.yml b/dev-environment.yml index 345cbd0b..759c9c95 100644 --- a/dev-environment.yml +++ b/dev-environment.yml @@ -47,7 +47,8 @@ dependencies: # Optional dependencies - noisyopt - - opencv-python-headless + # "Headless" needed for opencv to install without requiring system dependencies + - opencv-contrib-python-headless # To run CI against latest GeoUtils # - git+https://github.com/rhugonnet/geoutils.git@fix_to_points