diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index ae49c0ef..e064fedc 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -65,3 +65,9 @@ repos:
     rev: v1.7.3
     hooks:
       - id: actionlint
+  - repo: https://github.com/pre-commit/mirrors-mypy
+    rev: v1.11.2
+    hooks:
+      - id: mypy
+        additional_dependencies:
+          - numpy
diff --git a/docs/conf.py b/docs/conf.py
index d42ebc90..05b15944 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -54,7 +54,7 @@
 html_logo = "_static/logo.png"
 html_favicon = "_static/favicon.ico"
 
-html_css_files = []
+html_css_files = []  # type: ignore[var-annotated]
 
 
 # -- Intersphinx -------------------------------------------------------------
diff --git a/glass/core/array.py b/glass/core/array.py
index ff19ba2e..a30e3fb7 100644
--- a/glass/core/array.py
+++ b/glass/core/array.py
@@ -5,14 +5,14 @@
 import numpy as np
 
 
-def broadcast_first(*arrays):
+def broadcast_first(*arrays):  # type: ignore[no-untyped-def]
     """Broadcast arrays, treating the first axis as common."""
     arrays = tuple(np.moveaxis(a, 0, -1) if np.ndim(a) else a for a in arrays)
     arrays = np.broadcast_arrays(*arrays)
     return tuple(np.moveaxis(a, -1, 0) if np.ndim(a) else a for a in arrays)
 
 
-def broadcast_leading_axes(*args):
+def broadcast_leading_axes(*args):  # type: ignore[no-untyped-def]
     """
     Broadcast all but the last N axes.
 
@@ -49,7 +49,7 @@ def broadcast_leading_axes(*args):
     return (dims, *arrs)
 
 
-def ndinterp(x, xp, fp, axis=-1, left=None, right=None, period=None):  # noqa: PLR0913
+def ndinterp(x, xp, fp, axis=-1, left=None, right=None, period=None):  # type: ignore[no-untyped-def] # noqa: PLR0913
     """Interpolate multi-dimensional array over axis."""
     return np.apply_along_axis(
         partial(np.interp, x, xp),
@@ -61,7 +61,7 @@ def ndinterp(x, xp, fp, axis=-1, left=None, right=None, period=None):  # noqa: P
     )
 
 
-def trapz_product(f, *ff, axis=-1):
+def trapz_product(f, *ff, axis=-1):  # type: ignore[no-untyped-def]
     """Trapezoidal rule for a product of functions."""
     x, _ = f
     for x_, _ in ff:
@@ -72,10 +72,10 @@ def trapz_product(f, *ff, axis=-1):
     y = np.interp(x, *f)
     for f_ in ff:
         y *= np.interp(x, *f_)
-    return np.trapz(y, x, axis=axis)
+    return np.trapz(y, x, axis=axis)  # type: ignore[attr-defined]
 
 
-def cumtrapz(f, x, dtype=None, out=None):
+def cumtrapz(f, x, dtype=None, out=None):  # type: ignore[no-untyped-def]
     """Cumulative trapezoidal rule along last axis."""
     if out is None:
         out = np.empty_like(f, dtype=dtype)
diff --git a/glass/ext/__init__.py b/glass/ext/__init__.py
index df401e13..ad17a437 100644
--- a/glass/ext/__init__.py
+++ b/glass/ext/__init__.py
@@ -7,7 +7,7 @@
 """
 
 
-def _extend_path(path, name) -> list:
+def _extend_path(path, name) -> list:  # type: ignore[no-untyped-def, type-arg]
     import os.path
     from pkgutil import extend_path
 
diff --git a/glass/fields.py b/glass/fields.py
index 467b1cd2..d2a8cdb8 100644
--- a/glass/fields.py
+++ b/glass/fields.py
@@ -37,17 +37,19 @@
 
 # types
 Size = typing.Optional[typing.Union[int, tuple[int, ...]]]
-Iternorm = tuple[typing.Optional[int], npt.NDArray, npt.NDArray]
-ClTransform = typing.Union[str, typing.Callable[[npt.NDArray], npt.NDArray]]
+Iternorm = tuple[typing.Optional[int], npt.NDArray[typing.Any], npt.NDArray[typing.Any]]
+ClTransform = typing.Union[
+    str, typing.Callable[[npt.NDArray[typing.Any]], npt.NDArray[typing.Any]]
+]
 Cls = collections.abc.Sequence[
-    typing.Union[npt.NDArray, collections.abc.Sequence[float]]
+    typing.Union[npt.NDArray[typing.Any], collections.abc.Sequence[float]]
 ]
-Alms = npt.NDArray
+Alms = npt.NDArray[typing.Any]
def iternorm( k: int, - cov: collections.abc.Iterable[npt.NDArray], + cov: collections.abc.Iterable[npt.NDArray[typing.Any]], size: Size = None, ) -> collections.abc.Generator[Iternorm, None, None]: """Return the vector a and variance sigma^2 for iterative normal sampling.""" @@ -109,7 +111,7 @@ def iternorm( def cls2cov( cls: Cls, nl: int, nf: int, nc: int -) -> collections.abc.Generator[npt.NDArray, None, None]: +) -> collections.abc.Generator[npt.NDArray[typing.Any], None, None]: """Return array of cls as a covariance matrix for iterative sampling.""" cov = np.zeros((nl, nc + 1)) end = 0 @@ -117,7 +119,7 @@ def cls2cov( begin, end = end, end + j + 1 for i, cl in enumerate(cls[begin:end][: nc + 1]): if cl is None: - cov[:, i] = 0 + cov[:, i] = 0 # type: ignore[unreachable] else: if i == 0 and np.any(np.less(cl, 0)): msg = "negative values in cl" @@ -129,7 +131,7 @@ def cls2cov( yield cov -def multalm(alm: Alms, bl: npt.NDArray, *, inplace: bool = False) -> Alms: +def multalm(alm: Alms, bl: npt.NDArray[typing.Any], *, inplace: bool = False) -> Alms: """Multiply alm by bl.""" n = len(bl) out = np.asanyarray(alm) if inplace else np.copy(alm) @@ -142,7 +144,7 @@ def transform_cls(cls: Cls, tfm: ClTransform, pars: tuple[typing.Any, ...] = ()) """Transform Cls to Gaussian Cls.""" gls = [] for cl in cls: - if cl is not None and len(cl) > 0: + if cl is not None and len(cl) > 0: # type: ignore[redundant-expr] monopole = 0.0 if cl[0] == 0 else None gl, info, _, _ = gaussiancl(cl, tfm, pars, monopole=monopole) if info == 0: @@ -187,7 +189,7 @@ def gaussian_gls( gls = [] for cl in cls: - if cl is not None and len(cl) > 0: + if cl is not None and len(cl) > 0: # type: ignore[redundant-expr] if lmax is not None: cl = cl[: lmax + 1] # noqa: PLW2901 if nside is not None: @@ -216,7 +218,7 @@ def generate_gaussian( *, ncorr: int | None = None, rng: np.random.Generator | None = None, -) -> collections.abc.Generator[npt.NDArray, None, None]: +) -> collections.abc.Generator[npt.NDArray[typing.Any], None, None]: """ Sample Gaussian random fields from Cls iteratively. @@ -253,7 +255,7 @@ def generate_gaussian( ncorr = ngrf - 1 # number of modes - n = max((len(gl) for gl in gls if gl is not None), default=0) + n = max((len(gl) for gl in gls if gl is not None), default=0) # type: ignore[redundant-expr] if n == 0: msg = "all gls are empty" raise ValueError(msg) @@ -302,7 +304,7 @@ def generate_lognormal( *, ncorr: int | None = None, rng: np.random.Generator | None = None, -) -> collections.abc.Generator[npt.NDArray, None, None]: +) -> collections.abc.Generator[npt.NDArray[typing.Any], None, None]: """Sample lognormal random fields from Gaussian Cls iteratively.""" for i, m in enumerate(generate_gaussian(gls, nside, ncorr=ncorr, rng=rng)): # compute the variance of the auto-correlation @@ -324,7 +326,7 @@ def generate_lognormal( yield m -def getcl(cls, i, j, lmax=None): +def getcl(cls, i, j, lmax=None): # type: ignore[no-untyped-def] """ Return a specific angular power spectrum from an array. 
@@ -354,7 +356,7 @@ def getcl(cls, i, j, lmax=None): return cl -def effective_cls( +def effective_cls( # type: ignore[no-untyped-def] cls, weights1, weights2=None, *, lmax=None ) -> npt.NDArray[np.float64]: r""" @@ -408,7 +410,7 @@ def effective_cls( if weights2 is weights1: pairs = combinations_with_replacement(np.ndindex(shape1[1:]), 2) else: - pairs = product(np.ndindex(shape1[1:]), np.ndindex(shape2[1:])) + pairs = product(np.ndindex(shape1[1:]), np.ndindex(shape2[1:])) # type: ignore[assignment] # create the output array: axes for all input axes plus lmax+1 out = np.empty(shape1[1:] + shape2[1:] + (lmax + 1,)) @@ -421,7 +423,7 @@ def effective_cls( for j1, j2 in pairs: w1, w2 = weights1[c + j1], weights2[c + j2] cl = sum( - w1[i1] * w2[i2] * getcl(cls, i1, i2, lmax=lmax) + w1[i1] * w2[i2] * getcl(cls, i1, i2, lmax=lmax) # type: ignore[no-untyped-call] for i1, i2 in np.ndindex(n, n) ) out[j1 + j2] = cl diff --git a/glass/galaxies.py b/glass/galaxies.py index 4ca57cd1..c36b388b 100644 --- a/glass/galaxies.py +++ b/glass/galaxies.py @@ -37,7 +37,7 @@ def redshifts( w: RadialWindow, *, rng: np.random.Generator | None = None, -) -> npt.NDArray: +) -> npt.NDArray[typing.Any]: """ Sample redshifts from a radial window function. @@ -67,7 +67,7 @@ def redshifts_from_nz( *, rng: np.random.Generator | None = None, warn: bool = True, -) -> npt.NDArray: +) -> npt.NDArray[typing.Any]: """ Generate galaxy redshifts from a source distribution. @@ -111,10 +111,10 @@ def redshifts_from_nz( rng = np.random.default_rng() # bring inputs' leading axes into common shape - dims, count, z, nz = broadcast_leading_axes((count, 0), (z, 1), (nz, 1)) + dims, count, z, nz = broadcast_leading_axes((count, 0), (z, 1), (nz, 1)) # type: ignore[no-untyped-call] # list of results for all dimensions - redshifts = np.empty(count.sum()) + redshifts = np.empty(count.sum()) # type: ignore[union-attr] # keep track of the number of sampled redshifts total = 0 @@ -122,16 +122,16 @@ def redshifts_from_nz( # go through extra dimensions; also works if dims is empty for k in np.ndindex(dims): # compute the CDF of each galaxy population - cdf = cumtrapz(nz[k], z[k], dtype=float) + cdf = cumtrapz(nz[k], z[k], dtype=float) # type: ignore[call-overload, index, no-untyped-call] cdf /= cdf[-1] # sample redshifts and store result - redshifts[total : total + count[k]] = np.interp( - rng.uniform(0, 1, size=count[k]), + redshifts[total : total + count[k]] = np.interp( # type: ignore[call-overload, index, misc, operator] + rng.uniform(0, 1, size=count[k]), # type: ignore[arg-type, call-overload, index] cdf, - z[k], + z[k], # type: ignore[arg-type, call-overload, index] ) - total += count[k] + total += count[k] # type: ignore[assignment, call-overload, index, operator] assert total == redshifts.size # noqa: S101 @@ -139,15 +139,15 @@ def redshifts_from_nz( def galaxy_shear( # noqa: PLR0913 - lon: npt.NDArray, - lat: npt.NDArray, - eps: npt.NDArray, - kappa: npt.NDArray, - gamma1: npt.NDArray, - gamma2: npt.NDArray, + lon: npt.NDArray[typing.Any], + lat: npt.NDArray[typing.Any], + eps: npt.NDArray[typing.Any], + kappa: npt.NDArray[typing.Any], + gamma1: npt.NDArray[typing.Any], + gamma2: npt.NDArray[typing.Any], *, reduced_shear: bool = True, -) -> npt.NDArray: +) -> npt.NDArray[typing.Any]: """ Observed galaxy shears from weak lensing. 
@@ -212,7 +212,7 @@ def gaussian_phz( lower: npt.ArrayLike | None = None, upper: npt.ArrayLike | None = None, rng: np.random.Generator | None = None, -) -> npt.NDArray: +) -> npt.NDArray[typing.Any]: r""" Photometric redshifts assuming a Gaussian error. @@ -264,26 +264,26 @@ def gaussian_phz( sigma = np.add(1, z) * sigma_0 dims = np.shape(sigma) - zphot = rng.normal(z, sigma) + zphot = rng.normal(z, sigma) # type: ignore[arg-type] if lower is None: lower = 0.0 if upper is None: upper = np.inf - if not np.all(lower < upper): + if not np.all(lower < upper): # type: ignore[operator] msg = "requires lower < upper" raise ValueError(msg) if not dims: - while zphot < lower or zphot > upper: - zphot = rng.normal(z, sigma) + while zphot < lower or zphot > upper: # type: ignore[operator] + zphot = rng.normal(z, sigma) # type: ignore[arg-type] else: z = np.broadcast_to(z, dims) - trunc = np.where((zphot < lower) | (zphot > upper))[0] + trunc = np.where((zphot < lower) | (zphot > upper))[0] # type: ignore[operator] while trunc.size: - znew = rng.normal(z[trunc], sigma[trunc]) - zphot[trunc] = znew - trunc = trunc[(znew < lower) | (znew > upper)] + znew = rng.normal(z[trunc], sigma[trunc]) # type: ignore[arg-type] + zphot[trunc] = znew # type: ignore[index] + trunc = trunc[(znew < lower) | (znew > upper)] # type: ignore[operator] - return zphot + return zphot # type: ignore[return-value] diff --git a/glass/lensing.py b/glass/lensing.py index 4ad1b936..50b1b0c4 100644 --- a/glass/lensing.py +++ b/glass/lensing.py @@ -46,14 +46,14 @@ def from_convergence( # noqa: PLR0913 - kappa: npt.NDArray, + kappa: npt.NDArray[typing.Any], lmax: int | None = None, *, potential: bool = False, deflection: bool = False, shear: bool = False, discretized: bool = True, -) -> tuple[npt.NDArray, ...]: +) -> tuple[npt.NDArray[typing.Any], ...]: r""" Compute other weak lensing maps from the convergence. @@ -180,7 +180,7 @@ def from_convergence( # noqa: PLR0913 # if potential is requested, compute map and add to output if potential: psi = hp.alm2map(alm, nside, lmax=lmax) - results += (psi,) + results += (psi,) # type: ignore[assignment] # if no spin-weighted maps are requested, stop here if not (deflection or shear): @@ -199,7 +199,7 @@ def from_convergence( # noqa: PLR0913 if deflection: alpha = hp.alm2map_spin([alm, blm], nside, 1, lmax) alpha = alpha[0] + 1j * alpha[1] - results += (alpha,) + results += (alpha,) # type: ignore[assignment] # if no shear is requested, stop here if not shear: @@ -217,18 +217,18 @@ def from_convergence( # noqa: PLR0913 # transform to shear maps gamma = hp.alm2map_spin([alm, blm], nside, 2, lmax) gamma = gamma[0] + 1j * gamma[1] - results += (gamma,) + results += (gamma,) # type: ignore[assignment] # all done return results def shear_from_convergence( - kappa: npt.NDArray, + kappa: npt.NDArray[typing.Any], lmax: int | None = None, *, discretized: bool = True, -) -> npt.NDArray: +) -> npt.NDArray[typing.Any]: r""" Weak lensing shear from convergence. 
@@ -264,7 +264,7 @@ def shear_from_convergence( hp.almxfl(alm, fl, inplace=True) # transform to shear maps - return hp.alm2map_spin([alm, blm], nside, 2, lmax) + return hp.alm2map_spin([alm, blm], nside, 2, lmax) # type: ignore[no-any-return] class MultiPlaneConvergence: @@ -280,11 +280,11 @@ def __init__(self, cosmo: Cosmology) -> None: self.x3: float = 0.0 self.w3: float = 0.0 self.r23: float = 1.0 - self.delta3: npt.NDArray = np.array(0.0) - self.kappa2: npt.NDArray | None = None - self.kappa3: npt.NDArray | None = None + self.delta3: npt.NDArray[typing.Any] = np.array(0.0) + self.kappa2: npt.NDArray[typing.Any] | None = None + self.kappa3: npt.NDArray[typing.Any] | None = None - def add_window(self, delta: npt.NDArray, w: RadialWindow) -> None: + def add_window(self, delta: npt.NDArray[typing.Any], w: RadialWindow) -> None: """ Add a mass plane from a window function to the convergence. @@ -293,11 +293,13 @@ def add_window(self, delta: npt.NDArray, w: RadialWindow) -> None: """ zsrc = w.zeff - lens_weight = np.trapz(w.wa, w.za) / np.interp(zsrc, w.za, w.wa) + lens_weight = np.trapz(w.wa, w.za) / np.interp(zsrc, w.za, w.wa) # type: ignore[attr-defined] self.add_plane(delta, zsrc, lens_weight) - def add_plane(self, delta: npt.NDArray, zsrc: float, wlens: float = 1.0) -> None: + def add_plane( + self, delta: npt.NDArray[typing.Any], zsrc: float, wlens: float = 1.0 + ) -> None: """Add a mass plane at redshift ``zsrc`` to the convergence.""" if zsrc <= self.z3: msg = "source redshift must be increasing" @@ -346,12 +348,12 @@ def zsrc(self) -> float: return self.z3 @property - def kappa(self) -> npt.NDArray | None: + def kappa(self) -> npt.NDArray[typing.Any] | None: """The current convergence plane.""" return self.kappa3 @property - def delta(self) -> npt.NDArray: + def delta(self) -> npt.NDArray[typing.Any]: """The current matter plane.""" return self.delta3 @@ -411,12 +413,12 @@ def multi_plane_weights( weights = weights / np.sum(weights, axis=0) # combine weights and the matrix of lensing contributions mat = multi_plane_matrix(shells, cosmo) - return np.matmul(mat.T, weights) + return np.matmul(mat.T, weights) # type: ignore[no-any-return, union-attr] def deflect( lon: npt.ArrayLike, lat: npt.ArrayLike, alpha: npt.ArrayLike -) -> npt.NDArray: +) -> npt.NDArray[typing.Any]: r""" Apply deflections to positions. @@ -470,4 +472,4 @@ def deflect( d = np.arctan2(sa * sg, st * ca - ct * sa * cg) - return lon - np.degrees(d), np.degrees(tp) + return lon - np.degrees(d), np.degrees(tp) # type: ignore[return-value] diff --git a/glass/observations.py b/glass/observations.py index e51af6e3..37325470 100644 --- a/glass/observations.py +++ b/glass/observations.py @@ -29,6 +29,7 @@ from __future__ import annotations import math +import typing import healpy as hp import numpy as np @@ -41,7 +42,7 @@ def vmap_galactic_ecliptic( nside: int, galactic: tuple[float, float] = (30, 90), ecliptic: tuple[float, float] = (20, 80), -) -> npt.NDArray: +) -> npt.NDArray[typing.Any]: """ Visibility map masking galactic and ecliptic plane. 
@@ -80,16 +81,16 @@ def vmap_galactic_ecliptic( m[hp.query_strip(nside, *galactic)] = 0 m = hp.Rotator(coord="GC").rotate_map_pixel(m) m[hp.query_strip(nside, *ecliptic)] = 0 - return hp.Rotator(coord="CE").rotate_map_pixel(m) + return hp.Rotator(coord="CE").rotate_map_pixel(m) # type: ignore[no-any-return] def gaussian_nz( - z: npt.NDArray, + z: npt.NDArray[typing.Any], mean: npt.ArrayLike, sigma: npt.ArrayLike, *, norm: npt.ArrayLike | None = None, -) -> npt.NDArray: +) -> npt.NDArray[typing.Any]: r""" Gaussian redshift distribution. @@ -120,22 +121,22 @@ def gaussian_nz( sigma = np.reshape(sigma, np.shape(sigma) + (1,) * np.ndim(z)) nz = np.exp(-(((z - mean) / sigma) ** 2) / 2) - nz /= np.trapz(nz, z, axis=-1)[..., np.newaxis] + nz /= np.trapz(nz, z, axis=-1)[..., np.newaxis] # type: ignore[attr-defined] if norm is not None: nz *= norm - return nz + return nz # type: ignore[no-any-return] def smail_nz( - z: npt.NDArray, + z: npt.NDArray[typing.Any], z_mode: npt.ArrayLike, alpha: npt.ArrayLike, beta: npt.ArrayLike, *, norm: npt.ArrayLike | None = None, -) -> npt.NDArray: +) -> npt.NDArray[typing.Any]: r""" Redshift distribution following Smail et al. (1994). @@ -182,12 +183,12 @@ def smail_nz( beta = np.asanyarray(beta)[..., np.newaxis] pz = z**alpha * np.exp(-alpha / beta * (z / z_mode) ** beta) - pz /= np.trapz(pz, z, axis=-1)[..., np.newaxis] + pz /= np.trapz(pz, z, axis=-1)[..., np.newaxis] # type: ignore[attr-defined] if norm is not None: pz *= norm - return pz + return pz # type: ignore[no-any-return] def fixed_zbins( @@ -232,8 +233,8 @@ def fixed_zbins( def equal_dens_zbins( - z: npt.NDArray, - nz: npt.NDArray, + z: npt.NDArray[typing.Any], + nz: npt.NDArray[typing.Any], nbins: int, ) -> list[tuple[float, float]]: """ @@ -261,7 +262,7 @@ def equal_dens_zbins( # first compute the cumulative integral (by trapezoidal rule) # then normalise: the first z is at CDF = 0, the last z at CDF = 1 # interpolate to find the z values at CDF = i/nbins for i = 0, ..., nbins - cuml_nz = cumtrapz(nz, z) + cuml_nz = cumtrapz(nz, z) # type: ignore[no-untyped-call] cuml_nz /= cuml_nz[[-1]] zbinedges = np.interp(np.linspace(0, 1, nbins + 1), cuml_nz, z) @@ -269,11 +270,11 @@ def equal_dens_zbins( def tomo_nz_gausserr( - z: npt.NDArray, - nz: npt.NDArray, + z: npt.NDArray[typing.Any], + nz: npt.NDArray[typing.Any], sigma_0: float, zbins: list[tuple[float, float]], -) -> npt.NDArray: +) -> npt.NDArray[typing.Any]: """ Tomographic redshift bins with a Gaussian redshift error. @@ -315,7 +316,7 @@ def tomo_nz_gausserr( """ # converting zbins into an array: - zbins_arr = np.asanyarray(zbins) # type: ignore[no-redef] + zbins_arr = np.asanyarray(zbins) # bin edges and adds a new axis z_lower = zbins_arr[:, 0, np.newaxis] @@ -333,4 +334,4 @@ def tomo_nz_gausserr( binned_nz /= 1 + erf(z / sz) binned_nz *= nz - return binned_nz + return binned_nz # type: ignore[no-any-return] diff --git a/glass/points.py b/glass/points.py index 26fc5e0a..1bc397a8 100644 --- a/glass/points.py +++ b/glass/points.py @@ -37,7 +37,7 @@ ARCMIN2_SPHERE = 60**6 // 100 / np.pi -def effective_bias(z, bz, w): +def effective_bias(z, bz, w): # type: ignore[no-untyped-def] r""" Effective bias parameter from a redshift-dependent bias function. @@ -70,16 +70,16 @@ def effective_bias(z, bz, w): \;. 
""" - norm = np.trapz(w.wa, w.za) - return trapz_product((z, bz), (w.za, w.wa)) / norm + norm = np.trapz(w.wa, w.za) # type: ignore[attr-defined] + return trapz_product((z, bz), (w.za, w.wa)) / norm # type: ignore[no-untyped-call] -def linear_bias(delta, b): +def linear_bias(delta, b): # type: ignore[no-untyped-def] r"""Linear bias model :math:`\delta_g = b \, \delta`.""" return b * delta -def loglinear_bias(delta, b): +def loglinear_bias(delta, b): # type: ignore[no-untyped-def] r"""log-linear bias model :math:`\ln(1 + \delta_g) = b \ln(1 + \delta)`.""" delta_g = np.log1p(delta) delta_g *= b @@ -87,7 +87,7 @@ def loglinear_bias(delta, b): return delta_g -def positions_from_delta( # noqa: PLR0912, PLR0913, PLR0915 +def positions_from_delta( # type: ignore[no-untyped-def] # noqa: PLR0912, PLR0913, PLR0915 ngal, delta, bias=None, @@ -171,7 +171,7 @@ def positions_from_delta( # noqa: PLR0912, PLR0913, PLR0915 inputs += [(bias, 0)] if vis is not None: inputs += [(vis, 1)] - dims, ngal, delta, *rest = broadcast_leading_axes(*inputs) + dims, ngal, delta, *rest = broadcast_leading_axes(*inputs) # type: ignore[no-untyped-call] if bias is not None: bias, *rest = rest if vis is not None: @@ -215,7 +215,7 @@ def positions_from_delta( # noqa: PLR0912, PLR0913, PLR0915 cmask = np.zeros(dims, dtype=int) cmask[k] = 1 else: - cmask = 1 + cmask = 1 # type: ignore[assignment] # sample the map in batches step = 1000 @@ -248,7 +248,7 @@ def positions_from_delta( # noqa: PLR0912, PLR0913, PLR0915 assert np.sum(n[stop:]) == 0 # noqa: S101 -def uniform_positions(ngal, *, rng=None): +def uniform_positions(ngal, *, rng=None): # type: ignore[no-untyped-def] """ Generate positions uniformly over the sphere. @@ -296,12 +296,12 @@ def uniform_positions(ngal, *, rng=None): count = np.zeros(dims, dtype=int) count[k] = ngal[k] else: - count = int(ngal[k]) + count = int(ngal[k]) # type: ignore[assignment] yield lon, lat, count -def position_weights(densities, bias=None): +def position_weights(densities, bias=None): # type: ignore[no-untyped-def] r""" Compute relative weights for angular clustering. @@ -329,7 +329,7 @@ def position_weights(densities, bias=None): """ # bring densities and bias into the same shape if bias is not None: - densities, bias = broadcast_first(densities, bias) + densities, bias = broadcast_first(densities, bias) # type: ignore[no-untyped-call] # normalise densities after shape has been fixed densities = densities / np.sum(densities, axis=0) # apply bias after normalisation diff --git a/glass/shapes.py b/glass/shapes.py index 2471e9d6..d804a157 100644 --- a/glass/shapes.py +++ b/glass/shapes.py @@ -24,11 +24,13 @@ from __future__ import annotations +import typing + import numpy as np import numpy.typing as npt -def triaxial_axis_ratio(zeta, xi, size=None, *, rng=None): +def triaxial_axis_ratio(zeta, xi, size=None, *, rng=None): # type: ignore[no-untyped-def] r""" Axis ratio of a randomly projected triaxial ellipsoid. @@ -93,7 +95,7 @@ def triaxial_axis_ratio(zeta, xi, size=None, *, rng=None): ) -def ellipticity_ryden04(mu, sigma, gamma, sigma_gamma, size=None, *, rng=None): # noqa: PLR0913 +def ellipticity_ryden04(mu, sigma, gamma, sigma_gamma, size=None, *, rng=None): # type: ignore[no-untyped-def] # noqa: PLR0913 r""" Ellipticity distribution following Ryden (2004). 
@@ -159,7 +161,7 @@ def ellipticity_ryden04(mu, sigma, gamma, sigma_gamma, size=None, *, rng=None): xi = (1 - gam) * zeta # random projection of random triaxial ellipsoid - q = triaxial_axis_ratio(zeta, xi, rng=rng) + q = triaxial_axis_ratio(zeta, xi, rng=rng) # type: ignore[no-untyped-call] # assemble ellipticity with random complex phase e = np.exp(1j * rng.uniform(0, 2 * np.pi, size=np.shape(q))) @@ -174,7 +176,7 @@ def ellipticity_gaussian( sigma: npt.ArrayLike, *, rng: np.random.Generator | None = None, -) -> npt.NDArray: +) -> npt.NDArray[typing.Any]: r""" Sample Gaussian galaxy ellipticities. @@ -231,7 +233,7 @@ def ellipticity_intnorm( sigma: npt.ArrayLike, *, rng: np.random.Generator | None = None, -) -> npt.NDArray: +) -> npt.NDArray[typing.Any]: r""" Sample galaxy ellipticities with intrinsic normal distribution. diff --git a/glass/shells.py b/glass/shells.py index 2dc09f35..9b1f1798 100644 --- a/glass/shells.py +++ b/glass/shells.py @@ -57,23 +57,23 @@ from cosmology import Cosmology # types -ArrayLike1D = typing.Union[collections.abc.Sequence[float], npt.NDArray] -WeightFunc = typing.Callable[[ArrayLike1D], npt.NDArray] +ArrayLike1D = typing.Union[collections.abc.Sequence[float], npt.NDArray[typing.Any]] +WeightFunc = typing.Callable[[ArrayLike1D], npt.NDArray[typing.Any]] -def distance_weight(z: npt.ArrayLike, cosmo: Cosmology) -> npt.NDArray: +def distance_weight(z: npt.ArrayLike, cosmo: Cosmology) -> npt.NDArray[typing.Any]: """Uniform weight in comoving distance.""" - return 1 / cosmo.ef(z) + return 1 / cosmo.ef(z) # type: ignore[no-any-return] -def volume_weight(z: npt.ArrayLike, cosmo: Cosmology) -> npt.NDArray: +def volume_weight(z: npt.ArrayLike, cosmo: Cosmology) -> npt.NDArray[typing.Any]: """Uniform weight in comoving volume.""" - return cosmo.xm(z) ** 2 / cosmo.ef(z) + return cosmo.xm(z) ** 2 / cosmo.ef(z) # type: ignore[no-any-return] -def density_weight(z: npt.ArrayLike, cosmo: Cosmology) -> npt.NDArray: +def density_weight(z: npt.ArrayLike, cosmo: Cosmology) -> npt.NDArray[typing.Any]: """Uniform weight in matter density.""" - return cosmo.rho_m_z(z) * cosmo.xm(z) ** 2 / cosmo.ef(z) + return cosmo.rho_m_z(z) * cosmo.xm(z) ** 2 / cosmo.ef(z) # type: ignore[no-any-return] class RadialWindow(typing.NamedTuple): @@ -169,14 +169,14 @@ def tophat_windows( ) wht: WeightFunc - wht = weight if weight is not None else np.ones_like + wht = weight if weight is not None else np.ones_like # type: ignore[assignment] ws = [] for zmin, zmax in zip(zbins, zbins[1:]): n = max(round((zmax - zmin) / dz), 2) z = np.linspace(zmin, zmax, n) w = wht(z) - zeff = np.trapz(w * z, z) / np.trapz(w, z) - ws.append(RadialWindow(z, w, zeff)) + zeff = np.trapz(w * z, z) / np.trapz(w, z) # type: ignore[attr-defined] + ws.append(RadialWindow(z, w, zeff)) # type: ignore[arg-type] return ws @@ -299,7 +299,7 @@ def restrict( z: ArrayLike1D, f: ArrayLike1D, w: RadialWindow, -) -> tuple[npt.NDArray, npt.NDArray]: +) -> tuple[npt.NDArray[typing.Any], npt.NDArray[typing.Any]]: """ Restrict a function to a redshift window. 
@@ -331,7 +331,7 @@ def restrict( """ z_ = np.compress(np.greater(z, w.za[0]) & np.less(z, w.za[-1]), z) zr = np.union1d(w.za, z_) - fr = ndinterp(zr, z, f, left=0.0, right=0.0) * ndinterp(zr, w.za, w.wa) + fr = ndinterp(zr, z, f, left=0.0, right=0.0) * ndinterp(zr, w.za, w.wa) # type: ignore[no-untyped-call] return zr, fr @@ -443,7 +443,7 @@ def partition( except KeyError: msg = f"invalid method: {method}" raise ValueError(msg) from None - return partition_method(z, fz, shells) + return partition_method(z, fz, shells) # type: ignore[no-any-return] def partition_lstsq( @@ -469,24 +469,24 @@ def partition_lstsq( dz = np.gradient(zp) # create the window function matrix - a = [np.interp(zp, za, wa, left=0.0, right=0.0) for za, wa, _ in shells] - a /= np.trapz(a, zp, axis=-1)[..., None] + a = [np.interp(zp, za, wa, left=0.0, right=0.0) for za, wa, _ in shells] # type: ignore[arg-type] + a /= np.trapz(a, zp, axis=-1)[..., None] # type: ignore[attr-defined] a = a * dz # create the target vector of distribution values - b = ndinterp(zp, z, fz, left=0.0, right=0.0) + b = ndinterp(zp, z, fz, left=0.0, right=0.0) # type: ignore[no-untyped-call] b = b * dz # append a constraint for the integral mult = 1 / sumtol - a = np.concatenate([a, mult * np.ones((len(shells), 1))], axis=-1) - b = np.concatenate([b, mult * np.reshape(np.trapz(fz, z), (*dims, 1))], axis=-1) + a = np.concatenate([a, mult * np.ones((len(shells), 1))], axis=-1) # type: ignore[assignment] + b = np.concatenate([b, mult * np.reshape(np.trapz(fz, z), (*dims, 1))], axis=-1) # type: ignore[attr-defined] # now a is a matrix of shape (len(shells), len(zp) + 1) # and b is a matrix of shape (*dims, len(zp) + 1) # need to find weights x such that b == x @ a over all axes of b # do the least-squares fit over partially flattened b, then reshape - x = np.linalg.lstsq(a.T, b.reshape(-1, zp.size + 1).T, rcond=None)[0] + x = np.linalg.lstsq(a.T, b.reshape(-1, zp.size + 1).T, rcond=None)[0] # type: ignore[attr-defined, union-attr] x = x.T.reshape(*dims, len(shells)) # roll the last axis of size len(shells) to the front return np.moveaxis(x, -1, 0) @@ -523,25 +523,25 @@ def partition_nnls( dz = np.gradient(zp) # create the window function matrix - a = [np.interp(zp, za, wa, left=0.0, right=0.0) for za, wa, _ in shells] - a /= np.trapz(a, zp, axis=-1)[..., None] + a = [np.interp(zp, za, wa, left=0.0, right=0.0) for za, wa, _ in shells] # type: ignore[arg-type] + a /= np.trapz(a, zp, axis=-1)[..., None] # type: ignore[attr-defined] a = a * dz # create the target vector of distribution values - b = ndinterp(zp, z, fz, left=0.0, right=0.0) + b = ndinterp(zp, z, fz, left=0.0, right=0.0) # type: ignore[no-untyped-call] b = b * dz # append a constraint for the integral mult = 1 / sumtol - a = np.concatenate([a, mult * np.ones((len(shells), 1))], axis=-1) - b = np.concatenate([b, mult * np.reshape(np.trapz(fz, z), (*dims, 1))], axis=-1) + a = np.concatenate([a, mult * np.ones((len(shells), 1))], axis=-1) # type: ignore[assignment] + b = np.concatenate([b, mult * np.reshape(np.trapz(fz, z), (*dims, 1))], axis=-1) # type: ignore[attr-defined] # now a is a matrix of shape (len(shells), len(zp) + 1) # and b is a matrix of shape (*dims, len(zp) + 1) # for each dim, find non-negative weights x such that b == a.T @ x # reduce the dimensionality of the problem using a thin QR decomposition - q, r = np.linalg.qr(a.T) + q, r = np.linalg.qr(a.T) # type: ignore[attr-defined] y = np.einsum("ji,...j", q, b) # for each dim, find non-negative weights x such that y == r @ x 
@@ -561,12 +561,12 @@ def partition_restrict( """Partition by restriction and integration.""" part = np.empty((len(shells),) + np.shape(fz)[:-1]) for i, w in enumerate(shells): - zr, fr = restrict(z, fz, w) - part[i] = np.trapz(fr, zr, axis=-1) + zr, fr = restrict(z, fz, w) # type: ignore[arg-type] + part[i] = np.trapz(fr, zr, axis=-1) # type: ignore[attr-defined] return part -def redshift_grid(zmin, zmax, *, dz=None, num=None): +def redshift_grid(zmin, zmax, *, dz=None, num=None): # type: ignore[no-untyped-def] """Redshift grid with uniform spacing in redshift.""" if dz is not None and num is None: z = np.arange(zmin, np.nextafter(zmax + dz, zmax), dz) @@ -578,7 +578,7 @@ def redshift_grid(zmin, zmax, *, dz=None, num=None): return z -def distance_grid(cosmo, zmin, zmax, *, dx=None, num=None): +def distance_grid(cosmo, zmin, zmax, *, dx=None, num=None): # type: ignore[no-untyped-def] """Redshift grid with uniform spacing in comoving distance.""" xmin, xmax = cosmo.dc(zmin), cosmo.dc(zmax) if dx is not None and num is None: @@ -628,11 +628,11 @@ def combine( return sum( np.expand_dims(weight, -1) * np.interp( - z, + z, # type: ignore[arg-type] shell.za, - shell.wa / np.trapz(shell.wa, shell.za), + shell.wa / np.trapz(shell.wa, shell.za), # type: ignore[attr-defined] left=0.0, right=0.0, ) - for shell, weight in zip(shells, weights) + for shell, weight in zip(shells, weights) # type: ignore[arg-type] ) diff --git a/glass/user.py b/glass/user.py index cd7d433b..876e70d4 100644 --- a/glass/user.py +++ b/glass/user.py @@ -22,7 +22,7 @@ import numpy as np -def save_cls(filename, cls) -> None: +def save_cls(filename, cls) -> None: # type: ignore[no-untyped-def] """ Save a list of Cls to file. @@ -35,7 +35,7 @@ def save_cls(filename, cls) -> None: np.savez(filename, values=values, split=split) -def load_cls(filename): +def load_cls(filename): # type: ignore[no-untyped-def] """ Load a list of Cls from file. @@ -55,12 +55,12 @@ class _FitsWriter: Initialised with the fits object and extension name. """ - def __init__(self, fits, ext=None) -> None: + def __init__(self, fits, ext=None) -> None: # type: ignore[no-untyped-def] """Create a new, uninitialised writer.""" self.fits = fits self.ext = ext - def _append(self, data, names=None) -> None: + def _append(self, data, names=None) -> None: # type: ignore[no-untyped-def] """Write the FITS file.""" if self.ext is None or self.ext not in self.fits: self.fits.write_table(data, names=names, extname=self.ext) @@ -71,7 +71,7 @@ def _append(self, data, names=None) -> None: # not using hdu.append here because of incompatibilities hdu.write(data, names=names, firstrow=hdu.get_nrows()) - def write(self, data=None, /, **columns) -> None: + def write(self, data=None, /, **columns) -> None: # type: ignore[no-untyped-def] """ Write to FITS by calling the internal _append method. @@ -89,7 +89,7 @@ def write(self, data=None, /, **columns) -> None: @contextmanager -def write_catalog(filename, *, ext=None): +def write_catalog(filename, *, ext=None): # type: ignore[no-untyped-def] """ Write a catalogue into a FITS file. 
diff --git a/pyproject.toml b/pyproject.toml index daafb574..4e2cd7fe 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -92,6 +92,19 @@ build.targets.sdist.exclude = [ ] version.source = "vcs" +[tool.mypy] +disallow_untyped_decorators = false +enable_error_code = [ + "ignore-without-code", + "redundant-expr", + "truthy-bool", +] +plugins = [ + "numpy.typing.mypy_plugin", +] +strict = true +warn_unreachable = true + [tool.pytest.ini_options] addopts = [ "--strict-config", diff --git a/tests/conftest.py b/tests/conftest.py index c9b96801..511a14b1 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -2,7 +2,7 @@ @pytest.fixture(scope="session") -def rng(): +def rng(): # type: ignore[no-untyped-def] import numpy as np return np.random.default_rng(seed=42) diff --git a/tests/core/test_algorithm.py b/tests/core/test_algorithm.py index 470ab6ea..1438c95c 100644 --- a/tests/core/test_algorithm.py +++ b/tests/core/test_algorithm.py @@ -10,7 +10,7 @@ @pytest.mark.skipif(not HAVE_SCIPY, reason="test requires SciPy") -def test_nnls(rng): +def test_nnls(rng): # type: ignore[no-untyped-def] from scipy.optimize import nnls as nnls_scipy # cross-check output with scipy's nnls @@ -21,7 +21,7 @@ def test_nnls(rng): x_glass = nnls_glass(a, b) x_scipy, _ = nnls_scipy(a, b) - np.testing.assert_allclose(x_glass, x_scipy) + np.testing.assert_allclose(x_glass, x_scipy) # type: ignore[arg-type] # check matrix and vector's shape diff --git a/tests/core/test_array.py b/tests/core/test_array.py index 9fbb47de..315dd204 100644 --- a/tests/core/test_array.py +++ b/tests/core/test_array.py @@ -15,13 +15,13 @@ HAVE_SCIPY = importlib.util.find_spec("scipy") is not None -def test_broadcast_first(): +def test_broadcast_first(): # type: ignore[no-untyped-def] a = np.ones((2, 3, 4)) b = np.ones((2, 1)) # arrays with shape ((3, 4, 2)) and ((1, 2)) are passed # to np.broadcast_arrays; hence it works - a_a, b_a = broadcast_first(a, b) + a_a, b_a = broadcast_first(a, b) # type: ignore[no-untyped-call] assert a_a.shape == (2, 3, 4) assert b_a.shape == (2, 3, 4) @@ -35,7 +35,7 @@ def test_broadcast_first(): b = np.ones((5, 6)) with pytest.raises(ValueError, match="shape mismatch"): - broadcast_first(a, b) + broadcast_first(a, b) # type: ignore[no-untyped-call] # plain np.broadcast_arrays will work a_a, b_a = np.broadcast_arrays(a, b) @@ -44,56 +44,56 @@ def test_broadcast_first(): assert b_a.shape == (4, 5, 6) -def test_broadcast_leading_axes(): +def test_broadcast_leading_axes(): # type: ignore[no-untyped-def] a = 0 b = np.zeros((4, 10)) c = np.zeros((3, 1, 5, 6)) - dims, a, b, c = broadcast_leading_axes((a, 0), (b, 1), (c, 2)) + dims, a, b, c = broadcast_leading_axes((a, 0), (b, 1), (c, 2)) # type: ignore[no-untyped-call] assert dims == (3, 4) - assert a.shape == (3, 4) + assert a.shape == (3, 4) # type: ignore[attr-defined] assert b.shape == (3, 4, 10) assert c.shape == (3, 4, 5, 6) -def test_ndinterp(): +def test_ndinterp(): # type: ignore[no-untyped-def] # test 1d interpolation xp = [0, 1, 2, 3, 4] yp = [1.1, 1.2, 1.3, 1.4, 1.5] x = 0.5 - y = ndinterp(x, xp, yp) + y = ndinterp(x, xp, yp) # type: ignore[no-untyped-call] assert np.shape(y) == () np.testing.assert_allclose(y, 1.15, atol=1e-15) - x = [0.5, 1.5, 2.5] - y = ndinterp(x, xp, yp) + x = [0.5, 1.5, 2.5] # type: ignore[assignment] + y = ndinterp(x, xp, yp) # type: ignore[no-untyped-call] assert np.shape(y) == (3,) np.testing.assert_allclose(y, [1.15, 1.25, 1.35], atol=1e-15) - x = [[0.5, 1.5], [2.5, 3.5]] - y = ndinterp(x, xp, yp) + x = [[0.5, 1.5], [2.5, 3.5]] # 
type: ignore[assignment] + y = ndinterp(x, xp, yp) # type: ignore[no-untyped-call] assert np.shape(y) == (2, 2) np.testing.assert_allclose(y, [[1.15, 1.25], [1.35, 1.45]], atol=1e-15) # test nd interpolation in final axis - yp = [[1.1, 1.2, 1.3, 1.4, 1.5], [2.1, 2.2, 2.3, 2.4, 2.5]] + yp = [[1.1, 1.2, 1.3, 1.4, 1.5], [2.1, 2.2, 2.3, 2.4, 2.5]] # type: ignore[list-item] x = 0.5 - y = ndinterp(x, xp, yp) + y = ndinterp(x, xp, yp) # type: ignore[no-untyped-call] assert np.shape(y) == (2,) np.testing.assert_allclose(y, [1.15, 2.15], atol=1e-15) - x = [0.5, 1.5, 2.5] - y = ndinterp(x, xp, yp) + x = [0.5, 1.5, 2.5] # type: ignore[assignment] + y = ndinterp(x, xp, yp) # type: ignore[no-untyped-call] assert np.shape(y) == (2, 3) np.testing.assert_allclose(y, [[1.15, 1.25, 1.35], [2.15, 2.25, 2.35]], atol=1e-15) - x = [[0.5, 1.5], [2.5, 3.5]] - y = ndinterp(x, xp, yp) + x = [[0.5, 1.5], [2.5, 3.5]] # type: ignore[assignment] + y = ndinterp(x, xp, yp) # type: ignore[no-untyped-call] assert np.shape(y) == (2, 2, 2) np.testing.assert_allclose( y, @@ -103,15 +103,15 @@ def test_ndinterp(): # test nd interpolation in middle axis - yp = [[[1.1], [1.2], [1.3], [1.4], [1.5]], [[2.1], [2.2], [2.3], [2.4], [2.5]]] + yp = [[[1.1], [1.2], [1.3], [1.4], [1.5]], [[2.1], [2.2], [2.3], [2.4], [2.5]]] # type: ignore[list-item] x = 0.5 - y = ndinterp(x, xp, yp, axis=1) + y = ndinterp(x, xp, yp, axis=1) # type: ignore[no-untyped-call] assert np.shape(y) == (2, 1) np.testing.assert_allclose(y, [[1.15], [2.15]], atol=1e-15) - x = [0.5, 1.5, 2.5] - y = ndinterp(x, xp, yp, axis=1) + x = [0.5, 1.5, 2.5] # type: ignore[assignment] + y = ndinterp(x, xp, yp, axis=1) # type: ignore[no-untyped-call] assert np.shape(y) == (2, 3, 1) np.testing.assert_allclose( y, @@ -119,8 +119,8 @@ def test_ndinterp(): atol=1e-15, ) - x = [[0.5, 1.5, 2.5, 3.5], [3.5, 2.5, 1.5, 0.5], [0.5, 3.5, 1.5, 2.5]] - y = ndinterp(x, xp, yp, axis=1) + x = [[0.5, 1.5, 2.5, 3.5], [3.5, 2.5, 1.5, 0.5], [0.5, 3.5, 1.5, 2.5]] # type: ignore[assignment] + y = ndinterp(x, xp, yp, axis=1) # type: ignore[no-untyped-call] assert np.shape(y) == (2, 3, 4, 1) np.testing.assert_allclose( y, @@ -140,20 +140,20 @@ def test_ndinterp(): ) -def test_trapz_product(): +def test_trapz_product(): # type: ignore[no-untyped-def] x1 = np.linspace(0, 2, 100) f1 = np.full_like(x1, 2.0) x2 = np.linspace(1, 2, 10) f2 = np.full_like(x2, 0.5) - s = trapz_product((x1, f1), (x2, f2)) + s = trapz_product((x1, f1), (x2, f2)) # type: ignore[no-untyped-call] np.testing.assert_allclose(s, 1.0) @pytest.mark.skipif(not HAVE_SCIPY, reason="test requires SciPy") -def test_cumtrapz(): +def test_cumtrapz(): # type: ignore[no-untyped-def] from scipy.integrate import cumulative_trapezoid # 1D f and x @@ -163,18 +163,18 @@ def test_cumtrapz(): # default dtype (int - not supported by scipy) - glass_ct = cumtrapz(f, x) + glass_ct = cumtrapz(f, x) # type: ignore[no-untyped-call] np.testing.assert_allclose(glass_ct, np.array([0, 1, 4, 7])) # explicit dtype (float) - glass_ct = cumtrapz(f, x, dtype=float) + glass_ct = cumtrapz(f, x, dtype=float) # type: ignore[no-untyped-call] scipy_ct = cumulative_trapezoid(f, x, initial=0) np.testing.assert_allclose(glass_ct, scipy_ct) # explicit return array - result = cumtrapz(f, x, dtype=float, out=np.zeros((4,))) + result = cumtrapz(f, x, dtype=float, out=np.zeros((4,))) # type: ignore[no-untyped-call] scipy_ct = cumulative_trapezoid(f, x, initial=0) np.testing.assert_allclose(result, scipy_ct) @@ -185,17 +185,17 @@ def test_cumtrapz(): # default dtype (int - not 
supported by scipy) - glass_ct = cumtrapz(f, x) + glass_ct = cumtrapz(f, x) # type: ignore[no-untyped-call] np.testing.assert_allclose(glass_ct, np.array([[0, 2, 12, 31], [0, 2, 8, 17]])) # explicit dtype (float) - glass_ct = cumtrapz(f, x, dtype=float) + glass_ct = cumtrapz(f, x, dtype=float) # type: ignore[no-untyped-call] scipy_ct = cumulative_trapezoid(f, x, initial=0) np.testing.assert_allclose(glass_ct, scipy_ct) # explicit return array - glass_ct = cumtrapz(f, x, dtype=float, out=np.zeros((2, 4))) + glass_ct = cumtrapz(f, x, dtype=float, out=np.zeros((2, 4))) # type: ignore[no-untyped-call] scipy_ct = cumulative_trapezoid(f, x, initial=0) np.testing.assert_allclose(glass_ct, scipy_ct) diff --git a/tests/test_fields.py b/tests/test_fields.py index e65a0f94..18091f1e 100644 --- a/tests/test_fields.py +++ b/tests/test_fields.py @@ -1,10 +1,10 @@ from glass.fields import getcl -def test_getcl(): +def test_getcl(): # type: ignore[no-untyped-def] # make a mock Cls array with the index pairs as entries cls = [{i, j} for i in range(10) for j in range(i, -1, -1)] # make sure indices are retrieved correctly for i in range(10): for j in range(10): - assert getcl(cls, i, j) == {i, j} + assert getcl(cls, i, j) == {i, j} # type: ignore[no-untyped-call] diff --git a/tests/test_fits.py b/tests/test_fits.py index f036b4cd..3a6d6229 100644 --- a/tests/test_fits.py +++ b/tests/test_fits.py @@ -9,7 +9,7 @@ HAVE_FITSIO = importlib.util.find_spec("fitsio") is not None -def _test_append(fits, data, names): +def _test_append(fits, data, names): # type: ignore[no-untyped-def] """Write routine for FITS test cases.""" cat_name = "CATALOG" if cat_name not in fits: @@ -26,7 +26,7 @@ def _test_append(fits, data, names): @pytest.mark.skipif(not HAVE_FITSIO, reason="test requires fitsio") -def test_basic_write(tmp_path): +def test_basic_write(tmp_path): # type: ignore[no-untyped-def] import fitsio filename_gfits = "gfits.fits" # what GLASS creates @@ -42,7 +42,7 @@ def test_basic_write(tmp_path): out.write(RA=array, RB=array2) arrays = [array, array2] names = ["RA", "RB"] - _test_append(my_fits, arrays, names) + _test_append(my_fits, arrays, names) # type: ignore[no-untyped-call] with ( fitsio.FITS(tmp_path / filename_gfits) as g_fits, @@ -55,7 +55,7 @@ def test_basic_write(tmp_path): @pytest.mark.skipif(not HAVE_FITSIO, reason="test requires fitsio") -def test_write_exception(tmp_path): +def test_write_exception(tmp_path): # type: ignore[no-untyped-def] try: with user.write_catalog(tmp_path / filename, ext="CATALOG") as out: for i in range(my_max): diff --git a/tests/test_galaxies.py b/tests/test_galaxies.py index 3a171834..a38a181e 100644 --- a/tests/test_galaxies.py +++ b/tests/test_galaxies.py @@ -4,7 +4,7 @@ from glass.galaxies import galaxy_shear, gaussian_phz, redshifts, redshifts_from_nz -def test_redshifts(mocker): +def test_redshifts(mocker): # type: ignore[no-untyped-def] # create a mock radial window function w = mocker.Mock() w.za = np.linspace(0.0, 1.0, 20) @@ -21,7 +21,7 @@ def test_redshifts(mocker): assert z.shape == (10,) -def test_redshifts_from_nz(): +def test_redshifts_from_nz(): # type: ignore[no-untyped-def] # test sampling redshifts = redshifts_from_nz(10, [0, 1, 2, 3, 4], [1, 0, 0, 0, 0], warn=False) @@ -51,7 +51,7 @@ def test_redshifts_from_nz(): # case: extra dimensions from count - count = [10, 20, 30] + count = [10, 20, 30] # type: ignore[assignment] z = np.linspace(0, 1, 100) nz = z * (1 - z) @@ -63,7 +63,7 @@ def test_redshifts_from_nz(): count = 10 z = np.linspace(0, 1, 100) - nz = 
[z * (1 - z), (z - 0.5) ** 2] + nz = [z * (1 - z), (z - 0.5) ** 2] # type: ignore[assignment] redshifts = redshifts_from_nz(count, z, nz, warn=False) @@ -71,9 +71,9 @@ def test_redshifts_from_nz(): # case: extra dimensions from count and nz - count = [[10], [20], [30]] + count = [[10], [20], [30]] # type: ignore[assignment] z = np.linspace(0, 1, 100) - nz = [z * (1 - z), (z - 0.5) ** 2] + nz = [z * (1 - z), (z - 0.5) ** 2] # type: ignore[assignment] redshifts = redshifts_from_nz(count, z, nz, warn=False) @@ -81,9 +81,9 @@ def test_redshifts_from_nz(): # case: incompatible input shapes - count = [10, 20, 30] + count = [10, 20, 30] # type: ignore[assignment] z = np.linspace(0, 1, 100) - nz = [z * (1 - z), (z - 0.5) ** 2] + nz = [z * (1 - z), (z - 0.5) ** 2] # type: ignore[assignment] with pytest.raises(ValueError): redshifts_from_nz(count, z, nz, warn=False) @@ -92,7 +92,7 @@ def test_redshifts_from_nz(): redshifts = redshifts_from_nz(10, [0, 1, 2, 3, 4], [1, 0, 0, 0, 0]) -def test_galaxy_shear(rng): +def test_galaxy_shear(rng): # type: ignore[no-untyped-def] # check shape of the output kappa, gamma1, gamma2 = ( @@ -101,7 +101,7 @@ def test_galaxy_shear(rng): rng.normal(size=(12,)), ) - shear = galaxy_shear([], [], [], kappa, gamma1, gamma2) + shear = galaxy_shear([], [], [], kappa, gamma1, gamma2) # type: ignore[arg-type] np.testing.assert_equal(shear, []) gal_lon, gal_lat, gal_eps = ( @@ -114,7 +114,7 @@ def test_galaxy_shear(rng): # shape with no reduced shear - shear = galaxy_shear([], [], [], kappa, gamma1, gamma2, reduced_shear=False) + shear = galaxy_shear([], [], [], kappa, gamma1, gamma2, reduced_shear=False) # type: ignore[arg-type] np.testing.assert_equal(shear, []) gal_lon, gal_lat, gal_eps = ( @@ -128,7 +128,7 @@ def test_galaxy_shear(rng): assert np.shape(shear) == (512,) -def test_gaussian_phz(): +def test_gaussian_phz(): # type: ignore[no-untyped-def] # test sampling # case: zero variance @@ -142,8 +142,8 @@ def test_gaussian_phz(): # case: truncated normal - z = 0.0 - sigma_0 = np.ones(100) + z = 0.0 # type: ignore[assignment] + sigma_0 = np.ones(100) # type: ignore[assignment] phz = gaussian_phz(z, sigma_0) @@ -152,8 +152,8 @@ def test_gaussian_phz(): # case: upper and lower bound - z = 1.0 - sigma_0 = np.ones(100) + z = 1.0 # type: ignore[assignment] + sigma_0 = np.ones(100) # type: ignore[assignment] phz = gaussian_phz(z, sigma_0, lower=0.5, upper=1.5) @@ -165,7 +165,7 @@ def test_gaussian_phz(): # case: scalar redshift, scalar sigma_0 - z = 1.0 + z = 1.0 # type: ignore[assignment] sigma_0 = 0.0 phz = gaussian_phz(z, sigma_0) @@ -185,8 +185,8 @@ def test_gaussian_phz(): # case: scalar redshift, array sigma_0 - z = 1.0 - sigma_0 = np.zeros(10) + z = 1.0 # type: ignore[assignment] + sigma_0 = np.zeros(10) # type: ignore[assignment] phz = gaussian_phz(z, sigma_0) @@ -196,7 +196,7 @@ def test_gaussian_phz(): # case: array redshift, array sigma_0 z = np.linspace(0, 1, 10) - sigma_0 = np.zeros((11, 1)) + sigma_0 = np.zeros((11, 1)) # type: ignore[assignment] phz = gaussian_phz(z, sigma_0) diff --git a/tests/test_lensing.py b/tests/test_lensing.py index 2704ca5f..96bc1944 100644 --- a/tests/test_lensing.py +++ b/tests/test_lensing.py @@ -12,7 +12,7 @@ @pytest.fixture -def shells(): +def shells(): # type: ignore[no-untyped-def] return [ RadialWindow([0.0, 1.0, 2.0], [0.0, 1.0, 0.0], 1.0), RadialWindow([1.0, 2.0, 3.0], [0.0, 1.0, 0.0], 2.0), @@ -23,16 +23,16 @@ def shells(): @pytest.fixture -def cosmo(): +def cosmo(): # type: ignore[no-untyped-def] class MockCosmology: @property - 
def omega_m(self): + def omega_m(self): # type: ignore[no-untyped-def] return 0.3 - def ef(self, z): + def ef(self, z): # type: ignore[no-untyped-def] return (self.omega_m * (1 + z) ** 3 + 1 - self.omega_m) ** 0.5 - def xm(self, z, z2=None): + def xm(self, z, z2=None): # type: ignore[no-untyped-def] if z2 is None: return np.array(z) * 1000 return (np.array(z2) - np.array(z)) * 1000 @@ -41,37 +41,37 @@ def xm(self, z, z2=None): @pytest.mark.parametrize("usecomplex", [True, False]) -def test_deflect_nsew(usecomplex): +def test_deflect_nsew(usecomplex): # type: ignore[no-untyped-def] d = 5.0 r = np.radians(d) if usecomplex: - def alpha(re, im): + def alpha(re, im): # type: ignore[no-untyped-def] return re + 1j * im else: - def alpha(re, im): + def alpha(re, im): # type: ignore[no-untyped-def] return [re, im] # north - lon, lat = deflect(0.0, 0.0, alpha(r, 0)) + lon, lat = deflect(0.0, 0.0, alpha(r, 0)) # type: ignore[no-untyped-call] np.testing.assert_allclose([lon, lat], [0.0, d], atol=1e-15) # south - lon, lat = deflect(0.0, 0.0, alpha(-r, 0)) + lon, lat = deflect(0.0, 0.0, alpha(-r, 0)) # type: ignore[no-untyped-call] np.testing.assert_allclose([lon, lat], [0.0, -d], atol=1e-15) # east - lon, lat = deflect(0.0, 0.0, alpha(0, r)) + lon, lat = deflect(0.0, 0.0, alpha(0, r)) # type: ignore[no-untyped-call] np.testing.assert_allclose([lon, lat], [-d, 0.0], atol=1e-15) # west - lon, lat = deflect(0.0, 0.0, alpha(0, -r)) + lon, lat = deflect(0.0, 0.0, alpha(0, -r)) # type: ignore[no-untyped-call] np.testing.assert_allclose([lon, lat], [d, 0.0], atol=1e-15) -def test_deflect_many(rng): +def test_deflect_many(rng): # type: ignore[no-untyped-def] n = 1000 abs_alpha = rng.uniform(0, 2 * np.pi, size=n) arg_alpha = rng.uniform(-np.pi, np.pi, size=n) @@ -89,7 +89,7 @@ def test_deflect_many(rng): np.testing.assert_allclose(dotp, np.cos(abs_alpha)) -def test_multi_plane_matrix(shells, cosmo, rng): +def test_multi_plane_matrix(shells, cosmo, rng): # type: ignore[no-untyped-def] mat = multi_plane_matrix(shells, cosmo) np.testing.assert_array_equal(mat, np.tril(mat)) @@ -101,12 +101,12 @@ def test_multi_plane_matrix(shells, cosmo, rng): kappas = [] for shell, delta in zip(shells, deltas): convergence.add_window(delta, shell) - kappas.append(convergence.kappa.copy()) + kappas.append(convergence.kappa.copy()) # type: ignore[union-attr] np.testing.assert_allclose(mat @ deltas, kappas) -def test_multi_plane_weights(shells, cosmo, rng): +def test_multi_plane_weights(shells, cosmo, rng): # type: ignore[no-untyped-def] w_in = np.eye(len(shells)) w_out = multi_plane_weights(w_in, shells, cosmo) @@ -125,4 +125,4 @@ def test_multi_plane_weights(shells, cosmo, rng): wmat = multi_plane_weights(weights, shells, cosmo) - np.testing.assert_allclose(np.einsum("ij,ik", wmat, deltas), kappa) + np.testing.assert_allclose(np.einsum("ij,ik", wmat, deltas), kappa) # type: ignore[arg-type] diff --git a/tests/test_points.py b/tests/test_points.py index 7d8283db..584e824d 100644 --- a/tests/test_points.py +++ b/tests/test_points.py @@ -3,16 +3,16 @@ from glass.points import position_weights, positions_from_delta, uniform_positions -def catpos(pos): - lon, lat, cnt = [], [], 0 +def catpos(pos): # type: ignore[no-untyped-def] + lon, lat, cnt = [], [], 0 # type: ignore[var-annotated] for lo, la, co in pos: - lon = np.concatenate([lon, lo]) - lat = np.concatenate([lat, la]) + lon = np.concatenate([lon, lo]) # type: ignore[assignment] + lat = np.concatenate([lat, la]) # type: ignore[assignment] cnt = cnt + co return lon, lat, cnt -def 
test_positions_from_delta(): +def test_positions_from_delta(): # type: ignore[no-untyped-def] # case: single-dimensional input ngal = 1e-3 @@ -20,19 +20,19 @@ def test_positions_from_delta(): bias = 0.8 vis = np.ones(12) - lon, lat, cnt = catpos(positions_from_delta(ngal, delta, bias, vis)) + lon, lat, cnt = catpos(positions_from_delta(ngal, delta, bias, vis)) # type: ignore[no-untyped-call] assert isinstance(cnt, int) assert lon.shape == lat.shape == (cnt,) # case: multi-dimensional ngal - ngal = [1e-3, 2e-3] + ngal = [1e-3, 2e-3] # type: ignore[assignment] delta = np.zeros(12) bias = 0.8 vis = np.ones(12) - lon, lat, cnt = catpos(positions_from_delta(ngal, delta, bias, vis)) + lon, lat, cnt = catpos(positions_from_delta(ngal, delta, bias, vis)) # type: ignore[no-untyped-call] assert cnt.shape == (2,) assert lon.shape == (cnt.sum(),) @@ -45,7 +45,7 @@ def test_positions_from_delta(): bias = 0.8 vis = np.ones(12) - lon, lat, cnt = catpos(positions_from_delta(ngal, delta, bias, vis)) + lon, lat, cnt = catpos(positions_from_delta(ngal, delta, bias, vis)) # type: ignore[no-untyped-call] assert cnt.shape == (3, 2) assert lon.shape == (cnt.sum(),) @@ -53,54 +53,54 @@ def test_positions_from_delta(): # case: multi-dimensional broadcasting - ngal = [1e-3, 2e-3] + ngal = [1e-3, 2e-3] # type: ignore[assignment] delta = np.zeros((3, 1, 12)) bias = 0.8 vis = np.ones(12) - lon, lat, cnt = catpos(positions_from_delta(ngal, delta, bias, vis)) + lon, lat, cnt = catpos(positions_from_delta(ngal, delta, bias, vis)) # type: ignore[no-untyped-call] assert cnt.shape == (3, 2) assert lon.shape == (cnt.sum(),) assert lat.shape == (cnt.sum(),) -def test_uniform_positions(): +def test_uniform_positions(): # type: ignore[no-untyped-def] # case: scalar input ngal = 1e-3 - lon, lat, cnt = catpos(uniform_positions(ngal)) + lon, lat, cnt = catpos(uniform_positions(ngal)) # type: ignore[no-untyped-call] assert isinstance(cnt, int) assert lon.shape == lat.shape == (cnt,) # case: 1-D array input - ngal = [1e-3, 2e-3, 3e-3] + ngal = [1e-3, 2e-3, 3e-3] # type: ignore[assignment] - lon, lat, cnt = catpos(uniform_positions(ngal)) + lon, lat, cnt = catpos(uniform_positions(ngal)) # type: ignore[no-untyped-call] assert cnt.shape == (3,) assert lon.shape == lat.shape == (cnt.sum(),) # case: 2-D array input - ngal = [[1e-3, 2e-3], [3e-3, 4e-3], [5e-3, 6e-3]] + ngal = [[1e-3, 2e-3], [3e-3, 4e-3], [5e-3, 6e-3]] # type: ignore[assignment] - lon, lat, cnt = catpos(uniform_positions(ngal)) + lon, lat, cnt = catpos(uniform_positions(ngal)) # type: ignore[no-untyped-call] assert cnt.shape == (3, 2) assert lon.shape == lat.shape == (cnt.sum(),) -def test_position_weights(rng): +def test_position_weights(rng): # type: ignore[no-untyped-def] for bshape in None, (), (100,), (100, 1): for cshape in (100,), (100, 50), (100, 3, 2): counts = rng.random(cshape) bias = None if bshape is None else rng.random(bshape) - weights = position_weights(counts, bias) + weights = position_weights(counts, bias) # type: ignore[no-untyped-call] expected = counts / counts.sum(axis=0, keepdims=True) if bias is not None: diff --git a/tests/test_shapes.py b/tests/test_shapes.py index 47afea5d..f67de99a 100644 --- a/tests/test_shapes.py +++ b/tests/test_shapes.py @@ -9,31 +9,31 @@ ) -def test_triaxial_axis_ratio(): +def test_triaxial_axis_ratio(): # type: ignore[no-untyped-def] # single axis ratio - q = triaxial_axis_ratio(0.8, 0.4) + q = triaxial_axis_ratio(0.8, 0.4) # type: ignore[no-untyped-call] assert np.isscalar(q) # many axis ratios - q = 
triaxial_axis_ratio(0.8, 0.4, size=1000) + q = triaxial_axis_ratio(0.8, 0.4, size=1000) # type: ignore[no-untyped-call] assert np.shape(q) == (1000,) # explicit shape - q = triaxial_axis_ratio(0.8, 0.4, size=(10, 10)) + q = triaxial_axis_ratio(0.8, 0.4, size=(10, 10)) # type: ignore[no-untyped-call] assert np.shape(q) == (10, 10) # implicit size - q1 = triaxial_axis_ratio([0.8, 0.9], 0.4) - q2 = triaxial_axis_ratio(0.8, [0.4, 0.5]) + q1 = triaxial_axis_ratio([0.8, 0.9], 0.4) # type: ignore[no-untyped-call] + q2 = triaxial_axis_ratio(0.8, [0.4, 0.5]) # type: ignore[no-untyped-call] assert np.shape(q1) == np.shape(q2) == (2,) # broadcasting rule - q = triaxial_axis_ratio([[0.6, 0.7], [0.8, 0.9]], [0.4, 0.5]) + q = triaxial_axis_ratio([[0.6, 0.7], [0.8, 0.9]], [0.4, 0.5]) # type: ignore[no-untyped-call] assert np.shape(q) == (2, 2) # random parameters and check that projection is @@ -42,49 +42,49 @@ def test_triaxial_axis_ratio(): zeta, xi = np.sort(np.random.uniform(0, 1, size=(2, 1000)), axis=0) qmin = np.min([zeta, xi, xi / zeta], axis=0) qmax = np.max([zeta, xi, xi / zeta], axis=0) - q = triaxial_axis_ratio(zeta, xi) + q = triaxial_axis_ratio(zeta, xi) # type: ignore[no-untyped-call] assert np.all((qmax >= q) & (q >= qmin)) -def test_ellipticity_ryden04(): +def test_ellipticity_ryden04(): # type: ignore[no-untyped-def] # single ellipticity - e = ellipticity_ryden04(-1.85, 0.89, 0.222, 0.056) + e = ellipticity_ryden04(-1.85, 0.89, 0.222, 0.056) # type: ignore[no-untyped-call] assert np.isscalar(e) # many ellipticities - e = ellipticity_ryden04(-1.85, 0.89, 0.222, 0.056, size=1000) + e = ellipticity_ryden04(-1.85, 0.89, 0.222, 0.056, size=1000) # type: ignore[no-untyped-call] assert np.shape(e) == (1000,) # explicit shape - e = ellipticity_ryden04(-1.85, 0.89, 0.222, 0.056, size=(10, 10)) + e = ellipticity_ryden04(-1.85, 0.89, 0.222, 0.056, size=(10, 10)) # type: ignore[no-untyped-call] assert np.shape(e) == (10, 10) # implicit size - e1 = ellipticity_ryden04(-1.85, 0.89, [0.222, 0.333], 0.056) - e2 = ellipticity_ryden04(-1.85, 0.89, 0.222, [0.056, 0.067]) - e3 = ellipticity_ryden04([-1.85, -2.85], 0.89, 0.222, 0.056) - e4 = ellipticity_ryden04(-1.85, [0.89, 1.001], 0.222, 0.056) + e1 = ellipticity_ryden04(-1.85, 0.89, [0.222, 0.333], 0.056) # type: ignore[no-untyped-call] + e2 = ellipticity_ryden04(-1.85, 0.89, 0.222, [0.056, 0.067]) # type: ignore[no-untyped-call] + e3 = ellipticity_ryden04([-1.85, -2.85], 0.89, 0.222, 0.056) # type: ignore[no-untyped-call] + e4 = ellipticity_ryden04(-1.85, [0.89, 1.001], 0.222, 0.056) # type: ignore[no-untyped-call] assert np.shape(e1) == np.shape(e2) == np.shape(e3) == np.shape(e4) == (2,) # broadcasting rule - e = ellipticity_ryden04([-1.9, -2.9], 0.9, [[0.2, 0.3], [0.4, 0.5]], 0.1) + e = ellipticity_ryden04([-1.9, -2.9], 0.9, [[0.2, 0.3], [0.4, 0.5]], 0.1) # type: ignore[no-untyped-call] assert np.shape(e) == (2, 2) # check that result is in the specified range - e = ellipticity_ryden04(0.0, 1.0, 0.222, 0.056, size=10) + e = ellipticity_ryden04(0.0, 1.0, 0.222, 0.056, size=10) # type: ignore[no-untyped-call] assert np.all((e.real >= -1.0) & (e.real <= 1.0)) - e = ellipticity_ryden04(0.0, 1.0, 0.0, 1.0, size=10) + e = ellipticity_ryden04(0.0, 1.0, 0.0, 1.0, size=10) # type: ignore[no-untyped-call] assert np.all((e.real >= -1.0) & (e.real <= 1.0)) -def test_ellipticity_gaussian(): +def test_ellipticity_gaussian(): # type: ignore[no-untyped-def] n = 1_000_000 eps = ellipticity_gaussian(n, 0.256) @@ -108,7 +108,7 @@ def test_ellipticity_gaussian(): 
np.testing.assert_allclose(np.std(eps.imag[n:]), 0.256, atol=1e-3, rtol=0) -def test_ellipticity_intnorm(): +def test_ellipticity_intnorm(): # type: ignore[no-untyped-def] n = 1_000_000 eps = ellipticity_intnorm(n, 0.256) diff --git a/tests/test_shells.py b/tests/test_shells.py index a003d198..f468d3f6 100644 --- a/tests/test_shells.py +++ b/tests/test_shells.py @@ -4,7 +4,7 @@ from glass.shells import RadialWindow, partition, restrict, tophat_windows -def test_tophat_windows(): +def test_tophat_windows(): # type: ignore[no-untyped-def] zb = [0.0, 0.1, 0.2, 0.5, 1.0, 2.0] dz = 0.005 @@ -18,16 +18,16 @@ def test_tophat_windows(): zn <= z0 + len(w.za) * dz <= zn + dz for w, z0, zn in zip(ws, zb, zb[1:]) ) - assert all(np.all(w.wa == 1) for w in ws) + assert all(np.all(w.wa == 1) for w in ws) # type: ignore[comparison-overlap] -def test_restrict(): +def test_restrict(): # type: ignore[no-untyped-def] # Gaussian test function z = np.linspace(0.0, 5.0, 1000) f = np.exp(-(((z - 2.0) / 0.5) ** 2) / 2) # window for restriction - w = RadialWindow(za=[1.0, 2.0, 3.0, 4.0], wa=[0.0, 0.5, 0.5, 0.0], zeff=None) + w = RadialWindow(za=[1.0, 2.0, 3.0, 4.0], wa=[0.0, 0.5, 0.5, 0.0], zeff=None) # type: ignore[arg-type] zr, fr = restrict(z, f, w) @@ -49,14 +49,34 @@ def test_restrict(): @pytest.mark.parametrize("method", ["lstsq", "nnls", "restrict"]) -def test_partition(method): +def test_partition(method): # type: ignore[no-untyped-def] shells = [ - RadialWindow(np.array([0.0, 1.0]), np.array([1.0, 0.0]), 0.0), - RadialWindow(np.array([0.0, 1.0, 2.0]), np.array([0.0, 1.0, 0.0]), 0.5), - RadialWindow(np.array([1.0, 2.0, 3.0]), np.array([0.0, 1.0, 0.0]), 1.5), - RadialWindow(np.array([2.0, 3.0, 4.0]), np.array([0.0, 1.0, 0.0]), 2.5), - RadialWindow(np.array([3.0, 4.0, 5.0]), np.array([0.0, 1.0, 0.0]), 3.5), - RadialWindow(np.array([4.0, 5.0]), np.array([0.0, 1.0]), 5.0), + RadialWindow(np.array([0.0, 1.0]), np.array([1.0, 0.0]), 0.0), # type: ignore[arg-type] + RadialWindow( + np.array([0.0, 1.0, 2.0]), # type: ignore[arg-type] + np.array([0.0, 1.0, 0.0]), # type: ignore[arg-type] + 0.5, + ), + RadialWindow( + np.array([1.0, 2.0, 3.0]), # type: ignore[arg-type] + np.array([0.0, 1.0, 0.0]), # type: ignore[arg-type] + 1.5, + ), + RadialWindow( + np.array([2.0, 3.0, 4.0]), # type: ignore[arg-type] + np.array([0.0, 1.0, 0.0]), # type: ignore[arg-type] + 2.5, + ), + RadialWindow( + np.array([3.0, 4.0, 5.0]), # type: ignore[arg-type] + np.array([0.0, 1.0, 0.0]), # type: ignore[arg-type] + 3.5, + ), + RadialWindow( + np.array([4.0, 5.0]), # type: ignore[arg-type] + np.array([0.0, 1.0]), # type: ignore[arg-type] + 5.0, + ), ] z = np.linspace(0.0, 5.0, 1000) @@ -67,6 +87,6 @@ def test_partition(method): part = partition(z, fz, shells, method=method) - assert part.shape == (len(shells), 3, 2) + assert part.shape == (len(shells), 3, 2) # type: ignore[union-attr] - np.testing.assert_allclose(part.sum(axis=0), np.trapz(fz, z)) + np.testing.assert_allclose(part.sum(axis=0), np.trapz(fz, z)) # type: ignore[attr-defined, union-attr]
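Note on the annotation pattern used throughout this patch: under the strict mypy configuration added in pyproject.toml, a bare npt.NDArray is reported as a generic type missing its type parameter (the [type-arg] error code), which is why annotations are systematically widened to npt.NDArray[typing.Any]. A minimal sketch of the pattern, outside the patch itself (the function scale_array below is hypothetical and not part of GLASS):

    from __future__ import annotations

    import typing

    import numpy as np
    import numpy.typing as npt


    def scale_array(
        values: npt.NDArray[typing.Any],
        factor: float,
    ) -> npt.NDArray[typing.Any]:
        """Illustrative only: annotate arrays the way this patch does."""
        # typing.Any leaves the dtype unconstrained while still supplying the
        # type parameter that strict mypy requires for the generic NDArray alias.
        return np.asanyarray(values) * factor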