diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index a0887beb..bd034554 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -24,7 +24,8 @@ jobs: run: | python -m pip install --upgrade pip setuptools pip install flake8 pytest - if [ -f requirements.txt ]; then pip install -r requirements-dev.txt; fi + pip install -r requirements-dev.txt + pip install -r requirements-torch.txt - name: Install pylops run: | python -m setuptools_scm diff --git a/.github/workflows/codacy-coverage-reporter.yaml b/.github/workflows/codacy-coverage-reporter.yaml index 0e610bb1..ed90ca30 100644 --- a/.github/workflows/codacy-coverage-reporter.yaml +++ b/.github/workflows/codacy-coverage-reporter.yaml @@ -26,7 +26,8 @@ jobs: run: | python -m pip install --upgrade pip pip install flake8 pytest - if [ -f requirements.txt ]; then pip install -r requirements-dev.txt; fi + pip install -r requirements-dev.txt + pip install -r requirements-torch.txt - name: Install pylops run: | pip install . diff --git a/.gitignore b/.gitignore index 42f9cc53..0365f10d 100644 --- a/.gitignore +++ b/.gitignore @@ -17,6 +17,7 @@ build dist pylops.egg-info/ .eggs/ +__pycache__ # setuptools_scm generated # pylops/version.py diff --git a/.readthedocs.yaml b/.readthedocs.yaml index 202c29ca..88f7e618 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -19,5 +19,6 @@ sphinx: python: install: - requirements: requirements-doc.txt + - requirements: requirements-torch.txt - method: pip path: . diff --git a/CHANGELOG.md b/CHANGELOG.md index 09d6c06b..8989f305 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,11 +1,18 @@ Changelog ========= +# 2.4.0 +* Added `pylops.signalprocessing.FourierRadon2d` and + `pylops.signalprocessing.FourierRadon3d` operators +* Added `pylops.PyTensorOperator` operator +* Added `pylops.ToCupy` operator +* Added `pylops.utils.seismicevents.parabolic3d` method +* Fix bug in `pylops.Restriction` when passing iava as cupy array + # 2.3.1 -* Fixed bug in :py:mod:`pylops.utils.backend` (see [Issue #606](https://github.com/PyLops/pylops/issues/606)) +* Fixed bug in `pylops.utils.backend` (see [Issue #606](https://github.com/PyLops/pylops/issues/606)) # 2.3.0 - * Added `pylops.JaxOperator`, `pylops.signalprocessing.DWTND`, and `pylops.signalprocessing.DTCWT` operators. * Added `updatesrc` method to `pylops.waveeqprocessing.AcousticWave2D`. * Added `verb` to `pylops.signalprocessing.Sliding1D.sliding1d_design`, `pylops.signalprocessing.Sliding2D.sliding2d_design`, `pylops.signalprocessing.Sliding3D.sliding3d_design`, `pylops.signalprocessing.Patch2D.patch2d_design`, and `pylops.signalprocessing.Patch3D.patch3d_design`. @@ -29,7 +36,6 @@ is now unmantained and merged into `cupy`). # 2.2.0 - * Added `pylops.signalprocessing.NonStationaryConvolve3D` operator * Added nd-array capabilities to `pylops.basicoperators.Identity` and `pylops.basicoperators.Zero` * Added second implementation in `pylops.waveeqprocessing.BlendingContinuous` which is more diff --git a/Makefile b/Makefile index 35cf0e1e..14a0a26b 100755 --- a/Makefile +++ b/Makefile @@ -21,7 +21,8 @@ install: dev-install: make pipcheck - $(PIP) install -r requirements-dev.txt && $(PIP) install -e . + $(PIP) install -r requirements-dev.txt &&\ + $(PIP) install -r requirements-torch.txt && $(PIP) install -e . install_conda: conda env create -f environment.yml && conda activate pylops && pip install . 
diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 00db3746..ab636d61 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -61,6 +61,7 @@ jobs: - script: | python -m pip install --upgrade pip setuptools wheel django pip install -r requirements-dev.txt + pip install -r requirements-torch.txt pip install . displayName: 'Install prerequisites and library' @@ -90,6 +91,7 @@ jobs: - script: | python -m pip install --upgrade pip setuptools wheel django pip install -r requirements-dev.txt + pip install -r requirements-torch.txt pip install . displayName: 'Install prerequisites and library' diff --git a/docs/source/_static/cupy_diagram.png b/docs/source/_static/cupy_diagram.png new file mode 100755 index 00000000..7eafad98 Binary files /dev/null and b/docs/source/_static/cupy_diagram.png differ diff --git a/docs/source/_static/numpy_cupy_bd_diagram.png b/docs/source/_static/numpy_cupy_bd_diagram.png new file mode 100755 index 00000000..00ae4a9a Binary files /dev/null and b/docs/source/_static/numpy_cupy_bd_diagram.png differ diff --git a/docs/source/_static/numpy_cupy_vs_diagram.png b/docs/source/_static/numpy_cupy_vs_diagram.png new file mode 100755 index 00000000..96e3b470 Binary files /dev/null and b/docs/source/_static/numpy_cupy_vs_diagram.png differ diff --git a/docs/source/api/index.rst b/docs/source/api/index.rst index 1d77dc15..0c23cf9a 100755 --- a/docs/source/api/index.rst +++ b/docs/source/api/index.rst @@ -28,6 +28,7 @@ Templates LinearOperator FunctionOperator MemoizeOperator + PyTensorOperator TorchOperator JaxOperator @@ -62,6 +63,8 @@ Basic operators Real Imag Conj + ToCupy + Smoothing and derivatives ~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -109,6 +112,8 @@ Signal processing Seislet Radon2D Radon3D + FourierRadon2D + FourierRadon3D ChirpRadon2D ChirpRadon3D Sliding1D diff --git a/docs/source/api/others.rst b/docs/source/api/others.rst index a26d0cd7..82f5a7b1 100755 --- a/docs/source/api/others.rst +++ b/docs/source/api/others.rst @@ -118,6 +118,7 @@ Synthetics seismicevents.parabolic2d seismicevents.hyperbolic2d seismicevents.linear3d + seismicevents.parabolic3d seismicevents.hyperbolic3d .. 
currentmodule:: pylops.waveeqprocessing diff --git a/docs/source/changelog.rst b/docs/source/changelog.rst index b5854475..43d59cac 100644 --- a/docs/source/changelog.rst +++ b/docs/source/changelog.rst @@ -3,7 +3,19 @@ Changelog ========= +Version 2.4.0 +------------- + +*Released on: 11/12/2024* + +* Added :py:class:`pylops.signalprocessing.FourierRadon2d` and + :py:class:`pylops.signalprocessing.FourierRadon3d` operators +* Added :py:class:`pylops.PyTensorOperator` operator +* Added :py:class:`pylops.ToCupy` operator +* Added :py:class:`pylops.utils.seismicevents.parabolic3d` method +* Fix bug in :py:class:`pylops.Restriction` when passing iava as cupy array + Version 2.3.1 ------------- @@ -511,7 +523,7 @@ Version 1.3.0 * Added :py:func:`pylops.optimization.sparsity.ISTA` and :py:func:`pylops.optimization.sparsity.FISTA` sparse solvers * Added possibility to broadcast (handle multi-dimensional arrays) - to :py:class:`pylops.Diagonal` and :py:func:`pylops..Restriction` operators + to :py:class:`pylops.Diagonal` and :py:class:`pylops.Restriction` operators * Added :py:class:`pylops.signalprocessing.Interp` operator * Added :py:class:`pylops.Spread` operator * Added :py:class:`pylops.signalprocessing.Radon2D` operator diff --git a/docs/source/conf.py b/docs/source/conf.py index c5e6536d..7c98f203 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -1,8 +1,10 @@ # -*- coding: utf-8 -*- -import sys -import os import datetime +import os +import sys + from sphinx_gallery.sorting import ExampleTitleSortKey + from pylops import __version__ # Sphinx needs to be able to import the package to use autodoc and get the version number @@ -37,6 +39,8 @@ "matplotlib": ("https://matplotlib.org/", None), "pyfftw": ("https://pyfftw.readthedocs.io/en/latest/", None), "spgl1": ("https://spgl1.readthedocs.io/en/latest/", None), + "pymc": ("https://www.pymc.io/", None), + "arviz": ("https://python.arviz.org/en/latest/", None), } # Generate autodoc stubs with summaries from code @@ -103,9 +107,7 @@ # These enable substitutions using |variable| in the rst files rst_epilog = """ .. |year| replace:: {year} -""".format( - year=year -) +""".format(year=year) html_static_path = ["_static"] html_last_updated_fmt = "%b %d, %Y" html_title = "PyLops" @@ -122,15 +124,16 @@ # Theme config html_theme = "pydata_sphinx_theme" html_theme_options = { - "logo_only": True, - "display_version": True, + "github_url": "https://github.com/PyLops/pylops", + # "logo_only": True, + # "display_version": True, "logo": { "image_light": "pylops_b.png", "image_dark": "pylops.png", } } html_css_files = [ - 'css/custom.css', + "css/custom.css", ] html_context = { diff --git a/docs/source/gpu.rst b/docs/source/gpu.rst index a2d7d9f1..842770b2 100755 --- a/docs/source/gpu.rst +++ b/docs/source/gpu.rst @@ -29,6 +29,172 @@ provide data vectors to the solvers, e.g., when using For JAX, apart from following the same procedure described for CuPy, the PyLops operator must be also wrapped into a :class:`pylops.JaxOperator`. +See below for a comphrensive list of supported operators and additional functionalities for both the +``cupy`` and ``jax`` backends. + + +Examples +-------- + +Let's now briefly look at some use cases. + +End-to-end GPU powered inverse problems +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +First we consider the most common scenario when both the model and data +vectors fit onto the GPU memory. 
We can therefore simply replace all our +``numpy`` arrays with ``cupy`` arrays and solve the inverse problem of +interest end-to-end on the GPU. + +.. image:: _static/cupy_diagram.png + :width: 600 + :align: center + +Let's first write a code snippet using ``numpy`` arrays, which PyLops +will run on your CPU: + +.. code-block:: python + + ny, nx = 400, 400 + G = np.random.normal(0, 1, (ny, nx)).astype(np.float32) + x = np.ones(nx, dtype=np.float32) + + # Create operator + Gop = MatrixMult(G, dtype='float32') + + # Create data and invert + y = Gop @ x + xest = Gop / y + +Now we write a code snippet using ``cupy`` arrays, which PyLops will run on +your GPU: + +.. code-block:: python + + ny, nx = 400, 400 + G = cp.random.normal(0, 1, (ny, nx)).astype(np.float32) + x = cp.ones(nx, dtype=np.float32) + + # Create operator + Gop = MatrixMult(G, dtype='float32') + + # Create data and invert + y = Gop @ x + xest = Gop / y + +The code is almost unchanged apart from the fact that we now use ``cupy`` arrays; +PyLops will figure this out. + +Similarly, we write a code snippet using ``jax`` arrays, which PyLops will run on +your GPU/TPU: + +.. code-block:: python + + ny, nx = 400, 400 + G = jnp.array(np.random.normal(0, 1, (ny, nx)).astype(np.float32)) + x = jnp.ones(nx, dtype=np.float32) + + # Create operator + Gop = JaxOperator(MatrixMult(G, dtype='float32')) + + # Create data and invert + y = Gop @ x + xest = Gop / y + + # Adjoint via AD + xadj = Gop.rmatvecad(x, y) + +Again, the code is almost unchanged apart from the fact that we now use ``jax`` arrays. + + +Mixed CPU-GPU powered inverse problems +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Let us now consider a more intricate scenario where we have access to +a GPU-powered operator; however, the model and/or data vectors are too large +to fit onto the GPU memory (or VRAM). + +For the sake of clarity, we consider a problem where +the operator can be written as a :class:`pylops.BlockDiag` of +PyLops operators. Note how, by simply sandwiching any of the GPU-powered +operators within two :class:`pylops.ToCupy` operators, we are +able to tell PyLops to transfer to the GPU only the part of the model vector +required by a given operator and transfer back the output to the CPU before +forming the combined output vector (i.e., the output vector of the +:class:`pylops.BlockDiag`). + +.. image:: _static/numpy_cupy_bd_diagram.png + :width: 1000 + :align: center + +.. code-block:: python + + nops, n = 5, 4 + Ms = [np.diag((i + 1) * np.ones(n, dtype=dtype)) \ + for i in range(nops)] + Ms = [M.T @ M for M in Ms] + + # Create operator + Mops = [] + for iop in range(nops): + Mop = MatrixMult(cp.asarray(Ms[iop], dtype=dtype)) + Top = ToCupy(Mop.dims, dtype=dtype) + Top1 = ToCupy(Mop.dimsd, dtype=dtype) + Mop = Top1.H @ Mop @ Top + Mops.append(Mop) + Mops = BlockDiag(Mops, forceflat=True) + + # Create data and invert + x = np.ones(n * nops, dtype=dtype) + y = Mops @ x.ravel() + xest = Mops / y + + +Finally, let us consider a problem where +the operator can be written as a :class:`pylops.VStack` of +PyLops operators and the model vector can be fully transferred to the GPU. +We can again use the :class:`pylops.ToCupy` operator; however, this +time we will only use it to move the output of each operator to the CPU. +Since we are now in a special scenario, where the input of the overall +operator sits on the GPU and the output on the +CPU, we need to inform the :class:`pylops.VStack` operator about this. +This can be easily done using the additional ``inoutengine`` parameter.
Let's +see this with an example. + +.. image:: _static/numpy_cupy_vs_diagram.png + :width: 1000 + :align: center + +.. code-block:: python + + nops, n, m = 3, 4, 5 + Ms = [np.random.normal(0, 1, (n, m)) for _ in range(nops)] + + # Create operator + Mops = [] + for iop in range(nops): + Mop = MatrixMult(cp.asarray(Ms[iop]), dtype=dtype) + Top1 = ToCupy(Mop.dimsd, dtype=dtype) + Mop = Top1.H @ Mop + Mops.append(Mop) + Mops = VStack(Mops, inoutengine=("numpy", "cupy")) + + # Create data and invert + x = cp.ones(m, dtype=dtype) + y = Mops @ x.ravel() + xest = pylops_cgls(Mops, y, x0=cp.zeros_like(x))[0] + +These features are currently not available for ``jax`` arrays. + + +.. note:: + + More examples for the CuPy and JAX backends be found at `link1 `_ + and `link2 `_. + + +Supported Operators +------------------- In the following, we provide a list of methods in :class:`pylops.LinearOperator` with their current status (available on CPU, GPU with CuPy, and GPU with JAX): @@ -195,6 +361,7 @@ Smoothing and derivatives: - |:white_check_mark:| - |:white_check_mark:| + Signal processing: .. list-table:: @@ -281,6 +448,14 @@ Signal processing: - |:white_check_mark:| - |:red_circle:| - |:red_circle:| + * - :class:`pylops.signalprocessing.FourierRadon2D` + - |:white_check_mark:| + - |:white_check_mark:| + - |:red_circle:| + * - :class:`pylops.signalprocessing.FourierRadon3D` + - |:white_check_mark:| + - |:white_check_mark:| + - |:red_circle:| * - :class:`pylops.signalprocessing.ChirpRadon2D` - |:white_check_mark:| - |:white_check_mark:| @@ -314,6 +489,7 @@ Signal processing: - |:white_check_mark:| - |:white_check_mark:| + Wave-Equation processing .. list-table:: @@ -361,6 +537,7 @@ Wave-Equation processing - |:red_circle:| - |:red_circle:| + Geophysical subsurface characterization: .. list-table:: @@ -399,60 +576,3 @@ Geophysical subsurface characterization: operator currently works only with ``explicit=True`` due to the same issue as in point 1 for the :class:`pylops.signalprocessing.Convolve1D` operator employed when ``explicit=False``. - - -Example -------- - -Finally, let's briefly look at an example. First we write a code snippet using -``numpy`` arrays which PyLops will run on your CPU: - -.. code-block:: python - - ny, nx = 400, 400 - G = np.random.normal(0, 1, (ny, nx)).astype(np.float32) - x = np.ones(nx, dtype=np.float32) - - Gop = MatrixMult(G, dtype='float32') - y = Gop * x - xest = Gop / y - -Now we write a code snippet using ``cupy`` arrays which PyLops will run on -your GPU: - -.. code-block:: python - - ny, nx = 400, 400 - G = cp.random.normal(0, 1, (ny, nx)).astype(np.float32) - x = cp.ones(nx, dtype=np.float32) - - Gop = MatrixMult(G, dtype='float32') - y = Gop * x - xest = Gop / y - -The code is almost unchanged apart from the fact that we now use ``cupy`` arrays, -PyLops will figure this out. - -Similarly, we write a code snippet using ``jax`` arrays which PyLops will run on -your GPU/TPU: - -.. code-block:: python - - ny, nx = 400, 400 - G = jnp.array(np.random.normal(0, 1, (ny, nx)).astype(np.float32)) - x = jnp.ones(nx, dtype=np.float32) - - Gop = JaxOperator(MatrixMult(G, dtype='float32')) - y = Gop * x - xest = Gop / y - - # Adjoint via AD - xadj = Gop.rmatvecad(x, y) - - -Again, the code is almost unchanged apart from the fact that we now use ``jax`` arrays, - -.. note:: - - More examples for the CuPy and JAX backends be found `here `_ - and `here `_. 
\ No newline at end of file diff --git a/docs/source/installation.rst b/docs/source/installation.rst index 094f09bd..c0eff42d 100755 --- a/docs/source/installation.rst +++ b/docs/source/installation.rst @@ -437,6 +437,29 @@ You may need to set the environment variable ``NUMBA_NUM_THREADS`` define how ma It can also be checked dynamically with ``numba.config.NUMBA_DEFAULT_NUM_THREADS``. +PyMC and PyTensor +----------------- +`PyTensor `_ is used to allow seamless integration between PyLops and +`PyMC `_ operators. +Install both of them via ``conda`` with: + +.. code-block:: bash + + conda install -c conda-forge pytensor pymc + +or via ``pip`` with + +.. code-block:: bash + + >> pip install pytensor pymc + +.. note:: + PyTensor does not support NumPy 2 yet, so make sure you use NumPy 1.x with PyTensor and PyMC. + +.. note:: + OSX users may experience a ``CompileError`` error when using PyTensor. This can be solved by adding + ``pytensor.config.gcc__cxxflags = "-Wno-c++11-narrowing"`` after ``import pytensor``. + PyWavelets ---------- `PyWavelets `_ is used to implement the wavelet operators. diff --git a/examples/plot_bayeslinearregr.py b/examples/plot_bayeslinearregr.py new file mode 100644 index 00000000..42417eb1 --- /dev/null +++ b/examples/plot_bayeslinearregr.py @@ -0,0 +1,235 @@ +r""" +Bayesian Linear Regression +========================== + +In the :ref:`sphx_glr_gallery_plot_linearregr.py` example, we +performed linear regression by applying a variety of solvers to the +:py:class:`pylops.LinearRegression` operator. + +In this example, we will apply linear regression the Bayesian way. +In Bayesian inference, we are not looking for a "best" estimate +of the linear regression parameters; rather, we are looking for +all possible parameters and their associated (posterior) probability, +that is, how likely that those are the parameters that generated our data. + +To do this, we will leverage the probabilistic programming library +`PyMC `_. + +In the Bayesian formulation, we write the problem in the following manner: + + .. math:: + y_i \sim N(x_0 + x_1 t_i, \sigma) \qquad \forall i=0,1,\ldots,N-1 + +where :math:`x_0` is the intercept and :math:`x_1` is the gradient. +This notation means that the obtained measurements :math:`y_i` are normally distributed around +mean :math:`x_0 + x_1 t_i` with a standard deviation of :math:`\sigma`. +We can also express this problem in a matrix form, which makes it clear that we +can use a PyLops operator to describe this relationship. + + .. math:: + \mathbf{y} \sim N(\mathbf{A} \mathbf{x}, \sigma) + +In this example, we will combine the Bayesian power of PyMC with the linear language of +PyLops. +""" + +import arviz as az +import matplotlib.pyplot as plt +import numpy as np +import pymc as pm + +import pylops + +plt.close("all") +np.random.seed(10) + +############################################################################### +# Define the input parameters: number of samples along the t-axis (``N``), +# linear regression coefficients (``x``), and standard deviation of noise +# to be added to data (``sigma``). 
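Since the example below relies on PyTensor, the OSX workaround from the installation note above boils down to two lines; a minimal sketch (flag value taken verbatim from that note):

.. code-block:: python

    import pytensor

    # Avoid the CompileError reported by some OSX users (see installation note)
    pytensor.config.gcc__cxxflags = "-Wno-c++11-narrowing"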
+N = 30 +x = np.array([1.0, 0.5]) +sigma = 0.25 + +############################################################################### +# Let's create the time axis and initialize the +# :py:class:`pylops.LinearRegression` operator +t = np.linspace(0, 1, N) +LRop = pylops.LinearRegression(t, dtype=t.dtype) + +############################################################################### +# We can then apply the operator in forward mode to compute our data points +# along the x-axis (``y``). We will also generate some random Gaussian noise +# and create a noisy version of the data (``yn``). +y = LRop @ x +yn = y + np.random.normal(0, sigma, N) + +############################################################################### +# The deterministic solution is to solve the system +# :math:`\mathbf{y} = \mathbf{A} \mathbf{x}` in a least-squares sense. +# Using PyLops, the ``/`` operator solves this iteratively (i.e., using +# :py:func:`scipy.sparse.linalg.lsqr`). +# In Bayesian terminology, this estimator is known as the maximum likelihood +# estimate (MLE). +x_mle = LRop / yn +noise_mle = np.sqrt(np.sum((yn - LRop @ x_mle) ** 2) / (N - 1)) + +############################################################################### +# Alternatively, we may regularize the problem. In this case, to condition +# the solution towards smaller-magnitude parameters, we can use a regularized +# least-squares approach. Since the weight is pretty small, we expect the +# result to be very similar to the one above. +sigma_prior = 20 +eps = 1 / np.sqrt(2) / sigma_prior +x_map, *_ = pylops.optimization.basic.lsqr(LRop, yn, damp=eps) +noise_map = np.sqrt(np.sum((yn - LRop @ x_map) ** 2) / (N - 1)) + +############################################################################### +# Let's plot the best-fitting line for the case of noise-free and noisy data +fig, ax = plt.subplots(figsize=(8, 4)) +for est, est_label, c in zip( + [x, x_mle, x_map], ["True", "MLE", "MAP"], ["k", "C0", "C1"] +): + ax.plot( + np.array([t.min(), t.max()]), + np.array([t.min(), t.max()]) * est[1] + est[0], + color=c, + ls="--" if est_label == "MAP" else "-", + lw=4, + label=rf"{est_label}: $x_0$ = {est[0]:.2f}, $x_1$ = {est[1]:.2f}", + ) +ax.scatter(t, y, c="r", s=70) +ax.scatter(t, yn, c="g", s=70) +ax.legend() +fig.tight_layout() + +############################################################################### +# Let's solve this problem the Bayesian way, which consists of obtaining the +# posterior probability :math:`p(\mathbf{x}\,|\,\mathbf{y})` via Bayes' theorem: +# +# .. math:: +# \underbrace{p(\mathbf{x} \,|\, \mathbf{y})}_{\text{posterior}} +# \propto \overbrace{p(\mathbf{y} \,|\, \mathbf{x})}^{\text{likelihood}}\; +# \overbrace{p(\mathbf{x})}^{\text{prior}} +# +# To do so, we need to define the priors and the likelihood. +# +# As hinted above, priors in Bayesian analysis can be interpreted as the +# probabilistic equivalent of regularization. Finding the maximum a posteriori +# (MAP) estimate of a least-squares problem with a Gaussian prior on the +# parameters is equivalent to applying a Tikhonov (L2) regularization to these +# parameters. A Laplace prior is equivalent to a sparse (L1) regularization. +# In addition, the weight of the regularization is controlled by the "scale" of +# the distribution of the prior; the standard deviation (in the case of a Gaussian) +# is inversely proportional to the strength of the regularization.
So if we use the same +# sigma_prior above as the standard deviation of our prior distribition, we +# should get the same MAP out of them. In practice, in Bayesian analysis we are +# not only interested in point estimates like MAP, but rather, the whole +# posterior distribution. If you want the MAP only, there are better, +# methods to obtain them, such as the one shown above. +# +# In this problem we will use weak, not very informative priors, by setting +# their prior to accept a wide range of probable values. This is equivalent to +# setting the "weights" to be small, as shown above: +# +# .. math:: +# x_0 \sim N(0, 20) +# +# x_1 \sim N(0, 20) +# +# \sigma \sim \text{HalfCauchy}(10) +# +# The (log) likelihood in Bayesian analysis is the equivalent of the cost +# function in deterministic inverse problems. In this case we have already +# seen this likelihood: +# +# .. math:: +# p(\mathbf{y}\,|\,\mathbf{x}) \sim N(\mathbf{A}\mathbf{x}, \sigma) +# + +# Construct a PyTensor `Op` which can be used in a PyMC model. +pytensor_lrop = pylops.PyTensorOperator(LRop) +dims = pytensor_lrop.dims # Inherits dims, dimsd and shape from LRop + +# Construct the PyMC model +with pm.Model() as model: + y_data = pm.Data("y_data", yn) + + # Define priors + sp = pm.HalfCauchy("σ", beta=10) + xp = pm.Normal("x", 0, sigma=sigma_prior, shape=dims) + mu = pm.Deterministic("mu", pytensor_lrop(xp)) + + # Define likelihood + likelihood = pm.Normal("y", mu=mu, sigma=sp, observed=y_data) + + # Inference! + idata = pm.sample(500, tune=200, chains=2) + +############################################################################### +# The plot below is known as the "trace" plot. The left column displays the +# posterior distributions of all latent variables in the model. The top-left +# plot has multiple colored posteriors, one for each parameter of the latent +# vector :math:`\mathbf{x}`. The bottom left plot displays the posterior of the +# estimated noise :math:`\sigma`. +# +# In these plots there are multiple distributions of the same color and +# multiple line styles. Each of these represents a "chain". A chain is a single +# run of a Monte Carlo algorithm. Generally, Monte Carlo methods run various +# chains to ensure that all regions of the posterior distribution are sampled. +# These chains are shown on the right hand plots. + +axes = az.plot_trace(idata, figsize=(10, 7), var_names=["~mu"]) +axes[0, 0].axvline(x[0], label="True Intercept", lw=2, color="k") +axes[0, 0].axvline(x_map[0], label="Intercept MAP", lw=2, color="C0", ls="--") +axes[0, 0].axvline(x[1], label="True Slope", lw=2, color="darkgray") +axes[0, 0].axvline(x_map[1], label="Slope MAP", lw=2, color="C1", ls="--") +axes[0, 1].axhline(x[0], label="True Intercept", lw=2, color="k") +axes[0, 1].axhline(x_map[0], label="Intercept MAP", lw=2, color="C0", ls="--") +axes[0, 1].axhline(x[1], label="True Slope", lw=2, color="darkgray") +axes[0, 1].axhline(x_map[1], label="Slope MAP", lw=2, color="C1", ls="--") +axes[1, 0].axvline(sigma, label="True Sigma", lw=2, color="k") +axes[1, 0].axvline(noise_map, label="Sigma MAP", lw=2, color="C0", ls="--") +axes[1, 1].axhline(sigma, label="True Sigma", lw=2, color="k") +axes[1, 1].axhline(noise_map, label="Sigma MAP", lw=2, color="C0", ls="--") +for ax in axes.ravel(): + ax.legend() +ax.get_figure().tight_layout() + +################################################################################ +# With this model, we can obtain an uncertainty measurement via the High Density +# Interval. 
To do that, we need to sample the "predictive posterior", that is, +# the posterior distribution of the data, given the model. What this does is +# sample the latent vectors from their posteriors (above), and use the model +# to construct realizations of the data from these samples. They represent +# what the model thinks the data should look like, given everything it has +# already seen. + +with model: + pm.sample_posterior_predictive(idata, extend_inferencedata=True) + +############################################################################### +# sphinx_gallery_thumbnail_number = 3 +fig, ax = plt.subplots(figsize=(8, 4)) +az.plot_hdi( + t, + idata.posterior_predictive["y"], + fill_kwargs={"label": "95% HDI"}, + hdi_prob=0.95, + ax=ax, +) +for est, est_label, c in zip( + [x, x_mle, x_map], ["True", "MLE", "MAP"], ["k", "C0", "C1"] +): + ax.plot( + np.array([t.min(), t.max()]), + np.array([t.min(), t.max()]) * est[1] + est[0], + color=c, + ls="--" if est_label == "MAP" else "-", + lw=4, + label=rf"{est_label}: $x_0$ = {est[0]:.2f}, $x_1$ = {est[1]:.2f}", + ) +ax.scatter(t, y, c="r", s=70) +ax.scatter(t, yn, c="g", s=70) +ax.legend() +fig.tight_layout() diff --git a/examples/plot_fourierradon.py b/examples/plot_fourierradon.py new file mode 100644 index 00000000..abe94819 --- /dev/null +++ b/examples/plot_fourierradon.py @@ -0,0 +1,325 @@ +r""" +Fourier Radon Transform +======================= +This example shows how to use the :py:class:`pylops.signalprocessing.FourierRadon2D` +and :py:class:`pylops.signalprocessing.FourierRadon3D` operators to apply the linear +and parabolic Radon Transform to 2-dimensional or 3-dimensional signals, respectively. + +These operators provide transformations equivalent to those of +:py:class:`pylops.signalprocessing.Radon2D` and :py:class:`pylops.signalprocessing.Radon3D`; +however, since the shift-and-sum step is performed in the frequency domain, +it is analytically correct (as opposed to shifting the data via +nearest or linear interpolation). + +""" + +import matplotlib.pyplot as plt +import numpy as np + +import pylops + +plt.close("all") + +############################################################################### +# Let's start by creating an empty 2d matrix of size :math:`n_x \times n_t` +# with a single linear event. + +par = { + "ot": 0, + "dt": 0.004, + "nt": 51, + "ox": -250, + "dx": 10, + "nx": 51, + "oy": -250, + "dy": 10, + "ny": 51, + "f0": 40, +} +theta = [10.0] +t0 = [0.1] +amp = [1.0] + +# Create axes +t, t2, x, y = pylops.utils.seismicevents.makeaxis(par) +dt, dx, dy = par["dt"], par["dx"], par["dy"] + +# Create wavelet +wav, _, wav_c = pylops.utils.wavelets.ricker(t[:41], f0=par["f0"]) + +# Generate data +_, d = pylops.utils.seismicevents.linear2d(x, t, 1500.0, t0, theta, amp, wav) + + +############################################################################### +# We can now define our operators and apply the forward and adjoint +# steps.
+nfft = int(2 ** np.ceil(np.log2(par["nt"]))) +npx, pxmax = 2 * par["nx"], 5e-4 +px = np.linspace(-pxmax, pxmax, npx) + +R2Op = pylops.signalprocessing.FourierRadon2D( + t, x, px, nfft, kind="linear", engine="numpy", dtype="float64" +) +dL = R2Op.H * d +dadj = R2Op * dL + +fig, axs = plt.subplots(1, 3, figsize=(12, 4), sharey=True) +axs[0].imshow(d.T, vmin=-1, vmax=1, cmap="bwr_r", extent=(x[0], x[-1], t[-1], t[0])) +axs[0].set(xlabel=r"$x$ [m]", ylabel=r"$t$ [s]", title="Input linear") +axs[0].axis("tight") +axs[1].imshow( + dL.T, + cmap="bwr_r", + vmin=-dL.max(), + vmax=dL.max(), + extent=(1e3 * px[0], 1e3 * px[-1], t[-1], t[0]), +) +axs[1].scatter(1e3 * np.sin(np.deg2rad(theta[0])) / 1500.0, t0[0], s=50, color="k") +axs[1].set(xlabel=r"$p$ [s/km]", title="Radon") +axs[1].axis("tight") +axs[2].imshow( + dadj.T, + cmap="bwr_r", + vmin=-dadj.max(), + vmax=dadj.max(), + extent=(x[0], x[-1], t[-1], t[0]), +) +axs[2].set(xlabel=r"$x$ [m]", title="Adj Radon") +axs[2].axis("tight") +plt.tight_layout() + + +############################################################################### +# We repeat now the same with a parabolic event + +# Generate data +pxx = [1e-6] +_, d = pylops.utils.seismicevents.parabolic2d(x, t, t0, 0, np.array(pxx), amp, wav) + +# Radon transform +npx, pxmax = 2 * par["nx"], 5e-6 +px = np.linspace(-pxmax, pxmax, npx) + +R2Op = pylops.signalprocessing.FourierRadon2D( + t, x, px, nfft, kind="parabolic", engine="numpy", dtype="float64" +) +dL = R2Op.H * d +dadj = R2Op * dL + +fig, axs = plt.subplots(1, 3, figsize=(12, 4), sharey=True) +axs[0].imshow(d.T, vmin=-1, vmax=1, cmap="bwr_r", extent=(x[0], x[-1], t[-1], t[0])) +axs[0].set(xlabel=r"$x$ [m]", ylabel=r"$t$ [s]", title="Input parabolic") +axs[0].axis("tight") +axs[1].imshow( + dL.T, + cmap="bwr_r", + vmin=-dL.max(), + vmax=dL.max(), + extent=(1e3 * px[0], 1e3 * px[-1], t[-1], t[0]), +) +axs[1].scatter(1e3 * pxx[0], t0[0], s=50, color="k") +axs[1].set(xlabel=r"$p$ [s/km]", title="Radon") +axs[1].axis("tight") +axs[2].imshow( + dadj.T, + cmap="bwr_r", + vmin=-dadj.max(), + vmax=dadj.max(), + extent=(x[0], x[-1], t[-1], t[0]), +) +axs[2].set(xlabel=r"$x$ [m]", title="Adj Radon") +axs[2].axis("tight") +plt.tight_layout() + +############################################################################### +# Finally we repeat the same exercise with 3d data. 
+ +par = { + "ot": 0, + "dt": 0.004, + "nt": 51, + "ox": -100, + "dx": 10, + "nx": 21, + "oy": -200, + "dy": 10, + "ny": 41, + "f0": 20, +} +theta = [30] +phi = [10] +t0 = [0.1] +amp = [1.0] + +# Create axes +t, t2, x, y = pylops.utils.seismicevents.makeaxis(par) +dt, dx, dy = par["dt"], par["dx"], par["dy"] + +# Generate linear data +pxx = np.sin(np.deg2rad(theta[0])) * np.cos(np.deg2rad(phi[0])) / 1500.0 +pyy = np.sin(np.deg2rad(theta[0])) * np.sin(np.deg2rad(phi[0])) / 1500.0 +_, d = pylops.utils.seismicevents.linear3d(x, y, t, 1500.0, t0, theta, phi, amp, wav) + +# Linear Radon +npy, pymax = par["ny"], 5e-4 +npx, pxmax = par["nx"], 5e-4 +py = np.linspace(-pymax, pymax, npy) +px = np.linspace(-pxmax, pxmax, npx) + +R3Op = pylops.signalprocessing.FourierRadon3D( + t, y, x, py, px, nfft, kind=("linear", "linear"), engine="numpy", dtype="float64" +) +dL = R3Op.H * d +dadj = R3Op * dL + +fig, axs = plt.subplots(1, 3, figsize=(12, 4), sharey=True) +axs[0].imshow( + d[par["ny"] // 2].T, + vmin=-1, + vmax=1, + cmap="bwr_r", + extent=(x[0], x[-1], t[-1], t[0]), +) +axs[0].set(xlabel=r"$x$ [m]", ylabel=r"$t$ [s]", title="Input linear 3d - y") +axs[0].axis("tight") +axs[1].imshow( + dL[np.argmin(np.abs(pyy - py))].T, + cmap="bwr_r", + vmin=-dL.max(), + vmax=dL.max(), + extent=(1e3 * px[0], 1e3 * px[-1], t[-1], t[0]), +) +axs[1].scatter(1e3 * pxx, t0[0], s=50, color="k") +axs[1].set(xlabel=r"$p_x$ [s/km]", title="Radon 3d - y") +axs[1].axis("tight") +axs[2].imshow( + dadj[par["ny"] // 2].T, + cmap="bwr_r", + vmin=-dadj.max(), + vmax=dadj.max(), + extent=(x[0], x[-1], t[-1], t[0]), +) +axs[2].set(xlabel=r"$x$ [m]", title="Adj Radon 3d - y") +axs[2].axis("tight") +plt.tight_layout() + +fig, axs = plt.subplots(1, 3, figsize=(12, 4), sharey=True) +axs[0].imshow( + d[:, par["nx"] // 2].T, + vmin=-1, + vmax=1, + cmap="bwr_r", + extent=(x[0], x[-1], t[-1], t[0]), +) +axs[0].set(xlabel=r"$y$ [m]", ylabel=r"$t$ [s]", title="Input linear 3d - x") +axs[0].axis("tight") +axs[1].imshow( + dL[:, np.argmin(np.abs(pxx - px))].T, + cmap="bwr_r", + vmin=-dL.max(), + vmax=dL.max(), + extent=(1e3 * py[0], 1e3 * py[-1], t[-1], t[0]), +) +axs[1].scatter(1e3 * pyy, t0[0], s=50, color="k") +axs[1].set(xlabel=r"$p_y$ [s/km]", title="Radon 3d - x") +axs[1].axis("tight") +axs[2].imshow( + dadj[:, par["nx"] // 2].T, + cmap="bwr_r", + vmin=-dadj.max(), + vmax=dadj.max(), + extent=(x[0], x[-1], t[-1], t[0]), +) +axs[2].set(xlabel=r"$y$ [m]", title="Adj Radon 3d - x") +axs[2].axis("tight") +plt.tight_layout() + +# Generate parabolic data +pxx = [1e-6] +pyy = [2e-6] +_, d = pylops.utils.seismicevents.parabolic3d( + x, y, t, t0, 0, 0, np.array(pxx), np.array(pyy), amp, wav +) + +# Parabolic Radon +npy, pymax = par["ny"], 5e-6 +npx, pxmax = par["nx"], 5e-6 +py = np.linspace(-pymax, pymax, npy) +px = np.linspace(-pxmax, pxmax, npx) + +R3Op = pylops.signalprocessing.FourierRadon3D( + t, + y, + x, + py, + px, + nfft, + kind=("parabolic", "parabolic"), + engine="numpy", + dtype="float64", +) +dL = R3Op.H * d +dadj = R3Op * dL + +fig, axs = plt.subplots(1, 3, figsize=(12, 4), sharey=True) +axs[0].imshow( + d[par["ny"] // 2].T, + vmin=-1, + vmax=1, + cmap="bwr_r", + extent=(x[0], x[-1], t[-1], t[0]), +) +axs[0].set(xlabel=r"$x$ [m]", ylabel=r"$t$ [s]", title="Input parabolic 3d - y") +axs[0].axis("tight") +axs[1].imshow( + dL[np.argmin(np.abs(pyy - py))].T, + cmap="bwr_r", + vmin=-dL.max(), + vmax=dL.max(), + extent=(1e3 * px[0], 1e3 * px[-1], t[-1], t[0]), +) +axs[1].scatter(1e3 * pxx[0], t0[0], s=50, color="k") 
+axs[1].set(xlabel=r"$p_x$ [s/km]", title="Radon 3d - y") +axs[1].axis("tight") +axs[2].imshow( + dadj[par["ny"] // 2].T, + cmap="bwr_r", + vmin=-dadj.max(), + vmax=dadj.max(), + extent=(x[0], x[-1], t[-1], t[0]), +) +axs[2].set(xlabel=r"$x$ [m]", title="Adj Radon 3d - y") +axs[2].axis("tight") +plt.tight_layout() + +fig, axs = plt.subplots(1, 3, figsize=(12, 4), sharey=True) +axs[0].imshow( + d[:, par["nx"] // 2].T, + vmin=-1, + vmax=1, + cmap="bwr_r", + extent=(x[0], x[-1], t[-1], t[0]), +) +axs[0].set(xlabel=r"$y$ [m]", ylabel=r"$t$ [s]", title="Input parabolic 3d - x") +axs[0].axis("tight") +axs[1].imshow( + dL[:, np.argmin(np.abs(pxx - px))].T, + cmap="bwr_r", + vmin=-dL.max(), + vmax=dL.max(), + extent=(1e3 * py[0], 1e3 * py[-1], t[-1], t[0]), +) +axs[1].scatter(1e3 * pyy[0], t0[0], s=50, color="k") +axs[1].set(xlabel=r"$p_y$ [s/km]", title="Radon 3d - x") +axs[1].axis("tight") +axs[2].imshow( + dadj[:, par["nx"] // 2].T, + cmap="bwr_r", + vmin=-dadj.max(), + vmax=dadj.max(), + extent=(x[0], x[-1], t[-1], t[0]), +) +axs[2].set(xlabel=r"$y$ [m]", title="Adj Radon 3d - x") +axs[2].axis("tight") +plt.tight_layout() diff --git a/examples/plot_seismicevents.py b/examples/plot_seismicevents.py index 27b6e9fc..03ebaaf5 100644 --- a/examples/plot_seismicevents.py +++ b/examples/plot_seismicevents.py @@ -106,11 +106,21 @@ ############################################ # Let's finally repeat the same exercise in 3d phi = [20, 0, -10] +py = [0, 0, 0] +pyy = [3e-5, 1e-5, 5e-6] mlin, mlinwav = pylops.utils.seismicevents.linear3d( x, y, t, v, t0, theta, phi, amp, wav ) +mpar, mparwav = pylops.utils.seismicevents.parabolic3d( + x, y, t, t0, px, py, pxx, pyy, amp, wav +) + +mhyp, mhypwav = pylops.utils.seismicevents.hyperbolic3d( + x, y, t, t0, vrms, vrms, amp, wav +) + fig, axs = plt.subplots(1, 2, figsize=(7, 5), sharey=True) fig.suptitle("Linear events in 3d", fontsize=12, fontweight="bold", y=0.95) axs[0].imshow( @@ -135,9 +145,30 @@ ) axs[1].set_xlabel(r"$y(m)$") -mhyp, mhypwav = pylops.utils.seismicevents.hyperbolic3d( - x, y, t, t0, vrms, vrms, amp, wav +fig, axs = plt.subplots(1, 2, figsize=(7, 5), sharey=True) +fig.suptitle("Parabolic events in 3d", fontsize=12, fontweight="bold", y=0.95) +axs[0].imshow( + mparwav[par["ny"] // 2].T, + aspect="auto", + interpolation="nearest", + vmin=-2, + vmax=2, + cmap="gray", + extent=(x.min(), x.max(), t.max(), t.min()), +) +axs[0].set_xlabel(r"$x(m)$") +axs[0].set_ylabel(r"$t(s)$") +axs[1].imshow( + mparwav[:, par["nx"] // 2].T, + aspect="auto", + interpolation="nearest", + vmin=-2, + vmax=2, + cmap="gray", + extent=(y.min(), y.max(), t.max(), t.min()), ) +axs[1].set_xlabel(r"$y(m)$") + fig, axs = plt.subplots(1, 2, figsize=(7, 5), sharey=True) fig.suptitle("Hyperbolic events in 3d", fontsize=12, fontweight="bold", y=0.95) diff --git a/examples/plot_slopeest.py b/examples/plot_slopeest.py index 89938a4a..22cf9096 100755 --- a/examples/plot_slopeest.py +++ b/examples/plot_slopeest.py @@ -14,7 +14,7 @@ precondition sparsity-promoting inverse problems. We will show examples of a variety of different settings, including a comparison -with the original implementation in [1]. +with the original implementation in [1]_. .. [1] van Vliet, L. J., Verbeek, P. W., "Estimators for orientation and anisotropy in digitized images", Journal ASCI Imaging Workshop. 1995. 
@@ -145,7 +145,7 @@ ############################################################################### # Concentric circles # ------------------ -# The original paper by van Vliet and Verbeek [1] has an example with concentric +# The original paper by van Vliet and Verbeek [1]_ has an example with concentric # circles. We recover their original images and compare our implementation with # theirs. @@ -215,7 +215,7 @@ def rgb2gray(rgb): ############################################################################### # Core samples # ------------------ -# The original paper by van Vliet and Verbeek [1] also has an example with images +# The original paper by van Vliet and Verbeek [1]_ also has an example with images # of core samples. Since the original paper does not have a scale with which to # plot the angles, we have chosen ours it to match their image as closely as # possible. diff --git a/pylops/__init__.py b/pylops/__init__.py index 7672fda4..b1ee04c8 100755 --- a/pylops/__init__.py +++ b/pylops/__init__.py @@ -48,6 +48,7 @@ from .config import * from .linearoperator import * from .torchoperator import * +from .pytensoroperator import * from .jaxoperator import * from .basicoperators import * from . import ( diff --git a/pylops/basicoperators/__init__.py b/pylops/basicoperators/__init__.py index d654e50d..5d3a3d3e 100755 --- a/pylops/basicoperators/__init__.py +++ b/pylops/basicoperators/__init__.py @@ -38,6 +38,7 @@ Gradient Gradient. FirstDirectionalDerivative First Directional derivative. SecondDirectionalDerivative Second Directional derivative. + ToCupy Convert to CuPy. """ from .functionoperator import * @@ -72,6 +73,8 @@ from .laplacian import * from .gradient import * from .directionalderivative import * +from .tocupy import * + __all__ = [ "FunctionOperator", @@ -107,4 +110,5 @@ "Gradient", "FirstDirectionalDerivative", "SecondDirectionalDerivative", + "ToCupy", ] diff --git a/pylops/basicoperators/_spread_numba.py b/pylops/basicoperators/_spread_numba.py index 2416d67b..84012593 100644 --- a/pylops/basicoperators/_spread_numba.py +++ b/pylops/basicoperators/_spread_numba.py @@ -1,3 +1,4 @@ +import math import os from numba import jit, prange @@ -21,8 +22,8 @@ def _matvec_numba_table(x, y, dims, interp, table, dtable): dindices = dtable[ix0, it] for i, indexfloat in enumerate(indices): - index = int(indexfloat) - if index != -9223372036854775808: # =int(np.nan) + if not math.isnan(indexfloat): + index = int(indexfloat) if not interp: y[i, index] += x[ix0, it] else: @@ -45,8 +46,8 @@ def _rmatvec_numba_table(x, y, dims, dimsd, interp, table, dtable): dindices = dtable[ix0, it] for i, indexfloat in enumerate(indices): - index = int(indexfloat) - if index != -9223372036854775808: # =int(np.nan) + if not math.isnan(indexfloat): + index = int(indexfloat) if not interp: y[ix0, it] += x[i, index] else: @@ -71,8 +72,8 @@ def _matvec_numba_onthefly(x, y, dims, interp, fh): else: indices, dindices = fh(ix0, it) for i, indexfloat in enumerate(indices): - index = int(indexfloat) - if index != -9223372036854775808: # =int(np.nan) + if not math.isnan(indexfloat): + index = int(indexfloat) if not interp: y[i, index] += x[ix0, it] else: @@ -95,8 +96,8 @@ def _rmatvec_numba_onthefly(x, y, dims, dimsd, interp, fh): else: indices, dindices = fh(ix0, it) for i, indexfloat in enumerate(indices): - index = int(indexfloat) - if index != -9223372036854775808: # =int(np.nan) + if not math.isnan(indexfloat): + index = int(indexfloat) if not interp: y[ix0, it] += x[i, index] else: diff --git 
a/pylops/basicoperators/blockdiag.py b/pylops/basicoperators/blockdiag.py index 166ae137..4d9fbc36 100644 --- a/pylops/basicoperators/blockdiag.py +++ b/pylops/basicoperators/blockdiag.py @@ -21,7 +21,7 @@ from pylops import LinearOperator from pylops.basicoperators import MatrixMult -from pylops.utils.backend import get_array_module, inplace_set +from pylops.utils.backend import get_array_module, get_module, inplace_set from pylops.utils.typing import DTypeLike, NDArray @@ -48,6 +48,12 @@ class BlockDiag(LinearOperator): .. versionadded:: 2.2.0 Force an array to be flattened after matvec and rmatvec. + inoutengine : :obj:`tuple`, optional + .. versionadded:: 2.4.0 + + Type of output vectors of `matvec` and `rmatvec. If ``None``, this is + inferred directly from the input vectors. Note that this is ignored + if ``nproc>1``. dtype : :obj:`str`, optional Type of elements in input array. @@ -113,6 +119,7 @@ def __init__( ops: Sequence[LinearOperator], nproc: int = 1, forceflat: bool = None, + inoutengine: Optional[tuple] = None, dtype: Optional[DTypeLike] = None, ) -> None: self.ops = ops @@ -149,6 +156,7 @@ def __init__( if self.nproc > 1: self.pool = mp.Pool(processes=nproc) + self.inoutengine = inoutengine dtype = _get_dtype(ops) if dtype is None else np.dtype(dtype) clinear = all([getattr(oper, "clinear", True) for oper in self.ops]) super().__init__( @@ -172,7 +180,11 @@ def nproc(self, nprocnew: int) -> None: self._nproc = nprocnew def _matvec_serial(self, x: NDArray) -> NDArray: - ncp = get_array_module(x) + ncp = ( + get_array_module(x) + if self.inoutengine is None + else get_module(self.inoutengine[0]) + ) y = ncp.zeros(self.nops, dtype=self.dtype) for iop, oper in enumerate(self.ops): y = inplace_set( @@ -183,7 +195,11 @@ def _matvec_serial(self, x: NDArray) -> NDArray: return y def _rmatvec_serial(self, x: NDArray) -> NDArray: - ncp = get_array_module(x) + ncp = ( + get_array_module(x) + if self.inoutengine is None + else get_module(self.inoutengine[1]) + ) y = ncp.zeros(self.mops, dtype=self.dtype) for iop, oper in enumerate(self.ops): y = inplace_set( diff --git a/pylops/basicoperators/firstderivative.py b/pylops/basicoperators/firstderivative.py index f8bd208e..c19a2650 100644 --- a/pylops/basicoperators/firstderivative.py +++ b/pylops/basicoperators/firstderivative.py @@ -3,11 +3,15 @@ from typing import Callable, Union import numpy as np -from numpy.core.multiarray import normalize_axis_index from pylops import LinearOperator from pylops.utils._internal import _value_or_sized_to_tuple -from pylops.utils.backend import get_array_module, inplace_add, inplace_set +from pylops.utils.backend import ( + get_array_module, + get_normalize_axis_index, + inplace_add, + inplace_set, +) from pylops.utils.decorators import reshaped from pylops.utils.typing import DTypeLike, InputDimsLike, NDArray @@ -95,7 +99,7 @@ def __init__( dims = _value_or_sized_to_tuple(dims) super().__init__(dtype=np.dtype(dtype), dims=dims, dimsd=dims, name=name) - self.axis = normalize_axis_index(axis, len(self.dims)) + self.axis = get_normalize_axis_index()(axis, len(self.dims)) self.sampling = sampling self.kind = kind self.edge = edge diff --git a/pylops/basicoperators/hstack.py b/pylops/basicoperators/hstack.py index b71e8723..4cd450d5 100644 --- a/pylops/basicoperators/hstack.py +++ b/pylops/basicoperators/hstack.py @@ -21,7 +21,7 @@ from pylops import LinearOperator from pylops.basicoperators import MatrixMult -from pylops.utils.backend import get_array_module, inplace_add, inplace_set +from 
pylops.utils.backend import get_array_module, get_module, inplace_add, inplace_set from pylops.utils.typing import NDArray @@ -48,6 +48,12 @@ class HStack(LinearOperator): .. versionadded:: 2.2.0 Force an array to be flattened after matvec. + inoutengine : :obj:`tuple`, optional + .. versionadded:: 2.4.0 + + Type of output vectors of `matvec` and `rmatvec. If ``None``, this is + inferred directly from the input vectors. Note that this is ignored + if ``nproc>1``. dtype : :obj:`str`, optional Type of elements in input array. @@ -112,6 +118,7 @@ def __init__( ops: Sequence[LinearOperator], nproc: int = 1, forceflat: bool = None, + inoutengine: Optional[tuple] = None, dtype: Optional[str] = None, ) -> None: self.ops = ops @@ -139,6 +146,8 @@ def __init__( self.pool = None if self.nproc > 1: self.pool = mp.Pool(processes=nproc) + + self.inoutengine = inoutengine dtype = _get_dtype(self.ops) if dtype is None else np.dtype(dtype) clinear = all([getattr(oper, "clinear", True) for oper in self.ops]) super().__init__( @@ -162,7 +171,11 @@ def nproc(self, nprocnew: int): self._nproc = nprocnew def _matvec_serial(self, x: NDArray) -> NDArray: - ncp = get_array_module(x) + ncp = ( + get_array_module(x) + if self.inoutengine is None + else get_module(self.inoutengine[0]) + ) y = ncp.zeros(self.nops, dtype=self.dtype) for iop, oper in enumerate(self.ops): y = inplace_add( @@ -173,7 +186,11 @@ def _matvec_serial(self, x: NDArray) -> NDArray: return y def _rmatvec_serial(self, x: NDArray) -> NDArray: - ncp = get_array_module(x) + ncp = ( + get_array_module(x) + if self.inoutengine is None + else get_module(self.inoutengine[1]) + ) y = ncp.zeros(self.mops, dtype=self.dtype) for iop, oper in enumerate(self.ops): y = inplace_set( diff --git a/pylops/basicoperators/laplacian.py b/pylops/basicoperators/laplacian.py index d2f3e64c..f8f836a5 100644 --- a/pylops/basicoperators/laplacian.py +++ b/pylops/basicoperators/laplacian.py @@ -2,13 +2,11 @@ from typing import Tuple -from pylops.utils.typing import NDArray - -from numpy.core.multiarray import normalize_axis_index from pylops import LinearOperator from pylops.basicoperators import SecondDerivative -from pylops.utils.typing import DTypeLike, InputDimsLike +from pylops.utils.backend import get_normalize_axis_index +from pylops.utils.typing import DTypeLike, InputDimsLike, NDArray class Laplacian(LinearOperator): @@ -58,14 +56,18 @@ class Laplacian(LinearOperator): """ - def __init__(self, dims: InputDimsLike, - axes: InputDimsLike = (-2, -1), - weights: Tuple[float, ...] = (1, 1), - sampling: Tuple[float, ...] = (1, 1), - edge: bool = False, - kind: str = "centered", - dtype: DTypeLike = "float64", name: str = "L"): - axes = tuple(normalize_axis_index(ax, len(dims)) for ax in axes) + def __init__( + self, + dims: InputDimsLike, + axes: InputDimsLike = (-2, -1), + weights: Tuple[float, ...] = (1, 1), + sampling: Tuple[float, ...] 
= (1, 1), + edge: bool = False, + kind: str = "centered", + dtype: DTypeLike = "float64", + name: str = "L", + ): + axes = tuple(get_normalize_axis_index()(ax, len(dims)) for ax in axes) if not (len(axes) == len(weights) == len(sampling)): raise ValueError("axes, weights, and sampling have different size") self.axes = axes @@ -73,8 +75,15 @@ def __init__(self, dims: InputDimsLike, self.sampling = sampling self.edge = edge self.kind = kind - Op = self._calc_l2op(dims=dims, axes=axes, sampling=sampling, edge=edge, kind=kind, dtype=dtype, - weights=weights) + Op = self._calc_l2op( + dims=dims, + axes=axes, + sampling=sampling, + edge=edge, + kind=kind, + dtype=dtype, + weights=weights, + ) super().__init__(Op=Op, name=name) def _matvec(self, x: NDArray) -> NDArray: @@ -84,8 +93,15 @@ def _rmatvec(self, x: NDArray) -> NDArray: return super()._rmatvec(x) @staticmethod - def _calc_l2op(dims: InputDimsLike, axes: InputDimsLike, weights: Tuple[float, ...], sampling: Tuple[float, ...], - edge: bool, kind: str, dtype: DTypeLike): + def _calc_l2op( + dims: InputDimsLike, + axes: InputDimsLike, + weights: Tuple[float, ...], + sampling: Tuple[float, ...], + edge: bool, + kind: str, + dtype: DTypeLike, + ): l2op = SecondDerivative( dims, axis=axes[0], sampling=sampling[0], edge=edge, kind=kind, dtype=dtype ) diff --git a/pylops/basicoperators/matrixmult.py b/pylops/basicoperators/matrixmult.py index a5f713ab..66b96c70 100644 --- a/pylops/basicoperators/matrixmult.py +++ b/pylops/basicoperators/matrixmult.py @@ -79,12 +79,14 @@ def __init__( else: otherdims = _value_or_sized_to_array(otherdims) self.otherdims = np.array(otherdims, dtype=int) - dims, dimsd = np.insert(self.otherdims, 0, self.A.shape[1]), np.insert( - self.otherdims, 0, self.A.shape[0] + dims, dimsd = ( + np.insert(self.otherdims, 0, self.A.shape[1]), + np.insert(self.otherdims, 0, self.A.shape[0]), + ) + self.dimsflatten, self.dimsdflatten = ( + np.insert([np.prod(self.otherdims)], 0, self.A.shape[1]), + np.insert([np.prod(self.otherdims)], 0, self.A.shape[0]), ) - self.dimsflatten, self.dimsdflatten = np.insert( - [np.prod(self.otherdims)], 0, self.A.shape[1] - ), np.insert([np.prod(self.otherdims)], 0, self.A.shape[0]) self.reshape = True explicit = False @@ -138,7 +140,7 @@ def inv(self) -> NDArray: r"""Return the inverse of :math:`\mathbf{A}`. Returns - ---------- + ------- Ainv : :obj:`numpy.ndarray` Inverse matrix. 
diff --git a/pylops/basicoperators/regression.py b/pylops/basicoperators/regression.py index dc51ded5..1160fe9b 100644 --- a/pylops/basicoperators/regression.py +++ b/pylops/basicoperators/regression.py @@ -124,7 +124,7 @@ def apply(self, t: npt.ArrayLike, x: NDArray) -> NDArray: Regression coefficients Returns - ---------- + ------- y : :obj:`numpy.ndarray` Values along y-axis diff --git a/pylops/basicoperators/restriction.py b/pylops/basicoperators/restriction.py index 1a745b30..e27610bf 100644 --- a/pylops/basicoperators/restriction.py +++ b/pylops/basicoperators/restriction.py @@ -6,17 +6,14 @@ import numpy as np import numpy.ma as np_ma -# need to check numpy version since normalize_axis_index will be -# soon moved from numpy.core.multiarray to from numpy.lib.array_utils -np_version = np.__version__.split(".") -if int(np_version[0]) < 2: - from numpy.core.multiarray import normalize_axis_index -else: - from numpy.lib.array_utils import normalize_axis_index - from pylops import LinearOperator from pylops.utils._internal import _value_or_sized_to_tuple -from pylops.utils.backend import get_array_module, inplace_set, to_cupy_conditional +from pylops.utils.backend import ( + get_array_module, + get_normalize_axis_index, + inplace_set, + to_numpy, +) from pylops.utils.typing import DTypeLike, InputDimsLike, IntNDArray, NDArray logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.WARNING) @@ -119,7 +116,7 @@ def __init__( ) -> None: ncp = get_array_module(iava) dims = _value_or_sized_to_tuple(dims) - axis = normalize_axis_index(axis, len(dims)) + axis = get_normalize_axis_index()(axis, len(dims)) dimsd = list(dims) # data dimensions dimsd[axis] = len(iava) @@ -149,7 +146,7 @@ def __init__( # explicitly create a list of indices in the n-dimensional # model space which will be used in _rmatvec to place the input if ncp != np: - self.iavamask = _compute_iavamask(self.dims, axis, iava, ncp) + self.iavamask = _compute_iavamask(self.dims, axis, to_numpy(iava), ncp) self.inplace = inplace self.axis = axis self.iavareshape = iavareshape @@ -176,7 +173,6 @@ def _rmatvec(self, x: NDArray) -> NDArray: ) else: if not hasattr(self, "iavamask"): - self.iava = to_cupy_conditional(x, self.iava) self.iavamask = _compute_iavamask(self.dims, self.axis, self.iava, ncp) y = ncp.zeros(int(self.shape[-1]), dtype=self.dtype) y = inplace_set(x.ravel(), y, self.iavamask) @@ -193,7 +189,7 @@ def mask(self, x: NDArray) -> NDArray: Input array (can be either flattened or not) Returns - ---------- + ------- y : :obj:`numpy.ma.core.MaskedArray` Masked array. 
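The ``get_normalize_axis_index`` helper used above presumably centralizes the NumPy version check that the removed inline code in ``restriction.py`` performed; a minimal sketch of that idea (an assumption based on the removed comment, not the actual ``pylops.utils.backend`` implementation):

.. code-block:: python

    import numpy as np

    def get_normalize_axis_index():
        # normalize_axis_index moved from numpy.core.multiarray to
        # numpy.lib.array_utils in NumPy 2 (as noted in the removed comment)
        if int(np.__version__.split(".")[0]) < 2:
            from numpy.core.multiarray import normalize_axis_index
        else:
            from numpy.lib.array_utils import normalize_axis_index
        return normalize_axis_index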
diff --git a/pylops/basicoperators/secondderivative.py b/pylops/basicoperators/secondderivative.py index 8433987d..d7fe7b8b 100644 --- a/pylops/basicoperators/secondderivative.py +++ b/pylops/basicoperators/secondderivative.py @@ -3,11 +3,15 @@ from typing import Callable, Union import numpy as np -from numpy.core.multiarray import normalize_axis_index from pylops import LinearOperator from pylops.utils._internal import _value_or_sized_to_tuple -from pylops.utils.backend import get_array_module, inplace_add, inplace_set +from pylops.utils.backend import ( + get_array_module, + get_normalize_axis_index, + inplace_add, + inplace_set, +) from pylops.utils.decorators import reshaped from pylops.utils.typing import DTypeLike, InputDimsLike, NDArray @@ -86,7 +90,7 @@ def __init__( dims = _value_or_sized_to_tuple(dims) super().__init__(dtype=np.dtype(dtype), dims=dims, dimsd=dims, name=name) - self.axis = normalize_axis_index(axis, len(self.dims)) + self.axis = get_normalize_axis_index()(axis, len(self.dims)) self.sampling = sampling self.kind = kind self.edge = edge diff --git a/pylops/basicoperators/tocupy.py b/pylops/basicoperators/tocupy.py new file mode 100644 index 00000000..8a3a7c6b --- /dev/null +++ b/pylops/basicoperators/tocupy.py @@ -0,0 +1,60 @@ +__all__ = ["ToCupy"] + +from typing import Union + +import numpy as np + +from pylops import LinearOperator +from pylops.utils._internal import _value_or_sized_to_tuple +from pylops.utils.backend import to_cupy, to_numpy +from pylops.utils.typing import DTypeLike, InputDimsLike, NDArray + + +class ToCupy(LinearOperator): + r"""Convert to CuPy. + + Convert an input array to CuPy in forward mode, + and convert back to NumPy in adjoint mode. + + Parameters + ---------- + dims : :obj:`list` or :obj:`int` + Number of samples for each dimension + dtype : :obj:`str`, optional + Type of elements in input array. + name : :obj:`str`, optional + Name of operator (to be used by :func:`pylops.utils.describe.describe`) + + Attributes + ---------- + shape : :obj:`tuple` + Operator shape + explicit : :obj:`bool` + Operator contains a matrix that can be solved explicitly + (``True``) or not (``False``) + + Notes + ----- + The ToCupy operator is a special operator that does not perform + any transformation on the input arrays other than converting + them from NumPy to CuPy. This operator can be used when one + is interested to create a chain of operators where only one + (or some of them) act on CuPy arrays, whilst other operate + on NumPy arrays. 
+ + """ + + def __init__( + self, + dims: Union[int, InputDimsLike], + dtype: DTypeLike = "float64", + name: str = "C", + ) -> None: + dims = _value_or_sized_to_tuple(dims) + super().__init__(dtype=np.dtype(dtype), dims=dims, dimsd=dims, name=name) + + def _matvec(self, x: NDArray) -> NDArray: + return to_cupy(x) + + def _rmatvec(self, x: NDArray) -> NDArray: + return to_numpy(x) diff --git a/pylops/basicoperators/transpose.py b/pylops/basicoperators/transpose.py index 0ccf05f0..88c99514 100644 --- a/pylops/basicoperators/transpose.py +++ b/pylops/basicoperators/transpose.py @@ -1,10 +1,10 @@ __all__ = ["Transpose"] import numpy as np -from numpy.core.multiarray import normalize_axis_index from pylops import LinearOperator from pylops.utils._internal import _value_or_sized_to_tuple +from pylops.utils.backend import get_normalize_axis_index from pylops.utils.decorators import reshaped from pylops.utils.typing import DTypeLike, InputDimsLike, NDArray @@ -64,7 +64,7 @@ def __init__( ) -> None: dims = _value_or_sized_to_tuple(dims) ndims = len(dims) - self.axes = [normalize_axis_index(ax, ndims) for ax in axes] + self.axes = [get_normalize_axis_index()(ax, ndims) for ax in axes] # find out if all axes are present only once in axes if len(np.unique(self.axes)) != ndims: diff --git a/pylops/basicoperators/vstack.py b/pylops/basicoperators/vstack.py index 0d66642e..55341411 100644 --- a/pylops/basicoperators/vstack.py +++ b/pylops/basicoperators/vstack.py @@ -21,7 +21,7 @@ from pylops import LinearOperator from pylops.basicoperators import MatrixMult -from pylops.utils.backend import get_array_module, inplace_add, inplace_set +from pylops.utils.backend import get_array_module, get_module, inplace_add, inplace_set from pylops.utils.typing import DTypeLike, NDArray @@ -48,6 +48,12 @@ class VStack(LinearOperator): .. versionadded:: 2.2.0 Force an array to be flattened after rmatvec. + inoutengine : :obj:`tuple`, optional + .. versionadded:: 2.4.0 + + Type of output vectors of `matvec` and `rmatvec. If ``None``, this is + inferred directly from the input vectors. Note that this is ignored + if ``nproc>1``. dtype : :obj:`str`, optional Type of elements in input array. 
@@ -112,6 +118,7 @@ def __init__( ops: Sequence[LinearOperator], nproc: int = 1, forceflat: bool = None, + inoutengine: Optional[tuple] = None, dtype: Optional[DTypeLike] = None, ) -> None: self.ops = ops @@ -139,6 +146,8 @@ def __init__( self.pool = None if self.nproc > 1: self.pool = mp.Pool(processes=nproc) + + self.inoutengine = inoutengine dtype = _get_dtype(self.ops) if dtype is None else np.dtype(dtype) clinear = all([getattr(oper, "clinear", True) for oper in self.ops]) super().__init__( @@ -162,7 +171,11 @@ def nproc(self, nprocnew: int): self._nproc = nprocnew def _matvec_serial(self, x: NDArray) -> NDArray: - ncp = get_array_module(x) + ncp = ( + get_array_module(x) + if self.inoutengine is None + else get_module(self.inoutengine[0]) + ) y = ncp.zeros(self.nops, dtype=self.dtype) for iop, oper in enumerate(self.ops): y = inplace_set( @@ -171,7 +184,11 @@ def _matvec_serial(self, x: NDArray) -> NDArray: return y def _rmatvec_serial(self, x: NDArray) -> NDArray: - ncp = get_array_module(x) + ncp = ( + get_array_module(x) + if self.inoutengine is None + else get_module(self.inoutengine[1]) + ) y = ncp.zeros(self.mops, dtype=self.dtype) for iop, oper in enumerate(self.ops): y = inplace_add( diff --git a/pylops/optimization/cls_basic.py b/pylops/optimization/cls_basic.py index 2e848fcf..b7c98e0a 100644 --- a/pylops/optimization/cls_basic.py +++ b/pylops/optimization/cls_basic.py @@ -10,7 +10,12 @@ import numpy as np from pylops.optimization.basesolver import Solver -from pylops.utils.backend import get_array_module, to_numpy +from pylops.utils.backend import ( + get_array_module, + to_cupy_conditional, + to_numpy, + to_numpy_conditional, +) from pylops.utils.typing import NDArray if TYPE_CHECKING: @@ -131,10 +136,10 @@ def step(self, x: NDArray, show: bool = False) -> NDArray: Updated model vector """ - Opc = self.Op.matvec(self.c) + Opc = self.Op.matvec(to_cupy_conditional(x, self.c)) cOpc = self.ncp.abs(self.c.dot(Opc.conj())) a = self.kold / cOpc - x += a * self.c + x += to_cupy_conditional(x, a) * to_cupy_conditional(x, self.c) self.r -= a * Opc k = self.ncp.abs(self.r.dot(self.r.conj())) b = k / self.kold @@ -386,7 +391,7 @@ def step(self, x: NDArray, show: bool = False) -> NDArray: self.q.dot(self.q.conj()) + self.damp * self.c.dot(self.c.conj()) ) x = x + a * self.c - self.s = self.s - a * self.q + self.s = self.s - to_numpy_conditional(self.q, a) * self.q r = self.Op.rmatvec(self.s) - self.damp * x k = self.ncp.abs(r.dot(r.conj())) b = k / self.kold @@ -773,7 +778,9 @@ def step(self, x: NDArray, show: bool = False) -> NDArray: # next beta, u, alfa, v. These satisfy the relations # beta*u = Op*v - alfa*u, # alfa*v = Op'*u - beta*v' - self.u = self.Op.matvec(self.v) - self.alfa * self.u + self.u = ( + self.Op.matvec(self.v) - to_numpy_conditional(self.u, self.alfa) * self.u + ) self.beta = self.ncp.linalg.norm(self.u) if self.beta > 0: self.u = self.u / self.beta @@ -812,7 +819,9 @@ def step(self, x: NDArray, show: bool = False) -> NDArray: self.w = self.v + self.t2 * self.w self.ddnorm = self.ddnorm + self.ncp.linalg.norm(self.dk) ** 2 if self.calc_var: - self.var = self.var + self.ncp.dot(self.dk, self.dk) + self.var = self.var + to_numpy_conditional( + self.var, self.ncp.dot(self.dk, self.dk) + ) # use a plane rotation on the right to eliminate the # super-diagonal element (theta) of the upper-bidiagonal matrix. 
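The solver guards above, together with the new `pylops.ToCupy` operator and the `inoutengine` flag of `VStack`, are aimed at chains where part of the computation runs on the GPU while the solver itself drives NumPy arrays. A minimal, illustrative sketch of that pattern (not taken from this diff; arbitrary sizes, assumes cupy and a CUDA device are available):

import numpy as np
import cupy as cp  # assumes a CUDA-capable environment
import pylops
from pylops.optimization.basic import cgls

n = 16
A = np.random.normal(0.0, 1.0, (n, n)).astype(np.float32)

# operator backed by a CuPy matrix, i.e. applied on the GPU
Dop = pylops.MatrixMult(cp.asarray(A), dtype="float32")

# ToCupy moves NumPy inputs to the GPU in forward mode and back to NumPy in
# adjoint mode, so the composite operator accepts and returns NumPy arrays
Top = pylops.ToCupy(Dop.dims, dtype="float32")
Top1 = pylops.ToCupy(Dop.dimsd, dtype="float32")
Op = Top1.H * Dop * Top

y = Op * np.ones(n, dtype=np.float32)          # NumPy in, NumPy out
xinv = cgls(Op, y, x0=np.zeros(n, dtype=np.float32), niter=10)[0]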
diff --git a/pylops/optimization/cls_leastsquares.py b/pylops/optimization/cls_leastsquares.py index 64526f81..e824c86f 100644 --- a/pylops/optimization/cls_leastsquares.py +++ b/pylops/optimization/cls_leastsquares.py @@ -248,7 +248,7 @@ def run( xinv = cg( self.Op_normal, self.y_normal, - self.ncp.zeros(self.Op_normal.shape[1], dtype=self.Op_normal.dtype), + x0=self.ncp.zeros(self.Op_normal.shape[1], dtype=self.Op_normal.dtype), **kwargs_solver, )[0] istop = None @@ -594,7 +594,7 @@ def run( xinv, istop, itn, r1norm, r2norm = cgls( self.RegOp, self.datatot, - self.ncp.zeros(self.RegOp.shape[1], dtype=self.RegOp.dtype), + x0=self.ncp.zeros(self.RegOp.shape[1], dtype=self.RegOp.dtype), **kwargs_solver, )[0:5] else: @@ -816,7 +816,7 @@ def run( pinv, istop, itn, r1norm, r2norm = cgls( self.POp, self.y, - self.ncp.zeros(self.POp.shape[1], dtype=self.POp.dtype), + x0=self.ncp.zeros(self.POp.shape[1], dtype=self.POp.dtype), **kwargs_solver, )[0:5] # force it 1d as we decorate this method with disable_ndarray_multiplication diff --git a/pylops/pytensoroperator.py b/pylops/pytensoroperator.py new file mode 100644 index 00000000..cdd04c04 --- /dev/null +++ b/pylops/pytensoroperator.py @@ -0,0 +1,85 @@ +import pylops +from pylops.utils import deps + +pytensor_message = deps.pytensor_import("the pytensor module") + +if pytensor_message is not None: + + class PyTensorOperator: + """PyTensor Op which applies a PyLops Linear Operator, including gradient support. + + This class "converts" a PyLops `LinearOperator` class into a PyTensor `Op`. + This applies the `LinearOperator` in "forward-mode" in `self.perform`, and applies + its adjoint when computing the vector-Jacobian product (`self.grad`), as that is + the analytically correct gradient for linear operators. This class should pass + `pytensor.gradient.verify_grad`. + + Parameters + ---------- + LOp : pylops.LinearOperator + """ + + def __init__(self, LOp: pylops.LinearOperator) -> None: + if not deps.pytensor_enabled: + raise NotImplementedError(pytensor_message) + +else: + import pytensor.tensor as pt + from pytensor.graph.basic import Apply + from pytensor.graph.op import Op + + class _PyTensorOperatorNoGrad(Op): + """PyTensor Op which applies a PyLops Linear Operator, excluding gradient support. + + This class "converts" a PyLops `LinearOperator` class into a PyTensor `Op`. + This applies the `LinearOperator` in "forward-mode" in `self.perform`. + + Parameters + ---------- + LOp : pylops.LinearOperator + """ + + __props__ = ("dims", "dimsd", "shape") + + def __init__(self, LOp: pylops.LinearOperator) -> None: + self._LOp = LOp + self.dims = self._LOp.dims + self.dimsd = self._LOp.dimsd + self.shape = self._LOp.shape + super().__init__() + + def make_node(self, x) -> Apply: + x = pt.as_tensor_variable(x) + inputs = [x] + outputs = [pt.tensor(dtype=x.type.dtype, shape=self._LOp.dimsd)] + return Apply(self, inputs, outputs) + + def perform( + self, node: Apply, inputs: list, output_storage: list[list[None]] + ) -> None: + (x,) = inputs + (yt,) = output_storage + yt[0] = self._LOp @ x + + class PyTensorOperator(_PyTensorOperatorNoGrad): + """PyTensor Op which applies a PyLops Linear Operator, including gradient support. + + This class "converts" a PyLops `LinearOperator` class into a PyTensor `Op`. + This applies the `LinearOperator` in "forward-mode" in `self.perform`, and applies + its adjoint when computing the vector-Jacobian product (`self.grad`), as that is + the analytically correct gradient for linear operators. 
This class should pass + `pytensor.gradient.verify_grad`. + + Parameters + ---------- + LOp : pylops.LinearOperator + """ + + def __init__(self, LOp: pylops.LinearOperator) -> None: + super().__init__(LOp) + self._gradient_op = _PyTensorOperatorNoGrad(self._LOp.H) + + def grad( + self, inputs: list[pt.TensorVariable], output_grads: list[pt.TensorVariable] + ): + return [self._gradient_op(output_grads[0])] diff --git a/pylops/signalprocessing/__init__.py b/pylops/signalprocessing/__init__.py index a8e5ed65..d8a5a4cc 100755 --- a/pylops/signalprocessing/__init__.py +++ b/pylops/signalprocessing/__init__.py @@ -28,6 +28,10 @@ DTCWT Dual-Tree Complex Wavelet Transform. Radon2D Two dimensional Radon transform. Radon3D Three dimensional Radon transform. + FourierRadon2D Two dimensional Fourier Radon transform. + FourierRadon3D Three dimensional Fourier Radon transform. + ChirpRadon2D Two dimensional Chirp Radon transform. + ChirpRadon3D Three dimensional Chirp Radon transform. Seislet Two dimensional Seislet operator. Sliding1D 1D Sliding transform operator. Sliding2D 2D Sliding transform operator. @@ -52,6 +56,8 @@ from .bilinear import * from .radon2d import * from .radon3d import * +from .fourierradon2d import * +from .fourierradon3d import * from .chirpradon2d import * from .chirpradon3d import * from .sliding1d import * @@ -67,7 +73,6 @@ from .dct import * from .dtcwt import * - __all__ = [ "FFT", "FFT2D", @@ -85,6 +90,8 @@ "Bilinear", "Radon2D", "Radon3D", + "FourierRadon2D", + "FourierRadon3D", "ChirpRadon2D", "ChirpRadon3D", "Sliding1D", diff --git a/pylops/signalprocessing/_baseffts.py b/pylops/signalprocessing/_baseffts.py index cb8c1b41..18a4cce2 100644 --- a/pylops/signalprocessing/_baseffts.py +++ b/pylops/signalprocessing/_baseffts.py @@ -4,7 +4,6 @@ from typing import Optional, Sequence, Union import numpy as np -from numpy.core.multiarray import normalize_axis_index from pylops import LinearOperator from pylops.utils._internal import ( @@ -12,7 +11,11 @@ _value_or_sized_to_array, _value_or_sized_to_tuple, ) -from pylops.utils.backend import get_complex_dtype, get_real_dtype +from pylops.utils.backend import ( + get_complex_dtype, + get_normalize_axis_index, + get_real_dtype, +) from pylops.utils.typing import DTypeLike, InputDimsLike, NDArray logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.WARNING) @@ -46,7 +49,7 @@ def __init__( axes = _value_or_sized_to_array(axis) _raise_on_wrong_dtype(axes, np.integer, "axis") - self.axis = normalize_axis_index(axes[0], self.ndim) + self.axis = get_normalize_axis_index()(axes[0], self.ndim) if nfft is None: self.nfft = dims[self.axis] @@ -152,7 +155,7 @@ def __init__( axes = _value_or_sized_to_array(axes) _raise_on_wrong_dtype(axes, np.integer, "axes") - self.axes = np.array([normalize_axis_index(d, self.ndim) for d in axes]) + self.axes = np.array([get_normalize_axis_index()(d, self.ndim) for d in axes]) self.naxes = len(self.axes) if self.naxes != len(np.unique(self.axes)): warnings.warn( diff --git a/pylops/signalprocessing/_fourierradon2d_cuda.py b/pylops/signalprocessing/_fourierradon2d_cuda.py new file mode 100755 index 00000000..4a641524 --- /dev/null +++ b/pylops/signalprocessing/_fourierradon2d_cuda.py @@ -0,0 +1,98 @@ +from math import pi + +import cupy as cp +from numba import cuda + +TWO_PI_MINUS = cp.float32(-2.0 * pi) +TWO_PI_PLUS = cp.float32(2.0 * pi) +IMG = cp.complex64(1j) + + +@cuda.jit +def _radon_inner_2d_kernel(x, y, f, px, h, flim0, flim1, npx, nh): + """Cuda kernels for FourierRadon2D operator + 
+ Cuda implementation of the on-the-fly kernel creation and application for the + FourierRadon2D operator. See :class:`pylops.signalprocessing.FourierRadon2D` + for details about input parameters. + + """ + ih, ifr = cuda.grid(2) + if ih < nh and ifr >= flim0 and ifr <= flim1: + for ipx in range(npx): + # slow computation of exp(1j * x) + # y[ih, ifr] += x[ipx, ifr] * exp(TWO_PI_MINUS * f[ifr] * px[ipx] * h[ih]) + # fast computation of exp(1j * x) - see https://stackoverflow.com/questions/9860711/cucomplex-h-and-exp/9863048#9863048 + s, c = cuda.libdevice.sincosf(TWO_PI_MINUS * f[ifr] * px[ipx] * h[ih]) + y[ih, ifr] += x[ipx, ifr] * (c + IMG * s) + + +@cuda.jit +def _aradon_inner_2d_kernel(x, y, f, px, h, flim0, flim1, npx, nh): + """Cuda kernels for FourierRadon2D operator + + Cuda implementation of the on-the-fly kernel creation and application for the + FourierRadon2D operator. See :class:`pylops.signalprocessing.FourierRadon2D` + for details about input parameters. + + """ + ipx, ifr = cuda.grid(2) + if ipx < npx and ifr >= flim0 and ifr <= flim1: + for ih in range(nh): + # slow computation of exp(1j * x) + # x[ipx, ifr] += y[ih, ifr] * exp(TWO_PI_I_PLUS * f[ifr] * px[ipx] * h[ih]) + # fast computation of exp(1j * x) - see https://stackoverflow.com/questions/9860711/cucomplex-h-and-exp/9863048#9863048 + s, c = cuda.libdevice.sincosf(TWO_PI_PLUS * f[ifr] * px[ipx] * h[ih]) + x[ipx, ifr] += y[ih, ifr] * (c + IMG * s) + + +def _radon_inner_2d_cuda( + x, + y, + f, + px, + h, + flim0, + flim1, + npx, + nh, + num_blocks=(32, 32), + num_threads_per_blocks=(32, 32), +): + """Caller for FourierRadon2D operator + + Caller for cuda implementation of matvec kernel for FourierRadon2D operator. + See :class:`pylops.signalprocessing.FourierRadon2D` for details about + input parameters. + + """ + _radon_inner_2d_kernel[num_blocks, num_threads_per_blocks]( + x, y, f, px, h, flim0, flim1, npx, nh + ) + return y + + +def _aradon_inner_2d_cuda( + x, + y, + f, + px, + h, + flim0, + flim1, + npx, + nh, + num_blocks=(32, 32), + num_threads_per_blocks=(32, 32), +): + """Caller for FourierRadon2D operator + + Caller for cuda implementation of rmatvec kernel for FourierRadon2D operator. + See :class:`pylops.signalprocessing.FourierRadon2D` for details about + input parameters. 
+ + """ + _aradon_inner_2d_kernel[num_blocks, num_threads_per_blocks]( + x, y, f, px, h, flim0, flim1, npx, nh + ) + return x diff --git a/pylops/signalprocessing/_fourierradon2d_numba.py b/pylops/signalprocessing/_fourierradon2d_numba.py new file mode 100755 index 00000000..8f09724e --- /dev/null +++ b/pylops/signalprocessing/_fourierradon2d_numba.py @@ -0,0 +1,25 @@ +import os +from cmath import exp +from math import pi + +from numba import jit, prange + +# detect whether to use parallel or not +numba_threads = int(os.getenv("NUMBA_NUM_THREADS", "1")) +parallel = True if numba_threads != 1 else False + + +@jit(nopython=True, parallel=parallel, nogil=True, cache=True, fastmath=True) +def _radon_inner_2d(X, Y, f, px, h, flim0, flim1, npx, nh): + for ih in prange(nh): + for ifr in range(flim0, flim1): + for ipx in range(npx): + Y[ih, ifr] += X[ipx, ifr] * exp(-1j * 2 * pi * f[ifr] * px[ipx] * h[ih]) + + +@jit(nopython=True, parallel=parallel, nogil=True, cache=True, fastmath=True) +def _aradon_inner_2d(X, Y, f, px, h, flim0, flim1, npx, nh): + for ipx in prange(npx): + for ifr in range(flim0, flim1): + for ih in range(nh): + X[ipx, ifr] += Y[ih, ifr] * exp(1j * 2 * pi * f[ifr] * px[ipx] * h[ih]) diff --git a/pylops/signalprocessing/_fourierradon3d_cuda.py b/pylops/signalprocessing/_fourierradon3d_cuda.py new file mode 100755 index 00000000..607f2e74 --- /dev/null +++ b/pylops/signalprocessing/_fourierradon3d_cuda.py @@ -0,0 +1,115 @@ +from math import pi + +import cupy as cp +from numba import cuda + +TWO_PI_MINUS = cp.float32(-2.0 * pi) +TWO_PI_PLUS = cp.float32(2.0 * pi) +IMG = cp.complex64(1j) + + +@cuda.jit +def _radon_inner_3d_kernel(x, y, f, py, px, hy, hx, flim0, flim1, npy, npx, nhy, nhx): + """Cuda kernels for FourierRadon3D operator + + Cuda implementation of the on-the-fly kernel creation and application for the + FourierRadon3D operator. See :class:`pylops.signalprocessing.FourierRadon3D` + for details about input parameters. + + """ + ihy, ihx, ifr = cuda.grid(3) + if ihy < nhy and ihx < nhx and ifr >= flim0 and ifr <= flim1: + for ipy in range(npy): + for ipx in range(npx): + # slow computation of exp(1j * x) + # y[ihy, ihx, ifr] += x[ipy, ipx, ifr] * exp( + # TWO_PI_MINUS * f[ifr] * (py[ipy] * hy[ihy] + px[ipx] * hx[ihx]) + # ) + # fast computation of exp(1j * x) - see https://stackoverflow.com/questions/9860711/cucomplex-h-and-exp/9863048#9863048 + s, c = cuda.libdevice.sincosf( + TWO_PI_MINUS * f[ifr] * (py[ipy] * hy[ihy] + px[ipx] * hx[ihx]) + ) + y[ihy, ihx, ifr] += x[ipy, ipx, ifr] * (c + IMG * s) + + +@cuda.jit +def _aradon_inner_3d_kernel(x, y, f, py, px, hy, hx, flim0, flim1, npy, npx, nhy, nhx): + """Cuda kernels for FourierRadon3D operator + + Cuda implementation of the on-the-fly kernel creation and application for the + FourierRadon3D operator. See :class:`pylops.signalprocessing.FourierRadon3D` + for details about input parameters. 
+ + """ + ipy, ipx, ifr = cuda.grid(3) + if ipy < npy and ipx < npx and ifr >= flim0 and ifr <= flim1: + for ihy in range(nhy): + for ihx in range(nhx): + # slow computation of exp(1j * x) + # x[ipy, ipx, ifr] += y[ihy, ihx, ifr] * exp( + # TWO_PI_I_PLUS * f[ifr] * (py[ipy] * hy[ihy] + px[ipx] * hx[ihx]) + # ) + s, c = cuda.libdevice.sincosf( + TWO_PI_PLUS * f[ifr] * (py[ipy] * hy[ihy] + px[ipx] * hx[ihx]) + ) + x[ipy, ipx, ifr] += y[ihy, ihx, ifr] * (c + IMG * s) + + +def _radon_inner_3d_cuda( + x, + y, + f, + py, + px, + hy, + hx, + flim0, + flim1, + npy, + npx, + nhy, + nhx, + num_blocks=(8, 8), + num_threads_per_blocks=(8, 8), +): + """Caller for FourierRadon2D operator + + Caller for cuda implementation of matvec kernel for FourierRadon3D operator. + See :class:`pylops.signalprocessing.FourierRadon3D` for details about + input parameters. + + """ + _radon_inner_3d_kernel[num_blocks, num_threads_per_blocks]( + x, y, f, py, px, hy, hx, flim0, flim1, npy, npx, nhy, nhx + ) + return y + + +def _aradon_inner_3d_cuda( + x, + y, + f, + py, + px, + hy, + hx, + flim0, + flim1, + npy, + npx, + nhy, + nhx, + num_blocks=(8, 8), + num_threads_per_blocks=(8, 8), +): + """Caller for FourierRadon3D operator + + Caller for cuda implementation of rmatvec kernel for FourierRadon3D operator. + See :class:`pylops.signalprocessing.FourierRadon3D` for details about + input parameters. + + """ + _aradon_inner_3d_kernel[num_blocks, num_threads_per_blocks]( + x, y, f, py, px, hy, hx, flim0, flim1, npy, npx, nhy, nhx + ) + return x diff --git a/pylops/signalprocessing/_fourierradon3d_numba.py b/pylops/signalprocessing/_fourierradon3d_numba.py new file mode 100755 index 00000000..e217fe95 --- /dev/null +++ b/pylops/signalprocessing/_fourierradon3d_numba.py @@ -0,0 +1,41 @@ +import os +from cmath import exp +from math import pi + +from numba import jit, prange + +# detect whether to use parallel or not +numba_threads = int(os.getenv("NUMBA_NUM_THREADS", "1")) +parallel = True if numba_threads != 1 else False + + +@jit(nopython=True, parallel=parallel, nogil=True, cache=True, fastmath=True) +def _radon_inner_3d(X, Y, f, py, px, hy, hx, flim0, flim1, npy, npx, nhy, nhx): + for ihy in prange(nhy): + for ihx in prange(nhx): + for ifr in range(flim0, flim1): + for ipy in range(npy): + for ipx in range(npx): + Y[ihy, ihx, ifr] += X[ipy, ipx, ifr] * exp( + -1j + * 2 + * pi + * f[ifr] + * (py[ipy] * hy[ihy] + px[ipx] * hx[ihx]) + ) + + +@jit(nopython=True, parallel=parallel, nogil=True, cache=True, fastmath=True) +def _aradon_inner_3d(X, Y, f, py, px, hy, hx, flim0, flim1, npy, npx, nhy, nhx): + for ipy in prange(npy): + for ipx in range(npx): + for ifr in range(flim0, flim1): + for ihy in range(nhy): + for ihx in range(nhx): + X[ipy, ipx, ifr] += Y[ihy, ihx, ifr] * exp( + 1j + * 2 + * pi + * f[ifr] + * (py[ipy] * hy[ihy] + px[ipx] * hx[ihx]) + ) diff --git a/pylops/signalprocessing/_nonstatconvolve2d_cuda.py b/pylops/signalprocessing/_nonstatconvolve2d_cuda.py index 5de400bb..feba8ed2 100644 --- a/pylops/signalprocessing/_nonstatconvolve2d_cuda.py +++ b/pylops/signalprocessing/_nonstatconvolve2d_cuda.py @@ -100,9 +100,9 @@ def _matvec_rmatvec_call( ): """Caller for NonStationaryConvolve2D operator - Caller for cuda implementation of matvec and rmatvec for NonStationaryConvolve2D operato, with same signature + Caller for cuda implementation of matvec and rmatvec for NonStationaryConvolve2D operator, with same signature as numpy/numba counterparts. 
See :class:`pylops.signalprocessing.NonStationaryConvolve2D` for details about - input parameters. + input parameters. """ _matvec_rmatvec[num_blocks, num_threads_per_blocks]( diff --git a/pylops/signalprocessing/chirpradon3d.py b/pylops/signalprocessing/chirpradon3d.py index 1f544926..193d41be 100755 --- a/pylops/signalprocessing/chirpradon3d.py +++ b/pylops/signalprocessing/chirpradon3d.py @@ -14,8 +14,6 @@ pyfftw_message = deps.pyfftw_import("the chirpradon3d module") if pyfftw_message is None: - import pyfftw - from ._chirpradon3d import _chirp_radon_3d_fftw logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.WARNING) @@ -101,7 +99,7 @@ def __init__( @reshaped def _matvec(self, x: NDArray) -> NDArray: - if self.engine == "fftw" and pyfftw is not None: + if self.engine == "fftw" and pyfftw_message is None: return _chirp_radon_3d_fftw( x, self.dt, self.dy, self.dx, self.pmax, mode="f", **self.kwargs_fftw ) @@ -109,7 +107,7 @@ def _matvec(self, x: NDArray) -> NDArray: @reshaped def _rmatvec(self, x: NDArray) -> NDArray: - if self.engine == "fftw" and pyfftw is not None: + if self.engine == "fftw" and pyfftw_message is None: return _chirp_radon_3d_fftw( x, self.dt, self.dy, self.dx, self.pmax, mode="a", **self.kwargs_fftw ) @@ -117,7 +115,7 @@ def _rmatvec(self, x: NDArray) -> NDArray: def inverse(self, x: NDArray) -> NDArray: x = x.reshape(self.dimsd) - if self.engine == "fftw" and pyfftw is not None: + if self.engine == "fftw" and pyfftw_message is None: y = _chirp_radon_3d_fftw( x, self.dt, self.dy, self.dx, self.pmax, mode="i", **self.kwargs_fftw ) diff --git a/pylops/signalprocessing/convolvend.py b/pylops/signalprocessing/convolvend.py index c2fcb563..14ea6e24 100644 --- a/pylops/signalprocessing/convolvend.py +++ b/pylops/signalprocessing/convolvend.py @@ -3,7 +3,6 @@ from typing import Optional, Union import numpy as np -from numpy.core.multiarray import normalize_axis_index from pylops import LinearOperator from pylops.utils._internal import _value_or_sized_to_tuple @@ -11,6 +10,7 @@ get_array_module, get_convolve, get_correlate, + get_normalize_axis_index, to_cupy_conditional, ) from pylops.utils.decorators import reshaped @@ -78,7 +78,9 @@ def __init__( self.axes = ( np.arange(len(self.dims)) if axes is None - else np.array([normalize_axis_index(ax, len(self.dims)) for ax in axes]) + else np.array( + [get_normalize_axis_index()(ax, len(self.dims)) for ax in axes] + ) ) self.h = h hshape = np.array(self.h.shape) diff --git a/pylops/signalprocessing/fourierradon2d.py b/pylops/signalprocessing/fourierradon2d.py new file mode 100755 index 00000000..37439dda --- /dev/null +++ b/pylops/signalprocessing/fourierradon2d.py @@ -0,0 +1,298 @@ +__all__ = ["FourierRadon2D"] + +import logging +from typing import Optional, Tuple + +import numpy as np +import scipy as sp + +from pylops import LinearOperator +from pylops.utils import deps +from pylops.utils.backend import get_array_module, get_complex_dtype +from pylops.utils.decorators import reshaped +from pylops.utils.typing import DTypeLike, NDArray + +jit_message = deps.numba_import("the radon2d module") +cupy_message = deps.cupy_import("the radon2d module") + +if jit_message is None: + from ._fourierradon2d_numba import _aradon_inner_2d, _radon_inner_2d +if jit_message is None and cupy_message is None: + from ._fourierradon2d_cuda import _aradon_inner_2d_cuda, _radon_inner_2d_cuda + +logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.WARNING) + + +class FourierRadon2D(LinearOperator): + r"""2D Fourier 
Radon transform + + Apply Radon forward (and adjoint) transform using Fast + Fourier Transform to a 2-dimensional array of size :math:`[n_{p_x} \times n_t]` + (and :math:`[n_x \times n_t]`). + + Note that forward and adjoint follow the same convention of the time-space + implementation in :class:`pylops.signalprocessing.Radon2D`. + + Parameters + ---------- + taxis : :obj:`np.ndarray` + Time axis + haxis : :obj:`np.ndarray` + Spatial axis + pxaxis : :obj:`np.ndarray` + Axis of scanning variable :math:`p_x` of parametric curve + nfft : :obj:`int` + Number of samples in Fourier transform + flims : :obj:`tuple`, optional + Indices of lower and upper limits of Fourier axis to be used in + the application of the Radon matrix (when ``None``, use entire axis) + kind : :obj:`str` + Curve to be used for stacking/spreading (``linear``, ``parabolic``) + engine : :obj:`str` + Engine used for computation (``numpy`` or ``numba`` or ``cuda``) + num_threads_per_blocks : :obj:`tuple` + Number of threads in each block (only when ``engine=cuda``) + dtype : :obj:`str` + Type of elements in input array. + name : :obj:`str` + Name of operator (to be used by :func:`pylops.utils.describe.describe`) + + Attributes + ---------- + shape : :obj:`tuple` + Operator shape + explicit : :obj:`bool` + Operator contains a matrix that can be solved explicitly (``True``) or + not (``False``) + + Raises + ------ + NotImplementedError + If ``engine`` is neither ``numpy``, ``numba``, nor ``cuda``. + + Notes + ----- + The FourierRadon2D operator applies the Radon transform in the frequency domain. + After transforming a 2-dimensional array of size + :math:`[n_x \times n_t]` into the frequency domain, the following linear + transformation is applied to each frequency component in adjoint mode: + + .. math:: + \begin{bmatrix} + m(p_{x,1}, \omega_i) \\ + m(p_{x,2}, \omega_i) \\ + \vdots \\ + m(p_{x,N_p}, \omega_i) + \end{bmatrix} + = + \begin{bmatrix} + e^{-j \omega_i p_{x,1} x^l_1} & e^{-j \omega_i p_{x,1} x^l_2} & \ldots & e^{-j \omega_i p_{x,1} x^l_{N_x}} \\ + e^{-j \omega_i p_{x,2} x^l_1} & e^{-j \omega_i p_{x,2} x^l_2} & \ldots & e^{-j \omega_i p_{x,2} x^l_{N_x}} \\ + \vdots & \vdots & \ddots & \vdots \\ + e^{-j \omega_i p_{x,N_p} x^l_1} & e^{-j \omega_i p_{x,N_p} x^l_2} & \ldots & e^{-j \omega_i p_{x,N_p} x^l_{N_x}} \\ + \end{bmatrix} + \begin{bmatrix} + d(x_1, \omega_i) \\ + d(x_2, \omega_i) \\ + \vdots \\ + d(x_{N_x}, \omega_i) + \end{bmatrix} + + where :math:`l=1,2`. Similarly the forward mode is implemented by applying the + transpose and complex conjugate of the above matrix to the model transformed to + the Fourier domain. + + Refer to [1]_ for more theoretical and implementation details. + + .. [1] Sacchi, M. "Statistical and Transform Methods for + Geophysical Signal Processing", 2007. 
+ + """ + + def __init__( + self, + taxis: NDArray, + haxis: NDArray, + pxaxis: NDArray, + nfft: int, + flims: Optional[Tuple[int, int]] = None, + kind: str = "linear", + engine: str = "numpy", + num_threads_per_blocks: Tuple[int, int] = (32, 32), + dtype: DTypeLike = "float64", + name: str = "R", + ) -> None: + # engine + if engine not in ["numpy", "numba", "cuda"]: + raise NotImplementedError("engine must be numpy or numba or cuda") + if engine == "numba" and jit_message is not None: + engine = "numpy" + + # dimensions and super + dims = len(pxaxis), len(taxis) + dimsd = len(haxis), len(taxis) + super().__init__(dtype=np.dtype(dtype), dims=dims, dimsd=dimsd, name=name) + + # other input params + self.taxis, self.haxis = taxis, haxis + self.nh, self.nt = self.dimsd + self.px = pxaxis + self.npx, self.nfft = self.dims[0], nfft + self.dt = taxis[1] - taxis[0] + self.dh = haxis[1] - haxis[0] + self.f = np.fft.rfftfreq(self.nfft, d=self.dt).astype(self.dtype) + self.nfft2 = len(self.f) + self.cdtype = get_complex_dtype(dtype) + self.flims = (0, self.nfft2) if flims is None else flims + + if kind == "parabolic": + self.haxis = self.haxis**2 + + # create additional input parameters for engine=cuda + if engine == "cuda": + self.num_threads_per_blocks = num_threads_per_blocks + ( + num_threads_per_blocks_hpx, + num_threads_per_blocks_f, + ) = num_threads_per_blocks + num_blocks_px = ( + self.dims[0] + num_threads_per_blocks_hpx - 1 + ) // num_threads_per_blocks_hpx + num_blocks_h = ( + self.dimsd[0] + num_threads_per_blocks_hpx - 1 + ) // num_threads_per_blocks_hpx + num_blocks_f = ( + self.dims[1] + num_threads_per_blocks_f - 1 + ) // num_threads_per_blocks_f + self.num_blocks_matvec = (num_blocks_h, num_blocks_f) + self.num_blocks_rmatvec = (num_blocks_px, num_blocks_f) + self._register_multiplications(engine) + + def _register_multiplications(self, engine: str) -> None: + if engine == "numba": + self._matvec = self._matvec_numba + self._rmatvec = self._rmatvec_numba + elif engine == "cuda": + self._matvec = self._matvec_cuda + self._rmatvec = self._rmatvec_cuda + else: + self._matvec = self._matvec_numpy + self._rmatvec = self._rmatvec_numpy + + @reshaped + def _matvec_numpy(self, x: NDArray) -> NDArray: + ncp = get_array_module(x) + self.f = ncp.asarray(self.f) + x = ncp.fft.rfft(x, n=self.nfft, axis=-1) + + H, PX, F = ncp.meshgrid( + self.haxis, self.px, self.f[self.flims[0] : self.flims[1]], indexing="ij" + ) + y = ncp.zeros((self.nh, self.nfft2), dtype=self.cdtype) + y[:, self.flims[0] : self.flims[1]] = ncp.einsum( + "ijk,jk->ik", + ncp.exp(-1j * 2 * ncp.pi * F * PX * H), + x[:, self.flims[0] : self.flims[1]], + ) + y = ncp.real(ncp.fft.irfft(y, n=self.nfft, axis=-1))[:, : self.nt] + return y + + @reshaped + def _rmatvec_numpy(self, y: NDArray) -> NDArray: + ncp = get_array_module(y) + self.f = ncp.asarray(self.f) + y = ncp.fft.rfft(y, n=self.nfft, axis=-1) + + PX, H, F = ncp.meshgrid( + self.px, self.haxis, self.f[self.flims[0] : self.flims[1]], indexing="ij" + ) + x = ncp.zeros((self.npx, self.nfft2), dtype=self.cdtype) + x[:, self.flims[0] : self.flims[1]] = ncp.einsum( + "ijk,jk->ik", + ncp.exp(1j * 2 * ncp.pi * F * PX * H), + y[:, self.flims[0] : self.flims[1]], + ) + x = ncp.real(ncp.fft.irfft(x, n=self.nfft, axis=-1))[:, : self.nt] + return x + + @reshaped + def _matvec_cuda(self, x: NDArray) -> NDArray: + ncp = get_array_module(x) + y = ncp.zeros((self.nh, self.nfft2), dtype=self.cdtype) + + x = ncp.fft.rfft(x, n=self.nfft, axis=-1) + y = _radon_inner_2d_cuda( + x, + y, + 
ncp.asarray(self.f), + self.px, + self.haxis, + self.flims[0], + self.flims[1], + self.npx, + self.nh, + num_blocks=self.num_blocks_matvec, + num_threads_per_blocks=self.num_threads_per_blocks, + ) + y = ncp.real(ncp.fft.irfft(y, n=self.nfft, axis=-1))[:, : self.nt] + return y + + @reshaped + def _rmatvec_cuda(self, y: NDArray) -> NDArray: + ncp = get_array_module(y) + x = ncp.zeros((self.npx, self.nfft2), dtype=self.cdtype) + + y = ncp.fft.rfft(y, n=self.nfft, axis=-1) + x = _aradon_inner_2d_cuda( + x, + y, + ncp.asarray(self.f), + self.px, + self.haxis, + self.flims[0], + self.flims[1], + self.npx, + self.nh, + num_blocks=self.num_blocks_rmatvec, + num_threads_per_blocks=self.num_threads_per_blocks, + ) + x = ncp.real(ncp.fft.irfft(x, n=self.nfft, axis=-1))[:, : self.nt] + return x + + @reshaped + def _matvec_numba(self, x: NDArray) -> NDArray: + y = np.zeros((self.nh, self.nfft2), dtype=self.cdtype) + + x = sp.fft.rfft(x, n=self.nfft, axis=-1) + _radon_inner_2d( + x, + y, + self.f, + self.px, + self.haxis, + self.flims[0], + self.flims[1], + self.npx, + self.nh, + ) + y = np.real(sp.fft.irfft(y, n=self.nfft, axis=-1))[:, : self.nt] + return y + + @reshaped + def _rmatvec_numba(self, y: NDArray) -> NDArray: + x = np.zeros((self.npx, self.nfft2), dtype=self.cdtype) + + y = sp.fft.rfft(y, n=self.nfft, axis=-1) + _aradon_inner_2d( + x, + y, + self.f, + self.px, + self.haxis, + self.flims[0], + self.flims[1], + self.npx, + self.nh, + ) + x = np.real(sp.fft.irfft(x, n=self.nfft, axis=-1))[:, : self.nt] + return x diff --git a/pylops/signalprocessing/fourierradon3d.py b/pylops/signalprocessing/fourierradon3d.py new file mode 100755 index 00000000..b6c8a309 --- /dev/null +++ b/pylops/signalprocessing/fourierradon3d.py @@ -0,0 +1,353 @@ +__all__ = ["FourierRadon3D"] + +import logging +from typing import Optional, Tuple + +import numpy as np +import scipy as sp + +from pylops import LinearOperator +from pylops.utils import deps +from pylops.utils.backend import get_array_module, get_complex_dtype +from pylops.utils.decorators import reshaped +from pylops.utils.typing import DTypeLike, NDArray + +jit_message = deps.numba_import("the radon2d module") +cupy_message = deps.cupy_import("the radon2d module") + +if jit_message is None: + from ._fourierradon3d_numba import _aradon_inner_3d, _radon_inner_3d +if jit_message is None and cupy_message is None: + from ._fourierradon3d_cuda import _aradon_inner_3d_cuda, _radon_inner_3d_cuda + +logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.WARNING) + + +class FourierRadon3D(LinearOperator): + r"""3D Fourier Radon transform + + Apply Radon forward (and adjoint) transform using Fast + Fourier Transform to a 3-dimensional array of size :math:`[n_{p_y} \times n_{p_x} \times n_t]` + (and :math:`[n_y \times n_x \times n_t]`). + + Note that forward and adjoint follow the same convention of the time-space + implementation in :class:`pylops.signalprocessing.Radon3D`. 
+ + Parameters + ---------- + taxis : :obj:`np.ndarray` + Time axis + hyaxis : :obj:`np.ndarray` + Slow spatial axis + hxaxis : :obj:`np.ndarray` + Fast spatial axis + pyaxis : :obj:`np.ndarray` + Axis of scanning variable :math:`p_y` of parametric curve + pxaxis : :obj:`np.ndarray` + Axis of scanning variable :math:`p_x` of parametric curve + nfft : :obj:`int` + Number of samples in Fourier transform + flims : :obj:`tuple`, optional + Indices of lower and upper limits of Fourier axis to be used in + the application of the Radon matrix (when ``None``, use entire axis) + kind : :obj:`tuple` + Curves to be used for stacking/spreading along the y- and x- axes + (``("linear", "linear")``, ``("linear", "parabolic")``, + ``("parabolic", "linear")``, or ``("parabolic", "parabolic")``) + engine : :obj:`str` + Engine used for computation (``numpy`` or ``numba`` or ``cuda``) + num_threads_per_blocks : :obj:`tuple` + Number of threads in each block (only when ``engine=cuda``) + dtype : :obj:`str` + Type of elements in input array. + name : :obj:`str` + Name of operator (to be used by :func:`pylops.utils.describe.describe`) + + Attributes + ---------- + shape : :obj:`tuple` + Operator shape + explicit : :obj:`bool` + Operator contains a matrix that can be solved explicitly (``True``) or + not (``False``) + + Raises + ------ + NotImplementedError + If ``engine`` is neither ``numpy``, ``numba``, nor ``cuda``. + ValueError + If ``kind`` is not a tuple of two elements. + + Notes + ----- + The FourierRadon3D operator applies the Radon transform in the frequency domain. + After transforming a 3-dimensional array of size + :math:`[n_y \times n_x \times n_t]` into the frequency domain, the following linear + transformation is applied to each frequency component in adjoint mode: + + .. math:: + \begin{bmatrix} + \mathbf{m}(p_{y,1}, \mathbf{p}_{x}, \omega_i) \\ + \mathbf{m}(p_{y,2}, \mathbf{p}_{x}, \omega_i) \\ + \vdots \\ + \mathbf{m}(p_{y,N_{py}}, \mathbf{p}_{x}, \omega_i) + \end{bmatrix} + = + \begin{bmatrix} + e^{-j \omega_i (p_{y,1} y^l_1 + \mathbf{p}_x \cdot \mathbf{x}^l)} & e^{-j \omega_i (p_{y,1} y^l_2 + \mathbf{p}_x \cdot \mathbf{x}^l)} & \ldots & e^{-j \omega_i (p_{y,1} y^l_{N_y} + \mathbf{p}_x \cdot \mathbf{x}^l)} \\ + e^{-j \omega_i (p_{y,2} y^l_1 + \mathbf{p}_x \cdot \mathbf{x}^l)} & e^{-j \omega_i (p_{y,2} y^l_2 + \mathbf{p}_x \cdot \mathbf{x}^l)} & \ldots & e^{-j \omega_i (p_{y,2} y^l_{N_y} + \mathbf{p}_x \cdot \mathbf{x}^l)} \\ + \vdots & \vdots & \ddots & \vdots \\ + e^{-j \omega_i (p_{y,N_{py}} y^l_1 + \mathbf{p}_x \cdot \mathbf{x}^l)} & e^{-j \omega_i (p_{y,N_{py}} y^l_2 + \mathbf{p}_x \cdot \mathbf{x}^l)} & \ldots & e^{-j \omega_i (p_{y,N_{py}} y^l_{N_y} + \mathbf{p}_x \cdot \mathbf{x}^l)} \\ + \end{bmatrix} + \begin{bmatrix} + \mathbf{d}(y_1, \mathbf{x}, \omega_i) \\ + \mathbf{d}(y_2, \mathbf{x}, \omega_i) \\ + \vdots \\ + \mathbf{d}(y_{N_y}, \mathbf{x}, \omega_i) + \end{bmatrix} + + where :math:`\cdot` represents the element-wise multiplication of two vectors and + math:`l=1,2`. Similarly the forward mode is implemented by applying the + transpose and complex conjugate of the above matrix to the model transformed to + the Fourier domain. + + Refer to [1]_ for more theoretical and implementation details. + + .. [1] Sacchi, M. "Statistical and Transform Methods for + Geophysical Signal Processing", 2007. 
+ """ + + def __init__( + self, + taxis: NDArray, + hyaxis: NDArray, + hxaxis: NDArray, + pyaxis: NDArray, + pxaxis: NDArray, + nfft: int, + flims: Optional[Tuple[int, int]] = None, + kind: Tuple[str, str] = ("linear", "linear"), + engine: str = "numpy", + num_threads_per_blocks: Tuple[int, int] = (32, 32), + dtype: DTypeLike = "float64", + name: str = "R", + ) -> None: + # engine + if engine not in ["numpy", "numba", "cuda"]: + raise NotImplementedError("engine must be numpy or numba or cuda") + if engine == "numba" and jit_message is not None: + engine = "numpy" + + # kind + if len(kind) != 2: + raise ValueError("kind must be a tuple of two elements") + + # dimensions and super + dims = len(pyaxis), len(pxaxis), len(taxis) + dimsd = len(hyaxis), len(hxaxis), len(taxis) + super().__init__(dtype=np.dtype(dtype), dims=dims, dimsd=dimsd, name=name) + + # other input params + self.taxis, self.hyaxis, self.hxaxis = taxis, hyaxis, hxaxis + self.nhy, self.nhx, self.nt = self.dimsd + self.py, self.px = pyaxis, pxaxis + self.npy, self.npx, self.nfft = self.dims[0], self.dims[1], nfft + self.dt = taxis[1] - taxis[0] + self.dhy = hyaxis[1] - hyaxis[0] + self.dhx = hxaxis[1] - hxaxis[0] + self.f = np.fft.rfftfreq(self.nfft, d=self.dt).astype(self.dtype) + self.nfft2 = len(self.f) + self.cdtype = get_complex_dtype(dtype) + self.flims = (0, self.nfft2) if flims is None else flims + + if kind[0] == "parabolic": + self.hyaxis = self.hyaxis**2 + if kind[1] == "parabolic": + self.hxaxis = self.hxaxis**2 + + # create additional input parameters for engine=cuda + if engine == "cuda": + self.num_threads_per_blocks = num_threads_per_blocks + ( + num_threads_per_blocks_hpy, + num_threads_per_blocks_hpx, + num_threads_per_blocks_f, + ) = num_threads_per_blocks + num_blocks_py = ( + self.dims[0] + num_threads_per_blocks_hpy - 1 + ) // num_threads_per_blocks_hpx + num_blocks_px = ( + self.dims[1] + num_threads_per_blocks_hpx - 1 + ) // num_threads_per_blocks_hpx + num_blocks_hy = ( + self.dimsd[0] + num_threads_per_blocks_hpy - 1 + ) // num_threads_per_blocks_hpx + num_blocks_hx = ( + self.dimsd[1] + num_threads_per_blocks_hpx - 1 + ) // num_threads_per_blocks_hpx + num_blocks_f = ( + self.dims[2] + num_threads_per_blocks_f - 1 + ) // num_threads_per_blocks_f + self.num_blocks_matvec = (num_blocks_hy, num_blocks_hx, num_blocks_f) + self.num_blocks_rmatvec = (num_blocks_py, num_blocks_px, num_blocks_f) + + self._register_multiplications(engine) + + def _register_multiplications(self, engine: str) -> None: + if engine == "numba": + self._matvec = self._matvec_numba + self._rmatvec = self._rmatvec_numba + elif engine == "cuda": + self._matvec = self._matvec_cuda + self._rmatvec = self._rmatvec_cuda + else: + self._matvec = self._matvec_numpy + self._rmatvec = self._rmatvec_numpy + + @reshaped + def _matvec_numpy(self, x: NDArray) -> NDArray: + ncp = get_array_module(x) + self.f = ncp.asarray(self.f) + x = ncp.fft.rfft(x.reshape(-1, self.dims[-1]), n=self.nfft, axis=-1) + + HY, HX = ncp.meshgrid(self.hyaxis, self.hxaxis, indexing="ij") + PY, PX = ncp.meshgrid(self.py, self.px, indexing="ij") + + HYY, PYY, F = ncp.meshgrid( + HY.ravel(), PY.ravel(), self.f[self.flims[0] : self.flims[1]], indexing="ij" + ) + HXX, PXX, _ = ncp.meshgrid( + HX.ravel(), PX.ravel(), self.f[self.flims[0] : self.flims[1]], indexing="ij" + ) + + y = ncp.zeros((self.nhy * self.nhx, self.nfft2), dtype=self.cdtype) + y[:, self.flims[0] : self.flims[1]] = ncp.einsum( + "ijk,jk->ik", + ncp.exp(-1j * 2 * ncp.pi * F * (PYY * HYY + PXX * HXX)), + x[:, 
self.flims[0] : self.flims[1]], + ) + y = ncp.real(ncp.fft.irfft(y, n=self.nfft, axis=-1))[:, : self.nt] + return y + + @reshaped + def _rmatvec_numpy(self, y: NDArray) -> NDArray: + ncp = get_array_module(y) + self.f = ncp.asarray(self.f) + y = ncp.fft.rfft(y.reshape(-1, self.dimsd[-1]), n=self.nfft, axis=-1) + + HY, HX = ncp.meshgrid(self.hyaxis, self.hxaxis, indexing="ij") + PY, PX = ncp.meshgrid(self.py, self.px, indexing="ij") + + PYY, HYY, F = ncp.meshgrid( + PY.ravel(), HY.ravel(), self.f[self.flims[0] : self.flims[1]], indexing="ij" + ) + PXX, HXX, _ = ncp.meshgrid( + PX.ravel(), HX.ravel(), self.f[self.flims[0] : self.flims[1]], indexing="ij" + ) + + x = ncp.zeros((self.npy * self.npx, self.nfft2), dtype=self.cdtype) + x[:, self.flims[0] : self.flims[1]] = ncp.einsum( + "ijk,jk->ik", + ncp.exp(1j * 2 * ncp.pi * F * (PYY * HYY + PXX * HXX)), + y[:, self.flims[0] : self.flims[1]], + ) + x = ncp.real(ncp.fft.irfft(x, n=self.nfft, axis=-1))[:, : self.nt] + return x + + @reshaped + def _matvec_cuda(self, x: NDArray) -> NDArray: + ncp = get_array_module(x) + y = ncp.zeros((self.nhy, self.nhx, self.nfft2), dtype=self.cdtype) + + x = ncp.fft.rfft(x, n=self.nfft, axis=-1) + y = _radon_inner_3d_cuda( + x, + y, + ncp.asarray(self.f), + self.py, + self.px, + self.hyaxis, + self.hxaxis, + self.flims[0], + self.flims[1], + self.npy, + self.npx, + self.nhy, + self.nhx, + num_blocks=self.num_blocks_matvec, + num_threads_per_blocks=self.num_threads_per_blocks, + ) + y = ncp.real(ncp.fft.irfft(y, n=self.nfft, axis=-1))[:, :, : self.nt] + return y + + @reshaped + def _rmatvec_cuda(self, y: NDArray) -> NDArray: + ncp = get_array_module(y) + x = ncp.zeros((self.npy, self.npx, self.nfft2), dtype=self.cdtype) + + y = ncp.fft.rfft(y, n=self.nfft, axis=-1) + x = _aradon_inner_3d_cuda( + x, + y, + ncp.asarray(self.f), + self.py, + self.px, + self.hyaxis, + self.hxaxis, + self.flims[0], + self.flims[1], + self.npy, + self.npx, + self.nhy, + self.nhx, + num_blocks=self.num_blocks_rmatvec, + num_threads_per_blocks=self.num_threads_per_blocks, + ) + x = ncp.real(ncp.fft.irfft(x, n=self.nfft, axis=-1))[:, :, : self.nt] + return x + + @reshaped + def _matvec_numba(self, x: NDArray) -> NDArray: + y = np.zeros((self.nhy, self.nhx, self.nfft2), dtype=self.cdtype) + + x = sp.fft.rfft(x, n=self.nfft, axis=-1) + _radon_inner_3d( + x, + y, + self.f, + self.py, + self.px, + self.hyaxis, + self.hxaxis, + self.flims[0], + self.flims[1], + self.npy, + self.npx, + self.nhy, + self.nhx, + ) + y = np.real(sp.fft.irfft(y, n=self.nfft, axis=-1))[:, :, : self.nt] + return y + + @reshaped + def _rmatvec_numba(self, y: NDArray) -> NDArray: + x = np.zeros((self.npy, self.npx, self.nfft2), dtype=self.cdtype) + + y = sp.fft.rfft(y, n=self.nfft, axis=-1) + _aradon_inner_3d( + x, + y, + self.f, + self.py, + self.px, + self.hyaxis, + self.hxaxis, + self.flims[0], + self.flims[1], + self.npy, + self.npx, + self.nhy, + self.nhx, + ) + x = np.real(sp.fft.irfft(x, n=self.nfft, axis=-1))[:, :, : self.nt] + return x diff --git a/pylops/signalprocessing/radon2d.py b/pylops/signalprocessing/radon2d.py index d045eb8a..a76696b4 100644 --- a/pylops/signalprocessing/radon2d.py +++ b/pylops/signalprocessing/radon2d.py @@ -218,7 +218,7 @@ def Radon2D( ----- The Radon2D operator applies the following linear transform in adjoint mode to the data after reshaping it into a 2-dimensional array of - size :math:`[n_x \times n_t]` in adjoint mode: + size :math:`[n_x \times n_t]`: .. 
math:: m(p_x, t_0) = \int{d(x, t = f(p_x, x, t))} \,\mathrm{d}x diff --git a/pylops/signalprocessing/radon3d.py b/pylops/signalprocessing/radon3d.py index 4357b983..939c14ba 100644 --- a/pylops/signalprocessing/radon3d.py +++ b/pylops/signalprocessing/radon3d.py @@ -189,10 +189,10 @@ def Radon3D( ---------- taxis : :obj:`np.ndarray` Time axis - hxaxis : :obj:`np.ndarray` - Fast patial axis hyaxis : :obj:`np.ndarray` Slow spatial axis + hxaxis : :obj:`np.ndarray` + Fast spatial axis pyaxis : :obj:`np.ndarray` Axis of scanning variable :math:`p_y` of parametric curve pxaxis : :obj:`np.ndarray` diff --git a/pylops/signalprocessing/shift.py b/pylops/signalprocessing/shift.py index b7d17ed7..c94b878e 100644 --- a/pylops/signalprocessing/shift.py +++ b/pylops/signalprocessing/shift.py @@ -4,11 +4,11 @@ import numpy as np import numpy.typing as npt -from numpy.core.multiarray import normalize_axis_index from pylops.basicoperators import Diagonal from pylops.signalprocessing import FFT from pylops.utils._internal import _value_or_sized_to_array +from pylops.utils.backend import get_normalize_axis_index from pylops.utils.typing import DTypeLike @@ -113,7 +113,7 @@ def Shift( Sop = Diagonal(shift, dims=dimsdiag, axis=axis, dtype=Fop.cdtype) else: # add dimensions to shift to match dimensions of model and data - axis = normalize_axis_index(axis, len(dims)) + axis = get_normalize_axis_index()(axis, len(dims)) fdims = np.ones(shift.ndim + 1, dtype=int) fdims[axis] = Fop.f.size f = Fop.f.reshape(fdims) diff --git a/pylops/utils/backend.py b/pylops/utils/backend.py index 50da7faa..31836265 100644 --- a/pylops/utils/backend.py +++ b/pylops/utils/backend.py @@ -2,6 +2,7 @@ "get_module", "get_module_name", "get_array_module", + "get_normalize_axis_index", "get_convolve", "get_fftconvolve", "get_oaconvolve", @@ -16,8 +17,10 @@ "get_sp_fft", "get_complex_dtype", "get_real_dtype", + "to_cupy", "to_numpy", "to_cupy_conditional", + "to_numpy_conditional", "inplace_set", "inplace_add", "inplace_multiply", @@ -59,6 +62,14 @@ from jax.scipy.signal import convolve as j_convolve from jax.scipy.signal import fftconvolve as j_fftconvolve +# need to check numpy version since the namespace of normalize_axis_index +# changed in numpy>=2.0.0 +np_version = np.__version__.split(".") +if int(np_version[0]) > 1: + from numpy.lib.array_utils import normalize_axis_index +else: + from numpy.core.multiarray import normalize_axis_index + def get_module(backend: str = "numpy") -> ModuleType: """Returns correct numerical module based on backend string @@ -138,6 +149,18 @@ def get_array_module(x: npt.ArrayLike) -> ModuleType: return np +def get_normalize_axis_index() -> Callable: + """Returns correct normalize_axis_index function based on numpy version + + Returns + ------- + f : :obj:`func` + Function to be used to normalize an axis index + + """ + return normalize_axis_index + + def get_convolve(x: npt.ArrayLike) -> Callable: """Returns correct convolve module based on input @@ -463,6 +486,26 @@ def get_real_dtype(dtype: DTypeLike) -> DTypeLike: return np.real(np.ones(1, dtype)).dtype +def to_cupy(x: NDArray) -> NDArray: + """Convert x to cupy array + + Parameters + ---------- + x : :obj:`numpy.ndarray` or :obj:`cupy.ndarray` + Array to evaluate + + Returns + ------- + x : :obj:`cupy.ndarray` + Converted array + + """ + if deps.cupy_enabled: + if cp.get_array_module(x) == np: + x = cp.asarray(x) + return x + + def to_numpy(x: NDArray) -> NDArray: """Convert x to numpy array @@ -506,6 +549,28 @@ def to_cupy_conditional(x: npt.ArrayLike, y:
npt.ArrayLike) -> NDArray: return y +def to_numpy_conditional(x: npt.ArrayLike, y: npt.ArrayLike) -> NDArray: + """Convert y to numpy array conditional on x being a numpy array + + Parameters + ---------- + x : :obj:`numpy.ndarray` or :obj:`cupy.ndarray` + Array to evaluate + y : :obj:`cupy.ndarray` + Array to convert + + Returns + ------- + y : :obj:`numpy.ndarray` + Converted array + + """ + if deps.cupy_enabled: + if cp.get_array_module(x) == np and cp.get_array_module(y) == cp: + y = cp.asnumpy(y) + return y + + def inplace_set(x: npt.ArrayLike, y: npt.ArrayLike, idx: list) -> NDArray: """Perform inplace set based on input diff --git a/pylops/utils/deps.py b/pylops/utils/deps.py index ecf69a95..df4a0d9e 100644 --- a/pylops/utils/deps.py +++ b/pylops/utils/deps.py @@ -10,6 +10,7 @@ "spgl1_enabled", "sympy_enabled", "torch_enabled", + "pytensor_enabled", ] import os @@ -223,6 +224,23 @@ def sympy_import(message: Optional[str] = None) -> str: return sympy_message +def pytensor_import(message: Optional[str] = None) -> str: + if pytensor_enabled: + try: + import_module("pytensor") # noqa: F401 + + pytensor_message = None + except Exception as e: + pytensor_message = f"Failed to import pytensor (error:{e})." + else: + pytensor_message = ( + f"pytensor package not installed. In order to be able to use " + f"{message} run " + f'"pip install pytensor" or "conda install -c conda-forge pytensor".' + ) + return pytensor_message + + # Set package availability booleans # cupy and jax: the package is imported to check everything is working correctly, # if not the package is disabled. We do this here as these libraries are used as drop-in @@ -245,3 +263,4 @@ def sympy_import(message: Optional[str] = None) -> str: spgl1_enabled = util.find_spec("spgl1") is not None sympy_enabled = util.find_spec("sympy") is not None torch_enabled = util.find_spec("torch") is not None +pytensor_enabled = util.find_spec("pytensor") is not None diff --git a/pylops/utils/estimators.py b/pylops/utils/estimators.py index 887fac3f..5e73f686 100644 --- a/pylops/utils/estimators.py +++ b/pylops/utils/estimators.py @@ -87,7 +87,7 @@ def trace_hutchinson( Operator trace. Raises - ------- + ------ ValueError If ``neval`` is smaller than 3. @@ -194,7 +194,7 @@ def trace_hutchpp( Operator trace. Raises - ------- + ------ ValueError If ``neval`` is smaller than 3. @@ -294,7 +294,7 @@ def trace_nahutchpp( Operator trace. Raises - ------- + ------ ValueError If ``neval`` not large enough to accomodate ``c1`` and ``c2``.
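A small, illustrative sketch (not part of this diff) of how the new backend helpers behave; it runs whether or not cupy is installed, since both helpers fall back to no-ops on a NumPy-only machine:

import numpy as np
from pylops.utils.backend import to_cupy, to_numpy_conditional

x = np.ones(4)

# to_cupy moves a NumPy array to the GPU when cupy is available, no-op otherwise
xg = to_cupy(x)

# to_numpy_conditional(x, y) brings y back to NumPy only when x is a NumPy array;
# the updated solvers use it to keep mixed NumPy/CuPy operator chains consistent
y = to_numpy_conditional(x, xg)
assert isinstance(y, np.ndarray)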
diff --git a/pylops/utils/seismicevents.py b/pylops/utils/seismicevents.py index 1844c82d..b061734e 100755 --- a/pylops/utils/seismicevents.py +++ b/pylops/utils/seismicevents.py @@ -4,6 +4,7 @@ "parabolic2d", "hyperbolic2d", "linear3d", + "parabolic3d", "hyperbolic3d", ] @@ -13,6 +14,8 @@ import numpy.typing as npt import scipy.signal as filt +from pylops.utils._internal import _value_or_sized_to_array + def _filterdata( d: npt.NDArray, nt: int, wav: npt.ArrayLike, wcenter: int @@ -116,12 +119,9 @@ def linear2d( where :math:`p_{x,i}=\sin( \theta_i)/v` """ - if isinstance(t0, (float, int)): - t0 = (t0,) - if isinstance(theta, (float, int)): - theta = (theta,) - if isinstance(amp, (float, int)): - amp = (amp,) + t0 = _value_or_sized_to_array(t0) + theta = _value_or_sized_to_array(theta) + amp = _value_or_sized_to_array(amp) # identify dimensions dt = t[1] - t[0] @@ -160,7 +160,7 @@ def parabolic2d( r"""Parabolic 2D events Create 2d parabolic events given intercept time, - slowness, curvature, and amplitude of each event + slowness, curvature, and amplitude of each event. Parameters ---------- @@ -196,14 +196,10 @@ def parabolic2d( t_i(x) = t_{0,i} + p_{x,i} x + p_{xx,i} x^2 """ - if isinstance(t0, (float, int)): - t0 = (t0,) - if isinstance(px, (float, int)): - px = (px,) - if isinstance(pxx, (float, int)): - pxx = (pxx,) - if isinstance(amp, (float, int)): - amp = (amp,) + t0 = _value_or_sized_to_array(t0) + px = _value_or_sized_to_array(px) + pxx = _value_or_sized_to_array(pxx) + amp = _value_or_sized_to_array(amp) # identify dimensions dt = t[1] - t[0] @@ -272,12 +268,9 @@ def hyperbolic2d( t_i(x) = \sqrt{t_{0,i}^2 + \frac{x^2}{v_{\text{rms},i}^2}} """ - if isinstance(t0, (float, int)): - t0 = (t0,) - if isinstance(vrms, (float, int)): - vrms = (vrms,) - if isinstance(amp, (float, int)): - amp = (amp,) + t0 = _value_or_sized_to_array(t0) + vrms = _value_or_sized_to_array(vrms) + amp = _value_or_sized_to_array(amp) # identify dimensions dt = t[1] - t[0] @@ -330,7 +323,7 @@ def linear3d( v : :obj:`float` propagation velocity t0 : :obj:`tuple` or :obj:`float` - intercept time at :math:`x=0` of each linear event + intercept time at :math:`x=0` and :math:`y=0` of each linear event theta : :obj:`tuple` or :obj:`float` angle in x direction (in degrees) of each linear event phi : :obj:`tuple` or :obj:`float` @@ -360,14 +353,10 @@ def linear3d( and :math:`p_{x,i}=\frac{1}{v} \sin( \theta_i)\sin( \phi_i)`. """ - if isinstance(t0, (float, int)): - t0 = (t0,) - if isinstance(theta, (float, int)): - theta = (theta,) - if isinstance(phi, (float, int)): - phi = (phi,) - if isinstance(amp, (float, int)): - amp = (amp,) + t0 = _value_or_sized_to_array(t0) + theta = _value_or_sized_to_array(theta) + phi = _value_or_sized_to_array(phi) + amp = _value_or_sized_to_array(amp) # identify dimensions dt = t[1] - t[0] @@ -397,6 +386,100 @@ def linear3d( return d, dwav +def parabolic3d( + x: npt.NDArray, + y: npt.NDArray, + t: npt.NDArray, + t0: Union[float, Tuple[float]], + px: Union[float, Tuple[float]], + py: Union[float, Tuple[float]], + pxx: Union[float, Tuple[float]], + pyy: Union[float, Tuple[float]], + amp: Union[float, Tuple[float]], + wav: npt.NDArray, +) -> Tuple[npt.NDArray, npt.NDArray]: + r"""Parabolic 3D events + + Create 3d parabolic events given intercept time, + slowness, curvature, and amplitude of each event. 
+ + Parameters + ---------- + x : :obj:`numpy.ndarray` + space axis in x direction + y : :obj:`numpy.ndarray` + space axis in y direction + t : :obj:`numpy.ndarray` + time axis + t0 : :obj:`tuple` or :obj:`float` + intercept time at :math:`x=0` and :math:`y=0` of each parabolic event + px : :obj:`tuple` or :obj:`float` + slowness of each parabolic event in x direction + py : :obj:`tuple` or :obj:`float` + slowness of each parabolic event in y direction + pxx : :obj:`tuple` or :obj:`float` + curvature of each parabolic event in x direction + pyy : :obj:`tuple` or :obj:`float` + curvature of each parabolic event in y direction + amp : :obj:`tuple` or :obj:`float` + amplitude of each parabolic event + wav : :obj:`numpy.ndarray` + wavelet to be applied to data + + Returns + ------- + d : :obj:`numpy.ndarray` + data without wavelet of size + :math:`[n_y \times n_x \times n_t]` + dwav : :obj:`numpy.ndarray` + data with wavelet of size + :math:`[n_y \times n_x \times n_t]` + + Notes + ----- + Each event is created using the following relation: + + .. math:: + t_i(x, y) = t_{0,i} + p_{x,i} x + p_{y,i} y + p_{xx,i} x^2 + p_{yy,i} y^2 + + """ + t0 = _value_or_sized_to_array(t0) + px = _value_or_sized_to_array(px) + py = _value_or_sized_to_array(py) + pxx = _value_or_sized_to_array(pxx) + pyy = _value_or_sized_to_array(pyy) + amp = _value_or_sized_to_array(amp) + + # identify dimensions + dt = t[1] - t[0] + wcenter = int(len(wav) / 2) + nx = np.size(x) + ny = np.size(y) + nt = np.size(t) + wcenter + nevents = np.size(t0) + + # create events + d = np.zeros((ny, nx, nt)) + for ievent in range(nevents): + for iy in range(ny): + tevent = ( + t0[ievent] + + px[ievent] * x + + py[ievent] * y[iy] + + pxx[ievent] * x**2 + + pyy[ievent] * y[iy] ** 2 + ) + tevent = (tevent - t[0]) / dt + itevent = tevent.astype(int) + dtevent = tevent - itevent + for ix in range(nx): + if itevent[ix] < nt - 1 and itevent[ix] >= 0: + d[iy, ix, itevent[ix]] += amp[ievent] * (1 - dtevent[ix]) + d[iy, ix, itevent[ix] + 1] += amp[ievent] * dtevent[ix] + + # filter events with certain wavelet + d, dwav = _filterdata(d, nt, wav, wcenter) + return d, dwav + + def hyperbolic3d( x: npt.NDArray, y: npt.NDArray, @@ -451,14 +534,10 @@ def hyperbolic3d( simply control the curvature of the hyperboloid along the spatial axes.
""" - if isinstance(t0, (float, int)): - t0 = (t0,) - if isinstance(vrms_x, (float, int)): - vrms_x = (vrms_x,) - if isinstance(vrms_y, (float, int)): - vrms_y = (vrms_y,) - if isinstance(amp, (float, int)): - amp = (amp,) + t0 = _value_or_sized_to_array(t0) + vrms_x = _value_or_sized_to_array(vrms_x) + vrms_y = _value_or_sized_to_array(vrms_y) + amp = _value_or_sized_to_array(amp) # identify dimensions dt = t[1] - t[0] diff --git a/pylops/waveeqprocessing/marchenko.py b/pylops/waveeqprocessing/marchenko.py index bf8e3305..d810c70b 100644 --- a/pylops/waveeqprocessing/marchenko.py +++ b/pylops/waveeqprocessing/marchenko.py @@ -301,7 +301,7 @@ def apply_onepoint( greens: bool = False, dottest: bool = False, usematmul: bool = False, - **kwargs_solver + **kwargs_solver, ) -> Union[ Tuple[NDArray, NDArray, NDArray, NDArray, NDArray], Tuple[NDArray, NDArray, NDArray, NDArray], @@ -341,7 +341,7 @@ def apply_onepoint( for numpy and cupy `data`, respectively) Returns - ---------- + ------- f1_inv_minus : :obj:`numpy.ndarray` Inverted upgoing focusing function of size :math:`[n_r \times n_t]` f1_inv_plus : :obj:`numpy.ndarray` @@ -473,7 +473,7 @@ def apply_onepoint( Mop, d.ravel(), x0=self.ncp.zeros(2 * (2 * self.nt - 1) * self.nr, dtype=self.dtype), - **kwargs_solver + **kwargs_solver, )[0] f1_inv = f1_inv.reshape(2 * self.nt2, self.nr) @@ -486,8 +486,9 @@ def apply_onepoint( # Create Green's functions g_inv = Gop * f1_inv_tot.ravel() g_inv = g_inv.reshape(2 * self.nt2, self.ns) - g_inv_minus, g_inv_plus = -g_inv[: self.nt2].T, np.fliplr( - g_inv[self.nt2 :].T + g_inv_minus, g_inv_plus = ( + -g_inv[: self.nt2].T, + np.fliplr(g_inv[self.nt2 :].T), ) if rtm and greens: return f1_inv_minus, f1_inv_plus, p0_minus, g_inv_minus, g_inv_plus @@ -507,7 +508,7 @@ def apply_multiplepoints( greens: bool = False, dottest: bool = False, usematmul: bool = False, - **kwargs_solver + **kwargs_solver, ) -> Union[ Tuple[NDArray, NDArray, NDArray, NDArray, NDArray], Tuple[NDArray, NDArray, NDArray, NDArray], @@ -548,7 +549,7 @@ def apply_multiplepoints( for numpy and cupy `data`, respectively) Returns - ---------- + ------- f1_inv_minus : :obj:`numpy.ndarray` Inverted upgoing focusing function of size :math:`[n_r \times n_{vs} \times n_t]` @@ -695,7 +696,7 @@ def apply_multiplepoints( x0=self.ncp.zeros( 2 * (2 * self.nt - 1) * self.nr * nvs, dtype=self.dtype ), - **kwargs_solver + **kwargs_solver, )[0] f1_inv = f1_inv.reshape(2 * self.nt2, self.nr, nvs) diff --git a/pytests/test_basicoperators.py b/pytests/test_basicoperators.py index 8f3f0528..f12c8c68 100755 --- a/pytests/test_basicoperators.py +++ b/pytests/test_basicoperators.py @@ -16,6 +16,7 @@ Roll, Sum, Symmetrize, + ToCupy, Zero, ) from pylops.utils import dottest @@ -601,3 +602,18 @@ def test_Conj(par): assert_array_equal(x, xadj) assert_array_equal(y, np.conj(x)) assert_array_equal(xadj, np.conj(y)) + + +@pytest.mark.parametrize("par", [(par1), (par2), (par1j), (par2j), (par3)]) +def test_ToCupy(par): + """Forward and adjoint for ToCupy operator (checking that it works also + when cupy is not available) + """ + Top = ToCupy(par["nx"], dtype=par["dtype"]) + + np.random.seed(10) + x = np.random.randn(par["nx"]) + par["imag"] * np.random.randn(par["nx"]) + y = Top * x + xadj = Top.H * y + assert_array_equal(x, xadj) + assert_array_equal(y, x) diff --git a/pytests/test_fourierradon.py b/pytests/test_fourierradon.py new file mode 100755 index 00000000..accbf257 --- /dev/null +++ b/pytests/test_fourierradon.py @@ -0,0 +1,135 @@ +import numpy as np +import pytest 
+from numpy.testing import assert_array_almost_equal + +from pylops.optimization.sparsity import fista +from pylops.signalprocessing import FourierRadon2D, FourierRadon3D +from pylops.utils import dottest + +par1 = { + "nt": 11, + "nhx": 21, + "nhy": 10, + "npx": 21, + "npy": 17, + "pymax": 1e-2, + "pxmax": 2e-2, + "centeredh": True, + "kind": "linear", + "interp": True, + "engine": "numpy", +} # linear, numpy +par2 = { + "nt": 11, + "nhx": 21, + "nhy": 10, + "npx": 21, + "npy": 17, + "pymax": 1e-2, + "pxmax": 2e-2, + "kind": "linear", + "engine": "numba", +} # linear, numba +par3 = { + "nt": 11, + "nhx": 21, + "nhy": 10, + "npx": 21, + "npy": 17, + "pymax": 8e-3, + "pxmax": 7e-3, + "kind": "parabolic", + "engine": "numpy", +} # parabolic, numpy +par4 = { + "nt": 11, + "nhx": 21, + "nhy": 10, + "npx": 21, + "npy": 17, + "pymax": 8e-3, + "pxmax": 7e-3, + "kind": "parabolic", + "engine": "numba", +} # parabolic, numba + + +def test_unknown_engine2D(): + """Check error is raised if unknown engine is passed""" + with pytest.raises(NotImplementedError): + _ = FourierRadon2D(None, None, None, None, engine="foo") + + +def test_unknown_engine3D(): + """Check error is raised if unknown engine is passed""" + with pytest.raises(NotImplementedError): + _ = FourierRadon3D(None, None, None, None, None, None, engine="foo") + + +@pytest.mark.parametrize("par", [(par1), (par2), (par3), (par4)]) +def test_FourierRadon2D(par): + """Dot-test and sparse inverse for FourierRadon2D operator""" + dt, dh = 0.005, 1 + t = np.arange(par["nt"]) * dt + h = np.arange(par["nhx"]) * dh + px = np.linspace(0, par["pxmax"], par["npx"]) + nfft = int(2 ** np.ceil(np.log2(par["nt"]))) + + x = np.zeros((par["npx"], par["nt"])) + x[2, par["nt"] // 2] = 1 + + Rop = FourierRadon2D( + t, + h, + px, + nfft, + kind=par["kind"], + engine=par["engine"], + dtype="float64", + ) + assert dottest(Rop, par["nhx"] * par["nt"], par["npx"] * par["nt"], rtol=1e-3) + + y = Rop * x.ravel() + + if par["engine"] == "numba": # as numpy is too slow here... + xinv, _, _ = fista(Rop, y, niter=200, eps=3e0) + assert_array_almost_equal(x.ravel(), xinv, decimal=1) + + +@pytest.mark.parametrize("par", [(par1), (par2), (par3), (par4)]) +def test_FourierRadon3D(par): + """Dot-test and sparse inverse for FourierRadon3D operator""" + dt, dhy, dhx = 0.005, 1, 1 + t = np.arange(par["nt"]) * dt + hy = np.arange(par["nhy"]) * dhy + hx = np.arange(par["nhx"]) * dhx + py = np.linspace(0, par["pymax"], par["npy"]) + px = np.linspace(0, par["pxmax"], par["npx"]) + nfft = int(2 ** np.ceil(np.log2(par["nt"]))) + + x = np.zeros((par["npy"], par["npx"], par["nt"])) + x[3, 2, par["nt"] // 2] = 1 + + Rop = FourierRadon3D( + t, + hy, + hx, + py, + px, + nfft, + kind=(par["kind"], par["kind"]), + engine=par["engine"], + dtype="float64", + ) + assert dottest( + Rop, + par["nhy"] * par["nhx"] * par["nt"], + par["npy"] * par["npx"] * par["nt"], + rtol=1e-3, + ) + + y = Rop * x.ravel() + + if par["engine"] == "numba": # as numpy is too slow here... 
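+        # sparse inversion: FISTA should recover the single spike placed in x
+        # (compared only to one decimal, as in the 2D test above)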
+ xinv, _, _ = fista(Rop, y, niter=200, eps=1e1) + assert_array_almost_equal(x.ravel(), xinv, decimal=1) diff --git a/pytests/test_oneway.py b/pytests/test_oneway.py index 48f73a9e..1f1b936c 100755 --- a/pytests/test_oneway.py +++ b/pytests/test_oneway.py @@ -96,7 +96,7 @@ def test_PhaseShift_3dsignal(par): @pytest.mark.parametrize("par", [(par1), (par2), (par1v), (par2v)]) def test_Deghosting_2dsignal(par, create_data2D): """Deghosting of 2d data""" - p2d, p2d_minus = create_data2D(1 if par["kind"] is "p" else -1) + p2d, p2d_minus = create_data2D(1 if par["kind"] == "p" else -1) p2d_minus_inv, p2d_plus_inv = Deghosting( p2d, @@ -111,7 +111,7 @@ def test_Deghosting_2dsignal(par, create_data2D): npad=0, ntaper=0, dtype=par["dtype"], - **dict(damp=1e-10, iter_lim=60) + **dict(damp=1e-10, iter_lim=60), ) assert np.linalg.norm(p2d_minus_inv - p2d_minus) / np.linalg.norm(p2d_minus) < 3e-1 diff --git a/pytests/test_pytensoroperator.py b/pytests/test_pytensoroperator.py new file mode 100755 index 00000000..9a59bc88 --- /dev/null +++ b/pytests/test_pytensoroperator.py @@ -0,0 +1,51 @@ +import numpy as np +import pytensor +import pytest +from numpy.testing import assert_array_equal + +from pylops import MatrixMult, PyTensorOperator + +par1 = {"ny": 11, "nx": 11, "dtype": np.float32} # square +par2 = {"ny": 21, "nx": 11, "dtype": np.float32} # overdetermined + +np.random.seed(0) +rng = np.random.default_rng() + + +@pytest.mark.parametrize("par", [(par1)]) +def test_PyTensorOperator(par): + """Verify output and gradient of PyTensor function obtained from a LinearOperator.""" + Dop = MatrixMult(np.random.normal(0.0, 1.0, (par["ny"], par["nx"]))) + pytensor_op = PyTensorOperator(Dop) + + # Check gradient + inp = np.random.randn(*pytensor_op.dims) + pytensor.gradient.verify_grad(pytensor_op, (inp,), rng=rng) + + # Check value + x = pytensor.tensor.dvector() + f = pytensor.function([x], pytensor_op(x)) + out = f(inp) + assert_array_equal(out, Dop @ inp) + + +@pytest.mark.parametrize("par", [(par1)]) +def test_PyTensorOperator_nd(par): + """Verify output and gradient of PyTensor function obtained from a LinearOperator + using an ND-array.""" + otherdims = rng.choice(range(1, 3), size=rng.choice(range(2, 8))) + Dop = MatrixMult( + np.random.normal(0.0, 1.0, (par["ny"], par["nx"])), otherdims=otherdims + ) + pytensor_op = PyTensorOperator(Dop) + + # Check gradient + inp = np.random.randn(*pytensor_op.dims) + pytensor.gradient.verify_grad(pytensor_op, (inp,), rng=rng) + + # Check value + tensor = pytensor.tensor.TensorType(dtype="float64", shape=(None,) * inp.ndim) + x = tensor() + f = pytensor.function([x], pytensor_op(x)) + out = f(inp) + assert_array_equal(out, Dop @ inp) diff --git a/pytests/test_radon.py b/pytests/test_radon.py index cc4b8054..26d08ba1 100755 --- a/pytests/test_radon.py +++ b/pytests/test_radon.py @@ -1,5 +1,3 @@ -import multiprocessing - import numpy as np import pytest from numpy.testing import assert_array_almost_equal @@ -176,63 +174,59 @@ def test_Radon2D(par): "par", [(par1), (par2), (par3), (par4), (par5), (par6), (par7), (par8)] ) def test_Radon3D(par): - """Dot-test, forward and adjoint consistency check + """Dot-test, forward and adjoint consistency check (for onthefly parameter), and sparse inverse for Radon3D operator """ - if ( - par["engine"] == "numpy" or multiprocessing.cpu_count() >= 4 - ): # avoid timeout in travis for numba - - dt, dhy, dhx = 0.005, 1, 1 - t = np.arange(par["nt"]) * dt - hy = np.arange(par["nhy"]) * dhy - hx = np.arange(par["nhx"]) * dhx - py = 
np.linspace(0, par["pymax"], par["npy"]) - px = np.linspace(0, par["pxmax"], par["npx"]) - x = np.zeros((par["npy"], par["npx"], par["nt"])) - x[3, 2, par["nt"] // 2] = 1 - - Rop = Radon3D( - t, - hy, - hx, - py, - px, - centeredh=par["centeredh"], - interp=par["interp"], - kind=par["kind"], - onthefly=False, - engine=par["engine"], - dtype="float64", - ) - R1op = Radon3D( - t, - hy, - hx, - py, - px, - centeredh=par["centeredh"], - interp=par["interp"], - kind=par["kind"], - onthefly=True, - engine=par["engine"], - dtype="float64", - ) - - assert dottest( - Rop, - par["nhy"] * par["nhx"] * par["nt"], - par["npy"] * par["npx"] * par["nt"], - rtol=1e-3, - ) - y = Rop * x.ravel() - y1 = R1op * x.ravel() - assert_array_almost_equal(y, y1, decimal=4) - - xadj = Rop.H * y - xadj1 = R1op.H * y - assert_array_almost_equal(xadj, xadj1, decimal=4) - - if Rop.engine == "numba": # as numpy is too slow here... - xinv, _, _ = fista(Rop, y, niter=200, eps=3e0) - assert_array_almost_equal(x.ravel(), xinv, decimal=1) + dt, dhy, dhx = 0.005, 1, 1 + t = np.arange(par["nt"]) * dt + hy = np.arange(par["nhy"]) * dhy + hx = np.arange(par["nhx"]) * dhx + py = np.linspace(0, par["pymax"], par["npy"]) + px = np.linspace(0, par["pxmax"], par["npx"]) + x = np.zeros((par["npy"], par["npx"], par["nt"])) + x[3, 2, par["nt"] // 2] = 1 + + Rop = Radon3D( + t, + hy, + hx, + py, + px, + centeredh=par["centeredh"], + interp=par["interp"], + kind=par["kind"], + onthefly=False, + engine=par["engine"], + dtype="float64", + ) + R1op = Radon3D( + t, + hy, + hx, + py, + px, + centeredh=par["centeredh"], + interp=par["interp"], + kind=par["kind"], + onthefly=True, + engine=par["engine"], + dtype="float64", + ) + + assert dottest( + Rop, + par["nhy"] * par["nhx"] * par["nt"], + par["npy"] * par["npx"] * par["nt"], + rtol=1e-3, + ) + y = Rop * x.ravel() + y1 = R1op * x.ravel() + assert_array_almost_equal(y, y1, decimal=4) + + xadj = Rop.H * y + xadj1 = R1op.H * y + assert_array_almost_equal(xadj, xadj1, decimal=4) + + if Rop.engine == "numba": # as numpy is too slow here... + xinv, _, _ = fista(Rop, y, niter=200, eps=3e0) + assert_array_almost_equal(x.ravel(), xinv, decimal=1) diff --git a/requirements-dev.txt b/requirements-dev.txt index 6ce1fb00..8eb9f87d 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,7 +1,5 @@ -numpy>=1.21.0 +numpy>=1.21.0,<2 scipy>=1.11.0 ---extra-index-url https://download.pytorch.org/whl/cpu -torch>=1.2.0 jax numba pyfftw @@ -30,3 +28,4 @@ isort black flake8 mypy +pytensor diff --git a/requirements-doc.txt b/requirements-doc.txt index 74fea77d..e13ad06e 100644 --- a/requirements-doc.txt +++ b/requirements-doc.txt @@ -1,7 +1,9 @@ # Currently we force rdt to use numpy<2.0.0 to build the documentation -# since the dtcwt and spgl1 are not yet compatible with numpy=2.0.0 +# since the dtcwt is not yet compatible with numpy=2.0.0. 
For the +# same reason, we force devito==4.8.7 as later versions of devito +# require numpy>=2.0.0 numpy>=1.21.0,<2.0.0 -scipy>=1.11.0 +scipy>=1.11.0,<1.13 jax --extra-index-url https://download.pytorch.org/whl/cpu torch>=1.2.0 @@ -11,7 +13,7 @@ PyWavelets spgl1 scikit-fmm sympy -devito +devito==4.8.6 dtcwt matplotlib ipython @@ -32,3 +34,5 @@ isort black flake8 mypy +pytensor +pymc diff --git a/requirements-torch.txt b/requirements-torch.txt new file mode 100644 index 00000000..3f94f19f --- /dev/null +++ b/requirements-torch.txt @@ -0,0 +1,2 @@ +--index-url https://download.pytorch.org/whl/cpu +torch>=1.2.0,<2.5 diff --git a/tutorials/ilsm.py b/tutorials/ilsm.py index 1f394bfc..8759efe2 100755 --- a/tutorials/ilsm.py +++ b/tutorials/ilsm.py @@ -164,14 +164,14 @@ dpsf[ns // 2, :, :].T, extent=(rx[0], rx[-1], t[-1], t[0]), cmap="gray", - vmin=-200, - vmax=200, + vmin=-1e1, + vmax=1e1, ) axs[0].axis("tight") axs[0].set_xlabel("x [m]"), axs[0].set_ylabel("t [m]") axs[0].set_title(r"$d_{psf}$") axs[1].imshow( - mmigpsf.T, cmap="gray", extent=(x[0], x[-1], z[-1], z[0]), vmin=-200, vmax=200 + mmigpsf.T, cmap="gray", extent=(x[0], x[-1], z[-1], z[0]), vmin=-5e0, vmax=5e0 ) axs[1].scatter(Psfx.ravel() * dx, Psfz.ravel() * dz, c="r") axs[1].set_xlabel("x [m]"), axs[1].set_ylabel("z [m]") @@ -196,16 +196,16 @@ axs[0].imshow( psfs[:, 0].reshape(len(psfx) * psfsize[0], psfsize[1]).T, cmap="gray", - vmin=-200, - vmax=200, + vmin=-5e0, + vmax=5e0, ) axs[0].set_title(r"$m_{psf}$ iz=0") axs[0].axis("tight") axs[1].imshow( psfs[:, 1].reshape(len(psfx) * psfsize[0], psfsize[1]).T, cmap="gray", - vmin=-200, - vmax=200, + vmin=-5e0, + vmax=5e0, ) axs[1].set_title(r"$m_{psf}$ iz=1") axs[1].axis("tight") @@ -219,12 +219,12 @@ fig, axs = plt.subplots(1, 2, figsize=(10, 5)) axs[0].imshow( - mmig.T, cmap="gray", extent=(x[0], x[-1], z[-1], z[0]), vmin=-1e3, vmax=1e3 + mmig.T, cmap="gray", extent=(x[0], x[-1], z[-1], z[0]), vmin=-5e1, vmax=5e1 ) axs[0].set_title(r"$m_{mig}$") axs[0].axis("tight") axs[1].imshow( - mmigpsf.T, cmap="gray", extent=(x[0], x[-1], z[-1], z[0]), vmin=-1e3, vmax=1e3 + mmigpsf.T, cmap="gray", extent=(x[0], x[-1], z[-1], z[0]), vmin=-5e1, vmax=5e1 ) axs[1].set_title(r"$m_{mig, psf}$") axs[1].axis("tight") @@ -235,13 +235,17 @@ # reflectivity using the :py:func:`pylops.optimization.sparsity.fista` solver. minv, _, resnorm = pylops.optimization.sparsity.fista( - Cop, mmig.ravel(), eps=1e5, niter=100, eigsdict=dict(niter=5, tol=1e-2), show=True + Cop, mmig.ravel(), eps=1e2, niter=100, eigsdict=dict(niter=5, tol=1e-2), show=True ) minv = minv.reshape(nx, nz) fig, axs = plt.subplots(1, 2, figsize=(10, 5)) axs[0].imshow( - mmig.T, cmap="gray", extent=(x[0], x[-1], z[-1], z[0]), vmin=-500, vmax=500 + mmig.T, + cmap="gray", + extent=(x[0], x[-1], z[-1], z[0]), + vmin=-5e1, + vmax=5e1, ) axs[0].set_title(r"$m_{mig}$") axs[0].axis("tight") diff --git a/tutorials/torchop.py b/tutorials/torchop.py index 9e73d7b3..a18ff2ef 100755 --- a/tutorials/torchop.py +++ b/tutorials/torchop.py @@ -14,6 +14,7 @@ modelling operators. """ + import matplotlib.pyplot as plt import numpy as np import torch @@ -30,24 +31,24 @@ # In this example we consider a simple multidimensional functional: # # .. math:: -# \mathbf{y} = \mathbf{A} sin(\mathbf{x}) +# \mathbf{y} = \mathbf{A} \sin(\mathbf{x}) # # and we use AD to compute the gradient with respect to the input vector # evaluated at :math:`\mathbf{x}=\mathbf{x}_0` : -# :math:`\mathbf{g} = d\mathbf{y} / d\mathbf{x} |_{\mathbf{x}=\mathbf{x}_0}`. 
+# :math:`\mathbf{g} = \partial\mathbf{y} / \partial\mathbf{x} |_{\mathbf{x}=\mathbf{x}_0}`. # # Let's start by defining the Jacobian: # # .. math:: # \textbf{J} = \begin{bmatrix} -# dy_1 / dx_1 & ... & dy_1 / dx_M \\ -# ... & ... & ... \\ -# dy_N / dx_1 & ... & dy_N / dx_M +# \frac{\partial y_1}{\partial x_1} & \cdots & \frac{\partial y_1}{\partial x_M} \\ +# \vdots & \ddots & \vdots \\ +# \frac{\partial y_N}{\partial x_1} & \cdots & \frac{\partial y_N}{\partial x_M} # \end{bmatrix} = \begin{bmatrix} -# a_{11} cos(x_1) & ... & a_{1M} cos(x_M) \\ -# ... & ... & ... \\ -# a_{N1} cos(x_1) & ... & a_{NM} cos(x_M) -# \end{bmatrix} = \textbf{A} cos(\mathbf{x}) +# a_{11} \cos(x_1) & \cdots & a_{1M} \cos(x_M) \\ +# \vdots & \ddots & \vdots \\ +# a_{N1} \cos(x_1) & \cdots & a_{NM} \cos(x_M) +# \end{bmatrix} = \textbf{A} \cos(\mathbf{x}) # # Since both input and output are multidimensional, # PyTorch ``backward`` actually computes the product between the transposed