From f80f7cb4b6cbc53ace09399fc64cb8e821b741e7 Mon Sep 17 00:00:00 2001 From: rgjini <88095587+rgjini@users.noreply.github.com> Date: Wed, 16 Oct 2024 10:27:58 -0700 Subject: [PATCH 1/9] GNKI doc source and edits --- docs/make.jl | 1 + docs/src/defaults.md | 9 +++++++++ docs/src/gauss_newton_kalman_inversion.md | 3 +++ docs/src/index.md | 1 + 4 files changed, 14 insertions(+) create mode 100644 docs/src/gauss_newton_kalman_inversion.md diff --git a/docs/make.jl b/docs/make.jl index 0abd2ca36..7074768b0 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -77,6 +77,7 @@ pages = [ "Examples" => examples, "List of default configurations" => "defaults.md", "Ensemble Kalman Inversion" => "ensemble_kalman_inversion.md", + "Gauss Newton Kalman Inversion" => "gauss_newton_kalman_inversion.md" "Ensemble Kalman Sampler" => "ensemble_kalman_sampler.md", "Unscented Kalman Inversion" => "unscented_kalman_inversion.md", "Learning rate schedulers" => "learning_rate_scheduler.md", diff --git a/docs/src/defaults.md b/docs/src/defaults.md index 8519a0a9d..af4bbb554 100644 --- a/docs/src/defaults.md +++ b/docs/src/defaults.md @@ -69,6 +69,15 @@ failure_handler_method = SampleSuccGauss() accelerator = DefaultAccelerator() ``` +## `process <: GaussNewtonInversion` +Process documentation [here](@ref gnki) +```julia +scheduler = DataMisfitController(terminate_at = 1) +localization_method = Localizers.SECNice() +failure_handler_method = SampleSuccGauss() +accelerator = NesterovAccelerator() +``` + ## `process <: Sampler` Process documentation [here](@ref eks) ```julia diff --git a/docs/src/gauss_newton_kalman_inversion.md b/docs/src/gauss_newton_kalman_inversion.md new file mode 100644 index 000000000..2896c8620 --- /dev/null +++ b/docs/src/gauss_newton_kalman_inversion.md @@ -0,0 +1,3 @@ +# [Gauss Newton Kalman INversion](@id gnki) + +### What Is It and What Does It Do? 
\ No newline at end of file diff --git a/docs/src/index.md b/docs/src/index.md index 30bc81266..ae82dd964 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -5,6 +5,7 @@ Currently, the following methods are implemented in the library: - Ensemble Kalman Inversion (EKI) - The traditional optimization technique based on the (perturbed-observation-based) Ensemble Kalman Filter EnKF ([Iglesias, Law, Stuart, 2013](http://dx.doi.org/10.1088/0266-5611/29/4/045001)), - Ensemble Transform Kalman Inversion (ETKI) - An optimization technique based on the (square-root-based) ensemble transform Kalman filter ([Bishop et al., 2001](http://doi.org/10.1175/1520-0493(2001)129<0420:ASWTET>2.0.CO;2), [Huang et al., 2022](http://doi.org/10.1088/1361-6420/ac99fa) ) + - Gauss Newton Kalman Inversion (GNKI) [a.k.a. Iterative Ensemble Kalman Filter with Satistical Linearization] - (description here) - Ensemble Kalman Sampler (EKS) - also obtains a Gaussian Approximation of the posterior distribution, through a Monte Carlo integration ([Garbuno-Inigo, Hoffmann, Li, Stuart, 2020](https://doi.org/10.1137/19M1251655)), - Unscented Kalman Inversion (UKI) - also obtains a Gaussian Approximation of the posterior distribution, through a quadrature based integration approach ([Huang, Schneider, Stuart, 2022](https://doi.org/10.1016/j.jcp.2022.111262)), - Sparsity-inducing Ensemble Kalman Inversion (SEKI) - Additionally adds approximate ``L^0`` and ``L^1`` penalization to the EKI ([Schneider, Stuart, Wu, 2020](https://doi.org/10.48550/arXiv.2007.06175)). 
From 1993a166e125b46b055c1833646bd30ece8aa0c4 Mon Sep 17 00:00:00 2001 From: rgjini <88095587+rgjini@users.noreply.github.com> Date: Wed, 16 Oct 2024 10:38:50 -0700 Subject: [PATCH 2/9] fix bug in make.jl --- docs/make.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/make.jl b/docs/make.jl index 7074768b0..f8272d426 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -77,7 +77,7 @@ pages = [ "Examples" => examples, "List of default configurations" => "defaults.md", "Ensemble Kalman Inversion" => "ensemble_kalman_inversion.md", - "Gauss Newton Kalman Inversion" => "gauss_newton_kalman_inversion.md" + "Gauss Newton Kalman Inversion" => "gauss_newton_kalman_inversion.md", "Ensemble Kalman Sampler" => "ensemble_kalman_sampler.md", "Unscented Kalman Inversion" => "unscented_kalman_inversion.md", "Learning rate schedulers" => "learning_rate_scheduler.md", From 63f7f536de1f573dd0dda615d5eb6d67a91a40e7 Mon Sep 17 00:00:00 2001 From: rgjini <88095587+rgjini@users.noreply.github.com> Date: Thu, 21 Nov 2024 17:20:20 -0800 Subject: [PATCH 3/9] GNKI docs with short description and algorithm described --- docs/src/gauss_newton_kalman_inversion.md | 58 ++++++++++++++++++++++- 1 file changed, 56 insertions(+), 2 deletions(-) diff --git a/docs/src/gauss_newton_kalman_inversion.md b/docs/src/gauss_newton_kalman_inversion.md index 2896c8620..374f5f4aa 100644 --- a/docs/src/gauss_newton_kalman_inversion.md +++ b/docs/src/gauss_newton_kalman_inversion.md @@ -1,3 +1,57 @@ -# [Gauss Newton Kalman INversion](@id gnki) +# [Gauss Newton Kalman Inversion](@id gnki) + +### What Is It and What Does It Do? +Gauss Netwon Kalman Inversion (GNKI) ([Chada et al, 2020](https://arxiv.org/pdf/2010.13299)), also known as the Iterative Ensemble Kalman Filter with Statistical Linearization, is a derivative-free ensemble optimizaton method based on the Gauss Newton optimization update and the Iterative Extended Kalman Filter (IExKF) ([Jazwinski, 2007]). 
In the linear case and continuous limit, GNKI recovers the true posterior mean and covariance. Empirically, GNKI performs well as an optimization algorithm in the nonlinear case. + +### Problem Formulation + +The data ``y`` and parameter vector ``\theta`` are assumed to be related according to: +```math +\tag{1} y = \mathcal{G}(\theta) + \eta \,, +``` +where ``\mathcal{G}: \mathbb{R}^p \rightarrow \mathbb{R}^d`` denotes the forward map, ``y \in \mathbb{R}^d`` is the vector of observations, and ``\eta`` is the observational noise, which is assumed to be drawn from a ``d``-dimensional Gaussian with distribution ``\mathcal{N}(0, \Gamma_y)``. The objective of the inverse problem is to compute the unknown parameters ``\theta`` given the observations ``y``, the known forward map ``\mathcal{G}``, and noise characteristics ``\eta`` of the process. + +!!! note + GNKI relies on minimizing a loss function that includes regularization. The user must specify a Gaussian prior distribution. See [Prior distributions](@ref parameter-distributions) to see how one can apply flexible constraints while maintaining Gaussian priors. + +The optimal parameters ``\theta^*`` given relation (1) minimize the loss + + ```math +\mathcal{L}(\theta, y) = \langle \mathcal{G}(\theta) - y \, , \, \Gamma_y^{-1} \left ( \mathcal{G}(\theta) - y \right ) \rangle + \langle m - \theta \, , \, \Gamma_{\theta}^{-1} \left ( m - \theta \right ) \rangle, +``` + +where ``m`` is the prior mean and ``\Gamma_{\theta}`` is the prior covariance. + +### Algorithm + +GNKI updates the ``j``-th ensemble member at the ``n``-th iteration by directly approximating the Jacobian with statistics from the ensemble. 
+ +First, the ensemble covariance matrices are computed: +```math +\begin{aligned} + &\mathcal{G}_n^{(j)} = \mathcal{G}(\theta_n^{(j)}) \qquad + \bar{\mathcal{G}}_n = \dfrac{1}{J}\sum_{k=1}^J\mathcal{G}_n^{(k)} \\ + & C^{\theta \mathcal{G}}_n = \dfrac{1}{J - 1}\sum_{k=1}^{J} + (\theta_n^{(k)} - \bar{\theta}_n )(\mathcal{G}_n^{(k)} - \bar{\mathcal{G}}_n)^T \\ + & C^{\theta \theta}_n = \dfrac{1}{J - 1} \sum_{k=1}^{J} + (\theta_n^{(k)} - \bar{\theta}_n )(\theta_n^{(k)} - \bar{\theta}_n )^T. + +\end{aligned} +``` + +Using the ensemble covariance matrices, the update equation from ``n`` to ``n+1`` under GNKI is +```math +\begin{aligned} + & K_n = \Gamma_{\theta} G_n^T \left(G_n \Gamma_{\theta} G_n^T + \Gamma_{y}\right)^{-1} , \qquad G_n = \left(C^{\theta \mathcal{G}}_n\right)^T \left(C^{\theta \theta}_n\right)^{-1} \\ + + & \theta_{n+1}^{(j)} = \theta_n^{(j)} + \alpha \left\{ K_n\left(y_n^{(j)} - \mathcal{G}(\theta_n^{(j)})\right) + \left(I - K_n G_n\right)\left(m_n^{(j)} - \theta_n^{(j)}\right) \right\}, +\end{aligned} +``` + +where ``y_n^{(j)} \sim \mathcal{N}(y, 2\alpha^{-1}\Gamma_y)`` and ``m_n^{(j)} \sim \mathcal{N}(m, 2\alpha^{-1}\Gamma_{\theta})``. + +## Creating the EKI Object + +An ensemble Kalman inversion object can be created using the `EnsembleKalmanProcess` constructor by specifying the ` GaussNewtonInversion()` process type. + -### What Is It and What Does It Do? 
\ No newline at end of file From 0cbf394a7d4e6f9b7292f570cf88b4b97c3d07f1 Mon Sep 17 00:00:00 2001 From: rgjini <88095587+rgjini@users.noreply.github.com> Date: Wed, 16 Oct 2024 10:27:58 -0700 Subject: [PATCH 4/9] GNKI doc source and edits --- docs/make.jl | 1 + docs/src/defaults.md | 9 +++++++++ docs/src/gauss_newton_kalman_inversion.md | 3 +++ docs/src/index.md | 1 + 4 files changed, 14 insertions(+) create mode 100644 docs/src/gauss_newton_kalman_inversion.md diff --git a/docs/make.jl b/docs/make.jl index 4ec2618f4..f256c9105 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -77,6 +77,7 @@ pages = [ "Examples" => examples, "List of default configurations" => "defaults.md", "Ensemble Kalman Inversion" => "ensemble_kalman_inversion.md", + "Gauss Newton Kalman Inversion" => "gauss_newton_kalman_inversion.md" "Ensemble Kalman Sampler" => "ensemble_kalman_sampler.md", "Unscented Kalman Inversion" => "unscented_kalman_inversion.md", "Learning rate schedulers" => "learning_rate_scheduler.md", diff --git a/docs/src/defaults.md b/docs/src/defaults.md index 8519a0a9d..af4bbb554 100644 --- a/docs/src/defaults.md +++ b/docs/src/defaults.md @@ -69,6 +69,15 @@ failure_handler_method = SampleSuccGauss() accelerator = DefaultAccelerator() ``` +## `process <: GaussNewtonInversion` +Process documentation [here](@ref gnki) +```julia +scheduler = DataMisfitController(terminate_at = 1) +localization_method = Localizers.SECNice() +failure_handler_method = SampleSuccGauss() +accelerator = NesterovAccelerator() +``` + ## `process <: Sampler` Process documentation [here](@ref eks) ```julia diff --git a/docs/src/gauss_newton_kalman_inversion.md b/docs/src/gauss_newton_kalman_inversion.md new file mode 100644 index 000000000..2896c8620 --- /dev/null +++ b/docs/src/gauss_newton_kalman_inversion.md @@ -0,0 +1,3 @@ +# [Gauss Newton Kalman INversion](@id gnki) + +### What Is It and What Does It Do? 
\ No newline at end of file diff --git a/docs/src/index.md b/docs/src/index.md index 30bc81266..ae82dd964 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -5,6 +5,7 @@ Currently, the following methods are implemented in the library: - Ensemble Kalman Inversion (EKI) - The traditional optimization technique based on the (perturbed-observation-based) Ensemble Kalman Filter EnKF ([Iglesias, Law, Stuart, 2013](http://dx.doi.org/10.1088/0266-5611/29/4/045001)), - Ensemble Transform Kalman Inversion (ETKI) - An optimization technique based on the (square-root-based) ensemble transform Kalman filter ([Bishop et al., 2001](http://doi.org/10.1175/1520-0493(2001)129<0420:ASWTET>2.0.CO;2), [Huang et al., 2022](http://doi.org/10.1088/1361-6420/ac99fa) ) + - Gauss Newton Kalman Inversion (GNKI) [a.k.a. Iterative Ensemble Kalman Filter with Satistical Linearization] - (description here) - Ensemble Kalman Sampler (EKS) - also obtains a Gaussian Approximation of the posterior distribution, through a Monte Carlo integration ([Garbuno-Inigo, Hoffmann, Li, Stuart, 2020](https://doi.org/10.1137/19M1251655)), - Unscented Kalman Inversion (UKI) - also obtains a Gaussian Approximation of the posterior distribution, through a quadrature based integration approach ([Huang, Schneider, Stuart, 2022](https://doi.org/10.1016/j.jcp.2022.111262)), - Sparsity-inducing Ensemble Kalman Inversion (SEKI) - Additionally adds approximate ``L^0`` and ``L^1`` penalization to the EKI ([Schneider, Stuart, Wu, 2020](https://doi.org/10.48550/arXiv.2007.06175)). 
From 5b709086949e643510d25d3648a9cf358745ac8a Mon Sep 17 00:00:00 2001 From: rgjini <88095587+rgjini@users.noreply.github.com> Date: Wed, 16 Oct 2024 10:38:50 -0700 Subject: [PATCH 5/9] fix bug in make.jl --- docs/make.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/make.jl b/docs/make.jl index f256c9105..a0608bd27 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -77,7 +77,7 @@ pages = [ "Examples" => examples, "List of default configurations" => "defaults.md", "Ensemble Kalman Inversion" => "ensemble_kalman_inversion.md", - "Gauss Newton Kalman Inversion" => "gauss_newton_kalman_inversion.md" + "Gauss Newton Kalman Inversion" => "gauss_newton_kalman_inversion.md", "Ensemble Kalman Sampler" => "ensemble_kalman_sampler.md", "Unscented Kalman Inversion" => "unscented_kalman_inversion.md", "Learning rate schedulers" => "learning_rate_scheduler.md", From af546b663711e6d8b9860ed76935f0f1344df4e9 Mon Sep 17 00:00:00 2001 From: rgjini <88095587+rgjini@users.noreply.github.com> Date: Thu, 21 Nov 2024 17:20:20 -0800 Subject: [PATCH 6/9] GNKI docs with short description and algorithm described --- docs/src/gauss_newton_kalman_inversion.md | 58 ++++++++++++++++++++++- 1 file changed, 56 insertions(+), 2 deletions(-) diff --git a/docs/src/gauss_newton_kalman_inversion.md b/docs/src/gauss_newton_kalman_inversion.md index 2896c8620..374f5f4aa 100644 --- a/docs/src/gauss_newton_kalman_inversion.md +++ b/docs/src/gauss_newton_kalman_inversion.md @@ -1,3 +1,57 @@ -# [Gauss Newton Kalman INversion](@id gnki) +# [Gauss Newton Kalman Inversion](@id gnki) + +### What Is It and What Does It Do? +Gauss Netwon Kalman Inversion (GNKI) ([Chada et al, 2020](https://arxiv.org/pdf/2010.13299)), also known as the Iterative Ensemble Kalman Filter with Statistical Linearization, is a derivative-free ensemble optimizaton method based on the Gauss Newton optimization update and the Iterative Extended Kalman Filter (IExKF) ([Jazwinski, 2007]). 
In the linear case and continuous limit, GNKI recovers the true posterior mean and covariance. Empirically, GNKI performs well as an optimization algorithm in the nonlinear case. + +### Problem Formulation + +The data ``y`` and parameter vector ``\theta`` are assumed to be related according to: +```math +\tag{1} y = \mathcal{G}(\theta) + \eta \,, +``` +where ``\mathcal{G}: \mathbb{R}^p \rightarrow \mathbb{R}^d`` denotes the forward map, ``y \in \mathbb{R}^d`` is the vector of observations, and ``\eta`` is the observational noise, which is assumed to be drawn from a ``d``-dimensional Gaussian with distribution ``\mathcal{N}(0, \Gamma_y)``. The objective of the inverse problem is to compute the unknown parameters ``\theta`` given the observations ``y``, the known forward map ``\mathcal{G}``, and noise characteristics ``\eta`` of the process. + +!!! note + GNKI relies on minimizing a loss function that includes regularization. The user must specify a Gaussian prior distribution. See [Prior distributions](@ref parameter-distributions) to see how one can apply flexible constraints while maintaining Gaussian priors. + +The optimal parameters ``\theta^*`` given relation (1) minimize the loss + + ```math +\mathcal{L}(\theta, y) = \langle \mathcal{G}(\theta) - y \, , \, \Gamma_y^{-1} \left ( \mathcal{G}(\theta) - y \right ) \rangle + \langle m - \theta \, , \, \Gamma_{\theta}^{-1} \left ( m - \theta \right ) \rangle, +``` + +where ``m`` is the prior mean and ``\Gamma_{\theta}`` is the prior covariance. + +### Algorithm + +GNKI updates the ``j``-th ensemble member at the ``n``-th iteration by directly approximating the Jacobian with statistics from the ensemble. 
+ +First, the ensemble covariance matrices are computed: +```math +\begin{aligned} + &\mathcal{G}_n^{(j)} = \mathcal{G}(\theta_n^{(j)}) \qquad + \bar{\mathcal{G}}_n = \dfrac{1}{J}\sum_{k=1}^J\mathcal{G}_n^{(k)} \\ + & C^{\theta \mathcal{G}}_n = \dfrac{1}{J - 1}\sum_{k=1}^{J} + (\theta_n^{(k)} - \bar{\theta}_n )(\mathcal{G}_n^{(k)} - \bar{\mathcal{G}}_n)^T \\ + & C^{\theta \theta}_n = \dfrac{1}{J - 1} \sum_{k=1}^{J} + (\theta_n^{(k)} - \bar{\theta}_n )(\theta_n^{(k)} - \bar{\theta}_n )^T. + +\end{aligned} +``` + +Using the ensemble covariance matrices, the update equation from ``n`` to ``n+1`` under GNKI is +```math +\begin{aligned} + & K_n = \Gamma_{\theta} G_n^T \left(G_n \Gamma_{\theta} G_n^T + \Gamma_{y}\right)^{-1} , \qquad G_n = \left(C^{\theta \mathcal{G}}_n\right)^T \left(C^{\theta \theta}_n\right)^{-1} \\ + + & \theta_{n+1}^{(j)} = \theta_n^{(j)} + \alpha \left\{ K_n\left(y_n^{(j)} - \mathcal{G}(\theta_n^{(j)})\right) + \left(I - K_n G_n\right)\left(m_n^{(j)} - \theta_n^{(j)}\right) \right\}, +\end{aligned} +``` + +where ``y_n^{(j)} \sim \mathcal{N}(y, 2\alpha^{-1}\Gamma_y)`` and ``m_n^{(j)} \sim \mathcal{N}(m, 2\alpha^{-1}\Gamma_{\theta})``. + +## Creating the EKI Object + +An ensemble Kalman inversion object can be created using the `EnsembleKalmanProcess` constructor by specifying the ` GaussNewtonInversion()` process type. + -### What Is It and What Does It Do? 
\ No newline at end of file From 1e1b25c5c69c94a2d686214430ce494f931fe87a Mon Sep 17 00:00:00 2001 From: rgjini <88095587+rgjini@users.noreply.github.com> Date: Wed, 18 Dec 2024 16:52:31 -0500 Subject: [PATCH 7/9] fixing up GNKI documentation formatting and making a couple small additions --- docs/src/gauss_newton_kalman_inversion.md | 18 +++++++++++++----- docs/src/index.md | 4 ++-- 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/docs/src/gauss_newton_kalman_inversion.md b/docs/src/gauss_newton_kalman_inversion.md index 374f5f4aa..55cefb8a0 100644 --- a/docs/src/gauss_newton_kalman_inversion.md +++ b/docs/src/gauss_newton_kalman_inversion.md @@ -1,7 +1,7 @@ # [Gauss Newton Kalman Inversion](@id gnki) ### What Is It and What Does It Do? -Gauss Netwon Kalman Inversion (GNKI) ([Chada et al, 2020](https://arxiv.org/pdf/2010.13299)), also known as the Iterative Ensemble Kalman Filter with Statistical Linearization, is a derivative-free ensemble optimizaton method based on the Gauss Newton optimization update and the Iterative Extended Kalman Filter (IExKF) ([Jazwinski, 2007]). In the linear case and continuous limit, GNKI recovers the true posterior mean and covariance. Empirically, GNKI performs well as an optimization algorithm in the nonlinear case. +Gauss Netwon Kalman Inversion (GNKI) ([Chada et al, 2020](https://doi.org/10.48550/arXiv.2010.13299)), also known as the Iterative Ensemble Kalman Filter with Statistical Linearization, is a derivative-free ensemble optimizaton method based on the Gauss Newton optimization update and the Iterative Extended Kalman Filter (IExKF) ([Jazwinski, 1970](https://books.google.com/books?hl=en&lr=&id=4AqL3vE2J-sC&oi=fnd&pg=PP1&ots=434RD37EaN&sig=MhbgcFsSpqf3UsgqWybtnhBkVDU#v=onepage&q&f=false)). In the linear case and continuous limit, GNKI recovers the true posterior mean and covariance. Empirically, GNKI performs well as an optimization algorithm in the nonlinear case. 
### Problem Formulation @@ -12,7 +12,7 @@ The data ``y`` and parameter vector ``\theta`` are assumed to be related accordi where ``\mathcal{G}: \mathbb{R}^p \rightarrow \mathbb{R}^d`` denotes the forward map, ``y \in \mathbb{R}^d`` is the vector of observations, and ``\eta`` is the observational noise, which is assumed to be drawn from a ``d``-dimensional Gaussian with distribution ``\mathcal{N}(0, \Gamma_y)``. The objective of the inverse problem is to compute the unknown parameters ``\theta`` given the observations ``y``, the known forward map ``\mathcal{G}``, and noise characteristics ``\eta`` of the process. !!! note - GNKI relies on minimizing a loss function that includes regularization. The user must specify a Gaussian prior distribution. See [Prior distributions](@ref parameter-distributions) to see how one can apply flexible constraints while maintaining Gaussian priors. + GNKI relies on minimizing a loss function that includes regularization. The user must specify a Gaussian prior with distribution ``\mathcal{N}(m, \Gamma_{\theta})``. See [Prior distributions](@ref parameter-distributions) to see how one can apply flexible constraints while maintaining Gaussian priors. 
The optimal parameters ``\theta^*`` given relation (1) minimize the loss @@ -42,9 +42,15 @@ First, the ensemble covariance matrices are computed: Using the ensemble covariance matrices, the update equation from ``n`` to ``n+1`` under GNKI is ```math \begin{aligned} - & K_n = \Gamma_{\theta} G_n^T \left(G_n \Gamma_{\theta} G_n^T + \Gamma_{y}\right)^{-1} , \qquad G_n = \left(C^{\theta \mathcal{G}}_n\right)^T \left(C^{\theta \theta}_n\right)^{-1} \\ + & \theta_{n+1}^{(j)} = \theta_n^{(j)} + \alpha \left\{ K_n\left(y_n^{(j)} - \mathcal{G}(\theta_n^{(j)})\right) + \left(I - K_n G_n\right)\left(m_n^{(j)} - \theta_n^{(j)}\right) \right\} \\ + + & \\ + + & K_n = \Gamma_{\theta} G_n^T \left(G_n \Gamma_{\theta} G_n^T + \Gamma_{y}\right)^{-1} \\ + + & G_n = \left(C^{\theta \mathcal{G}}_n\right)^T \left(C^{\theta \theta}_n\right)^{-1}, + - & \theta_{n+1}^{(j)} = \theta_n^{(j)} + \alpha \left\{ K_n\left(y_n^{(j)} - \mathcal{G}(\theta_n^{(j)})\right) + \left(I - K_n G_n\right)\left(m_n^{(j)} - \theta_n^{(j)}\right) \right\}, \end{aligned} ``` @@ -52,6 +58,8 @@ where ``y_n^{(j)} \sim \mathcal{N}(y, 2\alpha^{-1}\Gamma_y)`` and ``m_n^{(j)} \s ## Creating the EKI Object -An ensemble Kalman inversion object can be created using the `EnsembleKalmanProcess` constructor by specifying the ` GaussNewtonInversion()` process type. +We first build a prior distribution (for details of the construction see [here](@ref constrained-gaussian)). +Then we build our EKP object with `EnsembleKalmanProcess(args..., GaussNewtonInversion(prior); kwargs...)`. For general EKP object creation requirements see [Creating the EKI object](@ref eki). To make updates using the inversion algorithm see [Updating the Ensemble](@ref eki). 
+ diff --git a/docs/src/index.md b/docs/src/index.md index ae82dd964..2e42a50c8 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -4,8 +4,8 @@ Currently, the following methods are implemented in the library: - Ensemble Kalman Inversion (EKI) - The traditional optimization technique based on the (perturbed-observation-based) Ensemble Kalman Filter EnKF ([Iglesias, Law, Stuart, 2013](http://dx.doi.org/10.1088/0266-5611/29/4/045001)), - - Ensemble Transform Kalman Inversion (ETKI) - An optimization technique based on the (square-root-based) ensemble transform Kalman filter ([Bishop et al., 2001](http://doi.org/10.1175/1520-0493(2001)129<0420:ASWTET>2.0.CO;2), [Huang et al., 2022](http://doi.org/10.1088/1361-6420/ac99fa) ) - - Gauss Newton Kalman Inversion (GNKI) [a.k.a. Iterative Ensemble Kalman Filter with Satistical Linearization] - (description here) + - Ensemble Transform Kalman Inversion (ETKI) - An optimization technique based on the (square-root-based) ensemble transform Kalman filter ([Bishop et al., 2001](http://doi.org/10.1175/1520-0493(2001)129<0420:ASWTET>2.0.CO;2), [Huang et al., 2022](http://doi.org/10.1088/1361-6420/ac99fa)), + - Gauss Newton Kalman Inversion (GNKI) [a.k.a. 
Iterative Ensemble Kalman Filter with Satistical Linearization] - An optimization technique based on the Gauss Newton optimization update and the iterative extended Kalman filter ([Chada et al, 2020](https://doi.org/10.48550/arXiv.2010.13299)), - Ensemble Kalman Sampler (EKS) - also obtains a Gaussian Approximation of the posterior distribution, through a Monte Carlo integration ([Garbuno-Inigo, Hoffmann, Li, Stuart, 2020](https://doi.org/10.1137/19M1251655)), - Unscented Kalman Inversion (UKI) - also obtains a Gaussian Approximation of the posterior distribution, through a quadrature based integration approach ([Huang, Schneider, Stuart, 2022](https://doi.org/10.1016/j.jcp.2022.111262)), - Sparsity-inducing Ensemble Kalman Inversion (SEKI) - Additionally adds approximate ``L^0`` and ``L^1`` penalization to the EKI ([Schneider, Stuart, Wu, 2020](https://doi.org/10.48550/arXiv.2007.06175)). From 3644a3426a75559ee8b65fc00777c1dea455c316 Mon Sep 17 00:00:00 2001 From: rgjini <88095587+rgjini@users.noreply.github.com> Date: Thu, 19 Dec 2024 15:33:57 -0500 Subject: [PATCH 8/9] fixed code block --- docs/src/gauss_newton_kalman_inversion.md | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/docs/src/gauss_newton_kalman_inversion.md b/docs/src/gauss_newton_kalman_inversion.md index 55cefb8a0..9582903c1 100644 --- a/docs/src/gauss_newton_kalman_inversion.md +++ b/docs/src/gauss_newton_kalman_inversion.md @@ -59,7 +59,13 @@ where ``y_n^{(j)} \sim \mathcal{N}(y, 2\alpha^{-1}\Gamma_y)`` and ``m_n^{(j)} \s ## Creating the EKI Object We first build a prior distribution (for details of the construction see [here](@ref constrained-gaussian)). -Then we build our EKP object with `EnsembleKalmanProcess(args..., GaussNewtonInversion(prior); kwargs...)`. For general EKP object creation requirements see [Creating the EKI object](@ref eki). To make updates using the inversion algorithm see [Updating the Ensemble](@ref eki). 
+Then we build our EKP object with +```julia +using EnsembleKalmanProcesses + +gnkiobj = EnsembleKalmanProcess(args..., GaussNewtonInversion(prior); kwargs...) +``` +For general EKP object creation requirements see [Creating the EKI object](@ref eki). To make updates using the inversion algorithm see [Updating the Ensemble](@ref eki). From df4e866cd4a752dfc199dad4f30db1d40c9e46c3 Mon Sep 17 00:00:00 2001 From: rgjini <88095587+rgjini@users.noreply.github.com> Date: Thu, 19 Dec 2024 17:32:24 -0500 Subject: [PATCH 9/9] added an additional reference and fixed typo --- docs/src/gauss_newton_kalman_inversion.md | 2 +- docs/src/index.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/src/gauss_newton_kalman_inversion.md b/docs/src/gauss_newton_kalman_inversion.md index 9582903c1..dcf220651 100644 --- a/docs/src/gauss_newton_kalman_inversion.md +++ b/docs/src/gauss_newton_kalman_inversion.md @@ -1,7 +1,7 @@ # [Gauss Newton Kalman Inversion](@id gnki) ### What Is It and What Does It Do? -Gauss Netwon Kalman Inversion (GNKI) ([Chada et al, 2020](https://doi.org/10.48550/arXiv.2010.13299)), also known as the Iterative Ensemble Kalman Filter with Statistical Linearization, is a derivative-free ensemble optimizaton method based on the Gauss Newton optimization update and the Iterative Extended Kalman Filter (IExKF) ([Jazwinski, 1970](https://books.google.com/books?hl=en&lr=&id=4AqL3vE2J-sC&oi=fnd&pg=PP1&ots=434RD37EaN&sig=MhbgcFsSpqf3UsgqWybtnhBkVDU#v=onepage&q&f=false)). In the linear case and continuous limit, GNKI recovers the true posterior mean and covariance. Empirically, GNKI performs well as an optimization algorithm in the nonlinear case. 
+Gauss Netwon Kalman Inversion (GNKI) ([Chada et al., 2021](https://doi.org/10.48550/arXiv.2010.13299), [Chen & Oliver, 2013](https://doi.org/10.1007/s10596-013-9351-5)), also known as the Iterative Ensemble Kalman Filter with Statistical Linearization, is a derivative-free ensemble optimizaton method based on the Gauss Newton optimization update and the Iterative Extended Kalman Filter (IExKF) ([Jazwinski, 1970](https://books.google.com/books?hl=en&lr=&id=4AqL3vE2J-sC&oi=fnd&pg=PP1&ots=434RD37EaN&sig=MhbgcFsSpqf3UsgqWybtnhBkVDU#v=onepage&q&f=false)). In the linear case and continuous limit, GNKI recovers the true posterior mean and covariance. Empirically, GNKI performs well as an optimization algorithm in the nonlinear case. ### Problem Formulation diff --git a/docs/src/index.md b/docs/src/index.md index 2e42a50c8..7dfb97eb4 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -5,7 +5,7 @@ Currently, the following methods are implemented in the library: - Ensemble Kalman Inversion (EKI) - The traditional optimization technique based on the (perturbed-observation-based) Ensemble Kalman Filter EnKF ([Iglesias, Law, Stuart, 2013](http://dx.doi.org/10.1088/0266-5611/29/4/045001)), - Ensemble Transform Kalman Inversion (ETKI) - An optimization technique based on the (square-root-based) ensemble transform Kalman filter ([Bishop et al., 2001](http://doi.org/10.1175/1520-0493(2001)129<0420:ASWTET>2.0.CO;2), [Huang et al., 2022](http://doi.org/10.1088/1361-6420/ac99fa)), - - Gauss Newton Kalman Inversion (GNKI) [a.k.a. Iterative Ensemble Kalman Filter with Satistical Linearization] - An optimization technique based on the Gauss Newton optimization update and the iterative extended Kalman filter ([Chada et al, 2020](https://doi.org/10.48550/arXiv.2010.13299)), + - Gauss Newton Kalman Inversion (GNKI) [a.k.a. 
Iterative Ensemble Kalman Filter with Statistical Linearization] - An optimization technique based on the Gauss Newton optimization update and the iterative extended Kalman filter ([Chada et al., 2021](https://doi.org/10.48550/arXiv.2010.13299), [Chen & Oliver, 2013](https://doi.org/10.1007/s10596-013-9351-5)), - Ensemble Kalman Sampler (EKS) - also obtains a Gaussian Approximation of the posterior distribution, through a Monte Carlo integration ([Garbuno-Inigo, Hoffmann, Li, Stuart, 2020](https://doi.org/10.1137/19M1251655)), - Unscented Kalman Inversion (UKI) - also obtains a Gaussian Approximation of the posterior distribution, through a quadrature based integration approach ([Huang, Schneider, Stuart, 2022](https://doi.org/10.1016/j.jcp.2022.111262)), - Sparsity-inducing Ensemble Kalman Inversion (SEKI) - Additionally adds approximate ``L^0`` and ``L^1`` penalization to the EKI ([Schneider, Stuart, Wu, 2020](https://doi.org/10.48550/arXiv.2007.06175)).