From 0456f26a56e76dff61eee833fc1c948054224ee7 Mon Sep 17 00:00:00 2001
From: Tommy Odland
Date: Fri, 24 Nov 2023 08:22:17 +0100
Subject: [PATCH] added test for ESMDA vs. SIES with one iteration

---
 src/iterative_ensemble_smoother/esmda.py | 18 ++++------
 tests/test_esmda.py                      | 46 +++++++++++++++++++++++-
 2 files changed, 52 insertions(+), 12 deletions(-)

diff --git a/src/iterative_ensemble_smoother/esmda.py b/src/iterative_ensemble_smoother/esmda.py
index e69d0a6d..a72dafc9 100644
--- a/src/iterative_ensemble_smoother/esmda.py
+++ b/src/iterative_ensemble_smoother/esmda.py
@@ -35,6 +35,7 @@
     inversion_subspace,
     normalize_alpha,
 )
+from iterative_ensemble_smoother.utils import sample_mvnormal
 
 
 class BaseESMDA(ABC):
@@ -115,18 +116,13 @@ def perturb_observations(
         # a zero cented normal means that y := L @ z, where z ~ norm(0, 1).
         # Therefore, scaling C_D by alpha is equivalent to scaling L with sqrt(alpha).
 
-        D: npt.NDArray[np.double]
-        # Two cases, depending on whether C_D was given as 1D or 2D array
-        if self.C_D.ndim == 2:
-            D = self.observations.reshape(-1, 1) + np.sqrt(
-                alpha
-            ) * self.C_D_L @ self.rng.normal(size=size)
-        else:
-            D = self.observations.reshape(-1, 1) + np.sqrt(alpha) * self.rng.normal(
-                size=size
-            ) * self.C_D_L.reshape(-1, 1)
-        assert D.shape == size
+        D: npt.NDArray[np.double]
+        # TODO: what if we have cov = [1, 2, 3] and we mask?
+        # we must pick out correct indices. 'size' is not enough...
+        D = self.observations.reshape(-1, 1) + np.sqrt(alpha) * sample_mvnormal(
+            C_dd_cholesky=self.C_D_L, rng=self.rng, size=size[1]
+        )
 
         return D
 
 
diff --git a/tests/test_esmda.py b/tests/test_esmda.py
index 3fe83c0b..faa53e90 100644
--- a/tests/test_esmda.py
+++ b/tests/test_esmda.py
@@ -25,9 +25,53 @@
 
 from iterative_ensemble_smoother.esmda import ESMDA
 from iterative_ensemble_smoother.esmda_inversion import empirical_cross_covariance
+from iterative_ensemble_smoother.sies import SIES
 
 
 class TestESMDA:
+    @pytest.mark.parametrize("num_inputs", [10, 25, 50])
+    @pytest.mark.parametrize("num_outputs", [5, 25, 50])
+    @pytest.mark.parametrize("sies_inversion", ["direct", "subspace_exact"])
+    @pytest.mark.parametrize("seed", list(range(1)))
+    def test_that_ESMDA_and_SIES_produce_same_result_with_one_step(
+        self, seed, sies_inversion, num_outputs, num_inputs
+    ):
+        """With a single step (alpha=1), ESMDA = SIES = ES.
+
+        When num_inputs < num_ensemble - 1, then Section (2.4.3) in the SIES
+        paper triggers and the result is not identical.
+        """
+        rng = np.random.default_rng(seed)
+
+        num_ensemble = 10
+        alpha = 1
+
+        # Create problem instance
+        X = rng.normal(size=(num_inputs, num_ensemble))
+        Y = rng.normal(size=(num_outputs, num_ensemble))
+        covariance = np.exp(rng.normal(size=num_outputs))
+        observations = rng.normal(size=num_outputs, loc=1)
+
+        # Create ESMDA instance and perform one iteration
+        esmda = ESMDA(covariance, observations, alpha=alpha, seed=0, inversion="exact")
+        X_ESMDA = np.copy(X)
+        for _ in range(esmda.num_assimilations()):
+            X_ESMDA = esmda.assimilate(X_ESMDA, Y)
+
+        # Create SIES instance and perform one iteration
+        sies = SIES(
+            parameters=X,
+            covariance=covariance,
+            observations=observations,
+            inversion=sies_inversion,
+            truncation=1.0,
+            seed=0,
+        )
+
+        X_SIES = sies.sies_iteration(responses=Y, step_length=1)
+
+        assert np.allclose(X_ESMDA, X_SIES)
+
     @pytest.mark.parametrize("seed", list(range(10)))
     @pytest.mark.parametrize("inversion", ["exact", "subspace"])
     def test_that_diagonal_covariance_gives_same_answer_as_dense(self, seed, inversion):
@@ -450,6 +494,6 @@ def g(X):
         args=[
             __file__,
             "-v",
-            "-k test_row_by_row_assimilation",
+            "-k test_that_ESMDA_and_SIES_produce_same_result_with_one_step",
         ]
     )