updated docstrings
tommyod committed Oct 19, 2023
1 parent 7a9c3e7 commit 5a77c23
Showing 3 changed files with 30 additions and 28 deletions.
46 changes: 26 additions & 20 deletions src/iterative_ensemble_smoother/esmda.py
@@ -171,17 +171,20 @@ def assimilate(
) -> npt.NDArray[np.double]:
"""Assimilate data and return an updated ensemble X_posterior.
num_parameters, ensemble_size
Parameters
----------
X : np.ndarray
2D array of shape (num_parameters, ensemble_size).
A 2D array of shape (num_parameters, ensemble_size). Each row
corresponds to a parameter in the model, and each column corresponds
to an ensemble member (realization).
Y : np.ndarray
2D array of shape (num_parameters, ensemble_size).
2D array of shape (num_observations, ensemble_size), containing
responses when evaluating the model at X. In other words, Y = g(X),
where g is the forward model.
overwrite : bool
If True, then arguments X and Y may be overwritten.
If False, then the method will not permute inputs in any way.
If True, then arguments X and Y may be overwritten and mutated.
If False, then the method will not mutate inputs in any way.
Setting this to True might save memory.
truncation : float
How large a fraction of the singular values to keep in the inversion
routine. Must be a float in the range (0, 1]. A lower number means
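As an aside, a minimal sketch of the high-level flow the docstring above describes, modelled on the test further down in this diff. The top-level import and the exact constructor arguments (notably alpha=5 and a diagonal covariance given as a 1D array) are assumptions for illustration, not taken verbatim from this commit.

import numpy as np
from iterative_ensemble_smoother import ESMDA

rng = np.random.default_rng(42)
num_parameters, num_observations, ensemble_size = 10, 5, 100

# Linear forward model g: maps each parameter column to a response column.
A = rng.standard_normal((num_observations, num_parameters))
def g(X):
    return A @ X

observations = rng.standard_normal(num_observations)   # observed data
covariance = np.ones(num_observations)                  # observation error variances (assumed diagonal)

X = rng.standard_normal((num_parameters, ensemble_size))  # prior ensemble

smoother = ESMDA(covariance=covariance, observations=observations, alpha=5, seed=1)
for _ in range(smoother.num_assimilations()):
    X = smoother.assimilate(X, g(X))   # posterior ensemble, same shape as the prior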
@@ -254,24 +257,26 @@ def get_K(
The purpose of this method is to facilitate row-by-row, or batch-by-batch,
updates of X. This is useful if X is too large to fit in memory.
Parameters
----------
Y : npt.NDArray[np.double]
DESCRIPTION.
* : TYPE
DESCRIPTION.
Y : np.ndarray
2D array of shape (num_observations, ensemble_size), containing
responses when evaluating the model at X. In other words, Y = g(X),
where g is the forward model.
alpha : float
DESCRIPTION.
truncation : float, optional
DESCRIPTION. The default is 1.0.
: TYPE
DESCRIPTION.
The covariance inflation factor. The sequence of alphas should
obey the equation sum_i (1/alpha_i) = 1. However, this is NOT enforced
in this method call. The user/caller is responsible for this.
truncation : float
How large a fraction of the singular values to keep in the inversion
routine. Must be a float in the range (0, 1]. A lower number means
a more approximate answer and a slightly faster computation.
Returns
-------
None.
K : np.ndarray
A matrix K such that X_posterior = X_prior + X_prior @ K.
It has shape (num_ensemble_members, num_ensemble_members).
"""

D = self.get_D(size=Y.shape, alpha=alpha)
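A hedged sketch of the row-by-row, or batch-by-batch, usage that this docstring describes, modelled on the low-level API test in tests/test_esmda.py below. The batch size and variable names are illustrative, and smoother, X and g are assumed to be set up as in the sketch further up.

# Assumes `smoother`, a prior ensemble `X` of shape (num_parameters, ensemble_size)
# and a forward model `g` as in the earlier sketch.
for alpha_i in smoother.alpha:
    K = smoother.get_K(Y=g(X), alpha=alpha_i)   # shape (ensemble_size, ensemble_size)

    # X_posterior = X_prior + X_prior @ K, applied one batch of rows at a time,
    # so the full (num_parameters, ensemble_size) product never has to be
    # formed in memory at once.
    batch_size = 1000
    for start in range(0, X.shape[0], batch_size):
        rows = slice(start, start + batch_size)
        X[rows] += X[rows] @ K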
@@ -292,13 +297,14 @@ def get_D(self, *, size: Tuple[int, int], alpha: float) -> npt.NDArray[np.double
In the Emerick (2013) paper, the matrix D is defined in section 6.
See section 2(b) of the ES-MDA algorithm in the paper.
Parameters
----------
size : Tuple[int, int]
The size, a tuple with (num_observations, ensemble_size).
alpha : float
The inflation factor for the covariance.
The covariance inflation factor. The sequence of alphas should
obey the equation sum_i (1/alpha_i) = 1. However, this is NOT enforced
in this method call. The user/caller is responsible for this.
Returns
-------
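Two small sketches related to get_D and alpha. The perturbed_observations helper below is a hypothetical stand-in for what step 2(b) of the ES-MDA algorithm in Emerick (2013) does (perturb the observations with noise inflated by sqrt(alpha)); it is not the library's actual get_D code. The second snippet shows one way a caller could satisfy the sum_i (1/alpha_i) = 1 constraint that the docstrings say is the caller's responsibility.

import numpy as np

def perturbed_observations(observations, variances, alpha, ensemble_size, rng):
    # Hypothetical illustration of ES-MDA step 2(b): d + sqrt(alpha) * C_D^(1/2) * z,
    # with C_D taken as a diagonal matrix of `variances` for simplicity.
    noise = rng.standard_normal((observations.shape[0], ensemble_size))
    return observations[:, None] + np.sqrt(alpha) * np.sqrt(variances)[:, None] * noise

# Rescaling a chosen sequence of alphas so that sum_i 1/alpha_i == 1.
alpha = np.array([8.0, 4.0, 2.0, 1.0])
alpha = alpha * np.sum(1.0 / alpha)
assert np.isclose(np.sum(1.0 / alpha), 1.0)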
8 changes: 2 additions & 6 deletions tests/test_esmda.py
@@ -405,15 +405,12 @@ def g(X):
seed=1,
)
X = np.copy(X_prior)
print(X_prior)
for iteration in range(smoother.num_assimilations()):
X = smoother.assimilate(X, g(X))
print(X)

X_posterior_highlevel_API = np.copy(X)

# =========== Use the low-level API ===========
print("# =========== Use the low-level level API ===========")
smoother = ESMDA(
covariance=covariance,
observations=observations,
@@ -422,16 +419,15 @@ def g(X):
seed=1,
)
X = np.copy(X_prior)
print(X_prior)
for alpha_i in smoother.alpha:
K = smoother.get_K(Y=g(X), alpha=alpha_i)

# TODO: Why is this equivalent?
# TODO: Why is this equivalent? ...
X_centered = X - np.mean(X, axis=1, keepdims=True)
assert np.allclose(X_centered @ K, X @ K)

# Here we could loop over each row in X and multiply by K
X += X_centered @ K
print(X)

X_posterior_lowlevel_API = np.copy(X)
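One plausible reading of the TODO above, offered as an assumption rather than something stated in this commit: if K has the form Y_centered.T @ M @ (D - Y) for some matrix M, then every column of K sums to zero, because each row of the centered anomalies Y_centered sums to zero; adding the ensemble mean back onto X_centered therefore cannot change the product, which would explain why X_centered @ K equals X @ K. A small numeric check of that reasoning, using a hypothetical K of this form:

import numpy as np

rng = np.random.default_rng(0)
num_obs, ensemble_size = 4, 6
Y = rng.standard_normal((num_obs, ensemble_size))
Y_centered = Y - Y.mean(axis=1, keepdims=True)   # each row sums to zero
M = rng.standard_normal((num_obs, num_obs))      # stands in for the inversion factor
D = rng.standard_normal((num_obs, ensemble_size))
K = Y_centered.T @ M @ (D - Y)                   # hypothetical form of K

X = rng.standard_normal((3, ensemble_size))
X_centered = X - X.mean(axis=1, keepdims=True)
assert np.allclose(np.ones(ensemble_size) @ K, 0.0)  # columns of K sum to zero
assert np.allclose(X @ K, X_centered @ K)            # the equivalence asserted in the test above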

4 changes: 2 additions & 2 deletions tests/test_esmda_inversion.py
@@ -68,8 +68,8 @@ def test_that_returning_K_is_equivalent_to_full_computation(
ans = function(alpha=alpha, C_D=C_D, D=D, Y=Y, X=X)
K = function(alpha=alpha, C_D=C_D, D=D, Y=Y, X=None, return_K=True)

X_centered = X - np.mean(X, axis=1, keepdims=True)
assert np.allclose(X + ans, X + X_centered @ K)
X - np.mean(X, axis=1, keepdims=True)
assert np.allclose(X + ans, X + X @ K)

@pytest.mark.parametrize("length", list(range(1, 101, 5)))
def test_that_the_sum_of_normalize_alpha_is_one(self, length):
