Moved FCValidation and LMValidation into experimental
mbaudin47 committed Jun 20, 2024
1 parent 04813b6 commit 493a150
Showing 14 changed files with 42 additions and 36 deletions.
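
For downstream users, the practical effect of this commit is that both validation classes move from the top-level openturns namespace to openturns.experimental. A minimal, hedged sketch of the resulting call-site change (the linear-model setup below is assumed for illustration; class and method names are taken from the diffs that follow):

import openturns as ot
import openturns.experimental as otexp  # the validation classes now live here

# Assumed setup: any LinearModelResult will do.
ot.RandomGenerator.SetSeed(0)
X = ot.Normal(2).getSample(30)
Y = ot.SymbolicFunction(["x1", "x2"], ["2 * x1 - x2 + 0.5"])(X)
algo = ot.LinearModelAlgorithm(X, Y)
algo.run()
result = algo.getResult()

# Before this commit: ot.LinearModelValidation(result)
# After this commit:
validation = otexp.LinearModelValidation(result)
print(validation.computeR2Score())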
@@ -96,12 +96,12 @@ public:
const Sample &outputSample,
const Basis &basis,
const Point &shift,
-LinearModelResult &linearModelResult);
+LinearModelResult & linearModelResultOut);

BoxCoxTransform buildWithLM(const Sample &inputSample,
const Sample &outputSample,
const Point &shift,
-LinearModelResult &linearModelResult);
+LinearModelResult & linearModelResultOut);

/** Optimization solver accessor */
OptimizationAlgorithm getOptimizationAlgorithm() const;
3 changes: 2 additions & 1 deletion python/doc/pyplots/FunctionalChaosValidation.py
@@ -1,6 +1,7 @@
import openturns as ot
from openturns.viewer import View
from openturns.usecases import ishigami_function
+import openturns.experimental as otexp

im = ishigami_function.IshigamiModel()
sampleSize = 500
@@ -20,6 +21,6 @@
)
chaosalgo.run()
result = chaosalgo.getResult()
-validation = ot.FunctionalChaosValidation(result)
+validation = otexp.FunctionalChaosValidation(result)
graph = validation.drawValidation()
View(graph)
3 changes: 2 additions & 1 deletion python/doc/pyplots/LinearModelValidation.py
@@ -1,5 +1,6 @@
import openturns as ot
from openturns.viewer import View
+import openturns.experimental as otexp

func = ot.SymbolicFunction(
['x1', 'x2', 'x3'],
@@ -16,6 +17,6 @@
algo.run()
result = algo.getResult()
splitter = ot.LeaveOneOutSplitter(sampleSize)
-validation = ot.LinearModelValidation(result, splitter)
+validation = otexp.LinearModelValidation(result, splitter)
graph = validation.drawValidation()
View(graph)
8 changes: 4 additions & 4 deletions python/doc/theory/meta_modeling/cross_validation.rst
@@ -635,16 +635,16 @@ The generic cross-validation method can be implemented using the following class
to split the data set.

Since the :class:`~openturns.LinearModelResult` is based on linear least
-squares, fast methods are implemented in the :class:`~openturns.LinearModelValidation`.
+squares, fast methods are implemented in the :class:`~openturns.experimental.LinearModelValidation`.

-See :ref:`pce_cross_validation` and :class:`~openturns.FunctionalChaosValidation`
+See :ref:`pce_cross_validation` and :class:`~openturns.experimental.FunctionalChaosValidation`
for specific methods for the the cross-validation of a polynomial chaos expansion.

.. topic:: API:

- See :class:`~openturns.MetaModelValidation`
-- See :class:`~openturns.LinearModelValidation`
-- See :class:`~openturns.FunctionalChaosValidation`
+- See :class:`~openturns.experimental.LinearModelValidation`
+- See :class:`~openturns.experimental.FunctionalChaosValidation`
- See :class:`~openturns.KFoldSplitter`
- See :class:`~openturns.LeaveOneOutSplitter`

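To make the naive-versus-fast distinction drawn in this theory page concrete, here is a hedged sketch comparing a hand-rolled leave-one-out loop against the analytical class referenced in this commit. It assumes the splitter iteration and sample-indexing idioms shown in the OpenTURNS splitter examples; the data-generating model is invented for illustration.

import openturns as ot
import openturns.experimental as otexp

ot.RandomGenerator.SetSeed(0)
sampleSize = 50
X = ot.Normal(3).getSample(sampleSize)
Y = ot.SymbolicFunction(["x1", "x2", "x3"], ["x1 + 2 * x2 - x3 + 0.1 * x3^2"])(X)

# Naive leave-one-out: refit the linear model once per held-out point.
splitter = ot.LeaveOneOutSplitter(sampleSize)
sumSquaredErrors = 0.0
for indicesTrain, indicesTest in splitter:
    algo = ot.LinearModelAlgorithm(X[indicesTrain], Y[indicesTrain])
    algo.run()
    metamodel = algo.getResult().getMetaModel()
    yPred = metamodel(X[indicesTest])
    sumSquaredErrors += (Y[indicesTest][0, 0] - yPred[0, 0]) ** 2
naiveMSE = sumSquaredErrors / sampleSize

# Fast analytical leave-one-out: a single fit, then corrected residuals.
algo = ot.LinearModelAlgorithm(X, Y)
algo.run()
validation = otexp.LinearModelValidation(algo.getResult(), splitter)
fastMSE = validation.computeMeanSquaredError()[0]
print(naiveMSE, fastMSE)  # the two estimates should closely agree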
4 changes: 2 additions & 2 deletions python/doc/theory/meta_modeling/pce_cross_validation.rst
@@ -52,11 +52,11 @@ then the fast methods presented in :ref:`cross_validation` can be applied:
- the fast leave-one-out cross-validation,
- the fast K-Fold cross-validation.

-Fast methods are implemented in :class:`~openturns.FunctionalChaosValidation`.
+Fast methods are implemented in :class:`~openturns.experimental.FunctionalChaosValidation`.

.. topic:: API:

-- See :class:`~openturns.FunctionalChaosValidation`
+- See :class:`~openturns.experimental.FunctionalChaosValidation`

.. topic:: References:

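A hedged sketch of the fast K-Fold validation of a polynomial chaos expansion, mirroring the class referenced above. The Ishigami setup and the explicit least-squares construction are assumptions made for illustration; a selection-based expansion would only give an approximate analytical estimate, as the docstring changes later in this commit explain.

import openturns as ot
import openturns.experimental as otexp
from openturns.usecases import ishigami_function

# Assumed setup: full least-squares chaos expansion of the Ishigami function.
im = ishigami_function.IshigamiModel()
ot.RandomGenerator.SetSeed(0)
sampleSize = 200
X = im.distributionX.getSample(sampleSize)
Y = im.model(X)
inputDimension = im.distributionX.getDimension()
basis = ot.OrthogonalProductPolynomialFactory(
    [ot.StandardDistributionPolynomialFactory(im.distributionX.getMarginal(i)) for i in range(inputDimension)]
)
totalDegree = 5
basisSize = basis.getEnumerateFunction().getStrataCumulatedCardinal(totalDegree)
chaosAlgo = ot.FunctionalChaosAlgorithm(
    X, Y, im.distributionX, ot.FixedStrategy(basis, basisSize), ot.LeastSquaresStrategy()
)
chaosAlgo.run()
chaosResult = chaosAlgo.getResult()

# Fast K-Fold cross-validation of the expansion.
kFoldParameter = 10
splitterKF = ot.KFoldSplitter(sampleSize, kFoldParameter)
validation = otexp.FunctionalChaosValidation(chaosResult, splitterKF)
print(validation.computeR2Score())
graph = validation.drawValidation()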
@@ -107,7 +107,7 @@ Results
FunctionalChaosSobolIndices

:template: classWithPlot.rst_t
-FunctionalChaosValidation
+experimental.FunctionalChaosValidation

Functional chaos on fields
==========================
2 changes: 1 addition & 1 deletion python/doc/user_manual/response_surface/lm.rst
@@ -26,4 +26,4 @@ Post-processing

:template: classWithPlot.rst_t

-LinearModelValidation
+experimental.LinearModelValidation
4 changes: 2 additions & 2 deletions python/src/BoxCoxFactory.i
@@ -1,8 +1,8 @@
// SWIG file BoxCoxFactory.i

// do not pass argument by reference, return it as tuple item
-%typemap(in, numinputs=0) OT::LinearModelResult & linearModelResult ($*ltype temp) %{ temp = OT::LinearModelResult(); $1 = &temp; %}
-%typemap(argout) OT::LinearModelResult & linearModelResult %{ $result = SWIG_Python_AppendOutput($result, SWIG_NewPointerObj(new OT::LinearModelResult(*$1), SWIGTYPE_p_OT__LinearModelResult, SWIG_POINTER_OWN | 0 )); %}
+%typemap(in, numinputs=0) OT::LinearModelResult & linearModelResultOut ($*ltype temp) %{ temp = OT::LinearModelResult(); $1 = &temp; %}
+%typemap(argout) OT::LinearModelResult & linearModelResultOut %{ $result = SWIG_Python_AppendOutput($result, SWIG_NewPointerObj(new OT::LinearModelResult(*$1), SWIGTYPE_p_OT__LinearModelResult, SWIG_POINTER_OWN | 0 )); %}

%typemap(in, numinputs=0) OT::GeneralLinearModelResult & generalLinearModelResult ($*ltype temp) %{ temp = OT::GeneralLinearModelResult(); $1 = &temp; %}
%typemap(argout) OT::GeneralLinearModelResult & generalLinearModelResult %{ $result = SWIG_Python_AppendOutput($result, SWIG_NewPointerObj(new OT::GeneralLinearModelResult(*$1), SWIGTYPE_p_OT__GeneralLinearModelResult, SWIG_POINTER_OWN | 0 )); %}
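The renamed linearModelResultOut parameter keeps the existing argout pattern: the C++ output reference is dropped from the Python argument list and appended to the return value. A hedged sketch of how this is expected to surface on the Python side (the buildWithLM signature is read from the header change above; the data are purely illustrative):

import openturns as ot

# Illustrative data: a strictly positive response, as a Box-Cox fit requires.
ot.RandomGenerator.SetSeed(0)
X = ot.Normal(1).getSample(100)
Y = ot.SymbolicFunction(["x1"], ["exp(0.5 * x1 + 1)"])(X)

factory = ot.BoxCoxFactory()
shift = ot.Point([0.0])
# The argout typemap turns the C++ output argument into a second return value:
# (BoxCoxTransform, LinearModelResult).
transform, linearModelResult = factory.buildWithLM(X, Y, shift)
print(transform.getLambda())
print(linearModelResult.getCoefficients())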
13 changes: 7 additions & 6 deletions python/src/FunctionalChaosValidation_doc.i.in
@@ -51,7 +51,7 @@ conditions are met.
If model selection is involved, the naive methods based on the
:class:`~openturns.LeaveOneOutSplitter` and :class:`~openturns.KFoldSplitter`
classes can be used, but this can be much slower than the
-analytical methods implemented in the :class:`~openturns.FunctionalChaosValidation`
+analytical methods implemented in the :class:`~openturns.experimental.FunctionalChaosValidation`
class.
In many cases, however, the order of magnitude of the estimate from the
analytical formula applied to a sparse model is correct: the estimate of
@@ -72,15 +72,15 @@ the :math:`i`-th prediction is the prediction of the linear model
trained using the hold-out sample where the :math:`i`-th observation
was removed.
This produces a sample of residuals which can be retrieved using
-the :class:`~openturns.FunctionalChaosValidation.getResidualSample` method.
-The :class:`~openturns.FunctionalChaosValidation.drawValidation` performs
+the :class:`~openturns.experimental.FunctionalChaosValidation.getResidualSample` method.
+The :class:`~openturns.experimental.FunctionalChaosValidation.drawValidation` performs
similarly.

If the weights of the observations are not equal, the analytical method
may not necessarily provide an accurate estimator of the mean squared error (MSE).
This is because LOO and K-Fold cross-validation do not take the weights
into account.
Since the :class:`~openturns.FunctionalChaosResult` object does not know
Since the :class:`~openturns.experimental.FunctionalChaosResult` object does not know
if the weights are equal, no exception can be generated.

If the sample was not produced from Monte Carlo, then the leave-one-out
@@ -106,6 +106,7 @@ prevents us from using the fast analytical formulas and get an
accurate estimator of the MSE.

>>> import openturns as ot
+>>> import openturns.experimental as otexp
>>> from math import pi
>>> from openturns.usecases import ishigami_function
>>> im = ishigami_function.IshigamiModel()
@@ -128,7 +129,7 @@
Validate the polynomial chaos expansion using leave-one-out cross-validation.

>>> splitterLOO = ot.LeaveOneOutSplitter(sampleSize)
->>> validation = ot.FunctionalChaosValidation(result, splitterLOO)
+>>> validation = otexp.FunctionalChaosValidation(result, splitterLOO)
>>> r2Score = validation.computeR2Score()
>>> print('R2 = ', r2Score[0])
R2 = 0.99...
@@ -138,7 +139,7 @@

>>> kParameter = 10
>>> splitterKF = ot.KFoldSplitter(sampleSize, kParameter)
>>> validation = ot.FunctionalChaosValidation(
>>> validation = otexp.FunctionalChaosValidation(
... result, splitterKF
... )

15 changes: 8 additions & 7 deletions python/src/LinearModelValidation_doc.i.in
@@ -43,7 +43,7 @@ cross-validation methods can be used.
If model selection is involved, the naive methods based on the
:class:`~openturns.LeaveOneOutSplitter` and :class:`~openturns.KFoldSplitter`
classes can be used directly, but this can be much slower than the
-analytical methods implemented in the :class:`~openturns.LinearModelValidation`
+analytical methods implemented in the :class:`~openturns.experimental.LinearModelValidation`
class.
In many cases, however, the order of magnitude of the estimate from the
analytical formula applied to a sparse model is correct: the estimate of
@@ -64,15 +64,16 @@ the :math:`i`-th prediction is the prediction of the linear model
trained using the hold-out sample where the :math:`i`-th observation
was removed.
This produces a sample of residuals which can be retrieved using
-the :class:`~openturns.LinearModelValidation.getResidualSample` method.
-The :class:`~openturns.LinearModelValidation.drawValidation` performs
+the :class:`~openturns.experimental.LinearModelValidation.getResidualSample` method.
+The :class:`~openturns.experimental.LinearModelValidation.drawValidation` performs
similarly.

Examples
--------
Create a linear model.

>>> import openturns as ot
+>>> import openturns.experimental as otexp
>>> func = ot.SymbolicFunction(
... ['x1', 'x2', 'x3'],
... ['x1 + x2 + sin(x2 * 2 * pi_) / 5 + 1e-3 * x3^2']
@@ -88,26 +89,26 @@ Create a linear model.

Validate the linear model using leave-one-out cross-validation.

->>> validation = ot.LinearModelValidation(result)
+>>> validation = otexp.LinearModelValidation(result)

We can use a specific cross-validation splitter if needed.

>>> splitterLOO = ot.LeaveOneOutSplitter(sampleSize)
->>> validation = ot.LinearModelValidation(result, splitterLOO)
+>>> validation = otexp.LinearModelValidation(result, splitterLOO)
>>> r2Score = validation.computeR2Score()
>>> print('R2 = ', r2Score[0])
R2 = 0.98...

Validate the linear model using K-Fold cross-validation.

>>> splitterKFold = ot.KFoldSplitter(sampleSize)
->>> validation = ot.LinearModelValidation(result, splitterKFold)
+>>> validation = otexp.LinearModelValidation(result, splitterKFold)

Validate the linear model using K-Fold cross-validation and set K.

>>> kFoldParameter = 10
>>> splitterKFold = ot.KFoldSplitter(sampleSize, kFoldParameter)
->>> validation = ot.LinearModelValidation(result, splitterKFold)
+>>> validation = otexp.LinearModelValidation(result, splitterKFold)

Draw the validation graph.

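The hold-out residual description in this docstring can be exercised directly. A short hedged sketch, reusing only calls quoted in the diff above (the data setup mirrors the docstring example):

import openturns as ot
import openturns.experimental as otexp

ot.RandomGenerator.SetSeed(0)
sampleSize = 100
func = ot.SymbolicFunction(
    ["x1", "x2", "x3"], ["x1 + x2 + sin(x2 * 2 * pi_) / 5 + 1e-3 * x3^2"]
)
X = ot.Normal(3).getSample(sampleSize)
Y = func(X)
algo = ot.LinearModelAlgorithm(X, Y)
algo.run()
result = algo.getResult()

splitterLOO = ot.LeaveOneOutSplitter(sampleSize)
validation = otexp.LinearModelValidation(result, splitterLOO)
residuals = validation.getResidualSample()  # one hold-out residual per observation
print(residuals.getSize())
graph = validation.drawValidation()  # observed-versus-predicted validation plot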
4 changes: 4 additions & 0 deletions python/src/experimental_module.i
@@ -83,3 +83,7 @@
%include UniformOrderStatistics.i
%include GeneralizedExtremeValueValidation.i
%include GeneralizedParetoValidation.i

+/* Uncertainty/Algorithm/Metamodel */
+%include FunctionalChaosValidation.i
+%include LinearModelValidation.i
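Since the %include lines now live in the experimental interface file, the generated wrappers are exposed through openturns.experimental. A quick hedged check of where the classes land after rebuilding the Python bindings:

import openturns as ot
import openturns.experimental as otexp

# After this commit the two classes are reachable from the experimental module only.
print(hasattr(otexp, "FunctionalChaosValidation"))  # expected: True
print(hasattr(otexp, "LinearModelValidation"))      # expected: True
print(hasattr(ot, "FunctionalChaosValidation"))     # expected: False
print(hasattr(ot, "LinearModelValidation"))         # expected: False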
2 changes: 0 additions & 2 deletions python/src/metamodel_module.i
@@ -60,14 +60,12 @@
%include FunctionalChaosAlgorithm.i
%include FunctionalChaosSobolIndices.i
%include MetaModelValidation.i
-%include FunctionalChaosValidation.i
%include GeneralLinearModelResult.i
%include GeneralLinearModelAlgorithm.i
%include KrigingAlgorithm.i
%include LinearModelStepwiseAlgorithm.i
%include LinearModelAlgorithm.i
%include LinearModelAnalysis.i
-%include LinearModelValidation.i

/* Uncertainty/Model */
%include RandomVector.i
9 changes: 4 additions & 5 deletions python/test/t_FunctionalChaosValidation_std.py
@@ -3,6 +3,7 @@
import openturns as ot
from openturns.usecases import ishigami_function
from openturns.testing import assert_almost_equal
+import openturns.experimental as otexp


def computeMSENaiveLOO(
@@ -170,7 +171,7 @@ def computeMSENaiveKFold(
#
print("1. Analytical leave-one-out")
splitterLOO = ot.LeaveOneOutSplitter(sampleSize)
-validationLOO = ot.FunctionalChaosValidation(
+validationLOO = otexp.FunctionalChaosValidation(
chaosResult, splitterLOO
)
mseLOOAnalytical = validationLOO.computeMeanSquaredError()
@@ -206,13 +207,11 @@ def computeMSENaiveKFold(
#
print("2. Analytical K-Fold")
splitterKF = ot.KFoldSplitter(sampleSize, kFoldParameter)
-validationKFold = ot.FunctionalChaosValidation(
+validationKFold = otexp.FunctionalChaosValidation(
chaosResult, splitterKF
)
print("KFold with K = ", kFoldParameter)
assert validationKFold.getSplitter().getN() == sampleSize
-# TODO: fix this
-# assert validationKFold.getSplitter().getSize() == kFoldParameter

# Compute mean squared error
mseKFoldAnalytical = validationKFold.computeMeanSquaredError()
@@ -261,7 +260,7 @@ def computeMSENaiveKFold(

# Analytical leave-one-out
splitterLOO = ot.LeaveOneOutSplitter(sampleSize)
-validationLOO = ot.FunctionalChaosValidation(
+validationLOO = otexp.FunctionalChaosValidation(
chaosResult, splitterLOO
)
mseLOOAnalytical = validationLOO.computeMeanSquaredError()
5 changes: 3 additions & 2 deletions python/test/t_LinearModelValidation_std.py
@@ -2,6 +2,7 @@

import openturns as ot
from openturns.testing import assert_almost_equal
+import openturns.experimental as otexp

ot.TESTPREAMBLE()

@@ -29,7 +30,7 @@

# Create LOO validation
splitterLOO = ot.LeaveOneOutSplitter(sampleSize)
-validationLOO = ot.LinearModelValidation(result, splitterLOO)
+validationLOO = otexp.LinearModelValidation(result, splitterLOO)
print(validationLOO)

# Compute analytical LOO MSE
@@ -73,7 +74,7 @@

# Create KFold validation
splitterKFold = ot.KFoldSplitter(sampleSize, kFoldParameter)
-validationKFold = ot.LinearModelValidation(
+validationKFold = otexp.LinearModelValidation(
result, splitterKFold
)
print(validationKFold)