FunctionalChaosValidation and LinearModelValidation
Moved to experimental
mbaudin47 committed May 6, 2024
1 parent 80c9089 commit 461222e
Showing 11 changed files with 41 additions and 27 deletions.
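In user code, the move only changes the import path: both classes keep the same constructors and accessors, but they must now be reached through the ``openturns.experimental`` submodule. A minimal sketch of the new calling pattern (illustrative only; the toy linear model is borrowed from the LinearModelValidation docstring updated below):

import openturns as ot
import openturns.experimental as otexp

# Toy linear regression problem, mirroring the docstring example below.
func = ot.SymbolicFunction(
    ["x1", "x2", "x3"], ["x1 + x2 + sin(x2 * 2 * pi_) / 5 + 1e-3 * x3^2"]
)
sampleSize = 20
inputSample = ot.Normal(3).getSample(sampleSize)
outputSample = func(inputSample)
algo = ot.LinearModelAlgorithm(inputSample, outputSample)
algo.run()
result = algo.getResult()

# Before this commit: validation = ot.LinearModelValidation(result, splitter)
# After this commit the class lives in the experimental submodule:
splitter = ot.LeaveOneOutSplitter(sampleSize)
validation = otexp.LinearModelValidation(result, splitter)
print("R2 =", validation.computeR2Score()[0])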
3 changes: 2 additions & 1 deletion python/doc/pyplots/FunctionalChaosValidation.py
@@ -1,4 +1,5 @@
import openturns as ot
import openturns.experimental as otexp
from openturns.viewer import View
from openturns.usecases import ishigami_function

@@ -20,6 +21,6 @@
)
chaosalgo.run()
result = chaosalgo.getResult()
validation = ot.FunctionalChaosValidation(result)
validation = otexp.FunctionalChaosValidation(result)
graph = validation.drawValidation()
View(graph)
3 changes: 2 additions & 1 deletion python/doc/pyplots/LinearModelValidation.py
@@ -1,4 +1,5 @@
import openturns as ot
import openturns.experimental as otexp
from openturns.viewer import View

func = ot.SymbolicFunction(
@@ -16,6 +17,6 @@
algo.run()
result = algo.getResult()
splitter = ot.LeaveOneOutSplitter(sampleSize)
validation = ot.LinearModelValidation(result, splitter)
validation = otexp.LinearModelValidation(result, splitter)
graph = validation.drawValidation()
View(graph)
@@ -81,7 +81,7 @@ Results
FunctionalChaosSobolIndices

:template: classWithPlot.rst_t
FunctionalChaosValidation
experimental.FunctionalChaosValidation

Functional chaos on fields
==========================
2 changes: 1 addition & 1 deletion python/doc/user_manual/response_surface/lm.rst
@@ -26,4 +26,4 @@ Post-processing

:template: classWithPlot.rst_t

LinearModelValidation
experimental.LinearModelValidation
4 changes: 2 additions & 2 deletions python/src/CMakeLists.txt
@@ -872,7 +872,6 @@ ot_add_python_module(metamodel metamodel_module.i
LinearModelStepwiseAlgorithm.i LinearModelStepwiseAlgorithm_doc.i.in
LinearModelAlgorithm.i LinearModelAlgorithm_doc.i.in
LinearModelAnalysis.i LinearModelAnalysis_doc.i.in
LinearModelValidation.i LinearModelValidation_doc.i.in
RandomVector.i RandomVector_doc.i.in
PythonEvaluation.cxx
PythonGradient.cxx
@@ -887,7 +886,6 @@ ot_add_python_module(metamodel metamodel_module.i
UnionEvent.i UnionEvent_doc.i.in
FunctionalChaosRandomVector.i FunctionalChaosRandomVector_doc.i.in
FunctionalChaosSobolIndices.i FunctionalChaosSobolIndices_doc.i.in
FunctionalChaosValidation.i FunctionalChaosValidation_doc.i.in
KrigingRandomVector.i KrigingRandomVector_doc.i.in
LeastSquaresExpansion.i LeastSquaresExpansion_doc.i.in
IntegrationExpansion.i IntegrationExpansion_doc.i.in
@@ -1042,6 +1040,8 @@ ot_add_python_module(experimental experimental_module.i
RankSobolSensitivityAlgorithm.i RankSobolSensitivityAlgorithm_doc.i.in
CubaIntegration.i CubaIntegration_doc.i.in
ExperimentIntegration.i ExperimentIntegration_doc.i.in
LinearModelValidation.i LinearModelValidation_doc.i.in
FunctionalChaosValidation.i FunctionalChaosValidation_doc.i.in
)
set (OPENTURNS_PYTHON_MODULES ${OPENTURNS_PYTHON_MODULES} PARENT_SCOPE) # for the docstring test

16 changes: 10 additions & 6 deletions python/src/FunctionalChaosValidation_doc.i.in
@@ -1,6 +1,9 @@
%feature("docstring") OT::FunctionalChaosValidation
"Validate a functional chaos metamodel.

.. warning::
This class is experimental and likely to be modified in future releases.
To use it, import the ``openturns.experimental`` submodule.

Parameters
----------
@@ -13,7 +16,7 @@ splitter : :class:`~openturns.SplitterImplementation`, optional

See also
--------
FunctionalChaosAlgorithm, FunctionalChaosResult
openturns.experimental.LinearModelValidation, FunctionalChaosAlgorithm, FunctionalChaosResult

Notes
-----
@@ -47,7 +50,7 @@ conditions are met.
If model selection is involved, the naive methods based on the
:class:`~openturns.LeaveOneOutSplitter` and :class:`~openturns.KFoldSplitter`
classes can be used, but this can be much slower than the
analytical methods implemented in the :class:`~openturns.FunctionalChaosValidation`
analytical methods implemented in the :class:`~openturns.experimental.FunctionalChaosValidation`
class.
In many cases, however, the order of magnitude of the estimate from the
analytical formula applied to a sparse model is correct: the estimate of
@@ -68,8 +71,8 @@ the :math:`i`-th prediction is the prediction of the linear model
trained using the hold-out sample where the :math:`i`-th observation
was removed.
This produces a sample of residuals which can be retrieved using
the :class:`~openturns.FunctionalChaosValidation.getResidualSample` method.
The :class:`~openturns.FunctionalChaosValidation.drawValidation` performs
the :class:`~openturns.experimental.FunctionalChaosValidation.getResidualSample` method.
The :class:`~openturns.experimental.FunctionalChaosValidation.drawValidation` performs
similarly.

If the weights of the observations are not equal, the analytical method
@@ -102,6 +105,7 @@ prevents us from using the fast analytical formulas and get an
accurate estimator of the MSE.

>>> import openturns as ot
>>> import openturns.experimental as otexp
>>> from math import pi
>>> from openturns.usecases import ishigami_function
>>> im = ishigami_function.IshigamiModel()
@@ -124,7 +128,7 @@
Validate the polynomial chaos expansion using leave-one-out cross-validation.

>>> splitterLOO = ot.LeaveOneOutSplitter(sampleSize)
>>> validation = ot.FunctionalChaosValidation(result, splitterLOO)
>>> validation = otexp.FunctionalChaosValidation(result, splitterLOO)
>>> r2Score = validation.computeR2Score()
>>> print('R2 = ', r2Score[0])
R2 = 0.99...
@@ -134,7 +138,7 @@

>>> kParameter = 10
>>> splitterKF = ot.KFoldSplitter(sampleSize, kParameter)
>>> validation = ot.FunctionalChaosValidation(
>>> validation = otexp.FunctionalChaosValidation(
... result, splitterKF
... )

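The Notes above contrast the analytical leave-one-out formula with a naive cross-validation built directly on :class:`~openturns.LeaveOneOutSplitter`, which refits the expansion once per observation. As an illustration of what that naive loop looks like (this sketch is not part of the commit; the splitter indexing pattern follows the standard OpenTURNS cross-validation examples, and the sample size is kept small because the loop refits the chaos expansion for every point):

import openturns as ot
from openturns.usecases import ishigami_function

im = ishigami_function.IshigamiModel()
sampleSize = 50
inputSample = im.distributionX.getSample(sampleSize)
outputSample = im.model(inputSample)

splitter = ot.LeaveOneOutSplitter(sampleSize)
squaredErrors = ot.Sample(0, 1)
for indicesTrain, indicesTest in splitter:
    xTrain, xTest = inputSample[indicesTrain], inputSample[indicesTest]
    yTrain, yTest = outputSample[indicesTrain], outputSample[indicesTest]
    # Refit a chaos expansion on the hold-out sample ...
    algo = ot.FunctionalChaosAlgorithm(xTrain, yTrain, im.distributionX)
    algo.run()
    metamodel = algo.getResult().getMetaModel()
    # ... and record the squared prediction error on the removed point.
    for i in range(xTest.getSize()):
        error = yTest[i, 0] - metamodel(xTest[i])[0]
        squaredErrors.add([error ** 2])

print("Naive LOO MSE =", squaredErrors.computeMean()[0])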
21 changes: 13 additions & 8 deletions python/src/LinearModelValidation_doc.i.in
@@ -1,6 +1,10 @@
%feature("docstring") OT::LinearModelValidation
"Validate a linear regression metamodel.

.. warning::
This class is experimental and likely to be modified in future releases.
To use it, import the ``openturns.experimental`` submodule.

Parameters
----------
result : :class:`~openturns.LinearModelResult`
@@ -12,7 +16,7 @@ splitter : :class:`~openturns.SplitterImplementation`

See also
--------
LinearModelResult
openturns.experimental.FunctionalChaosValidation, LinearModelResult

Notes
-----
@@ -39,7 +43,7 @@ cross-validation methods can be used.
If model selection is involved, the naive methods based on the
:class:`~openturns.LeaveOneOutSplitter` and :class:`~openturns.KFoldSplitter`
classes can be used directly, but this can be much slower than the
analytical methods implemented in the :class:`~openturns.LinearModelValidation`
analytical methods implemented in the :class:`~openturns.experimental.LinearModelValidation`
class.
In many cases, however, the order of magnitude of the estimate from the
analytical formula applied to a sparse model is correct: the estimate of
@@ -60,15 +64,16 @@ the :math:`i`-th prediction is the prediction of the linear model
trained using the hold-out sample where the :math:`i`-th observation
was removed.
This produces a sample of residuals which can be retrieved using
the :class:`~openturns.LinearModelValidation.getResidualSample` method.
The :class:`~openturns.LinearModelValidation.drawValidation` performs
the :class:`~openturns.experimental.LinearModelValidation.getResidualSample` method.
The :class:`~openturns.experimental.LinearModelValidation.drawValidation` performs
similarly.

Examples
--------
Create a linear model.

>>> import openturns as ot
>>> import openturns.experimental as otexp
>>> func = ot.SymbolicFunction(
... ['x1', 'x2', 'x3'],
... ['x1 + x2 + sin(x2 * 2 * pi_) / 5 + 1e-3 * x3^2']
@@ -84,26 +89,26 @@

Validate the linear model using leave-one-out cross-validation.

>>> validation = ot.LinearModelValidation(result)
>>> validation = otexp.LinearModelValidation(result)

We can use a specific cross-validation splitter if needed.

>>> splitterLOO = ot.LeaveOneOutSplitter(sampleSize)
>>> validation = ot.LinearModelValidation(result, splitterLOO)
>>> validation = otexp.LinearModelValidation(result, splitterLOO)
>>> r2Score = validation.computeR2Score()
>>> print('R2 = ', r2Score[0])
R2 = 0.98...

Validate the linear model using K-Fold cross-validation.

>>> splitterKFold = ot.KFoldSplitter(sampleSize)
>>> validation = ot.LinearModelValidation(result, splitterKFold)
>>> validation = otexp.LinearModelValidation(result, splitterKFold)

Validate the linear model using K-Fold cross-validation and set K.

>>> kFoldParameter = 10
>>> splitterKFold = ot.KFoldSplitter(sampleSize, kFoldParameter)
>>> validation = ot.LinearModelValidation(result, splitterKFold)
>>> validation = otexp.LinearModelValidation(result, splitterKFold)

Draw the validation graph.

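Putting the docstring steps together under the new namespace, an end-to-end K-Fold validation of the linear model looks roughly as follows (illustrative sketch; the model is the one from the docstring example above, the sample size is arbitrary, and the accessors used are those documented for the class):

import openturns as ot
import openturns.experimental as otexp
from openturns.viewer import View

# Linear model from the docstring example.
func = ot.SymbolicFunction(
    ["x1", "x2", "x3"], ["x1 + x2 + sin(x2 * 2 * pi_) / 5 + 1e-3 * x3^2"]
)
sampleSize = 30
inputSample = ot.Normal(3).getSample(sampleSize)
outputSample = func(inputSample)
algo = ot.LinearModelAlgorithm(inputSample, outputSample)
algo.run()
result = algo.getResult()

# K-Fold validation with an explicit K, through the experimental namespace.
kFoldParameter = 10
splitterKFold = ot.KFoldSplitter(sampleSize, kFoldParameter)
validation = otexp.LinearModelValidation(result, splitterKFold)
print("R2  =", validation.computeR2Score()[0])
print("MSE =", validation.computeMeanSquaredError()[0])

# Cross-validation residuals and the observed-versus-predicted graph.
residuals = validation.getResidualSample()
graph = validation.drawValidation()
View(graph)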
3 changes: 3 additions & 0 deletions python/src/experimental_module.i
@@ -59,11 +59,14 @@
%include CubaIntegration.i

/* Uncertainty/Algorithm/Metamodel */
%import weightedexperiment_module.i
%include UserDefinedMetropolisHastings.i
%include FieldFunctionalChaosResult.i
%include FieldToPointFunctionalChaosAlgorithm.i
%include FieldFunctionalChaosSobolIndices.i
%include PointToFieldFunctionalChaosAlgorithm.i
%include FunctionalChaosValidation.i
%include LinearModelValidation.i

/* Uncertainty/Algorithm/EventSimulation */
%include CrossEntropyResult.i
2 changes: 0 additions & 2 deletions python/src/metamodel_module.i
@@ -60,14 +60,12 @@
%include FunctionalChaosAlgorithm.i
%include FunctionalChaosSobolIndices.i
%include MetaModelValidation.i
%include FunctionalChaosValidation.i
%include GeneralLinearModelResult.i
%include GeneralLinearModelAlgorithm.i
%include KrigingAlgorithm.i
%include LinearModelStepwiseAlgorithm.i
%include LinearModelAlgorithm.i
%include LinearModelAnalysis.i
%include LinearModelValidation.i

/* Uncertainty/Model */
%include RandomVector.i
7 changes: 4 additions & 3 deletions python/test/t_FunctionalChaosValidation_std.py
@@ -1,6 +1,7 @@
#! /usr/bin/env python

import openturns as ot
import openturns.experimental as otexp
from openturns.usecases import ishigami_function
from openturns.testing import assert_almost_equal

@@ -170,7 +171,7 @@ def computeMSENaiveKFold(
#
print("1. Analytical leave-one-out")
splitterLOO = ot.LeaveOneOutSplitter(sampleSize)
validationLOO = ot.FunctionalChaosValidation(
validationLOO = otexp.FunctionalChaosValidation(
chaosResult, splitterLOO
)
mseLOOAnalytical = validationLOO.computeMeanSquaredError()
@@ -206,7 +207,7 @@ def computeMSENaiveKFold(
#
print("2. Analytical K-Fold")
splitterKF = ot.KFoldSplitter(sampleSize, kFoldParameter)
validationKFold = ot.FunctionalChaosValidation(
validationKFold = otexp.FunctionalChaosValidation(
chaosResult, splitterKF
)
print("KFold with K = ", kFoldParameter)
@@ -261,7 +262,7 @@ def computeMSENaiveKFold(

# Analytical leave-one-out
splitterLOO = ot.LeaveOneOutSplitter(sampleSize)
validationLOO = ot.FunctionalChaosValidation(
validationLOO = otexp.FunctionalChaosValidation(
chaosResult, splitterLOO
)
mseLOOAnalytical = validationLOO.computeMeanSquaredError()
5 changes: 3 additions & 2 deletions python/test/t_LinearModelValidation_std.py
@@ -1,6 +1,7 @@
#! /usr/bin/env python

import openturns as ot
import openturns.experimental as otexp
from openturns.testing import assert_almost_equal

ot.TESTPREAMBLE()
@@ -29,7 +30,7 @@

# Create LOO validation
splitterLOO = ot.LeaveOneOutSplitter(sampleSize)
validationLOO = ot.LinearModelValidation(result, splitterLOO)
validationLOO = otexp.LinearModelValidation(result, splitterLOO)
print(validationLOO)

# Compute analytical LOO MSE
@@ -73,7 +74,7 @@

# Create KFold validation
splitterKFold = ot.KFoldSplitter(sampleSize, kFoldParameter)
validationKFold = ot.LinearModelValidation(
validationKFold = otexp.LinearModelValidation(
result, splitterKFold
)
print(validationKFold)
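The updated tests keep checking the analytical estimators against a brute-force recomputation (see the ``computeMSENaiveKFold`` helper in ``t_FunctionalChaosValidation_std.py`` above). A standalone sketch of that kind of cross-check for the linear model, with an illustrative setup that is not the exact one used in the test files:

import openturns as ot
import openturns.experimental as otexp

# Toy linear model (illustrative, not the exact test setup).
func = ot.SymbolicFunction(
    ["x1", "x2", "x3"], ["x1 + x2 + sin(x2 * 2 * pi_) / 5 + 1e-3 * x3^2"]
)
sampleSize = 100
X = ot.Normal(3).getSample(sampleSize)
Y = func(X)
algo = ot.LinearModelAlgorithm(X, Y)
algo.run()
result = algo.getResult()

kFoldParameter = 10
splitter = ot.KFoldSplitter(sampleSize, kFoldParameter)

# Analytical K-Fold MSE from the experimental validation class.
validation = otexp.LinearModelValidation(result, splitter)
mseAnalytical = validation.computeMeanSquaredError()[0]

# Naive K-Fold MSE: refit the linear model on every training fold and pool
# the squared prediction errors over the matching test folds.
squaredErrors = ot.Sample(0, 1)
for indicesTrain, indicesTest in splitter:
    xTrain, xTest = X[indicesTrain], X[indicesTest]
    yTrain, yTest = Y[indicesTrain], Y[indicesTest]
    foldAlgo = ot.LinearModelAlgorithm(xTrain, yTrain)
    foldAlgo.run()
    metamodel = foldAlgo.getResult().getMetaModel()
    for i in range(xTest.getSize()):
        error = yTest[i, 0] - metamodel(xTest[i])[0]
        squaredErrors.add([error ** 2])
mseNaive = squaredErrors.computeMean()[0]

# Without model selection the two estimates should agree closely.
print("analytical =", mseAnalytical, " naive =", mseNaive)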
