diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index bcf916000f..8c6c9187d3 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -18,6 +18,7 @@ jobs:
-DCMAKE_INSTALL_PREFIX=~/.local \
-DPython_EXECUTABLE=/opt/homebrew/bin/python3 \
-DUSE_BISON=OFF \
+ -DUSE_CMINPACK=OFF \
-DFLEX_EXECUTABLE=/opt/homebrew/opt/flex/bin/flex \
-DBISON_EXECUTABLE=/opt/homebrew/opt/bison/bin/bison \
-DCMAKE_CXX_FLAGS="-Wall -Wextra -Werror" \
diff --git a/ChangeLog b/ChangeLog
index a914958fe2..5aed94614a 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -6,6 +6,7 @@
==== Major changes ====
* Swapped InverseGamma shape/scale parameters: InverseGamma(k, lambda)
+ * New Gaussian process regression classes
==== New classes ====
diff --git a/lib/etc/openturns.conf.in b/lib/etc/openturns.conf.in
index 1096b2f30b..42051ad828 100644
--- a/lib/etc/openturns.conf.in
+++ b/lib/etc/openturns.conf.in
@@ -901,6 +901,18 @@
+
+  <!-- GaussianProcessFitter parameters -->
+  <GaussianProcessFitter-KeepCovariance value_bool="true" />
+  <GaussianProcessFitter-OptimizeParameters value_bool="true" />
+  <GaussianProcessFitter-UnbiasedVariance value_bool="true" />
+  <GaussianProcessFitter-UseAnalyticalAmplitudeEstimate value_bool="true" />
+  <GaussianProcessFitter-DefaultOptimizationLowerBound value_float="1.0e-2" />
+  <GaussianProcessFitter-DefaultOptimizationScaleFactor value_float="2.0" />
+  <GaussianProcessFitter-DefaultOptimizationUpperBound value_float="1.0e2" />
+  <GaussianProcessFitter-MeanEpsilon value_float="1.0e-12" />
+  <GaussianProcessFitter-DefaultOptimizationAlgorithm value_str="Cobyla" />
+  <GaussianProcessFitter-LinearAlgebra value_str="LAPACK" />
diff --git a/lib/src/Base/Common/ResourceMap.cxx b/lib/src/Base/Common/ResourceMap.cxx
index 41d1839b76..89aab1fad3 100644
--- a/lib/src/Base/Common/ResourceMap.cxx
+++ b/lib/src/Base/Common/ResourceMap.cxx
@@ -1521,6 +1521,18 @@ void ResourceMap::loadDefaultConfiguration()
addAsString("GeneralLinearModelAlgorithm-DefaultOptimizationAlgorithm", "TNC");
addAsString("GeneralLinearModelAlgorithm-LinearAlgebra", "LAPACK");
+ // GaussianProcessFitter parameters //
+ addAsBool("GaussianProcessFitter-KeepCovariance", true);
+ addAsBool("GaussianProcessFitter-OptimizeParameters", true);
+ addAsBool("GaussianProcessFitter-UnbiasedVariance", true);
+ addAsBool("GaussianProcessFitter-UseAnalyticalAmplitudeEstimate", true);
+ addAsScalar("GaussianProcessFitter-DefaultOptimizationLowerBound", 1.0e-2);
+ addAsScalar("GaussianProcessFitter-DefaultOptimizationScaleFactor", 2.0);
+ addAsScalar("GaussianProcessFitter-DefaultOptimizationUpperBound", 1.0e2);
+ addAsScalar("GaussianProcessFitter-MeanEpsilon", 1.0e-12);
+ addAsString("GaussianProcessFitter-DefaultOptimizationAlgorithm", "Cobyla");
+ addAsString("GaussianProcessFitter-LinearAlgebra", "LAPACK");
+
// KrigingAlgorithm parameters //
addAsString("KrigingAlgorithm-LinearAlgebra", "LAPACK");
diff --git a/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/CMakeLists.txt b/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/CMakeLists.txt
index 865d48934c..d41196a6a3 100644
--- a/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/CMakeLists.txt
+++ b/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/CMakeLists.txt
@@ -10,7 +10,11 @@ ot_add_source_file (KrigingAlgorithm.cxx)
ot_add_source_file (KrigingResult.cxx)
ot_add_source_file (GeneralLinearModelResult.cxx)
ot_add_source_file (GeneralLinearModelAlgorithm.cxx)
-
+ot_add_source_file (GaussianProcessFitterResult.cxx)
+ot_add_source_file (GaussianProcessFitter.cxx)
+ot_add_source_file (GaussianProcessRegressionResult.cxx)
+ot_add_source_file (GaussianProcessRegression.cxx)
+ot_add_source_file (GaussianProcessConditionalCovariance.cxx)
ot_install_header_file (KrigingEvaluation.hxx)
ot_install_header_file (KrigingGradient.hxx)
@@ -18,5 +22,10 @@ ot_install_header_file (KrigingAlgorithm.hxx)
ot_install_header_file (KrigingResult.hxx)
ot_install_header_file (GeneralLinearModelResult.hxx)
ot_install_header_file (GeneralLinearModelAlgorithm.hxx)
+ot_install_header_file (GaussianProcessFitterResult.hxx)
+ot_install_header_file (GaussianProcessFitter.hxx)
+ot_install_header_file (GaussianProcessRegressionResult.hxx)
+ot_install_header_file (GaussianProcessRegression.hxx)
+ot_install_header_file (GaussianProcessConditionalCovariance.hxx)
ot_install_header_file (OTKriging.hxx)
diff --git a/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/GaussianProcessConditionalCovariance.cxx b/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/GaussianProcessConditionalCovariance.cxx
new file mode 100644
index 0000000000..df397cdb8f
--- /dev/null
+++ b/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/GaussianProcessConditionalCovariance.cxx
@@ -0,0 +1,465 @@
+// -*- C++ -*-
+/**
+ * @brief The postprocessing of a GPR result (conditional covariance)
+ *
+ * Copyright 2005-2024 Airbus-EDF-IMACS-ONERA-Phimeca
+ *
+ * This library is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+#include "openturns/GaussianProcessConditionalCovariance.hxx"
+#include "openturns/OSS.hxx"
+#include "openturns/PersistentObjectFactory.hxx"
+#include "openturns/Log.hxx"
+#include "openturns/AggregatedFunction.hxx"
+
+BEGIN_NAMESPACE_OPENTURNS
+
+CLASSNAMEINIT(GaussianProcessConditionalCovariance)
+static const Factory<GaussianProcessConditionalCovariance> Factory_GaussianProcessConditionalCovariance;
+
+/* Default constructor */
+GaussianProcessConditionalCovariance::GaussianProcessConditionalCovariance()
+ : PersistentObject()
+ , result_()
+{
+ // Nothing to do
+}
+
+
+/* Constructor with parameters & Cholesky factor */
+GaussianProcessConditionalCovariance::GaussianProcessConditionalCovariance(const GaussianProcessRegressionResult & result)
+ : PersistentObject()
+ , result_(result)
+{
+ computePhi();
+}
+
+
+/* Virtual constructor */
+GaussianProcessConditionalCovariance * GaussianProcessConditionalCovariance::clone() const
+{
+ return new GaussianProcessConditionalCovariance(*this);
+}
+
+
+/* String converter */
+String GaussianProcessConditionalCovariance::__repr__() const
+{
+ return OSS(true) << "class=" << getClassName()
+ << ", GPR Result=" << result_;
+}
+
+String GaussianProcessConditionalCovariance::__str__(const String & ) const
+{
+ OSS oss(false);
+ oss << getClassName() << "(";
+  oss << "gpr result=" << result_;
+  oss << ")";
+  return oss;
+}
+
+/* Compute the cross matrix phi (not necessarily square) */
+void GaussianProcessConditionalCovariance::computePhi()
+{
+ // regression matrix F
+ const Matrix F(result_.getRegressionMatrix());
+  // Nothing to do if there is no trend basis (empty design matrix)
+  if (F.getNbColumns() == 0) return;
+  LOGINFO("Solve linear system L * phi = F");
+ const Matrix phi(solveTriangularSystem(F));
+ // Compute QR decomposition of Phi_
+ LOGINFO("Compute the QR decomposition of phi");
+ Matrix G;
+ (void) phi.computeQR(G);
+ Gt_ = G.transpose();
+ phiT_ = phi.transpose();
+}
+
+Matrix GaussianProcessConditionalCovariance::solveTriangularSystem(const Matrix & rhs) const
+{
+ Matrix result;
+ const GaussianProcessFitterResult::LinearAlgebra method = result_.getLinearAlgebraMethod();
+ if (method == GaussianProcessFitterResult::LAPACK)
+ {
+ result = result_.getCholeskyFactor().solveLinearSystem(rhs);
+ }
+ else
+ {
+ result = result_.getHMatCholeskyFactor().solveLower(rhs);
+ }
+ return result;
+}
+
+/* Compute mean of new points conditionally to observations */
+Sample GaussianProcessConditionalCovariance::getConditionalMean(const Sample & xi) const
+{
+  // For a process of dimension p & xi's size=s,
+  // the returned sample has size s and dimension p
+  const UnsignedInteger inputDimension = xi.getDimension();
+  const CovarianceModel covarianceModel(result_.getCovarianceModel());
+  if (inputDimension != covarianceModel.getInputDimension())
+    throw InvalidArgumentException(HERE) << " In GaussianProcessConditionalCovariance::getConditionalMean, input data should have the same dimension as the covariance model's input dimension. Here, (input dimension = " << inputDimension << ", covariance model input dimension = " << covarianceModel.getInputDimension() << ")";
+  const UnsignedInteger sampleSize = xi.getSize();
+  if (sampleSize == 0)
+    throw InvalidArgumentException(HERE) << " In GaussianProcessConditionalCovariance::getConditionalMean, expected a non-empty sample";
+ const Function metaModel(result_.getMetaModel());
+ // Use of metamodel to return result
+ // Need to think if it is required to implement a specific method
+ // in order to avoid data copy
+ // sample is of size xi.getSize() * covarianceModel.getDimension()
+ return metaModel.operator()(xi);
+}
+
+/* Compute mean of new points conditionally to observations */
+Point GaussianProcessConditionalCovariance::getConditionalMean(const Point & xi) const
+{
+ // Use of meta model evaluation
+ const Point output(result_.getMetaModel().operator()(xi));
+ return output;
+}
+
+
+/* Compute covariance matrix conditionally to observations*/
+CovarianceMatrix GaussianProcessConditionalCovariance::getConditionalCovariance(const Sample & xi) const
+{
+ // For a process of dimension p & xi's size=s,
+ // returned matrix should have dimensions (p * s) x (p * s)
+ const Basis basis(result_.getBasis());
+ const CovarianceModel covarianceModel(result_.getCovarianceModel());
+ const UnsignedInteger inputDimension = xi.getDimension();
+ const UnsignedInteger outputDimension = covarianceModel.getOutputDimension();
+ const UnsignedInteger sampleSize = xi.getSize();
+  if (sampleSize == 0)
+    throw InvalidArgumentException(HERE) << " In GaussianProcessConditionalCovariance::getConditionalCovariance, expected a non-empty sample";
+  if (inputDimension != covarianceModel.getInputDimension())
+    throw InvalidArgumentException(HERE) << " In GaussianProcessConditionalCovariance::getConditionalCovariance, input data should have the same dimension as the covariance model's input dimension. Here, (input dimension = " << inputDimension << ", covariance model input dimension = " << covarianceModel.getInputDimension() << ")";
+
+ // 1) compute \sigma_{x,x}
+ LOGINFO("Compute interactions Sigma_xx");
+ const CovarianceMatrix sigmaXX(covarianceModel.discretize(xi));
+
+ // 2) compute \sigma_{y,x}
+ // compute r(x), the crossCovariance between the conditioned data & xi
+ LOGINFO("Compute cross-interactions sigmaYX");
+ const Matrix crossCovariance(covarianceModel.computeCrossCovariance(result_.getInputSample(), xi));
+ // 3) Compute r^t R^{-1} r'(x)
+ // As we get the Cholesky factor L, we can solve triangular linear system
+ // We define B = L^{-1} * r(x)
+ LOGINFO("Solve L.B = SigmaYX");
+ const Matrix B(solveTriangularSystem(crossCovariance));
+ // Use of gram to compute B^{t} * B
+ // Default gram computes B*B^t
+ // With transpose argument=true, it performs B^t*B
+ // With full argument=false, lower triangular matrix B^t*B is not symmetrized
+ LOGINFO("Compute B^tB");
+ const CovarianceMatrix BtB(B.computeGram(true));
+
+ // Interest is to compute sigma_xx -= BtB
+ // However it is not trivial that A - B is a covariance matrix if A & B are covariance matrices
+ // Symmetric : ok but not necessary definite. Here by definition it is!
+ // So should we define operator - & operator -= with covariances?
+ LOGINFO("Compute Sigma_xx-BtB");
+ CovarianceMatrix result(*sigmaXX.getImplementation() - *BtB.getImplementation() );
+
+ // Case of simple Kriging
+ if(basis.getSize() == 0) return result;
+
+ // Case of universal Kriging: compute the covariance due to the regression part
+ // Additional information have to be computed
+ // 1) compute F
+ LOGINFO("Compute the regression matrix F");
+ // 2) Interest is (F^t R^{-1} F)^{-1}
+ // F^{t} R^{-1} F = F^{t} L^{-t} L^{-1} F
+ // Solve first L phi = F
+ // 3) Compute u(x) = F^t *R^{-1} * r(x) - f(x)
+ // = F^{t} * L^{-1}^t * L{-1} * r(x) - f(x)
+ // = phiT_ * B - f(x)
+ LOGINFO("Compute psi = phi^t * B");
+ Matrix ux(phiT_ * B);
+ // compute f(x) & define u = psi - f(x)
+ LOGINFO("Compute f(x) & ux = psi - fx");
+ const UnsignedInteger basisSize = basis.getSize();
+ // Basis \Phi is a function from R^{inputDimension} to R^{outputDimension}
+ // As we get B functions, total number of values is B * outputDimension
+ // Compute fx & ux = ux - fx
+ for (UnsignedInteger j = 0; j < basisSize; ++j)
+ {
+ // Compute phi_j (X)
+ // Here we use potential parallelism in the evaluation of the basis functions
+ // It generates a sample of shape (sampleSize, outputDimension)
+ const Sample basisSample(basis[j](xi));
+ for (UnsignedInteger i = 0; i < sampleSize; ++i)
+ for (UnsignedInteger outputMarginal = 0; outputMarginal < outputDimension; ++outputMarginal)
+ ux(j * outputDimension + outputMarginal, outputMarginal + i * outputDimension) -= basisSample(i, outputMarginal);
+ }
+
+ // interest now is to solve G rho = ux
+ LOGINFO("Solve linear system G * rho = ux");
+ const Matrix rho(Gt_.solveLinearSystem(ux));
+ LOGINFO("Compute Sigma_xx-BtB + rho^{t}*rho");
+ result = result + rho.computeGram(true);
+ return result;
+}
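+// Informal recap of the computation above: writing C = L L^t for the covariance
+// discretized over the conditioning points, the returned matrix is
+//   Sigma_post(x) = Sigma_xx - B^t B + rho^t rho
+// with B = L^{-1} r(x) and rho solving G rho = u(x) = phi^t B - f(x),
+// i.e. the usual universal Kriging posterior covariance.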
+
+/* Compute covariance matrix conditionally to observations*/
+CovarianceMatrix GaussianProcessConditionalCovariance::getConditionalCovariance(const Point & point) const
+{
+ const UnsignedInteger inputDimension = point.getDimension();
+ const CovarianceModel covarianceModel(result_.getCovarianceModel());
+ if (inputDimension != covarianceModel.getInputDimension())
+    throw InvalidArgumentException(HERE) << " In GaussianProcessConditionalCovariance::getConditionalCovariance, input data should have the same dimension as the covariance model's input dimension. Here, (input dimension = " << inputDimension << ", covariance model input dimension = " << covarianceModel.getInputDimension() << ")";
+ const Sample pointAsSample(1, point);
+ return getConditionalCovariance(pointAsSample);
+}
+
+/** Compute covariance matrices conditionally to observations (1 cov / point)*/
+GaussianProcessConditionalCovariance::CovarianceMatrixCollection GaussianProcessConditionalCovariance::getConditionalMarginalCovariance(const Sample & xi) const
+{
+  // For a process of dimension p & xi's size=s,
+  // return a collection of s covariance matrices of size p x p
+ const CovarianceModel covarianceModel(result_.getCovarianceModel());
+ const UnsignedInteger inputDimension = xi.getDimension();
+  if (inputDimension != covarianceModel.getInputDimension())
+    throw InvalidArgumentException(HERE) << " In GaussianProcessConditionalCovariance::getConditionalMarginalCovariance, input data should have the same dimension as the covariance model's input dimension. Here, (input dimension = " << inputDimension << ", covariance model input dimension = " << covarianceModel.getInputDimension() << ")";
+  const UnsignedInteger sampleSize = xi.getSize();
+  if (sampleSize == 0)
+    throw InvalidArgumentException(HERE) << " In GaussianProcessConditionalCovariance::getConditionalMarginalCovariance, expected a non-empty sample";
+
+ CovarianceMatrixCollection collection(sampleSize);
+ Point data(inputDimension);
+ for (UnsignedInteger i = 0; i < sampleSize; ++i)
+ {
+ for (UnsignedInteger j = 0; j < inputDimension; ++j) data[j] = xi(i, j);
+ // Rely on getConditionalCovariance(Point&)
+ collection[i] = getConditionalCovariance(data);
+ }
+ return collection;
+}
+
+/** Compute covariance matrix conditionally to observations (1 cov of size outdimension)*/
+CovarianceMatrix GaussianProcessConditionalCovariance::getConditionalMarginalCovariance(const Point & xi) const
+{
+ const UnsignedInteger inputDimension = xi.getDimension();
+ const CovarianceModel covarianceModel(result_.getCovarianceModel());
+ if (inputDimension != covarianceModel.getInputDimension())
+    throw InvalidArgumentException(HERE) << " In GaussianProcessConditionalCovariance::getConditionalMarginalCovariance, input data should have the same dimension as the covariance model's input dimension. Here, (input dimension = " << inputDimension << ", covariance model input dimension = " << covarianceModel.getInputDimension() << ")";
+ return getConditionalCovariance(xi);
+}
+
+/** Compute marginal variance conditionally to observations (1 cov of size outputDimension)*/
+Scalar GaussianProcessConditionalCovariance::getConditionalMarginalVariance(const Point & point,
+ const UnsignedInteger marginalIndex) const
+{
+ const UnsignedInteger inputDimension = point.getDimension();
+ const CovarianceModel covarianceModel(result_.getCovarianceModel());
+ const UnsignedInteger outputDimension = covarianceModel.getOutputDimension();
+  if (inputDimension != covarianceModel.getInputDimension())
+    throw InvalidArgumentException(HERE) << " In GaussianProcessConditionalCovariance::getConditionalMarginalVariance, input data should have the same dimension as the covariance model's input dimension. Here, (input dimension = " << inputDimension << ", covariance model input dimension = " << covarianceModel.getInputDimension() << ")";
+  if (!(marginalIndex < outputDimension))
+    throw InvalidArgumentException(HERE) << " In GaussianProcessConditionalCovariance::getConditionalMarginalVariance, marginalIndex should be in [0, " << outputDimension << "). Here, marginalIndex = " << marginalIndex;
+ // Compute the matrix & return only the marginalIndex diagonal element
+ const CovarianceMatrix marginalCovarianceMatrix(getConditionalMarginalCovariance(point));
+ return marginalCovarianceMatrix(marginalIndex, marginalIndex);
+}
+
+/** Compute marginal variance conditionally to observations (1 cov / point)*/
+Sample GaussianProcessConditionalCovariance::getConditionalMarginalVariance(const Sample & xi,
+ const UnsignedInteger marginalIndex) const
+{
+
+ const UnsignedInteger inputDimension = xi.getDimension();
+ const CovarianceModel covarianceModel(result_.getCovarianceModel());
+ const UnsignedInteger outputDimension = covarianceModel.getOutputDimension();
+  if (inputDimension != covarianceModel.getInputDimension())
+    throw InvalidArgumentException(HERE) << " In GaussianProcessConditionalCovariance::getConditionalMarginalVariance, input data should have the same dimension as the covariance model's input dimension. Here, (input dimension = " << inputDimension << ", covariance model input dimension = " << covarianceModel.getInputDimension() << ")";
+  if (!(marginalIndex < outputDimension))
+    throw InvalidArgumentException(HERE) << " In GaussianProcessConditionalCovariance::getConditionalMarginalVariance, marginalIndex should be in [0, " << outputDimension << "). Here, marginalIndex = " << marginalIndex;
+  const UnsignedInteger sampleSize = xi.getSize();
+  if (sampleSize == 0)
+    throw InvalidArgumentException(HERE) << " In GaussianProcessConditionalCovariance::getConditionalMarginalVariance, expected a non-empty sample";
+
+ if (outputDimension == 1)
+ {
+ // 1) compute \sigma_{x,x}
+ LOGINFO("Compute interactions Sigma_xx");
+ // Only diagonal of the discretization Matrix
+ // First set sigmaXX
+ const Point defaultPoint(inputDimension);
+ const Point sigma2(1, covarianceModel.computeAsScalar(defaultPoint, defaultPoint));
+ Sample result(sampleSize, sigma2);
+
+
+ // 2) compute \sigma_{y,x}
+ // compute r(x), the crossCovariance between the conditioned data & xi
+ LOGINFO("Compute cross-interactions sigmaYX");
+ const Matrix crossCovariance(covarianceModel.computeCrossCovariance(result_.getInputSample(), xi));
+ // 3) Compute r^t R^{-1} r'(x)
+ // As we get the Cholesky factor L, we can solve triangular linear system
+ // We define B = L^{-1} * r(x)
+ LOGINFO("Solve L.B = SigmaYX");
+ const Matrix B(solveTriangularSystem(crossCovariance));
+ // We compute diag(B^t B)
+ // We can notice that it corresponds to the sum of elements
+ // for each column
+ LOGINFO("Compute B^tB & Sigma_xx-BtB");
+ for (UnsignedInteger j = 0; j < B.getNbColumns(); ++j)
+ {
+ Scalar sum = 0.0;
+ for (UnsignedInteger i = 0; i < B.getNbRows(); ++i)
+ sum += B(i, j) * B(i, j);
+ result(j, 0) -= sum;
+ }
+
+ // Case of simple Kriging
+ const Basis basis(result_.getBasis());
+ if (basis.getSize() == 0) return result;
+
+ // Case of universal Kriging: compute the covariance due to the regression part
+ // Additional information have to be computed
+ // 1) compute F
+ LOGINFO("Compute the regression matrix F");
+ // 2) Interest is (F^t R^{-1} F)^{-1}
+ // F^{t} R^{-1} F = F^{t} L^{-t} L^{-1} F
+ // Solve first L phi = F
+ // 3) Compute u(x) = F^t *R^{-1} * r(x) - f(x)
+ // = F^{t} * L^{-1}^t * L{-1} * r(x) - f(x)
+ // = phiT_ * B - f(x)
+ LOGINFO("Compute psi = phi^t * B");
+ // ux playing the role of psi
+ Matrix ux(phiT_ * B);
+ // compute f(x) & define u = psi - f(x)
+ LOGINFO("Compute f(x) & ux = psi - fx");
+ for (UnsignedInteger j = 0; j < basis.getSize(); ++j)
+ {
+ // Compute phi_j (X)
+ // Here we use potential parallelism in the evaluation of the basis functions
+ // It generates a sample of shape (sampleSize, outputDimension)
+ const Sample basisSample(basis[j](xi));
+ for (UnsignedInteger i = 0; i < sampleSize; ++i)
+ ux(j, i) -= basisSample(i, 0);
+ }
+ // interest now is to solve G rho = ux
+ LOGINFO("Solve linear system G * rho = ux");
+ const Matrix rho(Gt_.solveLinearSystem(ux));
+ LOGINFO("Compute Sigma_xx-BtB + rho^{t}*rho");
+ for (UnsignedInteger j = 0; j < rho.getNbColumns(); ++j)
+ {
+ Scalar sum = 0.0;
+ for (UnsignedInteger i = 0; i < rho.getNbRows(); ++i)
+ sum += rho(i, j) * rho(i, j);
+ result(j, 0) += sum;
+ }
+ return result;
+  } // end if outputDimension == 1
+
+ // Run sequentially over the sample
+ Sample marginalVariance(sampleSize, 1);
+ Point data(inputDimension);
+ for (UnsignedInteger i = 0; i < sampleSize; ++i)
+ {
+ for (UnsignedInteger j = 0; j < inputDimension; ++j) data[j] = xi(i, j);
+ marginalVariance(i, 0) = getConditionalMarginalVariance(data, marginalIndex);
+ }
+ return marginalVariance;
+}
+
+Point GaussianProcessConditionalCovariance::getConditionalMarginalVariance(const Point & point,
+ const Indices &indices) const
+{
+ const UnsignedInteger inputDimension = point.getDimension();
+ const CovarianceModel covarianceModel(result_.getCovarianceModel());
+  if (inputDimension != covarianceModel.getInputDimension())
+    throw InvalidArgumentException(HERE) << " In GaussianProcessConditionalCovariance::getConditionalMarginalVariance, input data should have the same dimension as the covariance model's input dimension. Here, (input dimension = " << inputDimension << ", covariance model input dimension = " << covarianceModel.getInputDimension() << ")";
+  if (!indices.check(covarianceModel.getOutputDimension()))
+    throw InvalidArgumentException(HERE) << "In GaussianProcessConditionalCovariance::getConditionalMarginalVariance, the indices of a marginal sample must be in the range [0, " << covarianceModel.getOutputDimension()
+                                         << ") and must be pairwise distinct";
+  // Compute the full covariance matrix & keep only the diagonal elements given by indices
+ const CovarianceMatrix covarianceMatrix(getConditionalMarginalCovariance(point));
+ Point result(indices.getSize());
+ for (UnsignedInteger j = 0; j < indices.getSize(); ++j) result[j] = covarianceMatrix(indices[j], indices[j]);
+ return result;
+}
+
+Sample GaussianProcessConditionalCovariance::getConditionalMarginalVariance(const Sample & xi,
+ const Indices & indices) const
+{
+
+ const UnsignedInteger inputDimension = xi.getDimension();
+ const CovarianceModel covarianceModel(result_.getCovarianceModel());
+  if (inputDimension != covarianceModel.getInputDimension())
+    throw InvalidArgumentException(HERE) << " In GaussianProcessConditionalCovariance::getConditionalMarginalVariance, input data should have the same dimension as the covariance model's input dimension. Here, (input dimension = " << inputDimension << ", covariance model input dimension = " << covarianceModel.getInputDimension() << ")";
+  if (!indices.check(covarianceModel.getOutputDimension()))
+    throw InvalidArgumentException(HERE) << "In GaussianProcessConditionalCovariance::getConditionalMarginalVariance, the indices of a marginal sample must be in the range [0, " << covarianceModel.getOutputDimension()
+                                         << ") and must be pairwise distinct";
+  const UnsignedInteger sampleSize = xi.getSize();
+  if (sampleSize == 0)
+    throw InvalidArgumentException(HERE) << " In GaussianProcessConditionalCovariance::getConditionalMarginalVariance, expected a non-empty sample";
+  // For each point, compute the marginal covariance & keep only the diagonal elements given by indices
+  Sample result(sampleSize, indices.getSize());
+ Point data(inputDimension);
+ for (UnsignedInteger i = 0; i < sampleSize; ++i)
+ {
+ for (UnsignedInteger j = 0; j < inputDimension; ++j) data[j] = xi(i, j);
+ result[i] = getConditionalMarginalVariance(data, indices);
+ }
+ return result;
+}
+
+
+/* Compute joint normal distribution conditionally to observations*/
+Normal GaussianProcessConditionalCovariance::operator()(const Point & xi) const
+{
+ const Sample sample(1, xi);
+ return operator()(sample);
+}
+
+/* Compute joint normal distribution conditionally to observations*/
+Normal GaussianProcessConditionalCovariance::operator()(const Sample & xi) const
+{
+ // The Normal distribution is defined by its mean & covariance
+ LOGINFO("In GaussianProcessConditionalCovariance::operator() : evaluating the mean");
+ const Sample meanAsSample(getConditionalMean(xi));
+  // Mean should be a Point ==> data are copied from the Sample to a Point
+ const Point mean(meanAsSample.getImplementation()->getData());
+ LOGINFO("In GaussianProcessConditionalCovariance::operator() : evaluating the covariance");
+ const CovarianceMatrix covarianceMatrix = getConditionalCovariance(xi);
+ // Check the covariance matrix. Indeed, if point is very similar to one of the learning points, covariance is null
+ // Even if this check is done in Normal::Normal, we perform debugging
+ LOGINFO("In GaussianProcessConditionalCovariance::operator() : evaluating the Normal distribution");
+ // Finally return the distribution
+ return Normal(mean, covarianceMatrix);
+}
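+// Informal usage sketch (names follow the API defined in this file; gprResult is
+// assumed to be a GaussianProcessRegressionResult obtained beforehand):
+//   GaussianProcessConditionalCovariance cond(gprResult);
+//   const Sample mean(cond.getConditionalMean(newInputs));
+//   const CovarianceMatrix cov(cond.getConditionalCovariance(newInputs));
+//   const Normal posterior(cond(newInputs)); // joint Gaussian law at newInputs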
+
+/* Method save() stores the object through the StorageManager */
+void GaussianProcessConditionalCovariance::save(Advocate & adv) const
+{
+ PersistentObject::save(adv);
+ adv.saveAttribute( "result_", result_ );
+ adv.saveAttribute( "phiT_", phiT_);
+ adv.saveAttribute( "Gt_", Gt_);
+}
+
+
+/* Method load() reloads the object from the StorageManager */
+void GaussianProcessConditionalCovariance::load(Advocate & adv)
+{
+ PersistentObject::load(adv);
+ adv.loadAttribute( "result_", result_ );
+ adv.loadAttribute( "phiT_", phiT_);
+ adv.loadAttribute( "Gt_", Gt_);
+}
+
+
+
+END_NAMESPACE_OPENTURNS
diff --git a/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/GaussianProcessFitter.cxx b/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/GaussianProcessFitter.cxx
new file mode 100644
index 0000000000..6670de1bc7
--- /dev/null
+++ b/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/GaussianProcessFitter.cxx
@@ -0,0 +1,738 @@
+// -*- C++ -*-
+/**
+ * @brief The class fits Gaussian process models
+ *
+ * Copyright 2005-2024 Airbus-EDF-IMACS-ONERA-Phimeca
+ *
+ * This library is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include "openturns/GaussianProcessFitter.hxx"
+#include "openturns/PersistentObjectFactory.hxx"
+#include "openturns/HMatrixFactory.hxx"
+#include "openturns/Log.hxx"
+#include "openturns/SpecFunc.hxx"
+#include "openturns/NonCenteredFiniteDifferenceGradient.hxx"
+#include "openturns/TNC.hxx"
+#include "openturns/Cobyla.hxx"
+#ifdef OPENTURNS_HAVE_ANALYTICAL_PARSER
+#include "openturns/SymbolicFunction.hxx"
+#else
+#include "openturns/DatabaseFunction.hxx"
+#endif
+#include "openturns/ComposedFunction.hxx"
+#include "openturns/LinearCombinationFunction.hxx"
+#include "openturns/AggregatedFunction.hxx"
+#include "openturns/MemoizeFunction.hxx"
+
+BEGIN_NAMESPACE_OPENTURNS
+
+CLASSNAMEINIT(GaussianProcessFitter)
+
+static const Factory<GaussianProcessFitter> Factory_GaussianProcessFitter;
+
+/* Default constructor */
+GaussianProcessFitter::GaussianProcessFitter()
+ : MetaModelAlgorithm()
+ , covarianceModel_()
+ , reducedCovarianceModel_()
+ , solver_()
+ , optimizationBounds_()
+ , beta_(0)
+ , rho_(0)
+ , F_(0, 0)
+ , result_()
+ , basis_()
+ , covarianceCholeskyFactor_()
+ , covarianceCholeskyFactorHMatrix_()
+{
+ // Nothing to do
+}
+
+GaussianProcessFitter::GaussianProcessFitter(const Sample & inputSample,
+ const Sample & outputSample,
+ const CovarianceModel & covarianceModel,
+ const Basis & basis)
+ : MetaModelAlgorithm(inputSample, outputSample)
+ , covarianceModel_()
+ , reducedCovarianceModel_()
+ , solver_()
+ , optimizationBounds_()
+ , beta_(0)
+ , rho_(0)
+ , F_(0, 0)
+ , result_()
+ , basis_()
+ , covarianceCholeskyFactor_()
+ , covarianceCholeskyFactorHMatrix_()
+{
+ // Set covariance model
+ setCovarianceModel(covarianceModel);
+
+ if (basis.getSize() > 0)
+ setBasis(basis);
+
+ initializeMethod();
+ initializeDefaultOptimizationAlgorithm();
+}
+
+/* Covariance model accessors */
+void GaussianProcessFitter::setCovarianceModel(const CovarianceModel & covarianceModel)
+{
+ // Here we can store any modified version of the given covariance model wrt its parameters as it is mainly a parametric template
+ const UnsignedInteger inputDimension = inputSample_.getDimension();
+ const UnsignedInteger outputDimension = outputSample_.getDimension();
+
+ if (covarianceModel.getInputDimension() != inputDimension)
+ throw InvalidArgumentException(HERE) << "Covariance model input dimension is " << covarianceModel.getInputDimension() << ", expected " << inputDimension;
+ if (covarianceModel.getOutputDimension() != outputDimension)
+ throw InvalidArgumentException(HERE) << "Covariance model output dimension is " << covarianceModel.getOutputDimension() << ", expected " << outputDimension;
+ covarianceModel_ = covarianceModel;
+ // All the computation will be done on the reduced covariance model. We keep the initial covariance model (ie the one we just built) in order to reinitialize the reduced covariance model if some flags are changed after the creation of the algorithm.
+ reducedCovarianceModel_ = covarianceModel_;
+ // Now, adapt the model parameters.
+ // First, check if the parameters have to be optimized. If not, remove all the active parameters.
+ analyticalAmplitude_ = false;
+ if (!optimizeParameters_) reducedCovarianceModel_.setActiveParameter(Indices());
+ // Second, check if the amplitude parameter is unique and active
+ else if (ResourceMap::GetAsBool("GaussianProcessFitter-UseAnalyticalAmplitudeEstimate"))
+ {
+ // The model has to be of dimension 1
+ if (reducedCovarianceModel_.getOutputDimension() == 1)
+ {
+ const Description activeParametersDescription(reducedCovarianceModel_.getParameterDescription());
+ // And one of the active parameters must be called amplitude_0
+ for (UnsignedInteger i = 0; i < activeParametersDescription.getSize(); ++i)
+ if (activeParametersDescription[i] == "amplitude_0")
+ {
+ analyticalAmplitude_ = true;
+ Indices newActiveParameters(reducedCovarianceModel_.getActiveParameter());
+ newActiveParameters.erase(newActiveParameters.begin() + i);
+ reducedCovarianceModel_.setActiveParameter(newActiveParameters);
+ // Here we have to change the current value of the amplitude as it has
+ // to be equal to 1 during the potential optimization step in order for
+ // the analytical formula to be correct.
+        // Now, the amplitude has disappeared from the active parameters, so it must
+ // be updated using the amplitude accessor.
+ reducedCovarianceModel_.setAmplitude(Point(1, 1.0));
+ break;
+ }
+ } // reducedCovarianceModel_.getDimension() == 1
+ } // optimizeParameters_
+ LOGINFO(OSS() << "final active parameters=" << reducedCovarianceModel_.getActiveParameter());
+ // Define the bounds of the optimization problem
+ const UnsignedInteger optimizationDimension = reducedCovarianceModel_.getParameter().getSize();
+ if (optimizationDimension > 0)
+ {
+ const Scalar scaleFactor(ResourceMap::GetAsScalar( "GaussianProcessFitter-DefaultOptimizationScaleFactor"));
+ if (!(scaleFactor > 0))
+ throw InvalidArgumentException(HERE) << "Scale factor set in ResourceMap is invalid. It should be a positive value. Here, scale = " << scaleFactor ;
+ const Point lowerBound(optimizationDimension, ResourceMap::GetAsScalar( "GaussianProcessFitter-DefaultOptimizationLowerBound"));
+ Point upperBound(optimizationDimension, ResourceMap::GetAsScalar( "GaussianProcessFitter-DefaultOptimizationUpperBound"));
+ // We could set scale parameter if these parameters are enabled.
+ // check if scale is active
+ const Indices activeParameters(reducedCovarianceModel_.getActiveParameter());
+ Bool isScaleActive(true);
+ for (UnsignedInteger k = 0; k < reducedCovarianceModel_.getScale().getSize(); ++k)
+ {
+ if (!activeParameters.contains(k))
+ {
+ isScaleActive = false;
+ break;
+ }
+ }
+ if (isScaleActive)
+ {
+ const Point inputSampleRange(inputSample_.computeRange());
+ for (UnsignedInteger k = 0; k < reducedCovarianceModel_.getScale().getSize(); ++k)
+ {
+ upperBound[k] = inputSampleRange[k] * scaleFactor;
+ if (upperBound[k] < lowerBound[k])
+ upperBound[k] += lowerBound[k];
+      } // k (upper bounds setting)
+ } //if active scale
+    LOGWARN(OSS() << "Warning! For consistency, the scale upper bounds are set to " << upperBound.__str__());
+
+ optimizationBounds_ = Interval(lowerBound, upperBound);
+ }
+ else optimizationBounds_ = Interval();
+}
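+// Informal example of the bound heuristic above: with the default ResourceMap
+// values (lower bound 1.0e-2, scale factor 2.0), a scale parameter whose input
+// marginal has sample range 5.0 gets the optimization bounds [1.0e-2, 10.0].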
+
+CovarianceModel GaussianProcessFitter::getCovarianceModel() const
+{
+ return covarianceModel_;
+}
+
+CovarianceModel GaussianProcessFitter::getReducedCovarianceModel() const
+{
+ return reducedCovarianceModel_;
+}
+
+/* Set basis method */
+void GaussianProcessFitter::setBasis(const Basis & basis)
+{
+ // Basis does not provide any getOutputDimension
+ // getDimension checks also only the dimension of the first element in case of FiniteBasis
+ // If basis given, then its size should be the same as the output dimension (each item of the basis is a function with the same input/output dimensions).
+ if (!basis.isFinite())
+ throw InvalidArgumentException(HERE) << "In GaussianProcessFitter::GaussianProcessFitter, basis should be finite!" ;
+ const UnsignedInteger size = basis.getSize();
+ for (UnsignedInteger index = 0; index < size; ++index)
+ {
+ if (basis[index].getOutputDimension() != outputSample_.getDimension())
+ throw InvalidArgumentException(HERE) << "In GaussianProcessFitter::GaussianProcessFitter, output sample dimension=" << outputSample_.getDimension() << " does not match basis[=" << index << "] dimension=" << basis[index].getOutputDimension();
+ if (basis[index].getInputDimension() != inputSample_.getDimension())
+ throw InvalidArgumentException(HERE) << "In GaussianProcessFitter::GaussianProcessFitter, input sample dimension=" << inputSample_.getDimension() << " does not match basis[=" << index << "] dimension=" << basis[index].getInputDimension();
+ }
+ // Everything is ok, we set the basis
+ basis_ = basis;
+}
+
+void GaussianProcessFitter::initializeDefaultOptimizationAlgorithm()
+{
+ const String solverName(ResourceMap::GetAsString("GaussianProcessFitter-DefaultOptimizationAlgorithm"));
+ solver_ = OptimizationAlgorithm::Build(solverName);
+  Cobyla* cobyla = dynamic_cast<Cobyla*>(solver_.getImplementation().get());
+ if (cobyla)
+ cobyla->setCheckStatus(true);
+  TNC* tnc = dynamic_cast<TNC*>(solver_.getImplementation().get());
+ if (tnc)
+ tnc->setCheckStatus(true);
+}
+
+/* Virtual constructor */
+GaussianProcessFitter * GaussianProcessFitter::clone() const
+{
+ return new GaussianProcessFitter(*this);
+}
+
+/* Compute the design matrix */
+void GaussianProcessFitter::computeF()
+{
+ // Nothing to do if the design matrix has already been computed
+ if (F_.getNbRows() != 0) return;
+ LOGINFO("Compute the design matrix");
+ // No early exit based on the sample/basis size as F_ must be initialized with the correct dimensions
+  // With a multivariate basis, the i-th marginal of each basis function is applied
+  // to the corresponding output marginal
+ const UnsignedInteger outputDimension = outputSample_.getDimension();
+ const UnsignedInteger sampleSize = inputSample_.getSize();
+ const UnsignedInteger basisSize = basis_.getSize();
+ // Basis \Phi is a function from R^{inputDimension} to R^{outputDimension}
+ // As we get B functions, total number of values is B * outputDimension
+ const UnsignedInteger totalSize = outputDimension * basisSize;
+
+ F_ = Matrix(sampleSize * outputDimension, totalSize);
+ if (totalSize == 0) return;
+
+ // Compute F
+ for (UnsignedInteger j = 0; j < basisSize; ++j)
+ {
+ // Compute phi_j (X)
+ // Here we use potential parallelism in the evaluation of the basis functions
+ // It generates a sample of shape (sampleSize, outputDimension)
+ const Sample basisSample(basis_[j](inputSample_));
+ for (UnsignedInteger i = 0; i < sampleSize; ++i)
+ for (UnsignedInteger outputMarginal = 0; outputMarginal < outputDimension; ++outputMarginal)
+ F_(outputMarginal + i * outputDimension, j * outputDimension + outputMarginal) = basisSample(i, outputMarginal);
+ }
+}
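+// Informal layout example for F_: with sampleSize = 2, outputDimension = 2 and
+// basisSize = 1, F_ is a 4x2 matrix whose entry
+// (outputMarginal + i * outputDimension, j * outputDimension + outputMarginal)
+// holds the outputMarginal-th component of basis_[j](inputSample_[i]), as filled above.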
+
+/* Perform regression
+1) Compute the design matrix
+2) Call the parameters optimization
+ a) Compute the log-likelihood with the initial parameters. It is mandatory
+ even if no parameter has to be optimized as this computation has many side
+ effects such as:
+ * computing the trend coefficients beta
+ * computing the discretized covariance matrix Cholesky factor
+ b) If the amplitude can be computed analytically from the other parameters:
+ * set its value to 1
+ * remove it from the list of parameters
+ c) If some parameters remain, perform the optimization
+ d) Deduce the associated value of the amplitude by the analytical formula if possible
+3) Build the result:
+ a) Extract the different parts of the trend
+ b) Update the covariance model if needed
+ */
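+// Informal usage sketch of the fit described above (X, Y, basis and the covariance
+// model dimensions are assumed to be consistent):
+//   GaussianProcessFitter fitter(X, Y, SquaredExponential(X.getDimension()), basis);
+//   fitter.run();
+//   const GaussianProcessFitterResult fitResult(fitter.getResult());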
+
+Bool GaussianProcessFitter::getKeepCholeskyFactor() const
+{
+ return keepCholeskyFactor_;
+}
+
+void GaussianProcessFitter::setKeepCholeskyFactor(const Bool keepCholeskyFactor)
+{
+ keepCholeskyFactor_ = keepCholeskyFactor;
+ reset();
+}
+
+void GaussianProcessFitter::run()
+{
+ // Do not run again if already computed
+ if (hasRun_) return;
+ computeF();
+ const UnsignedInteger outputDimension = outputSample_.getDimension();
+ // optimization of likelihood function if provided
+ // Here we call the optimizeReducedLogLikelihood() method even if the covariance
+ // model has no active parameter, because:
+ // + it can be due to the fact that the amplitude is obtained through an
+ // analytical formula and this situation is taken into account in
+ // maximizeReducedLogLikelihood()
+ // + even if there is actually no parameter to optimize,
+ // maximizeReducedLogLikelihood() is the entry point to
+ // computeReducedLogLikelihood() which has side effects on covariance
+ // discretization and factorization, and it computes beta_
+ Scalar optimalLogLikelihood = maximizeReducedLogLikelihood();
+
+ LOGINFO("Store the estimates");
+ LOGINFO("Build the output meta-model");
+  Collection<Function> marginalCollections(basis_.getSize());
+  Collection<Function> marginalFunctions(outputDimension);
+ Point beta_k(basis_.getSize());
+ Function metaModel;
+
+ if (basis_.getSize() > 0)
+ {
+ for (UnsignedInteger outputMarginal = 0; outputMarginal < outputDimension; ++outputMarginal)
+ {
+ for (UnsignedInteger k = 0; k < basis_.getSize(); ++k)
+ {
+ marginalCollections[k] = basis_[k].getMarginal(outputMarginal);
+ beta_k[k] = beta_[k * outputDimension + outputMarginal];
+ }
+ LinearCombinationFunction marginalFunction(marginalCollections, beta_k);
+ marginalFunctions[outputMarginal] = marginalFunction;
+ }
+
+    // Careful! The collection must be non-empty
+ metaModel = AggregatedFunction(marginalFunctions);
+ }
+ else
+ {
+ // If no basis ==> zero function
+ metaModel = SymbolicFunction(Description::BuildDefault(covarianceModel_.getInputDimension(), "x"), Description(covarianceModel_.getOutputDimension(), "0.0"));
+ }
+
+ // compute residual, relative error
+ const Point outputVariance(outputSample_.computeVariance());
+ const Sample mY(metaModel(inputSample_));
+ const Point squaredResiduals((outputSample_ - mY).computeRawMoment(2));
+
+ Point residuals(outputDimension);
+ Point relativeErrors(outputDimension);
+
+ const UnsignedInteger size = inputSample_.getSize();
+ for ( UnsignedInteger outputIndex = 0; outputIndex < outputDimension; ++ outputIndex )
+ {
+ residuals[outputIndex] = std::sqrt(squaredResiduals[outputIndex] / size);
+ relativeErrors[outputIndex] = squaredResiduals[outputIndex] / outputVariance[outputIndex];
+ }
+
+ // return optimized covmodel with the original active parameters (see analyticalAmplitude_)
+ CovarianceModel reducedCovarianceModelCopy(reducedCovarianceModel_);
+ reducedCovarianceModelCopy.setActiveParameter(covarianceModel_.getActiveParameter());
+
+ result_ = GaussianProcessFitterResult(inputSample_, outputSample_, metaModel, residuals, relativeErrors, F_, basis_, beta_, reducedCovarianceModelCopy, optimalLogLikelihood, method_);
+ result_.setRho(rho_);
+
+ // The scaling is done there because it has to be done as soon as some optimization has been done, either numerically or through an analytical formula
+ if (keepCholeskyFactor_)
+ {
+ if (analyticalAmplitude_)
+ {
+ const Scalar sigma = reducedCovarianceModel_.getAmplitude()[0];
+ // Case of LAPACK backend
+ if (method_ == GaussianProcessFitterResult::LAPACK) covarianceCholeskyFactor_ = covarianceCholeskyFactor_ * sigma;
+ else covarianceCholeskyFactorHMatrix_.scale(sigma);
+ }
+ result_.setCholeskyFactor(covarianceCholeskyFactor_, covarianceCholeskyFactorHMatrix_);
+ }
+ hasRun_ = true;
+}
+
+// Maximize the log-likelihood of the Gaussian process model wrt the observations
+// If the covariance model has no active parameter, no numerical optimization
+// is done. There are two cases:
+// + no parameter has to be optimized, in which case a single call to
+// computeReducedLogLikelihood() is made in order to compute beta_ and to
+// factor the covariance matrix
+// + the amplitude is the only covariance parameter to be estimated and it is
+// done thanks to an analytical formula
+// The method returns the optimal log-likelihood (which is equal to the optimal
+// reduced log-likelihood), the corresponding parameters being directly stored
+// into the covariance model
+Scalar GaussianProcessFitter::maximizeReducedLogLikelihood()
+{
+ // initial guess
+ const Point initialParameters(reducedCovarianceModel_.getParameter());
+ // We use the functional form of the log-likelihood computation to benefit from the cache mechanism
+ Function reducedLogLikelihoodFunction(getObjectiveFunction());
+ const Bool noNumericalOptimization = initialParameters.getSize() == 0 || !getOptimizeParameters();
+ // Early exit if the parameters are known
+ if (noNumericalOptimization)
+ {
+ // We only need to compute the log-likelihood function at the initial parameters in order to get the Cholesky factor and the trend coefficients
+ const Scalar initialReducedLogLikelihood = reducedLogLikelihoodFunction(initialParameters)[0];
+ LOGINFO("No covariance parameter to optimize");
+ LOGINFO(OSS() << "initial parameters=" << initialParameters << ", log-likelihood=" << initialReducedLogLikelihood);
+ return initialReducedLogLikelihood;
+ }
+ // At this point we have an optimization problem to solve
+ // Define the optimization problem
+ OptimizationProblem problem(reducedLogLikelihoodFunction);
+ problem.setMinimization(false);
+ problem.setBounds(optimizationBounds_);
+ solver_.setProblem(problem);
+ try
+ {
+ // If the solver is single start, we can use its setStartingPoint method
+ solver_.setStartingPoint(initialParameters);
+ }
+ catch (const NotDefinedException &) // setStartingPoint is not defined for the solver
+ {
+ // Nothing to do if setStartingPoint is not defined
+ }
+ LOGINFO(OSS(false) << "Solve problem=" << problem << " using solver=" << solver_);
+ solver_.run();
+ const OptimizationAlgorithm::Result result(solver_.getResult());
+ const Scalar optimalLogLikelihood = result.getOptimalValue()[0];
+ const Point optimalParameters(result.getOptimalPoint());
+ const UnsignedInteger evaluationNumber = result.getCallsNumber();
+ // Check if the optimal value corresponds to the last computed value, in order to
+ // see if the by-products (Cholesky factor etc) are correct
+ if (lastReducedLogLikelihood_ != optimalLogLikelihood)
+ {
+ LOGDEBUG(OSS(false) << "Need to evaluate the objective function one more time because the last computed reduced log-likelihood value=" << lastReducedLogLikelihood_ << " is different from the optimal one=" << optimalLogLikelihood);
+ (void) computeReducedLogLikelihood(optimalParameters);
+ }
+ // Final call to reducedLogLikelihoodFunction() in order to update the amplitude
+ // No additional cost since the cache mechanism is activated
+ LOGINFO(OSS() << evaluationNumber << " evaluations, optimized parameters=" << optimalParameters << ", log-likelihood=" << optimalLogLikelihood);
+
+ return optimalLogLikelihood;
+}
+
+Point GaussianProcessFitter::computeReducedLogLikelihood(const Point & parameters)
+{
+ // Check that the parameters have a size compatible with the covariance model
+ if (parameters.getSize() != reducedCovarianceModel_.getParameter().getSize())
+ throw InvalidArgumentException(HERE) << "In GaussianProcessFitter::computeReducedLogLikelihood, could not compute likelihood,"
+ << " covariance model requires an argument of size " << reducedCovarianceModel_.getParameter().getSize()
+ << " but here we got " << parameters.getSize();
+ LOGDEBUG(OSS(false) << "Compute reduced log-likelihood for parameters=" << parameters);
+  const Scalar constant = - SpecFunc::LOGSQRT2PI * static_cast<Scalar>(inputSample_.getSize()) * static_cast<Scalar>(outputSample_.getDimension());
+ Scalar logDeterminant = 0.0;
+ // If the amplitude is deduced from the other parameters, work with
+ // the correlation function
+ if (analyticalAmplitude_) reducedCovarianceModel_.setAmplitude(Point(1, 1.0));
+ reducedCovarianceModel_.setParameter(parameters);
+ // First, compute the log-determinant of the Cholesky factor of the covariance
+ // matrix. As a by-product, also compute rho.
+ if (method_ == GaussianProcessFitterResult::LAPACK)
+ logDeterminant = computeLapackLogDeterminantCholesky();
+ else
+ logDeterminant = computeHMatLogDeterminantCholesky();
+ // Compute the amplitude using an analytical formula if needed
+ // and update the reduced log-likelihood.
+ if (analyticalAmplitude_)
+ {
+ LOGDEBUG("Analytical amplitude");
+ // J(\sigma)=-\log(\sqrt{\sigma^{2N}\det{R}})-(Y-M)^tR^{-1}(Y-M)/(2\sigma^2)
+ // =-N\log(\sigma)-\log(\det{R})/2-(Y-M)^tR^{-1}(Y-M)/(2\sigma^2)
+ // dJ/d\sigma=-N/\sigma+(Y-M)^tR^{-1}(Y-M)/\sigma^3=0
+ // \sigma=\sqrt{(Y-M)^tR^{-1}(Y-M)/N}
+ const UnsignedInteger size = inputSample_.getSize();
+ const Scalar sigma = std::sqrt(rho_.normSquare() / (ResourceMap::GetAsBool("GaussianProcessFitter-UnbiasedVariance") ? size - beta_.getSize() : size));
+ LOGDEBUG(OSS(false) << "sigma=" << sigma);
+ reducedCovarianceModel_.setAmplitude(Point(1, sigma));
+ logDeterminant += 2.0 * size * std::log(sigma);
+ rho_ /= sigma;
+ LOGDEBUG(OSS(false) << "rho_=" << rho_);
+ } // analyticalAmplitude
+
+ LOGDEBUG(OSS(false) << "log-determinant=" << logDeterminant << ", rho=" << rho_);
+ const Scalar epsilon = rho_.normSquare();
+ LOGDEBUG(OSS(false) << "epsilon=||rho||^2=" << epsilon);
+ if (epsilon <= 0) lastReducedLogLikelihood_ = SpecFunc::LowestScalar;
+ // For the general multidimensional case, we have to compute the general log-likelihood (ie including marginal variances)
+ else lastReducedLogLikelihood_ = constant - 0.5 * (logDeterminant + epsilon);
+ LOGINFO(OSS(false) << "Point " << parameters << " -> reduced log-likelihood=" << lastReducedLogLikelihood_);
+ return Point(1, lastReducedLogLikelihood_);
+}
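+// Informal recap: the value returned above is the reduced log-likelihood
+//   l(theta) = -N*d*log(sqrt(2*pi)) - 0.5 * (log|det(C(theta))| + ||rho||^2)
+// with C = L L^t the discretized covariance, rho = L^{-1}(y - F beta) and N*d the
+// total number of scalar observations; the two methods below compute log|det(C)|.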
+
+
+Scalar GaussianProcessFitter::computeLapackLogDeterminantCholesky()
+{
+ // Using the hypothesis that parameters = scale & model writes : C(s,t) = diag(sigma) * R(s,t) * diag(sigma) with R a correlation function
+ LOGDEBUG(OSS(false) << "Compute the LAPACK log-determinant of the Cholesky factor for covariance=" << reducedCovarianceModel_);
+
+ LOGDEBUG("Discretize the covariance model");
+ CovarianceMatrix C(reducedCovarianceModel_.discretize(inputSample_));
+ if (C.getDimension() < 20)
+ LOGDEBUG(OSS(false) << "C=\n" << C);
+ LOGDEBUG("Compute the Cholesky factor of the covariance matrix");
+ covarianceCholeskyFactor_ = C.computeRegularizedCholesky();
+
+ // y corresponds to output data
+ const Point y(outputSample_.getImplementation()->getData());
+ LOGDEBUG(OSS(false) << "y=" << y);
+ // rho = L^{-1}y
+ LOGDEBUG("Solve L.rho = y");
+ rho_ = covarianceCholeskyFactor_.solveLinearSystem(y);
+ LOGDEBUG(OSS(false) << "rho_=L^{-1}y=" << rho_);
+ // If trend to estimate
+ if (basis_.getSize() > 0)
+ {
+ // Phi = L^{-1}F
+ LOGDEBUG("Solve L.Phi = F");
+ LOGDEBUG(OSS(false) << "F_=\n" << F_);
+ Matrix Phi(covarianceCholeskyFactor_.solveLinearSystem(F_));
+ LOGDEBUG(OSS(false) << "Phi=\n" << Phi);
+ LOGDEBUG("Solve min_beta||Phi.beta - rho||^2");
+ beta_ = Phi.solveLinearSystem(rho_);
+ LOGDEBUG(OSS(false) << "beta_=" << beta_);
+ LOGDEBUG("Update rho");
+ rho_ -= Phi * beta_;
+ LOGDEBUG(OSS(false) << "rho_=L^{-1}y-L^{-1}F.beta=" << rho_);
+ }
+ LOGDEBUG("Compute log(|det(L)|)=log(sqrt(|det(C)|))");
+ Scalar logDetL = 0.0;
+ for (UnsignedInteger i = 0; i < covarianceCholeskyFactor_.getDimension(); ++i )
+ {
+ const Scalar lii = covarianceCholeskyFactor_(i, i);
+ if (lii <= 0.0) return SpecFunc::LowestScalar;
+ logDetL += log(lii);
+ }
+ LOGDEBUG(OSS(false) << "logDetL=" << logDetL);
+ return 2.0 * logDetL;
+}
+
+Scalar GaussianProcessFitter::computeHMatLogDeterminantCholesky()
+{
+ // Using the hypothesis that parameters = scale & model writes : C(s,t) = \sigma^2 * R(s,t) with R a correlation function
+ LOGDEBUG(OSS(false) << "Compute the HMAT log-determinant of the Cholesky factor for covariance=" << reducedCovarianceModel_);
+
+ const UnsignedInteger covarianceDimension = reducedCovarianceModel_.getOutputDimension();
+
+ HMatrixFactory hmatrixFactory;
+ HMatrixParameters hmatrixParameters;
+
+ covarianceCholeskyFactorHMatrix_ = hmatrixFactory.build(inputSample_, covarianceDimension, true, hmatrixParameters);
+ if (covarianceDimension == 1)
+ {
+ CovarianceAssemblyFunction simple(reducedCovarianceModel_, inputSample_);
+ covarianceCholeskyFactorHMatrix_.assemble(simple, 'L');
+ }
+ else
+ {
+ CovarianceBlockAssemblyFunction block(reducedCovarianceModel_, inputSample_);
+ covarianceCholeskyFactorHMatrix_.assemble(block, 'L');
+ }
+ // Factorize
+ covarianceCholeskyFactorHMatrix_.factorize(hmatrixParameters.getFactorizationMethod());
+ // y corresponds to output data
+ // The PersistentCollection is returned as Point with the right memory map
+ const Point y(outputSample_.getImplementation()->getData());
+ // rho = L^{-1}y
+ LOGDEBUG("Solve L.rho = y");
+ rho_ = covarianceCholeskyFactorHMatrix_.solveLower(y);
+ // If trend to estimate
+ if (basis_.getSize() > 0)
+ {
+ // Phi = L^{-1}F
+ LOGDEBUG("Solve L.Phi = F");
+ const Matrix Phi(covarianceCholeskyFactorHMatrix_.solveLower(F_));
+ LOGDEBUG("Solve min_beta||Phi.beta - rho||^2");
+ beta_ = Phi.solveLinearSystem(rho_);
+ rho_ -= Phi * beta_;
+ }
+ LOGDEBUG("Compute log(sqrt(|det(C)|)) = log(|det(L)|)");
+ Scalar logDetL = 0.0;
+ Point diagonal(covarianceCholeskyFactorHMatrix_.getDiagonal());
+ for (UnsignedInteger i = 0; i < rho_.getSize(); ++i )
+ {
+ const Scalar lii = diagonal[i];
+ if (lii <= 0.0) return SpecFunc::LowestScalar;
+ logDetL += log(lii);
+ }
+ return 2.0 * logDetL;
+}
+
+/* Optimization solver accessor */
+OptimizationAlgorithm GaussianProcessFitter::getOptimizationAlgorithm() const
+{
+ return solver_;
+}
+
+void GaussianProcessFitter::setOptimizationAlgorithm(const OptimizationAlgorithm & solver)
+{
+ solver_ = solver;
+ reset();
+}
+
+/* Optimize parameters flag accessor */
+Bool GaussianProcessFitter::getOptimizeParameters() const
+{
+ return optimizeParameters_;
+}
+
+void GaussianProcessFitter::setOptimizeParameters(const Bool optimizeParameters)
+{
+ if (optimizeParameters != optimizeParameters_)
+ {
+ optimizeParameters_ = optimizeParameters;
+ // Here we have to call setCovarianceModel() as it computes reducedCovarianceModel from covarianceModel_ in a way influenced by optimizeParameters_ flag.
+ setCovarianceModel(covarianceModel_);
+ }
+}
+
+/* Accessor to optimization bounds */
+void GaussianProcessFitter::setOptimizationBounds(const Interval & optimizationBounds)
+{
+ if (!(optimizationBounds.getDimension() == optimizationBounds_.getDimension())) throw InvalidArgumentException(HERE) << "Error: expected bounds of dimension=" << optimizationBounds_.getDimension() << ", got dimension=" << optimizationBounds.getDimension();
+ optimizationBounds_ = optimizationBounds;
+ reset();
+}
+
+Interval GaussianProcessFitter::getOptimizationBounds() const
+{
+ return optimizationBounds_;
+}
+
+/* String converter */
+String GaussianProcessFitter::__repr__() const
+{
+ OSS oss;
+ oss << "class=" << getClassName()
+ << ", inputSample=" << inputSample_
+ << ", outputSample=" << outputSample_
+ << ", basis=" << basis_
+ << ", covarianceModel=" << covarianceModel_
+ << ", reducedCovarianceModel=" << reducedCovarianceModel_
+ << ", solver=" << solver_
+ << ", optimizeParameters=" << optimizeParameters_;
+ return oss;
+}
+
+
+GaussianProcessFitterResult GaussianProcessFitter::getResult()
+{
+ if (!hasRun_) run();
+ return result_;
+}
+
+
+Function GaussianProcessFitter::getObjectiveFunction()
+{
+ computeF();
+ MemoizeFunction logLikelihood(ReducedLogLikelihoodEvaluation(*this));
+ // Here we change the finite difference gradient for a non centered one in order to reduce the computational cost
+ const Scalar finiteDifferenceEpsilon = ResourceMap::GetAsScalar( "NonCenteredFiniteDifferenceGradient-DefaultEpsilon" );
+ logLikelihood.setGradient(NonCenteredFiniteDifferenceGradient(finiteDifferenceEpsilon, logLikelihood.getEvaluation()).clone());
+ logLikelihood.enableCache();
+ return logLikelihood;
+}
+
+void GaussianProcessFitter::initializeMethod()
+{
+ if (ResourceMap::GetAsString("GaussianProcessFitter-LinearAlgebra") == "HMAT")
+ setMethod(GaussianProcessFitterResult::HMAT);
+}
+
+GaussianProcessFitter::LinearAlgebra GaussianProcessFitter::getMethod() const
+{
+ return method_;
+}
+
+void GaussianProcessFitter::reset()
+{
+ // Reset elements for new computation
+ // No need to update F_ as computeF /setBasis are private
+ // Same remark for setCovarianceModel & setData
+ covarianceCholeskyFactor_ = TriangularMatrix();
+ covarianceCholeskyFactorHMatrix_ = HMatrix();
+ hasRun_ = false;
+ lastReducedLogLikelihood_ = SpecFunc::LowestScalar;
+ beta_ = Point();
+ rho_ = Point();
+ // The current output Gram matrix
+ F_ = Matrix();
+}
+
+/* Linear algebra method accessor (LAPACK/HMAT) */
+void GaussianProcessFitter::setMethod(const LinearAlgebra method)
+{
+ // First update only if method has changed. It avoids useless reset
+ if (method != method_)
+ {
+ if (method > 1)
+ throw InvalidArgumentException(HERE) << "Expecting 0 (LAPACK) or 1 (HMAT)";
+ // Set new method
+ method_ = method;
+ // reset for new computation
+ reset();
+ }
+}
+
+/* Method save() stores the object through the StorageManager */
+void GaussianProcessFitter::save(Advocate & adv) const
+{
+ MetaModelAlgorithm::save(adv);
+ adv.saveAttribute( "covarianceModel_", covarianceModel_ );
+ adv.saveAttribute( "reducedCovarianceModel_", reducedCovarianceModel_ );
+ adv.saveAttribute( "solver_", solver_ );
+ adv.saveAttribute( "optimizationBounds_", optimizationBounds_ );
+ adv.saveAttribute( "beta_", beta_ );
+ adv.saveAttribute( "rho_", rho_ );
+ adv.saveAttribute( "F_", F_ );
+ adv.saveAttribute( "result_", result_ );
+ adv.saveAttribute( "basis_", basis_ );
+ adv.saveAttribute( "covarianceCholeskyFactor_", covarianceCholeskyFactor_ );
+ adv.saveAttribute( "keepCholeskyFactor_", keepCholeskyFactor_ );
+  UnsignedInteger method = static_cast<UnsignedInteger>(method_);
+ adv.saveAttribute( "method_", method );
+ adv.saveAttribute( "hasRun_", hasRun_ );
+ adv.saveAttribute( "optimizeParameters_", optimizeParameters_ );
+ adv.saveAttribute( "analyticalAmplitude_", analyticalAmplitude_ );
+ adv.saveAttribute( "lastReducedLogLikelihood_", lastReducedLogLikelihood_ );
+}
+
+
+/* Method load() reloads the object from the StorageManager */
+void GaussianProcessFitter::load(Advocate & adv)
+{
+ MetaModelAlgorithm::load(adv);
+ adv.loadAttribute( "covarianceModel_", covarianceModel_ );
+ adv.loadAttribute( "reducedCovarianceModel_", reducedCovarianceModel_ );
+ adv.loadAttribute( "solver_", solver_ );
+ adv.loadAttribute( "optimizationBounds_", optimizationBounds_ );
+ adv.loadAttribute( "beta_", beta_ );
+ adv.loadAttribute( "rho_", rho_ );
+ adv.loadAttribute( "F_", F_ );
+ adv.loadAttribute( "result_", result_ );
+ adv.loadAttribute( "basis_", basis_ );
+ adv.loadAttribute( "covarianceCholeskyFactor_", covarianceCholeskyFactor_ );
+ adv.loadAttribute( "keepCholeskyFactor_", keepCholeskyFactor_ );
+ UnsignedInteger method = 0;
+ adv.loadAttribute( "method_", method );
+  method_ = static_cast<LinearAlgebra>(method);
+ adv.loadAttribute( "hasRun_", hasRun_ );
+ adv.loadAttribute( "optimizeParameters_", optimizeParameters_ );
+ adv.loadAttribute( "analyticalAmplitude_", analyticalAmplitude_ );
+ adv.loadAttribute( "lastReducedLogLikelihood_", lastReducedLogLikelihood_ );
+}
+
+END_NAMESPACE_OPENTURNS
diff --git a/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/GaussianProcessFitterResult.cxx b/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/GaussianProcessFitterResult.cxx
new file mode 100644
index 0000000000..126b903390
--- /dev/null
+++ b/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/GaussianProcessFitterResult.cxx
@@ -0,0 +1,230 @@
+// -*- C++ -*-
+/**
+ * @brief The result of a gaussian process fitter
+ *
+ * Copyright 2005-2024 Airbus-EDF-IMACS-ONERA-Phimeca
+ *
+ * This library is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+#include "openturns/GaussianProcessFitterResult.hxx"
+#include "openturns/OSS.hxx"
+#include "openturns/PersistentObjectFactory.hxx"
+#include "openturns/Log.hxx"
+#include "openturns/Mesh.hxx"
+#include "openturns/GaussianProcess.hxx"
+#include "openturns/WhiteNoise.hxx"
+#include "openturns/Normal.hxx"
+#include "openturns/DatabaseFunction.hxx"
+
+BEGIN_NAMESPACE_OPENTURNS
+
+CLASSNAMEINIT(GaussianProcessFitterResult)
+static const Factory<GaussianProcessFitterResult> Factory_GaussianProcessFitterResult;
+
+/* Default constructor */
+GaussianProcessFitterResult::GaussianProcessFitterResult()
+ : MetaModelResult()
+{
+ // Nothing to do
+}
+
+/* Constructor with parameters & Cholesky factor */
+GaussianProcessFitterResult::GaussianProcessFitterResult(const Sample & inputSample,
+ const Sample & outputSample,
+ const Function & metaModel,
+ const Point & residuals,
+ const Point & relativeErrors,
+ const Matrix & regressionMatrix,
+ const Basis & basis,
+ const Point & trendCoefficients,
+ const CovarianceModel & covarianceModel,
+ const Scalar optimalLogLikelihood,
+ const LinearAlgebra linearAlgebraMethod)
+ : MetaModelResult(inputSample, outputSample, metaModel, residuals, relativeErrors),
+ regressionMatrix_(regressionMatrix),
+ basis_(basis),
+ beta_(trendCoefficients),
+ covarianceModel_(covarianceModel),
+ optimalLogLikelihood_(optimalLogLikelihood),
+ linearAlgebraMethod_(linearAlgebraMethod),
+ hasCholeskyFactor_(false),
+ covarianceCholeskyFactor_(),
+ covarianceHMatrix_()
+{
+ const UnsignedInteger size = inputSample.getSize();
+ if (size != outputSample.getSize())
+ throw InvalidArgumentException(HERE) << "In GaussianProcessFitterResult::GaussianProcessFitterResult, input & output sample have different size. input sample size = " << size << ", output sample size = " << outputSample.getSize();
+}
+
+
+/* Virtual constructor */
+GaussianProcessFitterResult * GaussianProcessFitterResult::clone() const
+{
+ return new GaussianProcessFitterResult(*this);
+}
+
+
+/* String converter */
+String GaussianProcessFitterResult::__repr__() const
+{
+ return OSS(true) << "class=" << getClassName()
+ << ", covariance model=" << covarianceModel_
+ << ", basis=" << basis_
+ << ", trend coefficients=" << beta_;
+}
+
+String GaussianProcessFitterResult::__str__(const String & offset) const
+{
+ OSS oss(false);
+ oss << getClassName() << "("
+ << "covariance model=" << covarianceModel_.__str__(offset)
+ << ", basis=" << basis_.__str__(offset)
+ << ", trend coefficients=" << beta_.__str__(offset) << ")";
+ return oss;
+}
+
+/* Basis accessor */
+Basis GaussianProcessFitterResult::getBasis() const
+{
+ return basis_;
+}
+
+/* Trend coefficients accessor */
+Point GaussianProcessFitterResult::getTrendCoefficients() const
+{
+ return beta_;
+}
+
+/* Covariance models accessor */
+CovarianceModel GaussianProcessFitterResult::getCovarianceModel() const
+{
+ return covarianceModel_;
+}
+
+/** Regression matrix accessor */
+Matrix GaussianProcessFitterResult::getRegressionMatrix() const
+{
+ return regressionMatrix_;
+}
+
+Scalar GaussianProcessFitterResult::getOptimalLogLikelihood() const
+{
+ return optimalLogLikelihood_;
+}
+
+GaussianProcessFitterResult::LinearAlgebra GaussianProcessFitterResult::getLinearAlgebraMethod() const
+{
+ return linearAlgebraMethod_;
+}
+
+/* process accessor */
+Process GaussianProcessFitterResult::getNoise() const
+{
+ // Define noise process
+ if (getCovarianceModel().getClassName() == "DiracCovarianceModel")
+ {
+ // Here it is assumed that the covariance model parameters are the
+ // marginal amplitude.
+ const Point sigma(getCovarianceModel().getParameter());
+ const CorrelationMatrix R(getCovarianceModel().getOutputCorrelation());
+ const Normal dist(Point(sigma.getSize(), 0.0), sigma, R);
+ const WhiteNoise noise(dist);
+ return noise;
+ }
+ // Other covariance models
+ const GaussianProcess noise(getCovarianceModel(), Mesh(getInputSample()));
+ return noise;
+}
+
+/* Method that returns the covariance factor - Lapack */
+TriangularMatrix GaussianProcessFitterResult::getCholeskyFactor() const
+{
+ return covarianceCholeskyFactor_;
+}
+
+/** rho accessor */
+Point GaussianProcessFitterResult::getRho() const
+{
+ return rho_;
+}
+
+/** rho accessor */
+void GaussianProcessFitterResult::setRho(const Point & rho)
+{
+ rho_ = rho;
+}
+
+void GaussianProcessFitterResult::setCholeskyFactor(const TriangularMatrix & covarianceCholeskyFactor,
+ const HMatrix & covarianceHMatrix)
+{
+ const UnsignedInteger size = getInputSample().getSize();
+ const UnsignedInteger outputDimension = getMetaModel().getOutputDimension();
+  if (covarianceCholeskyFactor.getDimension() != 0 && covarianceCholeskyFactor.getDimension() != size * outputDimension)
+    throw InvalidArgumentException(HERE) << "In GaussianProcessFitterResult::setCholeskyFactor, Cholesky factor has unexpected dimensions. Its dimension should be " << size * outputDimension << ". Here dimension = " << covarianceCholeskyFactor.getDimension();
+  if (covarianceHMatrix.getNbRows() != 0)
+  {
+    if (covarianceHMatrix.getNbRows() != covarianceHMatrix.getNbColumns())
+      throw InvalidArgumentException(HERE) << "In GaussianProcessFitterResult::setCholeskyFactor, HMAT Cholesky factor is not square. Its dimension is " << covarianceHMatrix.getNbRows() << "x" << covarianceHMatrix.getNbColumns();
+    if (covarianceHMatrix.getNbRows() != size * outputDimension)
+      throw InvalidArgumentException(HERE) << "In GaussianProcessFitterResult::setCholeskyFactor, HMAT Cholesky factor has unexpected dimensions. Its dimension should be " << size * outputDimension << ". Here dimension = " << covarianceHMatrix.getNbRows();
+  }
+ }
+ covarianceCholeskyFactor_ = covarianceCholeskyFactor;
+ covarianceHMatrix_ = covarianceHMatrix;
+}
+
+/* Method that returns the covariance factor - hmat */
+HMatrix GaussianProcessFitterResult::getHMatCholeskyFactor() const
+{
+ return covarianceHMatrix_;
+}
+
+
+/* Method save() stores the object through the StorageManager */
+void GaussianProcessFitterResult::save(Advocate & adv) const
+{
+ MetaModelResult::save(adv);
+ adv.saveAttribute("regressionMatrix_", regressionMatrix_);
+ adv.saveAttribute("basis_", basis_);
+ adv.saveAttribute("beta_", beta_ );
+ adv.saveAttribute("covarianceModel_", covarianceModel_ );
+ adv.saveAttribute("rho_", covarianceModel_);
+ adv.saveAttribute("optimalLogLikelihood_", optimalLogLikelihood_);
+  UnsignedInteger linearAlgebraMethod = static_cast<UnsignedInteger>(linearAlgebraMethod_);
+ adv.saveAttribute("linearAlgebraMethod_", linearAlgebraMethod);
+ adv.saveAttribute("hasCholeskyFactor_", hasCholeskyFactor_);
+ adv.saveAttribute("covarianceCholeskyFactor_", covarianceCholeskyFactor_);
+}
+
+
+/* Method load() reloads the object from the StorageManager */
+void GaussianProcessFitterResult::load(Advocate & adv)
+{
+ MetaModelResult::load(adv);
+ adv.loadAttribute("regressionMatrix_", regressionMatrix_);
+ adv.loadAttribute("basis_", basis_);
+ adv.loadAttribute("beta_", beta_ );
+ adv.loadAttribute("covarianceModel_", covarianceModel_ );
+ adv.loadAttribute("rho_", covarianceModel_);
+ adv.loadAttribute("optimalLogLikelihood_", optimalLogLikelihood_);
+ UnsignedInteger linearAlgebraMethod = 0;
+ adv.loadAttribute( "linearAlgebraMethod_", linearAlgebraMethod );
+  linearAlgebraMethod_ = static_cast<LinearAlgebra>(linearAlgebraMethod);
+ adv.loadAttribute("hasCholeskyFactor_", hasCholeskyFactor_);
+ adv.loadAttribute("covarianceCholeskyFactor_", covarianceCholeskyFactor_);
+}
+
+
+
+END_NAMESPACE_OPENTURNS
diff --git a/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/GaussianProcessRegression.cxx b/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/GaussianProcessRegression.cxx
new file mode 100644
index 0000000000..12c18eb434
--- /dev/null
+++ b/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/GaussianProcessRegression.cxx
@@ -0,0 +1,204 @@
+// -*- C++ -*-
+/**
+ * @brief The class building gaussian process regression
+ *
+ * Copyright 2005-2024 Airbus-EDF-IMACS-ONERA-Phimeca
+ *
+ * This library is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include "openturns/GaussianProcessRegression.hxx"
+#include "openturns/PersistentObjectFactory.hxx"
+#include "openturns/KrigingEvaluation.hxx"
+#include "openturns/KrigingGradient.hxx"
+#include "openturns/CenteredFiniteDifferenceHessian.hxx"
+#include "openturns/GaussianProcessFitter.hxx"
+
+BEGIN_NAMESPACE_OPENTURNS
+
+CLASSNAMEINIT(GaussianProcessRegression)
+
+static const Factory<GaussianProcessRegression> Factory_GaussianProcessRegression;
+
+
+/* Default constructor */
+GaussianProcessRegression::GaussianProcessRegression()
+ : MetaModelAlgorithm()
+ , covarianceModel_()
+ , basis_()
+ , beta_(0)
+ , gaussianProcessFitterResult_()
+ , result_()
+{
+ // Nothing to do
+}
+
+GaussianProcessRegression::GaussianProcessRegression(const GaussianProcessFitterResult & result)
+ : MetaModelAlgorithm(result.getInputSample(), result.getOutputSample())
+ , covarianceModel_(result.getCovarianceModel())
+ , basis_(result.getBasis())
+ , beta_(0)
+ , gaussianProcessFitterResult_(result)
+ , result_()
+{
+ basis_ = gaussianProcessFitterResult_.getBasis();
+ beta_ = gaussianProcessFitterResult_.getTrendCoefficients();
+}
+
+/* Constructor */
+GaussianProcessRegression::GaussianProcessRegression(const Sample & inputSample,
+ const Sample & outputSample,
+ const CovarianceModel & covarianceModel,
+ const Function & trendFunction)
+ : MetaModelAlgorithm(inputSample, outputSample),
+ covarianceModel_(covarianceModel),
+ basis_(),
+ beta_(0),
+ gaussianProcessFitterResult_(),
+ result_()
+{
+ // check in/out samples
+ if (inputSample.getSize() != outputSample.getSize())
+ throw InvalidArgumentException(HERE) << "GaussianProcessRegression input sample size (" << inputSample.getSize() << ") does not match output sample size (" << outputSample.getSize() << ")";
+
+ const UnsignedInteger inputDimension = inputSample.getDimension();
+ const UnsignedInteger outputDimension = outputSample.getDimension();
+
+ // covariance model checking
+ if (covarianceModel.getInputDimension() != inputDimension)
+ throw InvalidArgumentException(HERE) << "GaussianProcessRegression : Covariance model input dimension is " << covarianceModel.getInputDimension() << ", expected " << inputDimension;
+ if (covarianceModel.getOutputDimension() != outputDimension)
+ throw InvalidArgumentException(HERE) << "GaussianProcessRegression : Covariance model output dimension is " << covarianceModel.getOutputDimension() << ", expected " << outputDimension;
+
+ // trend checking
+ if (trendFunction.getInputDimension() != inputDimension)
+ throw InvalidArgumentException(HERE) << "GaussianProcessRegression : trend input dimension is " << trendFunction.getInputDimension() << ", expected " << inputDimension;
+ if (trendFunction.getOutputDimension() != outputDimension)
+ throw InvalidArgumentException(HERE) << "GaussianProcessRegression : trend output dimension is " << trendFunction.getOutputDimension() << ", expected " << outputDimension;
+
+ const Sample detrended(outputSample_ - trendFunction(inputSample_));
+ // Launch a fit
+ GaussianProcessFitter algo(inputSample_, detrended, covarianceModel_);
+ algo.setKeepCholeskyFactor(true);
+ algo.setOptimizeParameters(false);
+ algo.run();
+ gaussianProcessFitterResult_ = algo.getResult();
+  // We do not estimate any trend, but the trend function must be stored in the Kriging evaluation
+ basis_ = Basis(1);
+ basis_[0] = trendFunction;
+ beta_ = Point(outputDimension, 1.0);
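+  // With a unit coefficient per output marginal, the metamodel evaluates
+  // trendFunction(x) plus the covariance (interpolation) part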
+}
+
+/* Virtual constructor */
+GaussianProcessRegression * GaussianProcessRegression::clone() const
+{
+ return new GaussianProcessRegression(*this);
+}
+
+Point GaussianProcessRegression::computeGamma() const
+{
+ // Get cholesky factor & rho from gaussian fitter result
+ LOGINFO("Solve L^t.gamma = rho");
+
+ const Point rho(gaussianProcessFitterResult_.getRho());
+
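+  // With C = L L^t the covariance matrix of the learning points and
+  // rho = L^{-1}(y - F.beta) stored by the fitter, gamma = L^{-t}.rho,
+  // i.e. gamma solves C.gamma = y - F.beta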
+ const GaussianProcessFitterResult::LinearAlgebra algebraMethod = gaussianProcessFitterResult_.getLinearAlgebraMethod();
+ if (algebraMethod == GaussianProcessFitterResult::HMAT)
+ {
+ return gaussianProcessFitterResult_.getHMatCholeskyFactor().solveLower(rho, true);
+ }
+ else
+ {
+    // Solve the transposed lower-triangular system L^t.gamma = rho, keeping the factor intact
+ return gaussianProcessFitterResult_.getCholeskyFactor().getImplementation()->solveLinearSystemTri(rho, true, true);
+ }
+}
+
+/* Perform regression */
+void GaussianProcessRegression::run()
+{
+
+  // Covariance coefficients are computed once, even if the optimizer is fixed
+ LOGINFO("Compute the interpolation part");
+ const Point gamma(computeGamma());
+ LOGINFO("Store the estimates");
+ LOGINFO("Build the output meta-model");
+ Function metaModel;
+
+ // We use directly the points
+ const CovarianceModel conditionalCovarianceModel(gaussianProcessFitterResult_.getCovarianceModel());
+ const Sample inputSample(getInputSample());
+ const Sample outputSample(getOutputSample());
+ const UnsignedInteger outputDimension = outputSample.getDimension();
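+  // Reshape the flat gamma vector (size * outputDimension values) into a Sample
+  // of size points with outputDimension components each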
+ Sample covarianceCoefficients(inputSample.getSize(), outputDimension);
+ covarianceCoefficients.getImplementation()->setData(gamma);
+
+ // Meta model definition
+ metaModel.setEvaluation(new KrigingEvaluation(basis_, inputSample, conditionalCovarianceModel, beta_, covarianceCoefficients));
+ metaModel.setGradient(new KrigingGradient(basis_, inputSample, conditionalCovarianceModel, beta_, covarianceCoefficients));
+ metaModel.setHessian(new CenteredFiniteDifferenceHessian(ResourceMap::GetAsScalar( "CenteredFiniteDifferenceGradient-DefaultEpsilon" ), metaModel.getEvaluation()));
+
+ // compute residual, relative error
+ const Point outputVariance(outputSample.computeVariance());
+ const Sample mY(metaModel(inputSample));
+ const Point squaredResiduals((outputSample - mY).computeRawMoment(2));
+
+ const UnsignedInteger size = inputSample.getSize();
+ Point residuals(outputDimension);
+ Point relativeErrors(outputDimension);
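+  // For each output marginal: residual = sqrt(MSE / size) and
+  // relative error = MSE normalized by the output variance,
+  // where MSE is the second raw moment of the prediction errors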
+ for (UnsignedInteger outputIndex = 0; outputIndex < outputDimension; ++ outputIndex)
+ {
+ residuals[outputIndex] = std::sqrt(squaredResiduals[outputIndex] / size);
+ relativeErrors[outputIndex] = squaredResiduals[outputIndex] / outputVariance[outputIndex];
+ }
+ result_ = GaussianProcessRegressionResult(gaussianProcessFitterResult_, covarianceCoefficients);
+ // Set metamodel
+ result_.setMetaModel(metaModel);
+ result_.setResiduals(residuals);
+ result_.setRelativeErrors(relativeErrors);
+}
+
+
+/* String converter */
+String GaussianProcessRegression::__repr__() const
+{
+ return OSS() << "class=" << getClassName();
+}
+
+
+GaussianProcessRegressionResult GaussianProcessRegression::getResult() const
+{
+ return result_;
+}
+
+/* Method save() stores the object through the StorageManager */
+void GaussianProcessRegression::save(Advocate & adv) const
+{
+ MetaModelAlgorithm::save(adv);
+ adv.saveAttribute( "covarianceModel_", covarianceModel_ );
+ adv.saveAttribute( "basis_", basis_);
+ adv.saveAttribute( "result_", result_);
+}
+
+/* Method load() reloads the object from the StorageManager */
+void GaussianProcessRegression::load(Advocate & adv)
+{
+ MetaModelAlgorithm::load(adv);
+ adv.loadAttribute( "covarianceModel_", covarianceModel_ );
+ adv.loadAttribute( "basis_", basis_);
+ adv.loadAttribute( "result_", result_);
+}
+
+END_NAMESPACE_OPENTURNS
diff --git a/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/GaussianProcessRegressionResult.cxx b/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/GaussianProcessRegressionResult.cxx
new file mode 100644
index 0000000000..3c7f38e615
--- /dev/null
+++ b/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/GaussianProcessRegressionResult.cxx
@@ -0,0 +1,158 @@
+// -*- C++ -*-
+/**
+ * @brief The result of a gaussian process regression estimation
+ *
+ * Copyright 2005-2024 Airbus-EDF-IMACS-ONERA-Phimeca
+ *
+ * This library is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+#include "openturns/GaussianProcessRegressionResult.hxx"
+#include "openturns/OSS.hxx"
+#include "openturns/PersistentObjectFactory.hxx"
+#include "openturns/Log.hxx"
+#include "openturns/AggregatedFunction.hxx"
+
+BEGIN_NAMESPACE_OPENTURNS
+
+CLASSNAMEINIT(GaussianProcessRegressionResult)
+static const Factory<GaussianProcessRegressionResult> Factory_GaussianProcessRegressionResult;
+
+/* Default constructor */
+GaussianProcessRegressionResult::GaussianProcessRegressionResult()
+ : MetaModelResult()
+{
+ // Nothing to do
+}
+
+/* Constructor with parameters */
+GaussianProcessRegressionResult::GaussianProcessRegressionResult(const GaussianProcessFitterResult & result,
+ const Sample & covarianceCoefficients)
+ : gpfResult_(result),
+ covarianceCoefficients_(covarianceCoefficients)
+{
+ setInputSample(result.getInputSample());
+ setOutputSample(result.getOutputSample());
+ setMetaModel(result.getMetaModel());
+ setResiduals(result.getResiduals());
+ setRelativeErrors(result.getRelativeErrors());
+ const UnsignedInteger size = covarianceCoefficients.getSize();
+ if (size != getInputSample().getSize())
+ throw InvalidArgumentException(HERE) << "In GaussianProcessRegressionResult::GaussianProcessRegressionResult, covariance coefficients has incorrect size. Expected size = " << getInputSample().getSize() << ", covariance coefficients size = " << size;
+}
+
+/* Virtual constructor */
+GaussianProcessRegressionResult * GaussianProcessRegressionResult::clone() const
+{
+ return new GaussianProcessRegressionResult(*this);
+}
+
+/* Covariance coefficients accessor */
+Sample GaussianProcessRegressionResult::getCovarianceCoefficients() const
+{
+ return covarianceCoefficients_;
+}
+
+/* String converter */
+String GaussianProcessRegressionResult::__repr__() const
+{
+ return OSS(true) << "class=" << getClassName()
+ << ", basis=" << getBasis()
+ << ", trend coefficients=" << getTrendCoefficients()
+ << ", covariance models=" << getCovarianceModel()
+ << ", covariance coefficients=" << covarianceCoefficients_;
+}
+
+String GaussianProcessRegressionResult::__str__(const String & ) const
+{
+ OSS oss(false);
+ oss << getClassName() << "(";
+ oss << "covariance models=" << getCovarianceModel();
+ oss << ", covariance coefficients=" << covarianceCoefficients_;
+ oss << ", basis=" << getBasis();
+ oss << ", trend coefficients=" << getTrendCoefficients() << ")";
+ return oss;
+}
+
+/* Basis accessor */
+Basis GaussianProcessRegressionResult::getBasis() const
+{
+ return gpfResult_.getBasis();
+}
+
+/* Trend coefficients accessor */
+Point GaussianProcessRegressionResult::getTrendCoefficients() const
+{
+ return gpfResult_.getTrendCoefficients();
+}
+
+/* Covariance models accessor */
+CovarianceModel GaussianProcessRegressionResult::getCovarianceModel() const
+{
+ return gpfResult_.getCovarianceModel();
+}
+
+/** Regression matrix accessor */
+Matrix GaussianProcessRegressionResult::getRegressionMatrix() const
+{
+ return gpfResult_.getRegressionMatrix();
+}
+
+Scalar GaussianProcessRegressionResult::getOptimalLogLikelihood() const
+{
+ return gpfResult_.getOptimalLogLikelihood();
+}
+
+GaussianProcessRegressionResult::LinearAlgebra GaussianProcessRegressionResult::getLinearAlgebraMethod() const
+{
+ return gpfResult_.getLinearAlgebraMethod();
+}
+
+/* process accessor */
+Process GaussianProcessRegressionResult::getNoise() const
+{
+ return gpfResult_.getNoise();
+}
+
+/* Method that returns the covariance factor - Lapack */
+TriangularMatrix GaussianProcessRegressionResult::getCholeskyFactor() const
+{
+ return gpfResult_.getCholeskyFactor();
+}
+
+/* Method that returns the covariance factor - hmat */
+HMatrix GaussianProcessRegressionResult::getHMatCholeskyFactor() const
+{
+ return gpfResult_.getHMatCholeskyFactor();
+}
+
+/* Method save() stores the object through the StorageManager */
+void GaussianProcessRegressionResult::save(Advocate & adv) const
+{
+ MetaModelResult::save(adv);
+ adv.saveAttribute( "gpfResult_", gpfResult_);
+ adv.saveAttribute( "covarianceCoefficients_", covarianceCoefficients_ );
+}
+
+
+/* Method load() reloads the object from the StorageManager */
+void GaussianProcessRegressionResult::load(Advocate & adv)
+{
+ MetaModelResult::load(adv);
+ adv.loadAttribute("gpfResult_", gpfResult_);
+ adv.loadAttribute("covarianceCoefficients_", covarianceCoefficients_);
+}
+
+
+END_NAMESPACE_OPENTURNS
diff --git a/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/openturns/GaussianProcessConditionalCovariance.hxx b/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/openturns/GaussianProcessConditionalCovariance.hxx
new file mode 100644
index 0000000000..d41475bd98
--- /dev/null
+++ b/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/openturns/GaussianProcessConditionalCovariance.hxx
@@ -0,0 +1,129 @@
+// -*- C++ -*-
+/**
+ * @brief The postprocessing of a GPR result (conditional covariance)
+ *
+ * Copyright 2005-2024 Airbus-EDF-IMACS-ONERA-Phimeca
+ *
+ * This library is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+#ifndef OPENTURNS_GAUSSIANPROCESSCONDITIONALCOVARIANCE_HXX
+#define OPENTURNS_GAUSSIANPROCESSCONDITIONALCOVARIANCE_HXX
+
+#include "openturns/GaussianProcessRegressionResult.hxx"
+#include "openturns/CovarianceModel.hxx"
+#include "openturns/PersistentCollection.hxx"
+#include "openturns/Indices.hxx"
+#include "openturns/Function.hxx"
+#include "openturns/Normal.hxx"
+
+BEGIN_NAMESPACE_OPENTURNS
+
+/**
+ * @class GaussianProcessConditionalCovariance
+ *
+ * The postprocessing of a Gaussian process regression result: conditional mean and covariance
+ */
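+/* A minimal usage sketch (names such as `gprResult` and `x` are assumptions):
+ *   GaussianProcessConditionalCovariance cond(gprResult);
+ *   const Point mean(cond.getConditionalMean(x));
+ *   const CovarianceMatrix cov(cond.getConditionalCovariance(x));
+ */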
+
+class OT_API GaussianProcessConditionalCovariance
+ : public PersistentObject
+{
+ CLASSNAME
+
+public:
+
+ typedef GaussianProcessFitterResult::LinearAlgebra LinearAlgebra;
+  typedef Collection<CovarianceMatrix> CovarianceMatrixCollection;
+
+ /** Default constructor */
+ GaussianProcessConditionalCovariance();
+
+  /** Parameter constructor without any Cholesky factor */
+ GaussianProcessConditionalCovariance(const GaussianProcessRegressionResult & result);
+
+ /** Virtual constructor */
+ GaussianProcessConditionalCovariance * clone() const override;
+
+ /** String converter */
+ String __repr__() const override;
+ String __str__(const String & offset = "") const override;
+
+ /** Compute mean of new points conditionally to observations */
+ virtual Sample getConditionalMean(const Sample & xi) const;
+
+ /** Compute mean of new points conditionally to observations */
+ virtual Point getConditionalMean(const Point & xi) const;
+
+ /** Compute covariance matrix conditionally to observations*/
+  virtual CovarianceMatrix getConditionalCovariance(const Sample & xi) const;
+
+ /** Compute covariance matrix conditionally to observations*/
+ virtual CovarianceMatrix getConditionalCovariance(const Point & xi) const;
+
+ /** Compute covariance matrices conditionally to observations (1 cov / point)*/
+ virtual CovarianceMatrixCollection getConditionalMarginalCovariance(const Sample & xi) const;
+
+ /** Compute covariance matrix conditionally to observations (1 cov of size outdimension)*/
+ virtual CovarianceMatrix getConditionalMarginalCovariance(const Point & xi) const;
+
+ /** Compute marginal variance conditionally to observations (1 cov of size outdimension)*/
+ virtual Scalar getConditionalMarginalVariance(const Point & point,
+ const UnsignedInteger marginalIndex = 0) const;
+
+ /** Compute marginal variance conditionally to observations (1 cov / point)*/
+ virtual Sample getConditionalMarginalVariance(const Sample & xi,
+ const UnsignedInteger marginalIndex = 0) const;
+
+ virtual Point getConditionalMarginalVariance(const Point & point,
+ const Indices & indices) const;
+
+ virtual Sample getConditionalMarginalVariance(const Sample & xi,
+ const Indices & indices) const;
+
+ /** Compute joint normal distribution conditionally to observations*/
+ virtual Normal operator()(const Sample & xi) const;
+
+ /** Compute joint normal distribution conditionally to observations*/
+ virtual Normal operator()(const Point & xi) const;
+
+ /** Method save() stores the object through the StorageManager */
+ void save(Advocate & adv) const override;
+
+ /** Method load() reloads the object from the StorageManager */
+ void load(Advocate & adv) override;
+
+
+protected:
+
+ void computePhi();
+ Matrix solveTriangularSystem(const Matrix & rhs) const;
+
+private:
+
+  /** The underlying Gaussian process regression result */
+  GaussianProcessRegressionResult result_;
+
+  /** Matrix phi = L^{-1}F ==> phiT_ stores its transpose */
+  Matrix phiT_;
+
+  /** The matrix F^{t}R^{-1}F is obtained through the QR decomposition of phi = L^{-1}F */
+  // G_ is the triangular factor of the decomposition ==> Gt_ stores its transpose
+  Matrix Gt_;
+
+} ; /* class GaussianProcessConditionalCovariance */
+
+
+END_NAMESPACE_OPENTURNS
+
+#endif /* OPENTURNS_GAUSSIANPROCESSCONDITIONALCOVARIANCE_HXX */
diff --git a/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/openturns/GaussianProcessFitter.hxx b/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/openturns/GaussianProcessFitter.hxx
new file mode 100644
index 0000000000..b8b73b6a41
--- /dev/null
+++ b/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/openturns/GaussianProcessFitter.hxx
@@ -0,0 +1,252 @@
+// -*- C++ -*-
+/**
+ * @brief The class fitting gaussian processes
+ *
+ * Copyright 2005-2024 Airbus-EDF-IMACS-ONERA-Phimeca
+ *
+ * This library is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+#ifndef OPENTURNS_GAUSSIANPROCESSFITTER_HXX
+#define OPENTURNS_GAUSSIANPROCESSFITTER_HXX
+
+#include "openturns/MetaModelAlgorithm.hxx"
+#include "openturns/Basis.hxx"
+#include "openturns/CovarianceModel.hxx"
+#include "openturns/KrigingResult.hxx"
+#include "openturns/HMatrix.hxx"
+#include "openturns/SpecFunc.hxx"
+#include "openturns/OptimizationAlgorithm.hxx"
+#include "openturns/GaussianProcessFitterResult.hxx"
+
+BEGIN_NAMESPACE_OPENTURNS
+
+/**
+ * @class GaussianProcessFitter
+ *
+ * The class fitting gaussian processes
+ */
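+/* A minimal usage sketch (sample and model names are assumptions):
+ *   GaussianProcessFitter fitter(inputSample, outputSample, covarianceModel, basis);
+ *   fitter.run();
+ *   const GaussianProcessFitterResult result(fitter.getResult());
+ */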
+
+class OT_API GaussianProcessFitter
+ : public MetaModelAlgorithm
+{
+ CLASSNAME
+
+public:
+
+ typedef GaussianProcessFitterResult::LinearAlgebra LinearAlgebra;
+
+ /** Default constructor */
+ GaussianProcessFitter();
+
+ /** Parameters constructor */
+ GaussianProcessFitter (const Sample & inputSample,
+ const Sample & outputSample,
+ const CovarianceModel & covarianceModel,
+ const Basis & basis = Basis(0));
+
+ /** Virtual constructor */
+ GaussianProcessFitter * clone() const override;
+
+ /** String converter */
+ String __repr__() const override;
+
+ /** Perform regression */
+ void run() override;
+
+ /** result accessor */
+ GaussianProcessFitterResult getResult();
+
+ /** Objective function (reduced log-Likelihood) accessor */
+ Function getObjectiveFunction();
+
+ /** Optimization solver accessor */
+ OptimizationAlgorithm getOptimizationAlgorithm() const;
+ void setOptimizationAlgorithm(const OptimizationAlgorithm & solver);
+
+ /** Optimization flag accessor */
+ Bool getOptimizeParameters() const;
+ void setOptimizeParameters(const Bool optimizeParameters);
+
+ /** Accessor to optimization bounds */
+ void setOptimizationBounds(const Interval & optimizationBounds);
+ Interval getOptimizationBounds() const;
+
+ /** Accessor to keep cholesky (for Regression usage)*/
+ Bool getKeepCholeskyFactor() const;
+ void setKeepCholeskyFactor(const Bool keepCholeskyFactor);
+
+ // method accessors
+ void setMethod(const LinearAlgebra method);
+ LinearAlgebra getMethod() const;
+
+ /** Method save() stores the object through the StorageManager */
+ void save(Advocate & adv) const override;
+
+ /** Method load() reloads the object from the StorageManager */
+ void load(Advocate & adv) override;
+
+protected:
+ // Maximize the reduced log-likelihood
+ Scalar maximizeReducedLogLikelihood();
+
+ // Compute the output log-likelihood function
+ Point computeReducedLogLikelihood(const Point & parameters);
+ Scalar computeLapackLogDeterminantCholesky();
+ Scalar computeHMatLogDeterminantCholesky();
+
+ // Compute the design matrix on the input sample
+ void computeF();
+
+ /** Method accessor (lapack/hmat) */
+ void initializeMethod();
+
+  /** Reset the internal state when the linear algebra method changes */
+ void reset();
+
+ // Initialize default optimization solver
+ void initializeDefaultOptimizationAlgorithm();
+
+private:
+
+ // Helper class to compute the reduced log-likelihood function of the model
+ class ReducedLogLikelihoodEvaluation: public EvaluationImplementation
+ {
+ public:
+ // Constructor from a GLM algorithm
+ ReducedLogLikelihoodEvaluation(GaussianProcessFitter & algorithm)
+ : EvaluationImplementation()
+ , algorithm_(algorithm)
+ {
+ // Nothing to do
+ }
+
+ ReducedLogLikelihoodEvaluation * clone() const override
+ {
+ return new ReducedLogLikelihoodEvaluation(*this);
+ }
+
+ // It is a simple call to the computeReducedLogLikelihood() of the algo
+ Point operator() (const Point & point) const override
+ {
+ const Point value(algorithm_.computeReducedLogLikelihood(point));
+ return value;
+ }
+
+ UnsignedInteger getInputDimension() const override
+ {
+ return algorithm_.getReducedCovarianceModel().getParameter().getDimension();
+ }
+
+ UnsignedInteger getOutputDimension() const override
+ {
+ return 1;
+ }
+
+ Description getInputDescription() const override
+ {
+ return algorithm_.getReducedCovarianceModel().getParameterDescription();
+ }
+
+ Description getOutputDescription() const override
+ {
+ return Description(1, "ReducedLogLikelihood");
+ }
+
+ Description getDescription() const override
+ {
+ Description description(getInputDescription());
+ description.add(getOutputDescription());
+ return description;
+ }
+
+ String __repr__() const override
+ {
+ OSS oss;
+ // Don't print algorithm_ here as it will result in an infinite loop!
+ oss << "ReducedLogLikelihoodEvaluation";
+ return oss;
+ }
+
+ String __str__(const String & offset = "") const override
+ {
+ // Don't print algorithm_ here as it will result in an infinite loop!
+ return OSS() << offset << __repr__();
+ }
+
+ private:
+ GaussianProcessFitter & algorithm_;
+ }; // ReducedLogLikelihoodEvaluation
+
+ /** Covariance model accessor */
+ void setCovarianceModel(const CovarianceModel & covarianceModel);
+ CovarianceModel getCovarianceModel() const;
+ CovarianceModel getReducedCovarianceModel() const;
+
+ /** Set basis collection method */
+ void setBasis(const Basis & basis);
+
+ // The covariance model parametric family
+ CovarianceModel covarianceModel_;
+ CovarianceModel reducedCovarianceModel_;
+
+ // The optimization algorithm used for the meta-parameters estimation
+ OptimizationAlgorithm solver_;
+
+ // Bounds used for parameter optimization
+ Interval optimizationBounds_;
+
+ // The coefficients of the current output conditional expectation part
+ Point beta_;
+ // Temporarily used to compute gamma
+ Point rho_;
+ // The current output Gram matrix
+ Matrix F_;
+
+ /** Result */
+ GaussianProcessFitterResult result_;
+
+ /** BasisCollection */
+ Basis basis_;
+
+ /** Cholesky factor ==> TriangularMatrix */
+ TriangularMatrix covarianceCholeskyFactor_;
+
+ /** Cholesky factor when using hmat-oss */
+ HMatrix covarianceCholeskyFactorHMatrix_;
+
+ /** Boolean argument for keep covariance */
+ Bool keepCholeskyFactor_ = ResourceMap::GetAsBool("GaussianProcessFitter-KeepCovariance");
+
+ /** Linear algebra */
+ LinearAlgebra method_ = GaussianProcessFitterResult::LAPACK;
+
+ /** Bool to tell if optimization has run */
+ Bool hasRun_ = false;
+
+ /** Flag to tell if the parameters of the covariance model
+ have to be optimized */
+ Bool optimizeParameters_ = ResourceMap::GetAsBool("GaussianProcessFitter-OptimizeParameters");
+
+ /** Flag to tell if the amplitude parameters are estimated using an analytical derivation */
+ Bool analyticalAmplitude_ = false;
+
+ /** Cache of the last computed reduced log-likelihood */
+ Scalar lastReducedLogLikelihood_ = SpecFunc::LowestScalar;
+}; // class GaussianProcessFitter
+
+
+END_NAMESPACE_OPENTURNS
+
+#endif
diff --git a/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/openturns/GaussianProcessFitterResult.hxx b/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/openturns/GaussianProcessFitterResult.hxx
new file mode 100644
index 0000000000..f75b3c28c3
--- /dev/null
+++ b/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/openturns/GaussianProcessFitterResult.hxx
@@ -0,0 +1,170 @@
+// -*- C++ -*-
+/**
+ * @brief The result of a gaussian process fitter
+ *
+ * Copyright 2005-2024 Airbus-EDF-IMACS-ONERA-Phimeca
+ *
+ * This library is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+#ifndef OPENTURNS_GAUSSIANPROCESSFITTERRESULT_HXX
+#define OPENTURNS_GAUSSIANPROCESSFITTERRESULT_HXX
+
+#include "openturns/MetaModelResult.hxx"
+#include "openturns/CovarianceModel.hxx"
+#include "openturns/Sample.hxx"
+#include "openturns/Collection.hxx"
+#include "openturns/PersistentCollection.hxx"
+#include "openturns/Basis.hxx"
+#include "openturns/Function.hxx"
+#include "openturns/Process.hxx"
+#include "openturns/HMatrix.hxx"
+#include "openturns/Basis.hxx"
+#include "openturns/Function.hxx"
+
+BEGIN_NAMESPACE_OPENTURNS
+
+/**
+ * @class GaussianProcessFitterResult
+ *
+ * The result of a gaussian process fitter
+ */
+
+class OT_API GaussianProcessFitterResult
+ : public MetaModelResult
+{
+ CLASSNAME
+
+public:
+
+ enum LinearAlgebra { LAPACK, HMAT };
+
+  typedef Collection<Point> PointCollection;
+  typedef PersistentCollection<Point> PointPersistentCollection;
+
+ /** Default constructor */
+ GaussianProcessFitterResult();
+
+ /** Parameter constructor after a gaussian process fitting */
+ GaussianProcessFitterResult(const Sample & inputData,
+ const Sample & outputData,
+ const Function & metaModel,
+ const Point & residuals,
+ const Point & relativeErrors,
+ const Matrix & regressionMatrix,
+ const Basis & basis,
+ const Point & trendCoefficients,
+ const CovarianceModel & covarianceModel,
+ const Scalar optimalLogLikelihood,
+ const LinearAlgebra linearAlgebraMethod);
+
+ /** Virtual constructor */
+ GaussianProcessFitterResult * clone() const override;
+
+ /** String converter */
+ String __repr__() const override;
+ String __str__(const String & offset = "") const override;
+
+ /** Trend basis accessor */
+ Basis getBasis() const;
+
+ /** Trend coefficients accessor */
+ Point getTrendCoefficients() const;
+
+ /** Conditional covariance models accessor */
+ CovarianceModel getCovarianceModel() const;
+
+ /** optimal log-likelihood value */
+ Scalar getOptimalLogLikelihood() const;
+
+ /** linear algebra method */
+ LinearAlgebra getLinearAlgebraMethod() const;
+
+ /** Regression matrix accessor */
+ Matrix getRegressionMatrix() const;
+
+ /** process accessor */
+ Process getNoise() const;
+
+ /** Method save() stores the object through the StorageManager */
+ void save(Advocate & adv) const override;
+
+ /** Method load() reloads the object from the StorageManager */
+ void load(Advocate & adv) override;
+
+
+protected:
+
+ // GaussianProcessFitter::run could set the Cholesky factor
+ friend class GaussianProcessFitter;
+
+ // GaussianProcessRegressionResult could use Cholesky setters
+ friend class GaussianProcessRegressionResult;
+
+  // GaussianProcessRegression could use Cholesky setters
+ friend class GaussianProcessRegression;
+
+ /** Accessor to the Cholesky factor*/
+ void setCholeskyFactor(const TriangularMatrix & covarianceCholeskyFactor,
+ const HMatrix & covarianceHMatrix);
+
+ /** Method that returns the covariance factor - lapack */
+ TriangularMatrix getCholeskyFactor() const;
+
+ /** Method that returns the covariance factor - hmat */
+ HMatrix getHMatCholeskyFactor() const;
+
+ /** rho accessor */
+ Point getRho() const;
+ void setRho(const Point & rho);
+
+
+private:
+
+ /** Regression matrix (aka F matrix) */
+ Matrix regressionMatrix_;
+
+ /** The trend basis */
+ Basis basis_;
+
+ /** The trend coefficients */
+ Point beta_;
+
+ /** The covariance model */
+ CovarianceModel covarianceModel_;
+
+ /** The rho point */
+ Point rho_;
+
+ /** optimal log-likelihood value */
+ Scalar optimalLogLikelihood_;
+
+ /** Linear algebra method */
+ LinearAlgebra linearAlgebraMethod_;
+
+ /** Boolean for Cholesky. */
+ Bool hasCholeskyFactor_;
+
+ /** Cholesky factor */
+ TriangularMatrix covarianceCholeskyFactor_;
+
+ /** Cholesky factor when using hmat-oss/hmat */
+ HMatrix covarianceHMatrix_;
+
+} ; /* class GaussianProcessFitterResult */
+
+
+END_NAMESPACE_OPENTURNS
+
+#endif /* OPENTURNS_GAUSSIANPROCESSFITTERRESULT_HXX */
diff --git a/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/openturns/GaussianProcessRegression.hxx b/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/openturns/GaussianProcessRegression.hxx
new file mode 100644
index 0000000000..8ee064c1c2
--- /dev/null
+++ b/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/openturns/GaussianProcessRegression.hxx
@@ -0,0 +1,105 @@
+// -*- C++ -*-
+/**
+ * @brief The class building gaussian process regression
+ *
+ * Copyright 2005-2024 Airbus-EDF-IMACS-ONERA-Phimeca
+ *
+ * This library is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+#ifndef OPENTURNS_GAUSSIANPROCESSREGRESSION_HXX
+#define OPENTURNS_GAUSSIANPROCESSREGRESSION_HXX
+
+#include "openturns/MetaModelAlgorithm.hxx"
+#include "openturns/CovarianceModel.hxx"
+#include "openturns/Function.hxx"
+#include "openturns/GaussianProcessRegressionResult.hxx"
+#include "openturns/GaussianProcessFitterResult.hxx"
+
+BEGIN_NAMESPACE_OPENTURNS
+
+/**
+ * @class GaussianProcessRegression
+ *
+ * The class building a gaussian process regression, relying on a GaussianProcessFitterResult
+ * for the evaluation of the covariance coefficients
+ */
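+/* A minimal usage sketch (names are assumptions): either reuse a fitter result,
+ *   GaussianProcessRegression algo(fitterResult);
+ *   algo.run();
+ * or provide a known trend together with the data,
+ *   GaussianProcessRegression algo(inputSample, outputSample, covarianceModel, trendFunction);
+ *   algo.run();
+ */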
+
+class OT_API GaussianProcessRegression
+ : public MetaModelAlgorithm
+{
+ CLASSNAME
+
+public:
+
+ typedef GaussianProcessFitterResult::LinearAlgebra LinearAlgebra;
+
+ /** Default constructor */
+ GaussianProcessRegression();
+
+ /** Constructor */
+ GaussianProcessRegression(const GaussianProcessFitterResult & result);
+
+ /** Constructor */
+ GaussianProcessRegression (const Sample & inputSample,
+ const Sample & outputSample,
+ const CovarianceModel & covarianceModel,
+ const Function & trendFunction);
+
+ /** Virtual constructor */
+ GaussianProcessRegression * clone() const override;
+
+ /** String converter */
+ String __repr__() const override;
+
+ /** Perform regression */
+ void run() override;
+
+ /** result accessor */
+ GaussianProcessRegressionResult getResult() const;
+
+ /** Method save() stores the object through the StorageManager */
+ void save(Advocate & adv) const override;
+
+ /** Method load() reloads the object from the StorageManager */
+ void load(Advocate & adv) override;
+
+
+protected:
+
+ /** The method helps to compute the gamma point */
+ Point computeGamma() const;
+
+private:
+
+ // The covariance model
+ CovarianceModel covarianceModel_;
+
+ // Basis argument
+ Basis basis_;
+
+ // The coefficients of the trend
+ Point beta_;
+
+ /** Results */
+ GaussianProcessFitterResult gaussianProcessFitterResult_;
+ GaussianProcessRegressionResult result_;
+
+
+}; // class GaussianProcessRegression
+
+
+END_NAMESPACE_OPENTURNS
+
+#endif
diff --git a/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/openturns/GaussianProcessRegressionResult.hxx b/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/openturns/GaussianProcessRegressionResult.hxx
new file mode 100644
index 0000000000..4b9a771649
--- /dev/null
+++ b/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/openturns/GaussianProcessRegressionResult.hxx
@@ -0,0 +1,127 @@
+// -*- C++ -*-
+/**
+ * @brief The result of a gaussian process regression estimation
+ *
+ * Copyright 2005-2024 Airbus-EDF-IMACS-ONERA-Phimeca
+ *
+ * This library is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+#ifndef OPENTURNS_GAUSSIANPROCESSREGRESSIONRESULT_HXX
+#define OPENTURNS_GAUSSIANPROCESSREGRESSIONRESULT_HXX
+
+#include "openturns/GaussianProcessFitterResult.hxx"
+#include "openturns/CovarianceModel.hxx"
+#include "openturns/Sample.hxx"
+#include "openturns/Collection.hxx"
+#include "openturns/Basis.hxx"
+#include "openturns/PersistentCollection.hxx"
+#include "openturns/Function.hxx"
+#include "openturns/Normal.hxx"
+#include "openturns/HMatrix.hxx"
+#include "openturns/Basis.hxx"
+
+BEGIN_NAMESPACE_OPENTURNS
+
+/**
+ * @class GaussianProcessRegressionResult
+ *
+ * The result of a gaussian process regression estimation
+ */
+
+class OT_API GaussianProcessRegressionResult
+ : public MetaModelResult
+{
+ CLASSNAME
+
+public:
+
+ typedef GaussianProcessFitterResult::LinearAlgebra LinearAlgebra;
+
+ /** Default constructor */
+ GaussianProcessRegressionResult();
+
+ /** Parameter constructor */
+ GaussianProcessRegressionResult(const GaussianProcessFitterResult & result,
+ const Sample & covarianceCoefficients);
+
+ /** Virtual constructor */
+ GaussianProcessRegressionResult * clone() const override;
+
+ /** String converter */
+ String __repr__() const override;
+ String __str__(const String & offset = "") const override;
+
+ /** Trend basis accessor */
+ Basis getBasis() const;
+
+ /** Trend coefficients accessor */
+ Point getTrendCoefficients() const;
+
+ /** Conditional covariance models accessor */
+ CovarianceModel getCovarianceModel() const;
+
+ /** optimal log-likelihood value */
+ Scalar getOptimalLogLikelihood() const;
+
+ /** linear algebra method */
+ LinearAlgebra getLinearAlgebraMethod() const;
+
+ /** Regression matrix accessor */
+ Matrix getRegressionMatrix() const;
+
+ /** process accessor */
+ Process getNoise() const;
+
+ /** Process coefficients accessor */
+ Sample getCovarianceCoefficients() const;
+
+ /** Method save() stores the object through the StorageManager */
+ void save(Advocate & adv) const override;
+
+ /** Method load() reloads the object from the StorageManager */
+ void load(Advocate & adv) override;
+
+protected:
+
+ // GaussianProcessFitter::run could set the Cholesky factor
+ friend class GaussianProcessFitter;
+
+  // GaussianProcessConditionalCovariance could use the Cholesky factor accessors
+ friend class GaussianProcessConditionalCovariance;
+
+  // GaussianProcessRegression could use the Cholesky factor accessors
+ friend class GaussianProcessRegression;
+
+ /** Method that returns the covariance factor - lapack */
+ TriangularMatrix getCholeskyFactor() const;
+
+ /** Method that returns the covariance factor - hmat */
+ HMatrix getHMatCholeskyFactor() const;
+
+private:
+
+ /** GaussianProcessFitterResult */
+ GaussianProcessFitterResult gpfResult_;
+
+ /** The covariance coefficients */
+ Sample covarianceCoefficients_;
+
+
+} ; /* class GaussianProcessRegressionResult */
+
+
+END_NAMESPACE_OPENTURNS
+
+#endif /* OPENTURNS_GAUSSIANPROCESSREGRESSIONRESULT_HXX */
diff --git a/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/openturns/OTKriging.hxx b/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/openturns/OTKriging.hxx
index 9b6eb1f037..00b1800ae0 100644
--- a/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/openturns/OTKriging.hxx
+++ b/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/openturns/OTKriging.hxx
@@ -28,5 +28,10 @@
#include "openturns/KrigingResult.hxx"
#include "openturns/GeneralLinearModelResult.hxx"
#include "openturns/GeneralLinearModelAlgorithm.hxx"
+#include "openturns/GaussianProcessFitterResult.hxx"
+#include "openturns/GaussianProcessFitter.hxx"
+#include "openturns/GaussianProcessRegressionResult.hxx"
+#include "openturns/GaussianProcessRegression.hxx"
+#include "openturns/GaussianProcessConditionalCovariance.hxx"
#endif /* OPENTURNS_OTKRIGING_HXX */
diff --git a/lib/src/Uncertainty/Algorithm/OrthogonalBasis/OrthogonalFunctionFactory.cxx b/lib/src/Uncertainty/Algorithm/OrthogonalBasis/OrthogonalFunctionFactory.cxx
index 41c746f2f6..889cf9dbc7 100644
--- a/lib/src/Uncertainty/Algorithm/OrthogonalBasis/OrthogonalFunctionFactory.cxx
+++ b/lib/src/Uncertainty/Algorithm/OrthogonalBasis/OrthogonalFunctionFactory.cxx
@@ -22,6 +22,7 @@
#include "openturns/OSS.hxx"
#include "openturns/PersistentObjectFactory.hxx"
#include "openturns/Exception.hxx"
+#include "openturns/OrthogonalBasis.hxx"
BEGIN_NAMESPACE_OPENTURNS
@@ -88,13 +89,17 @@ EnumerateFunction OrthogonalFunctionFactory::getEnumerateFunction() const
throw NotYetImplementedException(HERE) << "In OrthogonalFunctionFactory::getEnumerateFunction() const";
}
+/* Get the function factory corresponding to marginal input indices */
+OrthogonalBasis OrthogonalFunctionFactory::getMarginal(const Indices & ) const
+{
+  throw NotYetImplementedException(HERE) << "In OrthogonalFunctionFactory::getMarginal(const Indices & indices) const";
+}
Bool OrthogonalFunctionFactory::isOrthogonal() const
{
return true;
}
-
/* String converter */
String OrthogonalFunctionFactory::__repr__() const
{
diff --git a/lib/src/Uncertainty/Algorithm/OrthogonalBasis/OrthogonalProductFunctionFactory.cxx b/lib/src/Uncertainty/Algorithm/OrthogonalBasis/OrthogonalProductFunctionFactory.cxx
index 96ec87f142..21ce04fca4 100644
--- a/lib/src/Uncertainty/Algorithm/OrthogonalBasis/OrthogonalProductFunctionFactory.cxx
+++ b/lib/src/Uncertainty/Algorithm/OrthogonalBasis/OrthogonalProductFunctionFactory.cxx
@@ -32,8 +32,6 @@
BEGIN_NAMESPACE_OPENTURNS
-
-
TEMPLATE_CLASSNAMEINIT(PersistentCollection<OrthogonalUniVariateFunctionFamily>)
static const Factory<PersistentCollection<OrthogonalUniVariateFunctionFamily> > Factory_PersistentCollection_OrthogonalUniVariateFunctionFamily;
@@ -53,7 +51,6 @@ OrthogonalProductFunctionFactory::OrthogonalProductFunctionFactory()
// Nothing to do
}
-
/* Constructor */
OrthogonalProductFunctionFactory::OrthogonalProductFunctionFactory(const FunctionFamilyCollection & coll)
: OrthogonalFunctionFactory()
@@ -62,7 +59,6 @@ OrthogonalProductFunctionFactory::OrthogonalProductFunctionFactory(const Functio
buildMeasure(coll);
}
-
/* Constructor */
OrthogonalProductFunctionFactory::OrthogonalProductFunctionFactory(const FunctionFamilyCollection & coll,
const EnumerateFunction & phi)
@@ -73,14 +69,12 @@ OrthogonalProductFunctionFactory::OrthogonalProductFunctionFactory(const Functio
buildMeasure(coll);
}
-
/* Virtual constructor */
OrthogonalProductFunctionFactory * OrthogonalProductFunctionFactory::clone() const
{
return new OrthogonalProductFunctionFactory(*this);
}
-
/* Return the enumerate function that translates unidimensional indices into multidimensional indices */
EnumerateFunction OrthogonalProductFunctionFactory::getEnumerateFunction() const
{
@@ -99,13 +93,17 @@ OrthogonalProductFunctionFactory::FunctionFamilyCollection OrthogonalProductFunc
return coll;
}
-
/* Build the Function of the given index */
Function OrthogonalProductFunctionFactory::build(const UnsignedInteger index) const
{
return tensorizedFunctionFactory_.build(index);
}
+/* Build the Function of the given index */
+Function OrthogonalProductFunctionFactory::build(const Indices & indices) const
+{
+ return tensorizedFunctionFactory_.build(getEnumerateFunction().inverse(indices));
+}
/* String converter */
String OrthogonalProductFunctionFactory::__repr__() const
@@ -115,7 +113,6 @@ String OrthogonalProductFunctionFactory::__repr__() const
<< " measure=" << measure_;
}
-
/* Method save() stores the object through the StorageManager */
void OrthogonalProductFunctionFactory::save(Advocate & adv) const
{
@@ -123,7 +120,6 @@ void OrthogonalProductFunctionFactory::save(Advocate & adv) const
adv.saveAttribute("tensorizedFunctionFactory_", tensorizedFunctionFactory_);
}
-
/* Method load() reloads the object from the StorageManager */
void OrthogonalProductFunctionFactory::load(Advocate & adv)
{
@@ -157,5 +153,23 @@ void OrthogonalProductFunctionFactory::buildMeasure(const FunctionFamilyCollecti
measure_ = JointDistribution(distributions);
}
+/* Get the function factory corresponding to marginal input indices */
+OrthogonalBasis OrthogonalProductFunctionFactory::getMarginal(const Indices & indices) const
+{
+ OrthogonalProductFunctionFactory::FunctionFamilyCollection functionColl(getFunctionFamilyCollection());
+ const UnsignedInteger size = functionColl.getSize();
+ if (!indices.check(size))
+ throw InvalidArgumentException(HERE) << "The indices of a marginal sample must be in the range [0, size-1] and must be different";
+ // Create list of factories corresponding to input marginal indices
+ OrthogonalProductFunctionFactory::FunctionFamilyCollection functionMarginalCollection;
+ for (UnsignedInteger index = 0; index < size; ++ index)
+ if (indices.contains(index))
+ functionMarginalCollection.add(functionColl[index]);
+  // Build the marginal enumerate function, then the marginal factory
+ const EnumerateFunction enumerateFunction(tensorizedFunctionFactory_.getEnumerateFunction());
+ const EnumerateFunction marginalEnumerateFunction(enumerateFunction.getMarginal(indices));
+ const OrthogonalProductFunctionFactory marginalFactory(functionMarginalCollection, marginalEnumerateFunction);
+ return marginalFactory;
+}
END_NAMESPACE_OPENTURNS
diff --git a/lib/src/Uncertainty/Algorithm/OrthogonalBasis/OrthogonalProductPolynomialFactory.cxx b/lib/src/Uncertainty/Algorithm/OrthogonalBasis/OrthogonalProductPolynomialFactory.cxx
index ad3560d347..43a1ad1231 100644
--- a/lib/src/Uncertainty/Algorithm/OrthogonalBasis/OrthogonalProductPolynomialFactory.cxx
+++ b/lib/src/Uncertainty/Algorithm/OrthogonalBasis/OrthogonalProductPolynomialFactory.cxx
@@ -304,4 +304,21 @@ Sample OrthogonalProductPolynomialFactory::getNodesAndWeights(const Indices & de
return nodes;
}
+/* Get the function factory corresponding to the input marginal indices */
+OrthogonalBasis OrthogonalProductPolynomialFactory::getMarginal(const Indices & indices) const
+{
+ const UnsignedInteger size = coll_.getSize();
+ if (!indices.check(size))
+ throw InvalidArgumentException(HERE) << "The indices of a marginal sample must be in the range [0, size-1] and must be different";
+ // Create list of factories corresponding to input marginal indices
+ OrthogonalProductPolynomialFactory::PolynomialFamilyCollection polynomialMarginalCollection;
+ for (UnsignedInteger index = 0; index < size; ++ index)
+ if (indices.contains(index))
+ polynomialMarginalCollection.add(coll_[index]);
+  // Build the marginal enumerate function, then the marginal factory
+ const EnumerateFunction marginalEnumerateFunction(phi_.getMarginal(indices));
+ const OrthogonalProductPolynomialFactory marginalFactory(polynomialMarginalCollection, marginalEnumerateFunction);
+ return marginalFactory;
+}
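+/* For instance (sketch; assumes an initializer-list Indices constructor), for a
+   3-d factory, getMarginal(Indices({0, 2})) keeps the univariate families 0 and 2
+   together with the matching marginal enumerate function. */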
+
END_NAMESPACE_OPENTURNS
diff --git a/lib/src/Uncertainty/Algorithm/OrthogonalBasis/openturns/OrthogonalFunctionFactory.hxx b/lib/src/Uncertainty/Algorithm/OrthogonalBasis/openturns/OrthogonalFunctionFactory.hxx
index 1a2145714c..1e41b09e31 100644
--- a/lib/src/Uncertainty/Algorithm/OrthogonalBasis/openturns/OrthogonalFunctionFactory.hxx
+++ b/lib/src/Uncertainty/Algorithm/OrthogonalBasis/openturns/OrthogonalFunctionFactory.hxx
@@ -28,6 +28,8 @@
BEGIN_NAMESPACE_OPENTURNS
+class OrthogonalBasis;
+
/**
* @class OrthogonalFunctionFactory
*
@@ -51,7 +53,7 @@ public:
Function build(const UnsignedInteger index) const override;
/** Build the Function of the given multi-indices */
- Function build(const Indices & indices) const;
+ virtual Function build(const Indices & indices) const;
/** Return the measure upon which the basis is orthogonal */
virtual Distribution getMeasure() const;
@@ -62,6 +64,10 @@ public:
/** Virtual constructor */
OrthogonalFunctionFactory * clone() const override;
+
+ /** Get the function factory corresponding to marginal input indices */
+ virtual OrthogonalBasis getMarginal(const Indices & indices) const;
+
Bool isOrthogonal() const override;
/** String converter */
diff --git a/lib/src/Uncertainty/Algorithm/OrthogonalBasis/openturns/OrthogonalProductFunctionFactory.hxx b/lib/src/Uncertainty/Algorithm/OrthogonalBasis/openturns/OrthogonalProductFunctionFactory.hxx
index ffb531d07f..158a47f67b 100644
--- a/lib/src/Uncertainty/Algorithm/OrthogonalBasis/openturns/OrthogonalProductFunctionFactory.hxx
+++ b/lib/src/Uncertainty/Algorithm/OrthogonalBasis/openturns/OrthogonalProductFunctionFactory.hxx
@@ -27,6 +27,8 @@
#include "openturns/OrthogonalUniVariateFunctionFamily.hxx"
#include "openturns/TensorizedUniVariateFunctionFactory.hxx"
#include "openturns/EnumerateFunction.hxx"
+#include "openturns/OrthogonalBasis.hxx"
+
BEGIN_NAMESPACE_OPENTURNS
@@ -62,12 +64,18 @@ public:
/** Build the Function of the given index */
Function build(const UnsignedInteger index) const override;
+ /** Build the Function of the given multi-indices */
+ Function build(const Indices & indices) const override;
+
  /** Return the enumerate function that translates unidimensional indices into multidimensional indices */
EnumerateFunction getEnumerateFunction() const override;
/** Return the collection of univariate orthogonal polynomial families */
FunctionFamilyCollection getFunctionFamilyCollection() const;
+ /** Get the function factory corresponding to marginal input indices */
+ OrthogonalBasis getMarginal(const Indices & indices) const override;
+
/** Virtual constructor */
OrthogonalProductFunctionFactory * clone() const override;
diff --git a/lib/src/Uncertainty/Algorithm/OrthogonalBasis/openturns/OrthogonalProductPolynomialFactory.hxx b/lib/src/Uncertainty/Algorithm/OrthogonalBasis/openturns/OrthogonalProductPolynomialFactory.hxx
index 8a3d903949..da892e9d29 100644
--- a/lib/src/Uncertainty/Algorithm/OrthogonalBasis/openturns/OrthogonalProductPolynomialFactory.hxx
+++ b/lib/src/Uncertainty/Algorithm/OrthogonalBasis/openturns/OrthogonalProductPolynomialFactory.hxx
@@ -22,6 +22,7 @@
#define OPENTURNS_ORTHOGONALPRODUCTPOLYNOMIALFACTORY_HXX
#include "openturns/OrthogonalFunctionFactory.hxx"
+#include "openturns/OrthogonalProductFunctionFactory.hxx"
#include "openturns/Distribution.hxx"
#include "openturns/Indices.hxx"
#include "openturns/Point.hxx"
@@ -80,6 +81,10 @@ public:
Sample getNodesAndWeights(const Indices & degrees,
Point & weightsOut) const;
+ /** Get the function factory corresponding to the given input marginal indices */
+ using OrthogonalFunctionFactory::getMarginal;
+ OrthogonalBasis getMarginal(const Indices & indices) const override;
+
/** String converter */
String __repr__() const override;
String __str__(const String & offset = "") const override;
diff --git a/lib/src/Uncertainty/Distribution/InverseWishart.cxx b/lib/src/Uncertainty/Distribution/InverseWishart.cxx
index 142c3d4374..55a83fdaa0 100644
--- a/lib/src/Uncertainty/Distribution/InverseWishart.cxx
+++ b/lib/src/Uncertainty/Distribution/InverseWishart.cxx
@@ -238,22 +238,33 @@ void InverseWishart::computeCovariance() const
{
const UnsignedInteger p = cholesky_.getDimension();
const Scalar den = (nu_ - p) * std::pow(nu_ - p - 1.0, 2) * (nu_ - p - 3.0);
- if (!(den > 0.0)) throw NotDefinedException(HERE) << "Error: the covariance of the inverse Wishart distribution is defined only if nu > p+3";
- const CovarianceMatrix V(getV());
- covariance_ = CovarianceMatrix(getDimension());
- UnsignedInteger indexRow = 0;
+ if (!(den > 0.0)) throw NotDefinedException(HERE) << "Error: the covariance of the inverse Wishart distribution is defined only if nu > p+3, here nu = " << nu_ << " and p = " << p;
+
+ // Get the collection of Indices of the random matrix
+ // in the order of the corresponding flattened random vector
+  Collection<Indices> matrixIndices;
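+  // e.g. for p = 3 the flattening order is (0,0), (1,0), (1,1), (2,0), (2,1), (2,2)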
for (UnsignedInteger i = 0; i < p; ++i)
+ {
for (UnsignedInteger j = 0; j <= i; ++j)
{
- UnsignedInteger indexColumn = 0;
- for (UnsignedInteger m = 0; m <= i; ++m)
- for (UnsignedInteger n = 0; n <= j; ++n)
- {
- covariance_(indexRow, indexColumn) = (2.0 * V(i, j) * V(m, n) + (nu_ - p - 1.0) * (V(i, m) * V(j, n) + V(i, n) * V(m, j))) / den;
- ++indexColumn;
- }
- ++indexRow;
+ matrixIndices.add(Indices({i, j}));
+ }
+ }
+
+ // Populate the covariance matrix of the flattened random vector
+ const CovarianceMatrix V(getV());
+ covariance_ = CovarianceMatrix(getDimension());
+ for (UnsignedInteger row = 0; row < matrixIndices.getSize(); ++row)
+ {
+ for (UnsignedInteger col = 0; col <= row; ++col)
+ {
+ const UnsignedInteger irow = matrixIndices[row][0];
+ const UnsignedInteger icol = matrixIndices[col][0];
+ const UnsignedInteger jrow = matrixIndices[row][1];
+ const UnsignedInteger jcol = matrixIndices[col][1];
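+      // Standard inverse-Wishart moment, with (i, j) = matrixIndices[row] and (k, l) = matrixIndices[col]:
+      // Cov(X_ij, X_kl) = (2 * V_ij * V_kl + (nu - p - 1) * (V_ik * V_jl + V_il * V_jk)) / den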
+      covariance_(row, col) = (2.0 * V(irow, jrow) * V(icol, jcol) + (nu_ - p - 1.0) * (V(irow, icol) * V(jrow, jcol) + V(irow, jcol) * V(jrow, icol))) / den;
}
+ }
isAlreadyComputedCovariance_ = true;
}
diff --git a/lib/src/Uncertainty/Distribution/Wishart.cxx b/lib/src/Uncertainty/Distribution/Wishart.cxx
index 2e7067376c..317fef1ef2 100644
--- a/lib/src/Uncertainty/Distribution/Wishart.cxx
+++ b/lib/src/Uncertainty/Distribution/Wishart.cxx
@@ -229,6 +229,38 @@ void Wishart::computeMean() const
isAlreadyComputedMean_ = true;
}
+/* Compute the covariance of the distribution */
+void Wishart::computeCovariance() const
+{
+ // Get the collection of Indices of the random matrix
+ // in the order of the corresponding flattened random vector
+ const UnsignedInteger p = cholesky_.getDimension();
+  Collection<Indices> matrixIndices;
+ for (UnsignedInteger i = 0; i < p; ++i)
+ {
+ for (UnsignedInteger j = 0; j <= i; ++j)
+ {
+ matrixIndices.add(Indices({i, j}));
+ }
+ }
+
+ // Populate the covariance matrix of the flattened random vector
+ const CovarianceMatrix V(getV());
+ covariance_ = CovarianceMatrix(getDimension());
+ for (UnsignedInteger row = 0; row < matrixIndices.getSize(); ++row)
+ {
+ for (UnsignedInteger col = 0; col <= row; ++col)
+ {
+ const UnsignedInteger irow = matrixIndices[row][0];
+ const UnsignedInteger icol = matrixIndices[col][0];
+ const UnsignedInteger jrow = matrixIndices[row][1];
+ const UnsignedInteger jcol = matrixIndices[col][1];
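+      // Wishart moment, with (i, j) = matrixIndices[row] and (k, l) = matrixIndices[col]:
+      // Cov(X_ij, X_kl) = nu * (V_ik * V_jl + V_il * V_jk)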
+ covariance_(row, col) = nu_ * (V(irow, jcol) * V(icol, jrow) + V(irow, icol) * V(jrow, jcol));
+ }
+ }
+ isAlreadyComputedCovariance_ = true;
+}
+
/* Compute the entropy of the distribution */
Scalar Wishart::computeEntropy() const
{
diff --git a/lib/src/Uncertainty/Distribution/openturns/Wishart.hxx b/lib/src/Uncertainty/Distribution/openturns/Wishart.hxx
index 9db59a82f1..ededc672c0 100644
--- a/lib/src/Uncertainty/Distribution/openturns/Wishart.hxx
+++ b/lib/src/Uncertainty/Distribution/openturns/Wishart.hxx
@@ -124,6 +124,9 @@ private:
/** Compute the mean of the distribution */
void computeMean() const override;
+ /** Compute the covariance of the distribution */
+ void computeCovariance() const override;
+
/** Compute the numerical range of the distribution given the parameters values */
void computeRange() override;
diff --git a/lib/test/CMakeLists.txt b/lib/test/CMakeLists.txt
index 7a4ffd21c4..062eb76660 100644
--- a/lib/test/CMakeLists.txt
+++ b/lib/test/CMakeLists.txt
@@ -642,19 +642,27 @@ ot_check_test (KrigingAlgorithm_nuggetFactor IGNOREOUT)
ot_check_test (KrigingRandomVector_std)
ot_check_test (MetaModelValidation_std)
ot_check_test (GeneralLinearModelAlgorithm_std IGNOREOUT)
+ot_check_test (GaussianProcessFitter_std IGNOREOUT)
+ot_check_test (GaussianProcessRegression_std IGNOREOUT)
+ot_check_test (GaussianProcessConditionalCovariance_std IGNOREOUT)
ot_check_test (LinearModelAlgorithm_std)
ot_check_test (LinearModelAnalysis_std)
ot_check_test (LinearModelValidation_std IGNOREOUT)
ot_check_test (KrigingAlgorithm_isotropic_std IGNOREOUT)
ot_check_test (OrthogonalProductPolynomialFactory_std)
+ot_check_test (OrthogonalProductFunctionFactory_std)
if (HMAT_FOUND)
ot_check_test (KrigingAlgorithm_std_hmat)
ot_check_test (GeneralLinearModelAlgorithm_std_hmat)
+ot_check_test (GaussianProcessFitter_std_hmat IGNOREOUT)
+ot_check_test (GaussianProcessRegression_std_hmat IGNOREOUT)
endif ()
if (NLopt_FOUND)
ot_check_test (GeneralLinearModelAlgorithm_nlopt)
+ot_check_test (GaussianProcessFitter_nlopt IGNOREOUT)
+ot_check_test (GaussianProcessRegression_nlopt IGNOREOUT)
endif ()
# Sensitivity
diff --git a/lib/test/t_GaussianProcessConditionalCovariance_std.cxx b/lib/test/t_GaussianProcessConditionalCovariance_std.cxx
new file mode 100644
index 0000000000..6fa89db7eb
--- /dev/null
+++ b/lib/test/t_GaussianProcessConditionalCovariance_std.cxx
@@ -0,0 +1,331 @@
+// -*- C++ -*-
+/**
+ * @brief The test file of GaussianProcessConditionalCovariance class
+ *
+ * Copyright 2005-2024 Airbus-EDF-IMACS-ONERA-Phimeca
+ *
+ * This library is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+#include "openturns/OT.hxx"
+#include "openturns/OTtestcode.hxx"
+
+using namespace OT;
+using namespace OT::Test;
+
+int main(int, char *[])
+{
+ TESTPREAMBLE;
+ OStream fullprint(std::cout);
+ setRandomGenerator();
+ ResourceMap::SetAsUnsignedInteger("OptimizationAlgorithm-DefaultMaximumCallsNumber", 20000);
+ ResourceMap::SetAsScalar("Cobyla-DefaultRhoBeg", 0.5);
+ ResourceMap::SetAsScalar("OptimizationAlgorithm-DefaultMaximumAbsoluteError", 1e-8);
+
+
+ try
+ {
+ // Test 1: a real value function
+ {
+ // Set Numerical precision to 4
+ PlatformInfo::SetNumericalPrecision(4);
+ const UnsignedInteger sampleSize = 6;
+ const UnsignedInteger dimension = 1;
+
+ const SymbolicFunction f("x", "x * sin(x)");
+
+ Sample inputTrain(sampleSize, dimension);
+ Sample inputTest(sampleSize, dimension);
+ for (UnsignedInteger i = 0; i < sampleSize; ++i)
+ {
+ inputTrain(i, 0) = 3.0 + i;
+ inputTest(i, 0) = 2.5 + i;
+ }
+ inputTrain(0, 0) = 1.0;
+ inputTrain(1, 0) = 3.0;
+ // input test sample
+ inputTest(0, 0) = 2.0;
+ inputTest(1, 0) = 4.0;
+ const Sample outputTrain(f(inputTrain));
+
+ // create covariance model
+ const Basis basis(ConstantBasisFactory(dimension).build());
+ const SquaredExponential covarianceModel;
+
+ // create algorithm
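+      // (two-step workflow: GaussianProcessFitter estimates the trend coefficients and
+      // covariance hyperparameters, then GaussianProcessRegression conditions the process on the data)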
+ GaussianProcessFitter fit_algo(inputTrain, outputTrain, covarianceModel, basis);
+
+ // set sensible optimization bounds and estimate hyper parameters
+ fit_algo.setOptimizationBounds(Interval(inputTrain.getMin(), inputTrain.getMax()));
+ fit_algo.run();
+
+ // perform an evaluation
+ GaussianProcessRegression algo(fit_algo.getResult());
+ algo.run();
+ GaussianProcessRegressionResult result(algo.getResult());
+
+      // build the conditional covariance helper (GPCC) from the regression result
+ GaussianProcessConditionalCovariance gccc(result);
+
+ CovarianceMatrix covariance(gccc.getConditionalCovariance(inputTrain));
+ SquareMatrix nullMatrix(sampleSize);
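+      // assert_almost_equal(value, reference, relative tolerance, absolute tolerance)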
+ assert_almost_equal(covariance, nullMatrix, 0.0, 1e-13);
+
+ // Covariance per marginal & extract variance component
+      Collection<CovarianceMatrix> coll(gccc.getConditionalMarginalCovariance(inputTrain));
+ Point variance(coll.getSize());
+ const Point nullVariance(coll.getSize());
+ for (UnsignedInteger index = 0; index < coll.getSize(); ++ index)
+ variance[index] = coll[index](0,0);
+
+ assert_almost_equal(variance, nullVariance, 1e-14, 1e-13);
+
+ // Variance per marginal
+ assert_almost_equal(gccc.getConditionalMarginalVariance(inputTrain), Sample(sampleSize, 1), 1e-14, 1e-13);
+
+
+      // Kriging variance is nonzero on validation points
+ CovarianceMatrix validCovariance(gccc.getConditionalCovariance(inputTest));
+
+ const Point rowData = {0.81942182, -0.35599947, -0.17488593, 0.04622401, -0.03143555, 0.04054783,\
+ -0.35599947, 0.20874735, 0.10943841, -0.03236419, 0.02397483, -0.03269184,\
+ -0.17488593, 0.10943841, 0.05832917, -0.01779918, 0.01355719, -0.01891618,\
+ 0.04622401, -0.03236419, -0.01779918, 0.00578327, -0.00467674, 0.00688697,\
+ -0.03143555, 0.02397483, 0.01355719, -0.00467674, 0.0040267, -0.00631173,\
+ 0.04054783, -0.03269184, -0.01891618, 0.00688697, -0.00631173, 0.01059488};
+
+ Matrix values(sampleSize, sampleSize, rowData);
+
+ assert_almost_equal(validCovariance - values.transpose(), nullMatrix, 1.e-5, 1e-6);
+ }
+    // Test 2: 2 inputs, one output
+ {
+ const UnsignedInteger dimension = 2;
+ const Description inputDesc ={"x", "y"};
+ const Description outputDesc ={"cos(0.5*x) + sin(y)"};
+ const SymbolicFunction model(inputDesc, outputDesc);
+
+ const Indices levels = {8, 5};
+ const Box box(levels);
+ Sample inputSample(box.generate());
+
+ // Scale each direction
+ inputSample *= 10.0;
+ const Sample outputSample(model(inputSample));
+
+      // Definition of exponential model
+ // The parameters have been calibrated using Cobyla optimization
+ // and AbsoluteExponential models
+ const Point scale = {5.33532, 2.61534};
+ const Point amplitude = {1.61536};
+ SquaredExponential covarianceModel(scale, amplitude);
+
+ // Basis definition
+ const Basis basis(ConstantBasisFactory(dimension).build());
+
+ // GP algorithm
+ // create algorithm
+ GaussianProcessFitter fit_algo(inputSample, outputSample, covarianceModel, basis);
+ // set sensible optimization bounds and estimate hyper parameters
+ fit_algo.setOptimizationBounds(Interval(inputSample.getMin(), inputSample.getMax()));
+ fit_algo.run();
+
+ // perform an evaluation
+ GaussianProcessRegression algo(fit_algo.getResult());
+ algo.run();
+
+ GaussianProcessRegressionResult result(algo.getResult());
+
+      // Kriging variance is 0 on learning points
+ GaussianProcessConditionalCovariance gccc(result);
+
+ const CovarianceMatrix covariance(gccc.getConditionalCovariance(inputSample));
+ assert_almost_equal(covariance, SquareMatrix(inputSample.getSize()), 7e-7, 7e-7);
+
+ // Covariance per marginal & extract variance component
+      Collection<CovarianceMatrix> condMargCov(gccc.getConditionalMarginalCovariance(inputSample));
+ Point variance(condMargCov.getSize());
+ const Point nullVariance(condMargCov.getSize());
+ for (UnsignedInteger index = 0; index < condMargCov.getSize(); ++index)
+ variance[index] = condMargCov[index](0,0);
+
+ assert_almost_equal(variance, nullVariance, 1e-14, 1e-13);
+
+ // Variance per marginal
+ assert_almost_equal(gccc.getConditionalMarginalVariance(inputSample),\
+ Sample(inputSample.getSize(), 1), 1e-14, 1e-13);
+
+ }
+    // Test 3: R --> R^2 function
+ {
+ const Description inputDesc ={"x"};
+ const Description outputDesc ={"x * sin(x)", "x * cos(x)"};
+ const SymbolicFunction f(inputDesc, outputDesc);
+ const UnsignedInteger sampleSize = 8;
+ const UnsignedInteger dimension = 1;
+ Sample sampleX(sampleSize, dimension);
+ for (UnsignedInteger i = 0; i < sampleSize; ++i)
+ sampleX(i, 0) = 1.0 + i;
+ const Sample sampleY(f(sampleX));
+ const Point scale = {1.0};
+ const Point amplitude = {1.0};
+ SquaredExponential kernel(scale, amplitude);
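+      // empty active set: the kernel parameters are kept fixed during the fit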
+ kernel.setActiveParameter(Indices(0));
+      Collection<CovarianceModel> collectionCov;
+ collectionCov.add(kernel);
+ collectionCov.add(kernel);
+ TensorizedCovarianceModel covarianceModel(collectionCov);
+
+ // Basis definition
+ // Build a basis phi from R --> R^2
+ // phi_{0,0} = phi_{0,1} = x
+      // phi_{1,0} = phi_{1,1} = x^2
+ const AggregatedFunction phi0({SymbolicFunction("x", "x"), SymbolicFunction("x", "x")});
+ const AggregatedFunction phi1({SymbolicFunction("x", "x^2"), SymbolicFunction("x", "x^2")});
+      const Collection<Function> collFunctions = {phi0, phi1};
+ const Basis basis(collFunctions);
+
+ // GP algorithm
+ // create algorithm
+ GaussianProcessFitter fit_algo(sampleX, sampleY, covarianceModel, basis);
+ fit_algo.run();
+
+ // perform an evaluation
+ GaussianProcessRegression algo(fit_algo.getResult());
+ algo.run();
+
+ GaussianProcessRegressionResult result(algo.getResult());
+
+ // Kriging variance is 0 on learning points
+ GaussianProcessConditionalCovariance gccc(result);
+
+ const Point rowData = {4.4527, 0.0, 8.34404, 0.0, 0.0, 2.8883, 0.0, 5.41246,\
+ 8.34404, 0.0, 15.7824, 0.0, 0.0, 5.41246, 0.0, 10.2375};
+ const Matrix reference_covariance(4, 4, rowData);
+ const SquareMatrix nullMatrix(4);
+ const Point pointOfInterest = {9.5, 10.0};
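+      // BuildFromPoint turns this 2-component point into a sample of two 1-D points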
+ const Sample sample(Sample::BuildFromPoint(pointOfInterest));
+ const CovarianceMatrix covarianceMat(gccc(sample).getCovariance());
+ assert_almost_equal(covarianceMat - reference_covariance.transpose(), nullMatrix, 0.0, 2e-2);
+
+ }
+ // stationary cov function - fix https://github.com/openturns/openturns/issues/1861
+ {
+ RandomGenerator::SetSeed(0);
+ const SymbolicFunction rho("tau", "exp(-abs(tau))*cos(2*pi_*abs(tau))");
+ const StationaryFunctionalCovarianceModel covarianceModel({1.0}, {1.0}, rho);
+ Sample X(Normal(0, 1.0).getSample(20));
+ X.setDescription({"J0"});
+ Sample Y(Normal(0, 0.1).getSample(20));
+ Y = Y + X;
+ Y.setDescription({"G0"});
+
+ // Basis definition
+ const Basis basis(LinearBasisFactory(1).build());
+
+ // GP algorithm
+ // create algorithm
+ GaussianProcessFitter fit_algo(X, Y, covarianceModel, basis);
+ fit_algo.run();
+
+ // perform an evaluation
+ GaussianProcessRegression algo(fit_algo.getResult());
+ algo.run();
+
+ GaussianProcessRegressionResult result(algo.getResult());
+ // GPCC
+ GaussianProcessConditionalCovariance gccc(result);
+ // Variance per marginal
+ assert_almost_equal(gccc.getConditionalMarginalVariance(X), Sample(X.getSize(), 1), 1e-15, 1e-15);
+
+ }
+ // GPR with already calibrated parameters
+ {
+ const UnsignedInteger sampleSize = 6;
+ const UnsignedInteger dimension = 1;
+
+ const SymbolicFunction f("x", "x * sin(x)");
+
+ Sample inputTrain(sampleSize, dimension);
+ Sample inputTest(sampleSize, dimension);
+ for (UnsignedInteger i = 0; i < sampleSize; ++i)
+ {
+ inputTrain(i, 0) = 3.0 + i;
+ inputTest(i, 0) = 2.5 + i;
+ }
+ inputTrain(0, 0) = 1.0;
+ inputTrain(1, 0) = 3.0;
+ // input test sample
+ inputTest(0, 0) = 2.0;
+ inputTest(1, 0) = 4.0;
+ const Sample outputTrain(f(inputTrain));
+
+ // create covariance model
+ const Basis basis(ConstantBasisFactory(dimension).build());
+ const Point scale = {1.6326932047296538};
+ const Point amplitude = {4.895995962015954};
+ const SquaredExponential covarianceModel(scale, amplitude);
+
+ const SymbolicFunction trend_function("x", "1.49543");
+
+      // no fitter step: the covariance parameters are already calibrated
+
+ // create algorithm
+ GaussianProcessRegression algo(inputTrain, outputTrain, covarianceModel, trend_function);
+ algo.run();
+ GaussianProcessRegressionResult result(algo.getResult());
+ // GPCC
+ GaussianProcessConditionalCovariance gccc(result);
+
+ CovarianceMatrix covariance(gccc.getConditionalCovariance(inputTrain));
+ SquareMatrix nullMatrix(sampleSize);
+ assert_almost_equal(covariance, nullMatrix, 0.0, 1e-13);
+
+ // Covariance per marginal & extract variance component
+      Collection<CovarianceMatrix> coll(gccc.getConditionalMarginalCovariance(inputTrain));
+ Point variance(coll.getSize());
+ const Point nullVariance(coll.getSize());
+ for (UnsignedInteger index = 0; index < coll.getSize(); ++ index)
+ variance[index] = coll[index](0,0);
+
+ assert_almost_equal(variance, nullVariance, 1e-14, 1e-13);
+
+ // Variance per marginal
+ assert_almost_equal(gccc.getConditionalMarginalVariance(inputTrain), Sample(sampleSize, 1), 1e-14, 1e-13);
+
+
+      // Kriging variance is nonzero on validation points
+ CovarianceMatrix validCovariance(gccc.getConditionalCovariance(inputTest));
+
+ const Point rowData = {0.786400318519185, -0.342314710430317, -0.167625132016427, 0.0437937446519361, -0.0291542115306344, 0.0362074153614559,\
+ -0.342314710430317, 0.20307609313608, 0.106429376006901, -0.0313570361766278, 0.0230293899173111, -0.0308930847149105,\
+ -0.167625132016427, 0.106429376006901, 0.0567326538237296, -0.0172648099111221, 0.0130555631357385, -0.0179618049872801,\
+ 0.0437937446519361, -0.0313570361766278, -0.0172648099111221, 0.00560441404059731, -0.00450884121944028, 0.00656752917461922,\
+ -0.0291542115306344, 0.0230293899173111, 0.0130555631357385, -0.00450884121944028, 0.00386908619998749, -0.00601186391616793,\
+ 0.0362074153614559, -0.0308930847149105, -0.0179618049872801, 0.00656752917461922, -0.00601186391616793, 0.0100243621895402};
+
+ const Matrix values(sampleSize, sampleSize, rowData);
+ assert_almost_equal(validCovariance - values.transpose(), nullMatrix, 1.e-8, 1e-8);
+ }
+
+ }
+ catch (TestFailed & ex)
+ {
+ std::cerr << ex << std::endl;
+ return ExitCode::Error;
+ }
+
+
+ return ExitCode::Success;
+}
diff --git a/lib/test/t_GaussianProcessFitter_nlopt.cxx b/lib/test/t_GaussianProcessFitter_nlopt.cxx
new file mode 100644
index 0000000000..43dda7c798
--- /dev/null
+++ b/lib/test/t_GaussianProcessFitter_nlopt.cxx
@@ -0,0 +1,77 @@
+// -*- C++ -*-
+/**
+ * @brief The test file of GaussianProcessFitter class
+ *
+ * Copyright 2005-2024 Airbus-EDF-IMACS-ONERA-Phimeca
+ *
+ * This library is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+#include "openturns/OT.hxx"
+#include "openturns/OTtestcode.hxx"
+
+using namespace OT;
+using namespace OT::Test;
+
+
+int main(int, char *[])
+{
+ TESTPREAMBLE;
+ OStream fullprint(std::cout);
+ setRandomGenerator();
+
+ try
+ {
+ // Set Numerical precision to 3
+ PlatformInfo::SetNumericalPrecision(3);
+
+ // Calibration of default optimizer
+ ResourceMap::SetAsScalar("GaussianProcessFitter-DefaultOptimizationLowerBound", 1.0e-5);
+ ResourceMap::SetAsScalar("GaussianProcessFitter-DefaultOptimizationUpperBound", 100);
+ // Data & estimation
+ const UnsignedInteger inputDimension = 1;
+ Sample X = Normal(0, 1).getSample(100);
+ X = X.sortAccordingToAComponent(0);
+ SquaredExponential covarianceModel(1);
+ Description inDescription(1);
+ inDescription[0] = "x";
+ Description formula(1);
+ formula[0] = "x - 0.6 * cos(x/3)";
+ SymbolicFunction model(inDescription, formula);
+ const Sample Y = model(X);
+ const Basis basis = QuadraticBasisFactory(inputDimension).build();
+ GaussianProcessFitter algo(X, Y, covarianceModel, basis);
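+    // use NLopt's Nelder-Mead, a local derivative-free simplex method, to maximize the likelihood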
+ NLopt solver("LN_NELDERMEAD");
+ algo.setOptimizationAlgorithm(solver);
+ algo.run();
+
+ // perform an evaluation
+ GaussianProcessFitterResult result = algo.getResult();
+ const Function metaModel = result.getMetaModel();
+ const CovarianceModel conditionalCovariance = result.getCovarianceModel();
+ const Sample residual = metaModel(X) - Y;
+ assert_almost_equal(residual.computeCentralMoment(2), Point(1, 1.06e-05), 1e-5, 1e-5);
+ const Point parameter = {0.619144, 0.000937};
+ assert_almost_equal(conditionalCovariance.getParameter(), parameter, 1e-2, 1e-2);
+
+ }
+ catch (TestFailed & ex)
+ {
+ std::cerr << ex << std::endl;
+ return ExitCode::Error;
+ }
+
+
+ return ExitCode::Success;
+}
diff --git a/lib/test/t_GaussianProcessFitter_std.cxx b/lib/test/t_GaussianProcessFitter_std.cxx
new file mode 100644
index 0000000000..b63747f9dd
--- /dev/null
+++ b/lib/test/t_GaussianProcessFitter_std.cxx
@@ -0,0 +1,110 @@
+// -*- C++ -*-
+/**
+ * @brief The test file of GaussianProcessFitter class
+ *
+ * Copyright 2005-2024 Airbus-EDF-IMACS-ONERA-Phimeca
+ *
+ * This library is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+#include "openturns/OT.hxx"
+#include "openturns/OTtestcode.hxx"
+
+using namespace OT;
+using namespace OT::Test;
+
+int main(int, char *[])
+{
+ TESTPREAMBLE;
+ OStream fullprint(std::cout);
+ setRandomGenerator();
+
+ try
+ {
+ // Set Numerical precision to 4
+ PlatformInfo::SetNumericalPrecision(4);
+ UnsignedInteger sampleSize = 40;
+ UnsignedInteger inputDimension = 1;
+
+ // Create the function to estimate
+ SymbolicFunction model("x0", "x0");
+
+ Sample X(sampleSize, inputDimension);
+ for (UnsignedInteger i = 0; i < sampleSize; ++ i)
+ X(i, 0) = 3.0 + (8.0 * i) / sampleSize;
+ Sample Y = model(X);
+
+ // Add a small noise to data
+ Y += GaussianProcess(AbsoluteExponential(Point(1, 0.1), Point(1, 0.2)), Mesh(X)).getRealization().getValues();
+
+ Basis basis = LinearBasisFactory(inputDimension).build();
+ // Case of a misspecified covariance model
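+    // (DiracCovarianceModel is a pure white-noise / nugget kernel)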
+ DiracCovarianceModel covarianceModel(inputDimension);
+ GaussianProcessFitter algo(X, Y, covarianceModel, basis);
+ algo.run();
+
+ GaussianProcessFitterResult result = algo.getResult();
+ Point ref = {0.1957};
+ assert_almost_equal(result.getCovarianceModel().getParameter(), ref, 1e-4, 1e-4);
+ ref = {-0.1109, 1.015};
+ assert_almost_equal(result.getTrendCoefficients(), ref, 1e-4, 1e-4);
+ // Now without estimating covariance parameters
+ basis = LinearBasisFactory(inputDimension).build();
+ covarianceModel = DiracCovarianceModel(inputDimension);
+ algo = GaussianProcessFitter(X, Y, covarianceModel, basis);
+ algo.setOptimizeParameters(false);
+ algo.run();
+ result = algo.getResult();
+ ref = {1.0};
+ assert_almost_equal(result.getCovarianceModel().getParameter(), ref, 1e-4, 1e-4);
+ ref = {-0.1109, 1.015};
+ assert_almost_equal(result.getTrendCoefficients(), ref, 1e-4, 1e-4);
+ // Case of a well specified covariance model
+ // Test the optimization when the amplitude is deduced analytically from the scale
+ {
+ AbsoluteExponential covarianceModel2(inputDimension);
+ algo = GaussianProcessFitter(X, Y, covarianceModel2, basis);
+ algo.run();
+ result = algo.getResult();
+ ref = {0.1328, 0.1956};
+ assert_almost_equal(result.getCovarianceModel().getParameter(), ref, 1e-4, 1e-4);
+ ref = {-0.1034, 1.014};
+ assert_almost_equal(result.getTrendCoefficients(), ref, 1e-4, 1e-4);
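+      // switch to the biased maximum-likelihood variance estimate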
+ ResourceMap::SetAsBool("GaussianProcessFitter-UnbiasedVariance", false);
+ algo = GaussianProcessFitter(X, Y, covarianceModel2, basis);
+ algo.run();
+ result = algo.getResult();
+ ref = {0.1328, 0.1907};
+ assert_almost_equal(result.getCovarianceModel().getParameter(), ref, 1e-4, 1e-4);
+ ref = {-0.1034, 1.014};
+ assert_almost_equal(result.getTrendCoefficients(), ref, 1e-4, 1e-4);
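+      // estimate the amplitude numerically together with the scale instead of in closed form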
+ ResourceMap::SetAsBool("GaussianProcessFitter-UseAnalyticalAmplitudeEstimate", false);
+ algo = GaussianProcessFitter(X, Y, covarianceModel2, basis);
+ algo.run();
+ result = algo.getResult();
+ ref = {0.1328, 0.1908};
+ assert_almost_equal(result.getCovarianceModel().getParameter(), ref, 1e-2, 1e-2);
+ ref = {-0.1034, 1.014};
+ assert_almost_equal(result.getTrendCoefficients(), ref, 1e-4, 1e-4);
+ }
+ }
+ catch (TestFailed & ex)
+ {
+ std::cerr << ex << std::endl;
+ return ExitCode::Error;
+ }
+
+
+ return ExitCode::Success;
+}
diff --git a/lib/test/t_GaussianProcessFitter_std_hmat.cxx b/lib/test/t_GaussianProcessFitter_std_hmat.cxx
new file mode 100644
index 0000000000..049bd2d6d4
--- /dev/null
+++ b/lib/test/t_GaussianProcessFitter_std_hmat.cxx
@@ -0,0 +1,86 @@
+// -*- C++ -*-
+/**
+ * @brief The test file of GaussianProcessFitter class
+ *
+ * Copyright 2005-2024 Airbus-EDF-IMACS-ONERA-Phimeca
+ *
+ * This library is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+#include "openturns/OT.hxx"
+#include "openturns/OTtestcode.hxx"
+
+using namespace OT;
+using namespace OT::Test;
+
+
+int main(int, char *[])
+{
+ TESTPREAMBLE;
+ OStream fullprint(std::cout);
+ setRandomGenerator();
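+  // use hierarchical-matrix (HMAT) compression instead of dense LAPACK linear algebra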
+ ResourceMap::Set("GaussianProcessFitter-LinearAlgebra", "HMAT");
+
+ try
+ {
+ // Set Numerical precision to 3
+ PlatformInfo::SetNumericalPrecision(3);
+
+ UnsignedInteger sampleSize = 6;
+ UnsignedInteger inputDimension = 1;
+
+ // Create the function to estimate
+ Description input(inputDimension);
+ input[0] = "x0";
+ Description formulas(1);
+ formulas[0] = "x0";
+ SymbolicFunction model(input, formulas);
+
+ Sample X(sampleSize, inputDimension);
+ Sample X2(sampleSize, inputDimension);
+ for ( UnsignedInteger i = 0; i < sampleSize; ++ i )
+ {
+ X(i, 0) = 3.0 + i;
+ X2(i, 0) = 2.5 + i;
+ }
+ X(0, 0) = 1.0;
+ X(1, 0) = 3.0;
+ X2(0, 0) = 2.0;
+ X2(1, 0) = 4.0;
+ Sample Y = model(X);
+ for ( UnsignedInteger i = 0; i < sampleSize; ++ i )
+ {
+ Y(i, 0) += 0.01 * DistFunc::rNormal();
+ }
+ Basis basis = LinearBasisFactory(inputDimension).build();
+ DiracCovarianceModel covarianceModel(inputDimension);
+ GaussianProcessFitter algo(X, Y, covarianceModel, basis);
+ algo.run();
+
+ // perform an evaluation
+ GaussianProcessFitterResult result = algo.getResult();
+ Function metaModel = result.getMetaModel();
+ const Sample residual = metaModel(X) - Y;
+ assert_almost_equal(residual.computeCentralMoment(2), Point(1, 0.00013144), 1e-5, 1e-5);
+
+ }
+ catch (TestFailed & ex)
+ {
+ std::cerr << ex << std::endl;
+ return ExitCode::Error;
+ }
+
+
+ return ExitCode::Success;
+}
diff --git a/lib/test/t_GaussianProcessRegression_nlopt.cxx b/lib/test/t_GaussianProcessRegression_nlopt.cxx
new file mode 100644
index 0000000000..eaa7b677de
--- /dev/null
+++ b/lib/test/t_GaussianProcessRegression_nlopt.cxx
@@ -0,0 +1,230 @@
+// -*- C++ -*-
+/**
+ * @brief The test file of GaussianProcessRegression class using NLOpt
+ *
+ * Copyright 2005-2024 Airbus-EDF-IMACS-ONERA-Phimeca
+ *
+ * This library is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+#include "openturns/OT.hxx"
+#include "openturns/OTtestcode.hxx"
+
+using namespace OT;
+using namespace OT::Test;
+
+int main(int, char *[])
+{
+ TESTPREAMBLE;
+ OStream fullprint(std::cout);
+ setRandomGenerator();
+ ResourceMap::SetAsUnsignedInteger("OptimizationAlgorithm-DefaultMaximumCallsNumber", 10000);
+ ResourceMap::SetAsScalar("Cobyla-DefaultRhoBeg", 0.5);
+
+ try
+ {
+ // Test 1: a real value function
+ {
+ // Set Numerical precision to 4
+ PlatformInfo::SetNumericalPrecision(4);
+ const UnsignedInteger sampleSize = 6;
+ const UnsignedInteger dimension = 1;
+
+ const SymbolicFunction f("x", "x * sin(x)");
+
+ Sample inputTrain(sampleSize, dimension);
+ for (UnsignedInteger i = 0; i < sampleSize; ++i)
+ {
+ inputTrain(i, 0) = 3.0 + i;
+ }
+ inputTrain(0, 0) = 1.0;
+ inputTrain(1, 0) = 3.0;
+ // input test sample
+ const Sample outputTrain(f(inputTrain));
+
+ // create covariance model
+ const Basis basis(ConstantBasisFactory(dimension).build());
+ const SquaredExponential covarianceModel;
+
+ // create algorithm
+ GaussianProcessFitter fit_algo(inputTrain, outputTrain, covarianceModel, basis);
+
+ // set sensible optimization bounds and estimate hyper parameters
+ fit_algo.setOptimizationBounds(Interval(inputTrain.getMin(), inputTrain.getMax()));
+ NLopt solver("LN_NELDERMEAD");
+ fit_algo.setOptimizationAlgorithm(solver);
+ fit_algo.run();
+
+ // perform an evaluation
+ GaussianProcessRegression algo(fit_algo.getResult());
+ algo.run();
+ GaussianProcessRegressionResult result(algo.getResult());
+ assert_almost_equal(result.getMetaModel()(inputTrain), outputTrain);
+ assert_almost_equal(result.getResiduals(), {1.32804e-07}, 1e-3, 1e-3);
+ assert_almost_equal(result.getRelativeErrors(), {5.20873e-21});
+
+ // Prediction accuracy
+ const Point point = {7.5};
+ assert_almost_equal(result.getMetaModel()(point), f(point), 0.3, 0.0);
+
+ }
+    // Test 2: 2 inputs, one output
+ {
+ const UnsignedInteger sampleSize = 10;
+ const UnsignedInteger dimension = 2;
+ const Description inputDesc ={"x", "y"};
+ const Description outputDesc ={"cos(0.5*x) + sin(y)"};
+ const SymbolicFunction model(inputDesc, outputDesc);
+
+ const Indices levels = {8, 5};
+ const Box box(levels);
+ Sample inputSample(box.generate());
+
+ // Scale each direction
+ inputSample *= 10.0;
+ const Sample outputSample(model(inputSample));
+
+ // Validation
+      Collection<Distribution> coll;
+ coll.add(Uniform(0, 10.0));
+ coll.add(Uniform(0, 10.0));
+ const JointDistribution dist(coll);
+ const Sample inputValidSample(dist.getSample(sampleSize));
+ const Sample outputValidSample(model(inputValidSample));
+
+      // Definition of exponential model
+ // The parameters have been calibrated using TNC optimization
+ // and AbsoluteExponential models
+ const Point scale = {5.33532, 2.61534};
+ const Point amplitude = {1.61536};
+ SquaredExponential covarianceModel(scale, amplitude);
+
+ // Basis definition
+ const Basis basis(ConstantBasisFactory(dimension).build());
+
+ // GP algorithm
+ // create algorithm
+ GaussianProcessFitter fit_algo(inputSample, outputSample, covarianceModel, basis);
+ // set sensible optimization bounds and estimate hyper parameters
+ fit_algo.setOptimizationBounds(Interval(inputSample.getMin(), inputSample.getMax()));
+ NLopt solver("LN_NELDERMEAD");
+ fit_algo.setOptimizationAlgorithm(solver);
+ fit_algo.run();
+
+ // perform an evaluation
+ GaussianProcessRegression algo(fit_algo.getResult());
+ algo.run();
+
+ GaussianProcessRegressionResult result(algo.getResult());
+ // Get meta model
+ Function metaModel(result.getMetaModel());
+ const Sample outData(metaModel(inputValidSample));
+      // Errors
+ // Interpolation
+ assert_almost_equal(outputSample, metaModel(inputSample), 3.0e-5, 3.0e-5);
+ // Prediction
+ assert_almost_equal(outputValidSample, outData, 1.0e-1, 1e-1);
+ }
+    // Test 3: R --> R^2 function
+ {
+ const Description inputDesc ={"x"};
+ const Description outputDesc ={"x * sin(x)", "x * cos(x)"};
+ const SymbolicFunction f(inputDesc, outputDesc);
+ const UnsignedInteger sampleSize = 8;
+ const UnsignedInteger dimension = 1;
+ Sample sampleX(sampleSize, dimension);
+ for (UnsignedInteger i = 0; i < sampleSize; ++i)
+ sampleX(i, 0) = 1.0 + i;
+ const Sample sampleY(f(sampleX));
+ const Point scale = {1.0};
+ const Point amplitude = {1.0};
+ SquaredExponential kernel(scale, amplitude);
+ kernel.setActiveParameter(Indices(0));
+      Collection<CovarianceModel> collectionCov;
+ collectionCov.add(kernel);
+ collectionCov.add(kernel);
+ TensorizedCovarianceModel covarianceModel(collectionCov);
+
+ // Basis definition
+ // Build a basis phi from R --> R^2
+ // phi_{0,0} = phi_{0,1} = x
+      // phi_{1,0} = phi_{1,1} = x^2
+ const AggregatedFunction phi0({SymbolicFunction("x", "x"), SymbolicFunction("x", "x")});
+ const AggregatedFunction phi1({SymbolicFunction("x", "x^2"), SymbolicFunction("x", "x^2")});
+      const Collection<Function> collFunctions = {phi0, phi1};
+ const Basis basis(collFunctions);
+
+ // GP algorithm
+ // create algorithm
+ GaussianProcessFitter fit_algo(sampleX, sampleY, covarianceModel, basis);
+ NLopt solver("LN_NELDERMEAD");
+ fit_algo.setOptimizationAlgorithm(solver);
+ fit_algo.run();
+
+ // perform an evaluation
+ GaussianProcessRegression algo(fit_algo.getResult());
+ algo.run();
+
+ GaussianProcessRegressionResult result(algo.getResult());
+ // Get meta model
+ Function metaModel(result.getMetaModel());
+ // Prediction & interpolation
+ //assert_almost_equal(sampleY, metaModel(sampleX));
+ const Point point = {5.5};
+ const Point outputValue = {-3.88363, 3.90286};
+ assert_almost_equal(metaModel(point), outputValue);
+ }
+ // stationary cov function - fix https://github.com/openturns/openturns/issues/1861
+ {
+ RandomGenerator::SetSeed(0);
+ const SymbolicFunction rho("tau", "exp(-abs(tau))*cos(2*pi_*abs(tau))");
+ const StationaryFunctionalCovarianceModel covarianceModel({1.0}, {1.0}, rho);
+ Sample X(Normal(0, 1.0).getSample(20));
+ X.setDescription({"J0"});
+ Sample Y(Normal(0, 0.1).getSample(20));
+ Y = Y + X;
+ Y.setDescription({"G0"});
+
+ // Basis definition
+ const Basis basis(LinearBasisFactory(1).build());
+
+ // GP algorithm
+ // create algorithm
+ GaussianProcessFitter fit_algo(X, Y, covarianceModel, basis);
+ NLopt solver("LN_NELDERMEAD");
+ fit_algo.setOptimizationAlgorithm(solver);
+ fit_algo.run();
+
+ // perform an evaluation
+      GaussianProcessRegression algo(fit_algo.getResult());
+ algo.run();
+
+ GaussianProcessRegressionResult result(algo.getResult());
+ // Get meta model
+ Function metaModel(result.getMetaModel());
+ const Point point = {5.5};
+ const Point outputValue = {5.58838};
+ assert_almost_equal(metaModel(point), outputValue);
+ }
+
+ }
+ catch (TestFailed & ex)
+ {
+ std::cerr << ex << std::endl;
+ return ExitCode::Error;
+ }
+
+
+ return ExitCode::Success;
+}
diff --git a/lib/test/t_GaussianProcessRegression_std.cxx b/lib/test/t_GaussianProcessRegression_std.cxx
new file mode 100644
index 0000000000..3541ba604f
--- /dev/null
+++ b/lib/test/t_GaussianProcessRegression_std.cxx
@@ -0,0 +1,271 @@
+// -*- C++ -*-
+/**
+ * @brief The test file of GaussianProcessRegression class
+ *
+ * Copyright 2005-2024 Airbus-EDF-IMACS-ONERA-Phimeca
+ *
+ * This library is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+#include "openturns/OT.hxx"
+#include "openturns/OTtestcode.hxx"
+
+using namespace OT;
+using namespace OT::Test;
+
+int main(int, char *[])
+{
+ TESTPREAMBLE;
+ OStream fullprint(std::cout);
+ setRandomGenerator();
+ ResourceMap::SetAsUnsignedInteger("OptimizationAlgorithm-DefaultMaximumCallsNumber", 10000);
+ ResourceMap::SetAsScalar("Cobyla-DefaultRhoBeg", 0.5);
+
+ try
+ {
+ // Test 1: a real value function
+ {
+ // Set Numerical precision to 4
+ PlatformInfo::SetNumericalPrecision(4);
+ const UnsignedInteger sampleSize = 6;
+ const UnsignedInteger dimension = 1;
+
+ const SymbolicFunction f("x", "x * sin(x)");
+
+ Sample inputTrain(sampleSize, dimension);
+ Sample inputTest(sampleSize, dimension);
+ for (UnsignedInteger i = 0; i < sampleSize; ++i)
+ {
+ inputTrain(i, 0) = 3.0 + i;
+ inputTest(i, 0) = 2.5 + i;
+ }
+ inputTrain(0, 0) = 1.0;
+ inputTrain(1, 0) = 3.0;
+ // input test sample
+ inputTest(0, 0) = 2.0;
+ inputTest(1, 0) = 4.0;
+ const Sample outputTrain(f(inputTrain));
+ const Sample outputTest(f(inputTest));
+
+ // create covariance model
+ const Basis basis(ConstantBasisFactory(dimension).build());
+ const SquaredExponential covarianceModel;
+
+ // create algorithm
+ GaussianProcessFitter fit_algo(inputTrain, outputTrain, covarianceModel, basis);
+
+ // set sensible optimization bounds and estimate hyper parameters
+ fit_algo.setOptimizationBounds(Interval(inputTrain.getMin(), inputTrain.getMax()));
+ fit_algo.run();
+
+ // perform an evaluation
+ GaussianProcessRegression algo(fit_algo.getResult());
+ algo.run();
+ GaussianProcessRegressionResult result(algo.getResult());
+ assert_almost_equal(result.getMetaModel()(inputTrain), outputTrain);
+ assert_almost_equal(result.getResiduals(), {1.32804e-07}, 1e-3, 1e-3);
+ assert_almost_equal(result.getRelativeErrors(), {5.20873e-21});
+
+ // Prediction accuracy
+ assert_almost_equal(outputTest, result.getMetaModel()(inputTest), 0.3, 0.0);
+ }
+    // Test 2: 2 inputs, one output
+ {
+ const UnsignedInteger sampleSize = 10;
+ const UnsignedInteger dimension = 2;
+ const Description inputDesc ={"x", "y"};
+ const Description outputDesc ={"cos(0.5*x) + sin(y)"};
+ const SymbolicFunction model(inputDesc, outputDesc);
+
+ const Indices levels = {8, 5};
+ const Box box(levels);
+ Sample inputSample(box.generate());
+
+ // Scale each direction
+ inputSample *= 10.0;
+ const Sample outputSample(model(inputSample));
+
+ // Validation
+      Collection<Distribution> coll;
+ coll.add(Uniform(0, 10.0));
+ coll.add(Uniform(0, 10.0));
+ const JointDistribution dist(coll);
+ const Sample inputValidSample(dist.getSample(sampleSize));
+ const Sample outputValidSample(model(inputValidSample));
+
+      // Definition of exponential model
+ // The parameters have been calibrated using TNC optimization
+ // and AbsoluteExponential models
+ const Point scale = {5.33532, 2.61534};
+ const Point amplitude = {1.61536};
+ SquaredExponential covarianceModel(scale, amplitude);
+
+ // Basis definition
+ const Basis basis(ConstantBasisFactory(dimension).build());
+
+ // GP algorithm
+ // create algorithm
+ GaussianProcessFitter fit_algo(inputSample, outputSample, covarianceModel, basis);
+ // set sensible optimization bounds and estimate hyper parameters
+ fit_algo.setOptimizationBounds(Interval(inputSample.getMin(), inputSample.getMax()));
+ fit_algo.run();
+
+ // perform an evaluation
+ GaussianProcessRegression algo(fit_algo.getResult());
+ algo.run();
+
+ GaussianProcessRegressionResult result(algo.getResult());
+ // Get meta model
+ Function metaModel(result.getMetaModel());
+ const Sample outData(metaModel(inputValidSample));
+      // Errors
+ // Interpolation
+ assert_almost_equal(outputSample, metaModel(inputSample), 3.0e-5, 3.0e-5);
+ // Prediction
+ assert_almost_equal(outputValidSample, outData, 1.0e-1, 1e-1);
+ }
+    // Test 3: R --> R^2 function
+ {
+ const Description inputDesc ={"x"};
+ const Description outputDesc ={"x * sin(x)", "x * cos(x)"};
+ const SymbolicFunction f(inputDesc, outputDesc);
+ const UnsignedInteger sampleSize = 8;
+ const UnsignedInteger dimension = 1;
+ Sample sampleX(sampleSize, dimension);
+ for (UnsignedInteger i = 0; i < sampleSize; ++i)
+ sampleX(i, 0) = 1.0 + i;
+ const Sample sampleY(f(sampleX));
+ const Point scale = {1.0};
+ const Point amplitude = {1.0};
+ SquaredExponential kernel(scale, amplitude);
+ kernel.setActiveParameter(Indices(0));
+      Collection<CovarianceModel> collectionCov;
+ collectionCov.add(kernel);
+ collectionCov.add(kernel);
+ TensorizedCovarianceModel covarianceModel(collectionCov);
+
+ // Basis definition
+ // Build a basis phi from R --> R^2
+ // phi_{0,0} = phi_{0,1} = x
+      // phi_{1,0} = phi_{1,1} = x^2
+ const AggregatedFunction phi0({SymbolicFunction("x", "x"), SymbolicFunction("x", "x")});
+ const AggregatedFunction phi1({SymbolicFunction("x", "x^2"), SymbolicFunction("x", "x^2")});
+      const Collection<Function> collFunctions = {phi0, phi1};
+ const Basis basis(collFunctions);
+
+ // GP algorithm
+ // create algorithm
+ GaussianProcessFitter fit_algo(sampleX, sampleY, covarianceModel, basis);
+ fit_algo.run();
+
+ // perform an evaluation
+ GaussianProcessRegression algo(fit_algo.getResult());
+ algo.run();
+
+ GaussianProcessRegressionResult result(algo.getResult());
+ // Get meta model
+ Function metaModel(result.getMetaModel());
+ // Prediction & interpolation
+ //assert_almost_equal(sampleY, metaModel(sampleX));
+ const Point point = {5.5};
+ const Point outputValue = {-3.88368, 3.90286};
+ assert_almost_equal(metaModel(point), outputValue);
+ }
+ // stationary cov function - fix https://github.com/openturns/openturns/issues/1861
+ {
+ RandomGenerator::SetSeed(0);
+ const SymbolicFunction rho("tau", "exp(-abs(tau))*cos(2*pi_*abs(tau))");
+ const StationaryFunctionalCovarianceModel covarianceModel({1.0}, {1.0}, rho);
+ Sample X(Normal(0, 1.0).getSample(20));
+ X.setDescription({"J0"});
+ Sample Y(Normal(0, 0.1).getSample(20));
+ Y = Y + X;
+ Y.setDescription({"G0"});
+
+ // Basis definition
+ const Basis basis(LinearBasisFactory(1).build());
+
+ // GP algorithm
+ // create algorithm
+ GaussianProcessFitter fit_algo(X, Y, covarianceModel, basis);
+ fit_algo.run();
+
+ // perform an evaluation
+ GaussianProcessRegression algo(fit_algo.getResult());
+ algo.run();
+
+ GaussianProcessRegressionResult result(algo.getResult());
+ // Get meta model
+ Function metaModel(result.getMetaModel());
+ const Point point = {5.5};
+ const Point outputValue = {5.58283};
+ assert_almost_equal(metaModel(point), outputValue);
+ }
+ // GPR with already calibrated parameters
+ {
+
+ const UnsignedInteger sampleSize = 6;
+ const UnsignedInteger dimension = 1;
+
+ const SymbolicFunction f("x", "x * sin(x)");
+
+ Sample inputTrain(sampleSize, dimension);
+ Sample inputTest(sampleSize, dimension);
+ for (UnsignedInteger i = 0; i < sampleSize; ++i)
+ {
+ inputTrain(i, 0) = 3.0 + i;
+ inputTest(i, 0) = 2.5 + i;
+ }
+ inputTrain(0, 0) = 1.0;
+ inputTrain(1, 0) = 3.0;
+ // input test sample
+ inputTest(0, 0) = 2.0;
+ inputTest(1, 0) = 4.0;
+ const Sample outputTrain(f(inputTrain));
+ const Sample outputTest(f(inputTest));
+
+ // create covariance model
+ const Basis basis(ConstantBasisFactory(dimension).build());
+ const Point scale = {1.6326932047296538};
+ const Point amplitude = {4.895995962015954};
+ const SquaredExponential covarianceModel(scale, amplitude);
+
+ const SymbolicFunction trend_function("x", "1.49543");
+
+      // no fitter step: the covariance parameters are already calibrated
+
+ // create algorithm
+ GaussianProcessRegression algo(inputTrain, outputTrain, covarianceModel, trend_function);
+ algo.run();
+ GaussianProcessRegressionResult result(algo.getResult());
+ const Function metaModel(result.getMetaModel());
+ // Validation similar to the first test
+ assert_almost_equal(metaModel(inputTrain), outputTrain);
+ assert_almost_equal(result.getResiduals(), {1.32804e-07}, 1e-3, 1e-3);
+ assert_almost_equal(result.getRelativeErrors(), {5.20873e-21});
+
+ // Prediction accuracy
+ assert_almost_equal(outputTest, metaModel(inputTest), 0.3, 0.0);
+ }
+
+ }
+ catch (TestFailed & ex)
+ {
+ std::cerr << ex << std::endl;
+ return ExitCode::Error;
+ }
+
+
+ return ExitCode::Success;
+}
diff --git a/lib/test/t_GaussianProcessRegression_std_hmat.cxx b/lib/test/t_GaussianProcessRegression_std_hmat.cxx
new file mode 100644
index 0000000000..48882ba44f
--- /dev/null
+++ b/lib/test/t_GaussianProcessRegression_std_hmat.cxx
@@ -0,0 +1,275 @@
+// -*- C++ -*-
+/**
+ * @brief The test file of GaussianProcessRegression class using HMAT
+ *
+ * Copyright 2005-2024 Airbus-EDF-IMACS-ONERA-Phimeca
+ *
+ * This library is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+#include "openturns/OT.hxx"
+#include "openturns/OTtestcode.hxx"
+
+using namespace OT;
+using namespace OT::Test;
+
+int main(int, char *[])
+{
+ TESTPREAMBLE;
+ OStream fullprint(std::cout);
+ setRandomGenerator();
+ ResourceMap::SetAsUnsignedInteger("OptimizationAlgorithm-DefaultMaximumCallsNumber", 10000);
+ ResourceMap::SetAsScalar("Cobyla-DefaultRhoBeg", 0.5);
+ PlatformInfo::SetNumericalPrecision(3);
+ ResourceMap::SetAsString("GaussianProcessFitter-LinearAlgebra", "HMAT");
+ ResourceMap::SetAsScalar("HMatrix-RegularizationEpsilon", 1e-7);
+
+
+ try
+ {
+ // Test 1: a real value function
+ {
+ // Set Numerical precision to 4
+ PlatformInfo::SetNumericalPrecision(4);
+ const UnsignedInteger sampleSize = 6;
+ const UnsignedInteger dimension = 1;
+
+ const SymbolicFunction f("x", "x * sin(x)");
+
+ Sample inputTrain(sampleSize, dimension);
+ Sample inputTest(sampleSize, dimension);
+ for (UnsignedInteger i = 0; i < sampleSize; ++i)
+ {
+ inputTrain(i, 0) = 3.0 + i;
+ inputTest(i, 0) = 2.5 + i;
+ }
+ inputTrain(0, 0) = 1.0;
+ inputTrain(1, 0) = 3.0;
+ // input test sample
+ inputTest(0, 0) = 2.0;
+ inputTest(1, 0) = 4.0;
+ const Sample outputTrain(f(inputTrain));
+ const Sample outputTest(f(inputTest));
+
+ // create covariance model
+ const Basis basis(ConstantBasisFactory(dimension).build());
+ const SquaredExponential covarianceModel;
+
+ // create algorithm
+ GaussianProcessFitter fit_algo(inputTrain, outputTrain, covarianceModel, basis);
+
+ // set sensible optimization bounds and estimate hyper parameters
+ fit_algo.setOptimizationBounds(Interval(inputTrain.getMin(), inputTrain.getMax()));
+ fit_algo.run();
+
+ // perform an evaluation
+ GaussianProcessRegression algo(fit_algo.getResult());
+ algo.run();
+ GaussianProcessRegressionResult result(algo.getResult());
+ assert_almost_equal(result.getMetaModel()(inputTrain), outputTrain, 1e-2);
+ assert_almost_equal(result.getResiduals(), {2.44e-06});
+ assert_almost_equal(result.getRelativeErrors(), {1.76e-12});
+
+ // Prediction accuracy
+ assert_almost_equal(outputTest, result.getMetaModel()(inputTest), 0.3, 0.0);
+ }
+    // Test 2: 2 inputs, one output
+ {
+ const UnsignedInteger sampleSize = 10;
+ const UnsignedInteger dimension = 2;
+ const Description inputDesc ={"x", "y"};
+ const Description outputDesc ={"cos(0.5*x) + sin(y)"};
+ const SymbolicFunction model(inputDesc, outputDesc);
+
+ const Indices levels = {8, 5};
+ const Box box(levels);
+ Sample inputSample(box.generate());
+
+ // Scale each direction
+ inputSample *= 10.0;
+ const Sample outputSample(model(inputSample));
+
+ // Validation
+      Collection<Distribution> coll;
+ coll.add(Uniform(0, 10.0));
+ coll.add(Uniform(0, 10.0));
+ const JointDistribution dist(coll);
+ const Sample inputValidSample(dist.getSample(sampleSize));
+ const Sample outputValidSample(model(inputValidSample));
+
+      // Definition of exponential model
+ // The parameters have been calibrated using TNC optimization
+ // and AbsoluteExponential models
+ const Point scale = {5.33532, 2.61534};
+ const Point amplitude = {1.61536};
+ SquaredExponential covarianceModel(scale, amplitude);
+
+ // Basis definition
+ const Basis basis(ConstantBasisFactory(dimension).build());
+
+ // GP algorithm
+ // create algorithm
+ GaussianProcessFitter fit_algo(inputSample, outputSample, covarianceModel, basis);
+ // set sensible optimization bounds and estimate hyper parameters
+ fit_algo.setOptimizationBounds(Interval(inputSample.getMin(), inputSample.getMax()));
+ fit_algo.run();
+
+ // perform an evaluation
+ GaussianProcessRegression algo(fit_algo.getResult());
+ algo.run();
+
+ GaussianProcessRegressionResult result(algo.getResult());
+ // Get meta model
+ Function metaModel(result.getMetaModel());
+ const Sample outData(metaModel(inputValidSample));
+      // Errors
+ // Interpolation
+ assert_almost_equal(outputSample, metaModel(inputSample), 3.0e-2, 3.0e-2);
+ // Prediction
+ assert_almost_equal(outputValidSample, outData, 1.0e-1, 1e-1);
+ }
+    // Test 3: R --> R^2 function
+ {
+ const Description inputDesc ={"x"};
+ const Description outputDesc ={"x * sin(x)", "x * cos(x)"};
+ const SymbolicFunction f(inputDesc, outputDesc);
+ const UnsignedInteger sampleSize = 8;
+ const UnsignedInteger dimension = 1;
+ Sample sampleX(sampleSize, dimension);
+ for (UnsignedInteger i = 0; i < sampleSize; ++i)
+ sampleX(i, 0) = 1.0 + i;
+ const Sample sampleY(f(sampleX));
+ const Point scale = {1.0};
+ const Point amplitude = {1.0};
+ SquaredExponential kernel(scale, amplitude);
+ kernel.setActiveParameter(Indices(0));
+      Collection<CovarianceModel> collectionCov;
+ collectionCov.add(kernel);
+ collectionCov.add(kernel);
+ TensorizedCovarianceModel covarianceModel(collectionCov);
+
+ // Basis definition
+ // Build a basis phi from R --> R^2
+ // phi_{0,0} = phi_{0,1} = x
+      // phi_{1,0} = phi_{1,1} = x^2
+ const AggregatedFunction phi0({SymbolicFunction("x", "x"), SymbolicFunction("x", "x")});
+ const AggregatedFunction phi1({SymbolicFunction("x", "x^2"), SymbolicFunction("x", "x^2")});
+      const Collection<Function> collFunctions = {phi0, phi1};
+ const Basis basis(collFunctions);
+
+ // GP algorithm
+ // create algorithm
+ GaussianProcessFitter fit_algo(sampleX, sampleY, covarianceModel, basis);
+ fit_algo.run();
+
+ // perform an evaluation
+ GaussianProcessRegression algo(fit_algo.getResult());
+ algo.run();
+
+ GaussianProcessRegressionResult result(algo.getResult());
+ // Get meta model
+ Function metaModel(result.getMetaModel());
+ // Prediction & interpolation
+ //assert_almost_equal(sampleY, metaModel(sampleX));
+ const Point point = {5.5};
+ const Point outputValue = {-3.88, 3.90};
+ assert_almost_equal(metaModel(point), outputValue, 1e-2, 1e-3);
+ }
+ // stationary cov function - fix https://github.com/openturns/openturns/issues/1861
+ {
+ RandomGenerator::SetSeed(0);
+ const SymbolicFunction rho("tau", "exp(-abs(tau))*cos(2*pi_*abs(tau))");
+ const StationaryFunctionalCovarianceModel covarianceModel({1.0}, {1.0}, rho);
+ Sample X(Normal(0, 1.0).getSample(20));
+ X.setDescription({"J0"});
+ Sample Y(Normal(0, 0.1).getSample(20));
+ Y = Y + X;
+ Y.setDescription({"G0"});
+
+ // Basis definition
+ const Basis basis(LinearBasisFactory(1).build());
+
+ // GP algorithm
+ // create algorithm
+ GaussianProcessFitter fit_algo(X, Y, covarianceModel, basis);
+ fit_algo.run();
+
+ // perform an evaluation
+ GaussianProcessRegression algo(fit_algo.getResult());
+ algo.run();
+
+ GaussianProcessRegressionResult result(algo.getResult());
+ // Get meta model
+ Function metaModel(result.getMetaModel());
+ const Point point = {5.5};
+ const Point outputValue = {5.58283};
+ assert_almost_equal(metaModel(point), outputValue);
+ }
+ // GPR with already calibrated parameters
+ {
+
+ const UnsignedInteger sampleSize = 6;
+ const UnsignedInteger dimension = 1;
+
+ const SymbolicFunction f("x", "x * sin(x)");
+
+ Sample inputTrain(sampleSize, dimension);
+ Sample inputTest(sampleSize, dimension);
+ for (UnsignedInteger i = 0; i < sampleSize; ++i)
+ {
+ inputTrain(i, 0) = 3.0 + i;
+ inputTest(i, 0) = 2.5 + i;
+ }
+ inputTrain(0, 0) = 1.0;
+ inputTrain(1, 0) = 3.0;
+ // input test sample
+ inputTest(0, 0) = 2.0;
+ inputTest(1, 0) = 4.0;
+ const Sample outputTrain(f(inputTrain));
+ const Sample outputTest(f(inputTest));
+
+ // create covariance model
+ const Basis basis(ConstantBasisFactory(dimension).build());
+ const Point scale = {1.6326932047296538};
+ const Point amplitude = {4.895995962015954};
+ const SquaredExponential covarianceModel(scale, amplitude);
+
+ const SymbolicFunction trend_function("x", "1.49543");
+
+      // no fitter step: the covariance parameters are already calibrated
+
+ // create algorithm
+ GaussianProcessRegression algo(inputTrain, outputTrain, covarianceModel, trend_function);
+ algo.run();
+ GaussianProcessRegressionResult result(algo.getResult());
+ const Function metaModel(result.getMetaModel());
+ // Validation similar to the first test
+ assert_almost_equal(metaModel(inputTrain), outputTrain);
+ assert_almost_equal(result.getResiduals(), {1.32804e-07}, 1e-3, 1e-3);
+ assert_almost_equal(result.getRelativeErrors(), {5.20873e-21});
+
+ // Prediction accuracy
+ assert_almost_equal(outputTest, metaModel(inputTest), 0.3, 0.0);
+ }
+
+ }
+ catch (TestFailed & ex)
+ {
+ std::cerr << ex << std::endl;
+ return ExitCode::Error;
+ }
+
+
+ return ExitCode::Success;
+}
diff --git a/lib/test/t_InverseWishart_std.cxx b/lib/test/t_InverseWishart_std.cxx
index b7077164b1..ad83c90f85 100644
--- a/lib/test/t_InverseWishart_std.cxx
+++ b/lib/test/t_InverseWishart_std.cxx
@@ -144,6 +144,16 @@ int main(int, char *[])
Arcsine::PointWithDescriptionCollection parameters = distribution.getParametersCollection();
fullprint << "parameters=" << parameters << std::endl;
fullprint << "Standard representative=" << distribution.getStandardRepresentative().__str__() << std::endl;
+
+ // Verify covariance
+ CovarianceMatrix matrix(2);
+ matrix(0, 0) = 100.0;
+ matrix(1, 0) = 50.0;
+ matrix(1, 1) = 80.0;
+    // Empirical estimators are typically bad for this distribution, so we use a large number of degrees of freedom.
+ InverseWishart multidimensional(matrix, 40.0);
+ CovarianceMatrix empirical(multidimensional.getSample(100000).computeCovariance());
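+    // Compare the exact covariance of the distribution with the empirical
+    // covariance of a large sample, up to a loose 10% relative tolerance.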
+ assert_almost_equal(multidimensional.getCovariance(), empirical, 0.1, 0.0);
}
catch (TestFailed & ex)
{
diff --git a/lib/test/t_OrthogonalProductFunctionFactory_std.cxx b/lib/test/t_OrthogonalProductFunctionFactory_std.cxx
new file mode 100644
index 0000000000..8da06a66c9
--- /dev/null
+++ b/lib/test/t_OrthogonalProductFunctionFactory_std.cxx
@@ -0,0 +1,162 @@
+// -*- C++ -*-
+/**
+ * @brief The test file of OrthogonalProductFunctionFactory class
+ *
+ * Copyright 2005-2024 Airbus-EDF-IMACS-ONERA-Phimeca
+ *
+ * This library is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+#include "openturns/OT.hxx"
+#include "openturns/OTtestcode.hxx"
+
+using namespace OT;
+using namespace OT::Test;
+
+// Compute reference function value from index and point
+Point computeTripleHaarFunctionValue(const UnsignedInteger & index, const Point & point)
+{
+ if (point.getDimension() != 3)
+ throw InvalidArgumentException(HERE) << "Expected a dimension 3 point, but dimension is " << point.getDimension();
+ const UnsignedInteger dimension = 3;
+ TensorizedUniVariateFunctionFactory::FunctionFamilyCollection functionCollection(dimension);
+ functionCollection[0] = HaarWaveletFactory();
+ functionCollection[1] = HaarWaveletFactory();
+ functionCollection[2] = HaarWaveletFactory();
+ const LinearEnumerateFunction enumerate(dimension);
+ const TensorizedUniVariateFunctionFactory factory(functionCollection, enumerate);
+ const Function referenceFunction(factory.build(index));
+ const Point value(referenceFunction(point));
+ return value;
+}
+
+// Compute reference function value from multi-index and point
+Point computeTripleHaarFunctionValue(const Indices & indices, const Point & point)
+{
+ if (point.getDimension() != 3)
+ throw InvalidArgumentException(HERE) << "Expected a dimension 3 point, but dimension is " << point.getDimension();
+ const UnsignedInteger dimension = 3;
+ const LinearEnumerateFunction enumerate(dimension);
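+  // inverse() maps the multi-index back to its linear rank in the enumeration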
+ const UnsignedInteger index = enumerate.inverse(indices);
+ const Point value(computeTripleHaarFunctionValue(index, point));
+ return value;
+}
+
+Point computeHaarFourierFunctionValue(const UnsignedInteger & index, const Point & point)
+{
+ if (point.getDimension() != 3)
+ throw InvalidArgumentException(HERE) << "Expected a dimension 3 point, but dimension is " << point.getDimension();
+ const UnsignedInteger dimension = 3;
+ TensorizedUniVariateFunctionFactory::FunctionFamilyCollection functionCollection(dimension);
+ functionCollection[0] = HaarWaveletFactory();
+ functionCollection[1] = HaarWaveletFactory();
+ functionCollection[2] = FourierSeriesFactory();
+ const LinearEnumerateFunction enumerate(dimension);
+ const TensorizedUniVariateFunctionFactory factory(functionCollection, enumerate);
+ const Function referenceFunction(factory.build(index));
+ const Point value(referenceFunction(point));
+ return value;
+}
+
+// Compute reference function value from multi-index and point
+Point computeHaarFourierFunctionValue(const Indices & indices, const Point & point)
+{
+ if (point.getDimension() != 3)
+ throw InvalidArgumentException(HERE) << "Expected a dimension 3 point, but dimension is " << point.getDimension();
+ const UnsignedInteger dimension = 3;
+ const LinearEnumerateFunction enumerate(dimension);
+ const UnsignedInteger index = enumerate.inverse(indices);
+ const Point value(computeHaarFourierFunctionValue(index, point));
+ return value;
+}
+
+
+int main(int, char *[])
+{
+ TESTPREAMBLE;
+ OStream fullprint(std::cout);
+ setRandomGenerator();
+
+
+ try
+ {
+ // Create the orthogonal basis
+ fullprint << "Create the orthogonal basis" << std::endl;
+ UnsignedInteger dimension = 3;
+ OrthogonalProductFunctionFactory::FunctionFamilyCollection functionCollection(dimension);
+ functionCollection[0] = HaarWaveletFactory();
+ functionCollection[1] = HaarWaveletFactory();
+ functionCollection[2] = HaarWaveletFactory();
+
+ // Create linear enumerate function
+ fullprint << "Create linear enumerate function" << std::endl;
+ LinearEnumerateFunction enumerateFunction(dimension);
+ OrthogonalProductFunctionFactory productBasis(functionCollection, enumerateFunction);
+ fullprint << productBasis.__str__() << std::endl;
+ fullprint << productBasis.__repr_markdown__() << std::endl;
+ // Test the build() method on a collection of functions
+ const Point center({0.5, 0.5, 0.5});
+ for (UnsignedInteger i = 0; i < 10; ++ i)
+ {
+ // Test build from index
+ const Function function(productBasis.build(i));
+ assert_almost_equal(function(center), computeTripleHaarFunctionValue(i, center));
+ // Test build from multi-index
+ const Indices indices(enumerateFunction(i));
+ const Function function2(productBasis.build(indices));
+ assert_almost_equal(function2(center), computeTripleHaarFunctionValue(indices, center));
+ }
+
+ // Heterogeneous collection
+ fullprint << "Heterogeneous collection" << std::endl;
+ OrthogonalProductFunctionFactory::FunctionFamilyCollection functionCollection2(dimension);
+ functionCollection2[0] = HaarWaveletFactory();
+ functionCollection2[1] = FourierSeriesFactory();
+ functionCollection2[2] = HaarWaveletFactory();
+ OrthogonalProductFunctionFactory productBasis2(functionCollection2);
+ fullprint << productBasis2.__str__() << std::endl;
+ fullprint << productBasis2.__repr_markdown__() << std::endl;
+ OrthogonalProductFunctionFactory::FunctionFamilyCollection functionCollection4(productBasis2.getFunctionFamilyCollection());
+ assert_equal((int) functionCollection4.getSize(), 3);
+
+ // Test getMarginal
+ fullprint << "Test getMarginal" << std::endl;
+ UnsignedInteger dimension2 = 5;
+ OrthogonalProductFunctionFactory::FunctionFamilyCollection functionCollection3(dimension2);
+ functionCollection3[0] = HaarWaveletFactory();
+ functionCollection3[1] = FourierSeriesFactory();
+ functionCollection3[2] = HaarWaveletFactory();
+ functionCollection3[3] = HaarWaveletFactory();
+ functionCollection3[4] = FourierSeriesFactory();
+ OrthogonalProductFunctionFactory productBasis5(functionCollection3);
+ Indices indices({0, 2, 4});
+ OrthogonalBasis productBasis6(productBasis5.getMarginal(indices));
+ fullprint << productBasis6.__str__() << std::endl;
+ // Test the build() method on a collection of functions
+ const Point center2({0.5, 0.5, 0.5});
+ for (UnsignedInteger i = 0; i < 10; ++ i)
+ {
+ // Test build from index
+ const Function function(productBasis6.build(i));
+ assert_almost_equal(function(center2), computeHaarFourierFunctionValue(i, center2));
+ }
+ }
+ catch (TestFailed & ex)
+ {
+ std::cerr << ex << std::endl;
+ return ExitCode::Error;
+ }
+
+ return ExitCode::Success;
+}
diff --git a/lib/test/t_OrthogonalProductFunctionFactory_std.expout b/lib/test/t_OrthogonalProductFunctionFactory_std.expout
new file mode 100644
index 0000000000..250eddf377
--- /dev/null
+++ b/lib/test/t_OrthogonalProductFunctionFactory_std.expout
@@ -0,0 +1,9 @@
+Create the orthogonal basis
+Create linear enumerate function
+class=OrthogonalProductFunctionFactory factory=class=TensorizedUniVariateFunctionFactory univariate function collection=[class=UniVariateFunctionFamily implementation=class=HaarWaveletFactory measure=class=Uniform name=Uniform dimension=1 a=0 b=1,class=UniVariateFunctionFamily implementation=class=HaarWaveletFactory measure=class=Uniform name=Uniform dimension=1 a=0 b=1,class=UniVariateFunctionFamily implementation=class=HaarWaveletFactory measure=class=Uniform name=Uniform dimension=1 a=0 b=1] enumerate function=class=LinearEnumerateFunction dimension=3 measure=class=JointDistribution name=JointDistribution dimension=3 copula=class=IndependentCopula name=IndependentCopula dimension=3 marginal[0]=class=Uniform name=Uniform dimension=1 a=0 b=1 marginal[1]=class=Uniform name=Uniform dimension=1 a=0 b=1 marginal[2]=class=Uniform name=Uniform dimension=1 a=0 b=1
+class=OrthogonalProductFunctionFactory factory=class=TensorizedUniVariateFunctionFactory univariate function collection=[class=UniVariateFunctionFamily implementation=class=HaarWaveletFactory measure=class=Uniform name=Uniform dimension=1 a=0 b=1,class=UniVariateFunctionFamily implementation=class=HaarWaveletFactory measure=class=Uniform name=Uniform dimension=1 a=0 b=1,class=UniVariateFunctionFamily implementation=class=HaarWaveletFactory measure=class=Uniform name=Uniform dimension=1 a=0 b=1] enumerate function=class=LinearEnumerateFunction dimension=3 measure=class=JointDistribution name=JointDistribution dimension=3 copula=class=IndependentCopula name=IndependentCopula dimension=3 marginal[0]=class=Uniform name=Uniform dimension=1 a=0 b=1 marginal[1]=class=Uniform name=Uniform dimension=1 a=0 b=1 marginal[2]=class=Uniform name=Uniform dimension=1 a=0 b=1
+Heterogeneous collection
+class=OrthogonalProductFunctionFactory factory=class=TensorizedUniVariateFunctionFactory univariate function collection=[class=UniVariateFunctionFamily implementation=class=HaarWaveletFactory measure=class=Uniform name=Uniform dimension=1 a=0 b=1,class=UniVariateFunctionFamily implementation=class=FourierSeriesFactory measure=class=Uniform name=Uniform dimension=1 a=-3.14159 b=3.14159,class=UniVariateFunctionFamily implementation=class=HaarWaveletFactory measure=class=Uniform name=Uniform dimension=1 a=0 b=1] enumerate function=class=LinearEnumerateFunction dimension=3 measure=class=JointDistribution name=JointDistribution dimension=3 copula=class=IndependentCopula name=IndependentCopula dimension=3 marginal[0]=class=Uniform name=Uniform dimension=1 a=0 b=1 marginal[1]=class=Uniform name=Uniform dimension=1 a=-3.14159 b=3.14159 marginal[2]=class=Uniform name=Uniform dimension=1 a=0 b=1
+class=OrthogonalProductFunctionFactory factory=class=TensorizedUniVariateFunctionFactory univariate function collection=[class=UniVariateFunctionFamily implementation=class=HaarWaveletFactory measure=class=Uniform name=Uniform dimension=1 a=0 b=1,class=UniVariateFunctionFamily implementation=class=FourierSeriesFactory measure=class=Uniform name=Uniform dimension=1 a=-3.14159 b=3.14159,class=UniVariateFunctionFamily implementation=class=HaarWaveletFactory measure=class=Uniform name=Uniform dimension=1 a=0 b=1] enumerate function=class=LinearEnumerateFunction dimension=3 measure=class=JointDistribution name=JointDistribution dimension=3 copula=class=IndependentCopula name=IndependentCopula dimension=3 marginal[0]=class=Uniform name=Uniform dimension=1 a=0 b=1 marginal[1]=class=Uniform name=Uniform dimension=1 a=-3.14159 b=3.14159 marginal[2]=class=Uniform name=Uniform dimension=1 a=0 b=1
+Test getMarginal
+class=OrthogonalProductFunctionFactory factory=class=TensorizedUniVariateFunctionFactory univariate function collection=[class=UniVariateFunctionFamily implementation=class=HaarWaveletFactory measure=class=Uniform name=Uniform dimension=1 a=0 b=1,class=UniVariateFunctionFamily implementation=class=HaarWaveletFactory measure=class=Uniform name=Uniform dimension=1 a=0 b=1,class=UniVariateFunctionFamily implementation=class=FourierSeriesFactory measure=class=Uniform name=Uniform dimension=1 a=-3.14159 b=3.14159] enumerate function=class=LinearEnumerateFunction dimension=3 measure=class=JointDistribution name=JointDistribution dimension=3 copula=class=IndependentCopula name=IndependentCopula dimension=3 marginal[0]=class=Uniform name=Uniform dimension=1 a=0 b=1 marginal[1]=class=Uniform name=Uniform dimension=1 a=0 b=1 marginal[2]=class=Uniform name=Uniform dimension=1 a=-3.14159 b=3.14159
diff --git a/lib/test/t_OrthogonalProductPolynomialFactory_std.cxx b/lib/test/t_OrthogonalProductPolynomialFactory_std.cxx
index fca9bad692..652fc4068c 100644
--- a/lib/test/t_OrthogonalProductPolynomialFactory_std.cxx
+++ b/lib/test/t_OrthogonalProductPolynomialFactory_std.cxx
@@ -1,6 +1,6 @@
// -*- C++ -*-
/**
- * @brief The test file of FunctionalChaosAlgoritm class
+ * @brief The test file of OrthogonalProductPolynomialFactory class
*
* Copyright 2005-2024 Airbus-EDF-IMACS-ONERA-Phimeca
*
@@ -24,6 +24,38 @@
using namespace OT;
using namespace OT::Test;
+// Compute reference function value from index and point
+Point computePolynomialValue(const UnsignedInteger & index, const Point & point)
+{
+ if (point.getDimension() != 3)
+ throw InvalidArgumentException(HERE) << "Expected a dimension 3 point, but dimension is " << point.getDimension();
+ const UnsignedInteger dimension = 3;
+ const LinearEnumerateFunction enumerate(dimension);
+ // Compute the multi-indices using the EnumerateFunction
+ Indices indices(enumerate(index));
+ // Then build the collection of polynomials using the collection of factories
+ ProductPolynomialEvaluation::PolynomialCollection polynomials(dimension);
+ for (UnsignedInteger i = 0; i < dimension; ++i)
+ {
+ polynomials[i] = LegendreFactory().build(indices[i]);
+ }
+ const ProductPolynomialEvaluation product(polynomials);
+ const Point value(product(point));
+ return value;
+}
+
+// Compute reference function value from multi-index and point
+Point computePolynomialValue(const Indices & indices, const Point & point)
+{
+ if (point.getDimension() != 3)
+ throw InvalidArgumentException(HERE) << "Expected a dimension 3 point, but dimension is " << point.getDimension();
+ const UnsignedInteger dimension = 3;
+ const LinearEnumerateFunction enumerate(dimension);
+ const UnsignedInteger index = enumerate.inverse(indices);
+ const Point value(computePolynomialValue(index, point));
+ return value;
+}
+
int main(int, char *[])
{
TESTPREAMBLE;
@@ -43,6 +75,18 @@ int main(int, char *[])
OrthogonalProductPolynomialFactory productBasis(polynomialCollection, enumerateFunction);
fullprint << productBasis.__str__() << std::endl;
fullprint << productBasis.__repr_markdown__() << std::endl;
+ // Test the build() method on a collection of functions
+ const Point center({0.5, 0.5, 0.5});
+ for (UnsignedInteger i = 0; i < 10; ++ i)
+ {
+ // Test build from index
+ const Function polynomial(productBasis.build(i));
+ assert_almost_equal(polynomial(center), computePolynomialValue(i, center));
+ // Test build from multi-index
+ const Indices indices(enumerateFunction(i));
+ const Function polynomial2(productBasis.build(indices));
+ assert_almost_equal(polynomial2(center), computePolynomialValue(indices, center));
+ }
// Heterogeneous collection
OrthogonalProductPolynomialFactory::PolynomialFamilyCollection polynomCollection2(dimension);
@@ -70,6 +114,23 @@ int main(int, char *[])
OrthogonalProductPolynomialFactory productBasis4(aCollection4);
fullprint << productBasis4.__str__() << std::endl;
fullprint << productBasis4.__repr_markdown__() << std::endl;
+
+ // Test getMarginal
+ fullprint << "Test getMarginal" << std::endl;
+ UnsignedInteger dimension2 = 5;
+  Collection<Distribution> marginals4(dimension2, Uniform(0.0, 1.0));
+ OrthogonalProductPolynomialFactory productBasis5(marginals4);
+ Indices indices({0, 2, 4});
+ OrthogonalBasis productBasis6(productBasis5.getMarginal(indices));
+ fullprint << productBasis6.__str__() << std::endl;
+ // Test the build() method on a collection of functions
+ const Point center2({0.5, 0.5, 0.5});
+ for (UnsignedInteger i = 0; i < 10; ++ i)
+ {
+ // Test build from index
+ const Function polynomial(productBasis6.build(i));
+ assert_almost_equal(polynomial(center2), computePolynomialValue(i, center2));
+ }
}
catch (TestFailed & ex)
{
@@ -77,6 +138,5 @@ int main(int, char *[])
return ExitCode::Error;
}
-
return ExitCode::Success;
}
diff --git a/lib/test/t_OrthogonalProductPolynomialFactory_std.expout b/lib/test/t_OrthogonalProductPolynomialFactory_std.expout
index e9e1d6ef1f..31401121d9 100644
--- a/lib/test/t_OrthogonalProductPolynomialFactory_std.expout
+++ b/lib/test/t_OrthogonalProductPolynomialFactory_std.expout
@@ -86,3 +86,15 @@ OrthogonalProductPolynomialFactory
| 1 | LegendreFactory |
| 2 | AdaptiveStieltjesAlgorithm |
+Test getMarginal
+OrthogonalProductPolynomialFactory
+- measure=Distribution
+- isOrthogonal=true
+- enumerateFunction=class=LinearEnumerateFunction dimension=3
+
+| Index | Type |
+|-------|-----------------|
+| 0 | LegendreFactory |
+| 1 | LegendreFactory |
+| 2 | LegendreFactory |
+
diff --git a/lib/test/t_Wishart_std.cxx b/lib/test/t_Wishart_std.cxx
index 23c9b2ebb2..b116f2ff76 100644
--- a/lib/test/t_Wishart_std.cxx
+++ b/lib/test/t_Wishart_std.cxx
@@ -143,6 +143,15 @@ int main(int, char *[])
Arcsine::PointWithDescriptionCollection parameters = distribution.getParametersCollection();
fullprint << "parameters=" << parameters << std::endl;
fullprint << "Standard representative=" << distribution.getStandardRepresentative().__str__() << std::endl;
+
+ // Verify covariance
+ CovarianceMatrix matrix(2);
+ matrix(0, 0) = 10.0;
+ matrix(1, 0) = 5.0;
+ matrix(1, 1) = 8.0;
+ Wishart multidimensional(matrix, 3.0);
+ CovarianceMatrix empirical(multidimensional.getSample(100000).computeCovariance());
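+    // Compare the exact covariance of the distribution with the empirical
+    // covariance of a large sample, up to a 2% relative tolerance.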
+ assert_almost_equal(multidimensional.getCovariance(), empirical, 0.02, 0.0);
}
catch (TestFailed & ex)
{
diff --git a/python/doc/examples/data_analysis/manage_data_and_samples/_static/pandas.png b/python/doc/_static/pandas.png
similarity index 100%
rename from python/doc/examples/data_analysis/manage_data_and_samples/_static/pandas.png
rename to python/doc/_static/pandas.png
diff --git a/python/doc/examples/graphs/plot_graphs_basics.py b/python/doc/examples/graphs/plot_graphs_basics.py
index 0bdb9a462d..74f5970429 100644
--- a/python/doc/examples/graphs/plot_graphs_basics.py
+++ b/python/doc/examples/graphs/plot_graphs_basics.py
@@ -11,7 +11,7 @@
# The `draw` method the `Graph` class
# -----------------------------------
#
-# The simplest way to create a graph is to use the `draw` method. The `Normal` distribution for example provides a method to draw the density function of the gaussian distribution.
+# The simplest way to create a graph is to use the `draw` method. The :class:`~openturns.Normal` distribution, for example, provides a method to draw its density function.
# %%
import openturns as ot
@@ -29,20 +29,20 @@
view = viewer.View(graph)
# %%
-# To configure the look of the plot, we can first observe the type of graph returned by the `drawPDF` method returns: it is a `Graph`.
+# To configure the look of the plot, we can first observe the type of the object returned by the `drawPDF` method: it is a :class:`~openturns.Graph`.
# %%
graph = n.drawPDF()
type(graph)
# %%
-# The `Graph` class provides several methods to configure the legends, the title and the colors.
-# Since a graph can contain several sub-graphs, the `setColors` takes a list of colors as inputs argument: each item of the list corresponds to the sub-graphs.
+# The :class:`~openturns.Graph` class provides several methods to configure the legends, the title and the colors.
+# Since a graph can contain several sub-graphs, the `setColors` method takes a list of colors as input argument: each item of the list corresponds to one sub-graph.
# %%
graph.setXTitle("N")
graph.setYTitle("PDF")
-graph.setTitle("Probability density function of the standard gaussian distribution")
+graph.setTitle("Probability density function of the standard Gaussian distribution")
graph.setLegends(["N"])
graph.setColors(["blue"])
view = viewer.View(graph)
@@ -74,7 +74,7 @@
# Draw a cloud
# ------------
#
-# The `Cloud` class creates clouds of bidimensional points. To demonstrate it, let us create two gaussian distributions in two dimensions.
+# The :class:`~openturns.Cloud` class creates clouds of bidimensional points. To illustrate it, let us create two Normal distributions in two dimensions.
# %%
# Create a Funky distribution
@@ -108,7 +108,7 @@
view = viewer.View(graph)
# %%
-# We sometimes want to customize the graph by choosing the type of point (square, triangle, circle, etc...), of line (continuous, dashed, etc...) or another parameter.
+# We sometimes want to customize the graph by choosing the type of point (square, triangle, circle, etc.), the type of line (continuous, dashed, etc.) or another parameter.
# We can know the list of possible values with the corresponding `getValid` method.
#
# For example, the following function returns the possible values of the `PointStyle` parameter.
@@ -137,7 +137,7 @@
# Configure the style of points and the thickness of a curve
# ----------------------------------------------------------
#
-# Assume that we want to plot the sine curve from -2 to 2. The simplest way is to use the `draw` method of the function.
+# Assume that we want to plot the `sine` curve from -2 to 2. The simplest way is to use the `draw` method of the function.
# %%
g = ot.SymbolicFunction("x", "sin(x)")
@@ -147,14 +147,14 @@
view = viewer.View(graph)
# %%
-# I would rather get a dashed curve: let us search for the available line styles.
+# One would rather get a dashed curve: let us search for the available line styles.
# %%
ot.Drawable.GetValidLineStyles()
# %%
-# In order to use the `Curve` class, it will be easier if we have a method to generate a `Sample` containing points regularly spaced in an interval.
+# In order to use the :class:`~openturns.Curve` class, it will be easier if we have a method to generate a :class:`~openturns.Sample` containing points regularly spaced in an interval.
# %%
@@ -216,9 +216,9 @@ def createHSVColor(indexCurve, maximumNumberOfCurves):
# Create matrices of graphs
# -------------------------
#
-# The library provides features to create a grid of graphs. However, we can use the `add_subplot` function from Matplotlib.
+# The library provides features to create a grid of graphs. However, we can use the `add_subplot` function from `Matplotlib`.
#
-# Let us create two graphs of the PDF and CDF of the following gaussian distribution..
+# Let us create two graphs of the PDF and CDF of the following Normal distribution.
# %%
n = ot.Normal()
@@ -233,9 +233,9 @@ def createHSVColor(indexCurve, maximumNumberOfCurves):
_ = viewer.View(grid)
# %%
-# Another method is to create a figure with the `figure` function from Matplotlib,
+# Another method is to create a figure with the `figure` function from `Matplotlib`,
# then add two graphs with the `add_subplot` function.
-# We use the `viewer.View` function to create the required Matplotlib object.
+# We use the `viewer.View` function to create the required `Matplotlib` object.
# Since we are not interested by the output of the `View` function, we use the dummy variable `_` as output.
# The title is finally configured with `suptitle`.
@@ -252,7 +252,7 @@ def createHSVColor(indexCurve, maximumNumberOfCurves):
# -----------------------
# %%
-# The `View` class has a `save` method which saves the graph into an image.
+# The :class:`~openturns.viewer.View` class has a `save` method which saves the graph into an image.
# %%
@@ -269,13 +269,13 @@ def createHSVColor(indexCurve, maximumNumberOfCurves):
view.save("normal-100dpi.png", dpi=100)
# %%
-# Configure the size of a graph with matplotlib
-# ---------------------------------------------
+# Configure the size of a graph with `matplotlib`
+# -----------------------------------------------
# %%
# %%
-# We first create a graph containing the PDF of a gaussian distribution
+# We first create a graph containing the PDF of a Normal distribution
# %%
n = ot.Normal()
diff --git a/python/doc/examples/graphs/plot_graphs_contour.py b/python/doc/examples/graphs/plot_graphs_contour.py
index 5f9fc05071..679e8fb76c 100644
--- a/python/doc/examples/graphs/plot_graphs_contour.py
+++ b/python/doc/examples/graphs/plot_graphs_contour.py
@@ -18,7 +18,7 @@
import openturns.viewer as viewer
# %%
-# We build a bidimensional function (function of x and y), define the study domain and the sample size
+# We build a bidimensional function (function of `x` and `y`), define the study domain and the sample size
# %%
f = ot.SymbolicFunction(["x", "y"], ["exp(-sin(cos(y)^2 * x^2 + sin(x)^2 * y^2))"])
@@ -37,14 +37,14 @@
view = viewer.View(graph)
# %%
-# The graph contains an unique drawable whose implementation is of class `Contour`
+# The graph contains a unique drawable whose implementation is of class :class:`~openturns.Contour`
# %%
contour = graph.getDrawable(0).getImplementation()
print(type(contour).__name__)
# %%
-# Another way to build the contour is to build the data sample and give it to the constructor of the `Contour` class
+# Another way to build the contour is to build the data sample and give it to the constructor of the :class:`~openturns.Contour` class
# %%
inputData = ot.Box([NX, NY]).generate()
@@ -100,7 +100,7 @@
# %%
# When the function takes values very different in magnitude, it may be useful to change the norm which is
# used to distribute the colors and to bound the color interval.
-# Here we will also let `matplotlib` calculate the levels by not giving any level to the contour
+# Here we will also let `Matplotlib` calculate the levels by not giving any level to the contour
# %%
contour.setColorMapNorm("log")
@@ -130,8 +130,8 @@
mixture = ot.Mixture([x_funk, x_punk], [0.5, 1.0])
# %%
-# The constructed graph is composed of the superposition of a filled contour and iso lines
-# We also changed the thickness and style of the lines to show the effect although it is not useful here
+# The constructed graph is composed of the superposition of a filled contour and iso lines.
+# We also changed the thickness and style of the lines to show the effect although it is not useful here.
# %%
graph = mixture.drawPDF([-5.0, -5.0], [5.0, 5.0])
diff --git a/python/doc/examples/graphs/plot_graphs_fill_area.py b/python/doc/examples/graphs/plot_graphs_fill_area.py
index d518a5fe76..5e09fdcbdf 100644
--- a/python/doc/examples/graphs/plot_graphs_fill_area.py
+++ b/python/doc/examples/graphs/plot_graphs_fill_area.py
@@ -16,7 +16,7 @@
ot.Log.Show(ot.Log.NONE)
# %%
-# We generate a sample from a standard gaussian distribution.
+# We generate a sample from the standard Normal distribution.
# %%
dist = ot.Normal()
diff --git a/python/doc/examples/graphs/plot_graphs_loglikelihood_contour.py b/python/doc/examples/graphs/plot_graphs_loglikelihood_contour.py
index 91dfac45e8..b3d5dd5fd6 100644
--- a/python/doc/examples/graphs/plot_graphs_loglikelihood_contour.py
+++ b/python/doc/examples/graphs/plot_graphs_loglikelihood_contour.py
@@ -20,7 +20,7 @@
# -----------------
# %%
-# We create a `TruncatedNormal` and generate a small sample.
+# We create a :class:`~openturns.TruncatedNormal` and generate a small sample.
# %%
a = -1
@@ -31,7 +31,7 @@
sample = distribution.getSample(11)
# %%
-# In order to see the distribution and the sample, we draw the PDF of the distribution and generate a clouds which X coordinates are the sample values.
+# In order to see the distribution and the sample, we draw the PDF of the distribution and generate a cloud whose `X` coordinates are the sample values.
# %%
graph = distribution.drawPDF()
@@ -46,7 +46,7 @@
# %%
-# The following function computes the log-likelihood of a `TruncatedNormal`
+# The following function computes the log-likelihood of a :class:`~openturns.TruncatedNormal`
# which mean and standard deviations are given as input arguments.
# The lower and upper bounds of the distribution are computed as minimum and maximum of the sample.
@@ -90,7 +90,7 @@ def logLikelihood(X):
# %%
# Draw the log-likelihood function with the `draw` method: this is much faster than using a `for` loop.
-# In order to print LaTeX X and Y labels, we use the `"r"` character in front of the string containing the LaTeX command.
+# In order to print LaTeX `X` and `Y` labels, we use the `"r"` character in front of the string containing the LaTeX command.
# %%
logLikelihoodFunction = ot.PythonFunction(2, 1, logLikelihood)
@@ -105,7 +105,7 @@ def logLikelihood(X):
# %%
# The level values are computed from the quantiles of the data, so that the contours are equally spaced.
-# We can configure the number of levels by setting the `Contour-DefaultLevelsNumber` key in the `ResourceMap`.
+# We can configure the number of levels by setting the `Contour-DefaultLevelsNumber` key in the :class:`~openturns.ResourceMap`.
# %%
ot.ResourceMap.SetAsUnsignedInteger("Contour-DefaultLevelsNumber", 5)
@@ -121,7 +121,7 @@ def logLikelihood(X):
contour = graphBasic.getDrawable(0)
# %%
-# To be able to use specific `Contour` methods like `buildDefaultLevels`, we need to use `getImplementation`.
+# To be able to use specific `Contour` methods like `buildDefaultLevels`, we need to use the `getImplementation` method.
contour = contour.getImplementation()
contour.buildDefaultLevels(50)
@@ -133,8 +133,9 @@ def logLikelihood(X):
# %%
# Using a rank-based normalization of the colors
# ----------------------------------------------
+
# %%
-# In the previous plots, there was little color variation for isolines corresponding to high log-likelihood values.
+# In the previous plots, there was little color variation for isolines corresponding to large log-likelihood values.
# This is due to a steep cliff visible for low values of :math:`\sigma`.
# To make the color variation clearer around -13, we use a normalization based on the rank of the level curve and not on its value.
contour.setColorMapNorm("rank")
@@ -204,9 +205,9 @@ def logLikelihood(X):
# ----------------------------
# %%
-# The `Contour` class does not allow us to manually set multiple colors.
+# The :class:`~openturns.Contour` class does not allow us to manually set multiple colors.
# Here we show how to assign explicit colors to the different contour lines by passing keyword
-# arguments to the `viewer.View` class.
+# arguments to the :class:`~openturns.viewer.View` class.
# Build a range of colors corresponding to the Tableau palette
palette = ot.Drawable.BuildTableauPalette(len(levels))
diff --git a/python/doc/examples/meta_modeling/kriging_metamodel/plot_gpr_cantilever_beam.py b/python/doc/examples/meta_modeling/kriging_metamodel/plot_gpr_cantilever_beam.py
new file mode 100644
index 0000000000..7cc40bdfd0
--- /dev/null
+++ b/python/doc/examples/meta_modeling/kriging_metamodel/plot_gpr_cantilever_beam.py
@@ -0,0 +1,177 @@
+"""
+Gaussian Process Regression: cantilever beam model
+===================================================
+"""
+# %%
+# In this example, we create a Gaussian Process Regression (GPR) metamodel of the :ref:`cantilever beam <use-case-cantilever-beam>`.
+# We use a squared exponential covariance kernel for the Gaussian process. In order to estimate the hyper-parameters, we use a design of experiments of size 20.
+
+
+# %%
+# Definition of the model
+# -----------------------
+
+# %%
+from openturns.usecases import cantilever_beam
+import openturns as ot
+import openturns.experimental as otexp
+import openturns.viewer as viewer
+from matplotlib import pylab as plt
+
+ot.Log.Show(ot.Log.NONE)
+
+# Reset default settings
+ot.ResourceMap.Reload()
+ot.RandomGenerator.SetSeed(0)
+
+# %%
+# We load the cantilever beam use case:
+cb = cantilever_beam.CantileverBeam()
+
+# %%
+# We define the function which evaluates the output depending on the inputs.
+model = cb.model
+
+# %%
+# Then we define the distribution of the input random vector.
+myDistribution = cb.distribution
+
+# %%
+# Create the design of experiments
+# --------------------------------
+
+# %%
+# We consider a simple Monte-Carlo sample as a design of experiments.
+# This is why we generate an input sample using the `getSample` method of the distribution. Then we evaluate the output using the `model` function.
+
+# %%
+sampleSize_train = 20
+X_train = myDistribution.getSample(sampleSize_train)
+Y_train = model(X_train)
+
+# %%
+# The following figure presents the distribution of the vertical deviations Y on the training sample. We observe that the large deviations occur less often.
+
+# %%
+histo = ot.HistogramFactory().build(Y_train).drawPDF()
+histo.setXTitle("Vertical deviation (cm)")
+histo.setTitle("Distribution of the vertical deviation")
+histo.setLegends([""])
+view = viewer.View(histo)
+
+# %%
+# Create the metamodel
+# --------------------
+
+# %%
+# In order to create the GPR metamodel, we first select a constant trend with the `ConstantBasisFactory` class. Then we use a squared exponential covariance kernel.
+# The `SquaredExponential` kernel has one amplitude coefficient and 4 scale coefficients.
+# This is because this covariance kernel is anisotropic: each of the 4 input variables is associated with its own scale coefficient.
+
+# %%
+basis = ot.ConstantBasisFactory(cb.dim).build()
+covarianceModel = ot.SquaredExponential(cb.dim)
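+
+# %%
+# We can check that this kernel has 4 scale coefficients (one per input variable)
+# and a single amplitude coefficient:
+print(covarianceModel.getScale(), covarianceModel.getAmplitude())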
+
+# %%
+# Typically, the optimization algorithm is quite good at setting sensible optimization bounds.
+# In this case, however, the range of the input domain is extreme.
+
+# %%
+print("Lower and upper bounds of X_train:")
+print(X_train.getMin(), X_train.getMax())
+
+# %%
+# We need to manually define sensible optimization bounds.
+# Note that since the amplitude parameter is computed analytically (this is possible when the output dimension is 1), we only need to set bounds on the scale parameter.
+
+# %%
+scaleOptimizationBounds = ot.Interval(
+ [1.0, 1.0, 1.0, 1.0e-10], [1.0e11, 1.0e3, 1.0e1, 1.0e-5]
+)
+
+# %%
+# Finally, we use the `GaussianProcessFitter` and `GaussianProcessRegression` classes to create the GPR metamodel.
+# It requires a training sample, a covariance kernel and a trend basis as input arguments.
+# We need to set the initial scale parameter for the optimization. The upper bound of the input domain is a sensible choice here.
+# We must not forget to actually set the optimization bounds defined above.
+
+# %%
+covarianceModel.setScale(X_train.getMax())
+fitter_algo = otexp.GaussianProcessFitter(X_train, Y_train, covarianceModel, basis)
+fitter_algo.setOptimizationBounds(scaleOptimizationBounds)
+
+
+# %%
+# The `run` method optimizes the metamodel hyperparameters.
+#
+# We can then print the constant trend of the metamodel, estimated using the least squares method.
+
+# %%
+fitter_algo.run()
+fitter_result = fitter_algo.getResult()
+gpr_algo = otexp.GaussianProcessRegression(fitter_result)
+gpr_algo.run()
+gpr_result = gpr_algo.getResult()
+gprMetamodel = gpr_result.getMetaModel()
+
+# %%
+# The `getTrendCoefficients` method returns the coefficients of the trend.
+
+# %%
+print(gpr_result.getTrendCoefficients())
+
+# %%
+# We can also print the hyperparameters of the covariance model, which have been estimated by maximizing the likelihood.
+
+# %%
+gpr_result.getCovarianceModel()
+
+# %%
+# Validate the metamodel
+# ----------------------
+
+# %%
+# We finally want to validate the GPR metamodel. This is why we generate a validation sample with size 100 and we evaluate the output of the model on this sample.
+
+# %%
+sampleSize_test = 100
+X_test = myDistribution.getSample(sampleSize_test)
+Y_test = model(X_test)
+
+# %%
+# The `MetaModelValidation` class makes the validation easy. To create it, we use the validation samples and the metamodel.
+
+# %%
+val = ot.MetaModelValidation(Y_test, gprMetamodel(X_test))
+
+# %%
+# The `computeR2Score` method computes the R2 score.
+
+# %%
+R2 = val.computeR2Score()[0]
+print(R2)
+
+# %%
+# The residuals are the difference between the model and the metamodel.
+
+# %%
+r = val.getResidualSample()
+graph = ot.HistogramFactory().build(r).drawPDF()
+graph.setXTitle("Residuals (cm)")
+graph.setTitle("Distribution of the residuals")
+graph.setLegends([""])
+view = viewer.View(graph)
+
+# %%
+# We observe that the negative residuals occur with nearly the same frequency as the positive residuals: this is a first sign of good quality.
+
+# %%
+# The `drawValidation` method allows one to compare the observed outputs and the metamodel outputs.
+
+# %%
+# sphinx_gallery_thumbnail_number = 3
+graph = val.drawValidation()
+graph.setTitle("R2 = %.2f%%" % (100 * R2))
+view = viewer.View(graph)
+
+plt.show()
diff --git a/python/doc/examples/reliability_sensitivity/reliability/plot_estimate_probability_form.py b/python/doc/examples/reliability_sensitivity/reliability/plot_estimate_probability_form.py
index aae10037e6..64a6023f89 100644
--- a/python/doc/examples/reliability_sensitivity/reliability/plot_estimate_probability_form.py
+++ b/python/doc/examples/reliability_sensitivity/reliability/plot_estimate_probability_form.py
@@ -10,16 +10,10 @@
# - the probability estimation according to the FORM approximation, and the following SORM ones: Tvedt, Hohenbichler and Breitung,
# - the Hasofer reliability index and the generalized ones evaluated from the Breitung, Tvedt and Hohenbichler approximations,
# - the importance factors defined as the normalized director factors of the design point in the :math:`U`-space
-# - the sensitivity factors of the Hasofer reliability index and the FORM probability.
+# - the sensitivity factors of the Hasofer reliability index and the FORM probability,
# - the coordinates of the mean point in the standard event space.
#
-# The coordinates of the mean point in the standard event space is:
-#
-# .. math::
-# \frac{1}{E_1(-\beta)}\int_{\beta}^{\infty} u_1 p_1(u_1)du_1
-#
-#
-# where :math:`E_1` is the spheric univariate distribution of the standard space and :math:`\beta` is the reliability index.
+# See :ref:`FORM <form_approximation>` and :ref:`SORM <sorm_approximation>` for theoretical details.
# %%
# Model definition
diff --git a/python/doc/examples/reliability_sensitivity/reliability/plot_form_explained.py b/python/doc/examples/reliability_sensitivity/reliability/plot_form_explained.py
index fb21989725..0053f05628 100644
--- a/python/doc/examples/reliability_sensitivity/reliability/plot_form_explained.py
+++ b/python/doc/examples/reliability_sensitivity/reliability/plot_form_explained.py
@@ -11,228 +11,279 @@
# simple example. We focus on the different steps and compare them with an analytic
# computation whenever possible.
#
+# See :ref:`FORM <form_approximation>` and :ref:`SORM <sorm_approximation>` to get more theoretical details.
import openturns as ot
import openturns.viewer as otv
import numpy as np
# %%
-# Position of the problem
-# -----------------------
+# Context
+# -------
#
-# We consider a bivariate random vector :math:`X = (X_1, X_2)` with the following independent marginals:
+# We consider a bivariate random vector :math:`\inputRV = (X_1, X_2)` with independent components that follow:
#
-# - an exponential distribution with parameter :math:`\lambda=1`, :math:`X_1 \sim \mathcal{E}(1.0)` ;
-# - a standard unit gaussian :math:`X_2 \sim \mathcal{N}(0,1)`.
+# - the exponential distribution with parameter :math:`\lambda=1`, :math:`X_1 \sim \mathcal{E}(1.0)` ;
+# - the standard unit normal distribution :math:`X_2 \sim \mathcal{N}(0,1)`.
#
# The support of the input vector is :math:`[0, +\infty[ \times \mathbb{R}`
-#
-
-# %%
-distX1 = ot.Exponential(1.0)
-distX2 = ot.Normal()
-distX = ot.JointDistribution([distX1, distX2])
+dist_X1 = ot.Exponential(1.0)
+dist_X2 = ot.Normal()
+dist_X = ot.JointDistribution([dist_X1, dist_X2])
# %%
-# We can draw the bidimensional PDF of the distribution `distX` over :math:`[0,-10] \times [10,10]`:
+# We can draw the isolines of the PDF of the distribution `dist_X`:
ot.ResourceMap.SetAsUnsignedInteger("Contour-DefaultLevelsNumber", 8)
-graphPDF = distX.drawPDF([0, -10], [10, 10])
-graphPDF.setTitle(r"2D-PDF of the input variables $(X_1, X_2)$")
-graphPDF.setXTitle(r"$x_1$")
-graphPDF.setYTitle(r"$x_2$")
-graphPDF.setLegendPosition("lower right")
-contours = graphPDF.getDrawable(0).getImplementation()
+graph_PDF = dist_X.drawPDF([0.0, -10.0], [20.0, 10.0])
+graph_PDF.setTitle(r"2D-PDF of the input variables $(X_1, X_2)$")
+graph_PDF.setXTitle(r"$x_1$")
+graph_PDF.setYTitle(r"$x_2$")
+graph_PDF.setLegendPosition("lower right")
+contours = graph_PDF.getDrawable(0).getImplementation()
contours.setColorMapNorm("log")
-graphPDF.setDrawable(contours, 0)
-view = otv.View(graphPDF)
+graph_PDF.setDrawable(contours, 0)
+view = otv.View(graph_PDF, square_axes=True)
# %%
-# We consider the model :math:`f : (x_1, x_2) \mapsto x_1 x_2` which maps the random input vector :math:`X` to the output variable :math:`Y=f(X) \in \mathbb{R}`.
-# We also draw the isolines of the model `f`.
+# We consider the model from :math:`\Rset^2` into :math:`\Rset` defined by:
#
-f = ot.SymbolicFunction(["x1", "x2"], ["x1 * x2"])
-graphModel = f.draw([0.0, -10.0], [10.0, 10.0])
-graphModel.setXTitle(r"$x_1$")
-graphModel.setXTitle(r"$x_2$")
-graphModel.setTitle(r"Isolines of the model : $Y = f(X)$")
-view = otv.View(graphModel)
+# .. math::
+#
+# \model : (x_1, x_2) \mapsto x_1 x_2
+#
+# We start by drawing the isolines of the model :math:`\model`.
+g = ot.SymbolicFunction(["x1", "x2"], ["x1 * x2"])
+graph_model = g.draw([0.0, -10.0], [20.0, 10.0])
+graph_model.setXTitle(r"$x_1$")
+graph_model.setYTitle(r"$x_2$")
+graph_model.setTitle(r"Isolines of the model : $g$")
+view = otv.View(graph_model, square_axes=True)
# %%
+# We consider the univariate output variable:
+#
+# .. math::
+#
+# Y = \model(\inputRV)
+#
# We want to estimate the probability :math:`P_f` of the output variable to be greater than a prescribed threshold :math:`s=10` : this is the failure event.
-# This probability is simply expressed as an integral:
+# This probability is simply expressed for a continuous random vector :math:`\inputRV` as:
#
# .. math::
+# :label: PfDef
#
-# P_f = \int_{\mathcal{D}} \mathbf{1}_{\mathcal{D}}(x) df_{X_1,X_2}(x)
+#    P_f = \Prob{Y \geq s} = \int_{\set{D}} \inputMeasure(\vect{x}) \, d\vect{x}
+#
+# where:
+#
+# .. math::
#
-# where :math:`\mathcal{D} = \{ (x_1, x_2) \in [0,+\infty[ \times \mathbb{R} / x_1 x_2 \geq s \}` is the failure domain.
-# In the general case the probability density function :math:`f_{X_1,X_2}` and the domain of integration :math:`\mathcal{D}` are difficult to handle.
+# \set{D} = \{ (x_1, x_2) \in [0,+\infty[ \times \mathbb{R} \, | \, \model(x_1, x_2) \geq s \}
+#
+# is the failure domain and :math:`\inputMeasure` is the probability density function (PDF)
+# of :math:`\inputRV`.
# %%
# We first define RandomVector objects and the failure event associated to the output random variable.
-vectorX = ot.RandomVector(distX)
-vectorY = ot.CompositeRandomVector(f, vectorX)
+vector_X = ot.RandomVector(dist_X)
+vector_Y = ot.CompositeRandomVector(g, vector_X)
s = 10.0
-event = ot.ThresholdEvent(vectorY, ot.Greater(), s)
+event = ot.ThresholdEvent(vector_Y, ot.Greater(), s)
# %%
-# This event can easily be represented with a 1D curve as it is a branch of an hyperbole.
-# If :math:`y = x_1 x_2 = 10.0`, then the boundary of the domain of failure is the curve :
+# The boundary of the failure domain can easily be represented as it is a branch of a hyperbola: the
+# boundary is the graph of the function defined from :math:`\Rset` into :math:`\Rset` by:
+#
+# .. math::
+# :label: defH
+#
+# h : x_1 \mapsto x_2 = \frac{s}{x_1}
+#
+# The boundary of the failure domain is also the isoline of the model :math:`\model` associated to the
+# level :math:`s`:
#
# .. math::
#
-# h : x_1 \mapsto \frac{10.0}{x_1}
+# \partial \set{D} = \{(x_1, x_2)\, |\, \model(x_1, x_2) = s \}
#
+# We can draw it with the `draw` method of the function :math:`\model`.
+
+# %%
+nb_points = 101
+graph_g = g.draw([0.0, -10.0], [20.0, 10.0], [nb_points] * 2)
+draw_boundary = graph_g.getDrawable(0)
+draw_boundary.setLevels([s])
+draw_boundary.setLegend(r"Boundary $\partial \mathcal{D}$")
+graph_g.setDrawables([draw_boundary])
+
+# %%
+texts = [r" $\mathcal{D} = \{(x_1, x_2)\, |\, g(x_1, x_2) \geq 10 \}$"]
+text_graph = ot.Text([[10.0, 3.0]], texts)
+text_graph.setTextSize(1)
+text_graph.setColor("black")
+graph_g.add(text_graph)
# %%
-# We shall represent this curve using a :class:`~openturns.Contour` object.
-nx, ny = 15, 15
-xx = ot.Box([nx], ot.Interval([0.0], [10.0])).generate()
-yy = ot.Box([ny], ot.Interval([-10.0], [10.0])).generate()
-inputData = ot.Box([nx, ny], ot.Interval([0.0, -10.0], [10.0, 10.0])).generate()
-outputData = f(inputData)
-mycontour = ot.Contour(xx, yy, outputData)
-mycontour.setLevels([10.0])
-mycontour.setLabels(["10.0"])
-myGraph = ot.Graph("Representation of the failure domain", r"$X_1$", r"$X_2$", True, "")
-myGraph.add(mycontour)
+graph_g.setTitle("Failure domain in the physical space")
+graph_g.setXTitle(r"$x_1$")
+graph_g.setYTitle(r"$x_2$")
+graph_g.setLegendPosition("topright")
+
+view = otv.View(graph_g, square_axes=True)
# %%
-texts = [r" Event : $\mathcal{D} = \{Y \geq 10.0\}$"]
-myText = ot.Text([[4.0, 4.0]], texts)
-myText.setTextSize(1)
-myText.setColor("black")
-myGraph.add(myText)
-view = otv.View(myGraph)
+# We can superimpose the event boundary with the bivariate PDF isolines of the input distribution:
+draw_boundary.setColor("black")
+graph_PDF.add(draw_boundary)
+graph_PDF.setLegendPosition("lower right")
+view = otv.View(graph_PDF, square_axes=True)
# %%
-# We can superimpose the event boundary with the 2D-PDF ot the input variables :
+# From the previous figure, we observe that in the failure domain, the PDF takes small
+# (and even very small) values.
+# Consequently the failure probability :math:`P_f` is also expected to be small.
+# The FORM/SORM methods estimate the failure probability.
#
-mycontour.setColor("black")
-mycontour.setLabels(["event"])
-graphPDF.add(mycontour)
-graphPDF.setLegendPosition("lower right")
-view = otv.View(graphPDF)
# %%
-# From the previous figure we observe that in the failure domain the PDF takes small (and even very small) values.
-# Consequently the probability of the failure, the integral :math:`P_f` is also expected to be small.
-# The FORM/SORM methods estimate this kind of integral.
+# The FORM/SORM approximations
+# ----------------------------
+#
+# The basic steps of the FORM and SORM algorithms are:
+#
+# - use an isoprobabilistic transformation to map the input random vector into the standard space;
+# - find the design point which is the nearest point to the origin in the standard space;
+# - estimate the probability.
#
# %%
-# The FORM approximation
-# ----------------------
+# Isoprobabilistic transformation
+# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
-# The basic steps of the FORM (or SORM) algorithm are :
+# The interest of the isoprobabilistic transformation is the rotational
+# invariance of the distribution in the standard space. This property reduces the dimension
+# of the problem to 1. See :ref:`Isoprobabilistic transformation <isoprobabilistic_transformation>` for more theoretical details.
#
-# - an isoprobabilistic transform ;
-# - finding the design point : that is the nearest point wrt the origin in the standard space ;
-# - estimating the probability integral.
+# OpenTURNS has several isoprobabilistic transformations, depending on the distribution of the input random vector:
#
-# As mentioned, both the density function and the domain of integration are complex in general.
-# The first step of the FORM method makes the density easier to work with and the second step tackles
-# the domain of integration problem.
+# - the Nataf transformation is used if the input distribution has a normal copula,
+# - the Generalized Nataf transformation is used if the input distribution has an elliptical copula,
+# - the Rosenblatt transformation is used in any other cases.
#
-# Variable transform
-# ^^^^^^^^^^^^^^^^^^
+# The Nataf and Rosenblatt transformations map the input random vector into a vector that follows a
+# normal distribution with zero mean and unit variance. The Generalized Nataf transformation maps the
+# input random vector into a vector that follows the standard elliptical distribution associated to the
+# elliptical copula of the input distribution.
#
-
-# %%
-# OpenTURNS has several isoprobabilistic transforms and the FORM/SORM classes implement the Generalized
-# Nataf and Rosenblatt transforms. In this case the `distX` distribution is not elliptical so the default method is the Rosenblatt transform.
+# In this example, the input distribution is not elliptical so the isoprobabilistic transformation is the
+# Rosenblatt transformation.
#
-print("Is Elliptical ? ", distX.isElliptical())
+print("Is Elliptical ? ", dist_X.isElliptical())
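+
+# %%
+# Since the components of :math:`\inputRV` are independent, the copula of `dist_X` is the
+# independent copula, which is why the Rosenblatt transformation acts component by component:
+print(dist_X.getCopula())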
# %%
-# We seek an isoprobabilistic transform :math:`T` such as
+# The Rosenblatt transformation :math:`T` is defined by:
#
# .. math::
-# T : X \mapsto Z
+# :label: defT
#
-# where each component of :math:`Z` is a standard unit gaussian.
+# T : \vect{x} \mapsto \vect{z}
#
-# The isoprobabilistic transform and its inverse are methods of the distribution `distX` :
-transformation = distX.getIsoProbabilisticTransformation()
-inverseTransformation = distX.getInverseIsoProbabilisticTransformation()
-
-# %%
-# The main goal of this step is to work with a simpler probability density function of
-# the input variables as they will be standard gaussian unit and uncorrelated. The domain of
-# integration will still be complicated but will be handled with a well chosen approximate.
+# such that the random vector :math:`\standardRV = T(\inputRV)` follows a bivariate normal distribution
+# with zero mean and unit variance. It follows that the components :math:`Z_1` and
+# :math:`Z_2` are independent.
#
-
-# %%
-# We detail the Rosenblatt transform in this simple case.
-# In this example we consider independent variables so the transform is simpler, we only have to
-# perform the transformation on each variable. For the second one is already a standard unit gaussian
-# we transform the first variable only.
+# We detail the Rosenblatt transform in this simple case where the input random vector :math:`\inputRV`
+# has independent components. Then, the Rosenblatt transform is defined by:
+#
+# .. math::
#
+#    z_i = (\Phi^{-1} \circ F_i)(x_i)
+#
+# where :math:`F_i` is the cumulative distribution function (CDF) of :math:`X_i` and
+# :math:`\Phi` the CDF of the univariate normal distribution with zero mean and unit variance.
+# Note that in this example, :math:`\Phi^{-1} \circ F_2 = I_d` as :math:`F_2 = \Phi`.
+
+# The isoprobabilistic transform and its inverse are methods of the distribution:
+transformation = dist_X.getIsoProbabilisticTransformation()
+inverse_transformation = dist_X.getInverseIsoProbabilisticTransformation()
# %%
+# Let us detail this transformation, step by step.
# We draw a realization of the random input vector. This point is said to be in the physical space.
-# We shall focus on the first component.
-xi = vectorX.getRealization()
+xi = vector_X.getRealization()
+
+# %%
+# We build `zi`, the image of `xi` through the Rosenblatt transformation.
+# The point `zi` is said to be in the standard space. Note that the second component is left unchanged.
+ui = [dist_X1.computeCDF(xi[0]), dist_X2.computeCDF(xi[1])]
+zi = [ot.Normal().computeQuantile(ui[0])[0], ot.Normal().computeQuantile(ui[1])[0]]
+print(xi, "->", ui, "->", zi)
# %%
-# The first step of the Rosenblatt transform is to build a random variable :math:`u` with a uniform law in ]0,1[.
-# This is done through an evaluation of the CDF of `distX1` at the given point in the physical space.
-# Once again, please note that the second component is left unchanged.
+# We also build the isoprobabilistic transform :math:`T_1` and its inverse :math:`T_1^{-1}` for the
+# first marginal:
+#
+# .. math::
+# :label: detT1
+#
+# T_1 = \Phi^{-1} \circ F_1
#
-ui = [distX1.computeCDF(xi[0]), xi[1]]
+transform_X1 = dist_X1.getIsoProbabilisticTransformation()
+inverse_transform_X1 = dist_X1.getInverseIsoProbabilisticTransformation()
# %%
-# The second step is to build a standard unit gaussian from a uniform variable. This is done by a
-# simple call to the probit function. The point `zi` is said to be in the standard space.
-zi = [-ot.Normal().computeInverseSurvivalFunction(ui[0])[0], ui[1]]
+# We can implement the transformation using :math:`T_1` on the first component only, or
+# directly using :math:`T` on both components of `xi`:
+zi1D = [transform_X1([xi[0]])[0], xi[1]]
+zi2D = transformation(xi)
# %%
-# The sought transform then maps a point in the physical space to the standard space :
-print(xi, "->", ui, "->", zi)
+# We can check the result of our experiment: we observe that the results are the same.
+print("zi = ", zi)
+print("zi1D = ", zi1D)
+print("zi2D = ", zi2D)
# %%
-# We also build the isoprobabilistic transform :math:`T_1` and its inverse :math:`T_1^{-1}` for the
-# first marginal :
-transformX1 = distX1.getIsoProbabilisticTransformation()
-inverseTransformX1 = distX1.getInverseIsoProbabilisticTransformation()
+# The model in the standard space is defined by:
+#
+# .. math::
+#
+# \widetilde{\model} = \model \circ T^{-1}
+#
+# We can define it using the function composition capabilities of the library.
+g_tilde = ot.ComposedFunction(g, inverse_transformation)
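+
+# %%
+# As a sanity check, we can verify that composing the model with the inverse
+# transformation recovers the value of the model in the physical space,
+# i.e. :math:`\widetilde{\model}(T(\vect{x})) = \model(\vect{x})`:
+print(g(xi), "=", g_tilde(zi2D))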
# %%
-# We can check the result of our experiment against :
+# The failure domain in the standard space is defined by:
#
-# - the 2D-transform :math:`T` ;
-# - the 1D-transform :math:`T_1` and the second component unchanged ;
+# .. math::
+#
+# \set{\widetilde{D}} = \{ (z_1, z_2) \in [0,+\infty[ \times \mathbb{R} \, | \, \widetilde{\model}(z_1, z_2) \geq s \}
+#
+# and its boundary is defined by:
+#
+# .. math::
+#
+# \partial \set{\widetilde{D}} = \{ (z_1, z_2) \in [0,+\infty[ \times \mathbb{R} \, | \,
+# \widetilde{\model}(z_1, z_2) = s \}
#
-# and observe the results are the same.
-zi1D = [transformX1([xi[0]])[0], xi[1]]
-zi2D = transformation(xi)
-print("zi = ", zi)
-print("zi1D = ", zi1D)
-print("zi2D = ", zi2D)
-
# %%
-# We can represent the boundary of the event in the standard space : that is a composition of the
-# hyperbole :math:`h : x \mapsto 10/x` and the inverse transform :math:`T_1^{-1}` defined by
-# :math:`inverseTransformX1`.
-failureBoundaryPhysicalSpace = ot.SymbolicFunction(["x"], ["10.0 / x"])
-failureBoundaryStandardSpace = ot.ComposedFunction(
- failureBoundaryPhysicalSpace, inverseTransformX1
-)
-x = np.linspace(1.1, 5.0, 100)
-cx = np.array([failureBoundaryStandardSpace([xi])[0] for xi in x])
+# We draw the graph of :math:`\widetilde{\model}` in the standard space.
+graph_standard_space = g_tilde.draw([0.0, 0.0], [7.0, 7.0], [101] * 2)
-graphStandardSpace = ot.Graph(
- "Failure event in the standard space", r"$u_1$", r"$u_2$", True, ""
-)
-curveCX = ot.Curve(x, cx, r"Boundary of the event $\partial \mathcal{D}$")
-curveCX.setLineStyle("solid")
-curveCX.setColor("blue")
-graphStandardSpace.add(curveCX)
+draw_boundary_stand_space = graph_standard_space.getDrawable(0)
+draw_boundary_stand_space.setLevels([s])
+draw_boundary_stand_space.setLegend(r"Boundary $\partial \mathcal{\tilde{D}}$")
+graph_standard_space.setDrawables([draw_boundary_stand_space])
+graph_standard_space.setXTitle(r"$z_1$")
+graph_standard_space.setYTitle(r"$z_2$")
+graph_standard_space.setTitle("Failure domain in the standard space")
# %%
# We add the origin to the previous graph.
@@ -240,27 +291,27 @@
cloud.setColor("black")
cloud.setPointStyle("fcircle")
cloud.setLegend("origin")
-graphStandardSpace.add(cloud)
-graphStandardSpace.setGrid(True)
-graphStandardSpace.setLegendPosition("lower right")
+graph_standard_space.add(cloud)
+
+# Some annotations
+texts = [r"$\mathcal{\tilde{D}} = \{(z_1, z_2)\, |\, \tilde{g}(z_1, z_2) \geq 10 \}$"]
+text_graph = ot.Text([[4.0, 3.0]], texts)
+text_graph.setTextSize(1)
+text_graph.setColor("black")
+graph_standard_space.add(text_graph)
-# Some annotation
-texts = [r"Event : $\mathcal{D} = \{Y \geq 10.0\}$"]
-myText = ot.Text([[3.0, 4.0]], texts)
-myText.setTextSize(1)
-myText.setColor("black")
-graphStandardSpace.add(myText)
-view = otv.View(graphStandardSpace)
+graph_standard_space.setLegendPosition("topright")
+view = otv.View(graph_standard_space, square_axes=True)
# %%
# The design point
# ^^^^^^^^^^^^^^^^
#
-# The FORM and SORM methods assume that the failure probability integral has its support in
-# the vicinity of the closest point of the domain to the origin.
+# Due to the spherical distribution in the standard space, the area that contributes
+# the most to the integral defining the probability is the vicinity of the closest point
+# of the failure domain to the origin of the standard space.
# Then the second step of the method is to find this point, *the design point*, through a
# minimization problem under constraints.
-#
# %%
# We configure the Cobyla solver that we use for the optimization :
@@ -273,194 +324,238 @@
# %%
# We build the FORM algorithm with its basic constructor. The starting point for the optimization
-# algorithm is the mean of the input variables.
-algoFORM = ot.FORM(solver, event, distX.getMean())
+# algorithm is the mean of the input distribution.
+algo_FORM = ot.FORM(solver, event, dist_X.getMean())
# %%
-# We are ready to run the algorithm and store the result :
-algoFORM.run()
-result = algoFORM.getResult()
+# We are ready to run the algorithm and store the result.
+algo_FORM.run()
+result = algo_FORM.getResult()
# %%
# The design point can be retrieved in both physical and standard space with respectively the
-# `getPhysicalSpaceDesignPoint` and `getStandardSpaceDesignPoint` methods.
-designPointPhysicalSpace = result.getPhysicalSpaceDesignPoint()
-designPointStandardSpace = result.getStandardSpaceDesignPoint()
-print("Design point in physical space : ", designPointPhysicalSpace)
-print("Design point in standard space : ", designPointStandardSpace)
-
+# `getPhysicalSpaceDesignPoint` and `getStandardSpaceDesignPoint` methods. We denote them respectively
+# :math:`\vect{x}^*` and :math:`\vect{z}^*`.
+design_point_physical_space = result.getPhysicalSpaceDesignPoint()
+design_point_standard_space = result.getStandardSpaceDesignPoint()
+print("Design point in physical space : ", design_point_physical_space)
+print("Design point in standard space : ", design_point_standard_space)
# %%
-# We can get the Hasofer index with the `getHasoferReliabilityIndex` method which is the distance of the design point to the origin:
-betaHL = result.getHasoferReliabilityIndex()
-print("Hasofer index : ", betaHL)
-
+# We can get the Hasofer index with the `getHasoferReliabilityIndex` method which is the distance of
+# the design point to the origin:
+beta_HL = result.getHasoferReliabilityIndex()
+print("Hasofer index : ", beta_HL)
# %%
-# We visualize it on the previous graph.
-cloud = ot.Cloud([designPointStandardSpace[0]], [designPointStandardSpace[1]])
+# We visualize the design point on the previous graph.
+cloud = ot.Cloud([design_point_standard_space])
cloud.setColor("red")
cloud.setPointStyle("fcircle")
-cloud.setLegend("design point")
-graphStandardSpace.add(cloud)
-graphStandardSpace.setGrid(True)
-graphStandardSpace.setLegendPosition("lower right")
+cloud.setLegend(r"design point $z^*$")
+graph_standard_space.add(cloud)
+graph_standard_space.setGrid(True)
+graph_standard_space.setLegendPosition("lower right")
cc = ot.Curve(
- [0.0, designPointStandardSpace[0]],
- [0.0, designPointStandardSpace[1]],
+ [0.0, design_point_standard_space[0]],
+ [0.0, design_point_standard_space[1]],
r"$\beta_{HL}$ distance",
)
cc.setLineStyle("dashed")
cc.setColor("black")
-graphStandardSpace.add(cc)
-view = otv.View(graphStandardSpace)
+graph_standard_space.add(cc)
+graph_standard_space.setLegendPosition("topright")
+view = otv.View(graph_standard_space, square_axes=True)
# %%
-# Estimating the failure probability integral
-# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+# The FORM approximation
+# ^^^^^^^^^^^^^^^^^^^^^^
#
-# The last step of the FORM algorithm is to replace the domain of integration by the half-space at
-# the design point. In this simple example the half-space is delimited by the tangent at the design
-# point in the standard space.
+# The last step of the FORM algorithm is to replace the failure domain boundary by the hyperplane
+# which is tangent to the failure domain at the design point in the standard space.
+# To draw this hyperplane :math:`\mathcal{P}_{\vect{z}^*}`, we define the function from
+# :math:`\Rset^2` to :math:`\Rset` defined by:
#
-# The expression of the failure domain boundary in the standard space is the composition of the
-# hyperbole :math:`h:x \mapsto 10/x` and the inverse transform on the first variable. We can compute
-# the gradient (here the first derivative of a 1D function :math:`h(u_0)` ) at any given point with the
-# getGradient method :
-
-# %%
-u0 = [designPointStandardSpace[0]]
-du0 = failureBoundaryStandardSpace.getGradient().gradient(u0)
-print("abscissa of the design point u0 = ", u0[0])
-print("value of the failure boundary at u0 = ", failureBoundaryStandardSpace(u0)[0])
-print("value of the gradient of the failure boundary at u0 = ", du0[0, 0])
-
-
-# %%
-# In the standard space the equation of the tangent :math:`\Pi_{u_0}(x)` is given by
+# .. math::
+#
+# M \rightarrow \scalarproduct{\nabla \widetilde{\model}(\vect{z}^*)}{\vect{Z^*M}}
+#
+# where :math:`\nabla \widetilde{\model}(\vect{z}^*)` is the gradient of the
+# function :math:`\widetilde{\model}` at the design point :math:`Z^*`,
+# whose coordinates are :math:`\vect{z}^*`.
+# Then, the tangent hyperplane is the isoline associated to the zero level of the previous function:
#
# .. math::
#
-# \Pi_{u_0}(x) = (h \circ T^{-1}) (u_0) + \frac{d}{dx} (h \circ T^{-1}) (u_0) (x-u_0)
+#    \mathcal{P}_{\vect{z}^*} = \{ \vect{z} \in \Rset^2 \, | \,
+# \scalarproduct{\nabla \widetilde{\model}(\vect{z}^*)}{\vect{Z^*M}} = 0\}
#
-x = np.linspace(1.1, 5.0, 100)
-hyperplane = failureBoundaryStandardSpace(u0)[0] + du0[0, 0] * (x - u0)
-curveHyperplane = ot.Curve(x, hyperplane, r"$\Pi_{u_0}$ (FORM)")
-curveHyperplane.setLineStyle("dashed")
-curveHyperplane.setColor("green")
-graphStandardSpace.add(curveHyperplane)
-view = otv.View(graphStandardSpace)
+# We can implement this function thanks to the :class:`~openturns.LinearFunction` class.
+center = design_point_standard_space
+grad_design_point = g_tilde.gradient(design_point_standard_space)
+constant = [0.0]
+linear_mat = ot.Matrix(1, 2)
+linear_mat[0, 0] = grad_design_point[0, 0]
+linear_mat[0, 1] = grad_design_point[1, 0]
+linear_proj = ot.LinearFunction(center, constant, linear_mat)
+
+graph_tangent = linear_proj.getMarginal(0).draw([0.0, 0.0], [7.0, 7.0], [101] * 2)
+draw_tangent = graph_tangent.getDrawable(0)
+draw_tangent.setLevels([0])
+draw_tangent.setLegend(r"$\mathcal{\Pi}_{z^*}$ (FORM)")
+draw_tangent.setColor("green")
+draw_tangent.setLineStyle("dashed")
+graph_standard_space.add(draw_tangent)
+graph_standard_space.setLegendPosition("topright")
+view = otv.View(graph_standard_space, square_axes=True)
# %%
-# In the standard space the PDF of the input variables is rotationally invariant so
+# Depending on whether the origin of the standard space :math:`\vect{0}` belongs to the failure domain,
+# the FORM probability is defined by:
#
# .. math::
#
-# P_f \approx E(\beta_{HL}),
+# P_{FORM} \approx E(-\beta_{HL}) & \quad \mbox{if } \vect{0} \notin \set{\widetilde{D}} \\
+# P_{FORM} \approx E(+\beta_{HL}) & \quad \mbox{if } \vect{0} \in \set{\widetilde{D}}
#
-# where :math:`E(.)` is the survival function of the standard unit gaussian.
+# where :math:`E(.)` is the marginal cumulative distribution function along any direction of
+# the spherical distribution in the standard space. In this example, this is the normal distribution.
+# So we have:
#
-pf = ot.Normal().computeSurvivalFunction(betaHL)
-print("FORM : Pf = ", pf)
+isOriginFail = result.getIsStandardPointOriginInFailureSpace()
+normal = ot.Normal()
+if isOriginFail:
+ pf_FORM = normal.computeCDF(beta_HL)
+else:
+ pf_FORM = normal.computeCDF(-beta_HL)
+print("FORM : Pf_FORM = ", pf_FORM)
# %%
-# This probability of failure is the one computed in the FORMResult and obtained with the `getEventProbability` method:
+# This failure probability is computed by the FORM algorithm and can be obtained
+# with the `getEventProbability` method. We check that we obtain the same result.
pf = result.getEventProbability()
-print("Probability of failure (FORM) Pf = ", pf)
-
+print("Probability of failure (FORM) Pf_FORM = ", pf)
# %%
# The SORM approximation
-# ----------------------
+# ^^^^^^^^^^^^^^^^^^^^^^
+#
+# The SORM approximation uses the main curvatures :math:`\kappa_i^0` of the homothetic of the failure domain
+# at distance 1 from the origin. These curvatures are calculated at the design point.
+# They are linked to the curvatures :math:`\kappa_i` of the failure domain by:
+#
+# .. math::
+#
+# \kappa_i^0 = \beta_{HL} \kappa_i
#
-# The SORM approximate uses an osculating paraboloid instead of the half-space delimited by the
-# tangent at the design point. In this case it is a simple parabola we can obtain through Taylor expansion at the design point.
-# However, in the general case one has to manipulate the gradient and the hessian in the
-# standard space which is cumbersome.
+# The Breitung approximation is valid for :math:`\beta_{HL} \rightarrow +\infty` and is defined by:
#
+# .. math::
+#
+# P_{SORM, Breitung} \approx E(-\beta_{HL}) \prod_{i=1}^{d-1} \dfrac{1}{\sqrt{1+\kappa_i^0}} &
+# \mbox{if } \vect{0} \notin \set{\widetilde{D}} \\
+# P_{SORM, Breitung} \approx E(+\beta_{HL}) \prod_{i=1}^{d-1} \dfrac{1}{\sqrt{1+\kappa_i^0}} &
+# \mbox{if } \vect{0} \in \set{\widetilde{D}}
+#
+# and approximates the boundary by the osculating paraboloid at the design point.
+#
+# Note that the term :math:`\kappa_i^0` does not depend on :math:`\beta_{HL}`.
# %%
-# We need the value of the second derivative of the failure boundary function at the design point in
-# the standard space :
-u0 = [designPointStandardSpace[0]]
-d2u0 = failureBoundaryStandardSpace.getHessian().hessian(u0)
-print("abscissa of the design point u0 = ", u0[0])
-print("value of the hessian of the failure boundary at u0 = ", d2u0[0, 0, 0])
+# In this example, we can easily implement the boundary of the failure domain in the
+# physical space, using the function :math:`h` defined in :eq:`defH`.
+#
+# In the standard space, the boundary is defined by the composed function
+# :math:`z_1 \mapsto h \circ T_1^{-1}(z_1)`.
+failure_boundary_physical_space = ot.SymbolicFunction(["x"], ["10.0 / x"])
+failure_boundary_standard_space = ot.ComposedFunction(
+ failure_boundary_physical_space, inverse_transform_X1
+)
+# %%
+# We need the value of the second derivative of the failure boundary function
+# at the abscissa of the design point in the standard space:
+z1_star = [design_point_standard_space[0]]
+dz1_star = failure_boundary_standard_space.getGradient().gradient(z1_star)
+d2z1_star = failure_boundary_standard_space.getHessian().hessian(z1_star)
+print("first component of the design point = ", z1_star[0])
+print("second component of the design point = ", failure_boundary_standard_space(z1_star)[0])
+print("value of the hessian of the failure boundary at this abscissa= ", d2z1_star[0, 0, 0])
# %%
-# In the standard space the equation of the osculating parabola :math:`\mathcal{P}_{u_0}(x)` at :math:`u_0` is given by
+# In the standard space, the osculating parabola :math:`\mathcal{P}_{\vect{z}^*}`
+# at :math:`\vect{z}^*` is the graph of the function defined by:
#
# .. math::
#
-# \mathcal{P}_{u_0}(x) = h \circ T^{-1} (u_0) + \frac{d}{dx} (h \circ T^{-1})(u_0) (x-u_0) + \frac{1}{2} \frac{d^2}{dx^2} (h \circ T^{-1})(u_0) (x-u_0)^2
+#    z_1 \mapsto h \circ T_1^{-1} (z_1^*) + \frac{d}{dz_1} (h \circ T_1^{-1})(z_1^*) (z_1-z_1^*) +
+# \frac{1}{2} \frac{d^2}{dz_1^2} (h \circ T_1^{-1})(z_1^*) (z_1-z_1^*)^2
#
-x = np.linspace(1.1, 5.0, 100)
+z = np.linspace(1.1, 4.0, 100)
parabola = (
- failureBoundaryStandardSpace(u0)[0]
- + du0[0, 0] * (x - u0)
- + 0.5 * d2u0[0, 0, 0] * (x - u0) ** 2
+ failure_boundary_standard_space(z1_star)[0]
+ + dz1_star[0, 0] * (z - z1_star)
+ + 0.5 * d2z1_star[0, 0, 0] * (z - z1_star) ** 2
)
-curveParabola = ot.Curve(x, parabola, r"$\mathcal{P}_{u_0}$ (SORM)")
-curveParabola.setLineStyle("dashed")
-curveParabola.setColor("orange")
-graphStandardSpace.add(curveParabola)
-view = otv.View(graphStandardSpace)
+curve_parabola = ot.Curve(z, parabola, r"$\mathcal{P}_{z^*}$ (SORM)")
+curve_parabola.setLineStyle("dashed")
+curve_parabola.setColor("orange")
+graph_standard_space.add(curve_parabola)
+graph_standard_space.setLegendPosition("topright")
+view = otv.View(graph_standard_space)
# %%
# The next step is to estimate the principal curvatures of the osculating paraboloid.
#
-# For any regular function :math:`g` the curvature :math:`\kappa(x_0)` at the point :math:`x_0` in cartesian coordinates reads as
+# For any regular function :math:`\ell: \Rset \rightarrow \Rset` the curvature :math:`\kappa(x)` at the point :math:`x` in
+# cartesian coordinates reads as:
#
# .. math::
#
-# \kappa(x_0) = \frac{g''(x_0)}{(1+[g'(x_0)]^2)^{3/2}}.
+# \kappa(x) = \frac{\ell''(x)}{(1+[\ell'(x)]^2)^{3/2}}.
#
-# For the oscillating parabola of concern we use the gradient and hessian previously computed :
+# For the osculating parabola of concern we use the gradient and hessian previously computed:
#
-curvature = (d2u0[0, 0, 0]) / (1 + (du0[0, 0]) ** 2) ** (3 / 2)
+curvature = (d2z1_star[0, 0, 0]) / (1 + (dz1_star[0, 0]) ** 2) ** (3 / 2)
print("Curvature (analytic formula) = ", curvature)
# %%
# We build the SORM algorithm and run it:
-algoSORM = ot.SORM(solver, event, distX.getMean())
-algoSORM.run()
+algo_SORM = ot.SORM(solver, event, dist_X.getMean())
+algo_SORM.run()
# %%
-# The SORM result is obtained with the `getResult` method :
-resultSORM = algoSORM.getResult()
+# The SORM result is obtained with the `getResult` method:
+result_SORM = algo_SORM.getResult()
# %%
# The principal curvatures of the osculating paraboloid at the design point are obtained by the
-# `getSortedCurvatures` method :
-print("Curvature (estimated) = ", resultSORM.getSortedCurvatures()[1])
+# `getSortedCurvatures` method:
+print("Curvature (library) = ", result_SORM.getSortedCurvatures()[1])
# %%
-# Once the curvature is obtained there are several ways of approximating the failure probability :math:`P_f`.
+# Once the curvature is computed, there are several ways of approximating the failure probability :math:`P_f`.
# The library implements the Breitung, Hohenbichler and Tvedt estimates.
-#
-# For instance, the Breitung approximation gives
-#
-# .. math::
-#
-# P_f \approx E(\beta_{HL}) \frac{1}{\sqrt{1+\beta_{HL}\kappa}}
-#
-coeff = (1.0 + betaHL * curvature) ** (-0.5)
-pf = (1.0 - ot.Normal().computeCDF(betaHL)) * coeff
-print("SORM : Pf = ", pf)
+# We detail here the calculus of the Breitung approximation.
+coeff = (1.0 + beta_HL * curvature) ** (-0.5)
+if isOriginFail:
+    pf_SORM = normal.computeCDF(beta_HL) * coeff
+else:
+    pf_SORM = normal.computeCDF(-beta_HL) * coeff
+print("SORM : Pf_SORM = ", pf_SORM)
# %%
# We can compare with the different estimators:
-pfBreitung = resultSORM.getEventProbabilityBreitung()
-pfHohenbichler = resultSORM.getEventProbabilityHohenbichler()
-pfTvedt = resultSORM.getEventProbabilityTvedt()
+pf_Breitung = result_SORM.getEventProbabilityBreitung()
+pf_Hohenbichler = result_SORM.getEventProbabilityHohenbichler()
+pf_Tvedt = result_SORM.getEventProbabilityTvedt()
-print("Probability of failure (SORM Breintung) Pf = ", pfBreitung)
-print("Probability of failure (SORM Hohenbichler) Pf = ", pfHohenbichler)
-print("Probability of failure (SORM Tvedt) Pf = ", pfTvedt)
+print("Probability of failure (SORM Breintung) Pf = ", pf_Breitung)
+print("Probability of failure (SORM Hohenbichler) Pf = ", pf_Hohenbichler)
+print("Probability of failure (SORM Tvedt) Pf = ", pf_Tvedt)
# %%
# Display all figures
diff --git a/python/doc/examples/reliability_sensitivity/sensitivity_analysis/plot_hsic_estimators_ishigami.py b/python/doc/examples/reliability_sensitivity/sensitivity_analysis/plot_hsic_estimators_ishigami.py
index 9af72bdec2..183073b428 100644
--- a/python/doc/examples/reliability_sensitivity/sensitivity_analysis/plot_hsic_estimators_ishigami.py
+++ b/python/doc/examples/reliability_sensitivity/sensitivity_analysis/plot_hsic_estimators_ishigami.py
@@ -13,7 +13,6 @@
# HSIC estimators rely on a reproducing kernel of a Hilbert space. We can use them to compute sensitivity
# indices. We present the methods on the :ref:`Ishigami function`.
-
# %%
# Definition of the model
# -----------------------
@@ -148,7 +147,7 @@
dist2criticalDomain = ot.DistanceToDomainFunction(criticalDomain)
# %%
-# We define the parameters in our function from the output sample
+# We define the empirical parameter values in our function from the output sample
s = 0.1 * Y.computeStandardDeviation()[0]
# %%
@@ -158,6 +157,11 @@
phi = ot.ParametricFunction(f, [1], [s])
filterFunction = ot.ComposedFunction(phi, dist2criticalDomain)
+# %%
+# We modify the output covariance kernel so as to adapt it to the filtered output
+Y_filtered = filterFunction(Y)
+outputCovariance.setScale(Y_filtered.computeStandardDeviation())
+covarianceModelCollection[-1] = outputCovariance
# %%
# We choose an unbiased estimator
@@ -214,6 +218,11 @@
# Here the weight function is the filter function we used previously.
weightFunction = filterFunction
+# %%
+# We revert to the covariance kernel associated to the unfiltered output
+outputCovariance.setScale(Y.computeStandardDeviation())
+covarianceModelCollection[-1] = outputCovariance
+
# %%
# We have to select a biased (but asymptotically unbiased) estimator
estimatorType = ot.HSICVStat()
diff --git a/python/doc/math_notations.sty b/python/doc/math_notations.sty
index 4468fe6546..eaee80b7cb 100644
--- a/python/doc/math_notations.sty
+++ b/python/doc/math_notations.sty
@@ -29,11 +29,15 @@
\newcommand{\outputRV}{\vect{Y}} % The output random vector of the model
\newcommand{\inputMeasure}{\mu_{\inputRV}} % The distribution of the input random vector of the model
\newcommand{\outputMeasure}{\mu_{\outputRV}} % The distribution of the output random vector of the model
-% Realization of the random vetor
-\newcommand{\inputReal}{\vect{x}} % An observation of the input random vector of the model
-\newcommand{\outputReal}{\vect{y}} % An observation of the output random vector of the model
\newcommand{\standardRV}{\vect{Z}} % The standard random vector (e.g. for polynomial chaos expansion)
\newcommand{\RVU}{\vect{U}}
+
+\newcommand{\pdf}{\inputMeasure(\inputReal)}
+
+% Realization of the random vector
+\newcommand{\inputReal}{\vect{x}} % An observation of the input random vector of the model
+\newcommand{\outputReal}{\vect{y}} % An observation of the output random vector of the model
+
\newcommand{\standardDim}{d_Z} % The dimension of the standard random vector
\newcommand{\dimU}{d_U}
\newcommand{\standardReal}{\vect{z}} % An observation of the standard random vector
@@ -41,17 +45,15 @@
\newcommand{\supp}[1] {\operatorname{supp}\left(#1\right)} % The support of a distribution
\newcommand{\sampleSize}{n} % The sample size
-\newcommand{\metamodel}{{\widehat{\model}}}
-
\newcommand{\scalarproduct}[2]{\left\langle #1, #2 \right\rangle}
% A set
\newcommand{\set}[1]{\mathcal{#1}}
-% The set of standard input random observations
+% The set of standard input variables
\newcommand{\standardInputSpace}{\set{Z}}
-% The set of physical input random observations
+% The set of physical input variables
\newcommand{\physicalInputSpace}{\set{X}}
-% The set of physical output random observations
+% The set of physical output variables
\newcommand{\physicalOutputSpace}{\set{Y}}
% The probability density function of the input random vector
@@ -71,7 +73,6 @@
\newcommand{\Econd}[2]{{\mathbb E}_{#1}\left[ #2 \right]}
\newcommand{\Prob}[1]{{\mathbb P}\left( #1 \right)}
\newcommand{\ProbCond}[2]{{\mathbb P}_{#1}\left( #2 \right)}
-\newcommand{\pdf}{f_{\un{X}}(\un{x})}
\newcommand{\ech}{\left\{ x_1, \, \dots\,, x_N \right\}}
\newcommand{\matcov} {\mathbf C}
\newcommand{\matcor} {\mathbf R}
diff --git a/python/doc/pyplots/GaussianProcessFitter.py b/python/doc/pyplots/GaussianProcessFitter.py
new file mode 100644
index 0000000000..4ba80a0e83
--- /dev/null
+++ b/python/doc/pyplots/GaussianProcessFitter.py
@@ -0,0 +1,33 @@
+import openturns as ot
+import openturns.experimental as otexp
+from openturns.viewer import View
+
+f = ot.SymbolicFunction(["x"], ["x * sin(x)"])
+sampleX = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0], [7.0], [8.0]]
+sampleY = f(sampleX)
+basis = ot.Basis(
+ [ot.SymbolicFunction(["x"], ["1"])]
+)
+covarianceModel = ot.SquaredExponential([1.0])
+covarianceModel.setActiveParameter([])
+fitter_algo = otexp.GaussianProcessFitter(sampleX, sampleY, covarianceModel, basis)
+fitter_algo.run()
+fitter_result = fitter_algo.getResult()
+
+graph = f.draw(0.0, 10.0)
+graph.add(fitter_result.getMetaModel().draw(0.0, 10.0))
+graph.add(ot.Cloud(sampleX, sampleY))
+
+algo = otexp.GaussianProcessRegression(fitter_result)
+algo.run()
+result = algo.getResult()
+fhat = result.getMetaModel()
+
+graph.add(fhat.draw(0.0, 10.0))
+graph.setColors(["blue", "red", "black", "pink"])
+graph.setLegends(["model", "GP Fitter", "sample", "GP Regression"])
+graph.setLegendPosition("upper left")
+graph.setTitle(r"$y(x)=x * sin(x)$")
+graph.setYTitle(r"$f$")
+graph.setXTitle(r"$x$")
+View(graph, figure_kw={"figsize": (8, 4)})
diff --git a/python/doc/pyplots/GaussianProcessRegression.py b/python/doc/pyplots/GaussianProcessRegression.py
new file mode 100644
index 0000000000..bf94c50231
--- /dev/null
+++ b/python/doc/pyplots/GaussianProcessRegression.py
@@ -0,0 +1,45 @@
+import openturns as ot
+import openturns.experimental as otexp
+from openturns.viewer import View
+
+f = ot.SymbolicFunction(["x"], ["x * sin(x)"])
+sampleX = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0], [7.0], [8.0]]
+sampleY = f(sampleX)
+basis = ot.Basis(
+ [ot.SymbolicFunction(["x"], ["1"])]
+)
+covarianceModel = ot.SquaredExponential([1.0])
+covarianceModel.setActiveParameter([])
+fitter_algo = otexp.GaussianProcessFitter(sampleX, sampleY, covarianceModel, basis)
+fitter_algo.run()
+algo = otexp.GaussianProcessRegression(fitter_algo.getResult())
+algo.run()
+result = algo.getResult()
+fhat = result.getMetaModel()
+
+graph = f.draw(0.0, 10.0)
+graph.add(fhat.draw(0.0, 10.0))
+graph.add(ot.Cloud(sampleX, sampleY))
+graph.setColors(["blue", "red", "black"])
+
+# Conditional variance
+gccc = otexp.GaussianProcessConditionalCovariance(result)
+# Variance per marginal & extract variance component
+newSampleX = ot.Sample.BuildFromPoint([0.0, 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5])
+newSampleX.add(sampleX)
+newSampleX = newSampleX.sort()
+newSampleY = fhat(newSampleX)
+conditionalVariance = gccc.getConditionalMarginalVariance(newSampleX) + [1e-15]
+conditionalSigma = ot.SymbolicFunction("x", "sqrt(x)")(conditionalVariance)
+alpha = 0.1
+bilateralCI = ot.Normal().computeBilateralConfidenceInterval(1.0 - alpha)
+lowerCI = newSampleY + conditionalSigma * bilateralCI.getLowerBound()
+upperCI = newSampleY + conditionalSigma * bilateralCI.getUpperBound()
+boundsPoly = ot.Polygon.FillBetween(newSampleX, lowerCI, upperCI)
+boundsPoly.setColor("green")
+boundsPoly_legend = "%d%% C.I." % ((1.0 - alpha) * 100)
+graph.add(boundsPoly)
+graph.setLegends(["model", "meta model", "sample", boundsPoly_legend])
+graph.setLegendPosition("upper left")
+graph.setTitle("y(x)=x * sin(x)")
+View(graph, figure_kw={"figsize": (8, 4)})
diff --git a/python/doc/theory/meta_modeling/cross_validation.rst b/python/doc/theory/meta_modeling/cross_validation.rst
index d346aaa680..8c1e36ebf3 100644
--- a/python/doc/theory/meta_modeling/cross_validation.rst
+++ b/python/doc/theory/meta_modeling/cross_validation.rst
@@ -33,9 +33,9 @@ in terms of the weighted :math:`L^2`-norm:
.. math::
\operatorname{MSE} \left(\metaModel\right)
& = \left\| \model(\inputReal) - \metaModel(\inputReal) \right\|_{L^2(\physicalInputSpace)}^2 \\
- & = \int_{\physicalInputSpace} \left( \model(\inputReal) - \metaModel(\inputReal) \right)^2 \inputProbabilityDensityFunction(\inputReal) d\inputReal
+ & = \int_{\physicalInputSpace} \left( \model(\inputReal) - \metaModel(\inputReal) \right)^2 \pdf d\inputReal
-where :math:`\inputProbabilityDensityFunction` is the probability density function
+where :math:`\inputMeasure` is the probability density function
of the random vector :math:`\inputRV`.
In this section, we present the cross-validation of linear least squares
models, as presented in [wang2012]_ page 485.
diff --git a/python/doc/theory/reliability_sensitivity/form_approximation.rst b/python/doc/theory/reliability_sensitivity/form_approximation.rst
index b399f6f29f..43d2c29fce 100644
--- a/python/doc/theory/reliability_sensitivity/form_approximation.rst
+++ b/python/doc/theory/reliability_sensitivity/form_approximation.rst
@@ -69,7 +69,7 @@ can be obtained exactly, thanks to the rotation invariance of the standard distr
\right.
where :math:`\beta` is the Hasofer-Lind reliability index, defined as the distance of the design point
-:math:`P^*` to the origin of the standard space and :math:`E` the marginal cumulative density function
+:math:`P^*` to the origin of the standard space and :math:`E` the marginal cumulative distribution function
along any direction of
the spherical distribution in the standard space (refer to :ref:`nataf_transformation` and
:ref:`rosenblatt_transformation`).
diff --git a/python/doc/theory/reliability_sensitivity/sorm_approximation.rst b/python/doc/theory/reliability_sensitivity/sorm_approximation.rst
index 750ad58f0e..401f92e03f 100644
--- a/python/doc/theory/reliability_sensitivity/sorm_approximation.rst
+++ b/python/doc/theory/reliability_sensitivity/sorm_approximation.rst
@@ -38,7 +38,7 @@ Let :math:`(\kappa_i)_{1 \leq i \leq \inputDim-1}` the :math:`(n-1)` main curva
point in the standard space.
Several approximations are available,
detailed here in the case where the origin of the standard
-space does not belong to the failure domain:
+space does not belong to the failure domain.
**Breitung’s formula** is an asymptotic result. The
usual formula used in the normal standard space has been generalized
@@ -54,7 +54,7 @@ density function of the spherical distributions in the standard space:
where :math:`\Phi` is the cumulative distribution function of the standard 1D normal
-distribution and :math:`(\kappa_1^0, \dots, \kappa_\inputDim^0)` the main curvatures of the
+distribution and :math:`(\kappa_1^0, \dots, \kappa_{\inputDim-1}^0)` the main curvatures of the
homothetic of the failure domain at distance 1 from the origin.
**Hohenbichler’s formula** is an approximation of :eq:`PfSORM_B`:
@@ -62,7 +62,7 @@ homothetic of the failure domain at distance 1 from the origin.
.. math::
:label: PfSORM_HB
- P_{Hohenbichler} = \Phi(-\beta) \prod_{i=1}^{d-1} \left( 1+\frac{\phi(-\beta)}{\beta \Phi(-\beta)}\kappa_i^0
+ P_{Hohenbichler} = \Phi(-\beta) \prod_{i=1}^{\inputDim-1} \left( 1+\frac{\phi(-\beta)}{\beta \Phi(-\beta)}\kappa_i^0
\right) ^{-1/2}
According to Mill's ratio, :math:`\frac{\phi(-\beta)}{\beta \Phi(-\beta)}` tends to 1 when :math:`\beta` tends
diff --git a/python/doc/user_manual/functions.rst b/python/doc/user_manual/functions.rst
index 09f6ce9e94..67762115e3 100644
--- a/python/doc/user_manual/functions.rst
+++ b/python/doc/user_manual/functions.rst
@@ -194,12 +194,10 @@ Differential equation solvers
RungeKutta
Fehlberg
-Special constants & functions
-=============================
-
Special functions
------------------
-Special functions are implemented as C++ static methods and are hence casted
+=================
+
+Special functions are implemented as C++ static methods and are available
as Python functions.
.. autosummary::
@@ -265,6 +263,18 @@ as Python functions.
.. _coupling_tools:
+
+Special constants
+=================
+
+- SpecFunc.MaxScalar : inf
+- SpecFunc.ActualMaxScalar : maximum float value
+- SpecFunc.LogMaxScalar : log of SpecFunc.MaxScalar (inf)
+- SpecFunc.MinScalar : minimum positive float value
+- SpecFunc.LowestScalar : -inf
+- SpecFunc.LogMinScalar : log of SpecFunc.MinScalar
+
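+For instance, a quick check from Python (printed values depend on the
+platform floating-point model)::
+
+    import openturns as ot
+    print(ot.SpecFunc.MaxScalar)   # inf
+    print(ot.SpecFunc.MinScalar)   # smallest positive float value
+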
+
External code coupling
======================
diff --git a/python/doc/user_manual/response_surface/kriging.rst b/python/doc/user_manual/response_surface/kriging.rst
index cd76579ffd..c2fe61f39a 100644
--- a/python/doc/user_manual/response_surface/kriging.rst
+++ b/python/doc/user_manual/response_surface/kriging.rst
@@ -16,6 +16,23 @@ Kriging algorithm
:template: class.rst_t
KrigingResult
+
+Gaussian Process Regression
+===========================
+
+.. autosummary::
+ :toctree: _generated/
+ :template: classWithPlot.rst_t
+
+ experimental.GaussianProcessRegression
+ experimental.GaussianProcessFitter
+
+ :template: class.rst_t
+ experimental.GaussianProcessRegressionResult
+ experimental.GaussianProcessFitterResult
+ experimental.GaussianProcessConditionalCovariance
+
+
Construction of the regression basis
====================================
diff --git a/python/src/BoxCoxTransform_doc.i.in b/python/src/BoxCoxTransform_doc.i.in
index d1693e7d70..c98552ba14 100644
--- a/python/src/BoxCoxTransform_doc.i.in
+++ b/python/src/BoxCoxTransform_doc.i.in
@@ -4,9 +4,9 @@
Refer to :ref:`boxcox_transformation`.
Available constructors:
- BoxCoxTransform(*lambdaVect, shiftVect = [0]*)
+ BoxCoxTransform(*lambdaVect, shiftVect*)
- BoxCoxTransform(*lambda, shift=0*)
+ BoxCoxTransform(*lambda, shift*)
Parameters
----------
diff --git a/python/src/CMakeLists.txt b/python/src/CMakeLists.txt
index 99ab535387..87257f9f2d 100644
--- a/python/src/CMakeLists.txt
+++ b/python/src/CMakeLists.txt
@@ -1031,6 +1031,11 @@ ot_add_python_module(experimental experimental_module.i
RankSobolSensitivityAlgorithm.i RankSobolSensitivityAlgorithm_doc.i.in
CubaIntegration.i CubaIntegration_doc.i.in
ExperimentIntegration.i ExperimentIntegration_doc.i.in
+ GaussianProcessFitterResult.i GaussianProcessFitterResult_doc.i.in
+ GaussianProcessFitter.i GaussianProcessFitter_doc.i.in
+ GaussianProcessRegressionResult.i GaussianProcessRegressionResult_doc.i.in
+ GaussianProcessRegression.i GaussianProcessRegression_doc.i.in
+ GaussianProcessConditionalCovariance.i GaussianProcessConditionalCovariance_doc.i.in
)
set (OPENTURNS_PYTHON_MODULES ${OPENTURNS_PYTHON_MODULES} PARENT_SCOPE) # for the docstring test
diff --git a/python/src/CleaningStrategy_doc.i.in b/python/src/CleaningStrategy_doc.i.in
index fabfd10054..f2d47af70b 100644
--- a/python/src/CleaningStrategy_doc.i.in
+++ b/python/src/CleaningStrategy_doc.i.in
@@ -12,11 +12,9 @@ Parameters
orthogonalBasis : :class:`~openturns.OrthogonalBasis`
An OrthogonalBasis.
maximumDimension : positive int
- Maximum index that can be used by the :class:`~openturns.EnumerateFunction`
- to determine the last term of the basis.
-maximumSize : positive int
- Parameter that characterizes the cleaning strategy. It represents the
- number of efficient coefficients of the basis.
+ Maximum index of the basis function that can be used by the :class:`~openturns.EnumerateFunction`.
+maximumSize : positive int, :math:`\textrm{maximumSize} \leq \textrm{maximumDimension}`
+ Maximum number of functions used in the meta model.
Its default value is the `CleaningStrategy-DefaultMaximumSize` key of
the :class:`~openturns.ResourceMap`.
significanceFactor : float
@@ -43,26 +41,24 @@ limit the chances of potential surrogate model overfitting.
Let *maximumDimension* be the number of coefficients in the full expansion
and let *maximumSize* be the maximum number of coefficients defined
-by the user.
-On output, at most the minimum of *maximumDimension* and *maximumSize*
+by the user. On output, at most *maximumSize*
coefficients are selected.
Let :math:`\epsilon` be the value of the *significanceFactor*.
The method proceeds as follows:
-- Generate an initial PC basis made of the *maximumDimension* first polynomials
+- Generate an initial PC basis made of the *maximumSize* first functions
(according to the adopted :class:`~openturns.EnumerateFunction`), or
- equivalently an initial set of indices :math:`\cK = \{0, \ldots, \textrm{maximumDimension} - 1\}`.
+  equivalently an initial set of indices :math:`\cK = \{0, \ldots, \textrm{maximumSize} - 1\}`.
-- Discard from the basis any polynomial :math:`\Psi_j` associated with
- an insignificant coefficient, i.e. such that:
+- Discard from the basis any function :math:`\Psi_j` associated with
+ an insignificant coefficient :math:`a_j`, i.e. such that:
.. math::
- |a_j| \leq \epsilon \times \max_{ k \in \cK, k \neq 0 } |a_k|.
+ |a_j| \leq \epsilon \max_{ k \in \cK, k \neq 0 } |a_k|.
-- Add the next basis term :math:`\Psi_{k+1}` to the current basis :math:`\cK`.
-- Reiterate the procedure until the minimum of *maximumDimension* and
- *maximumSize* has been reached.
+- Add the next function to the current basis :math:`\cK` according to the :class:`~openturns.EnumerateFunction` used.
+- Reiterate the procedure until the first *maximumDimension* functions have been considered.
Examples
@@ -105,7 +101,7 @@ index : int
// ---------------------------------------------------------------------
%feature("docstring") OT::CleaningStrategy::getMaximumSize
-"Accessor to the maximum size of the orthogonal basis.
+"Accessor to the maximum number of functions used.
Returns
-------
@@ -133,7 +129,7 @@ setSignificanceFactor"
// ---------------------------------------------------------------------
%feature("docstring") OT::CleaningStrategy::setMaximumSize
-"Accessor to the maximum size of the orthogonal basis.
+"Accessor to the maximum number of functions used.
Parameters
----------
diff --git a/python/src/FunctionalChaosResult_doc.i.in b/python/src/FunctionalChaosResult_doc.i.in
index 6a58e2b03b..7df3b028f5 100644
--- a/python/src/FunctionalChaosResult_doc.i.in
+++ b/python/src/FunctionalChaosResult_doc.i.in
@@ -304,5 +304,5 @@ involvesModelSelection: bool
Returns
-------
residualsSample : :class:`~openturns.Sample`
- The sample of residuals :math:`r_{ji} = y_{ji} - \metamodel_i(\vect{x^{(j)}})`
+ The sample of residuals :math:`r_{ji} = y_{ji} - \metaModel_i(\vect{x^{(j)}})`
for :math:`i = 1, ..., n_Y` and :math:`j = 1, ..., n`."
diff --git a/python/src/GaussianProcessConditionalCovariance.i b/python/src/GaussianProcessConditionalCovariance.i
new file mode 100644
index 0000000000..dec38c54f1
--- /dev/null
+++ b/python/src/GaussianProcessConditionalCovariance.i
@@ -0,0 +1,11 @@
+// SWIG file GaussianProcessConditionalCovariance.i
+
+%{
+#include "openturns/GaussianProcessConditionalCovariance.hxx"
+%}
+
+%include GaussianProcessConditionalCovariance_doc.i
+
+%copyctor OT::GaussianProcessConditionalCovariance;
+
+%include openturns/GaussianProcessConditionalCovariance.hxx
diff --git a/python/src/GaussianProcessConditionalCovariance_doc.i.in b/python/src/GaussianProcessConditionalCovariance_doc.i.in
new file mode 100644
index 0000000000..133edf87b2
--- /dev/null
+++ b/python/src/GaussianProcessConditionalCovariance_doc.i.in
@@ -0,0 +1,190 @@
+%feature("docstring") OT::GaussianProcessConditionalCovariance
+"Conditional covariance post processing of a Gaussian Process Regression result.
+
+Parameters
+----------
+gprResult : :class:`~openturns.experimental.GaussianProcessRegressionResult`
+ The result class of a Gaussian process regression.
+
+
+Notes
+-----
+The class provides services around the conditional covariance of a Gaussian Process Regression result.
+
+
+Examples
+--------
+Create the model :math:`\cM: \Rset \mapsto \Rset` and the samples:
+
+>>> import openturns as ot
+>>> from openturns.experimental import GaussianProcessRegression
+>>> from openturns.experimental import GaussianProcessConditionalCovariance
+>>> trend = ot.SymbolicFunction(['x'], ['1'])
+>>> sampleX = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
+>>> sampleY = trend(sampleX)
+
+Create the algorithm:
+
+>>> covarianceModel = ot.SquaredExponential([1.0])
+>>> covarianceModel.setActiveParameter([])
+
+>>> algo = GaussianProcessRegression(sampleX, sampleY, covarianceModel, trend)
+>>> algo.run()
+>>> result = algo.getResult()
+>>> condCov = GaussianProcessConditionalCovariance(result)
+>>> c = condCov([1.1])
+
+"
+
+
+// ---------------------------------------------------------------------
+
+%feature("docstring") OT::GaussianProcessConditionalCovariance::getConditionalMean
+"Compute the conditional mean of the Gaussian process on a point or a sample of points.
+
+Parameters
+----------
+x : sequence of float
+ The point :math:`\vect{x}` where the conditional mean of the output has to be evaluated.
+sampleX : 2-d sequence of float
+ The sample :math:`(\vect{\xi}_1, \dots, \vect{\xi}_M)` where the conditional mean of the output has to be evaluated (*M* can be equal to 1).
+
+Returns
+-------
+condMean : :class:`~openturns.Point`
+ The conditional mean :math:`\Expect{\vect{Y}(\omega, \vect{x})\, | \, \cC}` at point :math:`\vect{x}`.
+ Or the conditional mean matrix at the sample :math:`(\vect{\xi}_1, \dots, \vect{\xi}_M)`:
+
+ .. math::
+
+ \left(
+ \begin{array}{l}
+ \Expect{\vect{Y}(\omega, \vect{\xi}_1)\, | \, \cC}\\
+ \dots \\
+ \Expect{\vect{Y}(\omega, \vect{\xi}_M)\, | \, \cC}
+ \end{array}
+ \right)
+
+"
+
+
+// ---------------------------------------------------------------------
+
+%feature("docstring") OT::GaussianProcessConditionalCovariance::getConditionalCovariance
+"Compute the conditional covariance of the Gaussian process on a point (or several points).
+
+Parameters
+----------
+x : sequence of float
+ The point :math:`\vect{x}` where the conditional covariance of the output has to be evaluated.
+sampleX : 2-d sequence of float
+ The sample :math:`(\vect{\xi}_1, \dots, \vect{\xi}_M)` where the conditional covariance of the output has to be evaluated (*M* can be equal to 1).
+
+Returns
+-------
+condCov : :class:`~openturns.CovarianceMatrix`
+ The conditional covariance :math:`\Cov{\vect{Y}(\omega, \vect{x})\, | \, \cC}` at point :math:`\vect{x}`.
+ Or the conditional covariance matrix at the sample :math:`(\vect{\xi}_1, \dots, \vect{\xi}_M)`:
+
+ .. math::
+
+ \left(
+ \begin{array}{lcl}
+ \Sigma_{11} & \dots & \Sigma_{1M} \\
+ \dots \\
+ \Sigma_{M1} & \dots & \Sigma_{MM}
+ \end{array}
+ \right)
+
+ where :math:`\Sigma_{ij} = \Cov{\vect{Y}(\omega, \vect{\xi}_i), \vect{Y}(\omega, \vect{\xi}_j)\, | \, \cC}`."
+
+// ---------------------------------------------------------------------
+
+%feature("docstring") OT::GaussianProcessConditionalCovariance::getConditionalMarginalCovariance
+"Compute the conditional covariance of the Gaussian process on a point (or several points).
+
+Parameters
+----------
+x : sequence of float
+ The point :math:`\vect{x}` where the conditional marginal covariance of the output has to be evaluated.
+sampleX : 2-d sequence of float
+ The sample :math:`(\vect{\xi}_1, \dots, \vect{\xi}_M)` where the conditional marginal covariance of the output has to be evaluated (*M* can be equal to 1).
+
+Returns
+-------
+condCov : :class:`~openturns.CovarianceMatrix`
+ The conditional covariance :math:`\Cov{\vect{Y}(\omega, \vect{x})\, | \, \cC}` at point :math:`\vect{x}`.
+
+condCov : :class:`~openturns.CovarianceMatrixCollection`
+ The collection of conditional covariance matrices :math:`\Cov{\vect{Y}(\omega, \vect{\xi})\, | \, \cC}` at
+ each point of the sample :math:`(\vect{\xi}_1, \dots, \vect{\xi}_M)`:
+
+Notes
+-----
+When the input parameter is of type :class:`~openturns.Sample`, each element of the collection corresponds to the conditional
+covariance with respect to the input learning set (a pointwise evaluation of :meth:`getConditionalCovariance`)."
+// ---------------------------------------------------------------------
+
+%feature("docstring") OT::GaussianProcessConditionalCovariance::getConditionalMarginalVariance
+"Compute the conditional variance of the Gaussian process on a point (or several points).
+
+Parameters
+----------
+x : sequence of float
+ The point :math:`\vect{x}` where the conditional variance of the output has to be evaluated.
+sampleX : 2-d sequence of float
+ The sample :math:`(\vect{\xi}_1, \dots, \vect{\xi}_M)` where the conditional variance of the output has to be evaluated (*M* can be equal to 1).
+marginalIndex : int
+ Marginal of interest (for multiple outputs).
+ Default value is 0
+marginalIndices : sequence of int
+ Marginals of interest (for multiple outputs).
+
+Returns
+-------
+var : float
+ Variance of interest.
+ float if one point (x) and one marginal of interest (x, marginalIndex)
+
+varPoint : sequence of float
+ The marginal variances
+
+
+Notes
+-----
+When both *sampleX* and *marginalIndices* are given, the returned sequence is the
+concatenation of the marginal variances for each point of *sampleX*."
+
+// ---------------------------------------------------------------------
+
+%feature("docstring") OT::GaussianProcessConditionalCovariance::operator()
+"Compute the conditional Gaussian distribution on a new point / sample conditionally to the observed paths.
+
+Parameters
+----------
+pt : sequence of float
+ The point :math:`\vect{x}` where the conditional distribution of the output has to be evaluated.
+sampleX : 2-d sequence of float
+ The sample :math:`(\vect{\xi}_1, \dots, \vect{\xi}_M)` where the conditional distribution of the output has to be evaluated (*M* can be equal to 1).
+
+Returns
+-------
+condDist : :class:`~openturns.Normal`
+ The conditional Gaussian distribution.
+
+Notes
+-----
+The conditional distribution is :math:`\cN(\Expect{\vect{Y}}, \Cov{\vect{Y}})`: its conditional mean and covariance
+are the ones computed by :meth:`getConditionalMean` and :meth:`getConditionalCovariance` respectively."
+
+// ---------------------------------------------------------------------
+
+%feature("docstring") OT::GaussianProcessConditionalCovariance::getMetaModel
+"Accessor to the metamodel.
+
+Returns
+-------
+metaModel : :class:`~openturns.Function`
+ The meta model :math:`\tilde{\cM}: \Rset^d \rightarrow \Rset^p`.
+"
+
diff --git a/python/src/GaussianProcessFitter.i b/python/src/GaussianProcessFitter.i
new file mode 100644
index 0000000000..2cb2ad6221
--- /dev/null
+++ b/python/src/GaussianProcessFitter.i
@@ -0,0 +1,12 @@
+// SWIG file GaussianProcessFitter.i
+
+%{
+#include "openturns/GaussianProcessFitter.hxx"
+%}
+
+%include GaussianProcessFitter_doc.i
+
+%include openturns/GaussianProcessFitter.hxx
+
+namespace OT{ %extend GaussianProcessFitter { GaussianProcessFitter(const GaussianProcessFitter & other) { return new OT::GaussianProcessFitter(other); } } }
+
diff --git a/python/src/GaussianProcessFitterResult.i b/python/src/GaussianProcessFitterResult.i
new file mode 100644
index 0000000000..165641d959
--- /dev/null
+++ b/python/src/GaussianProcessFitterResult.i
@@ -0,0 +1,13 @@
+// SWIG file GaussianProcessFitterResult.i
+
+%{
+#include "openturns/GaussianProcessFitterResult.hxx"
+%}
+
+%include GaussianProcessFitterResult_doc.i
+
+%include openturns/GaussianProcessFitterResult.hxx
+
+namespace OT{ %extend GaussianProcessFitterResult { GaussianProcessFitterResult(const GaussianProcessFitterResult & other) { return new OT::GaussianProcessFitterResult(other); } } }
+
+
diff --git a/python/src/GaussianProcessFitterResult_doc.i.in b/python/src/GaussianProcessFitterResult_doc.i.in
new file mode 100644
index 0000000000..f067ad6f9c
--- /dev/null
+++ b/python/src/GaussianProcessFitterResult_doc.i.in
@@ -0,0 +1,198 @@
+%feature("docstring") OT::GaussianProcessFitterResult
+"Gaussian process fitter result.
+
+.. warning::
+ This class is experimental and likely to be modified in future releases.
+ To use it, import the ``openturns.experimental`` submodule.
+
+Parameters
+----------
+inputSample, outputSample : :class:`~openturns.Sample`
+ The samples :math:`(\vect{x}_k)_{1 \leq k \leq \sampleSize} \in \Rset^{\inputDim}` and :math:`(\vect{y}_k)_{1 \leq k \leq \sampleSize}\in \Rset^{\outputDim}`.
+metaModel : :class:`~openturns.Function`
+    The meta model: :math:`\tilde{\cM}: \Rset^{\inputDim} \rightarrow \Rset^{\outputDim}`, defined in :eq:`metaModelGPF`.
+residuals : :class:`~openturns.Point`
+ The residual errors.
+relativeErrors : :class:`~openturns.Point`
+ The relative errors.
+regressionMatrix : :class:`~openturns.Matrix`
+    The regression matrix, i.e. the evaluation of the basis functions
+    on the input design sample.
+basis : :class:`~openturns.Basis`
+    Functional basis of size :math:`b`: :math:`(\varphi^\ell: \Rset^{\inputDim} \rightarrow \Rset^{\outputDim})` for each :math:`\ell \in [1, b]`.
+ Its size should be equal to zero if the trend is not estimated.
+trendCoef : sequence of float
+    The trend coefficient vectors :math:`(\vect{\alpha}^1, \dots, \vect{\alpha}^{\outputDim})` stored as a :class:`~openturns.Point`.
+covarianceModel : :class:`~openturns.CovarianceModel`
+ Covariance function of the Gaussian process with its optimized parameters.
+optimalLogLikelihood : float
+ The maximum log-likelihood corresponding to the model.
+linAlgMethod : int
+ The used linear algebra method to fit the model:
+
+ - otexp.GaussianProcessFitterResult.LAPACK or 0: using `LAPACK` to fit the model,
+
+ - otexp.GaussianProcessFitterResult.HMAT or 1: using `HMAT` to fit the model.
+
+Notes
+-----
+The structure is usually created by the method `run` of a :class:`~openturns.experimental.GaussianProcessFitter`, and obtained thanks to the `getResult()` method.
+
+The meta model :math:`\tilde{\cM}: \Rset^{\inputDim} \rightarrow \Rset^{\outputDim}` is defined by:
+
+.. math::
+ :label: metaModelGPF
+
+ \tilde{\cM}(\vect{x}) = \left(
+ \begin{array}{l}
+ \mu_1(\vect{x}) \\
+ \dots \\
+      \mu_{\outputDim}(\vect{x})
+ \end{array}
+ \right)
+
+where :math:`\mu_\ell(\vect{x}) = \sum_{j=1}^{b} \alpha_j^\ell \varphi_j^\ell(\vect{x})` and
+:math:`\varphi_j^\ell: \Rset^{\inputDim} \rightarrow \Rset` are the trend functions
+(the :math:`\ell`-th marginal of :math:`\varphi(\vect{x})`).
+
+If a normalizing transformation *T* is used, the meta model is defined by:
+
+.. math::
+ :label: metaModelWithTGPF
+
+ \tilde{\cM}(\vect{x}) = \left(
+ \begin{array}{l}
+ \mu_1\circ T(\vect{x}) \\
+ \dots \\
+      \mu_{\outputDim}\circ T(\vect{x})
+ \end{array}
+ \right)
+
+Examples
+--------
+Create the model :math:`\cM: \Rset \mapsto \Rset` and the samples:
+
+>>> import openturns as ot
+>>> import openturns.experimental as otexp
+>>> g = ot.SymbolicFunction(['x'], ['x * sin(x)'])
+>>> sampleX = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
+>>> sampleY = g(sampleX)
+
+Create the algorithm:
+
+>>> basis = ot.Basis([ot.SymbolicFunction(['x'], ['x']), ot.SymbolicFunction(['x'], ['x^2'])])
+>>> covarianceModel = ot.GeneralizedExponential([2.0], 2.0)
+>>> algo = otexp.GaussianProcessFitter(sampleX, sampleY, covarianceModel, basis)
+>>> algo.run()
+
+Get the result:
+
+>>> result = algo.getResult()
+
+Get the meta model:
+
+>>> metaModel = result.getMetaModel()
+>>> graph = metaModel.draw(0.0, 7.0)
+>>> cloud = ot.Cloud(sampleX, sampleY)
+>>> cloud.setPointStyle('fcircle')
+>>> graph = ot.Graph()
+>>> graph.add(cloud)
+>>> graph.add(g.draw(0.0, 7.0))
+"
+
+// ---------------------------------------------------------------------
+
+%feature("docstring") OT::GaussianProcessFitterResult::getBasis
+"Accessor to the basis.
+
+Returns
+-------
+basis : :class:`~openturns.Basis`
+    Functional basis of size :math:`b`: :math:`(\varphi^\ell: \Rset^{\inputDim} \rightarrow \Rset^{\outputDim})`
+    for each :math:`\ell \in [1, b]`.
+
+Notes
+-----
+If the trend is not estimated, the basis is empty. "
+
+
+// ---------------------------------------------------------------------
+
+%feature("docstring") OT::GaussianProcessFitterResult::getCovarianceModel
+"Accessor to the covariance model.
+
+Returns
+-------
+covModel : :class:`~openturns.CovarianceModel`
+ The covariance model of the Gaussian process *W*.
+"
+
+// ---------------------------------------------------------------------
+%feature("docstring") OT::GaussianProcessFitterResult::getLinearAlgebraMethod
+"Accessor to the used linear algebra method to fit.
+
+Returns
+-------
+linAlgMethod : int
+ The used linear algebra method to fit the model:
+
+ - otexp.GaussianProcessFitterResult.LAPACK or 0: using `LAPACK` to fit the model,
+
+ - otexp.GaussianProcessFitterResult.HMAT or 1: using `HMAT` to fit the model.
+"
+
+// ---------------------------------------------------------------------
+
+%feature("docstring") OT::GaussianProcessFitterResult::getMetaModel
+"Accessor to the metamodel.
+
+Returns
+-------
+metaModel : :class:`~openturns.Function`
+    The meta model :math:`\tilde{\cM}: \Rset^{\inputDim} \rightarrow \Rset^{\outputDim}`, defined in :eq:`metaModelGPF`.
+"
+
+// ---------------------------------------------------------------------
+
+%feature("docstring") OT::GaussianProcessFitterResult::getNoise
+"Accessor to the Gaussian process.
+
+Returns
+-------
+process : :class:`~openturns.Process`
+    The Gaussian process :math:`W` with the optimized parameters.
+"
+
+// ---------------------------------------------------------------------
+
+%feature("docstring") OT::GaussianProcessFitterResult::getOptimalLogLikelihood
+"Accessor to the optimal log-likelihood of the model.
+
+Returns
+-------
+optimalLogLikelihood : float
+    The maximum log-likelihood corresponding to the model.
+"
+
+// ---------------------------------------------------------------------
+
+%feature("docstring") OT::GaussianProcessFitterResult::getRegressionMatrix
+"Accessor to the regression matrix.
+
+Returns
+-------
+matrix : :class:`~openturns.Matrix`
+    The regression matrix.
+"
+
+// ---------------------------------------------------------------------
+
+%feature("docstring") OT::GaussianProcessFitterResult::getTrendCoefficients
+"Accessor to the trend coefficients.
+
+Returns
+-------
+trendCoef : sequence of float
+ The trend coefficients vectors :math:`(\vect{\alpha}^1, \dots, \vect{\alpha}^{\outputDim})` as a :class:`~openturns.Point`
+"
+
+// ---------------------------------------------------------------------
diff --git a/python/src/GaussianProcessFitter_doc.i.in b/python/src/GaussianProcessFitter_doc.i.in
new file mode 100644
index 0000000000..2eb98800cd
--- /dev/null
+++ b/python/src/GaussianProcessFitter_doc.i.in
@@ -0,0 +1,385 @@
+%feature("docstring") OT::GaussianProcessFitter
+"Fit gaussian process models
+
+.. warning::
+ This class is experimental and likely to be modified in future releases.
+ To use it, import the ``openturns.experimental`` submodule.
+
+Parameters
+----------
+inputSample, outputSample : :class:`~openturns.Sample` or 2d-array
+ The samples :math:`(\vect{x}_k)_{1 \leq k \leq \sampleSize} \in \Rset^\inputDim` and :math:`(\vect{y}_k)_{1 \leq k \leq \sampleSize}\in \Rset^{\outputDim}`.
+
+covarianceModel : :class:`~openturns.CovarianceModel`
+ Covariance model of the Gaussian process. See notes for the details.
+
+basis : :class:`~openturns.Basis`
+ Functional basis to estimate the trend: :math:`(\varphi_j)_{1 \leq j \leq n_1}: \Rset^\inputDim \rightarrow \Rset`.
+ If :math:`\outputDim > 1`, the same basis is used for each marginal output.
+    Default value is `Basis(0)`, i.e. no trend to estimate.
+
+Notes
+-----
+We suppose we have a sample :math:`(\vect{x}_k, \vect{y}_k)_{1 \leq k \leq \sampleSize}` where :math:`\vect{y}_k = \cM(\vect{x}_k)` for all :math:`k`, with :math:`\model:\Rset^\inputDim \mapsto \Rset^{\outputDim}` a given function.
+
+The objective is to build a metamodel :math:`\metaModel`, using a **Gaussian process**: the sample :math:`(\vect{y}_k)_{1 \leq k \leq \sampleSize}` is considered as the restriction of a Gaussian process :math:`\vect{Y}(\omega, \vect{x})` on :math:`(\vect{x}_k)_{1 \leq k \leq \sampleSize}`. The Gaussian process :math:`\vect{Y}(\omega, \vect{x})` is defined by:
+
+.. math::
+
+ \vect{Y}(\omega, \vect{x}) = \vect{\mu}(\vect{x}) + \vect{W}(\omega, \vect{x})
+
+where:
+
+.. math::
+
+ \vect{\mu}(\vect{x}) = \left(
+ \begin{array}{l}
+ \mu_1(\vect{x}) \\
+ \vdots \\
+      \mu_{\outputDim}(\vect{x})
+ \end{array}
+ \right)
+
+with :math:`\mu_\ell(\vect{x}) = \sum_{j=1}^{n_\ell} \beta_j^\ell \varphi_j^\ell(\vect{x})` and :math:`\varphi_j^\ell: \Rset^{\inputDim} \rightarrow \Rset` the trend functions.
+
+Let :math:`\vect{W}` be a Gaussian process of dimension :math:`\outputDim` with zero mean and covariance function :math:`C = C(\vect{\theta}, \vect{\sigma}, \mat{R}, \vect{\lambda})` (see :class:`~openturns.CovarianceModel` for the notations).
+
+We note:
+
+.. math::
+
+ \vect{\beta}^\ell = \left(
+ \begin{array}{l}
+ \beta_1^\ell \\
+ \vdots \\
+ \beta_{n_\ell}^\ell
+ \end{array}
+ \right) \in \Rset^{n_\ell}
+ \quad \mbox{ and } \quad
+ \vect{\beta} = \left(
+ \begin{array}{l}
+ \vect{\beta}^1\\
+ \vdots \\
+      \vect{\beta}^{\outputDim}
+ \end{array}
+ \right)\in \Rset^{\sum_{\ell=1}^{\outputDim} n_\ell}
+
+
+The *GaussianProcessFitter* class estimates the coefficients :math:`\beta_j^\ell` and :math:`\vect{p}`
+where :math:`\vect{p}` is the vector of parameters of the covariance model (a subset of
+:math:`\vect{\theta}, \vect{\sigma}, \mat{R}, \vect{\lambda}`) that has been declared as
+*active* (by default, the full vectors :math:`\vect{\theta}` and :math:`\vect{\sigma}`).
+
+The estimation is done by maximizing the *reduced* log-likelihood of the model (see its expression below).
+
+**Estimation of the parameters** :math:`\beta_j^\ell` and :math:`\vect{p}`
+
+We note:
+
+.. math::
+
+ \vect{y} = \left(
+ \begin{array}{l}
+ \vect{y}_1 \\
+ \vdots \\
+ \vect{y}_{\sampleSize}
+ \end{array}
+    \right) \in \Rset^{\outputDim \times \sampleSize},
+ \quad
+ \vect{m}_{\vect{\beta}} = \left(
+ \begin{array}{l}
+ \vect{\mu}(\vect{x}_1) \\
+ \vdots \\
+ \vect{\mu}(\vect{x}_{\sampleSize})
+ \end{array}
+    \right) \in \Rset^{\outputDim \times \sampleSize}
+
+and
+
+.. math::
+
+ \mat{C}_{\vect{p}} = \left(
+ \begin{array}{lcl}
+ \mat{C}_{11} & \dots & \mat{C}_{1 \times \sampleSize}\\
+ \vdots & & \vdots \\
+ \mat{C}_{\sampleSize \times 1} & \dots & \mat{C}_{\sampleSize \times \sampleSize}
+ \end{array}
+    \right) \in \cS_{\outputDim \times \sampleSize}^+(\Rset)
+
+where :math:`\mat{C}_{ij} = C_{\vect{p}}(\vect{x}_i, \vect{x}_j)`.
+
+The model likelihood writes:
+
+.. math::
+
+    \cL(\vect{\beta}, \vect{p};(\vect{x}_k, \vect{y}_k)_{1 \leq k \leq \sampleSize}) = \dfrac{1}{(2\pi)^{\outputDim \times \sampleSize/2} |\det \mat{C}_{\vect{p}}|^{1/2}} \exp\left[ -\dfrac{1}{2}\Tr{\left( \vect{y}-\vect{m}_{\vect{\beta}} \right)} \mat{C}_{\vect{p}}^{-1} \left( \vect{y}-\vect{m}_{\vect{\beta}} \right) \right]
+
+Let :math:`\mat{L}_{\vect{p}}` be the Cholesky factor of :math:`\mat{C}_{\vect{p}}`, i.e. the lower triangular matrix with positive diagonal such that :math:`\mat{L}_{\vect{p}} \,\Tr{\mat{L}_{\vect{p}}} = \mat{C}_{\vect{p}}`.
+Therefore:
+
+.. math::
+ :label: logLikelihoodGP
+
+ \log \cL(\vect{\beta}, \vect{p};(\vect{x}_k, \vect{y}_k)_{1 \leq k \leq \sampleSize})
+ = cste - \log \det \mat{L}_{\vect{p}} -\dfrac{1}{2} \| \mat{L}_{\vect{p}}^{-1}(\vect{y}-\vect{m}_{\vect{\beta}}) \|^2
+
+The maximization of :eq:`logLikelihoodGP` leads to the following optimality condition for :math:`\vect{\beta}`:
+
+.. math::
+
+ \vect{\beta}^*(\vect{p}^*)
+ = \argmin_{\vect{\beta}} \| \mat{L}_{\vect{p}^*}^{-1}(\vect{y} - \vect{m}_{\vect{\beta}}) \|^2_2
+
+This expression of :math:`\vect{\beta}^*` as a function of :math:`\vect{p}^*` is taken as a general relation
+between :math:`\vect{\beta}` and :math:`\vect{p}` and is substituted into :eq:`logLikelihoodGP`, leading to
+a *reduced log-likelihood* function depending solely on :math:`\vect{p}`.
+
+In the particular case where :math:`d=\dim(\vect{\sigma})=1` and :math:`\sigma` is a part of :math:`\vect{p}`, then a further reduction is possible. In this case, if :math:`\vect{q}` is the vector :math:`\vect{p}` in which :math:`\sigma` has been substituted by 1, then:
+
+.. math::
+
+ \| \mat{L}_{\vect{p}}^{-1}(\vect{y}-\vect{m}_{\vect{\beta}}) \|^2
+ = \frac{1}{\sigma^2} \| \mat{L}_{\vect{q}}^{-1}(\vect{y}-\vect{m}_{\vect{\beta}}) \|^2_2
+
+showing that :math:`\vect{\beta}^*` is a function of :math:`\vect{q}^*` only, and the optimality condition for :math:`\sigma^2` reads:
+
+.. math::
+
+    (\sigma^*)^2(\vect{q}^*)
+ = \dfrac{1}{\sampleSize} \| \mat{L}_{\vect{q}^*}^{-1}(\vect{y} - \vect{m}_{\vect{\beta}^*(\vect{q}^*)}) \|^2_2
+
+which leads to a further reduction of the log-likelihood function where both :math:`\vect{\beta}` and :math:`\sigma` are replaced by their expression in terms of :math:`\vect{q}`.
+
+The default optimizer is :class:`~openturns.Cobyla` and can be changed thanks to the :meth:`setOptimizationAlgorithm` method.
+The user can also change the default optimization solver by setting the `GaussianProcessFitter-DefaultOptimizationAlgorithm` resource map key to one of the :class:`~openturns.NLopt` solver names.
+
+It is also possible to proceed as follows (a sketch follows the list):
+
+- ask for the reduced log-likelihood function thanks to the :meth:`getObjectiveFunction` method
+- optimize it with respect to the parameters :math:`\vect{\theta}` and :math:`\vect{\sigma}` using any optimization algorithms (that can take into account some additional constraints if needed)
+- set the optimal parameter value into the covariance model used in the *GaussianProcessFitter*
+- tell the algorithm not to optimize the parameter using the :meth:`setOptimizeParameters` method
+
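+A sketch of this workflow (illustration only: it assumes samples *inputSample* and
+*outputSample*, a *covarianceModel* whose parameters are active, and a trend *basis*;
+the choice of :class:`~openturns.TNC` is arbitrary)::
+
+    algo = otexp.GaussianProcessFitter(inputSample, outputSample, covarianceModel, basis)
+    reduced_log_likelihood = algo.getObjectiveFunction()
+    problem = ot.OptimizationProblem(reduced_log_likelihood)
+    problem.setMinimization(False)  # maximize the reduced log-likelihood
+    solver = ot.TNC(problem)
+    solver.setStartingPoint(covarianceModel.getParameter())
+    solver.run()
+    # inject the optimal parameters and fit again without re-optimizing
+    covarianceModel.setParameter(solver.getResult().getOptimalPoint())
+    algo = otexp.GaussianProcessFitter(inputSample, outputSample, covarianceModel, basis)
+    algo.setOptimizeParameters(False)
+    algo.run()
+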
+The behaviour of the reduction is controlled by the following keys in :class:`~openturns.ResourceMap`:
+
+- *ResourceMap.SetAsBool('GaussianProcessFitter-UseAnalyticalAmplitudeEstimate', True)* to use the reduction associated to :math:`\sigma`. It has no effect if :math:`d>1` or if :math:`d=1` and :math:`\sigma` is not part of :math:`\vect{p}`.
+- *ResourceMap.SetAsBool('GaussianProcessFitter-UnbiasedVariance', True)* allows one to use the *unbiased* estimate of :math:`\sigma^2`, where :math:`\dfrac{1}{\sampleSize}` is replaced by :math:`\dfrac{1}{\sampleSize-\outputDim}` in the optimality condition for :math:`\sigma^2`.
+
+With huge samples, the `hierarchical matrix <http://en.wikipedia.org/wiki/Hierarchical_matrix>`_ implementation could be used if `hmat-oss` support has been enabled.
+
+This implementation, based on a compressed representation of an approximated covariance matrix (and its Cholesky factor), has a better complexity both in terms of memory requirements
+and floating point operations. To use it, the `GaussianProcessFitter-LinearAlgebra` resource map key should be set to `HMAT`. The default value of the key is `LAPACK`.
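+
+A minimal sketch (assuming `hmat-oss` support is available, as in the unit tests of this changeset):
+
+>>> import openturns as ot
+>>> ot.ResourceMap.Set('GaussianProcessFitter-LinearAlgebra', 'HMAT')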
+
+Examples
+--------
+Create the model :math:`\model: \Rset \mapsto \Rset` and the samples:
+
+>>> import openturns as ot
+>>> import openturns.experimental as otexp
+>>> g = ot.SymbolicFunction(['x'], ['x + x * sin(x)'])
+>>> inputSample = ot.Sample([[1.0], [3.0], [5.0], [6.0], [7.0], [8.0]])
+>>> outputSample = g(inputSample)
+
+Create the algorithm:
+
+>>> g1 = ot.SymbolicFunction(['x'], ['sin(x)'])
+>>> g2 = ot.SymbolicFunction(['x'], ['x'])
+>>> g3 = ot.SymbolicFunction(['x'], ['cos(x)'])
+>>> basis = ot.Basis([g1, g2, g3])
+>>> covarianceModel = ot.SquaredExponential([1.0])
+>>> covarianceModel.setActiveParameter([])
+>>> algo = otexp.GaussianProcessFitter(inputSample, outputSample, covarianceModel, basis)
+>>> algo.run()
+
+Get the resulting meta model:
+
+>>> result = algo.getResult()
+>>> metamodel = result.getMetaModel()"
+
+// ---------------------------------------------------------------------
+
+%feature("docstring") OT::GaussianProcessFitter::getResult
+"Get the results of the metamodel computation.
+
+Returns
+-------
+result : :class:`~openturns.experimental.GaussianProcessFitterResult`
+ Structure containing all the results obtained after computation
+ and created by the method :py:meth:`run`.
+"
+
+//-----------------------------------------------------------------------
+
+%feature("docstring") OT::GaussianProcessFitter::getInputSample
+"Accessor to the input sample.
+
+Returns
+-------
+inputSample : :class:`~openturns.Sample`
+ The input sample :math:`(\vect{x}_k)_{1 \leq k \leq \sampleSize}`."
+
+// ---------------------------------------------------------------------
+
+%feature("docstring") OT::GaussianProcessFitter::getOutputSample
+"Accessor to the output sample.
+
+Returns
+-------
+outputSample : :class:`~openturns.Sample`
+    The output sample :math:`(\vect{y}_k)_{1 \leq k \leq \sampleSize}`."
+
+// ---------------------------------------------------------------------
+
+%feature("docstring") OT::GaussianProcessFitter::getObjectiveFunction()
+"Accessor to the log-likelihood function that writes as argument of the covariance's model parameters.
+
+Returns
+-------
+logLikelihood : :class:`~openturns.Function`
+    The log-likelihood function defined in :eq:`logLikelihoodGP` as a function of :math:`(\vect{\theta}, \vect{\sigma})`.
+
+Notes
+-----
+The log-likelihood function may be useful for some postprocessing: maximization using external optimizers for example.
+
+
+Examples
+--------
+Create the model :math:`\cM: \Rset \mapsto \Rset` and the samples:
+
+>>> import openturns as ot
+>>> import openturns.experimental as otexp
+>>> g = ot.SymbolicFunction(['x0'], ['x0 * sin(x0)'])
+>>> inputSample = ot.Sample([[1.0], [3.0], [5.0], [6.0], [7.0], [8.0]])
+>>> outputSample = g(inputSample)
+
+Create the algorithm:
+
+>>> basis = ot.ConstantBasisFactory().build()
+>>> covarianceModel = ot.SquaredExponential(1)
+>>> algo = otexp.GaussianProcessFitter(inputSample, outputSample, covarianceModel, basis)
+>>> algo.run()
+
+Get the log-likelihood function:
+
+>>> likelihoodFunction = algo.getObjectiveFunction()
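+
+It can then be evaluated at any candidate scale value (a sketch; the returned value depends on the data):
+
+>>> value = likelihoodFunction([1.0])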
+"
+
+
+// ---------------------------------------------------------------------
+
+%feature("docstring") OT::GaussianProcessFitter::run
+"Compute the response surface.
+
+Notes
+-----
+It computes the response surface and creates a
+:class:`~openturns.experimental.GaussianProcessFitterResult` structure containing all the results."
+
+// ---------------------------------------------------------------------
+
+%feature("docstring") OT::GaussianProcessFitter::getOptimizationAlgorithm
+"Accessor to solver used to optimize the covariance model parameters.
+
+Returns
+-------
+algorithm : :class:`~openturns.OptimizationAlgorithm`
+ Solver used to optimize the covariance model parameters.
+    The default optimizer is :class:`~openturns.Cobyla`."
+
+// ---------------------------------------------------------------------
+
+%feature("docstring") OT::GaussianProcessFitter::setOptimizationAlgorithm
+"Accessor to the solver used to optimize the covariance model parameters.
+
+Parameters
+----------
+algorithm : :class:`~openturns.OptimizationAlgorithm`
+ Solver used to optimize the covariance model parameters."
+
+// ---------------------------------------------------------------------
+
+%feature("docstring") OT::GaussianProcessFitter::setOptimizeParameters
+"Accessor to the covariance model parameters optimization flag.
+
+Parameters
+----------
+optimizeParameters : bool
+ Whether to optimize the covariance model parameters."
+
+// ---------------------------------------------------------------------
+
+%feature("docstring") OT::GaussianProcessFitter::getOptimizeParameters
+"Accessor to the covariance model parameters optimization flag.
+
+Returns
+-------
+optimizeParameters : bool
+ Whether to optimize the covariance model parameters."
+
+// ---------------------------------------------------------------------
+
+%feature("docstring") OT::GaussianProcessFitter::setOptimizationBounds
+"Optimization bounds accessor.
+
+Parameters
+----------
+bounds : :class:`~openturns.Interval`
+ Bounds for covariance model parameter optimization.
+
+Notes
+-----
+The parameters involved in this method are:
+
+ - Scale parameters,
+ - Amplitude parameters if the output dimension is greater than one or
+   if the analytical sigma estimate is disabled,
+ - Additional parameters.
+
+Lower and upper bounds are defined in the resource map.
+The default lower bound for all parameters is
+:math:`10^{-2}` and is defined by the
+`GaussianProcessFitter-DefaultOptimizationLowerBound`
+resource map key.
+
+For scale parameters, the default upper bounds are set to :math:`2`
+times the difference between the maximum and minimum values of `X` for
+each coordinate, `X` being the (transformed) input sample.
+The value :math:`2` is defined in the resource map
+(`GaussianProcessFitter-DefaultOptimizationScaleFactor`).
+
+Finally, for the other parameters (amplitude, ...), the default upper bound is set
+to :math:`100` (the corresponding resource map key is
+`GaussianProcessFitter-DefaultOptimizationUpperBound`).
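+
+Examples
+--------
+A minimal sketch bounding the scale optimization by the input range, as done in the unit tests of this changeset:
+
+>>> import openturns as ot
+>>> import openturns.experimental as otexp
+>>> X = ot.Sample([[1.0], [3.0], [5.0], [6.0], [7.0], [8.0]])
+>>> Y = ot.SymbolicFunction(['x'], ['x * sin(x)'])(X)
+>>> basis = ot.ConstantBasisFactory(1).build()
+>>> algo = otexp.GaussianProcessFitter(X, Y, ot.SquaredExponential(), basis)
+>>> algo.setOptimizationBounds(ot.Interval(X.getMin(), X.getMax()))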
+"
+
+// ---------------------------------------------------------------------
+
+%feature("docstring") OT::GaussianProcessFitter::getOptimizationBounds
+"Optimization bounds accessor.
+
+Returns
+-------
+bounds : :class:`~openturns.Interval`
+ Bounds for covariance model parameter optimization."
+
+// ---------------------------------------------------------------------
+
+%feature("docstring") OT::GaussianProcessFitter::getKeepCholeskyFactor
+"Keep Cholesky factor accessor.
+
+Returns
+-------
+keepCholesky : bool
+    Whether the final Cholesky factor is kept."
+
+// ---------------------------------------------------------------------
+
+%feature("docstring") OT::GaussianProcessFitter::setKeepCholeskyFactor
+"Keep Cholesky factor setter.
+
+Parameters
+----------
+keepCholesky : bool
+    Whether the final Cholesky factor is kept."
+
+// ---------------------------------------------------------------------
diff --git a/python/src/GaussianProcessRegression.i b/python/src/GaussianProcessRegression.i
new file mode 100644
index 0000000000..938b0f7b40
--- /dev/null
+++ b/python/src/GaussianProcessRegression.i
@@ -0,0 +1,11 @@
+// SWIG file GaussianProcessRegression.i
+
+%{
+#include "openturns/GaussianProcessRegression.hxx"
+%}
+
+%include GaussianProcessRegression_doc.i
+
+%include openturns/GaussianProcessRegression.hxx
+
+namespace OT{ %extend GaussianProcessRegression { GaussianProcessRegression(const GaussianProcessRegression & other) { return new OT::GaussianProcessRegression(other); } } }
diff --git a/python/src/GaussianProcessRegressionResult.i b/python/src/GaussianProcessRegressionResult.i
new file mode 100644
index 0000000000..fb58140342
--- /dev/null
+++ b/python/src/GaussianProcessRegressionResult.i
@@ -0,0 +1,11 @@
+// SWIG file GaussianProcessRegressionResult.i
+
+%{
+#include "openturns/GaussianProcessRegressionResult.hxx"
+%}
+
+%include GaussianProcessRegressionResult_doc.i
+
+%include openturns/GaussianProcessRegressionResult.hxx
+
+namespace OT{ %extend GaussianProcessRegressionResult { GaussianProcessRegressionResult(const GaussianProcessRegressionResult & other) { return new OT::GaussianProcessRegressionResult(other); } } }
diff --git a/python/src/GaussianProcessRegressionResult_doc.i.in b/python/src/GaussianProcessRegressionResult_doc.i.in
new file mode 100644
index 0000000000..b3bf630bf7
--- /dev/null
+++ b/python/src/GaussianProcessRegressionResult_doc.i.in
@@ -0,0 +1,202 @@
+%feature("docstring") OT::GaussianProcessRegressionResult
+"Gaussian process regression (aka kriging) result.
+
+.. warning::
+ This class is experimental and likely to be modified in future releases.
+ To use it, import the ``openturns.experimental`` submodule.
+
+
+Parameters
+----------
+gpfResult : :class:`~openturns.experimental.GaussianProcessFitterResult`
+    Result structure of a Gaussian process fitter.
+covarianceCoefficients : 2-d sequence of float
+    The :math:`\vect{\gamma}` defined in :eq:`gammaEqGP`.
+
+Notes
+-----
+The Gaussian Process Regression (aka Kriging) meta model :math:`\tilde{\cM}` is defined by:
+
+.. math::
+ :label: metaModelGP
+
+ \tilde{\cM}(\vect{x}) = \vect{\mu}(\vect{x}) + \Expect{\vect{Y}(\omega, \vect{x})\,| \,\cC}
+
+where :math:`\cC` is the condition :math:`\vect{Y}(\omega, \vect{x}_k) = \vect{y}_k` for each :math:`k \in [1, \sampleSize]`.
+
+Equation :eq:`metaModelGP` writes:
+
+.. math::
+
+ \tilde{\cM}(\vect{x}) = \vect{\mu}(\vect{x}) + \Cov{\vect{Y}(\omega, \vect{x}), (\vect{Y}(\omega,\vect{x}_1),\dots,\vect{Y}(\omega, \vect{x}_{\sampleSize}))}\vect{\gamma}
+
+where
+
+.. math::
+
+ \Cov{\vect{Y}(\omega, \vect{x}), (\vect{Y}(\omega, \vect{x}_1),\dots,\vect{Y}(\omega, \vect{x}_{\sampleSize}))} = \left(\mat{C}(\vect{x},\vect{x}_1)|\dots|\mat{C}(\vect{x},\vect{x}_{\sampleSize})\right)\in \cM_{\outputDim,\sampleSize \times \outputDim}(\Rset)
+
+and
+
+.. math::
+ :label: gammaEqGP
+
+ \vect{\gamma} = \mat{C}^{-1}(\vect{y}-\vect{m})
+
+At the end, the meta model writes:
+
+.. math::
+ :label: metaModelGPFinal
+
+ \tilde{\cM}(\vect{x}) = \vect{\mu}(\vect{x}) + \sum_{i=1}^{\sampleSize} \gamma_i \mat{C}(\vect{x},\vect{x}_i)
+
+Examples
+--------
+Create the model :math:`\cM: \Rset \mapsto \Rset` and the samples:
+
+>>> import openturns as ot
+>>> import openturns.experimental as otexp
+>>> g = ot.SymbolicFunction(['x'], ['x * sin(x)'])
+>>> sampleX = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
+>>> sampleY = g(sampleX)
+
+Create the algorithm:
+
+>>> basis = ot.Basis([ot.SymbolicFunction(['x'], ['x']), ot.SymbolicFunction(['x'], ['x^2'])])
+>>> covarianceModel = ot.GeneralizedExponential([2.0], 2.0)
+
+>>> fit_algo = otexp.GaussianProcessFitter(sampleX, sampleY, covarianceModel, basis)
+>>> fit_algo.run()
+
+>>> algo = otexp.GaussianProcessRegression(fit_algo.getResult())
+>>> algo.run()
+
+Get the result:
+
+>>> result = algo.getResult()
+
+Get the meta model:
+
+>>> metaModel = result.getMetaModel()
+"
+
+// ---------------------------------------------------------------------
+
+%feature("docstring") OT::GaussianProcessRegressionResult::getCovarianceCoefficients
+"Accessor to the covariance coefficients.
+
+Returns
+-------
+covCoeff : :class:`~openturns.Sample`
+    The :math:`\vect{\gamma}` defined in :eq:`gammaEqGP`.
+"
+
+// ---------------------------------------------------------------------
+
+%feature("docstring") OT::GaussianProcessRegressionResult::getBasis
+"Accessor to the collection of basis.
+
+Returns
+-------
+basis : :class:`~openturns.Basis`
+    Functional basis of size :math:`b`: :math:`(\varphi^\ell: \Rset^{\inputDim} \rightarrow \Rset^{\outputDim})` for each :math:`\ell \in [1, b]`.
+
+Notes
+-----
+If the trend is not estimated, the basis is empty."
+
+
+// ---------------------------------------------------------------------
+
+%feature("docstring") OT::GaussianProcessRegressionResult::getCovarianceModel
+"Accessor to the covariance model.
+
+Returns
+-------
+covModel : :class:`~openturns.CovarianceModel`
+ The covariance model of the Gaussian process *W*.
+"
+
+// ---------------------------------------------------------------------
+%feature("docstring") OT::GaussianProcessRegressionResult::getLinearAlgebraMethod
+"Accessor to the used linear algebra method to fit.
+
+Returns
+-------
+linAlgMethod : int
+    The linear algebra method used to fit the model:
+
+ - otexp.GaussianProcessFitterResult.LAPACK or 0: using `LAPACK` to fit the model,
+
+ - otexp.GaussianProcessFitterResult.HMAT or 1: using `HMAT` to fit the model.
+"
+
+// ---------------------------------------------------------------------
+
+%feature("docstring") OT::GaussianProcessRegressionResult::getMetaModel
+"Accessor to the metamodel.
+
+Returns
+-------
+metaModel : :class:`~openturns.Function`
+    The meta model :math:`\tilde{\cM}: \Rset^{\inputDim} \rightarrow \Rset^{\outputDim}`, defined in :eq:`metaModelGPFinal`.
+"
+
+// ---------------------------------------------------------------------
+
+%feature("docstring") OT::GaussianProcessRegressionResult::getNoise
+"Accessor to the Gaussian process.
+
+Returns
+-------
+process : :class:`~openturns.Process`
+    The Gaussian process :math:`W` with the optimized parameters.
+"
+
+// ---------------------------------------------------------------------
+
+%feature("docstring") OT::GaussianProcessRegressionResult::getOptimalLogLikelihood
+"Accessor to the optimal log-likelihood of the model.
+
+Returns
+-------
+optimalLogLikelihood : float
+ The value of the log-likelihood corresponding to the model.
+"
+
+// ---------------------------------------------------------------------
+
+%feature("docstring") OT::GaussianProcessRegressionResult::getRegressionMatrix
+"Accessor to the regression matrix.
+
+Returns
+-------
+matrix : :class:`~openturns.Matrix`
+    The regression matrix.
+"
+
+// ---------------------------------------------------------------------
+
+%feature("docstring") OT::GaussianProcessRegressionResult::getTrendCoefficients
+"Accessor to the trend coefficients.
+
+Returns
+-------
+trendCoef : sequence of float
+    The trend coefficients vectors :math:`(\vect{\alpha}^1, \dots, \vect{\alpha}^{\outputDim})` stacked as a :class:`~openturns.Point`.
+"
+
+// ---------------------------------------------------------------------
+
+%feature("docstring") OT::GaussianProcessRegressionResult::getTrendFunction
+"Accessor to the trend function.
+
+Returns
+-------
+trendFunc : :class:`~openturns.Function`
+ The trend function.
+"
+// ---------------------------------------------------------------------
\ No newline at end of file
diff --git a/python/src/GaussianProcessRegression_doc.i.in b/python/src/GaussianProcessRegression_doc.i.in
new file mode 100644
index 0000000000..6b4e386106
--- /dev/null
+++ b/python/src/GaussianProcessRegression_doc.i.in
@@ -0,0 +1,131 @@
+%feature("docstring") OT::GaussianProcessRegression
+"Gaussian process regression algorithm.
+
+.. warning::
+ This class is experimental and likely to be modified in future releases.
+ To use it, import the ``openturns.experimental`` submodule.
+
+Refer to :ref:`kriging`.
+
+Available constructors:
+ GaussianProcessRegression(*gprFitterResult*)
+
+ GaussianProcessRegression(*inputSample, outputSample, covarianceModel, trendFunction*)
+
+Parameters
+----------
+gprFitterResult : :class:`~openturns.experimental.GaussianProcessFitterResult`
+    Result structure of a Gaussian process fitter.
+inputSample, outputSample : 2-d sequence of float
+    The samples :math:`(\vect{x}_k)_{1 \leq k \leq \sampleSize} \in \Rset^{\inputDim}` and :math:`(\vect{y}_k)_{1 \leq k \leq \sampleSize}\in \Rset^{\outputDim}` upon which the meta-model is built.
+covarianceModel : :class:`~openturns.CovarianceModel`
+    Covariance model used for the underlying Gaussian process assumption.
+trendFunction : :class:`~openturns.Function`
+    A trend function.
+
+Notes
+-----
+
+We suppose we have a sample :math:`(\vect{x}_k, \vect{y}_k)_{1 \leq k \leq \sampleSize}` where :math:`\vect{y}_k = \cM(\vect{x}_k)` for all *k*, with :math:`\cM:\Rset^{\inputDim} \mapsto \Rset^{\outputDim}` the model.
+The class builds a Gaussian process that interpolates the input samples.
+
+With the first constructor, all the Gaussian process parameters (the trend coefficients :math:`\beta_j^l`, the scale :math:`\vect{\theta}` and the amplitude :math:`\vect{\sigma}`) are assumed to be already calibrated, and the objective
+is to condition this process (so that the Gaussian process becomes interpolating over the dataset).
+
+With the second constructor, the covariance model is assumed to be already calibrated. A Gaussian process is fitted using :class:`~openturns.experimental.GaussianProcessFitter`
+and the sample :math:`(\vect{y}_k)_{1 \leq k \leq \sampleSize}` is considered as the trace of this Gaussian process :math:`\vect{Y}(\omega, \vect{x})` on :math:`(\vect{x}_k)_{1 \leq k \leq \sampleSize}`.
+
+The Gaussian process :math:`\vect{Y}(\omega, \vect{x})` is defined by:
+
+
+.. math::
+ :label: metaModelGPAlgo
+
+ \vect{Y}(\omega, \vect{x}) = \vect{\mu}(\vect{x}) + W(\omega, \vect{x})
+
+where:
+
+.. math::
+
+ \vect{\mu}(\vect{x}) = \left(
+ \begin{array}{l}
+ \mu_1(\vect{x}) \\
+ \dots \\
+ \mu_p(\vect{x})
+ \end{array}
+ \right)
+
+with :math:`\mu_\ell(\vect{x}) = \sum_{j=1}^{n_\ell} \beta_j^\ell \varphi_j^\ell(\vect{x})` and :math:`\varphi_j^\ell: \Rset^{\inputDim} \rightarrow \Rset` the trend functions.
+
+:math:`W` is a Gaussian process of dimension *p* with zero mean and covariance function :math:`C = C(\vect{\theta}, \vect{\sigma}, \mat{R}, \vect{\lambda})` (see :class:`~openturns.CovarianceModel` for the notations).
+
+
+The Gaussian Process Regression meta model :math:`\tilde{\cM}` is defined by:
+
+
+.. math::
+
+ \tilde{\cM}(\vect{x}) = \vect{\mu}(\vect{x}) + \Expect{\vect{Y}(\omega, \vect{x})\, | \, \cC}
+
+where :math:`\cC` is the condition :math:`\vect{Y}(\omega, \vect{x}_k) = \vect{y}_k` for each :math:`k \in [1, \sampleSize]`.
+
+This equation rewrites as:
+
+.. math::
+
+ \tilde{\cM}(\vect{x}) = \vect{\mu}(\vect{x}) + \Cov{\vect{Y}(\omega, \vect{x}), (\vect{Y}(\omega, \vect{x}_1), \dots, \vect{Y}(\omega, \vect{x}_{\sampleSize}))} \vect{\gamma}
+
+where :math:`\Cov{\vect{Y}(\omega, \vect{x}), (\vect{Y}(\omega, \vect{x}_1), \dots, \vect{Y}(\omega, \vect{x}_{\sampleSize}))} = \left( \mat{C}( \vect{x}, \vect{x}_1) | \dots | \mat{C}( \vect{x}, \vect{x}_{\sampleSize}) \right)` is a matrix in :math:`\cM_{\outputDim,\sampleSize \times \outputDim}(\Rset)` and :math:`\vect{\gamma} = \mat{C}^{-1}(\vect{y}-\vect{m})`.
+
+Examples
+--------
+Create the model :math:`\cM: \Rset \mapsto \Rset` and the samples:
+
+>>> import openturns as ot
+>>> import openturns.experimental as otexp
+>>> g = ot.SymbolicFunction(['x'], ['x * sin(x)'])
+>>> sampleX = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0], [7.0], [8.0]]
+>>> sampleY = g(sampleX)
+
+Create the algorithm:
+
+>>> basis = ot.Basis([ot.SymbolicFunction(['x'], ['x']), ot.SymbolicFunction(['x'], ['x^2'])])
+>>> covarianceModel = ot.SquaredExponential([1.0])
+>>> covarianceModel.setActiveParameter([])
+>>> fit_algo = otexp.GaussianProcessFitter(sampleX, sampleY, covarianceModel, basis)
+>>> fit_algo.run()
+
+Get the resulting meta model:
+
+>>> fit_result = fit_algo.getResult()
+>>> algo = otexp.GaussianProcessRegression(fit_result)
+>>> algo.run()
+>>> result = algo.getResult()
+>>> metamodel = result.getMetaModel()
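+
+The second constructor can be used when the trend and the covariance model are already calibrated (a minimal sketch; the parameter values are assumed, illustrative numbers):
+
+>>> covarianceModel2 = ot.SquaredExponential([1.63], [4.9])
+>>> trend = ot.SymbolicFunction(['x'], ['1.49543'])
+>>> algo2 = otexp.GaussianProcessRegression(sampleX, sampleY, covarianceModel2, trend)
+>>> algo2.run()"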
+
+// ---------------------------------------------------------------------
+
+%feature("docstring") OT::GaussianProcessRegression::getResult
+"Get the results of the metamodel computation.
+
+Returns
+-------
+result : :class:`~openturns.experimental.GaussianProcessRegressionResult`
+ Structure containing all the results obtained after computation
+ and created by the method :py:meth:`run`.
+"
+// ---------------------------------------------------------------------
+
+%feature("docstring") OT::GaussianProcessRegression::run
+"Compute the response surface.
+
+Notes
+-----
+It computes the kriging response surface and creates a
+:class:`~openturns.experimental.GaussianProcessRegressionResult` structure containing all the results."
+
+// ---------------------------------------------------------------------
+
diff --git a/python/src/LinearModelTest_doc.i.in b/python/src/LinearModelTest_doc.i.in
index e946850e3f..98bc9fecd8 100644
--- a/python/src/LinearModelTest_doc.i.in
+++ b/python/src/LinearModelTest_doc.i.in
@@ -303,7 +303,7 @@ Parameters
firstSample : 2-d sequence of float
First tested sample.
secondSample : 2-d sequence of float
- Second tested sample, of dimension 1.
+    Second tested sample, of dimension 1. This sample must be sorted in increasing order.
linearModelResult : :class:`~openturns.LinearModelResult`
A linear model. If not provided, it is built using the given samples.
hypothesis : str
@@ -339,8 +339,8 @@ scalar variable :math:`Y` and the :math:`n`-dimensional one
where :math:`\epsilon` is the residual.
The Durbin-Watson test checks the autocorrelation of the residuals. It is possible
-to test is the autocorrelation is equal to 0, and less or greater than 0. The
-p-value is computed using a normal approximation with mean and variance of the
+to test whether the autocorrelation is equal to 0.
+The p-value is computed using a normal approximation based on the mean and variance of the
Durbin-Watson test statistic. If the binary quality measure is false, then the
given autocorrelation hypothesis can be rejected with respect to the given level.
diff --git a/python/src/LowDiscrepancySequenceImplementation_doc.i.in b/python/src/LowDiscrepancySequenceImplementation_doc.i.in
index 0448816b06..7fc8d22bc6 100644
--- a/python/src/LowDiscrepancySequenceImplementation_doc.i.in
+++ b/python/src/LowDiscrepancySequenceImplementation_doc.i.in
@@ -17,6 +17,7 @@ The **discrepancy** of a set :math:`P = \{x_1, \hdots, x_N\}` is defined,
using Niederreiter's notation, as:
.. math::
+ :label: discrepancyDef
D_N(P) = \sup_{B\in J} \left| \frac{A(B;P)}{N} - \lambda_s(B) \right|
@@ -25,6 +26,7 @@ is the number of points in :math:`P` that fall into :math:`B`, and :math:`J` is
the set of s-dimensional intervals or boxes of the form:
.. math::
+ :label: discrepancySet
\prod_{i=1}^s [a_i, b_i) = \{ \mathbf{x} \in \mathbf{R}^s : a_i \le x_i < b_i \} \,
@@ -34,6 +36,7 @@ The star-discrepancy :math:`D_N^*(P)` is defined similarly, except that the
supremum is taken over the set :math:`J^*` of intervals of the form:
.. math::
+ :label: starDiscrepancySet
\prod_{i=1}^s [0, u_i)
@@ -74,6 +77,10 @@ Returns
starDiscrepancy : float
Star discrepancy of a sample uniformly distributed over [0, 1).
+Notes
+-----
+The star discrepancy is detailed in :eq:`discrepancyDef` and :eq:`starDiscrepancySet`.
+
Examples
--------
>>> import openturns as ot
diff --git a/python/src/OrthogonalFunctionFactory_doc.i.in b/python/src/OrthogonalFunctionFactory_doc.i.in
index 2a0f3d324a..fc8c813330 100644
--- a/python/src/OrthogonalFunctionFactory_doc.i.in
+++ b/python/src/OrthogonalFunctionFactory_doc.i.in
@@ -105,3 +105,18 @@ Normal(mu = 0, sigma = 1)"
%enddef
%feature("docstring") OT::OrthogonalFunctionFactory::getMeasure
OT_OrthogonalBasis_getMeasure_doc
+
+// ---------------------------------------------------------------------
+
+%feature("docstring") OT::OrthogonalFunctionFactory::getMarginal
+"Get the marginal orthogonal functions.
+
+Parameters
+----------
+indices : sequence of int, :math:`0 \leq i < \inputDim`
+ List of marginal indices of the input variables.
+
+Returns
+-------
+functionFamilylist : list of :class:`~openturns.OrthogonalBasis`
+ The marginal orthogonal functions."
diff --git a/python/src/OrthogonalProductFunctionFactory_doc.i.in b/python/src/OrthogonalProductFunctionFactory_doc.i.in
index 2b74f374a2..1a27230184 100644
--- a/python/src/OrthogonalProductFunctionFactory_doc.i.in
+++ b/python/src/OrthogonalProductFunctionFactory_doc.i.in
@@ -29,11 +29,17 @@ OrthogonalProductPolynomialFactory
Examples
--------
+Create from a list of orthogonal functions.
+
>>> import openturns as ot
->>> from math import pi
>>> funcColl = [ot.HaarWaveletFactory(), ot.FourierSeriesFactory()]
->>> dim = len(funcColl)
->>> enumerateFunction = ot.LinearEnumerateFunction(dim)
+>>> productBasis = ot.OrthogonalProductFunctionFactory(funcColl)
+
+Set an enumerate function.
+
+>>> funcColl = [ot.HaarWaveletFactory(), ot.FourierSeriesFactory()]
+>>> inputDimension = len(funcColl)
+>>> enumerateFunction = ot.LinearEnumerateFunction(inputDimension)
>>> productBasis = ot.OrthogonalProductFunctionFactory(funcColl, enumerateFunction)"
// ---------------------------------------------------------------------
@@ -45,3 +51,25 @@ Returns
-------
polynomialFamily : list of :class:`~openturns.OrthogonalUniVariateFunctionFamily`
List of orthogonal univariate function families."
+
+// ---------------------------------------------------------------------
+
+%feature("docstring") OT::OrthogonalProductFunctionFactory::getMarginal
+"Get the marginal orthogonal functions.
+
+Parameters
+----------
+indices : sequence of int, :math:`0 \leq i < \inputDim`
+ List of marginal indices of the input variables.
+
+Returns
+-------
+functionFamilylist : list of :class:`~openturns.OrthogonalUniVariateFunctionFamily`
+ The marginal orthogonal functions.
+
+Examples
+--------
+>>> import openturns as ot
+>>> funcColl = [ot.HaarWaveletFactory(), ot.FourierSeriesFactory(), ot.HaarWaveletFactory()]
+>>> productBasis = ot.OrthogonalProductFunctionFactory(funcColl)
+>>> marginalProduct = productBasis.getMarginal([0, 2]) # [ot.HaarWaveletFactory(), ot.HaarWaveletFactory()]"
diff --git a/python/src/OrthogonalProductPolynomialFactory_doc.i.in b/python/src/OrthogonalProductPolynomialFactory_doc.i.in
index 776c007c43..8c72e25a5f 100644
--- a/python/src/OrthogonalProductPolynomialFactory_doc.i.in
+++ b/python/src/OrthogonalProductPolynomialFactory_doc.i.in
@@ -38,19 +38,23 @@ StandardDistributionPolynomialFactory
Examples
--------
+Create from a collection of orthogonal polynomials.
+
>>> import openturns as ot
->>> # Define the model
->>> myModel = ot.SymbolicFunction(['x1','x2','x3'], ['1+x1*x2 + 2*x3^2'])
->>> # Create a distribution of dimension 3
->>> Xdist = ot.JointDistribution([ot.Normal(), ot.Uniform(), ot.Gamma(2.75, 1.0)])
->>> # Create the multivariate orthonormal basis
->>> polyColl = [ot.HermiteFactory(), ot.LegendreFactory(), ot.LaguerreFactory(2.75)]
->>> enumerateFunction = ot.LinearEnumerateFunction(3)
->>> productBasis = ot.OrthogonalProductPolynomialFactory(polyColl, enumerateFunction)
+>>> polyColl = [ot.HermiteFactory(), ot.LegendreFactory(), ot.LaguerreFactory()]
+>>> productBasis = ot.OrthogonalProductPolynomialFactory(polyColl)
+
+Easy way to create a multivariate orthonormal basis from a distribution.
->>> # Easier way to create the same multivariate orthonormal basis
+>>> Xdist = ot.JointDistribution([ot.Normal(), ot.Uniform(), ot.Gamma(2.75, 1.0)])
>>> marginals = [Xdist.getMarginal(i) for i in range(Xdist.getDimension())]
->>> productBasis = ot.OrthogonalProductPolynomialFactory(marginals)"
+>>> productBasis = ot.OrthogonalProductPolynomialFactory(marginals)
+
+Set an enumerate function.
+
+>>> polyColl = [ot.HermiteFactory(), ot.LegendreFactory(), ot.LaguerreFactory()]
+>>> enumerateFunction = ot.LinearEnumerateFunction(3)
+>>> productBasis = ot.OrthogonalProductPolynomialFactory(polyColl, enumerateFunction)"
// ---------------------------------------------------------------------
@@ -104,3 +108,25 @@ Examples
1 : [ 1 -0.774597 3.75 ]
>>> print(weights[:2])
[0.138889,0.138889]"
+
+// ---------------------------------------------------------------------
+
+%feature("docstring") OT::OrthogonalProductPolynomialFactory::getMarginal
+"Get the marginal orthogonal polynomials.
+
+Parameters
+----------
+indices : sequence of int, :math:`0 \leq i < \inputDim`
+ List of marginal indices of the input variables.
+
+Returns
+-------
+polynomialFamilylist : list of :class:`~openturns.OrthogonalUniVariatePolynomialFamily`
+ The marginal orthogonal polynomials.
+
+Examples
+--------
+>>> import openturns as ot
+>>> funcColl = [ot.LegendreFactory(), ot.LaguerreFactory(), ot.HermiteFactory()]
+>>> productBasis = ot.OrthogonalProductPolynomialFactory(funcColl)
+>>> marginalProduct = productBasis.getMarginal([0, 2]) # [ot.LegendreFactory(), ot.HermiteFactory()]"
diff --git a/python/src/SpaceFillingMinDist_doc.i.in b/python/src/SpaceFillingMinDist_doc.i.in
index 3d1d729652..7d77ca81aa 100644
--- a/python/src/SpaceFillingMinDist_doc.i.in
+++ b/python/src/SpaceFillingMinDist_doc.i.in
@@ -9,10 +9,24 @@ Compute the criterion based on the minimal distance of sample points:
:nowrap:
\begin{equation*}
- \phi_{min}(X) = \min_{i \neq j} ||x^{(i)} - x^{(j)}||
+ \phi_{min}(\mat{X}) = \min_{i \neq j} \norm{ \vect{x}^{(i)} - \vect{x}^{(j)} }
\end{equation*}
-"
+
+If at least one of the sample points does not belong to the unit cube
+(i.e. not all components belong to the interval :math:`[0,1]`),
+then the whole sample is rescaled.
+Letting :math:`\vect{M}` (resp. :math:`\vect{m}`) denote the point
+containing the component-wise maximum (resp. minimum) values of the sample,
+the actual formula is in this case:
+
+.. math::
+ :nowrap:
+
+ \begin{equation*}
+        \phi_{min}(\mat{X}) = \min_{i \neq j} \norm{ \frac{\vect{x}^{(i)} - \vect{x}^{(j)}}{\vect{M} - \vect{m}} }
+ \end{equation*}
+"
%enddef
%feature("docstring") OT::SpaceFillingMinDist
OT_SpaceFillingMinDist_doc
diff --git a/python/src/SquaredExponential_doc.i.in b/python/src/SquaredExponential_doc.i.in
index 15ad9467e2..12006cc899 100644
--- a/python/src/SquaredExponential_doc.i.in
+++ b/python/src/SquaredExponential_doc.i.in
@@ -2,7 +2,7 @@
"Squared exponential covariance function.
Available constructors:
- SquaredExponential(*spatialDim=1*)
+ SquaredExponential(*spatialDim*)
SquaredExponential(*scale*)
diff --git a/python/src/common_module.i b/python/src/common_module.i
index 082fb20b99..db591a797a 100644
--- a/python/src/common_module.i
+++ b/python/src/common_module.i
@@ -28,6 +28,12 @@
#endif
#if defined(OPENTURNS_HAVE_HDF5) && defined(OPENTURNS_HAVE_LIBXML2)
%include XMLH5StorageManager.i
+#else
+// needed for sphinx
+%pythoncode %{
+class XMLH5StorageManager:
+ pass
+%}
#endif
%include TTY.i
%include Log.i
diff --git a/python/src/experimental_module.i b/python/src/experimental_module.i
index 4e3cbc6970..f17689cb2f 100644
--- a/python/src/experimental_module.i
+++ b/python/src/experimental_module.i
@@ -83,7 +83,11 @@
%include UniformOrderStatistics.i
%include GeneralizedExtremeValueValidation.i
%include GeneralizedParetoValidation.i
-
/* Uncertainty/Algorithm/Metamodel */
%include FunctionalChaosValidation.i
%include LinearModelValidation.i
+%include GaussianProcessFitterResult.i
+%include GaussianProcessFitter.i
+%include GaussianProcessRegressionResult.i
+%include GaussianProcessRegression.i
+%include GaussianProcessConditionalCovariance.i
\ No newline at end of file
diff --git a/python/src/orthogonalbasis_module.i b/python/src/orthogonalbasis_module.i
index 361db9497d..1d8f548be8 100644
--- a/python/src/orthogonalbasis_module.i
+++ b/python/src/orthogonalbasis_module.i
@@ -28,6 +28,7 @@
%import weightedexperiment_module.i
/* Uncertainty/Algorithm/OrthogonalBasis */
+%import func_module.i
%include OrthogonalUniVariatePolynomial.i
%include OrthogonalUniVariatePolynomialFactory.i
%include CharlierFactory.i
@@ -45,14 +46,14 @@
%include AdaptiveStieltjesAlgorithm.i
%include StandardDistributionPolynomialFactory.i
%include OrthogonalFunctionFactory.i
-%include OrthogonalProductPolynomialFactory.i
%include OrthogonalBasis.i
%include OrthogonalUniVariateFunctionFactory.i
%include OrthogonalUniVariateFunctionFamily.i
%include OrthogonalUniVariatePolynomialFunctionFactory.i
+%include OrthogonalProductFunctionFactory.i
+%include OrthogonalProductPolynomialFactory.i
%include FourierSeriesFactory.i
%include HaarWaveletFactory.i
-%include OrthogonalProductFunctionFactory.i
%include SoizeGhanemFactory.i
/* At last we include template definitions */
diff --git a/python/src/viewer.py b/python/src/viewer.py
index 9f68bc81ad..2a3b0bdb75 100644
--- a/python/src/viewer.py
+++ b/python/src/viewer.py
@@ -596,9 +596,12 @@ def __init__(
elif drawableKind == "Contour":
contour = drawable.getImplementation()
X, Y = np.meshgrid(drawable.getX(), drawable.getY())
+ # X values move with columns
+ # Y values move with rows
+ # Z shape is therefore (getY().getSize(), getX().getSize())
Z = np.reshape(
drawable.getData(),
- (drawable.getX().getSize(), drawable.getY().getSize()),
+ (drawable.getY().getSize(), drawable.getX().getSize()),
)
if len(drawable.getLevels()) > 0:
contour_kw.setdefault("levels", drawable.getLevels())
diff --git a/python/test/CMakeLists.txt b/python/test/CMakeLists.txt
index e7ec0656bc..b4f5da4ee9 100644
--- a/python/test/CMakeLists.txt
+++ b/python/test/CMakeLists.txt
@@ -713,6 +713,7 @@ ot_pyinstallcheck_test (OrthogonalUniVariatePolynomialFunctionFactory_std)
ot_pyinstallcheck_test (FourierSeriesFactory_std)
ot_pyinstallcheck_test (HaarWaveletFactory_std)
ot_pyinstallcheck_test (OrthogonalProductPolynomialFactory_std)
+ot_pyinstallcheck_test (OrthogonalProductFunctionFactory_std)
## Algorithm
ot_pyinstallcheck_test (TaylorExpansionMoments_std)
@@ -757,12 +758,19 @@ ot_pyinstallcheck_test (KrigingRandomVector_std)
ot_pyinstallcheck_test (MetaModelAlgorithm_std)
ot_pyinstallcheck_test (MetaModelValidation_std)
ot_pyinstallcheck_test (GeneralLinearModelAlgorithm_std)
+ot_pyinstallcheck_test (GaussianProcessFitter_std IGNOREOUT)
+ot_pyinstallcheck_test (GaussianProcessRegression_std IGNOREOUT)
+ot_pyinstallcheck_test (GaussianProcessConditionalCovariance_std IGNOREOUT)
if (HMAT_FOUND)
ot_pyinstallcheck_test (KrigingAlgorithm_std_hmat)
ot_pyinstallcheck_test (GeneralLinearModelAlgorithm_std_hmat)
+ ot_pyinstallcheck_test (GaussianProcessFitter_std_hmat IGNOREOUT)
+ ot_pyinstallcheck_test (GaussianProcessRegression_std_hmat IGNOREOUT)
endif ()
if (NLopt_FOUND)
ot_pyinstallcheck_test (GeneralLinearModelAlgorithm_nlopt IGNOREOUT)
+ ot_pyinstallcheck_test (GaussianProcessFitter_nlopt IGNOREOUT)
+ ot_pyinstallcheck_test (GaussianProcessRegression_nlopt IGNOREOUT)
endif ()
ot_pyinstallcheck_test (LinearModelAlgorithm_std)
ot_pyinstallcheck_test (LinearModelAnalysis_std)
diff --git a/python/test/t_GaussianProcessConditionalCovariance_std.py b/python/test/t_GaussianProcessConditionalCovariance_std.py
new file mode 100644
index 0000000000..8f5257d9ae
--- /dev/null
+++ b/python/test/t_GaussianProcessConditionalCovariance_std.py
@@ -0,0 +1,289 @@
+#! /usr/bin/env python
+
+import openturns as ot
+from openturns.experimental import GaussianProcessRegression, GaussianProcessFitter, GaussianProcessConditionalCovariance
+import openturns.testing as ott
+
+ot.TESTPREAMBLE()
+ot.ResourceMap.SetAsUnsignedInteger("OptimizationAlgorithm-DefaultMaximumCallsNumber", 20000)
+ot.ResourceMap.SetAsScalar("Cobyla-DefaultRhoBeg", 0.5)
+ot.ResourceMap.SetAsScalar("OptimizationAlgorithm-DefaultMaximumAbsoluteError", 1e-8)
+
+
+# Test 1
+def test_one_input_one_output():
+ sampleSize = 6
+ dimension = 1
+
+ f = ot.SymbolicFunction(["x0"], ["x0 * sin(x0)"])
+
+ X = ot.Sample(sampleSize, dimension)
+ X2 = ot.Sample(sampleSize, dimension)
+ for i in range(sampleSize):
+ X[i, 0] = 3.0 + i
+ X2[i, 0] = 2.5 + i
+ X[0, 0] = 1.0
+ X[1, 0] = 3.0
+ X2[0, 0] = 2.0
+ X2[1, 0] = 4.0
+ Y = f(X)
+
+ # create covariance model
+ basis = ot.ConstantBasisFactory(dimension).build()
+ covarianceModel = ot.SquaredExponential()
+
+ # create algorithm
+ fit_algo = GaussianProcessFitter(X, Y, covarianceModel, basis)
+
+ # set sensible optimization bounds and estimate hyper parameters
+ fit_algo.setOptimizationBounds(ot.Interval(X.getMin(), X.getMax()))
+ fit_algo.run()
+
+ # perform an evaluation
+ fit_result = fit_algo.getResult()
+
+ algo = GaussianProcessRegression(fit_result)
+ algo.run()
+ result = algo.getResult()
+ gccc = GaussianProcessConditionalCovariance(result)
+
+ mean = gccc.getConditionalMean(X)
+ ott.assert_almost_equal(mean, Y, 0.0, 1e-13)
+
+ covariance = gccc.getConditionalCovariance(X)
+ nullMatrix = ot.Matrix(sampleSize, sampleSize)
+ ott.assert_almost_equal(covariance, nullMatrix, 0.0, 1e-13)
+
+ # Covariance per marginal & extract variance component
+ coll = gccc.getConditionalMarginalCovariance(X)
+ var = [mat[0, 0] for mat in coll]
+ ott.assert_almost_equal(var, [0] * sampleSize, 1e-14, 1e-13)
+
+ # Variance per marginal
+ var = gccc.getConditionalMarginalVariance(X)
+ ott.assert_almost_equal(var, ot.Sample(sampleSize, 1), 1e-14, 1e-13)
+
+ # Kriging variance is non-null on validation points
+ validCovariance = gccc.getConditionalCovariance(X2)
+ values = ot.Matrix(
+ [
+ [0.81942182, -0.35599947, -0.17488593, 0.04622401, -0.03143555, 0.04054783],
+ [-0.35599947, 0.20874735, 0.10943841, -0.03236419, 0.02397483, -0.03269184],
+ [-0.17488593, 0.10943841, 0.05832917, -0.01779918, 0.01355719, -0.01891618],
+ [0.04622401, -0.03236419, -0.01779918, 0.00578327, -0.00467674, 0.00688697],
+ [-0.03143555, 0.02397483, 0.01355719, -0.00467674, 0.0040267, -0.00631173],
+ [0.04054783, -0.03269184, -0.01891618, 0.00688697, -0.00631173, 0.01059488],
+ ]
+ )
+ ott.assert_almost_equal(validCovariance - values, nullMatrix, 1.e-5, 1e-6)
+
+
+# Test 2
+def test_two_inputs_one_output():
+
+ inputDimension = 2
+ # Learning data
+ levels = [8, 5]
+ box = ot.Box(levels)
+ inputSample = box.generate()
+ # Scale each direction
+ inputSample *= 10.0
+
+ model = ot.SymbolicFunction(["x", "y"], ["cos(0.5*x) + sin(y)"])
+ outputSample = model(inputSample)
+
+ # 2) Definition of exponential model
+ # The parameters have been calibrated using TNC optimization
+ # and AbsoluteExponential models
+ scale = [5.33532, 2.61534]
+ amplitude = [1.61536]
+ covarianceModel = ot.SquaredExponential(scale, amplitude)
+
+ # 3) Basis definition
+ basis = ot.ConstantBasisFactory(inputDimension).build()
+
+ # 4) GPF algorithm
+ fit_algo = GaussianProcessFitter(inputSample, outputSample, covarianceModel, basis)
+ # set sensible optimization bounds and estimate hyper parameters
+ fit_algo.setOptimizationBounds(ot.Interval(inputSample.getMin(), inputSample.getMax()))
+ fit_algo.run()
+
+ # perform an evaluation
+ fit_result = fit_algo.getResult()
+ # Regression algorithm
+ algo = GaussianProcessRegression(fit_result)
+ algo.run()
+ result = algo.getResult()
+
+ # 5) Kriging variance is 0 on learning points
+ gccc = GaussianProcessConditionalCovariance(result)
+ covariance = gccc.getConditionalCovariance(inputSample)
+
+ mean = gccc.getConditionalMean(inputSample)
+ ott.assert_almost_equal(mean, outputSample, 1e-5, 1e-8)
+
+ ott.assert_almost_equal(covariance, ot.SquareMatrix(len(inputSample)), 7e-7, 7e-7)
+
+ # Covariance per marginal & extract variance component
+ coll = gccc.getConditionalMarginalCovariance(inputSample)
+ var = [mat[0, 0] for mat in coll]
+ ott.assert_almost_equal(var, [0] * len(var), 0.0, 1e-13)
+
+ # Variance per marginal
+ var = gccc.getConditionalMarginalVariance(inputSample)
+ ott.assert_almost_equal(var, ot.Sample(inputSample.getSize(), 1), 0.0, 1e-13)
+
+
+def test_two_outputs():
+ f = ot.SymbolicFunction(["x"], ["x * sin(x)", "x * cos(x)"])
+ sampleX = ot.Sample([[1.0], [2.0], [3.0], [4.0], [5.0], [6.0], [7.0], [8.0]])
+ sampleY = f(sampleX)
+ # Build a basis phi from R --> R^2
+ # phi_{0,0} = phi_{0,1} = x
+ # phi_{1,0} = phi_{1,1} = x^2
+ phi0 = ot.AggregatedFunction(
+ [ot.SymbolicFunction(["x"], ["x"]), ot.SymbolicFunction(["x"], ["x"])]
+ )
+ phi1 = ot.AggregatedFunction(
+ [ot.SymbolicFunction(["x"], ["x^2"]), ot.SymbolicFunction(["x"], ["x^2"])]
+ )
+ basis = ot.Basis([phi0, phi1])
+ covarianceModel = ot.SquaredExponential([1.0])
+ covarianceModel.setActiveParameter([])
+ covarianceModel = ot.TensorizedCovarianceModel([covarianceModel] * 2)
+
+ fit_algo = GaussianProcessFitter(sampleX, sampleY, covarianceModel, basis)
+ # set sensible optimization bounds and estimate hyper parameters
+ fit_algo.run()
+
+ # perform an evaluation
+ fit_result = fit_algo.getResult()
+ algo = GaussianProcessRegression(fit_result)
+ algo.run()
+ result = algo.getResult()
+ gccc = GaussianProcessConditionalCovariance(result)
+
+ mean = gccc.getConditionalMean(sampleX)
+ ott.assert_almost_equal(mean, sampleY, 1e-5, 1e-8)
+
+ # Check the conditional covariance
+ reference_covariance = ot.Matrix(
+ [
+ [4.4527, 0.0, 8.34404, 0.0],
+ [0.0, 2.8883, 0.0, 5.41246],
+ [8.34404, 0.0, 15.7824, 0.0],
+ [0.0, 5.41246, 0.0, 10.2375],
+ ]
+ )
+ ott.assert_almost_equal(
+ gccc([[9.5], [10.0]]).getCovariance() - reference_covariance,
+ ot.Matrix(4, 4),
+ 0.0,
+ 2e-2,
+ )
+
+ marginalVariance_0 = gccc.getConditionalMarginalVariance(sampleX, 0)
+ ott.assert_almost_equal(marginalVariance_0, ot.Sample(len(sampleX), 1), 0., 1e-6)
+ marginalVariance_1 = gccc.getConditionalMarginalVariance(sampleX, 1)
+ ott.assert_almost_equal(marginalVariance_1, ot.Sample(len(sampleX), 1), 0., 1e-6)
+
+ # Marginal variance on a specific point
+ x = [1.1]
+ covTest = gccc.getConditionalMarginalCovariance(x)
+ ref_cov_test = ot.Matrix(
+ [
+ [0.000205032, 2.28332e-20],
+ [2.28332e-20, 0.000133002],
+ ]
+ )
+ margVarTest_0 = gccc.getConditionalMarginalVariance(x, 0)
+ margVarTest_1 = gccc.getConditionalMarginalVariance(x, 1)
+ ott.assert_almost_equal(covTest[0, 0], margVarTest_0, 0., 0)
+ ott.assert_almost_equal(covTest[1, 1], margVarTest_1, 0., 0)
+ ott.assert_almost_equal(covTest, ref_cov_test, 1.e-6, 1e-6)
+
+
+def test_stationary_fun():
+ # fix https://github.com/openturns/openturns/issues/1861
+ ot.RandomGenerator.SetSeed(0)
+ rho = ot.SymbolicFunction("tau", "exp(-abs(tau))*cos(2*pi_*abs(tau))")
+ model = ot.StationaryFunctionalCovarianceModel([1], [1], rho)
+ x = ot.Normal().getSample(20)
+ x.setDescription(["J0"])
+ y = x + ot.Normal(0, 0.1).getSample(20)
+ y.setDescription(["G0"])
+
+ fit_algo = GaussianProcessFitter(x, y, model, ot.LinearBasisFactory().build())
+ # set sensible optimization bounds and estimate hyper parameters
+ fit_algo.run()
+
+ # perform an evaluation
+ fit_result = fit_algo.getResult()
+ algo = GaussianProcessRegression(fit_result)
+
+ algo.run()
+ result = algo.getResult()
+
+ gccc = GaussianProcessConditionalCovariance(result)
+ variance = gccc.getConditionalMarginalVariance(x)
+ ott.assert_almost_equal(variance, ot.Sample(len(x), 1), 1e-15, 1e-15)
+
+
+def test_gpr_no_opt():
+ sampleSize = 6
+ dimension = 1
+
+ f = ot.SymbolicFunction(["x0"], ["x0 * sin(x0)"])
+
+ X = ot.Sample(sampleSize, dimension)
+ X2 = ot.Sample(sampleSize, dimension)
+ for i in range(sampleSize):
+ X[i, 0] = 3.0 + i
+ X2[i, 0] = 2.5 + i
+ X[0, 0] = 1.0
+ X[1, 0] = 3.0
+ X2[0, 0] = 2.0
+ X2[1, 0] = 4.0
+ Y = f(X)
+
+ # create covariance model
+ covarianceModel = ot.SquaredExponential([1.6326932047296538], [4.895995962015954])
+ trend_function = ot.SymbolicFunction("x", "1.49543")
+ # GPR (comparable with test_one_input_one_output)
+ algo = GaussianProcessRegression(X, Y, covarianceModel, trend_function)
+ algo.run()
+ result = algo.getResult()
+ gccc = GaussianProcessConditionalCovariance(result)
+
+ mean = gccc.getConditionalMean(X)
+ ott.assert_almost_equal(mean, Y, 1e-5, 1e-8)
+
+ covariance = gccc.getConditionalCovariance(X)
+ nullMatrix = ot.Matrix(sampleSize, sampleSize)
+ ott.assert_almost_equal(covariance, nullMatrix, 0.0, 1e-13)
+
+ variance = gccc.getConditionalMarginalVariance(X)
+ ott.assert_almost_equal(variance, ot.Sample(len(X), 1), 1e-14, 1e-14)
+
+ # Kriging variance is non-null on validation points
+ validCovariance = gccc.getConditionalCovariance(X2)
+
+ values = ot.Matrix(
+ [
+ [0.786400318519185, -0.342314710430317, -0.167625132016427, 0.0437937446519361, -0.0291542115306344, 0.0362074153614559],
+ [-0.342314710430317, 0.20307609313608, 0.106429376006901, -0.0313570361766278, 0.0230293899173111, -0.0308930847149105],
+ [-0.167625132016427, 0.106429376006901, 0.0567326538237296, -0.0172648099111221, 0.0130555631357385, -0.0179618049872801],
+ [0.0437937446519361, -0.0313570361766278, -0.0172648099111221, 0.00560441404059731, -0.00450884121944028, 0.00656752917461922],
+ [-0.0291542115306344, 0.0230293899173111, 0.0130555631357385, -0.00450884121944028, 0.00386908619998749, -0.00601186391616793],
+ [0.0362074153614559, -0.0308930847149105, -0.0179618049872801, 0.00656752917461922, -0.00601186391616793, 0.0100243621895402]
+ ]
+ )
+ ott.assert_almost_equal(validCovariance - values, nullMatrix, 1.e-8, 1e-8)
+
+
+if __name__ == "__main__":
+ test_one_input_one_output()
+ test_two_inputs_one_output()
+ test_two_outputs()
+ test_stationary_fun()
+ test_gpr_no_opt()
diff --git a/python/test/t_GaussianProcessFitter_nlopt.py b/python/test/t_GaussianProcessFitter_nlopt.py
new file mode 100644
index 0000000000..8f30e9c093
--- /dev/null
+++ b/python/test/t_GaussianProcessFitter_nlopt.py
@@ -0,0 +1,43 @@
+#! /usr/bin/env python
+
+import openturns as ot
+from openturns.experimental import GaussianProcessFitter
+import openturns.testing as ott
+
+ot.TESTPREAMBLE()
+
+# Set precision
+ot.PlatformInfo.SetNumericalPrecision(3)
+
+# Calibration of default optimizer
+ot.ResourceMap.SetAsScalar(
+ "GaussianProcessFitter-DefaultOptimizationLowerBound", 1.0e-5
+)
+ot.ResourceMap.SetAsScalar(
+ "GaussianProcessFitter-DefaultOptimizationUpperBound", 100
+)
+# Data & estimation
+inputDimension = 1
+X = ot.Normal().getSample(100)
+X = X.sortAccordingToAComponent(0)
+covarianceModel = ot.SquaredExponential([1.0], [1.0])
+model = ot.SymbolicFunction(["x"], ["x - 0.6 * cos(x/3)"])
+Y = model(X)
+basis = ot.QuadraticBasisFactory(inputDimension).build()
+algo = GaussianProcessFitter(X, Y, covarianceModel, basis)
+algo.setKeepCholeskyFactor(False)
+algo.setOptimizationAlgorithm(ot.NLopt("LN_NELDERMEAD"))
+algo.run()
+
+# perform an evaluation
+result = algo.getResult()
+metaModel = result.getMetaModel()
+conditionalCovariance = result.getCovarianceModel()
+residual = metaModel(X) - Y
+ott.assert_almost_equal(residual.computeCentralMoment(2), [1.06e-05], 1e-5, 1e-5)
+ott.assert_almost_equal(
+ conditionalCovariance.getParameter(), [0.619144, 0.000937], 5e-3, 1e-3
+)
+likelihood = algo.getObjectiveFunction()
+assert likelihood.getInputDimension() == 1, "likelihood dim"
diff --git a/python/test/t_GaussianProcessFitter_std.py b/python/test/t_GaussianProcessFitter_std.py
new file mode 100644
index 0000000000..e8fe69a080
--- /dev/null
+++ b/python/test/t_GaussianProcessFitter_std.py
@@ -0,0 +1,160 @@
+import openturns as ot
+from openturns.experimental import GaussianProcessFitter
+import openturns.testing as ott
+
+ot.PlatformInfo.SetNumericalPrecision(4)
+
+
+ot.TESTPREAMBLE()
+
+
+def use_case_1(X, Y):
+ """
+ optim problem (scale)
+ Dirac model
+ """
+ basis = ot.LinearBasisFactory(inputDimension).build()
+ # Case of a misspecified covariance model
+ covarianceModel = ot.DiracCovarianceModel(inputDimension)
+ algo = GaussianProcessFitter(X, Y, covarianceModel, basis)
+ assert algo.getOptimizeParameters()
+ assert algo.getKeepCholeskyFactor()
+ algo.setKeepCholeskyFactor(False)
+ algo.run()
+ cov_amplitude = [0.19575]
+ trend_coefficients = [-0.1109, 1.0149]
+ result = algo.getResult()
+ ott.assert_almost_equal(result.getCovarianceModel().getAmplitude(), cov_amplitude, 1e-4, 1e-4)
+ ott.assert_almost_equal(result.getTrendCoefficients(), trend_coefficients, 1e-4, 1e-4)
+
+
+def use_case_2(X, Y):
+ """
+ No optim with Dirac model
+ """
+ basis = ot.LinearBasisFactory(inputDimension).build()
+ # Case of a misspecified covariance model
+ covarianceModel = ot.DiracCovarianceModel(inputDimension)
+ algo = GaussianProcessFitter(X, Y, covarianceModel, basis)
+ assert algo.getKeepCholeskyFactor()
+ algo.setKeepCholeskyFactor(False)
+ algo.setOptimizeParameters(False)
+ algo.run()
+ cov_amplitude = [1]
+ trend_coefficients = [-0.1109, 1.01498]
+ result = algo.getResult()
+ ott.assert_almost_equal(result.getCovarianceModel().getAmplitude(), cov_amplitude, 1e-4, 1e-4)
+ ott.assert_almost_equal(result.getTrendCoefficients(), trend_coefficients, 1e-4, 1e-4)
+
+
+def use_case_3(X, Y):
+ """
+ full optim problem (scale)
+ analytical variance estimate
+ """
+ basis = ot.LinearBasisFactory(inputDimension).build()
+ # Case of a misspecified covariance model
+ covarianceModel = ot.AbsoluteExponential(inputDimension)
+ algo = GaussianProcessFitter(X, Y, covarianceModel, basis)
+ assert algo.getOptimizeParameters()
+ algo.setKeepCholeskyFactor(False)
+ algo.run()
+ cov_param = [0.1327, 0.1956]
+ trend_coefficients = [-0.1034, 1.0141]
+ result = algo.getResult()
+ assert algo.getOptimizationAlgorithm().getImplementation().getClassName() == "Cobyla"
+ ott.assert_almost_equal(result.getCovarianceModel().getParameter(), cov_param, 1e-4, 1e-4)
+ ott.assert_almost_equal(result.getTrendCoefficients(), trend_coefficients, 1e-4, 1e-4)
+
+
+def use_case_4(X, Y):
+ """
+ optim problem (scale)
+ Biased variance estimate
+ """
+ ot.ResourceMap.SetAsBool("GeneralLinearModelAlgorithm-UnbiasedVariance", False)
+ basis = ot.LinearBasisFactory(inputDimension).build()
+ # Case of a misspecified covariance model
+ covarianceModel = ot.AbsoluteExponential(inputDimension)
+ algo = GaussianProcessFitter(X, Y, covarianceModel, basis)
+ assert algo.getKeepCholeskyFactor()
+ assert algo.getOptimizeParameters()
+ algo.setKeepCholeskyFactor(False)
+ algo.run()
+ cov_param = [0.1327, 0.1956]
+ trend_coefficients = [-0.1034, 1.0141]
+ result = algo.getResult()
+ assert algo.getOptimizationAlgorithm().getImplementation().getClassName() == "Cobyla"
+ ott.assert_almost_equal(result.getCovarianceModel().getParameter(), cov_param, 1e-4, 1e-4)
+ ott.assert_almost_equal(result.getTrendCoefficients(), trend_coefficients, 1e-4, 1e-4)
+
+
+def use_case_5(X, Y):
+ """
+ full optim problem (scale, amplitude)
+ """
+ ot.ResourceMap.SetAsBool("GaussianProcessFitter-UnbiasedVariance", False)
+ ot.ResourceMap.SetAsBool(
+ "GaussianProcessFitter-UseAnalyticalAmplitudeEstimate", False
+ )
+ basis = ot.LinearBasisFactory(inputDimension).build()
+ # Case of a misspecified covariance model
+ covarianceModel = ot.AbsoluteExponential(inputDimension)
+
+ algo = GaussianProcessFitter(X, Y, covarianceModel, basis)
+ assert algo.getKeepCholeskyFactor()
+ assert algo.getOptimizeParameters()
+ algo.setKeepCholeskyFactor(False)
+ bounds = ot.Interval([1e-2] * 2, [100] * 2)
+ algo.setOptimizationBounds(bounds)
+ algo.run()
+
+ cov_param = [0.1327, 0.19068]
+ trend_coefficients = [-0.1034, 1.0141]
+ result = algo.getResult()
+ assert algo.getOptimizationAlgorithm().getImplementation().getClassName() == "Cobyla"
+ ott.assert_almost_equal(result.getCovarianceModel().getParameter(), cov_param, 1e-4, 1e-4)
+ ott.assert_almost_equal(result.getTrendCoefficients(), trend_coefficients, 1e-4, 1e-4)
+
+
+def use_case_6(X, Y):
+ ot.RandomGenerator.SetSeed(0)
+ covarianceModel = ot.AbsoluteExponential()
+ algo = GaussianProcessFitter(X, Y, covarianceModel)
+ assert algo.getKeepCholeskyFactor()
+ algo.setKeepCholeskyFactor(False)
+ assert algo.getOptimizeParameters()
+ algo.run()
+ result = algo.getResult()
+ cov_param = [15.6, 2.3680]
+ assert algo.getOptimizationAlgorithm().getImplementation().getClassName() == "Cobyla"
+ ott.assert_almost_equal(result.getCovarianceModel().getParameter(), cov_param, 1e-4, 1e-4)
+ ott.assert_almost_equal(result.getTrendCoefficients(), [])
+
+
+if __name__ == "__main__":
+
+ ot.RandomGenerator.SetSeed(0)
+ sampleSize = 40
+ inputDimension = 1
+
+ # Create the function to estimate
+ model = ot.SymbolicFunction(["x0"], ["x0"])
+
+ X = ot.Sample(sampleSize, inputDimension)
+ for i in range(sampleSize):
+ X[i, 0] = 3.0 + (8.0 * i) / sampleSize
+ Y = model(X)
+
+ # Add a small noise to data
+ Y += (
+ ot.GaussianProcess(ot.AbsoluteExponential([0.1], [0.2]), ot.Mesh(X))
+ .getRealization()
+ .getValues()
+ )
+ use_case_1(X, Y)
+ use_case_2(X, Y)
+ use_case_3(X, Y)
+ use_case_4(X, Y)
+ use_case_5(X, Y)
+ use_case_6(X, Y)
diff --git a/python/test/t_GaussianProcessFitter_std_hmat.py b/python/test/t_GaussianProcessFitter_std_hmat.py
new file mode 100644
index 0000000000..6157aceabe
--- /dev/null
+++ b/python/test/t_GaussianProcessFitter_std_hmat.py
@@ -0,0 +1,46 @@
+import openturns as ot
+from openturns.experimental import GaussianProcessFitter
+from openturns.testing import assert_almost_equal
+
+ot.TESTPREAMBLE()
+
+# Set precision
+ot.PlatformInfo.SetNumericalPrecision(3)
+ot.ResourceMap.Set("GaussianProcessFitter-LinearAlgebra", "HMAT")
+
+sampleSize = 6
+inputDimension = 1
+
+# Create the function to estimate
+input_description = ["x0"]
+formulas = ["x0"]
+model = ot.SymbolicFunction(input_description, formulas)
+
+X = ot.Sample(sampleSize, inputDimension)
+X2 = ot.Sample(sampleSize, inputDimension)
+for i in range(sampleSize):
+ X[i, 0] = 3.0 + i
+ X2[i, 0] = 2.5 + i
+X[0, 0] = 1.0
+X[1, 0] = 3.0
+X2[0, 0] = 2.0
+X2[1, 0] = 4.0
+Y = model(X)
+# Data validation
+Y2 = model(X2)
+for i in range(sampleSize):
+ # Add a small noise to data
+ Y[i, 0] += 0.01 * ot.DistFunc.rNormal()
+
+basis = ot.LinearBasisFactory(inputDimension).build()
+covarianceModel = ot.DiracCovarianceModel(inputDimension)
+algo = GaussianProcessFitter(X, Y, covarianceModel, basis)
+algo.setKeepCholeskyFactor(True)
+algo.run()
+
+# perform an evaluation
+result = algo.getResult()
+metaModel = result.getMetaModel()
+conditionalCovariance = result.getCovarianceModel()
+residual = metaModel(X) - Y
+assert_almost_equal(residual.computeCentralMoment(2), [0.00013144], 1e-5, 1e-5)
diff --git a/python/test/t_GaussianProcessRegression_nlopt.py b/python/test/t_GaussianProcessRegression_nlopt.py
new file mode 100644
index 0000000000..c7af374901
--- /dev/null
+++ b/python/test/t_GaussianProcessRegression_nlopt.py
@@ -0,0 +1,167 @@
+#! /usr/bin/env python
+
+import openturns as ot
+from openturns.experimental import GaussianProcessRegression, GaussianProcessFitter
+import openturns.testing as ott
+
+
+# Test 1
+def test_one_input_one_output():
+ sampleSize = 6
+ dimension = 1
+
+ f = ot.SymbolicFunction(["x0"], ["x0 * sin(x0)"])
+
+ X = ot.Sample(sampleSize, dimension)
+ for i in range(sampleSize):
+ X[i, 0] = 3.0 + i
+ X[0, 0] = 1.0
+ X[1, 0] = 3.0
+ Y = f(X)
+
+ # create covariance model
+ basis = ot.ConstantBasisFactory(dimension).build()
+ covarianceModel = ot.SquaredExponential()
+
+ # create algorithm
+ fit_algo = GaussianProcessFitter(X, Y, covarianceModel, basis)
+
+ # set sensible optimization bounds and estimate hyper parameters
+ fit_algo.setOptimizationBounds(ot.Interval(X.getMin(), X.getMax()))
+ fit_algo.setOptimizationAlgorithm(ot.NLopt("LN_NELDERMEAD"))
+ fit_algo.run()
+
+ # perform an evaluation
+ fit_result = fit_algo.getResult()
+
+ algo = GaussianProcessRegression(fit_result)
+ algo.run()
+ result = algo.getResult()
+ ott.assert_almost_equal(result.getMetaModel()(X), Y)
+ ott.assert_almost_equal(result.getResiduals(), [1.32804e-07], 1e-3, 1e-3)
+ ott.assert_almost_equal(result.getRelativeErrors(), [5.20873e-21])
+
+ # Prediction accuracy
+ ott.assert_almost_equal(result.getMetaModel()([7.5]), f([7.5]), 0.3, 0.0)
+
+
+# Test 2
+def test_two_inputs_one_output():
+
+ inputDimension = 2
+ # Learning data
+ levels = [8, 5]
+ box = ot.Box(levels)
+ inputSample = box.generate()
+ # Scale each direction
+ inputSample *= 10.0
+
+ model = ot.SymbolicFunction(["x", "y"], ["cos(0.5*x) + sin(y)"])
+ outputSample = model(inputSample)
+
+ # Validation
+ sampleSize = 10
+ inputValidSample = ot.JointDistribution(2 * [ot.Uniform(0, 10.0)]).getSample(
+ sampleSize
+ )
+ outputValidSample = model(inputValidSample)
+
+ # 2) Definition of exponential model
+ # The parameters have been calibrated using TNC optimization
+ # and AbsoluteExponential models
+ scale = [5.33532, 2.61534]
+ amplitude = [1.61536]
+ covarianceModel = ot.SquaredExponential(scale, amplitude)
+
+ # 3) Basis definition
+ basis = ot.ConstantBasisFactory(inputDimension).build()
+
+ # 4) GPF algorithm
+ fit_algo = GaussianProcessFitter(inputSample, outputSample, covarianceModel, basis)
+ # set sensible optimization bounds and estimate hyper parameters
+ fit_algo.setOptimizationBounds(ot.Interval(inputSample.getMin(), inputSample.getMax()))
+ fit_algo.setOptimizationAlgorithm(ot.NLopt("LN_NELDERMEAD"))
+ fit_algo.run()
+
+ # retrieve the fit result
+ fit_result = fit_algo.getResult()
+ # Regression algorithm
+ algo = GaussianProcessRegression(fit_result)
+ algo.run()
+
+ result = algo.getResult()
+ # Get meta model
+ metaModel = result.getMetaModel()
+ outData = metaModel(inputValidSample)
+
+ # 5) Errors
+ # Interpolation
+ ott.assert_almost_equal(outputSample, metaModel(inputSample), 3.0e-5, 3.0e-5)
+
+ # Prediction
+ ott.assert_almost_equal(outputValidSample, outData, 1.0e-1, 1e-1)
+
+
+def test_two_outputs():
+ f = ot.SymbolicFunction(["x"], ["x * sin(x)", "x * cos(x)"])
+ sampleX = ot.Sample([[1.0], [2.0], [3.0], [4.0], [5.0], [6.0], [7.0], [8.0]])
+ sampleY = f(sampleX)
+ # Build a basis phi from R --> R^2
+ # phi_{0,0} = phi_{0,1} = x
+ # phi_{1,0} = phi_{1,1} = x^2
+ phi0 = ot.AggregatedFunction(
+ [ot.SymbolicFunction(["x"], ["x"]), ot.SymbolicFunction(["x"], ["x"])]
+ )
+ phi1 = ot.AggregatedFunction(
+ [ot.SymbolicFunction(["x"], ["x^2"]), ot.SymbolicFunction(["x"], ["x^2"])]
+ )
+ basis = ot.Basis([phi0, phi1])
+ covarianceModel = ot.SquaredExponential([1.0])
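+ # empty active parameter list: the covariance parameters stay fixed during the fit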
+ covarianceModel.setActiveParameter([])
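+ # duplicate the fixed 1-d model to cover the two outputs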
+ covarianceModel = ot.TensorizedCovarianceModel([covarianceModel] * 2)
+
+ fit_algo = GaussianProcessFitter(sampleX, sampleY, covarianceModel, basis)
+ # select the optimization algorithm and estimate hyperparameters
+ fit_algo.setOptimizationAlgorithm(ot.NLopt("LN_NELDERMEAD"))
+ fit_algo.run()
+
+ # retrieve the fit result
+ fit_result = fit_algo.getResult()
+ algo = GaussianProcessRegression(fit_result)
+ algo.run()
+ result = algo.getResult()
+ mm = result.getMetaModel()
+ assert mm.getOutputDimension() == 2, "wrong output dim"
+ ott.assert_almost_equal(mm([5.5]), [-3.88363, 3.90286])
+
+
+def test_stationary_fun():
+ # non-regression test for https://github.com/openturns/openturns/issues/1861
+ ot.RandomGenerator.SetSeed(0)
+ rho = ot.SymbolicFunction("tau", "exp(-abs(tau))*cos(2*pi_*abs(tau))")
+ model = ot.StationaryFunctionalCovarianceModel([1], [1], rho)
+ x = ot.Normal().getSample(20)
+ x.setDescription(["J0"])
+ y = x + ot.Normal(0, 0.1).getSample(20)
+ y.setDescription(["G0"])
+
+ fit_algo = GaussianProcessFitter(x, y, model, ot.LinearBasisFactory().build())
+ # select the optimization algorithm and estimate hyperparameters
+ fit_algo.setOptimizationAlgorithm(ot.NLopt("LN_NELDERMEAD"))
+ fit_algo.run()
+
+ # retrieve the fit result
+ fit_result = fit_algo.getResult()
+ algo = GaussianProcessRegression(fit_result)
+
+ algo.run()
+ result = algo.getResult()
+ mm = result.getMetaModel()
+ ott.assert_almost_equal(mm([5.5]), [5.58838])
+
+
+if __name__ == "__main__":
+ test_one_input_one_output()
+ test_two_inputs_one_output()
+ test_two_outputs()
+ test_stationary_fun()
diff --git a/python/test/t_GaussianProcessRegression_std.py b/python/test/t_GaussianProcessRegression_std.py
new file mode 100644
index 0000000000..a44ecdfe3c
--- /dev/null
+++ b/python/test/t_GaussianProcessRegression_std.py
@@ -0,0 +1,206 @@
+#! /usr/bin/env python
+
+import openturns as ot
+from openturns.experimental import GaussianProcessRegression, GaussianProcessFitter
+import openturns.testing as ott
+
+ot.TESTPREAMBLE()
+ot.ResourceMap.SetAsUnsignedInteger("OptimizationAlgorithm-DefaultMaximumCallsNumber", 20000)
+ot.ResourceMap.SetAsScalar("Cobyla-DefaultRhoBeg", 0.5)
+ot.ResourceMap.SetAsScalar("OptimizationAlgorithm-DefaultMaximumAbsoluteError", 1e-8)
+
+
+# Test 1
+def test_one_input_one_output():
+ sampleSize = 6
+ dimension = 1
+
+ f = ot.SymbolicFunction(["x0"], ["x0 * sin(x0)"])
+
+ X = ot.Sample(sampleSize, dimension)
+ X2 = ot.Sample(sampleSize, dimension)
+ for i in range(sampleSize):
+ X[i, 0] = 3.0 + i
+ X2[i, 0] = 2.5 + i
+ X[0, 0] = 1.0
+ X[1, 0] = 3.0
+ X2[0, 0] = 2.0
+ X2[1, 0] = 4.0
+ Y = f(X)
+ Y2 = f(X2)
+
+ # create the trend basis and covariance model
+ basis = ot.ConstantBasisFactory(dimension).build()
+ covarianceModel = ot.SquaredExponential()
+
+ # create algorithm
+ fit_algo = GaussianProcessFitter(X, Y, covarianceModel, basis)
+
+ # set sensible optimization bounds and estimate hyperparameters
+ fit_algo.setOptimizationBounds(ot.Interval(X.getMin(), X.getMax()))
+ fit_algo.run()
+
+ # retrieve the fit result
+ fit_result = fit_algo.getResult()
+
+ algo = GaussianProcessRegression(fit_result)
+ algo.run()
+ result = algo.getResult()
+ ott.assert_almost_equal(result.getMetaModel()(X), Y)
+ ott.assert_almost_equal(result.getResiduals(), [1.32804e-07], 1e-3, 1e-3)
+ ott.assert_almost_equal(result.getRelativeErrors(), [5.20873e-21])
+
+ # Prediction accuracy
+ ott.assert_almost_equal(Y2, result.getMetaModel()(X2), 0.3, 0.0)
+
+
+# Test 2
+def test_two_inputs_one_output():
+ inputDimension = 2
+ # 1) Learning data
+ levels = [8, 5]
+ box = ot.Box(levels)
+ inputSample = box.generate()
+ # Scale each direction
+ inputSample *= 10.0
+
+ model = ot.SymbolicFunction(["x", "y"], ["cos(0.5*x) + sin(y)"])
+ outputSample = model(inputSample)
+
+ # Validation
+ sampleSize = 10
+ inputValidSample = ot.JointDistribution(2 * [ot.Uniform(0, 10.0)]).getSample(
+ sampleSize
+ )
+ outputValidSample = model(inputValidSample)
+
+ # 2) Define the covariance model
+ # The parameters have been calibrated using TNC optimization
+ # and AbsoluteExponential models
+ scale = [5.33532, 2.61534]
+ amplitude = [1.61536]
+ covarianceModel = ot.SquaredExponential(scale, amplitude)
+
+ # 3) Basis definition
+ basis = ot.ConstantBasisFactory(inputDimension).build()
+
+ # 4) GaussianProcessFitter algorithm
+ fit_algo = GaussianProcessFitter(inputSample, outputSample, covarianceModel, basis)
+ # set sensible optimization bounds and estimate hyperparameters
+ fit_algo.setOptimizationBounds(ot.Interval(inputSample.getMin(), inputSample.getMax()))
+ fit_algo.run()
+
+ # retrieve the fit result
+ fit_result = fit_algo.getResult()
+ # Regression algorithm
+ algo = GaussianProcessRegression(fit_result)
+ algo.run()
+
+ result = algo.getResult()
+ # Get meta model
+ metaModel = result.getMetaModel()
+ outData = metaModel(inputValidSample)
+
+ # 5) Errors
+ # Interpolation
+ ott.assert_almost_equal(outputSample, metaModel(inputSample), 3.0e-5, 3.0e-5)
+
+ # Prediction
+ ott.assert_almost_equal(outputValidSample, outData, 1.0e-1, 1e-1)
+
+
+def test_two_outputs():
+ f = ot.SymbolicFunction(["x"], ["x * sin(x)", "x * cos(x)"])
+ sampleX = ot.Sample([[1.0], [2.0], [3.0], [4.0], [5.0], [6.0], [7.0], [8.0]])
+ sampleY = f(sampleX)
+ # Build a basis phi from R --> R^2
+ # phi_{0,0} = phi_{0,1} = x
+ # phi_{1,0} = phi_{1,1} = x^2
+ phi0 = ot.AggregatedFunction(
+ [ot.SymbolicFunction(["x"], ["x"]), ot.SymbolicFunction(["x"], ["x"])]
+ )
+ phi1 = ot.AggregatedFunction(
+ [ot.SymbolicFunction(["x"], ["x^2"]), ot.SymbolicFunction(["x"], ["x^2"])]
+ )
+ basis = ot.Basis([phi0, phi1])
+ covarianceModel = ot.SquaredExponential([1.0])
+ covarianceModel.setActiveParameter([])
+ covarianceModel = ot.TensorizedCovarianceModel([covarianceModel] * 2)
+
+ fit_algo = GaussianProcessFitter(sampleX, sampleY, covarianceModel, basis)
+ # estimate hyperparameters with the default optimizer
+ fit_algo.run()
+
+ # retrieve the fit result
+ fit_result = fit_algo.getResult()
+ algo = GaussianProcessRegression(fit_result)
+ algo.run()
+ result = algo.getResult()
+ mm = result.getMetaModel()
+ assert mm.getOutputDimension() == 2, "wrong output dim"
+ ott.assert_almost_equal(mm([5.5]), [-3.88368, 3.90286])
+
+
+def test_stationary_fun():
+ # non-regression test for https://github.com/openturns/openturns/issues/1861
+ ot.RandomGenerator.SetSeed(0)
+ rho = ot.SymbolicFunction("tau", "exp(-abs(tau))*cos(2*pi_*abs(tau))")
+ model = ot.StationaryFunctionalCovarianceModel([1], [1], rho)
+ x = ot.Normal().getSample(20)
+ x.setDescription(["J0"])
+ y = x + ot.Normal(0, 0.1).getSample(20)
+ y.setDescription(["G0"])
+
+ fit_algo = GaussianProcessFitter(x, y, model, ot.LinearBasisFactory().build())
+ # estimate hyperparameters with the default optimizer
+ fit_algo.run()
+
+ # retrieve the fit result
+ fit_result = fit_algo.getResult()
+ algo = GaussianProcessRegression(fit_result)
+
+ algo.run()
+ result = algo.getResult()
+ mm = result.getMetaModel()
+ ott.assert_almost_equal(mm([5.5]), [5.58283])
+
+
+def test_gpr_no_opt():
+ sampleSize = 6
+ dimension = 1
+
+ f = ot.SymbolicFunction(["x0"], ["x0 * sin(x0)"])
+
+ X = ot.Sample(sampleSize, dimension)
+ X2 = ot.Sample(sampleSize, dimension)
+ for i in range(sampleSize):
+ X[i, 0] = 3.0 + i
+ X2[i, 0] = 2.5 + i
+ X[0, 0] = 1.0
+ X[1, 0] = 3.0
+ X2[0, 0] = 2.0
+ X2[1, 0] = 4.0
+ Y = f(X)
+ Y2 = f(X2)
+
+ # covariance model with already-calibrated scale and amplitude
+ covarianceModel = ot.SquaredExponential([1.6326932047296538], [4.895995962015954])
+ trend_function = ot.SymbolicFunction("x", "1.49543")
+ # build the GPR directly from the fixed trend and the calibrated covariance
+ # model, with no fitting step (comparable with test_one_input_one_output)
+ algo = GaussianProcessRegression(X, Y, covarianceModel, trend_function)
+ algo.run()
+ result = algo.getResult()
+ ott.assert_almost_equal(result.getMetaModel()(X), Y)
+ ott.assert_almost_equal(result.getResiduals(), [1.32804e-07], 1e-3, 1e-3)
+ ott.assert_almost_equal(result.getRelativeErrors(), [5.20873e-21])
+ # Prediction accuracy
+ ott.assert_almost_equal(Y2, result.getMetaModel()(X2), 0.3, 0.0)
+
+
+if __name__ == "__main__":
+ test_one_input_one_output()
+ test_two_inputs_one_output()
+ test_two_outputs()
+ test_stationary_fun()
+ test_gpr_no_opt()
diff --git a/python/test/t_GaussianProcessRegression_std_hmat.py b/python/test/t_GaussianProcessRegression_std_hmat.py
new file mode 100644
index 0000000000..56bce9cce7
--- /dev/null
+++ b/python/test/t_GaussianProcessRegression_std_hmat.py
@@ -0,0 +1,212 @@
+#! /usr/bin/env python
+
+import openturns as ot
+from openturns.experimental import GaussianProcessRegression, GaussianProcessFitter
+import openturns.testing as ott
+
+ot.TESTPREAMBLE()
+ot.ResourceMap.SetAsUnsignedInteger("OptimizationAlgorithm-DefaultMaximumCallsNumber", 20000)
+ot.ResourceMap.SetAsScalar("Cobyla-DefaultRhoBeg", 0.5)
+ot.ResourceMap.SetAsScalar("OptimizationAlgorithm-DefaultMaximumAbsoluteError", 1e-8)
+
+ot.PlatformInfo.SetNumericalPrecision(3)
+ot.ResourceMap.SetAsString("GaussianProcessFitter-LinearAlgebra", "HMAT")
+ot.ResourceMap.SetAsScalar("HMatrix-RegularizationEpsilon", 1e-7)
+
+
+# Test 1
+def test_one_input_one_output():
+ sampleSize = 6
+ dimension = 1
+
+ f = ot.SymbolicFunction(["x0"], ["x0 * sin(x0)"])
+
+ X = ot.Sample(sampleSize, dimension)
+ X2 = ot.Sample(sampleSize, dimension)
+ for i in range(sampleSize):
+ X[i, 0] = 3.0 + i
+ X2[i, 0] = 2.5 + i
+ X[0, 0] = 1.0
+ X[1, 0] = 3.0
+ X2[0, 0] = 2.0
+ X2[1, 0] = 4.0
+ Y = f(X)
+ Y2 = f(X2)
+
+ # create the trend basis and covariance model
+ basis = ot.ConstantBasisFactory(dimension).build()
+ covarianceModel = ot.SquaredExponential()
+
+ # create algorithm
+ fit_algo = GaussianProcessFitter(X, Y, covarianceModel, basis)
+
+ # set sensible optimization bounds and estimate hyperparameters
+ fit_algo.setOptimizationBounds(ot.Interval(X.getMin(), X.getMax()))
+ fit_algo.run()
+
+ # retrieve the fit result
+ fit_result = fit_algo.getResult()
+
+ algo = GaussianProcessRegression(fit_result)
+ algo.run()
+ result = algo.getResult()
+ ott.assert_almost_equal(result.getMetaModel()(X), Y, 1e-2)
+ ott.assert_almost_equal(result.getResiduals(), [2.44e-06])
+ ott.assert_almost_equal(result.getRelativeErrors(), [1.76e-12])
+
+ # Prediction accuracy
+ ott.assert_almost_equal(Y2, result.getMetaModel()(X2), 0.3, 0.0)
+
+
+# Test 2
+def test_two_inputs_one_output():
+ inputDimension = 2
+ # 1) Learning data
+ levels = [8, 5]
+ box = ot.Box(levels)
+ inputSample = box.generate()
+ # Scale each direction
+ inputSample *= 10.0
+
+ model = ot.SymbolicFunction(["x", "y"], ["cos(0.5*x) + sin(y)"])
+ outputSample = model(inputSample)
+
+ # Validation
+ sampleSize = 10
+ inputValidSample = ot.JointDistribution(2 * [ot.Uniform(0, 10.0)]).getSample(
+ sampleSize
+ )
+ outputValidSample = model(inputValidSample)
+
+ # 2) Define the covariance model
+ # The parameters have been calibrated using TNC optimization
+ # and AbsoluteExponential models
+ scale = [5.33532, 2.61534]
+ amplitude = [1.61536]
+ covarianceModel = ot.SquaredExponential(scale, amplitude)
+
+ # 3) Basis definition
+ basis = ot.ConstantBasisFactory(inputDimension).build()
+
+ # 4) GaussianProcessFitter algorithm
+ fit_algo = GaussianProcessFitter(inputSample, outputSample, covarianceModel, basis)
+ # set sensible optimization bounds and estimate hyperparameters
+ fit_algo.setOptimizationBounds(ot.Interval(inputSample.getMin(), inputSample.getMax()))
+ fit_algo.run()
+
+ # retrieve the fit result
+ fit_result = fit_algo.getResult()
+ # Regression algorithm
+ algo = GaussianProcessRegression(fit_result)
+ algo.run()
+
+ result = algo.getResult()
+ # Get meta model
+ metaModel = result.getMetaModel()
+ outData = metaModel(inputValidSample)
+
+ # 5) Errors
+ # Interpolation
+ Yhat = metaModel(inputSample)
+ ott.assert_almost_equal(outputSample, Yhat, 3.0e-2, 3.0e-2)
+
+ # Prediction
+ ott.assert_almost_equal(outputValidSample, outData, 1.0e-1, 1e-1)
+
+
+def test_two_outputs():
+ f = ot.SymbolicFunction(["x"], ["x * sin(x)", "x * cos(x)"])
+ sampleX = ot.Sample([[1.0], [2.0], [3.0], [4.0], [5.0], [6.0], [7.0], [8.0]])
+ sampleY = f(sampleX)
+ # Build a basis phi from R --> R^2
+ # phi_{0,0} = phi_{0,1} = x
+ # phi_{1,0} = phi_{1,1} = x^2
+ phi0 = ot.AggregatedFunction(
+ [ot.SymbolicFunction(["x"], ["x"]), ot.SymbolicFunction(["x"], ["x"])]
+ )
+ phi1 = ot.AggregatedFunction(
+ [ot.SymbolicFunction(["x"], ["x^2"]), ot.SymbolicFunction(["x"], ["x^2"])]
+ )
+ basis = ot.Basis([phi0, phi1])
+ covarianceModel = ot.SquaredExponential([1.0])
+ covarianceModel.setActiveParameter([])
+ covarianceModel = ot.TensorizedCovarianceModel([covarianceModel] * 2)
+
+ fit_algo = GaussianProcessFitter(sampleX, sampleY, covarianceModel, basis)
+ # estimate hyperparameters with the default optimizer
+ fit_algo.run()
+
+ # retrieve the fit result
+ fit_result = fit_algo.getResult()
+ algo = GaussianProcessRegression(fit_result)
+ algo.run()
+ result = algo.getResult()
+ mm = result.getMetaModel()
+ assert mm.getOutputDimension() == 2, "wrong output dim"
+ ott.assert_almost_equal(mm([5.5]), [-3.88, 3.90], 1e-2, 1e-3)
+
+
+def test_stationary_fun():
+ # non-regression test for https://github.com/openturns/openturns/issues/1861
+ ot.RandomGenerator.SetSeed(0)
+ rho = ot.SymbolicFunction("tau", "exp(-abs(tau))*cos(2*pi_*abs(tau))")
+ model = ot.StationaryFunctionalCovarianceModel([1], [1], rho)
+ x = ot.Normal().getSample(20)
+ x.setDescription(["J0"])
+ y = x + ot.Normal(0, 0.1).getSample(20)
+ y.setDescription(["G0"])
+
+ fit_algo = GaussianProcessFitter(x, y, model, ot.LinearBasisFactory().build())
+ # estimate hyperparameters with the default optimizer
+ fit_algo.run()
+
+ # retrieve the fit result
+ fit_result = fit_algo.getResult()
+ algo = GaussianProcessRegression(fit_result)
+
+ algo.run()
+ result = algo.getResult()
+ mm = result.getMetaModel()
+ ott.assert_almost_equal(mm([5.5]), [5.58283])
+
+
+def test_gpr_no_opt():
+ sampleSize = 6
+ dimension = 1
+
+ f = ot.SymbolicFunction(["x0"], ["x0 * sin(x0)"])
+
+ X = ot.Sample(sampleSize, dimension)
+ X2 = ot.Sample(sampleSize, dimension)
+ for i in range(sampleSize):
+ X[i, 0] = 3.0 + i
+ X2[i, 0] = 2.5 + i
+ X[0, 0] = 1.0
+ X[1, 0] = 3.0
+ X2[0, 0] = 2.0
+ X2[1, 0] = 4.0
+ Y = f(X)
+ Y2 = f(X2)
+
+ # covariance model with already-calibrated scale and amplitude
+ covarianceModel = ot.SquaredExponential([1.6326932047296538], [4.895995962015954])
+ trend_function = ot.SymbolicFunction("x", "1.49543")
+ # build the GPR directly from the fixed trend and the calibrated covariance
+ # model, with no fitting step (comparable with test_one_input_one_output)
+ algo = GaussianProcessRegression(X, Y, covarianceModel, trend_function)
+ algo.run()
+ result = algo.getResult()
+ Yhat = result.getMetaModel()(X)
+ ott.assert_almost_equal(Yhat, Y)
+ ott.assert_almost_equal(result.getResiduals(), [1.32804e-07], 1e-3, 1e-3)
+ ott.assert_almost_equal(result.getRelativeErrors(), [5.20873e-21])
+ # Prediction accuracy
+ ott.assert_almost_equal(Y2, result.getMetaModel()(X2), 0.3, 0.0)
+
+
+if __name__ == "__main__":
+ test_one_input_one_output()
+ test_two_inputs_one_output()
+ test_two_outputs()
+ test_stationary_fun()
+ test_gpr_no_opt()
diff --git a/python/test/t_OrthogonalProductFunctionFactory_std.expout b/python/test/t_OrthogonalProductFunctionFactory_std.expout
new file mode 100644
index 0000000000..13bce04af9
--- /dev/null
+++ b/python/test/t_OrthogonalProductFunctionFactory_std.expout
@@ -0,0 +1,202 @@
+class=OrthogonalProductFunctionFactory factory=class=TensorizedUniVariateFunctionFactory univariate function collection=[class=UniVariateFunctionFamily implementation=class=HaarWaveletFactory measure=class=Uniform name=Uniform dimension=1 a=0 b=1,class=UniVariateFunctionFamily implementation=class=HaarWaveletFactory measure=class=Uniform name=Uniform dimension=1 a=0 b=1] enumerate function=class=LinearEnumerateFunction dimension=2 measure=class=JointDistribution name=JointDistribution dimension=2 copula=class=IndependentCopula name=IndependentCopula dimension=2 marginal[0]=class=Uniform name=Uniform dimension=1 a=0 b=1 marginal[1]=class=Uniform name=Uniform dimension=1 a=0 b=1
+print() :
+type =
+class=ProductUniVariateFunctionEvaluation functions=[class=HaarWavelet j=0 k=0 isScaling=true a=0 m=0.5 b=1,class=HaarWavelet j=0 k=0 isScaling=true a=0 m=0.5 b=1]
+
+ - Input dimension = 2
+ - Input description = [x0,x1]
+ - Output dimension = 1
+ - Output description = [y0]
+ - Parameter = []
+
+
+type =
+class=ProductUniVariateFunctionEvaluation functions=[class=HaarWavelet j=0 k=0 isScaling=false a=0 m=0.5 b=1,class=HaarWavelet j=0 k=0 isScaling=true a=0 m=0.5 b=1]
+
+ - Input dimension = 2
+ - Input description = [x0,x1]
+ - Output dimension = 1
+ - Output description = [y0]
+ - Parameter = []
+
+
+type =
+class=ProductUniVariateFunctionEvaluation functions=[class=HaarWavelet j=0 k=0 isScaling=true a=0 m=0.5 b=1,class=HaarWavelet j=0 k=0 isScaling=false a=0 m=0.5 b=1]
+
+ - Input dimension = 2
+ - Input description = [x0,x1]
+ - Output dimension = 1
+ - Output description = [y0]
+ - Parameter = []
+
+
+type =
+class=ProductUniVariateFunctionEvaluation functions=[class=HaarWavelet j=1 k=0 isScaling=false a=0 m=0.25 b=0.5,class=HaarWavelet j=0 k=0 isScaling=true a=0 m=0.5 b=1]
+
+ - Input dimension = 2
+ - Input description = [x0,x1]
+ - Output dimension = 1
+ - Output description = [y0]
+ - Parameter = []
+
+
+type =
+class=ProductUniVariateFunctionEvaluation functions=[class=HaarWavelet j=0 k=0 isScaling=false a=0 m=0.5 b=1,class=HaarWavelet j=0 k=0 isScaling=false a=0 m=0.5 b=1]
+
+ - Input dimension = 2
+ - Input description = [x0,x1]
+ - Output dimension = 1
+ - Output description = [y0]
+ - Parameter = []
+
+
+type =
+class=ProductUniVariateFunctionEvaluation functions=[class=HaarWavelet j=0 k=0 isScaling=true a=0 m=0.5 b=1,class=HaarWavelet j=1 k=0 isScaling=false a=0 m=0.25 b=0.5]
+
+ - Input dimension = 2
+ - Input description = [x0,x1]
+ - Output dimension = 1
+ - Output description = [y0]
+ - Parameter = []
+
+
+type =
+class=ProductUniVariateFunctionEvaluation functions=[class=HaarWavelet j=1 k=1 isScaling=false a=0.5 m=0.75 b=1,class=HaarWavelet j=0 k=0 isScaling=true a=0 m=0.5 b=1]
+
+ - Input dimension = 2
+ - Input description = [x0,x1]
+ - Output dimension = 1
+ - Output description = [y0]
+ - Parameter = []
+
+
+type =
+class=ProductUniVariateFunctionEvaluation functions=[class=HaarWavelet j=1 k=0 isScaling=false a=0 m=0.25 b=0.5,class=HaarWavelet j=0 k=0 isScaling=false a=0 m=0.5 b=1]
+
+ - Input dimension = 2
+ - Input description = [x0,x1]
+ - Output dimension = 1
+ - Output description = [y0]
+ - Parameter = []
+
+
+type =
+class=ProductUniVariateFunctionEvaluation functions=[class=HaarWavelet j=0 k=0 isScaling=false a=0 m=0.5 b=1,class=HaarWavelet j=1 k=0 isScaling=false a=0 m=0.25 b=0.5]
+
+ - Input dimension = 2
+ - Input description = [x0,x1]
+ - Output dimension = 1
+ - Output description = [y0]
+ - Parameter = []
+
+
+type =
+class=ProductUniVariateFunctionEvaluation functions=[class=HaarWavelet j=0 k=0 isScaling=true a=0 m=0.5 b=1,class=HaarWavelet j=1 k=1 isScaling=false a=0.5 m=0.75 b=1]
+
+ - Input dimension = 2
+ - Input description = [x0,x1]
+ - Output dimension = 1
+ - Output description = [y0]
+ - Parameter = []
+
+
+type =
+class=ProductUniVariateFunctionEvaluation functions=[class=HaarWavelet j=2 k=0 isScaling=false a=0 m=0.125 b=0.25,class=HaarWavelet j=0 k=0 isScaling=true a=0 m=0.5 b=1]
+
+ - Input dimension = 2
+ - Input description = [x0,x1]
+ - Output dimension = 1
+ - Output description = [y0]
+ - Parameter = []
+
+
+type =
+class=ProductUniVariateFunctionEvaluation functions=[class=HaarWavelet j=1 k=1 isScaling=false a=0.5 m=0.75 b=1,class=HaarWavelet j=0 k=0 isScaling=false a=0 m=0.5 b=1]
+
+ - Input dimension = 2
+ - Input description = [x0,x1]
+ - Output dimension = 1
+ - Output description = [y0]
+ - Parameter = []
+
+
+type =
+class=ProductUniVariateFunctionEvaluation functions=[class=HaarWavelet j=1 k=0 isScaling=false a=0 m=0.25 b=0.5,class=HaarWavelet j=1 k=0 isScaling=false a=0 m=0.25 b=0.5]
+
+ - Input dimension = 2
+ - Input description = [x0,x1]
+ - Output dimension = 1
+ - Output description = [y0]
+ - Parameter = []
+
+
+type =
+class=ProductUniVariateFunctionEvaluation functions=[class=HaarWavelet j=0 k=0 isScaling=false a=0 m=0.5 b=1,class=HaarWavelet j=1 k=1 isScaling=false a=0.5 m=0.75 b=1]
+
+ - Input dimension = 2
+ - Input description = [x0,x1]
+ - Output dimension = 1
+ - Output description = [y0]
+ - Parameter = []
+
+
+type =
+class=ProductUniVariateFunctionEvaluation functions=[class=HaarWavelet j=0 k=0 isScaling=true a=0 m=0.5 b=1,class=HaarWavelet j=2 k=0 isScaling=false a=0 m=0.125 b=0.25]
+
+ - Input dimension = 2
+ - Input description = [x0,x1]
+ - Output dimension = 1
+ - Output description = [y0]
+ - Parameter = []
+
+
+type =
+class=ProductUniVariateFunctionEvaluation functions=[class=HaarWavelet j=2 k=1 isScaling=false a=0.25 m=0.375 b=0.5,class=HaarWavelet j=0 k=0 isScaling=true a=0 m=0.5 b=1]
+
+ - Input dimension = 2
+ - Input description = [x0,x1]
+ - Output dimension = 1
+ - Output description = [y0]
+ - Parameter = []
+
+
+type =
+class=ProductUniVariateFunctionEvaluation functions=[class=HaarWavelet j=2 k=0 isScaling=false a=0 m=0.125 b=0.25,class=HaarWavelet j=0 k=0 isScaling=false a=0 m=0.5 b=1]
+
+ - Input dimension = 2
+ - Input description = [x0,x1]
+ - Output dimension = 1
+ - Output description = [y0]
+ - Parameter = []
+
+
+type =
+class=ProductUniVariateFunctionEvaluation functions=[class=HaarWavelet j=1 k=1 isScaling=false a=0.5 m=0.75 b=1,class=HaarWavelet j=1 k=0 isScaling=false a=0 m=0.25 b=0.5]
+
+ - Input dimension = 2
+ - Input description = [x0,x1]
+ - Output dimension = 1
+ - Output description = [y0]
+ - Parameter = []
+
+
+type =
+class=ProductUniVariateFunctionEvaluation functions=[class=HaarWavelet j=1 k=0 isScaling=false a=0 m=0.25 b=0.5,class=HaarWavelet j=1 k=1 isScaling=false a=0.5 m=0.75 b=1]
+
+ - Input dimension = 2
+ - Input description = [x0,x1]
+ - Output dimension = 1
+ - Output description = [y0]
+ - Parameter = []
+
+
+type =
+class=ProductUniVariateFunctionEvaluation functions=[class=HaarWavelet j=0 k=0 isScaling=false a=0 m=0.5 b=1,class=HaarWavelet j=2 k=0 isScaling=false a=0 m=0.125 b=0.25]
+
+ - Input dimension = 2
+ - Input description = [x0,x1]
+ - Output dimension = 1
+ - Output description = [y0]
+ - Parameter = []
+
+
diff --git a/python/test/t_OrthogonalProductFunctionFactory_std.py b/python/test/t_OrthogonalProductFunctionFactory_std.py
new file mode 100644
index 0000000000..b516bee502
--- /dev/null
+++ b/python/test/t_OrthogonalProductFunctionFactory_std.py
@@ -0,0 +1,41 @@
+#! /usr/bin/env python
+
+import openturns as ot
+
+ot.TESTPREAMBLE()
+
+dimension = 2
+
+# Create the orthogonal basis
+enumerateFunction = ot.LinearEnumerateFunction(dimension)
+productBasis = ot.OrthogonalProductFunctionFactory(
+ [ot.HaarWaveletFactory(), ot.HaarWaveletFactory()], enumerateFunction
+)
+print(productBasis)
+print("print() :")
+for i in range(20):
+ p = productBasis.build(i)
+ print("type = ", type(p))
+ print(p)
+ print(p._repr_html_())
+
+# Test build from multi-index
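+# building from the multi-index returned by the enumerate function should not raise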
+for i in range(20):
+ index = enumerateFunction(i)
+ termBasis2 = productBasis.build(index)
+
+# Test getMarginal
+enumerateFunction = ot.LinearEnumerateFunction(5)
+productBasis = ot.OrthogonalProductPolynomialFactory(
+ [
+ ot.LegendreFactory(),
+ ot.HermiteFactory(),
+ ot.LegendreFactory(),
+ ot.HermiteFactory(),
+ ot.HermiteFactory(),
+ ],
+ enumerateFunction,
+)
+productBasisMarginal = productBasis.getMarginal([0, 2, 4])
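+# the marginal factory builds functions over the three selected inputs only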
+for i in range(20):
+ function = productBasisMarginal.build(i)
diff --git a/python/test/t_OrthogonalProductPolynomialFactory_std.py b/python/test/t_OrthogonalProductPolynomialFactory_std.py
index efae7efb9f..41e87e1a8d 100755
--- a/python/test/t_OrthogonalProductPolynomialFactory_std.py
+++ b/python/test/t_OrthogonalProductPolynomialFactory_std.py
@@ -18,3 +18,24 @@
print("type = ", type(p))
print(p)
print(p._repr_html_())
+
+# Test build from multi-index
+for i in range(20):
+ index = enumerateFunction(i)
+ termBasis2 = productBasis.build(index)
+
+# Test getMarginal
+enumerateFunction = ot.LinearEnumerateFunction(5)
+productBasis = ot.OrthogonalProductPolynomialFactory(
+ [
+ ot.LegendreFactory(),
+ ot.HermiteFactory(),
+ ot.LegendreFactory(),
+ ot.HermiteFactory(),
+ ot.HermiteFactory(),
+ ],
+ enumerateFunction,
+)
+productBasisMarginal = productBasis.getMarginal([0, 2, 4])
+for i in range(20):
+ function = productBasisMarginal.build(i)
diff --git a/python/test/t_Viewer.py b/python/test/t_Viewer.py
index 337af71b1d..69b4e922dc 100755
--- a/python/test/t_Viewer.py
+++ b/python/test/t_Viewer.py
@@ -270,7 +270,9 @@
# mixed legend
f = ot.SymbolicFunction(["x", "y"], ["sin(x)*sin(y)"])
-graph = f.draw([-4.0] * 2, [4.0] * 2, [64] * 2)
+# use different numbers of points for x and y to check
+# these numbers are handled properly by the viewer
+graph = f.draw([-4.0] * 2, [4.0] * 2, [64, 32])
curve = ot.Curve([-4.0, 4.0], [1.0, 1.0], "curve")
curve.setColor("black")
curve.setLineStyle("dashed")