forked from openturns/openturns
-
Notifications
You must be signed in to change notification settings - Fork 0
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Created a Python unit test for FunctionalChaosValidation
- Loading branch information
Showing
2 changed files
with
147 additions
and
1 deletion.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,146 @@ | ||
#! /usr/bin/env python
"""
Unit test for FunctionalChaosValidation on the Ishigami function.

Builds a full (no model selection) polynomial chaos expansion of the
Ishigami model, then compares the analytical leave-one-out and K-fold
cross-validation errors against naive implementations that explicitly
refit the metamodel on each training subsample.
"""

import openturns as ot
from openturns.usecases import ishigami_function
from openturns.testing import assert_almost_equal

ot.TESTPREAMBLE()

# Problem parameters: the Ishigami test case (3 independent uniform inputs).
im = ishigami_function.IshigamiModel()

dimension = im.distributionX.getDimension()

# Choose the sample size from the number of folds so that the number of
# points per fold is NOT constant: 201 points split into 10 folds yields
# folds of unequal size, which exercises the general K-fold code path.
kFoldParameter = 10
foldSampleSize = 20
sampleSize = foldSampleSize * kFoldParameter + 1

# Build a full PCE basis of total degree 5. Model selection is disabled
# (PenalizedLeastSquaresAlgorithmFactory) so that the analytical
# cross-validation formulas apply exactly.
degree = 5
enumerateFunction = ot.LinearEnumerateFunction(dimension)
basisSize = enumerateFunction.getBasisSizeFromTotalDegree(degree)
print("basisSize = ", basisSize)
productBasis = ot.OrthogonalProductPolynomialFactory(
    [ot.LegendreFactory()] * dimension, enumerateFunction
)
adaptiveStrategy = ot.FixedStrategy(productBasis, basisSize)
selectionAlgorithm = (
    ot.PenalizedLeastSquaresAlgorithmFactory()
)  # Get a full PCE: do not use model selection.
projectionStrategy = ot.LeastSquaresStrategy(selectionAlgorithm)
inputSample = im.distributionX.getSample(sampleSize)
outputSample = im.model(inputSample)
algo = ot.FunctionalChaosAlgorithm(
    inputSample, outputSample, im.distributionX, adaptiveStrategy, projectionStrategy
)
algo.run()
chaosResult = algo.getResult()
|
||
# Analytical leave-one-out: fast formula that does not refit the model.
validationLOO = ot.FunctionalChaosValidation(
    chaosResult, ot.FunctionalChaosValidation.LEAVEONEOUT
)
mseLOOAnalytical = validationLOO.computeMeanSquaredError()
print("Analytical LOO MSE = ", mseLOOAnalytical)
assert validationLOO.getMethod() == ot.LinearModelValidation.LEAVEONEOUT

# Naive leave-one-out: for each point j, refit the PCE on the remaining
# n - 1 points and record the prediction residual at the left-out point.
# NOTE(review): this loop uses the default ot.LeastSquaresStrategy()
# while the K-fold loop below passes an explicit full-PCE strategy —
# presumably the defaults coincide; confirm against the library defaults.
residualsLOO = ot.Point(sampleSize)
for j in range(sampleSize):
    indicesLOO = list(range(sampleSize))
    indicesLOO.pop(j)
    inputSampleTrainLOO = inputSample.select(indicesLOO)
    outputSampleTrainLOO = outputSample.select(indicesLOO)
    inputPointLOOTest = inputSample[j]
    outputPointLOOTest = outputSample[j]
    algoLOO = ot.FunctionalChaosAlgorithm(
        inputSampleTrainLOO,
        outputSampleTrainLOO,
        im.distributionX,
        adaptiveStrategy,
        ot.LeastSquaresStrategy(),
    )
    algoLOO.run()
    resultLOO = algoLOO.getResult()
    metamodelLOO = resultLOO.getMetaModel()
    predictionLOOTest = metamodelLOO(inputPointLOOTest)
    residualsLOOTest = predictionLOOTest - outputPointLOOTest
    residualsLOO[j] = residualsLOOTest[0]

mseLOOnaive = residualsLOO.normSquare() / sampleSize
print("Naive LOO MSE = ", mseLOOnaive)

# The analytical and naive LOO errors must agree to high accuracy.
rtolLOO = 1.0e-8
atolLOO = 0.0
assert_almost_equal(mseLOOAnalytical[0], mseLOOnaive, rtolLOO, atolLOO)

# Check LOO R2 against the definition 1 - MSE / Var(output).
r2ScoreLOO = validationLOO.computeR2Score()
print("Analytical LOO R2 score = ", r2ScoreLOO)
sampleVariance = outputSample.computeCentralMoment(2)
print("sampleVariance = ", sampleVariance)
r2ScoreReference = 1.0 - mseLOOAnalytical[0] / sampleVariance[0]
print("Computed R2 score = ", r2ScoreReference)
rtolLOO = 1.0e-12
atolLOO = 0.0
assert_almost_equal(r2ScoreReference, r2ScoreLOO[0], rtolLOO, atolLOO)
|
||
# Analytical K-fold: fast formula that does not refit the model.
validationKFold = ot.FunctionalChaosValidation(
    chaosResult, ot.FunctionalChaosValidation.KFOLD, kFoldParameter
)
print("KFold with K = ", kFoldParameter)
assert validationKFold.getKParameter() == kFoldParameter
assert validationKFold.getMethod() == ot.LinearModelValidation.KFOLD

# Compute the analytical K-fold mean squared error.
mseKFoldAnalytical = validationKFold.computeMeanSquaredError()
print("Analytical KFold MSE = ", mseKFoldAnalytical)

# Naive K-fold: for each fold, refit the full PCE on the training part
# and average the squared residuals on the test part.
residualsKFold = ot.Sample(kFoldParameter, 1)
splitter = ot.KFoldSplitter(sampleSize, kFoldParameter)
selectionAlgorithm = (
    ot.PenalizedLeastSquaresAlgorithmFactory()
)  # Get a full PCE: do not use model selection.
projectionStrategy = ot.LeastSquaresStrategy(selectionAlgorithm)
foldIndex = 0
for indicesTrain, indicesTest in splitter:
    foldSize = indicesTest.getSize()
    inputSampleKFoldTrain = inputSample[indicesTrain]
    outputSampleKFoldTrain = outputSample[indicesTrain]
    inputSampleKFoldTest = inputSample[indicesTest]
    outputSampleKFoldTest = outputSample[indicesTest]
    algoKFold = ot.FunctionalChaosAlgorithm(
        inputSampleKFoldTrain,
        outputSampleKFoldTrain,
        im.distributionX,
        adaptiveStrategy,
        projectionStrategy,
    )
    algoKFold.run()
    resultKFold = algoKFold.getResult()
    metamodelKFold = resultKFold.getMetaModel()
    predictionKFoldTest = metamodelKFold(inputSampleKFoldTest)
    residualsKFoldTest = predictionKFoldTest.asPoint() - outputSampleKFoldTest.asPoint()
    # Per-fold MSE; folds may have unequal sizes (sampleSize = 201).
    residualsKFold[foldIndex, 0] = residualsKFoldTest.normSquare() / foldSize
    foldIndex += 1

mseKFoldnaive = residualsKFold.computeMean()
print("Naive KFold MSE = ", mseKFoldnaive)

# The analytical and naive K-fold errors must agree (looser tolerance
# than LOO since the per-fold averaging differs slightly).
rtolKFold = 1.0e-5
atolKFold = 0.0
assert_almost_equal(mseKFoldAnalytical, mseKFoldnaive, rtolKFold, atolKFold)

# Check K-fold R2 against the definition 1 - MSE / Var(output).
r2ScoreKFold = validationKFold.computeR2Score()
print("Analytical K-Fold R2 score = ", r2ScoreKFold)
r2ScoreReference = 1.0 - mseKFoldAnalytical[0] / sampleVariance[0]
print("Computed R2 score = ", r2ScoreReference)
rtolKFold = 1.0e-12
atolKFold = 0.0
assert_almost_equal(r2ScoreReference, r2ScoreKFold[0], rtolKFold, atolKFold)