Created a Python unit test for FunctionalChaosValidation
mbaudin47 committed Jul 7, 2023
1 parent f320a5f commit 5035415
Showing 2 changed files with 147 additions and 1 deletion.
2 changes: 1 addition & 1 deletion python/src/metamodel_module.i
@@ -59,8 +59,8 @@
 %include FunctionalChaosResult.i
 %include FunctionalChaosAlgorithm.i
 %include FunctionalChaosSobolIndices.i
-%include FunctionalChaosValidation.i
 %include MetaModelValidation.i
+%include FunctionalChaosValidation.i
 %include GeneralLinearModelResult.i
 %include GeneralLinearModelAlgorithm.i
 %include KrigingAlgorithm.i
146 changes: 146 additions & 0 deletions python/test/t_FunctionalChaosValidation_std.py
@@ -0,0 +1,146 @@
#! /usr/bin/env python

import openturns as ot
from openturns.usecases import ishigami_function
from openturns.testing import assert_almost_equal

ot.TESTPREAMBLE()

# Problem parameters
im = ishigami_function.IshigamiModel()
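# The Ishigami function g(x) = sin(x1) + a * sin(x2)^2 + b * x3^4 * sin(x1),
# with independent U(-pi, pi) inputs (a = 7, b = 0.1 in this use case), is a
# standard smooth benchmark for polynomial chaos.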

dimension = im.distributionX.getDimension()

# Compute the sample size from the number of folds so that the number of
# points per fold is not constant
kFoldParameter = 10
foldSampleSize = 20
sampleSize = foldSampleSize * kFoldParameter + 1
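# 201 = 10 * 20 + 1 points cannot be split evenly into 10 folds, so
# ot.KFoldSplitter below produces folds of 20 or 21 points.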

degree = 5
enumerateFunction = ot.LinearEnumerateFunction(dimension)
basisSize = enumerateFunction.getBasisSizeFromTotalDegree(degree)
print("basisSize = ", basisSize)
productBasis = ot.OrthogonalProductPolynomialFactory(
    [ot.LegendreFactory()] * dimension, enumerateFunction
)
adaptiveStrategy = ot.FixedStrategy(productBasis, basisSize)
selectionAlgorithm = (
    ot.PenalizedLeastSquaresAlgorithmFactory()
)  # Get a full PCE: do not use model selection.
projectionStrategy = ot.LeastSquaresStrategy(selectionAlgorithm)
inputSample = im.distributionX.getSample(sampleSize)
outputSample = im.model(inputSample)
algo = ot.FunctionalChaosAlgorithm(
    inputSample, outputSample, im.distributionX, adaptiveStrategy, projectionStrategy
)
algo.run()
chaosResult = algo.getResult()

# Analytical leave-one-out
validationLOO = ot.FunctionalChaosValidation(
    chaosResult, ot.FunctionalChaosValidation.LEAVEONEOUT
)
mseLOOAnalytical = validationLOO.computeMeanSquaredError()
print("Analytical LOO MSE = ", mseLOOAnalytical)
assert validationLOO.getMethod() == ot.FunctionalChaosValidation.LEAVEONEOUT
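# For reference: for a least-squares PCE without model selection, the LOO
# residual has the closed form (y_j - yhat_j) / (1 - h_jj), where h_jj is the
# j-th diagonal entry of the hat matrix H = Psi (Psi^T Psi)^-1 Psi^T of the
# design matrix Psi, so the analytical method needs no refitting.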

# Naive leave-one-out
residualsLOO = ot.Point(sampleSize)
for j in range(sampleSize):
    indicesLOO = list(range(sampleSize))
    indicesLOO.pop(j)
    inputSampleTrainLOO = ot.Sample(inputSample.select(indicesLOO))
    outputSampleTrainLOO = outputSample.select(indicesLOO)
    inputPointLOOTest = inputSample[j]
    outputPointLOOTest = outputSample[j]
    algoLOO = ot.FunctionalChaosAlgorithm(
        inputSampleTrainLOO,
        outputSampleTrainLOO,
        im.distributionX,
        adaptiveStrategy,
        ot.LeastSquaresStrategy(),
    )
    algoLOO.run()
    resultLOO = ot.FunctionalChaosResult(algoLOO.getResult())
    metamodelLOO = resultLOO.getMetaModel()
    predictionLOOTest = metamodelLOO(inputPointLOOTest)
    residualsLOOTest = predictionLOOTest - outputPointLOOTest
    residualsLOO[j] = residualsLOOTest[0]

mseLOOnaive = residualsLOO.normSquare() / sampleSize
print("Naive LOO MSE = ", mseLOOnaive)

# Test
rtolLOO = 1.0e-8
atolLOO = 0.0
assert_almost_equal(mseLOOAnalytical[0], mseLOOnaive, rtolLOO, atolLOO)

# Check LOO R2
r2ScoreLOO = validationLOO.computeR2Score()
print("Analytical LOO R2 score = ", r2ScoreLOO)
sampleVariance = outputSample.computeCentralMoment(2)
print("sampleVariance = ", sampleVariance)
r2ScoreReference = 1.0 - mseLOOAnalytical[0] / sampleVariance[0]
print("Computed R2 score = ", r2ScoreReference)
rtolLOO = 1.0e-12
atolLOO = 0.0
assert_almost_equal(r2ScoreReference, r2ScoreLOO[0], rtolLOO, atolLOO)
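# The tight tolerance assumes computeR2Score normalizes by the same sample
# variance used above; both sides then derive from the same analytical MSE.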

# Analytical K-Fold
validationKFold = ot.FunctionalChaosValidation(
    chaosResult, ot.FunctionalChaosValidation.KFOLD, kFoldParameter
)
print("KFold with K = ", kFoldParameter)
assert validationKFold.getKParameter() == kFoldParameter
assert validationKFold.getMethod() == ot.FunctionalChaosValidation.KFOLD

# Compute mean squared error
mseKFoldAnalytical = validationKFold.computeMeanSquaredError()
print("Analytical KFold MSE = ", mseKFoldAnalytical)

# Naive KFold
residualsKFold = ot.Sample(kFoldParameter, 1)
splitter = ot.KFoldSplitter(sampleSize, kFoldParameter)
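# Each iteration of the splitter yields (train indices, test indices) for one
# fold; the PCE is refit on the train part and scored on the held-out fold.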
selectionAlgorithm = (
    ot.PenalizedLeastSquaresAlgorithmFactory()
)  # Get a full PCE: do not use model selection.
projectionStrategy = ot.LeastSquaresStrategy(selectionAlgorithm)
foldIndex = 0
for indicesTrain, indicesTest in splitter:
    foldSize = indicesTest.getSize()
    inputSampleKFoldTrain = inputSample[indicesTrain]
    outputSampleKFoldTrain = outputSample[indicesTrain]
    inputSampleKFoldTest = inputSample[indicesTest]
    outputSampleKFoldTest = outputSample[indicesTest]
    algoKFold = ot.FunctionalChaosAlgorithm(
        inputSampleKFoldTrain,
        outputSampleKFoldTrain,
        im.distributionX,
        adaptiveStrategy,
        projectionStrategy,
    )
    algoKFold.run()
    resultKFold = algoKFold.getResult()
    metamodelKFold = resultKFold.getMetaModel()
    predictionKFoldTest = metamodelKFold(inputSampleKFoldTest)
    residualsKFoldTest = predictionKFoldTest.asPoint() - outputSampleKFoldTest.asPoint()
    residualsKFold[foldIndex, 0] = residualsKFoldTest.normSquare() / foldSize
    foldIndex += 1

mseKFoldnaive = residualsKFold.computeMean()
print("Naive KFold MSE = ", mseKFoldnaive)

# Test
rtolKFold = 1.0e-5
atolKFold = 0.0
assert_almost_equal(mseKFoldAnalytical, mseKFoldnaive, rtolKFold, atolKFold)

# Check K-Fold R2
r2ScoreKFold = validationKFold.computeR2Score()
print("Analytical K-Fold R2 score = ", r2ScoreKFold)
r2ScoreReference = 1.0 - mseKFoldAnalytical[0] / sampleVariance[0]
print("Computed R2 score = ", r2ScoreReference)
rtolKFold = 1.0e-12
atolKFold = 0.0
assert_almost_equal(r2ScoreReference, r2ScoreKFold[0], rtolKFold, atolKFold)
