Commit 1194c6f
Doc: Try to run examples in parallel
jschueller committed Sep 19, 2024
1 parent 8061d60
Showing 8 changed files with 250 additions and 11 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/build.yml
@@ -15,7 +15,7 @@ jobs:
         shell: bash -l {0}
         run: |
           black --check otbenchmark --config pyproject.toml
-          flake8 otbenchmark
+          flake8 otbenchmark doc/examples --max-line-length 120
       - name: Test
         shell: bash -l {0}
         run: |
@@ -25,7 +25,7 @@
         shell: bash -l {0}
         run: |
           sudo apt install -y texlive-latex-recommended texlive-fonts-recommended texlive-latex-extra
-          make html -C doc
+          make html SPHINXOPTS="-W -T -j3" -C doc
       - name: Upload
         if: ${{ github.ref == 'refs/heads/master' }}
         run: |
15 changes: 15 additions & 0 deletions doc/conf.py
@@ -15,6 +15,18 @@
 import sys
 import subprocess

+import sphinx_gallery
+try:
+    from packaging.version import Version
+except ImportError:
+    from pkg_resources import parse_version as Version
+
+try:
+    import joblib
+    have_joblib = True
+except ImportError:
+    have_joblib = False
+
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
@@ -38,6 +50,9 @@
     'show_signature': False,
 }

+if Version(sphinx_gallery.__version__) >= Version("0.17.0"):
+    sphinx_gallery_conf["parallel"] = have_joblib
+
 autodoc_default_options = {
     "members": None,
     "inherited-members": None,
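Background for this change: sphinx-gallery 0.17.0 added a `parallel` option that runs the gallery examples concurrently through joblib (hence the new joblib entry in requirements.txt below), while the `-j3` flag passed to Sphinx in the workflow parallelizes the build itself (`-W` turns warnings into errors and `-T` prints full tracebacks). A minimal standalone sketch of the same version-gated configuration; the directory values and the integer worker count are assumptions:

```python
# Sketch for a doc/conf.py, assuming sphinx-gallery and joblib are installed.
import sphinx_gallery
from packaging.version import Version

sphinx_gallery_conf = {"examples_dirs": "examples", "gallery_dirs": "auto_examples"}

if Version(sphinx_gallery.__version__) >= Version("0.17.0"):
    try:
        import joblib  # noqa: F401  -- needed by sphinx-gallery for parallel runs

        sphinx_gallery_conf["parallel"] = True  # or an int worker count (assumption)
    except ImportError:
        pass  # joblib missing: keep the serial build
```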
7 changes: 0 additions & 7 deletions doc/examples/plot_crosscut_distribution_2d.py
@@ -5,11 +5,7 @@

 # %%
 import openturns as ot
-import numpy as np
 import otbenchmark as otb
-import openturns.viewer as otv
-import pylab as pl
-

 # %%
 # Create a Funky distribution
@@ -20,18 +16,15 @@
 x2 = ot.Normal(2.0, 1.0)
 x_funk = ot.ComposedDistribution([x1, x2], copula)

-
 # %%
 # Create a Punk distribution
 x1 = ot.Normal(1.0, 1.0)
 x2 = ot.Normal(-2.0, 1.0)
 x_punk = ot.ComposedDistribution([x1, x2], copula)

-
 # %%
 distribution = ot.Mixture([x_funk, x_punk], [0.5, 1.0])

-
 # %%
 referencePoint = distribution.getMean()

2 changes: 2 additions & 0 deletions doc/examples/sensitivity_problems/README.txt
@@ -0,0 +1,2 @@
Sensitivity examples
====================
171 changes: 171 additions & 0 deletions doc/examples/sensitivity_problems/plot_convergence_ishigami.py
@@ -0,0 +1,171 @@
"""
Convergence of estimators on Ishigami
=====================================
"""

# %%
# In this example, we study the convergence of sensitivity index estimators on the Ishigami test function.
#
# We compare several approaches:
#
# * sampling methods with different estimators: Saltelli, Mauntz-Kucherenko, Martinez, Jansen,
# * sampling methods with different designs of experiments: Monte-Carlo, LHS, Quasi-Monte-Carlo,
# * polynomial chaos.
#

# %%
import openturns as ot
import otbenchmark as otb
import openturns.viewer as otv
import numpy as np


# %%
# When we estimate Sobol' indices, we may encounter the following warning messages:
# ```
# WRN - The estimated first order Sobol index (2) is greater than its total order index...
# WRN - The estimated total order Sobol index (2) is lesser than first order index ...
# ```
# Many of these messages would be printed in this example. This is why we disable them with:
ot.Log.Show(ot.Log.NONE)


# %%
problem = otb.IshigamiSensitivity()
print(problem)

# %%
distribution = problem.getInputDistribution()
model = problem.getFunction()

# %%
# Exact first and total order indices
exact_first_order = problem.getFirstOrderIndices()
print(exact_first_order)
exact_total_order = problem.getTotalOrderIndices()
print(exact_total_order)
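These exact values follow from the closed-form variance decomposition of the Ishigami function. As a quick cross-check (a sketch, not part of the committed example; a = 7, b = 0.1 are the usual Ishigami parameters and are assumed here):

```python
import numpy as np

a, b = 7.0, 0.1  # assumed Ishigami parameters
variance = a**2 / 8 + b * np.pi**4 / 5 + b**2 * np.pi**8 / 18 + 0.5
s1 = 0.5 * (1 + b * np.pi**4 / 5) ** 2 / variance
s2 = (a**2 / 8) / variance
s3 = 0.0  # x3 has no first-order effect...
t3 = (8 * b**2 * np.pi**8 / 225) / variance  # ...it acts only jointly with x1
print(s1, s2, s3, t3)  # approximately 0.3139, 0.4424, 0.0, 0.2437
```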

# %%
# Perform sensitivity analysis
# ----------------------------

# %%
# Create X/Y data
ot.RandomGenerator.SetSeed(0)
size = 10000
inputDesign = ot.SobolIndicesExperiment(distribution, size).generate()
outputDesign = model(inputDesign)
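Note the cost of this design: for d inputs, SobolIndicesExperiment generates size * (d + 2) points when second-order indices are not requested (the default). Continuing with the variables above, a quick sanity check under that assumption:

```python
# Ishigami has d = 3 inputs, so the design should hold 10000 * 5 = 50000 points.
d = distribution.getDimension()
assert inputDesign.getSize() == size * (d + 2)
```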

# %%
# Compute first order indices using the Saltelli estimator
sensitivityAnalysis = ot.SaltelliSensitivityAlgorithm(inputDesign, outputDesign, size)
computed_first_order = sensitivityAnalysis.getFirstOrderIndices()
computed_total_order = sensitivityAnalysis.getTotalOrderIndices()

# %%
# Compare with exact results
print("Sample size : ", size)
# First order
# Compute the absolute error (the log-relative error cannot be computed,
# because the exact index S can be zero)
print("Computed first order = ", computed_first_order)
print("Exact first order = ", exact_first_order)
# Total order
print("Computed total order = ", computed_total_order)
print("Exact total order = ", exact_total_order)

# %%
dimension = distribution.getDimension()

# %%
# Compute componentwise absolute error.
first_order_AE = ot.Point(np.abs(exact_first_order - computed_first_order))
total_order_AE = ot.Point(np.abs(exact_total_order - computed_total_order))

# %%
print("Absolute error")
for i in range(dimension):
    print(
        "AE(S%d) = %.4f, AE(T%d) = %.4f" % (i, first_order_AE[i], i, total_order_AE[i])
    )

# %%
metaSAAlgorithm = otb.SensitivityBenchmarkMetaAlgorithm(problem)
for estimator in ["Saltelli", "Martinez", "Jansen", "MauntzKucherenko", "Janon"]:
print("Estimator:", estimator)
benchmark = otb.SensitivityConvergence(
problem,
metaSAAlgorithm,
numberOfRepetitions=4,
maximum_elapsed_time=2.0,
sample_size_initial=20,
estimator=estimator,
)
grid = benchmark.plotConvergenceGrid(verbose=False)
view = otv.View(grid)
figure = view.getFigure()
_ = figure.suptitle("%s, %s" % (problem.getName(), estimator))
figure.set_figwidth(10.0)
figure.set_figheight(5.0)
figure.subplots_adjust(wspace=0.4, hspace=0.4)

# %%
benchmark = otb.SensitivityConvergence(
    problem,
    metaSAAlgorithm,
    numberOfRepetitions=4,
    maximum_elapsed_time=2.0,
    sample_size_initial=20,
    estimator="Saltelli",
    sampling_method="MonteCarlo",
)
graph = benchmark.plotConvergenceCurve()
_ = otv.View(graph)

# %%
grid = ot.GridLayout(1, 3)
maximum_absolute_error = 1.0
minimum_absolute_error = 1.0e-5
sampling_method_list = ["MonteCarlo", "LHS", "QMC"]
for sampling_method_index in range(3):
    sampling_method = sampling_method_list[sampling_method_index]
    benchmark = otb.SensitivityConvergence(
        problem,
        metaSAAlgorithm,
        numberOfRepetitions=4,
        maximum_elapsed_time=2.0,
        sample_size_initial=20,
        estimator="Saltelli",
        sampling_method=sampling_method,
    )
    graph = benchmark.plotConvergenceCurve()
    # Use a common vertical range so the three sampling methods can be compared.
    box = graph.getBoundingBox()
    bound = box.getLowerBound()
    bound[1] = minimum_absolute_error
    box.setLowerBound(bound)
    bound = box.getUpperBound()
    bound[1] = maximum_absolute_error
    box.setUpperBound(bound)
    graph.setBoundingBox(box)
    grid.setGraph(0, sampling_method_index, graph)
_ = otv.View(grid)
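The comparison above illustrates the expected behavior: crude Monte-Carlo converges at the canonical O(n^-1/2) rate, and QMC typically does better on this smooth, low-dimensional function. A generic sketch of how an observed rate can be estimated from (sample size, error) pairs; the data below is made up for illustration:

```python
import numpy as np

# Hypothetical (sample_size, absolute_error) measurements.
sizes = np.array([20, 40, 80, 160, 320, 640])
errors = np.array([0.21, 0.16, 0.11, 0.075, 0.054, 0.038])

# Fit error ~ C * n**alpha by least squares in log-log space.
alpha, log_c = np.polyfit(np.log(sizes), np.log(errors), 1)
print("observed rate: n^%.2f" % alpha)  # close to -0.5 for Monte-Carlo
```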

# %%
# Use polynomial chaos.
benchmark = otb.SensitivityConvergence(
    problem,
    metaSAAlgorithm,
    numberOfExperiments=12,
    numberOfRepetitions=1,
    maximum_elapsed_time=5.0,
    sample_size_initial=20,
    use_sampling=False,
    total_degree=20,
    hyperbolic_quasinorm=1.0,
)
graph = benchmark.plotConvergenceCurve(verbose=True)
graph.setLegendPosition("bottomleft")
_ = otv.View(graph)

# %%
otv.View.ShowAll()
56 changes: 56 additions & 0 deletions doc/examples/sensitivity_problems/plot_print_problems.py
@@ -0,0 +1,56 @@
"""
Print the sensitivity analysis problems
=======================================
"""

# %%
# In this example, we show how to print all the sensitivity analysis problems.

# %%
import otbenchmark as otb
import pandas as pd

# %%
# We import the list of problems.
benchmarkProblemList = otb.SensitivityBenchmarkProblemList()
numberOfProblems = len(benchmarkProblemList)
print(numberOfProblems)

# %%
# For each problem in the benchmark, print the problem name and the exact Sobol' indices.
for i in range(numberOfProblems):
    problem = benchmarkProblemList[i]
    name = problem.getName()
    first_order_indices = problem.getFirstOrderIndices()
    total_order_indices = problem.getTotalOrderIndices()
    dimension = problem.getInputDistribution().getDimension()
    print(
        "#",
        i,
        ":",
        name,
        " : S = ",
        first_order_indices,
        "T=",
        total_order_indices,
        ", dimension=",
        dimension,
    )

# %%
problem_names = [
    benchmarkProblem.getName() for benchmarkProblem in benchmarkProblemList
]
columns = ["$d$"]
df_problems_list = pd.DataFrame(index=problem_names, columns=columns)
for problem in benchmarkProblemList:
    name = problem.getName()
    d = problem.getInputDistribution().getDimension()
    df_problems_list.loc[name] = [int(d)]
print(df_problems_list)

# %%
latex_code = df_problems_list.to_latex()
text_file = open("sensitivity_problems_list.tex", "w")
text_file.write(latex_code)
text_file.close()
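A caveat about this last step (an observation about pandas, not something the commit addresses): with the pandas<2.0 pin in requirements.txt, `DataFrame.to_latex` escapes special characters by default, so the `$d$` header is written as `\$d\$`. A sketch of how to keep it as LaTeX math:

```python
# Sketch: pass escape=False so the "$d$" column header stays LaTeX math.
latex_code = df_problems_list.to_latex(escape=False)
with open("sensitivity_problems_list.tex", "w") as text_file:
    text_file.write(latex_code)
```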
5 changes: 3 additions & 2 deletions otbenchmark/SparsePolynomialChaosSensitivityAnalysis.py
@@ -145,8 +145,9 @@ def run(self, verbose=False):
         experiment = ot.MonteCarloExperiment(distribution, self.sample_size_test)
         inputTest = experiment.generate()
         outputTest = model(inputTest)
-        val = ot.MetaModelValidation(inputTest, outputTest, metamodel)
-        predictivity_coefficient = val.computePredictivityFactor()[0]
+        predictions = metamodel(inputTest)
+        val = ot.MetaModelValidation(outputTest, predictions)
+        predictivity_coefficient = val.computeR2Score()[0]
         if verbose:
             print("Q2=%.2f%%" % (100 * predictivity_coefficient))

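This hunk follows an OpenTURNS API change (an inference from the diff, not stated in the commit message): MetaModelValidation used to take the test inputs, test outputs, and the metamodel, whereas newer releases take the reference outputs and the metamodel predictions, and computePredictivityFactor was renamed computeR2Score. A self-contained sketch of the newer usage with a hypothetical toy model:

```python
import openturns as ot

# Hypothetical 1D model and surrogate, for illustration only.
model = ot.SymbolicFunction(["x"], ["sin(x)"])
metamodel = ot.SymbolicFunction(["x"], ["x - x^3 / 6"])  # truncated Taylor series

inputTest = ot.Normal().getSample(100)
outputTest = model(inputTest)
predictions = metamodel(inputTest)

validation = ot.MetaModelValidation(outputTest, predictions)
print(validation.computeR2Score())  # one Q2/R2 value per output marginal
```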
1 change: 1 addition & 0 deletions requirements.txt
@@ -13,3 +13,4 @@ pandas<2.0
 tqdm
 shapely
 lxml-html-clean
+joblib
