From 95b28088ef7ed0e7158fe1a27eef1c87ed3aa1fa Mon Sep 17 00:00:00 2001 From: Michael Baudin <31351465+mbaudin47@users.noreply.github.com> Date: Mon, 11 Sep 2023 12:22:48 +0200 Subject: [PATCH 01/31] KernelSmoothing: Improve doc * KernelSmoothing: Fix doc. Fixed bibliographic references in KernelSmoothing. * Added doc of ResourceMap keys. * Clarifies the Mixed rule --- python/doc/bibliography.rst | 23 +- .../theory/data_analysis/kernel_smoothing.rst | 617 ++++++++++++++---- python/src/HistogramFactory_doc.i.in | 12 +- python/src/KernelSmoothing_doc.i.in | 57 +- 4 files changed, 563 insertions(+), 146 deletions(-) diff --git a/python/doc/bibliography.rst b/python/doc/bibliography.rst index 988c1482fe0..0ae82e5f32a 100644 --- a/python/doc/bibliography.rst +++ b/python/doc/bibliography.rst @@ -47,6 +47,8 @@ Bibliography `pdf `__ .. [ceres2012] Sameer Agarwal and Keir Mierle and Others, *Ceres Solver*, http://ceres-solver.org +.. [chacon2018] Chacón, J. E., & Duong, T. (2018). + *Multivariate kernel smoothing and its applications.* CRC Press. .. [cminpack2007] Devernay, F. *C/C++ Minpack*, 2007. http://devernay.free.fr/hacks/cminpack .. [coles2001] Coles, S. G., *An Introduction to Statistical Modelling of Extreme Values*. @@ -249,6 +251,9 @@ Bibliography http://www.itl.nist.gov/div898/handbook/ .. [nlopt2009] Steven G. Johnson, The NLopt nonlinear-optimization package, http://ab-initio.mit.edu/nlopt +.. [park1990] Byeong U. Park and J. S. Marron. + *Comparison of data-driven bandwidth selectors.* + Journal of the American Statistical Association, 85(409) :66–72, 1990. .. [petras2003] Petras, K. (2003). *Smolyak cubature of given polynomial degree with few nodes for increasing dimension.* Numerische Mathematik, 93 (4), 729-753. @@ -259,6 +264,9 @@ Bibliography *Design of computer experiments: Space filling and beyond.* Statistics and Computing 22(3): 681-701. `pdf `__ +.. 
[raykar2006] Vikas Chandrakant Raykar, Ramani Duraiswami + *Very Fast optimal bandwidth selection for univariate kernel density estimation.* + CS-TR-4774. University of Maryland, College Park, MD 20783, 2006 .. [rawlings2001] Rawlings, J. O., Pantula, S. G., and Dickey, D. A. *Applied regression analysis: a research tool.* Springer Science and Business Media, 2001. @@ -288,14 +296,27 @@ Bibliography Journal of Mechanical Design, 134(3):031008, 2012. .. [saporta1990] Saporta, G. (1990). *Probabilités, Analyse de données et Statistique*, Technip -.. [scott1992] David W. Scott (1992). *Multivariate density estimation*, +.. [scott1992] Scott, D. W. (1992). *Multivariate density estimation*, John Wiley & Sons, Inc. +.. [scott2015] Scott, D. W. (2015). + *Multivariate density estimation: theory, practice, and visualization.* + John Wiley & Sons. .. [ScottStewart2011] W. F. Scott & B. Stewart. *Tables for the Lilliefors and Modified Cramer–von Mises Tests of Normality.*, Communications in Statistics - Theory and Methods. Volume 40, 2011 - Issue 4. Pages 726-730. +.. [sheather1991] Sheather, S. J. and Jones, M. C. (1991). + *A reliable data-based bandwidth selection method for kernel density estimation.* + Journal of the Royal Statistical Society. Series B (Methodological), + 53(3) :683–690. .. [simard2011] Simard, R. & L'Ecuyer, P. *Computing the Two-Sided Kolmogorov- Smirnov Distribution.* Journal of Statistical Software, 2011, 39(11), 1-18. `pdf `__ +.. [silverman1982] B. W. Silverman + *Algorithm AS 176: Kernel Density Estimation Using the Fast Fourier Transform.* + Journal of the Royal Statistical Society. Series C (Applied Statistics), + Vol. 31, No. 1 (1982), pp. 93-99 (7 pages) +.. [silverman1986] Silverman, B. W. (1986). + *Density estimation.* (Chapman Hall, London). .. [sobol1993] Sobol, I. M. *Sensitivity analysis for non-linear mathematical model* Math. Modelling Comput. Exp., 1993, 1, 407-414. 
`pdf `__ diff --git a/python/doc/theory/data_analysis/kernel_smoothing.rst b/python/doc/theory/data_analysis/kernel_smoothing.rst index b016cf16f9a..8d069a6d5ed 100644 --- a/python/doc/theory/data_analysis/kernel_smoothing.rst +++ b/python/doc/theory/data_analysis/kernel_smoothing.rst @@ -5,298 +5,613 @@ Kernel smoothing Kernel smoothing is a non parametric estimation method of the probability density function of a distribution. -In dimension 1, the kernel smoothed probability density function :math:`\hat{p}` has the following expression, +Introduction +~~~~~~~~~~~~ + +Let :math:`X` be a random variable with probability density function :math:`p`. +Given a sample of independent observations :math:`x_1, ..., x_n` of :math:`X` +and any point :math:`x \in \Rset`, the kernel smoothing estimator provides +an approximation :math:`\widehat{p}(x)` of :math:`p(x)`. + +In dimension 1, the kernel smoothed probability density function :math:`\widehat{p}` has the following expression, where *K* is the univariate kernel, *n* the sample size and :math:`(X_1, \cdots, X_n) \in \Rset^n` -the univariate random sample with :math:`\forall i, \, \, X_i \in \Rset`: +the univariate random sample with :math:`\forall i, \, \, X_i \in \Rset` ([wand1994]_ eq. 2.2 page 12): .. math:: :label: kernelSmooth - \hat{p}(x) = \displaystyle \frac{1}{nh}\sum_{i=1}^{n} K\left(\frac{x-X_i}{h}\right) + \widehat{p}(x) = \frac{1}{nh}\sum_{i=1}^{n} K\left(\frac{x-X_i}{h}\right) + +The kernel *K* is a function satisfying: + +.. math:: + + \int K(x)\, dx=1. -The kernel *K* is a function satisfying :math:`\int K(x)\, dx=1`. -Usually *K* is chosen to be a unimodal probability density function that is symmetric about 0. +Usually, the kernel *K* is chosen to be a unimodal probability density function that is symmetric about 0. The parameter *h* is called the *bandwidth*. 
+Multivariate kernels +~~~~~~~~~~~~~~~~~~~~ In dimension :math:`d>1`, the kernel may be defined as a product kernel :math:`K_d`, -as follows where :math:`\vect{x} = (x_1, \cdots, x_d)\in \Rset^d`: +as follows where :math:`\vect{x} = (x_1, \cdots, x_d)\in \Rset^d` +([wand1994]_ eq. 2.2 page 91): .. math:: - K_d(\vect{x}) = \displaystyle \prod_{j=1}^{d} K(x_j) + K_d(\vect{x}) = \prod_{j=1}^{d} K(x_j). + +The kernel smoothed probability density function in dimension *d* is: + +.. math:: + + \widehat{p}(\vect{x}) + = \frac{1}{n \prod_{j=1}^{d}h_j} \sum_{i=1}^{n} K_d\left(\frac{x_1 - (X_{i})_1 }{h_1}, \dots, \frac{x_d - (X_{i})_d}{h_d}\right) -which leads to the kernel smoothed probability density function in dimension *d*, where :math:`(\vect{X}_1, \cdots, \vect{X}_n)` is the d-variate random sample -which components are denoted :math:`\vect{X}_i = (X_{i1}, \dots, X_{id})`: +which components are denoted :math:`\vect{X}_i = (X_{i1}, \dots, X_{id})`. + +In the multivariate case, the bandwidth is the vector +:math:`\vect{h} = (h_1, \cdots, h_d)`. + +Asymptotic error and asymptotically optimal bandwidth +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The quality of the approximation may be controlled by the AMISE (Asymptotic Mean Integrated Square error) criteria defined as follows. +By definition, the mean squared error (MSE) at point :math:`\vect{x}` is +(see [scott2015]_ eq. page 40, [wand1994]_ pages 14-21): + +.. math:: + + \operatorname{MSE}(\widehat{p}, \vect{x}) + = \mathbb{E}_\vect{X}\left[\left(\widehat{p}(\vect{x}) - p(\vect{x})\right)^2 \right]. + + +It can be proved that the mean squared error is the sum of the +variance and the squared bias: .. 
math:: - \hat{p}(\vect{x}) = \displaystyle \frac{1}{N \prod_{j=1}^{d}h_j} \sum_{i=1}^{N} K_d\left(\frac{x_1 - X_{i1} }{h_1}, \dots, \frac{x_d - X_{id}}{h_d}\right) + \operatorname{MSE}(\widehat{p}, \vect{x}) + = \operatorname{Var}_\vect{X}\left[\widehat{p}(\vect{x})\right] + + \left(\operatorname{Bias}\left(\widehat{p}(\vect{x})\right)\right)^2 + +where the bias is: -Let's note that the bandwidth is the vector :math:`\vect{h} = (h_1, \cdots, h_d)`. +.. math:: + \operatorname{Bias}\left(\widehat{p}(\vect{x})\right) + = \mathbb{E}_\vect{X}\left[\widehat{p}(\vect{x})\right] - p(\vect{x}). -The quality of the approximation may be controlled by the AMISE (Asymptotic Mean Integrated Square error) criteria defined as: +The MSE depends on the point where the density is evaluated. +The mean integrated squared error (MISE) is (see [scott2015]_ eq. page 41): .. math:: + \operatorname{MISE}(\widehat{p}) + = \mathbb{E}_\vect{X}\left[||\widehat{p} - p||^2_{L_2}\right] = \int \, \operatorname{MSE}(\widehat{p}, \vect{x}) \, d\vect{x} \\ - \left\{ - \begin{array}{lcl} - AMISE(\hat{p}) & = & \mbox{two first terms in the series expansion with respect to $n$ in } MISE(\hat{p}) \\ - MISE(\hat{p}) & = & \mathbb{E}_\vect{X}\left[||\hat{p} - p||^2_{L_2}\right] = \int \, MSE(\hat{p}, \vect{x}) \, d\vect{x} \\ - MSE(\hat{p}, \vect{x})& = & \left[ \mathbb{E}_\vect{X}\left[\hat{p}(\vect{x})\right] - p(\vect{x})\right]^2 + {\rm Var}_\vect{X}\left[\hat{p}(\vect{x})\right] - \end{array} - \right. +Finally, the asymptotic mean integrated squared error (AMISE), +denoted :math:`\operatorname{AMISE}(\widehat{p})` is defined as the two first terms +in the Taylor series expansion of :math:`\operatorname{MISE}(\widehat{p})` when :math:`n` +tends to infinity. The quality of the estimation essentially depends on the value of the bandwidth *h*. 
-The bandwidth that minimizes the AMISE criteria has the expression (given in dimension 1): +In dimension 1, the bandwidth that minimizes the AMISE criteria is +(see [wand1994]_ eq 2.13 page 22): .. math:: :label: AMISE1 - h_{AMISE}(K) = \displaystyle \left[ \frac{R(K)}{\mu_2(K)^2R(p^{(2)})}\right]^{\frac{1}{5}}n^{-\frac{1}{5}} + h_{\operatorname{AMISE}}(K) + = \left( \frac{R(K)}{\mu_2(K)^2 R\left(p^{(2)}\right)}\right)^{\frac{1}{5}}n^{-\frac{1}{5}} + +where the rugosity of the kernel is: + +.. math:: + R(K) = \int K(\vect{x})^2\, d\vect{x} + +and the second raw moment of the kernel is: + +.. math:: + \mu_2(K) = \int \vect{x}^2K(\vect{x})\, d\vect{x} = \sigma_K^2. + +In the equation :eq:`AMISE1`, the expression :math:`R\left(p^{(2)}\right)` is the rugosity of +the second derivative of the density probability function :math:`p` that +we wish to approximate. +Since, by hypothesis, the true density :math:`p` is unknown, its +second derivative is also unknown. +Hence the equation :eq:`AMISE1` cannot be used directly to compute the bandwidth. + +We have ([wand1994]_ page 67): + +.. math:: + R\left(p^{(r)}\right) = (-1)^r\Psi_{2r} + +where: -where :math:`R(K) = \int K(\vect{x})^2\, d\vect{x}` and :math:`\mu_2(K) = \int \vect{x}^2K(\vect{x})\, d\vect{x} = \sigma_K^2`. +.. math:: + \Psi_r(p) + = \int p^{(r)}p(x)\, dx = \mathbb{E}_\vect{X}\left[p^{(r)}\right]. -If we note that :math:`R(p^{(r)}) = (-1)^r\Phi_{2r}` with :math:`\Phi_r = \int p^{(r)}p(x)\, dx = \mathbb{E}_\vect{X}\left[p^{(r)}\right]`, -then relation writes: +Therefore: .. 
math::
    :label: AMISE

-    h_{AMISE}(K) = \displaystyle \left[ \frac{R(K)}{\mu_2(K)^2\Phi_4}\right]^{\frac{1}{5}}n^{-\frac{1}{5}}
+    h_{\operatorname{AMISE}}(K)
+    = \left( \frac{R(K)}{\mu_2(K)^2\Psi_4(p)}\right)^{\frac{1}{5}}n^{-\frac{1}{5}}
 
-Several methods exist to evaluate the optimal bandwidth :math:`h_{AMISE}(K)` based on different approximations of :math:`\Phi_4`:
+Several methods exist to evaluate the optimal bandwidth :math:`h_{\operatorname{AMISE}}(K)` based on different approximations of :math:`\Psi_4`:
 
 - Silverman's rule in dimension 1,
 - the plug-in bandwidth selection,
 - Scott's rule in dimension d.
 
+Efficiency of a kernel
+~~~~~~~~~~~~~~~~~~~~~~
+
+Let :math:`K` be a kernel.
+We may wonder whether a particular kernel is able to reduce the
+estimation error.
+The efficiency of a kernel is (see [scott2015]_ page 151):
+
+.. math::
+    \operatorname{eff}(k)
+    = \frac{\sigma_{k_E} R(k_E)}{\sigma_k R(k)}
+
+where :math:`k_E` is Epanechnikov's kernel.
+The AMISE error is inversely proportional to the efficiency (see [scott2015]_
+eq. 6.25 page 151):
+
+.. math::
+    \operatorname{AMISE} \propto \frac{1}{\operatorname{eff}(k)}
+
+The next table presents several kernels available in the
+library and their associated variance, rugosity and efficiency.
+We see that the best kernel is Epanechnikov's kernel.
+We also see that there is not much difference between the different
+kernels.
+This is one of the reasons why the normal kernel is often used.
+ ++-----------------+---------------------------------+----------------+--------------------------+-----------------------------------+ +| Kernel | :math:`\operatorname{Var}(k)` | :math:`R(k)` | :math:`\sigma_k R(k)` | :math:`\operatorname{eff}(k)` | ++=================+=================================+================+==========================+===================================+ +| Epanechnikov | 0.2000 | 0.6000 | 0.2683 | 100.00 \% | ++-----------------+---------------------------------+----------------+--------------------------+-----------------------------------+ +| Biweight | 0.1429 | 0.7143 | 0.2700 | 99.39 \% | ++-----------------+---------------------------------+----------------+--------------------------+-----------------------------------+ +| Quartic | 0.1429 | 0.7143 | 0.2700 | 99.39 \% | ++-----------------+---------------------------------+----------------+--------------------------+-----------------------------------+ +| Triweight | 0.1111 | 0.8159 | 0.2720 | 98.67 \% | ++-----------------+---------------------------------+----------------+--------------------------+-----------------------------------+ +| Triangular | 0.1667 | 0.6667 | 0.2722 | 98.59 \% | ++-----------------+---------------------------------+----------------+--------------------------+-----------------------------------+ +| Normal | 1.0000 | 0.2821 | 0.2821 | 95.12 \% | ++-----------------+---------------------------------+----------------+--------------------------+-----------------------------------+ +| Uniform | 0.3333 | 0.5000 | 0.2887 | 92.95 \% | ++-----------------+---------------------------------+----------------+--------------------------+-----------------------------------+ +| Logistic | 3.2899 | 0.1667 | 0.3023 | 88.76 \% | ++-----------------+---------------------------------+----------------+--------------------------+-----------------------------------+ + +**Table 1.** Efficiency of several order 2 kernels. 
+ Silverman's rule (dimension 1) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -In the case where the density *p* is normal with standard deviation :math:`\sigma`, -then the term :math:`\Phi_4` can be exactly evaluated. -In that particular case, the optimal bandwidth of relation :eq:`AMISE` with respect to the AMISE criteria writes as follows: +In this section, we consider a random variable i.e. :math:`d = 1`. +If the density *p* is normal with standard deviation :math:`\sigma`, +then the term :math:`\Psi_4` can be exactly evaluated. +By definition, the Silverman rule for the bandwidth is +the optimal bandwidth of the AMISE criteria when the true density *p* is normal +(see [silverman1986]_ page 45): + +.. math:: + :label: Silverman + + h^{Silver}(K) + := h^{p = normal}_{\operatorname{AMISE}}(K) + = \left( \frac{8\sqrt{\pi} R(K)}{3\mu_2(K)^2}\right)^{\frac{1}{5}} + \sigma n^{-\frac{1}{5}}. + +The Silverman rule is based on the hypothesis that the true +density *p* is close to the normal density, +even if the density *p* is not necessarily normal. + +The equation :eq:`Silverman` is accurate when +the density is not *far* from a normal one. +In the special case where we use the normal kernel, the Silverman rule +is (see [silverman1986]_ eq 3.28 page 45): .. math:: - :label: pNormal + :label: SilvermanNormal - h^{p = normal}_{AMISE}(K) = \displaystyle \left[ \frac{8\sqrt{\pi} R(K)}{3\mu_2(K)^2}\right]^{\frac{1}{5}}\sigma n^{-\frac{1}{5}} + h^{Silver}(\mathcal{N}) + = 1.06 \sigma n^{-\frac{1}{5}}. -An estimator of :math:`h^{p= normal}_{AMISE}(K)` is obtained by replacing :math:`\sigma` by its estimator :math:`\hat{\sigma}^n`, -evaluated from the sample :math:`(X_1, \dots, X_n)`: +Choice for the standard deviation +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +We estimate the standard deviation :math:`\sigma` by its sample +counterpart :math:`\hat{\sigma}`, evaluated from the sample +:math:`(x_1, \dots, x_n)`: .. 
math:: :label: Estimpnormal - \hat{h}^{p = normal}_{AMISE}(K) = \displaystyle \left[ \frac{8\sqrt{\pi} R(K)}{3\mu_2(K)^2}\right]^{\frac{1}{5}}\hat{\sigma}^n n^{-\frac{1}{5}} + h^{Silver}(K) + = \left( \frac{8\sqrt{\pi} R(K)}{3\mu_2(K)^2}\right)^{\frac{1}{5}} + \hat{\sigma} n^{-\frac{1}{5}} -The Silverman rule consists in considering :math:`\hat{h}^{p = normal}_{AMISE}(K)` of relation :eq:`Estimpnormal` even if the density *p* is not normal: +The estimator :math:`\hat{\sigma}` of the true standard deviation can +be estimated using the sample standard deviation based +on the sample :math:`(x_1, \dots, x_n)`. +This is: .. math:: - :label: Silverman + \hat{\sigma} + = \sqrt{\frac{1}{n - 1} \sum_{i = 1}^n (x_i - \bar{x})^2 } + +where :math:`\bar{x}` is the sample mean: + +.. math:: + \bar{x} + = \frac{1}{n} \sum_{i = 1}^n x_i. + +Another method is to use the standardized interquartile range +([wand1994]_ page 60): - h^{Silver}(K) = \displaystyle \left[ \frac{8\sqrt{\pi} R(K)}{3\mu_2(K)^2}\right]^{\frac{1}{5}}\hat{\sigma}^n n^{-\frac{1}{5}} +.. math:: + \widehat{\sigma}_{\operatorname{IQR}} + = \frac{\widehat{q}(3/4) - \widehat{q}(1/4)}{\Phi^{-1}(3/4) - \Phi^{-1}(1/4)} -Relation :eq:`Silverman` is empirical and gives good results when the density is not *far* from a normal one. +where :math:`\Phi^{-1}` is the quantile function of the +standard normal distribution and +:math:`\widehat{q}(3/4)` and :math:`\widehat{q}(1/4)` are the sample +quartiles at levels 75% and 25% respectively. +The previous estimator is robust against outliers that might be +in the sample. Plug-in bandwidth selection method (dimension 1) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The plug-in bandwidth selection method improves the estimation of the rugosity of the second derivative of the density. 
-Instead of making the gaussian assumption, the method uses a kernel smoothing method +Instead of making the Gaussian assumption, the method uses a kernel smoothing method in order to make an approximation of higher derivatives of the density. +This method is due to [sheather1991]_ who used ideas from [park1990]_. +The algorithm is presented in [wand1994]_, page 74 under the "Solve the equation rule" name. +The implementation uses ideas from [raykar2006]_, but the fast selection is not implemented. -Relation :eq:`AMISE` requires the evaluation of the quantity :math:`\Phi_4`. -As a general rule, we use the estimator :math:`\hat{\Phi}_r` of :math:`\Phi_r` defined by: +The equation :eq:`AMISE` requires the evaluation of the quantity :math:`\Psi_4`. +We use the estimator :math:`\hat{\Psi}_r` of :math:`\Psi_r`, using a kernel +density estimator of the :math:`r`-th derivative of the +density. +The estimator is (see [wand1994]_ page 64): .. math:: :label: EstimPhir - \hat{\Phi}_r = \displaystyle \frac{1}{n}\sum_{i=1}^{n} \hat{p}^{(r)}(X_i) + \hat{\Psi}_r(K) + = \frac{1}{n}\sum_{i=1}^{n} \widehat{p}^{(r)}(X_i) -Deriving relation :eq:`kernelSmooth` leads to: +where :math:`\hat{\Psi}_r(K)` is the estimator based on the kernel +*K*. +Deriving equation :eq:`kernelSmooth` leads to: .. math:: :label: kernelSmoothDerivative - \hat{p}^{(r)}(x) = \displaystyle \frac{1}{nh^{r+1}}\sum_{i=1}^{n} K^{(r)}\left(\frac{x-X_i}{h}\right) + \widehat{p}^{(r)}(x) + = \frac{1}{nh^{r+1}}\sum_{i=1}^{n} K^{(r)}\left(\frac{x-X_i}{h}\right) -and then the estimator :math:`\hat{\Phi}_r(h)` is defined as: +and then the estimator :math:`\hat{\Psi}_r(h)` is defined as: .. 
math::
    :label: EstimPhirFin

-    \hat{\Phi}_r(h) = \displaystyle \frac{1}{n^2h^{r+1}}\sum_{i=1}^{n}\sum_{j=1}^{n} K^{(r)}\left(\frac{X_i-X_j}{h}\right)
+    \hat{\Psi}_r(h)
+    = \frac{1}{n^2h^{r+1}}\sum_{i=1}^{n}\sum_{j=1}^{n} K^{(r)}\left(\frac{X_i-X_j}{h}\right)
 
-We note that :math:`\hat{\Phi}_r(h)` depends of the parameter *h* which can be
-taken in order to minimize the AMSE (Asymptotic Mean Square Error) criteria
-evaluated between :math:`\Phi_r` and :math:`\hat{\Phi}_r(h)`.
+We note that :math:`\hat{\Psi}_r(h)` depends on the parameter *h* which can be
+taken in order to minimize the Asymptotic Mean Square Error (AMSE) criteria
+evaluated between :math:`\Psi_r` and :math:`\hat{\Psi}_r(h)`.
 The optimal parameter *h* is:
 
 .. math::
     :label: optimHamse
 
-    h^{(r)}_{AMSE} = \displaystyle \left(\frac{-2K^{(r)}(0)}{\mu_2(K)\Phi_{r+2}}\right)^{\frac{1}{r+3}}n^{-\frac{1}{r+3}}
+    h^{(r)}_{\operatorname{AMSE}}
+    = \left(\frac{-2K^{(r)}(0)}{\mu_2(K) \Psi_{r+2}}\right)^{\frac{1}{r+3}}n^{-\frac{1}{r+3}}
+
+The previous equation states that the bandwidth :math:`h^{(r)}` required
+to compute :math:`\widehat{p}^{(r)}` depends on :math:`\Psi_{r+2}`.
+But to compute :math:`\Psi_{r+2}`, we need :math:`h^{(r + 2)}`, etc.
+The goal of the method is to break this infinite set of equations at some point
+by providing a *pilot* bandwidth.
+The :math:`\ell`-stage plug-in bandwidth method uses :math:`\ell` different
+intermediate bandwidths before evaluating the final one.
 
-Given that preliminary results, the solve-the-equation plug-in method proceeds as follows:
+In this document, we present the two-stage solve-the-equation plug-in method.
 
-- Relation :eq:`AMISE` defines :math:`h_{AMISE}(K)` as a function of :math:`\Phi_4` we denote here as:
+- The equation :eq:`AMISE` defines :math:`h_{\operatorname{AMISE}}(K)` as a function of :math:`\Psi_4`.
+  Let :math:`t` be the function defined by the equation:
 
 .. math::
     :label: rel1
 
-    h_{AMISE}(K) = t(\Phi_4)
+    h_{\operatorname{AMISE}}(K) = t(\Psi_4).
-- The term :math:`\Phi_4` is approximated by its estimator defined in - :eq:`EstimPhirFin` evaluated with its optimal parameter :math:`h^{(4)}_{AMSE}` +- The term :math:`\Psi_4` is approximated by its estimator defined in + :eq:`EstimPhirFin` evaluated with its optimal parameter :math:`h^{(4)}_{\operatorname{AMSE}}` defined in :eq:`optimHamse`: .. math:: :label: h4 - h^{(4)}_{AMSE} = \displaystyle \left(\frac{-2K^{(4)}(0)}{\mu_2(K)\Phi_{6}}\right)^{\frac{1}{7}}n^{-\frac{1}{7}} + h^{(4)}_{\operatorname{AMSE}} + = \left(\frac{-2K^{(4)}(0)}{\mu_2(K)\Psi_{6}}\right)^{\frac{1}{7}}n^{-\frac{1}{7}} - which leads to a relation of type: + which leads to the approximation: .. math:: :label: rel2 - \Phi_4 \simeq \hat{\Phi}_4(h^{(4)}_{AMSE}) + \hat{\Psi}_4 \left(h^{(4)}_{\operatorname{AMSE}}\right) \approx \Psi_4 -- Relations :eq:`AMISE` and :eq:`h4` lead to the new one: +- The equation :eq:`AMISE` and :eq:`h4` lead to: .. math:: :label: h4hAmise - h^{(4)}_{AMSE} = \displaystyle \left( \frac{-2K^{(4)}(0)\mu_2(K)\Phi_4}{R(K)\Phi_{6}}\right) ^{\frac{1}{7}}h_{AMISE}(K)^{\frac{5}{7}} + h^{(4)}_{\operatorname{AMSE}} + = \left( \frac{-2K^{(4)}(0)\mu_2(K)\Psi_4}{R(K)\Psi_{6}}\right) ^{\frac{1}{7}}h_{\operatorname{AMISE}}(K)^{\frac{5}{7}}. - which rewrites: + Let :math:`\ell` be the function defined by the equation: .. math:: :label: rel3 - h^{(4)}_{AMSE} = l(h_{AMISE}(K)) + h^{(4)}_{\operatorname{AMSE}} + = \ell(h_{\operatorname{AMISE}}(K)). -- Relation :eq:`h4hAmise` depends on both terms :math:`\Phi_4` and - :math:`\Phi_6` which are evaluated with their estimators defined in :eq:`EstimPhirFin` +- The equation :eq:`h4hAmise` depends on both terms :math:`\Psi_4` and + :math:`\Psi_6` which are evaluated with their estimators defined in :eq:`EstimPhirFin` respectively with their AMSE optimal parameters :math:`g_1` and :math:`g_2` - (see relation :eq:`optimHamse`). It leads to the expressions: + (see eq. :eq:`optimHamse`). It leads to the expressions: .. 
math::
    :label: g12

-    \left\{
-    \begin{array}{lcl}
-      g_1 & = & \displaystyle \left(\frac{-2K^{(4)}(0)}{\mu_2(K)\Phi_{6}}\right)^{\frac{1}{7}}n^{-\frac{1}{7}}\\
-      g_2 & = & \displaystyle \left(\frac{-2K^{(6)}(0)}{\mu_2(K)\Phi_{8}}\right)^{\frac{1}{7}}n^{-\frac{1}{9}}
-    \end{array}
-    \right.
+    g_1 & = \left(\frac{-2K^{(4)}(0)}{\mu_2(K)\Psi_{6}}\right)^{\frac{1}{7}}n^{-\frac{1}{7}}\\
+    g_2 & = \left(\frac{-2K^{(6)}(0)}{\mu_2(K)\Psi_{8}}\right)^{\frac{1}{9}}n^{-\frac{1}{9}}
 
-- In order to evaluate :math:`\Phi_6` and :math:`\Phi_8`,
-  we suppose that the density *p* is normal with a variance :math:`\sigma^2`
+- In order to evaluate :math:`\Psi_6` and :math:`\Psi_8`,
+  we assume that the density *p* is normal with a variance :math:`\sigma^2`
   which is approximated by the empirical variance of the sample, which leads to:
 
 .. math::
     :label: Phi68
 
-    \left\{
-    \begin{array}{lcl}
-      \hat{\Phi}_6 & = & \displaystyle \frac{-15}{16\sqrt{\pi}}\hat{\sigma}^{-7}\\
-      \hat{\Phi}_8 & = & \displaystyle \frac{105^{\strut}}{32\sqrt{\pi}}\hat{\sigma}^{-9}
-    \end{array}
-    \right.
+    \hat{\Psi}_6 & = \frac{-15}{16\sqrt{\pi}}\hat{\sigma}^{-7}\\
+    \hat{\Psi}_8 & = \frac{105^{\strut}}{32\sqrt{\pi}}\hat{\sigma}^{-9}
 
-Then, to summarize, thanks to relations :eq:`rel1`, :eq:`rel2`, :eq:`rel3`, :eq:`g12` and :eq:`Phi68`, the optimal bandwidth is solution of the equation:
+Finally, thanks to the equations :eq:`rel1`, :eq:`rel2`, :eq:`rel3`, :eq:`g12` and :eq:`Phi68`,
+the optimal bandwidth of the STE rule, :math:`h^{\operatorname{STE}}`, is solution of the equation:
 
 .. math::
     :label: equhAmise
 
-    \boldsymbol{h_{AMISE}(K) = t \circ \hat{\Phi}_4 \circ l (h_{AMISE}(K))}
+    h^{\operatorname{STE}}
+    = t \circ \hat{\Psi}_4 \circ \ell (h^{\operatorname{STE}})
 
-This method is due to (Sheather, Jones, 1991) who used ideas from (Park, Marron, 1990).
-The algorithm is presented in (Wand, Jones, 1994), page 74 under the "Solve the equation rule" name.
-The implementation uses ideas from (Raykar, Duraiswami, 2006), but the fast selection is not implemented.
+This equation does not necessarily have a closed form expression and
+a numerical method must be used in general.
 
-Scott rule (dimension d)
-~~~~~~~~~~~~~~~~~~~~~~~~
+A cut-off value can be used to define the function :math:`\widehat{\psi_r}` in the equation :eq:`EstimPhirFin`.
+Let :math:`\phi` be the probability density function of the standard Gaussian distribution.
+We have:
 
-The Scott rule is a simplification of the Silverman rule generalized to the
-dimension *d* which is optimal when the density *p* is normal with independent components.
-In all the other cases, it gives an empirical rule that gives good result when the density *p* is not *far* from the normal one.
-For examples, the Scott bandwidth may appear too large when *p* presents several maximum.
+.. math::
+    \phi(x) \rightarrow 0
 
-The Silverman rule given in dimension 1 in relation :eq:`Silverman` can be generalized in dimension *d* as follows:
-if we suppose that the density *p* is normal with independent components,
-in dimension *d* and that we use the normal kernel :math:`N(0,1)` to estimate it,
-then the optimal bandwidth vector :math:`\vect{h}` with respect to the AMISE criteria writes as follows:
+when :math:`|x|\rightarrow +\infty`, with a fast decrease to zero.
+Let :math:`t > 0` be the cut-off value.
+The evaluation is as follows:
 
 .. math::
-    :label: SilvermanNormalKernel
+    \widetilde{\phi}(x)=
+    \begin{cases}
+    \phi(x) & \textrm{ if } |x| \leq t, \\
+    0 & \textrm{ otherwise}.
+    \end{cases}
 
-    \vect{h}^{Silver}(N) = \left(\left(\frac{4}{d+2}\right)^{1/(d+4)}\hat{\sigma}_i^n n^{-1/(d+4)}\right)_i
+Hence, only the most significant values in the evaluation of :math:`\hat{\psi_r}`
+are taken into account, which improves the speed while slightly decreasing
+the accuracy.
-where :math:`\hat{\sigma}_i^n` is the standard deviation of the *i*-th component of the sample -:math:`(\vect{X}_1, \cdots, \vect{X}_n)`, and :math:`\sigma_K` the standard deviation of the 1D kernel *K*. +Rescaling a bandwidth +~~~~~~~~~~~~~~~~~~~~~ -Scott' method is a simplification of Silverman's rule, based on the fact that the coefficient -:math:`\left(\frac{4}{d+2}\right)^{1/(d+4)}` remains in :math:`[0.924, 1.059]` when the dimension *d* varies. -Thus, Scott fixed it to *1*: +In this section, we show that, if the optimal bandwidth is known +for the normal kernel, then it can be computed for any kernel +*K* using a rescaling equation. + +Let :math:`K_1` and :math:`K_2` be two kernels. +The equation :eq:`AMISE1` implies: .. math:: - :label: coefficientScott + :label: ChangeBandwidth - \left(\frac{4}{d+2}\right)^{1/(d+4)} \simeq 1 + \frac{h_{\operatorname{AMISE}}(K_1)}{h_{\operatorname{AMISE}}(K_2)}=\frac{\sigma_{K_2}}{\sigma_{K_1}} + \left(\frac{\sigma_{K_1}R(K_1)}{\sigma_{K_2}R(K_2)}\right)^{1/5}. -which leads to the simplified expression: +Scott (see [scott2015]_ table 6.2 page 152) notices that: .. math:: - :label: SilvermanNormalKernelSimplif + \frac{\sigma_{K_1}R(K_1)}{\sigma_{K_2}R(K_2)} \in [1, 1.86] + +for many pairs of common kernels. +Hence the equation :eq:`ChangeBandwidth` implies the *equivalent +kernel rescaling equation* (see [scott2015]_ eq. 6.30 page 154): - \vect{h}^{Silver}(N) \simeq \left( \hat{\sigma}_i^n n^{-1/(d+4)}\right)_i +.. math:: + :label: SimplifiedChangeBandwidth -Furthermore, in the general case, we have from relation (\ref{AMISE1}) : + h_{\operatorname{AMISE}}(K_1) \approx h_{\operatorname{AMISE}}(K_2)\frac{\sigma_{K_2}}{\sigma_{K_1}} + +Consider for example the normal kernel :math:`K_2 = \mathcal{N}(0,1)`. +Since :math:`\sigma_{K_1} = \sigma_{\mathcal{N}(0,1)} = 1`, +then equation :eq:`SimplifiedChangeBandwidth` implies: .. 
math:: - :label: ChangeBandwidth + :label: SimplifiedChangeBandwidthNormal - \frac{h_{AMISE}(K_1)}{h_{AMISE}(K_2)}=\frac{\sigma_{K_2}}{\sigma_{K_1}}\left[\frac{\sigma_{K_1}R(K_1)}{\sigma_{K_2}R(K_2)}\right]^{1/5} + h_{\operatorname{AMISE}}(K) \approx h_{\operatorname{AMISE}}(\mathcal{N})\frac{1}{\sigma_{K}} -Considering that :math:`\sigma_{K}R(K) \simeq 1` whatever the kernel *K*, relation :eq:`ChangeBandwidth` simplifies in: +We will use the previous equation in the derivation of the +*mixed* rule presented in the next section. +The previous equation applied to the Silverman rule implies: .. math:: - :label: SimplifiedChangeBandwidth + :label: SimplifiedChangeBandwidthSilvNormal + + h^{Silver}(K) \approx h^{Silver}(\mathcal{N})\frac{1}{\sigma_{K}} + +A mixed rule for a large sample +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - h_{AMISE}(K_1) \simeq h_{AMISE}(K_2)\frac{\sigma_{K_2}}{\sigma_{K_1}} +When the sample size :math:`n` is large, the *solve-the-equation* (STE) +rule cannot be applied because of its CPU cost. +In this case, we use a method which combines the STE rule +and the Silverman rule, which is less costly. +Moreover, we combine these rules using two different kernels, namely +the kernel *K* given by the user and the normal kernel :math:`\mathcal{N}(0, 1)`. +Finally, we combine two different samples, that is to say the +large sample size :math:`n` and a smaller sample size for which the +STE rule can be evaluated. -If we consider the normal kernel :math:`N(0,1)` for :math:`K_2`, then relation :eq:`SimplifiedChangeBandwidth` writes in a more general notation: +The equation :eq:`AMISE` implies that: .. math:: - :label: SimplifiedChangeBandwidthNormal + \frac{h_{\operatorname{AMISE}}(K)}{h^{Silver}(K)} + = \left(\frac{\Psi_4(\mathcal{N})}{\Psi_4(p)}\right)^{1/5} + +where *K* is a given kernel and :math:`h_{\operatorname{AMISE}}(K)` is the +optimal AMISE bandwidth for the kernel *K*. +We notice that the previous ratio is independent from the sample +size :math:`n`. 
+Let :math:`n_t \ll n` be a small sample size. +Hence, the ratio is the same if we consider the sample size :math:`n` +or the sample size :math:`n_t`. +We apply this equation to the normal kernel, approximate the +AMISE optimal bandwidth by the STE rule and use the sample +sizes :math:`n` and :math:`n_t`. +We get: - h_{AMISE}(K) \simeq h_{AMISE}(N)\frac{1}{\sigma_{K}} +.. math:: + \frac{h^{n, STE}(\mathcal{N})}{h^{n, Silver}(\mathcal{N})} + \approx \frac{h^{n_t, STE}(\mathcal{N})}{h^{n_t, Silver}(\mathcal{N})} -If :math:`h_{AMISE}(N)` is evaluated with the Silverman rule, :eq:`SimplifiedChangeBandwidthNormal` rewrites: +which implies: .. math:: - :label: SimplifiedChangeBandwidthSilvNormal + h^{n, STE}(\mathcal{N}) + \approx \frac{h^{n_t, STE}(\mathcal{N})}{h^{n_t, Silver}(\mathcal{N})} + h^{n, Silver}(\mathcal{N}) - h^{Silver}(K) \simeq h^{Silver}(N)\frac{1}{\sigma_{K}} +The equation :eq:`SimplifiedChangeBandwidthNormal` leads to the +bandwidth of the STE rule for the kernel *K*: -At last, from relation :eq:`SilvermanNormalKernelSimplif` and :eq:`SimplifiedChangeBandwidthSilvNormal` -applied in each direction *i*, we deduce the Scott rule: +.. math:: + h^{n, STE}(K) + \approx h^{n, STE}(\mathcal{N}) \frac{1}{\sigma_{K}}. + +We substitute the expression of :math:`h^{n, STE}` in the +previous equation and get the *mixed* rule: + +.. math:: + :label: MixedBandwidthRule + + h^{n, STE}(K) + \approx \frac{h^{n_t, STE}(\mathcal{N})}{h^{n_t, Silver}(\mathcal{N})} + h^{n, Silver}(\mathcal{N}) \frac{1}{\sigma_{K}}. + +Scott rule (dimension d) +~~~~~~~~~~~~~~~~~~~~~~~~ + +In this section, we consider the general case where the +random vector has dimension :math:`d`. +The Scott rule is a simplification of the Silverman rule generalized to the +dimension *d* which is optimal when the density *p* is normal with independent components. +In all the other cases, it gives an empirical rule that gives good result when the density *p* is not *far* from the normal one. 
+For example, the Scott bandwidth may appear too large when *p* presents several maxima.
+
+The Silverman rule given in dimension 1 in equation :eq:`Silverman` can be generalized in dimension *d* as follows.
+We make the assumption that the density *p* is normal with independent components,
+in dimension *d* and that we use the normal kernel :math:`\mathcal{N}(0,1)`
+to estimate it.
+Therefore the optimal bandwidth vector :math:`\vect{h}` with respect to the
+AMISE criterion is given by the *normal reference rule* (see [scott2015]_ eq.
+6.43 page 164):
+
+.. math::
+    :label: SilvermanNormalKernel
+
+    \vect{h}^{Silver}(\mathcal{N}) = \left(\left(\frac{4}{d+2}\right)^{1 / (d + 4)}\hat{\sigma}_i^n n^{-1 / (d + 4)}\right)_i
+
+where :math:`\hat{\sigma}_i^n` is the standard deviation of the *i*-th component of the sample
+:math:`(\vect{X}_1, \cdots, \vect{X}_n)`, and :math:`\sigma_K` the standard deviation of the 1D kernel *K*.
+
+Scott's method is a simplification of Silverman's rule, based on the fact that the coefficient
+:math:`\left(\frac{4}{d+2}\right)^{1 / (d + 4)}` remains in :math:`[0.924, 1.059]` when the dimension *d* varies (see [scott2015]_ page 164).
+Thus, Scott fixed it to *1*:
+
+.. math::
+    :label: coefficientScott
+
+    \left(\frac{4}{d+2}\right)^{1 / (d + 4)} \approx 1.
+
+This leads to Scott's rule (see [scott2015]_ eq. 6.44 page 164):
+
+.. math::
+    :label: SilvermanNormalKernelSimplif
+
+    \vect{h}^{Silver}(\mathcal{N}) \approx \left( \hat{\sigma}_i^n n^{-1 / (d + 4)}\right)_i
+
+Finally, the equations :eq:`SilvermanNormalKernelSimplif` and :eq:`SimplifiedChangeBandwidthSilvNormal`
+applied in each direction *i* imply:
 
 .. math::
     :label: ScottRule
 
-    \boldsymbol{\vect{h}^{Scott} = \left(\frac{\hat{\sigma}_i^n}{\sigma_K}n^{-1/(d+4)}\right)_i}
+    \vect{h}^{Scott}
+    = \left(\frac{\hat{\sigma}_i^n}{\sigma_K}n^{-1 / (d + 4)}\right)_i
+
+for :math:`i = 1, ..., d`.
Boundary treatment
~~~~~~~~~~~~~~~~~~
 
-In dimension 1, the boundary effects may be taken into account:
+In this section, we consider a random variable i.e. :math:`d = 1`.
+Assume that the domain of definition of the density is bounded.
+Then one of the problems of kernel smoothing is that it may
+produce a non zero density estimate even in the regions where
+we know it is zero.
+This is known as the *boundary bias problem* (see [silverman1986]_ page 29).
+The reason is that a subpart of the kernel window does not contain
+any observation ([wand1994]_ page 127).
+In this case, for some observation :math:`x_i` near the boundary,
+the density may be underestimated if the kernel sets a positive weight
+outside the domain ([chacon2018]_ page 73).
+
+There are several methods to solve this problem.
+One of the methods is to apply a transformation to the data
+(see [chacon2018]_ page 73).
+Another method is to use boundary kernels (see [chacon2018]_ page 76,
+[scott2015]_ page 157).
+
+In dimension 1, the boundary effects may be taken into account using
+a *reflection* or *mirroring* method (see [silverman1986]_ page 31).
 the boundaries are automatically detected from the sample
 (with the *min* and *max* functions) and the kernel smoothed PDF
 is corrected in the boundary areas to remain within the boundaries,
@@ -315,6 +630,52 @@ according to the mirroring technique:
 
 - this last kernel smoothed PDF is truncated within the initial range :math:`[min, max]` (conditional PDF).
 
+Conclusion
+~~~~~~~~~~
+The next table presents a summary of histogram, kernel smoothing and
+parametric methods.
+It appears that the kernel density estimator has an AMISE error which is
+quite close to the parametric rate.
+
++------------------+-----------------------------------------+
+| Method           | Optimal :math:`\operatorname{AMISE}`    |
++==================+=========================================+
+| Histogram        | :math:`\propto n^{-\frac{2}{3}}`        |
++------------------+-----------------------------------------+
+| Kernel smoothing | :math:`\propto n^{-\frac{4}{5}}`        |
++------------------+-----------------------------------------+
+| Parametric       | :math:`\propto n^{-1}`                  |
++------------------+-----------------------------------------+
+
+**Table 2.** The AMISE error depending on the method to estimate the density,
+from the least to the most accurate.
+
+The next table compares the different estimators of the
+bandwidth that we have presented so far.
+The best method is the STE rule, but this can be
+costly to evaluate if the sample is large.
+In this case the *mixed* rule can be used.
+If this rule is still too costly, then the Silverman rule can be
+used and might be satisfactory if the true density *p*
+is not too far away from the normal distribution (i.e.
+unimodal and symmetric).
+Otherwise, the Silverman rule may produce a too large bandwidth,
+leading to oversmoothing.
+
++--------------------------+----------------------+---------------+----------------------------+
+| Method                   | Assumption           | Cost          | Accuracy                   |
++==========================+======================+===============+============================+
+| Silverman                | Normal assumption    | Low           | If *p* not far from normal |
++--------------------------+----------------------+---------------+----------------------------+
+| Mixed                    | Normal assumption    | Moderate      | Intermediate               |
++--------------------------+----------------------+---------------+----------------------------+
+| Solve-the-equation (STE) | Normal assumption    | High          | Accurate                   |
++--------------------------+----------------------+---------------+----------------------------+
+
+**Table 3.** Different estimators of the bandwidth from the least to the
+most accurate.
+ + .. topic:: API: - See the :class:`~openturns.KernelSmoothing` factory @@ -325,8 +686,10 @@ according to the mirroring technique: .. topic:: References: - - *Kernel smoothing*, M.P. Wand and M.C. Jones, Chapman & Hall/CRC edition, ISNB 0-412-55270-1, 1994. - - *Multivariate Density Estimation, practice and Visualization, Theory*, David W. Scott, Wiley edition, 1992. - - *A reliable data-based bandwidth selection method for kernel density estimation.*, S. J. Sheather and M. C. Jones, Journal of the Royal Statistical Society. Series B (Methodological), 53(3) :683–690, 1991. - - "Comparison of data-driven bandwidth selectors.", Byeong U. Park and J. S. Marron. Journal of the American Statistical Association, 85(409) :66–72, 1990. - - "Very Fast optimal bandwidth selection for univariate kernel density estimation", Vikas Chandrakant Raykar, Ramani Duraiswami, CS-TR-4774. University of Maryland, CollegePark, MD 20783, 2006 + - [silverman1986]_ + - [wand1994]_ + - [scott2015]_ + - [sheather1991]_ + - [park1990]_ + - [raykar2006]_ + - [silverman1982]_ diff --git a/python/src/HistogramFactory_doc.i.in b/python/src/HistogramFactory_doc.i.in index 8e7b9f72e6e..e84c0369f85 100644 --- a/python/src/HistogramFactory_doc.i.in +++ b/python/src/HistogramFactory_doc.i.in @@ -7,7 +7,7 @@ Available constructor: Notes ----- -The range is :math:`[min(data), max(data)]`. +The range is :math:`[\min(data), \max(data)]`. See the `computeBandwidth` method for the bandwidth selection. @@ -118,26 +118,20 @@ sample: .. math:: - \begin{aligned} Q_3 = q_n(0.75), \qquad Q_1 = q_n(0.25), - \end{aligned} and let :math:`IQR` be the inter-quartiles range: .. math:: - \begin{aligned} IQR = Q_3 - Q_1. - \end{aligned} In this case, the bandwidth is the robust estimator of the AMISE-optimal bandwidth, known as Freedman and Diaconis rule [freedman1981]_: .. 
math:: - \begin{aligned} h = \frac{IQR}{2\Phi^{-1}(0.75)} \left(\frac{24 \sqrt{\pi}}{n}\right)^{\frac{1}{3}} - \end{aligned} where :math:`\Phi^{-1}` is the quantile function of the gaussian standard distribution. @@ -147,13 +141,11 @@ The normalized inter-quartile range is a robust estimator of the scale of the distribution (see [wand1994]_, page 60). When `useQuantile` is `False`, the bandwidth is the AMISE-optimal one, -known as Scott's rule: +known as Scott's rule (see [scott2015]_ eq. 3.16 page 59): .. math:: - \begin{aligned} h = \sigma_n \left(\frac{24 \sqrt{\pi}}{n}\right)^{\frac{1}{3}} - \end{aligned} where :math:`\sigma_n^2` is the unbiaised variance of the data. This estimator is optimal for the gaussian distribution (see [scott1992]_). diff --git a/python/src/KernelSmoothing_doc.i.in b/python/src/KernelSmoothing_doc.i.in index c3ba32d5f6a..ed2ff6a5055 100644 --- a/python/src/KernelSmoothing_doc.i.in +++ b/python/src/KernelSmoothing_doc.i.in @@ -19,7 +19,9 @@ Notes ----- The binning mechanism creates a regular grid of *binNumber* intervals in each dimension, then the unit weight of each point is linearly affected to the vertices -of the bin containing the point. See [wand1994]_ for the details. +of the bin containing the point (see [wand1994]_ appendix D, page 182). +The `KernelSmoothing-BinNumber` key defines the default value of the +number of bins used in the _binning_ algorithm to improve the evaluation speed. The boundary correction is available only in one dimension, and it is done using the mirroring technique. See the notes of the :meth:`setBoundingOption` method for @@ -346,7 +348,13 @@ automaticUpperBound : bool Returns ------- bandwidth : :class:`~openturns.Point` - Bandwidth which components are evaluated according to the Silverman rule assuming a normal distribution. The bandwidth uses a robust estimate of the sample standard deviation, based on the interquartile range (rather than the standard deviation of the distribution and the sample). 
This method can manage a multivariate sample and produces a multivariate bandwidth. + Bandwidth which components are evaluated according to the Silverman rule + assuming a normal distribution. + The bandwidth uses a robust estimate of the + sample standard deviation, based on the interquartile range introduced + in :ref:`kernel_smoothing` (rather than the sample standard deviation). + This method can manage a multivariate sample and produces a + multivariate bandwidth. " // --------------------------------------------------------------------- @@ -360,18 +368,39 @@ bandwidth : :class:`~openturns.Point` Notes ----- -This plug-in method is based on the solve-the-equation rule from (Sheather, Jones, 1991). +This plug-in method is based on the *solve-the-equation* rule from [sheather1991]_. This method can take a lot of time for large samples, as the cost is quadratic with the sample size. -The `KernelSmoothing-CutOffPlugin` key of the :class:`~openturns.ResourceMap` controls +Several keys of the :class:`~openturns.ResourceMap` are used by the [sheather1991]_ method. + +- The key `KernelSmoothing-AbsolutePrecision` is used in the Sheather-Jones algorithm + to estimate the bandwidth. + It defines the absolute tolerance used by the solver + to solve the nonlinear equation. + +- The `KernelSmoothing-MaximumIteration` key defines the maximum number of iterations + used by the solver. + +- The `KernelSmoothing-RelativePrecision` key defines the relative tolerance. + +- The `KernelSmoothing-AbsolutePrecision` key defines the absolute tolerance. + +- The `KernelSmoothing-ResidualPrecision` key defines the absolute + tolerance on the residual. + +- The `KernelSmoothing-CutOffPlugin` key is the cut-off value + introduced in :ref:`kernel_smoothing`. + +More precisely, the `KernelSmoothing-CutOffPlugin` key of the :class:`~openturns.ResourceMap` controls the accuracy of the approximation used to estimate the rugosity of the second derivative of the distribution. 
The default value ensures that terms in the sum which weight are lower than -4.e-6 are ignored, which can reduce the calculation in some situations. +:math:`4 \times 10^{-6}` are ignored, which can reduce the calculation in some situations. The properties of the standard gaussian density are so that, in order to make the computation exact, the value of the `KernelSmoothing-CutOffPlugin` must be set to 39, but this may increase the -computation time." +computation time. +" // --------------------------------------------------------------------- %feature("docstring") OT::KernelSmoothing::computeMixedBandwidth @@ -383,6 +412,18 @@ bandwidth : :class:`~openturns.Point` Bandwidth which components are evaluated according to a mixed rule. Notes ------ -Use the plugin rule for small sample, estimate the ratio between the plugin rule and the Silverman rule on this small sample, and finally scale the Silverman bandwidth computed on the full sample with this ratio. The size of the small sample is based on the `KernelSmoothing-SmallSize` key of the `ResourceMap`. +----- +This method uses the *mixed* rule introduced in :ref:`kernel_smoothing`. +Its goal is to provide an accurate estimator of the bandwidth +when the sample size is large. + +Let :math:`n` be the sample size. +The estimator depends on the threshold sample size :math:`n_t` defined in the +`KernelSmoothing-SmallSize` key of the :class:`~openturns.ResourceMap`. + + +- If :math:`n \leq n_t`, i.e. for a small sample, we use the plugin solve-the-equation + method. + +- Otherwise, the *mixed* rule is used. 
" From 093179f02f4f7cd9d27bca5db5fe410c24989b4e Mon Sep 17 00:00:00 2001 From: Julien Schueller Date: Tue, 27 Jun 2023 10:30:01 +0200 Subject: [PATCH 02/31] CMake: Rename dsfmt __BIG_ENDIAN__ macro Closes #2347 --- CMakeLists.txt | 6 +++--- lib/include/OTconfig.h.in | 4 +--- lib/src/Base/Stat/dsfmt.cxx | 2 +- lib/src/Base/Stat/dsfmt.h | 4 ++-- 4 files changed, 7 insertions(+), 9 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 76082e3095d..bf108b37af4 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -426,10 +426,10 @@ endif () if (CMAKE_VERSION VERSION_LESS 3.20) include (TestBigEndian) - test_big_endian (__BIG_ENDIAN__) + test_big_endian (DSFMT_BIG_ENDIAN) else () - if ("${CMAKE_CXX_BYTE_ORDER}" STREQUAL "BIG_ENDIAN") - set (__BIG_ENDIAN__ 1) + if (CMAKE_CXX_BYTE_ORDER STREQUAL "BIG_ENDIAN") + set (DSFMT_BIG_ENDIAN 1) endif () endif () diff --git a/lib/include/OTconfig.h.in b/lib/include/OTconfig.h.in index 19d73ce343b..a5740c283b1 100644 --- a/lib/include/OTconfig.h.in +++ b/lib/include/OTconfig.h.in @@ -195,9 +195,7 @@ #cmakedefine size_t /* Define to 1 if big endian system */ -#ifndef __BIG_ENDIAN__ -#cmakedefine __BIG_ENDIAN__ -#endif +#cmakedefine DSFMT_BIG_ENDIAN /* Define to 1 if UnsignedInteger and uint64_t are the same type */ #cmakedefine OPENTURNS_UNSIGNEDLONG_SAME_AS_UINT64 diff --git a/lib/src/Base/Stat/dsfmt.cxx b/lib/src/Base/Stat/dsfmt.cxx index b35ab9fd84e..7a3be17dbf2 100644 --- a/lib/src/Base/Stat/dsfmt.cxx +++ b/lib/src/Base/Stat/dsfmt.cxx @@ -66,7 +66,7 @@ inline void twist(uint64v2_t& l, uint64v2_t& r, inline int idx(int i) { -#ifdef __BIG_ENDIAN__ +#ifdef DSFMT_BIG_ENDIAN return i ^ 1; #else return i; diff --git a/lib/src/Base/Stat/dsfmt.h b/lib/src/Base/Stat/dsfmt.h index 29f909d02e9..6932779c203 100644 --- a/lib/src/Base/Stat/dsfmt.h +++ b/lib/src/Base/Stat/dsfmt.h @@ -79,7 +79,7 @@ namespace tutils { uint32_t words[4]; memcpy(&words[0], &u_[i], 16); -#ifdef __BIG_ENDIAN__ +#ifdef DSFMT_BIG_ENDIAN state[4 * i ] = 
words[1]; state[4 * i + 1] = words[0]; state[4 * i + 2] = words[3]; @@ -99,7 +99,7 @@ namespace tutils for (OT::UnsignedInteger i = 0; i <= (uint32_t)(N); i++) { uint32_t words[4]; -#ifdef __BIG_ENDIAN__ +#ifdef DSFMT_BIG_ENDIAN words[1] = (uint32_t)(state[4 * i ]); words[0] = (uint32_t)(state[4 * i + 1]); words[3] = (uint32_t)(state[4 * i + 2]); From a208a4b9cf700ed10d214b83bb1424a74cda1744 Mon Sep 17 00:00:00 2001 From: Julien Schueller Date: Tue, 27 Jun 2023 15:32:54 +0200 Subject: [PATCH 03/31] RandomGenerator: Cleanup --- lib/src/Base/Stat/RandomGenerator.cxx | 72 +++++++++---------- .../Base/Stat/openturns/RandomGenerator.hxx | 6 +- 2 files changed, 37 insertions(+), 41 deletions(-) diff --git a/lib/src/Base/Stat/RandomGenerator.cxx b/lib/src/Base/Stat/RandomGenerator.cxx index 00ac3404cec..d7364f511c9 100644 --- a/lib/src/Base/Stat/RandomGenerator.cxx +++ b/lib/src/Base/Stat/RandomGenerator.cxx @@ -37,9 +37,9 @@ class MersenneTwister : public tutils::dsfmt19937 -Bool RandomGenerator::IsInitialized = false; +Bool RandomGenerator::IsInitialized_ = false; -MersenneTwister RandomGenerator::Generator(ResourceMap::GetAsUnsignedInteger( "RandomGenerator-InitialSeed" )); +MersenneTwister RandomGenerator::Generator_(ResourceMap::GetAsUnsignedInteger("RandomGenerator-InitialSeed")); /* Sub-classes methods */ @@ -55,71 +55,69 @@ RandomGenerator::RandomGenerator() /* Seed accessor */ void RandomGenerator::SetSeed(const UnsignedInteger seed) { - Generator.init((uint32_t)(seed)); - IsInitialized = true; + Generator_.init((uint32_t)(seed)); + IsInitialized_ = true; } /* State accessor */ void RandomGenerator::SetState(const RandomGeneratorState & state) { - UnsignedInteger size = state.buffer_.getSize(); - UnsignedInteger stateSize = Generator.get_state_length_32(); + const UnsignedInteger size = state.buffer_.getSize(); + const UnsignedInteger stateSize = Generator_.get_state_length_32(); /* The unusual case, the given seed is too small. 
It is completed with 0 */ Indices stateArray(state.buffer_); - for (UnsignedInteger i = size; i < stateSize; i++) stateArray.add(0); + for (UnsignedInteger i = size; i < stateSize; ++i) + stateArray.add(0); // Set the state array - Generator.set_state(&stateArray[0]); + Generator_.set_state(&stateArray[0]); // Set the index - Generator.set_index(state.index_); - IsInitialized = true; + Generator_.set_index(state.index_); + IsInitialized_ = true; return; } /* Seed accessor */ RandomGeneratorState RandomGenerator::GetState() { - UnsignedInteger size = (UnsignedInteger)(Generator.get_state_length_32()); + const UnsignedInteger size = (UnsignedInteger)(Generator_.get_state_length_32()); // Create the state and get the index at the same time - RandomGeneratorState state(Indices(size, 0), (UnsignedInteger)(Generator.get_index())); + RandomGeneratorState state(Indices(size, 0), (UnsignedInteger)(Generator_.get_index())); // Get the state array - Generator.get_state(&state.buffer_[0]); + Generator_.get_state(&state.buffer_[0]); return state; } -/* Generate a pseudo-random number uniformly distributed over ]0, 1[ */ -Scalar RandomGenerator::Generate() +void RandomGenerator::Initialize() { - if (!IsInitialized) + if (!IsInitialized_) { - SetSeed(ResourceMap::GetAsUnsignedInteger( "RandomGenerator-InitialSeed" )); - IsInitialized = true; + SetSeed(ResourceMap::GetAsUnsignedInteger("RandomGenerator-InitialSeed")); + IsInitialized_ = true; } - return Generator.gen(); +} + +/* Generate a pseudo-random number uniformly distributed over ]0, 1[ */ +Scalar RandomGenerator::Generate() +{ + Initialize(); + return Generator_.gen(); } /* Generate a pseudo-random integer uniformly distributed over [[0,...,n-1]] */ UnsignedInteger RandomGenerator::IntegerGenerate(const UnsignedInteger n) { - if (!IsInitialized) - { - SetSeed(ResourceMap::GetAsUnsignedInteger( "RandomGenerator-InitialSeed" )); - IsInitialized = true; - } - return Generator.igen((uint32_t)(n)); + Initialize(); + return 
Generator_.igen((uint32_t)(n)); } /* Generate a pseudo-random vector of numbers uniformly distributed over ]0, 1[ */ Point RandomGenerator::Generate(const UnsignedInteger size) { Point result(size); - if (!IsInitialized) - { - SetSeed(ResourceMap::GetAsUnsignedInteger( "RandomGenerator-InitialSeed" )); - IsInitialized = true; - } - for (UnsignedInteger i = 0; i < size; i++) + Initialize(); + for (UnsignedInteger i = 0; i < size; ++ i) { - result[i] = Generator.gen(); + result[i] = Generator_.gen(); } return result; } @@ -128,14 +126,10 @@ Point RandomGenerator::Generate(const UnsignedInteger size) RandomGenerator::UnsignedIntegerCollection RandomGenerator::IntegerGenerate(const UnsignedInteger size, const UnsignedInteger n) { UnsignedIntegerCollection result(size); - if (!IsInitialized) - { - SetSeed(ResourceMap::GetAsUnsignedInteger( "RandomGenerator-InitialSeed" )); - IsInitialized = true; - } - for (UnsignedInteger i = 0; i < size; i++) + Initialize(); + for (UnsignedInteger i = 0; i < size; ++ i) { - result[i] = Generator.igen((uint32_t)(n)); + result[i] = Generator_.igen((uint32_t)(n)); } return result; } diff --git a/lib/src/Base/Stat/openturns/RandomGenerator.hxx b/lib/src/Base/Stat/openturns/RandomGenerator.hxx index ac6a84dda5d..ee1ef605d4a 100644 --- a/lib/src/Base/Stat/openturns/RandomGenerator.hxx +++ b/lib/src/Base/Stat/openturns/RandomGenerator.hxx @@ -65,8 +65,10 @@ private: /** Default constructor */ RandomGenerator(); - static Bool IsInitialized; - static MersenneTwister Generator; + static void Initialize(); + + static Bool IsInitialized_; + static MersenneTwister Generator_; }; /* class RandomGenerator */ From 9622d0bce3ad112dc9294911448ea22d80c45058 Mon Sep 17 00:00:00 2001 From: Julien Schueller Date: Thu, 29 Jun 2023 14:17:04 +0200 Subject: [PATCH 04/31] CMake: Bump required Python version --- CMakeLists.txt | 4 ++-- python/doc/developer_guide/architecture.rst | 2 +- python/src/METADATA.in | 19 ------------------- 3 files changed, 3 
insertions(+), 22 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index bf108b37af4..cb13010dd0b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -604,9 +604,9 @@ if (BUILD_PYTHON) endif () if (CMAKE_VERSION VERSION_LESS 3.24) - find_package (Python 3.5 COMPONENTS Interpreter Development) + find_package (Python 3.6 COMPONENTS Interpreter Development) else () - find_package (Python 3.5 COMPONENTS Interpreter Development.Module) + find_package (Python 3.6 COMPONENTS Interpreter Development.Module) endif () if (NOT TARGET Python::Module) diff --git a/python/doc/developer_guide/architecture.rst b/python/doc/developer_guide/architecture.rst index a197568c145..efd0fa1200e 100644 --- a/python/doc/developer_guide/architecture.rst +++ b/python/doc/developer_guide/architecture.rst @@ -233,7 +233,7 @@ The tools chosen for the development of the platform are: +---------------------------------------+---------------------------------------------------------------------------------+-------------------+ | Multithreading (optional) | `TBB `_ | 2017 | +---------------------------------------+---------------------------------------------------------------------------------+-------------------+ -| Python support | `Python `_ | 3.5 | +| Python support | `Python `_ | 3.6 | +---------------------------------------+---------------------------------------------------------------------------------+-------------------+ | Plotting library (optional) | `Matplotlib `_ | 1.3.1 | +---------------------------------------+---------------------------------------------------------------------------------+-------------------+ diff --git a/python/src/METADATA.in b/python/src/METADATA.in index 9aa0fbbfea1..de3cf2ab8be 100644 --- a/python/src/METADATA.in +++ b/python/src/METADATA.in @@ -10,27 +10,8 @@ Keywords: probability reliability sensitivity metamodel Platform: any Classifier: Development Status :: 5 - Production/Stable Classifier: License :: OSI Approved :: GNU Lesser General Public 
License v3 or later (LGPLv3+) -Classifier: Operating System :: Microsoft :: Windows -Classifier: Operating System :: MacOS :: MacOS X -Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: C++ -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.5 -Classifier: Programming Language :: Python :: 3.6 -Classifier: Programming Language :: Python :: 3.7 -Classifier: Programming Language :: Python :: 3.8 -Classifier: Programming Language :: Python :: 3.9 -Classifier: Programming Language :: Python :: 3.10 -Classifier: Programming Language :: Python :: 3.11 -Classifier: Topic :: Scientific/Engineering :: Information Analysis Classifier: Topic :: Scientific/Engineering :: Mathematics -Classifier: Topic :: Scientific/Engineering :: Physics -Classifier: Topic :: Software Development :: Libraries -Classifier: Topic :: Software Development :: Libraries :: Python Modules -Classifier: Intended Audience :: Developers -Classifier: Intended Audience :: Financial and Insurance Industry -Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: Science/Research Requires-Dist: dill Requires-Dist: psutil From 519df3836b080dbca6ea735e71575439f392cef6 Mon Sep 17 00:00:00 2001 From: Julien Schueller Date: Mon, 3 Jul 2023 08:41:06 +0200 Subject: [PATCH 05/31] CMake: Use CMAKE_INSTALL_DOCDIR --- .circleci/run_docker_linux.sh | 2 +- CMakeLists.txt | 4 ++-- distro/debian/not-installed | 2 +- distro/rpm/openturns.spec | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.circleci/run_docker_linux.sh b/.circleci/run_docker_linux.sh index 5b909beb524..8d62c076afe 100755 --- a/.circleci/run_docker_linux.sh +++ b/.circleci/run_docker_linux.sh @@ -33,7 +33,7 @@ cmake -DCMAKE_INSTALL_PREFIX=~/.local \ make install if test -n "${uid}" -a -n "${gid}" then - cp -r ~/.local/share/openturns/doc/html . 
+ cp -r ~/.local/share/doc/openturns/html . zip -r openturns-doc.zip html/* sudo chown ${uid}:${gid} openturns-doc.zip && sudo cp openturns-doc.zip ${source_dir} fi diff --git a/CMakeLists.txt b/CMakeLists.txt index cb13010dd0b..ce36aade390 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -7,7 +7,7 @@ set(CMAKE_BUILD_TYPE Release CACHE STRING "Build type") set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "" Release Debug RelWithDebInfo MinSizeRel) set (CMAKE_TRY_COMPILE_CONFIGURATION ${CMAKE_BUILD_TYPE}) -project (OpenTURNS) +project (openturns) option (USE_BISON "Looks for Bison if true and then build parser" ON) option (USE_FLEX "Looks for Flex if true and then build lexer" ON) @@ -107,7 +107,7 @@ set (OPENTURNS_INCLUDE_PATH ${CMAKE_INSTALL_INCLUDEDIR} CACHE PATH "The director set (OPENTURNS_CONFIG_CMAKE_PATH ${CMAKE_INSTALL_LIBDIR}/cmake/openturns CACHE PATH "The directory where the CMake files are installed") set (OPENTURNS_SYSCONFIG_PATH ${CMAKE_INSTALL_SYSCONFDIR} CACHE PATH "The directory where the configuration file is installed") set (OPENTURNS_DATA_PATH ${CMAKE_INSTALL_DATAROOTDIR} CACHE PATH "The directory where the common files are installed") -set (OPENTURNS_DOC_PATH ${CMAKE_INSTALL_DATAROOTDIR}/openturns/doc CACHE PATH "The directory where the license files are installed") +set (OPENTURNS_DOC_PATH ${CMAKE_INSTALL_DOCDIR} CACHE PATH "The directory where the license files are installed") if (WIN32) set (DEFAULT_TMP TEMP) diff --git a/distro/debian/not-installed b/distro/debian/not-installed index 296f88883bf..4b388c6b451 100644 --- a/distro/debian/not-installed +++ b/distro/debian/not-installed @@ -1,4 +1,4 @@ usr/lib/python*/*-packages/openturns/*/*.pyc usr/lib/python*/*-packages/openturns/usecases/*/*.pyc usr/share/gdb/auto-load/usr/lib/*/libOT.so*gdb.py -usr/share/openturns/doc/COPYING* +usr/share/doc/openturns/COPYING* diff --git a/distro/rpm/openturns.spec b/distro/rpm/openturns.spec index 64bbc9cdc1d..fb6315fceb1 100644 --- 
a/distro/rpm/openturns.spec +++ b/distro/rpm/openturns.spec @@ -106,7 +106,7 @@ make %install make install DESTDIR=%{buildroot} -rm -r %{buildroot}%{_datadir}/%{name}/doc +rm -r %{buildroot}%{_datadir}/doc/%{name} %check LD_LIBRARY_PATH=%{buildroot}%{_libdir} OPENTURNS_NUM_THREADS=1 ctest --output-on-failure %{?_smp_mflags} -E "cppcheck|ChaosSobol|Kriging" --timeout 1000 --schedule-random || echo "fail" From e1f9f9d08b979cf0bb9f0b205b650df7723ae3f0 Mon Sep 17 00:00:00 2001 From: Julien Schueller Date: Mon, 3 Jul 2023 15:15:55 +0200 Subject: [PATCH 06/31] Sample: Missing array ctor doc --- python/src/Sample_doc.i.in | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/python/src/Sample_doc.i.in b/python/src/Sample_doc.i.in index 9eaab34c0ac..6e102e7a07f 100644 --- a/python/src/Sample_doc.i.in +++ b/python/src/Sample_doc.i.in @@ -2,6 +2,8 @@ "Sample of real vectors. Available constructors: + Sample(array) + Sample(*size, dim*) Sample(*size, point*) @@ -10,6 +12,8 @@ Available constructors: Parameters ---------- +array : 2-d sequence of float + The data size : int, :math:`m > 0`, optional The sample size. Default creates an empty sample with dimension 1. 
From 68ed0e4691cef54a23dafc366166a0f4ddb2cf4b Mon Sep 17 00:00:00 2001 From: Julien Schueller Date: Fri, 7 Jul 2023 14:39:55 +0200 Subject: [PATCH 07/31] Distribution: Fix comparison --- .../Func/openturns/EvaluationImplementation.hxx | 2 ++ .../Func/openturns/FieldFunctionImplementation.hxx | 2 ++ .../FieldToPointFunctionImplementation.hxx | 2 ++ .../Base/Func/openturns/FunctionImplementation.hxx | 2 ++ .../Base/Func/openturns/GradientImplementation.hxx | 2 ++ .../Base/Func/openturns/HessianImplementation.hxx | 2 ++ .../Model/openturns/ContinuousDistribution.hxx | 2 ++ .../Model/openturns/DiscreteDistribution.hxx | 2 ++ .../Model/openturns/DistributionImplementation.hxx | 2 ++ python/test/t_Distribution_std.py | 13 +++++++++++-- 10 files changed, 29 insertions(+), 2 deletions(-) diff --git a/lib/src/Base/Func/openturns/EvaluationImplementation.hxx b/lib/src/Base/Func/openturns/EvaluationImplementation.hxx index 32af8bdd2ba..6ee9cd3df7a 100644 --- a/lib/src/Base/Func/openturns/EvaluationImplementation.hxx +++ b/lib/src/Base/Func/openturns/EvaluationImplementation.hxx @@ -60,7 +60,9 @@ public: EvaluationImplementation * clone() const override; /** Comparison operator */ +protected: using PersistentObject::operator ==; +public: Bool operator ==(const EvaluationImplementation & other) const; /** String converter */ diff --git a/lib/src/Base/Func/openturns/FieldFunctionImplementation.hxx b/lib/src/Base/Func/openturns/FieldFunctionImplementation.hxx index 952c5d5b26c..5160f64da0b 100644 --- a/lib/src/Base/Func/openturns/FieldFunctionImplementation.hxx +++ b/lib/src/Base/Func/openturns/FieldFunctionImplementation.hxx @@ -60,7 +60,9 @@ public: FieldFunctionImplementation * clone() const override; /** Comparison operator */ +protected: using PersistentObject::operator ==; +public: Bool operator ==(const FieldFunctionImplementation & other) const; /** String converter */ diff --git a/lib/src/Base/Func/openturns/FieldToPointFunctionImplementation.hxx 
b/lib/src/Base/Func/openturns/FieldToPointFunctionImplementation.hxx index bf30df90520..3a9cdc52856 100644 --- a/lib/src/Base/Func/openturns/FieldToPointFunctionImplementation.hxx +++ b/lib/src/Base/Func/openturns/FieldToPointFunctionImplementation.hxx @@ -55,7 +55,9 @@ public: FieldToPointFunctionImplementation * clone() const override; /** Comparison operator */ +protected: using PersistentObject::operator ==; +public: Bool operator ==(const FieldToPointFunctionImplementation & other) const; /** String converter */ diff --git a/lib/src/Base/Func/openturns/FunctionImplementation.hxx b/lib/src/Base/Func/openturns/FunctionImplementation.hxx index f4839de7447..ef830ac32b2 100644 --- a/lib/src/Base/Func/openturns/FunctionImplementation.hxx +++ b/lib/src/Base/Func/openturns/FunctionImplementation.hxx @@ -80,7 +80,9 @@ public: FunctionImplementation * clone() const override; /** Comparison operator */ +protected: using PersistentObject::operator ==; +public: Bool operator ==(const FunctionImplementation & other) const; /** String converter */ diff --git a/lib/src/Base/Func/openturns/GradientImplementation.hxx b/lib/src/Base/Func/openturns/GradientImplementation.hxx index 187bfd64a4d..4253e1a07b1 100644 --- a/lib/src/Base/Func/openturns/GradientImplementation.hxx +++ b/lib/src/Base/Func/openturns/GradientImplementation.hxx @@ -58,7 +58,9 @@ public: GradientImplementation * clone() const override; /** Comparison operator */ +protected: using PersistentObject::operator ==; +public: Bool operator ==(const GradientImplementation & other) const; /** String converter */ diff --git a/lib/src/Base/Func/openturns/HessianImplementation.hxx b/lib/src/Base/Func/openturns/HessianImplementation.hxx index 4c0c0d8edd1..606fbc56432 100644 --- a/lib/src/Base/Func/openturns/HessianImplementation.hxx +++ b/lib/src/Base/Func/openturns/HessianImplementation.hxx @@ -55,7 +55,9 @@ public: HessianImplementation * clone() const override; /** Comparison operator */ +protected: using 
PersistentObject::operator ==; +public: Bool operator ==(const HessianImplementation & other) const; /** String converter */ diff --git a/lib/src/Uncertainty/Model/openturns/ContinuousDistribution.hxx b/lib/src/Uncertainty/Model/openturns/ContinuousDistribution.hxx index 91e998b4f14..2c551799124 100644 --- a/lib/src/Uncertainty/Model/openturns/ContinuousDistribution.hxx +++ b/lib/src/Uncertainty/Model/openturns/ContinuousDistribution.hxx @@ -44,7 +44,9 @@ public: ContinuousDistribution * clone() const override; /** Comparison operator */ +protected: using DistributionImplementation::operator ==; +public: Bool operator ==(const ContinuousDistribution & other) const; /** Get the PDF of the distribution */ diff --git a/lib/src/Uncertainty/Model/openturns/DiscreteDistribution.hxx b/lib/src/Uncertainty/Model/openturns/DiscreteDistribution.hxx index 7608e9470a1..f2409247bb9 100644 --- a/lib/src/Uncertainty/Model/openturns/DiscreteDistribution.hxx +++ b/lib/src/Uncertainty/Model/openturns/DiscreteDistribution.hxx @@ -46,7 +46,9 @@ public: DiscreteDistribution * clone() const override; /** Comparison operator */ +protected: using DistributionImplementation::operator ==; +public: Bool operator ==(const DiscreteDistribution & other) const; /** String converter */ diff --git a/lib/src/Uncertainty/Model/openturns/DistributionImplementation.hxx b/lib/src/Uncertainty/Model/openturns/DistributionImplementation.hxx index 0fc0b569ea2..1d385910095 100644 --- a/lib/src/Uncertainty/Model/openturns/DistributionImplementation.hxx +++ b/lib/src/Uncertainty/Model/openturns/DistributionImplementation.hxx @@ -69,7 +69,9 @@ public: DistributionImplementation(); /** Comparison operator */ +protected: using PersistentObject::operator ==; +public: Bool operator ==(const DistributionImplementation & other) const; protected: virtual Bool equals(const DistributionImplementation & other) const; diff --git a/python/test/t_Distribution_std.py b/python/test/t_Distribution_std.py index 
44372ae4cca..e518bff1e95 100755 --- a/python/test/t_Distribution_std.py +++ b/python/test/t_Distribution_std.py @@ -2,10 +2,19 @@ import openturns.testing as ott import math as m -ot.DistributionFactory.GetContinuousUniVariateFactories() -for factory in []: + +for factory in ot.DistributionFactory.GetContinuousUniVariateFactories(): distribution = factory.build() + # comparison + distribution2 = factory.build() + print(distribution, distribution2) + assert distribution == distribution2, "==" + assert not distribution != distribution2, "!=" + distribution3 = ot.Dirac(42.0) + assert distribution != distribution3, "==Dirac" + assert not distribution == distribution3, "!=Dirac" + # avoid flat pdfs if distribution.getName() == "Dirichlet": distribution = ot.Dirichlet([2, 6]) From f70a0f74e996c0e37e82e263a7e2e0dc9e7b29e8 Mon Sep 17 00:00:00 2001 From: Julien Schueller Date: Tue, 11 Jul 2023 13:25:00 +0200 Subject: [PATCH 08/31] MetaModelResult: Fix docstring --- python/src/MetaModelResult_doc.i.in | 2 -- 1 file changed, 2 deletions(-) diff --git a/python/src/MetaModelResult_doc.i.in b/python/src/MetaModelResult_doc.i.in index c06f1a42a1a..bd42f1a8a67 100644 --- a/python/src/MetaModelResult_doc.i.in +++ b/python/src/MetaModelResult_doc.i.in @@ -5,8 +5,6 @@ Parameters ---------- sampleX, sampleY : 2-d sequence of float Input/output samples -model : :class:`~openturns.Function` - Physical model approximated by a metamodel. metaModel : :class:`~openturns.Function` Definition of the response surface(s) of the model's output(s). 
residuals : sequence of float From f4d11b25c489acf2d7b2c6b31fd4eeb1dd67a62e Mon Sep 17 00:00:00 2001 From: Julien Schueller Date: Tue, 11 Jul 2023 16:26:01 +0200 Subject: [PATCH 09/31] Pagmo: Set iteration number to generation number --- ChangeLog | 1 + TODO | 1 + lib/src/Base/Optim/Pagmo.cxx | 55 +++++++++++-------- lib/src/Base/Optim/openturns/Pagmo.hxx | 5 +- .../optimization/plot_optimization_pagmo.py | 4 +- python/src/Pagmo_doc.i.in | 28 ++-------- python/test/t_Pagmo_std.py | 6 +- 7 files changed, 45 insertions(+), 55 deletions(-) diff --git a/ChangeLog b/ChangeLog index 935ebca9321..960ef6b9f32 100644 --- a/ChangeLog +++ b/ChangeLog @@ -15,6 +15,7 @@ * Removed deprecated EllipticalDistribution::getInverseCorrelation method * Removed deprecated Basis::getDimension method * Removed deprecated HSICStat ctors relying on weight matrix + * Deprecated Pagmo.setGenerationNumber for setMaximumIterationNumber === Documentation === diff --git a/TODO b/TODO index e69de29bb2d..5ae3d3edef7 100644 --- a/TODO +++ b/TODO @@ -0,0 +1 @@ +Remove Pagmo.setGenerationNumber diff --git a/lib/src/Base/Optim/Pagmo.cxx b/lib/src/Base/Optim/Pagmo.cxx index 0f89a6271f9..a30bc11e222 100644 --- a/lib/src/Base/Optim/Pagmo.cxx +++ b/lib/src/Base/Optim/Pagmo.cxx @@ -117,7 +117,7 @@ struct PagmoProblem // callbacks if (algorithm_->progressCallback_.first) { - algorithm_->progressCallback_.first((100.0 * evaluationNumber_) / (algorithm_->getStartingSample().getSize() * algorithm_->getGenerationNumber()), algorithm_->progressCallback_.second); + algorithm_->progressCallback_.first((100.0 * evaluationNumber_) / (algorithm_->getStartingSample().getSize() * algorithm_->getMaximumIterationNumber()), algorithm_->progressCallback_.second); } if (algorithm_->stopCallback_.first) { @@ -202,7 +202,7 @@ struct PagmoProblem // callbacks if (algorithm_->progressCallback_.first) { - algorithm_->progressCallback_.first((100.0 * evaluationNumber_) / (algorithm_->getStartingSample().getSize() * 
algorithm_->getGenerationNumber()), algorithm_->progressCallback_.second); + algorithm_->progressCallback_.first((100.0 * evaluationNumber_) / (algorithm_->getStartingSample().getSize() * algorithm_->getMaximumIterationNumber()), algorithm_->progressCallback_.second); } if (algorithm_->stopCallback_.first) { @@ -398,7 +398,7 @@ void Pagmo::run() const Bool memory = ResourceMap::GetAsBool("Pagmo-memory"); if (!memory) ker = std::min(ker, populationSize); - pagmo::gaco algorithm_impl(generationNumber_, ker, q, oracle, acc, threshold, n_gen_mark, impstop, getMaximumEvaluationNumber(), focus, memory); + pagmo::gaco algorithm_impl(getMaximumIterationNumber(), ker, q, oracle, acc, threshold, n_gen_mark, impstop, getMaximumEvaluationNumber(), focus, memory); if (!emulatedConstraints) algorithm_impl.set_bfe(pagmo::bfe{}); algo = algorithm_impl; @@ -409,7 +409,7 @@ void Pagmo::run() const Scalar F = ResourceMap::GetAsScalar("Pagmo-de-F"); const Scalar CR = ResourceMap::GetAsScalar("Pagmo-de-CR"); const UnsignedInteger variant = ResourceMap::GetAsUnsignedInteger("Pagmo-de-variant"); - algo = pagmo::de(generationNumber_, F, CR, variant, getMaximumResidualError(), getMaximumAbsoluteError()); + algo = pagmo::de(getMaximumIterationNumber(), F, CR, variant, getMaximumResidualError(), getMaximumAbsoluteError()); } else if (algoName_ == "sade") { @@ -417,19 +417,19 @@ void Pagmo::run() const UnsignedInteger variant = ResourceMap::GetAsUnsignedInteger("Pagmo-sade-variant"); const UnsignedInteger variant_adptv = ResourceMap::GetAsUnsignedInteger("Pagmo-sade-variant_adptv"); const Bool memory = ResourceMap::GetAsBool("Pagmo-memory"); - algo = pagmo::sade(generationNumber_, variant, variant_adptv, getMaximumResidualError(), getMaximumAbsoluteError(), memory); + algo = pagmo::sade(getMaximumIterationNumber(), variant, variant_adptv, getMaximumResidualError(), getMaximumAbsoluteError(), memory); } else if (algoName_ == "de1220") { // de1220(unsigned gen = 1u, std::vector allowed_variants 
= de1220_statics::allowed_variants, unsigned variant_adptv = 1u, double ftol = 1e-6, double xtol = 1e-6, bool memory = false, unsigned seed = pagmo::random_device::next()) const UnsignedInteger variant_adptv = ResourceMap::GetAsUnsignedInteger("Pagmo-de1220-variant_adptv"); const Bool memory = ResourceMap::GetAsBool("Pagmo-memory"); - algo = pagmo::de1220(generationNumber_, pagmo::de1220_statics::allowed_variants, variant_adptv, getMaximumResidualError(), getMaximumAbsoluteError(), memory); + algo = pagmo::de1220(getMaximumIterationNumber(), pagmo::de1220_statics::allowed_variants, variant_adptv, getMaximumResidualError(), getMaximumAbsoluteError(), memory); } else if (algoName_ == "gwo") { // gwo(unsigned gen = 1u, unsigned seed = pagmo::random_device::next()) - algo = pagmo::gwo(generationNumber_); + algo = pagmo::gwo(getMaximumIterationNumber()); } else if (algoName_ == "ihs") { @@ -439,7 +439,7 @@ void Pagmo::run() const Scalar ppar_max = ResourceMap::GetAsScalar("Pagmo-ihs-ppar_max"); const Scalar bw_min = ResourceMap::GetAsScalar("Pagmo-ihs-bw_min"); const Scalar bw_max = ResourceMap::GetAsScalar("Pagmo-ihs-bw_max"); - algo = pagmo::ihs(generationNumber_, phmcr, ppar_min, ppar_max, bw_min, bw_max); + algo = pagmo::ihs(getMaximumIterationNumber(), phmcr, ppar_min, ppar_max, bw_min, bw_max); } else if (algoName_ == "pso") { @@ -452,7 +452,7 @@ void Pagmo::run() const UnsignedInteger neighb_type = ResourceMap::GetAsUnsignedInteger("Pagmo-pso-neighb_type"); const UnsignedInteger neighb_param = ResourceMap::GetAsUnsignedInteger("Pagmo-pso-neighb_param"); const Bool memory = ResourceMap::GetAsBool("Pagmo-memory"); - algo = pagmo::pso(generationNumber_, omega, eta1, eta2, max_vel, variant, neighb_type, neighb_param, memory); + algo = pagmo::pso(getMaximumIterationNumber(), omega, eta1, eta2, max_vel, variant, neighb_type, neighb_param, memory); } else if (algoName_ == "pso_gen") { @@ -465,7 +465,7 @@ void Pagmo::run() const UnsignedInteger neighb_type = 
ResourceMap::GetAsUnsignedInteger("Pagmo-pso-neighb_type"); const UnsignedInteger neighb_param = ResourceMap::GetAsUnsignedInteger("Pagmo-pso-neighb_param"); const Bool memory = ResourceMap::GetAsBool("Pagmo-memory"); - pagmo::pso_gen algorithm_impl(generationNumber_, omega, eta1, eta2, max_vel, variant, neighb_type, neighb_param, memory); + pagmo::pso_gen algorithm_impl(getMaximumIterationNumber(), omega, eta1, eta2, max_vel, variant, neighb_type, neighb_param, memory); if (!emulatedConstraints) algorithm_impl.set_bfe(pagmo::bfe{}); algo = algorithm_impl; @@ -473,7 +473,7 @@ void Pagmo::run() else if (algoName_ == "sea") { // sea(unsigned gen = 1u, unsigned seed = pagmo::random_device::next()) - algo = pagmo::sea(generationNumber_); + algo = pagmo::sea(getMaximumIterationNumber()); } else if (algoName_ == "sga") { @@ -486,7 +486,7 @@ void Pagmo::run() const String crossover = ResourceMap::GetAsString("Pagmo-sga-crossover"); const String mutation = ResourceMap::GetAsString("Pagmo-sga-mutation"); const String selection = ResourceMap::GetAsString("Pagmo-sga-selection"); - algo = pagmo::sga(generationNumber_, cr, eta_c, m, param_m, param_s, crossover, mutation, selection); + algo = pagmo::sga(getMaximumIterationNumber(), cr, eta_c, m, param_m, param_s, crossover, mutation, selection); } else if (algoName_ == "simulated_annealing") { @@ -503,7 +503,7 @@ void Pagmo::run() { // bee_colony(unsigned gen = 1u, unsigned limit = 20u, unsigned seed = pagmo::random_device::next()) const UnsignedInteger limit = ResourceMap::GetAsUnsignedInteger("Pagmo-bee_colony-limit"); - algo = pagmo::bee_colony(generationNumber_, limit); + algo = pagmo::bee_colony(getMaximumIterationNumber(), limit); } #ifdef PAGMO_WITH_EIGEN3 else if (algoName_ == "cmaes") @@ -515,7 +515,7 @@ void Pagmo::run() const Scalar cmu = ResourceMap::GetAsScalar("Pagmo-cmaes-cmu"); const Scalar sigma0 = ResourceMap::GetAsScalar("Pagmo-cmaes-sigma0"); const Bool memory = ResourceMap::GetAsBool("Pagmo-memory"); - algo 
= pagmo::cmaes(generationNumber_, cc, cs, c1, cmu, sigma0, getMaximumResidualError(), getMaximumAbsoluteError(), memory, getProblem().hasBounds()); + algo = pagmo::cmaes(getMaximumIterationNumber(), cc, cs, c1, cmu, sigma0, getMaximumResidualError(), getMaximumAbsoluteError(), memory, getProblem().hasBounds()); } else if (algoName_ == "xnes") { @@ -525,7 +525,7 @@ void Pagmo::run() const Scalar eta_b = ResourceMap::GetAsScalar("Pagmo-xnes-eta_b"); const Scalar sigma0 = ResourceMap::GetAsScalar("Pagmo-xnes-sigma0"); const Bool memory = ResourceMap::GetAsBool("Pagmo-memory"); - algo = pagmo::xnes(generationNumber_, eta_mu, eta_sigma, eta_b, sigma0, getMaximumResidualError(), getMaximumAbsoluteError(), memory, getProblem().hasBounds()); + algo = pagmo::xnes(getMaximumIterationNumber(), eta_mu, eta_sigma, eta_b, sigma0, getMaximumResidualError(), getMaximumAbsoluteError(), memory, getProblem().hasBounds()); } #endif else if (algoName_ == "nsga2") @@ -535,7 +535,7 @@ void Pagmo::run() const Scalar eta_c = ResourceMap::GetAsScalar("Pagmo-nsga2-eta_c"); const Scalar m = ResourceMap::GetAsScalar("Pagmo-nsga2-m"); const Scalar eta_m = ResourceMap::GetAsScalar("Pagmo-nsga2-eta_m"); - pagmo::nsga2 algorithm_impl(generationNumber_, cr, eta_c, m, eta_m); + pagmo::nsga2 algorithm_impl(getMaximumIterationNumber(), cr, eta_c, m, eta_m); if (!emulatedConstraints) algorithm_impl.set_bfe(pagmo::bfe{}); algo = algorithm_impl; @@ -552,7 +552,7 @@ void Pagmo::run() const Scalar realb = ResourceMap::GetAsScalar("Pagmo-moead-realb"); const UnsignedInteger limit = ResourceMap::GetAsUnsignedInteger("Pagmo-moead-limit"); const Bool preserve_diversity = ResourceMap::GetAsBool("Pagmo-moead-preserve_diversity"); - algo = pagmo::moead(generationNumber_, weight_generation, decomposition, neighbours, CR, F, eta_m, realb, limit, preserve_diversity); + algo = pagmo::moead(getMaximumIterationNumber(), weight_generation, decomposition, neighbours, CR, F, eta_m, realb, limit, preserve_diversity); } #if 
(PAGMO_VERSION_MAJOR * 1000 + PAGMO_VERSION_MINOR) >= 2019 else if (algoName_ == "moead_gen") @@ -567,7 +567,7 @@ void Pagmo::run() const Scalar realb = ResourceMap::GetAsScalar("Pagmo-moead-realb"); const UnsignedInteger limit = ResourceMap::GetAsUnsignedInteger("Pagmo-moead-limit"); const Bool preserve_diversity = ResourceMap::GetAsBool("Pagmo-moead-preserve_diversity"); - algo = pagmo::moead_gen(generationNumber_, weight_generation, decomposition, neighbours, CR, F, eta_m, realb, limit, preserve_diversity); + algo = pagmo::moead_gen(getMaximumIterationNumber(), weight_generation, decomposition, neighbours, CR, F, eta_m, realb, limit, preserve_diversity); } #endif else if (algoName_ == "mhaco") @@ -581,7 +581,7 @@ void Pagmo::run() const Bool memory = ResourceMap::GetAsBool("Pagmo-memory"); if (!memory) ker = std::min(ker, populationSize); - pagmo::maco algorithm_impl(generationNumber_, ker, q, threshold, n_gen_mark, getMaximumEvaluationNumber(), focus, memory); + pagmo::maco algorithm_impl(getMaximumIterationNumber(), ker, q, threshold, n_gen_mark, getMaximumEvaluationNumber(), focus, memory); if (!emulatedConstraints) algorithm_impl.set_bfe(pagmo::bfe{}); algo = algorithm_impl; @@ -597,7 +597,7 @@ void Pagmo::run() const UnsignedInteger leader_selection_range = ResourceMap::GetAsUnsignedInteger("Pagmo-nspso-leader_selection_range"); const String diversity_mechanism = ResourceMap::GetAsString("Pagmo-nspso-diversity_mechanism"); const Bool memory = ResourceMap::GetAsBool("Pagmo-memory"); - pagmo::nspso algorithm_impl(generationNumber_, omega, c1, c2, chi, v_coeff, leader_selection_range, diversity_mechanism, memory); + pagmo::nspso algorithm_impl(getMaximumIterationNumber(), omega, c1, c2, chi, v_coeff, leader_selection_range, diversity_mechanism, memory); if (!emulatedConstraints) algorithm_impl.set_bfe(pagmo::bfe{}); algo = algorithm_impl; @@ -610,6 +610,7 @@ void Pagmo::run() pop = algo.evolve(pop); result_ = OptimizationResult(getProblem()); 
result_.setEvaluationNumber(PagmoProblem::evaluationNumber_); + result_.setIterationNumber(getMaximumIterationNumber()); Scalar optimalValue = 0.0; Sample finalPoints(0, getProblem().getDimension()); @@ -771,12 +772,14 @@ String Pagmo::getAlgorithmName() const /* Number of generations to evolve */ void Pagmo::setGenerationNumber(const UnsignedInteger generationNumber) { - generationNumber_ = generationNumber; + LOGWARN(OSS() << "Pagmo.setGenerationNumber is deprecated in favor of setMaximumIterationNumber"); + setMaximumIterationNumber(generationNumber); } UnsignedInteger Pagmo::getGenerationNumber() const { - return generationNumber_; + LOGWARN(OSS() << "Pagmo.getGenerationNumber is deprecated in favor of getMaximumIterationNumber"); + return getMaximumIterationNumber(); } /* Random generator seed accessor */ @@ -807,7 +810,6 @@ void Pagmo::save(Advocate & adv) const OptimizationAlgorithmImplementation::save(adv); adv.saveAttribute("algoName_", algoName_); adv.saveAttribute("startingSample_", startingSample_); - adv.saveAttribute("generationNumber_", generationNumber_); adv.saveAttribute("seed_", seed_); adv.saveAttribute("blockSize_", blockSize_); } @@ -818,7 +820,12 @@ void Pagmo::load(Advocate & adv) OptimizationAlgorithmImplementation::load(adv); adv.loadAttribute("algoName_", algoName_); adv.loadAttribute("startingSample_", startingSample_); - adv.loadAttribute("generationNumber_", generationNumber_); + if (adv.hasAttribute("generationNumber_")) // OT<=1.21 + { + UnsignedInteger generationNumber = 0; + adv.loadAttribute("generationNumber_", generationNumber); + setMaximumIterationNumber(generationNumber); + } adv.loadAttribute("seed_", seed_); adv.loadAttribute("blockSize_", blockSize_); } diff --git a/lib/src/Base/Optim/openturns/Pagmo.hxx b/lib/src/Base/Optim/openturns/Pagmo.hxx index ec655f88aec..03ba9b071c0 100644 --- a/lib/src/Base/Optim/openturns/Pagmo.hxx +++ b/lib/src/Base/Optim/openturns/Pagmo.hxx @@ -65,7 +65,7 @@ public: void setAlgorithmName(const 
String & algoName); String getAlgorithmName() const; - /** Number of generations to evolve */ + /** @deprecated Number of generations to evolve */ void setGenerationNumber(const UnsignedInteger generationNumber); UnsignedInteger getGenerationNumber() const; @@ -102,9 +102,6 @@ private: // initial population Sample startingSample_; - // number of generations to evolve - UnsignedInteger generationNumber_ = 10; - // random generator seed UnsignedInteger seed_ = 0; diff --git a/python/doc/examples/numerical_methods/optimization/plot_optimization_pagmo.py b/python/doc/examples/numerical_methods/optimization/plot_optimization_pagmo.py index d42522a6917..293e90a6d1a 100644 --- a/python/doc/examples/numerical_methods/optimization/plot_optimization_pagmo.py +++ b/python/doc/examples/numerical_methods/optimization/plot_optimization_pagmo.py @@ -32,7 +32,7 @@ # %% # We create the algorithm that should evolve over 10 generations algo = ot.Pagmo(zdt1, "nsga2", pop0) -algo.setGenerationNumber(10) +algo.setMaximumIterationNumber(10) # %% # Benefit from parallel evaluations if the function allows it @@ -70,7 +70,7 @@ fronts = [] for gen in range(5): algo = ot.Pagmo(zdt1, "nsga2", pop0) - algo.setGenerationNumber(gen) + algo.setMaximumIterationNumber(gen) algo.run() front0 = algo.getResult().getParetoFrontsIndices()[0] fronts.append(algo.getResult().getFinalValues().select(front0)) diff --git a/python/src/Pagmo_doc.i.in b/python/src/Pagmo_doc.i.in index 257edf8b1da..6bea39f82d0 100644 --- a/python/src/Pagmo_doc.i.in +++ b/python/src/Pagmo_doc.i.in @@ -4,7 +4,7 @@ This class exposes bio-inspired and evolutionary global optimization algorithms from the `Pagmo `_ library. These algorithms start from an initial population and make it evolve to obtain -a final population after a defined number of generations (by :meth:`setGenerationNumber`). +a final population after a defined number of generations (by :meth:`setMaximumIterationNumber`). 
A few of these algorithms allow for multi-objective optimization, and in that case the result is not the best point among the final population but a set of dominant points: a pareto front. @@ -20,6 +20,7 @@ startingSample : 2-d sequence of float, optional Notes ----- +The total number of evaluations is the size of the initial population multiplied by the iteration number. Starting points provided through the *startingSample* parameter should be within the bounds of the :class:`~openturns.OptimizationProblem`, but this is not enforced. @@ -95,7 +96,7 @@ Sample the initial population inside a box: Run GACO on our problem: >>> algo = ot.Pagmo(problem, 'gaco', init_pop) # doctest: +SKIP ->>> algo.setGenerationNumber(5) # doctest: +SKIP +>>> algo.setMaximumIterationNumber(5) # doctest: +SKIP >>> algo.run() # doctest: +SKIP >>> result = algo.getResult() # doctest: +SKIP >>> x_star = result.getOptimalPoint() # doctest: +SKIP @@ -123,7 +124,7 @@ Sample the initial population inside a box: Run NSGA2 on our problem: >>> algo = ot.Pagmo(problem, 'nsga2', init_pop) # doctest: +SKIP ->>> algo.setGenerationNumber(5) # doctest: +SKIP +>>> algo.setMaximumIterationNumber(5) # doctest: +SKIP >>> algo.run() # doctest: +SKIP >>> result = algo.getResult() # doctest: +SKIP >>> final_pop_x = result.getFinalPoints() # doctest: +SKIP @@ -159,27 +160,6 @@ startingSample : 2-d sequence of float // --------------------------------------------------------------------- -%feature("docstring") OT::Pagmo::setGenerationNumber -"Generation number accessor. - -Parameters ----------- -gen : int - Number of generations to evolve. - Ignored for the simulated_annealing algorithm." - -// --------------------------------------------------------------------- - -%feature("docstring") OT::Pagmo::getGenerationNumber -"Generation number accessor. - -Returns -------- -gen : int - Number of generations to evolve." 
- -// --------------------------------------------------------------------- - %feature("docstring") OT::Pagmo::setSeed "Random generator seed accessor. diff --git a/python/test/t_Pagmo_std.py b/python/test/t_Pagmo_std.py index 619b756cf73..4b8c275d6f5 100755 --- a/python/test/t_Pagmo_std.py +++ b/python/test/t_Pagmo_std.py @@ -43,6 +43,7 @@ def stop(): if use_ineq: zdt1.setInequalityConstraint(ineq) algo = ot.Pagmo(zdt1, name, pop0) + algo.setMaximumIterationNumber(10) algo.setBlockSize(8) # algo.setProgressCallback(progress) # algo.setStopCallback(stop) @@ -54,7 +55,7 @@ def stop(): assert len(fronts) > 0, "no pareto" print(name, len(fronts)) assert ( - result.getEvaluationNumber() == algo.getGenerationNumber() * size + result.getEvaluationNumber() == algo.getMaximumIterationNumber() * size ), "wrong size" # rosenbrock for the other algorithms @@ -74,6 +75,7 @@ def stop(): if use_ineq: problem.setInequalityConstraint(ineq) algo = ot.Pagmo(problem, name, pop0) + algo.setMaximumIterationNumber(10) algo.setBlockSize(8) # algo.setProgressCallback(progress) # algo.setStopCallback(stop) @@ -174,6 +176,7 @@ def minlp_cstr(x): ) for name in ["gaco", "ihs", "sga"]: algo = ot.Pagmo(problem, name, pop0) + algo.setMaximumIterationNumber(10) algo.setBlockSize(8) algo.run() result = algo.getResult() @@ -198,6 +201,7 @@ def minlp_cstr(x): dist = ot.ComposedDistribution([ot.Uniform(0.0, 5.0)] * 2) pop0 = dist.getSample(50) algo = ot.Pagmo(zdt1, "nsga2", pop0) +algo.setMaximumIterationNumber(10) algo.run() result = algo.getResult() x = result.getFinalPoints() From add944c59aa8b80e6985e7583a978c0070a2bc94 Mon Sep 17 00:00:00 2001 From: Julien Schueller Date: Fri, 21 Jul 2023 09:26:25 +0200 Subject: [PATCH 10/31] CMake: Simplify OpenMP detection --- CMakeLists.txt | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index ce36aade390..1c4b4e10590 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -408,12 +408,7 @@ if (USE_OPENMP) 
find_package (OpenMP) if (OPENMP_FOUND) set (OPENTURNS_HAVE_OPENMP TRUE) - if (TARGET OpenMP::OpenMP_CXX) - list (APPEND OPENTURNS_PRIVATE_LIBRARIES OpenMP::OpenMP_CXX) - else () - set (CMAKE_CXX_FLAGS "${OpenMP_CXX_FLAGS} ${CMAKE_CXX_FLAGS}") - set (CMAKE_SHARED_LINKER_FLAGS "${OpenMP_CXX_FLAGS} ${CMAKE_SHARED_LINKER_FLAGS}") - endif () + list (APPEND OPENTURNS_PRIVATE_LIBRARIES OpenMP::OpenMP_CXX) endif () endif () From 6ccfd9e4adcb5768377507b70524c0a502875fcb Mon Sep 17 00:00:00 2001 From: Julien Schueller Date: Thu, 27 Jul 2023 10:45:06 +0200 Subject: [PATCH 11/31] Add Issue templates --- .github/ISSUE_TEMPLATE/0_bug.yml | 68 ++++++++++++++++++++++++++++ .github/ISSUE_TEMPLATE/1_feature.yml | 22 +++++++++ 2 files changed, 90 insertions(+) create mode 100755 .github/ISSUE_TEMPLATE/0_bug.yml create mode 100755 .github/ISSUE_TEMPLATE/1_feature.yml diff --git a/.github/ISSUE_TEMPLATE/0_bug.yml b/.github/ISSUE_TEMPLATE/0_bug.yml new file mode 100755 index 00000000000..24a3da236e8 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/0_bug.yml @@ -0,0 +1,68 @@ +name: Bug Report +description: Create a bug report. +labels: + - bug +body: + - type: textarea + id: what + attributes: + label: What happened? + description: What should have happened instead? Please provide as many details as possible. + validations: + required: true + - type: textarea + id: info + attributes: + label: How to reproduce the issue? + description: Please add the minimal code that helps replicating the issue. + render: shell + validations: + required: true + - type: textarea + id: version + attributes: + label: Version + description: | + Please tell us the version you are using. + + ```python + import openturns as ot + print(ot.__version__) + ``` + validations: + required: true + - type: dropdown + id: oscheck + attributes: + label: Operating System + description: Please provide the operating system running. 
+ options: + - unknown + - Windows + - Linux + - MacOS + - all + - other + validations: + required: true + - type: dropdown + id: sourcecheck + attributes: + label: Installation media + description: Please provide the installation media. + options: + - unknown + - pip + - conda + - Windows NSIS installer + - Debian package + - RPM package + - from source + - other + validations: + required: true + - type: textarea + id: context + attributes: + label: Additional Context + description: Include any additional information that you think would be valuable. diff --git a/.github/ISSUE_TEMPLATE/1_feature.yml b/.github/ISSUE_TEMPLATE/1_feature.yml new file mode 100755 index 00000000000..a948de25f62 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/1_feature.yml @@ -0,0 +1,22 @@ +name: Feature Request +description: Create a feature request. +labels: + - enhancement +body: + - type: textarea + id: idea + attributes: + label: What is the idea? + description: Describe what the feature is and the desired state. + validations: + required: true + - type: textarea + id: why + attributes: + label: Why is this needed? + description: Who would benefit from this feature? Why would this add value to them? What problem does this solve? + - type: textarea + id: context + attributes: + label: Additional Context + description: Include any additional information that you think would be valuable. 
From 6a74ab5b9616851c6af67f9512c047fcc386146d Mon Sep 17 00:00:00 2001 From: Julien Schueller Date: Thu, 27 Jul 2023 15:14:17 +0200 Subject: [PATCH 12/31] WhittleFactory: Drop unneeded symbol --- python/src/WhittleFactory.i | 1 - 1 file changed, 1 deletion(-) diff --git a/python/src/WhittleFactory.i b/python/src/WhittleFactory.i index 96bbec8afe9..0651c6e51d6 100644 --- a/python/src/WhittleFactory.i +++ b/python/src/WhittleFactory.i @@ -9,7 +9,6 @@ %} %template(WhittleFactoryStateCollection) OT::Collection; -%template(WhittleFactoryStatePersistentCollection) OT::PersistentCollection; %include WhittleFactory_doc.i From ff903207b425c1a447cb84edd0ef7b8e91acb443 Mon Sep 17 00:00:00 2001 From: Julien Schueller Date: Thu, 27 Jul 2023 15:51:28 +0200 Subject: [PATCH 13/31] Text: Fix rotation accessor docstring --- python/src/Text_doc.i.in | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/src/Text_doc.i.in b/python/src/Text_doc.i.in index cc92ad99c12..cb56468036f 100644 --- a/python/src/Text_doc.i.in +++ b/python/src/Text_doc.i.in @@ -38,7 +38,7 @@ Examples // ---------------------------------------------------------------------------- -%feature("docstring") OT::Text::setTextRotation +%feature("docstring") OT::Text::setRotation "Text rotation accessor. Parameters @@ -48,7 +48,7 @@ rotation : float // ---------------------------------------------------------------------------- -%feature("docstring") OT::Text::getTextRotation +%feature("docstring") OT::Text::getRotation "Text rotation accessor. 
Returns From bae7b221842e4a262b645df94638a2382a88fe79 Mon Sep 17 00:00:00 2001 From: Julien Schueller Date: Thu, 27 Jul 2023 16:09:43 +0200 Subject: [PATCH 14/31] PosteriorDistribution: Hide computeLikelihood --- python/src/PosteriorDistribution.i | 3 +++ 1 file changed, 3 insertions(+) diff --git a/python/src/PosteriorDistribution.i b/python/src/PosteriorDistribution.i index d6a085e6748..62e7bcf5e88 100644 --- a/python/src/PosteriorDistribution.i +++ b/python/src/PosteriorDistribution.i @@ -6,5 +6,8 @@ %include PosteriorDistribution_doc.i +%ignore OT::PosteriorDistribution::computeLikelihood; +%ignore OT::PosteriorDistribution::computeLogLikelihood; + %include openturns/PosteriorDistribution.hxx namespace OT { %extend PosteriorDistribution { PosteriorDistribution(const PosteriorDistribution & other) { return new OT::PosteriorDistribution(other); } } } From 57b97d3b017593f8741dbc14c2fa99743815f2b8 Mon Sep 17 00:00:00 2001 From: Julien Schueller Date: Thu, 27 Jul 2023 16:20:52 +0200 Subject: [PATCH 15/31] LHSResult: Hide add --- python/src/LHSResult.i | 2 ++ 1 file changed, 2 insertions(+) diff --git a/python/src/LHSResult.i b/python/src/LHSResult.i index 980b0f30ab8..bb71687fb6d 100644 --- a/python/src/LHSResult.i +++ b/python/src/LHSResult.i @@ -6,5 +6,7 @@ %include LHSResult_doc.i +%ignore OT::LHSResult::add; + %include openturns/LHSResult.hxx namespace OT { %extend LHSResult { LHSResult(const LHSResult & other) { return new OT::LHSResult(other); } } } From c1d6f640d986df76916673f6d97713ff8bd69858 Mon Sep 17 00:00:00 2001 From: Julien Schueller Date: Thu, 27 Jul 2023 17:27:39 +0200 Subject: [PATCH 16/31] Dlib: Missing docstring --- python/src/Dlib_doc.i.in | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/python/src/Dlib_doc.i.in b/python/src/Dlib_doc.i.in index 41c41106b00..bd9c91f02e6 100644 --- a/python/src/Dlib_doc.i.in +++ b/python/src/Dlib_doc.i.in @@ -244,3 +244,23 @@ Returns ------- algorithmNames : :class:`~openturns.Description` 
List of the names of available dlib search strategies." + +// --------------------------------------------------------------------- + +%feature("docstring") OT::Dlib::setAlgorithmName +"Accessor to the algorithm name. + +Parameters +---------- +algoName : str + The identifier of the algorithm." + +// --------------------------------------------------------------------- + +%feature("docstring") OT::Dlib::getAlgorithmName +"Accessor to the algorithm name. + +Returns +------- +algoName : str + The identifier of the algorithm." From a869c5a343af1da0167f473e29d4fd3c1e85ce89 Mon Sep 17 00:00:00 2001 From: Julien Schueller Date: Fri, 28 Jul 2023 08:45:37 +0200 Subject: [PATCH 17/31] TimeVaryingResult: Missing getTimeGrid docstring --- python/src/TimeVaryingResult_doc.i.in | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/python/src/TimeVaryingResult_doc.i.in b/python/src/TimeVaryingResult_doc.i.in index f01955dc933..00face0382b 100644 --- a/python/src/TimeVaryingResult_doc.i.in +++ b/python/src/TimeVaryingResult_doc.i.in @@ -73,7 +73,17 @@ Returns distribution : :class:`~openturns.Distribution` The Parent distribution at time :math:`t`." -// ------------------Time stamps--------------------------------------------------- +// --------------------------------------------------------------------- + +%feature("docstring") OT::TimeVaryingResult::getTimeGrid +"Accessor to the time grid. + +Returns +------- +timeGrid : :class:`~openturns.Sample` + Timestamp values." + +// --------------------------------------------------------------------- %feature("docstring") OT::TimeVaryingResult::getParameterDistribution "Accessor to the distribution of :math:`\vect{\beta}`. 
From 0c8ed4e1da1b9e7fb434ce761926c36124912267 Mon Sep 17 00:00:00 2001 From: Julien Schueller Date: Tue, 29 Aug 2023 10:20:58 +0200 Subject: [PATCH 18/31] TestResult: Missing docstring --- lib/src/Base/Stat/TestResult.cxx | 2 +- lib/src/Base/Stat/openturns/TestResult.hxx | 2 +- python/src/TestResult_doc.i.in | 30 ++++++++++++++++++++++ 3 files changed, 32 insertions(+), 2 deletions(-) diff --git a/lib/src/Base/Stat/TestResult.cxx b/lib/src/Base/Stat/TestResult.cxx index a3a9c5883c4..7381d6d31f5 100644 --- a/lib/src/Base/Stat/TestResult.cxx +++ b/lib/src/Base/Stat/TestResult.cxx @@ -63,7 +63,7 @@ TestResult * TestResult::clone() const return new TestResult(*this); } -/* Description Accessor */ +/* Description accessor */ void TestResult::setDescription(const Description & description) { description_ = description; diff --git a/lib/src/Base/Stat/openturns/TestResult.hxx b/lib/src/Base/Stat/openturns/TestResult.hxx index 69f37e3c32b..9e2eeca11b7 100644 --- a/lib/src/Base/Stat/openturns/TestResult.hxx +++ b/lib/src/Base/Stat/openturns/TestResult.hxx @@ -57,7 +57,7 @@ public: /** Virtual constructor */ TestResult * clone() const override; - /** Description Accessor */ + /** Description accessor */ void setDescription(const Description & description); Description getDescription() const; diff --git a/python/src/TestResult_doc.i.in b/python/src/TestResult_doc.i.in index e45bb433c09..9c500221c12 100644 --- a/python/src/TestResult_doc.i.in +++ b/python/src/TestResult_doc.i.in @@ -69,3 +69,33 @@ Returns ------- statistic : float Measure used for the statistical test." + +// --------------------------------------------------------------------- + +%feature("docstring") OT::TestResult::getDescription +"Accessor to the test description. + +Returns +------- +statistic : :class:`~openturns.Description` + Test description." + +// --------------------------------------------------------------------- + +%feature("docstring") OT::TestResult::getTestType +"Accessor to the test type. 
+ +Returns +------- +type : str + Test type." + +// --------------------------------------------------------------------- + +%feature("docstring") OT::TestResult::setDescription +"Accessor to the test description. + +Parameters +---------- +description : sequence of str + Test description." From afeac13a2c375ab30d797c66f9c4df76ddcbd342 Mon Sep 17 00:00:00 2001 From: Julien Schueller Date: Tue, 29 Aug 2023 10:50:11 +0200 Subject: [PATCH 19/31] TTY: Hide GetColor --- python/src/TTY.i | 3 +++ 1 file changed, 3 insertions(+) diff --git a/python/src/TTY.i b/python/src/TTY.i index e070db7d29a..31bd2dc693e 100644 --- a/python/src/TTY.i +++ b/python/src/TTY.i @@ -6,4 +6,7 @@ %include TTY_doc.i +%ignore OT::TTY::GetColor; +%nodefaultctor TTY; + %include openturns/TTY.hxx From 9b601cd55b90c038800bb06d173efbab458cc2ea Mon Sep 17 00:00:00 2001 From: Julien Schueller Date: Tue, 29 Aug 2023 10:50:29 +0200 Subject: [PATCH 20/31] Field: Missing asSample docstring --- python/src/FieldImplementation_doc.i.in | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/python/src/FieldImplementation_doc.i.in b/python/src/FieldImplementation_doc.i.in index 005deb0d359..154e861266d 100644 --- a/python/src/FieldImplementation_doc.i.in +++ b/python/src/FieldImplementation_doc.i.in @@ -387,3 +387,16 @@ value : :class:`~openturns.Field`. %enddef %feature("docstring") OT::FieldImplementation::getMarginal OT_Field_getMarginal_doc + +// --------------------------------------------------------------------- + +%define OT_Field_asSample_doc +"Convert to Sample. + +Returns +------- +sample : :class:`~openturns.Sample` + Data as a Sample object." 
+%enddef +%feature("docstring") OT::FieldImplementation::asSample +OT_Field_asSample_doc From 76ca00f2d779355d9b8ca454ad54928c7da26a7a Mon Sep 17 00:00:00 2001 From: Julien Schueller Date: Tue, 29 Aug 2023 10:50:55 +0200 Subject: [PATCH 21/31] t_docstring_missing: Lower threshold --- python/test/t_docstring_missing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/test/t_docstring_missing.py b/python/test/t_docstring_missing.py index cded69b0309..9586c36a2d9 100755 --- a/python/test/t_docstring_missing.py +++ b/python/test/t_docstring_missing.py @@ -44,5 +44,5 @@ print( f"-- undocumented methods: {count_methods_undoc} ({100.0 * count_methods_undoc / count_methods:.2f}%) --" ) -if count_class_undoc + count_methods_undoc > 494: +if count_class_undoc + count_methods_undoc > 400: raise ValueError("too much undocumented class/methods") From a414ebc9821b05eab5b3a6001ae9f37d860d2707 Mon Sep 17 00:00:00 2001 From: Julien Schueller Date: Fri, 1 Sep 2023 11:37:13 +0200 Subject: [PATCH 22/31] Distribution: Drop extra semicolons --- .../openturns/AdaptiveStieltjesAlgorithm.hxx | 6 ++--- .../Sensitivity/HSICStatImplementation.cxx | 2 +- .../Algorithm/Sensitivity/HSICUStat.cxx | 2 +- .../WeightedExperiments/SmolyakExperiment.cxx | 2 +- .../Distribution/NegativeBinomialFactory.cxx | 2 +- .../Distribution/ProductDistribution.cxx | 12 +++++----- .../Distribution/RandomMixture.cxx | 6 ++--- .../Uncertainty/Distribution/RiceFactory.cxx | 2 +- .../openturns/DistributionImplementation.hxx | 24 +++++++++---------- 9 files changed, 29 insertions(+), 29 deletions(-) diff --git a/lib/src/Uncertainty/Algorithm/OrthogonalBasis/openturns/AdaptiveStieltjesAlgorithm.hxx b/lib/src/Uncertainty/Algorithm/OrthogonalBasis/openturns/AdaptiveStieltjesAlgorithm.hxx index 445a68c7855..c15908051ea 100644 --- a/lib/src/Uncertainty/Algorithm/OrthogonalBasis/openturns/AdaptiveStieltjesAlgorithm.hxx +++ 
b/lib/src/Uncertainty/Algorithm/OrthogonalBasis/openturns/AdaptiveStieltjesAlgorithm.hxx @@ -76,7 +76,7 @@ private: qN_(qN), weight_(weight) { // Nothing to do - }; + } // This method allows one to compute Point kernelSym(const Point & point) const @@ -87,7 +87,7 @@ private: Point result(1); result[0] = qNX * qNX * pdf; return result; - }; + } // This method allows one to compute and Point kernelGen(const Point & point) const @@ -100,7 +100,7 @@ private: result[0] = qNX * qNX * pdf; result[1] = xQNX * qNX * pdf; return result; - }; + } const OrthogonalUniVariatePolynomial & qN_; const Distribution & weight_; diff --git a/lib/src/Uncertainty/Algorithm/Sensitivity/HSICStatImplementation.cxx b/lib/src/Uncertainty/Algorithm/Sensitivity/HSICStatImplementation.cxx index f0ec54c992c..6982b590b08 100644 --- a/lib/src/Uncertainty/Algorithm/Sensitivity/HSICStatImplementation.cxx +++ b/lib/src/Uncertainty/Algorithm/Sensitivity/HSICStatImplementation.cxx @@ -35,7 +35,7 @@ HSICStatImplementation::HSICStatImplementation() : PersistentObject() { // Nothing -}; +} /* Virtual constructor */ HSICStatImplementation * HSICStatImplementation::clone() const diff --git a/lib/src/Uncertainty/Algorithm/Sensitivity/HSICUStat.cxx b/lib/src/Uncertainty/Algorithm/Sensitivity/HSICUStat.cxx index 5cc9c0ed16e..493e277aa2f 100644 --- a/lib/src/Uncertainty/Algorithm/Sensitivity/HSICUStat.cxx +++ b/lib/src/Uncertainty/Algorithm/Sensitivity/HSICUStat.cxx @@ -32,7 +32,7 @@ CLASSNAMEINIT(HSICUStat) HSICUStat::HSICUStat() { // Nothing -}; +} /* Virtual constructor */ HSICUStat* HSICUStat::clone() const diff --git a/lib/src/Uncertainty/Algorithm/WeightedExperiments/SmolyakExperiment.cxx b/lib/src/Uncertainty/Algorithm/WeightedExperiments/SmolyakExperiment.cxx index e64145ee74c..518ff546375 100644 --- a/lib/src/Uncertainty/Algorithm/WeightedExperiments/SmolyakExperiment.cxx +++ b/lib/src/Uncertainty/Algorithm/WeightedExperiments/SmolyakExperiment.cxx @@ -147,7 +147,7 @@ class PointApproximateComparison , 
relativeEpsilon_(relativeEpsilon) { // Nothing to do - }; + } /* Compare two points, according to lexicographic order * Returns true if x < y, false otherwise. diff --git a/lib/src/Uncertainty/Distribution/NegativeBinomialFactory.cxx b/lib/src/Uncertainty/Distribution/NegativeBinomialFactory.cxx index 61a2653b752..60b7acabed4 100644 --- a/lib/src/Uncertainty/Distribution/NegativeBinomialFactory.cxx +++ b/lib/src/Uncertainty/Distribution/NegativeBinomialFactory.cxx @@ -56,7 +56,7 @@ struct NegativeBinomialFactoryParameterConstraint mean_(mean) { // Nothing to do - }; + } Point computeConstraint(const Point & parameter) const { diff --git a/lib/src/Uncertainty/Distribution/ProductDistribution.cxx b/lib/src/Uncertainty/Distribution/ProductDistribution.cxx index e6c4110fe73..3dddcc2bf89 100644 --- a/lib/src/Uncertainty/Distribution/ProductDistribution.cxx +++ b/lib/src/Uncertainty/Distribution/ProductDistribution.cxx @@ -133,7 +133,7 @@ class PDFKernelProductDistribution: public UniVariateFunctionImplementation , isZero_(std::abs(x) < ResourceMap::GetAsScalar("Distribution-DefaultQuantileEpsilon")), pdf0_(isZero_ ? p_right->computePDF(0.0) : 0.0) { // Nothing to do - }; + } PDFKernelProductDistribution * clone() const { @@ -159,7 +159,7 @@ class PDFKernelProductDistribution: public UniVariateFunctionImplementation return value * 0.5 * (p_right_->computePDF(x_ / epsilon) + p_right_->computePDF(-x_ / epsilon)) / epsilon; } return value * p_right_->computePDF(x_ / u) / absU; - }; + } private: const Pointer p_left_; @@ -186,7 +186,7 @@ class CDFKernelProductDistribution: public UniVariateFunctionImplementation , ccdf0_(isZero_ ? p_right->computeComplementaryCDF(0.0) : 0.0) { // Nothing to do - }; + } CDFKernelProductDistribution * clone() const { @@ -201,7 +201,7 @@ class CDFKernelProductDistribution: public UniVariateFunctionImplementation if (isZero_) return value * cdf0_; if (u == 0.0) return (x_ < 0.0 ? 
0.0 : value); return value * p_right_->computeCDF(x_ / u); - }; + } private: const Pointer p_left_; @@ -228,7 +228,7 @@ class ComplementaryCDFKernelProductDistributionProductDistribution: public UniVa , ccdf0_(isZero_ ? p_right->computeComplementaryCDF(0.0) : 0.0) { // Nothing to do - }; + } ComplementaryCDFKernelProductDistributionProductDistribution * clone() const { @@ -243,7 +243,7 @@ class ComplementaryCDFKernelProductDistributionProductDistribution: public UniVa if (isZero_) return value * ccdf0_; if (u == 0.0) return (x_ < 0.0 ? 0.0 : value); return value * p_right_->computeComplementaryCDF(x_ / u); - }; + } private: const Pointer p_left_; diff --git a/lib/src/Uncertainty/Distribution/RandomMixture.cxx b/lib/src/Uncertainty/Distribution/RandomMixture.cxx index 6cfe090e852..379bd95479c 100644 --- a/lib/src/Uncertainty/Distribution/RandomMixture.cxx +++ b/lib/src/Uncertainty/Distribution/RandomMixture.cxx @@ -1119,7 +1119,7 @@ class PDFKernelRandomMixture: public UniVariateFunctionImplementation , z0_(z0) { // Nothing to do - }; + } PDFKernelRandomMixture * clone() const { @@ -1160,7 +1160,7 @@ class CDFKernelRandomMixture: public UniVariateFunctionImplementation , z0_(z0) { // Nothing to do - }; + } CDFKernelRandomMixture * clone() const { @@ -1201,7 +1201,7 @@ class ComplementaryCDFKernelRandomMixture: public UniVariateFunctionImplementati , z0_(z0) { // Nothing to do - }; + } ComplementaryCDFKernelRandomMixture * clone() const { diff --git a/lib/src/Uncertainty/Distribution/RiceFactory.cxx b/lib/src/Uncertainty/Distribution/RiceFactory.cxx index 2df1732542a..c78f962eaa7 100644 --- a/lib/src/Uncertainty/Distribution/RiceFactory.cxx +++ b/lib/src/Uncertainty/Distribution/RiceFactory.cxx @@ -51,7 +51,7 @@ struct RiceFactoryParameterConstraint r2p1_(1.0 + r * r) { // Nothing to do - }; + } Point computeConstraint(const Point & parameter) const { diff --git a/lib/src/Uncertainty/Model/openturns/DistributionImplementation.hxx 
b/lib/src/Uncertainty/Model/openturns/DistributionImplementation.hxx index 1d385910095..1c47b702c8d 100644 --- a/lib/src/Uncertainty/Model/openturns/DistributionImplementation.hxx +++ b/lib/src/Uncertainty/Model/openturns/DistributionImplementation.hxx @@ -911,7 +911,7 @@ protected: Sample operator() (const Sample & sample) const override { return p_distribution_->computePDF(sample); - }; + } UnsignedInteger getInputDimension() const override { @@ -975,7 +975,7 @@ protected: Sample operator() (const Sample & sample) const override { return p_distribution_->computeLogPDF(sample); - }; + } UnsignedInteger getInputDimension() const override { @@ -1044,12 +1044,12 @@ protected: Point computeCDF(const Point & point) const { return Point(1, p_distribution_->computeCDF(point)); - }; + } Sample computeCDF(const Sample & sample) const { return p_distribution_->computeCDF(sample); - }; + } UnsignedInteger getInputDimension() const override { @@ -1487,7 +1487,7 @@ protected: , p_distribution_(p_distribution) { // Nothing to do - }; + } ShiftedMomentWrapper * clone() const override { @@ -1499,7 +1499,7 @@ protected: const Scalar power = std::pow(point[0] - shift_, n_); const Scalar pdf = p_distribution_->computePDF(point); return Point(1, power * pdf); - }; + } Sample operator() (const Sample & sample) const override { @@ -1509,7 +1509,7 @@ protected: for (UnsignedInteger i = 0; i < size; ++i) result(i, 0) = std::pow(sample(i, 0) - shift_, n_) * pdf(i, 0); return result; - }; + } UnsignedInteger getInputDimension() const override { @@ -1551,14 +1551,14 @@ protected: , p_distribution_(p_distribution) { // Nothing to do - }; + } Scalar operator() (const Scalar x) const override { Collection z(y_); z.add(x); return p_distribution_->computePDF(z); - }; + } void setParameter(const Point & parameters) { @@ -1599,7 +1599,7 @@ protected: , p_distribution_(p_distribution) { // Nothing to do - }; + } ConditionalCDFWrapper * clone() const override { @@ -1609,7 +1609,7 @@ protected: 
Scalar operator() (const Scalar x) const override { return p_distribution_->computeConditionalCDF(x, y_); - }; + } void setParameter(const Point & parameters) { @@ -1673,7 +1673,7 @@ protected: result(i, 0) = -std::exp(logPDFI) * logPDFI; } return result; - }; + } UnsignedInteger getInputDimension() const override { From 914012e5da79f2fd137aa1b0e77cb3737a0f8a68 Mon Sep 17 00:00:00 2001 From: Julien Schueller Date: Wed, 6 Sep 2023 14:49:59 +0200 Subject: [PATCH 23/31] Ceres: Handle windows.h include --- lib/src/Base/Optim/Ceres.cxx | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/src/Base/Optim/Ceres.cxx b/lib/src/Base/Optim/Ceres.cxx index b98230e9110..04312759c46 100644 --- a/lib/src/Base/Optim/Ceres.cxx +++ b/lib/src/Base/Optim/Ceres.cxx @@ -24,6 +24,7 @@ #include "openturns/Log.hxx" #include "openturns/SpecFunc.hxx" #ifdef OPENTURNS_HAVE_CERES +#include "openturns/OTwindows.h" // ceres includes windows.h #include #endif From 095fc34de387f7bcc6dda64e57a9018ff0a71c46 Mon Sep 17 00:00:00 2001 From: Julien Schueller Date: Wed, 6 Sep 2023 16:04:30 +0200 Subject: [PATCH 24/31] CMake: Disable bison tests --- python/test/CMakeLists.txt | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/python/test/CMakeLists.txt b/python/test/CMakeLists.txt index d92f032a9fe..62177a662d9 100644 --- a/python/test/CMakeLists.txt +++ b/python/test/CMakeLists.txt @@ -50,7 +50,9 @@ endmacro () ot_pyinstallcheck_test (ackley_function IGNOREOUT) ot_pyinstallcheck_test (branin_function IGNOREOUT) ot_pyinstallcheck_test (cantilever_beam IGNOREOUT) +if (BISON_FOUND AND FLEX_FOUND) ot_pyinstallcheck_test (coles IGNOREOUT) +endif () ot_pyinstallcheck_test (chaboche_model IGNOREOUT) ot_pyinstallcheck_test (deflection_tube IGNOREOUT) ot_pyinstallcheck_test (flood_model IGNOREOUT) @@ -473,7 +475,9 @@ ot_pyinstallcheck_test (GalambosCopula_std IGNOREOUT) ot_pyinstallcheck_test (Gamma_std) ot_pyinstallcheck_test (GammaFactory_std IGNOREOUT) ot_pyinstallcheck_test 
(GeneralizedExtremeValue_std) +if (BISON_FOUND AND FLEX_FOUND) ot_pyinstallcheck_test (GeneralizedExtremeValueFactory_std IGNOREOUT) +endif () ot_pyinstallcheck_test (GeneralizedExtremeValueValidation_std IGNOREOUT) ot_pyinstallcheck_test (GeneralizedPareto_std) ot_pyinstallcheck_test (GeneralizedParetoFactory_std) @@ -594,7 +598,9 @@ ot_pyinstallcheck_test (Wishart_std) ot_pyinstallcheck_test (ZipfMandelbrot_std) ot_pyinstallcheck_test (DistributionParameters_std IGNOREOUT) ot_pyinstallcheck_test (DistFunc_beta) +if (BISON_FOUND AND FLEX_FOUND) ot_pyinstallcheck_test (DistFunc_binomial IGNOREOUT) +endif () if (NOT PROJECT_BINARY_DIR STREQUAL PROJECT_SOURCE_DIR) execute_process (COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/t_DistFunc_binomial1.csv ${CMAKE_CURRENT_BINARY_DIR}) execute_process (COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/t_DistFunc_binomial2.csv ${CMAKE_CURRENT_BINARY_DIR}) @@ -862,6 +868,13 @@ if (MATPLOTLIB_FOUND) endif () if (NOT BISON_FOUND OR NOT FLEX_FOUND) set_tests_properties (pyinstallcheck_example_plot_import_export_sample_csv PROPERTIES DISABLED TRUE) + set_tests_properties (pyinstallcheck_example_plot_estimate_gev_fremantle PROPERTIES DISABLED TRUE) + set_tests_properties (pyinstallcheck_example_plot_estimate_gev_pirie PROPERTIES DISABLED TRUE) + set_tests_properties (pyinstallcheck_example_plot_estimate_gev_racetime PROPERTIES DISABLED TRUE) + set_tests_properties (pyinstallcheck_example_plot_estimate_gev_venice PROPERTIES DISABLED TRUE) + set_tests_properties (pyinstallcheck_example_plot_estimate_dependence_wavesurge PROPERTIES DISABLED TRUE) + set_tests_properties (pyinstallcheck_example_plot_estimate_dependence_wind PROPERTIES DISABLED TRUE) + set_tests_properties (pyinstallcheck_example_plot_import_export_sample_csv PROPERTIES DISABLED TRUE) endif () if (Python_VERSION VERSION_LESS 3.8) set_tests_properties (pyinstallcheck_example_plot_enumeratefunction PROPERTIES DISABLED TRUE) From 
bef9a6a9db2b738b5f2c84889e19ed7369adaf8c Mon Sep 17 00:00:00 2001 From: Julien Schueller Date: Wed, 6 Sep 2023 16:13:34 +0200 Subject: [PATCH 25/31] Fix plot_random_generator --- .../numerical_methods/general_methods/plot_random_generator.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/doc/examples/numerical_methods/general_methods/plot_random_generator.py b/python/doc/examples/numerical_methods/general_methods/plot_random_generator.py index 40e62ba92f0..13d533db434 100644 --- a/python/doc/examples/numerical_methods/general_methods/plot_random_generator.py +++ b/python/doc/examples/numerical_methods/general_methods/plot_random_generator.py @@ -32,7 +32,7 @@ # %% # Example 2: using the time in milliseconds # ----------------------------------------- -ot.RandomGenerator.SetSeed(int(1000 * time.time())) +ot.RandomGenerator.SetSeed(int(1000 * time.time() % 1e9)) # %% # Example 3: using a previously saved generator state From afa4c2150ac7b47e7cf203b679061ddd6c1abc09 Mon Sep 17 00:00:00 2001 From: Julien Schueller Date: Mon, 11 Sep 2023 12:24:00 +0200 Subject: [PATCH 26/31] TruncatedDistribution: Document getSimplifiedVersion --- python/src/TruncatedDistribution_doc.i.in | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/python/src/TruncatedDistribution_doc.i.in b/python/src/TruncatedDistribution_doc.i.in index 9a977dda7c1..9d791ba5b65 100644 --- a/python/src/TruncatedDistribution_doc.i.in +++ b/python/src/TruncatedDistribution_doc.i.in @@ -140,3 +140,16 @@ threshold : float, :math:`\tau \in [0, 1]` If :math:`F(upperBound)-F(lowerBound)<\tau`: a CDF inversion is performed. If :math:`F(upperBound)-F(lowerBound)>\tau`: rejection." + +// --------------------------------------------------------------------- + +%feature("docstring") OT::TruncatedDistribution::getSimplifiedVersion +"Accessor to the simplified distribution. + +Drops unneeded truncation if the distribution given as argument is already +truncated by nature (Uniform, etc). 
+ +Returns +------- +simplified : :class:`~openturns.Distribution` + The simplified distribution." From b0234d2710d729ffffc86cd5ddde47a1c6c949f6 Mon Sep 17 00:00:00 2001 From: Julien Schueller Date: Mon, 11 Sep 2023 14:07:31 +0200 Subject: [PATCH 27/31] CI: Update upload_github_io.sh --- .circleci/upload_github_io.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/upload_github_io.sh b/.circleci/upload_github_io.sh index d13ec2336f7..60863c21eae 100755 --- a/.circleci/upload_github_io.sh +++ b/.circleci/upload_github_io.sh @@ -15,7 +15,7 @@ then CIRCLE_BRANCH="${CIRCLE_TAG:1}" fi mkdir -p openturns.github.io/${CIRCLE_PROJECT_REPONAME}/${CIRCLE_BRANCH} -cp -r ~/.local/share/${CIRCLE_PROJECT_REPONAME}/doc/html/* openturns.github.io/${CIRCLE_PROJECT_REPONAME}/${CIRCLE_BRANCH} +cp -r ~/.local/share/doc/${CIRCLE_PROJECT_REPONAME}/html/* openturns.github.io/${CIRCLE_PROJECT_REPONAME}/${CIRCLE_BRANCH} cd openturns.github.io touch .nojekyll git config user.email "sayhi@circleci.com" From 591f5d74ba404a8eab822fd348f64f6ad84e9f69 Mon Sep 17 00:00:00 2001 From: regislebrun Date: Sun, 10 Sep 2023 21:40:26 +0200 Subject: [PATCH 28/31] Fixed PiecewiseLinearEvaluation and PiecewiseHermiteEvaluation Now they can be built with only one value, resulting into constant functions. 
--- lib/src/Base/Func/PiecewiseHermiteEvaluation.cxx | 6 ++++-- lib/src/Base/Func/PiecewiseLinearEvaluation.cxx | 6 ++++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/lib/src/Base/Func/PiecewiseHermiteEvaluation.cxx b/lib/src/Base/Func/PiecewiseHermiteEvaluation.cxx index 509d8d6639e..621cb9baaa6 100644 --- a/lib/src/Base/Func/PiecewiseHermiteEvaluation.cxx +++ b/lib/src/Base/Func/PiecewiseHermiteEvaluation.cxx @@ -121,6 +121,7 @@ Sample PiecewiseHermiteEvaluation::operator () (const Sample & inSample) const { if (inSample.getDimension() != 1) throw InvalidArgumentException(HERE) << "Error: expected an input sample of dimension 1, got dimension=" << inSample.getDimension(); const UnsignedInteger size = inSample.getSize(); + if (values_.getSize() == 1) return Sample(size, values_[0]); const UnsignedInteger dimension = getOutputDimension(); Sample output(size, dimension); const UnsignedInteger iRight = locations_.getSize() - 1; @@ -154,6 +155,7 @@ Sample PiecewiseHermiteEvaluation::operator () (const Sample & inSample) const Point PiecewiseHermiteEvaluation::derivate(const Point & inP) const { if (inP.getDimension() != 1) throw InvalidArgumentException(HERE) << "Error: expected an input point of dimension 1, got dimension=" << inP.getDimension(); + if (values_.getSize() == 1) return Point(values_.getDimension());; const Scalar x = inP[0]; UnsignedInteger iLeft = 0; if (x <= locations_[iLeft]) return values_[iLeft]; @@ -232,7 +234,7 @@ Sample PiecewiseHermiteEvaluation::getDerivatives() const void PiecewiseHermiteEvaluation::setDerivatives(const Sample & derivatives) { const UnsignedInteger size = derivatives.getSize(); - if (!(size >= 2)) throw InvalidArgumentException(HERE) << "Error: there must be at least 2 points to build a piecewise Hermite interpolation function, but size=" << size; + if (!(size >= 1)) throw InvalidArgumentException(HERE) << "Error: there must be at least 1 point to build a piecewise Hermite interpolation function, but size=" 
<< size; if (size != locations_.getSize()) throw InvalidArgumentException(HERE) << "Error: the number of derivatives=" << size << " must match the number of previously set locations=" << locations_.getSize(); derivatives_ = derivatives; } @@ -243,7 +245,7 @@ void PiecewiseHermiteEvaluation::setLocationsValuesAndDerivatives(const Point & const Sample & derivatives) { const UnsignedInteger size = locations.getSize(); - if (!(size >= 2)) throw InvalidArgumentException(HERE) << "Error: there must be at least 2 points to build a piecewise Hermite interpolation function, but size=" << size; + if (!(size >= 1)) throw InvalidArgumentException(HERE) << "Error: there must be at least 1 point to build a piecewise Hermite interpolation function, but size=" << size; if (size != values.getSize()) throw InvalidArgumentException(HERE) << "Error: the number of values=" << values.getSize() << " must match the number of locations=" << size; if (size != derivatives.getSize()) throw InvalidArgumentException(HERE) << "Error: the number of derivatives=" << derivatives.getSize() << " must match the number of locations=" << size; const UnsignedInteger outputDimension = values.getDimension(); diff --git a/lib/src/Base/Func/PiecewiseLinearEvaluation.cxx b/lib/src/Base/Func/PiecewiseLinearEvaluation.cxx index 48fbdd1f8f7..59d79dbfe8a 100644 --- a/lib/src/Base/Func/PiecewiseLinearEvaluation.cxx +++ b/lib/src/Base/Func/PiecewiseLinearEvaluation.cxx @@ -135,6 +135,7 @@ UnsignedInteger PiecewiseLinearEvaluation::FindSegmentIndex(const Point & locati Point PiecewiseLinearEvaluation::operator () (const Point & inP) const { if (inP.getDimension() != 1) throw InvalidArgumentException(HERE) << "Error: expected an input point of dimension 1, got dimension=" << inP.getDimension(); + if (values_.getSize() == 1) return values_[0]; const Scalar x = inP[0]; UnsignedInteger iLeft = 0; if (x <= locations_[iLeft]) @@ -159,6 +160,7 @@ Sample PiecewiseLinearEvaluation::operator () (const Sample & inSample) const 
{ if (inSample.getDimension() != 1) throw InvalidArgumentException(HERE) << "Error: expected an input sample of dimension 1, got dimension=" << inSample.getDimension(); const UnsignedInteger size = inSample.getSize(); + if (values_.getSize() == 1) return Sample(size, values_[0]); const UnsignedInteger dimension = getOutputDimension(); Sample output(size, dimension); const UnsignedInteger maxIndex = locations_.getSize() - 1; @@ -241,7 +243,7 @@ void PiecewiseLinearEvaluation::setValues(const Point & values) void PiecewiseLinearEvaluation::setValues(const Sample & values) { const UnsignedInteger size = values.getSize(); - if (!(size >= 2)) throw InvalidArgumentException(HERE) << "Error: there must be at least 2 points to build a piecewise Hermite interpolation function, but size=" << size; + if (!(size >= 1)) throw InvalidArgumentException(HERE) << "Error: there must be at least 1 point to build a piecewise linear interpolation function, but size=" << size; if (size != locations_.getSize()) throw InvalidArgumentException(HERE) << "Error: the number of values=" << size << " must match the number of previously set locations=" << locations_.getSize(); values_ = values; } @@ -250,7 +252,7 @@ void PiecewiseLinearEvaluation::setLocationsAndValues(const Point & locations, const Sample & values) { const UnsignedInteger size = locations.getSize(); - if (!(size >= 2)) throw InvalidArgumentException(HERE) << "Error: there must be at least 2 points to build a piecewise Hermite interpolation function, but size=" << size; + if (!(size >= 1)) throw InvalidArgumentException(HERE) << "Error: there must be at least 1 point to build a piecewise linear interpolation function, but size=" << size; if (size != values.getSize()) throw InvalidArgumentException(HERE) << "Error: the number of values=" << values.getSize() << " must match the number of locations=" << size; // Sort the data in increasing order according to the locations Collection< std::pair > locationAndIndex(size); From 
59049bc0fbf7d4d60f0615f5b6fd37828548bc89 Mon Sep 17 00:00:00 2001 From: m-balesdent Date: Mon, 18 Sep 2023 20:37:16 +0200 Subject: [PATCH 29/31] Update use_case_beam.rst * Update use_case_beam.rst This PR corrects the Probability symbol in the doc. * Update use_case_beam.rst --- python/doc/usecases/use_case_beam.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/doc/usecases/use_case_beam.rst b/python/doc/usecases/use_case_beam.rst index 7e4020c604b..a9bf330ec08 100644 --- a/python/doc/usecases/use_case_beam.rst +++ b/python/doc/usecases/use_case_beam.rst @@ -59,7 +59,7 @@ F Normal( :math:`\mu_F=750` , :math:`\sigma_F=50`) [N] The failure probability is: -.. math:: P_f = \text{Prob}(G(R,F) \leq 0). +.. math:: P_f = \Prob{G(R,F) \leq 0}. The exact :math:`P_f` is From b669e117c30d2cb068d0458da961cc0013777480 Mon Sep 17 00:00:00 2001 From: m-balesdent Date: Mon, 18 Sep 2023 20:37:42 +0200 Subject: [PATCH 30/31] Update use_case_flood_model.rst * Update use_case_flood_model.rst This PR fixes the probability symbol in the doc. * Update use_case_flood_model.rst --- python/doc/usecases/use_case_flood_model.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/doc/usecases/use_case_flood_model.rst b/python/doc/usecases/use_case_flood_model.rst index 17859fe4b52..5a2495488a7 100644 --- a/python/doc/usecases/use_case_flood_model.rst +++ b/python/doc/usecases/use_case_flood_model.rst @@ -95,7 +95,7 @@ Moreover, we assume that the input random variables are independent. We want to estimate the flood probability: .. math:: - P_f = P(S > 0). + P_f = \Prob{S > 0}. Analysis of the calibration problem ----------------------------------- From 31fc1f66f383304a354eafd6bb674ce0dc1c9260 Mon Sep 17 00:00:00 2001 From: m-balesdent Date: Tue, 19 Sep 2023 09:20:51 +0200 Subject: [PATCH 31/31] New use case : two-degree-of-freedom oscillator * Proposal of new use case that is a non linear two stage oscillator. 
Proposal of examples of FORM / SORM to explain the refinement proposed by SORM with illustration on non linear limit state function. * modification of white spaces * Modification of lambda function. * modification of white spaces * modification of line too long * Line too long * modification allows one to * add test on oscillator * modification of constant to int and float * Modification of attribute class in the doc * Update python/doc/examples/reliability_sensitivity/reliability/plot_estimate_probability_form_oscillator.py Co-authored-by: JPelamatti * Update python/doc/examples/reliability_sensitivity/reliability/plot_estimate_probability_form_oscillator.py Co-authored-by: JPelamatti * Update python/doc/examples/reliability_sensitivity/reliability/plot_estimate_probability_form_oscillator.py Co-authored-by: JPelamatti * Update python/src/usecases/oscillator.py Co-authored-by: JPelamatti * add blank spaces in the formula of oscillator * add underline * add svg file and test * modification of test * modification of test * fixe test * modif oscillator * Update plot_estimate_probability_form_oscillator.py * Modification of graphs according to Vincent's review. 
* modification of "non-linear" in the doc * set randomgenerator seed * Fix graph legend * Update python/doc/examples/reliability_sensitivity/reliability/plot_estimate_probability_form_oscillator.py Co-authored-by: Michael Baudin <31351465+mbaudin47@users.noreply.github.com> * Update use_case_oscillator.rst --------- Co-authored-by: JPelamatti Co-authored-by: Michael Baudin <31351465+mbaudin47@users.noreply.github.com> --- python/doc/_static/oscillator.png | Bin 0 -> 10958 bytes python/doc/_static/oscillator.svg | 406 ++++++++++++++++ ...ot_estimate_probability_form_oscillator.py | 438 ++++++++++++++++++ python/doc/usecases/use_case_oscillator.rst | 99 ++++ python/doc/usecases/usecases.rst | 1 + python/doc/user_manual/usecases.rst | 2 +- python/src/usecases/oscillator.py | 219 +++++++++ python/test/CMakeLists.txt | 1 + python/test/t_oscillator.py | 33 ++ 9 files changed, 1198 insertions(+), 1 deletion(-) create mode 100644 python/doc/_static/oscillator.png create mode 100644 python/doc/_static/oscillator.svg create mode 100644 python/doc/examples/reliability_sensitivity/reliability/plot_estimate_probability_form_oscillator.py create mode 100644 python/doc/usecases/use_case_oscillator.rst create mode 100644 python/src/usecases/oscillator.py create mode 100755 python/test/t_oscillator.py diff --git a/python/doc/_static/oscillator.png b/python/doc/_static/oscillator.png new file mode 100644 index 0000000000000000000000000000000000000000..084e96597323a4b35f0ab7a543c2c52a912dae8f GIT binary patch literal 10958 zcmXY11z1zx|K1qgN;5(P6zT3xl~zESAp!#gL28m4h#&|Ubh8Q4H9(pv62jU(5QvBi_>Chc1wQrR?#jRy zv9IPmGjiY+LjEKM_)p<|*UA?JqW$*o4NexKg#riJ{B$h+9zJvN3v_($3bW0lKHHVTQ=snh#7f`+ah?dn6i(yb04Tm^RrYkCkw+d((BD z!6}g@_;pdW9;Ps~-IT$mq*!)SrK{+>F+=D39(El@aSIct&Il$Gv);?gW4zBR$6f`j z>k2;Wq@}{6($RH*gX{3|z`|H9=aWlq`)b|RK-cs zve#npc)YOrDP$Q{tjA;M6PvcT1wi9Zga*%eioTC1rf41@zB;nFRMj|mN8DzIf zz_Rtyaz+Ah0{`HX<iFUprP5=N-NV zOqUFjx@6jte3NXBRJiD;GB1)B>4Y+Ng4+4cFqdzyCvHCt&;VxxbKOUEiZEk%x=(bd 
z9jaUpcFAw-81|v5pxC0ZrP(Ll26g?oX$#EsU>$?Xnk88sMK}|aktorjPO1GRt#E=* zNdGVGv7k0NZ-tDn&xagcTvBd77 zhIK*<)q7BMXSNjg5GP%G)aT|yJA^NL*@oMKf8VmKMP*0QmX!?>w@($2F0CFU}A9o!R2GFK!mf4Q#Ne36klYTYC~@_gotK+hV{0*T@u zWl;~G4Gd_Rv$pp)7b8;J>gH?-s{6Y@upe9F(sB#sj-A-f)I%zJXaxJ`P~tNBWkhB& zgiim1E`_oz-{j|Yot7`3#)p64c-Pbx4aT&eCJPSwla@mfpjvSIEf$k=;WJwajip`H zKLVC(a?_J^2|g|Ty{PDdd%VP44jPj=6`P2X)|E`16b=2y4!Sz;j?h;zrtun4pQ7`c z_btXrvR>9OscEObX)9{KS3It2Y}z9FYj|_HC#@UqQtm0X#4cSP7I=78)SQ%QE)vOZ zYYt2;*`Z#jvMe}9qtESgjHh4!h8|8?dj7fesRFmo?h?k#w^|OHc;UpIonBXRF;p7_ z^}a*!l&+iG>de(f!M9)aS&kq?-_fk47a`II ze_7py!tcLxl2yMmsOeObxNvO>|MM@^(%`h()w`ci+YEle9xps(1tN#(|8<|qpnj$# zWxK~={JHHH4c_vY@=jaz_7)gI z`gP?#y5#?ww~{C*)wjWjIU;wv)XSmnbX>&nb@^BVH;oPLOr zx?Jj3(stS$%Q19Db(su)$~tEG+;#3*&&{4Z;m%xY1 zeDLK}UNN&2)|w*aN=#iYeNkstjnU?^{SJY|?}Hga%g@bF&#KjYc;V56xpy@+cI1oK^@ zT>|@!&h#j`sm0dURCQGe5#o!Ux$Eg@%RzGU*O76k;ps^`TXy@)?OKbRIDHYr_X48v z9fE5?YMHi<>)kjplqy9+X%gG<=?r{;xG7eqR?tO!M(URU%rI$Qp7uf${+oGyewV11 zv0p#vhKmbNOH>$H2y`6dnOOKF!cfxqz88GLptrnzV9={2qRD$9^0LE|)HWhAVcQZJ z@KShOavooHO+@nm$&%-mN&?@Y!J5Q6E58Df<=+jU0fu01Na@Py1G3i)D|JhB%IT_& zy4OsnaMBGzk3)J5nV{w=1HoplW)@#IzD1iwG8Yqey9MR-pc}&sVt5bDkjbPbgs-U$ z!p!nRK;BS6u6RL%z=u*&9-_uzsqb6$oCq5EYx5s7qTXxtJ+9Q%PXN*}M<1v;-??sf zbS}P4dgI~T8$w#>2Tjqc~l!Ke(j&HOvb**B+x7b==P-_ zCH}B=adCMq#De(cDwm_9|5QxRurffs)Sh`F273>4wVKBwYM`Z<%`Sa^Hmi$$t>l?-4JdcKNH{{rW+gii$l8`leO z>civ9it8{mVGG*X_tex&9{?yr=-QxM(n^!m4oMdBG(|Zo?E5pFgR56zxSq`umyy;R zE>8nER)2>+b8(p%n=!>y`3}u&-q@&?|EiFpN7KVaVx$qOlF^wD@9Y_@SqwxQbi@b_ zekLi|SR=j#cG&KkQba1?>;yo5*#Op&X$?r4Za}VOCdX^o@C_1$MOxI`ej8~$hnmwd z3aEh2mCokcYZAv@SQL>b@bFBdB(QZkboEqj=B?XxT4ew%kf0BD*P?5niI9gTi*dPm zPrs|;-dO5AvRTwlkKfzN369@(X6YjOV9MyvJ^;SQM7>m!qfiX#?XhhxL$bno*=d5FP7%+51o=YPm)c^4bwUJ}?f+FK#FfBT!3J5(iXF# z6mGP;z-Nqh?li?xaZVq#r5@SdK4rTyez{kR{gq^R*(j^Qa$O25-uBvk?-Z?4CpgV_ zCNKE4+GkVWS>z*U1NHdLbB_7)@M}}_&T9ZbK5D~SzNKFJmx#gKA8`yI6cZoyjR_UNLlN*u&%iO zUvf&p-tm`Z)YKYs+A5|gbHYhQp~|HBn_UKDKm6o-Q>eiSnk2>X?)2U_1hYchzgGV< z@Y2=r{A;7FTJ4u(bwDhy#%1ZICY-20Cs6E=;XE9l!eB 
zSuTM$bw160Z@BaJT0@Ww!M*3m;HGVW_2>6MFLd#ctyE-(5sFf+PP}jUXC#UDr@riD#-T7lb z#CTh7KMmlS>aRDe0yTOSC0m^P<*D8&upVrOC`WH73~((3G?xA)C?Sbl+Pq8RVx3; zIEl^bu9GZBY3=)hp{I-ofuMFF-#JU^+f7Ayi4$m7GNtk?l0mZc*N-DsKdq{4ClQZdZzMii9~! z(<%#pTsF0?n4vne#x>%gMJE7*g?bz6sfPllg#}}_F9KUaJUlLZ51`*bBF#8SEXot=~WR>dM?nLlBMNzv>Q{x z5c>9!*zRen<#U$l=eJDTWHFI*?itTr&^N^HXLq{^jQ*V@-;uzs_=*e0h@~iG=Y*D9 z4#p1)H^TC40go|L`uq6sEOE&HJ4qq){V`Q8-l2qwN+LOz+*uY8unMG~xA5L2<2{Kl zqBS41vPu~)&Ph`hCV5{{fiRmv%`zog3f#^g@xraNx|8Lru`SHR%u!%XF|+U#hdx>8 z{1Rg+1w};Cxpl1@e*&<|EBwo|PZXfDUbkhU*DqwkTT%O|eGg03xqU;?gvTq5XzoVL z^z~!kFCHqUfumKWUWzD{;T+NA!g?dUJ7%9a`q{OhO+GQrrCuC{X7E4Q& zmIoHwUAye(&vBqu{xb4K+tJYt#_nD@6gS5hRF$h=?KEp*vZwy8J=nvq^%N>zy1BND z4a~meiYpTTmq+kKqGXrkEw+pKGH;A(mCMDO#jlrZQ$U6N^2fzGBhqO7Oc4z|9-Rg{ z^F&Gl+=#4%ZB^lf%FMNdGmw{h3bRB$!#aIosO% z>1A&US?qahbe_jV!={r;NP$s!q7LI2qAc27&YsMxNA1d4N}gxeWHvNvTSjp zHyj~!HQioB`yz-oh(Fc&jbAUA(M|84lN5TO$5wZW7Rl!E?6ZC;3(@>V(*kASKDt{( zn4yDDRnLZ@I2*>wJxki-Tki8q9`A-}=U$_CT8Fl4I*^=0_frKP3Wg2~Cf52_24~p@ z@HkY(ZO&dBkfCr+mmLhQh__v?61bJ08(6$m6uF-KT;(pltd5bC!4rPJ-txn@bm3My z{i@u)+d2tib=T$>1g08GdORi~W`;wSXyNVee650%4)4p7h-yy1fA+pj*H|MU-#hk+ z@Ni_5-jJ=^W#fcKwgTpfQ4iN5)p#_c^)(Tn9AwF8KVCgO_B_<`73R+J8AYKSA@ZAB&jbBgH#43_t22(_UN|3gv-A2B7R4dPu03-gn4RB5Y!#Ol?~og4 zhHCuXa8J+`@(wfotL)nv6_81F(R7zVm zGG!`d)^rkY{m4<=blZBDxM`g-AvAl0Vc}<;r|KU(C+oQxxt2^=gfzwEt$0ch<{oB1)wr9hFJ?{V&uOpUgb2 z11lpf{po@Au10*BjDglPZBd9w<}2CHn{0nAhYnY_=NHNU99U{_zTI~$Em;AmF)1)Z?e@FIJvq|-neRv* zveKofl+>)6mPjS|k~mQoc3RhwcH_TN6+Zg<#4FM%Zfo@cT(l9n>>i?0a}#Y4SBuG} ze5R{%dqDFcX(sy3cm9vq7nW_Xrr-H#0|#sbxZL2L^i0b()AO^KgV$yyJIFaz zVPpD=;a}2>-nMr0L&95>fuBTN=u3>a5@$A4NjaiN7P-g0;52STGiz`o>QzNn{}Z9Q zFx9e70s1Bu%b|GnON`ACmg}zV1wFNI8N5BI2bN6m7(Y=g& zJL(ReaZ#n+!#emJ3+41lV+l<$0U~kFu`-tCeP@gD1!*^T1^ z1qjh`JbMS`_$Vs*o#Xe-E-rZgK|fsS;K9XNdA0nSI97MpVgypvm7+oOsVk2kO$H9n zZ@M`wgUY`9jGInMN@}B!`Va5!&E(X3Ors1_OwRiCdYy%$-qOH&6Fq|prw3?9=&=NW zC?my<>`+Vp>F8W{2SB?v@!+4&;3SJgju2zz`-ZhfXV;(i6&fY@`?*ZmT_AT{uR@pz 
z)TbQWF-<06uTs=5Z=PAhWR_T;Hwz4E3eAdQf<-R-t@{a^%Sv#66Vj<`p_h`l54c6E zCcWMnIP|?n5$|y}uCI>U%ZU}76;1hy8GMwdq8r4laQNgor$YbFpDF9(P|b?@gY92U z_n5=_^+pTwn5Vf@r0@6b>#Sk-JYiI|)viBQ|ET>s99WO^y3WkR zvB%aQ?i+r~nOSRJ^Gm0S0A(;I3Acxtd@+Qsl!T0HypK+qOE-5_2xVuo^4D+dq@F4? zKu)91dN6NuWde6uq+#cs!~5ri?k*1**TFCx)&0y=|3m>-j{CiKrAcTEy528rfDgM8 z=oMc`2&y3N4#l)}B$^N7oa`83E&cDOX~ix4c|sM?e=wuD`899+DBcZ{o0C9uU7DmC|b&JVv)rTd4W zEAy9}4g`(<;5yvDp!5#@LK4H1NHthJQ;GWbWC<99-|}q$0ZC|U*>{3kfA;0QnFIB z4CAjjC~T>eX>1{L$PE5ax$1Z6b+3Hj`8~>J+v32INnYZ+M)HHSUGf?z*%OCmh&+v} z9KNjPgXvzioe4qW`Y}_+`|K!-!E~z@BaoxoGOUKf)MYCRCP9UO6$0{g7U~f52_z z%vLLlpjpx#8?VZ!c(iv?s*kfv9)qyak47*>A z-|8?hM{e6(0s&Ok3Zzk370%;ux}-OJfV3D{dBnl=Yu2tPW!2b zdgi)PtZW`cUf`?_4{Th@ZZNcHh{i z3*lnQFw%%B3f)WCwHL9VS|}~3=6q8@(Js1sx`VA`@d_C)X?dg{D&1=@av%wsqLMkF zJ`78b4QTOX%-tD(V5C%~;w?p>oM~CHH~Ut@=1uW?2aPZD4{7Z2)wp13XzM6TQ3ZKv zQ0Yt%UB3QPQh)ckNoNwJGAY2a8JMxka`bc3Xsk~ajY_<{9(v~83u!nn zFP1UwwW{w?{+;yz7<7r;EbajIm{*7=vWArmtcP)~O9YsLI!1>aCl?F+R?*eq^jZ*+ z;9V82#g?@3`_Ig@t=DvMQy9?soW)ij8E^$hq?pR&YU$qrjI|>r3C>E{%xkZOBo!w1Fis`=0fEN3W#b$EX4RH) zxe=%f84r@&-qiuCA}!PCku#Igs#%l!GM=iW0e1mH&Bev-!5xAn=@gB#T)kHIIG~|* z(PZNdDh9#;h0{!sU`Zygd{%d}*K1Qh2nz+;TacZg^u)|C*Ph5NsCz_;eT@$X1`#G@ zgW1&z{w5#$A>8A29z|*w>jX6JzZ0oQQ$cK?Zg=0I_@{EKDR&~q=f5H+!~@xUQ)tfo zX!mls$ZZM?NSNuzuYgNj+(v0zjB4{Ra#eCO%3gv5^CerD%-CQDFwMZ|X28Qp;J&~e zQhyJ%Q7eIti0pRh0HyM=G`QT7+C#MJVSLwPk+P|PrjyfrbJ0ZbLu4g*6$0c4cJBG1r3jrbh9N3AZy z{Rv&n2AshbJVA1zj!F^Fpj9-zDVr1_q{<{^Op{9hrMtqV7tb4L9CTxXedaZnUm23p zI9GjZd=hK~dWCk&(D|q5;oVEJdxbdqA2ZY^0!dtc8wL}r)!K^3@d#M|8@aUT!IA_y z35y>3PEt4W19AiPqK@;Qx?Nycjl*zGF&lo`qz$2-dnw5|s+oo5{&O^p*VUG1PDM}ypsZ`z}_)y7I3D{ylsrGL#0Ef*t8QjZrc6sGr)!d`! 
zR|~pYXpSj5>ptE`M4-pSduq&6T5DBtmR_tFJm)K5v=(a`)wHLh-r#m!6$ z1u?bpKRS~8hm|DZ(bFg9ZGkLih$de5QdOEMf%@TpYJN#AAlfAF(%aFPvIC`+hh0 zbN;Qz0KY!hVmAK=pn$8XtDx5FHJpT7-Xz`Rdueu zl=OR*k#WQ-W{0s)E@j|~acuYKzT$;nelpE1NY>)wt`Ejq0ZPh-*=OPa1!*QNwbIyh>b{P<2?Por;{jCQ77(Z^?gJ9r`Ryc5-h<} zPM>C>N(`;$)|pjX)!6R^g12>3l*ub#&4p;0JAF}@r(p3&|J)6z>AhcEx@5z zL+L!9Nzek+{an@N(KE;+erZt;iB6(K4S@#v>y}i`az;cI-BB1I?ZJ7a`BjOx#Ofpo z?eLkF+XONWgk?7IKkBh#eN$y)MXuW2DKDOV=->O5E=Lixpixk2rB0g=yL zQs5$hu}V>#;+UCm8y3d23Z-8hVt)Y5fy!W4_8i7IY8Vg<4Rl+#!cnx`pwW_(YM|Sth7u zs(+Ab!VIJImWnMLb>VQYm`J$@(^*;-5E>)NN*ijh?(|rKPaC z{(=Nozj;Z0+eVxd8%%DjdwB!*w07>#^`*`ky7K%Gq7aaBjK!(&{Q!9vm;C*klgGnl zJd9Mzx*$DJG#CQx&*AWfy5F3Lr6tqF{InwDbeRyYhZI7d@u|>b)d4L|vr}(*IDAYF zB{XEcVEYMX=pba46azU#^ zJ3uc|*ce?95WVEs5H_6CG({xBTR_Rh(ifL7uCh0M21~*%Znff*)?5NJTK{#W!3xK6Se!{USt8FZT@VU z4k9R#@og1yDXgGMY<+M89YXL&_1LW1@X9Br_gpa|j*kJmzJSp!-AI~Gqp(FeeZ6nJB9uPSEP_$bl^A1RIM$%0R-}N^iqi5d7&p6Zm)c=y0(@%!oG{5AU2| zM6&~j*r($)`yg>{<>A=$fBij`*kU6sPL)VXZdJ$!jvQSCj>C{MXp*_xL1ZH=^)+l+h$C4R2 z@>KpxTbY3gqSAKQ&{k5<^<<5x{AhIcqh}gQ-Vy#6wE}jLp)%RDCZ$ti3@50<1lRnsO z02kcJ)zh)wL5o)Qp^MXuk-^Bgp<07GY<}HK0UIt8@TQ@X)E7ljI8E5<<`HW4I&13m zgRZ=kKiuHG;r9hVM%gfL&ANH8f&iZy3h1f^lF_SJ4--c!9r$sM;&9K^4? 
z%HnVk*xDm(EtXqJ<#KPbR$)_*xrH2Mb@xm0jXiw4J@`*Fh zfsZ1!OyXuFmWw&^Myo8jA>`62KdGW>Q z9mGRYQ40HqMz8-Xr3hX%#irGj`95g_=|OQ?=EYtCJcz%93$Ea4eTqn`Q8;~p+GY+* z*e0bs5Zb-GobEo6NCeVZ^R~|D@4$S}6CQW*0A=^qCFi;cPqx}Of4GZYOx-rmaxw>& z5qA}+3sy%qUEx+w)oQQ41oJ1@w13-!KH;9H0hprxt)kR!s~3YL55L_WYRWDv2mhLk z5LP$r5Y-L5EVs1H5EBr!>lul%^g!oMLX*V))bKYpjE#J}7OOAB7Lzhu5$&#ofBewW z*8022*ym-Pv1(s=Z((ubt-X;y8hULA)A1tJ_~*Zew4qdTg`7_G5u>Z z^}mgEM2Xkn&b^LBMQf^K*u`m@{~>1*mS%L91xvn*WhI}20QRPZI@CH{rr83n1hbex zz+BC=XY5*s#bPY#*{kFl4VTN#25x<}N^U<-epLyRv&5$CU4grIk@fda?@R!DPeJ!| LjI=8>VG;ial^)0# literal 0 HcmV?d00001 diff --git a/python/doc/_static/oscillator.svg b/python/doc/_static/oscillator.svg new file mode 100644 index 00000000000..a7f86e6e8c5 --- /dev/null +++ b/python/doc/_static/oscillator.svg @@ -0,0 +1,406 @@ + + + + + + + + + + + + image/svg+xml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + m + p + m + s + k + p + k + s + z + z + p + s + + diff --git a/python/doc/examples/reliability_sensitivity/reliability/plot_estimate_probability_form_oscillator.py b/python/doc/examples/reliability_sensitivity/reliability/plot_estimate_probability_form_oscillator.py new file mode 100644 index 00000000000..810518a47ac --- /dev/null +++ b/python/doc/examples/reliability_sensitivity/reliability/plot_estimate_probability_form_oscillator.py @@ -0,0 +1,438 @@ +""" +Using the FORM - SORM algorithms on a nonlinear function +========================================================= +""" +# %% +# In this example, we estimate a failure probability with the `FORM` and `SORM` algorithms on the :ref:`oscillator ` example. +# This test-case is highly non linear with a significant curvature near the design point. 
+ +# %% +# Model definition +# ----------------- + +# %% +from openturns.usecases import oscillator +import openturns as ot +from matplotlib import pylab as plt +import numpy as np + +ot.Log.Show(ot.Log.NONE) +ot.RandomGenerator.SetSeed(1) + +# %% +# We load the model from the usecases module: +osc = oscillator.Oscillator() + +# %% +# We use the input parameters distribution from the data class: +distribution = osc.distribution + + +# %% +# We define the model: +model = osc.model + +# %% +# We create the event whose probability we want to estimate. + +# %% +vect = ot.RandomVector(distribution) +G = ot.CompositeRandomVector(model, vect) +event = ot.ThresholdEvent(G, ot.Less(), 0.0) +event.setName("failure") + +# %% +# Cross cuts in the physical space +# --------------------------------- + +# %% +# Let’s have a look on 2D cross cuts of the limit state function. For each 2D cross cut, the other variables are fixed to the input distribution mean values. +# This graph allows one to have a first idea of the variations of the function in pairs of dimensions. The colors of each contour plot are comparable. +# The number of contour levels are related to the amount of variation of the function in the corresponding coordinates. 
+ + +fig = plt.figure(figsize=(12, 12)) +lowerBound = [1e-5] * 8 +upperBound = distribution.getRange().getUpperBound() + +# Definition of number of meshes in x and y axes for the 2D cross cut plots +nX = 50 +nY = 50 +for i in range(distribution.getDimension()): + for j in range(i): + crossCutIndices = [] + crossCutReferencePoint = [] + for k in range(distribution.getDimension()): + if k != i and k != j: + crossCutIndices.append(k) + # Definition of the reference point + crossCutReferencePoint.append(distribution.getMean()[k]) + # Definition of 2D cross cut function + crossCutFunction = ot.ParametricFunction( + model, crossCutIndices, crossCutReferencePoint + ) + crossCutLowerBound = [lowerBound[j], lowerBound[i]] + crossCutUpperBound = [upperBound[j], upperBound[i]] + # Definition of the mesh + inputData = ot.Box([nX, nY]).generate() + inputData *= ot.Point(crossCutUpperBound) - ot.Point(crossCutLowerBound) + inputData += ot.Point(crossCutLowerBound) + meshX = np.array(inputData)[:, 0].reshape(nX + 2, nY + 2) + meshY = np.array(inputData)[:, 1].reshape(nX + 2, nY + 2) + data = crossCutFunction(inputData) + meshZ = np.array(data).reshape(nX + 2, nY + 2) + levels = [(150 + 3 * i) for i in range(101)] + + # Creation of the contour + index = 1 + i * distribution.getDimension() + j + + ax = fig.add_subplot( + distribution.getDimension(), distribution.getDimension(), index + ) + ax.pcolormesh(meshX, meshY, meshZ, cmap="hsv", vmin=-5, vmax=50, shading="auto") + ax.set_xticks([]) + ax.set_yticks([]) + # Creation of axes title + if j == 0: + ax.set_ylabel(distribution.getDescription()[i]) + if i == 7: + ax.set_xlabel(distribution.getDescription()[j]) + +# %% +# Computation of reference probability with Monte-Carlo simulation +# ---------------------------------------------------------------- + +# %% +# The target probability is supposed to be extremely low (:math:`3.78\times 10^{-7}`). 
+# Indeed, when performing a Monte-Carlo simulation with a budget of 100000 points, no sample is in the failure state, which induces a probability estimate of zero.
+ + +# %% + +distributionStandard = ot.Normal(distribution.getDimension()) +inverseIsoProbabilistic = distribution.getInverseIsoProbabilisticTransformation() +standardSpaceLimitState = ot.ComposedFunction(model, inverseIsoProbabilistic) +standardSpaceLimitStateFunction = ot.PythonFunction(8, 1, standardSpaceLimitState) + +fig = plt.figure(figsize=(12, 12)) +lowerBound = [-4] * 8 +upperBound = [4] * 8 + +# sphinx_gallery_thumbnail_number = 2 + +# Definition of number of meshes in x and y axes for the 2D cross cut plots +nX = 50 +nY = 50 +my_labels = { + "MPP": "Design Point", + "O": "Origin in Standard Space", + "TLSF": "True Limit State Function", + "ALSF": "Approximated Limit State Function", +} +for i in range(distribution.getDimension()): + for j in range(i): + + crossCutIndices = [] + crossCutReferencePoint = [] + for k in range(distribution.getDimension()): + if k != i and k != j: + crossCutIndices.append(k) + # Definition of the reference point + crossCutReferencePoint.append(designPointStandardSpace[k]) + # Definition of 2D cross cut function + crossCutFunction = ot.ParametricFunction( + standardSpaceLimitStateFunction, crossCutIndices, crossCutReferencePoint + ) + + crossCutLowerBound = [ + lowerBound[j] + designPointStandardSpace[j], + lowerBound[i] + designPointStandardSpace[i], + ] + crossCutUpperBound = [ + upperBound[j] + designPointStandardSpace[j], + upperBound[i] + designPointStandardSpace[i], + ] + # Definition of the mesh + inputData = ot.Box([nX, nY]).generate() + inputData *= ot.Point(crossCutUpperBound) - ot.Point(crossCutLowerBound) + inputData += ot.Point(crossCutLowerBound) + meshX = np.array(inputData)[:, 0].reshape(nX + 2, nY + 2) + meshY = np.array(inputData)[:, 1].reshape(nX + 2, nY + 2) + data = crossCutFunction(inputData) + meshZ = np.array(data).reshape(nX + 2, nY + 2) + levels = [(150 + 3 * i) for i in range(101)] + + # Creation of the contour + index = 1 + i * distribution.getDimension() + j + + ax = fig.add_subplot( + 
distribution.getDimension(), distribution.getDimension(), index + ) + + graph = ot.Graph() + + ax.pcolormesh(meshX, meshY, meshZ, cmap="hsv", vmin=-5, vmax=50, shading="auto") + + cs0 = ax.plot( + designPointStandardSpace[j], + designPointStandardSpace[i], + "o", + label=my_labels["MPP"], + ) + cs1 = ax.plot(0.0, 0.0, "rs", label=my_labels["O"]) + cs2 = ax.contour(meshX, meshY, meshZ, [0.0]) + + ax.set_xticks([]) + ax.set_yticks([]) + + u0 = [designPointStandardSpace[j], designPointStandardSpace[i]] + algo = ot.LinearTaylor(u0, crossCutFunction) + algo.run() + responseSurface = algo.getMetaModel() + data2 = responseSurface(inputData) + meshZ2 = np.array(data2).reshape(nX + 2, nY + 2) + + cs3 = ax.contour(meshX, meshY, meshZ2, [0.0], linestyles="dotted") + + # Creation of axes title + if j == 0: + ax.set_ylabel(distribution.getDescription()[i]) + if i == 7: + ax.set_xlabel(distribution.getDescription()[j]) + + if i == 1 and j == 0: + cs2.collections[0].set_label(my_labels["TLSF"]) + h2, l2 = cs2.legend_elements() + cs3.collections[0].set_label(my_labels["ALSF"]) + h3, l3 = cs3.legend_elements() + lg = ax.legend( + [h2[0], h3[0]], + [my_labels["TLSF"], my_labels["ALSF"]], + frameon=False, + loc="upper center", + bbox_to_anchor=(8, -1.5), + ) + elif i == 2 and j == 0: + lg1 = ax.legend( + frameon=False, loc="upper center", bbox_to_anchor=(7.65, -0.8) + ) + +# %% +# As it can be seen, the curvature of the limit state function near the design point is significant. In that way, FORM provides poor estimate since it linearly approximates the limit state function. +# Thus, SORM can be used in order to refine this probability estimation by approximating the limit state function with a quadratic model. 
+ +# %% +# SORM Analysis +# ------------- + +# %% +# Run SORM initialized at the mean of the distribution: + + +# %% +algoSORM = ot.SORM(ot.Cobyla(), event, distribution.getMean()) +algoSORM.run() + +# %% +# Analysis of SORM results: + +# %% +resultSORM = algoSORM.getResult() +print( + "Probability estimate with SORM (Breitung correction) =", + resultSORM.getEventProbabilityBreitung(), +) + +print( + "Probability estimate with SORM (Hohenbichler correction) =", + resultSORM.getEventProbabilityHohenbichler(), +) + +optim_res = resultSORM.getOptimizationResult() +print("Simulation budget:", optim_res.getEvaluationNumber()) + +# %% +# One can see that the probability estimate has been decreased by a factor 10 compared to the FORM estimate. +# This probability is quite close to the reference probability and obtained with less than 1000 evaluations of the model. + +# %% +# In order to visualize the SORM limit state approximation, we can draw cross cuts of SORM oscultating parabola using second order Taylor approximation. 
+ +fig = plt.figure(figsize=(12, 12)) +lowerBound = [-4] * 8 +upperBound = [4] * 8 + +# Definition of number of meshes in x and y axes for the 2D cross cut plots +nX = 50 +nY = 50 + +my_labels = { + "MPP": "Design Point", + "O": "Origin in Standard Space", + "TLSF": "True Limit State Function", + "ALSF": "Approximated Limit State Function", +} + +for i in range(distribution.getDimension()): + for j in range(i): + + crossCutIndices = [] + crossCutReferencePoint = [] + for k in range(distribution.getDimension()): + if k != i and k != j: + crossCutIndices.append(k) + # Definition of the reference point + crossCutReferencePoint.append(designPointStandardSpace[k]) + # Definition of 2D cross cut function + crossCutFunction = ot.ParametricFunction( + standardSpaceLimitStateFunction, crossCutIndices, crossCutReferencePoint + ) + + crossCutLowerBound = [ + lowerBound[j] + designPointStandardSpace[j], + lowerBound[i] + designPointStandardSpace[i], + ] + crossCutUpperBound = [ + upperBound[j] + designPointStandardSpace[j], + upperBound[i] + designPointStandardSpace[i], + ] + # Definition of the mesh + inputData = ot.Box([nX, nY]).generate() + inputData *= ot.Point(crossCutUpperBound) - ot.Point(crossCutLowerBound) + inputData += ot.Point(crossCutLowerBound) + meshX = np.array(inputData)[:, 0].reshape(nX + 2, nY + 2) + meshY = np.array(inputData)[:, 1].reshape(nX + 2, nY + 2) + data = crossCutFunction(inputData) + meshZ = np.array(data).reshape(nX + 2, nY + 2) + levels = [(150 + 3 * i) for i in range(101)] + + # Creation of the contour + index = 1 + i * distribution.getDimension() + j + + ax = fig.add_subplot( + distribution.getDimension(), distribution.getDimension(), index + ) + + graph = ot.Graph() + ax.pcolormesh(meshX, meshY, meshZ, cmap="hsv", vmin=-5, vmax=50, shading="auto") + cs0 = ax.plot( + designPointStandardSpace[j], + designPointStandardSpace[i], + "o", + label=my_labels["MPP"], + ) + cs1 = ax.plot(0.0, 0.0, "rs", label=my_labels["O"]) + cs2 = ax.contour(meshX, 
meshY, meshZ, [0.0]) + ax.set_xticks([]) + ax.set_yticks([]) + + u0 = [designPointStandardSpace[j], designPointStandardSpace[i]] + algo = ot.QuadraticTaylor(u0, crossCutFunction) + algo.run() + responseSurface = algo.getMetaModel() + data2 = responseSurface(inputData) + meshZ2 = np.array(data2).reshape(nX + 2, nY + 2) + cs3 = ax.contour(meshX, meshY, meshZ2, [0.0], linestyles="dotted") + + # Creation of axes title + if j == 0: + ax.set_ylabel(distribution.getDescription()[i]) + if i == 7: + ax.set_xlabel(distribution.getDescription()[j]) + + if i == 1 and j == 0: + cs2.collections[0].set_label(my_labels["TLSF"]) + h2, l2 = cs2.legend_elements() + cs3.collections[0].set_label(my_labels["ALSF"]) + h3, l3 = cs3.legend_elements() + lg = ax.legend( + [h2[0], h3[0]], + [my_labels["TLSF"], my_labels["ALSF"]], + frameon=False, + loc="upper center", + bbox_to_anchor=(8, -1.5), + ) + elif i == 2 and j == 0: + lg1 = ax.legend( + frameon=False, loc="upper center", bbox_to_anchor=(7.65, -0.8) + ) +# %% +# We can see that this approximation is very appropriate, explaining the accuracy of the obtained results. + +# %% +# Estimation with post analytical Importance Sampling +# --------------------------------------------------- + +# %% +# Different algorithms exist for the reliability analysis by Importance Sampling. +# One way is to perform post analytical Importance Sampling by defining the auxiliary density centered at the design point found by FORM. + + +# %% +postAnalyticalIS = ot.PostAnalyticalImportanceSampling(result) +postAnalyticalIS.setMaximumCoefficientOfVariation(0.05) +postAnalyticalIS.run() +resultPostAnalyticalIS = postAnalyticalIS.getResult() +print( + "Probability of failure with post analytical IS = ", + resultPostAnalyticalIS.getProbabilityEstimate(), +) + +# %% +# We can see that this post-treatment of FORM result allows one to greatly improve the quality of the probability estimation. 
diff --git a/python/doc/usecases/use_case_oscillator.rst b/python/doc/usecases/use_case_oscillator.rst new file mode 100644 index 00000000000..f1ee2ba7b21 --- /dev/null +++ b/python/doc/usecases/use_case_oscillator.rst @@ -0,0 +1,99 @@ +.. _use-case-oscillator: + +A two degree-of-fredom primary/secondary damped oscillator +========================================================== + +We consider a two degree-of-fredom primary-secondary damped oscillator. +This system is composed of a two-stage oscillator characterized by a mass, a stiffness and a damping ratio for each of the two oscillators. +This system is submitted to a white-noise excitation. +The limit-state function is highly nonlinear, mainly due to the interactions between the two stages of the system, and presents one failure zone. + + +.. figure:: ../_static/oscillator.png + :align: center + :alt: use case oscillator + :width: 50% + + Two stage oscillator + + +The limit state function is defined as follows: + +.. math:: G = F_s - 3 k_s \sqrt{\frac{\pi S_0}{4 \zeta_s \omega_s^3} \left[\frac{\zeta_a \zeta_s}{\zeta_p \zeta_s (4 \zeta_a^2 + \theta^2)+\gamma \zeta_a^2}\frac{(\zeta_p \omega_p^3 + \zeta_s \omega_s^3)\omega_p}{4 \zeta_a \omega_a^4}\right]} + + +The natural frequency of the first oscillator is equal to: + +.. math:: \omega_p = \sqrt{\frac{k_p}{m_p}} + +The natural frequency of the secondary oscillator is equal to: + +.. math:: \omega_s = \sqrt{\frac{k_s}{m_s}} + +The average natural frequency of the system is equal to: + +.. math:: \omega_a = \frac{\omega_p+\omega_s}{2} + +The average damping ratio of the system is equal to: + +.. math:: \zeta_a = \frac{\zeta_p+\zeta_s}{2} + +The mass ratio is equal to: + +.. math:: \gamma = \frac{m_s}{m_p} + +The tuning parameter of the system is equal to: + +.. 
math:: \theta = \frac{\omega_p - \omega_s}{\omega_a} + +Eight uncertainties are considered in the system: + +- on the masses of the primary and secondary systems (:math:`m_p` and :math:`m_s`), +- on the spring stiffeness of the primary and secondary oscillators (:math:`k_p` and :math:`k_s`), +- on the damping ratios of the primary and secondary systems (:math:`\zeta_p` and :math:`\zeta_s`), +- on the loading capacity of the secondary spring (:math:`F_s`), +- on the intensity of the white noise excitation (:math:`S_0`). + + +We consider the following distribution fResponse of Two-Degree-of-Freedom Systems +to White-Noise Base Excitation: + +- :math:`m_p \sim \text{LogNormalSigmaOverMu}( \mu_{m_p} = 1.5, \delta_{m_p} = 0.1)` +- :math:`m_s \sim \text{LogNormalSigmaOverMu}( \mu_{m_s} = 0.01, \delta_{m_s} = 0.1)` +- :math:`k_p \sim \text{LogNormalSigmaOverMu}( \mu_{k_p} = 1, \delta_{k_p} = 0.2)` +- :math:`k_s \sim \text{LogNormalSigmaOverMu}( \mu_{k_s} = 0.01, \delta_{k_s} = 0.2)` +- :math:`\zeta_p \sim \text{LogNormalSigmaOverMu}( \mu_{\zeta_p} = 0.05, \delta_{\zeta_p} = 0.4)` +- :math:`\zeta_s \sim \text{LogNormalSigmaOverMu}( \mu_{\zeta_s} = 0.02, \delta_{\zeta_s} = 0.5)` +- :math:`F_s \sim \text{LogNormalSigmaOverMu}( \mu_{F_s} = 27.5, \delta_{F_s} = 0.1)` +- :math:`S_0 \sim \text{LogNormalSigmaOverMu}( \mu_{S_0} = 100, \delta_{S_0} = 0.1)` + + + +The failure probability is: + +.. math:: P_f = \Prob{G(m_p,m_s,k_p,k_z,\zeta_p,\zeta_s,F_s,Z_0) \leq 0}. + + +The value of :math:`P_f` is: + +.. math:: P_f = 3.78 \times 10^{-7}. + +References +---------- +* Der Kiureghian, A. and De Stefano, M. (1991). Efficient Algorithm for Second-Order Reliability Analysis, Journal of engineering mechanics, 117(12), 2904-2923 + +* Dubourg, V., Sudret, B., Deheeger, F. (2013). Metamodel-based importance sampling for structural reliability analysis. Probabilistic Engineering Mechanics, 33, pp. 47–57 + +API documentation +----------------- + +.. 
currentmodule:: openturns.usecases.oscillator + +.. autoclass:: Oscillator + :noindex: + +Examples based on this use case +------------------------------- + +.. minigallery:: openturns.usecases.oscillator.Oscillator + diff --git a/python/doc/usecases/usecases.rst b/python/doc/usecases/usecases.rst index a1a1a5e6bcc..592311a51d0 100644 --- a/python/doc/usecases/usecases.rst +++ b/python/doc/usecases/usecases.rst @@ -24,5 +24,6 @@ Contents use_case_viscous_fall use_case_fireSatellite use_case_wingweight + use_case_oscillator coles use_case_linthurst diff --git a/python/doc/user_manual/usecases.rst b/python/doc/user_manual/usecases.rst index 6cb4b853c59..fd01dcac602 100644 --- a/python/doc/user_manual/usecases.rst +++ b/python/doc/user_manual/usecases.rst @@ -23,4 +23,4 @@ Use cases from the usecases module usecases.viscous_free_fall.ViscousFreeFall usecases.fireSatellite_function.FireSatelliteModel usecases.wingweight_function.WingWeightModel - + usecases.oscillator.Oscillator diff --git a/python/src/usecases/oscillator.py b/python/src/usecases/oscillator.py new file mode 100644 index 00000000000..89321ef9911 --- /dev/null +++ b/python/src/usecases/oscillator.py @@ -0,0 +1,219 @@ +""" +Use case : two degree-of-fredom primary/secondary damped oscillator +=================================================================== +""" +import openturns as ot + + +class Oscillator: + """ + Data class for the oscillator example. 
+ + Attributes + ---------- + + dim : int + dim = 8, dimension of the problem + + model : :class:`~openturns.SymbolicFunction` + The limit state function + + muMp : float + muMp = 1.5, mean of the mass of the primary system + + sigmaOverMuMp : float + sigmaOverMuMp = 0.1, coefficient of variation of the mass of the primary system + + distributionMp : :class:`~openturns.LogNormal` distribution of the mass of the primary system + distributionMp = ot.LogNormalMuSigmaOverMu(muMp, sigmaOverMuMp).getDistribution() + + muMs : float + muMs = 0.01, mean of the mass of the primary system + + sigmaOverMuMs : float + sigmaOverMuMs = 0.1, coefficient of variation of the mass of the primary system + + distributionMs : :class:`~openturns.LogNormal` distribution of the mass of the secondary system + distributionMs = ot.LogNormalMuSigmaOverMu(muMs, sigmaOverMuMs).getDistribution() + + muKp : float + muKp = 1, mean of the spring stiffness of the primary system + + sigmaOverMuKp : float + sigmaOverMuKp = 0.2, coefficient of variation of the spring stiffness of the primary system + + distributionKp : :class:`~openturns.LogNormal` distribution of the spring stiffness of the primary system + distributionKp = ot.LogNormalMuSigmaOverMu(muKp, sigmaOverMuKp).getDistribution() + + muKs : float + muKs = 0.01, mean of the spring stiffness of the secondary system + + sigmaOverMuKs : float + sigmaOverMuKs = 0.2, coefficient of variation of the spring stiffness of the secondary system + + distributionKs : :class:`~openturns.LogNormal` distribution of the spring stiffness of the secondary system + distributionKs = ot.LogNormalMuSigmaOverMu(muKs, sigmaOverMuKs).getDistribution() + + muZetap : float + muZetap = 0.05, mean of the damping ratio of the primary system + + sigmaOverMuZetap : float + sigmaOverMuZetap = 0.4, coefficient of variation of the damping ratio of the primary system + + distributionZetap : :class:`~openturns.LogNormal` distribution of the damping ratio of the primary system + 
distributionZetap = ot.LogNormalMuSigmaOverMu(muZetap, sigmaOverMuZetap).getDistribution() + + muZetas : float + muZetas = 0.02, mean of the damping ratio of the secondary system + + sigmaOverMuZetas : float + sigmaOverMuZetas = 0.5, coefficient of variation of the damping ratio of the secondary system + + distributionZetas : :class:`~openturns.LogNormal` distribution of the damping ratio of the secondary system + distributionZetas = ot.LogNormalMuSigmaOverMu(muZetas, sigmaOverMuZetas).getDistribution() + + muFs : float + muFs = 27.5, mean of the loading capacity of the secondary spring + + sigmaOverFs : float + sigmaOverFs = 0.1, coefficient of variation of the loading capacity of the secondary spring + + distributionFs : :class:`~openturns.LogNormal` distribution of the loading capacity of the secondary spring + distributionFs = ot.LogNormalMuSigmaOverMu(muFs, sigmaOverFs).getDistribution() + + muS0 : float + muS0 = 100, mean of the intensity of the white noise + + sigmaOverS0 : float + sigmaOverS0 = 0.1, coefficient of variation of the intensity of the white noise + + distributionS0 : :class:`~openturns.LogNormal` distribution of the intensity of the white noise + distributionS0 = ot.LogNormalMuSigmaOverMu(muS0, sigmaOverS0).getDistribution() + + distribution : :class:`~openturns.ComposedDistribution` + The joint distribution of the input parameters + + Examples + -------- + >>> from openturns.usecases import oscillator + >>> # Load the oscillator + >>> osc = oscillator.Oscillator() + """ + + def __init__(self): + self.dim = 8 + + # Random variable : mp + self.muMp = 1.5 + self.sigmaOverMuMp = 0.1 + + # Random variable : ms + self.muMs = 0.01 + self.sigmaOverMuMs = 0.1 + + # Random variable : kp + self.muKp = 1.0 + self.sigmaOverMuKp = 0.2 + + # Random variable : ks + self.muKs = 0.01 + self.sigmaOverMuKs = 0.2 + + # Random variable : zetap + self.muZetap = 0.05 + self.sigmaOverMuZetap = 0.4 + + # Random variable : zetas + self.muZetas = 0.02 + 
self.sigmaOverMuZetas = 0.5 + + # Random variable : Fs + self.muFs = 27.5 + self.sigmaOverMuFs = 0.1 + + # Random variable : S0 + self.muS0 = 100.0 + self.sigmaOverMuS0 = 0.1 + + # create the limit state function model + formula = "Fs - 3 * ks * sqrt(" + formula += "pi_ * S0 * (zetap/2 + zetas/2) * zetas * (zetap * sqrt(kp / mp)^3 +" + formula += "zetas * sqrt(ks / ms)^3) * sqrt(kp / mp) /" + formula += "(4 * zetas * sqrt(ks / ms)^3 * (zetap * zetas * (4 * (zetap/2 + zetas/2)^2 +" + formula += "((sqrt(kp / mp) - sqrt(ks / ms)) / (sqrt(kp / mp)/2 + sqrt(ks / ms)/2))^2) +" + formula += "(ms/mp) * (zetap/2 + zetas/2)^2) * 4 * (zetap/2 + zetas/2) *" + formula += "(sqrt(kp / mp)/2 + sqrt(ks / ms)/2)^4))" + + self.model = ot.SymbolicFunction( + ["mp", "ms", "kp", "ks", "zetap", "zetas", "Fs", "S0"], [formula] + ) + + # Mass of primary system + self.distributionMp = ot.LogNormalMuSigmaOverMu( + self.muMp, self.sigmaOverMuMp + ).getDistribution() + self.distributionMp.setName("Mass of primary system") + self.distributionMp.setDescription(["mp"]) + + # Mass of secondary system + self.distributionMs = ot.LogNormalMuSigmaOverMu( + self.muMs, self.sigmaOverMuMs + ).getDistribution() + self.distributionMs.setName("Mass of secondary system") + self.distributionMs.setDescription(["ms"]) + + # Spring stiffness of primary system + self.distributionKp = ot.LogNormalMuSigmaOverMu( + self.muKp, self.sigmaOverMuKp + ).getDistribution() + self.distributionKp.setName("Spring stiffness of primary system") + self.distributionKp.setDescription(["kp"]) + + # Spring stiffness of secondary system + self.distributionKs = ot.LogNormalMuSigmaOverMu( + self.muKs, self.sigmaOverMuKs + ).getDistribution() + self.distributionKs.setName("Spring stiffness of secondary system") + self.distributionKs.setDescription(["ks"]) + + # Damping ratio of primary system + self.distributionZetap = ot.LogNormalMuSigmaOverMu( + self.muZetap, self.sigmaOverMuZetap + ).getDistribution() + 
self.distributionZetap.setName("Damping ratio of primary system") + self.distributionZetap.setDescription(["zetap"]) + + # Damping ratio of secondary system + self.distributionZetas = ot.LogNormalMuSigmaOverMu( + self.muZetas, self.sigmaOverMuZetas + ).getDistribution() + self.distributionZetas.setName("Damping ratio of secondary system") + self.distributionZetas.setDescription(["zetas"]) + + # Force capacity of secondary spring + self.distributionFs = ot.LogNormalMuSigmaOverMu( + self.muFs, self.sigmaOverMuFs + ).getDistribution() + self.distributionFs.setName("Force capacity of secondary spring") + self.distributionFs.setDescription(["Fs"]) + + # Intensity of white noise excitation + self.distributionS0 = ot.LogNormalMuSigmaOverMu( + self.muS0, self.sigmaOverMuS0 + ).getDistribution() + self.distributionS0.setName("Intensity of white noise excitation") + self.distributionS0.setDescription(["S0"]) + + # Joint distribution of the input parameters + self.distribution = ot.ComposedDistribution( + [ + self.distributionMp, + self.distributionMs, + self.distributionKp, + self.distributionKs, + self.distributionZetap, + self.distributionZetas, + self.distributionFs, + self.distributionS0, + ] + ) diff --git a/python/test/CMakeLists.txt b/python/test/CMakeLists.txt index 62177a662d9..bc6f322163e 100644 --- a/python/test/CMakeLists.txt +++ b/python/test/CMakeLists.txt @@ -61,6 +61,7 @@ ot_pyinstallcheck_test (logistic_model IGNOREOUT) ot_pyinstallcheck_test (stressed_beam IGNOREOUT) ot_pyinstallcheck_test (fireSatellite_function IGNOREOUT) ot_pyinstallcheck_test (wingweight_function IGNOREOUT) +ot_pyinstallcheck_test (oscillator IGNOREOUT) if (NUMPY_FOUND) ot_pyinstallcheck_test (viscous_free_fall IGNOREOUT) endif() diff --git a/python/test/t_oscillator.py b/python/test/t_oscillator.py new file mode 100755 index 00000000000..efbeae04b1b --- /dev/null +++ b/python/test/t_oscillator.py @@ -0,0 +1,33 @@ +#! 
/usr/bin/env python + +from openturns.testing import assert_almost_equal +from openturns.usecases import oscillator +import openturns as ot + +""" +Test the Oscillator data class on a reference point. +""" +osc = oscillator.Oscillator() + +assert_almost_equal(osc.model(osc.distribution.getMean()), [23.1897], 1e-2) + +# Compute probability with FORM +model = osc.model +distribution = osc.distribution +vect = ot.RandomVector(distribution) +G = ot.CompositeRandomVector(model, vect) +event = ot.ThresholdEvent(G, ot.Less(), 0.0) +event.setName("failure") + +optimAlgo = ot.Cobyla() +optimAlgo.setMaximumEvaluationNumber(1000) +optimAlgo.setMaximumAbsoluteError(1.0e-3) +optimAlgo.setMaximumRelativeError(1.0e-3) +optimAlgo.setMaximumResidualError(1.0e-3) +optimAlgo.setMaximumConstraintError(1.0e-3) + +algo = ot.FORM(optimAlgo, event, distribution.getMean()) +algo.run() +result = algo.getResult() + +assert_almost_equal(result.getEventProbability(), 3.8906e-06, 1e-3)