diff --git a/.ci_support/run_docker_coverage.sh b/.ci_support/run_docker_coverage.sh index 3367b4f14f..b7dc27848b 100755 --- a/.ci_support/run_docker_coverage.sh +++ b/.ci_support/run_docker_coverage.sh @@ -22,6 +22,7 @@ cmake -DCMAKE_INSTALL_PREFIX=~/.local \ -DCMAKE_C_FLAGS="--coverage" -DCMAKE_CXX_FLAGS="--coverage -fuse-ld=mold" \ -DSWIG_COMPILE_FLAGS="-O1" \ -DUSE_SPHINX=OFF \ + -DUSE_HMAT=ON \ ${source_dir} make install OPENTURNS_NUM_THREADS=1 ctest -R pyinstallcheck --output-on-failure --timeout 200 ${MAKEFLAGS} --repeat after-timeout:2 --schedule-random diff --git a/.circleci/run_docker_linux.sh b/.circleci/run_docker_linux.sh index c54bc18511..7ca6542349 100755 --- a/.circleci/run_docker_linux.sh +++ b/.circleci/run_docker_linux.sh @@ -27,10 +27,9 @@ cmake -DCMAKE_INSTALL_PREFIX=~/.local \ -DCMAKE_UNITY_BUILD=ON -DCMAKE_UNITY_BUILD_BATCH_SIZE=32 \ -DCMAKE_C_FLAGS="-Wall -Wextra -Wpedantic -Werror" -DCMAKE_CXX_FLAGS="-Wall -Wextra -Wpedantic -Wshadow -Werror -D_GLIBCXX_ASSERTIONS -fuse-ld=mold" \ -DSWIG_COMPILE_FLAGS="-O1 -Wno-unused-parameter -Wno-shadow" \ - -DSPHINX_FLAGS="-W -T -j4" \ - -DUSE_HMAT=ON \ + -DUSE_SPHINX=ON -DSPHINX_FLAGS="-W -T -j4" \ ${source_dir} -make install +OPENTURNS_NUM_THREADS=1 make install if test -n "${uid}" -a -n "${gid}" then cp -r ~/.local/share/doc/openturns/html . 
diff --git a/.circleci/run_docker_mingw.sh b/.circleci/run_docker_mingw.sh index 8a627e075d..e2b076338e 100755 --- a/.circleci/run_docker_mingw.sh +++ b/.circleci/run_docker_mingw.sh @@ -12,18 +12,17 @@ fi mkdir build && cd build -ARCH=i686 +ARCH=x86_64 MINGW_PREFIX=/usr/${ARCH}-w64-mingw32 PYMAJMIN=310 PREFIX=${PWD}/install -CXXFLAGS="-Wall -Wextra -Wpedantic -Wshadow -Werror -D_GLIBCXX_ASSERTIONS" ${ARCH}-w64-mingw32-cmake \ +CXXFLAGS="-Wall -Wextra -Wpedantic -Wshadow -Werror -D_GLIBCXX_ASSERTIONS -fuse-ld=lld" ${ARCH}-w64-mingw32-cmake \ -DSWIG_COMPILE_FLAGS="-O0 -Wno-unused-parameter -Wno-shadow" \ -DCMAKE_INSTALL_PREFIX=${PREFIX} -DCMAKE_INSTALL_LIBDIR=${PREFIX}/lib \ -DPython_INCLUDE_DIR=${MINGW_PREFIX}/include/python${PYMAJMIN} \ -DPython_LIBRARY=${MINGW_PREFIX}/lib/libpython${PYMAJMIN}.dll.a \ -DPython_EXECUTABLE=/usr/bin/${ARCH}-w64-mingw32-python${PYMAJMIN}-bin \ -DCMAKE_UNITY_BUILD=ON -DCMAKE_UNITY_BUILD_BATCH_SIZE=32 \ - -DUSE_TBB=OFF \ ${source_dir} make install ${ARCH}-w64-mingw32-strip --strip-unneeded ${PREFIX}/bin/*.dll ${PREFIX}/Lib/site-packages/openturns/*.pyd diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 24d9fc64a0..bcf916000f 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -12,7 +12,7 @@ jobs: - uses: actions/checkout@v4 - run: | brew install openblas swig boost python3 tbb nlopt cminpack ceres-solver bison flex hdf5 ipopt primesieve spectra pagmo cuba nanoflann - pip3 install matplotlib scipy chaospy pandas dill --break-system-packages + pip3 install matplotlib "numpy<2" scipy chaospy pandas dill --break-system-packages - run: | cmake \ -DCMAKE_INSTALL_PREFIX=~/.local \ diff --git a/CMakeLists.txt b/CMakeLists.txt index 906ba6f074..bbcae89978 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -21,7 +21,7 @@ option (USE_HDF5 "Use HDF5 for high volume storage" option (USE_BOOST "Use Boost for distribution computation" ON) option (USE_MPFR "Use MPFR for real valued special functions 
computation" ON) option (USE_MPC "Use MPC for complex valued special functions computation" ON) -option (USE_SPHINX "Use sphinx for documentation" ON) +option (USE_SPHINX "Use sphinx for documentation" OFF) option (USE_DOXYGEN "Use Doxygen for API documentation" ON) option (USE_NLOPT "Use NLopt for additional optimization algorithms" ON) option (USE_CERES "Use Ceres Solver for additional optimization algorithms" ON) @@ -256,7 +256,16 @@ if (HDF5_FOUND) endif () if (USE_BOOST) - find_package (Boost 1.46) + find_package (Boost CONFIG 1.46) + if (Boost_FOUND) + message(STATUS "Found Boost: ${Boost_DIR} (found suitable version \"${Boost_VERSION}\")") + endif () + + # fallback to FindBoost before its deprecation + if (CMAKE_VERSION VERSION_LESS 3.30 AND NOT Boost_FOUND) + find_package (Boost MODULE 1.46) + endif () + if (Boost_FOUND) if (CMAKE_VERSION VERSION_LESS 3.15) # Boost_VERSION reports the integer BOOST_VERSION from boost/version.hpp instead of x.y.z format @@ -281,7 +290,7 @@ if (USE_BOOST) list (APPEND OPENTURNS_ENABLED_FEATURES "mpc") endif () endif () - list (APPEND OPENTURNS_PRIVATE_INCLUDE_DIRS ${Boost_INCLUDE_DIRS}) + list (APPEND OPENTURNS_PRIVATE_LIBRARIES Boost::boost) list (APPEND OPENTURNS_ENABLED_FEATURES "boost") endif () endif () @@ -624,20 +633,10 @@ if (BUILD_PYTHON) endif () endif () - if (NOT DEFINED PYTHON_SITE_PACKAGES AND NOT CMAKE_CROSSCOMPILING) - execute_process (COMMAND ${Python_EXECUTABLE} -c "import sysconfig, os; print(sysconfig.get_path('platlib').replace(sysconfig.get_path('data'), '').lstrip(os.path.sep))" - OUTPUT_VARIABLE PYTHON_SITE_PACKAGES OUTPUT_STRIP_TRAILING_WHITESPACE) - file (TO_CMAKE_PATH "${PYTHON_SITE_PACKAGES}" PYTHON_SITE_PACKAGES) - endif () - - if (DEFINED PYTHON_SITE_PACKAGES) - set (OPENTURNS_PYTHON_MODULE_PATH "${PYTHON_SITE_PACKAGES}") + if (WIN32) + set (OPENTURNS_PYTHON_MODULE_PATH Lib/site-packages CACHE PATH "site-packages dir") else () - if (WIN32) - set (OPENTURNS_PYTHON_MODULE_PATH Lib/site-packages) - 
else () - set (OPENTURNS_PYTHON_MODULE_PATH ${CMAKE_INSTALL_LIBDIR}/python${Python_VERSION_MAJOR}.${Python_VERSION_MINOR}/site-packages) - endif () + set (OPENTURNS_PYTHON_MODULE_PATH ${CMAKE_INSTALL_LIBDIR}/python${Python_VERSION_MAJOR}.${Python_VERSION_MINOR}/site-packages CACHE PATH "site-packages dir") endif () endif () endif () diff --git a/CMakePresets.json b/CMakePresets.json index ab0729822d..732d93db4a 100644 --- a/CMakePresets.json +++ b/CMakePresets.json @@ -6,7 +6,6 @@ "binaryDir": "${sourceDir}/build", "generator": "Unix Makefiles", "cacheVariables": { - "USE_SPHINX": "OFF", "CMAKE_BUILD_TYPE": "RelWithDebInfo", "CMAKE_C_FLAGS": "-Wall -Wextra -Wpedantic -Wshadow", "CMAKE_CXX_FLAGS": "-Wall -Wextra -Wpedantic -Wshadow -D_GLIBCXX_ASSERTIONS -fno-inline", diff --git a/ChangeLog b/ChangeLog index 067b9574c2..a914958fe2 100644 --- a/ChangeLog +++ b/ChangeLog @@ -5,6 +5,7 @@ === Library === ==== Major changes ==== + * Swapped InverseGamma shape/scale parameters: InverseGamma(k, lambda) ==== New classes ==== diff --git a/distro/debian/rules b/distro/debian/rules index 1481520dd1..dac726d28c 100755 --- a/distro/debian/rules +++ b/distro/debian/rules @@ -36,7 +36,6 @@ override_dh_auto_configure: dh_auto_configure -Bbuilddir -- \ $(with_tbb) \ -Dot_configure_date:STRING='$(BUILD_DATE)' \ - -DUSE_SPHINX:BOOL=OFF \ -DCMAKE_SKIP_INSTALL_RPATH:BOOL=ON \ -DCMAKE_INSTALL_PREFIX:PATH=/usr \ -DCMAKE_INSTALL_LIBDIR:PATH="/usr/lib/$(DEB_HOST_MULTIARCH)" \ diff --git a/lib/etc/openturns.conf.in b/lib/etc/openturns.conf.in index 4fd7f83374..1096b2f30b 100644 --- a/lib/etc/openturns.conf.in +++ b/lib/etc/openturns.conf.in @@ -12,17 +12,14 @@ - - + - - @@ -556,7 +553,7 @@ - + @@ -579,13 +576,14 @@ - - - - - - - + + + + + + + + @@ -604,6 +602,9 @@ + + + @@ -1100,6 +1101,7 @@ + diff --git a/lib/src/Base/Algo/EnclosingSimplexMonotonic1D.cxx b/lib/src/Base/Algo/EnclosingSimplexMonotonic1D.cxx index 22590d7424..ff564f6b15 100644 --- 
a/lib/src/Base/Algo/EnclosingSimplexMonotonic1D.cxx +++ b/lib/src/Base/Algo/EnclosingSimplexMonotonic1D.cxx @@ -35,7 +35,6 @@ static const Factory Factory_EnclosingSimplexMonoto /* Constructor without parameters */ EnclosingSimplexMonotonic1D::EnclosingSimplexMonotonic1D() : EnclosingSimplexAlgorithmImplementation() - , increasing_(true) { // Nothing to do } @@ -43,7 +42,6 @@ EnclosingSimplexMonotonic1D::EnclosingSimplexMonotonic1D() /* Parameter constructor */ EnclosingSimplexMonotonic1D::EnclosingSimplexMonotonic1D(const Sample & vertices) : EnclosingSimplexAlgorithmImplementation() - , increasing_(true) { IndicesCollection simplices; setVerticesAndSimplices(vertices, simplices); @@ -161,4 +159,19 @@ String EnclosingSimplexMonotonic1D::__str__(const String & ) const return OSS(false) << "class=" << EnclosingSimplexMonotonic1D::GetClassName(); } +/* Method save() stores the object through the StorageManager */ +void EnclosingSimplexMonotonic1D::save(Advocate & adv) const +{ + EnclosingSimplexAlgorithmImplementation::save(adv); + adv.saveAttribute("increasing_", increasing_); +} + + +/* Method load() reloads the object from the StorageManager */ +void EnclosingSimplexMonotonic1D::load(Advocate & adv) +{ + EnclosingSimplexAlgorithmImplementation::load(adv); + adv.loadAttribute("increasing_", increasing_); +} + END_NAMESPACE_OPENTURNS diff --git a/lib/src/Base/Algo/RegularGridEnclosingSimplex.cxx b/lib/src/Base/Algo/RegularGridEnclosingSimplex.cxx index 259601f3c0..d3f1b51732 100644 --- a/lib/src/Base/Algo/RegularGridEnclosingSimplex.cxx +++ b/lib/src/Base/Algo/RegularGridEnclosingSimplex.cxx @@ -37,9 +37,6 @@ static const Factory Factory_RegularGridEnclosingSi /* Constructor without parameters */ RegularGridEnclosingSimplex::RegularGridEnclosingSimplex() : EnclosingSimplexAlgorithmImplementation() - , start_(0) - , N_(0) - , step_(0) { // Nothing to do } @@ -68,11 +65,21 @@ RegularGridEnclosingSimplex * RegularGridEnclosingSimplex::emptyClone() const void 
RegularGridEnclosingSimplex::setVerticesAndSimplices(const Sample & vertices, const IndicesCollection & simplices) { EnclosingSimplexAlgorithmImplementation::setVerticesAndSimplices(vertices, simplices); - // Check that sample can be converted to a RegularGrid, and get N, start, step - RegularGrid newGrid = Mesh(vertices); - start_ = newGrid.getStart(); - N_ = newGrid.getN(); - step_ = newGrid.getStep(); + if (vertices.getSize()) + { + // Check that sample can be converted to a RegularGrid, and get N, start, step + RegularGrid newGrid = Mesh(vertices); + start_ = newGrid.getStart(); + N_ = newGrid.getN(); + step_ = newGrid.getStep(); + } + else + { + // allow one to reset it + start_ = 0.0; + N_ = 0; + step_ = 0.0; + } } /* Get the index of the enclosing simplex of the given point */ @@ -109,4 +116,23 @@ String RegularGridEnclosingSimplex::__str__(const String & ) const return OSS(false) << "class=" << RegularGridEnclosingSimplex::GetClassName(); } +/* Method save() stores the object through the StorageManager */ +void RegularGridEnclosingSimplex::save(Advocate & adv) const +{ + EnclosingSimplexAlgorithmImplementation::save(adv); + adv.saveAttribute("start_", start_); + adv.saveAttribute("N_", N_); + adv.saveAttribute("step_", step_); +} + + +/* Method load() reloads the object from the StorageManager */ +void RegularGridEnclosingSimplex::load(Advocate & adv) +{ + EnclosingSimplexAlgorithmImplementation::load(adv); + adv.loadAttribute("start_", start_); + adv.loadAttribute("N_", N_); + adv.loadAttribute("step_", step_); +} + END_NAMESPACE_OPENTURNS diff --git a/lib/src/Base/Algo/SimplicialCubature.cxx b/lib/src/Base/Algo/SimplicialCubature.cxx index 82b67bdecd..929bbc1201 100644 --- a/lib/src/Base/Algo/SimplicialCubature.cxx +++ b/lib/src/Base/Algo/SimplicialCubature.cxx @@ -21,6 +21,7 @@ #include "openturns/SimplicialCubature.hxx" #include "openturns/PersistentObjectFactory.hxx" #include "openturns/ResourceMap.hxx" +#include "openturns/IntervalMesher.hxx" 
BEGIN_NAMESPACE_OPENTURNS @@ -34,7 +35,7 @@ static const Factory Factory_SimplicialCubature; /* Default constructor */ SimplicialCubature::SimplicialCubature() - : PersistentObject() + : IntegrationAlgorithmImplementation() , rule_(ResourceMap::GetAsUnsignedInteger("SimplicialCubature-DefaultRule")) , maximumAbsoluteError_(ResourceMap::GetAsScalar("SimplicialCubature-DefaultMaximumAbsoluteError")) , maximumRelativeError_(ResourceMap::GetAsScalar("SimplicialCubature-DefaultMaximumRelativeError")) @@ -116,14 +117,14 @@ String SimplicialCubature::__str__(const String & ) const /* Method save() stores the object through the StorageManager */ void SimplicialCubature::save(Advocate & adv) const { - PersistentObject::save(adv); + IntegrationAlgorithmImplementation::save(adv); adv.saveAttribute("rule_", rule_); } /* Method load() reloads the object from the StorageManager */ void SimplicialCubature::load(Advocate & adv) { - PersistentObject::load(adv); + IntegrationAlgorithmImplementation::load(adv); adv.loadAttribute("rule_", rule_); } @@ -151,6 +152,16 @@ UnsignedInteger SimplicialCubature::getNodeNumber(const UnsignedInteger dimensio } +Point SimplicialCubature::integrate(const Function & function, + const Interval & interval) const +{ + const UnsignedInteger intervalsNumber = ResourceMap::GetAsUnsignedInteger("SimplicialCubature-MarginalDiscretizationIntervalsNumber"); + const Indices discretization(interval.getDimension(), intervalsNumber); + const IntervalMesher mesher(discretization); + const Mesh mesh(mesher.build(interval)); + return integrate(function, mesh); +} + /* Compute an approximation of \int_a^b f(x_1,\dots,x_n)dx_1\dotsdx_n, where [a,b] is an n-D interval. 
*/ Point SimplicialCubature::integrate(const Function & F, const Mesh & mesh) const diff --git a/lib/src/Base/Algo/openturns/EnclosingSimplexMonotonic1D.hxx b/lib/src/Base/Algo/openturns/EnclosingSimplexMonotonic1D.hxx index 1a9d69ab0d..7f143ca4b4 100644 --- a/lib/src/Base/Algo/openturns/EnclosingSimplexMonotonic1D.hxx +++ b/lib/src/Base/Algo/openturns/EnclosingSimplexMonotonic1D.hxx @@ -70,10 +70,16 @@ public: /** String converter */ String __str__(const String & offset = "") const override; + /** Method save() stores the object through the StorageManager */ + void save(Advocate & adv) const override; + + /** Method load() reloads the object from the StorageManager */ + void load(Advocate & adv) override; + private: /** Flag telling whether vertices are sorted in ascending or descending order */ - Bool increasing_; + Bool increasing_ = true; } ; /* class EnclosingSimplexMonotonic1D */ diff --git a/lib/src/Base/Algo/openturns/RegularGridEnclosingSimplex.hxx b/lib/src/Base/Algo/openturns/RegularGridEnclosingSimplex.hxx index d7ae355468..49ff308e27 100644 --- a/lib/src/Base/Algo/openturns/RegularGridEnclosingSimplex.hxx +++ b/lib/src/Base/Algo/openturns/RegularGridEnclosingSimplex.hxx @@ -70,12 +70,18 @@ public: /** String converter */ String __str__(const String & offset = "") const override; + /** Method save() stores the object through the StorageManager */ + void save(Advocate & adv) const override; + + /** Method load() reloads the object from the StorageManager */ + void load(Advocate & adv) override; + private: /** Cached values taken from grid_ */ - Scalar start_; - UnsignedInteger N_; - Scalar step_; + Scalar start_ = 0.0; + UnsignedInteger N_ = 0; + Scalar step_ = 0.0; } ; /* class RegularGridEnclosingSimplex */ diff --git a/lib/src/Base/Algo/openturns/SimplicialCubature.hxx b/lib/src/Base/Algo/openturns/SimplicialCubature.hxx index a5a6cf6db8..e6676ace00 100644 --- a/lib/src/Base/Algo/openturns/SimplicialCubature.hxx +++ 
b/lib/src/Base/Algo/openturns/SimplicialCubature.hxx @@ -22,7 +22,6 @@ #define OPENTURNS_SIMPLICIALCUBATURE_HXX #include "openturns/IntegrationAlgorithmImplementation.hxx" -#include "openturns/IntegrationAlgorithm.hxx" #include "openturns/SpecFunc.hxx" BEGIN_NAMESPACE_OPENTURNS @@ -32,7 +31,7 @@ BEGIN_NAMESPACE_OPENTURNS */ class OT_API SimplicialCubature - : public PersistentObject + : public IntegrationAlgorithmImplementation { CLASSNAME @@ -49,6 +48,13 @@ public: */ Point integrate(const Function & function, const Mesh & mesh) const; + /** Compute an approximation of \int_{[a,b]}f(x)dx, where [a,b] + * is an n-D interval + */ + using IntegrationAlgorithmImplementation::integrate; + Point integrate(const Function & function, + const Interval & interval) const override; + // Integration rule accessor void setRule(const UnsignedInteger rule); UnsignedInteger getRule() const; diff --git a/lib/src/Base/Common/Os.cxx b/lib/src/Base/Common/Os.cxx index 77a735a3e2..4f2eb52de4 100644 --- a/lib/src/Base/Common/Os.cxx +++ b/lib/src/Base/Common/Os.cxx @@ -28,12 +28,10 @@ // include OTConfig that defines OPENTURNS_HAVE_XXX #include "openturns/OTconfig.hxx" -#ifdef OPENTURNS_HAVE_UNISTD_H -# include // for rmdir, unlink +#ifdef OPENTURNS_ENABLE_CXX17 +#include #endif -#include // for system(3) - #ifdef OPENTURNS_HAVE_SYS_TYPES_H # include // for stat #endif @@ -49,17 +47,12 @@ #ifdef _MSC_VER # include -# define MKDIR(p, mode) _mkdir(p) # if !defined(S_ISDIR) # define S_ISDIR(mode) (((mode) & S_IFDIR) != 0) # endif # if !defined(S_ISREG) # define S_ISREG(mode) (((mode) & S_IFREG) != 0) # endif -#elif defined(_WIN32) -# define MKDIR(p, mode) mkdir(p) -#else -# define MKDIR(p, mode) mkdir(p, mode) #endif BEGIN_NAMESPACE_OPENTURNS @@ -82,76 +75,6 @@ const char * Os::GetDirectoryListSeparator() #endif } -String Os::GetDeleteCommandOutput() -{ -#ifndef _WIN32 - return " > /dev/null 2>&1"; -#else - return " > NUL"; -#endif -} - -// Returns 0 if no error -int 
Os::ExecuteCommand(const String & command) -{ - int rc = -1; - LOGINFO( OSS() << "Execute command=" << command ); -#ifdef _WIN32 - if ( ResourceMap::GetAsBool("Os-CreateProcess")) - { - // Startup information - STARTUPINFO si; - ZeroMemory(&si, sizeof(si)); - si.cb = sizeof(si); - si.dwFlags = STARTF_USESHOWWINDOW; - si.wShowWindow = SW_HIDE; - - // Process information - PROCESS_INFORMATION pi; - ZeroMemory(&pi, sizeof(pi)); - - // Create the process - DWORD dwProcessFlags = 0; - char * cmd = strdup(command.c_str()); - const Bool processOk = CreateProcess(NULL, cmd, NULL, NULL, true, dwProcessFlags, NULL, NULL, &si, &pi) != 0; - free(cmd); - if ( processOk ) - { - // Wait for the external application to finish - DWORD waitRc = WaitForSingleObject(pi.hProcess, INFINITE); - if ( waitRc != WAIT_FAILED ) - { - DWORD exit_code = 0; - const Bool codeOk = GetExitCodeProcess(pi.hProcess, &exit_code) != 0; - if (codeOk) - { - rc = exit_code; - } - else - { - rc = GetLastError(); - } - } - - // Close everything - CloseHandle(pi.hProcess); - CloseHandle(pi.hThread); - } - else - { - rc = GetLastError(); - } - } // use create process - else -#endif - { - rc = system(command.c_str()); - } - LOGINFO( OSS() << "Return code=" << rc << " for command=" << command ); - return rc; -} - - void Os::Remove(const String& fileName) { if (!ResourceMap::GetAsBool("Os-RemoveFiles")) return; @@ -161,143 +84,26 @@ void Os::Remove(const String& fileName) } } -// Function helper for Os::MakeDirectory: replace backslash by slash -static void -convert_backslashes(String & path) -{ -#ifdef _WIN32 - const char* current_char = path.c_str(); - String::size_type pos = 0; - // On Windows, leading \\ is for network paths and must not be stripped - if (*current_char == '\\' && *(current_char + 1) == '\\') - { - pos = 2; - current_char += pos; - } - for ( ; *current_char != '\0'; ++pos, ++current_char ) - { - if (*current_char == '\\') path[pos] = '/'; - } -#else - (void) path; -#endif -} - Bool 
Os::IsDirectory(const String & fileName) { +#ifdef OPENTURNS_ENABLE_CXX17 + return std::filesystem::is_directory(std::filesystem::u8path(fileName)); +#else struct stat dir_stat; if(stat(fileName.c_str(), &dir_stat) != 0) return false; return S_ISDIR(dir_stat.st_mode); +#endif } Bool Os::IsFile(const String & fileName) { +#ifdef OPENTURNS_ENABLE_CXX17 + return std::filesystem::is_regular_file(std::filesystem::u8path(fileName)); +#else struct stat dir_stat; if(stat(fileName.c_str(), &dir_stat) != 0) return false; return S_ISREG(dir_stat.st_mode); -} - -// Returns 0 if no error -int Os::MakeDirectory(const String & path) -{ - if (path.empty()) return 1; - if (IsDirectory(path)) return 0; - - String slashPath(path); - convert_backslashes(slashPath); - - String::size_type pos = 0; - while((pos = slashPath.find('/', pos)) != String::npos) - { - String current_dir(path.substr(0, pos)); - const char * cpath = current_dir.c_str(); - if (!IsDirectory(current_dir) && (0 != MKDIR(cpath, 0777))) return 1; - pos++; - } - - return 0; -} - -#ifndef _WIN32 -static int deleteRegularFileOrDirectory(const char * path, - const struct stat *, - int typeflag, - struct FTW * ) -{ - int rc; - - switch (typeflag) - { - case FTW_DP: - rc = rmdir( path ); - if ( rc < 0 ) return 1; - break; - - case FTW_SL: - case FTW_SLN: - case FTW_F: - rc = unlink( path ); - if ( rc < 0 ) return 1; - break; - - } /* end switch */ - - return 0; -} -#endif /* !WIN32 */ - - - -// Delete a directory and its contents recursively. Returns 0 if no error -int Os::DeleteDirectory(const String & path) -{ - if (path.empty()) return 1; - if (!IsDirectory(path)) return 1; - - // Refuse to delete root directory (/) and current directory (.) 
- if (path == "/" || path == ".") return 1; - - const char * directory = path.c_str(); -#ifdef _WIN32 - if ( ((strlen( directory ) == 3) && (directory[1] == ':') && (directory[2] == '\\' || directory[2] == '/')) || - ((strlen( directory ) == 2) && (directory[1] == ':')) ) - { - // do not delete root directory - return 1; - } #endif - - int rc = 0; - -#ifndef _WIN32 - - rc = nftw(directory, deleteRegularFileOrDirectory, 20, FTW_DEPTH); - -#else /* WIN32 */ - - UnsignedInteger countdown = ResourceMap::GetAsUnsignedInteger("Os-DeleteTimeout"); - const String rmdirCmd("rmdir /Q /S \"" + path + "\"" + " > NUL 2>&1"); - Bool directoryExists = true; - - do - { - rc = system(rmdirCmd.c_str()); - - // check if directory still there (rmdir dos command always return 0) - directoryExists = IsDirectory(path); - if (directoryExists) - { - if (countdown == 0) return 1; - -- countdown; - } - Sleep(1000); - } - while (directoryExists); - -#endif /* WIN32 */ - - return rc; } - END_NAMESPACE_OPENTURNS diff --git a/lib/src/Base/Common/Path.cxx b/lib/src/Base/Common/Path.cxx index 9eb987739e..d5287c3933 100644 --- a/lib/src/Base/Common/Path.cxx +++ b/lib/src/Base/Common/Path.cxx @@ -21,19 +21,15 @@ #include // for std::vector #include // for std::string #include // for getenv -#include // for strcpy +#include // for strdup #ifdef _WIN32 #include // for ofstream #include "openturns/OTwindows.h" // for GetTempFileName, GetModuleFileName -#ifdef _MSC_VER -# include -#define mkdir(p) _mkdir(p) -#endif /* _MSC_VER */ -#endif /* WIN32 */ +#endif // Include OTConfig that defines OPENTURNS_HAVE_XXX -// It also defines INSTALL_PATH, SYSCONFIG_PATH, DATA_PATH, OPENTURNS_HOME_ENV_VAR +// It also defines INSTALL_PATH, SYSCONFIG_PATH, OPENTURNS_HOME_ENV_VAR #include "openturns/OTconfig.hxx" #include "openturns/OSS.hxx" @@ -42,12 +38,13 @@ #include "openturns/ResourceMap.hxx" #include "openturns/Os.hxx" #include "openturns/Log.hxx" + #ifdef OPENTURNS_HAVE_LIBGEN_H #include // for dirname #endif 
#ifdef OPENTURNS_HAVE_UNISTD_H -#include // for getpid, readlink, close +#include // for close #endif #ifndef INSTALL_PATH @@ -62,10 +59,6 @@ #error "SYSCONFIG_PATH is NOT defined. Check configuration." #endif -#ifndef DATA_PATH -#error "DATA_PATH is NOT defined. Check configuration." -#endif - #ifndef OPENTURNS_HOME_ENV_VAR #error "OPENTURNS_HOME_ENV_VAR is NOT defined. Check configuration." #endif @@ -302,132 +295,4 @@ FileName Path::FindFileByNameInDirectoryList(const FileName & name, } /* end findFileByNameInDirectoryList */ -/* - * escape backslash in filename - * ex: if filename = C:\windows\temp, return C:\\windows\\temp - */ -void Path::EscapeBackslash(FileName & filename) -{ - String backslash("\\"); - String::size_type loc = filename.find(backslash); - while(loc != String::npos) - { - // "\" at the last pos - if(loc == filename.size() - 1) - { - filename.insert(loc, backslash); - break; - } - // "\" in the middle - if(filename.at(loc + 1) != backslash[0]) - filename.insert(loc, backslash); - // else: no "\", or "\\" in the middle - loc = filename.find(backslash, loc + 2); - } -} - - -FileName Path::GetTemporaryDirectory() -{ - FileName tempDirectory; - - String tempStr(ResourceMap::GetAsString("Path-TemporaryDirectory")); -#ifndef _WIN32 - tempDirectory = tempStr; -#else - const char * tempEnv = getenv(tempStr.c_str()); - if (tempEnv) - { - // if temporary-directory is an env var, return the content of the env var. 
- tempDirectory = String(tempEnv); - } - else - { - // if not, just return the content of temporary-directory - tempDirectory = tempStr; - } -#endif - - return tempDirectory; -} - - -/* Build a temporary file name given a prefix */ -FileName Path::BuildTemporaryFileName(const FileName & prefix) -{ -#ifndef _WIN32 - const String fullPattern(GetTemporaryDirectory() + String(Os::GetDirectorySeparator()) + prefix + String("_XXXXXX")); - char * temporaryFileName = strdup(fullPattern.c_str()); - int fileDescriptor(mkstemp(temporaryFileName)); - close(fileDescriptor); - FileName result(temporaryFileName); - free(temporaryFileName); - return result; -#else - // get uniq name - char temporaryFileName[MAX_PATH]; - GetTempFileName(GetTemporaryDirectory().c_str(), // directory for tmp files - TEXT(prefix.c_str()), // temp file name prefix - 0, // create unique name - temporaryFileName); // buffer for name - // check temporary filename - if (!Os::IsFile(String(temporaryFileName))) - LOGERROR(OSS() << "Temporary file name " << temporaryFileName << " does NOT exists. Check your temporary directory."); - // add "/" to the directory - String slashedTemporaryFileName(temporaryFileName); - EscapeBackslash(slashedTemporaryFileName); - return slashedTemporaryFileName; -#endif -} - -/* Create a temporary directory. - */ -String Path::CreateTemporaryDirectory (const FileName & directoryPrefix) -{ - if (directoryPrefix.size() == 0) throw InvalidArgumentException(HERE) << "No prefix defined to create temporary directory"; - -#ifndef _WIN32 - String tempDir(GetTemporaryDirectory()); - tempDir += Os::GetDirectorySeparator(); - tempDir += directoryPrefix; - tempDir += "_XXXXXX"; - - char * tempDirName = (char *) calloc(tempDir.size() + 1, sizeof (char)); - strncpy(tempDirName, tempDir.c_str(), tempDir.size() + 1); - char *tempDirName_p = mkdtemp(tempDirName); - if ( ! 
tempDirName_p ) throw FileOpenException(HERE) << "Could not create temporary directory from template " << tempDir; - const String finalTempDirName(tempDirName); - free(tempDirName); -#else - char temporaryDirName[MAX_PATH]; - int ret = 0; - for (int retry = 10000; retry >= 0; --retry) - { - ret = GetTempFileName(GetTemporaryDirectory().c_str(), // directory for tmp files - TEXT((directoryPrefix + "abc").c_str()), // temp file name prefix (only 3 characters are used) - 0, // create unique name - temporaryDirName); // buffer for name - if (0 == ret) - { - ret = -1; - continue; - } - DeleteFile(temporaryDirName); - ret = mkdir(temporaryDirName); - if (0 == ret) break; - } - if (0 != ret) - { - LOGERROR(OSS() << "Can't create temporary directory."); - temporaryDirName[0] = '\0'; - } - - const String finalTempDirName(temporaryDirName); - -#endif - - return finalTempDirName; -} - - END_NAMESPACE_OPENTURNS diff --git a/lib/src/Base/Common/ResourceMap.cxx b/lib/src/Base/Common/ResourceMap.cxx index 1f95c75e7b..41d1839b76 100644 --- a/lib/src/Base/Common/ResourceMap.cxx +++ b/lib/src/Base/Common/ResourceMap.cxx @@ -19,10 +19,8 @@ * */ #include +#include #include "openturns/OTconfig.hxx" -#ifdef OPENTURNS_HAVE_UNISTD_H -#include // for sysconf -#endif #include "openturns/OSS.hxx" #include "openturns/ResourceMap.hxx" #include "openturns/Exception.hxx" @@ -621,16 +619,8 @@ void ResourceMap::loadConfigurationFile() /* Load the configuration defined at installation time */ void ResourceMap::loadDefaultConfiguration() { -#ifndef _WIN32 - addAsString("Path-TemporaryDirectory", "/tmp"); - addAsUnsignedInteger("TBB-ThreadsNumber", sysconf(_SC_NPROCESSORS_CONF)); -#else - addAsString("Path-TemporaryDirectory", "TEMP"); - UnsignedInteger numberOfProcessors = 0; - std::istringstream iss(getenv("NUMBER_OF_PROCESSORS")); - iss >> numberOfProcessors; - addAsUnsignedInteger("TBB-ThreadsNumber", numberOfProcessors); -#endif + // using physical cores numbers (logical/2) is faster in most 
situations + addAsUnsignedInteger("TBB-ThreadsNumber", std::max(std::thread::hardware_concurrency() / 2, 1u)); if (const char* env_num_threads = std::getenv("OPENTURNS_NUM_THREADS")) { try @@ -642,12 +632,10 @@ void ResourceMap::loadDefaultConfiguration() throw InternalException(HERE) << "OPENTURNS_NUM_THREADS must be an integer, got " << env_num_threads; } } - addAsUnsignedInteger("Cache-MaxSize", 1024); + addAsUnsignedInteger("Cache-MaxSize", 65536); // Os parameters - addAsBool("Os-CreateProcess", false); addAsBool("Os-RemoveFiles", true); - addAsUnsignedInteger("Os-DeleteTimeout", 2); // XMLStorageManager parameters addAsUnsignedInteger("XMLStorageManager-DefaultCompressionLevel", 0); @@ -1185,7 +1173,7 @@ void ResourceMap::loadDefaultConfiguration() addAsScalar("GeneralizedParetoFactory-MaximumRelativeError", 1.0e-10); addAsScalar("GeneralizedParetoFactory-MeanResidualLifeConfidenceLevel", 0.95); addAsScalar("GeneralizedParetoFactory-ThresholdStabilityConfidenceLevel", 0.95); - addAsUnsignedInteger("GeneralizedParetoFactory-MaximumEvaluationNumber", 1000); + addAsUnsignedInteger("GeneralizedParetoFactory-MaximumCallsNumber", 1000); addAsUnsignedInteger("GeneralizedParetoFactory-MeanResidualLifePointNumber", 100); addAsUnsignedInteger("GeneralizedParetoFactory-ThresholdStabilityPointNumber", 100); addAsUnsignedInteger("GeneralizedParetoFactory-SmallSize", 20); @@ -1212,6 +1200,7 @@ void ResourceMap::loadDefaultConfiguration() addAsScalar("KernelSmoothing-CutOffPlugin", 5.0); addAsScalar("KernelSmoothing-RelativePrecision", 1.0e-5); addAsScalar("KernelSmoothing-ResidualPrecision", 1.0e-10); + addAsScalar("KernelSmoothing-DefaultShiftScale", 1.0e-5); addAsUnsignedInteger("KernelSmoothing-BinNumber", 1024); addAsUnsignedInteger("KernelSmoothing-MaximumIteration", 50); addAsUnsignedInteger("KernelSmoothing-SmallSize", 250); @@ -1733,6 +1722,7 @@ void ResourceMap::loadDefaultConfiguration() addAsScalar("SimplicialCubature-DefaultMaximumRelativeError", 1.0e-5); 
addAsUnsignedInteger("SimplicialCubature-DefaultMaximumCallsNumber", 10000); addAsUnsignedInteger("SimplicialCubature-DefaultRule", 3); + addAsUnsignedInteger("SimplicialCubature-MarginalDiscretizationIntervalsNumber", 1); // SparseMethod parameters // addAsScalar("SparseMethod-ErrorThreshold", 1.0e-3); diff --git a/lib/src/Base/Common/openturns/Os.hxx b/lib/src/Base/Common/openturns/Os.hxx index 5b17d85b2e..a7d08be849 100644 --- a/lib/src/Base/Common/openturns/Os.hxx +++ b/lib/src/Base/Common/openturns/Os.hxx @@ -47,31 +47,11 @@ public: */ static const char * GetDirectoryListSeparator(); - /** - * Return the command that permit one to snub the output of a command. - */ - static String GetDeleteCommandOutput(); - /** * Remove a file. */ static void Remove(const String & fileName); - /** - * Create a directory. - */ - static int MakeDirectory(const String & fileName); - - /** - * Delete a directory recursively. - */ - static int DeleteDirectory(const String & path); - - /** - * Make a system call. Return 0 if no error. - */ - static int ExecuteCommand(const String & command); - static Bool IsDirectory(const String & fileName); static Bool IsFile(const String & fileName); diff --git a/lib/src/Base/Common/openturns/Path.hxx b/lib/src/Base/Common/openturns/Path.hxx index 755f95d3f2..1188bb815f 100644 --- a/lib/src/Base/Common/openturns/Path.hxx +++ b/lib/src/Base/Common/openturns/Path.hxx @@ -77,26 +77,6 @@ public: static FileName FindFileByNameInDirectoryList(const FileName & name, const DirectoryList & dirList); - /** Get the temporary directory set in the openturns conf file. - * - * On Windows, if temporary-directory is an env var, return the content of the env var, - * if not, just return the content of temporary-directory. - * This function is mostly useful on windows in order to get the TEMP env var. 
- */ - static FileName GetTemporaryDirectory(); - - /** Build a temporary file name given a prefix - * @result A new unique filename based on the prefix - */ - static FileName BuildTemporaryFileName(const FileName & prefix); - - /** Create a temporary directory. - */ - static FileName CreateTemporaryDirectory(const FileName & directoryPrefix); - - /** Escape backslash in file name */ - static void EscapeBackslash(FileName & filename); - /** Get the location of the OT shared library (at runtime) */ static FileName GetLibraryDirectory(); diff --git a/lib/src/Base/Common/openturns/Pointer.hxx b/lib/src/Base/Common/openturns/Pointer.hxx index 8640cbb11b..37721ace81 100644 --- a/lib/src/Base/Common/openturns/Pointer.hxx +++ b/lib/src/Base/Common/openturns/Pointer.hxx @@ -232,7 +232,7 @@ public: */ inline Bool unique() const { - return ptr_.unique(); + return ptr_.use_count() == 1; } /** diff --git a/lib/src/Base/Func/EnumerateFunction.cxx b/lib/src/Base/Func/EnumerateFunction.cxx index a4c495a65a..8620b82109 100644 --- a/lib/src/Base/Func/EnumerateFunction.cxx +++ b/lib/src/Base/Func/EnumerateFunction.cxx @@ -126,4 +126,14 @@ Indices EnumerateFunction::getUpperBound() const return getImplementation()->getUpperBound(); } +EnumerateFunction EnumerateFunction::getMarginal(const Indices & indices) const +{ + return getImplementation()->getMarginal(indices); +} + +EnumerateFunction EnumerateFunction::getMarginal(const UnsignedInteger i) const +{ + return getImplementation()->getMarginal(i); +} + END_NAMESPACE_OPENTURNS diff --git a/lib/src/Base/Func/EnumerateFunctionImplementation.cxx b/lib/src/Base/Func/EnumerateFunctionImplementation.cxx index 15c2f5ce8f..3ed7d21625 100644 --- a/lib/src/Base/Func/EnumerateFunctionImplementation.cxx +++ b/lib/src/Base/Func/EnumerateFunctionImplementation.cxx @@ -19,6 +19,7 @@ * */ #include +#include "openturns/EnumerateFunction.hxx" #include "openturns/EnumerateFunctionImplementation.hxx" #include "openturns/OSS.hxx" #include 
"openturns/PersistentObjectFactory.hxx" @@ -148,5 +149,16 @@ void EnumerateFunctionImplementation::load(Advocate & adv) upperBound_ = Indices(getDimension(), std::numeric_limits::max()); } +/* Returns the marginal enumerate function */ +EnumerateFunction EnumerateFunctionImplementation::getMarginal(const Indices &) const +{ + throw NotYetImplementedException(HERE) << "In EnumerateFunctionImplementation::getMarginal"; +} + +/* Returns the marginal enumerate function */ +EnumerateFunction EnumerateFunctionImplementation::getMarginal(const UnsignedInteger i) const +{ + return getMarginal(Indices({i})); +} END_NAMESPACE_OPENTURNS diff --git a/lib/src/Base/Func/HyperbolicAnisotropicEnumerateFunction.cxx b/lib/src/Base/Func/HyperbolicAnisotropicEnumerateFunction.cxx index fec931a8a6..b3a97698b8 100644 --- a/lib/src/Base/Func/HyperbolicAnisotropicEnumerateFunction.cxx +++ b/lib/src/Base/Func/HyperbolicAnisotropicEnumerateFunction.cxx @@ -19,6 +19,7 @@ * */ #include +#include "openturns/EnumerateFunction.hxx" #include "openturns/HyperbolicAnisotropicEnumerateFunction.hxx" #include "openturns/OSS.hxx" #include "openturns/PersistentObjectFactory.hxx" @@ -275,6 +276,21 @@ void HyperbolicAnisotropicEnumerateFunction::setUpperBound(const Indices & upper initialize(); } +/* The marginal enumerate function */ +EnumerateFunction HyperbolicAnisotropicEnumerateFunction::getMarginal(const Indices & indices) const +{ + const UnsignedInteger inputDimension = getDimension(); + if (!indices.check(inputDimension)) + { + throw InvalidArgumentException(HERE) << "Indices " << indices << "must not exceed dimension " << inputDimension; + } + const UnsignedInteger activeDimension = indices.getSize(); + Point weightMarginal(activeDimension); + for (UnsignedInteger i = 0; i < activeDimension; ++i) + weightMarginal[i] = weight_[indices[i]]; + const HyperbolicAnisotropicEnumerateFunction enumerateFunctionMarginal(weightMarginal, q_); + return enumerateFunctionMarginal; +} /* Method save() stores 
the object through the StorageManager */ void HyperbolicAnisotropicEnumerateFunction::save(Advocate & adv) const diff --git a/lib/src/Base/Func/LinearEnumerateFunction.cxx b/lib/src/Base/Func/LinearEnumerateFunction.cxx index 17710264f9..78f7b81bd1 100644 --- a/lib/src/Base/Func/LinearEnumerateFunction.cxx +++ b/lib/src/Base/Func/LinearEnumerateFunction.cxx @@ -18,6 +18,7 @@ * along with this library. If not, see . * */ +#include "openturns/EnumerateFunction.hxx" #include "openturns/LinearEnumerateFunction.hxx" #include "openturns/OSS.hxx" #include "openturns/PersistentObjectFactory.hxx" @@ -174,6 +175,19 @@ void LinearEnumerateFunction::setUpperBound(const Indices & /*upperBound*/) throw NotYetImplementedException(HERE) << " in LinearEnumerateFunction::setUpperBound"; } +/* The marginal enumerate function */ +EnumerateFunction LinearEnumerateFunction::getMarginal(const Indices & indices) const +{ + const UnsignedInteger inputDimension = getDimension(); + if (!indices.check(inputDimension)) + { + throw InvalidArgumentException(HERE) << "Indices " << indices << "must not exceed dimension " << inputDimension; + } + const UnsignedInteger activeDimension = indices.getSize(); + const LinearEnumerateFunction enumerateFunctionMarginal(activeDimension); + return enumerateFunctionMarginal; +} + /* Method save() stores the object through the StorageManager */ void LinearEnumerateFunction::save(Advocate & adv) const { diff --git a/lib/src/Base/Func/NormInfEnumerateFunction.cxx b/lib/src/Base/Func/NormInfEnumerateFunction.cxx index cb6b73bd11..c44f08ffb8 100644 --- a/lib/src/Base/Func/NormInfEnumerateFunction.cxx +++ b/lib/src/Base/Func/NormInfEnumerateFunction.cxx @@ -18,6 +18,7 @@ * along with this library. If not, see . 
* */ +#include "openturns/EnumerateFunction.hxx" #include "openturns/NormInfEnumerateFunction.hxx" #include "openturns/OSS.hxx" #include "openturns/PersistentObjectFactory.hxx" @@ -143,6 +144,19 @@ UnsignedInteger NormInfEnumerateFunction::getMaximumDegreeStrataIndex(const Unsi return maximumDegree / getDimension(); } +/* The marginal enumerate function */ +EnumerateFunction NormInfEnumerateFunction::getMarginal(const Indices & indices) const +{ + const UnsignedInteger inputDimension = getDimension(); + if (!indices.check(inputDimension)) + { + throw InvalidArgumentException(HERE) << "Indices " << indices << "must not exceed dimension " << inputDimension; + } + const UnsignedInteger activeDimension = indices.getSize(); + const NormInfEnumerateFunction enumerateFunctionMarginal(activeDimension); + return enumerateFunctionMarginal; +} + /* Method save() stores the object through the StorageManager */ void NormInfEnumerateFunction::save(Advocate & adv) const { diff --git a/lib/src/Base/Func/openturns/EnumerateFunction.hxx b/lib/src/Base/Func/openturns/EnumerateFunction.hxx index 44b4f55e2c..7e8a45b3fc 100644 --- a/lib/src/Base/Func/openturns/EnumerateFunction.hxx +++ b/lib/src/Base/Func/openturns/EnumerateFunction.hxx @@ -77,6 +77,12 @@ public: /** Basis size from degree */ UnsignedInteger getBasisSizeFromTotalDegree(const UnsignedInteger maximumDegree) const; + /** The marginal enumerate function */ + EnumerateFunction getMarginal(const Indices & indices) const; + + /** The marginal enumerate function */ + EnumerateFunction getMarginal(const UnsignedInteger i) const; + /** Dimension accessor */ void setDimension(const UnsignedInteger dimension); UnsignedInteger getDimension() const; diff --git a/lib/src/Base/Func/openturns/EnumerateFunctionImplementation.hxx b/lib/src/Base/Func/openturns/EnumerateFunctionImplementation.hxx index 6535de47e4..9a68636805 100644 --- a/lib/src/Base/Func/openturns/EnumerateFunctionImplementation.hxx +++ 
b/lib/src/Base/Func/openturns/EnumerateFunctionImplementation.hxx @@ -26,6 +26,9 @@ BEGIN_NAMESPACE_OPENTURNS +// Forward declaration +class EnumerateFunction; + /** * @class EnumerateFunctionImplementation * @@ -69,6 +72,12 @@ public: /** Basis size from total degree */ virtual UnsignedInteger getBasisSizeFromTotalDegree(const UnsignedInteger maximumDegree) const; + /** The marginal enumerate function */ + virtual EnumerateFunction getMarginal(const Indices & indices) const; + + /** The marginal enumerate function */ + virtual EnumerateFunction getMarginal(const UnsignedInteger i) const; + /** Dimension accessor */ void setDimension(const UnsignedInteger dimension); UnsignedInteger getDimension() const; diff --git a/lib/src/Base/Func/openturns/HyperbolicAnisotropicEnumerateFunction.hxx b/lib/src/Base/Func/openturns/HyperbolicAnisotropicEnumerateFunction.hxx index dff8b4e477..b2a141a8a7 100644 --- a/lib/src/Base/Func/openturns/HyperbolicAnisotropicEnumerateFunction.hxx +++ b/lib/src/Base/Func/openturns/HyperbolicAnisotropicEnumerateFunction.hxx @@ -87,6 +87,10 @@ public: /** Upper bound accessor */ void setUpperBound(const Indices & upperBound) override; + /** The marginal enumerate function */ + using EnumerateFunctionImplementation::getMarginal; + EnumerateFunction getMarginal(const Indices & indices) const override; + /** Method save() stores the object through the StorageManager */ void save(Advocate & adv) const override; diff --git a/lib/src/Base/Func/openturns/LinearEnumerateFunction.hxx b/lib/src/Base/Func/openturns/LinearEnumerateFunction.hxx index 979370947f..9c05a033ea 100644 --- a/lib/src/Base/Func/openturns/LinearEnumerateFunction.hxx +++ b/lib/src/Base/Func/openturns/LinearEnumerateFunction.hxx @@ -71,6 +71,10 @@ public: /** Upper bound accessor */ void setUpperBound(const Indices & upperBound) override; + /** The marginal enumerate function */ + using EnumerateFunctionImplementation::getMarginal; + EnumerateFunction getMarginal(const Indices & 
indices) const override; + /** Method save() stores the object through the StorageManager */ void save(Advocate & adv) const override; diff --git a/lib/src/Base/Func/openturns/NormInfEnumerateFunction.hxx b/lib/src/Base/Func/openturns/NormInfEnumerateFunction.hxx index 3859e80bc4..68596df7c1 100644 --- a/lib/src/Base/Func/openturns/NormInfEnumerateFunction.hxx +++ b/lib/src/Base/Func/openturns/NormInfEnumerateFunction.hxx @@ -61,6 +61,10 @@ public: /** The index of the strata of degree max <= maximumDegree */ UnsignedInteger getMaximumDegreeStrataIndex(const UnsignedInteger maximumDegree) const override; + /** The marginal enumerate function */ + using EnumerateFunctionImplementation::getMarginal; + EnumerateFunction getMarginal(const Indices & indices) const override; + /** Method save() stores the object through the StorageManager */ void save(Advocate & adv) const override; diff --git a/lib/src/Base/Geom/LevelSetMesher.cxx b/lib/src/Base/Geom/LevelSetMesher.cxx index 79c5bc3c37..c575cb53a1 100644 --- a/lib/src/Base/Geom/LevelSetMesher.cxx +++ b/lib/src/Base/Geom/LevelSetMesher.cxx @@ -246,6 +246,8 @@ Mesh LevelSetMesher::build(const LevelSet & levelSet, try { solver.run(); + if (!solver.getResult().getOptimalPoint().getDimension()) + throw InvalidArgumentException(HERE) << "no feasible point"; movedVertices.add(currentVertex + solver.getResult().getOptimalPoint()); } catch(...) diff --git a/lib/src/Base/Optim/Bonmin.cxx b/lib/src/Base/Optim/Bonmin.cxx index c17385aaf8..3b237d5ac5 100644 --- a/lib/src/Base/Optim/Bonmin.cxx +++ b/lib/src/Base/Optim/Bonmin.cxx @@ -165,10 +165,7 @@ void Bonmin::run() for (UnsignedInteger i = 0; i < algos.getSize(); ++ i) if (!app.options()->SetIntegerValue("bonmin." + algos[i] + "_log_level", 0)) throw InvalidArgumentException(HERE) << "Bonmin: Invalid parameter for bonmin." 
<< algos[i] << "_log_level"; - if (getMaximumConstraintError() > 0.0) - app.options()->SetNumericValue("constr_viol_tol", getMaximumConstraintError()); - else - app.options()->SetNumericValue("constr_viol_tol", SpecFunc::MinScalar); + app.options()->SetNumericValue("constr_viol_tol", std::max(getMaximumConstraintError(), SpecFunc::MinScalar)); app.options()->SetNumericValue("bound_relax_factor", 0.0); GetOptionsFromResourceMap(app.options()); diff --git a/lib/src/Base/Optim/Cobyla.cxx b/lib/src/Base/Optim/Cobyla.cxx index f79c3f0f76..3e34d3895b 100644 --- a/lib/src/Base/Optim/Cobyla.cxx +++ b/lib/src/Base/Optim/Cobyla.cxx @@ -212,6 +212,7 @@ int Cobyla::ComputeObjectiveAndConstraint(int n, /* Convert the input vector to Point */ Point inP(n); std::copy(x, x + n, inP.begin()); + Point inClip(inP); const UnsignedInteger nbIneqConst = problem.getInequalityConstraint().getOutputDimension(); const UnsignedInteger nbEqConst = problem.getEqualityConstraint().getOutputDimension(); @@ -226,7 +227,6 @@ int Cobyla::ComputeObjectiveAndConstraint(int n, throw InvalidArgumentException(HERE) << "Cobyla got a nan/inf input value"; // evaluate the function on the clipped point (still penalized if outside the bounds) - Point inClip(inP); if (problem.hasBounds()) { const Point lowerBound(problem.getBounds().getLowerBound()); @@ -235,9 +235,9 @@ int Cobyla::ComputeObjectiveAndConstraint(int n, for (UnsignedInteger i = 0; i < inP.getDimension(); ++ i) { if (problem.getBounds().getFiniteLowerBound()[i]) - inClip[i] = std::max(inP[i], lowerBound[i] - maximumConstraintError); + inClip[i] = std::max(inClip[i], lowerBound[i] - maximumConstraintError); if (problem.getBounds().getFiniteUpperBound()[i]) - inClip[i] = std::min(inP[i], upperBound[i] + maximumConstraintError); + inClip[i] = std::min(inClip[i], upperBound[i] + maximumConstraintError); } } outP = problem.getObjective().operator()(inClip); @@ -266,7 +266,7 @@ int Cobyla::ComputeObjectiveAndConstraint(int n, /* Compute the inequality 
constraints at inP */ if (problem.hasInequalityConstraint()) { - const Point constraintInequalityValue(problem.getInequalityConstraint().operator()(inP)); + const Point constraintInequalityValue(problem.getInequalityConstraint().operator()(inClip)); algorithm->inequalityConstraintHistory_.add(constraintInequalityValue); for(UnsignedInteger index = 0; index < nbIneqConst; ++index) constraintValue[index + shift] = constraintInequalityValue[index]; shift += nbIneqConst; @@ -275,7 +275,7 @@ int Cobyla::ComputeObjectiveAndConstraint(int n, /* Compute the equality constraints at inP */ if (problem.hasEqualityConstraint()) { - const Point constraintEqualityValue = problem.getEqualityConstraint().operator()(inP); + const Point constraintEqualityValue = problem.getEqualityConstraint().operator()(inClip); algorithm->equalityConstraintHistory_.add(constraintEqualityValue); for(UnsignedInteger index = 0; index < nbEqConst; ++index) constraintValue[index + shift] = constraintEqualityValue[index] + algorithm->getMaximumConstraintError(); shift += nbEqConst; diff --git a/lib/src/Base/Optim/MultiStart.cxx b/lib/src/Base/Optim/MultiStart.cxx index 8d5857164c..0b8e68529c 100644 --- a/lib/src/Base/Optim/MultiStart.cxx +++ b/lib/src/Base/Optim/MultiStart.cxx @@ -116,8 +116,10 @@ void MultiStart::run() try { solver.run(); - ++ successNumber; const OptimizationResult result(solver.getResult()); + if (!result.getOptimalPoint().getDimension()) + throw InvalidArgumentException(HERE) << "no feasible point"; + ++ successNumber; LOGDEBUG(OSS() << "Local search succeeded with " << result.getStatusMessage()); if (keepResults_) resultCollection_.add(result); diff --git a/lib/src/Base/Optim/OptimizationAlgorithmImplementation.cxx b/lib/src/Base/Optim/OptimizationAlgorithmImplementation.cxx index b3c5f11d5d..71ea14788b 100644 --- a/lib/src/Base/Optim/OptimizationAlgorithmImplementation.cxx +++ b/lib/src/Base/Optim/OptimizationAlgorithmImplementation.cxx @@ -355,7 +355,10 @@ void 
OptimizationAlgorithmImplementation::setResultFromEvaluationHistory( if (!result_.getOptimalPoint().getDimension()) { result_.setStatus(OptimizationResult::FAILURE); - throw InvalidArgumentException(HERE) << "no feasible point found during optimization"; + if (checkStatus_) + throw InvalidArgumentException(HERE) << "no feasible point found during optimization"; + else + LOGWARN(OSS() << "no feasible point found during optimization"); } result_.setCallsNumber(size); } diff --git a/lib/src/Base/Optim/OptimizationResult.cxx b/lib/src/Base/Optim/OptimizationResult.cxx index 6285e5ca78..b61a171ec1 100644 --- a/lib/src/Base/Optim/OptimizationResult.cxx +++ b/lib/src/Base/Optim/OptimizationResult.cxx @@ -382,38 +382,38 @@ Graph OptimizationResult::drawErrorHistory() const Graph result("Error history", iterationNumber_ > 0 ? "Iteration number" : "Evaluation number", "Error value", true, "topright", 1.0, GraphImplementation::LOGY); result.setGrid(true); result.setGridColor("black"); -// create a sample with the iteration number to be plotted as x data + // create a sample with the iteration number to be plotted as x data const UnsignedInteger size = getAbsoluteErrorHistory().getSize(); { Sample data(getAbsoluteErrorHistory()); for (UnsignedInteger i = 0; i < size; ++i) if (data(i, 0) <= 0.0) data(i, 0) = SpecFunc::ScalarEpsilon; - Curve absoluteErrorCurve( data, "absolute error" ); + Curve absoluteErrorCurve(data, "absolute error"); absoluteErrorCurve.setLegend("absolute error"); result.add( absoluteErrorCurve ); } -// Relative error + // Relative error { Sample data(getRelativeErrorHistory()); for (UnsignedInteger i = 0; i < size; ++i) if (data(i, 0) <= 0.0) data(i, 0) = SpecFunc::ScalarEpsilon; - Curve relativeErrorCurve( data, "relative error" ); + Curve relativeErrorCurve(data, "relative error"); relativeErrorCurve.setLegend("relative error"); result.add( relativeErrorCurve ); } -// Residual error + // Residual error { Sample data(getResidualErrorHistory()); for 
(UnsignedInteger i = 0; i < size; ++i) if (data(i, 0) <= 0.0) data(i, 0) = SpecFunc::ScalarEpsilon; - Curve residualErrorCurve( data, "residual error" ); + Curve residualErrorCurve(data, "residual error"); residualErrorCurve.setLegend("residual error"); result.add( residualErrorCurve ); } -// Constraint error + // Constraint error { Sample data(getConstraintErrorHistory()); for (UnsignedInteger i = 0; i < size; ++i) if (data(i, 0) <= 0.0) data(i, 0) = SpecFunc::ScalarEpsilon; - Curve constraintErrorCurve( data, "constraint error" ); + Curve constraintErrorCurve(data, "constraint error"); constraintErrorCurve.setLegend("constraint error"); - result.add( constraintErrorCurve ); + result.add(constraintErrorCurve); } result.setYMargin(0.0);// tighten the Y axis return result; @@ -424,6 +424,8 @@ Graph OptimizationResult::drawOptimalValueHistory() const { if (getProblem().getObjective().getOutputDimension() > 1) throw NotYetImplementedException(HERE) << "drawOptimalValueHistory is not available for multi-objective"; + if (!getOptimalPoint().getDimension()) + throw InvalidDimensionException(HERE) << "drawOptimalValueHistory cannot be called without feasible point"; Graph result("Optimal value history", iterationNumber_ > 0 ? 
"Iteration number" : "Evaluation number", "Optimal value", true, "topright", 1.0); result.setGrid(true); result.setGridColor("black"); diff --git a/lib/src/Base/Optim/Pagmo.cxx b/lib/src/Base/Optim/Pagmo.cxx index 4fb0d61cf8..2adb4f7acf 100644 --- a/lib/src/Base/Optim/Pagmo.cxx +++ b/lib/src/Base/Optim/Pagmo.cxx @@ -45,7 +45,8 @@ #endif #include #include -#if (PAGMO_VERSION_MAJOR * 1000 + PAGMO_VERSION_MINOR) >= 2019 +#define PAGMO_VERSION_NR PAGMO_VERSION_MAJOR * 100000 + PAGMO_VERSION_MINOR * 100 + PAGMO_VERSION_PATCH +#if PAGMO_VERSION_NR >= 201900 #include #endif #include @@ -63,7 +64,9 @@ struct PagmoProblem { PagmoProblem() {}; - PagmoProblem(const Pagmo *algorithm, Sample *evaluationInputHistory, Sample *evaluationOutputHistory) + PagmoProblem(const Pagmo *algorithm, + Sample *evaluationInputHistory, + Sample *evaluationOutputHistory) : algorithm_(algorithm) , evaluationInputHistory_(evaluationInputHistory) , evaluationOutputHistory_(evaluationOutputHistory) @@ -109,8 +112,8 @@ struct PagmoProblem pagmo::vector_double fitness(const pagmo::vector_double & inv) const { const Point inP(renumber(Point(inv.begin(), inv.end()))); - evaluationInputHistory_->add(inP); Point outP(algorithm_->getProblem().getObjective()(inP)); + evaluationInputHistory_->add(inP); evaluationOutputHistory_->add(outP); for (UnsignedInteger i = 0; i < outP.getDimension(); ++ i) if (!algorithm_->getProblem().isMinimization(i)) @@ -129,7 +132,7 @@ struct PagmoProblem if (algorithm_->progressCallback_.first) { const UnsignedInteger callsNumber = evaluationInputHistory_->getSize(); - algorithm_->progressCallback_.first((100.0 * callsNumber) / (algorithm_->getStartingSample().getSize() * algorithm_->getMaximumIterationNumber()), algorithm_->progressCallback_.second); + algorithm_->progressCallback_.first((100.0 * callsNumber) / (algorithm_->getStartingSample().getSize() * (algorithm_->getMaximumIterationNumber() + 1)), algorithm_->progressCallback_.second); } if 
(algorithm_->stopCallback_.first && algorithm_->stopCallback_.first(algorithm_->stopCallback_.second)) throw InterruptionException(HERE) << "User stopped optimization"; @@ -176,6 +179,7 @@ struct PagmoProblem const UnsignedInteger blockSize = algorithm_->getBlockSize(); const UnsignedInteger size = xs.size() / inputDimension; const UnsignedInteger blockNumber = static_cast(ceil(1.0 * size / blockSize)); + UnsignedInteger totalDimension = outputDimension; if (problem.hasEqualityConstraint()) totalDimension += problem.getEqualityConstraint().getOutputDimension(); @@ -192,13 +196,14 @@ struct PagmoProblem const Point xsi(xs.begin() + offset + i * inputDimension, xs.begin() + offset + (i + 1) * inputDimension); inSb[i] = renumber(xsi); } - evaluationInputHistory_->add(inSb); Sample outSb(problem.getObjective()(inSb)); + evaluationInputHistory_->add(inSb); evaluationOutputHistory_->add(outSb); for (UnsignedInteger i = 0; i < effectiveBlockSize; ++ i) for (UnsignedInteger j = 0; j < outputDimension; ++ j) if (!problem.isMinimization(j)) outSb(i, j) *= -1.0; + if (problem.hasEqualityConstraint()) outSb.stack(problem.getEqualityConstraint()(inSb)); if (problem.hasInequalityConstraint()) @@ -215,7 +220,7 @@ struct PagmoProblem if (algorithm_->progressCallback_.first) { const UnsignedInteger callsNumber = evaluationInputHistory_->getSize(); - algorithm_->progressCallback_.first((100.0 * callsNumber) / (algorithm_->getStartingSample().getSize() * algorithm_->getMaximumIterationNumber()), algorithm_->progressCallback_.second); + algorithm_->progressCallback_.first((100.0 * callsNumber) / (algorithm_->getStartingSample().getSize() * (algorithm_->getMaximumIterationNumber() + 1)), algorithm_->progressCallback_.second); } if (algorithm_->stopCallback_.first && algorithm_->stopCallback_.first(algorithm_->stopCallback_.second)) throw InterruptionException(HERE) << "User stopped optimization"; @@ -293,7 +298,7 @@ void Pagmo::checkProblem(const OptimizationProblem & problem) const if 
(problem.hasResidualFunction() || problem.hasLevelFunction()) throw InvalidArgumentException(HERE) << "Pagmo does not support least squares or nearest point problems"; const Description multiObjectiveAgorithms = {"nsga2", "moead", -#if (PAGMO_VERSION_MAJOR * 1000 + PAGMO_VERSION_MINOR) >= 2019 +#if PAGMO_VERSION_NR >= 201900 "moead_gen", #endif "mhaco", "nspso" @@ -314,13 +319,15 @@ void Pagmo::checkProblem(const OptimizationProblem & problem) const void Pagmo::run() { - if (!startingSample_.getSize()) + Sample startingSample(getStartingSample()); + UnsignedInteger size = startingSample.getSize(); + if (!size) throw InvalidArgumentException(HERE) << "Starting sample is empty"; - if (startingSample_.getDimension() != getProblem().getDimension()) + if (startingSample.getDimension() != getProblem().getDimension()) throw InvalidArgumentException(HERE) << "Starting sample dimension does not match problem dimension"; - for (UnsignedInteger i = 0; i < startingSample_.getSize(); ++ i) + for (UnsignedInteger i = 0; i < size; ++ i) { - const Point inP(startingSample_[i]); + const Point inP(startingSample[i]); if (!getProblem().getBounds().contains(inP)) LOGWARN(OSS() << "Starting point " << i << " lies outside bounds"); if (!getProblem().isContinuous()) @@ -345,61 +352,16 @@ void Pagmo::run() pagmo::problem prob(pproblem); const pagmo::vector_double ctol(prob.get_nc(), getMaximumConstraintError()); prob.set_c_tol(ctol); - const Description constrainedAgorithms = {"gaco", "ihs"}; - Bool emulatedConstraints = false; - if ((getProblem().hasInequalityConstraint() || getProblem().hasEqualityConstraint()) && !constrainedAgorithms.contains(getAlgorithmName())) + + // most algorithms do not support constraints but they can be emulated (by penalization for example) + const Bool emulatedConstraints = (getProblem().hasInequalityConstraint() || getProblem().hasEqualityConstraint()) + && !Description({"gaco", "ihs"}).contains(getAlgorithmName()); + if (emulatedConstraints) { - 
emulatedConstraints = true; const String unconstrainMethod = ResourceMap::GetAsString("Pagmo-UnconstrainMethod"); prob = pagmo::unconstrain(prob, unconstrainMethod); } - pagmo::population pop(prob, 0, 0); - // nsga2 needs the population size to be a multiple of 4 - UnsignedInteger populationSize = startingSample_.getSize(); - if ((algoName_ == "nsga2") && (populationSize % 4)) - { - LOGINFO(OSS() << "Pagmo: must drop the last " << (populationSize % 4) << " points of the initial population for NSGA2 as the size=" << populationSize << " is not a multiple of 4"); - populationSize = 4 * (populationSize / 4); - } - // with mhaco starting population must satisfy constraints - if ((algoName_ == "mhaco") && (getProblem().hasInequalityConstraint() || getProblem().hasEqualityConstraint())) - { - Sample startingSampleConstrained(0, getProblem().getDimension()); - Sample ineqOutput; - if (getProblem().hasInequalityConstraint()) - ineqOutput = getProblem().getInequalityConstraint()(startingSample_); - Sample eqOutput; - if (getProblem().hasEqualityConstraint()) - eqOutput = getProblem().getEqualityConstraint()(startingSample_); - for (UnsignedInteger i = 0; i < populationSize; ++ i) - { - Bool ok = true; - if (getProblem().hasInequalityConstraint()) - for (UnsignedInteger j = 0; j < ineqOutput.getDimension(); ++ j) - ok = ok && (ineqOutput(i, j) >= -getMaximumConstraintError()); - if (getProblem().hasEqualityConstraint()) - for (UnsignedInteger j = 0; j < eqOutput.getDimension(); ++ j) - ok = ok && (std::abs(eqOutput(i, j)) <= getMaximumConstraintError()); - if (ok) - startingSampleConstrained.add(startingSample_[i]); - } - if (!startingSampleConstrained.getSize()) - throw InvalidArgumentException(HERE) << "No point in starting population satisfies constraints"; - if (startingSampleConstrained.getSize() < populationSize) - { - const RandomGenerator::UnsignedIntegerCollection selection(RandomGenerator::IntegerGenerate(populationSize, startingSampleConstrained.getSize())); - 
const Indices indices(selection.begin(), selection.end()); - startingSample_ = startingSampleConstrained.select(indices); - LOGINFO(OSS() << "Pagmo: Initial population bootstrapped to satisfy constraints"); - } - } - for (UnsignedInteger i = 0; i < populationSize; ++ i) - { - const Point inP(startingSample_[i]); - pagmo::vector_double x(pproblem.renumber(inP).toStdVector()); - pop.push_back(x); - } pagmo::algorithm algo; if (algoName_ == "gaco") { @@ -414,10 +376,15 @@ void Pagmo::run() const Scalar focus = ResourceMap::GetAsScalar("Pagmo-gaco-focus"); const Bool memory = ResourceMap::GetAsBool("Pagmo-memory"); if (!memory) - ker = std::min(ker, populationSize); + ker = std::min(ker, size); pagmo::gaco algorithm_impl(getMaximumIterationNumber(), ker, q, oracle, acc, threshold, n_gen_mark, impstop, getMaximumCallsNumber(), focus, memory); +#if PAGMO_VERSION_NR >= 201901 + // requires https://github.com/esa/pagmo2/pull/575 + algorithm_impl.set_bfe(pagmo::bfe{}); +#else if (!emulatedConstraints) algorithm_impl.set_bfe(pagmo::bfe{}); +#endif algo = algorithm_impl; } else if (algoName_ == "de") @@ -483,8 +450,13 @@ void Pagmo::run() const UnsignedInteger neighb_param = ResourceMap::GetAsUnsignedInteger("Pagmo-pso-neighb_param"); const Bool memory = ResourceMap::GetAsBool("Pagmo-memory"); pagmo::pso_gen algorithm_impl(getMaximumIterationNumber(), omega, eta1, eta2, max_vel, variant, neighb_type, neighb_param, memory); +#if PAGMO_VERSION_NR >= 201901 + // requires https://github.com/esa/pagmo2/pull/575 + algorithm_impl.set_bfe(pagmo::bfe{}); +#else if (!emulatedConstraints) algorithm_impl.set_bfe(pagmo::bfe{}); +#endif algo = algorithm_impl; } else if (algoName_ == "sea") @@ -532,7 +504,16 @@ void Pagmo::run() const Scalar cmu = ResourceMap::GetAsScalar("Pagmo-cmaes-cmu"); const Scalar sigma0 = ResourceMap::GetAsScalar("Pagmo-cmaes-sigma0"); const Bool memory = ResourceMap::GetAsBool("Pagmo-memory"); - algo = pagmo::cmaes(getMaximumIterationNumber(), cc, cs, c1, cmu, 
sigma0, getMaximumResidualError(), getMaximumAbsoluteError(), memory, getProblem().hasBounds()); + const Bool force_bounds = getProblem().hasBounds(); + pagmo::cmaes algorithm_impl(getMaximumIterationNumber(), cc, cs, c1, cmu, sigma0, getMaximumResidualError(), getMaximumAbsoluteError(), memory, force_bounds); +#if PAGMO_VERSION_NR >= 201901 + // requires https://github.com/esa/pagmo2/pull/575 + algorithm_impl.set_bfe(pagmo::bfe{}); +#else + if (!emulatedConstraints) + algorithm_impl.set_bfe(pagmo::bfe{}); +#endif + algo = algorithm_impl; } else if (algoName_ == "xnes") { @@ -542,19 +523,33 @@ void Pagmo::run() const Scalar eta_b = ResourceMap::GetAsScalar("Pagmo-xnes-eta_b"); const Scalar sigma0 = ResourceMap::GetAsScalar("Pagmo-xnes-sigma0"); const Bool memory = ResourceMap::GetAsBool("Pagmo-memory"); - algo = pagmo::xnes(getMaximumIterationNumber(), eta_mu, eta_sigma, eta_b, sigma0, getMaximumResidualError(), getMaximumAbsoluteError(), memory, getProblem().hasBounds()); + const Bool force_bounds = getProblem().hasBounds(); + algo = pagmo::xnes(getMaximumIterationNumber(), eta_mu, eta_sigma, eta_b, sigma0, getMaximumResidualError(), getMaximumAbsoluteError(), memory, force_bounds); } #endif else if (algoName_ == "nsga2") { + const UnsignedInteger reminder = size % 4; + if (reminder) + { + LOGINFO(OSS() << "Pagmo: must drop the last " << reminder << " points of the initial population for NSGA2 as the size (" << size << ") is not a multiple of 4"); + size -= reminder; + startingSample.split(size); + } + // nsga2(unsigned gen = 1u, double cr = 0.95, double eta_c = 10., double m = 0.01, double eta_m = 50., unsigned seed = pagmo::random_device::next()) const Scalar cr = ResourceMap::GetAsScalar("Pagmo-nsga2-cr"); const Scalar eta_c = ResourceMap::GetAsScalar("Pagmo-nsga2-eta_c"); const Scalar m = ResourceMap::GetAsScalar("Pagmo-nsga2-m"); const Scalar eta_m = ResourceMap::GetAsScalar("Pagmo-nsga2-eta_m"); pagmo::nsga2 algorithm_impl(getMaximumIterationNumber(), cr, 
eta_c, m, eta_m); +#if PAGMO_VERSION_NR >= 201901 + // requires https://github.com/esa/pagmo2/pull/575 + algorithm_impl.set_bfe(pagmo::bfe{}); +#else if (!emulatedConstraints) algorithm_impl.set_bfe(pagmo::bfe{}); +#endif algo = algorithm_impl; } else if (algoName_ == "moead") @@ -571,7 +566,7 @@ void Pagmo::run() const Bool preserve_diversity = ResourceMap::GetAsBool("Pagmo-moead-preserve_diversity"); algo = pagmo::moead(getMaximumIterationNumber(), weight_generation, decomposition, neighbours, CR, F, eta_m, realb, limit, preserve_diversity); } -#if (PAGMO_VERSION_MAJOR * 1000 + PAGMO_VERSION_MINOR) >= 2019 +#if PAGMO_VERSION_NR >= 201900 else if (algoName_ == "moead_gen") { // moead_gen(unsigned gen = 1u, std::string weight_generation = "grid", std::string decomposition = "tchebycheff", population::size_type neighbours = 20u, double CR = 1.0, double F = 0.5, double eta_m = 20., double realb = 0.9, unsigned limit = 2u, bool preserve_diversity = true, unsigned seed = pagmo::random_device::next()) @@ -584,11 +579,52 @@ void Pagmo::run() const Scalar realb = ResourceMap::GetAsScalar("Pagmo-moead-realb"); const UnsignedInteger limit = ResourceMap::GetAsUnsignedInteger("Pagmo-moead-limit"); const Bool preserve_diversity = ResourceMap::GetAsBool("Pagmo-moead-preserve_diversity"); - algo = pagmo::moead_gen(getMaximumIterationNumber(), weight_generation, decomposition, neighbours, CR, F, eta_m, realb, limit, preserve_diversity); + pagmo::moead_gen algorithm_impl(getMaximumIterationNumber(), weight_generation, decomposition, neighbours, CR, F, eta_m, realb, limit, preserve_diversity); +#if PAGMO_VERSION_NR >= 201901 + // requires https://github.com/esa/pagmo2/pull/575 + algorithm_impl.set_bfe(pagmo::bfe{}); +#else + if (!emulatedConstraints) + algorithm_impl.set_bfe(pagmo::bfe{}); +#endif + algo = algorithm_impl; } #endif else if (algoName_ == "mhaco") { + // starting population must satisfy constraints + if (getProblem().hasInequalityConstraint() || 
getProblem().hasEqualityConstraint()) + { + Sample startingSampleConstrained(0, getProblem().getDimension()); + Sample ineqOutput; + if (getProblem().hasInequalityConstraint()) + ineqOutput = getProblem().getInequalityConstraint()(startingSample); + Sample eqOutput; + if (getProblem().hasEqualityConstraint()) + eqOutput = getProblem().getEqualityConstraint()(startingSample); + for (UnsignedInteger i = 0; i < size; ++ i) + { + Bool ok = true; + if (getProblem().hasInequalityConstraint()) + for (UnsignedInteger j = 0; j < ineqOutput.getDimension(); ++ j) + ok = ok && (ineqOutput(i, j) >= -getMaximumConstraintError()); + if (getProblem().hasEqualityConstraint()) + for (UnsignedInteger j = 0; j < eqOutput.getDimension(); ++ j) + ok = ok && (std::abs(eqOutput(i, j)) <= getMaximumConstraintError()); + if (ok) + startingSampleConstrained.add(startingSample[i]); + } + if (!startingSampleConstrained.getSize()) + throw InvalidArgumentException(HERE) << "No point in starting population satisfies constraints"; + if (startingSampleConstrained.getSize() < size) + { + const RandomGenerator::UnsignedIntegerCollection selection(RandomGenerator::IntegerGenerate(size, startingSampleConstrained.getSize())); + const Indices indices(selection.begin(), selection.end()); + startingSample = startingSampleConstrained.select(indices); + LOGINFO(OSS() << "Pagmo: Initial population bootstrapped to satisfy constraints"); + } + } + // maco(unsigned gen = 100u, unsigned ker = 63u, double q = 1.0, unsigned threshold = 1u, unsigned n_gen_mark = 7u, unsigned evalstop = 100000u, double focus = 0., bool memory = false, unsigned seed = pagmo::random_device::next()) UnsignedInteger ker = ResourceMap::GetAsUnsignedInteger("Pagmo-mhaco-ker"); const Scalar q = ResourceMap::GetAsScalar("Pagmo-mhaco-q"); @@ -597,10 +633,15 @@ void Pagmo::run() const Scalar focus = ResourceMap::GetAsScalar("Pagmo-mhaco-focus"); const Bool memory = ResourceMap::GetAsBool("Pagmo-memory"); if (!memory) - ker = std::min(ker, 
populationSize); + ker = std::min(ker, size); pagmo::maco algorithm_impl(getMaximumIterationNumber(), ker, q, threshold, n_gen_mark, getMaximumCallsNumber(), focus, memory); +#if PAGMO_VERSION_NR >= 201901 + // requires https://github.com/esa/pagmo2/pull/575 + algorithm_impl.set_bfe(pagmo::bfe{}); +#else if (!emulatedConstraints) algorithm_impl.set_bfe(pagmo::bfe{}); +#endif algo = algorithm_impl; } else if (algoName_ == "nspso") @@ -615,8 +656,13 @@ void Pagmo::run() const String diversity_mechanism = ResourceMap::GetAsString("Pagmo-nspso-diversity_mechanism"); const Bool memory = ResourceMap::GetAsBool("Pagmo-memory"); pagmo::nspso algorithm_impl(getMaximumIterationNumber(), omega, c1, c2, chi, v_coeff, leader_selection_range, diversity_mechanism, memory); +#if PAGMO_VERSION_NR >= 201901 + // requires https://github.com/esa/pagmo2/pull/575 + algorithm_impl.set_bfe(pagmo::bfe{}); +#else if (!emulatedConstraints) algorithm_impl.set_bfe(pagmo::bfe{}); +#endif algo = algorithm_impl; } else @@ -624,7 +670,44 @@ void Pagmo::run() algo.set_verbosity(Log::HasDebug()); algo.set_seed(seed_); + // evaluate initial population + pagmo::population pop(prob); + // requires https://github.com/esa/pagmo2/pull/575 +#if PAGMO_VERSION_NR >= 201901 + const OptimizationProblem problem(getProblem()); + const UnsignedInteger inputDimension = problem.getObjective().getInputDimension(); + const UnsignedInteger blockSize = getBlockSize(); + const UnsignedInteger blockNumber = static_cast(ceil(1.0 * size / blockSize)); + for (UnsignedInteger outerSampling = 0; outerSampling < blockNumber; ++ outerSampling) + { + const UnsignedInteger effectiveBlockSize = ((outerSampling == (blockNumber - 1)) && (size % blockSize)) ? 
(size % blockSize) : blockSize; + Sample inSb(effectiveBlockSize, inputDimension); + for (UnsignedInteger i = 0; i < effectiveBlockSize; ++ i) + inSb[i] = startingSample[i + outerSampling * blockSize]; + const pagmo::vector_double inV(inSb.getImplementation()->getData().toStdVector()); + const pagmo::vector_double outV(prob.batch_fitness(inV)); + const UnsignedInteger nf = outV.size() / effectiveBlockSize; + for (UnsignedInteger i = 0; i < effectiveBlockSize; ++ i) + { + const pagmo::vector_double inVi(inV.begin() + i * inputDimension, inV.begin() + (i + 1) * inputDimension); + const pagmo::vector_double outVi(outV.begin() + i * nf, outV.begin() + (i + 1) * nf); + pop.push_back(inVi, outVi); + } + } +#else + for (UnsignedInteger i = 0; i < size; ++ i) + { + const Point inP(startingSample[i]); + pagmo::vector_double x(pproblem.renumber(inP).toStdVector()); + pagmo::vector_double y(prob.fitness(x)); + pop.push_back(x, y); + } +#endif + + // evolve initial population over several generations pop = algo.evolve(pop); + + // retrieve results result_ = OptimizationResult(getProblem()); result_.setCallsNumber(evaluationInputHistory.getSize()); result_.setIterationNumber(getMaximumIterationNumber()); @@ -757,7 +840,6 @@ Point Pagmo::getStartingPoint() const /* Starting sample accessor */ void Pagmo::setStartingSample(const Sample & startingSample) { -// checkStartingSampleConsistentWithOptimizationProblem(startingSample, getProblem()); startingSample_ = startingSample; } @@ -775,7 +857,7 @@ Description Pagmo::GetAlgorithmNames() "cmaes", "xnes", #endif "nsga2", "moead", -#if (PAGMO_VERSION_MAJOR * 1000 + PAGMO_VERSION_MINOR) >= 2019 +#if PAGMO_VERSION_NR >= 201900 "moead_gen", #endif "mhaco", "nspso" diff --git a/lib/src/Base/Stat/LowDiscrepancySequenceImplementation.cxx b/lib/src/Base/Stat/LowDiscrepancySequenceImplementation.cxx index ba640b5090..e21d052c68 100644 --- a/lib/src/Base/Stat/LowDiscrepancySequenceImplementation.cxx +++ 
b/lib/src/Base/Stat/LowDiscrepancySequenceImplementation.cxx @@ -383,7 +383,7 @@ Unsigned64BitsInteger LowDiscrepancySequenceImplementation::GetNextPrimeNumber(c #if PRIMESIEVE_VERSION_MAJOR >= 11 it.jump_to(n); #else - it.skipto(n); + it.skipto(n - 1); #endif return it.next_prime(); #else diff --git a/lib/src/CMakeLists.txt b/lib/src/CMakeLists.txt index 26929ba640..d786d0697f 100644 --- a/lib/src/CMakeLists.txt +++ b/lib/src/CMakeLists.txt @@ -3,7 +3,6 @@ # Register current directory files ot_add_current_dir_to_include_dirs () ot_install_header_file (OT.hxx) -ot_add_source_file (openturns_library_ok.c) # Recurse in subdirectories add_subdirectory (Base) @@ -124,11 +123,6 @@ target_compile_features (OT PUBLIC cxx_std_11) set_property (TARGET OT PROPERTY OPENTURNS_ENABLED_FEATURES "${OPENTURNS_ENABLED_FEATURES}") set_property (TARGET OT APPEND PROPERTY EXPORT_PROPERTIES "OPENTURNS_ENABLED_FEATURES") -# Build an empty executable to check link dependencies and completeness -add_executable (linktest main.cxx) -target_link_libraries (linktest OT) -set_target_properties (linktest PROPERTIES INSTALL_RPATH "${CMAKE_INSTALL_RPATH};${PROJECT_BINARY_DIR}/lib/src") - # Add targets to the build-tree export set export(TARGETS OT FILE "${PROJECT_BINARY_DIR}/OpenTURNS-Targets.cmake") diff --git a/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/GeneralLinearModelAlgorithm.cxx b/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/GeneralLinearModelAlgorithm.cxx index 4525838e8a..39625871ef 100644 --- a/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/GeneralLinearModelAlgorithm.cxx +++ b/lib/src/Uncertainty/Algorithm/MetaModel/Kriging/GeneralLinearModelAlgorithm.cxx @@ -464,8 +464,10 @@ Scalar GeneralLinearModelAlgorithm::maximizeReducedLogLikelihood() solver.run(); const OptimizationAlgorithm::Result result(solver.getResult()); const Scalar optimalLogLikelihood = result.getOptimalValue()[0]; - const Point optimalParameters = result.getOptimalPoint(); - const UnsignedInteger 
evaluationNumber = result.getCallsNumber(); + const Point optimalParameters(result.getOptimalPoint()); + if (!optimalParameters.getDimension()) + throw InvalidArgumentException(HERE) << "optimization in GeneralLinearModelAlgorithm did not yield feasible points"; + const UnsignedInteger callsNumber = result.getCallsNumber(); // Check if the optimal value corresponds to the last computed value, in order to // see if the by-products (Cholesky factor etc) are correct if (lastReducedLogLikelihood_ != optimalLogLikelihood) @@ -475,7 +477,7 @@ Scalar GeneralLinearModelAlgorithm::maximizeReducedLogLikelihood() } // Final call to reducedLogLikelihoodFunction() in order to update the amplitude // No additional cost since the cache mechanism is activated - LOGINFO(OSS() << evaluationNumber << " evaluations, optimized parameters=" << optimalParameters << ", log-likelihood=" << optimalLogLikelihood); + LOGINFO(OSS() << callsNumber << " evaluations, optimized parameters=" << optimalParameters << ", log-likelihood=" << optimalLogLikelihood); return optimalLogLikelihood; } diff --git a/lib/src/Uncertainty/Algorithm/Optimization/EfficientGlobalOptimization.cxx b/lib/src/Uncertainty/Algorithm/Optimization/EfficientGlobalOptimization.cxx index b5e662ec1e..83532cad82 100644 --- a/lib/src/Uncertainty/Algorithm/Optimization/EfficientGlobalOptimization.cxx +++ b/lib/src/Uncertainty/Algorithm/Optimization/EfficientGlobalOptimization.cxx @@ -278,7 +278,9 @@ void EfficientGlobalOptimization::run() const OptimizationResult improvementResult(solver.getResult()); // store improvement - Point improvementValue(improvementResult.getOptimalValue()); + const Point improvementValue(improvementResult.getOptimalValue()); + if (!improvementValue.getDimension()) + throw InvalidArgumentException(HERE) << "optimization in EGO did not yield feasible points"; expectedImprovement_.add(improvementValue); const Point newPoint(improvementResult.getOptimalPoint()); diff --git 
a/lib/src/Uncertainty/Algorithm/Simulation/AdaptiveDirectionalStratification.cxx b/lib/src/Uncertainty/Algorithm/Simulation/AdaptiveDirectionalStratification.cxx index 1d9934101f..b21f801aa5 100644 --- a/lib/src/Uncertainty/Algorithm/Simulation/AdaptiveDirectionalStratification.cxx +++ b/lib/src/Uncertainty/Algorithm/Simulation/AdaptiveDirectionalStratification.cxx @@ -31,7 +31,7 @@ CLASSNAMEINIT(AdaptiveDirectionalStratification) AdaptiveDirectionalStratification::AdaptiveDirectionalStratification() : EventSimulation() , partialStratification_(false) - , maximumStratificationDimension_(ResourceMap::GetAsScalar("AdaptiveDirectionalStratification-DefaultMaximumStratificationDimension")) + , maximumStratificationDimension_(ResourceMap::GetAsUnsignedInteger("AdaptiveDirectionalStratification-DefaultMaximumStratificationDimension")) { // Nothing to do } diff --git a/lib/src/Uncertainty/Bayesian/GaussianNonLinearCalibration.cxx b/lib/src/Uncertainty/Bayesian/GaussianNonLinearCalibration.cxx index b00e8d106f..1f4149a7d0 100644 --- a/lib/src/Uncertainty/Bayesian/GaussianNonLinearCalibration.cxx +++ b/lib/src/Uncertainty/Bayesian/GaussianNonLinearCalibration.cxx @@ -397,6 +397,8 @@ Point GaussianNonLinearCalibration::run(const Sample & inputObservations, } algorithm_.run(); const Point thetaStar(algorithm_.getResult().getOptimalPoint()); + if (!thetaStar.getDimension()) + throw InvalidArgumentException(HERE) << "optimization in NonLinearLeastSquaresCalibration did not yield a feasible point"; return thetaStar; } diff --git a/lib/src/Uncertainty/Bayesian/NonLinearLeastSquaresCalibration.cxx b/lib/src/Uncertainty/Bayesian/NonLinearLeastSquaresCalibration.cxx index 84b7255958..9fe1e276a4 100644 --- a/lib/src/Uncertainty/Bayesian/NonLinearLeastSquaresCalibration.cxx +++ b/lib/src/Uncertainty/Bayesian/NonLinearLeastSquaresCalibration.cxx @@ -316,7 +316,9 @@ Point NonLinearLeastSquaresCalibration::run(const Sample & inputObservations, << 
algorithm_.getImplementation()->getClassName() << " has no setStartingPoint method."); } algorithm_.run(); - Point optimalPoint(algorithm_.getResult().getOptimalPoint()); + const Point optimalPoint(algorithm_.getResult().getOptimalPoint()); + if (!optimalPoint.getDimension()) + throw InvalidArgumentException(HERE) << "optimization in NonLinearLeastSquaresCalibration did not yield a feasible point"; // If asked for the residual values if (residual.getSize() > 0) { diff --git a/lib/src/Uncertainty/Distribution/Binomial.cxx b/lib/src/Uncertainty/Distribution/Binomial.cxx index 5f4fb57d50..2dae30aca9 100644 --- a/lib/src/Uncertainty/Distribution/Binomial.cxx +++ b/lib/src/Uncertainty/Distribution/Binomial.cxx @@ -327,17 +327,21 @@ Scalar Binomial::computeScalarQuantile(const Scalar prob, return quantile; } oldCDF = cdf; + Bool forward = false; while (cdf < prob) { + forward = true; quantile += step; oldCDF = cdf; cdf = tail ? computeComplementaryCDF(quantile) : computeCDF(quantile); LOGDEBUG(OSS() << "in Binomial::computeScalarQuantile, forward search, quantile=" << quantile << ", cdf=" << cdf); } - if (cdf >= oldCDF) + if (!forward && (cdf >= oldCDF)) + { quantile -= step; + } LOGDEBUG(OSS() << "in Binomial::computeScalarQuantile, final quantile=" << quantile); - return quantile; + return std::max(0.0, quantile); } /* Get the characteristic function of the distribution, i.e. 
phi(u) = E(exp(I*u*X)) */ diff --git a/lib/src/Uncertainty/Distribution/Burr.cxx b/lib/src/Uncertainty/Distribution/Burr.cxx index 18895ddcc2..217b73f658 100644 --- a/lib/src/Uncertainty/Distribution/Burr.cxx +++ b/lib/src/Uncertainty/Distribution/Burr.cxx @@ -5,7 +5,7 @@ * Copyright 2005-2024 Airbus-EDF-IMACS-ONERA-Phimeca * * This library is free software: you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as published bydistributiolib + * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * diff --git a/lib/src/Uncertainty/Distribution/GeneralizedParetoFactory.cxx b/lib/src/Uncertainty/Distribution/GeneralizedParetoFactory.cxx index 74333ee01e..1701dabdae 100644 --- a/lib/src/Uncertainty/Distribution/GeneralizedParetoFactory.cxx +++ b/lib/src/Uncertainty/Distribution/GeneralizedParetoFactory.cxx @@ -49,7 +49,7 @@ GeneralizedParetoFactory::GeneralizedParetoFactory() { // Create the optimization solver parameters using the parameters in the ResourceMap solver_ = OptimizationAlgorithm::Build(ResourceMap::GetAsString("GeneralizedParetoFactory-DefaultOptimizationAlgorithm")); - solver_.setMaximumCallsNumber(ResourceMap::GetAsUnsignedInteger("GeneralizedParetoFactory-MaximumEvaluationNumber")); + solver_.setMaximumCallsNumber(ResourceMap::GetAsUnsignedInteger("GeneralizedParetoFactory-MaximumCallsNumber")); solver_.setMaximumAbsoluteError(ResourceMap::GetAsScalar("GeneralizedParetoFactory-MaximumAbsoluteError")); solver_.setMaximumRelativeError(ResourceMap::GetAsScalar("GeneralizedParetoFactory-MaximumRelativeError")); solver_.setMaximumResidualError(ResourceMap::GetAsScalar("GeneralizedParetoFactory-MaximumObjectiveError")); diff --git a/lib/src/Uncertainty/Distribution/Histogram.cxx b/lib/src/Uncertainty/Distribution/Histogram.cxx index 43b8d48a5f..af3cbc708e 100644 --- 
a/lib/src/Uncertainty/Distribution/Histogram.cxx +++ b/lib/src/Uncertainty/Distribution/Histogram.cxx @@ -482,9 +482,36 @@ Point Histogram::getSingularities() const Graph Histogram::drawPDF(const UnsignedInteger pointNumber, const Bool logScale) const { + // draw full bars, but take into account the quantile levels + const Scalar qMin = computeQuantile(ResourceMap::GetAsScalar("Distribution-QMin"))[0]; + const Scalar qMax = computeQuantile(ResourceMap::GetAsScalar("Distribution-QMax"))[0]; + const Scalar delta = 2.0 * (qMax - qMin) * (1.0 - 0.5 * (ResourceMap::GetAsScalar("Distribution-QMax" ) - ResourceMap::GetAsScalar("Distribution-QMin"))); + const Scalar xMinRef = qMin - delta; + const Scalar xMaxRef = qMax + delta; + + // if first bar ends before xMinRef, find last bar before xMinRef const UnsignedInteger lastIndex = cumulatedWidth_.getSize() - 1; + Scalar xMin = first_ - 0.5 * width_[0]; + if (first_ + 0.5 * width_[0] < xMinRef) + { + UnsignedInteger index = 0; + while ((index < lastIndex) && (first_ + cumulatedWidth_[index + 1] - 0.5 * width_[index + 1] < xMinRef)) + { + ++ index; + } + xMin = first_ + cumulatedWidth_[index] - 0.5 * width_[index]; + } + + // find first bar after xMaxRef + UnsignedInteger index = lastIndex; + while ((index > 0) && (first_ + cumulatedWidth_[index - 1] + 0.5 * width_[index - 1] > xMaxRef)) + { + -- index; + } + const Scalar xMax = first_ + cumulatedWidth_[index] + 0.5 * width_[index]; + // Must prefix explicitly by the class name in order to avoid conflict with the methods in the upper class - return Histogram::drawPDF(first_ - 0.5 * width_[0], first_ + cumulatedWidth_[lastIndex] + 0.5 * width_[lastIndex], pointNumber, logScale); + return Histogram::drawPDF(xMin, xMax, pointNumber, logScale); } /* Draw the PDF of the Histogram using a specific presentation */ diff --git a/lib/src/Uncertainty/Distribution/InverseGamma.cxx b/lib/src/Uncertainty/Distribution/InverseGamma.cxx index 243a29ea0e..567e6cee41 100644 --- 
a/lib/src/Uncertainty/Distribution/InverseGamma.cxx +++ b/lib/src/Uncertainty/Distribution/InverseGamma.cxx @@ -35,9 +35,6 @@ static const Factory Factory_InverseGamma; /* Default constructor */ InverseGamma::InverseGamma() : ContinuousDistribution() - , lambda_(1.0) - , k_(1.0) - , normalizationFactor_(0.0) { setName("InverseGamma"); setDimension(1); @@ -45,12 +42,11 @@ InverseGamma::InverseGamma() } /* Parameters constructor */ -InverseGamma::InverseGamma(const Scalar lambda, - const Scalar k) +InverseGamma::InverseGamma(const Scalar k, + const Scalar lambda) : ContinuousDistribution() - , lambda_(0.0) , k_(0.0) - , normalizationFactor_(0.0) + , lambda_(0.0) { setName("InverseGamma"); setKLambda(k, lambda); @@ -77,15 +73,15 @@ String InverseGamma::__repr__() const oss << "class=" << InverseGamma::GetClassName() << " name=" << getName() << " dimension=" << getDimension() - << " lambda=" << lambda_ - << " k=" << k_; + << " k=" << k_ + << " lambda=" << lambda_; return oss; } String InverseGamma::__str__(const String & ) const { OSS oss; - oss << getClassName() << "(lambda = " << lambda_ << ", k = " << k_ << ")"; + oss << getClassName() << "(k = " << k_ << ", lambda = " << lambda_ << ")"; return oss; } @@ -265,8 +261,8 @@ Point InverseGamma::computePDFGradient(const Point & point) const const Scalar x = point[0]; if (x <= 0.0) return pdfGradient; const Scalar pdf = computePDF(point); - pdfGradient[0] = (1.0 / (lambda_ * x) - k_) * pdf / lambda_; - pdfGradient[1] = -(std::log(lambda_) + std::log(x) + SpecFunc::DiGamma(k_)) * pdf; + pdfGradient[0] = -(std::log(lambda_) + std::log(x) + SpecFunc::DiGamma(k_)) * pdf; + pdfGradient[1] = (1.0 / (lambda_ * x) - k_) * pdf / lambda_; return pdfGradient; } @@ -281,8 +277,8 @@ Point InverseGamma::computeCDFGradient(const Point & point) const const Scalar lambdaXInverse = 1.0 / (lambda_ * x); const Scalar pdf = computePDF(x); const Scalar eps = std::pow(cdfEpsilon_, 1.0 / 3.0); - cdfGradient[0] = pdf * x / lambda_; - 
cdfGradient[1] = (DistFunc::pGamma(k_ + eps, lambdaXInverse, true) - DistFunc::pGamma(k_ - eps, lambdaXInverse, true)) / (2.0 * eps); + cdfGradient[0] = (DistFunc::pGamma(k_ + eps, lambdaXInverse, true) - DistFunc::pGamma(k_ - eps, lambdaXInverse, true)) / (2.0 * eps); + cdfGradient[1] = pdf * x / lambda_; return cdfGradient; } @@ -349,10 +345,7 @@ void InverseGamma::computeCovariance() const /* Parameters value and description accessor */ Point InverseGamma::getParameter() const { - Point point(2); - point[0] = lambda_; - point[1] = k_; - return point; + return {k_, lambda_}; } void InverseGamma::setParameter(const Point & parameter) @@ -366,18 +359,15 @@ void InverseGamma::setParameter(const Point & parameter) /* Parameters description accessor */ Description InverseGamma::getParameterDescription() const { - Description description(2); - description[0] = "lambda"; - description[1] = "k"; - return description; + return {"k", "lambda"}; } /* Method save() stores the object through the StorageManager */ void InverseGamma::save(Advocate & adv) const { ContinuousDistribution::save(adv); - adv.saveAttribute( "lambda_", lambda_ ); adv.saveAttribute( "k_", k_ ); + adv.saveAttribute( "lambda_", lambda_ ); adv.saveAttribute( "normalizationFactor_", normalizationFactor_ ); } @@ -385,8 +375,8 @@ void InverseGamma::save(Advocate & adv) const void InverseGamma::load(Advocate & adv) { ContinuousDistribution::load(adv); - adv.loadAttribute( "lambda_", lambda_ ); adv.loadAttribute( "k_", k_ ); + adv.loadAttribute( "lambda_", lambda_ ); adv.loadAttribute( "normalizationFactor_", normalizationFactor_ ); computeRange(); } diff --git a/lib/src/Uncertainty/Distribution/KernelSmoothing.cxx b/lib/src/Uncertainty/Distribution/KernelSmoothing.cxx index 7d9a05da9f..5427621ecf 100644 --- a/lib/src/Uncertainty/Distribution/KernelSmoothing.cxx +++ b/lib/src/Uncertainty/Distribution/KernelSmoothing.cxx @@ -25,6 +25,8 @@ #include "openturns/PersistentObjectFactory.hxx" #include 
"openturns/Brent.hxx" #include "openturns/MethodBoundEvaluation.hxx" +#include "openturns/SymbolicFunction.hxx" +#include "openturns/ParametricFunction.hxx" #include "openturns/Function.hxx" #include "openturns/HermiteFactory.hxx" #include "openturns/UniVariatePolynomial.hxx" @@ -33,6 +35,7 @@ #include "openturns/SobolSequence.hxx" #include "openturns/ResourceMap.hxx" #include "openturns/JointDistribution.hxx" +#include "openturns/CompositeDistribution.hxx" #include "openturns/BlockIndependentDistribution.hxx" BEGIN_NAMESPACE_OPENTURNS @@ -256,8 +259,39 @@ Distribution KernelSmoothing::build(const Sample & sample) const { // For 1D sample, use the rule that give the best tradeoff between speed and precision if (sample.getDimension() == 1) - return build(sample, computeMixedBandwidth(sample)); - + { + if (useLogTransform_) + { + const Scalar skewness = sample.computeSkewness()[0]; + const Scalar xMin = sample.getMin()[0]; + const Scalar xMax = sample.getMax()[0]; + const Scalar delta = (xMax - xMin) * std::max(SpecFunc::Precision, ResourceMap::GetAsScalar("KernelSmoothing-DefaultShiftScale")); + ParametricFunction transform; + ParametricFunction inverseTransform; + // Need to construct explicitly a Description to disambiguate the call + // to SymbolicFunction constructor + const Description inVars = {"x", "shift"}; + if (skewness >= 0.0) + { + const Scalar shift = delta - xMin; + transform = ParametricFunction(SymbolicFunction(inVars, {"log(x+shift)"}), {1}, {shift}); + inverseTransform = ParametricFunction(SymbolicFunction(inVars, {"exp(x)-shift"}), {1}, {shift}); + } + else + { + const Scalar shift = xMax + delta; + transform = ParametricFunction(SymbolicFunction(inVars, {"log(shift-x)"}), {1}, {shift}); + inverseTransform = ParametricFunction(SymbolicFunction(inVars, {"shift - exp(x)"}), {1}, {shift}); + } + const Sample transformedSample(transform(sample)); + const Distribution transformedDistribution(build(transformedSample, 
computeMixedBandwidth(transformedSample))); + CompositeDistribution fitted(inverseTransform, transformedDistribution); + fitted.setDescription(sample.getDescription()); + return fitted; + } // useLogTransform + else + return build(sample, computeMixedBandwidth(sample)); + } // dimension 1 // For nD sample, use the only available rule return build(sample, computeSilvermanBandwidth(sample)); } @@ -279,8 +313,8 @@ Distribution KernelSmoothing::build(const Sample & sample, if (xmin == xmax) { bandwidth_ = bandwidth; - KernelSmoothing::Implementation result(new Dirac(xmin)); - result->setDescription(sample.getDescription()); + Dirac result(xmin); + result.setDescription(sample.getDescription()); return result; } Indices degenerateIndices; @@ -586,6 +620,11 @@ void KernelSmoothing::setBoundaryCorrection(const Bool boundaryCorrection) boundingOption_ = (boundaryCorrection ? BOTH : NONE); } +Bool KernelSmoothing::getBoundaryCorrection() const +{ + return (boundingOption_ != NONE); +} + /* Boundary correction accessor */ void KernelSmoothing::setBoundingOption(const BoundingOption boundingOption) { @@ -616,6 +655,40 @@ void KernelSmoothing::setAutomaticUpperBound(const Bool automaticUpperBound) automaticUpperBound_ = automaticUpperBound; } +/* Binning accessors */ +void KernelSmoothing::setBinning(const Bool binned) +{ + binned_ = binned; +} + +Bool KernelSmoothing::getBinning() const +{ + return binned_; +} + +/* Bin number accessor */ +void KernelSmoothing::setBinNumber(const UnsignedInteger binNumber) +{ + if (binNumber < 2) + throw InvalidArgumentException(HERE) << "Error: The number of bins=" << binNumber << " is less than 2."; + binNumber_ = binNumber; +} + +UnsignedInteger KernelSmoothing::getBinNumber() const +{ + return binNumber_; +} + +/* Use log transform accessor */ +void KernelSmoothing::setUseLogTransform(const Bool useLog) +{ + useLogTransform_ = useLog; +} + +Bool KernelSmoothing::getUseLogTransform() const +{ + return useLogTransform_; +} /* Method save() 
stores the object through the StorageManager */ void KernelSmoothing::save(Advocate & adv) const @@ -630,6 +703,7 @@ void KernelSmoothing::save(Advocate & adv) const adv.saveAttribute("automaticLowerBound_", automaticLowerBound_); adv.saveAttribute("upperBound_", upperBound_); adv.saveAttribute("automaticUpperBound_", automaticUpperBound_); + adv.saveAttribute("useLogTransform_", useLogTransform_); } /* Method load() reloads the object from the StorageManager */ @@ -647,6 +721,8 @@ void KernelSmoothing::load(Advocate & adv) adv.loadAttribute("automaticLowerBound_", automaticLowerBound_); adv.loadAttribute("upperBound_", upperBound_); adv.loadAttribute("automaticUpperBound_", automaticUpperBound_); + if (adv.hasAttribute("useLogTransform_")) + adv.loadAttribute("useLogTransform_", useLogTransform_); } END_NAMESPACE_OPENTURNS diff --git a/lib/src/Uncertainty/Distribution/MaximumLikelihoodFactory.cxx b/lib/src/Uncertainty/Distribution/MaximumLikelihoodFactory.cxx index d5c2633a01..f0bd4b91c0 100644 --- a/lib/src/Uncertainty/Distribution/MaximumLikelihoodFactory.cxx +++ b/lib/src/Uncertainty/Distribution/MaximumLikelihoodFactory.cxx @@ -318,9 +318,12 @@ Point MaximumLikelihoodFactory::buildParameter(const Sample & sample) const solver.setProblem(problem); solver.run(); + const Point parameter(solver.getResult().getOptimalPoint()); + if (!parameter.getDimension()) + throw InvalidArgumentException(HERE) << "optimization in MaximumLikelihoodFactory did not yield feasible points"; + Point effectiveParameter(effectiveParameterSize); // set unknown values - Point parameter(solver.getResult().getOptimalPoint()); UnsignedInteger index = 0; for (UnsignedInteger j = 0; j < effectiveParameterSize; ++ j) { @@ -331,7 +334,7 @@ Point MaximumLikelihoodFactory::buildParameter(const Sample & sample) const } } // set known values - UnsignedInteger knownParametersSize = knownParameterIndices_.getSize(); + const UnsignedInteger knownParametersSize = knownParameterIndices_.getSize(); for 
(UnsignedInteger j = 0; j < knownParametersSize; ++ j) { effectiveParameter[knownParameterIndices_[j]] = knownParameterValues_[j]; diff --git a/lib/src/Uncertainty/Distribution/Mixture.cxx b/lib/src/Uncertainty/Distribution/Mixture.cxx index 336ce857e1..38a6f72c57 100644 --- a/lib/src/Uncertainty/Distribution/Mixture.cxx +++ b/lib/src/Uncertainty/Distribution/Mixture.cxx @@ -36,19 +36,8 @@ static const Factory Factory_Mixture; /* Default constructor */ Mixture::Mixture() : DistributionImplementation() - , distributionCollection_() - , base_() - , alias_() - , uniformWeights_(true) - , p_() - , pdfApproximationCDF_() - , cdfApproximation_() - , pdfApproximationCCDF_() - , ccdfApproximation_() - , useApproximatePDFCDF_(false) { setName("Mixture"); - setParallel(true); // Set an empty range setDistributionCollection(DistributionCollection(1)); } @@ -56,16 +45,6 @@ Mixture::Mixture() /* Parameters constructor */ Mixture::Mixture(const DistributionCollection & coll) : DistributionImplementation() - , distributionCollection_() - , base_() - , alias_() - , uniformWeights_(true) - , p_() - , pdfApproximationCDF_() - , cdfApproximation_() - , pdfApproximationCCDF_() - , ccdfApproximation_() - , useApproximatePDFCDF_(false) { setName("Mixture"); // We could NOT set distributionCollection_ in the member area of the constructor @@ -73,7 +52,7 @@ Mixture::Mixture(const DistributionCollection & coll) // distributions of the collection have the same dimension). We do this by calling // the setDistributionCollection() method that do it for us. // This call set also the range. 
- setDistributionCollection( coll ); + setDistributionCollection(coll); } /* Parameters constructor */ @@ -252,6 +231,7 @@ void Mixture::setDistributionCollectionWithWeights(const DistributionCollection ccdfApproximation_ = interpolation[3]; useApproximatePDFCDF_ = true; } + isDiscreteOrContinuous_ = isContinuous() || isDiscrete(); } @@ -295,6 +275,8 @@ Scalar Mixture::computePDF(const Point & point) const { const UnsignedInteger dimension = getDimension(); if (point.getDimension() != dimension) throw InvalidArgumentException(HERE) << "Error: the given point must have dimension=" << dimension << ", here dimension=" << point.getDimension(); + if (!isDiscreteOrContinuous_) + throw InvalidArgumentException(HERE) << "Cannot compute the PDF of a neither continuous nor discrete Mixture."; if (useApproximatePDFCDF_) { if (point[0] < getMean()[0]) return pdfApproximationCDF_.derivate(point)[0]; diff --git a/lib/src/Uncertainty/Distribution/StudentCopula.cxx b/lib/src/Uncertainty/Distribution/StudentCopula.cxx index 52cb9c2501..1cbca7c1f7 100644 --- a/lib/src/Uncertainty/Distribution/StudentCopula.cxx +++ b/lib/src/Uncertainty/Distribution/StudentCopula.cxx @@ -31,7 +31,7 @@ static const Factory Factory_StudentCopula; /* Default constructor */ StudentCopula::StudentCopula() - : SklarCopula(Student()) + : SklarCopula(Student(3.0, 2)) { // Nothing to do } diff --git a/lib/src/Uncertainty/Distribution/TruncatedNormalFactory.cxx b/lib/src/Uncertainty/Distribution/TruncatedNormalFactory.cxx index 5d158a90da..fe75bd186b 100644 --- a/lib/src/Uncertainty/Distribution/TruncatedNormalFactory.cxx +++ b/lib/src/Uncertainty/Distribution/TruncatedNormalFactory.cxx @@ -23,6 +23,7 @@ #include "openturns/MethodOfMomentsFactory.hxx" #include "openturns/MaximumLikelihoodFactory.hxx" #include "openturns/PersistentObjectFactory.hxx" +#include "openturns/Normal.hxx" BEGIN_NAMESPACE_OPENTURNS @@ -149,13 +150,19 @@ TruncatedNormal TruncatedNormalFactory::buildMethodOfLikelihoodMaximization(cons // 
The parameters are scaled back // X_norm = alpha * (X - beta) // X = beta + X_norm / alpha - Point scaledParameters(4, beta); - scaledParameters[0] += parameters[0] / alpha;// mu - scaledParameters[1] = parameters[1] / alpha;// sigma - scaledParameters[2] -= oneEps / alpha;// a - scaledParameters[3] += oneEps / alpha;// b + const Scalar mu = beta + parameters[0] / alpha; + const Scalar sigma = parameters[1] / alpha; + const Scalar a = beta - oneEps / alpha; + const Scalar b = beta + oneEps / alpha; + + // check if the parameters of the Normal part make sense wrt the bound parameters + // note that we still want to allow it from the ctor of TruncatedNormal + // but not in the context of inference as in the chaos we want to avoid such degenerated distributions + const Scalar epsilon = ResourceMap::GetAsScalar("Distribution-DefaultCDFEpsilon"); + if (Normal(mu, sigma).computeProbability(Interval(a, b)) < epsilon) + throw InvalidArgumentException(HERE) << "Likelihood-optimized TruncatedNormal is not valid"; - TruncatedNormal result(buildAsTruncatedNormal(scaledParameters)); + TruncatedNormal result(buildAsTruncatedNormal({mu, sigma, a, b})); // abort if distribution is not valid if (!SpecFunc::IsNormal(result.getMean()[0])) diff --git a/lib/src/Uncertainty/Distribution/openturns/InverseGamma.hxx b/lib/src/Uncertainty/Distribution/openturns/InverseGamma.hxx index c6bd55bd6e..7131065aa1 100644 --- a/lib/src/Uncertainty/Distribution/openturns/InverseGamma.hxx +++ b/lib/src/Uncertainty/Distribution/openturns/InverseGamma.hxx @@ -41,8 +41,8 @@ public: InverseGamma(); /** Parameters constructor */ - InverseGamma(const Scalar lambda, - const Scalar k); + InverseGamma(const Scalar k, + const Scalar lambda); /** Comparison operator */ @@ -159,9 +159,9 @@ private: void update(); /** The main parameter set of the distribution */ - Scalar lambda_; - Scalar k_; - Scalar normalizationFactor_; + Scalar k_ = 1.0; + Scalar lambda_ = 1.0; + Scalar normalizationFactor_ = 0.0; }; /* class 
InverseGamma */ diff --git a/lib/src/Uncertainty/Distribution/openturns/KernelSmoothing.hxx b/lib/src/Uncertainty/Distribution/openturns/KernelSmoothing.hxx index bb37d35817..a38f147894 100644 --- a/lib/src/Uncertainty/Distribution/openturns/KernelSmoothing.hxx +++ b/lib/src/Uncertainty/Distribution/openturns/KernelSmoothing.hxx @@ -75,20 +75,33 @@ public: /** Kernel accessor */ Distribution getKernel() const; - /* Boundary correction accessor, shortcut for setBoundingOption(NONE) or setBoundingOption(BOTH) */ + /** Boundary correction accessor, shortcut for setBoundingOption(NONE) or setBoundingOption(BOTH) */ void setBoundaryCorrection(const Bool boundaryCorrection); + Bool getBoundaryCorrection() const; - /* Boundary correction accessor */ + /** Boundary correction accessor */ void setBoundingOption(const BoundingOption boundingOption); - /* Boundary accessor */ + /** Boundary accessor */ void setLowerBound(const Scalar lowerBound); void setUpperBound(const Scalar upperBound); - /* Automatic boundary accessor */ + /** Automatic boundary accessor */ void setAutomaticLowerBound(const Bool automaticLowerBound); void setAutomaticUpperBound(const Bool automaticUpperBound); + /** Binning accessors */ + void setBinning(const Bool binned); + Bool getBinning() const; + + /** Bin number accessor */ + void setBinNumber(const UnsignedInteger binNumber); + UnsignedInteger getBinNumber() const; + + /** Use log transform accessor */ + void setUseLogTransform(const Bool useLog); + Bool getUseLogTransform() const; + /** Compute the bandwidth according to Silverman's rule */ Point computeSilvermanBandwidth(const Sample & sample) const; @@ -124,13 +137,13 @@ private: Distribution kernel_; // Flag to tell if we compute a binned version of the estimator - Bool binned_; + Bool binned_ = false; // Number of bins in each dimension - UnsignedInteger binNumber_; + UnsignedInteger binNumber_ = ResourceMap::GetAsUnsignedInteger("KernelSmoothing-BinNumber"); // Direction of the boundary 
treatment - BoundingOption boundingOption_; + BoundingOption boundingOption_ = NONE; // Known bounds Scalar lowerBound_; @@ -138,6 +151,8 @@ private: Scalar upperBound_; Bool automaticUpperBound_; + // Use log transform + Bool useLogTransform_ = false; }; /* class KernelSmoothing */ diff --git a/lib/src/Uncertainty/Distribution/openturns/Mixture.hxx b/lib/src/Uncertainty/Distribution/openturns/Mixture.hxx index 971a59b643..71dd15a76c 100644 --- a/lib/src/Uncertainty/Distribution/openturns/Mixture.hxx +++ b/lib/src/Uncertainty/Distribution/openturns/Mixture.hxx @@ -193,7 +193,7 @@ private: /** Structures for the alias sampling method */ Point base_; Indices alias_; - Bool uniformWeights_; + Bool uniformWeights_ = true; Point p_; /** PDF approximation associated to the CDF approximation */ @@ -209,7 +209,10 @@ private: PiecewiseHermiteEvaluation ccdfApproximation_; /** Do I have an approximation for the CDF? */ - Bool useApproximatePDFCDF_; + Bool useApproximatePDFCDF_ = false; + + /* Am I discrete or continuous, or neither */ + Bool isDiscreteOrContinuous_ = true; }; /* class Mixture */ diff --git a/lib/src/Uncertainty/Model/DistributionImplementation.cxx b/lib/src/Uncertainty/Model/DistributionImplementation.cxx index d801948b68..e6c581978d 100644 --- a/lib/src/Uncertainty/Model/DistributionImplementation.cxx +++ b/lib/src/Uncertainty/Model/DistributionImplementation.cxx @@ -892,8 +892,10 @@ Sample DistributionImplementation::computeCDFParallel(const Sample & inSample) c if (inSample.getDimension() != dimension_) throw InvalidArgumentException(HERE) << "Error: the given sample has an invalid dimension. 
Expect a dimension " << dimension_ << ", got " << inSample.getDimension(); const UnsignedInteger size = inSample.getSize(); Sample result(size, 1); - const ComputeCDFPolicy policy( inSample, result, *this ); - TBBImplementation::ParallelFor( 0, size, policy ); + const ComputeCDFPolicy policy(inSample, result, *this); + // This calls GaussKronrodRule::InitializeRules before entering parallel region to prevent concurrent access + GaussKronrod::GetRules(); + TBBImplementation::ParallelFor(0, size, policy); return result; } diff --git a/lib/src/Uncertainty/Model/EllipticalDistribution.cxx b/lib/src/Uncertainty/Model/EllipticalDistribution.cxx index a4c6a55a5e..267456d904 100644 --- a/lib/src/Uncertainty/Model/EllipticalDistribution.cxx +++ b/lib/src/Uncertainty/Model/EllipticalDistribution.cxx @@ -173,9 +173,10 @@ Scalar EllipticalDistribution::computeDensityGeneratorSecondDerivative(const Sca /* Get the DDF of the distribution */ Point EllipticalDistribution::computeDDF(const Point & point) const { - if (point.getDimension() != getDimension()) throw InvalidArgumentException(HERE) << "Error: the given point must have dimension=1, here dimension=" << point.getDimension(); - const UnsignedInteger dimension = getDimension(); + if (point.getDimension() != dimension) + throw InvalidArgumentException(HERE) << "Error: the given point must have dimension=" << dimension << ", here dimension=" << point.getDimension(); + switch(dimension) { case 1: @@ -253,9 +254,10 @@ Point EllipticalDistribution::computeDDF(const Point & point) const /* Get the PDF of the distribution */ Scalar EllipticalDistribution::computePDF(const Point & point) const { - if (point.getDimension() != getDimension()) throw InvalidArgumentException(HERE) << "Error: the given point must have dimension=1, here dimension=" << point.getDimension(); - const UnsignedInteger dimension = getDimension(); + if (point.getDimension() != dimension) + throw InvalidArgumentException(HERE) << "Error: the given point must have 
dimension=" << dimension << ", here dimension=" << point.getDimension(); + switch(dimension) { case 1: @@ -315,9 +317,10 @@ Scalar EllipticalDistribution::computePDF(const Point & point) const /* Get the log-PDF of the distribution */ Scalar EllipticalDistribution::computeLogPDF(const Point & point) const { - if (point.getDimension() != getDimension()) throw InvalidArgumentException(HERE) << "Error: the given point must have dimension=1, here dimension=" << point.getDimension(); - const UnsignedInteger dimension = getDimension(); + if (point.getDimension() != dimension) + throw InvalidArgumentException(HERE) << "Error: the given point must have dimension=" << dimension << ", here dimension=" << point.getDimension(); + switch(dimension) { case 1: @@ -386,10 +389,10 @@ Scalar EllipticalDistribution::computeLogPDF(const Point & point) const /* Get the PDF gradient of the distribution */ Point EllipticalDistribution::computePDFGradient(const Point & point) const { - if (point.getDimension() != getDimension()) - throw InvalidArgumentException(HERE) << "Error: the given point must have dimension=1, here dimension=" << point.getDimension(); - const UnsignedInteger dimension = getDimension(); + if (point.getDimension() != dimension) + throw InvalidArgumentException(HERE) << "Error: the given point must have dimension=" << dimension << ", here dimension=" << point.getDimension(); + const Point u(normalize(point)); Point iRu(u); for (UnsignedInteger i = 0; i < dimension; ++ i) diff --git a/lib/src/Uncertainty/Process/ARMALikelihoodFactory.cxx b/lib/src/Uncertainty/Process/ARMALikelihoodFactory.cxx index 66841ce9a9..038f448155 100644 --- a/lib/src/Uncertainty/Process/ARMALikelihoodFactory.cxx +++ b/lib/src/Uncertainty/Process/ARMALikelihoodFactory.cxx @@ -575,6 +575,9 @@ ARMA ARMALikelihoodFactory::build(const TimeSeries & timeSeries) const // optimal point const Point optpoint(solver.getResult().getOptimalPoint()); + if (!optpoint.getDimension()) + throw 
InvalidArgumentException(HERE) << "optimization in ARMA did not yield feasible points"; + beta = optpoint; // Return result diff --git a/lib/src/Uncertainty/Process/KarhunenLoeveSVDAlgorithm.cxx b/lib/src/Uncertainty/Process/KarhunenLoeveSVDAlgorithm.cxx index ff15f48217..2cdf03171f 100644 --- a/lib/src/Uncertainty/Process/KarhunenLoeveSVDAlgorithm.cxx +++ b/lib/src/Uncertainty/Process/KarhunenLoeveSVDAlgorithm.cxx @@ -287,7 +287,7 @@ void KarhunenLoeveSVDAlgorithm::run() LOGINFO(OSS() << "U=" << U.getNbRows() << "x" << U.getNbColumns()); } else - throw InvalidArgumentException(HERE) << "Unknow random SVD variant: " << ResourceMap::GetAsString("KarhunenLoeveSVDAlgorithm-RandomSVDVariant"); + throw InvalidArgumentException(HERE) << "Unknown random SVD variant: " << ResourceMap::GetAsString("KarhunenLoeveSVDAlgorithm-RandomSVDVariant"); } else { diff --git a/lib/src/Uncertainty/Process/WhittleFactory.cxx b/lib/src/Uncertainty/Process/WhittleFactory.cxx index 5f2daa2722..76063cfaaa 100644 --- a/lib/src/Uncertainty/Process/WhittleFactory.cxx +++ b/lib/src/Uncertainty/Process/WhittleFactory.cxx @@ -433,6 +433,8 @@ ARMA WhittleFactory::maximizeLogLikelihood(Point & informationCriteria) const // optimal point const Point optpoint(solver.getResult().getOptimalPoint()); + if (!optpoint.getDimension()) + throw InvalidArgumentException(HERE) << "optimization in WhittleFactory did not yield feasible points"; theta = optpoint; } // Compute the information criteria diff --git a/lib/src/main.cxx b/lib/src/main.cxx deleted file mode 100644 index da7ea8986e..0000000000 --- a/lib/src/main.cxx +++ /dev/null @@ -1,35 +0,0 @@ -// -*- C++ -*- -/** - * @brief The main program. The entry point of the project... 
- * - * Copyright 2005-2024 Airbus-EDF-IMACS-ONERA-Phimeca - * - * This library is free software: you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this library. If not, see . - * - */ - -#include "openturns/OT.hxx" -extern "C" int openturns_library_ok(); - -/* - * @fn int main (int argc, char *argv[]) - * @brief The main program. The entry point of the project... - * @return The return code (null if correct) - * @param argc The number or arguments in argv - * @param argv The array of arguments from the calling program - */ -int main(int, char *[]) -{ - return openturns_library_ok() ? 0 : 1 ; -} diff --git a/lib/src/openturns_library_ok.c b/lib/src/openturns_library_ok.c deleted file mode 100644 index a745cd7303..0000000000 --- a/lib/src/openturns_library_ok.c +++ /dev/null @@ -1,33 +0,0 @@ -// -*- C -*- -/** - * @brief This file contains a single C function to allow m4 easy detection - * - * Copyright 2005-2024 Airbus-EDF-IMACS-ONERA-Phimeca - * - * This library is free software: you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this library. If not, see . - * - */ -#include "openturns/OTdebug.h" - -/* - * @fn int openturns_library_ok() - * @brief A function that return 1 on success - * @return The return code - */ -int OT_API openturns_library_ok(void); - -int OT_API openturns_library_ok(void) -{ - return 1; -} diff --git a/lib/test/CMakeLists.txt b/lib/test/CMakeLists.txt index d75a495736..7a4ffd21c4 100644 --- a/lib/test/CMakeLists.txt +++ b/lib/test/CMakeLists.txt @@ -31,6 +31,7 @@ macro (ot_check_test TESTNAME) set (TEST_TARGET t_${TESTNAME}) add_executable (${TEST_TARGET} EXCLUDE_FROM_ALL ${TEST_TARGET}.cxx) add_dependencies(tests ${TEST_TARGET}) + target_include_directories(${TEST_TARGET} PRIVATE ${INTERNAL_INCLUDE_DIRS} BEFORE) target_link_libraries (${TEST_TARGET} PRIVATE OT) if (MINGW AND CMAKE_SIZEOF_VOID_P EQUAL 4) target_link_libraries (${TEST_TARGET} PRIVATE CRT_fp8) @@ -59,8 +60,6 @@ macro (ot_check_test TESTNAME) endmacro () -include_directories (BEFORE ${INTERNAL_INCLUDE_DIRS}) - # Common ot_check_test (Object_std) ot_check_test (OStream_std) diff --git a/lib/test/t_AtomicInt_std.cxx b/lib/test/t_AtomicInt_std.cxx index 44bf5152e2..2da4bf9c6e 100644 --- a/lib/test/t_AtomicInt_std.cxx +++ b/lib/test/t_AtomicInt_std.cxx @@ -18,10 +18,6 @@ * along with this library. If not, see . 
* */ -#ifndef _WIN32 -#include // sysconf -#endif -#include // getenv #include #include "openturns/OT.hxx" #include "openturns/OTtestcode.hxx" @@ -55,17 +51,7 @@ int main(int, char *[]) { AtomicInt atom; -#ifndef _WIN32 - unsigned int nbThreads = sysconf(_SC_NPROCESSORS_CONF); -#else - std::istringstream converter(getenv("NUMBER_OF_PROCESSORS")); - unsigned int nbThreads; - if (!(converter >> nbThreads)) - { - throw TestFailed("OT::AtomicInt wrong nb of thread!"); - } -#endif - ++ nbThreads; + unsigned int nbThreads = std::thread::hardware_concurrency(); std::thread * threads = new std::thread[nbThreads]; for (unsigned int i = 0; i < nbThreads; ++ i) diff --git a/lib/test/t_Binomial_std.expout b/lib/test/t_Binomial_std.expout index c47ed1d549..6c4e650429 100644 --- a/lib/test/t_Binomial_std.expout +++ b/lib/test/t_Binomial_std.expout @@ -24,8 +24,8 @@ ccdf=0.996347 survival=0.996347 quantile=class=Point name=Unnamed dimension=1 values=[13] cdf(quantile)=0.964732 -quantile (tail)=class=Point name=Unnamed dimension=1 values=[7] -cdf (tail)=0.949987 +quantile (tail)=class=Point name=Unnamed dimension=1 values=[6] +cdf (tail)=0.984757 characteristic function=(0.0107296,-0.0674204) log characteristic function=(-2.6843,-13.9793) generating function=(0.00258535,-0.00490073) @@ -42,4 +42,4 @@ spearman=class=CovarianceMatrix dimension=1 implementation=class=MatrixImplement kendall=class=CovarianceMatrix dimension=1 implementation=class=MatrixImplementation name=Unnamed rows=1 columns=1 values=[1] parameters=[[n : 15, p : 0.7]] Standard representative=Binomial(n = 15, p = 0.7) -95% bilateral confidence interval = [7, 14] +95% bilateral confidence interval = [7, 13] diff --git a/lib/test/t_Bonmin_std.cxx b/lib/test/t_Bonmin_std.cxx index 02fdffeac7..5f818e28b8 100644 --- a/lib/test/t_Bonmin_std.cxx +++ b/lib/test/t_Bonmin_std.cxx @@ -43,44 +43,27 @@ int main() // Definition of objective function const Description inputVariables = {"x0", "x1", "x2", "x3"}; - Description 
objective(1, "-x0 -x1 -x2"); + const Description objective(1, "-x0 -x1 -x2"); SymbolicFunction objectiveFunction(inputVariables, objective); // Definition of bounds - Point variablesLowerBounds(4); - Point variablesUpperBounds(4); + const Point variablesLowerBounds(4, 0.0); + const Point variablesUpperBounds = {1.0, DBL_MAX, DBL_MAX, 5.0}; Collection variablesFiniteLowerBounds(4, 1); - Collection variablesFiniteUpperBounds(4, 1); - variablesLowerBounds[0] = 0; - variablesUpperBounds[0] = 1; - - variablesLowerBounds[1] = 0; - variablesUpperBounds[1] = DBL_MAX; - variablesFiniteUpperBounds[1] = 0; - - variablesLowerBounds[2] = 0; - variablesUpperBounds[2] = DBL_MAX; - variablesFiniteUpperBounds[2] = 0; - - variablesLowerBounds[3] = 0; - variablesUpperBounds[3] = 5; - Interval variablesBounds(variablesLowerBounds, variablesUpperBounds, variablesFiniteLowerBounds, variablesFiniteUpperBounds) ; + Collection variablesFiniteUpperBounds = {1, 0, 0, 1}; + Interval variablesBounds(variablesLowerBounds, variablesUpperBounds, variablesFiniteLowerBounds, variablesFiniteUpperBounds); // Definition of inequality constraints: // Bonmin constraints are defined as g_l <= g(x) <= g_u // OpenTURNS' are defined as g(x) >= 0 - Description inequalityFormulas(3); - inequalityFormulas[0] = "-(x1 - 1/2)^2 - (x2 - 1/2)^2 + 1/4"; - inequalityFormulas[1] = "-x0 + x1"; - inequalityFormulas[2] = "-x0 - x2 - x3 + 2"; + const Description inequalityFormulas = {"-(x1 - 1/2)^2 - (x2 - 1/2)^2 + 1/4", "-x0 + x1", "-x0 - x2 - x3 + 2"}; SymbolicFunction inequalityConstraints(inputVariables, inequalityFormulas); // Definition of variables types - Indices varTypes(4); - varTypes[0] = OT::OptimizationProblemImplementation::BINARY; - varTypes[1] = OT::OptimizationProblemImplementation::CONTINUOUS; - varTypes[2] = OT::OptimizationProblemImplementation::CONTINUOUS; - varTypes[3] = OT::OptimizationProblemImplementation::INTEGER; + const Indices varTypes = {OT::OptimizationProblemImplementation::BINARY, + 
OT::OptimizationProblemImplementation::CONTINUOUS, + OT::OptimizationProblemImplementation::CONTINUOUS, + OT::OptimizationProblemImplementation::INTEGER}; // Definition of OptimizationProblem OptimizationProblem problem(objectiveFunction); diff --git a/lib/test/t_GaussianNonLinearCalibration_noobs.cxx b/lib/test/t_GaussianNonLinearCalibration_noobs.cxx index 290fde7a45..b482d65a49 100644 --- a/lib/test/t_GaussianNonLinearCalibration_noobs.cxx +++ b/lib/test/t_GaussianNonLinearCalibration_noobs.cxx @@ -91,7 +91,8 @@ int main(int, char *[]) assert_almost_equal(parameterMAP, trueParameter, 10e-1); // With TNC fullprint << "2. TNC optim" << std::endl; - algo.setOptimizationAlgorithm(MultiStart(TNC(), LowDiscrepancyExperiment(SobolSequence(), Normal(candidate, CovarianceMatrix(candidate.getDimension())), ResourceMap::GetAsUnsignedInteger("GaussianNonLinearCalibration-MultiStartSize")).generate())); + const UnsignedInteger multiStartSize = 10; + algo.setOptimizationAlgorithm(MultiStart(TNC(), LowDiscrepancyExperiment(SobolSequence(), Normal(candidate, CovarianceMatrix(candidate.getDimension())), multiStartSize).generate())); algo.run(); parameterMAP = algo.getResult().getParameterMAP(); fullprint << "MAP =" << parameterMAP << std::endl; diff --git a/lib/test/t_GaussianNonLinearCalibration_std.cxx b/lib/test/t_GaussianNonLinearCalibration_std.cxx index 489af2095e..b1823ccccb 100644 --- a/lib/test/t_GaussianNonLinearCalibration_std.cxx +++ b/lib/test/t_GaussianNonLinearCalibration_std.cxx @@ -82,7 +82,8 @@ int main(int, char *[]) assert_almost_equal(parameterMAP, trueParameter, 5e-1); // With TNC fullprint << "2. 
TNC optim" << std::endl; - algo.setOptimizationAlgorithm(MultiStart(TNC(), LowDiscrepancyExperiment(SobolSequence(), Normal(candidate, CovarianceMatrix(candidate.getDimension())), ResourceMap::GetAsUnsignedInteger("GaussianNonLinearCalibration-MultiStartSize")).generate())); + const UnsignedInteger multiStartSize = 10; + algo.setOptimizationAlgorithm(MultiStart(TNC(), LowDiscrepancyExperiment(SobolSequence(), Normal(candidate, CovarianceMatrix(candidate.getDimension())), multiStartSize).generate())); algo.run(); parameterMAP = algo.getResult().getParameterMAP(); fullprint << "MAP =" << parameterMAP << std::endl; diff --git a/lib/test/t_HSICEstimatorTargetSensitivity_std.cxx b/lib/test/t_HSICEstimatorTargetSensitivity_std.cxx index 9c256389bd..55698c5938 100644 --- a/lib/test/t_HSICEstimatorTargetSensitivity_std.cxx +++ b/lib/test/t_HSICEstimatorTargetSensitivity_std.cxx @@ -129,11 +129,11 @@ int main(int, char *[]) assert_almost_equal(pvaluesAs, referencePValuesAs); /* We set the number of permutations for the pvalue estimate. 
*/ - UnsignedInteger b = 1000 ; + UnsignedInteger b = 100; TSA.setPermutationSize(b); /* We get the pvalue estimate by permutations */ - Point referencePValuesPerm = {0, 0.233766, 0.265734}; + Point referencePValuesPerm = {0, 0.257426, 0.217822}; Point pvaluesPerm = TSA.getPValuesPermutation(); assert_almost_equal(pvaluesPerm, referencePValuesPerm); @@ -143,7 +143,7 @@ int main(int, char *[]) TSA.setFilterFunction(alternateFilter); assert_almost_equal(TSA.getR2HSICIndices(), {0.373511, 0.0130156, 0.0153977}); assert_almost_equal(TSA.getHSICIndices(), {0.00118685, 4.12193e-05, 5.07577e-05}, 1e-4, 0.0); - assert_almost_equal(TSA.getPValuesPermutation(), {0, 0.137862, 0.112887}); + assert_almost_equal(TSA.getPValuesPermutation(), {0.0, 0.118812, 0.158416}); assert_almost_equal(TSA.getPValuesAsymptotic(), {7.32022e-13, 0.143851, 0.128866}); } catch (TestFailed & ex) diff --git a/lib/test/t_HyperbolicAnisotropicEnumerateFunction_std.cxx b/lib/test/t_HyperbolicAnisotropicEnumerateFunction_std.cxx index 47cc46a765..79f5c5f339 100644 --- a/lib/test/t_HyperbolicAnisotropicEnumerateFunction_std.cxx +++ b/lib/test/t_HyperbolicAnisotropicEnumerateFunction_std.cxx @@ -83,6 +83,17 @@ int main(int, char *[]) } } + // Test getMarginal() + fullprint << "Test getMarginal()" << std::endl; + HyperbolicAnisotropicEnumerateFunction enumerateFunction(10, 0.5); + Indices indices({0, 2, 4, 6, 9}); + EnumerateFunction marginalEnumerate(enumerateFunction.getMarginal(indices)); + assert_equal(marginalEnumerate.getDimension(), indices.getSize()); + for (UnsignedInteger index = 0; index < size; ++index) + { + Indices multiIndex(marginalEnumerate(index)); + fullprint << "index=" << index << ", multi-index=" << multiIndex << std::endl; + } } diff --git a/lib/test/t_HyperbolicAnisotropicEnumerateFunction_std.expout b/lib/test/t_HyperbolicAnisotropicEnumerateFunction_std.expout index 324943d800..120c92948e 100644 --- a/lib/test/t_HyperbolicAnisotropicEnumerateFunction_std.expout +++ 
b/lib/test/t_HyperbolicAnisotropicEnumerateFunction_std.expout @@ -291,3 +291,29 @@ index=23 [0,0,6,0] index=24 [0,0,0,6] And first 5 strata cardinals :[1,4,4,4,4] +Test getMarginal() +index=0, multi-index=[0,0,0,0,0] +index=1, multi-index=[1,0,0,0,0] +index=2, multi-index=[0,1,0,0,0] +index=3, multi-index=[0,0,1,0,0] +index=4, multi-index=[0,0,0,1,0] +index=5, multi-index=[0,0,0,0,1] +index=6, multi-index=[2,0,0,0,0] +index=7, multi-index=[0,2,0,0,0] +index=8, multi-index=[0,0,2,0,0] +index=9, multi-index=[0,0,0,2,0] +index=10, multi-index=[0,0,0,0,2] +index=11, multi-index=[3,0,0,0,0] +index=12, multi-index=[0,3,0,0,0] +index=13, multi-index=[0,0,3,0,0] +index=14, multi-index=[0,0,0,3,0] +index=15, multi-index=[0,0,0,0,3] +index=16, multi-index=[1,1,0,0,0] +index=17, multi-index=[1,0,1,0,0] +index=18, multi-index=[1,0,0,1,0] +index=19, multi-index=[1,0,0,0,1] +index=20, multi-index=[0,1,1,0,0] +index=21, multi-index=[0,1,0,1,0] +index=22, multi-index=[0,1,0,0,1] +index=23, multi-index=[0,0,1,1,0] +index=24, multi-index=[0,0,1,0,1] diff --git a/lib/test/t_InverseGamma_std.cxx b/lib/test/t_InverseGamma_std.cxx index 8515791d9a..8671bbd4a4 100644 --- a/lib/test/t_InverseGamma_std.cxx +++ b/lib/test/t_InverseGamma_std.cxx @@ -27,7 +27,7 @@ using namespace OT::Test; class TestObject : public InverseGamma { public: - TestObject() : InverseGamma(2.5, 1.5) {} + TestObject() : InverseGamma(1.5, 2.5) {} virtual ~TestObject() {} }; @@ -45,8 +45,8 @@ int main(int, char *[]) // Instantiate one distribution object Collection allDistributions(0); - allDistributions.add(InverseGamma(2.5, 5.5)); - allDistributions.add(InverseGamma(2.5, 15.0)); + allDistributions.add(InverseGamma(5.5, 2.5)); + allDistributions.add(InverseGamma(15.0, 2.5)); for (UnsignedInteger n = 0; n < allDistributions.getSize(); ++n) { InverseGamma distribution(allDistributions[n]); @@ -105,18 +105,18 @@ int main(int, char *[]) Point PDFgr = distribution.computePDFGradient( point ); fullprint << "pdf gradient 
=" << PDFgr << std::endl; Point PDFgrFD(2); - PDFgrFD[0] = (InverseGamma(distribution.getLambda() + eps, distribution.getK()).computePDF(point) - - InverseGamma(distribution.getLambda() - eps, distribution.getK()).computePDF(point)) / (2.0 * eps); - PDFgrFD[1] = (InverseGamma(distribution.getLambda(), distribution.getK() + eps).computePDF(point) - - InverseGamma(distribution.getLambda(), distribution.getK() - eps).computePDF(point)) / (2.0 * eps); + PDFgrFD[0] = (InverseGamma(distribution.getK() + eps, distribution.getLambda()).computePDF(point) - + InverseGamma(distribution.getK() - eps, distribution.getLambda()).computePDF(point)) / (2.0 * eps); + PDFgrFD[1] = (InverseGamma(distribution.getK(), distribution.getLambda() + eps).computePDF(point) - + InverseGamma(distribution.getK(), distribution.getLambda() - eps).computePDF(point)) / (2.0 * eps); fullprint << "pdf gradient (FD)=" << PDFgrFD << std::endl; Point CDFgr = distribution.computeCDFGradient( point ); fullprint << "cdf gradient =" << CDFgr << std::endl; Point CDFgrFD(2); - CDFgrFD[0] = (InverseGamma(distribution.getLambda() + eps, distribution.getK()).computeCDF(point) - - InverseGamma(distribution.getLambda() - eps, distribution.getK()).computeCDF(point)) / (2.0 * eps); - CDFgrFD[1] = (InverseGamma(distribution.getLambda(), distribution.getK() + eps).computeCDF(point) - - InverseGamma(distribution.getLambda(), distribution.getK() - eps).computeCDF(point)) / (2.0 * eps); + CDFgrFD[0] = (InverseGamma(distribution.getK() + eps, distribution.getLambda()).computeCDF(point) - + InverseGamma(distribution.getK() - eps, distribution.getLambda()).computeCDF(point)) / (2.0 * eps); + CDFgrFD[1] = (InverseGamma(distribution.getK(), distribution.getLambda() + eps).computeCDF(point) - + InverseGamma(distribution.getK(), distribution.getLambda() - eps).computeCDF(point)) / (2.0 * eps); fullprint << "cdf gradient (FD)=" << CDFgrFD << std::endl; Point quantile = distribution.computeQuantile( 0.95 ); fullprint << 
"quantile=" << quantile << std::endl; diff --git a/lib/test/t_InverseGamma_std.expout b/lib/test/t_InverseGamma_std.expout index ec6d8ff206..ac43e6c37c 100644 --- a/lib/test/t_InverseGamma_std.expout +++ b/lib/test/t_InverseGamma_std.expout @@ -2,13 +2,13 @@ Testing class InverseGamma checkConstructorAndDestructor() checkCopyConstructor() streamObject(const T & anObject) -class=InverseGamma name=InverseGamma dimension=1 lambda=2.5 k=1.5 +class=InverseGamma name=InverseGamma dimension=1 k=1.5 lambda=2.5 streamObject(const T & anObject) -class=InverseGamma name=InverseGamma dimension=1 lambda=2.5 k=1.5 +class=InverseGamma name=InverseGamma dimension=1 k=1.5 lambda=2.5 areSameObjects(const T & firstObject, const T & secondObject) areDifferentObjects(const T & firstObject, const T & secondObject) -Distribution class=InverseGamma name=InverseGamma dimension=1 lambda=2.5 k=5.5 -Distribution InverseGamma(lambda = 2.5, k = 5.5) +Distribution class=InverseGamma name=InverseGamma dimension=1 k=5.5 lambda=2.5 +Distribution InverseGamma(k = 5.5, lambda = 2.5) Elliptical = false Continuous = true oneRealization=class=Point name=Unnamed dimension=1 values=[0.0599153] @@ -29,15 +29,15 @@ Inverse survival=class=Point name=Unnamed dimension=1 values=[0.0406605] Survival(inverse survival)=0.95 characteristic function=(0.999937,0.0129291) log characteristic function=(2.03228e-05,0.0129292) -pdf gradient =class=Point name=Unnamed dimension=2 values=[-2.40895,-1.31286] -pdf gradient (FD)=class=Point name=Unnamed dimension=2 values=[-2.40895,-1.31286] -cdf gradient =class=Point name=Unnamed dimension=2 values=[0.127416,0.082705] -cdf gradient (FD)=class=Point name=Unnamed dimension=2 values=[0.127416,0.082705] +pdf gradient =class=Point name=Unnamed dimension=2 values=[-1.31286,-2.40895] +pdf gradient (FD)=class=Point name=Unnamed dimension=2 values=[-1.31286,-2.40895] +cdf gradient =class=Point name=Unnamed dimension=2 values=[0.082705,0.127416] +cdf gradient (FD)=class=Point 
name=Unnamed dimension=2 values=[0.082705,0.127416] quantile=class=Point name=Unnamed dimension=1 values=[0.174871] cdf(quantile)=0.95 Minimum volume interval=class=Interval name=Unnamed dimension=1 lower bound=class=Point name=Unnamed dimension=1 values=[0.0280966] upper bound=class=Point name=Unnamed dimension=1 values=[0.177562] finite lower bound=[1] finite upper bound=[1] threshold=0.95 -Minimum volume level set=class=LevelSet name=Unnamed dimension=1 function=class=Function name=Unnamed implementation=class=FunctionImplementation name=Unnamed description=[X0,-logPDF] evaluationImplementation=MinimumVolumeLevelSetEvaluation(InverseGamma(lambda = 2.5, k = 5.5)) gradientImplementation=MinimumVolumeLevelSetGradient(InverseGamma(lambda = 2.5, k = 5.5)) hessianImplementation=class=CenteredFiniteDifferenceHessian name=Unnamed epsilon=class=Point name=Unnamed dimension=1 values=[0.0001] evaluation=MinimumVolumeLevelSetEvaluation(InverseGamma(lambda = 2.5, k = 5.5)) level=34.4467 +Minimum volume level set=class=LevelSet name=Unnamed dimension=1 function=class=Function name=Unnamed implementation=class=FunctionImplementation name=Unnamed description=[X0,-logPDF] evaluationImplementation=MinimumVolumeLevelSetEvaluation(InverseGamma(k = 5.5, lambda = 2.5)) gradientImplementation=MinimumVolumeLevelSetGradient(InverseGamma(k = 5.5, lambda = 2.5)) hessianImplementation=class=CenteredFiniteDifferenceHessian name=Unnamed epsilon=class=Point name=Unnamed dimension=1 values=[0.0001] evaluation=MinimumVolumeLevelSetEvaluation(InverseGamma(k = 5.5, lambda = 2.5)) level=34.4467 beta=1.09642e-15 Bilateral confidence interval=class=Interval name=Unnamed dimension=1 lower bound=class=Point name=Unnamed dimension=1 values=[0.0364963] upper bound=class=Point name=Unnamed dimension=1 values=[0.209657] finite lower bound=[1] finite upper bound=[1] beta=0.95 @@ -52,13 +52,13 @@ covariance=class=CovarianceMatrix dimension=1 implementation=class=MatrixImpleme 
correlation=class=CovarianceMatrix dimension=1 implementation=class=MatrixImplementation name=Unnamed rows=1 columns=1 values=[1] spearman=class=CovarianceMatrix dimension=1 implementation=class=MatrixImplementation name=Unnamed rows=1 columns=1 values=[1] kendall=class=CovarianceMatrix dimension=1 implementation=class=MatrixImplementation name=Unnamed rows=1 columns=1 values=[1] -parameters=[[lambda : 2.5, k : 5.5]] -Standard representative=InverseGamma(lambda = 5.5, k = 1) +parameters=[[k : 5.5, lambda : 2.5]] +Standard representative=InverseGamma(k = 5.5, lambda = 1) standard deviation=class=Point name=Unnamed dimension=1 values=[0.0475131] skewness=class=Point name=Unnamed dimension=1 values=[2.99333] kurtosis=class=Point name=Unnamed dimension=1 values=[29.4] -Distribution class=InverseGamma name=InverseGamma dimension=1 lambda=2.5 k=15 -Distribution InverseGamma(lambda = 2.5, k = 15) +Distribution class=InverseGamma name=InverseGamma dimension=1 k=15 lambda=2.5 +Distribution InverseGamma(k = 15, lambda = 2.5) Elliptical = false Continuous = true oneRealization=class=Point name=Unnamed dimension=1 values=[0.0384087] @@ -79,15 +79,15 @@ Inverse survival=class=Point name=Unnamed dimension=1 values=[0.0182761] Survival(inverse survival)=0.95 characteristic function=(0.999999,0.00152381) log characteristic function=(-8.93075e-08,0.00152381) -pdf gradient =class=Point name=Unnamed dimension=2 values=[-4.76897,-1.04829] -pdf gradient (FD)=class=Point name=Unnamed dimension=2 values=[-4.76897,-1.04829] -cdf gradient =class=Point name=Unnamed dimension=2 values=[0.0339127,0.00792151] -cdf gradient (FD)=class=Point name=Unnamed dimension=2 values=[0.0339127,0.00792151] +pdf gradient =class=Point name=Unnamed dimension=2 values=[-1.04829,-4.76897] +pdf gradient (FD)=class=Point name=Unnamed dimension=2 values=[-1.04829,-4.76897] +cdf gradient =class=Point name=Unnamed dimension=2 values=[0.00792151,0.0339127] +cdf gradient (FD)=class=Point name=Unnamed dimension=2 
values=[0.00792151,0.0339127] quantile=class=Point name=Unnamed dimension=1 values=[0.0432604] cdf(quantile)=0.95 Minimum volume interval=class=Interval name=Unnamed dimension=1 lower bound=class=Point name=Unnamed dimension=1 values=[0.0154445] upper bound=class=Point name=Unnamed dimension=1 values=[0.0443646] finite lower bound=[1] finite upper bound=[1] threshold=0.95 -Minimum volume level set=class=LevelSet name=Unnamed dimension=1 function=class=Function name=Unnamed implementation=class=FunctionImplementation name=Unnamed description=[X0,-logPDF] evaluationImplementation=MinimumVolumeLevelSetEvaluation(InverseGamma(lambda = 2.5, k = 15)) gradientImplementation=MinimumVolumeLevelSetGradient(InverseGamma(lambda = 2.5, k = 15)) hessianImplementation=class=CenteredFiniteDifferenceHessian name=Unnamed epsilon=class=Point name=Unnamed dimension=1 values=[0.0001] evaluation=MinimumVolumeLevelSetEvaluation(InverseGamma(lambda = 2.5, k = 15)) level=-1.89324 +Minimum volume level set=class=LevelSet name=Unnamed dimension=1 function=class=Function name=Unnamed implementation=class=FunctionImplementation name=Unnamed description=[X0,-logPDF] evaluationImplementation=MinimumVolumeLevelSetEvaluation(InverseGamma(k = 15, lambda = 2.5)) gradientImplementation=MinimumVolumeLevelSetGradient(InverseGamma(k = 15, lambda = 2.5)) hessianImplementation=class=CenteredFiniteDifferenceHessian name=Unnamed epsilon=class=Point name=Unnamed dimension=1 values=[0.0001] evaluation=MinimumVolumeLevelSetEvaluation(InverseGamma(k = 15, lambda = 2.5)) level=-1.89324 beta=6.64087 Bilateral confidence interval=class=Interval name=Unnamed dimension=1 lower bound=class=Point name=Unnamed dimension=1 values=[0.0170288] upper bound=class=Point name=Unnamed dimension=1 values=[0.0476452] finite lower bound=[1] finite upper bound=[1] beta=0.95 @@ -102,8 +102,8 @@ covariance=class=CovarianceMatrix dimension=1 implementation=class=MatrixImpleme correlation=class=CovarianceMatrix dimension=1 
implementation=class=MatrixImplementation name=Unnamed rows=1 columns=1 values=[1] spearman=class=CovarianceMatrix dimension=1 implementation=class=MatrixImplementation name=Unnamed rows=1 columns=1 values=[1] kendall=class=CovarianceMatrix dimension=1 implementation=class=MatrixImplementation name=Unnamed rows=1 columns=1 values=[1] -parameters=[[lambda : 2.5, k : 15]] -Standard representative=InverseGamma(lambda = 15, k = 1) +parameters=[[k : 15, lambda : 2.5]] +Standard representative=InverseGamma(k = 15, lambda = 1) standard deviation=class=Point name=Unnamed dimension=1 values=[0.00792429] skewness=class=Point name=Unnamed dimension=1 values=[1.20185] kurtosis=class=Point name=Unnamed dimension=1 values=[5.90909] diff --git a/lib/test/t_JointDistribution_std.cxx b/lib/test/t_JointDistribution_std.cxx index 7c04146873..4657145e37 100644 --- a/lib/test/t_JointDistribution_std.cxx +++ b/lib/test/t_JointDistribution_std.cxx @@ -130,8 +130,9 @@ int main(int, char *[]) std::cout << "Distribution (Markdown)" << std::endl; std::cout << distribution.__repr_markdown__() << std::endl; fullprint << "Parameters " << distribution.getParametersCollection() << std::endl; - // Too slow if nCore == 2 - if (nCore != 2) + // Too slow for Mixture/KernelMixture + if (distribution.getCore().getImplementation()->getName() != "Mixture" && + distribution.getCore().getImplementation()->getName() != "KernelMixture") { fullprint << "entropy=" << distribution.computeEntropy() << std::endl; fullprint << "entropy (MC)=" << -distribution.computeLogPDF(distribution.getSample(1000000)).computeMean()[0] << std::endl; diff --git a/lib/test/t_JointDistribution_std.expout b/lib/test/t_JointDistribution_std.expout index 3bc5ed81a1..598cebc55a 100644 --- a/lib/test/t_JointDistribution_std.expout +++ b/lib/test/t_JointDistribution_std.expout @@ -278,16 +278,14 @@ JointDistribution | 2 | Three | Normal(mu = 1, sigma = 4) | Parameters [[mu_0_marginal_0 : 3, sigma_0_marginal_0 : 2],[mu_0_marginal_1 : 2, 
sigma_0_marginal_1 : 3],[mu_0_marginal_2 : 1, sigma_0_marginal_2 : 4],[x_0^0_core : 0, h_0_core : 1]] -entropy=5.19767 -entropy (MC)=3.88293 Mean class=Point name=Unnamed dimension=3 values=[2.68546,1.52819,0.370924] Elliptical distribution= false Elliptical copula= false Independent copula= false -oneRealization=class=Point name=Unnamed dimension=3 values=[2.01631,-0.27946,0.808824] -oneSample=class=Sample name=myDist implementation=class=SampleImplementation name=myDist size=10 dimension=3 description=[One,Two,Three] data=[[2.34836,1.24943,1.20811],[1.99841,0.907041,-0.0312187],[3.13525,0.250368,0.15086],[2.02497,1.91411,-1.65737],[2.68345,2.40798,-0.526989],[2.48291,0.689151,1.06407],[3.23655,2.44213,-0.410566],[1.40593,0.675174,1.41943],[2.13836,3.92642,1.41709],[2.46907,1.8888,1.66652]] -anotherSample mean=class=Point name=Unnamed dimension=3 values=[2.68537,1.53137,0.383789] -anotherSample covariance=class=CovarianceMatrix dimension=3 implementation=class=MatrixImplementation name=Unnamed rows=3 columns=3 values=[0.398306,-0.0120354,-0.0115013,-0.0120354,0.891089,0.009268,-0.0115013,0.009268,1.57741] +oneRealization=class=Point name=Unnamed dimension=3 values=[2.47375,1.28004,2.70986] +oneSample=class=Sample name=myDist implementation=class=SampleImplementation name=myDist size=10 dimension=3 description=[One,Two,Three] data=[[2.34744,1.18181,-1.48221],[2.7688,2.17159,-0.0466215],[3.29622,2.10719,1.0288],[2.82913,2.02964,1.57338],[3.25336,2.96383,-1.2737],[3.57357,1.81325,0.3924],[1.62737,1.9019,2.1167],[2.59277,0.864636,0.325633],[2.15339,0.414897,-1.84509],[2.56808,-0.0368341,-0.20478]] +anotherSample mean=class=Point name=Unnamed dimension=3 values=[2.68358,1.52794,0.354861] +anotherSample covariance=class=CovarianceMatrix dimension=3 implementation=class=MatrixImplementation name=Unnamed rows=3 columns=3 values=[0.389789,0.00279988,-0.00535941,0.00279988,0.899959,0.00446549,-0.00535941,0.00446549,1.58906] Zero point= class=Point name=Unnamed dimension=3 
values=[0,0,0] pdf=0 cdf=0 Quantile=class=Point name=Unnamed dimension=3 values=[4.03964,3.55946,3.07928] CDF(quantile)=0.95 @@ -295,32 +293,32 @@ margin=class=JointDistribution name=JointDistribution dimension=1 core=class=Ker margin PDF=0 margin CDF=0 margin quantile=class=Point name=Unnamed dimension=1 values=[3.77516] -margin realization=class=Point name=Unnamed dimension=1 values=[2.25633] +margin realization=class=Point name=Unnamed dimension=1 values=[2.29997] margin=class=JointDistribution name=JointDistribution dimension=1 core=class=KernelMixture name=KernelMixture kernel=class=Beta name=Beta dimension=1 alpha=2 beta=3 a=0.2 b=0.8 bandwidth=class=Point name=Unnamed dimension=1 values=[1] sample=class=Sample name=Unnamed implementation=class=SampleImplementation name=Unnamed size=1 dimension=1 data=[[0]] marginal[0]=class=Normal name=Second dimension=1 mean=class=Point name=Unnamed dimension=1 values=[2] sigma=class=Point name=Unnamed dimension=1 values=[3] correlationMatrix=class=CorrelationMatrix dimension=1 implementation=class=MatrixImplementation name=Unnamed rows=1 columns=1 values=[1] margin PDF=0.155143 margin CDF=0.0407431 margin quantile=class=Point name=Unnamed dimension=1 values=[3.16275] -margin realization=class=Point name=Unnamed dimension=1 values=[2.74422] +margin realization=class=Point name=Unnamed dimension=1 values=[1.07153] margin=class=JointDistribution name=JointDistribution dimension=1 core=class=KernelMixture name=KernelMixture kernel=class=Beta name=Beta dimension=1 alpha=2 beta=3 a=0.2 b=0.8 bandwidth=class=Point name=Unnamed dimension=1 values=[1] sample=class=Sample name=Unnamed implementation=class=SampleImplementation name=Unnamed size=1 dimension=1 data=[[0]] marginal[0]=class=Normal name=Third dimension=1 mean=class=Point name=Unnamed dimension=1 values=[1] sigma=class=Point name=Unnamed dimension=1 values=[4] correlationMatrix=class=CorrelationMatrix dimension=1 implementation=class=MatrixImplementation name=Unnamed 
rows=1 columns=1 values=[1] margin PDF=0.286412 margin CDF=0.41124 margin quantile=class=Point name=Unnamed dimension=1 values=[2.55033] -margin realization=class=Point name=Unnamed dimension=1 values=[2.54773] +margin realization=class=Point name=Unnamed dimension=1 values=[-1.12608] indices=[1,0] margins=class=JointDistribution name=JointDistribution dimension=2 core=class=KernelMixture name=KernelMixture kernel=class=Beta name=Beta dimension=1 alpha=2 beta=3 a=0.2 b=0.8 bandwidth=class=Point name=Unnamed dimension=2 values=[1,1] sample=class=Sample name=Unnamed implementation=class=SampleImplementation name=Unnamed size=1 dimension=2 data=[[0,0]] marginal[0]=class=Normal name=Second dimension=1 mean=class=Point name=Unnamed dimension=1 values=[2] sigma=class=Point name=Unnamed dimension=1 values=[3] correlationMatrix=class=CorrelationMatrix dimension=1 implementation=class=MatrixImplementation name=Unnamed rows=1 columns=1 values=[1] marginal[1]=class=Normal name=First dimension=1 mean=class=Point name=Unnamed dimension=1 values=[3] sigma=class=Point name=Unnamed dimension=1 values=[2] correlationMatrix=class=CorrelationMatrix dimension=1 implementation=class=MatrixImplementation name=Unnamed rows=1 columns=1 values=[1] margins PDF=0 margins CDF=0 margins quantile=class=Point name=Unnamed dimension=2 values=[2.05979,3.03986] margins CDF(quantile)=0.5 -margins realization=class=Point name=Unnamed dimension=2 values=[1.09898,1.8244] +margins realization=class=Point name=Unnamed dimension=2 values=[2.32558,2.52062] conditional PDF=0 conditional CDF=0 conditional quantile=4.36648 sequential conditional PDF=class=Point name=Unnamed dimension=3 values=[0.122043,0.243098,0.0176626] sequential conditional CDF(class=Point name=Unnamed dimension=3 values=[1.5,2.5,3.5])=class=Point name=Unnamed dimension=3 values=[0.0111293,0.832468,0.995118] sequential conditional quantile(class=Point name=Unnamed dimension=3 values=[0.0111293,0.832468,0.995118])=class=Point name=Unnamed 
dimension=3 values=[1.5,2.5,3.5] -anotherSample mean=class=Point name=Unnamed dimension=3 values=[2.70038,1.52644,0.367288] -anotherSample covariance=class=CovarianceMatrix dimension=3 implementation=class=MatrixImplementation name=Unnamed rows=3 columns=3 values=[0.406464,0.00197769,0.0034419,0.00197769,0.893273,7.39114e-05,0.0034419,7.39114e-05,1.56214] +anotherSample mean=class=Point name=Unnamed dimension=3 values=[2.68499,1.51644,0.363684] +anotherSample covariance=class=CovarianceMatrix dimension=3 implementation=class=MatrixImplementation name=Unnamed rows=3 columns=3 values=[0.399154,0.00656646,0.00735094,0.00656646,0.892133,-0.0207362,0.00735094,-0.0207362,1.6153] Distribution class=JointDistribution name=myDist2 dimension=3 copula=class=IndependentCopula name=IndependentCopula dimension=3 marginal[0]=class=Normal name=First dimension=1 mean=class=Point name=Unnamed dimension=1 values=[0] sigma=class=Point name=Unnamed dimension=1 values=[1] correlationMatrix=class=CorrelationMatrix dimension=1 implementation=class=MatrixImplementation name=Unnamed rows=1 columns=1 values=[1] marginal[1]=class=Uniform name=Second dimension=1 a=12345.6 b=123457 marginal[2]=class=TruncatedDistribution name=Third distribution=class=Normal name=Normal dimension=1 mean=class=Point name=Unnamed dimension=1 values=[2] sigma=class=Point name=Unnamed dimension=1 values=[1.5] correlationMatrix=class=CorrelationMatrix dimension=1 implementation=class=MatrixImplementation name=Unnamed rows=1 columns=1 values=[1] bounds=class=Interval name=Unnamed dimension=1 lower bound=class=Point name=Unnamed dimension=1 values=[1] upper bound=class=Point name=Unnamed dimension=1 values=[4] finite lower bound=[1] finite upper bound=[1] thresholdRealization=0.5 Distribution JointDistribution(Normal(mu = 0, sigma = 1), Uniform(a = 12345.6, b = 123457), TruncatedDistribution(Normal(mu = 2, sigma = 1.5), bounds = [1, 4]), IndependentCopula(dimension = 3)) diff --git 
a/lib/test/t_LinearEnumerateFunction_std.cxx b/lib/test/t_LinearEnumerateFunction_std.cxx index 6d3a80b3fa..dc999d8626 100644 --- a/lib/test/t_LinearEnumerateFunction_std.cxx +++ b/lib/test/t_LinearEnumerateFunction_std.cxx @@ -42,6 +42,18 @@ int main(int, char *[]) fullprint << "index=" << index << ", multi-index=" << multiIndex << ", linear index=" << f.inverse(multiIndex) << std::endl; } } + // Test getMarginal() + fullprint << "Test getMarginal()" << std::endl; + LinearEnumerateFunction enumerateFunction(10); + Indices indices({0, 2, 4, 6, 9}); + EnumerateFunction marginalEnumerate(enumerateFunction.getMarginal(indices)); + assert_equal(marginalEnumerate.getDimension(), indices.getSize()); + for (UnsignedInteger index = 0; index < size; ++index) + { + Indices multiIndex(marginalEnumerate(index)); + fullprint << "index=" << index << ", multi-index=" << multiIndex << std::endl; + } + } catch (TestFailed & ex) { diff --git a/lib/test/t_LinearEnumerateFunction_std.expout b/lib/test/t_LinearEnumerateFunction_std.expout index 943b932e90..ca2565f04b 100644 --- a/lib/test/t_LinearEnumerateFunction_std.expout +++ b/lib/test/t_LinearEnumerateFunction_std.expout @@ -31,3 +31,14 @@ index=6, multi-index=[1,0,1], linear index=6 index=7, multi-index=[0,2,0], linear index=7 index=8, multi-index=[0,1,1], linear index=8 index=9, multi-index=[0,0,2], linear index=9 +Test getMarginal() +index=0, multi-index=[0,0,0,0,0] +index=1, multi-index=[1,0,0,0,0] +index=2, multi-index=[0,1,0,0,0] +index=3, multi-index=[0,0,1,0,0] +index=4, multi-index=[0,0,0,1,0] +index=5, multi-index=[0,0,0,0,1] +index=6, multi-index=[2,0,0,0,0] +index=7, multi-index=[1,1,0,0,0] +index=8, multi-index=[1,0,1,0,0] +index=9, multi-index=[1,0,0,1,0] diff --git a/lib/test/t_NonLinearLeastSquaresCalibration_noobs.cxx b/lib/test/t_NonLinearLeastSquaresCalibration_noobs.cxx index 099c0bb447..c365edb80e 100644 --- a/lib/test/t_NonLinearLeastSquaresCalibration_noobs.cxx +++ 
b/lib/test/t_NonLinearLeastSquaresCalibration_noobs.cxx @@ -36,10 +36,7 @@ int main(int, char *[]) UnsignedInteger m = 100; Sample x(m, 0); - Description inVars(0); - inVars.add("a"); - inVars.add("b"); - inVars.add("c"); + const Description inVars = {"a", "b", "c"}; // Derived from y = a + b * x + c * x^2 at x=[-1.0, -0.6, -0.2, 0.2, 0.6, 1.0] Description formulas(1, "a + -1.0 * b + 1.0 * c"); formulas.add("a + -0.6 * b + 0.36 * c"); @@ -50,19 +47,14 @@ int main(int, char *[]) SymbolicFunction g(inVars, formulas); UnsignedInteger inputDimension = g.getInputDimension(); UnsignedInteger outputDimension = g.getOutputDimension(); - Point trueParameter(0); - trueParameter.add(2.8); - trueParameter.add(1.2); - trueParameter.add(0.5); + const Point trueParameter = {2.8, 1.2, 0.5}; Indices params(inputDimension); params.fill(); ParametricFunction model(g, params, trueParameter); Sample y = model(x); y += Normal(Point(outputDimension), Point(outputDimension, 0.05), IdentityMatrix(outputDimension)).getSample(y.getSize()); Point candidate(inputDimension, 1.0); - Indices bootstrapSizes(0); - bootstrapSizes.add(0); - bootstrapSizes.add(100); + const Indices bootstrapSizes = {0, 100}; for (UnsignedInteger n = 0; n < bootstrapSizes.getSize(); ++n) { fullprint << "Bootstrap size =" << bootstrapSizes[n] << std::endl; @@ -76,7 +68,8 @@ int main(int, char *[]) assert_almost_equal(parameterMAP, trueParameter, 1e-2); // Test with TNC fullprint << "2. 
TNC optim" << std::endl; - algo.setOptimizationAlgorithm(MultiStart(TNC(), LowDiscrepancyExperiment(SobolSequence(), Normal(candidate, CovarianceMatrix(candidate.getDimension())), ResourceMap::GetAsUnsignedInteger("NonLinearLeastSquaresCalibration-MultiStartSize")).generate())); + const UnsignedInteger multiStartSize = 10; + algo.setOptimizationAlgorithm(MultiStart(TNC(), LowDiscrepancyExperiment(SobolSequence(), Normal(candidate, CovarianceMatrix(candidate.getDimension())), multiStartSize).generate())); algo.run(); parameterMAP = algo.getResult().getParameterMAP(); fullprint << "MAP =" << parameterMAP << std::endl; diff --git a/lib/test/t_NonLinearLeastSquaresCalibration_std.cxx b/lib/test/t_NonLinearLeastSquaresCalibration_std.cxx index d277b74dd4..2abe000bfc 100644 --- a/lib/test/t_NonLinearLeastSquaresCalibration_std.cxx +++ b/lib/test/t_NonLinearLeastSquaresCalibration_std.cxx @@ -36,20 +36,12 @@ int main(int, char *[]) Sample x(m, 1); for (UnsignedInteger i = 0; i < m; ++i) x(i, 0) = (0.5 + i) / m; - Description inVars(0); - inVars.add("a"); - inVars.add("b"); - inVars.add("c"); - inVars.add("x"); + const Description inVars = {"a", "b", "c", "x"}; Description formulas(1, "a + b * exp(c * x)"); formulas.add("(a * x^2 + b) / (c + x^2)"); SymbolicFunction g(inVars, formulas); - Point trueParameter(0); - trueParameter.add(2.8); - trueParameter.add(1.2); - trueParameter.add(0.5); - Indices params(3); - params.fill(); + const Point trueParameter = {2.8, 1.2, 0.5}; + const Indices params = {0, 1, 2}; ParametricFunction model(g, params, trueParameter); Sample y = model(x); y += Normal(Point(2), Point(2, 0.05), IdentityMatrix(2)).getSample(y.getSize()); @@ -69,7 +61,8 @@ int main(int, char *[]) // To avoid discrepance between the platforms with or without CMinpack // With TNC fullprint << "2. 
TNC optim" << std::endl; - algo.setOptimizationAlgorithm(MultiStart(TNC(), LowDiscrepancyExperiment(SobolSequence(), Normal(candidate, CovarianceMatrix(candidate.getDimension())), ResourceMap::GetAsUnsignedInteger("NonLinearLeastSquaresCalibration-MultiStartSize")).generate())); + const UnsignedInteger multiStartSize = 10; + algo.setOptimizationAlgorithm(MultiStart(TNC(), LowDiscrepancyExperiment(SobolSequence(), Normal(candidate, CovarianceMatrix(candidate.getDimension())), multiStartSize).generate())); algo.run(); parameterMAP = algo.getResult().getParameterMAP(); fullprint << "MAP =" << parameterMAP << std::endl; diff --git a/lib/test/t_NormalGamma_std.cxx b/lib/test/t_NormalGamma_std.cxx index 97e48dbc6a..fdc8bb675c 100644 --- a/lib/test/t_NormalGamma_std.cxx +++ b/lib/test/t_NormalGamma_std.cxx @@ -130,7 +130,7 @@ int main(int, char *[]) fullprint << "Unilateral confidence interval (upper tail)=" << distribution.computeUnilateralConfidenceIntervalWithMarginalProbability(0.95, true, beta) << std::endl; fullprint << "beta=" << beta << std::endl; fullprint << "entropy=" << distribution.computeEntropy() << std::endl; - fullprint << "entropy (MC)=" << -distribution.computeLogPDF(distribution.getSample(1000000)).computeMean()[0] << std::endl; + //fullprint << "entropy (MC)=" << -distribution.computeLogPDF(distribution.getSample(1000000)).computeMean()[0] << std::endl; Point mean = distribution.getMean(); fullprint << "mean=" << mean << std::endl; Point standardDeviation = distribution.getStandardDeviation(); diff --git a/lib/test/t_NormalGamma_std.expout b/lib/test/t_NormalGamma_std.expout index 902a2024ba..641a161ffb 100644 --- a/lib/test/t_NormalGamma_std.expout +++ b/lib/test/t_NormalGamma_std.expout @@ -40,7 +40,6 @@ beta=0.997715 Unilateral confidence interval (upper tail)=class=Interval name=Unnamed dimension=2 lower bound=class=Point name=Unnamed dimension=2 values=[0.160511,-0.941338] upper bound=class=Point name=Unnamed dimension=2 values=[9.72932,370.978] 
finite lower bound=[1,1] finite upper bound=[1,1] beta=0.972528 entropy=1.7654 -entropy (MC)=1.76742 mean=class=Point name=Unnamed dimension=2 values=[1,0.75] standard deviation=class=Point name=Unnamed dimension=2 values=[0.5,1.03078] skewness=class=Point name=Unnamed dimension=2 values=[1.1547,0] diff --git a/lib/test/t_Normal_std.cxx b/lib/test/t_Normal_std.cxx index 5671e0cb74..f2e9583e8f 100644 --- a/lib/test/t_Normal_std.cxx +++ b/lib/test/t_Normal_std.cxx @@ -128,11 +128,11 @@ int main(int, char *[]) } Scalar Survival = distribution.computeSurvivalFunction( point ); fullprint << "survival=" << Survival << std::endl; - Point InverseSurvival = distribution.computeInverseSurvivalFunction(0.95); - fullprint << "Inverse survival=" << InverseSurvival << std::endl; if (dim <= 3) { - fullprint << "Survival(inverse survival)=" << distribution.computeSurvivalFunction(InverseSurvival) << std::endl; + const Point inverseSurvival = distribution.computeInverseSurvivalFunction(0.95); + fullprint << "Inverse survival=" << inverseSurvival << std::endl; + fullprint << "Survival(inverse survival)=" << distribution.computeSurvivalFunction(inverseSurvival) << std::endl; } Complex CF = distribution.computeCharacteristicFunction( point ); fullprint << "characteristic function=" << CF << std::endl; @@ -164,14 +164,14 @@ int main(int, char *[]) { Point CDFgr = distribution.computeCDFGradient( point ); fullprint << "cdf gradient =" << CDFgr << std::endl; + const Point quantile(distribution.computeQuantile(0.95)); + int oldPrecision = PlatformInfo::GetNumericalPrecision(); + PlatformInfo::SetNumericalPrecision( 4 ); + fullprint << "quantile=" << quantile << std::endl; + PlatformInfo::SetNumericalPrecision( oldPrecision ); + fullprint << "cdf(quantile)=" << distribution.computeCDF(quantile) << std::endl; } - Point quantile = distribution.computeQuantile( 0.95 ); - int oldPrecision = PlatformInfo::GetNumericalPrecision(); - PlatformInfo::SetNumericalPrecision( 4 ); - fullprint << 
"quantile=" << quantile << std::endl; - PlatformInfo::SetNumericalPrecision( oldPrecision ); - fullprint << "cdf(quantile)=" << distribution.computeCDF(quantile) << std::endl; - if (distribution.getDimension() <= 2) + if (dim <= 2) { // Confidence regions Scalar threshold; @@ -257,7 +257,7 @@ int main(int, char *[]) fullprint << "margins=" << margins << std::endl; fullprint << "margins PDF=" << margins.computePDF(Point(2, 0.5)) << std::endl; fullprint << "margins CDF=" << margins.computeCDF(Point(2, 0.5)) << std::endl; - quantile = margins.computeQuantile(0.95); + const Point quantile(margins.computeQuantile(0.95)); fullprint << "margins quantile=" << quantile << std::endl; fullprint << "margins CDF(quantile)=" << margins.computeCDF(quantile) << std::endl; fullprint << "margins realization=" << margins.getRealization() << std::endl; diff --git a/lib/test/t_Normal_std.expout b/lib/test/t_Normal_std.expout index d75cbf5a58..59e05a44cc 100644 --- a/lib/test/t_Normal_std.expout +++ b/lib/test/t_Normal_std.expout @@ -286,13 +286,10 @@ log pdf=-6.4181 pdf =0.0016318 cdf=0.22145 survival=0.067873 -Inverse survival=class=Point name=Unnamed dimension=4 values=[-2.1934,-4.38679,-6.58019,-8.77359] characteristic function=(0.0019305,0) log characteristic function=(-6.25,0) pdf gradient =class=Point name=Unnamed dimension=14 values=[0.000951889,-0.000135984,0.000135984,0,-0.00115586,-0.000849901,-0.000521272,-0.000407952,0.00179952,-0.00106747,0.0025429,0.000652724,-0.00130545,0.00195817] pdf gradient (FD)=class=Point name=Unnamed dimension=8 values=[0.000951889,-0.000135984,0.000135984,0,-0.00115586,-0.000849901,-0.000521272,-0.000407952] -quantile=class=Point name=Unnamed dimension=4 values=[2.193,4.387,6.58,8.774] -cdf(quantile)=0.95 entropy=8.2722 entropy (MC)=8.2708 mean=class=Point name=Unnamed dimension=4 values=[0,0,0,0] diff --git a/lib/test/t_Path_std.cxx b/lib/test/t_Path_std.cxx index cf5c52edf8..8fe241f919 100644 --- a/lib/test/t_Path_std.cxx +++ 
b/lib/test/t_Path_std.cxx @@ -36,23 +36,6 @@ int main(int, char *[]) Path::DirectoryList configDirectoryList(Path::GetConfigDirectoryList()); for (UnsignedInteger i = 0; i < configDirectoryList.size(); ++i) fullprint << "configDirectoryList[" << i << "]=" << configDirectoryList[i] << std::endl; - Path::DirectoryList list(2); - FileName directory1(Path::CreateTemporaryDirectory("testDirectory1")); - fullprint << "Directory 1=" << directory1 << std::endl; - list[0] = directory1; - FileName directory2(Path::CreateTemporaryDirectory("testDirectory2")); - fullprint << "Directory 2=" << directory2 << std::endl; - list[1] = directory2; - // Create a file in dir2 - std::ofstream testFile(FileName(directory2 + FileName("/testFile")).c_str()); - testFile << "test" << std::endl; - testFile.close(); - FileName findName(Path::FindFileByNameInDirectoryList("testFile", list)); - fullprint << "Find file=" << findName << std::endl; - FileName fileName(Path::BuildTemporaryFileName("testFile")); - fullprint << "Temporary file name=" << fileName << std::endl; - Os::DeleteDirectory(directory1); - Os::DeleteDirectory(directory2); } catch (FileOpenException & ex) { diff --git a/lib/test/t_ResourceMap_std.cxx b/lib/test/t_ResourceMap_std.cxx index 94f09c29f0..309de5e8cc 100644 --- a/lib/test/t_ResourceMap_std.cxx +++ b/lib/test/t_ResourceMap_std.cxx @@ -32,7 +32,7 @@ int main(int, char *[]) // Create a ResourceMap Object fullprint << ResourceMap::GetInstance() << std::endl; - fullprint << "Extract from ResourceMap: Path-TemporaryDirectory -> " << ResourceMap::GetAsString("Path-TemporaryDirectory") << std::endl; + fullprint << "Extract from ResourceMap: Cache-MaxSize -> " << ResourceMap::GetAsUnsignedInteger("Cache-MaxSize") << std::endl; // Create string key fullprint << "Create key: dummy_key" << std::endl; diff --git a/python/doc/_static/css/custom.css b/python/doc/_static/css/custom.css index a5cf9950a3..f12696e808 100644 --- a/python/doc/_static/css/custom.css +++ 
b/python/doc/_static/css/custom.css @@ -28,3 +28,9 @@ pre, div[class*="highlight-"] { /* Shrink overlarge warning box from PR #2169 */ div.admonition::after{content:none;} + +/* override basic.css + * xref https://github.com/sphinx-doc/sphinx/issues/10918 */ +table.align-default { + margin-left: 0px; +} diff --git a/python/doc/_templates/Copula.rst_t b/python/doc/_templates/Copula.rst_t index 2e2440d12b..4adc73e526 100644 --- a/python/doc/_templates/Copula.rst_t +++ b/python/doc/_templates/Copula.rst_t @@ -8,28 +8,30 @@ import openturns.experimental as otexp from matplotlib import pyplot as plt from openturns.viewer import View - if ot.{{ objname }}().__class__.__name__ == 'EmpiricalBernsteinCopula': + if "{{ objname }}" == "EmpiricalBernsteinCopula": sample = ot.Dirichlet([1.0, 2.0, 3.0]).getSample(100) copula = ot.EmpiricalBernsteinCopula(sample, 4) - elif ot.{{ objname }}().__class__.__name__ == 'ExtremeValueCopula': + elif "{{ objname }}" == "ExtremeValueCopula": copula = ot.ExtremeValueCopula(ot.SymbolicFunction("t", "t^3/2-t/2+1")) - elif ot.{{ objname }}().__class__.__name__ == 'MaximumEntropyOrderStatisticsCopula': + elif "{{ objname }}" == "MaximumEntropyOrderStatisticsCopula": marginals = [ot.Beta(1.5, 3.2, 0.0, 1.0), ot.Beta(2.0, 4.3, 0.5, 1.2)] copula = ot.MaximumEntropyOrderStatisticsCopula(marginals) - elif ot.{{ objname }}().__class__.__name__ == 'NormalCopula': + elif "{{ objname }}" == "NormalCopula": R = ot.CorrelationMatrix(2) R[1, 0] = 0.8 copula = ot.NormalCopula(R) - elif ot.{{ objname }}().__class__.__name__ == 'SklarCopula': + elif "{{ objname }}" == "SklarCopula": student = ot.Student(3.0, [1.0] * 2, [3.0] * 2, ot.CorrelationMatrix(2)) copula = ot.SklarCopula(student) - elif ot.{{ objname }}().__class__.__name__ == 'StudentCopula': - copula = otexp.StudentCopula(3.0, ot.CorrelationMatrix(2)) + elif "{{ objname }}" == "StudentCopula": + R = ot.CorrelationMatrix(2) + R[1, 0] = 0.3 + copula = otexp.StudentCopula(3.0, R) else: copula = ot.{{ 
objname }}() if copula.getDimension() == 1: copula = ot.{{ objname }}(2) - copula.setDescription(['$u_1$', '$u_2$']) + copula.setDescription(["$u_1$", "$u_2$"]) pdf_graph = copula.drawPDF() cdf_graph = copula.drawCDF() fig = plt.figure(figsize=(10, 4)) @@ -37,7 +39,7 @@ cdf_axis = fig.add_subplot(122) View(pdf_graph, figure=fig, axes=[pdf_axis], add_legend=False, square_axes=True) View(cdf_graph, figure=fig, axes=[cdf_axis], add_legend=False, square_axes=True) - title = str(copula)[:100].split('\n')[0] + title = str(copula)[:100].split("\n")[0] fig.suptitle(title) .. currentmodule:: {{ module }} diff --git a/python/doc/_templates/CovarianceModel.rst_t b/python/doc/_templates/CovarianceModel.rst_t index 46e8598754..ee0c39635b 100644 --- a/python/doc/_templates/CovarianceModel.rst_t +++ b/python/doc/_templates/CovarianceModel.rst_t @@ -7,23 +7,23 @@ import openturns as ot from matplotlib import pyplot as plt from openturns.viewer import View - if ot.{{ objname }}().__class__.__name__ == 'ExponentialModel': + if "{{ objname }}" == "ExponentialModel": covarianceModel = ot.ExponentialModel([0.5], [5.0]) - elif ot.{{ objname }}().__class__.__name__ == 'GeneralizedExponential': + elif "{{ objname }}" == "GeneralizedExponential": covarianceModel = ot.GeneralizedExponential([2.0], [3.0], 1.5) - elif ot.{{ objname }}().__class__.__name__ == 'ProductCovarianceModel': + elif "{{ objname }}" == "ProductCovarianceModel": amplitude = [1.0] scale1 = [4.0] scale2 = [4.0] cov1 = ot.ExponentialModel(scale1, amplitude) cov2 = ot.ExponentialModel(scale2, amplitude) covarianceModel = ot.ProductCovarianceModel([cov1, cov2]) - elif ot.{{ objname }}().__class__.__name__ == 'RankMCovarianceModel': + elif "{{ objname }}" == "RankMCovarianceModel": variance = [1.0, 2.0] basis = ot.LinearBasisFactory().build() covarianceModel = ot.RankMCovarianceModel(variance, basis) - elif ot.{{ objname }}().__class__.__name__ == 'StationaryFunctionalCovarianceModel': - rho = ot.SymbolicFunction(['tau'], 
['exp(-tau)*cos(2*pi_*tau)']) + elif "{{ objname }}" == "StationaryFunctionalCovarianceModel": + rho = ot.SymbolicFunction(["tau"], ["exp(-tau)*cos(2*pi_*tau)"]) covarianceModel = ot.StationaryFunctionalCovarianceModel([1.0], [1.0], rho) else: covarianceModel = ot.{{ objname }}() @@ -34,7 +34,7 @@ def f(x): return [covarianceModel(x)[0, 0]] func = ot.PythonFunction(1, 1, f) - func.setDescription(['$tau$', '$cov$']) + func.setDescription(["$tau$", "$cov$"]) cov_graph = func.draw(-3.0 * scale, 3.0 * scale, 129) cov_graph.setTitle(title) fig = plt.figure(figsize=(10, 4)) @@ -44,7 +44,7 @@ def f(x): return [covarianceModel([x[0]], [x[1]])[0, 0]] func = ot.PythonFunction(2, 1, f) - func.setDescription(['$s$', '$t$', '$cov$']) + func.setDescription(["$s$", "$t$", "$cov$"]) cov_graph = func.draw([-3.0 * scale]*2, [3.0 * scale]*2, [129]*2) cov_graph.setTitle(title) fig = plt.figure(figsize=(10, 4)) @@ -56,7 +56,7 @@ def f(x): return [covarianceModel(x)[0, 0]] func = ot.PythonFunction(2, 1, f) - func.setDescription(['$s$', '$t$', '$cov$']) + func.setDescription(["$s$", "$t$", "$cov$"]) cov_graph = func.draw(-3.0 * scale, 3.0 * scale, [129]*2) cov_graph.setTitle(title) fig = plt.figure(figsize=(10, 4)) diff --git a/python/doc/_templates/Distribution.rst_t b/python/doc/_templates/Distribution.rst_t index 28398b7c64..06350b5278 100644 --- a/python/doc/_templates/Distribution.rst_t +++ b/python/doc/_templates/Distribution.rst_t @@ -8,41 +8,41 @@ from matplotlib import pyplot as plt from openturns.viewer import View title = None - if ot.{{ objname }}().__class__.__name__ == 'Bernoulli': + if "{{ objname }}" == "Bernoulli": distribution = ot.Bernoulli(0.7) - elif ot.{{ objname }}().__class__.__name__ == 'Binomial': + elif "{{ objname }}" == "Binomial": distribution = ot.Binomial(5, 0.2) - elif ot.{{ objname }}().__class__.__name__ == 'Hypergeometric': + elif "{{ objname }}" == "Hypergeometric": distribution = ot.Hypergeometric(10, 4, 7) - elif ot.{{ objname 
}}().__class__.__name__ == 'CumulativeDistributionNetwork': + elif "{{ objname }}" == "CumulativeDistributionNetwork": coll = [ot.Normal(2),ot.Dirichlet([0.5, 1.0, 1.5])] distribution = ot.CumulativeDistributionNetwork(coll, ot.BipartiteGraph([[0,1], [0,1]])) - elif ot.{{ objname }}().__class__.__name__ == 'Histogram': + elif "{{ objname }}" == "Histogram": distribution = ot.Histogram([-1.0, 0.5, 1.0, 2.0], [0.45, 0.4, 0.15]) - elif ot.{{ objname }}().__class__.__name__ == 'KernelMixture': + elif "{{ objname }}" == "KernelMixture": kernel = ot.Uniform() sample = ot.Normal().getSample(5) bandwidth = [1.0] distribution = ot.KernelMixture(kernel, bandwidth, sample) - elif ot.{{ objname }}().__class__.__name__ == 'MaximumDistribution': + elif "{{ objname }}" == "MaximumDistribution": coll = [ot.Uniform(2.5, 3.5), ot.LogUniform(1.0, 1.2), ot.Triangular(2.0, 3.0, 4.0)] distribution = ot.MaximumDistribution(coll) - elif ot.{{ objname }}().__class__.__name__ == 'Multinomial': + elif "{{ objname }}" == "Multinomial": distribution = ot.Multinomial(5, [0.2]) - elif ot.{{ objname }}().__class__.__name__ == 'RandomMixture': + elif "{{ objname }}" == "RandomMixture": coll = [ot.Triangular(0.0, 1.0, 5.0), ot.Uniform(-2.0, 2.0)] weights = [0.8, 0.2] cst = 3.0 distribution = ot.RandomMixture(coll, weights, cst) - elif ot.{{ objname }}().__class__.__name__ == 'SmoothedUniform': + elif "{{ objname }}" == "SmoothedUniform": distribution = ot.SmoothedUniform(-1.0, 10.0, 1.0) - elif ot.{{ objname }}().__class__.__name__ == 'TruncatedDistribution': + elif "{{ objname }}" == "TruncatedDistribution": distribution = ot.TruncatedDistribution(ot.Normal(2.0, 1.5), 1.0, 4.0) - elif ot.{{ objname }}().__class__.__name__ == 'UserDefined': + elif "{{ objname }}" == "UserDefined": distribution = ot.UserDefined([[1.0], [2.0], [3.0]], [0.4, 0.5, 1.0]) - elif ot.{{ objname }}().__class__.__name__ == 'ZipfMandelbrot': + elif "{{ objname }}" == "ZipfMandelbrot": distribution = ot.ZipfMandelbrot(10, 2.5, 
0.3) - elif ot.{{ objname }}().__class__.__name__ == 'Normal': + elif "{{ objname }}" == "Normal": cov = ot.CovarianceMatrix([[1.0, -0.5], [-0.5, 1.0]]) distribution = ot.Normal([0.0, 0.0], cov) title = "Normal dist. with correlation coefficient {}".format(cov[0, 1]) @@ -51,9 +51,9 @@ dimension = distribution.getDimension() if title is None: - title = str(distribution)[:100].split('\n')[0] + title = str(distribution)[:100].split("\n")[0] if dimension == 1: - distribution.setDescription(['$x$']) + distribution.setDescription(["$x$"]) pdf_graph = distribution.drawPDF() cdf_graph = distribution.drawCDF() fig = plt.figure(figsize=(10, 4)) @@ -78,8 +78,8 @@ grid.setGraph(0, 1, cdf_graph) grid.setTitle(title) fig = View(grid).getFigure() - fig.axes[0].set_title('PDF') - fig.axes[1].set_title('CDF') + fig.axes[0].set_title("PDF") + fig.axes[1].set_title("CDF") .. currentmodule:: {{ module }} diff --git a/python/doc/_templates/DistributionFactory.rst_t b/python/doc/_templates/DistributionFactory.rst_t index 3e70ebe64e..d3738d7f2f 100644 --- a/python/doc/_templates/DistributionFactory.rst_t +++ b/python/doc/_templates/DistributionFactory.rst_t @@ -5,10 +5,14 @@ :include-source: False import openturns as ot + import openturns.experimental as otexp from matplotlib import pyplot as plt from openturns.viewer import View ot.RandomGenerator.SetSeed(0) - factory = ot.{{ objname }}() + if hasattr(ot, "{{ objname }}"): + factory = ot.{{ objname }}() + else: + factory = otexp.{{ objname }}() ref = factory.build() dimension = ref.getDimension() if dimension <= 2: diff --git a/python/doc/_templates/DistributionHighDimension.rst_t b/python/doc/_templates/DistributionHighDimension.rst_t index 227eb77eee..003768e511 100644 --- a/python/doc/_templates/DistributionHighDimension.rst_t +++ b/python/doc/_templates/DistributionHighDimension.rst_t @@ -7,9 +7,9 @@ import openturns as ot from matplotlib import pyplot as plt from openturns.viewer import View - if ot.{{ objname 
}}().__class__.__name__ == 'BlockIndependentCopula': + if "{{ objname }}" == "BlockIndependentCopula": distribution = ot.BlockIndependentCopula([ot.ClaytonCopula(2.0), ot.GumbelCopula(3.0)]) - elif ot.{{ objname }}().__class__.__name__ == 'JointDistribution': + elif "{{ objname }}" == "JointDistribution": R = ot.CorrelationMatrix(3) R[0,1]=0.5 R[0,2]=0.3 @@ -17,7 +17,7 @@ copula = ot.NormalCopula(R) marginals = [ot.Uniform(1.0, 2.0), ot.Normal(2.0, 3.0), ot.Gamma(5.5, 2.0)] distribution = ot.JointDistribution(marginals, copula) - elif ot.{{ objname }}().__class__.__name__ == 'BlockIndependentDistribution': + elif "{{ objname }}" == "BlockIndependentDistribution": R = ot.CorrelationMatrix(2) R[0,1]=0.5 atom1 = ot.JointDistribution([ot.Exponential(2.0), ot.WeibullMax(2.0, 2.0)], ot.NormalCopula(R)) @@ -63,7 +63,7 @@ if j == dimension-1: pdf_graph.setXTitle(r"$x_" + str(i) + r"$") View(pdf_graph, figure=fig, axes=[pdf_axis], add_legend=False, square_axes=distribution.isCopula()) - title = str(distribution)[:100].split('\n')[0] + title = str(distribution)[:100].split("\n")[0] fig.suptitle(title) .. 
currentmodule:: {{ module }} diff --git a/python/doc/_templates/OrthogonalUniVariateFunctionFamily.rst_t b/python/doc/_templates/OrthogonalUniVariateFunctionFamily.rst_t index 664c4a5095..f640c8b406 100644 --- a/python/doc/_templates/OrthogonalUniVariateFunctionFamily.rst_t +++ b/python/doc/_templates/OrthogonalUniVariateFunctionFamily.rst_t @@ -9,11 +9,11 @@ from matplotlib import pyplot as plt n_functions = 8 function_factory = ot.{{ objname }}() - if function_factory.getClassName() == 'KrawtchoukFactory': + if function_factory.getClassName() == "KrawtchoukFactory": function_factory = ot.{{ objname }}(n_functions, .5) functions = [function_factory.build(i) for i in range(n_functions)] measure = function_factory.getMeasure() - if hasattr(measure, 'getA') and hasattr(measure, 'getB'): + if hasattr(measure, "getA") and hasattr(measure, "getB"): x_min = measure.getA() x_max = measure.getB() else: @@ -26,9 +26,9 @@ for i in range(n_functions): plt.plot(meshed_support, [functions[i](x) for x in meshed_support], lw=1.5, - label='$\phi_{' + str(i) + '}(x)$') - plt.xlabel('$x$') - plt.ylabel('$\phi_i(x)$') + label=fr"$\phi_{{{i}}}(x)$") + plt.xlabel(r"$x$") + plt.ylabel(r"$\phi_i(x)$") plt.xlim(x_min, x_max) plt.grid() box = ax.get_position() diff --git a/python/doc/_templates/Process.rst_t b/python/doc/_templates/Process.rst_t index e7e2f27a1b..05e5fccafd 100644 --- a/python/doc/_templates/Process.rst_t +++ b/python/doc/_templates/Process.rst_t @@ -7,10 +7,10 @@ import openturns as ot from matplotlib import pyplot as plt from openturns.viewer import View - if ot.{{ objname }}().__class__.__name__ == 'Process': + if "{{ objname }}" == "Process": # default to Gaussian for the interface class process = ot.GaussianProcess() - elif ot.{{ objname }}().__class__.__name__ == 'DiscreteMarkovChain': + elif "{{ objname }}" == "DiscreteMarkovChain": process = ot.{{ objname }}() process.setTransitionMatrix(ot.SquareMatrix([[0.0,0.5,0.5],[0.7,0.0,0.3],[0.8,0.0,0.2]])) origin = 0 @@ 
-18,7 +18,7 @@ else: process = ot.{{ objname }}() process.setTimeGrid(ot.RegularGrid(0.0, 0.02, 50)) - process.setDescription(['$x$']) + process.setDescription(["$x$"]) sample = process.getSample(6) sample_graph = sample.drawMarginal(0) sample_graph.setTitle(str(process)) diff --git a/python/doc/bibliography.rst b/python/doc/bibliography.rst index f0449f254d..bf0710254e 100644 --- a/python/doc/bibliography.rst +++ b/python/doc/bibliography.rst @@ -72,6 +72,10 @@ Bibliography http://ceres-solver.org .. [chacon2018] Chacón, J. E., & Duong, T. (2018). *Multivariate kernel smoothing and its applications.* CRC Press. +.. [charpentier2015] Charpentier, A., & Flachaire, E. (2014). + *Log-Transform Kernel Density Estimation of Income Distribution* WP 2015-Nr 6, + AMSE Aix Marseille School of Economics. + `pdf `__ .. [chihara1978] Chihara, T. S. (1978). *An introduction to orthogonal polynomials.* Dover publications. .. [chapelle2002] Chapelle, O., Vapnik, V., & Bengio, Y. (2002). @@ -93,6 +97,8 @@ Bibliography .. [daveiga2022] Da Veiga, S., Gamboa, F., Iooss, B., and Prieur, C. (2021). *Basics and trends in sensitivity analysis: theory and practice in R.* Society for Industrial and Applied Mathematics. +.. [delmas2006] Delmas, J.F. and Jourdain, B. *Modèles aléatoires: Applications aux + sciences de l'ingénieur et du vivant*, Berlin, Heidelberg: Springer Berlin Heidelberg (2006). .. [deRocquigny2006] De Rocquigny, É. (2006). *La maîtrise des incertitudes dans un contexte industriel. 1re partie: une approche méthodologique globale basée sur des exemples.* @@ -144,7 +153,7 @@ Bibliography ACM Transactions on Mathematical Software 29(3):297-308, September 2003. `pdf `__ .. [ghanem1991] Ghanem R. and P.
Spanos, 1991, - *Stochastic finite elements – A spectral approach*, + *Stochastic finite elements - A spectral approach*, Springer Verlag. (Reedited by Dover Publications, 2003). .. [gerstner1998] Gerstner, T., & Griebel, M. (1998). *Numerical integration using sparse grids.* Numerical algorithms, 18 (3), 209-232. @@ -155,9 +164,9 @@ Bibliography .. [hormann1993] Hormann W., *The generation of Binomial Random Variates* Journal of Statistical Computation and Simulation 46, pp. 101-110, 1993. `pdf `__ -.. [hahn2005] Thomas Hahn, *Cuba — a library for multidimensional numerical integration* +.. [hahn2005] Thomas Hahn, *Cuba - a library for multidimensional numerical integration* Computer Physics Communications, 168(2), 78-95. - `pdf ` + `pdf `__ .. [halko2010] Nathan Halko, Per-Gunnar Martinsson, Joel A. Tropp, *Finding structure with randomness: Probabilistic algorithms for constructing approximate matrix decompositions*, @@ -204,6 +213,10 @@ Bibliography *Global optimization of expensive black-box functions*, Journal of Global Optimization, 13(4), 455-492, 1998. `pdf `__ +.. [jones1993] M.C. Jones, + *Simple boundary correction for kernel density estimation*, + Statistics and Computing. Vol. 3, Issue 3, 1993, pp. 135-146, + https://doi.org/10.1007/BF00147776 .. [Keutelian1991] Hovhannes Keutelian. *The Kolmogorov-Smirnov test when parameters are estimated from data*, 30 April 1991, Fermilab. 
diff --git a/python/doc/conf.py.in b/python/doc/conf.py.in index ad02ee77fb..d15aae484f 100644 --- a/python/doc/conf.py.in +++ b/python/doc/conf.py.in @@ -14,6 +14,18 @@ import sys import os import subprocess from sphinx_gallery.sorting import ExplicitOrder +import sphinx_gallery + +try: + from packaging.version import Version +except ImportError: + from pkg_resources import parse_version as Version + +try: + import joblib + have_joblib = True +except ImportError: + have_joblib = False # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the @@ -69,7 +81,7 @@ extensions.append('matplotlib.sphinxext.plot_directive') plot_formats = ['png'] extensions.append('sphinx_copybutton') -copybutton_prompt_text = ">>> " +copybutton_exclude = '.linenos, .gp' extensions.append('sphinx_gallery.gen_gallery') sphinx_gallery_conf = { @@ -127,6 +139,9 @@ sphinx_gallery_conf = { 'min_reported_time': 2, } +if Version(sphinx_gallery.__version__) >= Version("0.17.0"): + sphinx_gallery_conf["parallel"] = have_joblib + # TODO: drop jquery in custom search.html extensions.append('sphinxcontrib.jquery') @@ -134,7 +149,7 @@ extensions.append('sphinxcontrib.jquery') templates_path = ['_templates'] # The suffix of source filenames. -source_suffix = '.rst' +source_suffix = {'.rst': 'restructuredtext'} # The encoding of source files. 
#source_encoding = 'utf-8-sig' diff --git a/python/doc/examples/data_analysis/distribution_fitting/plot_estimate_gev_fremantle.py b/python/doc/examples/data_analysis/distribution_fitting/plot_estimate_gev_fremantle.py index eb201947b3..e61976e726 100644 --- a/python/doc/examples/data_analysis/distribution_fitting/plot_estimate_gev_fremantle.py +++ b/python/doc/examples/data_analysis/distribution_fitting/plot_estimate_gev_fremantle.py @@ -287,12 +287,12 @@ # We get the asymptotic distribution of :math:`\vect{\beta}` to compute some confidence intervals of # the estimates, for example of order :math:`p = 0.95`. dist_beta = result_NonStatLL.getParameterDistribution() -condifence_level = 0.95 +confidence_level = 0.95 for i in range(beta.getSize()): - lower_bound = dist_beta.getMarginal(i).computeQuantile((1 - condifence_level) / 2)[ + lower_bound = dist_beta.getMarginal(i).computeQuantile((1 - confidence_level) / 2)[ 0 ] - upper_bound = dist_beta.getMarginal(i).computeQuantile((1 + condifence_level) / 2)[ + upper_bound = dist_beta.getMarginal(i).computeQuantile((1 + confidence_level) / 2)[ 0 ] print( diff --git a/python/doc/examples/data_analysis/distribution_fitting/plot_estimate_gev_pirie.py b/python/doc/examples/data_analysis/distribution_fitting/plot_estimate_gev_pirie.py index a4ccfb95cf..a12f818c10 100644 --- a/python/doc/examples/data_analysis/distribution_fitting/plot_estimate_gev_pirie.py +++ b/python/doc/examples/data_analysis/distribution_fitting/plot_estimate_gev_pirie.py @@ -252,12 +252,12 @@ # We get the asymptotic distribution of :math:`\vect{\beta}` to compute some confidence intervals of # the estimates, for example of order :math:`p = 0.95`. 
dist_beta = result_NonStatLL.getParameterDistribution() -condifence_level = 0.95 +confidence_level = 0.95 for i in range(beta.getSize()): - lower_bound = dist_beta.getMarginal(i).computeQuantile((1 - condifence_level) / 2)[ + lower_bound = dist_beta.getMarginal(i).computeQuantile((1 - confidence_level) / 2)[ 0 ] - upper_bound = dist_beta.getMarginal(i).computeQuantile((1 + condifence_level) / 2)[ + upper_bound = dist_beta.getMarginal(i).computeQuantile((1 + confidence_level) / 2)[ 0 ] print( diff --git a/python/doc/examples/data_analysis/distribution_fitting/plot_estimate_gev_racetime.py b/python/doc/examples/data_analysis/distribution_fitting/plot_estimate_gev_racetime.py index 18af19d8ce..046b2d7850 100644 --- a/python/doc/examples/data_analysis/distribution_fitting/plot_estimate_gev_racetime.py +++ b/python/doc/examples/data_analysis/distribution_fitting/plot_estimate_gev_racetime.py @@ -320,12 +320,12 @@ # the estimates, for example of order :math:`p = 0.95`. dist_beta = result_NonStatLL.getParameterDistribution() -condifence_level = 0.95 +confidence_level = 0.95 for i in range(beta.getSize()): - lower_bound = dist_beta.getMarginal(i).computeQuantile((1 - condifence_level) / 2)[ + lower_bound = dist_beta.getMarginal(i).computeQuantile((1 - confidence_level) / 2)[ 0 ] - upper_bound = dist_beta.getMarginal(i).computeQuantile((1 + condifence_level) / 2)[ + upper_bound = dist_beta.getMarginal(i).computeQuantile((1 + confidence_level) / 2)[ 0 ] print( @@ -463,12 +463,12 @@ # We get the asymptotic distribution of :math:`\vect{\beta}` to compute some confidence intervals of # the estimates, for example of order :math:`p = 0.95`. 
dist_beta = result_NonStatLL_2.getParameterDistribution() -condifence_level = 0.95 +confidence_level = 0.95 for i in range(beta.getSize()): - lower_bound = dist_beta.getMarginal(i).computeQuantile((1 - condifence_level) / 2)[ + lower_bound = dist_beta.getMarginal(i).computeQuantile((1 - confidence_level) / 2)[ 0 ] - upper_bound = dist_beta.getMarginal(i).computeQuantile((1 + condifence_level) / 2)[ + upper_bound = dist_beta.getMarginal(i).computeQuantile((1 + confidence_level) / 2)[ 0 ] print( diff --git a/python/doc/examples/data_analysis/distribution_fitting/plot_estimate_gpd_rain.py b/python/doc/examples/data_analysis/distribution_fitting/plot_estimate_gpd_rain.py index 0e8dcd0983..8c5657b66e 100644 --- a/python/doc/examples/data_analysis/distribution_fitting/plot_estimate_gpd_rain.py +++ b/python/doc/examples/data_analysis/distribution_fitting/plot_estimate_gpd_rain.py @@ -261,12 +261,12 @@ # We get the asymptotic distribution of :math:`\vect{\beta}` to compute some confidence intervals of # the estimates, for example of order :math:`p = 0.95`. 
dist_beta = result_NonStatLL.getParameterDistribution() -condifence_level = 0.95 +confidence_level = 0.95 for i in range(beta.getSize()): - lower_bound = dist_beta.getMarginal(i).computeQuantile((1 - condifence_level) / 2)[ + lower_bound = dist_beta.getMarginal(i).computeQuantile((1 - confidence_level) / 2)[ 0 ] - upper_bound = dist_beta.getMarginal(i).computeQuantile((1 + condifence_level) / 2)[ + upper_bound = dist_beta.getMarginal(i).computeQuantile((1 + confidence_level) / 2)[ 0 ] print( diff --git a/python/doc/examples/data_analysis/distribution_fitting/plot_estimate_non_parametric_distribution.py b/python/doc/examples/data_analysis/distribution_fitting/plot_estimate_non_parametric_distribution.py index 0836dfaf48..815d58ffbe 100644 --- a/python/doc/examples/data_analysis/distribution_fitting/plot_estimate_non_parametric_distribution.py +++ b/python/doc/examples/data_analysis/distribution_fitting/plot_estimate_non_parametric_distribution.py @@ -204,7 +204,7 @@ # Boundary corrections # -------------------- # -# We finish this example on an advanced feature of the kernel smoothing, the boundary corrections. +# We detail here an advanced feature of the kernel smoothing, the boundary corrections. # # %% @@ -257,5 +257,60 @@ # %% # The boundary correction made has a remarkable impact on the quality of the estimate for the small values. +# %% +# Log-transform treatment +# ----------------------- +# +# We finish this example on another advanced feature of the kernel smoothing: the log-transform treatment. +# This treatment is highly suited to skewed distributions, which are all challenging for kernel smoothing. 
+# + +# %% +# We consider several distributions which have significant skewness: +distCollection = [ot.LogNormal(0.0, 2.5), ot.Beta(20000.5, 2.5, 0.0, 1.0), ot.Exponential(), + ot.WeibullMax(1.0, 0.9, 0.0), ot.Mixture([ot.Normal(-1.0, 0.5), ot.Normal(1.0, 1.0)], [0.4, 0.6]), + ot.Mixture([ot.LogNormal(-1.0, 1.0, -1.0), ot.LogNormal(1.0, 1.0, 1.0)], [0.2, 0.8])] + +# %% +# For each distribution, we do the following steps: +# +# - we generate a sample of size 5000, +# - we fit a kernel smoothing distribution without the log-transform treatment, +# - we fit a kernel smoothing distribution with the log-transform treatment, +# - we plot the real distribution and both non parametric estimations. +# +# Other transformations could be used, but the Log-transform one is quite effective. If the skewness is moderate, +# there is almost no change wrt simple kernel smoothing. But if the skewness is large, the transformation performs +# very well. Note that, in addition, this transformation performs an automatic boundary correction. 
+grid = ot.GridLayout(2, 3) +ot.RandomGenerator.SetSeed(0) +for i, distribution in enumerate(distCollection): + sample = distribution.getSample(5000) + + # We draw the real distribution + graph = distribution.drawPDF() + graph.setLegends([distribution.getClassName()]) + # We choose the default kernel + kernel = ot.KernelSmoothing() + + # We activate no particular treatment + fitted = kernel.build(sample) + curve = fitted.drawPDF() + curve.setLegends(["Fitted"]) + graph.add(curve) + + # We activate the log-transform treatment + kernel.setUseLogTransform(True) + fitted = kernel.build(sample) + curve = fitted.drawPDF() + curve.setLegends(["Fitted LogTransform"]) + curve = curve.getDrawable(0) + curve.setLineStyle("dashed") + + graph.add(curve) + graph.setColors(ot.Drawable.BuildDefaultPalette(3)) + grid.setGraph(i // 3, i % 3, graph) + +view = viewer.View(grid) plt.show() diff --git a/python/doc/examples/data_analysis/manage_data_and_samples/plot_quantile_estimation_wilks.py b/python/doc/examples/data_analysis/manage_data_and_samples/plot_quantile_estimation_wilks.py index fce087d54d..ea4eac9732 100644 --- a/python/doc/examples/data_analysis/manage_data_and_samples/plot_quantile_estimation_wilks.py +++ b/python/doc/examples/data_analysis/manage_data_and_samples/plot_quantile_estimation_wilks.py @@ -1,57 +1,29 @@ """ -Estimate Wilks and empirical quantile -===================================== +Estimate a confidence interval of a quantile +============================================ """ # %% -# In this example we want to evaluate a particular quantile, with the empirical estimator or the Wilks one, from a sample of a random variable. +# In this example, we introduce two methods to estimate a confidence interval of the +# :math:`\alpha` level quantile (:math:`\alpha \in [0,1]`) of the distribution of +# a scalar random +# variable :math:`X`. 
Both methods use the order statistics to estimate: # +# - an asymptotic confidence interval with confidence level :math:`\beta \in [0,1]`, +# - an exact upper bounded confidence interval with confidence level :math:`\beta \in [0,1]`. # -# Let us suppose we want to estimate the quantile :math:`q_{\alpha}` of order :math:`\alpha` of the variable :math:`Y`: -# :math:`P(Y \leq q_{\alpha}) = \alpha`, from the sample :math:`(Y_1, ..., Y_n)` -# of size :math:`n`, with a confidence level equal to :math:`\beta`. -# -# We note :math:`(Y^{(1)}, ..., Y^{(n)})` the sample where the values are sorted in ascending order. -# The empirical estimator, noted :math:`q_{\alpha}^{emp}`, and its confidence interval, are defined by the expressions: -# -# .. math:: -# \left\{ -# \begin{array}{lcl} -# q_{\alpha}^{emp} & = & Y^{(E[n\alpha])} \\ -# P(q_{\alpha} \in [Y^{(i_n)}, Y^{(j_n)}]) & = & \beta \\ -# i_n & = & E[n\alpha - a_{\alpha}\sqrt{n\alpha(1-\alpha)}] \\ -# i_n & = & E[n\alpha + a_{\alpha}\sqrt{n\alpha(1-\alpha)}] -# \end{array} -# \right\} -# -# The Wilks estimator, noted :math:`q_{\alpha, \beta}^{Wilks}`, and its confidence interval, are defined by the expressions: -# -# .. math:: -# \left\{ -# \begin{array}{lcl} -# q_{\alpha, \beta}^{Wilks} & = & Y^{(n-i)} \\ -# P(q_{\alpha} \leq q_{\alpha, \beta}^{Wilks}) & \geq & \beta \\ -# i\geq 0 \, \, / \, \, n \geq N_{Wilks}(\alpha, \beta,i) -# \end{array} -# \right\} -# -# Once the order :math:`i` has been chosen, the Wilks number :math:`N_{Wilks}(\alpha, \beta,i)` is evaluated, -# thanks to the static method :math:`ComputeSampleSize(\alpha, \beta, i)` of the :class:`~openturns.Wilks` object. -# -# In the example, we want to evaluate a quantile :math:`\alpha = 95\%`, -# with a confidence level of :math:`\beta = 90\%` thanks to the :math:`4` th maximum of -# the ordered sample (associated to the order :math:`i = 3` ).
-# -# Be careful: :math:`i=0` means that the Wilks estimator is the maximum of the sample: -# it corresponds to the first maximum of the sample. +# In this example, we consider the quantile of level :math:`\alpha = 95\%`, +# with a confidence level of :math:`\beta = 90\%`. # %% import openturns as ot import math as m -import openturns.viewer as viewer ot.Log.Show(ot.Log.NONE) +# %% +# We consider a random vector which is the output of a model and an input distribution. + # %% model = ot.SymbolicFunction(["x1", "x2"], ["x1^2+x2"]) R = ot.CorrelationMatrix(2) @@ -60,49 +32,82 @@ inputDist.setDescription(["X1", "X2"]) inputVector = ot.RandomVector(inputDist) -# Create the output random vector Y=model(X) +# Create the output random vector output = ot.CompositeRandomVector(model, inputVector) # %% -# Quantile level -alpha = 0.95 +# We define the level :math:`\alpha` of the quantile and the confidence level :math:`\beta`. -# Confidence level of the estimation +# %% +alpha = 0.95 beta = 0.90 # %% -# Get a sample of the variable -N = 10**4 -sample = output.getSample(N) -graph = ot.UserDefined(sample).drawCDF() -view = viewer.View(graph) +# We generate a sample of the variable. + +# %% +n = 10**4 +sample = output.getSample(n) + +# %% +# We get the empirical estimator of the :math:`\alpha` level quantile which is the +# :math:`\lfloor \sampleSize \alpha \rfloor` -th order statistics evaluated on +# the sample. # %% -# Empirical Quantile Estimator empiricalQuantile = sample.computeQuantile(alpha) +print(empiricalQuantile) -# Get the indices of the confidence interval bounds -aAlpha = ot.Normal(1).computeQuantile((1.0 + beta) / 2.0)[0] -min_i = int(N * alpha - aAlpha * m.sqrt(N * alpha * (1.0 - alpha))) -max_i = int(N * alpha + aAlpha * m.sqrt(N * alpha * (1.0 - alpha))) -# print(min_i, max_i) +# %% +# The asymptotic confidence interval of level :math:`\beta` is :math:`\left[ X_{(i_n)}, X_{(j_n)}\right]` +# such that: +# +# .. 
math:: +# +# i_\sampleSize & = \left\lfloor \sampleSize \alpha - \sqrt{\sampleSize} \; z_{\frac{1+\beta}{2}} \; \sqrt{\alpha(1 - \alpha)} \right\rfloor\\ +# j_\sampleSize & = \left\lfloor \sampleSize \alpha + \sqrt{\sampleSize} \; z_{\frac{1+\beta}{2}} \; \sqrt{\alpha(1 - \alpha)} \right\rfloor +# +# where :math:`z_{\frac{1+\beta}{2}}` is the :math:`\frac{1+\beta}{2}` level quantile of the standard normal distribution (see [delmas2006]_ proposition 11.1.13). +# +# Then we have: +# +# .. math:: +# +# \lim\limits_{\sampleSize \rightarrow +\infty} \Prob{x_{\alpha} \in \left[ X_{(i_\sampleSize,\sampleSize)}, X_{(j_\sampleSize,\sampleSize)}\right]} = \beta +# + +# %% +a_beta = ot.Normal(1).computeQuantile((1.0 + beta) / 2.0)[0] +i_n = int(n * alpha - a_beta * m.sqrt(n * alpha * (1.0 - alpha))) +j_n = int(n * alpha + a_beta * m.sqrt(n * alpha * (1.0 - alpha))) +print(i_n, j_n) # Get the sorted sample sortedSample = sample.sort() -# Get the Confidence interval of the Empirical Quantile Estimator [infQuantile, supQuantile] -infQuantile = sortedSample[min_i - 1] -supQuantile = sortedSample[max_i - 1] +# Get the asymptotic confidence interval :math:`\left[ X_{(i_n)}, X_{(j_n)}\right]` +# Care: the index in the sorted sample is :math:`i_n-1` and :math:`j_n-1` +infQuantile = sortedSample[i_n - 1] +supQuantile = sortedSample[j_n - 1] print(infQuantile, empiricalQuantile, supQuantile) # %% -# Wilks number -i = N - (min_i + max_i) // 2 # compute wilks with the same sample size -wilksNumber = ot.Wilks.ComputeSampleSize(alpha, beta, i) -print("wilksNumber =", wilksNumber) +# The empirical quantile was estimated with the :math:`\lfloor \sampleSize\alpha \rfloor` -th order statistics evaluated on +# the sample of size :math:`\sampleSize`. 
+# We define :math:`i = \sampleSize-\lfloor \sampleSize\alpha \rfloor` and we evaluate the minimum sample size :math:`\tilde{\sampleSize}` that +# ensures that the :math:`(\tilde{\sampleSize}-i)` order statistics is greater than :math:`x_{\alpha}` with the confidence :math:`\beta`. + +# %% +i = n - int(n * alpha) +minSampleSize = ot.Wilks.ComputeSampleSize(alpha, beta, i) +print(minSampleSize) + +# %% +# Here we directly ask for the evaluation of the upper bounded confidence interval: +# the Wilks class estimates the previous minimum sample size, generates a +# sample with that size and extracts the empirical quantile of order :math:`(\tilde{\sampleSize}-i)`. # %% -# Wilks Quantile Estimator algo = ot.Wilks(output) -wilksQuantile = algo.computeQuantileBound(alpha, beta, i) -print("wilks Quantile 0.95 =", wilksQuantile) +upperBoundQuantile = algo.computeQuantileBound(alpha, beta, i) +print(upperBoundQuantile) diff --git a/python/doc/examples/meta_modeling/general_purpose_metamodels/plot_distribution_linear_regression.py b/python/doc/examples/meta_modeling/general_purpose_metamodels/plot_distribution_linear_regression.py index b5659d3333..615879f510 100644 --- a/python/doc/examples/meta_modeling/general_purpose_metamodels/plot_distribution_linear_regression.py +++ b/python/doc/examples/meta_modeling/general_purpose_metamodels/plot_distribution_linear_regression.py @@ -160,10 +160,10 @@ def plot_sample_by_kernel_smoothing( # We first consider the estimation of the variance :math:`\sigma^2`. # In the next cell, we consider a sample size equal to :math:`n = 6` with # :math:`p = 3` parameters. -# We use :math:`r = 1000` repetitions. +# We use :math:`r = 100` repetitions. -repetitions_size = 1000 +repetitions_size = 100 true_standard_deviation = 0.1 sample_size = 6 coefficients = ot.Point([3.0, 2.0, -1.0]) @@ -186,7 +186,7 @@ def plot_sample_by_kernel_smoothing( # %% # Then we increase the sample size :math:`n`. 
-repetitions_size = 1000 +repetitions_size = 100 true_standard_deviation = 0.1 sample_size = 100 coefficients = ot.Point([3.0, 2.0, -1.0]) @@ -210,7 +210,7 @@ def plot_sample_by_kernel_smoothing( # We now consider the estimation of the standard deviation :math:`\sigma`. -repetitions_size = 1000 +repetitions_size = 100 true_standard_deviation = 0.1 sample_size = 6 coefficients = ot.Point([3.0, 2.0, -1.0]) @@ -232,7 +232,7 @@ def plot_sample_by_kernel_smoothing( # as we could expect. -repetitions_size = 1000 +repetitions_size = 100 true_standard_deviation = 0.1 sample_size = 100 coefficients = ot.Point([3.0, 2.0, -1.0]) @@ -250,3 +250,7 @@ def plot_sample_by_kernel_smoothing( # If we use a sample size equal to :math:`n = 100` with # :math:`p = 3` parameters, we see that the distribution is almost normal. # We notice that the bias disappeared. + + +# %% +otv.View.ShowAll() diff --git a/python/doc/examples/meta_modeling/kriging_metamodel/plot_kriging_categorical.py b/python/doc/examples/meta_modeling/kriging_metamodel/plot_kriging_categorical.py index 9df78f951a..aac1c0f746 100644 --- a/python/doc/examples/meta_modeling/kriging_metamodel/plot_kriging_categorical.py +++ b/python/doc/examples/meta_modeling/kriging_metamodel/plot_kriging_categorical.py @@ -81,7 +81,7 @@ def illustrativeFunc(inp): initDistInd.add(ot.Uniform(lowerBoundInd[i], upperBoundInd[i])) initDistInd = ot.JointDistribution(initDistInd) initSampleInd = initDistInd.getSample(10) -optAlgInd = ot.MultiStart(ot.NLopt("LN_COBYLA"), initSampleInd) +optAlgInd = ot.MultiStart(ot.Cobyla(), initSampleInd) # %% # Generate the training data set @@ -96,7 +96,7 @@ def illustrativeFunc(inp): # %% # Initialize and parameterize the optimization algorithm initSampleLV = initDistLV.getSample(30) -optAlgLV = ot.MultiStart(ot.NLopt("LN_COBYLA"), initSampleLV) +optAlgLV = ot.MultiStart(ot.Cobyla(), initSampleLV) # %% # Create and train the Gaussian process models @@ -295,7 +295,7 @@ def Goldstein(inp): 
initDistInd.add(ot.Uniform(lowerBoundInd[i], upperBoundInd[i])) initDistInd = ot.JointDistribution(initDistInd) initSampleInd = initDistInd.getSample(10) -optAlgInd = ot.MultiStart(ot.NLopt("LN_COBYLA"), initSampleInd) +optAlgInd = ot.MultiStart(ot.Cobyla(), initSampleInd) # %% # In order to assess their respective robustness with regards to the training data set, @@ -316,7 +316,7 @@ def Goldstein(inp): # Initialize and parameterize the optimization algorithm initSampleLV = initDistLV.getSample(10) - optAlgLV = ot.MultiStart(ot.NLopt("LN_COBYLA"), initSampleLV) + optAlgLV = ot.MultiStart(ot.Cobyla(), initSampleLV) # Create and train the Gaussian process models basis = ot.ConstantBasisFactory(dim).build() diff --git a/python/doc/examples/meta_modeling/polynomial_chaos_metamodel/plot_functional_chaos.py b/python/doc/examples/meta_modeling/polynomial_chaos_metamodel/plot_functional_chaos.py index 70be95b844..52ee506946 100644 --- a/python/doc/examples/meta_modeling/polynomial_chaos_metamodel/plot_functional_chaos.py +++ b/python/doc/examples/meta_modeling/polynomial_chaos_metamodel/plot_functional_chaos.py @@ -213,10 +213,10 @@ ot.ResourceMap.GetAsUnsignedInteger("FunctionalChaosAlgorithm-MaximumTotalDegree") # %% -# This is why we explore the values from 1 to 14. +# This is why we explore the values from 1 to 10. 
# %% -maximumDegree = 15 +maximumDegree = 11 degrees = range(1, maximumDegree) r2Score = ot.Sample(len(degrees), outputDimension) for maximumDegree in degrees: diff --git a/python/doc/math_notations.sty b/python/doc/math_notations.sty index 5174097937..9acd0c2a9f 100644 --- a/python/doc/math_notations.sty +++ b/python/doc/math_notations.sty @@ -4,6 +4,7 @@ \usepackage{amsmath} \usepackage{amssymb} \usepackage{siunitx} +\usepackage{ stmaryrd } % Non-standard units \DeclareSIUnit{\mph}{mph} diff --git a/python/doc/theory/data_analysis/data_analysis.rst b/python/doc/theory/data_analysis/data_analysis.rst index 42d5e9c705..08a3e10833 100644 --- a/python/doc/theory/data_analysis/data_analysis.rst +++ b/python/doc/theory/data_analysis/data_analysis.rst @@ -14,14 +14,21 @@ Comparison of two samples qqplot_graph smirnov_test -Estimation of a parametric model --------------------------------- +Estimation of a nonparametric model +----------------------------------- .. toctree:: :maxdepth: 1 empirical_cdf kernel_smoothing + +Estimation of a parametric model +-------------------------------- + +.. toctree:: + :maxdepth: 1 + maximum_likelihood parametric_estimation diff --git a/python/doc/theory/data_analysis/kernel_smoothing.rst b/python/doc/theory/data_analysis/kernel_smoothing.rst index b580f5ab23..bf0f14c155 100644 --- a/python/doc/theory/data_analysis/kernel_smoothing.rst +++ b/python/doc/theory/data_analysis/kernel_smoothing.rst @@ -610,7 +610,7 @@ Another method is to use boundary kernels (see [chacon2018]_ page 76, [scott2015]_ page 157). In dimension 1, the boundary effects may be taken into account using -a *reflection* or *mirroring* method (see [silverman1982]_ page 31). +a *reflection* or *mirroring* method (see [silverman1982]_ page 31, [jones1993]_). 
the boundaries are automatically detected from the sample (with the *min* and *max* functions) and the kernel smoothed PDF is corrected in the boundary areas to remain within the boundaries, @@ -629,6 +629,49 @@ according to the mirroring technique: - this last kernel smoothed PDF is truncated within the initial range :math:`[min, max]` (conditional PDF). +Log-transform treatment +~~~~~~~~~~~~~~~~~~~~~~~ + +In this section, we consider a random variable i.e. :math:`d = 1`. This treatment is highly suited to skewed distributions, +which are all challenging for kernel smoothing. See [charpentier2015]_ to get more details. + +Let :math:`(X_i)_{1 \leq i \leq \sampleSize}` be some independent random variates, identically distributed according to :math:`X`. + +The log-transform treatment maps each :math:`X_j` into :math:`Y_j` as follows: + +.. math:: + + Y_j = T(X_j) = \left | + \begin{cases} + \log (X_j - \min_{i} X_i + \delta) & \mbox{if } \gamma_1(X) > 0\\ + \log (\max_{i} X_i - X_j + \delta) & \mbox{if } \gamma_1(X) < 0 + \end{cases} + \right. + +where :math:`\gamma_1(X) = \dfrac{\Expect{\left( X - \mu\right)^3}}{\sigma^3}` +is the skewness of :math:`X` with :math:`\mu = \Expect{X}`, :math:`\sigma^2 = \Var{X}` +and :math:`\delta \in \Rset^+_*` the shift scale. + +Once a kernel smoothed distribution has been fitted on the transformed data, the fitted distribution of :math:`X` +is built as :math:`T^{-1}(Y)` where :math:`Y` is distributed according to the kernel smoothed distribution. + +Given a sample :math:`(x_i)_{1 \leq i \leq n}` from :math:`X`, we denote by :math:`\hat{a} = \min_{i} x_i`, +:math:`\hat{b} = \max_{i} x_i` and :math:`y_i = T(x_i)` for :math:`1 \leq i \leq n`. +We build the kernel smoothing distribution of :math:`Y` using :math:`(y_i)_{1 \leq i \leq n}` +whose pdf is :math:`\hat{p}_Y` and cdf :math:`\hat{F}_Y`. + +We recover the pdf and cdf of :math:`X` as follows: + +.. 
math:: + + \hat{F}_X(x) & = \hat{F}_Y(T(x)) \\ + \hat{p}_X(x) & = T'(x) \hat{p}_Y(T(x)) + +We note that this transformation also embeds a treatment of the boundaries as the finite lower bound in case of positive skewness +or the finite upper bound in case of negative skewness is rejected to infinity. Thus, there is no more boundary effect on the +:math:`Y`-sample. + + Conclusion ~~~~~~~~~~ The next table presents a summary of histogram, kernel smoothing and diff --git a/python/doc/theory/data_analysis/quantile_estimation_wilks.rst b/python/doc/theory/data_analysis/quantile_estimation_wilks.rst index 4e4529cf04..3901fdf771 100644 --- a/python/doc/theory/data_analysis/quantile_estimation_wilks.rst +++ b/python/doc/theory/data_analysis/quantile_estimation_wilks.rst @@ -1,197 +1,103 @@ .. _quantile_estimation_wilks: -Estimation of a quantile by Wilks' method ------------------------------------------ +Estimation of a quantile upper bound by Wilks' method +----------------------------------------------------- -Let us denote -:math:`\underline{Y} = h\left( \vect{X},\vect{d} \right) = \left( Y^1,\ldots,Y^{n_Y} \right)`, -where :math:`\vect{X}= \left( X^1,\ldots,X^{n_X} \right)` is a random -vector, and :math:`\vect{d}` a deterministic vector. We seek here to -evaluate, using the probability distribution of the random vector -:math:`\vect{X}`, the :math:`\alpha`-quantile :math:`q_{Y^i}(\alpha)` of -:math:`Y^i`, where :math:`\alpha \in (0, 1)`: +We consider a random variable :math:`X` of dimension 1 and the unknown :math:`x_{\alpha}` +level quantile of its distribution (:math:`\alpha \in [0, 1]`). +We seek to evaluate an upper bound of :math:`x_{\alpha}` with a confidence greater or equal to +:math:`\beta`, using a given order statistics. + +Let :math:`(X_1, \dots, X_\sampleSize)` be some independent copies of :math:`X`. 
+Let :math:`X_{(k)}` be the :math:`k` -th order statistics of :math:`(X_1, \dots, X_\sampleSize)` which means that +:math:`X_{(k)}` is the :math:`k` -th maximum of :math:`(X_1, \dots, X_\sampleSize)` for :math:`1 \leq k \leq \sampleSize`. For +example, :math:`X_{(1)} = \min (X_1, \dots, X_\sampleSize)` is the minimum +and :math:`X_{(\sampleSize)} = \max (X_1, \dots, X_\sampleSize)` is the maximum. We have: .. math:: - \begin{aligned} - \Prob{ Y^i \leq q_{Y^i}(\alpha)} = \alpha - \end{aligned} + X_{(1)} \leq X_{(2)} \leq \dots \leq X_{(\sampleSize)} -If we have a sample -:math:`\left\{ \vect{x}_1,\ldots,\vect{x}_N \right\}` of :math:`N` -independent samples of the random vector :math:`\vect{X}`, -:math:`q_{Y^i}(\alpha)` can be estimated as follows: -- the sample :math:`\left\{ \vect{x}_1,\ldots,\vect{x}_N \right\}` of - vector :math:`\vect{X}` is first transformed to a sample - :math:`\left\{ y^i_1,\ldots,y^i_N \right\}` of the variable - :math:`Y^i`, using :math:`\underline{y} = h(\vect{x}_i,\vect{d})`, +Smallest rank for an upper bound to the quantile +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- the sample :math:`\left\{ y^i_1,\ldots,y^i_N \right\}` is then placed - in ascending order, which gives the sample - :math:`\left\{ y^{(1)},\ldots,y^{(N)} \right\}`, +Let :math:`(x_1, \dots, x_\sampleSize)` be an i.i.d. sample of size :math:`\sampleSize` of +the random variable :math:`X`. +Given a quantile level :math:`\alpha \in [0,1]`, a confidence level +:math:`\beta \in [0,1]`, and a sample size :math:`\sampleSize`, we seek the smallest +rank :math:`k \in \llbracket 1, \sampleSize \rrbracket` such that: -- this empirical estimation of the quantile is then calculated by the - formula: +.. math:: + :label: EqOrderStat - .. 
math:: + \Prob{x_{\alpha} \leq X_{(k)}} \geq \beta - \begin{aligned} - \widehat{q}_{y^i}(\alpha) = y^{([N\alpha]+1)} - \end{aligned} +The probability density and cumulative distribution functions of the order +statistics :math:`X_{(k)}` are: -where :math:`[N\alpha]` denotes the integral part of -:math:`N\alpha`. +.. math:: + :label: DistOrderStat + + F_{X_{(k)}}(x) & = \sum_{i=k}^{\sampleSize} \binom{\sampleSize}{i}\left(F(x) + \right)^i \left(1-F(x) + \right)^{\sampleSize-i} \\ + p_{X_{(k)}}(x) & = (\sampleSize-k+1)\binom{\sampleSize}{k-1}\left(F(x)\right)^{k-1} + \left(1-F(x) + \right)^{\sampleSize-k} p(x) + +We notice that :math:`F_{X_{(k)}}(x) = \overline{F}_{(\sampleSize,F(x))}(k-1)` where +:math:`F_{(\sampleSize,F(x))}` is the cumulated +distribution function of the Binomial distribution :math:`\cB(\sampleSize,F(x))` and +:math:`\overline{F}_{(\sampleSize,F(x))}(k) = 1 - F_{(\sampleSize,F(x))}(k)` is the +complementary cumulated distribution fonction (also named survival function in dimension +1). +Therefore: + +.. math:: -For example, if :math:`N=100` and :math:`\alpha = 0.95`, -:math:`\widehat{q}_Z(0.95)` is equal to :math:`y^{(96)}`, which is the -:math:`5^\textrm{th}` largest value of the sample -:math:`\left\{ y^i_1,\ldots,y^i_N \right\}`. We note that this -estimation has no meaning unless :math:`1/N \leq \alpha \leq 1-1/N`. For -example, if :math:`N=100`, one can only consider values of a to be -between 1% and 99%. + F_{X_{(k)}}(x_{\alpha}) = \sum_{i=k}^{\sampleSize} \binom{\sampleSize}{i} \alpha^i (1-\alpha)^{\sampleSize-i} + = \overline{F}_{(\sampleSize,\alpha)}(k-1) -It is also possible to calculate an upper limit for the quantile with a -confidence level :math:`\beta` chosen by the user; one can then be sure -with a :math:`\beta` level of confidence that the real value of -:math:`q_{Y^i}(\alpha))` is less than or equal to -:math:`\widehat{q}_{Y^i}(\alpha)_{\sup}`: +and equation :eq:`EqOrderStat` implies: .. 
math:: + :label: EqOrderStat2 - \begin{aligned} - \Prob{q_{Y^i}(\alpha) \leq \widehat{q}_{Y^i}(\alpha)_{\sup}} = \beta - \end{aligned} + 1-F_{X_{(k)}}(x_{\alpha})\geq \beta -The most robust method for calculating this upper limit consists of -taking -:math:`\widehat{q}_{Y^i}(\alpha)_{\sup} = y^{(j(\alpha,\beta,N))}` where -:math:`j(\alpha,\beta,N)` is an integer between 2 and :math:`N` found by -solving the equation: +This implies: .. math:: - \begin{aligned} - \sum_{k=1}^{j(\alpha,\beta,N) - 1} C^k_N \alpha^k \left( 1-\alpha \right)^{N-k} = \beta - \end{aligned} + F_{\sampleSize, \alpha}(k-1)\geq \beta -A solution to this does not necessarily exist, i.e. there may be no -integer value for :math:`j(\alpha,\beta,N)` satisfying this equality; -one can in this case choose the smallest integer :math:`j` such that: +The smallest rank :math:`k_{sol}` such that the previous equation is satisfied is: .. math:: - \begin{aligned} - \sum_{k=1}^{j(\alpha,\beta,N) - 1} C^k_N \alpha^k \left( 1-\alpha \right)^{N-k} > \beta - \end{aligned} - -which ensures that -:math:`\Prob{q_{Y^i}(\alpha) \leq \widehat{q}_{Y^i}(\alpha)_{\sup}} > \beta`; -in other words, the level of confidence of the quantile estimation is -greater than that initially required. - -This formula of the confidence interval can be used in two ways: - -- either directly to determine :math:`j(\alpha,\beta,N)` for the values - :math:`\alpha,\beta,N` chosen by the user, - -- or in reverse to determine the number :math:`N` of simulations to be - carried out for the values :math:`\alpha,\beta` and - :math:`j(\alpha,\beta,N)` chosen by the user; this is known as Wilks’ - formula. - -For example for :math:`\alpha = \beta = 95\%`, we take :math:`j=59` with -:math:`N = 59` simulations (that is the maximum value out of 59 samples) -or else :math:`j = 92` with :math:`N = 93` simulations (that is the -second largest result out of the 93 selections). 
For values of :math:`N` -between :math:`59` and :math:`92`, the upper limit is the maximum value -of the sample. The following tabular presents the whole results for -:math:`N \leq 1000`, still for :math:`\alpha = \beta = 95\%`. - -+-------------+------------------------------------------+--------------------------------------+ -| :math:`N` | Rank of the upper bound of the quantile | Rank of the empirical quantile | -+=============+==========================================+======================================+ -| 59 | 59 | 57 | -+-------------+------------------------------------------+--------------------------------------+ -| 93 | 92 | 89 | -+-------------+------------------------------------------+--------------------------------------+ -| 124 | 122 | 118 | -+-------------+------------------------------------------+--------------------------------------+ -| 153 | 150 | 146 | -+-------------+------------------------------------------+--------------------------------------+ -| 181 | 177 | 172 | -+-------------+------------------------------------------+--------------------------------------+ -| 208 | 203 | 198 | -+-------------+------------------------------------------+--------------------------------------+ -| 234 | 228 | 223 | -+-------------+------------------------------------------+--------------------------------------+ -| 260 | 253 | 248 | -+-------------+------------------------------------------+--------------------------------------+ -| 286 | 278 | 272 | -+-------------+------------------------------------------+--------------------------------------+ -| 311 | 302 | 296 | -+-------------+------------------------------------------+--------------------------------------+ -| 336 | 326 | 320 | -+-------------+------------------------------------------+--------------------------------------+ -| 361 | 350 | 343 | -+-------------+------------------------------------------+--------------------------------------+ -| 386 | 374 | 367 | 
-+-------------+------------------------------------------+--------------------------------------+ -| 410 | 397 | 390 | -+-------------+------------------------------------------+--------------------------------------+ -| 434 | 420 | 413 | -+-------------+------------------------------------------+--------------------------------------+ -| 458 | 443 | 436 | -+-------------+------------------------------------------+--------------------------------------+ -| 482 | 466 | 458 | -+-------------+------------------------------------------+--------------------------------------+ -| 506 | 489 | 481 | -+-------------+------------------------------------------+--------------------------------------+ -| 530 | 512 | 504 | -+-------------+------------------------------------------+--------------------------------------+ -| 554 | 535 | 527 | -+-------------+------------------------------------------+--------------------------------------+ -| 577 | 557 | 549 | -+-------------+------------------------------------------+--------------------------------------+ -| 601 | 580 | 571 | -+-------------+------------------------------------------+--------------------------------------+ -| 624 | 602 | 593 | -+-------------+------------------------------------------+--------------------------------------+ -| 647 | 624 | 615 | -+-------------+------------------------------------------+--------------------------------------+ -| 671 | 647 | 638 | -+-------------+------------------------------------------+--------------------------------------+ -| 694 | 669 | 660 | -+-------------+------------------------------------------+--------------------------------------+ -| 717 | 691 | 682 | -+-------------+------------------------------------------+--------------------------------------+ -| 740 | 713 | 704 | -+-------------+------------------------------------------+--------------------------------------+ -| 763 | 735 | 725 | 
-+-------------+------------------------------------------+--------------------------------------+ -| 786 | 757 | 747 | -+-------------+------------------------------------------+--------------------------------------+ -| 809 | 779 | 769 | -+-------------+------------------------------------------+--------------------------------------+ -| 832 | 801 | 791 | -+-------------+------------------------------------------+--------------------------------------+ -| 855 | 823 | 813 | -+-------------+------------------------------------------+--------------------------------------+ -| 877 | 844 | 834 | -+-------------+------------------------------------------+--------------------------------------+ -| 900 | 866 | 856 | -+-------------+------------------------------------------+--------------------------------------+ -| 923 | 888 | 877 | -+-------------+------------------------------------------+--------------------------------------+ -| 945 | 909 | 898 | -+-------------+------------------------------------------+--------------------------------------+ -| 968 | 931 | 920 | -+-------------+------------------------------------------+--------------------------------------+ -| 991 | 953 | 942 | -+-------------+------------------------------------------+--------------------------------------+ - -:math:`\widehat{q}_{Y^i}(\alpha)` is often called the “empirical -:math:`\alpha`-quantile” for the variable :math:`{Y^i}`. + k_{sol} & = \min \{ k \in \llbracket 1, n \rrbracket \, | \, F_{\sampleSize, \alpha}(k-1)\geq \beta \}\\ + & = 1 + \min \{ k \in \llbracket 1, n\rrbracket \, | \, F_{\sampleSize, \alpha}(k)\geq \beta \} + +An upper bound of :math:`x_{\alpha}` is estimated by the value of :math:`X_{(k_{sol})}` +on the sample +:math:`(x_1, \dots, x_\sampleSize)`. 
+ +Minimum sample size for an upper bound to the quantile +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Given :math:`\alpha`, :math:`\beta`, and :math:`k`, we seek for the smallest sample size +:math:`\sampleSize` +such that the equation :eq:`EqOrderStat` is satisfied. In order to do so, we solve the +equation :eq:`EqOrderStat2` with respect to the sample size :math:`\sampleSize`. + +Once the smallest size :math:`\sampleSize` has been estimated, a sample of size +:math:`\sampleSize` can be +generated from +:math:`X` and an upper bound of :math:`x_{\alpha}` is estimated using +:math:`x_{(\sampleSize-i)}` i.e. the :math:`\sampleSize - i`-th observation +in the ordered sample :math:`(x_{(1)}, \dots, x_{(\sampleSize)})`. + .. topic:: API: @@ -203,6 +109,6 @@ of the sample. The following tabular presents the whole results for .. topic:: References: - - Wilks, S.S. (1962). "Mathematical Statistics", New York-London + - Wilks, S. S. (1941). Determination of sample sizes for setting tolerance limits. The Annals of Mathematical Statistics, 12(1), 91-96 - Robert C.P., Casella G. (2004). Monte-Carlo Statistical Methods, Springer, ISBN 0-387-21239-6, 2nd ed. - Rubinstein R.Y. (1981). Simulation and The Monte-Carlo methods, John Wiley & Sons diff --git a/python/doc/user_manual/probabilistic_modelling.rst b/python/doc/user_manual/probabilistic_modelling.rst index 8bf4e2c579..337e887905 100644 --- a/python/doc/user_manual/probabilistic_modelling.rst +++ b/python/doc/user_manual/probabilistic_modelling.rst @@ -254,8 +254,6 @@ Refer to :ref:`copula`. 
:template: Copula.rst_t NormalCopula - - :template: class.rst_t experimental.StudentCopula diff --git a/python/doc/user_manual/statistics_on_sample.rst b/python/doc/user_manual/statistics_on_sample.rst index 0dfbc0b7a1..b7981e0fac 100644 --- a/python/doc/user_manual/statistics_on_sample.rst +++ b/python/doc/user_manual/statistics_on_sample.rst @@ -105,13 +105,7 @@ Building distributions from samples :template: DistributionFactory.rst_t SkellamFactory - - :template: class.rst_t - experimental.SmoothedUniformFactory - - :template: DistributionFactory.rst_t - StudentFactory TrapezoidalFactory TriangularFactory @@ -147,9 +141,6 @@ Building copulas from samples IndependentCopulaFactory NormalCopulaFactory PlackettCopulaFactory - - :template: class.rst_t - experimental.StudentCopulaFactory Sensitivity Analysis diff --git a/python/src/CMakeLists.txt b/python/src/CMakeLists.txt index e56b70d2ec..99ab535387 100644 --- a/python/src/CMakeLists.txt +++ b/python/src/CMakeLists.txt @@ -7,11 +7,6 @@ set (CMAKE_SWIG_FLAGS "" CACHE STRING "SWIG flags for generating wrapper code") # allows one to pass compile flags like -O1 to reduce memory usage set (SWIG_COMPILE_FLAGS "" CACHE STRING "C++ compiler flags used for wrapper code") -if (NOT DEFINED PYTHON_EXTENSION_MODULE_SUFFIX AND NOT CMAKE_CROSSCOMPILING) - execute_process (COMMAND ${Python_EXECUTABLE} -c "import importlib.machinery; print(importlib.machinery.EXTENSION_SUFFIXES[0])" - OUTPUT_VARIABLE PYTHON_EXTENSION_MODULE_SUFFIX OUTPUT_STRIP_TRAILING_WHITESPACE) -endif () - # generate SWIG runtime header execute_process (COMMAND ${CMAKE_COMMAND} -E make_directory ${CMAKE_CURRENT_BINARY_DIR}/openturns) add_custom_command (OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/openturns/swigpyrun.h @@ -22,14 +17,12 @@ add_custom_target (generate_swig_runtime DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/ope add_custom_target (generate_docstrings) add_custom_target (pylib) -include_directories (BEFORE ${CMAKE_CURRENT_BINARY_DIR}) -include_directories (BEFORE 
${INTERNAL_INCLUDE_DIRS}) -include_directories (BEFORE ${CMAKE_CURRENT_SOURCE_DIR}) - set (OPENTURNS_PYTHON_MODULES) macro (ot_add_python_module MODULENAME SOURCEFILE) set_source_files_properties (${SOURCEFILE} PROPERTIES CPLUSPLUS ON) set_source_files_properties (${SOURCEFILE} PROPERTIES SWIG_MODULE_NAME ${MODULENAME}) + set_source_files_properties (${SOURCEFILE} PROPERTIES INCLUDE_DIRECTORIES "${INTERNAL_INCLUDE_DIRS};${CMAKE_CURRENT_SOURCE_DIR}") + ot_install_swig_file (${SOURCEFILE}) if (${ARGC} GREATER 2) set (SWIG_MODULE_${MODULENAME}_EXTRA_DEPS ${ARGN}) @@ -64,7 +57,7 @@ macro (ot_add_python_module MODULENAME SOURCEFILE) add_custom_target (generate_${MODULENAME}_docstrings COMMAND ${CMAKE_COMMAND} -DDOCSTRING_SOURCES="${docstring_sources}" -DCURRENT_SOURCE_DIR=${CMAKE_CURRENT_SOURCE_DIR} -DTIMES=${TIMES} - -P ${PROJECT_SOURCE_DIR}/cmake/escape_backslash.cmake + -P ${PROJECT_SOURCE_DIR}/cmake/escape_backslash.cmake DEPENDS ${docstring_sources}) add_dependencies (generate_docstrings generate_${MODULENAME}_docstrings) @@ -72,23 +65,19 @@ macro (ot_add_python_module MODULENAME SOURCEFILE) swig_add_library (${MODULENAME} LANGUAGE python SOURCES ${SOURCEFILE} ${swig_other_sources}) add_dependencies (${MODULENAME} generate_swig_runtime) - target_include_directories (${MODULENAME} PRIVATE ${Python_INCLUDE_DIRS}) + target_include_directories (${MODULENAME} PRIVATE ${INTERNAL_INCLUDE_DIRS} ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR} BEFORE) swig_link_libraries (${MODULENAME} OT) if (TARGET Python::Module) target_link_libraries (${MODULENAME} Python::Module) else () + target_include_directories (${MODULENAME} PRIVATE ${Python_INCLUDE_DIRS}) target_link_libraries_with_dynamic_lookup (${MODULENAME} ${Python_LIBRARIES}) endif () set_target_properties (${MODULENAME} PROPERTIES COMPILE_FLAGS "${SWIG_COMPILE_FLAGS}") - set_target_properties (${MODULENAME} PROPERTIES UNITY_BUILD OFF) - if (DEFINED PYTHON_EXTENSION_MODULE_SUFFIX) - set_target_properties 
(${MODULENAME} PROPERTIES SUFFIX "${PYTHON_EXTENSION_MODULE_SUFFIX}") - endif () - install (FILES ${CMAKE_CURRENT_BINARY_DIR}/${MODULENAME}.py DESTINATION ${OPENTURNS_PYTHON_MODULE_PATH}/openturns) install (TARGETS ${MODULENAME} @@ -1055,10 +1044,10 @@ else () target_link_libraries_with_dynamic_lookup (memoryview ${Python_LIBRARIES}) endif () set_target_properties(memoryview PROPERTIES NO_SONAME ON PREFIX "") -if (DEFINED PYTHON_EXTENSION_MODULE_SUFFIX) - set_target_properties (memoryview PROPERTIES SUFFIX "${PYTHON_EXTENSION_MODULE_SUFFIX}") -elseif (WIN32 AND NOT CYGWIN) +if (WIN32 AND NOT CYGWIN) set_target_properties(memoryview PROPERTIES SUFFIX ".pyd") +else () + set_target_properties(memoryview PROPERTIES SUFFIX ".so") endif () set_target_properties(memoryview PROPERTIES COMPILE_FLAGS "${SWIG_COMPILE_FLAGS}") install (TARGETS memoryview LIBRARY DESTINATION ${OPENTURNS_PYTHON_MODULE_PATH}/openturns) diff --git a/python/src/Contour_doc.i.in b/python/src/Contour_doc.i.in index 56c47ec234..9cf48fc7d6 100644 --- a/python/src/Contour_doc.i.in +++ b/python/src/Contour_doc.i.in @@ -241,6 +241,7 @@ getAlpha" %feature("docstring") OT::Contour::isVminUsed "Accessor to the flag isVminUsed of the Contour element. +If false, the *vmin* value is ignored. Returns ------- @@ -262,6 +263,7 @@ setIsVminUsed" %feature("docstring") OT::Contour::setIsVminUsed "Accessor to the flag isVminUsed of the Contour element. +If false, the *vmin* value is ignored. Parameters ---------- @@ -290,6 +292,12 @@ Examples >>> print(contour.getVmin()) 5.0 +Notes +----- +The full documentation is available in the Matplotlib +`contour `_ +page. + See Also -------- setVmin" @@ -304,6 +312,12 @@ Parameters vmin : float The vmin value of the Contour element. +Notes +----- +The full documentation is available in the Matplotlib +`contour `_ +page. + See Also -------- getVmin" @@ -312,6 +326,7 @@ getVmin" %feature("docstring") OT::Contour::isVmaxUsed "Accessor to the flag isVmaxUsed of the Contour element. 
+If false, the *vmax* value is ignored. Returns ------- @@ -333,6 +348,7 @@ setIsVmaxUsed" %feature("docstring") OT::Contour::setIsVmaxUsed "Accessor to the flag isVmaxUsed of the Contour element. +If false, the *vmax* value is ignored. Parameters ---------- @@ -361,6 +377,12 @@ Examples >>> print(contour.getVmax()) 5.0 +Notes +----- +The full documentation is available in the Matplotlib +`contour `_ +page. + See Also -------- setVmax" @@ -375,6 +397,12 @@ Parameters vmax : float The vmax value of the Contour element. +Notes +----- +The full documentation is available in the Matplotlib +`contour `_ +page. + See Also -------- getVmax" diff --git a/python/src/DistributionFactory_doc.i.in b/python/src/DistributionFactory_doc.i.in index d5d17c6850..b4c435f635 100644 --- a/python/src/DistributionFactory_doc.i.in +++ b/python/src/DistributionFactory_doc.i.in @@ -74,7 +74,7 @@ listFactories : collection of :class:`~openturns.DistributionFactory` // --------------------------------------------------------------------- %feature("docstring") OT::DistributionFactory::GetByName -"Instanciate a distribution factory. +"Instantiate a distribution factory. Parameters ---------- diff --git a/python/src/DistributionImplementation_doc.i.in b/python/src/DistributionImplementation_doc.i.in index 1ac1023b9b..0172f0dfc6 100644 --- a/python/src/DistributionImplementation_doc.i.in +++ b/python/src/DistributionImplementation_doc.i.in @@ -1245,7 +1245,7 @@ We consider a Normal(2) distribution with zero mean, unit standard deviation and We note :math:`\Phi_2` its cdf. Due to symetries of the distribution, the bilateral confidence interval is :math:`I^*_{\alpha} = [-a, a] \times \times [-a, a]` where :math:`a = \Phi^{-1}((1+\beta)/2)` where :math:`\Phi` is the marginal cdf of each component. Then :math:`\beta` is such that -:math:`\Phi_2(I^*_{\alpha}) = \alpha`. As :math:`\Phi_2(I^*_{\alpha}) = (2\Phi(a) - 1)^2 = _beta^2`, +:math:`\Phi_2(I^*_{\alpha}) = \alpha`. 
As :math:`\Phi_2(I^*_{\alpha}) = (2\Phi(a) - 1)^2 = \beta^2`, then, :math:`\beta` is equal to :math:`\beta = \sqrt{\alpha} \simeq 0.9486` and :math:`a \simeq -1.9488`. diff --git a/python/src/EnumerateFunctionImplementation_doc.i.in b/python/src/EnumerateFunctionImplementation_doc.i.in index a54c32401a..aed7af4962 100644 --- a/python/src/EnumerateFunctionImplementation_doc.i.in +++ b/python/src/EnumerateFunctionImplementation_doc.i.in @@ -342,3 +342,21 @@ ub : sequence of int %enddef %feature("docstring") OT::EnumerateFunctionImplementation::getUpperBound OT_EnumerateFunction_getUpperBound_doc + +// --------------------------------------------------------------------- + +%define OT_EnumerateFunction_getMarginal_doc +"Get the marginal enumerate function. + +Parameters +---------- +indices : int or sequence of int, :math:`0 \leq i < n` + List of marginal indices. + +Returns +------- +enumerateFunction : :class:`~openturns.EnumerateFunction` + The marginal enumerate function." +%enddef +%feature("docstring") OT::EnumerateFunctionImplementation::getMarginal +OT_EnumerateFunction_getMarginal_doc diff --git a/python/src/EnumerateFunction_doc.i.in b/python/src/EnumerateFunction_doc.i.in index 585f55c874..45640527df 100644 --- a/python/src/EnumerateFunction_doc.i.in +++ b/python/src/EnumerateFunction_doc.i.in @@ -22,3 +22,5 @@ OT_EnumerateFunction_setDimension_doc OT_EnumerateFunction_setUpperBound_doc %feature("docstring") OT::EnumerateFunction::getUpperBound OT_EnumerateFunction_getUpperBound_doc +%feature("docstring") OT::EnumerateFunction::getMarginal +OT_EnumerateFunction_getMarginal_doc diff --git a/python/src/GeneralLinearModelAlgorithm_doc.i.in b/python/src/GeneralLinearModelAlgorithm_doc.i.in index 4d2da8fea2..fc3a1bf26a 100644 --- a/python/src/GeneralLinearModelAlgorithm_doc.i.in +++ b/python/src/GeneralLinearModelAlgorithm_doc.i.in @@ -160,7 +160,9 @@ The behaviour of the reduction is controlled by the following keys in :class:`~o With huge samples, the 
`hierarchical matrix `_ implementation could be used if OpenTURNS had been compiled with `hmat-oss` support. -This implementation, which is based on a compressed representation of an approximated covariance matrix (and its Cholesky factor), has a better complexity both in terms of memory requirements and floating point operations. To use it, the `GeneralLinearModelAlgorithm-LinearAlgebra` resource map key should be instancied to `HMAT`. Default value of the key is `LAPACK`. +This implementation, which is based on a compressed representation of an approximated covariance matrix (and its Cholesky factor), +has a better complexity both in terms of memory requirements and floating point operations. +To use it, the `GeneralLinearModelAlgorithm-LinearAlgebra` resource map key should be set to `HMAT`. Default value of the key is `LAPACK`. A known centered gaussian observation noise :math:`\epsilon_k` can be taken into account with :func:`setNoise()`: diff --git a/python/src/HyperbolicAnisotropicEnumerateFunction_doc.i.in b/python/src/HyperbolicAnisotropicEnumerateFunction_doc.i.in index ef79424449..2b3380d58a 100644 --- a/python/src/HyperbolicAnisotropicEnumerateFunction_doc.i.in +++ b/python/src/HyperbolicAnisotropicEnumerateFunction_doc.i.in @@ -140,3 +140,4 @@ Parameters ---------- w : sequence of float Weights of the indices in each dimension." + diff --git a/python/src/InverseChiSquare_doc.i.in b/python/src/InverseChiSquare_doc.i.in index 4e98eb0ffa..1012a2087a 100644 --- a/python/src/InverseChiSquare_doc.i.in +++ b/python/src/InverseChiSquare_doc.i.in @@ -12,7 +12,7 @@ nu : float, :math:`\nu > 0` Notes ----- :math:`X` follows an Inverse ChiSquare distribution of parameter :math:`\nu` means that :math:`\dfrac{1}{X}` follows the :math:`\chi^2(\nu)` distribution, with :math:`\nu>0`. -The Inverse ChiSquare distribution parameterized by :math:`\nu` is exactly the :math:`InverseGamma(2, \dfrac{\nu}{2})` distribution. 
+The Inverse ChiSquare distribution parameterized by :math:`\nu` is exactly the :math:`InverseGamma(\dfrac{\nu}{2}, 2)` distribution. Its probability density function is defined as: diff --git a/python/src/InverseGamma_doc.i.in b/python/src/InverseGamma_doc.i.in index 99f3077558..de0bef4d94 100644 --- a/python/src/InverseGamma_doc.i.in +++ b/python/src/InverseGamma_doc.i.in @@ -1,16 +1,13 @@ %feature("docstring") OT::InverseGamma "InverseGamma distribution. - Parameters ---------- -lambda : float, :math:`\lambda > 0` - Rate parameter. - - Default value is 1.0. k : float, :math:`k > 0` Shape parameter. - + Default value is 1.0. +lambda : float, :math:`\lambda > 0` + Rate parameter. Default value is 1.0. Notes diff --git a/python/src/KernelSmoothing_doc.i.in b/python/src/KernelSmoothing_doc.i.in index 600a5d87df..17e36db2c8 100644 --- a/python/src/KernelSmoothing_doc.i.in +++ b/python/src/KernelSmoothing_doc.i.in @@ -10,21 +10,23 @@ kernel : :class:`~openturns.Distribution`, optional binned : bool, optional Activates bining mechanism only in the univariate or bivariate cases. It allows one to speed up the manipulation of the density function of the resulting distribution. By default, the mechanism is activated. binNumber : int, :math:`binNumber \geq 2`, optional - Indicates the number of bins used by the bining mechanism. By default, OpenTURNS uses the values stored in the *ResourceMap*. + Indicates the number of bins used by the bining mechanism. By default, OpenTURNS uses the values stored in :class:`~openturns.ResourceMap`. boundaryCorrection : bool, optional Activates the boundary correction using the mirroring technique. By default, the correction is not provided. Notes ----- -The binning mechanism creates a regular grid of *binNumber* intervals in each -dimension, then the unit weight of each point is linearly affected to the vertices -of the bin containing the point (see [wand1994]_ appendix D, page 182). 
-The `KernelSmoothing-BinNumber` key defines the default value of the -number of bins used in the _binning_ algorithm to improve the evaluation speed. +The binning mechanism is available in dimension 1 and 2 only. See the notes of the +:meth:`setBinning` method for details. + +The boundary correction is available in dimension 1 only, and it is done using +the mirroring technique (also named as the reflection correction). +See the notes of the :meth:`setBoundingOption` method for +details. -The boundary correction is available only in one dimension, and it is done using -the mirroring technique. See the notes of the :meth:`setBoundingOption` method for +It is possible to apply a log-transformation on the data in dimension 1 only, and build the kernel smoothing +distribution on the transformed data. See the notes of the :meth:`setUseLogTransform` method for details. When applied to multivariate samples, the kernel is the kernel product of the @@ -78,7 +80,8 @@ Nevertheless, the parameters can be manually set. Variants of the :meth:`build` method can be used when the distribution to build is expected to be of a certain type. In those cases however, the bandwidth must be user-specified. -To use :meth:`buildAsTruncatedDistribution`, boundary correction must be enabled. +To use :meth:`buildAsTruncatedDistribution`, boundary correction must be activated. +To use the LogTransform treatment, activate it with :meth:`setUseLogTransform`. 
>>> distribution = ks.buildAsKernelMixture(sample, bandwidth) >>> print(distribution.getClassName()) @@ -88,7 +91,11 @@ KernelMixture Mixture >>> distribution = ks.buildAsTruncatedDistribution(sample, bandwidth) >>> print(distribution.getClassName()) -TruncatedDistribution" +TruncatedDistribution +>>> ks.setUseLogTransform(True) +>>> distribution = ks.build(sample) +>>> print(distribution.getClassName()) +Distribution" // --------------------------------------------------------------------- %feature("docstring") OT::KernelSmoothing::buildAsKernelMixture @@ -103,12 +110,12 @@ bandwidth : :class:`~openturns.Point` Returns ------- -fittdDist : :class:`~openturns.KernelMixture` +fittedDist : :class:`~openturns.KernelMixture` The fitted distribution. Notes ----- -It builds a :math:`~openturns.KernelMixture` using the given data and bandwidth regardless of the binning or boundary treatment flags. +It builds a :class:`~openturns.KernelMixture` using the given data and bandwidth regardless of the binning or boundary treatment flags. Examples -------- @@ -132,12 +139,12 @@ bandwidth : :class:`~openturns.Point` Returns ------- -fittdDist : :class:`~openturns.KernelMixture` +fittedDist : :class:`~openturns.Mixture` The fitted distribution. Notes ----- -It builds a :math:`~openturns.Mixture` using the given bandwidth and a binning of the given data regardless of the bin number, the data size, the binning flag or boundary treatment flags. This method is available only for 1D or 2D samples. +It builds a :class:`~openturns.Mixture` using the given bandwidth and a binning of the given data regardless of the bin number, the data size, the binning flag or boundary treatment flags. This method is available only for 1D or 2D samples. 
Examples -------- @@ -161,7 +168,7 @@ bandwidth : :class:`~openturns.Point` Returns ------- -fittdDist : :class:`~openturns.TruncatedDistribution` +fittedDist : :class:`~openturns.TruncatedDistribution` The estimated distribution as a :class:`~openturns.TruncatedDistribution`. Examples @@ -186,7 +193,7 @@ bandwidth : :class:`~openturns.Point`, optional Returns ------- -fittdDist : :class:`~openturns.Distribution` +fittedDist : :class:`~openturns.Distribution` The fitted distribution. Notes @@ -194,17 +201,26 @@ Notes According to the dimension of the data and the specified treatments, the resulting distribution differs. - If the sample is constant, a :class:`~openturns.Dirac` distribution is built. -- If dimension > 2 or if no treatment has been asked for, a :class:`~openturns.KernelMixture` is built by calling *buildAsKernelMixture*. -- If dimension = 1 and a boundary treatment has been asked for, a :class:`~openturns.TruncatedDistribution` is built by calling *buildAsTruncatedDistribution* -- If dimension = 1 or 2 and no boundary treatment has been asked for, but a binning treatment has been asked for, - - If the sample size is greater than the bin number, then a :class:`~openturns.Mixture` is built by calling `buildAsMixture` - - Otherwise a :class:`~openturns.KernelMixture` is built by calling `buildAsKernelMixture` +- In dimension 1: + + - if no treatment is activated, a :class:`~openturns.KernelMixture` is built by using :meth:`buildAsKernelMixture`, + - if a boundary treatment is activated, a :class:`~openturns.TruncatedDistribution` is built by using :meth:`buildAsTruncatedDistribution`, + - if a log-transformation is activated, a :class:`~openturns.CompositeDistribution` is built by using :meth:`build`. + +- In dimension > 2: + + - no treatment (boundary correction or log-transformation) is available. A :class:`~openturns.KernelMixture` is built by using :meth:`buildAsKernelMixture`. 
+ +- In dimension 1 or 2, if a binning treatment is activated: + + - If the sample size is greater than the bin number, then a :class:`~openturns.Mixture` is built by using :meth:`buildAsMixture`, + - Otherwise a :class:`~openturns.KernelMixture` is built by using :meth:`buildAsKernelMixture`. -The bandwidth selection depends on the dimension. +The bandwidth selection depends on the dimension: -- If dimension = 1, then `computeMixedBandwidth` is used. -- Otherwise, then the only multivariate rule `computeSilvermanBandwidth` is used. + - If dimension 1, then :meth:`computeMixedBandwidth` is used, + - Otherwise, then the only multivariate rule :meth:`computeSilvermanBandwidth` is used. Examples -------- @@ -233,7 +249,7 @@ Compare the PDFs: Returns ------- bandwidth : :class:`~openturns.Point` - Bandwidth used in each direction. + Bandwidth. " // --------------------------------------------------------------------- @@ -248,13 +264,45 @@ kernel : :class:`~openturns.Distribution` // --------------------------------------------------------------------- +%feature("docstring") OT::KernelSmoothing::getBoundaryCorrection +"Accessor to the boundary correction flag. + +Returns +------- +boundaryCorrection : bool + Flag to tell if the boundary correction is activated. + +Notes +----- +This treatment is available in dimension 1 only." + +// --------------------------------------------------------------------- + %feature("docstring") OT::KernelSmoothing::setBoundaryCorrection "Accessor to the boundary correction flag. Parameters ---------- boundaryCorrection : bool - Activates the boundary correction using the mirroring technique." + Activates the boundary correction using the mirroring technique. + +Notes +----- +This treatment is available in dimension 1 only. See [jones1993]_ to get more details. 
+The *reflection* or *mirroring* method
+is used: the boundaries are automatically detected from the sample
+(with the :meth:`Sample.getMin` and :meth:`Sample.getMax` functions) and the kernel smoothed distribution
+is corrected in the boundary areas to remain within the boundaries,
+according to the mirroring technique:
+
+- the Scott bandwidth is evaluated from the sample: *h*
+- two sub-samples are extracted from the initial sample,
+  containing all the points within the range :math:`[min, min + h[` and :math:`]max-h, max]`,
+- both sub-samples are transformed into their symmetric samples with respect to their respective boundary:
+  this results in two samples within the range :math:`]min-h, min]` and :math:`[max, max+h[`,
+- a kernel smoothed PDF is built from the new sample composed with
+  the initial one and the two new ones, with the previous bandwidth *h*,
+- this last kernel smoothed PDF is truncated within the initial range :math:`[min, max]` (conditional PDF)."

 // ---------------------------------------------------------------------

@@ -275,9 +323,9 @@ The possible values for the bounding option are:
 - KernelSmoothing.UPPER or 2: apply the boundary correction to the upper bound
 - KernelSmoothing.BOTH or 3: apply the boundary correction to both bounds

-It applies only to 1D samples. Each bound can be defined by the user or computed
-automatically from the sample, see *setLowerBound*, *setUpperBound*,
-*setAutomaticLowerBound*, *setAutomaticUpperBound*."
+This treatment is available in dimension 1 only. Each bound can be defined by the user or computed
+automatically from the sample, see :meth:`setLowerBound`, :meth:`setUpperBound`,
+:meth:`setAutomaticLowerBound`, :meth:`setAutomaticUpperBound`."

 // ---------------------------------------------------------------------

@@ -291,6 +339,7 @@ lowerBound : float

 Notes
 -----
+This treatment is available in dimension 1 only.
 This method automatically sets the *automaticLowerBound* flag to *False*.
The given value will be taken into account only if *boundingOption* is set to either 1 or 3. If the algorithm is applied to a sample with a minimum value @@ -306,10 +355,11 @@ less than the user-defined lower bound and the *automaticLowerBound* is set to Parameters ---------- upperBound : float - A user-defined lower bound to take into account for boundary correction. + A user-defined upper bound to take into account for boundary correction. Notes ----- +This treatment is available in dimension 1 only. This method automatically sets the *automaticLowerBound* flag to *False*. The given value will be taken into account only if *boundingOption* is set to either 1 or 3. If the algorithm is applied to a sample with a minimum value @@ -325,7 +375,13 @@ less than the user-defined lower bound and the *automaticLowerBound* is set to Parameters ---------- automaticLowerBound : bool - Flag to tell if the user-defined lower bound has to be taken into account (value *False*) or if the minimum of the given sample has to be used (value *True*)." + Flag to tell if the lower bound is automatically calculated from the sample. + +Notes +----- +This treatment is available in dimension 1 only. +The automatic lower bound is the minimum of the given sample. In the other case, +the user has to specify the lower bound." // --------------------------------------------------------------------- @@ -335,7 +391,84 @@ automaticLowerBound : bool Parameters ---------- automaticUpperBound : bool - Flag to tell if the user-defined upper bound has to be taken into account (value *False*) or if the maximum of the given sample has to be used (value *True*)." + Flag to tell if the upper bound is automatically calculated from the sample. + +Notes +----- +This treatment is available in dimension 1 only. +The automatic upper bound is the maximum of the given sample. In the other case, +the user has to specify the upper bound." 
+ +// --------------------------------------------------------------------- + +%feature("docstring") OT::KernelSmoothing::getBinning +"Accessor to the binning flag. + +Returns +------- +binning : bool + Flag to tell if the binning treatment is activated. + +Notes +----- +This treatment is available in dimension 1 and 2 only." + +// --------------------------------------------------------------------- + +%feature("docstring") OT::KernelSmoothing::setBinning +"Accessor to the binning flag. + +Parameters +---------- +binning : bool + Flag to tell if the binning treatment is activated. + +Notes +----- +This treatment is available in dimension 1 and 2 only. +It creates a regular grid of *binNumber* +intervals in each +dimension, then the unit weight of each point is linearly affected to the vertices +of the bin containing the point (see [wand1994]_ appendix D, page 182). +The `KernelSmoothing-BinNumber` key of the class :class:`~openturns.ResourceMap` defines the default value of the +number of bins used in the _binning_ algorithm to improve the evaluation speed." + +// --------------------------------------------------------------------- + +%feature("docstring") OT::KernelSmoothing::setUseLogTransform +"Accessor to the log-transform flag. + +Parameters +---------- +useLogTransform : bool + Flag to tell if the kernel smoothing distribution is built on the log-transformed data. + +Notes +----- +This treatment is available in dimension 1 only. See [charpentier2015]_ to get more details. + +We denote by :math:`(X_i)_{1 \leq i \leq \sampleSize}` +some independent random variates, identically distributed according to :math:`X`. + +Refer to :ref:`kernel_smoothing` for the details. The shift +scale is fixed in the `KernelSmoothing-DefaultShiftScale` key of the class :class:`~openturns.ResourceMap`. 
+
+Once a kernel smoothed distribution has been fitted on the transformed data, the fitted distribution of :math:`X`
+is built as a :class:`~openturns.CompositeDistribution` from :math:`T^{-1}` and the kernel smoothed distribution."
+
+// ---------------------------------------------------------------------
+
+%feature("docstring") OT::KernelSmoothing::getUseLogTransform
+"Accessor to the log-transform flag.
+
+Returns
+-------
+useLogTransform : bool
+    Flag to tell if the kernel smoothing distribution is built on the log-transformed data.
+
+Notes
+-----
+This treatment is available in dimension 1 only."

 // ---------------------------------------------------------------------

 %feature("docstring") OT::KernelSmoothing::computeSilvermanBandwidth
@@ -344,13 +477,16 @@ automaticUpperBound : bool

 Returns
 -------
 bandwidth : :class:`~openturns.Point`
-    Bandwidth which components are evaluated according to the Silverman rule
-    assuming a normal distribution.
-    The bandwidth uses a robust estimate of the
-    sample standard deviation, based on the interquartile range introduced
-    in :ref:`kernel_smoothing` (rather than the sample standard deviation).
-    This method can manage a multivariate sample and produces a
-    multivariate bandwidth.
+    Bandwidth computed according to the Silverman rule.
+
+Notes
+-----
+Each component of the bandwidth is evaluated according to the Silverman rule
+assuming a normal distribution. The bandwidth uses a robust estimate of the
+sample standard deviation, based on the interquartile range introduced
+in :ref:`kernel_smoothing` (rather than the sample standard deviation).
+This method can manage a multivariate sample and produces a
+multivariate bandwidth.
"

 // ---------------------------------------------------------------------

@@ -360,11 +496,12 @@ bandwidth : :class:`~openturns.Point`

 Returns
 -------
 bandwidth : :class:`~openturns.Point`
-    Bandwidth which components are evaluated according to the plugin rule.
+    Bandwidth computed according to the plug-in rule.

 Notes
 -----
-This plug-in method is based on the *solve-the-equation* rule from [sheather1991]_.
+Each component of the bandwidth is evaluated according to
+the plug-in rule. This plug-in rule is based on the *solve-the-equation* method from [sheather1991]_.
 This method can take a lot of time for large samples, as the cost is quadratic with the
 sample size.

 Several keys of the :class:`~openturns.ResourceMap` are used by the [sheather1991]_ method.
@@ -373,17 +510,12 @@ Several keys of the :class:`~openturns.ResourceMap` are used by the [sheather199
   to estimate the bandwidth.
   It defines the absolute tolerance used by the solver
   to solve the nonlinear equation.
-
 - The `KernelSmoothing-MaximumIteration` key defines the maximum number of iterations used by the solver.
-
 - The `KernelSmoothing-RelativePrecision` key defines the relative tolerance.
-
 - The `KernelSmoothing-AbsolutePrecision` key defines the absolute tolerance.
-
 - The `KernelSmoothing-ResidualPrecision` key defines the absolute tolerance on the residual.
-
 - The `KernelSmoothing-CutOffPlugin` key is the cut-off value introduced in :ref:`kernel_smoothing`.
@@ -413,13 +545,29 @@ This method uses the *mixed* rule introduced in :ref:`kernel_smoothing`.
 Its goal is to provide an accurate estimator of the bandwidth
 when the sample size is large.

-Let :math:`n` be the sample size.
+Let :math:`\sampleSize` be the sample size.
 The estimator depends on the threshold sample size :math:`n_t` defined in the
-`KernelSmoothing-SmallSize` key of the :class:`~openturns.ResourceMap`.
+`KernelSmoothing-SmallSize` key of the :class:`~openturns.ResourceMap`:
+- if :math:`\sampleSize \leq n_t`, i.e. for a small sample, we use the plugin solve-the-equation method,
+- otherwise, the *mixed* rule is used."

-- If :math:`n \leq n_t`, i.e. for a small sample, we use the plugin solve-the-equation
-  method.
+// ---------------------------------------------------------------------

-- Otherwise, the *mixed* rule is used.
-"
+%feature("docstring") OT::KernelSmoothing::getBinNumber
+"Accessor to the bin number.
+
+Returns
+-------
+binNumber : int
+    The bin number."
+
+// ---------------------------------------------------------------------
+
+%feature("docstring") OT::KernelSmoothing::setBinNumber
+"Accessor to the bin number.
+
+Parameters
+----------
+binNumber : int
+    The bin number."
diff --git a/python/src/LinearModelValidation_doc.i.in b/python/src/LinearModelValidation_doc.i.in
index 9bff154c19..dca63c0c15 100644
--- a/python/src/LinearModelValidation_doc.i.in
+++ b/python/src/LinearModelValidation_doc.i.in
@@ -74,6 +74,7 @@ Create a linear model.

 >>> import openturns as ot
 >>> import openturns.experimental as otexp
+>>> ot.RandomGenerator.SetSeed(0)
 >>> func = ot.SymbolicFunction(
 ...     ['x1', 'x2', 'x3'],
 ...     ['x1 + x2 + sin(x2 * 2 * pi_) / 5 + 1e-3 * x3^2']
diff --git a/python/src/OrderStatisticsMarginalChecker_doc.i.in b/python/src/OrderStatisticsMarginalChecker_doc.i.in
index 135b9f4051..2559f0bbba 100644
--- a/python/src/OrderStatisticsMarginalChecker_doc.i.in
+++ b/python/src/OrderStatisticsMarginalChecker_doc.i.in
@@ -4,7 +4,8 @@
 Parameters
 ----------
 coll : sequence of :class:`~openturns.Distribution`
-    The marginals :math:`(F_1, \dots, F_n)` which are tested with respect to the order :math:`F_1 < \dots < F_n` in the context of the maximum order statistics distribution.
+    The marginals :math:`(F_1, \dots, F_n)` which are tested with respect to the order :math:`F_1 < \dots < F_n`
+    in the context of the maximum order statistics distribution.

 Notes
 -----
@@ -13,7 +14,8 @@ Three tests are performed. We note :math:`[a_i,b_i]` the range of :math:`X_i`. T

 - Test 1 checks that :math:`a_i \leq a_{i+1}` and :math:`b_i \leq b_{i+1}` for all :math:`i`.
-- Test 2 discretizes :math:`[0,1]` with :math:`\{\dfrac{1}{2n},\dfrac{3}{2n}, \dots,\dfrac{2n-1}{2n}\} = \{q_1, \dots, q_{2n-1} \}` where :math:`n` is defined in the :class:`~openturns.ResourceMap` with `OSMC-OptimizationEpsilon`. By default, :math:`n=100`. Test 2 checks that: +- Test 2 discretizes :math:`[0,1]` with :math:`\{\dfrac{1}{2n},\dfrac{3}{2n}, \dots,\dfrac{2n-1}{2n}\} = \{q_1, \dots, q_{2n-1} \}` + where :math:`n` is defined in the :class:`~openturns.ResourceMap` with `OSMC-OptimizationEpsilon`. By default, :math:`n=100`. Test 2 checks that: .. math:: @@ -57,7 +59,7 @@ resCompatibility : bool Notes ----- -This method throws an exception in case of compatibility problem with a message indicating the first compatibility problem arised. +This method throws an exception in case of compatibility problem with a message indicating the first compatibility problem found. " // --------------------------------------------------------------------- @@ -67,6 +69,28 @@ This method throws an exception in case of compatibility problem with a message Returns ------- indepMarginals : :class:`~openturns.Indices` - Indicates the indices that build some independent sets of marginals. If we note :math:`indepMarginals = [i_1, i_2]` then the sub random vectors :math:`(X_1, \dots, X_{i_1})`, :math:`(X_{i_1+1}, \dots, X_{i_2})` and :math:`(X_{i_2+1}, \dots, X_n)` are independent. - This information is automatically used by OpenTURNS to build the appropriated maximum entropy order statistics distribution." + Indicates the indices that build some independent sets of marginals. + If we note :math:`indepMarginals = [i_1, i_2]` then the sub random vectors :math:`(X_1, \dots, X_{i_1})`, + :math:`(X_{i_1+1}, \dots, X_{i_2})` and :math:`(X_{i_2+1}, \dots, X_n)` are independent. + This information is automatically used to build the appropriate maximum entropy order statistics distribution." 
+ +// --------------------------------------------------------------------- + +%feature("docstring") OT::OrderStatisticsMarginalChecker::getOptimizationAlgorithm +"Accessor to the optimization algorithm used for the computation. + +Returns +------- +algo : :class:`~openturns.OptimizationAlgorithm` + Optimization algorithm used for the computation." + +// --------------------------------------------------------------------- + +%feature("docstring") OT::OrderStatisticsMarginalChecker::setOptimizationAlgorithm +"Accessor to the optimization algorithm used for the computation. + +Parameters +---------- +algo : :class:`~openturns.OptimizationAlgorithm` + Optimization algorithm to use for the computation." diff --git a/python/src/Path.i b/python/src/Path.i index 7485265cec..728c05e026 100644 --- a/python/src/Path.i +++ b/python/src/Path.i @@ -10,11 +10,6 @@ %nodefaultctor Path; -// these are available in tempfile or os -%ignore OT::Path::GetTemporaryDirectory; -%ignore OT::Path::BuildTemporaryFileName; -%ignore OT::Path::CreateTemporaryDirectory; %ignore OT::Path::FindFileByNameInDirectoryList; -%ignore OT::Path::EscapeBackslash; %include openturns/Path.hxx diff --git a/python/src/Point.i b/python/src/Point.i index 503ed8e0d9..4ccd6f85bc 100644 --- a/python/src/Point.i +++ b/python/src/Point.i @@ -60,7 +60,7 @@ %ignore OT::Point::Point(std::initializer_list initList); %include openturns/Point.hxx -%copyctor Point; +%copyctor OT::Point; namespace OT { diff --git a/python/src/TimeSeries_doc.i.in b/python/src/TimeSeries_doc.i.in index 7b59eb3cae..17a975dacf 100644 --- a/python/src/TimeSeries_doc.i.in +++ b/python/src/TimeSeries_doc.i.in @@ -25,7 +25,7 @@ sample : 2-d sequence of float Values assigned to each time stamp of the time series. field : :class:`~openturns.Field` - Maps a field into a time series when the associated lesh cn be interpretated as a regular time grid. 
+    Maps a field into a time series when the associated mesh can be interpreted as a regular time grid.

 Examples
 --------
diff --git a/python/src/Wilks_doc.i.in b/python/src/Wilks_doc.i.in
index 7b01e516cd..1c35146775 100644
--- a/python/src/Wilks_doc.i.in
+++ b/python/src/Wilks_doc.i.in
@@ -1,66 +1,97 @@
 %feature("docstring") OT::Wilks
-"Class to evaluate the Wilks number.
+"Class to estimate a confidence interval on a quantile.

 Refer to :ref:`quantile_estimation_wilks`.

 Parameters
 ----------
-randomVector : :class:`~openturns.RandomVector` of dimension 1
-    Output variable of interest.
+X : :class:`~openturns.RandomVector`,
+    A random vector of dimension 1.

 Notes
 -----
-This class is a static class which enables the evaluation of the Wilks number:
-the minimal sample size :math:`N_{\alpha, \beta, i}` to perform in order to
-guarantee that the empirical quantile :math:`\alpha`, noted
-:math:`\tilde{q}_{\alpha} N_{\alpha, \beta, i}` evaluated with the
-:math:`(n - i)^{th}` maximum of the sample, noted :math:`X_{n - i}` be greater
-than the theoretical quantile :math:`q_{\alpha}` with a probability at least
-:math:`\beta`:
+This static class estimates an upper bound of the quantile of level :math:`\alpha \in [0,1]` of the random variable :math:`X`
+with a confidence greater than :math:`\beta`, using a given order statistic.
+
+Let :math:`x_{\alpha}` be the unknown quantile of level :math:`\alpha` of the random variable :math:`X` of dimension 1.
+Let :math:`(X_1, \dots, X_\sampleSize)` be a sample of independent and identically distributed variables according to :math:`X`.
+Let :math:`X_{(k)}` be the :math:`k` -th order statistic of :math:`(X_1, \dots, X_\sampleSize)` which means that
+:math:`X_{(k)}` is the :math:`k` -th smallest value of :math:`(X_1, \dots, X_\sampleSize)` for :math:`1 \leq k \leq \sampleSize`.
+For example, :math:`X_{(1)} = \min (X_1, \dots, X_\sampleSize)` is the minimum
+and :math:`X_{(\sampleSize)} = \max (X_1, \dots, X_\sampleSize)` is the maximum.
We have: + +.. math:: + + X_{(1)} \leq X_{(2)} \leq \dots \leq X_{(\sampleSize)} + +Given :math:`\alpha`, :math:`\beta` and :math:`i`, the class estimates the minimal size :math:`\sampleSize` such that: .. math:: - \Pset (\tilde{q}_{\alpha} N_{\alpha, \beta, i} > q_{\alpha}) > \beta + \Prob{x_{\alpha} \leq X_{(\sampleSize-i)}} \geq \beta -where :math:`\tilde{q}_{\alpha} N_{\alpha, \beta, i} = X_{n-i}`." +Once the minimal size :math:`\sampleSize` has been estimated, a sample of size :math:`\sampleSize` can be generated from +:math:`X` and an upper bound of :math:`x_{\alpha}` is estimated by the value of the :math:`X_{(\sampleSize-i)}` on the sample." // --------------------------------------------------------------------- %feature("docstring") OT::Wilks::ComputeSampleSize -"Evaluate the size of the sample. +"Evaluate the minimum size of the sample. Parameters ---------- -alpha : positive float :math:`< 1` - The order of the quantile we want to evaluate. -beta : positive float :math:`< 1` - Confidence on the evaluation of the empirical quantile. +alpha : positive float in :math:`[0,1)` + The level :math:`\alpha` of the quantile. +beta : positive float in :math:`[0,1)`, + The confidence level on the upper bound. i : int - Rank of the maximum which will evaluate the empirical quantile. Default - :math:`i = 0` (maximum of the sample) + The index such that :math:`X_{(\sampleSize -i)}` is an upper bound of :math:`x_{\alpha}` + with confidence :math:`\beta`. + Default value is :math:`i = 0`. Returns ------- -w : int - the Wilks number." +n : int, + The minimum size of the sample. + +Notes +----- +The minimum sample size :math:`\sampleSize` is such that: + +.. math:: + + \Prob{x_{\alpha} \leq X_{(\sampleSize-i)}} \geq \beta +" // --------------------------------------------------------------------- %feature("docstring") OT::Wilks::computeQuantileBound -"Evaluate the bound of the quantile. +"Evaluate an upper bound of a quantile. 
Parameters ---------- -alpha : positive float :math:`< 1` - The order of the quantile we want to evaluate. -beta : positive float :math:`< 1` - Confidence on the evaluation of the empirical quantile. +alpha : positive float in :math:`[0,1)` + The level :math:`\alpha` of the quantile. +beta : positive float in :math:`[0,1)` + The confidence level on the upper bound. i : int - Rank of the maximum which will evaluate the empirical quantile. Default - :math:`i = 0` (maximum of the sample) + The index such that :math:`X_{(\sampleSize -i)}` is an upper bound of :math:`x_{\alpha}` + with confidence level :math:`\beta`. + Default value is :math:`i = 0`. Returns ------- -q : :class:`~openturns.Point` - The estimate of the quantile upper bound for the given quantile level, at - the given confidence level and using the given upper statistics." +upperBound : :class:`~openturns.Point` + The estimate of the quantile upper bound. + +Notes +----- +The method starts by evaluating the minimum sample size :math:`\sampleSize` such that: + +.. math:: + + \Prob{x_{\alpha} \leq X_{(\sampleSize-i)}} \geq \beta + +Then, it generates a sample of size :math:`\sampleSize` from the random vector :math:`X`. The upper bound of :math:`x_{\alpha}` +is :math:`x_{(\sampleSize-i)}`, that is, the :math:`\sampleSize - i`-th observation in the ordered sample. 
+" diff --git a/python/src/common_module.i b/python/src/common_module.i index c135497ad1..082fb20b99 100644 --- a/python/src/common_module.i +++ b/python/src/common_module.i @@ -28,11 +28,6 @@ #endif #if defined(OPENTURNS_HAVE_HDF5) && defined(OPENTURNS_HAVE_LIBXML2) %include XMLH5StorageManager.i -#else -%pythoncode %{ -class XMLH5StorageManager: - pass -%} #endif %include TTY.i %include Log.i diff --git a/python/src/viewer.py b/python/src/viewer.py index 5a65fd418b..9f68bc81ad 100644 --- a/python/src/viewer.py +++ b/python/src/viewer.py @@ -21,9 +21,9 @@ import io try: - from pkg_resources import parse_version + from packaging.version import Version except ImportError: - from packaging.version import Version as parse_version + from pkg_resources import parse_version as Version __all__ = ["View", "PlotDesign"] @@ -233,7 +233,7 @@ def __init__( ) # Store matplotlib version - matplotlib_version = parse_version(matplotlib.__version__) + matplotlib_version = Version(matplotlib.__version__) # check that arguments are dictionaries figure_kw = self._CheckDict(figure_kw) @@ -417,7 +417,7 @@ def __init__( polygoncollection_kw["zorder"] = zorder contour_kw["zorder"] = zorder step_kw["zorder"] = zorder - if matplotlib_version >= parse_version("3.3"): + if matplotlib_version >= Version("3.3"): clabel_kw["zorder"] = zorder scatter_kw["zorder"] = zorder text_kw["zorder"] = zorder @@ -647,7 +647,7 @@ def __init__( if "norm" in contour_kw_default else contour.getColorMapNorm() ) - if type(norm) is str and matplotlib_version < parse_version("3.6.0"): + if type(norm) is str and matplotlib_version < Version("3.6.0"): # matplotlib before 3.6 does not support norms as strings try: normDict = { @@ -655,7 +655,7 @@ def __init__( "linear": cls.Normalize(), "log": cls.LogNorm(), "symlog": cls.SymLogNorm(linthresh=0.03) - if matplotlib_version < parse_version("3.2.0") + if matplotlib_version < Version("3.2.0") else cls.SymLogNorm(linthresh=0.03, base=10), } contour_kw["norm"] = 
normDict[norm] @@ -703,7 +703,7 @@ def __init__( legend_labels.append(drawable.getLegend()) if contour.getColorBarPosition() and len(contour.getLevels()) != 1: colorbar = None - if matplotlib_version >= parse_version("3.7.0"): + if matplotlib_version >= Version("3.7.0"): colorbar = self._fig.colorbar( contourset, location=contour.getColorBarPosition(), diff --git a/python/test/CMakeLists.txt b/python/test/CMakeLists.txt index dec59f40e3..e7ec0656bc 100644 --- a/python/test/CMakeLists.txt +++ b/python/test/CMakeLists.txt @@ -861,7 +861,6 @@ if (MATPLOTLIB_FOUND) set_tests_properties (pyinstallcheck_example_plot_optimization_rosenbrock PROPERTIES DISABLED TRUE) set_tests_properties (pyinstallcheck_example_plot_advanced_mle_estimator PROPERTIES DISABLED TRUE) set_tests_properties (pyinstallcheck_example_plot_kriging_advanced PROPERTIES DISABLED TRUE) - set_tests_properties (pyinstallcheck_example_plot_kriging_categorical PROPERTIES DISABLED TRUE) set_tests_properties (pyinstallcheck_example_plot_kriging_hyperparameters_optimization PROPERTIES DISABLED TRUE) endif () if (NOT Pagmo_FOUND) diff --git a/python/test/t_Binomial_std.expout b/python/test/t_Binomial_std.expout index 8d9594ca1c..11f8e08add 100644 --- a/python/test/t_Binomial_std.expout +++ b/python/test/t_Binomial_std.expout @@ -71,3 +71,45 @@ row = 9/37, x = 16, n = 30, pr = 0.999, P(X = x) = 1.4311328174833965e-34, CDF = computeCDF. Computed = 1.432662322407474e-34, expected = 1.4326623224074817e-34, abs.err. = 7.697562365065523e-49 computeComplementaryCDF. Computed = 1.0, expected = 1.0, abs.err. = 0.0 computeQuantile. 
Computed X = 17, expected = 16, diff = 1 +95.00% bilateral confidence interval = [7, 13] +n=59 k=1 p(k-1)=0.0485 p(k)=0.1991 ok=True +n=60 k=1 p(k-1)=0.0461 p(k)=0.1916 ok=True +n=61 k=1 p(k-1)=0.0438 p(k)=0.1843 ok=True +n=62 k=1 p(k-1)=0.0416 p(k)=0.1773 ok=True +n=63 k=1 p(k-1)=0.0395 p(k)=0.1705 ok=True +n=64 k=1 p(k-1)=0.0375 p(k)=0.1639 ok=True +n=65 k=1 p(k-1)=0.0356 p(k)=0.1576 ok=True +n=66 k=1 p(k-1)=0.0339 p(k)=0.1515 ok=True +n=67 k=1 p(k-1)=0.0322 p(k)=0.1456 ok=True +n=68 k=1 p(k-1)=0.0306 p(k)=0.1399 ok=True +n=69 k=1 p(k-1)=0.0290 p(k)=0.1345 ok=True +n=70 k=1 p(k-1)=0.0276 p(k)=0.1292 ok=True +n=71 k=1 p(k-1)=0.0262 p(k)=0.1241 ok=True +n=72 k=1 p(k-1)=0.0249 p(k)=0.1192 ok=True +n=73 k=1 p(k-1)=0.0236 p(k)=0.1145 ok=True +n=74 k=1 p(k-1)=0.0225 p(k)=0.1100 ok=True +n=75 k=1 p(k-1)=0.0213 p(k)=0.1056 ok=True +n=76 k=1 p(k-1)=0.0203 p(k)=0.1014 ok=True +n=77 k=1 p(k-1)=0.0193 p(k)=0.0973 ok=True +n=78 k=1 p(k-1)=0.0183 p(k)=0.0934 ok=True +n=79 k=1 p(k-1)=0.0174 p(k)=0.0897 ok=True +n=80 k=1 p(k-1)=0.0165 p(k)=0.0861 ok=True +n=81 k=1 p(k-1)=0.0157 p(k)=0.0826 ok=True +n=82 k=1 p(k-1)=0.0149 p(k)=0.0792 ok=True +n=83 k=1 p(k-1)=0.0142 p(k)=0.0760 ok=True +n=84 k=1 p(k-1)=0.0135 p(k)=0.0729 ok=True +n=85 k=1 p(k-1)=0.0128 p(k)=0.0699 ok=True +n=86 k=1 p(k-1)=0.0121 p(k)=0.0671 ok=True +n=87 k=1 p(k-1)=0.0115 p(k)=0.0643 ok=True +n=88 k=1 p(k-1)=0.0110 p(k)=0.0617 ok=True +n=89 k=1 p(k-1)=0.0104 p(k)=0.0592 ok=True +n=90 k=1 p(k-1)=0.0099 p(k)=0.0567 ok=True +n=91 k=1 p(k-1)=0.0094 p(k)=0.0544 ok=True +n=92 k=1 p(k-1)=0.0089 p(k)=0.0521 ok=True +n=93 k=2 p(k-1)=0.0500 p(k)=0.1504 ok=True +n=94 k=2 p(k-1)=0.0479 p(k)=0.1454 ok=True +n=95 k=2 p(k-1)=0.0459 p(k)=0.1405 ok=True +n=96 k=2 p(k-1)=0.0440 p(k)=0.1358 ok=True +n=97 k=2 p(k-1)=0.0422 p(k)=0.1312 ok=True +n=98 k=2 p(k-1)=0.0404 p(k)=0.1268 ok=True +n=99 k=2 p(k-1)=0.0387 p(k)=0.1225 ok=True diff --git a/python/test/t_Binomial_std.py b/python/test/t_Binomial_std.py index a2e96f1691..2a1d5fe4fa 
100755 --- a/python/test/t_Binomial_std.py +++ b/python/test/t_Binomial_std.py @@ -125,3 +125,14 @@ # %% +# quantile bug +alpha = 0.05 +beta = 0.05 +for n in range(59, 100): + d = ot.Binomial(n, alpha) + k = d.computeQuantile(beta)[0] + p1 = d.computeCDF(k - 1) + p2 = d.computeCDF(k) + ok = p1 < beta <= p2 + print(f"n={n} k={k:.0f} p(k-1)={p1:.4f} p(k)={p2:.4f} ok={ok}") + assert ok diff --git a/python/test/t_Bonmin_std.py b/python/test/t_Bonmin_std.py index 01ee596dda..82cb91039f 100755 --- a/python/test/t_Bonmin_std.py +++ b/python/test/t_Bonmin_std.py @@ -55,6 +55,7 @@ def stop(): bonminAlgorithm = ot.Bonmin(problem, "B-BB") bonminAlgorithm.setStartingPoint([0, 0, 0, 0]) bonminAlgorithm.setMaximumCallsNumber(10000) +bonminAlgorithm.setMaximumIterationNumber(1000) bonminAlgorithm.setProgressCallback(progress) bonminAlgorithm.setStopCallback(stop) diff --git a/python/test/t_DistributionFactory_std.py b/python/test/t_DistributionFactory_std.py index b3c4a4eb8b..7414948ce8 100755 --- a/python/test/t_DistributionFactory_std.py +++ b/python/test/t_DistributionFactory_std.py @@ -25,9 +25,9 @@ factories.add(ot.DistributionFactory.GetDiscreteUniVariateFactories()) for factory in factories: print(factory) - + dist = factory.build() # check if raise on constant sample - if factory.build().isContinuous(): + if dist.isContinuous(): sample = ot.Sample(100, [1.0e5]) ok = False try: @@ -39,7 +39,7 @@ # check if raises on sample with nan/inf for weird in ["nan", "inf"]: - sample = factory.build().getSample(100) + sample = dist.getSample(100) sample[0, 0] = float(weird) ok = False try: diff --git a/python/test/t_GaussianNonLinearCalibration_noobs.py b/python/test/t_GaussianNonLinearCalibration_noobs.py index cf7f35eb0c..154c172849 100755 --- a/python/test/t_GaussianNonLinearCalibration_noobs.py +++ b/python/test/t_GaussianNonLinearCalibration_noobs.py @@ -70,6 +70,7 @@ rtol = 0.0 atol = 0.5 ott.assert_almost_equal(parameterMAP, trueParameter, rtol, atol) + multiStartSize = 10 
algo.setOptimizationAlgorithm( ot.MultiStart( ot.TNC(), @@ -78,9 +79,7 @@ ot.Normal( candidate, ot.CovarianceMatrix(ot.Point(candidate).getDimension()) ), - ot.ResourceMap.GetAsUnsignedInteger( - "GaussianNonLinearCalibration-MultiStartSize" - ), + multiStartSize, ).generate(), ) ) diff --git a/python/test/t_GaussianNonLinearCalibration_std.py b/python/test/t_GaussianNonLinearCalibration_std.py index f71a5ddc05..e6be472473 100755 --- a/python/test/t_GaussianNonLinearCalibration_std.py +++ b/python/test/t_GaussianNonLinearCalibration_std.py @@ -42,6 +42,7 @@ algo.run() # To avoid discrepance between the platforms with or without CMinpack print("result (Auto)=", algo.getResult().getParameterMAP()) + multiStartSize = 10 algo.setOptimizationAlgorithm( ot.MultiStart( ot.TNC(), @@ -50,9 +51,7 @@ ot.Normal( candidate, ot.CovarianceMatrix(ot.Point(candidate).getDimension()) ), - ot.ResourceMap.GetAsUnsignedInteger( - "GaussianNonLinearCalibration-MultiStartSize" - ), + multiStartSize, ).generate(), ) ) diff --git a/python/test/t_Gibbs_mixture.py b/python/test/t_Gibbs_mixture.py index 3ce1fbeb2f..841a36d409 100755 --- a/python/test/t_Gibbs_mixture.py +++ b/python/test/t_Gibbs_mixture.py @@ -68,12 +68,12 @@ def zpost(pt): gibbs = ot.Gibbs([sampler0, sampler1, sampler2]) # Run the Gibbs algorithm -s = gibbs.getSample(10000) +s = gibbs.getSample(1000) # Extract the relevant marginals: the first (:math:`mu_0`) and the second (:math:`\mu_1`). 
posterior_sample = s[:, 0:2] mean = posterior_sample.computeMean() stddev = posterior_sample.computeStandardDeviation() print(mean, stddev) -ott.assert_almost_equal(mean, [-0.0788226, 2.80322]) -ott.assert_almost_equal(stddev, [0.0306272, 0.0591087]) +ott.assert_almost_equal(mean, [-0.078428, 2.80587]) +ott.assert_almost_equal(stddev, [0.0463082, 0.108863]) diff --git a/python/test/t_HSICEstimatorTargetSensitivity_std.py b/python/test/t_HSICEstimatorTargetSensitivity_std.py index 507d982450..0feb9e83ef 100755 --- a/python/test/t_HSICEstimatorTargetSensitivity_std.py +++ b/python/test/t_HSICEstimatorTargetSensitivity_std.py @@ -84,12 +84,12 @@ ott.assert_almost_equal(pvaluesAs, [0.00000000, 0.26201467, 0.28227083]) # We set the number of permutations for the pvalue estimate - b = 1000 + b = 100 TSA.setPermutationSize(b) # We get the pvalue estimate by permutations pvaluesPerm = TSA.getPValuesPermutation() - ott.assert_almost_equal(pvaluesPerm, [0.00000000, 0.23376623, 0.26573427]) + ott.assert_almost_equal(pvaluesPerm, [0, 0.257426, 0.217822]) # Change the filter function and recompute everything squaredExponential = ot.SymbolicFunction("x", "exp(-0.1 * x^2)") @@ -99,7 +99,8 @@ ott.assert_almost_equal( TSA.getHSICIndices(), [0.00118685, 4.12193e-05, 5.07577e-05], 1e-4, 0.0 ) - ott.assert_almost_equal(TSA.getPValuesPermutation(), [0, 0.137862, 0.112887]) + print(TSA.getPValuesPermutation()) + ott.assert_almost_equal(TSA.getPValuesPermutation(), [0.0, 0.118812, 0.158416]) ott.assert_almost_equal( TSA.getPValuesAsymptotic(), [7.32022e-13, 0.143851, 0.128866] ) diff --git a/python/test/t_HyperbolicAnisotropicEnumerateFunction_std.expout b/python/test/t_HyperbolicAnisotropicEnumerateFunction_std.expout index 6099b7099b..32df2d0c37 100644 --- a/python/test/t_HyperbolicAnisotropicEnumerateFunction_std.expout +++ b/python/test/t_HyperbolicAnisotropicEnumerateFunction_std.expout @@ -412,3 +412,25 @@ index= 21 [0,6] index= 22 [3,4] index= 23 [2,5] index= 24 [1,6] +Test 
getMarginal() from indices +index= 0 [0,0,0] 0 +index= 1 [1,0,0] 1 +index= 2 [0,1,0] 2 +index= 3 [0,0,1] 3 +index= 4 [2,0,0] 4 +index= 5 [0,2,0] 5 +index= 6 [0,0,2] 6 +index= 7 [3,0,0] 7 +index= 8 [0,3,0] 8 +index= 9 [0,0,3] 9 +Test getMarginal() from a single integer +index= 0 [0] 0 +index= 1 [1] 1 +index= 2 [2] 2 +index= 3 [3] 3 +index= 4 [4] 4 +index= 5 [5] 5 +index= 6 [6] 6 +index= 7 [7] 7 +index= 8 [8] 8 +index= 9 [9] 9 diff --git a/python/test/t_HyperbolicAnisotropicEnumerateFunction_std.py b/python/test/t_HyperbolicAnisotropicEnumerateFunction_std.py index dae082950f..150c5f1526 100755 --- a/python/test/t_HyperbolicAnisotropicEnumerateFunction_std.py +++ b/python/test/t_HyperbolicAnisotropicEnumerateFunction_std.py @@ -81,3 +81,19 @@ print("index=", index, repr(m)) assert m[0] <= 3, "wrong bound" assert index == index_inv, "wrong inverse" +# +print("Test getMarginal() from indices") +f = ot.HyperbolicAnisotropicEnumerateFunction(5, 0.5) +marginalf = f.getMarginal([0, 3, 4]) +for index in range(10): + m = marginalf(index) + index_inv = marginalf.inverse(m) + print("index=", index, repr(m), index_inv) + +print("Test getMarginal() from a single integer") +f = ot.HyperbolicAnisotropicEnumerateFunction(5, 0.5) +marginalf = f.getMarginal(3) +for index in range(10): + m = marginalf(index) + index_inv = marginalf.inverse(m) + print("index=", index, repr(m), index_inv) diff --git a/python/test/t_InverseGamma_std.expout b/python/test/t_InverseGamma_std.expout index 974bd68c58..e6bb0e771f 100644 --- a/python/test/t_InverseGamma_std.expout +++ b/python/test/t_InverseGamma_std.expout @@ -1,4 +1,4 @@ -Distribution InverseGamma(lambda = 2.5, k = 5.5) +Distribution InverseGamma(k = 5.5, lambda = 2.5) Elliptical = False Continuous = True oneRealization= [0.0599153] @@ -18,10 +18,10 @@ ccdf= 0.0954394676229 survival= 0.0954394676229 characteristic function=(0.999937, 0.0129291) log characteristic function=(2.03228e-05, 0.0129292) -pdf gradient = [-2.40895,-1.31286] -pdf 
gradient (FD)= [-2.40895,-1.31286] -cdf gradient = [0.127416,0.082705] -cdf gradient (FD)= [0.127416,0.082705] +pdf gradient = [-1.31286,-2.40895] +pdf gradient (FD)= [-1.31286,-2.40895] +cdf gradient = [0.082705,0.127416] +cdf gradient (FD)= [0.082705,0.127416] quantile= [0.174871] cdf(quantile)= 0.95 InverseSurvival= class=Point name=Unnamed dimension=1 values=[0.0406605] @@ -30,7 +30,7 @@ entropy=-1.930582 Minimum volume interval= [0.0280966, 0.177562] threshold= [0.95] Minimum volume level set= {x | f(x) <= 34.4467} with f= -MinimumVolumeLevelSetEvaluation(InverseGamma(lambda = 2.5, k = 5.5)) +MinimumVolumeLevelSetEvaluation(InverseGamma(k = 5.5, lambda = 2.5)) beta= [1.09642e-15] Bilateral confidence interval= [0.0364963, 0.209657] beta= [0.95] @@ -43,12 +43,12 @@ covariance= [[ 0.0022575 ]] correlation= [[ 1 ]] spearman= [[ 1 ]] kendall= [[ 1 ]] -parameters= [[lambda : 2.5, k : 5.5]] -Standard representative= InverseGamma(lambda = 5.5, k = 1) +parameters= [[k : 5.5, lambda : 2.5]] +Standard representative= InverseGamma(k = 5.5, lambda = 1) standard deviation= [0.0475131] skewness= [2.99333] kurtosis= [29.4] -Distribution InverseGamma(lambda = 2.5, k = 15) +Distribution InverseGamma(k = 15, lambda = 2.5) Elliptical = False Continuous = True oneRealization= [0.0272144] @@ -68,10 +68,10 @@ ccdf= 0.0102604279123 survival= 0.0102604279123 characteristic function=(0.999999, 0.00152381) log characteristic function=(-8.93075e-08, 0.00152381) -pdf gradient = [-4.76897,-1.04829] -pdf gradient (FD)= [-4.76897,-1.04829] -cdf gradient = [0.0339127,0.00792151] -cdf gradient (FD)= [0.0339127,0.00792151] +pdf gradient = [-1.04829,-4.76897] +pdf gradient (FD)= [-1.04829,-4.76897] +cdf gradient = [0.00792151,0.0339127] +cdf gradient (FD)= [0.00792151,0.0339127] quantile= [0.0432604] cdf(quantile)= 0.95 InverseSurvival= class=Point name=Unnamed dimension=1 values=[0.0182761] @@ -80,7 +80,7 @@ entropy=-3.514616 Minimum volume interval= [0.0154445, 0.0443646] threshold= [0.95] 
Minimum volume level set= {x | f(x) <= -1.89324} with f= -MinimumVolumeLevelSetEvaluation(InverseGamma(lambda = 2.5, k = 15)) +MinimumVolumeLevelSetEvaluation(InverseGamma(k = 15, lambda = 2.5)) beta= [6.64087] Bilateral confidence interval= [0.0170288, 0.0476452] beta= [0.95] @@ -93,8 +93,8 @@ covariance= [[ 6.27943e-05 ]] correlation= [[ 1 ]] spearman= [[ 1 ]] kendall= [[ 1 ]] -parameters= [[lambda : 2.5, k : 15]] -Standard representative= InverseGamma(lambda = 15, k = 1) +parameters= [[k : 15, lambda : 2.5]] +Standard representative= InverseGamma(k = 15, lambda = 1) standard deviation= [0.00792429] skewness= [1.20185] kurtosis= [5.90909] diff --git a/python/test/t_InverseGamma_std.py b/python/test/t_InverseGamma_std.py index 73802aca03..0be40c1fc3 100755 --- a/python/test/t_InverseGamma_std.py +++ b/python/test/t_InverseGamma_std.py @@ -5,7 +5,7 @@ ot.TESTPREAMBLE() -allDistributions = [ot.InverseGamma(2.5, 5.5), ot.InverseGamma(2.5, 15.0)] +allDistributions = [ot.InverseGamma(5.5, 2.5), ot.InverseGamma(15.0, 2.5)] for n in range(len(allDistributions)): distribution = allDistributions[n] print("Distribution ", distribution) @@ -86,19 +86,19 @@ print("pdf gradient =", PDFgr) PDFgrFD = ot.Point(2) PDFgrFD[0] = ( - ot.InverseGamma(distribution.getLambda() + eps, distribution.getK()).computePDF( + ot.InverseGamma(distribution.getK() + eps, distribution.getLambda()).computePDF( point ) - ot.InverseGamma( - distribution.getLambda() - eps, distribution.getK() + distribution.getK() - eps, distribution.getLambda() ).computePDF(point) ) / (2.0 * eps) PDFgrFD[1] = ( - ot.InverseGamma(distribution.getLambda(), distribution.getK() + eps).computePDF( + ot.InverseGamma(distribution.getK(), distribution.getLambda() + eps).computePDF( point ) - ot.InverseGamma( - distribution.getLambda(), distribution.getK() - eps + distribution.getK(), distribution.getLambda() - eps ).computePDF(point) ) / (2.0 * eps) print("pdf gradient (FD)=", PDFgrFD) @@ -106,19 +106,19 @@ print("cdf 
gradient =", CDFgr) CDFgrFD = ot.Point(2) CDFgrFD[0] = ( - ot.InverseGamma(distribution.getLambda() + eps, distribution.getK()).computeCDF( + ot.InverseGamma(distribution.getK() + eps, distribution.getLambda()).computeCDF( point ) - ot.InverseGamma( - distribution.getLambda() - eps, distribution.getK() + distribution.getK() - eps, distribution.getLambda() ).computeCDF(point) ) / (2.0 * eps) CDFgrFD[1] = ( - ot.InverseGamma(distribution.getLambda(), distribution.getK() + eps).computeCDF( + ot.InverseGamma(distribution.getK(), distribution.getLambda() + eps).computeCDF( point ) - ot.InverseGamma( - distribution.getLambda(), distribution.getK() - eps + distribution.getK(), distribution.getLambda() - eps ).computeCDF(point) ) / (2.0 * eps) print("cdf gradient (FD)=", CDFgrFD) diff --git a/python/test/t_InverseWishart_std.py b/python/test/t_InverseWishart_std.py index dfc8b34643..360f62afaf 100755 --- a/python/test/t_InverseWishart_std.py +++ b/python/test/t_InverseWishart_std.py @@ -60,7 +60,7 @@ def setUpClass(cls): cls.one_dimensional_inverse_wishart = ot.InverseWishart( ot.CovarianceMatrix([[scale]]), DoF ) - cls.inverse_gamma = ot.InverseGamma(1.0 / cls.beta, cls.k) + cls.inverse_gamma = ot.InverseGamma(cls.k, 1.0 / cls.beta) # attributes to test a multi-dimensional InverseWishart cls.dimension = 5 cls.DoF = cls.dimension + 3 + U.getRealization()[0] @@ -125,7 +125,7 @@ def test_computeLogPDF_diagonal_case(self): dimension, 0.5 * DoF ) + dimension * ot.SpecFunc.LogGamma(0.5 * (DoF + dimension - 1)) for d in range(dimension): - inverse_gamma = ot.InverseGamma(2.0 / Scale[d, d], k) + inverse_gamma = ot.InverseGamma(k, 2.0 / Scale[d, d]) logdensity = logdensity - inverse_gamma.computeLogPDF(diagX[d, 0]) logratio = logratio + 0.5 * (1 - dimension) * log(0.5 * Scale[d, d]) ott.assert_almost_equal(logdensity, logratio) diff --git a/python/test/t_JointDistribution_std.expout b/python/test/t_JointDistribution_std.expout index cc5fd77f0b..8d1fa59cf8 100644 --- 
a/python/test/t_JointDistribution_std.expout +++ b/python/test/t_JointDistribution_std.expout @@ -345,29 +345,27 @@ JointDistribution Parameters [[mu_0_marginal_0 : 3, sigma_0_marginal_0 : 2],[mu_0_marginal_1 : 2, sigma_0_marginal_1 : 3],[mu_0_marginal_2 : 1, sigma_0_marginal_2 : 4],[x_0^0_core : 0, h_0_core : 1]] nCore= 4 -entropy=5.19767e+00 -entropy (MC)=3.88293e+00 Mean [2.68546,1.52819,0.370924] Covariance Elliptical distribution= False Elliptical copula= False Independent copula= False -oneRealization= [2.01631,-0.27946,0.808824] +oneRealization= [2.47375,1.28004,2.70986] oneSample= [ One Two Three ] -0 : [ 2.34836 1.24943 1.20811 ] -1 : [ 1.99841 0.907041 -0.0312187 ] -2 : [ 3.13525 0.250368 0.15086 ] -3 : [ 2.02497 1.91411 -1.65737 ] -4 : [ 2.68345 2.40798 -0.526989 ] -5 : [ 2.48291 0.689151 1.06407 ] -6 : [ 3.23655 2.44213 -0.410566 ] -7 : [ 1.40593 0.675174 1.41943 ] -8 : [ 2.13836 3.92642 1.41709 ] -9 : [ 2.46907 1.8888 1.66652 ] -anotherSample mean= [2.68537,1.53137,0.383789] -anotherSample covariance= [[ 0.398306 -0.0120354 -0.0115013 ] - [ -0.0120354 0.891089 0.009268 ] - [ -0.0115013 0.009268 1.57741 ]] +0 : [ 2.34744 1.18181 -1.48221 ] +1 : [ 2.7688 2.17159 -0.0466215 ] +2 : [ 3.29622 2.10719 1.0288 ] +3 : [ 2.82913 2.02964 1.57338 ] +4 : [ 3.25336 2.96383 -1.2737 ] +5 : [ 3.57357 1.81325 0.3924 ] +6 : [ 1.62737 1.9019 2.1167 ] +7 : [ 2.59277 0.864636 0.325633 ] +8 : [ 2.15339 0.414897 -1.84509 ] +9 : [ 2.56808 -0.0368341 -0.20478 ] +anotherSample mean= [2.68358,1.52794,0.354861] +anotherSample covariance= [[ 0.389789 0.00279988 -0.00535941 ] + [ 0.00279988 0.899959 0.00446549 ] + [ -0.00535941 0.00446549 1.58906 ]] Zero point= [0.0, 0.0, 0.0] pdf=0.00000e+00 cdf=0.00000e+00 Quantile= [4.03964,3.55946,3.07928] CDF(quantile)=9.50000e-01 @@ -375,17 +373,17 @@ margin= JointDistribution(Normal(mu = 3, sigma = 2)) margin PDF=0.00000e+00 margin CDF=0.00000e+00 margin quantile= [3.77516] -margin realization= [2.25633] +margin realization= [2.29997] margin= 
JointDistribution(Normal(mu = 2, sigma = 3)) margin PDF=1.55143e-01 margin CDF=4.07431e-02 margin quantile= [3.16275] -margin realization= [2.74422] +margin realization= [1.07153] margin= JointDistribution(Normal(mu = 1, sigma = 4)) margin PDF=2.86412e-01 margin CDF=4.11240e-01 margin quantile= [2.55033] -margin realization= [2.54773] +margin realization= [-1.12608] indices= [1, 0] margins= JointDistribution(Normal(mu = 2, sigma = 3), Normal(mu = 3, sigma = 2), KernelMixture(kernel = Beta(alpha = 2, beta = 3, a = 0.2, b = 0.8), bandwidth = [1,1], sample = 0 : [ 0 0 ]) @@ -393,17 +391,17 @@ margins PDF=0.00000e+00 margins CDF=0.00000e+00 margins quantile= [2.05979,3.03986] margins CDF(quantile)=5.00000e-01 -margins realization= [1.09898,1.8244] +margins realization= [2.32558,2.52062] conditional PDF=0.00000e+00 conditional CDF=0.00000e+00 conditional quantile=4.36648e+00 sequential conditional PDF= [0.122043,0.243098,0.0176626] sequential conditional CDF( [1.5, 2.5, 3.5] )= [0.0111293,0.832468,0.995118] sequential conditional quantile( [0.0111293,0.832468,0.995118] )= [1.5,2.5,3.5] -anotherSample mean= [2.70038,1.52644,0.367288] -anotherSample covariance= [[ 0.406464 0.00197769 0.0034419 ] - [ 0.00197769 0.893273 7.39114e-05 ] - [ 0.0034419 7.39114e-05 1.56214 ]] +anotherSample mean= [2.68499,1.51644,0.363684] +anotherSample covariance= [[ 0.399154 0.00656646 0.00735094 ] + [ 0.00656646 0.892133 -0.0207362 ] + [ 0.00735094 -0.0207362 1.6153 ]] Distribution JointDistribution(Normal(mu = 0, sigma = 1), Uniform(a = 12345.6, b = 123457), TruncatedDistribution(Normal(mu = 2, sigma = 1.5), bounds = [1, 4]), IndependentCopula(dimension = 3)) Distribution (Markdown) @@ -420,38 +418,38 @@ JointDistribution | 2 | Three | TruncatedDistribution(Normal(mu = 2, sigma = 1.5), bounds = [1, 4]) | [ One Two Three ] -0 : [ 2.48105 2.05321 -1.94393 ] -1 : [ 3.18572 0.844792 0.958033 ] -2 : [ 3.48657 2.74036 -0.689542 ] -3 : [ 3.02652 1.7995 1.04916 ] -4 : [ 3.40255 1.77738 -1.04242 ] 
-5 : [ 1.41575 1.79786 -0.981916 ] -6 : [ 2.70755 1.47211 0.101387 ] -7 : [ 3.4692 1.02521 -1.13322 ] -8 : [ 2.7703 0.421518 0.544253 ] -9 : [ 3.27239 0.782717 2.64054 ] - [ Y0 Y1 Y2 ] -0 : [ -0.252322 0.538768 -2.175 ] -1 : [ 0.751601 -0.637252 0.459225 ] -2 : [ 1.19032 1.20084 -0.758656 ] -3 : [ 0.526147 0.300548 0.523413 ] -4 : [ 1.0653 0.279761 -1.06585 ] -5 : [ -2.72346 0.299012 -1.0107 ] -6 : [ 0.075735 -0.00931063 -0.150097 ] -7 : [ 1.16427 -0.449137 -1.15102 ] -8 : [ 0.164877 -1.12388 0.167471 ] -9 : [ 0.875756 -0.70406 1.72086 ] +0 : [ 2.86935 1.72498 0.606077 ] +1 : [ 2.02058 1.06736 1.44694 ] +2 : [ 3.76145 -0.259331 0.692046 ] +3 : [ 2.52815 0.66119 2.14081 ] +4 : [ 2.54643 2.89504 1.05626 ] +5 : [ 2.37774 0.415634 1.12497 ] +6 : [ 2.26951 2.19607 2.8116 ] +7 : [ 3.29997 4.45568 2.45291 ] +8 : [ 2.83298 1.12661 0.675113 ] +9 : [ 3.72817 1.99207 -0.361464 ] + [ Y0 Y1 Y2 ] +0 : [ 0.304801 0.230459 0.211217 ] +1 : [ -0.989978 -0.406284 0.805514 ] +2 : [ 1.62207 -2.31221 0.271915 ] +3 : [ -0.183034 -0.838698 1.31778 ] +4 : [ -0.156316 1.35841 0.528413 ] +5 : [ -0.407043 -1.1313 0.576881 ] +6 : [ -0.574315 0.673411 1.87015 ] +7 : [ 0.915568 4.41344 1.5646 ] +8 : [ 0.253509 -0.346639 0.259969 ] +9 : [ 1.56739 0.481327 -0.496658 ] [ y0 y1 y2 ] -0 : [ 2.48105 2.05321 -1.94393 ] -1 : [ 3.18572 0.844792 0.958033 ] -2 : [ 3.48657 2.74036 -0.689542 ] -3 : [ 3.02652 1.7995 1.04916 ] -4 : [ 3.40255 1.77738 -1.04242 ] -5 : [ 1.41575 1.79786 -0.981916 ] -6 : [ 2.70755 1.47211 0.101387 ] -7 : [ 3.4692 1.02521 -1.13322 ] -8 : [ 2.7703 0.421518 0.544253 ] -9 : [ 3.27239 0.782717 2.64054 ] +0 : [ 2.86935 1.72498 0.606077 ] +1 : [ 2.02058 1.06736 1.44694 ] +2 : [ 3.76145 -0.259331 0.692046 ] +3 : [ 2.52815 0.66119 2.14081 ] +4 : [ 2.54643 2.89504 1.05626 ] +5 : [ 2.37774 0.415634 1.12497 ] +6 : [ 2.26951 2.19607 2.8116 ] +7 : [ 3.29997 4.45568 2.45291 ] +8 : [ 2.83298 1.12661 0.675113 ] +9 : [ 3.72817 1.99207 -0.361464 ] conditional PDF=0.000000 conditional CDF=0.000000 
conditional quantile=4.366485 diff --git a/python/test/t_JointDistribution_std.py b/python/test/t_JointDistribution_std.py index a96f91f87a..07011ee2ee 100755 --- a/python/test/t_JointDistribution_std.py +++ b/python/test/t_JointDistribution_std.py @@ -89,19 +89,16 @@ # Create a copula aCopula = ot.IndependentCopula(dim) aCopula.setName("Independent copula") -cores = list() -cores.append(aCopula) +cores = [aCopula] # With a Normal copula correlation = ot.CorrelationMatrix(dim) for i in range(1, dim): correlation[i - 1, i] = 0.25 - anotherCopula = ot.NormalCopula(correlation) +anotherCopula = ot.NormalCopula(correlation) anotherCopula.setName("Normal copula") cores.append(anotherCopula) # With a copula which is not a copula by type -atoms = list() -atoms.append(aCopula) -atoms.append(anotherCopula) +atoms = [aCopula, anotherCopula] cores.append(ot.Mixture(atoms, [0.25, 0.75])) # With a non-copula core cores.append(otexp.UniformOrderStatistics(dim)) @@ -120,7 +117,9 @@ print(distribution.__repr_markdown__()) print("Parameters", distribution.getParametersCollection()) print("nCore=", nCore) - if nCore != 2: + + # too slow for Mixture/KernelMixture + if "Mixture" not in distribution.getCore().getImplementation().getName(): print("entropy=%.5e" % distribution.computeEntropy()) print( "entropy (MC)=%.5e" diff --git a/python/test/t_KernelSmoothing_std.py b/python/test/t_KernelSmoothing_std.py index e0e0ef3385..2e861af324 100755 --- a/python/test/t_KernelSmoothing_std.py +++ b/python/test/t_KernelSmoothing_std.py @@ -1,6 +1,7 @@ #! /usr/bin/env python import openturns as ot +from openturns.testing import assert_almost_equal ot.TESTPREAMBLE() @@ -229,3 +230,20 @@ sample = distribution.getSample(30) h = factory.computePluginBandwidth(sample)[0] print("with reduced cutoff. 
h=%.6g" % (h)) + +# test of logTransform +for i, distribution in enumerate([ot.LogNormal(0.0, 2.5), + ot.Beta(20000.5, 2.5, 0.0, 1.0), + ot.Exponential(), + ot.WeibullMax(1.0, 0.9, 0.0), + ot.Mixture([ot.LogNormal(-1.0, 1.0, -1.0), ot.LogNormal(1.0, 1.0, 1.0)], [0.2, 0.8])]): + sample = distribution.getSample(10000) + kernel = ot.KernelSmoothing() + kernel.setUseLogTransform(True) + fitted = kernel.build(sample) + quantile = distribution.computeQuantile(0.25) + assert_almost_equal(distribution.computePDF(quantile), fitted.computePDF(quantile), 0.05) + quantile = distribution.computeQuantile(0.5) + assert_almost_equal(distribution.computePDF(quantile), fitted.computePDF(quantile), 0.05) + quantile = distribution.computeQuantile(0.75) + assert_almost_equal(distribution.computePDF(quantile), fitted.computePDF(quantile), 0.05) diff --git a/python/test/t_LinearEnumerateFunction_std.expout b/python/test/t_LinearEnumerateFunction_std.expout index f85fda2983..2ac5ac5a25 100644 --- a/python/test/t_LinearEnumerateFunction_std.expout +++ b/python/test/t_LinearEnumerateFunction_std.expout @@ -136,3 +136,25 @@ degree 8 max_degree_strata_index 8 degree 8 size 165 degree 9 max_degree_strata_index 9 degree 9 size 220 +Test getMarginal() from Indices +index= 0 [0,0,0] 0 +index= 1 [1,0,0] 1 +index= 2 [0,1,0] 2 +index= 3 [0,0,1] 3 +index= 4 [2,0,0] 4 +index= 5 [1,1,0] 5 +index= 6 [1,0,1] 6 +index= 7 [0,2,0] 7 +index= 8 [0,1,1] 8 +index= 9 [0,0,2] 9 +Test getMarginal() from a single integer +index= 0 [0] 0 +index= 1 [1] 1 +index= 2 [2] 2 +index= 3 [3] 3 +index= 4 [4] 4 +index= 5 [5] 5 +index= 6 [6] 6 +index= 7 [7] 7 +index= 8 [8] 8 +index= 9 [9] 9 diff --git a/python/test/t_LinearEnumerateFunction_std.py b/python/test/t_LinearEnumerateFunction_std.py index b6a128b8fb..12598dd353 100755 --- a/python/test/t_LinearEnumerateFunction_std.py +++ b/python/test/t_LinearEnumerateFunction_std.py @@ -19,3 +19,19 @@ print("degree", d, "max_degree_strata_index", idx) size = 
f.getBasisSizeFromTotalDegree(d) print("degree", d, "size", size) +# +print("Test getMarginal() from Indices") +f = ot.LinearEnumerateFunction(5) +marginalf = f.getMarginal([0, 3, 4]) +for index in range(10): + m = marginalf(index) + index_inv = marginalf.inverse(m) + print("index=", index, repr(m), index_inv) + +print("Test getMarginal() from a single integer") +f = ot.LinearEnumerateFunction(5) +marginalf = f.getMarginal(3) +for index in range(10): + m = marginalf(index) + index_inv = marginalf.inverse(m) + print("index=", index, repr(m), index_inv) diff --git a/python/test/t_NonLinearLeastSquaresCalibration_noobs.py b/python/test/t_NonLinearLeastSquaresCalibration_noobs.py index 6706936e69..312a34a76c 100755 --- a/python/test/t_NonLinearLeastSquaresCalibration_noobs.py +++ b/python/test/t_NonLinearLeastSquaresCalibration_noobs.py @@ -63,6 +63,7 @@ rtol = 1.0e-2 atol = 0.0 ott.assert_almost_equal(parameterMAP, trueParameter, rtol, atol) + multiStartSize = 10 algo.setOptimizationAlgorithm( ot.MultiStart( ot.TNC(), @@ -71,9 +72,7 @@ ot.Normal( candidate, ot.CovarianceMatrix(ot.Point(candidate).getDimension()) ), - ot.ResourceMap.GetAsUnsignedInteger( - "NonLinearLeastSquaresCalibration-MultiStartSize" - ), + multiStartSize, ).generate(), ) ) diff --git a/python/test/t_NonLinearLeastSquaresCalibration_std.py b/python/test/t_NonLinearLeastSquaresCalibration_std.py index 67a12ce74c..abbcf91e01 100755 --- a/python/test/t_NonLinearLeastSquaresCalibration_std.py +++ b/python/test/t_NonLinearLeastSquaresCalibration_std.py @@ -29,6 +29,7 @@ ott.assert_almost_equal( result.getObservationsError().getMean(), [0.0051, -0.0028], 1e-1, 1e-3 ) + multiStartSize = 10 algo.setOptimizationAlgorithm( ot.MultiStart( ot.TNC(), @@ -37,9 +38,7 @@ ot.Normal( candidate, ot.CovarianceMatrix(ot.Point(candidate).getDimension()) ), - ot.ResourceMap.GetAsUnsignedInteger( - "NonLinearLeastSquaresCalibration-MultiStartSize" - ), + multiStartSize, ).generate(), ) ) diff --git 
a/python/test/t_NormInfEnumerateFunction_std.expout b/python/test/t_NormInfEnumerateFunction_std.expout index 97d8ffef10..537af6d21a 100644 --- a/python/test/t_NormInfEnumerateFunction_std.expout +++ b/python/test/t_NormInfEnumerateFunction_std.expout @@ -135,3 +135,25 @@ index= 21 [1,5] 21 index= 22 [2,5] 22 index= 23 [3,5] 23 index= 24 [0,6] 24 +Test getMarginal() from indices +index= 0 [0,0,0] 0 +index= 1 [1,0,0] 1 +index= 2 [0,1,0] 2 +index= 3 [1,1,0] 3 +index= 4 [0,0,1] 4 +index= 5 [1,0,1] 5 +index= 6 [0,1,1] 6 +index= 7 [1,1,1] 7 +index= 8 [2,0,0] 8 +index= 9 [2,1,0] 9 +Test getMarginal() from a single integer +index= 0 [0] 0 +index= 1 [1] 1 +index= 2 [2] 2 +index= 3 [3] 3 +index= 4 [4] 4 +index= 5 [5] 5 +index= 6 [6] 6 +index= 7 [7] 7 +index= 8 [8] 8 +index= 9 [9] 9 diff --git a/python/test/t_NormInfEnumerateFunction_std.py b/python/test/t_NormInfEnumerateFunction_std.py index b1b9e333d7..cd8224fdf7 100755 --- a/python/test/t_NormInfEnumerateFunction_std.py +++ b/python/test/t_NormInfEnumerateFunction_std.py @@ -29,3 +29,19 @@ print("index=", index, repr(m), index_inv) assert m[0] <= 3, "wrong bound" assert index == index_inv, "wrong inverse" +# +print("Test getMarginal() from indices") +f = ot.NormInfEnumerateFunction(5) +marginalf = f.getMarginal([0, 3, 4]) +for index in range(10): + m = marginalf(index) + index_inv = marginalf.inverse(m) + print("index=", index, repr(m), index_inv) +# +print("Test getMarginal() from a single integer") +f = ot.NormInfEnumerateFunction(5) +marginalf = f.getMarginal(3) +for index in range(10): + m = marginalf(index) + index_inv = marginalf.inverse(m) + print("index=", index, repr(m), index_inv) diff --git a/python/test/t_Normal_std.expout b/python/test/t_Normal_std.expout index 92d03bed3a..ff4ad731b0 100644 --- a/python/test/t_Normal_std.expout +++ b/python/test/t_Normal_std.expout @@ -254,9 +254,6 @@ cdf=0.221452 ccdf=0.778548 pdf gradient = class=Point name=Unnamed dimension=14 
values=[0.00095188,-0.00013598,0.00013598,0,-0.0011559,-0.0008499,-0.00052128,-0.00040795,0.0017995,-0.0010675,0.0025429,0.00065273,-0.0013055,0.0019582] pdf gradient (FD)= class=Point name=Unnamed dimension=8 values=[0.00095188,-0.00013598,0.00013598,0,-0.0011559,-0.0008499,-0.00052128,-0.00040795] -quantile= class=Point name=Unnamed dimension=4 values=[2.1934,4.3868,6.5802,8.7736] -cdf(quantile)=0.950000 -InverseSurvival= class=Point name=Unnamed dimension=4 values=[-2.1934,-4.3868,-6.5802,-8.7736] entropy=8.272233 mean= class=Point name=Unnamed dimension=4 values=[0,0,0,0] standard deviation= class=Point name=Unnamed dimension=4 values=[1,2,3,4] diff --git a/python/test/t_Normal_std.py b/python/test/t_Normal_std.py index ca769f4390..c884a173f2 100755 --- a/python/test/t_Normal_std.py +++ b/python/test/t_Normal_std.py @@ -133,13 +133,13 @@ def cleanPoint(inPoint): # print "cdf gradient =" , CDFgr # quantile - quantile = distribution.computeQuantile(0.95) - print("quantile=", repr(quantile)) - print("cdf(quantile)=%.6f" % distribution.computeCDF(quantile)) - # Get 95% survival function - inverseSurvival = ot.Point(distribution.computeInverseSurvivalFunction(0.95)) - print("InverseSurvival=", repr(inverseSurvival)) if dim < 4: + quantile = distribution.computeQuantile(0.95) + print("quantile=", repr(quantile)) + print("cdf(quantile)=%.6f" % distribution.computeCDF(quantile)) + # Get 95% survival function + inverseSurvival = ot.Point(distribution.computeInverseSurvivalFunction(0.95)) + print("InverseSurvival=", repr(inverseSurvival)) print( "Survival(inverseSurvival)=%.6f" % distribution.computeSurvivalFunction(inverseSurvival) diff --git a/python/test/t_OptimizationAlgorithm_std.py b/python/test/t_OptimizationAlgorithm_std.py index 1c91515e11..1e0139696d 100755 --- a/python/test/t_OptimizationAlgorithm_std.py +++ b/python/test/t_OptimizationAlgorithm_std.py @@ -20,6 +20,9 @@ problem.setInequalityConstraint(h) names = 
ot.OptimizationAlgorithm.GetAlgorithmNames(problem) for name in names: + if "global" in name: + # slow + continue algo = ot.OptimizationAlgorithm.Build(name) algo.setProblem(problem) algo.setMaximumConstraintError(1e-1) @@ -119,3 +122,34 @@ def _exec(X): calls = algo.getResult().getCallsNumber() print(f"{name}: {status} {msg} {calls}") assert status == ot.OptimizationResult.TIMEOUT, name + +# infeasible problem +objective = ot.SymbolicFunction( + ["x1", "x2", "x3", "x4"], ["x1 + 2 * x2 - 3 * x3 + 4 * x4"] +) +inequality_constraint = ot.SymbolicFunction(["x1", "x2", "x3", "x4"], ["-1.0"]) +dim = objective.getInputDimension() +bounds = ot.Interval([-3.0] * dim, [5.0] * dim) +problem = ot.OptimizationProblem(objective) +problem.setMinimization(True) +problem.setInequalityConstraint(inequality_constraint) +problem.setBounds(bounds) +for name in ot.OptimizationAlgorithm.GetAlgorithmNames(): + algo = ot.OptimizationAlgorithm.Build(name) + algo.setCheckStatus(False) + try: + algo.setProblem(problem) + startingPoint = [0.0] * dim + algo.setStartingPoint(startingPoint) + except Exception: + # not supported + continue + print(f"{name}...") + algo.run() + result = algo.getResult() + status = algo.getResult().getStatus() + msg = algo.getResult().getStatusMessage() + calls = algo.getResult().getCallsNumber() + print(f"{name}: {status} {msg} {calls}") + assert len(result.getOptimalPoint()) == 0, "should not find point" + assert status == ot.OptimizationResult.FAILURE, "should return FAILURE" diff --git a/python/test/t_Pagmo_std.py b/python/test/t_Pagmo_std.py index 40ae794f24..96cae32aee 100755 --- a/python/test/t_Pagmo_std.py +++ b/python/test/t_Pagmo_std.py @@ -50,13 +50,16 @@ def stop(): algo.run() result = algo.getResult() x = result.getFinalPoints() + if use_ineq: + for x1, x2 in x: + assert x1 >= x2, f"ineq constraint not verified: {x1} >= {x2}" y = result.getFinalValues() fronts = result.getParetoFrontsIndices() assert len(fronts) > 0, "no pareto" print(name, len(fronts)) 
assert ( result.getCallsNumber() == (algo.getMaximumIterationNumber() + 1) * size - ), "wrong size" + ), f"wrong size: {result.getCallsNumber()}" # rosenbrock for the other algorithms print("----- mono-obj -----") @@ -83,7 +86,9 @@ def stop(): result = algo.getResult() x = result.getOptimalPoint() y = result.getOptimalValue() - if not use_ineq: + if use_ineq: + assert x[1] < 1e-5, f"ineq constraint not verified: {x[1]} < 0" + else: assert result.getFinalPoints().getSize() == pop0.getSize(), "no final pop" assert y[0] < 40.0, str(y) print(name, x, y) @@ -211,7 +216,7 @@ def minlp_obj2(x): x = result.getOptimalPoint() y = result.getOptimalValue() print("gaco reorder", x, y) -assert abs(-5.0 - y[0]) < 1e-4, "wrong value" +assert abs(-5.0 - y[0]) < 2e-3, f"wrong value {y}" # check we don't expose penalized values f = ot.SymbolicFunction( diff --git a/python/test/t_ResourceMap_std.py b/python/test/t_ResourceMap_std.py index 8557b9415d..0a704e92c7 100755 --- a/python/test/t_ResourceMap_std.py +++ b/python/test/t_ResourceMap_std.py @@ -9,6 +9,6 @@ print(" %s => %s," % (key, ot.ResourceMap.Get(key))) print("}") print( - "Extract from ResourceMap: Path-TemporaryDirectory -> ", - ot.ResourceMap.Get("Path-TemporaryDirectory"), + "Extract from ResourceMap: Cache-MaxSize -> ", + ot.ResourceMap.Get("Cache-MaxSize"), ) diff --git a/python/test/t_SimplicialCubature_std.py b/python/test/t_SimplicialCubature_std.py index 712b56a85c..c14f78b559 100755 --- a/python/test/t_SimplicialCubature_std.py +++ b/python/test/t_SimplicialCubature_std.py @@ -20,3 +20,9 @@ value = algo.integrate(f, mesh)[0] print(value) ott.assert_almost_equal(value, (m.exp(1.0) - 1.0) ** 3 / 6) + +# Test with interval inferface +f = ot.SymbolicFunction(["x", "y", "z"], ["sin(x) * cos(y) * exp(z)"]) +valueRef = -m.sin(1.0) * (m.cos(1.0) - 1.0) * (m.e - 1.0) +value = algo.integrate(f, ot.Interval([0.0] * 3, [1.0] * 3)) +ott.assert_almost_equal(value[0], valueRef) diff --git a/python/test/t_Study_std.py 
b/python/test/t_Study_std.py index a8d9db0001..84dfbf7a74 100755 --- a/python/test/t_Study_std.py +++ b/python/test/t_Study_std.py @@ -8,7 +8,7 @@ ot.TESTPREAMBLE() ot.Log.Show(ot.Log.NONE) -# find all instanciable classes +# find all instantiable classes persistentClasses = {} for mod in [ot, otexp]: for name, obj in inspect.getmembers(mod): diff --git a/python/test/t_SystemFORM_std.py b/python/test/t_SystemFORM_std.py index 61c756f902..4cbbeb69c4 100755 --- a/python/test/t_SystemFORM_std.py +++ b/python/test/t_SystemFORM_std.py @@ -76,14 +76,14 @@ [ ot.IntersectionEvent([e0, e3, e4, e5]), ot.IntersectionEvent([e1, e3, e4, e5]), - ot.IntersectionEvent([e2, e3, e4, e5]), + # ot.IntersectionEvent([e2, e3, e4, e5]), ] ) # sampling test pf_sim = event.getSample(10000).computeMean()[0] print("pf_sim = %.6g" % pf_sim) -ott.assert_almost_equal(pf_sim, 0.00384, 1e-3, 1e-3) +ott.assert_almost_equal(pf_sim, 0.0023, 1e-3, 1e-3) # system FORM algo = ot.SystemFORM(solver, event, mean) @@ -91,7 +91,7 @@ result = algo.getResult() pf_sysform = result.getEventProbability() print("pf_sysform = %.6g" % pf_sysform) -ott.assert_almost_equal(pf_sysform, 0.00418394, 1e-4, 1e-4) +ott.assert_almost_equal(pf_sysform, 0.00234983, 1e-4, 1e-4) for form_result in result.getFORMResultCollection(): print(" beta=", form_result.getGeneralisedReliabilityIndex()) diff --git a/python/test/t_docstring_missing.py b/python/test/t_docstring_missing.py index b210c4beac..4ebd2c43ec 100755 --- a/python/test/t_docstring_missing.py +++ b/python/test/t_docstring_missing.py @@ -6,8 +6,8 @@ ot.TESTPREAMBLE() -# find all instanciable classes -instanciables = [] +# find all instantiable classes +instantiables = [] for mod in [ot, otexp]: for name, obj in inspect.getmembers(mod): if inspect.isclass(obj): @@ -16,7 +16,7 @@ continue try: instance = obj() - instanciables.append(obj) + instantiables.append(obj) except Exception: pass @@ -25,7 +25,7 @@ count_class_undoc = 0 count_methods = 0 count_methods_undoc = 0 -for 
class_ in instanciables: +for class_ in instantiables: count_class += 1 if class_.__doc__ is None: print(f"{class_.__name__} class") diff --git a/python/test/t_sphinx_missing.py b/python/test/t_sphinx_missing.py index 029181f4ac..12720a5379 100755 --- a/python/test/t_sphinx_missing.py +++ b/python/test/t_sphinx_missing.py @@ -17,8 +17,8 @@ with open(rst_file) as f: rst_lines += f.read().splitlines() -# find all instanciable classes -instanciables = [] +# find all instantiable classes +instantiables = [] for mod in [ot, otexp]: for name, obj in inspect.getmembers(mod): if inspect.isclass(obj): @@ -31,7 +31,7 @@ continue try: instance = obj() - instanciables.append(obj) + instantiables.append(obj) except Exception: pass @@ -41,7 +41,7 @@ count_methods = 0 count_methods_undoc = 0 -for class_ in instanciables: +for class_ in instantiables: cn = class_.__name__ count_class += 1 found = False diff --git a/utils/costDiff.py b/utils/costDiff.py new file mode 100644 index 0000000000..5ab1fbf3a1 --- /dev/null +++ b/utils/costDiff.py @@ -0,0 +1,87 @@ +# Tool to compare testsuite costs +# +# 1. run tests in reference configuration: +# $ make install && make tests && rm -rf Testing && ctest && cp Testing/Temporary/CTestCostData.txt CTestCostData0.txt +# +# 2. run tests with new code: +# $ make install && make tests && rm -rf Testing && ctest && cp Testing/Temporary/CTestCostData.txt CTestCostData1.txt +# +# 3. 
compare tests costs: +# $ python3 ../utils/costDiff.py CTestCostData0.txt CTestCostData1.txt + + +import argparse + +def parse_cost(path): + tdata = dict() + with open(path) as cost1: + for line in cost1.readlines(): + try: + tag, reps, cost = line.split(" ") + if reps == "0": + raise ValueError() + tdata[tag] = float(cost) + except ValueError: + pass + return tdata + +def compare_cost(path1, path2): + data1 = parse_cost(path1) + data2 = parse_cost(path2) + diffData = {} + relDiffData = {} + n_slower = 0 + n_faster = 0 + a_tol = 8e-1 + r_tol = 1e-1 + + for key in data1.keys(): + if data1[key] < a_tol: + continue + if key in data2: + diff = data2[key] - data1[key] + diffData[key] = diff + relDiff = diff / data1[key] + if diff > a_tol and relDiff > r_tol: + n_slower +=1 + if diff < -a_tol and -relDiff > r_tol: + n_faster +=1 + relDiffData[key] = relDiff + + for key in diffData.keys(): + print(f"{key} diff {diffData[key]:.3f} slowdown {relDiffData[key]:.3f}") + + n = 10 + print(f"= top {n} SLOWEST tests ============") + std = sorted(data1.items(), key=lambda item: item[1]) + slowest = dict(reversed(std[-n:])) + for key in slowest.keys(): + print(f"{key} {data1[key]:.3f}") + + std = sorted(relDiffData.items(), key=lambda item: item[1]) + slower = dict(reversed(std[-n:])) + faster = dict(std[:n]) + print(f"= top {n} FASTER tests ============") + for key in faster.keys(): + print(f"{key} diff {diffData[key]:.3f} speedup {-100.0*relDiffData[key]:.3f}% (before={data1[key]:.3f} after={data2[key]:.3f})") + + print(f"= top {n} SLOWER tests ============") + for key in slower.keys(): + print(f"{key} diff {diffData[key]:.3f} slowdown {100.0*relDiffData[key]:.3f}% (before={data1[key]:.3f} after={data2[key]:.3f})") + + print(f"==============") + print(f"n_faster={n_faster} n_slower={n_slower}") + print(f"==============") + +def main(): + """ + entry point. 
+ """ + parser = argparse.ArgumentParser(description="Compare test data") + parser.add_argument("path1", type=str, help="Path to the cost data (eg CTestCostData.txt)") + parser.add_argument("path2", type=str, help="Path to the cost data (eg CTestCostData.txt)") + args = parser.parse_args() + compare_cost(**vars(args)) + +if __name__ == "__main__": + main() diff --git a/utils/docfast.py b/utils/docfast.py index 437e982017..775d322d87 100755 --- a/utils/docfast.py +++ b/utils/docfast.py @@ -24,6 +24,8 @@ * sphinx * sphinx-gallery * numpydoc + * sphinx-copybutton + * sphinxcontrib-jquery Sphinx reads the docstrings of the OpenTURNS methods to generate the API doc. This means that changing the source _doc.i.in files will have no effect, diff --git a/utils/rename.sh b/utils/rename.sh index f20cced9cf..2f2ac30500 100755 --- a/utils/rename.sh +++ b/utils/rename.sh @@ -10,7 +10,7 @@ usage() test $# = 2 || usage - +# rename files files=`find lib python -name ${1}[\._]*` t_files=`find lib python -name t_${1}_*` for src_file in ${files} ${t_files} @@ -20,7 +20,10 @@ do git mv ${src_file} ${parent_dir}/${dest_file} done +# rename symbols +grep -lr $1 lib python validation|grep -v '~'|xargs sed -i "s|\b$1\b|$2|g;s|\b$1_|$2_|g;s|_$1\b|_$2|g" + +# rename include guard upper_src=`echo $1 | tr "[:lower:]" "[:upper:]"` upper_dest=`echo $2 | tr "[:lower:]" "[:upper:]"` -grep -lr $1 lib python|grep -v '~'|xargs sed -i "s|\b$1\b|$2|g;s|\b$1_|$2_|g;s|_$1\b|_$2|g" find lib python -name $2.hxx | xargs sed -i "s|_${upper_src}_HXX|_${upper_dest}_HXX|g" diff --git a/validation/src/ValidSymbolicFunctionParallel.py b/validation/src/ValidSymbolicFunctionParallel.py index c250364acb..57cf17e238 100644 --- a/validation/src/ValidSymbolicFunctionParallel.py +++ b/validation/src/ValidSymbolicFunctionParallel.py @@ -6,7 +6,7 @@ # one evaluation of a big sample, varying formula length N = int(1e7) -x = ot.ComposedDistribution([ot.Uniform(-m.pi, m.pi)] * 3).getSample(N) +x = 
ot.JointDistribution([ot.Uniform(-m.pi, m.pi)] * 3).getSample(N) for M in [1, 10, 50]: f = ot.SymbolicFunction(['x1', 'x2', 'x3'], ['+'.join(['cosh(x1)+cosh(x2)+cosh(x3)'] * M)]) t0 = time.time() @@ -33,7 +33,7 @@ f = ot.SymbolicFunction(['x1', 'x2', 'x3'], ['+'.join(['cosh(x1)+cosh(x2)+cosh(x3)'] * M)]) for p in range(1, 7): m1 = int(10 ** p) - x = ot.ComposedDistribution([ot.Uniform(-m.pi, m.pi)] * 3).getSample(m1) + x = ot.JointDistribution([ot.Uniform(-m.pi, m.pi)] * 3).getSample(m1) t0 = time.time() for i in range(N // m1): y = f(x) diff --git a/validation/src/optimal_lhs/compute_spacefilling_perturbLHS.py b/validation/src/optimal_lhs/compute_spacefilling_perturbLHS.py index acbb6d76af..e242a0bb30 100755 --- a/validation/src/optimal_lhs/compute_spacefilling_perturbLHS.py +++ b/validation/src/optimal_lhs/compute_spacefilling_perturbLHS.py @@ -9,7 +9,7 @@ size = 20 # Factory: lhs generates -lhsDesign = ot.LHSExperiment(ot.ComposedDistribution([ot.Uniform(0.0, 1.0)] * dimension), size) +lhsDesign = ot.LHSExperiment(ot.JointDistribution([ot.Uniform(0.0, 1.0)] * dimension), size) lhsDesign.setRandomShift(False) # centered lhsDesign.setAlwaysShuffle(True) # randomized @@ -43,7 +43,7 @@ # Size of sample size = 100 -lhsDesign = ot.LHSExperiment(ot.ComposedDistribution([ot.Uniform(0.0, 1.0)] * dimension), size) +lhsDesign = ot.LHSExperiment(ot.JointDistribution([ot.Uniform(0.0, 1.0)] * dimension), size) lhsDesign.setRandomShift(False) # centered lhsDesign.setAlwaysShuffle(True) # randomized diff --git a/validation/src/optimal_lhs/generate_lhs_nonunit_samples.py b/validation/src/optimal_lhs/generate_lhs_nonunit_samples.py index 4898d0f1cb..2b01804f4a 100755 --- a/validation/src/optimal_lhs/generate_lhs_nonunit_samples.py +++ b/validation/src/optimal_lhs/generate_lhs_nonunit_samples.py @@ -8,7 +8,7 @@ size = 20 # Factory: lhs generates -lhsDesign = ot.LHSExperiment(ot.ComposedDistribution([ot.Uniform(20.0, 30.0)] * dimension), size) +lhsDesign = 
ot.LHSExperiment(ot.JointDistribution([ot.Uniform(20.0, 30.0)] * dimension), size) lhsDesign.setRandomShift(False) # centered lhsDesign.setAlwaysShuffle(True) # randomized @@ -33,7 +33,7 @@ # Size of sample size = 100 -lhsDesign = ot.LHSExperiment(ot.ComposedDistribution([ot.Uniform(20.0, 30.0)] * dimension), size) +lhsDesign = ot.LHSExperiment(ot.JointDistribution([ot.Uniform(20.0, 30.0)] * dimension), size) lhsDesign.setRandomShift(False) # centered lhsDesign.setAlwaysShuffle(True) # randomized diff --git a/validation/src/optimal_lhs/generate_lhs_samples.py b/validation/src/optimal_lhs/generate_lhs_samples.py index 2640d34a61..0bc3f04860 100755 --- a/validation/src/optimal_lhs/generate_lhs_samples.py +++ b/validation/src/optimal_lhs/generate_lhs_samples.py @@ -9,7 +9,7 @@ size = 20 # Factory: lhs generates -lhsDesign = ot.LHSExperiment(ot.ComposedDistribution([ot.Uniform(0.0, 1.0)] * dimension), size) +lhsDesign = ot.LHSExperiment(ot.JointDistribution([ot.Uniform(0.0, 1.0)] * dimension), size) lhsDesign.setRandomShift(False) # centered lhsDesign.setAlwaysShuffle(True) # randomized @@ -35,7 +35,7 @@ # Size of sample size = 100 -lhsDesign = ot.LHSExperiment(ot.ComposedDistribution([ot.Uniform(0.0, 1.0)] * dimension), size) +lhsDesign = ot.LHSExperiment(ot.JointDistribution([ot.Uniform(0.0, 1.0)] * dimension), size) lhsDesign.setRandomShift(False) # centered lhsDesign.setAlwaysShuffle(True) # randomized diff --git a/validation/src/optimal_lhs/validate_MC_big.py b/validation/src/optimal_lhs/validate_MC_big.py index dd04e2a14d..a737ec4454 100755 --- a/validation/src/optimal_lhs/validate_MC_big.py +++ b/validation/src/optimal_lhs/validate_MC_big.py @@ -20,7 +20,7 @@ # Size of sample size = 100 # Factory: lhs generates -lhsDesign = ot.LHSExperiment(ot.ComposedDistribution([ot.Uniform(0.0, 1.0)] * dimension), size) +lhsDesign = ot.LHSExperiment(ot.JointDistribution([ot.Uniform(0.0, 1.0)] * dimension), size) lhsDesign.setAlwaysShuffle(True) # randomized mc = 
ot.MonteCarloLHS(lhsDesign, nSimu, c2) @@ -38,7 +38,7 @@ minDist = ot.SpaceFillingMinDist() # Factory: lhs generates -lhsDesign = ot.LHSExperiment(ot.ComposedDistribution([ot.Uniform(0.0, 1.0)] * dimension), size) +lhsDesign = ot.LHSExperiment(ot.JointDistribution([ot.Uniform(0.0, 1.0)] * dimension), size) lhsDesign.setAlwaysShuffle(True) # randomized mc = ot.MonteCarloLHS(lhsDesign, nSimu, minDist) diff --git a/validation/src/optimal_lhs/validate_MC_small.py b/validation/src/optimal_lhs/validate_MC_small.py index 8e7a690334..a2d92b5136 100755 --- a/validation/src/optimal_lhs/validate_MC_small.py +++ b/validation/src/optimal_lhs/validate_MC_small.py @@ -23,7 +23,7 @@ for nSimu in [100, 200, 400, 800, 1600, 3200, 6400, 12800, 25600, 51200, 102400, 204800, 409600]: ot.RandomGenerator.SetSeed(0) # Factory: lhs generates - lhsDesign = ot.LHSExperiment(ot.ComposedDistribution([ot.Uniform(0.0, 1.0)] * dimension), size) + lhsDesign = ot.LHSExperiment(ot.JointDistribution([ot.Uniform(0.0, 1.0)] * dimension), size) lhsDesign.setAlwaysShuffle(True) # randomized mc = ot.MonteCarloLHS(lhsDesign, nSimu, c2) @@ -43,7 +43,7 @@ minDist = ot.SpaceFillingMinDist() # Factory: lhs generates -lhsDesign = ot.LHSExperiment(ot.ComposedDistribution([ot.Uniform(0.0, 1.0)] * dimension), size) +lhsDesign = ot.LHSExperiment(ot.JointDistribution([ot.Uniform(0.0, 1.0)] * dimension), size) lhsDesign.setAlwaysShuffle(True) # randomized mc = ot.MonteCarloLHS(lhsDesign, nSimu, minDist) diff --git a/validation/src/optimal_lhs/validate_SA_big.py b/validation/src/optimal_lhs/validate_SA_big.py index 87d250ff7f..717977c916 100755 --- a/validation/src/optimal_lhs/validate_SA_big.py +++ b/validation/src/optimal_lhs/validate_SA_big.py @@ -17,7 +17,7 @@ size = 100 # Factory: lhs generates -lhsDesign = ot.LHSExperiment(ot.ComposedDistribution([ot.Uniform(0.0, 1.0)] * dimension), size) +lhsDesign = ot.LHSExperiment(ot.JointDistribution([ot.Uniform(0.0, 1.0)] * dimension), size) 
lhsDesign.setAlwaysShuffle(True) # randomized geomProfile = ot.GeometricProfile(10.0, 0.999, 50000) diff --git a/validation/src/optimal_lhs/validate_SA_small.py b/validation/src/optimal_lhs/validate_SA_small.py index f25538ff93..d8a454b87a 100755 --- a/validation/src/optimal_lhs/validate_SA_small.py +++ b/validation/src/optimal_lhs/validate_SA_small.py @@ -18,7 +18,7 @@ size = 10 # Factory: lhs generates -lhsDesign = ot.LHSExperiment(ot.ComposedDistribution([ot.Uniform(0.0, 1.0)] * dimension), size) +lhsDesign = ot.LHSExperiment(ot.JointDistribution([ot.Uniform(0.0, 1.0)] * dimension), size) lhsDesign.setAlwaysShuffle(True) # randomized geomProfile = ot.GeometricProfile(10.0, 0.999, 50000) diff --git a/validation/src/sobol_estimator_variance/convergence_stddev.py b/validation/src/sobol_estimator_variance/convergence_stddev.py index 9a3ba95b01..5524219f0f 100644 --- a/validation/src/sobol_estimator_variance/convergence_stddev.py +++ b/validation/src/sobol_estimator_variance/convergence_stddev.py @@ -273,7 +273,7 @@ def runConvergence( E.setParameter(ot.LogNormalMuSigmaOverMu()([3e4, 0.12, 0.0])) F = ot.LogNormal() F.setParameter(ot.LogNormalMuSigmaOverMu()([0.1, 0.20, 0.0])) - distribution_poutre = ot.ComposedDistribution([L, b, h, E, F]) + distribution_poutre = ot.JointDistribution([L, b, h, E, F]) # Saltelli runConvergence( diff --git a/validation/src/sobol_estimator_variance/gsobollib.py b/validation/src/sobol_estimator_variance/gsobollib.py index cc9962d393..c270d29c79 100644 --- a/validation/src/sobol_estimator_variance/gsobollib.py +++ b/validation/src/sobol_estimator_variance/gsobollib.py @@ -6,7 +6,7 @@ # EDF R&D - 2017 - Michael Baudin # -from openturns import ComposedDistribution, Uniform +from openturns import JointDistribution, Uniform from numpy import array, prod, ones, zeros @@ -41,7 +41,7 @@ def gsobolSAExact(a): def gsobolDistribution(d): - distribution = ComposedDistribution([Uniform(0, 1)] * d) + distribution = JointDistribution([Uniform(0, 1)] * 
d) return distribution diff --git a/validation/src/sobol_estimator_variance/ishigamilib.py b/validation/src/sobol_estimator_variance/ishigamilib.py index ed4f88419a..ee9ea0c41d 100644 --- a/validation/src/sobol_estimator_variance/ishigamilib.py +++ b/validation/src/sobol_estimator_variance/ishigamilib.py @@ -7,7 +7,7 @@ # from math import sin, pi -from openturns import ComposedDistribution, Uniform, SymbolicFunction +from openturns import JointDistribution, Uniform, SymbolicFunction def ishigamiAB(): @@ -48,7 +48,7 @@ def ishigamiGSymbolic(): def ishigamiDistribution(): - distribution = ComposedDistribution([Uniform(-pi, pi)] * 3) + distribution = JointDistribution([Uniform(-pi, pi)] * 3) return distribution diff --git a/validation/src/sobol_estimator_variance/sensitivity_distribution.py b/validation/src/sobol_estimator_variance/sensitivity_distribution.py index 1d4c165da9..95838dfd1c 100644 --- a/validation/src/sobol_estimator_variance/sensitivity_distribution.py +++ b/validation/src/sobol_estimator_variance/sensitivity_distribution.py @@ -526,7 +526,7 @@ def plot_indices_histogram( E.setParameter(ot.LogNormalMuSigmaOverMu()([3e4, 0.12, 0.0])) F = ot.LogNormal() F.setParameter(ot.LogNormalMuSigmaOverMu()([0.1, 0.20, 0.0])) - distribution_poutre = ot.ComposedDistribution([L, b, h, E, F]) + distribution_poutre = ot.JointDistribution([L, b, h, E, F]) sTest_poutre_saltelli = SensitivityConfidenceTest( model_poutre, @@ -567,7 +567,7 @@ def plot_indices_histogram( ["2*X1 + X2 - 3*X3 + 0.3*X1*X2", "-5*X1 + 4*X2 - 0.8*X2*X3 + 2*X3"], ) model_aggregated.setName("AggregatedSobol") - distribution_aggregated = ot.ComposedDistribution([ot.Uniform()] * 3) + distribution_aggregated = ot.JointDistribution([ot.Uniform()] * 3) sTest_poutre_saltelli = SensitivityConfidenceTest( model_aggregated,