From 0121253e96c2e842640b5f7c2f90cd03530ddaa1 Mon Sep 17 00:00:00 2001
From: Glenn
Date: Tue, 13 Feb 2024 13:52:16 -0500
Subject: [PATCH] weejobs - integrate as new job scheduling subsystem (#2443)

* weethreads sdk
* gcc compile fix
* gcc compile fix
* fix gcc compiler error 3
* gcc compiler fix 4
* gcc compiler error #5
* fix gcc compile error #6
* gcc compile error #7
* Update oe_unix.cmake
* Update oe_unix.cmake
* gcc compiler fix #8
* Remove the getCurrentThreadId function and replace thread IDs with the cross platform std::thread::id
* Update weethreads.h
* weejobs
---
 CMakeLists.txt | 2 +-
 CMakeModules/oe_unix.cmake | 2 +
 .../osgearth_bakefeaturetiles.cpp | 4 +-
 .../osgearth_clamp/osgearth_clamp.cpp | 1 -
 .../osgearth_collecttriangles.cpp | 21 +-
 .../osgearth_conv/osgearth_conv.cpp | 4 +-
 .../osgearth_exportvegetation.cpp | 8 +-
 .../osgearth_features/osgearth_features.cpp | 2 +-
 .../osgearth_imposterbaker.cpp | 4 +-
 src/osgEarth/AnnotationRegistry.cpp | 6 +-
 src/osgEarth/CMakeLists.txt | 14 +-
 src/osgEarth/Cache.cpp | 10 +-
 src/osgEarth/CacheBin.cpp | 2 +-
 src/osgEarth/CascadeDrapingDecorator.cpp | 4 +-
 src/osgEarth/Chonk.cpp | 12 +-
 src/osgEarth/Clamping.cpp | 3 +-
 src/osgEarth/ColorFilter.cpp | 6 +-
 src/osgEarth/Common | 1 +
 src/osgEarth/Containers | 70 +-
 src/osgEarth/Controls.cpp | 8 +-
 src/osgEarth/Cube.cpp | 4 +-
 src/osgEarth/CullingUtils.cpp | 1 +
 src/osgEarth/DepthOffset.cpp | 5 +-
 src/osgEarth/DrapingCullSet | 2 +-
 src/osgEarth/DrapingCullSet.cpp | 1 -
 src/osgEarth/DrapingTechnique.cpp | 4 +-
 src/osgEarth/Elevation.cpp | 2 +-
 src/osgEarth/ElevationLayer.cpp | 6 +-
 src/osgEarth/ElevationPool | 6 +-
 src/osgEarth/ElevationPool.cpp | 29 +-
 src/osgEarth/FeatureModelGraph.cpp | 1 -
 src/osgEarth/FeatureSource | 2 +-
 src/osgEarth/FeatureSource.cpp | 6 +-
 src/osgEarth/FeatureSourceIndexNode | 4 +-
 src/osgEarth/FeatureSourceIndexNode.cpp | 23 +-
 src/osgEarth/FileUtils.cpp | 4 +-
 src/osgEarth/Filter.cpp | 4 +-
 src/osgEarth/FlatteningLayer | 2 +-
 src/osgEarth/FlatteningLayer.cpp | 2 +-
 src/osgEarth/GDAL | 9 +-
 src/osgEarth/GDAL.cpp | 69 +-
 src/osgEarth/GLUtils | 47 +-
 src/osgEarth/GLUtils.cpp | 32 +-
 src/osgEarth/Geocoder | 10 +-
 src/osgEarth/Geocoder.cpp | 45 +-
 src/osgEarth/GeodeticGraticule | 2 +-
 src/osgEarth/GeodeticGraticule.cpp | 8 +-
 src/osgEarth/Geoid.cpp | 6 +-
 src/osgEarth/GeometryCloud.cpp | 1 +
 src/osgEarth/GeometryCompiler.cpp | 2 +-
 src/osgEarth/HTTPClient.cpp | 7 +-
 src/osgEarth/HorizonClipPlane.cpp | 3 +-
 src/osgEarth/IconSymbol.cpp | 4 +-
 src/osgEarth/ImGui/LayersGUI | 68 +-
 src/osgEarth/ImGui/NotifyGUI | 2 +-
 src/osgEarth/ImGui/SearchGUI | 8 -
 src/osgEarth/ImGui/SystemGUI | 32 +-
 src/osgEarth/ImGui/TerrainGUI | 3 +-
 src/osgEarth/ImageLayer.cpp | 23 +-
 src/osgEarth/ImageOverlay | 2 +-
 src/osgEarth/ImageOverlay.cpp | 10 +-
 src/osgEarth/ImageUtils.cpp | 6 +-
 src/osgEarth/LabelNode.cpp | 4 +-
 src/osgEarth/Layer.cpp | 2 +-
 src/osgEarth/Lighting | 2 +-
 src/osgEarth/Lighting.cpp | 8 +-
 src/osgEarth/LineDrawable | 2 +-
 src/osgEarth/LineDrawable.cpp | 6 +-
 src/osgEarth/MBTiles | 2 +-
 src/osgEarth/MBTiles.cpp | 11 +-
 src/osgEarth/Map.cpp | 3 +-
 src/osgEarth/MapNode.cpp | 6 +-
 src/osgEarth/MemCache.cpp | 4 +-
 src/osgEarth/ModelResource.cpp | 13 +-
 src/osgEarth/NetworkMonitor.cpp | 15 +-
 src/osgEarth/Notify | 20 +-
 src/osgEarth/Notify.cpp | 12 +
 src/osgEarth/OGRFeatureSource | 3 +-
 src/osgEarth/OGRFeatureSource.cpp | 6 +-
 src/osgEarth/ObjectIndex | 4 +-
 src/osgEarth/ObjectIndex.cpp | 15 +-
 src/osgEarth/OverlayDecorator.cpp | 13 +-
 src/osgEarth/PagedNode | 6 +-
src/osgEarth/PagedNode.cpp | 42 +- src/osgEarth/PlaceNode.cpp | 8 +- src/osgEarth/PointDrawable.cpp | 8 +- src/osgEarth/Progress | 8 +- src/osgEarth/Progress.cpp | 4 +- src/osgEarth/Registry | 10 +- src/osgEarth/Registry.cpp | 68 +- src/osgEarth/Resource | 2 +- src/osgEarth/Resource.cpp | 3 +- src/osgEarth/ResourceCache | 6 +- src/osgEarth/ResourceCache.cpp | 17 +- src/osgEarth/ResourceLibrary.cpp | 20 +- src/osgEarth/ResourceReleaser | 2 +- src/osgEarth/ResourceReleaser.cpp | 9 +- src/osgEarth/SDF | 14 - src/osgEarth/SceneGraphCallback | 2 +- src/osgEarth/SceneGraphCallback.cpp | 13 +- src/osgEarth/ScreenSpaceLayout.cpp | 6 +- src/osgEarth/ScriptEngine | 2 +- src/osgEarth/ScriptEngine.cpp | 4 +- src/osgEarth/ShaderFactory.cpp | 4 +- src/osgEarth/ShaderGenerator.cpp | 6 +- src/osgEarth/SimplePager | 2 +- src/osgEarth/SimplePager.cpp | 29 +- src/osgEarth/Skins.cpp | 2 +- src/osgEarth/SpatialReference | 2 +- src/osgEarth/SpatialReference.cpp | 14 +- src/osgEarth/StateSetCache | 2 +- src/osgEarth/StateSetCache.cpp | 29 +- src/osgEarth/StyleSheet.cpp | 1 - src/osgEarth/Symbol.cpp | 4 +- src/osgEarth/TDTiles | 2 +- src/osgEarth/TDTiles.cpp | 32 +- src/osgEarth/Terrain.cpp | 7 +- src/osgEarth/TerrainEngineNode.cpp | 3 +- src/osgEarth/TerrainResources | 2 +- src/osgEarth/TerrainResources.cpp | 18 +- src/osgEarth/Text.cpp | 8 +- src/osgEarth/TextureArena.cpp | 18 +- src/osgEarth/Threading | 1231 +++-------------- src/osgEarth/Threading.cpp | 955 +------------ src/osgEarth/TileMesher.cpp | 6 +- src/osgEarth/TileRasterizer | 2 +- src/osgEarth/TileRasterizer.cpp | 4 +- src/osgEarth/TileSource | 2 +- src/osgEarth/TileSourceElevationLayer.cpp | 5 +- src/osgEarth/TileVisitor | 4 +- src/osgEarth/TileVisitor.cpp | 50 +- src/osgEarth/TrackNode.cpp | 8 +- src/osgEarth/VerticalDatum.cpp | 4 +- src/osgEarth/VirtualProgram | 4 +- src/osgEarth/VirtualProgram.cpp | 43 +- src/osgEarth/WindLayer.cpp | 8 +- src/osgEarth/weejobs.h | 872 ++++++++++++ src/osgEarth/weethreads.h | 934 +++++++++++++ src/osgEarthDrivers/CMakeLists.txt | 2 - .../cache_filesystem/FileSystemCache.cpp | 42 +- .../cache_rocksdb/RocksDBCache.cpp | 4 +- .../cache_rocksdb/RocksDBCacheBin | 2 +- .../cache_rocksdb/RocksDBCacheBin.cpp | 4 +- src/osgEarthDrivers/cache_rocksdb/Tracker | 8 +- .../engine_rex/CreateTileImplementation | 2 +- .../engine_rex/CreateTileImplementation.cpp | 2 +- src/osgEarthDrivers/engine_rex/GeometryPool | 2 +- .../engine_rex/GeometryPool.cpp | 18 +- .../engine_rex/LoadTileData.cpp | 17 +- src/osgEarthDrivers/engine_rex/Loader | 2 +- src/osgEarthDrivers/engine_rex/Loader.cpp | 24 +- .../engine_rex/RexTerrainEngineNode.cpp | 19 +- src/osgEarthDrivers/engine_rex/TileNode | 2 +- src/osgEarthDrivers/engine_rex/TileNode.cpp | 49 +- .../engine_rex/TileNodeRegistry | 2 +- .../engine_rex/TileNodeRegistry.cpp | 15 +- src/osgEarthDrivers/gltf/GLTFReader.h | 6 +- src/osgEarthDrivers/kml/KMZArchive.cpp | 4 +- .../script_engine_duktape/DuktapeEngine.cpp | 5 +- .../sky_simple/SimpleSkyNode.cpp | 5 +- src/osgEarthDrivers/zip/ZipArchive.cpp | 8 +- src/osgEarthDrivers/zip/ZipArchive.h | 11 +- src/osgEarthProcedural/BiomeLayer.cpp | 11 +- src/osgEarthProcedural/BiomeManager.cpp | 20 +- src/osgEarthProcedural/ImGui/LifeMapLayerGUI | 15 +- .../ImGui/VegetationLayerGUI | 5 +- src/osgEarthProcedural/RoadSurfaceLayer.cpp | 4 +- .../TextureSplattingLayer.cpp | 6 +- src/osgEarthProcedural/VegetationLayer.cpp | 50 +- src/osgEarthSilverLining/SilverLiningContext | 2 +- .../SilverLiningContext.cpp | 2 +- src/osgEarthSilverLining/SilverLiningNode.cpp | 
8 +- src/osgEarthTriton/TritonContext | 2 +- src/osgEarthTriton/TritonContext.cpp | 2 +- src/osgEarthTriton/TritonHeightMap.cpp | 6 +- tests/osm.earth | 2 +- tests/viewpoints.xml | 18 +- 177 files changed, 2887 insertions(+), 2934 deletions(-) create mode 100644 src/osgEarth/weejobs.h create mode 100644 src/osgEarth/weethreads.h diff --git a/CMakeLists.txt b/CMakeLists.txt index 408e6736ef..1747df821b 100755 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -20,7 +20,7 @@ set(OSGEARTH_PATCH_VERSION 0) set(OSGEARTH_VERSION ${OSGEARTH_MAJOR_VERSION}.${OSGEARTH_MINOR_VERSION}.${OSGEARTH_PATCH_VERSION}) # Increment this each time the ABI changes -set(OSGEARTH_SOVERSION 149) +set(OSGEARTH_SOVERSION 150) # Require C++11 set_property(GLOBAL PROPERTY CXX_STANDARD 11) diff --git a/CMakeModules/oe_unix.cmake b/CMakeModules/oe_unix.cmake index f2399fefee..2acbca2525 100644 --- a/CMakeModules/oe_unix.cmake +++ b/CMakeModules/oe_unix.cmake @@ -6,4 +6,6 @@ IF(UNIX AND NOT ANDROID) FIND_PACKAGE(X11) # Some Unicies need explicit linkage to the Math library or the build fails. FIND_LIBRARY(MATH_LIBRARY m) + # for ptheads in linux + find_package(Threads REQUIRED) ENDIF(UNIX AND NOT ANDROID) diff --git a/src/applications/osgearth_bakefeaturetiles/osgearth_bakefeaturetiles.cpp b/src/applications/osgearth_bakefeaturetiles/osgearth_bakefeaturetiles.cpp index e68d889b7a..60bd9647a4 100644 --- a/src/applications/osgearth_bakefeaturetiles/osgearth_bakefeaturetiles.cpp +++ b/src/applications/osgearth_bakefeaturetiles/osgearth_bakefeaturetiles.cpp @@ -339,7 +339,7 @@ struct ProgressReporter : public osgEarth::ProgressCallback unsigned totalStages, const std::string& msg) { - ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); if (_first) { @@ -388,7 +388,7 @@ struct ProgressReporter : public osgEarth::ProgressCallback return false; } - Threading::Mutex _mutex; + std::mutex _mutex; bool _first; osg::Timer_t _start; }; diff --git a/src/applications/osgearth_clamp/osgearth_clamp.cpp b/src/applications/osgearth_clamp/osgearth_clamp.cpp index cadaeb20f2..bcfdc93b10 100644 --- a/src/applications/osgearth_clamp/osgearth_clamp.cpp +++ b/src/applications/osgearth_clamp/osgearth_clamp.cpp @@ -56,7 +56,6 @@ struct App osg::ref_ptr input; osg::ref_ptr output; Threading::Mutexed > outputQueue; - Threading::Event gate; std::string attrName; bool verbose; diff --git a/src/applications/osgearth_collecttriangles/osgearth_collecttriangles.cpp b/src/applications/osgearth_collecttriangles/osgearth_collecttriangles.cpp index 08e91656ff..1f99b26ac0 100644 --- a/src/applications/osgearth_collecttriangles/osgearth_collecttriangles.cpp +++ b/src/applications/osgearth_collecttriangles/osgearth_collecttriangles.cpp @@ -485,11 +485,12 @@ void computeIntersectionsThreaded(osg::Node* node, std::vector< IntersectionQuer } } } - - JobArena::get("oe.intersections")->setConcurrency(num_threads); + + auto pool = jobs::get_pool("oe.intersections"); + pool->set_concurrency(num_threads); // Poor man's parallel for - JobGroup intersections; + jobs::jobgroup intersections; //unsigned int workSize = 500; // Try to split the jobs evenly among the threads @@ -505,12 +506,16 @@ void computeIntersectionsThreaded(osg::Node* node, std::vector< IntersectionQuer unsigned int curSize = curStart + workSize <= queries.size() ? 
workSize : queries.size() - curStart; if (curSize > 0) { - Job job; - job.setArena("oe.intersections"); - job.setGroup(&intersections); - job.dispatch_and_forget([node, curStart, curSize, &queries](Cancelable*) { + jobs::context context; + context.pool = pool; + context.group = &intersections; + + jobs::dispatch([node, curStart, curSize, &queries](Cancelable&) { computeIntersections(node, queries, curStart, curSize); - }); + return true; + }, + context + ); ++numJobs; } start += workSize; diff --git a/src/applications/osgearth_conv/osgearth_conv.cpp b/src/applications/osgearth_conv/osgearth_conv.cpp index 807cf7fbc8..640ec78975 100644 --- a/src/applications/osgearth_conv/osgearth_conv.cpp +++ b/src/applications/osgearth_conv/osgearth_conv.cpp @@ -213,7 +213,7 @@ struct ProgressReporter : public osgEarth::ProgressCallback unsigned totalStages, const std::string& msg ) { - ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); if (_first) { @@ -278,7 +278,7 @@ struct ProgressReporter : public osgEarth::ProgressCallback return false; } - Threading::Mutex _mutex; + std::mutex _mutex; bool _first; osg::Timer_t _start; }; diff --git a/src/applications/osgearth_exportvegetation/osgearth_exportvegetation.cpp b/src/applications/osgearth_exportvegetation/osgearth_exportvegetation.cpp index 616472bae7..a7eb63f644 100644 --- a/src/applications/osgearth_exportvegetation/osgearth_exportvegetation.cpp +++ b/src/applications/osgearth_exportvegetation/osgearth_exportvegetation.cpp @@ -172,17 +172,11 @@ main(int argc, char** argv) if (keys.empty()) return usage(argv[0], "No data in extent"); - JobArena arena("Vegetation Export", 1u); - std::cout << "Exporting " << keys.size() << " keys.." << std::endl; for(const auto key : keys) { - Job(&arena).dispatch_and_forget([&app, key](Cancelable*) - { - app.exportKey(key); - } - ); + jobs::dispatch([&app, key]() { app.exportKey(key); }); } diff --git a/src/applications/osgearth_features/osgearth_features.cpp b/src/applications/osgearth_features/osgearth_features.cpp index 81b88c49e2..8f88daa9af 100644 --- a/src/applications/osgearth_features/osgearth_features.cpp +++ b/src/applications/osgearth_features/osgearth_features.cpp @@ -41,7 +41,7 @@ using namespace osgEarth::Util; int usage( const std::string& app ) { - OE_NOTICE "\n" << app << "\n" + OE_NOTICE << "\n" << app << "\n" << " --rasterize : draw features as rasterized image tiles \n" << " --drape : draw features as projected texture \n" << " --clamp : draw features using shader clamping \n" diff --git a/src/applications/osgearth_imposterbaker/osgearth_imposterbaker.cpp b/src/applications/osgearth_imposterbaker/osgearth_imposterbaker.cpp index 22ee238f2a..d0053a5b2d 100644 --- a/src/applications/osgearth_imposterbaker/osgearth_imposterbaker.cpp +++ b/src/applications/osgearth_imposterbaker/osgearth_imposterbaker.cpp @@ -41,8 +41,8 @@ using namespace osgEarth; int fail(const std::string& msg, char** argv) { - OE_WARN LC << msg << std::endl; - OE_WARN LC << argv[0] + OE_WARN << LC << msg << std::endl; + OE_WARN << LC << argv[0] << "\n --in ; model to process" << "\n --out ; output texture filename" << "\n --size ; dimension of texture" diff --git a/src/osgEarth/AnnotationRegistry.cpp b/src/osgEarth/AnnotationRegistry.cpp index d11a7acd7e..a8d18edd95 100644 --- a/src/osgEarth/AnnotationRegistry.cpp +++ b/src/osgEarth/AnnotationRegistry.cpp @@ -62,12 +62,12 @@ AnnotationRegistry::instance() { // OK to be in the local scope since this gets called at static init time // by the OSGEARTH_REGISTER_ANNOTATION macro 
- static AnnotationRegistry* s_singleton =0L; - static Threading::Mutex s_singletonMutex(OE_MUTEX_NAME); + static AnnotationRegistry* s_singleton = nullptr; + static std::mutex s_singletonMutex; if ( !s_singleton ) { - Threading::ScopedMutexLock lock(s_singletonMutex); + std::lock_guard lock(s_singletonMutex); if ( !s_singleton ) { s_singleton = new AnnotationRegistry(); diff --git a/src/osgEarth/CMakeLists.txt b/src/osgEarth/CMakeLists.txt index 3c9808a02a..294919d0d5 100644 --- a/src/osgEarth/CMakeLists.txt +++ b/src/osgEarth/CMakeLists.txt @@ -456,12 +456,11 @@ SET(LIB_PUBLIC_HEADERS TrackNode WindLayer TerrainLayer + SDF rtree.h - - FileGDBFeatureSource - - SDF + weemesh.h + weejobs.h ${OSGEARTH_VERSION_HEADER} ) @@ -495,7 +494,6 @@ IF (NOT TINYXML_FOUND) ${LIB_PUBLIC_HEADERS} tinystr.h tinyxml.h - weemesh.h ) ENDIF (NOT TINYXML_FOUND) @@ -859,7 +857,7 @@ set(TARGET_SRC SDF.cpp FileGDBFeatureSource.cpp - + ${SHADERS_CPP} ) @@ -978,6 +976,10 @@ IF (TRACY_FOUND) LINK_WITH_VARIABLES(${LIB_NAME} TRACY_LIBRARY) ENDIF(TRACY_FOUND) +if(UNIX) + target_link_libraries(${LIB_NAME} PUBLIC Threads::Threads) +endif() + OPTION(NRL_STATIC_LIBRARIES "Link osgEarth against static GDAL and cURL, including static OpenSSL, Proj4, JPEG, PNG, and TIFF." OFF) if(NOT NRL_STATIC_LIBRARIES) LINK_WITH_VARIABLES(${LIB_NAME} OSG_LIBRARY OSGUTIL_LIBRARY OSGSIM_LIBRARY OSGDB_LIBRARY OSGVIEWER_LIBRARY OSGTEXT_LIBRARY OSGGA_LIBRARY OSGSHADOW_LIBRARY CURL_LIBRARY GDAL_LIBRARY OSGMANIPULATOR_LIBRARY) diff --git a/src/osgEarth/Cache.cpp b/src/osgEarth/Cache.cpp index 0531a3506d..c028a247b7 100644 --- a/src/osgEarth/Cache.cpp +++ b/src/osgEarth/Cache.cpp @@ -129,9 +129,8 @@ CacheSettings::toString() const //------------------------------------------------------------------------ -Cache::Cache( const CacheOptions& options ) : -_options( options ), -_bins("OE.Cache.bins") +Cache::Cache(const CacheOptions& options) : + _options(options) { //nop } @@ -140,9 +139,8 @@ Cache::~Cache() { } -Cache::Cache( const Cache& rhs, const osg::CopyOp& op ) : -osg::Object( rhs, op ), -_bins("OE.Cache.bins") +Cache::Cache(const Cache& rhs, const osg::CopyOp& op) : + osg::Object(rhs, op) { _status = rhs._status; } diff --git a/src/osgEarth/CacheBin.cpp b/src/osgEarth/CacheBin.cpp index 5ea2151e00..19ec706129 100644 --- a/src/osgEarth/CacheBin.cpp +++ b/src/osgEarth/CacheBin.cpp @@ -247,7 +247,7 @@ namespace }; - Threading::Gate WriteExternalReferencesToCache::_imageGate(OE_MUTEX_NAME); + Threading::Gate WriteExternalReferencesToCache::_imageGate; } diff --git a/src/osgEarth/CascadeDrapingDecorator.cpp b/src/osgEarth/CascadeDrapingDecorator.cpp index e5fe1ac556..7295b9ca08 100644 --- a/src/osgEarth/CascadeDrapingDecorator.cpp +++ b/src/osgEarth/CascadeDrapingDecorator.cpp @@ -324,8 +324,8 @@ CascadeDrapingDecorator::reserveTextureImageUnit() { if (_unit < 0) { - static Threading::Mutex mutex(OE_MUTEX_NAME); - Threading::ScopedMutexLock lock(mutex); + static std::mutex mutex; + std::lock_guard lock(mutex); osg::ref_ptr tr; if (_unit < 0 && _resources.lock(tr)) diff --git a/src/osgEarth/Chonk.cpp b/src/osgEarth/Chonk.cpp index 6153145608..7ea1a00043 100644 --- a/src/osgEarth/Chonk.cpp +++ b/src/osgEarth/Chonk.cpp @@ -800,7 +800,7 @@ ChonkDrawable::installRenderBin(ChonkDrawable* d) static osg::ref_ptr s_vp; static Mutex s_mutex; - ScopedMutexLock lock(s_mutex); + std::lock_guard lock(s_mutex); auto& ss = s_stateSets[d->getRenderBinNumber()]; if (!ss.valid()) @@ -860,7 +860,7 @@ ChonkDrawable::add(Chonk::Ptr chonk, const osg::Matrixf& xform, const 
osg::Vec2f { if (chonk) { - ScopedMutexLock lock(_m); + std::lock_guard lock(_m); Instance instance; instance.xform = xform; @@ -896,7 +896,7 @@ ChonkDrawable::update_and_cull_batches(osg::State& state) const // if something changed, we need to refresh the GPU tables. if (globjects._dirty) { - ScopedMutexLock lock(_m); + std::lock_guard lock(_m); globjects._gpucull = _gpucull; globjects.update(_batches, this, _fadeNear, _fadeFar, _birthday, _alphaCutoff, state); } @@ -918,7 +918,7 @@ ChonkDrawable::draw_batches(osg::State& state) const osg::BoundingBox ChonkDrawable::computeBoundingBox() const { - ScopedMutexLock lock(_m); + std::lock_guard lock(_m); osg::BoundingBox result; @@ -974,7 +974,7 @@ ChonkDrawable::refreshProxy() const { if (_proxy_dirty) { - ScopedMutexLock lock(_m); + std::lock_guard lock(_m); _proxy_verts.clear(); _proxy_indices.clear(); @@ -1354,7 +1354,7 @@ ChonkRenderBin::ChonkRenderBin(const ChonkRenderBin& rhs, const osg::CopyOp& op) if (!_cullSS.valid()) { static Mutex m; - ScopedMutexLock lock(m); + std::lock_guard lock(m); auto proto = static_cast(getRenderBinPrototype("ChonkBin")); if (!proto->_cullSS.valid()) diff --git a/src/osgEarth/Clamping.cpp b/src/osgEarth/Clamping.cpp index 56d8a87f15..df64464dd5 100644 --- a/src/osgEarth/Clamping.cpp +++ b/src/osgEarth/Clamping.cpp @@ -268,8 +268,7 @@ ClampingCullSet::accept(osg::NodeVisitor& nv) } } -ClampingManager::ClampingManager() : - _sets(OE_MUTEX_NAME) +ClampingManager::ClampingManager() { //nop } diff --git a/src/osgEarth/ColorFilter.cpp b/src/osgEarth/ColorFilter.cpp index 00a7fdbcad..4ea574a0c7 100644 --- a/src/osgEarth/ColorFilter.cpp +++ b/src/osgEarth/ColorFilter.cpp @@ -26,12 +26,12 @@ ColorFilterRegistry::instance() { // OK to be in the local scope since this gets called at static init time // by the OSGEARTH_REGISTER_COLORFILTER macro - static ColorFilterRegistry* s_singleton =0L; - static Threading::Mutex s_singletonMutex(OE_MUTEX_NAME); + static ColorFilterRegistry* s_singleton = nullptr; + static std::mutex s_singletonMutex; if ( !s_singleton ) { - Threading::ScopedMutexLock lock(s_singletonMutex); + std::lock_guard lock(s_singletonMutex); if ( !s_singleton ) { s_singleton = new ColorFilterRegistry(); diff --git a/src/osgEarth/Common b/src/osgEarth/Common index 4223c7aa45..64ae8138c1 100644 --- a/src/osgEarth/Common +++ b/src/osgEarth/Common @@ -20,6 +20,7 @@ #define OSGEARTH_COMMON_H 1 #include +#include #include #include #include diff --git a/src/osgEarth/Containers b/src/osgEarth/Containers index 368745dcb2..8330507597 100644 --- a/src/osgEarth/Containers +++ b/src/osgEarth/Containers @@ -32,6 +32,7 @@ #include #include #include +#include namespace osgEarth { namespace Util { @@ -238,16 +239,16 @@ namespace osgEarth { namespace Util unsigned _buf; unsigned _queries; unsigned _hits; - bool _threadsafe; - mutable Threading::Mutex _mutex; + bool _threadsafe; + mutable std::mutex _mutex; public: - LRUCache( unsigned max =100 ) : _max(max), _threadsafe(false), _mutex("LRUCache(OE)") { + LRUCache( unsigned max =100 ) : _max(max), _threadsafe(false) { _queries = 0; _hits = 0; setMaxSize_impl(max); } - LRUCache( bool threadsafe, unsigned max =100 ) : _max(max), _threadsafe(threadsafe), _mutex("LRUCache(OE)") { + LRUCache( bool threadsafe, unsigned max =100 ) : _max(max), _threadsafe(threadsafe) { _queries = 0; _hits = 0; setMaxSize_impl(max); @@ -258,7 +259,7 @@ namespace osgEarth { namespace Util void insert( const K& key, const T& value ) { if ( _threadsafe ) { - Threading::ScopedMutexLock lock(_mutex); + 
std::lock_guard lock(_mutex); insert_impl( key, value ); } else { @@ -268,7 +269,7 @@ namespace osgEarth { namespace Util bool get( const K& key, Record& out ) { if ( _threadsafe ) { - Threading::ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); get_impl( key, out ); } else { @@ -279,7 +280,7 @@ namespace osgEarth { namespace Util bool has( const K& key ) { if ( _threadsafe ) { - Threading::ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); return has_impl( key ); } else { @@ -289,7 +290,7 @@ namespace osgEarth { namespace Util void erase( const K& key ) { if ( _threadsafe ) { - Threading::ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); erase_impl( key ); } else { @@ -299,7 +300,7 @@ namespace osgEarth { namespace Util void clear() { if ( _threadsafe ) { - Threading::ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); clear_impl(); } else { @@ -309,7 +310,7 @@ namespace osgEarth { namespace Util void setMaxSize( unsigned max ) { if ( _threadsafe ) { - Threading::ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); setMaxSize_impl( max ); } else { @@ -328,7 +329,7 @@ namespace osgEarth { namespace Util void forEach(const Functor& functor) const { if (_threadsafe) { - Threading::ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); iterate_impl(functor); } else { @@ -564,23 +565,21 @@ namespace osgEarth { namespace Util /** Template for per-thread data storage */ template - struct PerThread : public Threading::Mutex + struct PerThread : public std::mutex { - PerThread() : Threading::Mutex() { } - - PerThread(const std::string& name) : Threading::Mutex(name) { } + PerThread() : std::mutex() { } T& get() { - Threading::ScopedMutexLock lock(*this); - return _data[Threading::getCurrentThreadId()]; + std::lock_guard lock(*this); + return _data[std::this_thread::get_id()]; } void clear() { - Threading::ScopedMutexLock lock(*this); + std::lock_guard lock(*this); _data.clear(); } - typedef typename std::unordered_map container_t; + typedef typename std::unordered_map container_t; typedef typename container_t::iterator iterator; // NB. lock before using these! 
@@ -597,7 +596,6 @@ namespace osgEarth { namespace Util struct PerObjectMap { PerObjectMap() { } - PerObjectMap(const std::string& name) : _mutex(name) { } bool threadsafe = true; DATA& get(KEY k) @@ -647,7 +645,7 @@ namespace osgEarth { namespace Util DATA& get(KEY k) { - osgEarth::Threading::ScopedLockIf lock(_mutex, threadsafe); + osgEarth::Threading::scoped_lock_if lock(_mutex, threadsafe); typename std::unordered_map::iterator i = _data.find(k); if ( i != _data.end() ) return i->second; @@ -657,53 +655,53 @@ namespace osgEarth { namespace Util void remove(KEY k) { - osgEarth::Threading::ScopedLockIf lock(_mutex, threadsafe); + osgEarth::Threading::scoped_lock_if lock(_mutex, threadsafe); _data.erase( k ); } void forEach(Functor& functor) { - osgEarth::Threading::ScopedLockIf lock(_mutex, threadsafe); + osgEarth::Threading::scoped_lock_if lock(_mutex, threadsafe); for (typename std::unordered_map::iterator i = _data.begin(); i != _data.end(); ++i) functor.operator()(i->second); } void forEach(std::function functor) { - osgEarth::Threading::ScopedLockIf lock(_mutex, threadsafe); + osgEarth::Threading::scoped_lock_if lock(_mutex, threadsafe); for (auto& entry : _data) functor(entry.second); } void forEach(ConstFunctor& functor) const { - osgEarth::Threading::ScopedLockIf lock(_mutex, threadsafe); + osgEarth::Threading::scoped_lock_if lock(_mutex, threadsafe); for (typename std::unordered_map::const_iterator i = _data.begin(); i != _data.end(); ++i) functor.operator()(i->second); } void forEach(std::function functor) const { - osgEarth::Threading::ScopedLockIf lock(_mutex, threadsafe); + osgEarth::Threading::scoped_lock_if lock(_mutex, threadsafe); for (auto& entry : _data) functor(entry.second); } unsigned size() const { - osgEarth::Threading::ScopedLockIf lock(_mutex, threadsafe); + osgEarth::Threading::scoped_lock_if lock(_mutex, threadsafe); return _data.size(); } void clear() { - osgEarth::Threading::ScopedLockIf lock(_mutex, threadsafe); + osgEarth::Threading::scoped_lock_if lock(_mutex, threadsafe); _data.clear(); } private: std::unordered_map _data; - mutable osgEarth::Threading::Mutex _mutex; + mutable std::mutex _mutex; }; /** Template for thread safe per-object data storage */ @@ -712,8 +710,6 @@ namespace osgEarth { namespace Util { PerObjectRefMap() { } - PerObjectRefMap(const std::string& name) : _mutex(name) { } - DATA* get(KEY k) { osgEarth::Threading::ScopedReadLock lock(_mutex); @@ -838,24 +834,24 @@ namespace osgEarth { namespace Util public: //! Push a new item safely void push(const T& t) { - osgEarth::Threading::ScopedMutexLock lock(_m); + std::lock_guard lock(_m); _queue.push(t); } //! Remove the front item safely void pop() { - osgEarth::Threading::ScopedMutexLock lock(_m); + std::lock_guard lock(_m); _queue.pop(); } //! Safely return a copy of the item at the head of //! the queue T front() { - osgEarth::Threading::ScopedMutexLock lock(_m); + std::lock_guard lock(_m); return _queue.empty() ? _default : _queue.front(); } //! Safely remove the item at the head of the queue //! and return it T take_front() { - osgEarth::Threading::ScopedMutexLock lock(_m); + std::lock_guard lock(_m); T t = _queue.empty() ? _default : _queue.front(); if (_queue.empty() == false) _queue.pop(); @@ -863,7 +859,7 @@ namespace osgEarth { namespace Util } //! Safely clear the queue void clear() { - osgEarth::Threading::ScopedMutexLock lock(_m); + std::lock_guard lock(_m); _queue.swap(std::queue()); } //! 
True if the queue is empty (but only at the @@ -874,7 +870,7 @@ namespace osgEarth { namespace Util private: std::queue _queue; - mutable osgEarth::Threading::Mutex _m; + mutable std::mutex _m; T _default; }; diff --git a/src/osgEarth/Controls.cpp b/src/osgEarth/Controls.cpp index e4e255bc1f..920d6cfd75 100755 --- a/src/osgEarth/Controls.cpp +++ b/src/osgEarth/Controls.cpp @@ -190,8 +190,8 @@ Control::getGeomStateSet() osg::ref_ptr stateSet; if (s_geomStateSet.lock(stateSet) == false) { - static Threading::Mutex m(OE_MUTEX_NAME); - Threading::ScopedMutexLock lock(m); + static std::mutex m; + std::lock_guard lock(m); if (s_geomStateSet.lock(stateSet) == false) { s_geomStateSet = stateSet = new osg::StateSet(); @@ -1054,8 +1054,8 @@ ImageControl::getImageStateSet() osg::ref_ptr stateSet; if (s_imageStateSet.lock(stateSet) == false) { - static Threading::Mutex m(OE_MUTEX_NAME); - Threading::ScopedMutexLock lock(m); + static std::mutex m; + std::lock_guard lock(m); if (s_imageStateSet.lock(stateSet) == false) { s_imageStateSet = stateSet = new osg::StateSet(); diff --git a/src/osgEarth/Cube.cpp b/src/osgEarth/Cube.cpp index 39f4cbd885..8839afe729 100644 --- a/src/osgEarth/Cube.cpp +++ b/src/osgEarth/Cube.cpp @@ -16,8 +16,8 @@ * You should have received a copy of the GNU Lesser General Public License * along with this program. If not, see */ - -#include +#include "Cube" +#include "Notify" using namespace osgEarth; using namespace osgEarth::Util; diff --git a/src/osgEarth/CullingUtils.cpp b/src/osgEarth/CullingUtils.cpp index 94eea9a4b4..f9f43dabed 100644 --- a/src/osgEarth/CullingUtils.cpp +++ b/src/osgEarth/CullingUtils.cpp @@ -21,6 +21,7 @@ #include "VirtualProgram" #include "Utils" #include "Math" +#include "Notify" #include #include diff --git a/src/osgEarth/DepthOffset.cpp b/src/osgEarth/DepthOffset.cpp index e517df169a..440b4d2f7c 100644 --- a/src/osgEarth/DepthOffset.cpp +++ b/src/osgEarth/DepthOffset.cpp @@ -22,6 +22,7 @@ #include #include #include +#include "Notify" #include #include @@ -293,8 +294,8 @@ DepthOffsetGroup::computeBound() const { if ( _adapter.supported() ) { - static Threading::Mutex s_mutex(OE_MUTEX_NAME); - Threading::ScopedMutexLock lock(s_mutex); + static std::mutex s_mutex; + std::lock_guard lock(s_mutex); const_cast(this)->scheduleUpdate(); } diff --git a/src/osgEarth/DrapingCullSet b/src/osgEarth/DrapingCullSet index d07244e00d..bf77d63cd0 100644 --- a/src/osgEarth/DrapingCullSet +++ b/src/osgEarth/DrapingCullSet @@ -70,7 +70,7 @@ namespace osgEarth { namespace Util }; using SortedFrameData = std::map; SortedFrameData _data; - Threading::Mutex _data_mutex; + std::mutex _data_mutex; }; /** diff --git a/src/osgEarth/DrapingCullSet.cpp b/src/osgEarth/DrapingCullSet.cpp index 689b32c351..ca3f4e2671 100644 --- a/src/osgEarth/DrapingCullSet.cpp +++ b/src/osgEarth/DrapingCullSet.cpp @@ -29,7 +29,6 @@ using namespace osgEarth::Util; DrapingManager::DrapingManager() : - _sets(OE_MUTEX_NAME), _renderBinNum(1) { #ifdef OSGEARTH_SINGLE_THREADED_OSG diff --git a/src/osgEarth/DrapingTechnique.cpp b/src/osgEarth/DrapingTechnique.cpp index f7cf0510b9..ca3d701738 100644 --- a/src/osgEarth/DrapingTechnique.cpp +++ b/src/osgEarth/DrapingTechnique.cpp @@ -579,8 +579,8 @@ DrapingTechnique::preCullTerrain(OverlayDecorator::TechRTTParams& params, // allocate a texture image unit the first time through. 
if ( !_textureUnit.isSet() ) { - static Threading::Mutex m(OE_MUTEX_NAME); - Threading::ScopedMutexLock lock(m); + static std::mutex m; + std::lock_guard lock(m); if ( !_textureUnit.isSet() ) { diff --git a/src/osgEarth/Elevation.cpp b/src/osgEarth/Elevation.cpp index 7750e9dc82..1f6e0922f5 100644 --- a/src/osgEarth/Elevation.cpp +++ b/src/osgEarth/Elevation.cpp @@ -185,7 +185,7 @@ ElevationTexture::generateNormalMap( void* workingSet, ProgressCallback* progress) { - ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); if (!_normalTex.valid()) { diff --git a/src/osgEarth/ElevationLayer.cpp b/src/osgEarth/ElevationLayer.cpp index 2889cf5c95..5cacd71e97 100644 --- a/src/osgEarth/ElevationLayer.cpp +++ b/src/osgEarth/ElevationLayer.cpp @@ -104,8 +104,6 @@ ElevationLayer::init() // open and visible are the same thing for elevation layers _visibleTiedToOpen = true; - _sentry.setName("ElevationLayer " + getName()); - // override with a different default tile size since elevation // tiles need overlapping edges if (!options().tileSize().isSet()) @@ -1060,8 +1058,8 @@ ElevationLayerVector::populateHeightField( #ifdef ANALYZE { - static Threading::Mutex m; - Threading::ScopedMutexLock lock(m); + static std::mutex m; + std::lock_guard lock(m); std::cout << key.str() << ": "; for (std::map::const_iterator i = layerAnalysis.begin(); i != layerAnalysis.end(); ++i) diff --git a/src/osgEarth/ElevationPool b/src/osgEarth/ElevationPool index 3ba0047ed7..5cead8ce3c 100644 --- a/src/osgEarth/ElevationPool +++ b/src/osgEarth/ElevationPool @@ -308,14 +308,14 @@ namespace osgEarth //! Sample elevation at a point at highest available resolution //! @param p Point at which to sample terrain elevation //! @return Future result of the sample - Future getSample( + jobs::future getSample( const GeoPoint& p); //! Sample elevation at a point and a target resolution //! @param p Point at which to sample terrain elevation //! @param resolution Resolution at which to sample terrain //! 
@return Future result of the sample - Future getSample( + jobs::future getSample( const GeoPoint& p, const Distance& resolution); @@ -323,7 +323,7 @@ namespace osgEarth osg::observer_ptr _map; ElevationPool::WorkingSet _ws; - JobArena* _arena; + jobs::jobpool* _arena; }; } // namespace diff --git a/src/osgEarth/ElevationPool.cpp b/src/osgEarth/ElevationPool.cpp index 52c8d2f03f..d8f1c1887b 100644 --- a/src/osgEarth/ElevationPool.cpp +++ b/src/osgEarth/ElevationPool.cpp @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -41,7 +42,7 @@ ElevationPool::StrongLRU::StrongLRU(unsigned maxSize) : void ElevationPool::StrongLRU::push(ElevationPool::Pointer& p) { - ScopedMutexLock lock(_lru); + std::lock_guard lock(_lru.mutex()); _lru.push(p); if (_lru.size() > (unsigned)((1.5f*(float)_maxSize))) { @@ -53,7 +54,7 @@ ElevationPool::StrongLRU::push(ElevationPool::Pointer& p) void ElevationPool::StrongLRU::clear() { - ScopedMutexLock lock(_lru); + std::lock_guard lock(_lru.mutex()); while(!_lru.empty()) _lru.pop(); } @@ -61,13 +62,10 @@ ElevationPool::StrongLRU::clear() ElevationPool::ElevationPool() : _index(nullptr), _tileSize(257), - _mutex("OE.ElevPool.RM"), - _globalLUTMutex("OE.ElevPool.GLUT"), _L2(64u), _mapRevision(-1), _elevationHash(0) { - _L2._lru.setName("OE.ElevPool.LRU"); } namespace @@ -219,7 +217,6 @@ ElevationPool::WorkingSet::WorkingSet(unsigned size) : _lru(size) { //nop - _lru._lru.setName("OE.WorkingSet.LRU"); } void @@ -1018,7 +1015,7 @@ namespace osgEarth { namespace Internal GeoPoint _p; Distance _res; ElevationPool::WorkingSet* _ws; - Promise _promise; + jobs::promise _promise; SampleElevationOp(osg::observer_ptr map, const GeoPoint& p, const Distance& res, ElevationPool::WorkingSet* ws) : _map(map), _p(p), _res(res), _ws(ws) { } @@ -1048,8 +1045,8 @@ AsyncElevationSampler::AsyncElevationSampler( _map(map), _arena(nullptr) { - _arena = JobArena::get("oe.asyncelevation"); - _arena->setConcurrency(numThreads > 0 ? numThreads : _arena->getConcurrency()); + _arena = jobs::get_pool("oe.asyncelevation"); + _arena->set_concurrency(numThreads > 0 ? 
numThreads : _arena->concurrency()); } Future @@ -1061,15 +1058,18 @@ AsyncElevationSampler::getSample(const GeoPoint& p) Future AsyncElevationSampler::getSample(const GeoPoint& point, const Distance& resolution) { - return Job(_arena).dispatch([=](Cancelable* cancelable) + jobs::context c; + c.pool = _arena; + + auto task = [=](Cancelable& cancelable) { ElevationSample sample; - if (cancelable == nullptr || !cancelable->isCanceled()) + if (!cancelable.canceled()) { osg::ref_ptr map(_map); if (map.valid()) { - osg::ref_ptr progress = new ProgressCallback(cancelable); + osg::ref_ptr progress = new ProgressCallback(&cancelable); sample = map->getElevationPool()->getSample( point, @@ -1079,6 +1079,7 @@ AsyncElevationSampler::getSample(const GeoPoint& point, const Distance& resoluti } } return sample; - } - ); + }; + + return jobs::dispatch(task, c); } diff --git a/src/osgEarth/FeatureModelGraph.cpp b/src/osgEarth/FeatureModelGraph.cpp index 44033fee13..375c54c18b 100644 --- a/src/osgEarth/FeatureModelGraph.cpp +++ b/src/osgEarth/FeatureModelGraph.cpp @@ -408,7 +408,6 @@ FeatureModelGraph::FeatureModelGraph(const FeatureModelOptions& options) : _options(options), _featureExtentClamped(false), _useTiledSource(false), - _blacklistMutex("FMG BlackList(OE)"), _isActive(false), loadedTiles(std::make_shared(0)) { diff --git a/src/osgEarth/FeatureSource b/src/osgEarth/FeatureSource index cb4c5a6846..b6703b3359 100644 --- a/src/osgEarth/FeatureSource +++ b/src/osgEarth/FeatureSource @@ -273,7 +273,7 @@ namespace osgEarth typedef LRUCache FeaturesLRU; std::unique_ptr< FeaturesLRU > _featuresCache; - Threading::Mutex _featuresCacheMutex; + std::mutex _featuresCacheMutex; //! Implements the feature cursor creation virtual FeatureCursor* createFeatureCursorImplementation( diff --git a/src/osgEarth/FeatureSource.cpp b/src/osgEarth/FeatureSource.cpp index 941e7aebcd..82362fc52d 100644 --- a/src/osgEarth/FeatureSource.cpp +++ b/src/osgEarth/FeatureSource.cpp @@ -95,9 +95,7 @@ void FeatureSource::init() { Layer::init(); - _blacklistMutex.setName("FeatureSource(OE).blacklist " + getName()); _blacklistSize = 0u; - _featuresCacheMutex.setName("FeatureSource(OE).featuresCache " + getName()); } Status @@ -270,7 +268,7 @@ FeatureSource::createFeatureCursor( { FeaturesLRU::Record result; { - ScopedMutexLock lk(_featuresCacheMutex); + std::lock_guard lk(_featuresCacheMutex); _featuresCache->get(*query.tileKey(), result); } if (result.valid()) @@ -304,7 +302,7 @@ FeatureSource::createFeatureCursor( return osg::clone(feature.get(), osg::CopyOp::DEEP_COPY_ALL); }); { - ScopedMutexLock lk(_featuresCacheMutex); + std::lock_guard lk(_featuresCacheMutex); _featuresCache->insert(*query.tileKey(), copy); } #else diff --git a/src/osgEarth/FeatureSourceIndexNode b/src/osgEarth/FeatureSourceIndexNode index 43cd4ec566..d3d5dade31 100644 --- a/src/osgEarth/FeatureSourceIndexNode +++ b/src/osgEarth/FeatureSourceIndexNode @@ -100,7 +100,7 @@ namespace osgEarth template void removeFIDs(InputIter first, InputIter last) { - Threading::ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); for(InputIter fid = first; fid != last; ++fid ) { FID_to_RefIDPair::iterator f = _fids.find( *fid ); @@ -132,7 +132,7 @@ namespace osgEarth FeatureSourceIndexOptions _options; bool _embed; - mutable Threading::Mutex _mutex; + mutable std::mutex _mutex; OID_to_FID _oids; FID_to_RefIDPair _fids; diff --git a/src/osgEarth/FeatureSourceIndexNode.cpp b/src/osgEarth/FeatureSourceIndexNode.cpp index a7edab61d4..313402976c 100644 --- 
a/src/osgEarth/FeatureSourceIndexNode.cpp +++ b/src/osgEarth/FeatureSourceIndexNode.cpp @@ -345,13 +345,10 @@ namespace osgEarth { namespace Serializers { namespace FeatureSourceIndexNodeCla #undef LC #define LC "[FeatureSourceIndex] " -FeatureSourceIndex::FeatureSourceIndex(FeatureSource* featureSource, - ObjectIndex* index, - const FeatureSourceIndexOptions& options) : -_featureSource ( featureSource ), -_masterIndex ( index ), -_options ( options ), -_mutex( "FeatureSourceIndex(OE)" ) +FeatureSourceIndex::FeatureSourceIndex(FeatureSource* featureSource, ObjectIndex* index, const FeatureSourceIndexOptions& options) : + _featureSource(featureSource), + _masterIndex(index), + _options(options) { _embed = _options.embedFeatures() == true || @@ -377,7 +374,7 @@ FeatureSourceIndex::tagDrawable(osg::Drawable* drawable, Feature* feature) { if ( !feature ) return 0L; - Threading::ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); RefIDPair* p = 0L; FeatureID fid = feature->getFID(); @@ -410,7 +407,7 @@ FeatureSourceIndex::tagAllDrawables(osg::Node* node, Feature* feature) { if ( !feature ) return 0L; - Threading::ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); RefIDPair* p = 0L; FeatureID fid = feature->getFID(); @@ -443,7 +440,7 @@ FeatureSourceIndex::tagRange(osg::Drawable* drawable, Feature* feature, unsigned { if (!feature) return 0L; - Threading::ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); RefIDPair* p = 0L; FeatureID fid = feature->getFID(); @@ -476,7 +473,7 @@ FeatureSourceIndex::tagNode(osg::Node* node, Feature* feature) { if ( !feature ) return 0L; - Threading::ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); RefIDPair* p = 0L; FeatureID fid = feature->getFID(); @@ -511,7 +508,7 @@ Feature* FeatureSourceIndex::getFeature(ObjectID oid) const { Feature* feature = 0L; - Threading::ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); OID_to_FID::const_iterator i = _oids.find( oid ); if ( i != _oids.end() ) { @@ -533,7 +530,7 @@ FeatureSourceIndex::getFeature(ObjectID oid) const ObjectID FeatureSourceIndex::getObjectID(FeatureID fid) const { - Threading::ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); FID_to_RefIDPair::const_iterator i = _fids.find(fid); if ( i != _fids.end() ) return i->second->_oid; diff --git a/src/osgEarth/FileUtils.cpp b/src/osgEarth/FileUtils.cpp index 13300eeae8..206d841c0e 100644 --- a/src/osgEarth/FileUtils.cpp +++ b/src/osgEarth/FileUtils.cpp @@ -21,6 +21,7 @@ #include #include #include +#include "Notify" #include #include #include @@ -437,11 +438,10 @@ osgEarth::Util::getTempName(const std::string& prefix, const std::string& suffix while (true) { std::stringstream ss; - ss << prefix << "~" << osgEarth::Threading::getCurrentThreadId() << "_" << rand() << suffix; + ss << prefix << "~" << std::this_thread::get_id() << "_" << rand() << suffix; if (!osgDB::fileExists(ss.str())) return ss.str(); } -// return ""; } bool diff --git a/src/osgEarth/Filter.cpp b/src/osgEarth/Filter.cpp index 36ae8318c2..924f387ff0 100644 --- a/src/osgEarth/Filter.cpp +++ b/src/osgEarth/Filter.cpp @@ -93,11 +93,11 @@ FeatureFilterRegistry::instance() // OK to be in the local scope since this gets called at static init time // by the OSGEARTH_REGISTER_ANNOTATION macro static FeatureFilterRegistry* s_singleton =0L; - static Threading::Mutex s_singletonMutex(OE_MUTEX_NAME); + static std::mutex s_singletonMutex; if ( !s_singleton ) { - Threading::ScopedMutexLock lock(s_singletonMutex); + std::lock_guard 
lock(s_singletonMutex); if ( !s_singleton ) { s_singleton = new FeatureFilterRegistry(); diff --git a/src/osgEarth/FlatteningLayer b/src/osgEarth/FlatteningLayer index 420f772b8a..9785d01d10 100644 --- a/src/osgEarth/FlatteningLayer +++ b/src/osgEarth/FlatteningLayer @@ -113,7 +113,7 @@ namespace osgEarth { namespace Contrib typedef LRUCache FeaturesLRU; FeaturesLRU _featuresCache; - Threading::Mutex _featuresCacheMutex; + std::mutex _featuresCacheMutex; osg::ref_ptr _filterChain; }; diff --git a/src/osgEarth/FlatteningLayer.cpp b/src/osgEarth/FlatteningLayer.cpp index b6068373e0..d2ac0d56bd 100644 --- a/src/osgEarth/FlatteningLayer.cpp +++ b/src/osgEarth/FlatteningLayer.cpp @@ -920,7 +920,7 @@ FlatteningLayer::removedFromMap(const Map* map) FeatureList FlatteningLayer::getFeatures(const TileKey& key) { - OpenThreads::ScopedLock< Threading::Mutex > lk(_featuresCacheMutex); + std::lock_guard lk(_featuresCacheMutex); FeaturesLRU::Record result; _featuresCache.get(key, result); diff --git a/src/osgEarth/GDAL b/src/osgEarth/GDAL index 2f0398bac2..abd28f14d4 100644 --- a/src/osgEarth/GDAL +++ b/src/osgEarth/GDAL @@ -24,6 +24,7 @@ #include #include #include +#include /** * GDAL (Geospatial Data Abstraction Library) Layers @@ -162,7 +163,6 @@ namespace osgEarth { const GDAL::Options& gdalOptions() const { return _gdalOptions; } osg::ref_ptr _externalDataset; std::string _name; - unsigned _threadId; const std::string& getName() const { return _name; } }; @@ -182,9 +182,10 @@ namespace osgEarth { struct LayerBase { protected: - mutable Threading::Mutex _driversMutex; - mutable Threading::Mutex _singleThreadingMutex; - mutable std::unordered_map _drivers; + mutable Util::PerThread _driverPerThread; + mutable std::mutex _singleThreadingMutex; + mutable GDAL::Driver::Ptr _driverSingleThreaded = nullptr; + mutable Util::ReadWriteMutex _createCloseMutex; }; } } diff --git a/src/osgEarth/GDAL.cpp b/src/osgEarth/GDAL.cpp index 875b00adeb..50129b335f 100644 --- a/src/osgEarth/GDAL.cpp +++ b/src/osgEarth/GDAL.cpp @@ -36,6 +36,7 @@ #include #include +#include #include #include @@ -483,7 +484,7 @@ GDAL::Driver::Driver() : _maxDataLevel(30), _linearUnits(1.0) { - _threadId = osgEarth::Threading::getCurrentThreadId(); + //nop } GDAL::Driver::~Driver() @@ -493,7 +494,7 @@ GDAL::Driver::~Driver() else if (_srcDS) GDALClose(_srcDS); - OE_DEBUG << "Closed GDAL Driver on thread " << _threadId << std::endl; + OE_DEBUG << "Closed GDAL Driver on thread " << std::this_thread::get_id() << std::endl; } void @@ -1798,10 +1799,6 @@ GDALImageLayer::init() // Initialize the image layer ImageLayer::init(); - - _driversMutex.setName("OE.GDALImageLayer.drivers"); - _singleThreadingMutex.setName("OE.GDALImageLayer.st"); - } Status @@ -1811,8 +1808,6 @@ GDALImageLayer::openImplementation() if (parent.isError()) return parent; - unsigned id = getSingleThreaded() ? 0u : Threading::getCurrentThreadId(); - osg::ref_ptr profile; if (options().fallbackProfile().isSet()) @@ -1828,11 +1823,12 @@ GDALImageLayer::openImplementation() // So we just encapsulate the entire setup once per thread. // https://trac.osgeo.org/gdal/wiki/FAQMiscellaneous#IstheGDALlibrarythread-safe - ScopedMutexLock lock(_driversMutex); - - GDAL::Driver::Ptr& driver = _drivers[id]; + // Note: no need to mutex the _driverSingleThreaded instance since we are in open + // and open is single-threaded by definition. + GDAL::Driver::Ptr& driver = getSingleThreaded() ? 
_driverSingleThreaded : _driverPerThread.get(); DataExtentList dataExtents; + Status s = openOnThisThread( this, driver, @@ -1857,8 +1853,10 @@ Status GDALImageLayer::closeImplementation() { // safely shut down all per-thread handles. - Threading::ScopedMutexLock lock(_driversMutex); - _drivers.clear(); + Util::ScopedWriteLock unique_lock(_createCloseMutex); + _driverPerThread.clear(); + _driverSingleThreaded = nullptr; + return ImageLayer::closeImplementation(); } @@ -1868,19 +1866,17 @@ GDALImageLayer::createImageImplementation(const TileKey& key, ProgressCallback* if (getStatus().isError()) return GeoImage::INVALID; - unsigned id = getSingleThreaded() ? 0u : Threading::getCurrentThreadId(); + Util::ScopedReadLock shared_lock(_createCloseMutex); GDAL::Driver::Ptr driver; // lock while we look up and verify the per-thread driver: { - ScopedMutexLock lock(_driversMutex); - // check while locked to ensure we may continue if (isClosing() || !isOpen()) return GeoImage::INVALID; - GDAL::Driver::Ptr& test_driver = _drivers[id]; + GDAL::Driver::Ptr& test_driver = getSingleThreaded() ? _driverSingleThreaded : _driverPerThread.get(); if (test_driver == nullptr) { @@ -1898,8 +1894,8 @@ GDALImageLayer::createImageImplementation(const TileKey& key, ProgressCallback* { OE_PROFILING_ZONE; - if (getSingleThreaded()) - _singleThreadingMutex.lock(); + // serialize acccess if we're in single-threaded mode + Util::scoped_lock_if lock(_singleThreadingMutex, getSingleThreaded()); osg::ref_ptr image = driver->createImage( key, @@ -1907,9 +1903,6 @@ GDALImageLayer::createImageImplementation(const TileKey& key, ProgressCallback* options().coverage() == true, progress); - if (getSingleThreaded()) - _singleThreadingMutex.unlock(); - return GeoImage(image.get(), key.getExtent()); } @@ -1956,8 +1949,6 @@ void GDALElevationLayer::init() { ElevationLayer::init(); - _driversMutex.setName("OE.GDALElevationLayer.drivers"); - _singleThreadingMutex.setName("OE.GDALElevationLayer.st"); } Status @@ -1967,20 +1958,17 @@ GDALElevationLayer::openImplementation() if (parent.isError()) return parent; - unsigned id = getSingleThreaded() ? 0u : Threading::getCurrentThreadId(); - osg::ref_ptr profile; // GDAL thread-safety requirement: each thread requires a separate GDALDataSet. // So we just encapsulate the entire setup once per thread. // https://trac.osgeo.org/gdal/wiki/FAQMiscellaneous#IstheGDALlibrarythread-safe - ScopedMutexLock lock(_driversMutex); - // Open the dataset temporarily to query the profile and extents. - GDAL::Driver::Ptr driver; + GDAL::Driver::Ptr& driver = getSingleThreaded() ? _driverSingleThreaded : _driverPerThread.get(); DataExtentList dataExtents; + Status s = openOnThisThread( this, driver, @@ -2002,9 +1990,14 @@ GDALElevationLayer::openImplementation() Status GDALElevationLayer::closeImplementation() { - // safely shut down all per-thread handles. - Threading::ScopedMutexLock lock(_driversMutex); - _drivers.clear(); + // safely shut down all per-thread handles. The mutex prevents closing + // while the layer is working on a create call. + { + Util::ScopedWriteLock unique_lock(_createCloseMutex); + _driverPerThread.clear(); + _driverSingleThreaded = nullptr; + } + return ElevationLayer::closeImplementation(); } @@ -2014,19 +2007,17 @@ GDALElevationLayer::createHeightFieldImplementation(const TileKey& key, Progress if (getStatus().isError()) return GeoHeightField(getStatus()); - unsigned id = getSingleThreaded() ? 
0u : Threading::getCurrentThreadId(); + Util::ScopedReadLock shared_lock(_createCloseMutex); GDAL::Driver::Ptr driver; // lock while we look up and verify the per-thread driver: { - ScopedMutexLock lock(_driversMutex); - // check while locked to ensure we may continue if (isClosing() || !isOpen()) return GeoHeightField::INVALID; - GDAL::Driver::Ptr& test_driver = _drivers[id]; + GDAL::Driver::Ptr& test_driver = getSingleThreaded() ? _driverSingleThreaded : _driverPerThread.get(); if (test_driver == nullptr) { @@ -2044,8 +2035,7 @@ GDALElevationLayer::createHeightFieldImplementation(const TileKey& key, Progress { OE_PROFILING_ZONE; - if (getSingleThreaded()) - _singleThreadingMutex.lock(); + Util::scoped_lock_if lock(_singleThreadingMutex, getSingleThreaded()); osg::ref_ptr heightfield; @@ -2064,9 +2054,6 @@ GDALElevationLayer::createHeightFieldImplementation(const TileKey& key, Progress progress); } - if (getSingleThreaded()) - _singleThreadingMutex.unlock(); - return GeoHeightField(heightfield.get(), key.getExtent()); } diff --git a/src/osgEarth/GLUtils b/src/osgEarth/GLUtils index e65b9b87d5..241e369843 100644 --- a/src/osgEarth/GLUtils +++ b/src/osgEarth/GLUtils @@ -34,6 +34,7 @@ #include #include #include +#include #include #ifndef GLintptr @@ -602,7 +603,7 @@ namespace osgEarth template typename T::Ptr recycle(const GLObject::Compatible& compatible) { - ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); typename T::Ptr result; for (auto& object : _objects) { if (object.use_count() == 1 && compatible(object.get())) { @@ -807,8 +808,8 @@ namespace osgEarth template using Delegate = std::function& promise, - int invocation)>; + jobs::promise& promise, + int invocation)>; public: //! Gets the GL pipeline for a State @@ -819,7 +820,7 @@ namespace osgEarth template struct DelegateOperation : public osg::Operation { Delegate _delegate; - Promise _promise; + jobs::promise _promise; int _invocation; DelegateOperation(Delegate d) : @@ -840,10 +841,10 @@ namespace osgEarth template struct DelegateOperation2 : public osg::Operation { Delegate _delegate; - Promise& _promise; + jobs::promise& _promise; int _invocation; - DelegateOperation2(Delegate d, Promise& promise) : + DelegateOperation2(Delegate d, jobs::promise& promise) : osg::Operation("GLPipeline", true), _delegate(d), _promise(promise), @@ -860,10 +861,10 @@ namespace osgEarth public: // Launch an operation on this GL Pipeline. template - Future dispatch(Delegate delegate) + jobs::future dispatch(Delegate delegate) { auto operation = new DelegateOperation(delegate); - Future future = operation->_promise; // .getFuture(); + jobs::future future = operation->_promise; if (_dispatcher.valid()) _dispatcher->push(operation); else @@ -874,7 +875,7 @@ namespace osgEarth // Launch an operation on this GL Pipeline, supplying your own Promise. // Be sure to call getFuture() prior to calling this function. 
template - void dispatch(Delegate delegate, Promise& promise) + void dispatch(Delegate delegate, jobs::promise& promise) { auto operation = new DelegateOperation2(delegate, promise); if (_dispatcher.valid()) @@ -939,28 +940,6 @@ namespace osgEarth void readback(osg::State* state); }; - template - class PerThreadComputeSession - { - public: - PerThreadComputeSession() : - _sessions("PerThreadComputeSession(OE)") { } - - T& get(osg::Program* program) const { - ScopedMutexLock lock(_sessions); - SessionPtr& ptr = _sessions[getCurrentThreadId()]; - if (ptr == nullptr) { - ptr = std::make_shared(); - ptr->setProgram(program); - } - return *ptr.get(); - } - private: - using SessionPtr = std::shared_ptr; - using SessionPtrPerThread = Mutexed>; - mutable SessionPtrPerThread _sessions; - }; - /** * Utility to "pre-compile" a node by running it through the ICO * if one exists in the Options. If there is no ICO, this is a no-op @@ -975,18 +954,18 @@ namespace osgEarth void compileNow( const osg::ref_ptr& node, const osg::Object* host, - osgEarth::Threading::Cancelable* progress) const; + osgEarth::Cancelable* progress) const; Future> compileAsync( const osg::ref_ptr& node, const osg::Object* host, - osgEarth::Threading::Cancelable* progress) const; + osgEarth::Cancelable* progress) const; Future> compileAsync( const osg::ref_ptr& node, osgUtil::StateToCompile* state, const osg::Object* host, - osgEarth::Threading::Cancelable* progress) const; + osgEarth::Cancelable* progress) const; static int totalJobs() { return (int)_jobsActive; } diff --git a/src/osgEarth/GLUtils.cpp b/src/osgEarth/GLUtils.cpp index 7a249f19f8..b6e2dd4b2f 100644 --- a/src/osgEarth/GLUtils.cpp +++ b/src/osgEarth/GLUtils.cpp @@ -26,6 +26,7 @@ #include #include #include +#include #include #include @@ -538,7 +539,7 @@ std::unordered_map GLObjectPool::getAll() { std::unordered_map result; - ScopedMutexLock lock(_pools); + std::lock_guard lock(_pools.mutex()); for (auto& pool : _pools) result[pool->getContextID()] = pool; return result; @@ -552,7 +553,7 @@ GLObjectPool::GLObjectPool(unsigned cxid) : _avarice(10.f) { _gcs.resize(256); - ScopedMutexLock lock(_pools); + std::lock_guard lock(_pools.mutex()); _pools.emplace_back(this); char* value = ::getenv("OSGEARTH_GL_OBJECT_POOL_DELAY"); @@ -626,7 +627,7 @@ GLObjectPool::track(osg::GraphicsContext* gc) void GLObjectPool::watch(GLObject::Ptr object) { - ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); _objects.emplace_back(object); //if (object->shareable()) @@ -653,7 +654,7 @@ GLObjectPool::releaseGLObjects(osg::State* state) //void //GLObjectPool::releaseAll() //{ -// ScopedMutexLock lock(_mutex); +// std::lock_guard lock(_mutex); // for (auto& object : _objects) // object->release(); // _objects.clear(); @@ -686,7 +687,7 @@ GLObjectPool::flushAllDeletedGLObjects() void GLObjectPool::deleteAllGLObjects() { - //ScopedMutexLock lock(_mutex); + //std::lock_guard lock(_mutex); //for (auto& object : _objects) // object->release(); //_objects.clear(); @@ -696,7 +697,7 @@ GLObjectPool::deleteAllGLObjects() void GLObjectPool::discardAllGLObjects() { - //ScopedMutexLock lock(_mutex); + //std::lock_guard lock(_mutex); //_objects.clear(); //_totalBytes = 0; } @@ -704,7 +705,7 @@ GLObjectPool::discardAllGLObjects() void GLObjectPool::releaseAll(const osg::GraphicsContext* gc) { - ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); GLsizeiptr bytes = 0; GLObjectPool::Collection keepers; @@ -729,7 +730,7 @@ GLObjectPool::releaseAll(const osg::GraphicsContext* gc) void 
GLObjectPool::releaseOrphans(const osg::GraphicsContext* gc) { - ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); unsigned maxNumToRelease = std::max(1u, (unsigned)pow(4.0f, _avarice)); unsigned numReleased = 0u; @@ -767,7 +768,7 @@ GLObjectPool::releaseOrphans(const osg::GraphicsContext* gc) unsigned GLObjectPool::flush(GLObjectPool::Collection& objects) { - ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); GLsizeiptr bytes_released = 0; std::unordered_set keep; @@ -1672,14 +1673,14 @@ GLPipeline::Dispatcher::operator()(osg::GraphicsContext* gc) } //static defs -Mutex GLPipeline::_mutex("GLPipeline(OE)"); +Mutex GLPipeline::_mutex; std::unordered_map GLPipeline::_lut; GLPipeline::Ptr GLPipeline::get(osg::State& state) { - ScopedMutexLock lock(GLPipeline::_mutex); + std::lock_guard lock(GLPipeline::_mutex); GLPipeline::Ptr& p = _lut[&state]; if (p == nullptr) @@ -1728,8 +1729,7 @@ ComputeImageSession::setImage(osg::Image* image) void ComputeImageSession::execute(osg::State& state) { - auto job = GLPipeline::get(state)->dispatch( - [this](osg::State& state, Promise& promise, int invocation) + auto task = [this](osg::State& state, jobs::promise& promise, int invocation) { if (invocation == 0) { @@ -1743,9 +1743,9 @@ ComputeImageSession::execute(osg::State& state) readback(&state); return false; // all done. } - } - ); + }; + auto job = GLPipeline::get(state)->dispatch(task); job.join(); } @@ -1803,7 +1803,7 @@ namespace struct ICOCallback : public ICO::CompileCompletedCallback { - Promise> _promise; + jobs::promise> _promise; osg::ref_ptr _node; std::atomic_int& _jobsActive; diff --git a/src/osgEarth/Geocoder b/src/osgEarth/Geocoder index c68172173b..828b6430dc 100644 --- a/src/osgEarth/Geocoder +++ b/src/osgEarth/Geocoder @@ -50,9 +50,12 @@ namespace osgEarth }; //! Result object returned from a geocoding attempt - class OSGEARTH_EXPORT Results : public FutureResult + class OSGEARTH_EXPORT Results { public: + bool isReady() const { return _result.available(); } + bool isWorking() const { return _result.working(); } + //! Status of geocode operation -- check this first Status getStatus(); @@ -61,8 +64,11 @@ namespace osgEarth //! 
Internal constructors Results(const Status& status, FeatureCursor* cursor); - Results(Future future); + Results(jobs::future future); Results(const OutputData& results); + + private: + jobs::future _result; }; public: diff --git a/src/osgEarth/Geocoder.cpp b/src/osgEarth/Geocoder.cpp index b020941bda..97ce89f33d 100644 --- a/src/osgEarth/Geocoder.cpp +++ b/src/osgEarth/Geocoder.cpp @@ -130,10 +130,10 @@ namespace struct GeocodeAsyncOperation : public osg::Operation { std::string _input; - Promise _promise; + jobs::promise _promise; osg::ref_ptr _impl; - GeocodeAsyncOperation(const std::string& input, Promise promise, Geocoder::Implementation* impl) : + GeocodeAsyncOperation(const std::string& input, jobs::promise promise, Geocoder::Implementation* impl) : _input(input), _promise(promise), _impl(impl) @@ -171,29 +171,20 @@ Geocoder::search(const std::string& input, const osgDB::Options* io_options) { if (_impl) { - auto geocode = [=](Cancelable* progress) + auto geocode = [=](Cancelable& c) { osg::ref_ptr cursor; Status status = _impl->search(input, cursor); return OutputData(status, cursor.release()); }; - std::shared_ptr arena; - if (ObjectStorage::get(io_options, arena)) - { - Future out = Job(arena.get()).dispatch(geocode); - return Geocoder::Results(out); - } - else - { - return Geocoder::Results(geocode(nullptr)); - } + return Geocoder::Results(jobs::dispatch(geocode)); } else { return Geocoder::Results( Status(Status::ServiceUnavailable, "No geocoder implementation installed"), - NULL); + nullptr); } } @@ -206,36 +197,36 @@ Geocoder::setImplementation(Geocoder::Implementation* impl) } } -Geocoder::Results::Results(const Status& status, FeatureCursor* cursor) : - FutureResult(Geocoder::OutputData(status, cursor)) +Geocoder::Results::Results(const Status& status, FeatureCursor* cursor) { - //NOP - error status + // results immediately available + _result.resolve(Geocoder::OutputData(status, cursor)); } -Geocoder::Results::Results(Future data) : - FutureResult(data) +Geocoder::Results::Results(jobs::future data) : + _result(data) { - //NOP + // NOP - results eventually available } -Geocoder::Results::Results(const Geocoder::OutputData& data) : - FutureResult(data) +Geocoder::Results::Results(const Geocoder::OutputData& data) { - //NOP + // results immediately available + _result.resolve(data); } Status Geocoder::Results::getStatus() { - return _future.available() ? - _future.value()._status : + return _result.available() ? + _result.value()._status : Status(Status::ServiceUnavailable, "Operation canceled"); } FeatureCursor* Geocoder::Results::getFeatures() { - return _future.available() ? - _future.value()._cursor.get() : + return _result.available() ? 
+ _result.value()._cursor.get() : nullptr; } diff --git a/src/osgEarth/GeodeticGraticule b/src/osgEarth/GeodeticGraticule index 862b3150ba..9650cc487c 100644 --- a/src/osgEarth/GeodeticGraticule +++ b/src/osgEarth/GeodeticGraticule @@ -158,7 +158,7 @@ namespace osgEarth { namespace Util }; typedef std::unordered_map CameraDataMap; mutable CameraDataMap _cameraDataMap; - mutable Threading::Mutex _cameraDataMapMutex; + mutable std::mutex _cameraDataMapMutex; CameraData& getCameraData(osg::Camera*) const; diff --git a/src/osgEarth/GeodeticGraticule.cpp b/src/osgEarth/GeodeticGraticule.cpp index d89ac48405..830bc1cac8 100644 --- a/src/osgEarth/GeodeticGraticule.cpp +++ b/src/osgEarth/GeodeticGraticule.cpp @@ -402,7 +402,7 @@ GeodeticGraticule::rebuild() _root->addChild(_labelingEngine); // destroy all per-camera data so it can reinitialize itself - Threading::ScopedMutexLock lock(_cameraDataMapMutex); + std::lock_guard lock(_cameraDataMapMutex); _cameraDataMap.clear(); } @@ -631,7 +631,7 @@ GeodeticGraticule::updateLabels() const osgEarth::SpatialReference* srs = osgEarth::SpatialReference::create("wgs84"); - Threading::ScopedMutexLock lock(_cameraDataMapMutex); + std::lock_guard lock(_cameraDataMapMutex); for (CameraDataMap::iterator itr = _cameraDataMap.begin(); itr != _cameraDataMap.end(); ++itr) { CameraData& cdata = itr->second; @@ -727,7 +727,7 @@ GeodeticGraticule::updateLabels() GeodeticGraticule::CameraData& GeodeticGraticule::getCameraData(osg::Camera* cam) const { - Threading::ScopedMutexLock lock(_cameraDataMapMutex); + std::lock_guard lock(_cameraDataMapMutex); CameraData& cdata = _cameraDataMap[cam]; // New camera data? Initialize: @@ -807,7 +807,7 @@ GeodeticGraticule::releaseGLObjects(osg::State* state) const { VisibleLayer::releaseGLObjects(state); - Threading::ScopedMutexLock lock(_cameraDataMapMutex); + std::lock_guard lock(_cameraDataMapMutex); for (CameraDataMap::iterator i = _cameraDataMap.begin(); i != _cameraDataMap.end(); ++i) { CameraData& data = i->second; diff --git a/src/osgEarth/Geoid.cpp b/src/osgEarth/Geoid.cpp index 78dbb04829..2682787de9 100644 --- a/src/osgEarth/Geoid.cpp +++ b/src/osgEarth/Geoid.cpp @@ -16,9 +16,9 @@ * You should have received a copy of the GNU Lesser General Public License * along with this program. 
If not, see */ - -#include -#include +#include "Geoid" +#include "HeightFieldUtils" +#include "Notify" #define LC "[Geoid] " diff --git a/src/osgEarth/GeometryCloud.cpp b/src/osgEarth/GeometryCloud.cpp index 3813a25625..9653682950 100644 --- a/src/osgEarth/GeometryCloud.cpp +++ b/src/osgEarth/GeometryCloud.cpp @@ -21,6 +21,7 @@ */ #include "GeometryCloud" #include "Metrics" +#include "Notify" #include #include diff --git a/src/osgEarth/GeometryCompiler.cpp b/src/osgEarth/GeometryCompiler.cpp index f7400ed5e8..99191ff20d 100644 --- a/src/osgEarth/GeometryCompiler.cpp +++ b/src/osgEarth/GeometryCompiler.cpp @@ -568,7 +568,7 @@ GeometryCompiler::compile(FeatureList& workingSet, #ifdef PROFILING static double totalTime = 0.0; - static Threading::Mutex totalTimeMutex; + static std::mutex totalTimeMutex; osg::Timer_t p_end = osg::Timer::instance()->tick(); double t = osg::Timer::instance()->delta_s(p_start, p_end); totalTimeMutex.lock(); diff --git a/src/osgEarth/HTTPClient.cpp b/src/osgEarth/HTTPClient.cpp index e3c19318e4..5f1ffe780d 100644 --- a/src/osgEarth/HTTPClient.cpp +++ b/src/osgEarth/HTTPClient.cpp @@ -25,6 +25,7 @@ #include #include #include +#include "Notify" #include #include #include @@ -432,7 +433,7 @@ HTTPResponse::setHeadersFromConfig(const Config& conf) namespace { // per-thread client map (must be global scope) - static PerThread s_clientPerThread("HTTPClient(OE)"); + static PerThread s_clientPerThread; static optional s_proxySettings; @@ -444,7 +445,7 @@ namespace // HTTP debugging. static bool s_HTTP_DEBUG = false; - static Threading::Mutex s_HTTP_DEBUG_mutex; + static std::mutex s_HTTP_DEBUG_mutex; static int s_HTTP_DEBUG_request_count; static double s_HTTP_DEBUG_total_duration; @@ -810,7 +811,7 @@ namespace } { - Threading::ScopedMutexLock lock(s_HTTP_DEBUG_mutex); + std::lock_guard lock(s_HTTP_DEBUG_mutex); s_HTTP_DEBUG_request_count++; s_HTTP_DEBUG_total_duration += response.getDuration(); diff --git a/src/osgEarth/HorizonClipPlane.cpp b/src/osgEarth/HorizonClipPlane.cpp index 4a5feec517..55ef14fe7e 100644 --- a/src/osgEarth/HorizonClipPlane.cpp +++ b/src/osgEarth/HorizonClipPlane.cpp @@ -58,8 +58,7 @@ HorizonClipPlane::HorizonClipPlane() : HorizonClipPlane::HorizonClipPlane(const Ellipsoid& em) : _ellipsoid(em), - _num(0u), - _data(OE_MUTEX_NAME) + _num(0u) { #ifdef OSGEARTH_SINGLE_THREADED_OSG _data.threadsafe = false; diff --git a/src/osgEarth/IconSymbol.cpp b/src/osgEarth/IconSymbol.cpp index 32d6f94478..361ef4b2c4 100644 --- a/src/osgEarth/IconSymbol.cpp +++ b/src/osgEarth/IconSymbol.cpp @@ -98,7 +98,7 @@ IconSymbol::mergeConfig( const Config& conf ) namespace { - static Threading::Mutex s_getImageMutex(OE_MUTEX_NAME); + static std::mutex s_getImageMutex; } osg::Image* @@ -106,7 +106,7 @@ IconSymbol::getImage( unsigned maxSize ) const { if ( !_image.valid() && _url.isSet() ) { - Threading::ScopedMutexLock lock(s_getImageMutex); + std::lock_guard lock(s_getImageMutex); if ( !_image.valid() ) { osg::ref_ptr dbOptions = Registry::instance()->cloneOrCreateOptions(); diff --git a/src/osgEarth/ImGui/LayersGUI b/src/osgEarth/ImGui/LayersGUI index 1065cea4da..277da092de 100644 --- a/src/osgEarth/ImGui/LayersGUI +++ b/src/osgEarth/ImGui/LayersGUI @@ -1038,45 +1038,59 @@ namespace osgEarth void drawUsefulLayers() { - if (ImGui::BeginMenu("Useful Layers")) + ImGui::Separator(); + + //if (ImGui::BeginMenu("Useful Layers")) { // ReadyMap Imagery - if (ImGui::MenuItem("ReadyMap Imagery")) + if (_mapNode->getMap()->getLayerByName("ReadyMap Imagery") == nullptr) { - TMSImageLayer* 
readymap = new TMSImageLayer(); - readymap->setName("ReadyMap Imagery"); - readymap->setURL("http://readymap.org/readymap/tiles/1.0.0/7/"); - _mapNode->getMap()->addLayer(readymap); + if (ImGui::MenuItem("ReadyMap Imagery")) + { + TMSImageLayer* readymap = new TMSImageLayer(); + readymap->setName("ReadyMap Imagery"); + readymap->setURL("https://readymap.org/readymap/tiles/1.0.0/7/"); + _mapNode->getMap()->addLayer(readymap); + } } // ReadyMap Elevation - if (ImGui::MenuItem("ReadyMap Elevation")) + if (_mapNode->getMap()->getLayerByName("ReadyMap Elevation") == nullptr) { - TMSElevationLayer* readymap = new TMSElevationLayer(); - readymap->setName("ReadyMap Elevation"); - readymap->setURL("http://readymap.org/readymap/tiles/1.0.0/116/"); - _mapNode->getMap()->addLayer(readymap); + if (ImGui::MenuItem("ReadyMap Elevation")) + { + TMSElevationLayer* readymap = new TMSElevationLayer(); + readymap->setName("ReadyMap Elevation"); + readymap->setURL("https://readymap.org/readymap/tiles/1.0.0/116/"); + _mapNode->getMap()->addLayer(readymap); + } } // OpenStreetMap - if (ImGui::MenuItem("OpenStreetMap")) + if (_mapNode->getMap()->getLayerByName("OpenStreetMap") == nullptr) { - XYZImageLayer* osm = new XYZImageLayer(); - osm->setName("OpenStreetMap"); - osm->setURL("http://[abc].tile.openstreetmap.org/{z}/{x}/{y}.png"); - osm->setProfile(osgEarth::Registry::instance()->getSphericalMercatorProfile()); - osm->setAttribution("©OpenStreetMap contributors"); - _mapNode->getMap()->addLayer(osm); + if (ImGui::MenuItem("OpenStreetMap")) + { + XYZImageLayer* osm = new XYZImageLayer(); + osm->setName("OpenStreetMap"); + osm->setURL("https://[abc].tile.openstreetmap.org/{z}/{x}/{y}.png"); + osm->setProfile(osgEarth::Registry::instance()->getSphericalMercatorProfile()); + osm->setAttribution("©OpenStreetMap contributors"); + _mapNode->getMap()->addLayer(osm); + } } - if (ImGui::MenuItem("Debug")) + if (_mapNode->getMap()->getLayer() == nullptr) { - DebugImageLayer* debugImage = new DebugImageLayer; - debugImage->setName("Debug"); - _mapNode->getMap()->addLayer(debugImage); + if (ImGui::MenuItem("Debug")) + { + DebugImageLayer* debugImage = new DebugImageLayer; + debugImage->setName("Debug"); + _mapNode->getMap()->addLayer(debugImage); + } } - ImGui::EndMenu(); + //ImGui::EndMenu(); } } @@ -1095,10 +1109,10 @@ namespace osgEarth if (key.valid()) { - _imageLayerValueUnderMouse = Job().dispatch([this, key, p](Cancelable* c) + auto task = [this, key, p](Cancelable& c) { ValueUnderMouse value; - osg::ref_ptr prog = new ProgressCallback(c); + osg::ref_ptr prog = new ProgressCallback(&c); GeoImage g = _mouseOverImageLayer->createImage(key, prog.get()); if (g.valid()) { @@ -1111,7 +1125,9 @@ namespace osgEarth } } return Result(Status::Error("No value")); - }); + }; + + _imageLayerValueUnderMouse = jobs::dispatch(task); } } } diff --git a/src/osgEarth/ImGui/NotifyGUI b/src/osgEarth/ImGui/NotifyGUI index 8fde11a338..ee533547c5 100644 --- a/src/osgEarth/ImGui/NotifyGUI +++ b/src/osgEarth/ImGui/NotifyGUI @@ -36,7 +36,7 @@ namespace osgEarth { namespace GUI public: void notify(osg::NotifySeverity severity, const char *message) { - ScopedMutexLock lk(_mutex); + std::lock_guard lk(_mutex); int old_size = Buf.size(); Buf.append(message); for (int new_size = Buf.size(); old_size < new_size; old_size++) diff --git a/src/osgEarth/ImGui/SearchGUI b/src/osgEarth/ImGui/SearchGUI index 471d977cd3..066e3a2230 100644 --- a/src/osgEarth/ImGui/SearchGUI +++ b/src/osgEarth/ImGui/SearchGUI @@ -47,13 +47,6 @@ namespace osgEarth { if 
(!isVisible()) return; - if (!_arena) - { - _options = new osgDB::Options(); - _arena = std::make_shared("oe.geocoder", 1U); - ObjectStorage::set(_options.get(), _arena); - } - if (!_mapSRS.valid()) { MapNode* mapNode = findNode(ri); if (mapNode) @@ -120,7 +113,6 @@ namespace osgEarth { char _search[128]; Geocoder::Results _results; osg::ref_ptr _options; - std::shared_ptr _arena; FeatureList _features; Geocoder geocoder; osg::ref_ptr _mapSRS; diff --git a/src/osgEarth/ImGui/SystemGUI b/src/osgEarth/ImGui/SystemGUI index 7f7c76f7df..a3c4346be3 100644 --- a/src/osgEarth/ImGui/SystemGUI +++ b/src/osgEarth/ImGui/SystemGUI @@ -76,36 +76,38 @@ namespace osgEarth ImGui::Text(" Total: %.1lf MB", (double)pb / 1048576.0); ImGui::Separator(); - ImGui::TextColored(ImVec4(1, 1, 0, 1), "Job Arenas:"); + ImGui::TextColored(ImVec4(1, 1, 0, 1), "Job Pools:"); - if (ImGui::BeginTable("arenas", 2)) + if (ImGui::BeginTable("thread pools", 2)) { - const JobArena::Metrics& m = JobArena::allMetrics(); - for (int i = 0; i <= m.maxArenaIndex; ++i) + auto metrics = jobs::get_metrics(); + + auto all_pool_metrics = metrics->all(); + + for(auto pool_metrics : all_pool_metrics) { - auto metrics = m.arena(i); - if (metrics) + if (pool_metrics && pool_metrics->total > 0) { ImGui::TableNextColumn(); ImGui::Text("%s", - metrics->arenaName.c_str()); + (pool_metrics->name.empty() ? "default" : pool_metrics->name.c_str())); ImGui::TableNextColumn(); ImGui::Text("(%d) %d / %d // %d", - (int)metrics->concurrency, - (int)metrics->numJobsRunning, - (int)metrics->numJobsPending, - (int)metrics->numJobsCanceled); + (int)pool_metrics->concurrency, + (int)pool_metrics->running, + (int)pool_metrics->pending, + (int)pool_metrics->canceled); if (_showArenaControls) { ImGui::TableNextColumn(); ImGui::Text(" Concurrency:"); ImGui::TableNextColumn(); - ImGui::PushID(i); - int concurrency = metrics->concurrency; + ImGui::PushID((std::uintptr_t)pool_metrics); + int concurrency = pool_metrics->concurrency; if (ImGui::SliderInt("", &concurrency, 1, 16)) { - JobArena::get(metrics->arenaName)->setConcurrency(concurrency); + jobs::get_pool(pool_metrics->name)->set_concurrency(concurrency); } ImGui::PopID(); ImGui::Separator(); @@ -115,7 +117,7 @@ namespace osgEarth ImGui::EndTable(); } ImGui::Separator(); - ImGui::Text("Total: %d", JobArena::allMetrics().totalJobs()); + ImGui::Text("Total: %d", jobs::get_metrics()->totalJobs()); ImGui::SameLine(); int icojobs = 0; diff --git a/src/osgEarth/ImGui/TerrainGUI b/src/osgEarth/ImGui/TerrainGUI index 0da56fe2de..c3d69dd112 100644 --- a/src/osgEarth/ImGui/TerrainGUI +++ b/src/osgEarth/ImGui/TerrainGUI @@ -31,6 +31,7 @@ #include #include #include +#include namespace osgEarth { @@ -160,7 +161,7 @@ namespace osgEarth ImGui::Separator(); } - unsigned threads = options.getConcurrency(); + unsigned threads = std::max(1u, std::thread::hardware_concurrency()); if (ImGuiLTable::SliderInt("Load threads", (int*)&threads, 1, 16)) { options.setConcurrency(threads); diff --git a/src/osgEarth/ImageLayer.cpp b/src/osgEarth/ImageLayer.cpp index 346fbe98e2..63f6b7e64e 100644 --- a/src/osgEarth/ImageLayer.cpp +++ b/src/osgEarth/ImageLayer.cpp @@ -249,7 +249,6 @@ ImageLayer::init() TileLayer::init(); _useCreateTexture = false; - _sentry.setName("ImageLayer " + getName()); // image layers render as a terrain texture. 
setRenderType(RENDERTYPE_TERRAIN_SURFACE); @@ -893,7 +892,7 @@ ImageLayer::removeCallback(ImageLayer::Callback* c) void ImageLayer::addPostLayer(ImageLayer* layer) { - ScopedMutexLock lock(_postLayers); + std::lock_guard lock(_postLayers.mutex()); _postLayers.push_back(layer); } @@ -926,23 +925,25 @@ FutureTexture2D::dispatch() const osg::observer_ptr layer_ptr(_layer); TileKey key(_key); - Job job(JobArena::get(ARENA_ASYNC_LAYER)); - job.setName(Stringify() << key.str() << " " << _layer->getName()); - - // prioritize higher LOD tiles. - job.setPriority(key.getLOD()); - - _result = job.dispatch([layer_ptr, key](Cancelable* progress) mutable + auto task = [layer_ptr, key](Cancelable& progress) mutable { GeoImage result; osg::ref_ptr safe(layer_ptr); if (safe.valid()) { - osg::ref_ptr p = new ProgressCallback(progress); + osg::ref_ptr p = new ProgressCallback(&progress); result = safe->createImage(key, p.get()); } return result; - }); + }; + + jobs::context context{ + Stringify() << key.str() << " " << _layer->getName(), + jobs::get_pool(ARENA_ASYNC_LAYER), + [key]() { return key.getLOD(); } + }; + + jobs::dispatch(task, context); } void diff --git a/src/osgEarth/ImageOverlay b/src/osgEarth/ImageOverlay index 40fe3e4a3f..94a2b7884b 100644 --- a/src/osgEarth/ImageOverlay +++ b/src/osgEarth/ImageOverlay @@ -167,7 +167,7 @@ namespace osgEarth osg::Polytope _boundingPolytope; osg::ref_ptr< osg::Image > _image; bool _dirty; - Threading::Mutex _mutex; + std::mutex _mutex; osg::Group* _root; osg::Texture* _texture; diff --git a/src/osgEarth/ImageOverlay.cpp b/src/osgEarth/ImageOverlay.cpp index aa79f08501..c82b80f32c 100644 --- a/src/osgEarth/ImageOverlay.cpp +++ b/src/osgEarth/ImageOverlay.cpp @@ -239,8 +239,9 @@ ImageOverlay::construct() if (!_program.valid()) { - static Threading::Mutex mutex(OE_MUTEX_NAME); - mutex.lock(); + static std::mutex mutex; + std::lock_guard lock(mutex); + if (_program.valid() == false) { _program = new VirtualProgram; @@ -248,7 +249,6 @@ ImageOverlay::construct() _program->setFunction("oe_ImageOverlay_VS", imageVS, VirtualProgram::LOCATION_VERTEX_MODEL); _program->setFunction("oe_ImageOverlay_FS", imageFS, VirtualProgram::LOCATION_FRAGMENT_COLORING); } - mutex.unlock(); } _root = new osg::Group(); @@ -265,7 +265,7 @@ ImageOverlay::construct() void ImageOverlay::compile() { - Threading::ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); if (_root->getNumChildren() > 0) { @@ -795,7 +795,7 @@ ImageOverlay::traverse(osg::NodeVisitor &nv) void ImageOverlay::dirty() { { - Threading::ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); _dirty = true; } diff --git a/src/osgEarth/ImageUtils.cpp b/src/osgEarth/ImageUtils.cpp index 67cfc12bb4..72d4187483 100644 --- a/src/osgEarth/ImageUtils.cpp +++ b/src/osgEarth/ImageUtils.cpp @@ -748,7 +748,7 @@ ImageUtils::compressImageInPlace( OE_PROFILING_ZONE; // prevent 2 threads from compressing the same object at the same time - static Threading::Gate gate("ImageUtils::compressImageInPlace"); + static Threading::Gate gate; Threading::ScopedGate lock(gate, input); if (!input) @@ -1109,7 +1109,7 @@ ImageUtils::createSharpenedImage( const osg::Image* input ) namespace { - static Threading::Mutex s_emptyImageMutex(OE_MUTEX_NAME); + static std::mutex s_emptyImageMutex; static osg::ref_ptr s_emptyImage; } @@ -1118,7 +1118,7 @@ ImageUtils::createEmptyImage() { if (!s_emptyImage.valid()) { - Threading::ScopedMutexLock exclusive( s_emptyImageMutex ); + std::lock_guard exclusive( s_emptyImageMutex ); if (!s_emptyImage.valid()) 
{ s_emptyImage = createEmptyImage( 1, 1 ); diff --git a/src/osgEarth/LabelNode.cpp b/src/osgEarth/LabelNode.cpp index 05ebba943e..a8f901cde8 100644 --- a/src/osgEarth/LabelNode.cpp +++ b/src/osgEarth/LabelNode.cpp @@ -91,8 +91,8 @@ LabelNode::construct() osg::ref_ptr geodeStateSet; if (s_geodeStateSet.lock(geodeStateSet) == false) { - static Threading::Mutex s_mutex(OE_MUTEX_NAME); - Threading::ScopedMutexLock lock(s_mutex); + static std::mutex s_mutex; + std::lock_guard lock(s_mutex); if (s_geodeStateSet.lock(geodeStateSet) == false) { diff --git a/src/osgEarth/Layer.cpp b/src/osgEarth/Layer.cpp index 158c3fb421..5847526ed6 100644 --- a/src/osgEarth/Layer.cpp +++ b/src/osgEarth/Layer.cpp @@ -351,7 +351,7 @@ Layer::init() osg::Object::setName("[" + std::string(className()) + "]"); } - _mutex = new Threading::ReadWriteMutex(options().name().isSet() ? options().name().get() : "Unnamed Layer(OE)"); + _mutex = new Threading::ReadWriteMutex(); } Status diff --git a/src/osgEarth/Lighting b/src/osgEarth/Lighting index f72c37b65d..11b1fff5f6 100644 --- a/src/osgEarth/Lighting +++ b/src/osgEarth/Lighting @@ -153,7 +153,7 @@ namespace osgEarth void releaseGLObjects(osg::State* state) const; mutable std::vector > _statesets; - mutable Threading::Mutex _statesetsMutex; + mutable std::mutex _statesetsMutex; }; } diff --git a/src/osgEarth/Lighting.cpp b/src/osgEarth/Lighting.cpp index 42172d425b..0b816cc942 100644 --- a/src/osgEarth/Lighting.cpp +++ b/src/osgEarth/Lighting.cpp @@ -139,7 +139,7 @@ GenerateGL3LightingUniforms::apply(osg::LightSource& lightSource) //............................................................................ LightSourceGL3UniformGenerator::LightSourceGL3UniformGenerator() : - _statesetsMutex("LightSourceGL3UniformGenerator(OE)") + _statesetsMutex() { //nop } @@ -178,7 +178,7 @@ LightSourceGL3UniformGenerator::run(osg::Object* obj, osg::Object* data) { cv->getCurrentRenderStage()->setStateSet(ss = new osg::StateSet()); - Threading::ScopedMutexLock lock(_statesetsMutex); + std::lock_guard lock(_statesetsMutex); _statesets.push_back(ss); } @@ -230,7 +230,7 @@ LightSourceGL3UniformGenerator::run(osg::Object* obj, osg::Object* data) void LightSourceGL3UniformGenerator::resizeGLBufferObjects(unsigned maxSize) { - Threading::ScopedMutexLock lock(_statesetsMutex); + std::lock_guard lock(_statesetsMutex); for(unsigned i=0; i<_statesets.size(); ++i) _statesets[i]->resizeGLObjectBuffers(maxSize); } @@ -238,7 +238,7 @@ LightSourceGL3UniformGenerator::resizeGLBufferObjects(unsigned maxSize) void LightSourceGL3UniformGenerator::releaseGLObjects(osg::State* state) const { - Threading::ScopedMutexLock lock(_statesetsMutex); + std::lock_guard lock(_statesetsMutex); for(unsigned i=0; i<_statesets.size(); ++i) _statesets[i]->releaseGLObjects(state); _statesets.clear(); diff --git a/src/osgEarth/LineDrawable b/src/osgEarth/LineDrawable index 00b75a4e91..ab6e6cf0b7 100644 --- a/src/osgEarth/LineDrawable +++ b/src/osgEarth/LineDrawable @@ -211,7 +211,7 @@ namespace osgEarth static osg::observer_ptr s_gpuStateSet; osg::ref_ptr _gpuStateSet; - mutable Threading::Mutex _mutex; + mutable std::mutex _mutex; osg::ref_ptr _geom; public: // osg::Drawable pass thrus diff --git a/src/osgEarth/LineDrawable.cpp b/src/osgEarth/LineDrawable.cpp index 002cfe31cb..934b86ddad 100644 --- a/src/osgEarth/LineDrawable.cpp +++ b/src/osgEarth/LineDrawable.cpp @@ -1294,8 +1294,8 @@ LineDrawable::setupShaders() if (s_gpuStateSet.lock(_gpuStateSet) == false) { // serialize access and double-check: - static 
Threading::Mutex s_mutex(OE_MUTEX_NAME); - Threading::ScopedMutexLock lock(s_mutex); + static std::mutex s_mutex; + std::lock_guard lock(s_mutex); if (s_gpuStateSet.lock(_gpuStateSet) == false) { @@ -1336,7 +1336,7 @@ LineDrawable::accept(osg::NodeVisitor& nv) if (!_current) { - ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); if (!_current) initialize(); } diff --git a/src/osgEarth/MBTiles b/src/osgEarth/MBTiles index a52bdffc4f..d7a1025032 100644 --- a/src/osgEarth/MBTiles +++ b/src/osgEarth/MBTiles @@ -92,7 +92,7 @@ namespace osgEarth { namespace MBTiles std::string _name; // because no one knows if/when sqlite3 is threadsafe. - mutable Threading::Mutex _mutex; + mutable std::mutex _mutex; bool createTables(); void computeLevels(); diff --git a/src/osgEarth/MBTiles.cpp b/src/osgEarth/MBTiles.cpp index 78d0c0b289..a399bfa278 100644 --- a/src/osgEarth/MBTiles.cpp +++ b/src/osgEarth/MBTiles.cpp @@ -332,8 +332,7 @@ MBTiles::Driver::Driver() : _minLevel(0), _maxLevel(19), _forceRGB(false), - _database(nullptr), - _mutex("MBTiles Driver(OE)") + _database(nullptr) { //nop } @@ -646,7 +645,7 @@ MBTiles::Driver::read( ProgressCallback* progress, const osgDB::Options* readOptions) const { - Threading::ScopedMutexLock exclusiveLock(_mutex); + std::lock_guard exclusiveLock(_mutex); int z = key.getLevelOfDetail(); int x = key.getTileX(); @@ -744,7 +743,7 @@ MBTiles::Driver::write( if (!key.valid() || !image) return Status::AssertionFailure; - Threading::ScopedMutexLock exclusiveLock(_mutex); + std::lock_guard exclusiveLock(_mutex); // encode the data stream: std::stringstream buf; @@ -844,7 +843,7 @@ MBTiles::Driver::write( bool MBTiles::Driver::getMetaData(const std::string& key, std::string& value) { - Threading::ScopedMutexLock exclusiveLock(_mutex); + std::lock_guard exclusiveLock(_mutex); sqlite3* database = (sqlite3*)_database; @@ -886,7 +885,7 @@ MBTiles::Driver::getMetaData(const std::string& key, std::string& value) bool MBTiles::Driver::putMetaData(const std::string& key, const std::string& value) { - Threading::ScopedMutexLock exclusiveLock(_mutex); + std::lock_guard exclusiveLock(_mutex); sqlite3* database = (sqlite3*)_database; diff --git a/src/osgEarth/Map.cpp b/src/osgEarth/Map.cpp index 45fa2ce465..179f5da179 100644 --- a/src/osgEarth/Map.cpp +++ b/src/osgEarth/Map.cpp @@ -19,6 +19,7 @@ #include #include #include +#include using namespace osgEarth; @@ -123,8 +124,6 @@ Map::init() // reset the revision: _dataModelRevision = 0; - _mapDataMutex.setName("Map dataMutex(OE)"); - // set the object name from the options: if (options().name().isSet()) osg::Object::setName(options().name().get()); diff --git a/src/osgEarth/MapNode.cpp b/src/osgEarth/MapNode.cpp index ce5f93ad68..b782c0ae7c 100644 --- a/src/osgEarth/MapNode.cpp +++ b/src/osgEarth/MapNode.cpp @@ -670,7 +670,7 @@ MapNode::addExtension(Extension* extension, const osgDB::Options* options) } } - OE_INFO << LC << "Added extension \"" << extension->getName() << "\"\n"; + //OE_INFO << LC << "Added extension \"" << extension->getName() << "\"\n"; } } @@ -831,8 +831,8 @@ MapNode::traverse( osg::NodeVisitor& nv ) nv.getVisitorType() == nv.CULL_VISITOR || nv.getVisitorType() == nv.UPDATE_VISITOR) { - static Threading::Mutex s_openMutex(OE_MUTEX_NAME); - Threading::ScopedMutexLock lock(s_openMutex); + static std::mutex s_openMutex; + std::lock_guard lock(s_openMutex); if (!_isOpen) { _isOpen = open(); diff --git a/src/osgEarth/MemCache.cpp b/src/osgEarth/MemCache.cpp index 3668285a7c..cf042a88bb 100644 --- 
a/src/osgEarth/MemCache.cpp +++ b/src/osgEarth/MemCache.cpp @@ -123,7 +123,7 @@ namespace }; - static Threading::Mutex s_defaultBinMutex(OE_MUTEX_NAME); + static std::mutex s_defaultBinMutex; } //------------------------------------------------------------------------ @@ -154,7 +154,7 @@ MemCache::getOrCreateDefaultBin() { if ( !_defaultBin.valid() ) { - Threading::ScopedMutexLock lock( s_defaultBinMutex ); + std::lock_guard lock( s_defaultBinMutex ); // double check if ( !_defaultBin.valid() ) { diff --git a/src/osgEarth/ModelResource.cpp b/src/osgEarth/ModelResource.cpp index f964fd609b..c998935eee 100644 --- a/src/osgEarth/ModelResource.cpp +++ b/src/osgEarth/ModelResource.cpp @@ -32,13 +32,12 @@ using namespace osgEarth; //--------------------------------------------------------------------------- -ModelResource::ModelResource( const Config& conf ) : -InstanceResource( conf ), -_canScaleToFitXY(true), -_canScaleToFitZ(true) +ModelResource::ModelResource(const Config& conf) : + InstanceResource(conf), + _canScaleToFitXY(true), + _canScaleToFitZ(true) { - _mutex.setName("OE.ModelResource"); - mergeConfig( conf ); + mergeConfig(conf); } void @@ -63,7 +62,7 @@ ModelResource::getBoundingBox(const osgDB::Options* dbo) { if ( !_bbox.valid() && _status.isOK() ) { - Threading::ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); if ( !_bbox.valid() ) { osg::ref_ptr node = createNodeFromURI( uri().get(), dbo ); diff --git a/src/osgEarth/NetworkMonitor.cpp b/src/osgEarth/NetworkMonitor.cpp index 4a6b5ce501..9ec3de3403 100644 --- a/src/osgEarth/NetworkMonitor.cpp +++ b/src/osgEarth/NetworkMonitor.cpp @@ -21,6 +21,7 @@ #include #include #include +#include using namespace osgEarth; @@ -28,10 +29,10 @@ namespace { static NetworkMonitor::Requests s_requests; static NetworkMonitor::URICount s_counts; - osgEarth::Threading::ReadWriteMutex s_requestsMutex("NetworkMonitor(OE)"); + osgEarth::Threading::ReadWriteMutex s_requestsMutex; static unsigned long s_requestId = 0; static bool s_enabled = false; - static std::unordered_map s_requestLayer; + static std::unordered_map s_requestLayer; } #define LC "[NetworkMonitor] " @@ -42,7 +43,7 @@ unsigned long NetworkMonitor::begin(const std::string& uri, const std::string& s { osgEarth::Threading::ScopedWriteLock lock(s_requestsMutex); Request req(uri, status); - req.layer = s_requestLayer[osgEarth::Threading::getCurrentThreadId()]; + req.layer = s_requestLayer[std::this_thread::get_id()]; req.type = type; req.count = ++s_counts.insert(std::make_pair(uri, 0u)).first->second; @@ -131,13 +132,13 @@ void NetworkMonitor::saveCSV(Requests& requests, const std::string& filename) void NetworkMonitor::setRequestLayer(const std::string& name) { - osgEarth::Threading::ScopedWriteLock lock(s_requestsMutex); - s_requestLayer[osgEarth::Threading::getCurrentThreadId()] = name; + osgEarth::Threading::ScopedWriteLock unique_lock(s_requestsMutex); + s_requestLayer[std::this_thread::get_id()] = name; } std::string NetworkMonitor::getRequestLayer() { - osgEarth::Threading::ScopedReadLock lock(s_requestsMutex); - return s_requestLayer[osgEarth::Threading::getCurrentThreadId()]; + osgEarth::Threading::ScopedReadLock shared_lock (s_requestsMutex); + return s_requestLayer[std::this_thread::get_id()]; } diff --git a/src/osgEarth/Notify b/src/osgEarth/Notify index aaf8565b9a..bb90154cb6 100644 --- a/src/osgEarth/Notify +++ b/src/osgEarth/Notify @@ -46,6 +46,14 @@ namespace osgEarth inline std::ostream& notify(void) { return osgEarth::notify(osg::INFO); } + struct OSGEARTH_EXPORT 
NotifyPrefix + { + static std::string DEBUG; + static std::string INFO; + static std::string NOTICE; + static std::string WARN; + static std::string ALWAYS; + }; #define OE_NOTIFY( X,Y ) if(osgEarth::isNotifyEnabled( X )) osgEarth::notify( X ) << Y @@ -58,13 +66,13 @@ namespace osgEarth #define OE_INFO_CONTINUE OE_NOTIFY(osg::INFO, "") #define OE_DEBUG OE_NOTIFY(osg::DEBUG_INFO,"") #else -#define OE_CRITICAL OE_NOTIFY(osg::ALWAYS,"[osgEarth]**") -#define OE_FATAL OE_NOTIFY(osg::FATAL,"[osgEarth]* ") -#define OE_WARN OE_NOTIFY(osg::WARN,"[osgEarth]* ") -#define OE_NOTICE OE_NOTIFY(osg::NOTICE,"[osgEarth] ") -#define OE_INFO OE_NOTIFY(osg::INFO,"[osgEarth] ") +#define OE_CRITICAL OE_NOTIFY(osg::ALWAYS, osgEarth::NotifyPrefix::ALWAYS) +#define OE_FATAL OE_NOTIFY(osg::FATAL, osgEarth::NotifyPrefix::ALWAYS) +#define OE_WARN OE_NOTIFY(osg::WARN, osgEarth::NotifyPrefix::WARN) +#define OE_NOTICE OE_NOTIFY(osg::NOTICE, osgEarth::NotifyPrefix::NOTICE) +#define OE_INFO OE_NOTIFY(osg::INFO, osgEarth::NotifyPrefix::INFO) #define OE_INFO_CONTINUE OE_NOTIFY(osg::INFO, "") -#define OE_DEBUG OE_NOTIFY(osg::DEBUG_INFO,"[osgEarth] ") +#define OE_DEBUG OE_NOTIFY(osg::DEBUG_INFO, osgEarth::NotifyPrefix::DEBUG) #endif #define OE_NULL if(false) osgEarth::notify(osg::ALWAYS) diff --git a/src/osgEarth/Notify.cpp b/src/osgEarth/Notify.cpp index 95ce3f9d35..4b89e9bf34 100644 --- a/src/osgEarth/Notify.cpp +++ b/src/osgEarth/Notify.cpp @@ -137,6 +137,12 @@ struct NotifyStream : public std::ostream using namespace osgEarth; +std::string NotifyPrefix::DEBUG = "[osgEarth] "; +std::string NotifyPrefix::INFO = "[osgEarth] "; +std::string NotifyPrefix::NOTICE = "[osgEarth] "; +std::string NotifyPrefix::WARN = "[osgEarth]* "; +std::string NotifyPrefix::ALWAYS = "[osgEarth]**"; + namespace { static osg::ApplicationUsageProxy Notify_e0(osg::ApplicationUsage::ENVIRONMENTAL_VARIABLE, "OSGEARTH_NOTIFY_LEVEL ", "FATAL | WARN | NOTICE | DEBUG_INFO | DEBUG_FP | DEBUG | INFO | ALWAYS"); @@ -149,6 +155,12 @@ namespace _logger = spdlog::stdout_color_mt("osgearth"); _logger->set_pattern("%^[%n %l]%$ %v"); _logger->set_level(spdlog::level::debug); + + NotifyPrefix::DEBUG = {}; + NotifyPrefix::INFO = {}; + NotifyPrefix::NOTICE = {}; + NotifyPrefix::WARN = {}; + NotifyPrefix::ALWAYS = {}; } void notify(osg::NotifySeverity severity, const char *message) diff --git a/src/osgEarth/OGRFeatureSource b/src/osgEarth/OGRFeatureSource index 42633d47b8..131db11d30 100644 --- a/src/osgEarth/OGRFeatureSource +++ b/src/osgEarth/OGRFeatureSource @@ -21,6 +21,7 @@ #include #include +#include namespace osgEarth { @@ -146,7 +147,7 @@ namespace osgEarth void* _dsHandle; void* _layerHandle; void* _ogrDriverHandle; - unsigned _dsHandleThreadId; + std::thread::id _dsHandleThreadId; int _featureCount; bool _needsSync; bool _writable; diff --git a/src/osgEarth/OGRFeatureSource.cpp b/src/osgEarth/OGRFeatureSource.cpp index 2fcc32b34b..bf13cc10d1 100644 --- a/src/osgEarth/OGRFeatureSource.cpp +++ b/src/osgEarth/OGRFeatureSource.cpp @@ -515,7 +515,7 @@ OGRFeatureSource::openImplementation() // otherwise, assume we're loading from the URL/connection: // remember the thread so we don't use the handles illegaly. 
- _dsHandleThreadId = osgEarth::Threading::getCurrentThreadId(); + _dsHandleThreadId = std::this_thread::get_id(); // If the user request a particular driver, set that up now: std::string driverName = options().ogrDriver().value(); @@ -776,7 +776,7 @@ OGRFeatureSource::create(const FeatureProfile* profile, _ogrDriverHandle = OGRGetDriverByName(driverName.c_str()); - _dsHandleThreadId = osgEarth::Threading::getCurrentThreadId(); + _dsHandleThreadId = std::this_thread::get_id(); // this handle may ONLY be used from this thread! // https://github.com/OSGeo/gdal/blob/v2.4.1/gdal/gcore/gdaldataset.cpp#L2577 @@ -825,7 +825,7 @@ OGRFeatureSource::buildSpatialIndex() if (_dsHandle && _layerHandle && OGR_L_TestCapability(_layerHandle, OLCFastSpatialFilter) == 0 && - _dsHandleThreadId == osgEarth::Threading::getCurrentThreadId()) + _dsHandleThreadId == std::this_thread::get_id()) { std::stringstream buf; const char* name = OGR_FD_GetName(OGR_L_GetLayerDefn(_layerHandle)); diff --git a/src/osgEarth/ObjectIndex b/src/osgEarth/ObjectIndex index 185edaa413..d6a2a6ff56 100644 --- a/src/osgEarth/ObjectIndex +++ b/src/osgEarth/ObjectIndex @@ -99,7 +99,7 @@ namespace osgEarth */ template osg::ref_ptr get(ObjectID id) const { - Threading::ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); return dynamic_cast( getImpl(id) ); } @@ -235,7 +235,7 @@ namespace osgEarth IndexMap _index; int _attribLocation; std::string _oidUniformName; - mutable Threading::Mutex _mutex; + mutable std::mutex _mutex; std::atomic_int _idGen; ShaderPackage _shaders; std::string _attribName; diff --git a/src/osgEarth/ObjectIndex.cpp b/src/osgEarth/ObjectIndex.cpp index 6e7548a4cd..1d2de58065 100644 --- a/src/osgEarth/ObjectIndex.cpp +++ b/src/osgEarth/ObjectIndex.cpp @@ -59,8 +59,7 @@ namespace } ObjectIndex::ObjectIndex() : -_idGen( STARTING_OBJECT_ID ), -_mutex("ObjectIndex(OE)") + _idGen(STARTING_OBJECT_ID) { _attribName = "oe_index_objectid_attr"; _attribLocation = osg::Drawable::SECONDARY_COLORS; @@ -100,7 +99,7 @@ ObjectIndex::setObjectIDAtrribLocation(int value) ObjectID ObjectIndex::insert(osg::Referenced* object) { - Threading::ScopedMutexLock excl( _mutex ); + std::lock_guard excl( _mutex ); return insertImpl( object ); } @@ -125,7 +124,7 @@ ObjectIndex::getImpl(ObjectID id) const void ObjectIndex::remove(ObjectID id) { - Threading::ScopedMutexLock excl(_mutex); + std::lock_guard excl(_mutex); removeImpl(id); } @@ -141,7 +140,7 @@ ObjectIndex::removeImpl(ObjectID id) ObjectID ObjectIndex::tagDrawable(osg::Drawable* drawable, osg::Referenced* object) { - Threading::ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); ObjectID oid = insertImpl(object); tagDrawable(drawable, oid); return oid; @@ -170,7 +169,7 @@ ObjectIndex::tagDrawable(osg::Drawable* drawable, ObjectID id) const ObjectID ObjectIndex::tagRange(osg::Drawable* drawable, osg::Referenced* object, unsigned int start, unsigned int count) { - Threading::ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); ObjectID oid = insertImpl(object); tagRange(drawable, oid, start, count); return oid; @@ -236,7 +235,7 @@ namespace ObjectID ObjectIndex::tagAllDrawables(osg::Node* node, osg::Referenced* object) { - Threading::ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); ObjectID oid = insertImpl(object); tagAllDrawables(node, oid); return oid; @@ -255,7 +254,7 @@ ObjectIndex::tagAllDrawables(osg::Node* node, ObjectID id) const ObjectID ObjectIndex::tagNode(osg::Node* node, osg::Referenced* object) { - Threading::ScopedMutexLock 
lock(_mutex); + std::lock_guard lock(_mutex); ObjectID oid = insertImpl(object); tagNode(node, oid); return oid; diff --git a/src/osgEarth/OverlayDecorator.cpp b/src/osgEarth/OverlayDecorator.cpp index 252561f96e..fc7c26e0e7 100755 --- a/src/osgEarth/OverlayDecorator.cpp +++ b/src/osgEarth/OverlayDecorator.cpp @@ -272,13 +272,12 @@ namespace //--------------------------------------------------------------------------- OverlayDecorator::OverlayDecorator() : -_dumpRequested ( false ), -_rttTraversalMask ( ~0 ), -_maxHorizonDistance ( DBL_MAX ), -_totalOverlayChildren( 0 ), -_maxHeight ( 500000.0 ), -_isGeocentric(true), -_perViewDataMutex(OE_MUTEX_NAME) + _dumpRequested(false), + _rttTraversalMask(~0), + _maxHorizonDistance(DBL_MAX), + _totalOverlayChildren(0), + _maxHeight(500000.0), + _isGeocentric(true) { //nop. } diff --git a/src/osgEarth/PagedNode b/src/osgEarth/PagedNode index b9a599bfbd..7ced2c8b5f 100644 --- a/src/osgEarth/PagedNode +++ b/src/osgEarth/PagedNode @@ -224,7 +224,7 @@ namespace osgEarth { namespace Util float _maxPixels; bool _useRange; float _priorityScale; - Job _job; + jobs::context _job; bool _preCompile; std::function(Cancelable*)> _load; std::atomic_int _revision; @@ -249,7 +249,7 @@ namespace osgEarth { namespace Util //! Subordinates call this to inform the paging manager they are still alive. void* use(PagedNode2* node, void* token) { - ScopedLockIf lock(_trackerMutex, _threadsafe); + scoped_lock_if lock(_trackerMutex, _threadsafe); return _tracker.use(node, token); } @@ -270,7 +270,7 @@ namespace osgEarth { namespace Util std::list _trash; using UpdateFunc = std::function; UpdateFunc _updateFunc; - JobArena::Metrics::Arena::Ptr _metrics; + jobs::jobpool::metrics_t* _metrics; mutable Mutex _mergeMutex; struct ToMerge { diff --git a/src/osgEarth/PagedNode.cpp b/src/osgEarth/PagedNode.cpp index b2690d6866..507404f4a4 100644 --- a/src/osgEarth/PagedNode.cpp +++ b/src/osgEarth/PagedNode.cpp @@ -42,7 +42,6 @@ PagedNode2::PagedNode2() : _mergeTriggered(false), _merged(false), _failed(false), - _mutex("PagedNode.mutex(OE)"), _minRange(0.0f), _maxRange(FLT_MAX), _minPixels(0.0f), @@ -55,8 +54,8 @@ PagedNode2::PagedNode2() : _autoUnload(true), _lastRange(FLT_MAX) { - _job.setName(typeid(*this).name()); - _job.setArena(PAGEDNODE_ARENA_NAME); + _job.name = (typeid(*this).name()); + _job.pool = jobs::get_pool(PAGEDNODE_ARENA_NAME); } PagedNode2::~PagedNode2() @@ -86,7 +85,7 @@ PagedNode2::traverse(osg::NodeVisitor& nv) // locate the paging manager if there is one if (_pagingManager == nullptr) { - ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); if (_pagingManager == nullptr) // double check { osg::ref_ptr pm; @@ -258,14 +257,14 @@ PagedNode2::load(float priority, const osg::Object* host) osg::observer_ptr callbacks_weakptr(_callbacks); bool preCompile = _preCompile; - _job.setPriority(priority); + jobs::context job = _job; + job.priority = [priority]() { return priority; }; - _loaded = _job.dispatch( - [load, callbacks_weakptr, preCompile](Cancelable* c) + auto task = [load, callbacks_weakptr, preCompile](Cancelable& c) { Loaded result; - osg::ref_ptr progress = new ProgressCallback(c); + osg::ref_ptr progress = new ProgressCallback(&c); // invoke the loader function result._node = load(progress.get()); @@ -287,8 +286,9 @@ PagedNode2::load(float priority, const osg::Object* host) } return result; - } - ); + }; + + _loaded = jobs::dispatch(task, job); } else { @@ -377,8 +377,6 @@ bool PagedNode2::isLoaded() const } PagingManager::PagingManager() : - 
_trackerMutex(OE_MUTEX_NAME), - _mergeMutex(OE_MUTEX_NAME), _tracker(), _mergesPerFrame(4u), _newFrame(false) @@ -386,9 +384,9 @@ PagingManager::PagingManager() : setCullingActive(false); ADJUST_UPDATE_TRAV_COUNT(this, +1); - auto arena = JobArena::get(PAGEDNODE_ARENA_NAME); - arena->setConcurrency(4u); - _metrics = arena->metrics(); + auto pool = jobs::get_pool(PAGEDNODE_ARENA_NAME); + pool->set_concurrency(4u); + _metrics = pool->metrics(); #ifdef OSGEARTH_SINGLE_THREADED_OSG _threadsafe = false; @@ -399,8 +397,8 @@ PagingManager::~PagingManager() { if (_mergeQueue.size() > 0) { - _metrics->numJobsRunning.exchange( - _metrics->numJobsRunning - _mergeQueue.size()); + _metrics->running.exchange( + _metrics->running - _mergeQueue.size()); } } @@ -426,7 +424,7 @@ PagingManager::traverse(osg::NodeVisitor& nv) if (nv.getVisitorType() == nv.CULL_VISITOR) { // After culling is complete, update all of the ranges for all of the node - ScopedLockIf lock(_trackerMutex, _threadsafe); + scoped_lock_if lock(_trackerMutex, _threadsafe); for (auto& entry : _tracker._list) { @@ -442,19 +440,19 @@ PagingManager::traverse(osg::NodeVisitor& nv) void PagingManager::merge(PagedNode2* host) { - ScopedLockIf lock(_mergeMutex, _threadsafe); + scoped_lock_if lock(_mergeMutex, _threadsafe); ToMerge toMerge; toMerge._node = host; toMerge._revision = host->_revision; _mergeQueue.push(std::move(toMerge)); - _metrics->numJobsRunning++; + _metrics->running++; } void PagingManager::update() { - ScopedLockIf lock(_trackerMutex, _threadsafe); + scoped_lock_if lock(_trackerMutex, _threadsafe); _tracker.flush( _mergesPerFrame, @@ -501,6 +499,6 @@ PagingManager::update() ++count; } _mergeQueue.pop(); - _metrics->numJobsRunning--; + _metrics->running--; } } diff --git a/src/osgEarth/PlaceNode.cpp b/src/osgEarth/PlaceNode.cpp index 03a75a5856..d41135f7c3 100644 --- a/src/osgEarth/PlaceNode.cpp +++ b/src/osgEarth/PlaceNode.cpp @@ -113,8 +113,8 @@ PlaceNode::construct() // Construct the shared state sets if (s_geodeStateSet.lock(_geodeStateSet) == false) { - static Threading::Mutex s_mutex(OE_MUTEX_NAME); - Threading::ScopedMutexLock lock(s_mutex); + static std::mutex s_mutex; + std::lock_guard lock(s_mutex); if (s_geodeStateSet.lock(_geodeStateSet) == false) { @@ -135,8 +135,8 @@ PlaceNode::construct() if (s_imageStateSet.lock(_imageStateSet) == false) { - static Threading::Mutex s_mutex(OE_MUTEX_NAME); - Threading::ScopedMutexLock lock(s_mutex); + static std::mutex s_mutex; + std::lock_guard lock(s_mutex); if (s_imageStateSet.lock(_imageStateSet) == false) { diff --git a/src/osgEarth/PointDrawable.cpp b/src/osgEarth/PointDrawable.cpp index 3cf9eb4552..4a09d24dbe 100644 --- a/src/osgEarth/PointDrawable.cpp +++ b/src/osgEarth/PointDrawable.cpp @@ -630,8 +630,8 @@ PointDrawable::setupState() { if (s_sharedStateSet.lock(_sharedStateSet) == false) { - static Threading::Mutex s_mutex(OE_MUTEX_NAME); - Threading::ScopedMutexLock lock(s_mutex); + static std::mutex s_mutex; + std::lock_guard lock(s_mutex); if (s_sharedStateSet.lock(_sharedStateSet) == false) { @@ -676,8 +676,8 @@ PointDrawable::checkSharedStateSet(osg::State* state) const { if (_sharedStateSet.valid() && !_sharedStateSetCompiled) { - static Threading::Mutex s_mutex(OE_MUTEX_NAME); - Threading::ScopedMutexLock lock(s_mutex); + static std::mutex s_mutex; + std::lock_guard lock(s_mutex); if (!_sharedStateSetCompiled) { diff --git a/src/osgEarth/Progress b/src/osgEarth/Progress index bc21e36f69..c51509229d 100644 --- a/src/osgEarth/Progress +++ b/src/osgEarth/Progress @@ 
-30,9 +30,7 @@ namespace osgEarth * ProgressCallback is a general purpose interface for functions that need to report progress * or handle cancelation of a task. */ - class OSGEARTH_EXPORT ProgressCallback : - public osg::Referenced, - public Threading::Cancelable + class OSGEARTH_EXPORT ProgressCallback : public osg::Referenced, public Cancelable { public: //! Creates a new ProgressCallback @@ -104,7 +102,9 @@ namespace osgEarth * recoverable problem occurred and the task is eligible to be * tried again later (e.g., HTTP timeout) */ - bool isCanceled() const override; + bool canceled() const override; + + bool isCanceled() const { return canceled(); } // backwards compatibility //! Resets the cancelation flag void reset(); diff --git a/src/osgEarth/Progress.cpp b/src/osgEarth/Progress.cpp index cfd98b70e5..12a9fee61b 100644 --- a/src/osgEarth/Progress.cpp +++ b/src/osgEarth/Progress.cpp @@ -59,12 +59,12 @@ ProgressCallback::reset() } bool -ProgressCallback::isCanceled() const +ProgressCallback::canceled() const { if (!_canceled) { if ((shouldCancel()) || - (_cancelable && _cancelable->isCanceled()) || + (_cancelable && _cancelable->canceled()) || (_cancelPredicate && _cancelPredicate())) { _canceled = true; diff --git a/src/osgEarth/Registry b/src/osgEarth/Registry index e462a9d2ca..d81e29bc95 100644 --- a/src/osgEarth/Registry +++ b/src/osgEarth/Registry @@ -298,7 +298,7 @@ namespace osgEarth template R* registerSingleton(R* singleton) { - Threading::ScopedMutexLock lock(_regMutex); + std::lock_guard lock(_regMutex); _singletons.push_back(singleton); return singleton; } @@ -307,7 +307,7 @@ namespace osgEarth virtual ~Registry(); Registry(); - mutable Threading::Mutex _regMutex; + mutable std::mutex _regMutex; ProgramRepo _programRepo; @@ -325,7 +325,7 @@ namespace osgEarth // system capabilities: osg::ref_ptr< Capabilities > _caps; - mutable Threading::Mutex _capsMutex; + mutable std::mutex _capsMutex; void initCapabilities(); osg::ref_ptr _defaultOptions; @@ -336,7 +336,7 @@ namespace osgEarth typedef std::vector UnitsVector; UnitsVector _unitsVector; - //mutable Threading::Mutex _unitsVectorMutex; + //mutable std::mutex _unitsVectorMutex; osg::ref_ptr _stateSetCache; @@ -352,7 +352,7 @@ namespace osgEarth } }; std::set _activities; - mutable Threading::Mutex _activityMutex; + mutable std::mutex _activityMutex; optional _unRefImageDataAfterApply; diff --git a/src/osgEarth/Registry.cpp b/src/osgEarth/Registry.cpp index 99a0596c3b..a4a49df12d 100644 --- a/src/osgEarth/Registry.cpp +++ b/src/osgEarth/Registry.cpp @@ -65,6 +65,11 @@ void osgEarth::initialize() { GLUtils::useNVGL(true); } + + // Tell the weetjobs library how to set a thread name + jobs::set_thread_name_function([](const char* value) { + osgEarth::setThreadName(value); + }); } void osgEarth::initialize(osg::ArgumentParser& args) @@ -100,19 +105,14 @@ namespace } Registry::Registry() : -_caps ( 0L ), -_defaultFont ( 0L ), -_terrainEngineDriver( "rex" ), -_cacheDriver ( "filesystem" ), -_overrideCachePolicyInitialized( false ), -_devicePixelRatio(1.0f), -_maxVertsPerDrawable(UINT_MAX), -_regMutex("Registry(OE)"), -_activityMutex("Reg.Activity(OE)"), -_capsMutex("Reg.Caps(OE)"), -_srsCache("Reg.SRSCache(OE)"), -_blacklist("Reg.BlackList(OE)"), -_maxImageDimension(INT_MAX) + _caps(nullptr), + _defaultFont(nullptr), + _terrainEngineDriver("rex"), + _cacheDriver("filesystem"), + _overrideCachePolicyInitialized(false), + _devicePixelRatio(1.0f), + _maxVertsPerDrawable(UINT_MAX), + _maxImageDimension(INT_MAX) { // set up GDAL and 
OGR. OGRRegisterAll(); @@ -240,7 +240,7 @@ _maxImageDimension(INT_MAX) Units::registerAll( this ); // Default concurrency for async image layers - JobArena::setConcurrency("oe.layer.async", 4u); + jobs::get_pool("oe.layer.async")->set_concurrency(4u); // register the chonk bin with OSG osgUtil::RenderBin::addRenderBinPrototype( @@ -343,7 +343,7 @@ Registry::release() Threading::RecursiveMutex& osgEarth::getGDALMutex() { - static osgEarth::Threading::RecursiveMutex _gdal_mutex("GDAL Mutex"); + static osgEarth::Threading::RecursiveMutex _gdal_mutex; return _gdal_mutex; } @@ -382,7 +382,7 @@ Registry::getNamedProfile( const std::string& name ) const osg::ref_ptr Registry::getOrCreateSRS(const SpatialReference::Key& key) { - ScopedMutexLock lock(_srsCache); + std::lock_guard lock(_srsCache.mutex()); osg::ref_ptr& srs = _srsCache[key]; if (!srs.valid()) { @@ -432,7 +432,7 @@ Registry::getDefaultCacheDriverName() const { if (!_cacheDriver.isSet()) { - Threading::ScopedMutexLock lock(_regMutex); + std::lock_guard lock(_regMutex); if (!_cacheDriver.isSet()) { @@ -459,7 +459,7 @@ Registry::overrideCachePolicy() const { if ( !_overrideCachePolicyInitialized ) { - Threading::ScopedMutexLock lock(_regMutex); + std::lock_guard lock(_regMutex); if ( !_overrideCachePolicyInitialized ) { @@ -501,7 +501,7 @@ Registry::getDefaultCache() const { std::string driverName = getDefaultCacheDriverName(); - Threading::ScopedMutexLock lock(_regMutex); + std::lock_guard lock(_regMutex); if (!_defaultCache.valid()) { const char* noCache = ::getenv(OSGEARTH_ENV_NO_CACHE); @@ -536,7 +536,7 @@ Registry::setDefaultCache(Cache* cache) bool Registry::isBlacklisted(const std::string& filename) { - Threading::ScopedMutexLock sharedLock(_blacklist.mutex()); + std::lock_guard sharedLock(_blacklist.mutex()); return _blacklist.find(filename) != _blacklist.end(); } @@ -560,7 +560,7 @@ Registry::clearBlacklist() unsigned int Registry::getNumBlacklistedFilenames() { - Threading::ScopedMutexLock sharedLock(_blacklist.mutex()); + std::lock_guard sharedLock(_blacklist.mutex()); return _blacklist.size(); } @@ -588,7 +588,7 @@ Registry::setCapabilities( Capabilities* caps ) void Registry::initCapabilities() { - ScopedMutexLock lock( _capsMutex ); // double-check pattern (see getCapabilities) + std::lock_guard lock( _capsMutex ); // double-check pattern (see getCapabilities) if ( !_caps.valid() ) _caps = new Capabilities(); } @@ -598,7 +598,7 @@ Registry::getShaderFactory() const { if (!_shaderLib.valid()) { - ScopedMutexLock lock(_regMutex); + std::lock_guard lock(_regMutex); if (!_shaderLib.valid()) const_cast(this)->_shaderLib = new ShaderFactory(); } @@ -640,14 +640,14 @@ Registry::getURIReadCallback() const void Registry::setDefaultFont( osgText::Font* font ) { - Threading::ScopedMutexLock exclusive(_regMutex); + std::lock_guard exclusive(_regMutex); _defaultFont = font; } osgText::Font* Registry::getDefaultFont() { - Threading::ScopedMutexLock shared(_regMutex); + std::lock_guard shared(_regMutex); return _defaultFont.get(); } @@ -670,14 +670,14 @@ Registry::cloneOrCreateOptions(const osgDB::Options* input) void Registry::registerUnits( const Units* units ) { - Threading::ScopedMutexLock lock(_regMutex); + std::lock_guard lock(_regMutex); _unitsVector.push_back(units); } const Units* Registry::getUnits(const std::string& name) const { - Threading::ScopedMutexLock lock(_regMutex); + std::lock_guard lock(_regMutex); std::string lower = toLower(name); for( UnitsVector::const_iterator i = _unitsVector.begin(); i != _unitsVector.end(); 
++i ) { @@ -725,7 +725,7 @@ Registry::getObjectIndex() const { if (!_objectIndex.valid()) { - ScopedMutexLock lock(_regMutex); + std::lock_guard lock(_regMutex); if (!_objectIndex.valid()) { _objectIndex = new ObjectIndex(); @@ -737,7 +737,7 @@ Registry::getObjectIndex() const void Registry::startActivity(const std::string& activity) { - Threading::ScopedMutexLock lock(_activityMutex); + std::lock_guard lock(_activityMutex); _activities.insert(Activity(activity,std::string())); } @@ -745,7 +745,7 @@ void Registry::startActivity(const std::string& activity, const std::string& value) { - Threading::ScopedMutexLock lock(_activityMutex); + std::lock_guard lock(_activityMutex); _activities.erase(Activity(activity,std::string())); _activities.insert(Activity(activity,value)); } @@ -753,14 +753,14 @@ Registry::startActivity(const std::string& activity, void Registry::endActivity(const std::string& activity) { - Threading::ScopedMutexLock lock(_activityMutex); + std::lock_guard lock(_activityMutex); _activities.erase(Activity(activity,std::string())); } void Registry::getActivities(std::set& output) { - Threading::ScopedMutexLock lock(_activityMutex); + std::lock_guard lock(_activityMutex); for(std::set::const_iterator i = _activities.begin(); i != _activities.end(); ++i) @@ -807,14 +807,14 @@ Registry::getMimeTypeForExtension(const std::string& ext) void Registry::setTextureImageUnitOffLimits(int unit) { - Threading::ScopedMutexLock exclusive(_regMutex); + std::lock_guard exclusive(_regMutex); _offLimitsTextureImageUnits.insert(unit); } const std::set Registry::getOffLimitsTextureImageUnits() const { - Threading::ScopedMutexLock exclusive(_regMutex); + std::lock_guard exclusive(_regMutex); return _offLimitsTextureImageUnits; } diff --git a/src/osgEarth/Resource b/src/osgEarth/Resource index 2023adfaee..a8c4759a37 100644 --- a/src/osgEarth/Resource +++ b/src/osgEarth/Resource @@ -64,7 +64,7 @@ namespace osgEarth { namespace Util protected: - mutable Threading::Mutex _mutex; + mutable std::mutex _mutex; mutable Status _status; private: diff --git a/src/osgEarth/Resource.cpp b/src/osgEarth/Resource.cpp index 440ece16ae..b8171ea9fc 100644 --- a/src/osgEarth/Resource.cpp +++ b/src/osgEarth/Resource.cpp @@ -21,8 +21,7 @@ using namespace osgEarth; -Resource::Resource( const Config& conf ) : -_mutex("OE.Resource") +Resource::Resource( const Config& conf ) { mergeConfig( conf ); } diff --git a/src/osgEarth/ResourceCache b/src/osgEarth/ResourceCache index 64bf2c63b2..4b34bf4ec6 100644 --- a/src/osgEarth/ResourceCache +++ b/src/osgEarth/ResourceCache @@ -82,16 +82,16 @@ namespace osgEarth { namespace Util //typedef LRUCache > SkinCache; typedef LRUCache > SkinCache; SkinCache _skinCache; - Threading::Mutex _skinMutex; + std::mutex _skinMutex; typedef LRUCache > TextureCache; TextureCache _texCache; - Threading::Mutex _texMutex; + std::mutex _texMutex; //typedef LRUCache > InstanceCache; typedef LRUCache > InstanceCache; InstanceCache _instanceCache; - Threading::Mutex _instanceMutex; + std::mutex _instanceMutex; }; } } diff --git a/src/osgEarth/ResourceCache.cpp b/src/osgEarth/ResourceCache.cpp index 6c04ffa885..55d86d6676 100644 --- a/src/osgEarth/ResourceCache.cpp +++ b/src/osgEarth/ResourceCache.cpp @@ -25,12 +25,9 @@ using namespace osgEarth; // internal thread-safety not required since we mutex it in this object. 
ResourceCache::ResourceCache() : -_skinCache ( false ), -_instanceCache( false ), -_texCache( false ), -_skinMutex(OE_MUTEX_NAME), -_instanceMutex(OE_MUTEX_NAME), -_texMutex(OE_MUTEX_NAME) + _skinCache(false), + _instanceCache(false), + _texCache(false) { //nop } @@ -38,7 +35,7 @@ _texMutex(OE_MUTEX_NAME) bool ResourceCache::getOrCreateLineTexture(const URI& uri, osg::ref_ptr& output, const osgDB::Options* readOptions) { - Threading::ScopedMutexLock lock(_texMutex); + std::lock_guard lock(_texMutex); TextureCache::Record rec; if (_texCache.get(uri.full(), rec) && rec.value().valid()) { @@ -85,7 +82,7 @@ ResourceCache::getOrCreateStateSet(SkinResource* skin, // exclusive lock (since it's an LRU) { - Threading::ScopedMutexLock exclusive( _skinMutex ); + std::lock_guard exclusive( _skinMutex ); // double check to avoid race condition SkinCache::Record rec; @@ -118,7 +115,7 @@ ResourceCache::getOrCreateInstanceNode(InstanceResource* res, // exclusive lock (since it's an LRU) { - Threading::ScopedMutexLock exclusive( _instanceMutex ); + std::lock_guard exclusive( _instanceMutex ); // double check to avoid race condition InstanceCache::Record rec; @@ -150,7 +147,7 @@ ResourceCache::cloneOrCreateInstanceNode(InstanceResource* res, // exclusive lock (since it's an LRU) { - Threading::ScopedMutexLock exclusive( _instanceMutex ); + std::lock_guard exclusive( _instanceMutex ); // Deep copy everything except for images. Some models may share imagery so we only want one copy of it at a time. osg::CopyOp copyOp = osg::CopyOp::DEEP_COPY_ALL & ~osg::CopyOp::DEEP_COPY_IMAGES & ~osg::CopyOp::DEEP_COPY_TEXTURES; diff --git a/src/osgEarth/ResourceLibrary.cpp b/src/osgEarth/ResourceLibrary.cpp index 8e6bdd4e2b..f0334838bd 100644 --- a/src/osgEarth/ResourceLibrary.cpp +++ b/src/osgEarth/ResourceLibrary.cpp @@ -33,18 +33,16 @@ using namespace osgEarth; //------------------------------------------------------------------------ ResourceLibrary::ResourceLibrary(const Config& conf) : -_initialized( false ), -_mutex(OE_MUTEX_NAME) + _initialized(false) { - mergeConfig( conf ); + mergeConfig(conf); } -ResourceLibrary::ResourceLibrary(const std::string& name, - const URI& uri) : -_name ( name ), -_uri ( uri, uri ), -_initialized( false ), -_mutex(OE_MUTEX_NAME) +ResourceLibrary::ResourceLibrary(const std::string& name, + const URI& uri) : + _name(name), + _uri(uri, uri), + _initialized(false) { //nop } @@ -145,7 +143,7 @@ ResourceLibrary::removeResource( Resource* resource ) namespace { - static Threading::Mutex s_initMutex(OE_MUTEX_NAME); + static std::mutex s_initMutex; } bool @@ -155,7 +153,7 @@ ResourceLibrary::initialize( const osgDB::Options* dbOptions ) if ( !_initialized ) { - Threading::ScopedMutexLock exclusive(s_initMutex); + std::lock_guard exclusive(s_initMutex); if ( !_initialized ) { ok = false; diff --git a/src/osgEarth/ResourceReleaser b/src/osgEarth/ResourceReleaser index 156316b21c..76ab16f791 100644 --- a/src/osgEarth/ResourceReleaser +++ b/src/osgEarth/ResourceReleaser @@ -55,7 +55,7 @@ namespace osgEarth { namespace Util private: mutable ObjectList _toRelease; - mutable Threading::Mutex _mutex; + mutable std::mutex _mutex; }; } } diff --git a/src/osgEarth/ResourceReleaser.cpp b/src/osgEarth/ResourceReleaser.cpp index 10b98f26ce..ff6f0de42f 100644 --- a/src/osgEarth/ResourceReleaser.cpp +++ b/src/osgEarth/ResourceReleaser.cpp @@ -30,8 +30,7 @@ using namespace osgEarth::Util; #define LC "[ResourceReleaser] " -ResourceReleaser::ResourceReleaser() : - _mutex("ResourceReleaser(OE)") 
+ResourceReleaser::ResourceReleaser() { // ensure this node always gets traversed: this->setCullingActive(false); @@ -46,7 +45,7 @@ ResourceReleaser::ResourceReleaser() : void ResourceReleaser::push(osg::Object* object) { - Threading::ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); _toRelease.push_back(object); } @@ -54,7 +53,7 @@ ResourceReleaser::push(osg::Object* object) void ResourceReleaser::push(const ObjectList& objects) { - Threading::ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); _toRelease.reserve(_toRelease.size() + objects.size()); for (unsigned i = 0; i lock(_mutex); if (!_toRelease.empty()) { for (ObjectList::const_iterator i = _toRelease.begin(); i != _toRelease.end(); ++i) diff --git a/src/osgEarth/SDF b/src/osgEarth/SDF index c5e441887f..352fc8bda0 100644 --- a/src/osgEarth/SDF +++ b/src/osgEarth/SDF @@ -115,21 +115,7 @@ namespace osgEarth { namespace Util private: - //void compute_nnf_on_gpu(osg::Image* buf) const; void compute_nnf_on_cpu(osg::Image* buf) const; - -#if 0 - struct NNFSession : public ComputeImageSession - { - public: - NNFSession() : _L_uniform(-1) { } - void renderImplementation(osg::State* state) override; - GLint _L_uniform; - }; - - PerThreadComputeSession _compute; - osg::ref_ptr _program; -#endif bool _useGPU; }; diff --git a/src/osgEarth/SceneGraphCallback b/src/osgEarth/SceneGraphCallback index 6d296d9e81..219104d275 100644 --- a/src/osgEarth/SceneGraphCallback +++ b/src/osgEarth/SceneGraphCallback @@ -79,7 +79,7 @@ namespace osgEarth private: SceneGraphCallbackVector _callbacks; - Threading::ReadWriteRecursiveMutex _mutex; + Threading::RecursiveMutex _mutex; osg::observer_ptr _sender; }; diff --git a/src/osgEarth/SceneGraphCallback.cpp b/src/osgEarth/SceneGraphCallback.cpp index 1a395b6d2c..dc25408d7d 100644 --- a/src/osgEarth/SceneGraphCallback.cpp +++ b/src/osgEarth/SceneGraphCallback.cpp @@ -27,8 +27,7 @@ using namespace osgEarth::Util; //................................................................... 
SceneGraphCallbacks::SceneGraphCallbacks(osg::Object* sender) : -_sender(sender), -_mutex("SceneGraphCallbacks(OE)") + _sender(sender) { //nop } @@ -38,7 +37,7 @@ SceneGraphCallbacks::add(SceneGraphCallback* cb) { if (cb) { - Threading::ScopedRecursiveWriteLock lock(_mutex); + Threading::ScopedRecursiveMutexLock lock(_mutex); _callbacks.push_back(cb); } } @@ -48,7 +47,7 @@ SceneGraphCallbacks::remove(SceneGraphCallback* cb) { if (cb) { - Threading::ScopedRecursiveWriteLock lock(_mutex); + Threading::ScopedRecursiveMutexLock lock(_mutex); for (SceneGraphCallbackVector::iterator i = _callbacks.begin(); i != _callbacks.end(); ++i) { if (i->get() == cb) @@ -63,7 +62,7 @@ SceneGraphCallbacks::remove(SceneGraphCallback* cb) void SceneGraphCallbacks::firePreMergeNode(osg::Node* node) { - Threading::ScopedRecursiveReadLock lock(_mutex); + Threading::ScopedRecursiveMutexLock lock(_mutex); osg::ref_ptr sender; _sender.lock(sender); for (SceneGraphCallbackVector::iterator i = _callbacks.begin(); i != _callbacks.end(); ++i) @@ -73,7 +72,7 @@ SceneGraphCallbacks::firePreMergeNode(osg::Node* node) void SceneGraphCallbacks::firePostMergeNode(osg::Node* node) { - Threading::ScopedRecursiveReadLock lock(_mutex); // prob not necessary but good measure + Threading::ScopedRecursiveMutexLock lock(_mutex); // prob not necessary but good measure osg::ref_ptr sender; _sender.lock(sender); for (SceneGraphCallbackVector::iterator i = _callbacks.begin(); i != _callbacks.end(); ++i) @@ -83,7 +82,7 @@ SceneGraphCallbacks::firePostMergeNode(osg::Node* node) void SceneGraphCallbacks::fireRemoveNode(osg::Node* node) { - Threading::ScopedRecursiveReadLock lock(_mutex); // prob not necessary but good measure + Threading::ScopedRecursiveMutexLock lock(_mutex); // prob not necessary but good measure osg::ref_ptr sender; _sender.lock(sender); for (SceneGraphCallbackVector::iterator i = _callbacks.begin(); i != _callbacks.end(); ++i) diff --git a/src/osgEarth/ScreenSpaceLayout.cpp b/src/osgEarth/ScreenSpaceLayout.cpp index be54a7ad9c..fc0a7172d4 100644 --- a/src/osgEarth/ScreenSpaceLayout.cpp +++ b/src/osgEarth/ScreenSpaceLayout.cpp @@ -117,7 +117,7 @@ namespace // we clone the render bin. This play nicely with static initialization. 
if (!_vpInstalled) { - Threading::ScopedMutexLock lock(_vpMutex); + std::lock_guard lock(_vpMutex); if (!_vpInstalled) { VirtualProgram* vp = VirtualProgram::getOrCreate(getStateSet()); @@ -165,11 +165,11 @@ namespace osg::ref_ptr _f; osg::ref_ptr _context; - static Threading::Mutex _vpMutex; + static std::mutex _vpMutex; static bool _vpInstalled; }; - Threading::Mutex osgEarthScreenSpaceLayoutRenderBin::_vpMutex(OE_MUTEX_NAME); + std::mutex osgEarthScreenSpaceLayoutRenderBin::_vpMutex; bool osgEarthScreenSpaceLayoutRenderBin::_vpInstalled = false; } diff --git a/src/osgEarth/ScriptEngine b/src/osgEarth/ScriptEngine index 5f9a3c3f62..9e790a2917 100644 --- a/src/osgEarth/ScriptEngine +++ b/src/osgEarth/ScriptEngine @@ -148,7 +148,7 @@ namespace osgEarth { namespace Util std::vector _failedDrivers; static ScriptEngineFactory* s_singleton; - static osgEarth::Threading::Mutex s_singletonMutex; + static std::mutex s_singletonMutex; }; } } diff --git a/src/osgEarth/ScriptEngine.cpp b/src/osgEarth/ScriptEngine.cpp index 37ec889410..d238deeedf 100644 --- a/src/osgEarth/ScriptEngine.cpp +++ b/src/osgEarth/ScriptEngine.cpp @@ -87,14 +87,14 @@ ScriptEngine::run( #define SCRIPT_ENGINE_OPTIONS_TAG "__osgEarth::ScriptEngineOptions" ScriptEngineFactory* ScriptEngineFactory::s_singleton = 0L; -osgEarth::Threading::Mutex ScriptEngineFactory::s_singletonMutex(OE_MUTEX_NAME); +std::mutex ScriptEngineFactory::s_singletonMutex; ScriptEngineFactory* ScriptEngineFactory::instance() { if ( !s_singleton ) { - Threading::ScopedMutexLock lock(s_singletonMutex); + std::lock_guard lock(s_singletonMutex); if ( !s_singleton ) { s_singleton = new ScriptEngineFactory(); diff --git a/src/osgEarth/ShaderFactory.cpp b/src/osgEarth/ShaderFactory.cpp index 5a94fdd309..cd12c4a5e5 100755 --- a/src/osgEarth/ShaderFactory.cpp +++ b/src/osgEarth/ShaderFactory.cpp @@ -34,7 +34,7 @@ using namespace osgEarth::Util; namespace { - Threading::Mutex s_glslMutex; + std::mutex s_glslMutex; std::string s_glslHeader; #if defined(OSG_GLES2_AVAILABLE) || defined(OSG_GLES3_AVAILABLE) @@ -56,7 +56,7 @@ ShaderFactory::getGLSLHeader() { if (s_glslHeader.empty()) { - Threading::ScopedMutexLock lock(s_glslMutex); + std::lock_guard lock(s_glslMutex); if (s_glslHeader.empty()) { int version = Capabilities::get().getGLSLVersionInt(); diff --git a/src/osgEarth/ShaderGenerator.cpp b/src/osgEarth/ShaderGenerator.cpp index dff79651b9..be78c08f85 100644 --- a/src/osgEarth/ShaderGenerator.cpp +++ b/src/osgEarth/ShaderGenerator.cpp @@ -639,15 +639,15 @@ ShaderGenerator::apply(osg::PagedLOD& node) for( unsigned i=0; i lock(s_mutex); + const std::string& filename = node.getFileName( i ); if (!filename.empty() && osgDB::getLowerCaseFileExtension(filename).compare(SHADERGEN_PL_EXTENSION) != 0 ) { node.setFileName( i, Stringify() << filename << "." 
<< SHADERGEN_PL_EXTENSION ); } - s_mutex.unlock(); } apply( static_cast(node) ); diff --git a/src/osgEarth/SimplePager b/src/osgEarth/SimplePager index 05661fa0fb..70581f00d8 100644 --- a/src/osgEarth/SimplePager +++ b/src/osgEarth/SimplePager @@ -136,7 +136,7 @@ namespace osgEarth { namespace Util osg::observer_ptr< const osgEarth::Map > _map; - mutable Threading::Mutex _mutex; + mutable std::mutex _mutex; using Callbacks = std::vector>; Callbacks _callbacks; diff --git a/src/osgEarth/SimplePager.cpp b/src/osgEarth/SimplePager.cpp index bd23173c7c..a570ad0196 100644 --- a/src/osgEarth/SimplePager.cpp +++ b/src/osgEarth/SimplePager.cpp @@ -20,18 +20,17 @@ using namespace osgEarth::Util; #define LC "[SimplerPager] " -SimplePager::SimplePager(const osgEarth::Map* map, const osgEarth::Profile* profile): -_map(map), -_profile( profile ), -_rangeFactor( 6.0 ), -_additive(false), -_minLevel(0), -_maxLevel(30), -_priorityScale(1.0f), -_priorityOffset(0.0f), -_canCancel(true), -_done(false), -_mutex("SimplePager(OE)") +SimplePager::SimplePager(const osgEarth::Map* map, const osgEarth::Profile* profile) : + _map(map), + _profile(profile), + _rangeFactor(6.0), + _additive(false), + _minLevel(0), + _maxLevel(30), + _priorityScale(1.0f), + _priorityOffset(0.0f), + _canCancel(true), + _done(false) { if (map) { @@ -293,7 +292,7 @@ void SimplePager::addCallback(Callback* callback) { if (callback) { - Threading::ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); _callbacks.push_back(callback); } } @@ -302,7 +301,7 @@ void SimplePager::removeCallback(Callback* callback) { if (callback) { - Threading::ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); for (Callbacks::iterator i = _callbacks.begin(); i != _callbacks.end(); ++i) { if (i->get() == callback) @@ -316,7 +315,7 @@ void SimplePager::removeCallback(Callback* callback) void SimplePager::fire_onCreateNode(const TileKey& key, osg::Node* node) { - Threading::ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); for (Callbacks::iterator i = _callbacks.begin(); i != _callbacks.end(); ++i) i->get()->onCreateNode(key, node); } \ No newline at end of file diff --git a/src/osgEarth/Skins.cpp b/src/osgEarth/Skins.cpp index e6eada2e64..76802c8772 100644 --- a/src/osgEarth/Skins.cpp +++ b/src/osgEarth/Skins.cpp @@ -234,7 +234,7 @@ SkinResource::createImage( const osgDB::Options* dbOptions ) const if (result.failed()) { - Threading::ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); if (_status.isOK()) _status = Status::Error(Status::ServiceUnavailable, "Failed to load resource image\n"); } diff --git a/src/osgEarth/SpatialReference b/src/osgEarth/SpatialReference index d6417b28e9..b3f9ad9b45 100644 --- a/src/osgEarth/SpatialReference +++ b/src/osgEarth/SpatialReference @@ -388,7 +388,7 @@ namespace osgEarth mutable osg::ref_ptr _geo_srs; mutable osg::ref_ptr _geodetic_srs; // _geo_srs with a NULL vdatum. 
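// The ScriptEngineFactory, ShaderFactory and SpatialReference hunks in this
// patch keep their existing check-lock-check ("double-checked") lazy
// initialization and only swap the custom mutex for std::mutex. A sketch of
// that idiom, with a hypothetical Factory type standing in for the singleton:
#include <mutex>

namespace
{
    struct Factory { /* expensive-to-build singleton payload */ };

    Factory*   s_instance = nullptr;
    std::mutex s_instanceMutex;

    Factory* getFactory()
    {
        if (!s_instance)                                  // cheap unlocked check
        {
            std::lock_guard<std::mutex> lock(s_instanceMutex);
            if (!s_instance)                              // re-check under the lock
                s_instance = new Factory();
        }
        return s_instance;
    }
    // This mirrors the pattern used in the surrounding hunks; new code could
    // instead use a function-local static or std::call_once, which C++11
    // guarantees to be race-free.
}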
mutable osg::ref_ptr _geocentric_srs; - mutable Threading::Mutex _mutex; + mutable std::mutex _mutex; mutable bool _valid; mutable bool _initialized; Setup _setup; diff --git a/src/osgEarth/SpatialReference.cpp b/src/osgEarth/SpatialReference.cpp index 7761752ab5..5113b5bf1b 100644 --- a/src/osgEarth/SpatialReference.cpp +++ b/src/osgEarth/SpatialReference.cpp @@ -138,9 +138,7 @@ SpatialReference::SpatialReference(void* handle) : _is_user_defined(false), _is_ltp(false), _is_spherical_mercator(false), - _ellipsoidId(0u), - _local("OE.SRS.Local"), - _mutex("OE.SRS") + _ellipsoidId(0u) { _setup.srcHandle = handle; @@ -159,9 +157,7 @@ SpatialReference::SpatialReference(const Key& key) : _is_user_defined(false), _is_ltp(false), _is_spherical_mercator(false), - _ellipsoidId(0u), - _local("OE.SRS.Local"), - _mutex("OE.SRS") + _ellipsoidId(0u) { // shortcut for spherical-mercator: // https://wiki.openstreetmap.org/wiki/EPSG:3857 @@ -566,7 +562,7 @@ SpatialReference::getGeographicSRS() const if ( !_geo_srs.valid() ) { - Threading::ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); if ( !_geo_srs.valid() ) // double-check pattern { @@ -598,7 +594,7 @@ SpatialReference::getGeodeticSRS() const if ( !_geodetic_srs.valid() ) { - Threading::ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); if ( !_geodetic_srs.valid() ) // double check pattern { @@ -630,7 +626,7 @@ SpatialReference::getGeocentricSRS() const if ( !_geocentric_srs.valid() ) { - Threading::ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); if ( !_geocentric_srs.valid() ) // double-check pattern { diff --git a/src/osgEarth/StateSetCache b/src/osgEarth/StateSetCache index e7f013867c..39545502a4 100644 --- a/src/osgEarth/StateSetCache +++ b/src/osgEarth/StateSetCache @@ -155,7 +155,7 @@ namespace osgEarth typedef std::set< osg::ref_ptr, CompareStateAttributes> StateAttributeSet; StateAttributeSet _stateAttributeCache; - mutable Threading::Mutex _mutex; + mutable std::mutex _mutex; void prune(); void pruneIfNecessary(); diff --git a/src/osgEarth/StateSetCache.cpp b/src/osgEarth/StateSetCache.cpp index fd4a8c1816..e6afac7432 100644 --- a/src/osgEarth/StateSetCache.cpp +++ b/src/osgEarth/StateSetCache.cpp @@ -247,27 +247,26 @@ namespace //------------------------------------------------------------------------ StateSetCache::StateSetCache() : - _pruneCount ( 0 ), - _maxSize ( DEFAULT_PRUNE_ACCESS_COUNT ), - _attrShareAttempts( 0 ), - _attrsIneligible ( 0 ), - _attrShareHits ( 0 ), - _attrShareMisses ( 0 ), - _mutex ("StateSetCache(OE)") + _pruneCount(0), + _maxSize(DEFAULT_PRUNE_ACCESS_COUNT), + _attrShareAttempts(0), + _attrsIneligible(0), + _attrShareHits(0), + _attrShareMisses(0) { //nop } StateSetCache::~StateSetCache() { - Threading::ScopedMutexLock lock( _mutex ); + std::lock_guard lock( _mutex ); prune(); } void StateSetCache::releaseGLObjects(osg::State* state) const { - Threading::ScopedMutexLock lock( _mutex ); + std::lock_guard lock( _mutex ); for(StateSetSet::const_iterator i = _stateSetCache.begin(); i != _stateSetCache.end(); ++i) { i->get()->releaseGLObjects(state); @@ -280,7 +279,7 @@ StateSetCache::setMaxSize(unsigned value) { _maxSize = value; { - Threading::ScopedMutexLock lock( _mutex ); + std::lock_guard lock( _mutex ); pruneIfNecessary(); } } @@ -342,7 +341,7 @@ StateSetCache::share(osg::ref_ptr& input, if ( !checkEligible || eligible(input.get()) ) { - Threading::ScopedMutexLock lock( _mutex ); + std::lock_guard lock( _mutex ); pruneIfNecessary(); @@ -390,7 +389,7 @@ 
StateSetCache::share(osg::ref_ptr& input, if ( !checkEligible || eligible(input.get()) ) { - Threading::ScopedMutexLock lock( _mutex ); + std::lock_guard lock( _mutex ); pruneIfNecessary(); @@ -470,7 +469,7 @@ StateSetCache::prune() void StateSetCache::clear() { - Threading::ScopedMutexLock lock( _mutex ); + std::lock_guard lock( _mutex ); prune(); _stateAttributeCache.clear(); @@ -480,7 +479,7 @@ StateSetCache::clear() void StateSetCache::protect() { - Threading::ScopedMutexLock lock( _mutex ); + std::lock_guard lock( _mutex ); for(auto i : _stateSetCache) { i->setDataVariance(osg::Object::DYNAMIC); @@ -491,7 +490,7 @@ StateSetCache::protect() void StateSetCache::dumpStats() { - Threading::ScopedMutexLock lock( _mutex ); + std::lock_guard lock( _mutex ); OE_NOTICE << LC << "StateSetCache Dump:" << std::endl << " attr attempts = " << _attrShareAttempts << std::endl diff --git a/src/osgEarth/StyleSheet.cpp b/src/osgEarth/StyleSheet.cpp index 3dd7622f5f..ac19dc4950 100644 --- a/src/osgEarth/StyleSheet.cpp +++ b/src/osgEarth/StyleSheet.cpp @@ -194,7 +194,6 @@ void StyleSheet::init() { Layer::init(); - _resLibsMutex.setName(OE_MUTEX_NAME); } void diff --git a/src/osgEarth/Symbol.cpp b/src/osgEarth/Symbol.cpp index 477b31369b..21e03367ec 100644 --- a/src/osgEarth/Symbol.cpp +++ b/src/osgEarth/Symbol.cpp @@ -28,11 +28,11 @@ SymbolRegistry* SymbolRegistry::instance() { static SymbolRegistry* s_singleton =0L; - static Threading::Mutex s_singletonMutex(OE_MUTEX_NAME); + static std::mutex s_singletonMutex; if ( !s_singleton ) { - Threading::ScopedMutexLock lock(s_singletonMutex); + std::lock_guard lock(s_singletonMutex); if ( !s_singleton ) { s_singleton = new SymbolRegistry(); diff --git a/src/osgEarth/TDTiles b/src/osgEarth/TDTiles index a2e5576963..b287ef01a8 100644 --- a/src/osgEarth/TDTiles +++ b/src/osgEarth/TDTiles @@ -371,7 +371,7 @@ namespace osgEarth { namespace Contrib { namespace ThreeDTiles osg::ref_ptr _options; float _maximumScreenSpaceError; - mutable Threading::Mutex _mutex; + mutable std::mutex _mutex; ThreeDTileNode::TileTracker _tracker; ThreeDTileNode::TileTracker::iterator _sentryItr; diff --git a/src/osgEarth/TDTiles.cpp b/src/osgEarth/TDTiles.cpp index 9795f08a5c..44eb67541c 100644 --- a/src/osgEarth/TDTiles.cpp +++ b/src/osgEarth/TDTiles.cpp @@ -489,7 +489,7 @@ namespace { NetworkMonitor::ScopedRequestLayer layerRequest(_requestLayer); - if (progress && progress->isCanceled()) + if (progress && progress->canceled()) return nullptr; osg::ref_ptr tilesetNode; @@ -508,7 +508,7 @@ namespace osg::ref_ptr tileset = Tileset::create(rr.getString(), _uri.full()); if (tileset.valid()) { - if (progress && progress->isCanceled()) + if (progress && progress->canceled()) return nullptr; tilesetNode = new ThreeDTilesetContentNode(parentTileset.get(), tileset.get(), _options.get()); @@ -546,12 +546,13 @@ namespace std::shared_ptr operation = std::make_shared( parentTileset, uri, options); - JobArena* arena = JobArena::get("oe.3dtiles"); - return Job(arena).dispatch([operation, options](Cancelable* progress) - { - return operation->loadTileSet(progress); - } - ); + auto job = [operation](Cancelable& progress) + { + return operation->loadTileSet(&progress); + }; + + return jobs::dispatch(job, + jobs::context{ uri.full(), jobs::get_pool("oe.3dtiles") }); } osg::ref_ptr readTileContentSync( @@ -572,19 +573,22 @@ namespace const URI& uri, osg::ref_ptr options) { - JobArena* arena = JobArena::get("oe.3dtiles"); + jobs::context context; + context.name = uri.full(); + context.pool = 
jobs::get_pool("oe.3dtiles"); - return Job(arena).dispatch([uri, options](Cancelable* progress) + return jobs::dispatch([uri, options](Cancelable& progress) { osg::ref_ptr node = uri.getNode(options.get(), nullptr); if (node.valid()) { ImageUtils::compressAndMipmapTextures(node.get()); GLObjectsCompiler compiler; - compiler.compileNow(node.get(), options.get(), progress); + compiler.compileNow(node.get(), options.get(), &progress); } return node; - } + }, + context ); } } @@ -1360,7 +1364,7 @@ ThreeDTilesetNode::runPostMergeOperations(osg::Node* node) void ThreeDTilesetNode::touchTile(ThreeDTileNode* node) { - ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); if (node->_trackerItrValid) { _tracker.erase(node->_trackerItr); @@ -1381,7 +1385,7 @@ void ThreeDTilesetNode::expireTiles(const osg::NodeVisitor& nv) osg::Timer_t startTime = osg::Timer::instance()->tick(); osg::Timer_t endTime; - ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); // Max time in ms to allocate to erasing tiles float maxTime = 2.0f; diff --git a/src/osgEarth/Terrain.cpp b/src/osgEarth/Terrain.cpp index 9b924afd2f..2f404b2428 100644 --- a/src/osgEarth/Terrain.cpp +++ b/src/osgEarth/Terrain.cpp @@ -64,10 +64,9 @@ void Terrain::onTileUpdateOperation::operator()(osg::Object*) //--------------------------------------------------------------------------- Terrain::Terrain(osg::Node* graph, const Profile* mapProfile) : -_graph ( graph ), -_profile ( mapProfile ), -_callbacksMutex(OE_MUTEX_NAME), -_callbacksSize (0) + _graph(graph), + _profile(mapProfile), + _callbacksSize(0) { _updateQueue = new osg::OperationQueue(); } diff --git a/src/osgEarth/TerrainEngineNode.cpp b/src/osgEarth/TerrainEngineNode.cpp index 15b33a5dd3..ef7664e92a 100644 --- a/src/osgEarth/TerrainEngineNode.cpp +++ b/src/osgEarth/TerrainEngineNode.cpp @@ -91,8 +91,7 @@ TerrainEngineNode::TerrainEngineNode() : _requireParentTextures(false), _requireElevationBorder(false), _requireFullDataAtFirstLOD(false), - _updateScheduled(false), - _createTileModelCallbacksMutex(OE_MUTEX_NAME) + _updateScheduled(false) { // register for event traversals so we can properly reset the dirtyCount ADJUST_EVENT_TRAV_COUNT(this, 1); diff --git a/src/osgEarth/TerrainResources b/src/osgEarth/TerrainResources index ca66f48724..3b7c779c5d 100644 --- a/src/osgEarth/TerrainResources +++ b/src/osgEarth/TerrainResources @@ -107,7 +107,7 @@ namespace osgEarth void setVisibilityRangeHint(unsigned lod, float range); private: - Threading::Mutex _reservedUnitsMutex; + std::mutex _reservedUnitsMutex; typedef std::set ReservedUnits; ReservedUnits _globallyReservedUnits; diff --git a/src/osgEarth/TerrainResources.cpp b/src/osgEarth/TerrainResources.cpp index 400ca03922..07dce762e6 100755 --- a/src/osgEarth/TerrainResources.cpp +++ b/src/osgEarth/TerrainResources.cpp @@ -27,21 +27,19 @@ using namespace osgEarth; #define LC "[TerrainResources] " -TerrainResources::TerrainResources() : - _reservedUnitsMutex("TerrainResources(OE)") +TerrainResources::TerrainResources() { // Unit 0 cannot be reserved _globallyReservedUnits.insert(0); } bool -TerrainResources::reserveTextureImageUnit(int& out_unit, - const char* requestor) +TerrainResources::reserveTextureImageUnit(int& out_unit, const char* requestor) { out_unit = -1; unsigned maxUnits = osgEarth::Registry::instance()->getCapabilities().getMaxGPUTextureUnits(); - Threading::ScopedMutexLock exclusiveLock( _reservedUnitsMutex ); + std::lock_guard exclusiveLock( _reservedUnitsMutex ); // first collect a list of units that are 
already in use. std::set taken; @@ -77,7 +75,7 @@ TerrainResources::reserveTextureImageUnit(TextureImageUnitReservation& reservati reservation._unit = -1; unsigned maxUnits = osgEarth::Registry::instance()->getCapabilities().getMaxGPUTextureUnits(); - Threading::ScopedMutexLock exclusiveLock( _reservedUnitsMutex ); + std::lock_guard exclusiveLock( _reservedUnitsMutex ); // first collect a list of units that are already in use. std::set taken; @@ -121,7 +119,7 @@ TerrainResources::reserveTextureImageUnitForLayer(TextureImageUnitReservation& r reservation._unit = -1; unsigned maxUnits = osgEarth::Registry::instance()->getCapabilities().getMaxGPUTextureUnits(); - Threading::ScopedMutexLock exclusiveLock( _reservedUnitsMutex ); + std::lock_guard exclusiveLock( _reservedUnitsMutex ); // first collect a list of units that are already in use. std::set taken; @@ -151,7 +149,7 @@ TerrainResources::reserveTextureImageUnitForLayer(TextureImageUnitReservation& r void TerrainResources::releaseTextureImageUnit(int unit) { - Threading::ScopedMutexLock exclusiveLock( _reservedUnitsMutex ); + std::lock_guard exclusiveLock( _reservedUnitsMutex ); _globallyReservedUnits.erase( unit ); OE_INFO << LC << "Texture unit " << unit << " released" << std::endl; } @@ -162,7 +160,7 @@ TerrainResources::releaseTextureImageUnit(int unit, const Layer* layer) if (layer == 0L) releaseTextureImageUnit(unit); - Threading::ScopedMutexLock exclusiveLock( _reservedUnitsMutex ); + std::lock_guard exclusiveLock( _reservedUnitsMutex ); PerLayerReservedUnits::iterator i = _perLayerReservedUnits.find(layer); if (i != _perLayerReservedUnits.end()) { @@ -182,7 +180,7 @@ TerrainResources::releaseTextureImageUnit(int unit, const Layer* layer) bool TerrainResources::setTextureImageUnitOffLimits(int unit) { - Threading::ScopedMutexLock exclusiveLock( _reservedUnitsMutex ); + std::lock_guard exclusiveLock( _reservedUnitsMutex ); // Make sure it's not already reserved: if (_globallyReservedUnits.find(unit) != _globallyReservedUnits.end()) diff --git a/src/osgEarth/Text.cpp b/src/osgEarth/Text.cpp index cf3b984006..c467b3bf55 100644 --- a/src/osgEarth/Text.cpp +++ b/src/osgEarth/Text.cpp @@ -173,8 +173,8 @@ Text::createStateSet() // The remaining of this method is exclusive so we don't corrupt the // stateset cache when creating text objects from multiple threads. -gw - static Threading::Mutex mutex(OE_MUTEX_NAME); - Threading::ScopedMutexLock lock(mutex); + static std::mutex mutex; + std::lock_guard lock(mutex); if (!statesets.empty()) { @@ -237,8 +237,8 @@ Text::setFont(osg::ref_ptr font) #if OSG_VERSION_GREATER_OR_EQUAL(3,5,8) osgText::Text::setFont(font); #else - static Threading::Mutex mutex(OE_MUTEX_NAME); - Threading::ScopedMutexLock lock(mutex); + static std::mutex mutex; + std::lock_guard lock(mutex); osg::StateSet* previousFontStateSet = _font.valid() ? _font->getStateSet() : osgText::Font::getDefaultFont()->getStateSet(); osg::StateSet* newFontStateSet = font.valid() ?
font->getStateSet() : osgText::Font::getDefaultFont()->getStateSet(); diff --git a/src/osgEarth/TextureArena.cpp b/src/osgEarth/TextureArena.cpp index ef24268bd9..f75a14d876 100644 --- a/src/osgEarth/TextureArena.cpp +++ b/src/osgEarth/TextureArena.cpp @@ -579,14 +579,14 @@ TextureArena::find_no_lock(Texture::Ptr tex) const int TextureArena::find(Texture::Ptr tex) const { - ScopedMutexLock lock(_m); + std::lock_guard lock(_m); return find_no_lock(tex); } Texture::Ptr TextureArena::find(unsigned index) const { - ScopedMutexLock lock(_m); + std::lock_guard lock(_m); if (index >= _textures.size()) return nullptr; @@ -604,7 +604,7 @@ TextureArena::add(Texture::Ptr tex, const osgDB::Options* readOptions) // Lock the respository - we do that early because if you have multiple // views/gcs, it's very possible that both will try to add the same // texture in parallel. - ScopedMutexLock lock(_m); + std::lock_guard lock(_m); // First check whether it's already there; if so, return the index. int existingIndex = find_no_lock(tex); @@ -785,7 +785,7 @@ TextureArena::update(osg::NodeVisitor& nv) OE_PROFILING_ZONE_NAMED("update/autorelease"); - ScopedMutexLock lock(_m); + std::lock_guard lock(_m); if (_textures.empty()) return; @@ -807,7 +807,7 @@ TextureArena::flush() OE_PROFILING_ZONE_NAMED("flush"); - ScopedMutexLock lock(_m); + std::lock_guard lock(_m); for (unsigned i = 0; i < _textures.size(); ++i) { @@ -821,7 +821,7 @@ TextureArena::apply(osg::State& state) const if (_textures.empty()) return; - ScopedMutexLock lock(_m); + std::lock_guard lock(_m); OE_PROFILING_ZONE; @@ -980,7 +980,7 @@ TextureArena::apply(osg::State& state) const void TextureArena::notifyOfTextureRelease(osg::State* state) const { - ScopedMutexLock lock(_m); + std::lock_guard lock(_m); if (state) { @@ -1007,7 +1007,7 @@ TextureArena::compileGLObjects(osg::State& state) const void TextureArena::resizeGLObjectBuffers(unsigned maxSize) { - ScopedMutexLock lock(_m); + std::lock_guard lock(_m); if (_globjects.size() < maxSize) { @@ -1030,7 +1030,7 @@ TextureArena::releaseGLObjects(osg::State* state) const void TextureArena::releaseGLObjects(osg::State* state, bool force) const { - ScopedMutexLock lock(_m); + std::lock_guard lock(_m); //OE_DEVEL << LC << "releaseGLObjects on arena " << getName() << std::endl; diff --git a/src/osgEarth/Threading b/src/osgEarth/Threading index 6e1abd34e5..08852ddd9d 100644 --- a/src/osgEarth/Threading +++ b/src/osgEarth/Threading @@ -16,1103 +16,238 @@ * You should have received a copy of the GNU Lesser General Public License * along with this program. If not, see */ -#ifndef OSGEARTH_THREADING_UTILS_H -#define OSGEARTH_THREADING_UTILS_H 1 +#pragma once +#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +// bring in weejobs in the jobs namespace +#define WEEJOBS_EXPORT OSGEARTH_EXPORT +#include -// to include the file and line as the mutex name -#define OE_MUTEX_NAME __FILE__ ":" OE_STRINGIFY(__LINE__) - -// uncomment to activate mutex contention tracking when profiling -// #define OSGEARTH_MUTEX_CONTENTION_TRACKING - -namespace osgEarth { namespace Threading +namespace osgEarth { - //! C++ BasicLockable requirement - class BasicLockable - { - public: - virtual void lock() =0; - virtual void unlock() = 0; - }; - - //! 
C++ Lockable requirement - class Lockable : public BasicLockable - { - public: - virtual bool try_lock() =0; - }; - -#ifdef OSGEARTH_MUTEX_CONTENTION_TRACKING - - /** - * A normal mutex that supports contention tracking - */ - class OSGEARTH_EXPORT Mutex : public Lockable - { - public: - Mutex(); - ~Mutex(); - - //! Explicitly block copying of mutexes - Mutex(const Mutex& copy) = delete; - Mutex& operator=(const Mutex& copy) = delete; - - void lock() override; - void unlock() override; - bool try_lock() override; - - // Methods for metrics-enabled mutexes - Mutex(const std::string& name, const char* file = nullptr, std::uint32_t line = 0); - void setName(const std::string& name); - - private: - std::string _name; - void* _handle; - void* _metricsData; - }; - - /** - * A recursive mutex that supports contention tracking - */ - class OSGEARTH_EXPORT RecursiveMutex : public Lockable - { - public: - RecursiveMutex(); - RecursiveMutex(const std::string& name, const char* file = nullptr, std::uint32_t line = 0); - ~RecursiveMutex(); - - //! Enable or disable this mutex. Don't call this while threads are running. - void disable(); - - void lock() override; - void unlock() override; - bool try_lock() override; - - void setName(const std::string& name); - - private: - bool _enabled; - std::string _name; - void* _handle; - void* _metricsData; - }; - -#else - - /** - * Standard mutex that implements Lockable - */ - class Mutex : public Lockable, public std::mutex - { - public: - Mutex() {} - - Mutex(const Mutex& copy) = delete; - Mutex& operator=(const Mutex& copy) = delete; - - void lock() override { std::mutex::lock(); } - void unlock() override { std::mutex::unlock(); } - bool try_lock() override { return std::mutex::try_lock(); } - - // stub out the naming stuff so the API is compatible with - // the tracking mutex - Mutex(const std::string& name, const char* file = nullptr, std::uint32_t line = 0) { } - void setName(const std::string& name) { } - }; - - /** - * Recursive mutex that implements Lockable - */ - class RecursiveMutex : public Lockable, std::recursive_mutex - { - public: - RecursiveMutex() { } - - RecursiveMutex(const RecursiveMutex& copy) = delete; - RecursiveMutex& operator=(const RecursiveMutex& copy) = delete; - - void lock() override { std::recursive_mutex::lock(); } - void unlock() override { std::recursive_mutex::unlock(); } - bool try_lock() override { return std::recursive_mutex::try_lock(); } - - // stub out the naming stuff so the API is compatible with - // the tracking mutex - RecursiveMutex(const std::string& name, const char* file = nullptr, std::uint32_t line = 0) { } - void setName(const std::string& name) { } - }; - -#endif - - //! Locks a mutex for the duration of the scope - using ScopedMutexLock = std::lock_guard; - using ScopedLock = ScopedMutexLock; - - struct ScopedMutexLockIf { - ScopedMutexLockIf(BasicLockable& lock, bool condition) : _lock(lock), _condition(condition) { - if (_condition) _lock.lock(); - } - ~ScopedMutexLockIf() { - if (_condition) _lock.unlock(); - } - BasicLockable& _lock; - bool _condition; - }; - using ScopedLockIf = ScopedMutexLockIf; - - //! Locks a recursive mutex for the duration of the scope - using ScopedRecursiveMutexLock = std::lock_guard; - using ScopedRecursiveLock = ScopedRecursiveMutexLock; - - /** - * Gets the approximate number of available threading contexts. - * Result is guaranteed to be greater than zero - */ - extern OSGEARTH_EXPORT unsigned getConcurrency(); - - /** - * Gets the unique ID of the running thread. 
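// getCurrentThreadId() is removed just below; code that needs a per-thread key
// (for example the Gate retained later in this header) now uses the portable
// std::thread::id instead of a platform-specific integer. Minimal illustration:
#include <iostream>
#include <thread>

int main()
{
    // Comparable, printable, and hashable (std::hash<std::thread::id>), so it
    // works as a map key without any platform-specific syscalls.
    std::thread::id self = std::this_thread::get_id();
    std::cout << "current thread: " << self << "\n";
    return 0;
}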
- */ - extern OSGEARTH_EXPORT unsigned getCurrentThreadId(); - /** * Pure interface for an object that can be canceled. */ - class Cancelable - { - public: - virtual bool isCanceled() const = 0; - bool canceled() const { return isCanceled(); } - }; - - /** - * Event with a binary signaled state, for multi-threaded sychronization. - * - * The event has two states: - * "set" means that a call to wait() will not block; - * "unset" means that calls to wait() will block until another thread calls set(). - * - * The event starts out unset. - * - * Typical usage: Thread A creates Thread B to run asynchronous code. Thread A - * then calls wait(), which blocks Thread A. When Thread B is finished, it calls - * set(). Thread A then wakes up and continues execution. - * - * NOTE: ALL waiting threads will wake up when the Event is cleared. - */ - class OSGEARTH_EXPORT Event - { - public: - //! Construct a new event - Event() : _set(false) { } - - //! DTOR - ~Event() { - _set = false; - for (int i = 0; i < 255; ++i) // workaround buggy broadcast - _cond.notify_all(); - } - - //! Block until the event is set, then return true. - inline bool wait() { - while (!_set) { - std::unique_lock lock(_m); - if (!_set) - _cond.wait(lock); - } - return _set; - } - - //! Block until the event is set or the timout expires. - //! Return true if the event has set, otherwise false. - template - inline bool wait(T timeout) { - if (!_set) { - std::unique_lock lock(_m); - if (!_set) - _cond.wait_for(lock, timeout); - } - return _set; - } - - //! Block until the event is set; then reset it. - inline bool waitAndReset() { - std::unique_lock lock(_m); - if (!_set) - _cond.wait(lock); - _set = false; - return true; - } - - //! Set the event state, causing any waiters to unblock. - inline void set() { - if (!_set) { - std::unique_lock lock(_m); - if (!_set) { - _set = true; - _cond.notify_all(); - } - } - } - - //! Reset (unset) the event state; new waiters will block until set() is called. - inline void reset() { - std::unique_lock lock(_m); - _set = false; - } - - //! Whether the event state is set (waiters will not block). - inline bool isSet() const { - return _set; - } + using Cancelable = WEEJOBS_NAMESPACE::cancelable; - protected: - std::mutex _m; // do not use Mutex, we never want tracking - std::condition_variable_any _cond; - bool _set; - }; + //! Sets the name of the curent thread + extern OSGEARTH_EXPORT void setThreadName(const std::string& name); - /** - * Future holds the future result of an asynchronous operation. - * - * Usage: - * Producer (usually an asynchronous function call) creates a Future - * (the promise of a future result) and immediately returns it. The Consumer - * then performs other work, and eventually (or immediately) checks available() - * for a result or canceled() for cancelation. If availabile() is true, - * Consumer calls value() to fetch the valid result. - * - * As long as at least two equivalent Future object (i.e. Futures pointing to the - * same internal shared data) exist, the Future is considered valid. Once - * that count goes to one, the Future is either available (the value is ready) - * or empty (i.e., canceled or abandoned). - */ - template - class Future : public Cancelable + namespace Threading { - private: - // internal structure to track references to the result - // One instance of this is shared among all Future instances - // created from the copy constructor. - struct Shared - { - T _obj; - mutable Event _ev; - }; - - public: - //! 
Blank CTOR - Future() { - _shared = std::make_shared(); - } - - Future(const Future& rhs) = default; - - //! True is this Future is unused and not connected to any other Future - bool empty() const { - return !available() && _shared.use_count() == 1; - } + // backwards compatibility typedefs. + using Mutex = std::mutex; + using RecursiveMutex = std::recursive_mutex; + using ScopedMutexLock = std::lock_guard; + using ScopedRecursiveMutexLock = std::lock_guard; - //! True if the promise was resolved and a result if available. - bool available() const { - return _shared->_ev.isSet(); - } + template using Future = typename WEEJOBS_NAMESPACE::future; - //! True if a promise exists, but has not yet been resolved; - //! Presumably the asynchronous task is still working. - bool working() const { - return !empty() && !available(); - } + using Event = jobs::detail::event; - //! Synonym for empty() - Cancelable interface - bool isCanceled() const override { - return empty(); - } - bool canceled() const { - return empty(); - } + // C++17 typedefs - for now we need to support C++11 so cannot use these yet. + //using ReadWriteMutex = std::shared_mutex; + //using ScopedReadLock = std::shared_lock; + //using ScopedWriteLock = std::unique_lock; + //using ScopedReadLock = ScopedRead; + //using ScopedWriteLock = ScopedWrite; + //using ScopedRecursiveReadLock = ScopedRead; + //using ScopedRecursiveWriteLock = ScopedWrite; - //! Deference the result object. Make sure you check available() - //! to check that the future was actually resolved; otherwise you - //! will just get the default object. - const T& value() const { - return _shared->_obj; - } - - //! Dereference this object to const pointer to the result. - const T* operator -> () const { - return &_shared->_obj; - } - - //! Same as value(), but if the result is available will reset the - //! future before returning the result object. - T release() { - bool avail = available(); - T result = value(); - if (avail) - reset(); - return result; - } - - //! Blocks until the result becomes available or the future is abandoned; - //! then returns the result object. - T join() const { - while ( - !empty() && - !_shared->_ev.wait(std::chrono::milliseconds(1))); - return value(); - } + /** + * Mutex that allows many simultaneous readers but only one writer + */ + template class ReadWrite + { + public: + ReadWrite() : + _writers(0u), _readers(0u) { } - //! Blocks until the result becomes available or the future is abandoned - //! or a cancelation flag is set; then returns the result object. - T join(const Cancelable& p) const { - while (working() && !p.canceled()) - { - _shared->_ev.wait(std::chrono::milliseconds(1)); + void read_lock() { + std::unique_lock lock(_m); + _unlocked.wait(lock, [&]() { return _writers == 0; }); + ++_readers; } - return value(); - } - //! Blocks until the result becomes available or the future is abandoned - //! or a cancelation flag is set; then returns the result object. - T join(Cancelable* p) const { - while (working() && (p == nullptr || !p->canceled())) - { - _shared->_ev.wait(std::chrono::milliseconds(1)); + void read_unlock() { + std::unique_lock lock(_m); + --_readers; + if (_readers == 0) + _unlocked.notify_one(); } - return value(); - } - - //! Release reference to a promise, resetting this future to its default state - void abandon() { - _shared.reset(new Shared()); - } - - //! synonym for abandon. - void reset() { - abandon(); - } - - //! Resolve (fulfill) the promise with the provided result value. 
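// The compatibility typedefs above (Mutex, RecursiveMutex, ScopedMutexLock,
// ScopedRecursiveMutexLock) map the old osgEarth lock names onto the standard
// library, so call sites that this patch does not rewrite keep compiling.
// Illustrative sketch only; LegacyCache is hypothetical, and the local
// Threading namespace simply restates the same aliases to stay self-contained.
#include <map>
#include <mutex>
#include <string>

namespace Threading
{
    using Mutex = std::mutex;
    using ScopedMutexLock = std::lock_guard<std::mutex>;
}

class LegacyCache
{
public:
    void put(const std::string& key, int value)
    {
        Threading::ScopedMutexLock lock(_mutex);   // old spelling, std::mutex underneath
        _data[key] = value;
    }

private:
    Threading::Mutex _mutex;
    std::map<std::string, int> _data;
};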
- void resolve(const T& value) { - _shared->_obj = value; - _shared->_ev.set(); - } - - //! Resolve (fulfill) the promise with an rvalue - void resolve(T&& value) { - _shared->_obj = std::move(value); - _shared->_ev.set(); - } - - //! Resolve (fulfill) the promise with a default result - void resolve() { - _shared->_ev.set(); - } - - //! The number of objects, including this one, that - //! reference the shared container. If this method - //! returns 1, that means this is the only object with - //! access to the data. This method will never return zero. - unsigned refs() const { - return _shared.use_count(); - } - - private: - std::shared_ptr _shared; - }; - - template using Promise = Future; - - /** - * Convenience base class for representing a Result object that may be - * synchronous or asynchronous, depending on which constructor you use. - */ - template - class FutureResult - { - public: - bool isReady() const { - return _future.available() || _future.empty(); - } - - bool isWorking() const { - return !_future.available() && !_future.empty(); - } - - protected: - //! Asynchronous constructor - FutureResult(Future f) : _future(f) { } - - //! Immediate synchronous resolve constructor - FutureResult(const T& data) { - Promise p; - _future = p; // .getFuture(); - p.resolve(data); - } - Future _future; - }; - - - /** - * Mutex that locks on a per-object basis - */ - template - class Gate - { - public: - Gate() { } - - Gate(const std::string& name) : _m(name) { } - - inline void lock(const T& key) { - std::unique_lock lock(_m); - auto thread_id = getCurrentThreadId(); - for (;;) { - auto i = _keys.emplace(key, thread_id); - if (i.second) - return; - OE_HARD_ASSERT(i.first->second != thread_id, "Recursive Gate access attempt"); - _unlocked.wait(lock); + void read_lock(std::function f) { + read_lock(); + f(); + read_unlock(); } - } - - inline void unlock(const T& key) { - std::unique_lock lock(_m); - _keys.erase(key); - _unlocked.notify_all(); - } - - inline void setName(const std::string& name) { - _m.setName(name); - } - - private: - Mutex _m; - std::condition_variable_any _unlocked; - std::unordered_map _keys; - }; - - //! Gate the locks for the duration of this object's scope - template - struct ScopedGate { - public: - //! Lock a gate based on key "key" - ScopedGate(Gate& gate, const T& key) : - _gate(gate), - _key(key), - _active(true) - { - _gate.lock(key); - } - - //! Lock a gate based on key "key" IFF the predicate is true, - //! else it's a nop. - ScopedGate(Gate& gate, const T& key, std::function pred) : - _gate(gate), - _key(key), - _active(pred()) - { - if (_active) - _gate.lock(_key); - } - - //! 
End-of-scope destructor unlocks the gate - ~ScopedGate() - { - if (_active) - _gate.unlock(_key); - } - - private: - Gate& _gate; - T _key; - bool _active; - }; - - /** - * Mutex that allows many simultaneous readers but only one writer - */ - template - class ReadWrite - { - public: - ReadWrite() : - _writers(0u), _readers(0u) { } - - ReadWrite(const std::string& name) : - _m(name), _writers(0u), _readers(0u) { } - void read_lock() { - std::unique_lock lock(_m); - _unlocked.wait(lock, [&]() { return _writers == 0; }); - ++_readers; - } + void write_lock() { + std::unique_lock lock(_m); + _unlocked.wait(lock, [&]() { return _writers == 0 && _readers == 0; }); + ++_writers; + } - void read_unlock() { - std::unique_lock lock(_m); - --_readers; - if (_readers == 0) + void write_unlock() { + std::unique_lock lock(_m); + _writers = 0; _unlocked.notify_one(); - } - - void read_lock(std::function f) { - read_lock(); - f(); - read_unlock(); - } - - void write_lock() { - std::unique_lock lock(_m); - _unlocked.wait(lock, [&]() { return _writers == 0 && _readers == 0; }); - ++_writers; - } - - void write_unlock() { - std::unique_lock lock(_m); - _writers = 0; - _unlocked.notify_one(); - } - - void write_lock(std::function f) { - write_lock(); - f(); - write_unlock(); - } - - void upgrade_to_write_lock() { - std::unique_lock lock(_m); - _unlocked.wait(lock, [&]() { return _writers == 0 && _readers == 1; }); - --_readers; - ++_writers; - } - - void setName(const std::string& name) { - _m.setName(name); - } - - private: - T _m; - std::condition_variable_any _unlocked; - unsigned _writers; - unsigned _readers; - }; - - template - struct ScopedWrite { - ScopedWrite( ReadWrite& lock ) : _lock(lock) { _lock.write_lock(); } - ~ScopedWrite() { _lock.write_unlock(); } - private: - ReadWrite& _lock; - }; + } - template - struct ScopedRead { - ScopedRead( ReadWrite& lock ) : _lock(lock) { _lock.read_lock(); } - void upgradeToWriteLock() { _lock.upgrade_to_write_lock(); _upgraded = true; } - ~ScopedRead() { - if (_upgraded) - _lock.write_unlock(); - else - _lock.read_unlock(); - } - private: - ReadWrite& _lock; - bool _upgraded = false; - }; + void write_lock(std::function f) { + write_lock(); + f(); + write_unlock(); + } - typedef ReadWrite ReadWriteMutex; - typedef ReadWrite ReadWriteRecursiveMutex; - typedef ScopedRead ScopedReadLock; - typedef ScopedWrite ScopedWriteLock; - typedef ScopedRead ScopedRecursiveReadLock; - typedef ScopedWrite ScopedRecursiveWriteLock; + void upgrade_to_write_lock() { + std::unique_lock lock(_m); + _unlocked.wait(lock, [&]() { return _writers == 0 && _readers == 1; }); + --_readers; + ++_writers; + } - /** - * Simple convenience construct to make another type "lockable" - * as long as it has a default constructor - */ - template - struct Mutexed : public T, public BasicLockable { - Mutexed() : T() { } - Mutexed(const std::string& name) : _lockable_mutex(name), T() { } - void setName(const std::string& name) { _lockable_mutex.setName(name); } - void lock() { _lockable_mutex.lock(); } - void lock() const { _lockable_mutex.lock(); } - void unlock() { _lockable_mutex.unlock(); } - void unlock() const { _lockable_mutex.unlock(); } - void lock(std::function func) { lock(); func(); unlock(); } - void scoped_lock(std::function func) { lock(); func(); unlock(); } - Mutex& mutex() const { return _lockable_mutex; } - T& operator = (const T& rhs) { return T::operator=(rhs); } - T& operator = (const T&& rhs) { return T::operator=(rhs); } - private: - mutable Mutex _lockable_mutex; - }; + 
private: + T _m; + std::condition_variable_any _unlocked; + unsigned _writers; + unsigned _readers; + }; + template + struct ScopedWrite { + ScopedWrite(ReadWrite& lock) : _lock(lock) { _lock.write_lock(); } + ~ScopedWrite() { _lock.write_unlock(); } + private: + ReadWrite& _lock; + }; - /** - * Simple atomic counter that increments an atomic - * when entering a scope and decrements it upon exiting the scope - */ - struct ScopedAtomicCounter - { - std::atomic_int& _a; - ScopedAtomicCounter(std::atomic_int& a) : _a(a) { ++_a; } - ~ScopedAtomicCounter() { --_a; } - }; + template + struct ScopedRead { + ScopedRead(ReadWrite& lock) : _lock(lock) { _lock.read_lock(); } + void upgradeToWriteLock() { _lock.upgrade_to_write_lock(); _upgraded = true; } + ~ScopedRead() { + if (_upgraded) + _lock.write_unlock(); + else + _lock.read_unlock(); + } + private: + ReadWrite& _lock; + bool _upgraded = false; + }; - //! Sets the name of the curent thread - extern OSGEARTH_EXPORT void setThreadName(const std::string& name); + using ReadWriteMutex = ReadWrite; + using ReadWriteRecursiveMutex = ReadWrite; + using ScopedReadLock = ScopedRead; + using ScopedWriteLock = ScopedWrite; + using ScopedRecursiveReadLock = ScopedRead; + using ScopedRecursiveWriteLock = ScopedWrite; - //! Sets the thread name with details when scoped - struct ScopedThreadName - { - std::string _base; - ScopedThreadName(const std::string& base, const std::string& detail) : - _base(base) - { - setThreadName(base + "(" + detail + ")"); - } - ~ScopedThreadName() + /** + * Mutex that locks on a per-object basis + */ + template + class Gate { - setThreadName(_base); - } - }; - - /** - * Sempahore lets N users aquire it and then notifies when the - * count goes back down to zero. - */ - class Semaphore - { - public: - //! Construct a semaphore - Semaphore(); - - //! Construct a named semaphore - Semaphore(const std::string& name); - - //! Acquire, increasing the usage count by one - void acquire(); - - //! Release, decreasing the usage count by one. - //! When the count reaches zero, joiners will be notified and - //! the semaphore will reset to its initial state. - void release(); - - //! Reset to initialize state; this will cause a join to occur - //! even if no acquisitions have taken place. - void reset(); - - //! Current count in the semaphore - std::size_t count() const; - - //! Block until the semaphore count returns to zero. - //! (It must first have left zero) - //! Warning: this method will block forever if the count - //! never reaches zero! - void join(); - - //! Block until the semaphore count returns to zero, or - //! the operation is canceled. - //! (It must first have left zero) - void join(Cancelable* cancelable); - - private: - int _count; - std::condition_variable_any _cv; - mutable Mutex _m; - }; - - class JobArena; - - /** - * A job group. Dispatch jobs along with a group, and you - * can then wait on the entire group to finish. - */ - class OSGEARTH_EXPORT JobGroup - { - public: - //! Construct a new job group - JobGroup(); - - //! Construct a new named job group - JobGroup(const std::string& name); - - //! Block until all jobs dispatched under this group are complete. - void join(); - - //! Block until all jobs dispatched under this group are complete, - //! or the operation is canceled. - void join(Cancelable*); - - private: - std::shared_ptr _sema; - friend class JobArena; - }; - - /** - * API for scheduling a task to run in the background. 
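// The ReadWrite template and the ScopedRead/ScopedWrite helpers retained above
// allow many concurrent readers but only one writer. A usage sketch against
// the names defined in this header; ElevationCache is a hypothetical class:
#include <osgEarth/Threading>
#include <map>
#include <string>

class ElevationCache
{
public:
    bool tryGet(const std::string& key, float& out) const
    {
        osgEarth::Threading::ScopedReadLock lock(_mutex);   // many readers at once
        auto i = _data.find(key);
        if (i == _data.end())
            return false;
        out = i->second;
        return true;
    }

    void put(const std::string& key, float value)
    {
        osgEarth::Threading::ScopedWriteLock lock(_mutex);  // waits for readers to drain
        _data[key] = value;
    }

private:
    mutable osgEarth::Threading::ReadWriteMutex _mutex;
    std::map<std::string, float> _data;
};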
- * - * Example usage: - * - * int a = 10, b = 20; - * - * Job job; - * - * Future result = job.dispatch( - * [a, b](Cancelable* progress) { - * return (a + b); - * } - * ); - * - * // later... - * - * if (result.isAvailable()) { - * std::cout << "Answer = " << result.get() << std::endl; - * } - * else if (result.isAbandoned()) { - * // task was canceled - * } - * - * @notes Once you call dispatch, you can discard the job, - * or you can keep it around and dispatch it again later. - * Any changes you make to a Job after dispatch will - * not affect the already-dispatched Job. - */ - class Job - { - public: - - //! Construct a new blank job - Job() : - _priority(0.0f), _arena(nullptr), _group(nullptr) { } - - //! Construct a new job in the specified arena - Job(JobArena* arena) : - _priority(0.0f), _arena(arena), _group(nullptr) { } - - //! Construct a new job in the specified arena and job group - Job(JobArena* arena, JobGroup* group) : - _priority(0.0f), _arena(arena), _group(group) { } - - //! Name of this job - void setName(const std::string& value) { - _name = value; - } - const std::string& getName() const { - return _name; - } - - //! Set the job arena in which to run this Job. - //void setArena(JobArena* arena) { - // _arena = arena; - //} - void setArena(const std::string& value) { - _arenaName = value; - } - - //! Static priority - void setPriority(float value) { - _priority = value; - } - - //! Function to call to get this job's priority - void setPriorityFunction(const std::function& func) { - _priorityFunc = func; - } - - //! Get the priority of this job - float getPriority() const { - return _priorityFunc != nullptr ? _priorityFunc() : _priority; - } - - //! Assign this job to a group - void setGroup(JobGroup* group) { - _group = group; - } - JobGroup* getGroup() const { - return _group; - } - - //! Dispatch this job for asynchronous execution. - //! @func Function to execute - //! @return Future result. If this objects goes out of scope, - //! the job will be canceled and may not run at all. - template::type> - Future dispatch(FUNC function) const; - - //! Dispatch the job for asynchronous execution and forget - //! about it. No return value. - template - void dispatch_and_forget(FUNC function) const; + public: + Gate() { } - private: - std::string _name; - float _priority; - std::string _arenaName; - JobArena* _arena; - JobGroup* _group; - std::function _priorityFunc; - friend class JobArena; - }; + inline void lock(const T& key) { + std::unique_lock lock(_m); + auto thread_id = std::this_thread::get_id(); + for (;;) { + auto i = _keys.emplace(key, thread_id); + if (i.second) + return; + OE_HARD_ASSERT(i.first->second != thread_id, "Recursive Gate access attempt"); + _unlocked.wait(lock); + } + } + inline void unlock(const T& key) { + std::unique_lock lock(_m); + _keys.erase(key); + _unlocked.notify_all(); + } - /** - * Schedules asynchronous tasks on a thread pool. - * You usually don't need to use this class directly. - * Use Job::schedule() to queue a new job. - */ - class OSGEARTH_EXPORT JobArena - { - public: - //! Type of Job Arena (thread pool versus traversal) - enum Type - { - THREAD_POOL, - UPDATE_TRAVERSAL + private: + std::mutex _m; + std::condition_variable_any _unlocked; + std::unordered_map _keys; }; - //! Construct a new JobArena - JobArena( - const std::string& name = "", - unsigned concurrency = 2u, - const Type& type = THREAD_POOL); - - //! Destroy - ~JobArena(); - - //! Set the concurrency of this job arena - //! 
(Only applies to THREAD_POOL type arenas) - void setConcurrency(unsigned value); - - //! Get the target concurrency (thread count) - unsigned getConcurrency() const; - - //! Discard all queued jobs. The arena will continue to run and - //! to accept new job. - void cancelAllPendingJobs(); - - //! Stop all thread pools across all arenas. All arenas will stop - //! running their threads and will not accept any more jobs. - static void stopAllThreads(); - - public: // statics - - //! Access a named arena - static JobArena* get(const std::string& name); - - //! Access the first arena of type "type". Typically use this to - //! access an UPDATE_TRAVERSAL arena singleton. - static JobArena* get(const Type& type); - - //! Sets the concurrency of a named arena - static void setConcurrency(const std::string& name, unsigned value); - - //! Name of the arena to use when none is specified - static const std::string& defaultArenaName(); - - //! Whether the job system is alive; this is true until you call stopAllThreads(). - static bool alive(); - - public: - - /** - * Reflects the current state of the JobArena system - * This structure is designed to be accessed atomically - * with no lock contention - */ - class OSGEARTH_EXPORT Metrics + //! Gate the locks for the duration of this object's scope + template + struct ScopedGate { public: - //! Per-arena metrics. - struct Arena + //! Lock a gate based on key "key" + ScopedGate(Gate& gate, const T& key) : + _gate(gate), + _key(key), + _active(true) { - using Ptr = std::shared_ptr; - std::string arenaName; - std::atomic concurrency; - std::atomic numJobsPending; - std::atomic numJobsRunning; - std::atomic numJobsCanceled; - - Arena() : concurrency(0), numJobsPending(0), numJobsRunning(0), numJobsCanceled(0) { } - }; + _gate.lock(key); + } - //! Report sent to the user reporting function if set - struct Report + //! Lock a gate based on key "key" IFF the predicate is true, + //! else it's a nop. + ScopedGate(Gate& gate, const T& key, std::function pred) : + _gate(gate), + _key(key), + _active(pred()) { - Report(const Job& job_, const std::string& arena_, const std::chrono::steady_clock::duration& duration_) - : job(job_), arena(arena_), duration(duration_) { } - const Job& job; - std::string arena; - std::chrono::steady_clock::duration duration; - }; - - std::atomic maxArenaIndex; - - //! create a new arena and return its index - Arena::Ptr getOrCreate(const std::string& name); - - //! metrics about the arena at index "index" - const Arena::Ptr arena(int index) const; - - //! Total number of pending jobs across all arenas - int totalJobsPending() const; - - //! Total number of running jobs across all arenas - int totalJobsRunning() const; - - //! Total number of canceled jobs across all arenas - int totalJobsCanceled() const; - - //! Total number of active jobs in the system - int totalJobs() const { - return totalJobsPending() + totalJobsRunning(); + if (_active) + _gate.lock(_key); } - //! Set a user reporting function and threshold - void setReportFunction( - std::function function, - std::chrono::steady_clock::duration minDuration = std::chrono::steady_clock::duration(0)) + //! End-of-scope destructor unlocks the gate + ~ScopedGate() { - _report = function; - _reportMinDuration = minDuration; + if (_active) + _gate.unlock(_key); } private: - Metrics(); - std::vector _arenas; - std::function _report; - std::chrono::steady_clock::duration _reportMinDuration; - friend class JobArena; + Gate& _gate; + T _key; + bool _active; }; - //! 
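// The Gate/ScopedGate pair kept above serializes work on a per-key basis: two
// threads proceed in parallel for different keys, but a second thread arriving
// with the same key blocks until the first unlocks it. Usage sketch; the
// string key and buildTile function are illustrative assumptions:
#include <osgEarth/Threading>
#include <string>

static osgEarth::Threading::Gate<std::string> s_tileGate;

void buildTile(const std::string& tileKey)
{
    // Only one thread at a time may work on a given tileKey; the gate also
    // hard-asserts if the same thread tries to lock the same key recursively.
    osgEarth::Threading::ScopedGate<std::string> gate(s_tileGate, tileKey);

    // ... expensive per-key work happens here ...
}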
Access to the system-wide job arena metrics - static Metrics& allMetrics() { return _allMetrics; } - - //! Metrics object for thie arena - Metrics::Arena::Ptr metrics() { return _metrics; } - - - public: // INTERNAL - - //! Run one or more pending jobs. - //! Internal function - do not call this directly. - void runJobs(); - - private: - - void startThreads(); - - void stopThreads(); - - void joinThreads(); - - //static void shutdownAll(); - - using Delegate = std::function; - - //! Schedule an asynchronous task on this arena. - //! Use Job::dispatch to run jobs (no need to call this directly) - //! @param job Job details - //! @param delegate Function to execute - void dispatch(const Job& job, Delegate& delegate); - - struct QueuedJob { - QueuedJob() { } - QueuedJob(const Job& job, const Delegate& delegate, std::shared_ptr sema) : - _job(job), _delegate(delegate), _groupsema(sema) { } - Job _job; - Delegate _delegate; - std::shared_ptr _groupsema; - bool operator < (const QueuedJob& rhs) const { - return _job.getPriority() < rhs._job.getPriority(); - } + /** + * Simple convenience construct to make another type "lockable" + * as long as it has a default constructor + */ + template + struct Mutexed : public T + { + Mutexed() : T() { } + void lock() { _lockable_mutex.lock(); } + void lock() const { _lockable_mutex.lock(); } + void unlock() { _lockable_mutex.unlock(); } + void unlock() const { _lockable_mutex.unlock(); } + void lock(std::function func) { lock(); func(); unlock(); } + void scoped_lock(std::function func) { lock(); func(); unlock(); } + MUTEX& mutex() const { return _lockable_mutex; } + T& operator = (const T& rhs) { return T::operator=(rhs); } + T& operator = (const T&& rhs) { return T::operator=(rhs); } + private: + mutable MUTEX _lockable_mutex; }; - // pool name - std::string _name; - // type of arena - Type _type; - // queued operations to run asynchronously - using Queue = std::vector; - Queue _queue; - // protect access to the queue - mutable Mutex _queueMutex; - mutable Mutex _quitMutex; - // target number of concurrent threads in the pool - std::atomic _targetConcurrency; - // thread waiter block - std::condition_variable_any _block; - // set to true when threads should exit - bool _done; - // threads in the pool - std::vector _threads; - // pointer to the stats structure for this arena - Metrics::Arena::Ptr _metrics; - - static ReadWriteMutex _arenas_mutex; - static std::unordered_map _arenaSizes; - static std::unordered_map> _arenas; - static std::string _defaultArenaName; - static Metrics _allMetrics; - static bool _alive; - - friend class Job; // allow access to private dispatch method - }; - - template - Future Job::dispatch(FUNC function) const - { - Promise promise; - Future future = promise; - JobArena::Delegate delegate = [function, promise]() mutable - { - bool good = !promise.empty(); - if (good) - promise.resolve(function(&promise)); - return good; - }; - auto name = _arena ? _arena->_name : _arenaName; - JobArena::get(name)->dispatch(*this, delegate); - return std::move(future); - } - - template - void Job::dispatch_and_forget(FUNC function) const - { - auto name = _arena ? 
_arena->_name : _arenaName; - JobArena* arena = JobArena::get(name); - if (arena) + template + struct scoped_lock_if_base { - JobArena::Delegate delegate = [function]() - { - function(nullptr); - return true; - }; - arena->dispatch(*this, delegate); - } + scoped_lock_if_base(basic_lockable& lock, bool condition) : _lock(lock), _condition(condition) { + if (_condition) _lock.lock(); + } + ~scoped_lock_if_base() { + if (_condition) _lock.unlock(); + } + basic_lockable& _lock; + bool _condition; + }; + using scoped_lock_if = scoped_lock_if_base; } - -} } // namepsace osgEarth::Threading - -#define OE_THREAD_NAME(name) osgEarth::Threading::setThreadName(name); - -#define OE_SCOPED_THREAD_NAME(base,name) osgEarth::Threading::ScopedThreadName _scoped_threadName(base,name); - -#endif // OSGEARTH_THREADING_UTILS_H +} diff --git a/src/osgEarth/Threading.cpp b/src/osgEarth/Threading.cpp index 8652e624b8..16fe129648 100644 --- a/src/osgEarth/Threading.cpp +++ b/src/osgEarth/Threading.cpp @@ -17,353 +17,29 @@ * along with this program. If not, see */ #include "Threading" -#include "Utils" -#include "Metrics" #include #include -#include +#include #ifdef _WIN32 # include -//# include #elif defined(__APPLE__) || defined(__LINUX__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__ANDROID__) # include # include # include #endif -// b/c windows defines override std:: functions -#undef min -#undef max +// define the threading library singleton +WEEJOBS_INSTANCE; -using namespace osgEarth::Threading; -using namespace osgEarth::Util; +using namespace osgEarth; -//................................................................... - -//#ifdef OSGEARTH_PROFILING -//#define MUTEX_TYPE tracy::Lockable -//#else -//#define MUTEX_TYPE std::recursive_mutex -//#endif - -#ifdef OSGEARTH_MUTEX_CONTENTION_TRACKING - -Mutex::Mutex() : - _handle(nullptr), - _metricsData(nullptr) -{ - if (Metrics::enabled()) - { - tracy::SourceLocationData* s = new tracy::SourceLocationData(); - s->name = nullptr; - s->function = "unnamed"; - s->file = __FILE__; - s->line = __LINE__; - s->color = 0; - _handle = new tracy::Lockable(s); - _metricsData = s; - } - else - { - _handle = new std::mutex(); - } -} - -Mutex::Mutex(const std::string& name, const char* file, std::uint32_t line) : - _name(name), - _handle(nullptr), - _metricsData(nullptr) -{ - if (Metrics::enabled()) - { - tracy::SourceLocationData* s = new tracy::SourceLocationData(); - s->name = nullptr; - s->function = _name.c_str(); - s->file = file; - s->line = line; - s->color = 0; - _handle = new tracy::Lockable(s); - _metricsData = s; - } - else - { - _handle = new std::mutex(); - } -} - -Mutex::~Mutex() -{ - if (_metricsData) - delete static_cast*>(_handle); - else - delete static_cast(_handle); -} - -void -Mutex::setName(const std::string& name) -{ - _name = name; - if (_metricsData) - { - tracy::SourceLocationData* s = static_cast(_metricsData); - s->function = _name.c_str(); - } -} - -void -Mutex::lock() -{ - //if (_name.empty()) { - // volatile int x =0 ; // breakpoint for finding unnamed mutexes - //} - - if (_metricsData) - static_cast*>(_handle)->lock(); - else - static_cast(_handle)->lock(); -} - -void -Mutex::unlock() -{ - if (_metricsData) - static_cast*>(_handle)->unlock(); - else - static_cast(_handle)->unlock(); -} - -bool -Mutex::try_lock() -{ - if (_metricsData) - return static_cast*>(_handle)->try_lock(); - else - return static_cast(_handle)->try_lock(); -} - -#endif // OSGEARTH_MUTEX_CONTENTION_TRACKING - 
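// Threading.cpp now instantiates the weejobs scheduler (WEEJOBS_INSTANCE) that
// backs the new jobs:: API. The TDTiles.cpp hunks earlier in this patch show
// the dispatch pattern; below is a minimal sketch of the same call shape. The
// pool name "oe.example" and the helper function are illustrative assumptions.
#include <osgEarth/Threading>
#include <string>

using namespace osgEarth;

Threading::Future<std::string> scheduleWork(const std::string& input)
{
    // The job is a lambda taking a Cancelable&, mirroring the TDTiles lambdas
    // earlier in the patch.
    auto job = [input](Cancelable& cancelable) -> std::string
    {
        if (cancelable.canceled())
            return {};
        return input + " (processed)";
    };

    // A context names the job and selects the thread pool it runs on.
    jobs::context context;
    context.name = input;
    context.pool = jobs::get_pool("oe.example");

    // Returns immediately; the caller polls or joins the returned future.
    return jobs::dispatch(job, context);
}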
-//................................................................... - -#ifdef OSGEARTH_MUTEX_CONTENTION_TRACKING - -RecursiveMutex::RecursiveMutex() : - _enabled(true), - _metricsData(nullptr) -{ - if (Metrics::enabled()) - { - tracy::SourceLocationData* s = new tracy::SourceLocationData(); - s->name = nullptr; - s->function = "unnamed recursive"; - s->file = __FILE__; - s->line = __LINE__; - s->color = 0; - _handle = new tracy::Lockable(s); - _metricsData = s; - } - else - { - _handle = new std::recursive_mutex(); - } -} - -RecursiveMutex::RecursiveMutex(const std::string& name, const char* file, std::uint32_t line) : - _name(name), - _enabled(true), - _metricsData(nullptr) -{ - if (Metrics::enabled()) - { - tracy::SourceLocationData* s = new tracy::SourceLocationData(); - s->name = nullptr; - s->function = _name.c_str(); - s->file = file; - s->line = line; - s->color = 0; - _handle = new tracy::Lockable(s); - _metricsData = s; - } - else - { - _handle = new std::recursive_mutex(); - } -} - -RecursiveMutex::~RecursiveMutex() -{ - if (_handle) - { - if (_metricsData) - delete static_cast*>(_handle); - else - delete static_cast(_handle); - } -} - -void -RecursiveMutex::disable() -{ - _enabled = false; -} - -void -RecursiveMutex::setName(const std::string& name) -{ - _name = name; - - if (_metricsData) - { - tracy::SourceLocationData* s = static_cast(_metricsData); - s->function = _name.c_str(); - } -} - -void -RecursiveMutex::lock() -{ - if (_enabled) - { - if (_metricsData) - static_cast*>(_handle)->lock(); - else - static_cast(_handle)->lock(); - } -} - -void -RecursiveMutex::unlock() -{ - if (_enabled) - { - if (_metricsData) - static_cast*>(_handle)->unlock(); - else - static_cast(_handle)->unlock(); - } -} - -bool -RecursiveMutex::try_lock() -{ - if (_enabled) - { - if (_metricsData) - return static_cast*>(_handle)->try_lock(); - else - return static_cast(_handle)->try_lock(); - } - else return true; -} - -#endif // OSGEARTH_MUTEX_CONTENTION_TRACKING - -//................................................................... - -unsigned osgEarth::Threading::getCurrentThreadId() -{ -#ifdef _WIN32 - return (unsigned)::GetCurrentThreadId(); -#elif __APPLE__ - return ::syscall(SYS_thread_selfid); -#elif __ANDROID__ - return gettid(); -#elif __LINUX__ - return (unsigned)::syscall(SYS_gettid); -#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) - long tid; - syscall(SYS_thr_self, &tid); - return (unsigned)tid; -#else - /* :XXX: this truncates to 32 bits, but better than nothing */ - return (unsigned)pthread_self(); -#endif -} - -unsigned osgEarth::Threading::getConcurrency() -{ - int value = std::thread::hardware_concurrency(); - return value > 0 ? (unsigned)value : 4u; -} - -//................................................................... 
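// The replacement setThreadName() implementation appears just below; it names
// the calling thread for debuggers and profilers. A small usage sketch; the
// worker body and thread name are illustrative only.
#include <osgEarth/Threading>
#include <thread>

int main()
{
    std::thread worker([]()
    {
        // Must be called from inside the thread it is meant to name.
        osgEarth::setThreadName("oe.example.worker");
        // ... worker runs here ...
    });
    worker.join();
    return 0;
}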
- -#if 0 -Event::Event() : -_set(false) -{ - //nop -} - -Event::~Event() -{ - _set = false; - for(int i=0; i<255; ++i) // workaround buggy broadcast - _cond.notify_all(); -} - -bool Event::wait() -{ - while(!_set) - { - std::unique_lock lock(_m); - if (!_set) - _cond.wait(lock); - } - return _set; -} - -bool Event::wait(unsigned timeout_ms) -{ - if (!_set) - { - std::unique_lock lock(_m); - if (!_set) // double check - { - _cond.wait_for(lock, std::chrono::milliseconds(timeout_ms)); - } - } - return _set; -} - -bool Event::waitAndReset() -{ - std::unique_lock lock(_m); - if (!_set) - _cond.wait(lock); - _set = false; - return true; -} - -void Event::set() -{ - if (!_set) - { - std::unique_lock lock(_m); - if (!_set) { - _set = true; - _cond.notify_all(); - } - } -} - -void Event::reset() -{ - std::lock_guard lock(_m); - _set = false; -} -#endif - -void -osgEarth::Threading::setThreadName(const std::string& name) +void osgEarth::setThreadName(const std::string& name) { #if (defined _WIN32 && defined _WIN32_WINNT_WIN10 && defined _WIN32_WINNT && _WIN32_WINNT >= _WIN32_WINNT_WIN10) || (defined __CYGWIN__) wchar_t buf[256]; mbstowcs(buf, name.c_str(), 256); - //::SetThreadDescription(::GetCurrentThread(), buf); - // Look up the address of the SetThreadDescription function rather than using it directly. typedef ::HRESULT(WINAPI* SetThreadDescription)(::HANDLE hThread, ::PCWSTR lpThreadDescription); auto set_thread_description_func = reinterpret_cast(::GetProcAddress(::GetModuleHandle("Kernel32.dll"), "SetThreadDescription")); @@ -373,628 +49,17 @@ osgEarth::Threading::setThreadName(const std::string& name) } #elif defined _GNU_SOURCE && !defined __EMSCRIPTEN__ && !defined __CYGWIN__ - - const auto sz = strlen( name.c_str() ); - if( sz <= 15 ) + const auto sz = strlen(name.c_str()); + if (sz <= 15) { - pthread_setname_np( pthread_self(), name.c_str() ); + pthread_setname_np(pthread_self(), name.c_str()); } else { char buf[16]; - memcpy( buf, name.c_str(), 15 ); + memcpy(buf, name.c_str(), 15); buf[15] = '\0'; - pthread_setname_np( pthread_self(), buf ); - } -#endif -} - -#undef LC -#define LC "[Semaphore]" - -Semaphore::Semaphore() : - _count(0), - _m("oe.Semaphore") -{ - //nop -} - -Semaphore::Semaphore(const std::string& name) : - _count(0), - _m(name) -{ - //nop -} - -void -Semaphore::acquire() -{ - ScopedMutexLock lock(_m); - ++_count; -} - -void -Semaphore::release() -{ - ScopedMutexLock lock(_m); - _count = std::max(_count - 1, 0); - if (_count == 0) - _cv.notify_all(); -} - -void -Semaphore::reset() -{ - ScopedMutexLock lock(_m); - _count = 0; - _cv.notify_all(); -} - -std::size_t -Semaphore::count() const -{ - ScopedMutexLock lock(_m); - return _count; -} - -void -Semaphore::join() -{ - ScopedMutexLock lock(_m); - _cv.wait( - _m, - [this]() - { - return _count == 0; - } - ); -} - -void -Semaphore::join(Cancelable* cancelable) -{ - ScopedMutexLock lock(_m); - _cv.wait_for( - _m, - std::chrono::seconds(1), - [this, cancelable]() { - return - (_count == 0) || - (cancelable && cancelable->isCanceled()); - } - ); - _count = 0; -} - - -#undef LC -#define LC "[JobGroup]" - -JobGroup::JobGroup() : - _sema(std::make_shared()) -{ - //nop -} - -JobGroup::JobGroup(const std::string& name) : - _sema(std::make_shared(name)) -{ - //nop -} - -void -JobGroup::join() -{ - if (_sema != nullptr && _sema.use_count() > 1) - { - _sema->join(); - } -} - -void -JobGroup::join(Cancelable* cancelable) -{ - if (_sema != nullptr && _sema.use_count() > 1) - { - _sema->join(cancelable); - } -} - - -#undef LC -#define 
LC "[JobArena] " - -// JobArena statics: -ReadWriteMutex JobArena::_arenas_mutex("OE:JobArena"); -std::unordered_map> JobArena::_arenas; -std::unordered_map JobArena::_arenaSizes; -std::string JobArena::_defaultArenaName = "oe.general"; -JobArena::Metrics JobArena::_allMetrics; -bool JobArena::_alive = true; - -#define OE_ARENA_DEFAULT_SIZE 2u - -JobArena::JobArena(const std::string& name, unsigned concurrency, const Type& type) : - _name(name.empty()? defaultArenaName() : name), - _targetConcurrency(concurrency), - _type(type), - _done(false), - _queueMutex("OE.JobArena[" + _name + "].queue"), - _quitMutex("OE.JobArena[" + _name + "].quit") -{ - // find a slot in the stats - _metrics = _allMetrics.getOrCreate(_name); - - if (_type == THREAD_POOL) - { - startThreads(); + pthread_setname_np(pthread_self(), buf); } -} - -JobArena::~JobArena() -{ - if (_type == THREAD_POOL) - { - stopThreads(); - joinThreads(); - } -} - -bool -JobArena::alive() -{ - return _alive; -} - -const std::string& -JobArena::defaultArenaName() -{ - return _defaultArenaName; -} - -void -JobArena::stopAllThreads() -{ - _alive = false; - - ScopedReadLock lock(_arenas_mutex); - - for (auto iter : _arenas) - iter.second->stopThreads(); - - for (auto iter : _arenas) - iter.second->joinThreads(); -} - -JobArena* -JobArena::get(const std::string& name_) -{ - if (_arenas.empty()) - { - std::atexit(JobArena::stopAllThreads); - } - - std::string name(name_.empty() ? "oe.general" : name_); - - ScopedReadLock lock(_arenas_mutex); - - auto iter = _arenas.find(name); - if (iter != _arenas.end()) - { - return iter->second.get(); - } - - lock.upgradeToWriteLock(); - - std::shared_ptr& arena = _arenas[name]; - if (arena == nullptr) - { - auto iter = _arenaSizes.find(name); - unsigned numThreads = iter != _arenaSizes.end() ? 
iter->second : OE_ARENA_DEFAULT_SIZE; - - arena = std::make_shared(name, numThreads); - } - return arena.get(); -} - -#if 0 -JobArena* -JobArena::get(const Type& type_) -{ - if (type_ == THREAD_POOL) - { - return get("oe.general"); - } - - ScopedMutexLock lock(_arenas_mutex); - - if (_arenas.empty()) - { - std::atexit(JobArena::stopAllThreads); - } - - if (type_ == UPDATE_TRAVERSAL) - { - std::string name("oe.UPDATE"); - std::shared_ptr& arena = _arenas[name]; - if (arena == nullptr) - { - arena = std::make_shared(name, 0, type_); - } - return arena.get(); - } - - return nullptr; -} #endif - -unsigned -JobArena::getConcurrency() const -{ - return _targetConcurrency; -} - -void -JobArena::setConcurrency(unsigned value) -{ - value = std::max(value, 1u); - - if (_type == THREAD_POOL && _targetConcurrency != value) - { - _targetConcurrency = value; - startThreads(); - } -} - -void -JobArena::setConcurrency(const std::string& name, unsigned value) -{ - // this method exists so you can set an arena's concurrency - // before the arena is actually created - - value = std::max(value, 1u); - - ScopedReadLock lock(_arenas_mutex); - - if (_arenaSizes[name] != value) - { - _arenaSizes[name] = value; - - auto iter = _arenas.find(name); - if (iter != _arenas.end()) - { - std::shared_ptr arena = iter->second; - OE_SOFT_ASSERT_AND_RETURN(arena != nullptr, void()); - arena->setConcurrency(value); - } - } -} - -void -JobArena::cancelAllPendingJobs() -{ - std::lock_guard lock(_queueMutex); - _queue.clear(); - _metrics->numJobsCanceled += _metrics->numJobsPending; - _metrics->numJobsPending = 0; -} - -void -JobArena::dispatch(const Job& job, Delegate& delegate) -{ - // If we have a group semaphore, acquire it BEFORE queuing the job - JobGroup* group = job.getGroup(); - std::shared_ptr sema = group ? group->_sema : nullptr; - if (sema) - { - sema->acquire(); - } - - if (_type == THREAD_POOL) - { - if (_targetConcurrency > 0) - { - std::lock_guard lock(_queueMutex); - _queue.emplace_back(job, delegate, sema); - _metrics->numJobsPending++; - _block.notify_one(); - } - else - { - // no threads? run synchronously. - delegate(); - - if (sema) - { - sema->release(); - } - } - } - - else // _type == traversal - { - std::lock_guard lock(_queueMutex); - _queue.emplace_back(job, delegate, sema); - _metrics->numJobsPending++; - } -} - -void -JobArena::runJobs() -{ - // cap the number of jobs to run (applies to TRAVERSAL modes only) - int jobsLeftToRun = INT_MAX; - - while (!_done) - { - QueuedJob next; - - bool have_next = false; - { - std::unique_lock lock(_queueMutex); - - if (_type == THREAD_POOL) - { - _block.wait(lock, [this] { - return _queue.empty() == false || _done == true; - }); - } - else // traversal type - { - // Prevents jobs that re-queue themselves from running - // during the same traversal frame. - if (jobsLeftToRun == INT_MAX) - jobsLeftToRun = _queue.size(); - - if (_queue.empty() || jobsLeftToRun == 0) - { - return; - } - } - - if (!_queue.empty() && !_done) - { - // Find the highest priority item in the queue. - // Note: We could use std::partial_sort or std::nth_element, - // but benchmarking proves that a simple brute-force search - // is always the fastest. - // (Benchmark: https://stackoverflow.com/a/20365638/4218920) - // Also note: it is indeed possible for the results of - // Job::getPriority() to change during the search. We don't care. 
- int index = -1; - float highest_priority = -FLT_MAX; - for (unsigned i = 0; i < _queue.size(); ++i) - { - if (index < 0 || _queue[i]._job.getPriority() > highest_priority) - { - index = i; - highest_priority = _queue[i]._job.getPriority(); - } - } - - next = std::move(_queue[index]); - have_next = true; - - // move the last element into the empty position: - if (index < _queue.size()-1) - { - _queue[index] = std::move(_queue.back()); - } - - // and remove the last element. - _queue.erase(_queue.end() - 1); - } - } - - if (have_next) - { - _metrics->numJobsRunning++; - _metrics->numJobsPending--; - - auto t0 = std::chrono::steady_clock::now(); - - bool job_executed = next._delegate(); - - auto duration = std::chrono::steady_clock::now() - t0; - - if (job_executed) - { - jobsLeftToRun--; - - if (_allMetrics._report != nullptr) - { - if (duration >= _allMetrics._reportMinDuration) - { - _allMetrics._report(Metrics::Report(next._job, _name, duration)); - } - } - } - else - { - _metrics->numJobsCanceled++; - } - - // release the group semaphore if necessary - if (next._groupsema != nullptr) - { - next._groupsema->release(); - } - - _metrics->numJobsRunning--; - } - - if (_type == THREAD_POOL) - { - // See if we no longer need this thread because the - // target concurrency has been reduced - ScopedMutexLock quitLock(_quitMutex); - if ((int)_targetConcurrency < _metrics->concurrency) - { - _metrics->concurrency--; - break; - } - } - } -} - -void -JobArena::startThreads() -{ - _done = false; - - OE_INFO << LC << "Arena \"" << _name << "\" concurrency=" << _targetConcurrency << std::endl; - - // Not enough? Start up more - while(_metrics->concurrency < (int)_targetConcurrency) - { - _metrics->concurrency++; - - _threads.push_back(std::thread([this] - { - //OE_INFO << LC << "Arena \"" << _name << "\" starting thread " << std::this_thread::get_id() << std::endl; - OE_THREAD_NAME(_name.c_str()); - - runJobs(); - - // exit thread here - //OE_INFO << LC << "Thread " << std::this_thread::get_id() << " exiting" << std::endl; - } - )); - } -} - -void JobArena::stopThreads() -{ - _done = true; - - // Clear out the queue - { - std::lock_guard lock(_queueMutex); - - // reset any group semaphores so that JobGroup.join() - // will not deadlock. - for (auto& queuedjob : _queue) - { - if (queuedjob._groupsema != nullptr) - { - queuedjob._groupsema->reset(); - } - } - _queue.clear(); - - // wake up all threads so they can exit - _block.notify_all(); - } -} - -void JobArena::joinThreads() -{ - // wait for them to exit - for (unsigned i = 0; i < _threads.size(); ++i) - { - if (_threads[i].joinable()) - { - _threads[i].join(); - } - } - - if (_threads.size() > 0) - { - OE_INFO << LC << "\"" << _name << "\" " << std::to_string(_threads.size()) << " threads stopped." << std::endl; - } - - _threads.clear(); -} - - -JobArena::Metrics::Metrics() : - maxArenaIndex(-1), - _report(nullptr), - _reportMinDuration(0) -{ - // to prevent thread safety issues - _arenas.resize(128); - - const char* report_us = ::getenv("OSGEARTH_JOB_REPORT_THRESHOLD"); - if (report_us) - { - _report = [](const Report& r) - { - static Mutex _mutex; - ScopedMutexLock lock(_mutex); - std::string jobname = r.job.getName().empty() ? 
"unknown" : r.job.getName(); - OE_INFO - << "[Job] " << jobname - << " (" << r.arena << ") " - << std::fixed << std::setprecision(1) - << 0.001f*(float)(std::chrono::duration_cast(r.duration).count()) << " ms" << std::endl; - }; - - _reportMinDuration = std::chrono::microseconds(as(report_us, 132)); - - OE_INFO << LC << "Job report min duration set to " << report_us << "us" << std::endl; - } -} - -JobArena::Metrics::Arena::Ptr -JobArena::Metrics::getOrCreate(const std::string& name) -{ - for (int i = 0; i < _arenas.size(); ++i) - { - if (_arenas[i] != nullptr && _arenas[i]->arenaName == name) - { - return _arenas[i]; - } - } - - ++maxArenaIndex; - - if (maxArenaIndex >= _arenas.size()) - { - OE_SOFT_ASSERT(maxArenaIndex >= _arenas.size(), - "Ran out of arena space...using arena[0] :("); - return _arenas[0]; - } - - auto new_arena = _arenas[maxArenaIndex] = Arena::Ptr(new Arena); - new_arena->arenaName = name; - new_arena->concurrency = 0; - - return new_arena; -} - -const JobArena::Metrics::Arena::Ptr -JobArena::Metrics::arena(int index) const -{ - return _arenas[index]; -} - -int -JobArena::Metrics::totalJobsPending() const -{ - int count = 0; - for (int i = 0; i <= maxArenaIndex; ++i) - if (arena(i)) - count += arena(i)->numJobsPending; - return count; -} - -int -JobArena::Metrics::totalJobsRunning() const -{ - int count = 0; - for (int i = 0; i <= maxArenaIndex; ++i) - if (arena(i)) - count += arena(i)->numJobsRunning; - return count; -} - -int -JobArena::Metrics::totalJobsCanceled() const -{ - int count = 0; - for (int i = 0; i <= maxArenaIndex; ++i) - if (arena(i)) - count += arena(i)->numJobsCanceled; - return count; } diff --git a/src/osgEarth/TileMesher.cpp b/src/osgEarth/TileMesher.cpp index 43da20fbff..3fc0e8cfe5 100644 --- a/src/osgEarth/TileMesher.cpp +++ b/src/osgEarth/TileMesher.cpp @@ -106,7 +106,7 @@ TileMesher::getOrCreateStandardIndices() const { if (!_standardIndices.valid()) { - ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); if (!_standardIndices.valid()) { unsigned tileSize = _options.getTileSize(); @@ -562,7 +562,7 @@ TileMesher::createMeshWithConstraints( } } - if (cancelable && cancelable->isCanceled()) + if (cancelable && cancelable->canceled()) return {}; } } @@ -641,7 +641,7 @@ TileMesher::createMeshWithConstraints( } } - if (cancelable && cancelable->isCanceled()) + if (cancelable && cancelable->canceled()) return {}; } } diff --git a/src/osgEarth/TileRasterizer b/src/osgEarth/TileRasterizer index 8cc53f6a9c..a5a87ad259 100644 --- a/src/osgEarth/TileRasterizer +++ b/src/osgEarth/TileRasterizer @@ -114,7 +114,7 @@ namespace osgEarth GeoExtent _extent; // viewport extent of the node Renderer::Ptr _renderer; // Rtt camera to use to renderer - Promise _promise; + jobs::promise _promise; void useRenderer(Renderer::Ptr); }; diff --git a/src/osgEarth/TileRasterizer.cpp b/src/osgEarth/TileRasterizer.cpp index 5d4ce87e68..bde4b26b73 100644 --- a/src/osgEarth/TileRasterizer.cpp +++ b/src/osgEarth/TileRasterizer.cpp @@ -388,13 +388,13 @@ TileRasterizer::postDraw(osg::RenderInfo& ri) OE_HARD_ASSERT(job != nullptr); // Check to see if the client still wants the result: - if (job->_promise.isCanceled()) + if (job->_promise.canceled()) return; job->_renderer->allocate(state); // GPU task delegate: - auto gpu_task = [job](osg::State& state, Promise& promise, int invocation) + auto gpu_task = [job](osg::State& state, jobs::promise& promise, int invocation) { if (promise.empty()) { diff --git a/src/osgEarth/TileSource b/src/osgEarth/TileSource index 
b082da9db5..6c7bcd394b 100644 --- a/src/osgEarth/TileSource +++ b/src/osgEarth/TileSource @@ -418,7 +418,7 @@ namespace osgEarth { namespace Contrib bool _openCalled; - mutable Threading::Mutex _mutex; + mutable std::mutex _mutex; }; //-------------------------------------------------------------------- diff --git a/src/osgEarth/TileSourceElevationLayer.cpp b/src/osgEarth/TileSourceElevationLayer.cpp index dae44e93ef..865a44dfa2 100644 --- a/src/osgEarth/TileSourceElevationLayer.cpp +++ b/src/osgEarth/TileSourceElevationLayer.cpp @@ -89,8 +89,9 @@ TileSourceElevationLayer::getOrCreatePreCacheOp() const { if ( !_preCacheOp.valid() ) { - static Mutex s_mutex; - Threading::ScopedLock lock(s_mutex); + static std::mutex s_mutex; + std::lock_guard lock(s_mutex); + if ( !_preCacheOp.valid() ) { _preCacheOp = new NormalizeNoDataValues(this); diff --git a/src/osgEarth/TileVisitor b/src/osgEarth/TileVisitor index 1ec695f9c1..78e04890f9 100644 --- a/src/osgEarth/TileVisitor +++ b/src/osgEarth/TileVisitor @@ -110,7 +110,7 @@ namespace osgEarth { namespace Util osg::ref_ptr< const Profile > _profile; - osgEarth::Threading::Mutex _progressMutex; + std::mutex _progressMutex; HasDataCallback _hasData; @@ -141,7 +141,7 @@ namespace osgEarth { namespace Util unsigned int _numThreads; - JobGroup _group; + jobs::jobgroup _group; }; diff --git a/src/osgEarth/TileVisitor.cpp b/src/osgEarth/TileVisitor.cpp index bf8b0032f2..7e8d6df258 100644 --- a/src/osgEarth/TileVisitor.cpp +++ b/src/osgEarth/TileVisitor.cpp @@ -20,6 +20,7 @@ #include #include #include +#include #if OSG_VERSION_GREATER_OR_EQUAL(3,5,10) #include @@ -31,23 +32,21 @@ using namespace osgEarth; using namespace osgEarth::Util; -TileVisitor::TileVisitor(): -_total(0), -_processed(0), -_minLevel(0), -_maxLevel(99), -_progressMutex("TileVisitor Progress") +TileVisitor::TileVisitor() : + _total(0), + _processed(0), + _minLevel(0), + _maxLevel(99) { } -TileVisitor::TileVisitor(TileHandler* handler): -_tileHandler( handler ), -_total(0), -_processed(0), -_minLevel(0), -_maxLevel(99), -_progressMutex("TileVisitor Progress") +TileVisitor::TileVisitor(TileHandler* handler) : + _tileHandler(handler), + _total(0), + _processed(0), + _minLevel(0), + _maxLevel(99) { } @@ -203,7 +202,7 @@ void TileVisitor::processKey( const TileKey& key ) void TileVisitor::incrementProgress(unsigned int amount) { { - Threading::ScopedMutexLock lk(_progressMutex ); + std::lock_guard lk(_progressMutex ); _processed += amount; } @@ -235,7 +234,7 @@ bool TileVisitor::handleTile( const TileKey& key ) /*****************************************************************************************/ MultithreadedTileVisitor::MultithreadedTileVisitor() : - _numThreads(Threading::getConcurrency()) + _numThreads(std::max(1u, std::thread::hardware_concurrency())) { // We must do this to avoid an error message in OpenSceneGraph b/c the findWrapper method doesn't appear to be threadsafe. // This really isn't a big deal b/c this only effects data that is already cached. 
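The hunks that follow convert MultithreadedTileVisitor from the removed JobArena/Job API to the new weejobs API (jobs::get_pool, jobs::context, jobs::jobgroup, jobs::dispatch). A hedged, self-contained sketch of that usage pattern, in which the pool name, concurrency value, task bodies, and include path are illustrative only and the API is the one declared in weejobs.h later in this patch:

    #include "weejobs.h"
    #include <iostream>

    // one instance per application; in this patch it lives in Threading.cpp
    WEEJOBS_INSTANCE;

    int main()
    {
        // size a named pool before dispatching, as MultithreadedTileVisitor::run() does
        jobs::get_pool("sketch.pool")->set_concurrency(4);

        // a jobgroup lets the caller wait for a whole batch, as handleTile() does
        jobs::jobgroup group;

        jobs::context ctx;
        ctx.name = "sketch job";
        ctx.pool = jobs::get_pool("sketch.pool");
        ctx.group = &group;

        // fire-and-forget overload
        jobs::dispatch([]() { std::cout << "tile handled\n"; }, ctx);

        // overload that returns a future; the task can poll the cancelable argument
        auto result = jobs::dispatch([](jobs::cancelable&) { return 42; }, ctx);

        group.join();   // blocks until every job in the group has finished
        std::cout << "result = " << result.value() << "\n";

        jobs::shutdown();
        return 0;
    }

Compared with the old Job/JobArena calls, the context struct carries the name, pool, priority function, and group in one place, and cancelation is implicit: if the returned future goes out of scope, the queued job reports canceled() and is skipped.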
@@ -244,7 +243,7 @@ MultithreadedTileVisitor::MultithreadedTileVisitor() : MultithreadedTileVisitor::MultithreadedTileVisitor(TileHandler* handler) : TileVisitor(handler), - _numThreads(Threading::getConcurrency()) + _numThreads(std::max(1u, std::thread::hardware_concurrency())) { } @@ -265,7 +264,7 @@ void MultithreadedTileVisitor::run(const Profile* mapProfile) // Start up the task service OE_INFO << "Starting " << _numThreads << " threads " << std::endl; - JobArena::get(MTTV)->setConcurrency(_numThreads); + jobs::get_pool(MTTV)->set_concurrency(_numThreads); // Produce the tiles TileVisitor::run( mapProfile ); @@ -279,25 +278,28 @@ bool MultithreadedTileVisitor::handleTile(const TileKey& key) //_numTiles++; // don't let the task queue get too large...? - while (JobArena::allMetrics().totalJobsPending() > 1000) + while(jobs::get_metrics()->totalJobsPending() > 1000) { std::this_thread::sleep_for(std::chrono::seconds(1)); } // Add the tile to the task queue. - auto delegate = [this, key](Cancelable*) + auto task = [this, key](Cancelable&) { - if ((_tileHandler.valid()) && - (!_progress.valid() || !_progress->isCanceled())) + if ((_tileHandler.valid()) && (!_progress.valid() || !_progress->isCanceled())) { _tileHandler->handleTile(key, *this); this->incrementProgress(1); } + return true; }; - Job job(JobArena::get(MTTV), &_group); - job.setName("handleTile"); - job.dispatch_and_forget(delegate); + jobs::context job; + job.name = "handleTile"; + job.pool = jobs::get_pool(MTTV); + job.group = &_group; + + jobs::dispatch(task, job); return true; } diff --git a/src/osgEarth/TrackNode.cpp b/src/osgEarth/TrackNode.cpp index e6500c2746..8ecfc5fcf5 100644 --- a/src/osgEarth/TrackNode.cpp +++ b/src/osgEarth/TrackNode.cpp @@ -109,8 +109,8 @@ TrackNode::construct() osg::ref_ptr geodeStateSet; if (s_geodeStateSet.lock(geodeStateSet) == false) { - static Threading::Mutex m(OE_MUTEX_NAME); - Threading::ScopedMutexLock lock(m); + static std::mutex m; + std::lock_guard lock(m); if (s_geodeStateSet.lock(geodeStateSet) == false) { s_geodeStateSet = geodeStateSet = new osg::StateSet(); @@ -131,8 +131,8 @@ TrackNode::construct() if (s_imageStateSet.lock(_imageStateSet) == false) { - static Threading::Mutex m(OE_MUTEX_NAME); - Threading::ScopedMutexLock lock(m); + static std::mutex m; + std::lock_guard lock(m); if (s_imageStateSet.lock(_imageStateSet) == false) { s_imageStateSet = _imageStateSet = new osg::StateSet(); diff --git a/src/osgEarth/VerticalDatum.cpp b/src/osgEarth/VerticalDatum.cpp index b69af8ae49..951e237d83 100644 --- a/src/osgEarth/VerticalDatum.cpp +++ b/src/osgEarth/VerticalDatum.cpp @@ -35,7 +35,7 @@ namespace { typedef std::map > VDatumCache; VDatumCache _vdatumCache; - Threading::Mutex _vdataCacheMutex("VDatumCache(OE)"); + std::mutex _vdataCacheMutex; bool _vdatumWarning = false; } @@ -47,7 +47,7 @@ VerticalDatum::get( const std::string& initString ) if (initString.empty()) return result; - Threading::ScopedMutexLock exclusive(_vdataCacheMutex); + std::lock_guard exclusive(_vdataCacheMutex); if (::getenv("OSGEARTH_IGNORE_VERTICAL_DATUMS")) { diff --git a/src/osgEarth/VirtualProgram b/src/osgEarth/VirtualProgram index c00ad1e1f2..73f5e9e0fa 100644 --- a/src/osgEarth/VirtualProgram +++ b/src/osgEarth/VirtualProgram @@ -290,7 +290,7 @@ namespace osgEarth bool _dirty; - static Threading::Mutex _cacheMutex; + static std::mutex _cacheMutex; using PolyShaderCache = std::map< std::pair, osg::ref_ptr>; @@ -568,7 +568,7 @@ namespace osgEarth mutable osg::buffered_object _apply; // protects access 
to the data members, which may be accessed by other VPs in the state stack. - mutable Threading::Mutex _dataModelMutex; + mutable std::mutex _dataModelMutex; bool _useDataModelMutex = true; // The program cache holds an osg::Program instance for each collection of shaders diff --git a/src/osgEarth/VirtualProgram.cpp b/src/osgEarth/VirtualProgram.cpp index 7341832073..0d353ee236 100644 --- a/src/osgEarth/VirtualProgram.cpp +++ b/src/osgEarth/VirtualProgram.cpp @@ -72,7 +72,7 @@ using namespace osgEarth::Threading; #ifdef USE_POLYSHADER_CACHE -Mutex VirtualProgram::PolyShader::_cacheMutex("VP PolyShader Cache(OE)"); +std::mutex VirtualProgram::PolyShader::_cacheMutex; VirtualProgram::PolyShader::PolyShaderCache VirtualProgram::PolyShader::_polyShaderCache; #endif @@ -109,7 +109,6 @@ namespace #define LC "[ProgramRepo] " ProgramRepo::ProgramRepo() : - Threading::Mutexed("ProgramRepo(OE)"), _releaseUnusedPrograms(true) { const char* value = ::getenv("OSGEARTH_PROGRAM_BINARY_CACHE_PATH"); @@ -853,8 +852,7 @@ VirtualProgram::VirtualProgram(unsigned mask) : _logShaders(false), _logPath(""), _acceptCallbacksVaryPerFrame(false), - _isAbstract(false), - _dataModelMutex("OE.VirtualProgram") + _isAbstract(false) { // Note: we cannot set _active here. Wait until apply(). // It will cause a conflict in the Registry. @@ -910,8 +908,7 @@ VirtualProgram::VirtualProgram(const VirtualProgram& rhs, const osg::CopyOp& cop _logPath(rhs._logPath), _template(osg::clone(rhs._template.get())), _acceptCallbacksVaryPerFrame(rhs._acceptCallbacksVaryPerFrame), - _isAbstract(rhs._isAbstract), - _dataModelMutex("OE.VirtualProgram") + _isAbstract(rhs._isAbstract) { _id = osgEarth::createUID(); @@ -964,14 +961,14 @@ VirtualProgram::compare(const osg::StateAttribute& sa) const void VirtualProgram::addBindAttribLocation(const std::string& name, GLuint index) { - ScopedLockIf lock(_dataModelMutex, _useDataModelMutex); + scoped_lock_if lock(_dataModelMutex, _useDataModelMutex); _attribBindingList[name] = index; } void VirtualProgram::removeBindAttribLocation(const std::string& name) { - ScopedLockIf lock(_dataModelMutex, _useDataModelMutex); + scoped_lock_if lock(_dataModelMutex, _useDataModelMutex); _attribBindingList.erase(name); } @@ -1042,7 +1039,7 @@ VirtualProgram::releaseGLObjects(osg::State* state) const VirtualProgram::PolyShader* VirtualProgram::getPolyShader(const std::string& shaderID) const { - ScopedLockIf lock(_dataModelMutex, _useDataModelMutex); + scoped_lock_if lock(_dataModelMutex, _useDataModelMutex); ShaderMap::const_iterator i = _shaderMap.find(MAKE_SHADER_ID(shaderID)); const ShaderEntry* entry = i != _shaderMap.end() ? &i->second : NULL; return entry ? entry->_shader.get() : 0L; @@ -1074,7 +1071,7 @@ VirtualProgram::setShader( // lock the data model and insert the new shader. { - ScopedLockIf lock(_dataModelMutex, _useDataModelMutex); + scoped_lock_if lock(_dataModelMutex, _useDataModelMutex); ShaderEntry& entry = _shaderMap[MAKE_SHADER_ID(shaderID)]; entry._shader = pshader; entry._overrideValue = ov; @@ -1110,7 +1107,7 @@ VirtualProgram::setShader( // lock the data model while changing it. 
{ - ScopedLockIf lock(_dataModelMutex, _useDataModelMutex); + scoped_lock_if lock(_dataModelMutex, _useDataModelMutex); checkSharing(); @@ -1150,7 +1147,7 @@ VirtualProgram::setFunction( // lock the functions map while iterating and then modifying it: { - ScopedLockIf lock(_dataModelMutex, _useDataModelMutex); + scoped_lock_if lock(_dataModelMutex, _useDataModelMutex); checkSharing(); @@ -1196,7 +1193,7 @@ VirtualProgram::setFunction( bool VirtualProgram::addGLSLExtension(const std::string& extension) { - ScopedLockIf lock(_dataModelMutex, _useDataModelMutex); + scoped_lock_if lock(_dataModelMutex, _useDataModelMutex); std::pair insertPair = _globalExtensions.insert(extension); return insertPair.second; } @@ -1204,7 +1201,7 @@ VirtualProgram::addGLSLExtension(const std::string& extension) bool VirtualProgram::hasGLSLExtension(const std::string& extension) const { - ScopedLockIf lock(_dataModelMutex, _useDataModelMutex); + scoped_lock_if lock(_dataModelMutex, _useDataModelMutex); bool doesHave = _globalExtensions.find(extension) != _globalExtensions.end(); return doesHave; } @@ -1212,7 +1209,7 @@ VirtualProgram::hasGLSLExtension(const std::string& extension) const bool VirtualProgram::removeGLSLExtension(const std::string& extension) { - ScopedLockIf lock(_dataModelMutex, _useDataModelMutex); + scoped_lock_if lock(_dataModelMutex, _useDataModelMutex); ExtensionsSet::size_type erased = _globalExtensions.erase(extension); return erased > 0; } @@ -1221,7 +1218,7 @@ void VirtualProgram::removeShader(const std::string& shaderID) { // lock te functions map while making changes: - ScopedLockIf lock(_dataModelMutex, _useDataModelMutex); + scoped_lock_if lock(_dataModelMutex, _useDataModelMutex); _shaderMap.erase(MAKE_SHADER_ID(shaderID)); @@ -1366,7 +1363,7 @@ VirtualProgram::apply(osg::State& state) const // Next, add the data from this VP. { - ScopedLockIf lock(_dataModelMutex, _useDataModelMutex); + scoped_lock_if lock(_dataModelMutex, _useDataModelMutex); for (auto& iter : _shaderMap) { @@ -1549,7 +1546,7 @@ VirtualProgram::getFunctions( VirtualProgram::FunctionLocationMap& out) const { // make a safe copy of the functions map. - ScopedLockIf lock(_dataModelMutex, _useDataModelMutex); + scoped_lock_if lock(_dataModelMutex, _useDataModelMutex); out = _functions; } @@ -1557,7 +1554,7 @@ void VirtualProgram::getShaderMap(ShaderMap& out) const { // make a safe copy of the functions map. 
- ScopedLockIf lock(_dataModelMutex, _useDataModelMutex); + scoped_lock_if lock(_dataModelMutex, _useDataModelMutex); out = _shaderMap; } @@ -1641,7 +1638,7 @@ VirtualProgram::accumulateFunctions( // add the local ones too: { - ScopedLockIf lock(_dataModelMutex, _useDataModelMutex); + scoped_lock_if lock(_dataModelMutex, _useDataModelMutex); for (auto& j : _functions) { @@ -1742,7 +1739,7 @@ VirtualProgram::addShadersToAccumulationMap( VirtualProgram::ShaderMap& accumMap, const osg::State& state) const { - ScopedLockIf lock(_dataModelMutex, _useDataModelMutex); + scoped_lock_if lock(_dataModelMutex, _useDataModelMutex); for (auto& iter : _shaderMap) { @@ -1831,7 +1828,7 @@ void VirtualProgram::setAcceptCallbacksVaryPerFrame(bool acceptCallbacksVaryPerF int VirtualProgram::compare_safe(const VirtualProgram& rhs) const { - ScopedLockIf lock(_dataModelMutex, _useDataModelMutex); + scoped_lock_if lock(_dataModelMutex, _useDataModelMutex); // compare each parameter COMPARE_StateAttribute_Parameter(_mask); @@ -2052,7 +2049,7 @@ VirtualProgram::PolyShader::lookUpShader( #ifdef USE_POLYSHADER_CACHE - Threading::ScopedMutexLock lock(_cacheMutex); + std::lock_guard lock(_cacheMutex); std::pair hashKey(functionName, shaderSource); diff --git a/src/osgEarth/WindLayer.cpp b/src/osgEarth/WindLayer.cpp index 5718a43de9..527b274159 100644 --- a/src/osgEarth/WindLayer.cpp +++ b/src/osgEarth/WindLayer.cpp @@ -106,19 +106,19 @@ namespace { CameraState& get(const osg::Camera* camera) { - ScopedMutexLock lock(mutex()); + std::lock_guard lock(mutex()); return (*this)[camera]; } void for_each(const std::function& func) { - ScopedMutexLock lock(mutex()); + std::lock_guard lock(mutex()); for (auto& i : *this) { func(i.second); } } void for_each(const std::function& func) const { - ScopedMutexLock lock(mutex()); + std::lock_guard lock(mutex()); for (auto& i : *this) { func(i.second); } @@ -178,8 +178,6 @@ namespace { OE_SOFT_ASSERT(sizeof(WindData) % 16 == 0, "struct WindData is not 16-byte aligned; expect chaos"); - _cameraState.setName(OE_MUTEX_NAME); - // Always run the shader. setCullingActive(false); diff --git a/src/osgEarth/weejobs.h b/src/osgEarth/weejobs.h new file mode 100644 index 0000000000..960f15c324 --- /dev/null +++ b/src/osgEarth/weejobs.h @@ -0,0 +1,872 @@ +/** + * weejobs + * Copyright 2024 Pelican Mapping + * https://github.com/pelicanmapping/weejobs + * MIT License + */ +#pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// OPTIONAL: Define WEEJOBS_EXPORT if you want to use this library from multiple modules (DLLs) +#ifndef WEEJOBS_EXPORT +#define WEEJOBS_EXPORT +#endif + +// OPTIONAL: Customize the namespace by defining WEEJOBS_NAMESPACE before including this file. +#ifndef WEEJOBS_NAMESPACE +#define WEEJOBS_NAMESPACE jobs +#endif + +/** +* weejobs is an API for scheduling a task to run in the background. +* Please read the README.md file for more information. +*/ +namespace WEEJOBS_NAMESPACE +{ + /** + * Interface for something that can be canceled + */ + class cancelable + { + public: + virtual bool canceled() const { return false; } + }; + + namespace detail + { + /** + * Event with a binary signaled state, for multi-threaded sychronization. + * + * The event has two states: + * "set" means that a call to wait() will not block; + * "unset" means that calls to wait() will block until another thread calls set(). + * + * The event starts out unset. 
+ * + * Typical usage: Thread A creates Thread B to run asynchronous code. Thread A + * then calls wait(), which blocks Thread A. When Thread B is finished, it calls + * set(). Thread A then wakes up and continues execution. + * + * NOTE: ALL waiting threads will wake up when the Event is cleared. + */ + struct event + { + public: + //! Construct a new event + event() : _set(false) { } + + //! DTOR + ~event() { + _set = false; + for (int i = 0; i < 255; ++i) // workaround buggy broadcast + _cond.notify_all(); + } + + //! Block until the event is set, then return true. + inline bool wait() { + while (!_set) { + std::unique_lock lock(_m); + if (!_set) + _cond.wait(lock); + } + return _set; + } + + //! Block until the event is set or the timout expires. + //! Return true if the event has set, otherwise false. + template + inline bool wait(T timeout) { + if (!_set) { + std::unique_lock lock(_m); + if (!_set) + _cond.wait_for(lock, timeout); + } + return _set; + } + + //! Block until the event is set; then reset it. + inline bool waitAndReset() { + std::unique_lock lock(_m); + if (!_set) + _cond.wait(lock); + _set = false; + return true; + } + + //! Set the event state, causing any waiters to unblock. + inline void set() { + if (!_set) { + std::unique_lock lock(_m); + if (!_set) { + _set = true; + _cond.notify_all(); + } + } + } + + //! Reset (unset) the event state; new waiters will block until set() is called. + inline void reset() { + std::unique_lock lock(_m); + _set = false; + } + + //! Whether the event state is set (waiters will not block). + inline bool isSet() const { + return _set; + } + + protected: + std::mutex _m; // do not use Mutex, we never want tracking + std::condition_variable_any _cond; + bool _set; + }; + + + /** + * Sempahore lets N users aquire it and then notifies when the + * count goes back down to zero. + */ + class semaphore + { + public: + //! Acquire, increasing the usage count by one + void acquire() + { + std::unique_lock lock(_m); + ++_count; + } + + //! Release, decreasing the usage count by one. + //! When the count reaches zero, joiners will be notified and + //! the semaphore will reset to its initial state. + void release() + { + std::unique_lock lock(_m); + _count = std::max(_count - 1, 0); + if (_count == 0) + _cv.notify_all(); + } + + //! Reset to initialize state; this will cause a join to occur + //! even if no acquisitions have taken place. + void reset() + { + std::unique_lock lock(_m); + _count = 0; + _cv.notify_all(); + } + + //! Current count in the semaphore + std::size_t count() const + { + std::unique_lock lock(_m); + return _count; + } + + //! Block until the semaphore count returns to zero. + //! (It must first have left zero) + //! Warning: this method will block forever if the count + //! never reaches zero! + void join() + { + std::unique_lock lock(_m); + while (_count > 0) + _cv.wait(lock); + } + + //! Block until the semaphore count returns to zero, or + //! the operation is canceled. + //! (It must first have left zero) + void join(cancelable* c) + { + _cv.wait_for(_m, std::chrono::seconds(1), [this, c]() { + return + (_count == 0) || + (c && c->canceled()); + } + ); + _count = 0; + } + + private: + int _count = 0; + std::condition_variable_any _cv; + mutable std::mutex _m; + }; + +#if __cplusplus >= 202000L + template + using result_of_t = typename std::invoke_result::type; +#else + template + using result_of_t = typename std::result_of::type; +#endif + } + + + /** + * Future holds the future result of an asynchronous operation. 
+ * + * Usage: + * Producer (usually an asynchronous function call) creates a future + * (the promise of a future result) and immediately returns it. The Consumer + * then performs other work, and eventually (or immediately) checks available() + * for a result or canceled() for cancelation. If availabile() is true, + * Consumer calls value() to fetch the valid result. + * + * As long as at least two equivalent Future object (i.e. Futures pointing to the + * same internal shared data) exist, the Future is considered valid. Once + * that count goes to one, the Future is either available (the value is ready) + * or empty (i.e., canceled or abandoned). + */ + template + class future : public cancelable + { + private: + // internal structure to track references to the result + // One instance of this is shared among all Future instances + // created from the copy constructor. + struct shared_t + { + T _obj; + mutable detail::event _ev; + }; + + public: + //! Default constructor + future() + { + _shared = std::make_shared(); + } + + //! Default copy constructor + future(const future& rhs) = default; + + //! True is this Future is unused and not connected to any other Future + bool empty() const + { + return !available() && _shared.use_count() == 1; + } + + //! True if the promise was resolved and a result if available. + bool available() const + { + return _shared->_ev.isSet(); + } + + //! True if a promise exists, but has not yet been resolved; + //! Presumably the asynchronous task is still working. + bool working() const + { + return !empty() && !available(); + } + + // cancelable interface + bool canceled() const override + { + return empty(); + } + + //! Deference the result object. Make sure you check available() + //! to check that the future was actually resolved; otherwise you + //! will just get the default object. + const T& value() const + { + return _shared->_obj; + } + + //! Dereference this object to const pointer to the result. + const T* operator -> () const + { + return &_shared->_obj; + } + + //! Same as value(), but if the result is available will reset the + //! future before returning the result object. + T release() + { + bool avail = available(); + T result = value(); + if (avail) + reset(); + return result; + } + + //! Blocks until the result becomes available or the future is abandoned; + //! then returns the result object. + const T& join() const + { + while ( + !empty() && + !_shared->_ev.wait(std::chrono::milliseconds(1))); + return value(); + } + + //! Blocks until the result becomes available or the future is abandoned + //! or a cancelation flag is set; then returns the result object. Be sure to + //! check canceled() after calling join() to see if the return value is valid. + const T& join(cancelable* p) const + { + while (working() && (p == nullptr || !p->canceled())) + { + _shared->_ev.wait(std::chrono::milliseconds(1)); + } + return value(); + } + + //! Blocks until the result becomes available or the future is abandoned + //! or a cancelation flag is set; then returns the result object. Be sure to + //! check canceled() after calling join() to see if the return value is valid. + const T& join(const cancelable& p) const + { + return join(&p); + } + + //! Release reference to a promise, resetting this future to its default state + void abandon() + { + _shared.reset(new shared_t()); + } + + //! synonym for abandon. + void reset() + { + abandon(); + } + + //! Resolve (fulfill) the promise with the provided result value. 
+ void resolve(const T& value) + { + _shared->_obj = value; + _shared->_ev.set(); + } + + //! Resolve (fulfill) the promise with an rvalue + void resolve(T&& value) + { + _shared->_obj = std::move(value); + _shared->_ev.set(); + } + + //! Resolve (fulfill) the promise with a default result + void resolve() + { + _shared->_ev.set(); + } + + //! The number of objects, including this one, that + //! reference the shared container. If this method + //! returns 1, that means this is the only object with + //! access to the data. This method will never return zero. + unsigned refs() const + { + return _shared.use_count(); + } + + private: + std::shared_ptr _shared; + }; + + //! in the "promise/future" pattern, we use the same object for both, + //! but here's an alias for clarity. + template using promise = future; + + /** + * Include a jobgroup in a context to group together multiple jobs. + * You can then call jobgroup::join() to wait for the whole group + * to finish. + */ + using jobgroup = detail::semaphore; + + /** + * Context object you can pass to dispatch(...) to control aspects of + * how the background task is run. + */ + struct context + { + std::string name; + class jobpool* pool = nullptr; + std::function priority = {}; + jobgroup* group = nullptr; + }; + + /** + * A priority-sorted collection of jobs that are running or waiting + * to run in a thread pool. + */ + class jobpool + { + public: + /** + * Metrics of a thread pool. + */ + struct metrics_t + { + std::string name; + std::atomic_uint concurrency = { 0u }; + std::atomic_uint pending = { 0u }; + std::atomic_uint running = { 0u }; + std::atomic_uint canceled = { 0u }; + std::atomic_uint total = { 0u }; + }; + + public: + //! Destroy + ~jobpool() + { + stop_threads(); + } + + //! Name of this job pool + const std::string& name() const + { + return _metrics.name; + } + + metrics_t* metrics() + { + return &_metrics; + } + + //! Set the concurrency of this job scheduler + void set_concurrency(unsigned value) + { + value = std::max(value, 1u); + if (_targetConcurrency != value) + { + _targetConcurrency = value; + start_threads(); + } + } + + //! Get the target concurrency (thread count) + unsigned concurrency() const + { + return _targetConcurrency; + } + + //! Discard all queued jobs + void cancel_all() + { + std::unique_lock lock(_queueMutex); + _queue.clear(); + _metrics.canceled += _metrics.pending; + _metrics.pending = 0; + } + + //! Schedule an asynchronous task on this scheduler + //! Use job::dispatch to run jobs (usually no need to call this directly) + //! @param delegate Function to execute + //! @param context Job details + void dispatch(std::function& delegate, const context& context) + { + // If we have a group semaphore, acquire it BEFORE queuing the job + if (context.group) + { + context.group->acquire(); + } + + if (_targetConcurrency > 0) + { + std::unique_lock lock(_queueMutex); + if (!_done) + { + _queue.emplace_back(job{ context, delegate }); + _metrics.pending++; + _metrics.total++; + _block.notify_one(); + } + } + else + { + // no threads? run synchronously. + delegate(); + + if (context.group) + { + context.group->release(); + } + } + } + + //! Construct a new job pool. + //! Do not call this directly - call getPool(name) instead. + jobpool(const std::string& name, unsigned concurrency) : + _targetConcurrency(concurrency) + { + _metrics.name = name; + _metrics.concurrency = 0; + } + + //! Pulls queued jobs and runs them in whatever thread run() is called from. + //! Runs in a loop until _done is set. 
+ void run() + { + while (!_done) + { + job next; + bool have_next = false; + { + std::unique_lock lock(_queueMutex); + + _block.wait(lock, [this] { + return _queue.empty() == false || _done == true; + }); + + if (!_queue.empty() && !_done) + { + // Find the highest priority item in the queue. + // Note: We could use std::partial_sort or std::nth_element, + // but benchmarking proves that a simple brute-force search + // is always the fastest. + // (Benchmark: https://stackoverflow.com/a/20365638/4218920) + // Also note: it is indeed possible for the results of + // priority() to change during the search. We don't care. + int index = -1; + float highest_priority = -FLT_MAX; + for (unsigned i = 0; i < _queue.size(); ++i) + { + float priority = _queue[i].ctx.priority != nullptr ? + _queue[i].ctx.priority() : + 0.0f; + + if (index < 0 || priority > highest_priority) + { + index = i; + highest_priority = priority; + } + } + if (index < 0) + index = 0; + + next = std::move(_queue[index]); + have_next = true; + + // move the last element into the empty position: + if (index < _queue.size() - 1) + { + _queue[index] = std::move(_queue.back()); + } + + // and remove the last element. + _queue.erase(_queue.end() - 1); + } + } + + if (have_next) + { + _metrics.running++; + _metrics.pending--; + + auto t0 = std::chrono::steady_clock::now(); + + bool job_executed = next._delegate(); + + auto duration = std::chrono::steady_clock::now() - t0; + + if (job_executed == false) + { + _metrics.canceled++; + } + + // release the group semaphore if necessary + if (next.ctx.group != nullptr) + { + next.ctx.group->release(); + } + + _metrics.running--; + } + + // See if we no longer need this thread because the + // target concurrency has been reduced + std::lock_guard lock(_quitMutex); + + if (_targetConcurrency < _metrics.concurrency) + { + _metrics.concurrency--; + break; + } + } + } + + //! Spawn all threads in this scheduler + inline void start_threads(); + + //! Signall all threads to stop + inline void stop_threads(); + + //! Wait for all threads to exit (after calling stop_threads) + inline void join_threads(); + + + struct job + { + context ctx; + std::function _delegate; + + bool operator < (const job& rhs) const + { + float lp = ctx.priority ? ctx.priority() : -FLT_MAX; + float rp = rhs.ctx.priority ? rhs.ctx.priority() : -FLT_MAX; + return lp < rp; + } + }; + + std::string _name; // pool name + std::vector _queue; // queued operations to run asynchronously + mutable std::mutex _queueMutex; // protect access to the queue + mutable std::mutex _quitMutex; // protects access to _done + std::atomic _targetConcurrency; // target number of concurrent threads in the pool + std::condition_variable_any _block; // thread waiter block + bool _done = false; // set to true when threads should exit + std::vector _threads; // threads in the pool + metrics_t _metrics; // metrics for this pool + }; + + class metrics + { + public: + //! Total number of pending jobs across all schedulers + int totalJobsPending() const + { + int count = 0; + for (auto pool : _pools) + count += pool->pending; + return count; + } + + //! Total number of running jobs across all schedulers + int totalJobsRunning() const + { + int count = 0; + for (auto pool : _pools) + count += pool->running; + return count; + } + + //! Total number of canceled jobs across all schedulers + int totalJobsCanceled() const + { + int count = 0; + for (auto pool : _pools) + count += pool->canceled; + return count; + } + + //! 
Total number of active jobs in the system + int totalJobs() const + { + return totalJobsPending() + totalJobsRunning(); + } + + //! Gets a vector of all jobpool metrics structures. + inline const std::vector all() + { + return _pools; + } + + std::vector _pools; + }; + + /** + * Runtime singleton object; + * Declare with WEEJOBS_INSTANCE in one of your .cpp files. + */ + namespace detail + { + struct runtime + { + inline runtime(); + + inline void kill() + { + _alive = false; + + for (auto& pool : _pools) + if (pool) + pool->stop_threads(); + + for (auto& pool : _pools) + if (pool) + pool->join_threads(); + } + + bool _alive = true; + std::mutex _mutex; + std::vector _pool_names; + std::vector _pools; + metrics _metrics; + std::function _setThreadName; + }; + } + + extern WEEJOBS_EXPORT detail::runtime& instance(); + + //! Returns the job pool with the given name, creating a new one if it doesn't + //! already exist. If you don't specify a name, a default pool is used. + inline jobpool* get_pool(const std::string& name = {}) + { + std::lock_guard lock(instance()._mutex); + for (auto pool : instance()._pools) + { + if (pool->name() == name) + return pool; + } + auto new_pool = new jobpool(name, 2u); + instance()._pools.push_back(new_pool); + instance()._metrics._pools.push_back(&new_pool->_metrics); + new_pool->start_threads(); + return new_pool; + } + + namespace detail + { + inline void pool_dispatch(std::function delegate, const context& context) + { + auto pool = context.pool ? context.pool : get_pool({}); + if (pool) + pool->dispatch(delegate, context); + } + } + + //! Dispatches a job with no return value. Fire and forget. + //! @param task Function to run in a thread. Prototype is void(void). + //! @param context Optional configuration for the asynchronous function call + inline void dispatch(std::function task, const context& context = {}) + { + auto delegate = [task]() mutable -> bool { task(); return true; }; + detail::pool_dispatch(delegate, context); + } + + //! Dispatches a job and immediately returns a future result. + //! @param task Function to run in a thread. Prototype is T(cancelable&) + //! @param context Optional configuration for the asynchronous function call + //! @param promise Optional user-supplied promise object + //! @return Future result of the async function call + template> + inline future dispatch(FUNC task, const context& context = {}, future promise = {}) + { + std::function delegate = [task, promise]() mutable + { + bool good = !promise.canceled(); + if (good) + promise.resolve(task(promise)); + return good; + }; + + detail::pool_dispatch(delegate, context); + + return promise; + } + + + inline void jobpool::start_threads() + { + _done = false; + + // Not enough? Start up more + while (_metrics.concurrency < _targetConcurrency) + { + _metrics.concurrency++; + + _threads.push_back(std::thread([this] + { + if (instance()._setThreadName) + { + instance()._setThreadName(_name.c_str()); + } + run(); + } + )); + } + } + + inline void jobpool::stop_threads() + { + _done = true; + + // Clear out the queue + { + std::unique_lock lock(_queueMutex); + + // reset any group semaphores so that JobGroup.join() + // will not deadlock. + for (auto& queuedjob : _queue) + { + if (queuedjob.ctx.group != nullptr) + { + queuedjob.ctx.group->reset(); + } + } + _queue.clear(); + + // wake up all threads so they can exit + _block.notify_all(); + } + } + + //! 
Wait for all threads to exit (after calling stop_threads) + inline void jobpool::join_threads() + { + // wait for them to exit + for (unsigned i = 0; i < _threads.size(); ++i) + { + if (_threads[i].joinable()) + { + _threads[i].join(); + } + } + + _threads.clear(); + } + + //! Metrics for all job pool + inline metrics* get_metrics() + { + return &instance()._metrics; + } + + //! stop all threads, wait for them to exit, and shut down the system + inline void shutdown() + { + instance().kill(); + } + + //! Whether the weejobs runtime is still alive (has not been shutdown) + inline bool alive() + { + return instance()._alive; + } + + //! Install a function that the SDK can use to set job pool thread names + //! when it spawns them. + inline void set_thread_name_function(std::function f) + { + instance()._setThreadName = f; + } + + // internal + inline detail::runtime::runtime() + { + std::atexit(shutdown); + } + + // Use this macro ONCE in your application in a .cpp file to + // instaniate the weejobs runtime singleton. +#define WEEJOBS_INSTANCE \ + namespace WEEJOBS_NAMESPACE { \ + static detail::runtime runtime_singleton_instance; \ + detail::runtime& instance() { return runtime_singleton_instance; } \ + } +} diff --git a/src/osgEarth/weethreads.h b/src/osgEarth/weethreads.h new file mode 100644 index 0000000000..3d45bf4d61 --- /dev/null +++ b/src/osgEarth/weethreads.h @@ -0,0 +1,934 @@ +/** + * weethreads + * Copyright 2024 Pelican Mapping + * MIT License + */ +#pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// MANDATORY: Somewhere in one of your .cpp files, use this macro to instantiate the singleton: +// WEETHREADS_INSTANCE; + +// OPTIONAL: Define WEETHREADS_EXPORT if you want to use this library from multiple modules (DLLs) +#ifndef WEETHREADS_EXPORT +#define WEETHREADS_EXPORT +#endif + +// OPTIONAL: Customize the namespace by defining WEETHREADS_NAMESPACE before including this file. +#ifndef WEETHREADS_NAMESPACE +#define WEETHREADS_NAMESPACE jobs +#endif + +/** +* WeeThreads is an API for scheduling a task to run in the background. +* +* WeeThreads is header-only and has no dependencies aside from the STL. +* +* Usage: +* +* Use this macro somewhere in your app. Put it in a .cpp file if you plan + to use multiple modules, DLLs, etc.: +* +* WEETHREADS_INSTANCE; +* +* Example: Spawn a job with no return value (fire and forget): +* +* auto job = []() { std::cout << "Hello, world!" << std::endl; }; +* jobs::dispatch(job); +* +* Example: Spawn a job and get a future result: +* +* auto job = [](jobs::cancelable&) { return 7; }; +* jobs::future result = jobs::dispatch(job); +* // later... +* if (result.available()) +* std::cout << "Result = " << result.value() << std::endl; +* else if (result.canceled()) +* std::cout << "Job was canceled" << std::endl; +* else +* // still running.... come back later +* +* Example: Spawn a job and wait for it to complete: +* +* auto job = [url](jobs::cancelable&) { return fetch_data_from_network(url); }; +* auto result = jobs::dispatch(job); +* auto value = result.join(); +* +* Example: Spwan a job with some context information: +* +* auto job = []() { std::cout << "Hello, world!" 
<< std::endl; }; +* jobs::context context; +* context.name = "My Job"; +* context.pool = jobs::get_pool("My Job Pool"); +* context.priority = []() { return 1.0f; }; +* jobs::dispatch(job, context); +* +* Example: Check for cancelation within a job: +* +* auto job = [url](jobs::cancelable& state) { +* std::string data; +* if (!state.canceled()) +* data = fetch_data_from_network(url); +* return data; +* }; +* +* auto result = jobs::dispatch(job); +* // if "result" goes out of scope, "state.canceled()" in the job will return true +* +* This SDK exists because existing solutions do not support two things we need: +* automatic job cancelation, and job prioritization. This system acheives +* cancelation by tracking the reference count of the shared result object contained +* in the Future object; if that reference count goes to one, it means that ONLY the +* scheduler knows about the job, and no one else is around to fetch its result. +* In this case, future.canceled() returns true. It's up to the task itself to +* check the cancelable& object if it wants to quit early. +*/ +namespace WEETHREADS_NAMESPACE +{ + /** + * Interface for something that can be canceled + */ + class cancelable + { + public: + virtual bool canceled() const { return false; } + }; + + namespace detail + { + /** + * Event with a binary signaled state, for multi-threaded sychronization. + * + * The event has two states: + * "set" means that a call to wait() will not block; + * "unset" means that calls to wait() will block until another thread calls set(). + * + * The event starts out unset. + * + * Typical usage: Thread A creates Thread B to run asynchronous code. Thread A + * then calls wait(), which blocks Thread A. When Thread B is finished, it calls + * set(). Thread A then wakes up and continues execution. + * + * NOTE: ALL waiting threads will wake up when the Event is cleared. + */ + struct event + { + public: + //! Construct a new event + event() : _set(false) { } + + //! DTOR + ~event() { + _set = false; + for (int i = 0; i < 255; ++i) // workaround buggy broadcast + _cond.notify_all(); + } + + //! Block until the event is set, then return true. + inline bool wait() { + while (!_set) { + std::unique_lock lock(_m); + if (!_set) + _cond.wait(lock); + } + return _set; + } + + //! Block until the event is set or the timout expires. + //! Return true if the event has set, otherwise false. + template + inline bool wait(T timeout) { + if (!_set) { + std::unique_lock lock(_m); + if (!_set) + _cond.wait_for(lock, timeout); + } + return _set; + } + + //! Block until the event is set; then reset it. + inline bool waitAndReset() { + std::unique_lock lock(_m); + if (!_set) + _cond.wait(lock); + _set = false; + return true; + } + + //! Set the event state, causing any waiters to unblock. + inline void set() { + if (!_set) { + std::unique_lock lock(_m); + if (!_set) { + _set = true; + _cond.notify_all(); + } + } + } + + //! Reset (unset) the event state; new waiters will block until set() is called. + inline void reset() { + std::unique_lock lock(_m); + _set = false; + } + + //! Whether the event state is set (waiters will not block). + inline bool isSet() const { + return _set; + } + + protected: + std::mutex _m; // do not use Mutex, we never want tracking + std::condition_variable_any _cond; + bool _set; + }; + + + /** + * Sempahore lets N users aquire it and then notifies when the + * count goes back down to zero. + */ + class semaphore + { + public: + //! 
Acquire, increasing the usage count by one + void acquire() + { + std::unique_lock lock(_m); + ++_count; + } + + //! Release, decreasing the usage count by one. + //! When the count reaches zero, joiners will be notified and + //! the semaphore will reset to its initial state. + void release() + { + std::unique_lock lock(_m); + _count = std::max(_count - 1, 0); + if (_count == 0) + _cv.notify_all(); + } + + //! Reset to initialize state; this will cause a join to occur + //! even if no acquisitions have taken place. + void reset() + { + std::unique_lock lock(_m); + _count = 0; + _cv.notify_all(); + } + + //! Current count in the semaphore + std::size_t count() const + { + std::unique_lock lock(_m); + return _count; + } + + //! Block until the semaphore count returns to zero. + //! (It must first have left zero) + //! Warning: this method will block forever if the count + //! never reaches zero! + void join() + { + std::unique_lock lock(_m); + while (_count > 0) + _cv.wait(lock); + } + + //! Block until the semaphore count returns to zero, or + //! the operation is canceled. + //! (It must first have left zero) + void join(cancelable* c) + { + _cv.wait_for(_m, std::chrono::seconds(1), [this, c]() { + return + (_count == 0) || + (c && c->canceled()); + } + ); + _count = 0; + } + + private: + int _count = 0; + std::condition_variable_any _cv; + mutable std::mutex _m; + }; + +#if __cplusplus >= 202000L + template + using result_of_t = typename std::invoke_result::type; +#else + template + using result_of_t = typename std::result_of::type; +#endif + } + + + /** + * Future holds the future result of an asynchronous operation. + * + * Usage: + * Producer (usually an asynchronous function call) creates a future + * (the promise of a future result) and immediately returns it. The Consumer + * then performs other work, and eventually (or immediately) checks available() + * for a result or canceled() for cancelation. If availabile() is true, + * Consumer calls value() to fetch the valid result. + * + * As long as at least two equivalent Future object (i.e. Futures pointing to the + * same internal shared data) exist, the Future is considered valid. Once + * that count goes to one, the Future is either available (the value is ready) + * or empty (i.e., canceled or abandoned). + */ + template + class future : public cancelable + { + private: + // internal structure to track references to the result + // One instance of this is shared among all Future instances + // created from the copy constructor. + struct shared_t + { + T _obj; + mutable detail::event _ev; + }; + + public: + //! Default constructor + future() + { + _shared = std::make_shared(); + } + + //! Default copy constructor + future(const future& rhs) = default; + + //! True is this Future is unused and not connected to any other Future + bool empty() const + { + return !available() && _shared.use_count() == 1; + } + + //! True if the promise was resolved and a result if available. + bool available() const + { + return _shared->_ev.isSet(); + } + + //! True if a promise exists, but has not yet been resolved; + //! Presumably the asynchronous task is still working. + bool working() const + { + return !empty() && !available(); + } + + // cancelable interface + bool canceled() const override + { + return empty(); + } + + //! Deference the result object. Make sure you check available() + //! to check that the future was actually resolved; otherwise you + //! will just get the default object. 
+ const T& value() const + { + return _shared->_obj; + } + + //! Dereference this object to const pointer to the result. + const T* operator -> () const + { + return &_shared->_obj; + } + + //! Same as value(), but if the result is available will reset the + //! future before returning the result object. + T release() + { + bool avail = available(); + T result = value(); + if (avail) + reset(); + return result; + } + + //! Blocks until the result becomes available or the future is abandoned; + //! then returns the result object. + const T& join() const + { + while ( + !empty() && + !_shared->_ev.wait(std::chrono::milliseconds(1))); + return value(); + } + + //! Blocks until the result becomes available or the future is abandoned + //! or a cancelation flag is set; then returns the result object. Be sure to + //! check canceled() after calling join() to see if the return value is valid. + const T& join(cancelable* p) const + { + while (working() && (p == nullptr || !p->canceled())) + { + _shared->_ev.wait(std::chrono::milliseconds(1)); + } + return value(); + } + + //! Blocks until the result becomes available or the future is abandoned + //! or a cancelation flag is set; then returns the result object. Be sure to + //! check canceled() after calling join() to see if the return value is valid. + const T& join(const cancelable& p) const + { + return join(&p); + } + + //! Release reference to a promise, resetting this future to its default state + void abandon() + { + _shared.reset(new shared_t()); + } + + //! synonym for abandon. + void reset() + { + abandon(); + } + + //! Resolve (fulfill) the promise with the provided result value. + void resolve(const T& value) + { + _shared->_obj = value; + _shared->_ev.set(); + } + + //! Resolve (fulfill) the promise with an rvalue + void resolve(T&& value) + { + _shared->_obj = std::move(value); + _shared->_ev.set(); + } + + //! Resolve (fulfill) the promise with a default result + void resolve() + { + _shared->_ev.set(); + } + + //! The number of objects, including this one, that + //! reference the shared container. If this method + //! returns 1, that means this is the only object with + //! access to the data. This method will never return zero. + unsigned refs() const + { + return _shared.use_count(); + } + + private: + std::shared_ptr _shared; + }; + + //! in the "promise/future" pattern, we use the same object for both, + //! but here's an alias for clarity. + template using promise = future; + + /** + * Include a jobgroup in a context to group together multiple jobs. + * You can then call jobgroup::join() to wait for the whole group + * to finish. + */ + using jobgroup = detail::semaphore; + + /** + * Context object you can pass to dispatch(...) to control aspects of + * how the background task is run. + */ + struct context + { + std::string name; + class jobpool* pool = nullptr; + std::function priority = {}; + jobgroup* group = nullptr; + }; + + /** + * A priority-sorted collection of jobs that are running or waiting + * to run in a thread pool. + */ + class jobpool + { + public: + /** + * Metrics of a thread pool. + */ + struct metrics_t + { + std::string name; + std::atomic_uint concurrency = { 0u }; + std::atomic_uint pending = { 0u }; + std::atomic_uint running = { 0u }; + std::atomic_uint canceled = { 0u }; + std::atomic_uint total = { 0u }; + }; + + public: + //! Destroy + ~jobpool() + { + stop_threads(); + } + + //! 
Name of this job pool + const std::string& name() const + { + return _metrics.name; + } + + metrics_t* metrics() + { + return &_metrics; + } + + //! Set the concurrency of this job scheduler + void set_concurrency(unsigned value) + { + value = std::max(value, 1u); + if (_targetConcurrency != value) + { + _targetConcurrency = value; + start_threads(); + } + } + + //! Get the target concurrency (thread count) + unsigned concurrency() const + { + return _targetConcurrency; + } + + //! Discard all queued jobs + void cancel_all() + { + std::unique_lock lock(_queueMutex); + _queue.clear(); + _metrics.canceled += _metrics.pending; + _metrics.pending = 0; + } + + //! Schedule an asynchronous task on this scheduler + //! Use job::dispatch to run jobs (usually no need to call this directly) + //! @param delegate Function to execute + //! @param context Job details + void dispatch(std::function& delegate, const context& context) + { + // If we have a group semaphore, acquire it BEFORE queuing the job + if (context.group) + { + context.group->acquire(); + } + + if (_targetConcurrency > 0) + { + std::unique_lock lock(_queueMutex); + if (!_done) + { + _queue.emplace_back(job{ context, delegate }); + _metrics.pending++; + _metrics.total++; + _block.notify_one(); + } + } + else + { + // no threads? run synchronously. + delegate(); + + if (context.group) + { + context.group->release(); + } + } + } + + //! Construct a new job pool. + //! Do not call this directly - call getPool(name) instead. + jobpool(const std::string& name, unsigned concurrency) : + _targetConcurrency(concurrency) + { + _metrics.name = name; + _metrics.concurrency = 0; + } + + //! Pulls queued jobs and runs them in whatever thread run() is called from. + //! Runs in a loop until _done is set. + void run() + { + while (!_done) + { + job next; + bool have_next = false; + { + std::unique_lock lock(_queueMutex); + + _block.wait(lock, [this] { + return _queue.empty() == false || _done == true; + }); + + if (!_queue.empty() && !_done) + { + // Find the highest priority item in the queue. + // Note: We could use std::partial_sort or std::nth_element, + // but benchmarking proves that a simple brute-force search + // is always the fastest. + // (Benchmark: https://stackoverflow.com/a/20365638/4218920) + // Also note: it is indeed possible for the results of + // priority() to change during the search. We don't care. + int index = -1; + float highest_priority = -FLT_MAX; + for (unsigned i = 0; i < _queue.size(); ++i) + { + float priority = _queue[i].ctx.priority != nullptr ? + _queue[i].ctx.priority() : + 0.0f; + + if (index < 0 || priority > highest_priority) + { + index = i; + highest_priority = priority; + } + } + if (index < 0) + index = 0; + + next = std::move(_queue[index]); + have_next = true; + + // move the last element into the empty position: + if (index < _queue.size() - 1) + { + _queue[index] = std::move(_queue.back()); + } + + // and remove the last element. 
+ _queue.erase(_queue.end() - 1); + } + } + + if (have_next) + { + _metrics.running++; + _metrics.pending--; + + auto t0 = std::chrono::steady_clock::now(); + + bool job_executed = next._delegate(); + + auto duration = std::chrono::steady_clock::now() - t0; + + if (job_executed == false) + { + _metrics.canceled++; + } + + // release the group semaphore if necessary + if (next.ctx.group != nullptr) + { + next.ctx.group->release(); + } + + _metrics.running--; + } + + // See if we no longer need this thread because the + // target concurrency has been reduced + std::lock_guard lock(_quitMutex); + + if (_targetConcurrency < _metrics.concurrency) + { + _metrics.concurrency--; + break; + } + } + } + + //! Spawn all threads in this scheduler + inline void start_threads(); + + //! Signall all threads to stop + inline void stop_threads(); + + //! Wait for all threads to exit (after calling stop_threads) + inline void join_threads(); + + + struct job + { + context ctx; + std::function _delegate; + + bool operator < (const job& rhs) const + { + float lp = ctx.priority ? ctx.priority() : -FLT_MAX; + float rp = rhs.ctx.priority ? rhs.ctx.priority() : -FLT_MAX; + return lp < rp; + } + }; + + std::string _name; // pool name + std::vector _queue; // queued operations to run asynchronously + mutable std::mutex _queueMutex; // protect access to the queue + mutable std::mutex _quitMutex; // protects access to _done + std::atomic _targetConcurrency; // target number of concurrent threads in the pool + std::condition_variable_any _block; // thread waiter block + bool _done = false; // set to true when threads should exit + std::vector _threads; // threads in the pool + metrics_t _metrics; // metrics for this pool + }; + + class metrics + { + public: + //! Total number of pending jobs across all schedulers + int totalJobsPending() const + { + int count = 0; + for (auto pool : _pools) + count += pool->pending; + return count; + } + + //! Total number of running jobs across all schedulers + int totalJobsRunning() const + { + int count = 0; + for (auto pool : _pools) + count += pool->running; + return count; + } + + //! Total number of canceled jobs across all schedulers + int totalJobsCanceled() const + { + int count = 0; + for (auto pool : _pools) + count += pool->canceled; + return count; + } + + //! Total number of active jobs in the system + int totalJobs() const + { + return totalJobsPending() + totalJobsRunning(); + } + + //! Gets a vector of all jobpool metrics structures. + inline const std::vector all() + { + return _pools; + } + + std::vector _pools; + }; + + /** + * Runtime singleton object; + * Declare with WEETHREADS_INSTANCE in one of your .cpp files. + */ + namespace detail + { + struct runtime + { + inline runtime(); + + inline void kill() + { + _alive = false; + + for (auto& pool : _pools) + if (pool) + pool->stop_threads(); + + for (auto& pool : _pools) + if (pool) + pool->join_threads(); + } + + bool _alive = true; + std::mutex _mutex; + std::vector _pool_names; + std::vector _pools; + metrics _metrics; + std::function _setThreadName; + }; + } + + extern WEETHREADS_EXPORT detail::runtime& instance(); + + //! Returns the job pool with the given name, creating a new one if it doesn't + //! already exist. If you don't specify a name, a default pool is used. 
+ inline jobpool* get_pool(const std::string& name = {}) + { + std::lock_guard lock(instance()._mutex); + for (auto pool : instance()._pools) + { + if (pool->name() == name) + return pool; + } + auto new_pool = new jobpool(name, 2u); + instance()._pools.push_back(new_pool); + instance()._metrics._pools.push_back(&new_pool->_metrics); + new_pool->start_threads(); + return new_pool; + } + + namespace detail + { + inline void pool_dispatch(std::function<bool()> delegate, const context& context) + { + auto pool = context.pool ? context.pool : get_pool({}); + if (pool) + pool->dispatch(delegate, context); + } + } + + //! Dispatches a job with no return value. Fire and forget. + //! @param task Function to run in a thread. Prototype is void(void). + //! @param context Optional configuration for the asynchronous function call + inline void dispatch(std::function<void()> task, const context& context = {}) + { + auto delegate = [task]() mutable -> bool { task(); return true; }; + detail::pool_dispatch(delegate, context); + } + + //! Dispatches a job and immediately returns a future result. + //! @param task Function to run in a thread. Prototype is T(cancelable&) + //! @param context Optional configuration for the asynchronous function call + //! @param promise Optional user-supplied promise object + //! @return Future result of the async function call + template<typename FUNC, typename T = detail::result_of_t<FUNC, cancelable&>> + inline future<T> dispatch(FUNC task, const context& context = {}, future<T> promise = {}) + { + std::function<bool()> delegate = [task, promise]() mutable + { + bool good = !promise.canceled(); + if (good) + promise.resolve(task(promise)); + return good; + }; + + detail::pool_dispatch(delegate, context); + + return promise; + } + + + inline void jobpool::start_threads() + { + _done = false; + + // Not enough? Start up more + while (_metrics.concurrency < _targetConcurrency) + { + _metrics.concurrency++; + + _threads.push_back(std::thread([this] + { + if (instance()._setThreadName) + { + instance()._setThreadName(_name.c_str()); + } + run(); + } + )); + } + } + + inline void jobpool::stop_threads() + { + _done = true; + + // Clear out the queue + { + std::unique_lock lock(_queueMutex); + + // reset any group semaphores so that JobGroup.join() + // will not deadlock. + for (auto& queuedjob : _queue) + { + if (queuedjob.ctx.group != nullptr) + { + queuedjob.ctx.group->reset(); + } + } + _queue.clear(); + + // wake up all threads so they can exit + _block.notify_all(); + } + } + + //! Wait for all threads to exit (after calling stop_threads) + inline void jobpool::join_threads() + { + // wait for them to exit + for (unsigned i = 0; i < _threads.size(); ++i) + { + if (_threads[i].joinable()) + { + _threads[i].join(); + } + } + + _threads.clear(); + } + + //! Metrics for all job pools + inline metrics* get_metrics() + { + return &instance()._metrics; + } + + //! stop all threads, wait for them to exit, and shut down the system + inline void shutdown() + { + instance().kill(); + } + + //! Whether the weethreads runtime is still alive (has not been shut down) + inline bool alive() + { + return instance()._alive; + } + + //! Install a function that the SDK can use to set job pool thread names + //! when it spawns them. + inline void set_thread_name_function(std::function<void(const char*)> f) + { + instance()._setThreadName = f; + } + + // internal + inline detail::runtime::runtime() + { + std::atexit(shutdown); + } + + // Use this macro ONCE in your application in a .cpp file to + // instantiate the weethreads runtime singleton.
+#define WEETHREADS_INSTANCE \ + namespace WEETHREADS_NAMESPACE { \ + static detail::runtime runtime_singleton_instance; \ + detail::runtime& instance() { return runtime_singleton_instance; } \ + } +} diff --git a/src/osgEarthDrivers/CMakeLists.txt b/src/osgEarthDrivers/CMakeLists.txt index 051692fbd8..12fccd2045 100644 --- a/src/osgEarthDrivers/CMakeLists.txt +++ b/src/osgEarthDrivers/CMakeLists.txt @@ -35,8 +35,6 @@ add_subdirectory(featurefilter_intersect) add_subdirectory(featurefilter_join) add_subdirectory(gltf) add_subdirectory(kml) -add_subdirectory(mapinspector) -add_subdirectory(monitor) add_subdirectory(script_engine_duktape) add_subdirectory(sky_gl) add_subdirectory(sky_simple) diff --git a/src/osgEarthDrivers/cache_filesystem/FileSystemCache.cpp b/src/osgEarthDrivers/cache_filesystem/FileSystemCache.cpp index 47b8c0de79..fb27c6c95f 100644 --- a/src/osgEarthDrivers/cache_filesystem/FileSystemCache.cpp +++ b/src/osgEarthDrivers/cache_filesystem/FileSystemCache.cpp @@ -75,8 +75,8 @@ namespace protected: std::string _rootPath; - std::shared_ptr _jobArena; FileSystemCacheOptions _options; + jobs::jobpool* _pool = nullptr; }; struct WriteCacheRecord { @@ -96,7 +96,7 @@ namespace const std::string& name, const std::string& rootPath, const FileSystemCacheOptions& options, - std::shared_ptr& jobArena); + jobs::jobpool* pool); static bool _s_debug; @@ -136,7 +136,7 @@ namespace FileSystemCacheOptions _options; // pool for asynchronous writes - std::shared_ptr _jobArena; + jobs::jobpool* _pool = nullptr; public: // cache for objects waiting to be written; this supports reading from @@ -193,7 +193,8 @@ namespace { FileSystemCache::FileSystemCache(const CacheOptions& options) : Cache(options), - _options(options) + _options(options), + _pool(nullptr) { // read the root path from ENV is necessary: if ( !_options.rootPath().isSet()) @@ -222,11 +223,12 @@ namespace { if (num > 0u) { - _jobArena = std::make_shared("oe.fscache", osg::clampBetween(num, 1u, 8u)); + _pool = jobs::get_pool("oe.fscache"); + _pool->set_concurrency(osg::clampBetween(num, 1u, 8u)); } else { - _jobArena = nullptr; + _pool = nullptr; } } @@ -236,7 +238,7 @@ namespace if (getStatus().isError()) return NULL; - return _bins.getOrCreate(name, new FileSystemCacheBin(name, _rootPath, _options, _jobArena)); + return _bins.getOrCreate(name, new FileSystemCacheBin(name, _rootPath, _options, _pool)); } CacheBin* @@ -245,13 +247,13 @@ namespace if (getStatus().isError()) return NULL; - static Mutex s_defaultBinMutex(OE_MUTEX_NAME); + static Mutex s_defaultBinMutex; if ( !_defaultBin.valid() ) { - ScopedMutexLock lock( s_defaultBinMutex ); + std::lock_guard lock( s_defaultBinMutex ); if ( !_defaultBin.valid() ) // double-check { - _defaultBin = new FileSystemCacheBin("__default", _rootPath, _options, _jobArena); + _defaultBin = new FileSystemCacheBin("__default", _rootPath, _options, _pool); } } return _defaultBin.get(); @@ -323,15 +325,13 @@ namespace const std::string& binID, const std::string& rootPath, const FileSystemCacheOptions& options, - std::shared_ptr& jobArena) : + jobs::jobpool* pool) : CacheBin(binID, options.enableNodeCaching().get()), - _jobArena(jobArena), + _pool(pool), _binPathExists(false), _options(options), - _ok(true), - _fileGate("CacheBinFileGate(OE)"), - _writeCacheRWM("CacheBinWriteL2(OE)") + _ok(true) { _binPath = osgDB::concatPaths(rootPath, binID); _metaPath = osgDB::concatPaths(_binPath, "osgearth_cacheinfo.json"); @@ -402,7 +402,7 @@ namespace // lock the file: ScopedGate lockFile(_fileGate, 
fileURI.full()); - if (_jobArena) + if (_pool) { // first check the write-pending cache. The record will be there // if the object is queued for asynchronous writing but hasn't @@ -487,7 +487,7 @@ namespace // lock the file: ScopedGate lockFile(_fileGate, fileURI.full()); - if (_jobArena) + if (_pool) { // first check the write-pending cache. The record will be there // if the object is queued for asynchronous writing but hasn't @@ -592,7 +592,7 @@ namespace osg::ref_ptr object(raw_object); osg::ref_ptr writeOptions(dbo); - auto write_op = [=](Cancelable*) + auto write_op = [=]() { OE_PROFILING_ZONE_NAMED("OE FS Cache Write"); @@ -660,7 +660,7 @@ namespace } }; - if (_jobArena != nullptr) + if (_pool != nullptr) { // Store in the write-cache until it's actually written. // Will override any existing entry and that's OK since the @@ -672,13 +672,13 @@ namespace _writeCacheRWM.write_unlock(); // asynchronous write - Job(_jobArena.get()).dispatch_and_forget(write_op); + jobs::dispatch(write_op, jobs::context{ fileURI.full(), _pool }); } else { // synchronous write - write_op(nullptr); + write_op(); } return true; diff --git a/src/osgEarthDrivers/cache_rocksdb/RocksDBCache.cpp b/src/osgEarthDrivers/cache_rocksdb/RocksDBCache.cpp index 5ef78affe9..fc91bd8247 100644 --- a/src/osgEarthDrivers/cache_rocksdb/RocksDBCache.cpp +++ b/src/osgEarthDrivers/cache_rocksdb/RocksDBCache.cpp @@ -210,10 +210,10 @@ RocksDBCacheImpl::getOrCreateDefaultBin() if ( !_db ) return 0L; - static Threading::Mutex s_defaultBinMutex; + static std::mutex s_defaultBinMutex; if ( !_defaultBin.valid() ) { - Threading::ScopedMutexLock lock( s_defaultBinMutex ); + std::lock_guard lock( s_defaultBinMutex ); if ( !_defaultBin.valid() ) // double-check { _defaultBin = new RocksDBCacheBin("_default", _db, _tracker.get()); diff --git a/src/osgEarthDrivers/cache_rocksdb/RocksDBCacheBin b/src/osgEarthDrivers/cache_rocksdb/RocksDBCacheBin index 1a9b279fac..2e2edf8f5f 100644 --- a/src/osgEarthDrivers/cache_rocksdb/RocksDBCacheBin +++ b/src/osgEarthDrivers/cache_rocksdb/RocksDBCacheBin @@ -85,7 +85,7 @@ namespace osgEarth { namespace RocksDBCache std::string _binPath; // full path to the bin's root folder osg::ref_ptr _rw; osg::ref_ptr _rwOptions; - Threading::Mutex _rwMutex; + std::mutex _rwMutex; rocksdb::DB* _db; osg::ref_ptr _tracker; bool _debug; diff --git a/src/osgEarthDrivers/cache_rocksdb/RocksDBCacheBin.cpp b/src/osgEarthDrivers/cache_rocksdb/RocksDBCacheBin.cpp index 9752a4a0ad..a3c981ae55 100644 --- a/src/osgEarthDrivers/cache_rocksdb/RocksDBCacheBin.cpp +++ b/src/osgEarthDrivers/cache_rocksdb/RocksDBCacheBin.cpp @@ -611,7 +611,7 @@ RocksDBCacheBin::readMetadata() if ( !binValidForReading() ) return Config(); - ScopedMutexLock exclusiveLock( _rwMutex ); + std::lock_guard exclusiveLock( _rwMutex ); std::string binvalue; rocksdb::Status status = _db->Get(rocksdb::ReadOptions(), binKey(), &binvalue); @@ -629,7 +629,7 @@ RocksDBCacheBin::writeMetadata(const Config& conf) if ( !binValidForWriting() ) return false; - ScopedMutexLock exclusiveLock( _rwMutex ); + std::lock_guard exclusiveLock( _rwMutex ); // inject the cache version Config mutableConf(conf); diff --git a/src/osgEarthDrivers/cache_rocksdb/Tracker b/src/osgEarthDrivers/cache_rocksdb/Tracker index aac1e30d1e..abe7a9171f 100644 --- a/src/osgEarthDrivers/cache_rocksdb/Tracker +++ b/src/osgEarthDrivers/cache_rocksdb/Tracker @@ -31,8 +31,6 @@ namespace osgEarth { namespace RocksDBCache { - typedef OpenThreads::Atomic unsigned_atomic; - /** * Tracks usage metrics across a 
RocksDB cache */ @@ -56,9 +54,9 @@ namespace osgEarth { namespace RocksDBCache virtual ~Tracker() { } public: - unsigned_atomic reads; - unsigned_atomic hits; - unsigned_atomic writes; + std::atomic_uint reads; + std::atomic_uint hits; + std::atomic_uint writes; bool hasSizeLimit() const { return _options.maxSizeMB().isSet(); diff --git a/src/osgEarthDrivers/engine_rex/CreateTileImplementation b/src/osgEarthDrivers/engine_rex/CreateTileImplementation index d1e689cdc0..a0af06d094 100644 --- a/src/osgEarthDrivers/engine_rex/CreateTileImplementation +++ b/src/osgEarthDrivers/engine_rex/CreateTileImplementation @@ -44,7 +44,7 @@ namespace osgEarth { namespace REX int flags, unsigned referenceLOD, const TileKey& area, - Threading::Cancelable* progress = nullptr); + Cancelable* progress = nullptr); }; } } diff --git a/src/osgEarthDrivers/engine_rex/CreateTileImplementation.cpp b/src/osgEarthDrivers/engine_rex/CreateTileImplementation.cpp index 145b8e23b7..deecd3377c 100644 --- a/src/osgEarthDrivers/engine_rex/CreateTileImplementation.cpp +++ b/src/osgEarthDrivers/engine_rex/CreateTileImplementation.cpp @@ -104,7 +104,7 @@ CreateTileImplementation::createTile( sharedGeom, progress); - if (progress && progress->isCanceled()) + if (progress && progress->canceled()) { return nullptr; } diff --git a/src/osgEarthDrivers/engine_rex/GeometryPool b/src/osgEarthDrivers/engine_rex/GeometryPool index 93e0c8c3d5..e9d33747e0 100644 --- a/src/osgEarthDrivers/engine_rex/GeometryPool +++ b/src/osgEarthDrivers/engine_rex/GeometryPool @@ -323,7 +323,7 @@ namespace osgEarth using GeometryMap = std::unordered_map>; mutable Threading::Gate _keygate; - mutable Threading::Mutex _geometryMapMutex; + mutable std::mutex _geometryMapMutex; GeometryMap _geometryMap; osg::ref_ptr _defaultPrimSet; diff --git a/src/osgEarthDrivers/engine_rex/GeometryPool.cpp b/src/osgEarthDrivers/engine_rex/GeometryPool.cpp index d44a556690..ac38b33837 100644 --- a/src/osgEarthDrivers/engine_rex/GeometryPool.cpp +++ b/src/osgEarthDrivers/engine_rex/GeometryPool.cpp @@ -31,9 +31,7 @@ using namespace osgEarth::REX; GeometryPool::GeometryPool() : _enabled(true), - _debug(false), - _geometryMapMutex("GeometryPool(OE)"), - _keygate("GeometryPool(OE).keygate") + _debug(false) { ADJUST_UPDATE_TRAV_COUNT(this, +1); @@ -64,7 +62,7 @@ GeometryPool::getPooledGeometry( // make our globally shared EBO if we need it { - Threading::ScopedMutexLock lock(_geometryMapMutex); + std::lock_guard lock(_geometryMapMutex); if (!_defaultPrimSet.valid()) { // convert the mesher's indices to a SharedDrawElements @@ -120,7 +118,7 @@ GeometryPool::getPooledGeometry( // first check the sharing cache (note: tiles with edits are not cached) if (edits.empty()) { - Threading::ScopedMutexLock lock(_geometryMapMutex); + std::lock_guard lock(_geometryMapMutex); GeometryMap::iterator i = _geometryMap.find(geomKey); if (i != _geometryMap.end()) { @@ -137,7 +135,7 @@ GeometryPool::getPooledGeometry( // only store as a shared geometry if there are no constraints. 
if (out.valid() && !out->hasConstraints()) { - Threading::ScopedMutexLock lock(_geometryMapMutex); + std::lock_guard lock(_geometryMapMutex); _geometryMap[geomKey] = out.get(); } } @@ -224,7 +222,7 @@ GeometryPool::traverse(osg::NodeVisitor& nv) { if (nv.getVisitorType() == nv.UPDATE_VISITOR && _enabled) { - Threading::ScopedMutexLock lock(_geometryMapMutex); + std::lock_guard lock(_geometryMapMutex); std::vector keys; @@ -251,7 +249,7 @@ void GeometryPool::clear() { releaseGLObjects(nullptr); - Threading::ScopedMutexLock lock(_geometryMapMutex); + std::lock_guard lock(_geometryMapMutex); _geometryMap.clear(); } @@ -262,7 +260,7 @@ GeometryPool::resizeGLObjectBuffers(unsigned maxsize) return; // collect all objects in a thread safe manner - Threading::ScopedMutexLock lock(_geometryMapMutex); + std::lock_guard lock(_geometryMapMutex); for (GeometryMap::const_iterator i = _geometryMap.begin(); i != _geometryMap.end(); ++i) { @@ -283,7 +281,7 @@ GeometryPool::releaseGLObjects(osg::State* state) const return; // collect all objects in a thread safe manner - Threading::ScopedMutexLock lock(_geometryMapMutex); + std::lock_guard lock(_geometryMapMutex); for (auto& entry : _geometryMap) { diff --git a/src/osgEarthDrivers/engine_rex/LoadTileData.cpp b/src/osgEarthDrivers/engine_rex/LoadTileData.cpp index 50a821e4a8..0f9fe6872b 100644 --- a/src/osgEarthDrivers/engine_rex/LoadTileData.cpp +++ b/src/osgEarthDrivers/engine_rex/LoadTileData.cpp @@ -86,10 +86,10 @@ LoadTileDataOperation::dispatch(bool async) TileKey key(_tilenode->getKey()); - auto load = [engine, map, key, manifest, enableCancel] (Cancelable* progress) + auto load = [engine, map, key, manifest, enableCancel] (Cancelable& progress) { osg::ref_ptr wrapper = - enableCancel ? new ProgressCallback(progress) : nullptr; + enableCancel ? 
new ProgressCallback(&progress) : nullptr; osg::ref_ptr result = engine->createTileModel( map.get(), @@ -115,16 +115,15 @@ LoadTileDataOperation::dispatch(bool async) if (async) { - Job job; - job.setArena(ARENA_LOAD_TILE); - job.setPriorityFunction(priority_func); - _result = job.dispatch(load); + jobs::context context; + context.pool = jobs::get_pool(ARENA_LOAD_TILE); + context.priority = priority_func; + _result = jobs::dispatch(load, context); } else { - Promise promise; - _result = promise; // .getFuture(); - promise.resolve(load(nullptr)); + Cancelable c; + _result.resolve(load(c)); } return true; diff --git a/src/osgEarthDrivers/engine_rex/Loader b/src/osgEarthDrivers/engine_rex/Loader index 1275aefbf2..0adad4e9f8 100644 --- a/src/osgEarthDrivers/engine_rex/Loader +++ b/src/osgEarthDrivers/engine_rex/Loader @@ -77,7 +77,7 @@ namespace osgEarth { namespace REX // Queue of tile data to merge during UPDATE traversal using MergeQueue = std::queue; MergeQueue _mergeQueue; - JobArena::Metrics::Arena::Ptr _metrics; + jobs::jobpool::metrics_t* _metrics; Mutex _mutex; unsigned _mergesPerFrame; diff --git a/src/osgEarthDrivers/engine_rex/Loader.cpp b/src/osgEarthDrivers/engine_rex/Loader.cpp index 1f46723103..d43bfa0900 100644 --- a/src/osgEarthDrivers/engine_rex/Loader.cpp +++ b/src/osgEarthDrivers/engine_rex/Loader.cpp @@ -40,9 +40,9 @@ Merger::Merger() : { setCullingActive(false); setNumChildrenRequiringUpdateTraversal(+1); - _mutex.setName(OE_MUTEX_NAME); - _metrics = JobArena::get(ARENA_LOAD_TILE)->metrics(); + auto pool = jobs::get_pool(ARENA_LOAD_TILE); + _metrics = pool->metrics(); } Merger::~Merger() @@ -59,18 +59,18 @@ Merger::setMergesPerFrame(unsigned value) void Merger::clear() { - ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); // Decrement the numJobsRunning stat b/c these jobs in the queues will never actually run. 
if (_metrics) { for (unsigned int i = 0; i < _mergeQueue.size(); ++i) { - _metrics->numJobsRunning--; + _metrics->running--; } for (unsigned int i = 0; i < _compileQueue.size(); ++i) { - _metrics->numJobsRunning--; + _metrics->running--; } } @@ -96,7 +96,7 @@ Merger::merge(LoadTileDataOperationPtr data, osg::NodeVisitor& nv) bool bindless = GLUtils::useNVGL(); data->_result.join()->getStateToCompile(*state.get(), bindless, data->_tilenode.get()); - ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); if (!state->empty()) { @@ -117,13 +117,13 @@ Merger::merge(LoadTileDataOperationPtr data, osg::NodeVisitor& nv) } else { - ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); _mergeQueue.push(data); } if (_metrics) { - _metrics->numJobsRunning++; + _metrics->running++; } } @@ -136,7 +136,7 @@ Merger::traverse(osg::NodeVisitor& nv) } else if (nv.getVisitorType() == nv.UPDATE_VISITOR && _clock.update()) { - ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); // Check the GL compile queue // TODO: the ICO will orphan compilesets when a graphics context @@ -157,8 +157,8 @@ Merger::traverse(osg::NodeVisitor& nv) // compile canceled, ditch it if (_metrics) { - _metrics->numJobsRunning--; - _metrics->numJobsCanceled++; + _metrics->running--; + _metrics->canceled++; } } else @@ -196,7 +196,7 @@ Merger::traverse(osg::NodeVisitor& nv) _mergeQueue.pop(); if (_metrics) { - _metrics->numJobsRunning--; + _metrics->running--; } } diff --git a/src/osgEarthDrivers/engine_rex/RexTerrainEngineNode.cpp b/src/osgEarthDrivers/engine_rex/RexTerrainEngineNode.cpp index bb0269df74..055af8e0e9 100644 --- a/src/osgEarthDrivers/engine_rex/RexTerrainEngineNode.cpp +++ b/src/osgEarthDrivers/engine_rex/RexTerrainEngineNode.cpp @@ -327,7 +327,7 @@ RexTerrainEngineNode::onSetMap() const char* concurrency_str = ::getenv("OSGEARTH_TERRAIN_CONCURRENCY"); if (concurrency_str) concurrency = Strings::as(concurrency_str, concurrency); - JobArena::setConcurrency(ARENA_LOAD_TILE, concurrency); + jobs::get_pool(ARENA_LOAD_TILE)->set_concurrency(concurrency); // Make a tile unloader _unloader = new UnloaderGroup(_tiles.get(), getOptions()); @@ -412,7 +412,6 @@ RexTerrainEngineNode::onSetMap() { ShadersGL4 sh; std::string incStrGL4 = ShaderLoader::load(sh.ENGINE_TYPES, sh); - //const std::string incGL4 = "#pragma include RexEngine.GL4.glsl"; if (source.find(incStrGL4) == std::string::npos) { buf << incStrGL4 << "\n"; @@ -574,10 +573,11 @@ RexTerrainEngineNode::refresh(bool forceDirty) this->ref(); // Load all the root key tiles. 
- JobGroup loadGroup; - Job load; - load.setArena(ARENA_LOAD_TILE); - load.setGroup(&loadGroup); + jobs::jobgroup loadGroup; + + jobs::context context; + context.group = &loadGroup; + context.pool = jobs::get_pool(ARENA_LOAD_TILE); for (unsigned i = 0; i < keys.size(); ++i) { @@ -597,10 +597,7 @@ RexTerrainEngineNode::refresh(bool forceDirty) tileNode->initializeData(); // And load the tile's data - load.dispatch_and_forget([tileNode](Cancelable*) - { - tileNode->loadSync(); - }); + jobs::dispatch([tileNode]() { tileNode->loadSync(); }, context); OE_DEBUG << " - " << (i + 1) << "/" << keys.size() << " : " << keys[i].str() << std::endl; } @@ -751,7 +748,7 @@ RexTerrainEngineNode::dirtyTerrainOptions() _merger->setMergesPerFrame(options.getMergesPerFrame()); - JobArena::setConcurrency(ARENA_LOAD_TILE, options.getConcurrency()); + jobs::get_pool(ARENA_LOAD_TILE)->set_concurrency(options.getConcurrency()); getSurfaceStateSet()->getOrCreateUniform( "oe_terrain_tess", osg::Uniform::FLOAT)->set(options.getTessellationLevel()); diff --git a/src/osgEarthDrivers/engine_rex/TileNode b/src/osgEarthDrivers/engine_rex/TileNode index 9720048f46..3a90963357 100644 --- a/src/osgEarthDrivers/engine_rex/TileNode +++ b/src/osgEarthDrivers/engine_rex/TileNode @@ -184,7 +184,7 @@ namespace osgEarth { namespace REX osg::observer_ptr _parentTile; osg::ref_ptr _surface; osg::observer_ptr _context; - Threading::Mutex _mutex; + std::mutex _mutex; std::atomic _lastTraversalFrame; double _lastTraversalTime = 0.0; float _lastTraversalRange = FLT_MAX; diff --git a/src/osgEarthDrivers/engine_rex/TileNode.cpp b/src/osgEarthDrivers/engine_rex/TileNode.cpp index a66c67f918..3aac2adcf5 100644 --- a/src/osgEarthDrivers/engine_rex/TileNode.cpp +++ b/src/osgEarthDrivers/engine_rex/TileNode.cpp @@ -31,6 +31,7 @@ #include #include #include +#include using namespace osgEarth::REX; using namespace osgEarth; @@ -38,6 +39,16 @@ using namespace osgEarth::Util; #define LC "[TileNode] " +// template to capture the result type of a function: +template +struct result_type; + +template +struct result_type +{ + typedef R type; +}; + namespace { // Scale and bias matrices, one for each TileKey quadrant. 
@@ -55,8 +66,6 @@ TileNode::TileNode(const TileKey& key, TileNode* parent, EngineContext* context, _parentTile(parent), _context(context), _lastTraversalFrame(0), - _mutex("TileNode(OE)"), - _loadQueue("TileNode LoadQueue(OE)"), _loadPriority(0.0f) { OE_HARD_ASSERT(context != nullptr); @@ -127,7 +136,7 @@ TileNode::createGeometry(Cancelable* progress) geom, progress); - if (progress && progress->isCanceled()) + if (progress && progress->canceled()) return; if (geom.valid()) @@ -675,30 +684,29 @@ TileNode::createChildren() EngineContext* context(_context.get()); osg::observer_ptr tile_weakptr(this); - auto createChildrenOperation = [context, tile_weakptr](Cancelable* state) + auto createChildrenOperation = [context, tile_weakptr](auto& state) { CreateChildrenResult result; osg::ref_ptr tile; - if (tile_weakptr.lock(tile) && !state->isCanceled()) + if (tile_weakptr.lock(tile) && !state.canceled()) { for (unsigned q = 0; q < 4; ++q) { auto childkey = tile->getKey().createChildKey(q); - result.emplace_back(tile->createChild(childkey, state)); + result.emplace_back(tile->createChild(childkey, &state)); } } - if (state && state->isCanceled()) + if (state.canceled()) result.clear(); return result; }; - Job job; - job.setArena(ARENA_CREATE_CHILD); - job.setName(_key.str()); - _createChildrenFutureResult = job.dispatch(createChildrenOperation); + jobs::context c{ _key.str() }; + c.pool = jobs::get_pool(ARENA_CREATE_CHILD); + _createChildrenFutureResult = jobs::dispatch(createChildrenOperation, c); } else if (_createChildrenFutureResult.available()) @@ -738,22 +746,21 @@ TileNode::createChildren() TileKey childkey = getKey().createChildKey(quadrant); osg::observer_ptr tile_weakptr(this); - auto createChildOperation = [context, tile_weakptr, childkey](Cancelable* state) + auto createChildOperation = [context, tile_weakptr, childkey](Cancelable& state) { CreateChildResult result; osg::ref_ptr tile; - if (tile_weakptr.lock(tile) && !state->isCanceled()) - result = tile->createChild(childkey, state); + if (tile_weakptr.lock(tile) && state.canceled()) + result = tile->createChild(childkey, &state); return result; }; - Job job; - job.setArena(ARENA_CREATE_CHILD); - job.setName(childkey.str()); - - _createChildResults.emplace_back(job.dispatch(createChildOperation)); + jobs::context c; + c.name = childkey.str(); + c.pool = jobs::get_pool(ARENA_CREATE_CHILD); + _createChildResults.emplace_back(jobs::dispatch(createChildOperation, c)); } } @@ -797,7 +804,7 @@ TileNode::createChild(const TileKey& childkey, Cancelable* progress) progress); return - progress && progress->isCanceled() ? nullptr + progress && progress->canceled() ? 
nullptr : node.release(); } @@ -1303,7 +1310,7 @@ TileNode::load(TerrainCuller* culler) _loadPriority = priority; // Check the status of the load - ScopedMutexLock lock(_loadQueue); + std::lock_guard lock(_loadQueue.mutex()); if (_loadQueue.empty() == false) { diff --git a/src/osgEarthDrivers/engine_rex/TileNodeRegistry b/src/osgEarthDrivers/engine_rex/TileNodeRegistry index 70a36791c0..9c5a554b6b 100644 --- a/src/osgEarthDrivers/engine_rex/TileNodeRegistry +++ b/src/osgEarthDrivers/engine_rex/TileNodeRegistry @@ -104,7 +104,7 @@ namespace osgEarth { namespace REX TileTable _tiles; Tracker _tracker; - mutable Threading::Mutex _mutex; + mutable std::mutex _mutex; bool _notifyNeighbors; const FrameClock* _clock; diff --git a/src/osgEarthDrivers/engine_rex/TileNodeRegistry.cpp b/src/osgEarthDrivers/engine_rex/TileNodeRegistry.cpp index f01aafe32a..a07ae41bf1 100644 --- a/src/osgEarthDrivers/engine_rex/TileNodeRegistry.cpp +++ b/src/osgEarthDrivers/engine_rex/TileNodeRegistry.cpp @@ -34,8 +34,7 @@ using namespace osgEarth; //---------------------------------------------------------------------------- TileNodeRegistry::TileNodeRegistry() : - _notifyNeighbors(false), - _mutex("TileNodeRegistry(OE)") + _notifyNeighbors(false) { //nop } @@ -58,7 +57,7 @@ TileNodeRegistry::setDirty( unsigned maxLevel, const CreateTileManifest& manifest) { - ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); for( TileTable::iterator i = _tiles.begin(); i != _tiles.end(); ++i ) { @@ -76,7 +75,7 @@ TileNodeRegistry::setDirty( void TileNodeRegistry::add(TileNode* tile) { - ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); auto& entry = _tiles[tile->getKey()]; entry._tile = tile; @@ -171,7 +170,7 @@ TileNodeRegistry::stopListeningFor( void TileNodeRegistry::releaseAll(osg::State* state) { - ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); for (auto& tile : _tiles) { @@ -191,7 +190,7 @@ TileNodeRegistry::releaseAll(osg::State* state) void TileNodeRegistry::touch(TileNode* tile, osg::NodeVisitor& nv) { - ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); TileTable::iterator i = _tiles.find(tile->getKey()); @@ -208,7 +207,7 @@ TileNodeRegistry::touch(TileNode* tile, osg::NodeVisitor& nv) void TileNodeRegistry::update(osg::NodeVisitor& nv) { - ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); if (!_tilesToUpdate.empty()) { @@ -244,7 +243,7 @@ TileNodeRegistry::collectDormantTiles( unsigned maxTiles, std::vector>& output) { - ScopedMutexLock lock(_mutex); + std::lock_guard lock(_mutex); unsigned count = 0u; diff --git a/src/osgEarthDrivers/gltf/GLTFReader.h b/src/osgEarthDrivers/gltf/GLTFReader.h index 31d3247e83..ea5638fde5 100644 --- a/src/osgEarthDrivers/gltf/GLTFReader.h +++ b/src/osgEarthDrivers/gltf/GLTFReader.h @@ -52,7 +52,7 @@ using namespace osgEarth::Util; class GLTFReader { public: - using TextureCache = osgEarth::Mutexed< + using TextureCache = osgEarth::Threading::Mutexed< std::unordered_map> >; struct NodeBuilder; @@ -586,7 +586,7 @@ class GLTFReader TextureCache* texCache = reader->_texCache; if (!imageEmbedded && texCache) { - ScopedMutexLock lock(*texCache); + std::lock_guard lock(texCache->mutex()); auto texItr = texCache->find(imageURI.full()); if (texItr != texCache->end()) { @@ -604,7 +604,7 @@ class GLTFReader { if (!imageEmbedded && texCache && !cachedTex) { - ScopedMutexLock lock(*texCache); + std::lock_guard lock(texCache->mutex()); auto insResult = texCache->insert(TextureCache::value_type(imageURI.full(), tex)); if (insResult.second) { 
diff --git a/src/osgEarthDrivers/kml/KMZArchive.cpp b/src/osgEarthDrivers/kml/KMZArchive.cpp index cb818b991b..6a97f84059 100644 --- a/src/osgEarthDrivers/kml/KMZArchive.cpp +++ b/src/osgEarthDrivers/kml/KMZArchive.cpp @@ -36,12 +36,12 @@ namespace { // get a handle on the file cache. This is a temporary setup just to get things // working. - static Threading::Mutex s_fcMutex(OE_MUTEX_NAME); + static std::mutex s_fcMutex; static URIContext s_cache; if ( s_cache.empty() ) { - Threading::ScopedMutexLock exclusiveLock(s_fcMutex); + std::lock_guard exclusiveLock(s_fcMutex); if ( s_cache.empty() ) { const char* osgCacheDir = ::getenv("OSG_FILE_CACHE"); diff --git a/src/osgEarthDrivers/script_engine_duktape/DuktapeEngine.cpp b/src/osgEarthDrivers/script_engine_duktape/DuktapeEngine.cpp index 0453981af8..1f6b9ec041 100644 --- a/src/osgEarthDrivers/script_engine_duktape/DuktapeEngine.cpp +++ b/src/osgEarthDrivers/script_engine_duktape/DuktapeEngine.cpp @@ -272,9 +272,8 @@ DuktapeEngine::Context::~Context() //............................................................................ DuktapeEngine::DuktapeEngine(const ScriptEngineOptions& options) : -ScriptEngine( options ), -_options ( options ), -_contexts ( "DuktapeEngine(OE)" ) + ScriptEngine(options), + _options(options) { //nop } diff --git a/src/osgEarthDrivers/sky_simple/SimpleSkyNode.cpp b/src/osgEarthDrivers/sky_simple/SimpleSkyNode.cpp index 70dc3406fa..cee71db66c 100644 --- a/src/osgEarthDrivers/sky_simple/SimpleSkyNode.cpp +++ b/src/osgEarthDrivers/sky_simple/SimpleSkyNode.cpp @@ -207,8 +207,7 @@ namespace SimpleSkyNode::SimpleSkyNode(const SimpleSkyOptions& options) : SkyNode(options), _options(options), - _eb_initialized(false), - _eb_mutex("SimpleSkyNode.eb_mutex(OE)") + _eb_initialized(false) { construct(); @@ -344,7 +343,7 @@ SimpleSkyNode::traverse(osg::NodeVisitor& nv) // Generate LUTs on the first pass if (_useBruneton && !_eb_drawable.valid()) { - ScopedMutexLock lock(_eb_mutex); + std::lock_guard lock(_eb_mutex); if (!_eb_drawable.valid()) { _eb_drawable = new Bruneton::ComputeDrawable( diff --git a/src/osgEarthDrivers/zip/ZipArchive.cpp b/src/osgEarthDrivers/zip/ZipArchive.cpp index 413f27106c..92d59a0911 100644 --- a/src/osgEarthDrivers/zip/ZipArchive.cpp +++ b/src/osgEarthDrivers/zip/ZipArchive.cpp @@ -45,7 +45,7 @@ void ZipArchive::close() { if ( _zipLoaded ) { - OpenThreads::ScopedLock exclusive(_zipMutex); + std::lock_guard lock(_zipMutex); if ( _zipLoaded ) { // close the file (on one thread since it's a shared file) @@ -111,7 +111,7 @@ bool ZipArchive::open(const std::string& file, ArchiveStatus /*status*/, const o if ( !_zipLoaded ) { // exclusive lock when we open for the first time: - OpenThreads::ScopedLock exclusiveLock( _zipMutex ); + std::lock_guard lock(_zipMutex); if ( !_zipLoaded ) // double-check avoids race condition { @@ -510,7 +510,7 @@ std::string ZipArchive::ReadPassword(const osgDB::ReaderWriter::Options* options const ZipArchive::PerThreadData& ZipArchive::getData() const { - OpenThreads::ScopedLock exclusive( const_cast(this)->_zipMutex ); + std::lock_guard lock(_zipMutex); return getDataNoLock(); } @@ -519,7 +519,7 @@ const ZipArchive::PerThreadData& ZipArchive::getDataNoLock() const { // get/create data for the currently running thread: - size_t current = osgEarth::Threading::getCurrentThreadId(); + auto current = std::this_thread::get_id(); PerThreadDataMap::const_iterator i = _perThreadData.find( current ); diff --git a/src/osgEarthDrivers/zip/ZipArchive.h b/src/osgEarthDrivers/zip/ZipArchive.h 
index b0f8308aab..c59e4e41c3 100644 --- a/src/osgEarthDrivers/zip/ZipArchive.h +++ b/src/osgEarthDrivers/zip/ZipArchive.h @@ -5,7 +5,8 @@ #include #include -#include +#include +#include #include @@ -74,15 +75,15 @@ class ZipArchive : public osgDB::Archive std::string _filename, _password, _membuffer; - OpenThreads::Mutex _zipMutex; - bool _zipLoaded; - ZipEntryMap _zipIndex; + mutable std::mutex _zipMutex; + bool _zipLoaded; + ZipEntryMap _zipIndex; struct PerThreadData { zip_t* _zipHandle; }; - typedef std::map PerThreadDataMap; + typedef std::map PerThreadDataMap; PerThreadDataMap _perThreadData; const PerThreadData& getData() const; diff --git a/src/osgEarthProcedural/BiomeLayer.cpp b/src/osgEarthProcedural/BiomeLayer.cpp index 9ab1dfc7fc..7a32ac1c58 100644 --- a/src/osgEarthProcedural/BiomeLayer.cpp +++ b/src/osgEarthProcedural/BiomeLayer.cpp @@ -93,8 +93,6 @@ BiomeLayer::init() _autoBiomeManagement = true; - _tracker.setName("BiomeLayer.tracker(OE)"); - setProfile(Profile::create(Profile::GLOBAL_GEODETIC)); } @@ -132,7 +130,7 @@ BiomeLayer::closeImplementation() // cache: { - ScopedMutexLock lock(_imageCache); + std::lock_guard lock(_imageCache.mutex()); _imageCache.clear(); } @@ -231,7 +229,7 @@ BiomeLayer::setBlendRadius(const Distance& value) options().blendRadius() = value; bumpRevision(); - ScopedMutexLock lock(_imageCache); + std::lock_guard lock(_imageCache.mutex()); _imageCache.clear(); } @@ -269,7 +267,8 @@ BiomeLayer::createImageImplementation( #if 1 // check the cache: { - ScopedMutexLock lock(_imageCache); + std::lock_guard lock(_imageCache.mutex()); + auto iter = _imageCache.find(key); osg::ref_ptr image; if (iter != _imageCache.end() && iter->second.lock(image)) @@ -466,7 +465,7 @@ BiomeLayer::createImageImplementation( #if 1 // local cache: { - ScopedMutexLock lock(_imageCache); + std::lock_guard lock(_imageCache.mutex()); _imageCache[key] = image.get(); } #endif diff --git a/src/osgEarthProcedural/BiomeManager.cpp b/src/osgEarthProcedural/BiomeManager.cpp index cdd5b97b81..dc174be36f 100644 --- a/src/osgEarthProcedural/BiomeManager.cpp +++ b/src/osgEarthProcedural/BiomeManager.cpp @@ -124,8 +124,6 @@ namespace BiomeManager::BiomeManager() : _revision(0), - _refsAndRevision_mutex("BiomeManager.refsAndRevision(OE)"), - _residentData_mutex("BiomeManager.residentData(OE)"), _lodTransitionPixelScale(16.0f), _locked(false) { @@ -139,7 +137,7 @@ BiomeManager::BiomeManager() : void BiomeManager::ref(const Biome* biome) { - ScopedMutexLock lock(_refsAndRevision_mutex); + std::lock_guard lock(_refsAndRevision_mutex); auto item = _refs.emplace(biome, 0); ++item.first->second; @@ -153,7 +151,7 @@ BiomeManager::ref(const Biome* biome) void BiomeManager::unref(const Biome* biome) { - ScopedMutexLock lock(_refsAndRevision_mutex); + std::lock_guard lock(_refsAndRevision_mutex); auto iter = _refs.find(biome); @@ -212,7 +210,7 @@ BiomeManager::reset() // reset the reference counts, and bump the revision so the // next call to update will remove any resident data { - ScopedMutexLock lock(_refsAndRevision_mutex); + std::lock_guard lock(_refsAndRevision_mutex); for (auto& iter : _refs) { @@ -245,7 +243,7 @@ BiomeManager::recalculateResidentBiomes() // Figure out which biomes we need to load and which we can discard. 
{ - ScopedMutexLock lock(_refsAndRevision_mutex); + std::lock_guard lock(_refsAndRevision_mutex); for (auto& ref : _refs) { @@ -265,7 +263,7 @@ BiomeManager::recalculateResidentBiomes() // Update the resident biome data structure: { - ScopedMutexLock lock(_residentData_mutex); + std::lock_guard lock(_residentData_mutex); // add biomes that might need adding for (auto biome : biomes_to_add) @@ -304,7 +302,7 @@ BiomeManager::recalculateResidentBiomes() std::vector BiomeManager::getActiveBiomes() const { - ScopedMutexLock lock(_refsAndRevision_mutex); + std::lock_guard lock(_refsAndRevision_mutex); std::vector result; @@ -321,7 +319,7 @@ BiomeManager::getResidentAssets() const { std::vector result; - ScopedMutexLock lock(_residentData_mutex); + std::lock_guard lock(_residentData_mutex); result.reserve(_residentModelAssets.size()); @@ -424,7 +422,7 @@ BiomeManager::materializeNewAssets( OE_PROFILING_ZONE; // exclusive access to the resident dataset - ScopedMutexLock lock(_residentData_mutex); + std::lock_guard lock(_residentData_mutex); // Some caches to avoid duplicating data std::map texcache; @@ -837,7 +835,7 @@ BiomeManager::setCreateImpostorFunction( const std::string& group, BiomeManager::CreateImpostorFunction func) { - ScopedMutexLock lock(_residentData_mutex); + std::lock_guard lock(_residentData_mutex); _createImpostorFunctions[group] = func; } diff --git a/src/osgEarthProcedural/ImGui/LifeMapLayerGUI b/src/osgEarthProcedural/ImGui/LifeMapLayerGUI index 18dddae3a5..2fa3c294e0 100644 --- a/src/osgEarthProcedural/ImGui/LifeMapLayerGUI +++ b/src/osgEarthProcedural/ImGui/LifeMapLayerGUI @@ -228,10 +228,10 @@ namespace osgEarth TileKey key = _lifemap->getBestAvailableTileKey(keyUnderMouse, false); if (key.valid()) { - _lifemapUnderMouse = Job().dispatch([this, key, pointUnderMouse](Cancelable* c) + auto task = [this, key, pointUnderMouse](Cancelable& c) { osg::Vec4 result(0, 0, 0, 1); - osg::ref_ptr prog = new ProgressCallback(c); + osg::ref_ptr prog = new ProgressCallback(&c); auto g = _lifemap->createImage(key, prog.get()); if (g.valid()) { @@ -239,7 +239,9 @@ namespace osgEarth g.read(result, pointUnderMouse); } return result; - }); + }; + + _lifemapUnderMouse = jobs::dispatch(task); } } @@ -255,10 +257,10 @@ namespace osgEarth TileKey key = coverage->getBestAvailableTileKey(keyUnderMouse, false); if (key.valid()) { - _landcoverUnderMouse = Job().dispatch([this, key, pointUnderMouse](Cancelable *c) + auto task = [this, key, pointUnderMouse](Cancelable& c) { LandCoverSample result; - osg::ref_ptr prog = new ProgressCallback(c); + osg::ref_ptr prog = new ProgressCallback(&c); auto factory = LandCoverSample::Factory::create(_lifemap->getLandCoverLayer()); auto g = factory->createCoverage(key, prog.get()); if (g.valid()) @@ -266,7 +268,8 @@ namespace osgEarth g.readAtCoords(result, pointUnderMouse.x(), pointUnderMouse.y()); } return result; - }); + }; + _landcoverUnderMouse = jobs::dispatch(task); } } } diff --git a/src/osgEarthProcedural/ImGui/VegetationLayerGUI b/src/osgEarthProcedural/ImGui/VegetationLayerGUI index edd910d989..3d36b14892 100644 --- a/src/osgEarthProcedural/ImGui/VegetationLayerGUI +++ b/src/osgEarthProcedural/ImGui/VegetationLayerGUI @@ -521,10 +521,11 @@ namespace osgEarth if (key.valid()) { _biomeUnderMouseKey = key; - _biomeUnderMouse = Job().dispatch([&, key, p](Cancelable* c) + + _biomeUnderMouse = jobs::dispatch([&, key, p](Cancelable& c) { const Biome* result = nullptr; - osg::ref_ptr prog = new ProgressCallback(c); + osg::ref_ptr prog = new ProgressCallback(&c); 
auto g = _biolayer->createImage(key, prog.get()); if (g.valid()) { diff --git a/src/osgEarthProcedural/RoadSurfaceLayer.cpp b/src/osgEarthProcedural/RoadSurfaceLayer.cpp index e2be6e7817..78a49abe16 100644 --- a/src/osgEarthProcedural/RoadSurfaceLayer.cpp +++ b/src/osgEarthProcedural/RoadSurfaceLayer.cpp @@ -85,8 +85,6 @@ RoadSurfaceLayer::init() { ImageLayer::init(); - _keygate.setName("RoadSurfaceLayer " + getName()); - // Generate Mercator tiles by default. setProfile(Profile::create(Profile::GLOBAL_GEODETIC)); @@ -435,7 +433,7 @@ RoadSurfaceLayer::createImageImplementation(const TileKey& key, ProgressCallback progress, [layer]() { osg::ref_ptr safe; - return !layer.lock(safe) || !safe->isOpen() || !JobArena::alive(); + return !layer.lock(safe) || !safe->isOpen() || !jobs::alive(); } ); diff --git a/src/osgEarthProcedural/TextureSplattingLayer.cpp b/src/osgEarthProcedural/TextureSplattingLayer.cpp index 9e1bc9382d..7dd0065864 100644 --- a/src/osgEarthProcedural/TextureSplattingLayer.cpp +++ b/src/osgEarthProcedural/TextureSplattingLayer.cpp @@ -201,7 +201,7 @@ TextureSplattingLayer::prepareForRendering(TerrainEngine* engine) Registry::instance()->getMaxTextureSize()); // Function to load all material textures. - auto loadMaterials = [assets, tile_height_m, readOptions, maxTextureSize](Cancelable* c) -> Materials::Ptr + auto loadMaterials = [assets, tile_height_m, readOptions, maxTextureSize](Cancelable& c) -> Materials::Ptr { Materials::Ptr result = Materials::Ptr(new Materials); @@ -238,7 +238,7 @@ TextureSplattingLayer::prepareForRendering(TerrainEngine* engine) << ", t=" << std::chrono::duration_cast(t1 - t0).count() << "ms" << std::endl; - if (c && c->isCanceled()) + if (c.canceled()) return nullptr; // Set up the texture scaling: @@ -258,7 +258,7 @@ TextureSplattingLayer::prepareForRendering(TerrainEngine* engine) }; // Load material asynchronously - _materialsJob = Job().dispatch(loadMaterials); + _materialsJob = jobs::dispatch(loadMaterials); } } else diff --git a/src/osgEarthProcedural/VegetationLayer.cpp b/src/osgEarthProcedural/VegetationLayer.cpp index b2b297e574..163b0ec551 100644 --- a/src/osgEarthProcedural/VegetationLayer.cpp +++ b/src/osgEarthProcedural/VegetationLayer.cpp @@ -37,6 +37,7 @@ #include #include #include +#include #include #include @@ -417,7 +418,7 @@ VegetationLayer::update(osg::NodeVisitor& nv) if (_newAssets.available()) { - ScopedMutexLock lock(_assets); + std::lock_guard lock(_assets.mutex()); _assets = std::move(_newAssets.release()); } @@ -885,7 +886,7 @@ VegetationLayer::prepareForRendering(TerrainEngine* engine) setUseImpostorNormalMaps(options().useImpostorNormalMaps().get()); // configure the thread pool - JobArena::setConcurrency(JOB_ARENA_VEGETATION, options().threads().get()); + jobs::get_pool(JOB_ARENA_VEGETATION)->set_concurrency(options().threads().get()); } namespace @@ -1086,7 +1087,7 @@ VegetationLayer::checkForNewAssets() const osg::observer_ptr layer_weakptr(this); - auto loadNewAssets = [layer_weakptr](Cancelable* c) -> AssetsByGroup + auto loadNewAssets = [layer_weakptr](Cancelable& c) -> AssetsByGroup { OE_PROFILING_ZONE_NAMED("VegetationLayer::loadNewAssets(job)"); @@ -1101,7 +1102,7 @@ VegetationLayer::checkForNewAssets() const // re-organize the data into a form we can readily use. 
for (auto iter : biomes) { - if (c && c->isCanceled()) + if (c.canceled()) break; const Biome* biome = iter.second.biome; @@ -1157,10 +1158,12 @@ VegetationLayer::checkForNewAssets() const return result; }; - Job job; - job.setName("VegetationLayer asset loader"); - job.setArena(JOB_ARENA_VEGETATION); - _newAssets = job.dispatch(loadNewAssets); + jobs::context context{ + "VegetationLayer asset loader", + jobs::get_pool(JOB_ARENA_VEGETATION) + }; + + _newAssets = jobs::dispatch(loadNewAssets, context); return true; } @@ -1215,9 +1218,9 @@ VegetationLayer::createDrawableAsync( osg::BoundingBox tile_bbox = tile_bbox_; osg::ref_ptr framestamp = framestamp_; - auto function = [layer, key, group, tile_bbox, framestamp, backup_birthday](Cancelable* c) // -> osg::ref_ptr + auto function = [layer, key, group, tile_bbox, framestamp, backup_birthday](Cancelable& c) // -> osg::ref_ptr { - osg::ref_ptr p = new ProgressCallback(c); + osg::ref_ptr p = new ProgressCallback(&c); auto result = layer->createDrawable(key, group, tile_bbox, p.get()); if (result.valid()) asChonkDrawable(result)->setBirthday( @@ -1225,11 +1228,12 @@ VegetationLayer::createDrawableAsync( return result; }; - Job job; - job.setName("Vegetation create drawable"); - job.setArena(JOB_ARENA_VEGETATION); - job.setPriority(-range); // closer is sooner - return job.dispatch(function); + jobs::context context; + context.name = "Vegetation create drawable"; + context.pool = jobs::get_pool(JOB_ARENA_VEGETATION); + context.priority = [range]() { return -range; }; // closer is sooner + + return jobs::dispatch(function, context); } #undef RAND @@ -1268,7 +1272,7 @@ VegetationLayer::getAssetPlacements( if (loadBiomesOnDemand == false) { - ScopedMutexLock lock(_assets); + std::lock_guard lock(_assets.mutex()); auto iter = _assets.find(group); if (iter == _assets.end()) @@ -1343,14 +1347,14 @@ VegetationLayer::getAssetPlacements( AssetsByGroup newAssets = _newAssets.release(); if (!newAssets.empty()) { - ScopedMutexLock lock(_assets); + std::lock_guard lock(_assets.mutex()); _assets = std::move(newAssets); } } // make a shallow copy of assets list safely { - ScopedMutexLock lock(_assets); + std::lock_guard lock(_assets.mutex()); auto iter = _assets.find(group); if (iter == _assets.end()) { @@ -1726,7 +1730,7 @@ VegetationLayer::simulateAssetPlacement(const GeoPoint& point, const std::string if (loadBiomesOnDemand == false) { - ScopedMutexLock lock(_assets); + std::lock_guard lock(_assets.mutex()); auto iter = _assets.find(group); if (iter == _assets.end()) @@ -1800,14 +1804,14 @@ VegetationLayer::simulateAssetPlacement(const GeoPoint& point, const std::string AssetsByGroup newAssets = _newAssets.release(); if (!newAssets.empty()) { - ScopedMutexLock lock(_assets); + std::lock_guard lock(_assets.mutex()); _assets = std::move(newAssets); } } // make a shallow copy of assets list safely { - ScopedMutexLock lock(_assets); + std::lock_guard lock(_assets.mutex()); auto iter = _assets.find(group); if (iter == _assets.end()) { @@ -2032,7 +2036,7 @@ VegetationLayer::cull(const TileBatch& batch, osg::NodeVisitor& nv) const { // We don't want more than one camera creating the // same drawable, so this _tiles table tracks tiles globally. - ScopedMutexLock lock(_tiles); + std::lock_guard lock(_tiles.mutex()); // First, find a placeholder based on the same tile key, // ignoring the revision. 
(Even if we find an existing tile, @@ -2179,7 +2183,7 @@ VegetationLayer::resizeGLObjectBuffers(unsigned maxSize) { PatchLayer::resizeGLObjectBuffers(maxSize); - ScopedMutexLock lock(_tiles); + std::lock_guard lock(_tiles.mutex()); for (auto& tile : _tiles) { diff --git a/src/osgEarthSilverLining/SilverLiningContext b/src/osgEarthSilverLining/SilverLiningContext index 0be68f1beb..e751833c41 100644 --- a/src/osgEarthSilverLining/SilverLiningContext +++ b/src/osgEarthSilverLining/SilverLiningContext @@ -100,7 +100,7 @@ namespace osgEarth { namespace SilverLining bool _initAttempted; bool _initFailed; - osgEarth::Threading::Mutex _initMutex; + std::mutex _initMutex; double _maxAmbientLightingAlt; diff --git a/src/osgEarthSilverLining/SilverLiningContext.cpp b/src/osgEarthSilverLining/SilverLiningContext.cpp index a8d4d39615..5e35ca7c08 100644 --- a/src/osgEarthSilverLining/SilverLiningContext.cpp +++ b/src/osgEarthSilverLining/SilverLiningContext.cpp @@ -123,7 +123,7 @@ SilverLiningContext::initialize(osg::RenderInfo& renderInfo) if ( !_initAttempted && !_initFailed ) { // lock/double-check: - Threading::ScopedMutexLock excl(_initMutex); + std::lock_guard excl(_initMutex); if ( !_initAttempted && !_initFailed ) { _initAttempted = true; diff --git a/src/osgEarthSilverLining/SilverLiningNode.cpp b/src/osgEarthSilverLining/SilverLiningNode.cpp index 07114ce213..82e37c1ab6 100644 --- a/src/osgEarthSilverLining/SilverLiningNode.cpp +++ b/src/osgEarthSilverLining/SilverLiningNode.cpp @@ -132,7 +132,7 @@ SilverLiningNode::onSetDateTime() void SilverLiningNode::traverse(osg::NodeVisitor& nv) { - static Threading::Mutex s_mutex(OE_MUTEX_NAME); + static std::mutex s_mutex; if ( nv.getVisitorType() == nv.CULL_VISITOR ) { @@ -140,7 +140,7 @@ SilverLiningNode::traverse(osg::NodeVisitor& nv) osg::Camera* camera = cv->getCurrentCamera(); if ( camera ) { - Threading::ScopedMutexLock lock(s_mutex); + std::lock_guard lock(s_mutex); CameraContextMap::const_iterator i = _contexts.find(camera); if (i == _contexts.end()) @@ -158,7 +158,7 @@ SilverLiningNode::traverse(osg::NodeVisitor& nv) else if (nv.getVisitorType() == nv.UPDATE_VISITOR) { { - Threading::ScopedMutexLock lock(s_mutex); + std::lock_guard lock(s_mutex); if (!_camerasToAdd.empty()) { for (CameraSet::const_iterator i = _camerasToAdd.begin(); i != _camerasToAdd.end(); ++i) @@ -179,7 +179,7 @@ SilverLiningNode::traverse(osg::NodeVisitor& nv) else { - Threading::ScopedMutexLock lock(s_mutex); + std::lock_guard lock(s_mutex); for (CameraContextMap::const_iterator i = _contexts.begin(); i != _contexts.end(); ++i) { i->second->accept(nv); diff --git a/src/osgEarthTriton/TritonContext b/src/osgEarthTriton/TritonContext index 021584c30c..29bdddfe01 100644 --- a/src/osgEarthTriton/TritonContext +++ b/src/osgEarthTriton/TritonContext @@ -97,7 +97,7 @@ namespace osgEarth { namespace Triton bool _initAttempted; bool _initFailed; - Threading::Mutex _initMutex; + std::mutex _initMutex; osg::ref_ptr _srs; diff --git a/src/osgEarthTriton/TritonContext.cpp b/src/osgEarthTriton/TritonContext.cpp index 2cc79b889c..7bbf4f2fdf 100644 --- a/src/osgEarthTriton/TritonContext.cpp +++ b/src/osgEarthTriton/TritonContext.cpp @@ -86,7 +86,7 @@ TritonContext::initialize(osg::RenderInfo& renderInfo) if ( !_initAttempted && !_initFailed ) { // lock/double-check: - Threading::ScopedMutexLock excl(_initMutex); + std::lock_guard excl(_initMutex); if ( !_initAttempted && !_initFailed ) { _initAttempted = true; diff --git a/src/osgEarthTriton/TritonHeightMap.cpp 
b/src/osgEarthTriton/TritonHeightMap.cpp index c036e9c463..b6a75cbd5b 100644 --- a/src/osgEarthTriton/TritonHeightMap.cpp +++ b/src/osgEarthTriton/TritonHeightMap.cpp @@ -157,8 +157,8 @@ TritonHeightMap::configure(unsigned texSize, osg::State& state) if (_texSize == 0u) { // first time through, single-lane and set up FBO parameters. - static Threading::Mutex s_mutex(OE_MUTEX_NAME); - s_mutex.lock(); + static std::mutex s_mutex; + std::lock_guard lock(s_mutex); if (_texSize == 0u) { @@ -168,8 +168,6 @@ TritonHeightMap::configure(unsigned texSize, osg::State& state) result = false; } } - - s_mutex.unlock(); } return result; } diff --git a/tests/osm.earth b/tests/osm.earth index a6fd5faaf8..110cd88b5b 100644 --- a/tests/osm.earth +++ b/tests/osm.earth @@ -4,7 +4,7 @@ osgEarth Sample - OpenStreetMap Features - http://[abc].tile.openstreetmap.org/{z}/{x}/{y}.png + https://[abc].tile.openstreetmap.org/{z}/{x}/{y}.png spherical-mercator ©OpenStreetMap contributors diff --git a/tests/viewpoints.xml b/tests/viewpoints.xml index e5b20c1c8b..5cab7f4970 100644 --- a/tests/viewpoints.xml +++ b/tests/viewpoints.xml @@ -138,13 +138,13 @@ 72.19622826110572 +proj=longlat +datum=WGS84 +no_defs - - 0.252151 - -50.4646 - 1081.05m - -117.1638458565494 - 32.71773276857689 - -6.852578997612 - +proj=longlat +datum=WGS84 +no_defs - + + -62.0354 + -16.6573 + 2923.13m + -87.62551572200417 + 41.89062024802782 + 166.1599231623113 + +proj=longlat +datum=WGS84 +no_defs +
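For reference, a minimal, self-contained sketch of how the scheduling API introduced by this patch is used, written against the weethreads.h header shown above. This is not part of the patch: the include path, the assumption that WEETHREADS_NAMESPACE resolves to the "jobs" alias used at the call sites in the diff, and the placement of WEETHREADS_INSTANCE are all illustrative.

// Hypothetical standalone example; not part of the patch.
// Assumes the header installs as <osgEarth/weethreads.h> and that
// WEETHREADS_NAMESPACE resolves to "jobs", matching the call sites above.
#include <osgEarth/weethreads.h>
#include <iostream>

// Instantiate the runtime singleton exactly once per application
// (omit this if another translation unit, e.g. osgEarth itself, already does).
WEETHREADS_INSTANCE

int main()
{
    // Named pool with a fixed thread count, as the rex engine does for tile loads.
    jobs::jobpool* pool = jobs::get_pool("example.pool");
    pool->set_concurrency(2u);

    // Fire-and-forget job with no return value:
    jobs::dispatch([]() { std::cout << "hello from a worker thread\n"; });

    // Prioritized job returning a value; the lambda can poll its cancelable& argument.
    jobs::context ctx;
    ctx.name = "answer";
    ctx.pool = pool;
    ctx.priority = []() { return 1.0f; };
    jobs::future<int> answer = jobs::dispatch(
        [](jobs::cancelable& state) { return state.canceled() ? 0 : 42; }, ctx);

    // Group several jobs; jobgroup::join() blocks until they have all finished.
    jobs::jobgroup group;
    jobs::context grouped;
    grouped.group = &group;
    for (int i = 0; i < 4; ++i)
        jobs::dispatch([i]() { std::cout << "grouped job " << i << "\n"; }, grouped);
    group.join();

    // Block for the single result (or poll available()/canceled() instead).
    std::cout << "answer = " << answer.join() << "\n";

    jobs::shutdown();
    return 0;
}

The same pattern appears throughout the diff: call sites fill in a jobs::context (name, pool, priority function, optional group), dispatch a lambda that may poll its cancelable& argument, and either join() the returned future or simply let it go out of scope to cancel the pending work.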