diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp
index e4ff32a71d3..40b45bc84c8 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp
@@ -152,23 +152,28 @@ bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
     return false;
   }
 
-  // Concurrent stack processing
-  if (heap->is_evacuation_in_progress()) {
-    entry_thread_roots();
-  }
-
-  // Process weak roots that might still point to regions that would be broken by cleanup
+  // Process weak roots that might still point to regions that would be broken by cleanup. This must precede cleanup.
   if (heap->is_concurrent_weak_root_in_progress()) {
     entry_weak_refs();
     entry_weak_roots();
   }
 
-  // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim
-  // the space. This would be the last action if there is nothing to evacuate. Note that
-  // we will not age young-gen objects in the case that we skip evacuation.
+  // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim the space. We do this before
+  // concurrent roots and concurrent class unloading so as to expedite recycling of immediate garbage. Note that
+  // we will not age young-gen objects in the case that we skip evacuation for abbreviated cycles.
   entry_cleanup_early();
 
+#ifdef KELVIN_DEPRECATE
+  // We just dumped the free set after rebuilding the free set in final_mark. Let's not dump it again.
+  // There may be some contention with mutator and GC worker threads that are trying to begin their evacuation
+  // efforts, so we would prefer not to grab the heap lock right here.
   heap->free_set()->log_status_under_lock();
+#endif
+
+  // Concurrent stack processing
+  if (heap->is_evacuation_in_progress()) {
+    entry_thread_roots();
+  }
 
   // Perform concurrent class unloading
   if (heap->unload_classes() &&
@@ -746,7 +751,7 @@ void ShenandoahConcurrentGC::op_final_mark() {
 
     // Arm nmethods/stack for concurrent processing
     if (!heap->collection_set()->is_empty()) {
-      // Iff objects will be evaluated, arm the nmethod barriers. These will be disarmed
+      // Iff objects will be evacuated, arm the nmethod barriers. These will be disarmed
       // under the same condition (established in prepare_concurrent_roots) after strong
       // root evacuation has completed (see op_strong_roots).
       ShenandoahCodeRoots::arm_nmethods_for_evac();
@@ -1061,7 +1066,10 @@ void ShenandoahConcurrentGC::op_strong_roots() {
 }
 
 void ShenandoahConcurrentGC::op_cleanup_early() {
-  ShenandoahHeap::heap()->free_set()->recycle_trash();
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+  if (heap->free_set()->recycle_trash()) {
+    heap->control_thread()->notify_alloc_failure_waiters(false);
+  }
 }
 
 void ShenandoahConcurrentGC::op_evacuate() {
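
The op_cleanup_early() change above establishes a new contract: recycle_trash() reports whether any trash region actually became allocatable, and only in that case are allocation-failure waiters woken, with the failure flag deliberately left set because the cycle is still in progress. Below is a minimal, self-contained sketch of that contract, not HotSpot code; the Mock* types and the simplified boolean flag are assumptions for illustration:

```cpp
#include <condition_variable>
#include <iostream>
#include <mutex>

struct MockControlThread {
  std::mutex lock;
  std::condition_variable waiters;
  bool alloc_failure_gc = true;   // stays set until the cycle really ends

  // Analogous to notify_alloc_failure_waiters(bool clear_alloc_failure):
  // wake waiters so they can retry allocation; optionally leave the flag set.
  void notify_alloc_failure_waiters(bool clear_alloc_failure) {
    {
      std::lock_guard<std::mutex> g(lock);
      if (clear_alloc_failure) alloc_failure_gc = false;
    }
    waiters.notify_all();
  }
};

struct MockFreeSet {
  int trash_regions = 3;
  // Analogous to the new bool ShenandoahFreeSet::recycle_trash():
  // returns true iff at least one trash region became allocatable.
  bool recycle_trash() {
    bool recycled = trash_regions > 0;
    trash_regions = 0;
    return recycled;
  }
};

// Analogous to the revised op_cleanup_early(): wake allocators only when
// recycling actually freed something, and do not end the alloc-failure GC.
void op_cleanup_early(MockFreeSet& fs, MockControlThread& ct) {
  if (fs.recycle_trash()) {
    ct.notify_alloc_failure_waiters(/*clear_alloc_failure=*/false);
  }
}

int main() {
  MockFreeSet fs;
  MockControlThread ct;
  op_cleanup_early(fs, ct);
  std::cout << "flag still set: " << ct.alloc_failure_gc << "\n"; // prints 1
}
```

Waking without clearing the flag is safe because, as the controller changes below show, waiters re-test the flag in a loop before going back to sleep.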
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahController.cpp b/src/hotspot/share/gc/shenandoah/shenandoahController.cpp
index 6d6d21c4066..925b227d4c7 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahController.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahController.cpp
@@ -53,6 +53,10 @@ size_t ShenandoahController::get_gc_id() {
   return Atomic::load(&_gc_id);
 }
 
+void ShenandoahController::anticipate_immediate_garbage(size_t anticipated_immediate_garbage) {
+  Atomic::store(&_anticipated_immediate_garbage, anticipated_immediate_garbage);
+}
+
 void ShenandoahController::handle_alloc_failure(ShenandoahAllocRequest& req, bool block) {
   ShenandoahHeap* heap = ShenandoahHeap::heap();
 
@@ -65,11 +69,12 @@ void ShenandoahController::handle_alloc_failure(ShenandoahAllocRequest& req, boo
                  req.type_string(),
                  byte_size_in_proper_unit(req.size() * HeapWordSize),
                  proper_unit_for_byte_size(req.size() * HeapWordSize));
 
-    // Now that alloc failure GC is scheduled, we can abort everything else
-    heap->cancel_gc(GCCause::_allocation_failure);
+    if (Atomic::load(&_anticipated_immediate_garbage) < req.size()) {
+      // Now that alloc failure GC is scheduled, we can abort everything else
+      heap->cancel_gc(GCCause::_allocation_failure);
+    }
   }
-
   if (block) {
     MonitorLocker ml(&_alloc_failure_waiters_lock);
     while (is_alloc_failure_gc()) {
@@ -92,9 +97,11 @@ void ShenandoahController::handle_alloc_failure_evac(size_t words) {
   heap->cancel_gc(GCCause::_shenandoah_allocation_failure_evac);
 }
 
-void ShenandoahController::notify_alloc_failure_waiters() {
-  _alloc_failure_gc.unset();
-  _humongous_alloc_failure_gc.unset();
+void ShenandoahController::notify_alloc_failure_waiters(bool clear_alloc_failure) {
+  if (clear_alloc_failure) {
+    _alloc_failure_gc.unset();
+    _humongous_alloc_failure_gc.unset();
+  }
   MonitorLocker ml(&_alloc_failure_waiters_lock);
   ml.notify_all();
 }
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahController.hpp b/src/hotspot/share/gc/shenandoah/shenandoahController.hpp
index 6c28ff4e969..53372501bce 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahController.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahController.hpp
@@ -43,6 +43,8 @@ class ShenandoahController: public ConcurrentGCThread {
   shenandoah_padding(1);
   volatile size_t _gc_id;
   shenandoah_padding(2);
+  volatile size_t _anticipated_immediate_garbage;
+  shenandoah_padding(3);
 
 protected:
   ShenandoahSharedFlag _alloc_failure_gc;
@@ -71,6 +73,8 @@ class ShenandoahController: public ConcurrentGCThread {
   // until another cycle runs and clears the alloc failure gc flag.
   void handle_alloc_failure(ShenandoahAllocRequest& req, bool block);
 
+  void anticipate_immediate_garbage(size_t anticipated_immediate_garbage_words);
+
   // Invoked for allocation failures during evacuation. This cancels
   // the collection cycle without blocking.
   void handle_alloc_failure_evac(size_t words);
@@ -79,7 +83,7 @@ class ShenandoahController: public ConcurrentGCThread {
   bool try_set_alloc_failure_gc(bool is_humongous);
 
   // Notify threads waiting for GC to complete.
-  void notify_alloc_failure_waiters();
+  void notify_alloc_failure_waiters(bool clear_alloc_failure = true);
 
   // True if allocation failure flag has been set.
   bool is_alloc_failure_gc();
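
The controller changes above do two related things: handle_alloc_failure() now skips cancelling the running cycle when the immediate garbage anticipated at final mark could satisfy the failed request, and notify_alloc_failure_waiters() can wake waiters without clearing the failure flags. A hedged sketch of the gating decision follows, using plain std::atomic stand-ins rather than the HotSpot Atomic:: API; the names and sizes are illustrative only:

```cpp
#include <atomic>
#include <cstddef>
#include <iostream>

// Hypothetical stand-ins for the controller state; not HotSpot code.
std::atomic<size_t> anticipated_immediate_garbage{0}; // words, published at final mark
std::atomic<bool>   gc_cancelled{false};

// Analogous to the new test in handle_alloc_failure(): only abort the running
// cycle if the garbage already identified cannot possibly satisfy the request.
void maybe_cancel_gc(size_t req_words) {
  if (anticipated_immediate_garbage.load() < req_words) {
    gc_cancelled.store(true); // heap->cancel_gc(GCCause::_allocation_failure)
  }
}

int main() {
  anticipated_immediate_garbage.store(1024 * 1024); // 1M words of immediate trash expected
  maybe_cancel_gc(256);                    // small request: let the cycle finish and recycle
  std::cout << gc_cancelled.load() << "\n";  // 0: cycle keeps running
  maybe_cancel_gc(2 * 1024 * 1024);          // request exceeds anticipated garbage
  std::cout << gc_cancelled.load() << "\n";  // 1: cancel and handle the failure
}
```

Because blocked threads wait in `while (is_alloc_failure_gc())`, a wakeup that leaves the flag set simply lets them retry the allocation against freshly recycled regions and re-block if it still fails.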
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp
index 5fcaf0c673c..de7970f592e 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp
@@ -111,6 +111,9 @@ ShenandoahRegionPartitions::ShenandoahRegionPartitions(size_t max_regions, Shena
 }
 
 inline bool ShenandoahFreeSet::can_allocate_from(ShenandoahHeapRegion *r) const {
+  // This test for trash regions is conservative. Strictly, we only need to assure that concurrent weak reference
+  // processing is not under way, and that finishes long before concurrent weak root processing. It is ok to be
+  // conservative: at the end of weak reference processing, we recycle trashed regions en masse.
   return r->is_empty() || (r->is_trash() && !_heap->is_concurrent_weak_root_in_progress());
 }
 
@@ -1230,13 +1233,17 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
   return _heap->get_region(beg)->bottom();
 }
 
-void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion* r) {
+bool ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion* r) {
+  bool result = false;
   if (r->is_trash()) {
     r->recycle();
+    result = true;
   }
+  return result;
 }
 
-void ShenandoahFreeSet::recycle_trash() {
+bool ShenandoahFreeSet::recycle_trash() {
+  bool result = false;
   // lock is not reentrable, check we don't have it
   shenandoah_assert_not_heaplocked();
 
@@ -1256,9 +1263,13 @@ void ShenandoahFreeSet::recycle_trash() {
     ShenandoahHeapLocker locker(_heap->lock());
     const jlong deadline = os::javaTimeNanos() + deadline_ns;
     while (idx < count && os::javaTimeNanos() < deadline) {
-      try_recycle_trashed(_trash_regions[idx++]);
+      if (try_recycle_trashed(_trash_regions[idx++])) {
+        result = true;
+      }
     }
   }
+  _heap->control_thread()->anticipate_immediate_garbage((size_t) 0);
+  return result;
 }
 
 void ShenandoahFreeSet::flip_to_old_gc(ShenandoahHeapRegion* r) {
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp
index 5f69ec47cfd..d0993a9b7e3 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp
@@ -330,7 +330,9 @@ class ShenandoahFreeSet : public CHeapObj<mtGC> {
   void flip_to_old_gc(ShenandoahHeapRegion* r);
 
   void clear_internal();
-  void try_recycle_trashed(ShenandoahHeapRegion *r);
+
+  // If region r is a trash region, recycle it, returning true iff r was recycled.
+  bool try_recycle_trashed(ShenandoahHeapRegion *r);
 
   // Returns true iff this region is entirely available, either because it is empty() or because it has been found to represent
   // immediate trash and we'll be able to immediately recycle it. Note that we cannot recycle immediate trash if
@@ -410,7 +412,8 @@ class ShenandoahFreeSet : public CHeapObj<mtGC> {
   // for evacuation, invoke this to make regions available for mutator allocations.
   void move_regions_from_collector_to_mutator(size_t cset_regions);
 
-  void recycle_trash();
+  // Recycle any trash that is known to the freeset, returning true if any trash was recycled.
+  bool recycle_trash();
 
   // Acquire heap lock and log status, assuming heap lock is not acquired by the caller.
   void log_status_under_lock();
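
recycle_trash() above recycles trash in deadline-bounded batches under the heap lock and now returns whether anything was recycled, resetting the anticipated-immediate-garbage estimate once the trash is gone. Here is a compact sketch of the deadline-bounded loop using std::chrono in place of os::javaTimeNanos(); the Region type is illustrative, and the real code's per-batch heap-lock re-acquisition is omitted:

```cpp
#include <chrono>
#include <iostream>
#include <vector>

// Illustrative stand-in for a trash region; not HotSpot code.
struct Region {
  bool trash;
  bool recycle() { bool r = trash; trash = false; return r; } // mirrors try_recycle_trashed
};

// Analogous to the reworked recycle_trash(): process regions under a deadline,
// reporting whether anything was actually recycled. The deadline arithmetic
// mirrors os::javaTimeNanos() + deadline_ns in the patch.
bool recycle_trash(std::vector<Region>& regions, std::chrono::nanoseconds budget) {
  using clock = std::chrono::steady_clock;
  bool result = false;
  const auto deadline = clock::now() + budget;
  size_t idx = 0;
  while (idx < regions.size() && clock::now() < deadline) {
    if (regions[idx++].recycle()) {
      result = true;
    }
  }
  return result;
}

int main() {
  std::vector<Region> regions{{true}, {false}, {true}};
  std::cout << recycle_trash(regions, std::chrono::milliseconds(10)) << "\n"; // 1
  std::cout << recycle_trash(regions, std::chrono::milliseconds(10)) << "\n"; // 0: nothing left
}
```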
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp
index 4b4662a7702..fd0a5f10c0a 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp
@@ -753,6 +753,9 @@ void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) {
     // We are preparing for evacuation. At this time, we ignore cset region tallies.
     size_t first_old, last_old, num_old;
     heap->free_set()->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
+    size_t anticipated_immediate_garbage = (old_cset_regions + young_cset_regions) * ShenandoahHeapRegion::region_size_words();
+    heap->control_thread()->anticipate_immediate_garbage(anticipated_immediate_garbage);
+
     // Free set construction uses reserve quantities, because they are known to be valid here
     heap->free_set()->finish_rebuild(young_cset_regions, old_cset_regions, num_old, true);
   }
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
index 0ef91b2d81a..342c023d180 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
@@ -2436,9 +2436,9 @@ void ShenandoahHeap::update_heap_region_states(bool concurrent) {
                             ShenandoahPhaseTimings::final_update_refs_update_region_states :
                             ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);
 
-    final_update_refs_update_region_states();
+    final_update_refs_update_region_states();
 
-    assert_pinned_region_status();
+    assert_pinned_region_status();
   }
 
   {
@@ -2462,6 +2462,9 @@ void ShenandoahHeap::rebuild_free_set(bool concurrent) {
   size_t young_cset_regions, old_cset_regions;
   size_t first_old_region, last_old_region, old_region_count;
   _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count);
+  size_t anticipated_immediate_garbage = (old_cset_regions + young_cset_regions) * ShenandoahHeapRegion::region_size_words();
+  control_thread()->anticipate_immediate_garbage(anticipated_immediate_garbage);
+
   // If there are no old regions, first_old_region will be greater than last_old_region
   assert((first_old_region > last_old_region) ||
          ((last_old_region + 1 - first_old_region >= old_region_count) &&
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp
index 42fb4e2cd9d..38c0473f667 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp
@@ -474,6 +474,9 @@ void ShenandoahOldGeneration::prepare_regions_and_collection_set(bool concurrent
   size_t cset_young_regions, cset_old_regions;
   size_t first_old, last_old, num_old;
   heap->free_set()->prepare_to_rebuild(cset_young_regions, cset_old_regions, first_old, last_old, num_old);
+  size_t anticipated_immediate_garbage = (cset_young_regions + cset_old_regions) * ShenandoahHeapRegion::region_size_words();
+  heap->control_thread()->anticipate_immediate_garbage(anticipated_immediate_garbage);
+
   // This is just old-gen completion. No future budgeting required here. The only reason to rebuild the freeset here
   // is in case there was any immediate old garbage identified.
   heap->free_set()->finish_rebuild(cset_young_regions, cset_old_regions, num_old);
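
All three rebuild sites above publish the same estimate: the number of immediate-garbage regions discovered by prepare_to_rebuild() multiplied by the region size in words. A small worked example of that arithmetic; the region size here is an assumed value, while the real one comes from ShenandoahHeapRegion::region_size_words():

```cpp
#include <cstddef>
#include <iostream>

int main() {
  // Assumed configuration: 2 MB regions with 8-byte heap words.
  const size_t region_size_words = 2 * 1024 * 1024 / 8; // 262144 words per region
  size_t young_cset_regions = 5;  // immediate-garbage regions found at final mark
  size_t old_cset_regions   = 2;
  // Same expression as the patch: regions that will become trash, in words.
  size_t anticipated_immediate_garbage =
      (old_cset_regions + young_cset_regions) * region_size_words;
  std::cout << anticipated_immediate_garbage << "\n"; // 7 * 262144 = 1835008 words
}
```

This published value lets handle_alloc_failure() judge whether a pending request can be satisfied by trash that cleanup is about to recycle; recycle_trash() resets it to zero once that trash has actually been reclaimed.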