From 6852d00bf33fa855eacd29eb32b3434bdb04b641 Mon Sep 17 00:00:00 2001
From: Roman Kennke
Date: Tue, 25 Jun 2024 12:58:42 +0000
Subject: [PATCH] Remove debug fluff

---
 src/hotspot/share/gc/g1/g1ConcurrentMark.cpp  | 14 +------
 src/hotspot/share/gc/g1/g1ConcurrentMark.hpp  |  2 +-
 .../share/gc/g1/g1ConcurrentMark.inline.hpp   |  2 -
 .../share/gc/g1/g1OopClosures.inline.hpp      |  7 +---
 .../share/gc/g1/g1ParScanThreadState.cpp      |  3 --
 .../gc/g1/g1ParScanThreadState.inline.hpp     |  2 -
 src/hotspot/share/gc/g1/g1TaskQueueEntry.hpp  | 40 ++++++-------
 src/hotspot/share/gc/g1/g1YoungCollector.cpp  |  1 -
 src/hotspot/share/gc/shared/gc_globals.hpp    |  2 +-
 9 files changed, 18 insertions(+), 55 deletions(-)

diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
index 4ab7152b1b24f..da26c13348557 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
@@ -123,9 +123,8 @@ bool G1CMMarkStack::initialize() {
   size_t initial_capacity = MarkStackSize;
   size_t max_capacity = MarkStackSizeMax;
 
-  size_t const TaskEntryChunkSizeInVoidStar = sizeof(TaskQueueEntryChunk) /* sizeof(G1TaskQueueEntry) NOT_LP64(* 2)*/;
-  //assert(TaskEntryChunkSizeInVoidStar == 1024, "wrong chunk size: " SIZE_FORMAT, TaskEntryChunkSizeInVoidStar);
-  log_trace(gc)("capacity_alignment: " SIZE_FORMAT, capacity_alignment());
+  size_t const TaskEntryChunkSizeInVoidStar = sizeof(TaskQueueEntryChunk) / sizeof(G1TaskQueueEntry);
+
   size_t max_num_chunks = align_up(max_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
   size_t initial_num_chunks = align_up(initial_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
 
@@ -143,7 +142,6 @@ bool G1CMMarkStack::initialize() {
 
   log_debug(gc)("Initialize mark stack with " SIZE_FORMAT " chunks, maximum " SIZE_FORMAT,
                 initial_num_chunks, max_capacity);
-  log_trace(gc)("max_num_chunks: " SIZE_FORMAT, max_num_chunks);
 
   return _chunk_allocator.initialize(initial_num_chunks, max_num_chunks);
 }
@@ -272,10 +270,8 @@ bool G1CMMarkStack::ChunkAllocator::reserve(size_t new_capacity) {
                       _max_capacity - _capacity;
 
-    log_trace(gc)("allocating " SIZE_FORMAT " TaskQueueArrayChunks", bucket_capacity);
     TaskQueueEntryChunk* bucket_base = MmapArrayAllocator<TaskQueueEntryChunk>::allocate_or_null(bucket_capacity, mtGC);
-    log_trace(gc)("allocated " SIZE_FORMAT " TaskQueueArrayChunks " PTR_FORMAT, bucket_capacity, p2i(bucket_base));
 
     if (bucket_base == nullptr) {
       log_warning(gc)("Failed to reserve memory for increasing the overflow mark stack capacity with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.",
                       bucket_capacity, bucket_capacity * sizeof(TaskQueueEntryChunk));
       return false;
@@ -292,10 +288,8 @@ void G1CMMarkStack::expand() {
 }
 
 void G1CMMarkStack::add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem) {
-  log_trace(gc)("adding chunk to list");
   elem->next = *list;
   *list = elem;
-  log_trace(gc)("adding chunk to list done");
 }
 
 void G1CMMarkStack::add_chunk_to_chunk_list(TaskQueueEntryChunk* elem) {
@@ -344,9 +338,7 @@ bool G1CMMarkStack::par_push_chunk(G1TaskQueueEntry* ptr_arr) {
     }
   }
 
-  log_trace(gc)("par_push_chunk before copy");
   Copy::conjoint_memory_atomic(ptr_arr, new_chunk->data, EntriesPerChunk * sizeof(G1TaskQueueEntry));
-  log_trace(gc)("par_push_chunk after copy");
 
   add_chunk_to_chunk_list(new_chunk);
 
@@ -360,9 +352,7 @@ bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) {
     return false;
   }
 
-  log_trace(gc)("par_pop_chunk before copy");
   Copy::conjoint_memory_atomic(cur->data, ptr_arr, EntriesPerChunk * sizeof(G1TaskQueueEntry));
-  log_trace(gc)("par_pop_chunk after copy");
 
   add_chunk_to_free_list(cur);
   return true;
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp
index c6855506ae5a7..17dbe55533bd3 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp
@@ -99,7 +99,7 @@ class G1CMSubjectToDiscoveryClosure : public BoolObjectClosure {
 class G1CMMarkStack {
 public:
   // Number of TaskQueueEntries that can fit in a single chunk.
-  static const size_t EntriesPerChunk = 512 - 1 /* One reference for the next pointer */;
+  static const size_t EntriesPerChunk = 1024 - 1 /* One reference for the next pointer */;
 private:
   struct TaskQueueEntryChunk {
     TaskQueueEntryChunk* next;
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp
index ce443e46ea952..5d5cce4e199ef 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp
@@ -95,13 +95,11 @@ inline void G1CMMarkStack::iterate(Fn fn) const {
     guarantee(num_chunks <= _chunks_in_chunk_list, "Found " SIZE_FORMAT " oop chunks which is more than there should be", num_chunks);
 
     for (size_t i = 0; i < EntriesPerChunk; ++i) {
-      log_trace(gc)("iterating chunk data: " SIZE_FORMAT, i);
       if (cur->data[i].is_null()) {
         break;
       }
       fn(cur->data[i]);
     }
-    log_trace(gc)("iterating chunk next: " PTR_FORMAT, p2i(cur->next));
     cur = cur->next;
     num_chunks++;
   }
diff --git a/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp b/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp
index 9580603538c32..431586c9608c0 100644
--- a/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp
@@ -60,11 +60,8 @@ inline void G1ScanClosureBase::prefetch_and_push(T* p, const oop obj) {
          (obj->is_forwarded() &&
           obj->forwardee() == RawAccess<>::oop_load(p)),
          "p should still be pointing to obj or to its forwardee");
-  log_trace(gc)("push obj task 2: %d", sizeof(G1TaskQueueEntry));
-  G1TaskQueueEntry entry(p);
-  log_trace(gc)("push obj created");
-  _par_scan_state->push_on_queue(entry);
-  log_trace(gc)("push obj task 2 done");
+
+  _par_scan_state->push_on_queue(G1TaskQueueEntry(p));
 }
 
 template <class T>
diff --git a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp
index 8b028f9f447fc..ae8e5353b1c75 100644
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp
@@ -235,7 +235,6 @@ void G1ParScanThreadState::do_partial_array(oop obj, int slice, int pow) {
   while ((1 << pow) > (int)ObjArrayMarkingStride &&
          (slice * 2 < G1TaskQueueEntry::slice_size())) {
     pow--;
     slice *= 2;
-    log_trace(gc)("pushing new partial array");
     push_on_queue(G1TaskQueueEntry(array, slice - 1, pow));
   }
@@ -304,7 +303,6 @@ void G1ParScanThreadState::start_partial_objarray(G1HeapRegionAttr dest_attr,
     pow--;
     slice = 2;
     last_idx = (1 << pow);
-    log_trace(gc)("pushing new partial array overflow");
     push_on_queue(G1TaskQueueEntry(array, 1, pow));
   }
 
@@ -317,7 +315,6 @@ void G1ParScanThreadState::start_partial_objarray(G1HeapRegionAttr dest_attr,
     int right_slice = slice * 2;
     int left_slice_end = left_slice * (1 << pow);
     if (left_slice_end < len) {
-      log_trace(gc)("pushing new partial array 2");
       push_on_queue(G1TaskQueueEntry(array, left_slice, pow));
       slice = right_slice;
       last_idx = left_slice_end;
diff --git a/src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp b/src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp
index b94071e75258f..edb23300e902d 100644
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp
@@ -36,9 +36,7 @@ inline void G1ParScanThreadState::push_on_queue(G1TaskQueueEntry task) {
   verify_task(task);
-  log_trace(gc)("push_on_queue before");
   _task_queue->push(task);
-  log_trace(gc)("push_on_queue after");
 }
 
 bool G1ParScanThreadState::needs_partial_trimming() const {
   return !_task_queue->overflow_empty() ||
          (_task_queue->size() > _stack_trim_upper_threshold);
diff --git a/src/hotspot/share/gc/g1/g1TaskQueueEntry.hpp b/src/hotspot/share/gc/g1/g1TaskQueueEntry.hpp
index ac67c595fcdef..c5272488002fa 100644
--- a/src/hotspot/share/gc/g1/g1TaskQueueEntry.hpp
+++ b/src/hotspot/share/gc/g1/g1TaskQueueEntry.hpp
@@ -26,9 +26,6 @@
 #ifndef SHARE_GC_G1_G1TASKQUEUEENTRY_HPP
 #define SHARE_GC_G1_G1TASKQUEUEENTRY_HPP
 
-#include "logging/log.hpp"
-#include "logging/logTag.hpp"
-
 // A task queue entry that encodes both regular oops, and the array oops plus sliceing data for
 // parallel array processing.
 // The design goal is to make the regular oop ops very fast, because that would be the prevailing
@@ -207,39 +204,26 @@ class G1TaskQueueEntry {
   uint16_t _pow;
 
 public:
-  G1TaskQueueEntry() {
-    _ptr = nullptr;
-    _slice = 0;
-    _pow = 0;
-    assert(sizeof(G1TaskQueueEntry) == 8, "incorrect size");
-    log_trace(gc)("init new G1TaskQueueEntry");
+  G1TaskQueueEntry() :
+    _ptr(nullptr), _slice(0), _pow(0) {
   }
-  G1TaskQueueEntry(oop o) {
-    _ptr = cast_from_oop<void*>(o);
-    _slice = 0;
-    _pow = 0;
-    assert(sizeof(G1TaskQueueEntry) == 8, "incorrect size");
+  G1TaskQueueEntry(oop o) :
+    _ptr(cast_from_oop<void*>(o)),
+    _slice(0), _pow(0) {
     assert(!is_array_slice(), "task should not be sliced");
-    log_trace(gc)("init new G1TaskQueueEntry: oop: " PTR_FORMAT, p2i(o));
   }
-  G1TaskQueueEntry(oop* o) {
-    _ptr = reinterpret_cast<void*>(o);
-    _slice = 0;
-    _pow = 0;
-    assert(sizeof(G1TaskQueueEntry) == 8, "incorrect size");
+  G1TaskQueueEntry(oop* o) :
+    _ptr(reinterpret_cast<void*>(o)),
+    _slice(0), _pow(0) {
     assert(!is_array_slice(), "task should not be sliced");
-    log_trace(gc)("init new G1TaskQueueEntry: oop*: " PTR_FORMAT, p2i(o));
   }
   G1TaskQueueEntry(narrowOop* o) {
     ShouldNotReachHere();
   }
-  G1TaskQueueEntry(oop o, int slice, int pow) {
-    _ptr = cast_from_oop<void*>(o);
-    _slice = slice;
-    _pow = pow;
-    assert(sizeof(G1TaskQueueEntry) == 8, "incorrect size");
+  G1TaskQueueEntry(oop o, int slice, int pow) :
+    _ptr(cast_from_oop<void*>(o)),
+    _slice(slice), _pow(pow) {
     assert(is_array_slice(), "task should be sliced");
-    log_trace(gc)("init new G1TaskQueueEntry: oop: " PTR_FORMAT ", slice: %d, pow: %d", p2i(o), slice, pow);
   }
 
   // Trivially copyable.
@@ -247,7 +231,7 @@ class G1TaskQueueEntry {
 public:
   bool is_oop_ptr() const { return !is_array_slice(); }
   bool is_narrow_oop_ptr() const { return false; }
-  bool is_array_slice() const { return is_array_slice(); }
+  bool is_array_slice() const { return _slice != 0; }
   bool is_oop() const { return !is_array_slice(); }
   bool is_null() const { return _ptr == nullptr; }
 
diff --git a/src/hotspot/share/gc/g1/g1YoungCollector.cpp b/src/hotspot/share/gc/g1/g1YoungCollector.cpp
index 453cc7d92b1a5..5279e2fe1f26e 100644
--- a/src/hotspot/share/gc/g1/g1YoungCollector.cpp
+++ b/src/hotspot/share/gc/g1/g1YoungCollector.cpp
@@ -859,7 +859,6 @@ class G1CopyingKeepAliveClosure: public OopClosure {
       // When the queue is drained (after each phase of reference processing)
       // the object and it's followers will be copied, the reference field set
      // to point to the new location, and the RSet updated.
-      log_trace(gc)("push obj task");
       _par_scan_state->push_on_queue(G1TaskQueueEntry(p));
     }
   }
diff --git a/src/hotspot/share/gc/shared/gc_globals.hpp b/src/hotspot/share/gc/shared/gc_globals.hpp
index b49eafc88ec36..66496544b9627 100644
--- a/src/hotspot/share/gc/shared/gc_globals.hpp
+++ b/src/hotspot/share/gc/shared/gc_globals.hpp
@@ -201,7 +201,7 @@
           "Maximum size of marking stack in bytes.")                       \
           range(1, (INT_MAX - 1))                                          \
                                                                            \
-  product(size_t, MarkStackSize, NOT_LP64(512*K) LP64_ONLY(4*M),           \
+  product(size_t, MarkStackSize, NOT_LP64(64*K) LP64_ONLY(4*M),            \
          "Size of marking stack in bytes.")                                \
          constraint(MarkStackSizeConstraintFunc,AfterErgo)                 \
          range(1, (INT_MAX - 1))                                           \