Skip to content

Commit

Permalink
Remove debug fluff
Browse files Browse the repository at this point in the history
  • Loading branch information
rkennke committed Jun 25, 2024
1 parent 7579290 commit 6852d00
Show file tree
Hide file tree
Showing 9 changed files with 18 additions and 55 deletions.
14 changes: 2 additions & 12 deletions src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -123,9 +123,8 @@ bool G1CMMarkStack::initialize() {
size_t initial_capacity = MarkStackSize;
size_t max_capacity = MarkStackSizeMax;

size_t const TaskEntryChunkSizeInVoidStar = sizeof(TaskQueueEntryChunk) /* sizeof(G1TaskQueueEntry) NOT_LP64(* 2)*/;
//assert(TaskEntryChunkSizeInVoidStar == 1024, "wrong chunk size: " SIZE_FORMAT, TaskEntryChunkSizeInVoidStar);
log_trace(gc)("capacity_alignment: " SIZE_FORMAT, capacity_alignment());
size_t const TaskEntryChunkSizeInVoidStar = sizeof(TaskQueueEntryChunk) / sizeof(G1TaskQueueEntry);

size_t max_num_chunks = align_up(max_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
size_t initial_num_chunks = align_up(initial_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;

Expand All @@ -143,7 +142,6 @@ bool G1CMMarkStack::initialize() {

log_debug(gc)("Initialize mark stack with " SIZE_FORMAT " chunks, maximum " SIZE_FORMAT,
initial_num_chunks, max_capacity);
log_trace(gc)("max_num_chunks: " SIZE_FORMAT, max_num_chunks);

return _chunk_allocator.initialize(initial_num_chunks, max_num_chunks);
}
Expand Down Expand Up @@ -272,10 +270,8 @@ bool G1CMMarkStack::ChunkAllocator::reserve(size_t new_capacity) {
_max_capacity - _capacity;


log_trace(gc)("allocating " SIZE_FORMAT " TaskQueueArrayChunks", bucket_capacity);
TaskQueueEntryChunk* bucket_base = MmapArrayAllocator<TaskQueueEntryChunk>::allocate_or_null(bucket_capacity, mtGC);

log_trace(gc)("allocated " SIZE_FORMAT " TaskQueueArrayChunks " PTR_FORMAT, bucket_capacity, p2i(bucket_base));
if (bucket_base == nullptr) {
log_warning(gc)("Failed to reserve memory for increasing the overflow mark stack capacity with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.",
bucket_capacity, bucket_capacity * sizeof(TaskQueueEntryChunk));
Expand All @@ -292,10 +288,8 @@ void G1CMMarkStack::expand() {
}

void G1CMMarkStack::add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem) {
log_trace(gc)("adding chunk to list");
elem->next = *list;
*list = elem;
log_trace(gc)("adding chunk to list done");
}

void G1CMMarkStack::add_chunk_to_chunk_list(TaskQueueEntryChunk* elem) {
Expand Down Expand Up @@ -344,9 +338,7 @@ bool G1CMMarkStack::par_push_chunk(G1TaskQueueEntry* ptr_arr) {
}
}

log_trace(gc)("par_push_chunk before copy");
Copy::conjoint_memory_atomic(ptr_arr, new_chunk->data, EntriesPerChunk * sizeof(G1TaskQueueEntry));
log_trace(gc)("par_push_chunk after copy");

add_chunk_to_chunk_list(new_chunk);

Expand All @@ -360,9 +352,7 @@ bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) {
return false;
}

log_trace(gc)("par_pop_chunk before copy");
Copy::conjoint_memory_atomic(cur->data, ptr_arr, EntriesPerChunk * sizeof(G1TaskQueueEntry));
log_trace(gc)("par_pop_chunk after copy");

add_chunk_to_free_list(cur);
return true;
Expand Down
2 changes: 1 addition & 1 deletion src/hotspot/share/gc/g1/g1ConcurrentMark.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -99,7 +99,7 @@ class G1CMSubjectToDiscoveryClosure : public BoolObjectClosure {
class G1CMMarkStack {
public:
// Number of TaskQueueEntries that can fit in a single chunk.
static const size_t EntriesPerChunk = 512 - 1 /* One reference for the next pointer */;
static const size_t EntriesPerChunk = 1024 - 1 /* One reference for the next pointer */;
private:
struct TaskQueueEntryChunk {
TaskQueueEntryChunk* next;
Expand Down
2 changes: 0 additions & 2 deletions src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -95,13 +95,11 @@ inline void G1CMMarkStack::iterate(Fn fn) const {
guarantee(num_chunks <= _chunks_in_chunk_list, "Found " SIZE_FORMAT " oop chunks which is more than there should be", num_chunks);

for (size_t i = 0; i < EntriesPerChunk; ++i) {
log_trace(gc)("iterating chunk data: " SIZE_FORMAT, i);
if (cur->data[i].is_null()) {
break;
}
fn(cur->data[i]);
}
log_trace(gc)("iterating chunk next: " PTR_FORMAT, p2i(cur->next));
cur = cur->next;
num_chunks++;
}
Expand Down
7 changes: 2 additions & 5 deletions src/hotspot/share/gc/g1/g1OopClosures.inline.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -60,11 +60,8 @@ inline void G1ScanClosureBase::prefetch_and_push(T* p, const oop obj) {
(obj->is_forwarded() &&
obj->forwardee() == RawAccess<>::oop_load(p)),
"p should still be pointing to obj or to its forwardee");
log_trace(gc)("push obj task 2: %d", sizeof(G1TaskQueueEntry));
G1TaskQueueEntry entry(p);
log_trace(gc)("push obj created");
_par_scan_state->push_on_queue(entry);
log_trace(gc)("push obj task 2 done");

_par_scan_state->push_on_queue(G1TaskQueueEntry(p));
}

template <class T>
Expand Down
3 changes: 0 additions & 3 deletions src/hotspot/share/gc/g1/g1ParScanThreadState.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -235,7 +235,6 @@ void G1ParScanThreadState::do_partial_array(oop obj, int slice, int pow) {
while ((1 << pow) > (int)ObjArrayMarkingStride && (slice * 2 < G1TaskQueueEntry::slice_size())) {
pow--;
slice *= 2;
log_trace(gc)("pushing new partial array");
push_on_queue(G1TaskQueueEntry(array, slice - 1, pow));
}

Expand Down Expand Up @@ -304,7 +303,6 @@ void G1ParScanThreadState::start_partial_objarray(G1HeapRegionAttr dest_attr,
pow--;
slice = 2;
last_idx = (1 << pow);
log_trace(gc)("pushing new partial array overflow");
push_on_queue(G1TaskQueueEntry(array, 1, pow));
}

Expand All @@ -317,7 +315,6 @@ void G1ParScanThreadState::start_partial_objarray(G1HeapRegionAttr dest_attr,
int right_slice = slice * 2;
int left_slice_end = left_slice * (1 << pow);
if (left_slice_end < len) {
log_trace(gc)("pushing new partial array 2");
push_on_queue(G1TaskQueueEntry(array, left_slice, pow));
slice = right_slice;
last_idx = left_slice_end;
Expand Down
2 changes: 0 additions & 2 deletions src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -36,9 +36,7 @@

inline void G1ParScanThreadState::push_on_queue(G1TaskQueueEntry task) {
verify_task(task);
log_trace(gc)("push_on_queue before");
_task_queue->push(task);
log_trace(gc)("push_on_queue after");
}

bool G1ParScanThreadState::needs_partial_trimming() const {
Expand Down
40 changes: 12 additions & 28 deletions src/hotspot/share/gc/g1/g1TaskQueueEntry.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -26,9 +26,6 @@
#ifndef SHARE_GC_G1_G1TASKQUEUEENTRY_HPP
#define SHARE_GC_G1_G1TASKQUEUEENTRY_HPP

#include "logging/log.hpp"
#include "logging/logTag.hpp"

// A task queue entry that encodes both regular oops, and the array oops plus slicing data for
// parallel array processing.
// The design goal is to make the regular oop ops very fast, because that would be the prevailing
Expand Down Expand Up @@ -207,47 +204,34 @@ class G1TaskQueueEntry {
uint16_t _pow;

public:
G1TaskQueueEntry() {
_ptr = nullptr;
_slice = 0;
_pow = 0;
assert(sizeof(G1TaskQueueEntry) == 8, "incorrect size");
log_trace(gc)("init new G1TaskQueueEntry");
G1TaskQueueEntry() :
_ptr(nullptr), _slice(0), _pow(0) {
}
G1TaskQueueEntry(oop o) {
_ptr = cast_from_oop<void*>(o);
_slice = 0;
_pow = 0;
assert(sizeof(G1TaskQueueEntry) == 8, "incorrect size");
G1TaskQueueEntry(oop o) :
_ptr(cast_from_oop<void*>(o)),
_slice(0), _pow(0) {
assert(!is_array_slice(), "task should not be sliced");
log_trace(gc)("init new G1TaskQueueEntry: oop: " PTR_FORMAT, p2i(o));
}
G1TaskQueueEntry(oop* o) {
_ptr = reinterpret_cast<void*>(o);
_slice = 0;
_pow = 0;
assert(sizeof(G1TaskQueueEntry) == 8, "incorrect size");
G1TaskQueueEntry(oop* o) :
_ptr(reinterpret_cast<void*>(o)),
_slice(0), _pow(0) {
assert(!is_array_slice(), "task should not be sliced");
log_trace(gc)("init new G1TaskQueueEntry: oop*: " PTR_FORMAT, p2i(o));
}
G1TaskQueueEntry(narrowOop* o) {
ShouldNotReachHere();
}
G1TaskQueueEntry(oop o, int slice, int pow) {
_ptr = cast_from_oop<void*>(o);
_slice = slice;
_pow = pow;
assert(sizeof(G1TaskQueueEntry) == 8, "incorrect size");
G1TaskQueueEntry(oop o, int slice, int pow) :
_ptr(cast_from_oop<void*>(o)),
_slice(slice), _pow(pow) {
assert(is_array_slice(), "task should be sliced");
log_trace(gc)("init new G1TaskQueueEntry: oop: " PTR_FORMAT ", slice: %d, pow: %d", p2i(o), slice, pow);
}

// Trivially copyable.

public:
bool is_oop_ptr() const { return !is_array_slice(); }
bool is_narrow_oop_ptr() const { return false; }
bool is_array_slice() const { return is_array_slice(); }
bool is_array_slice() const { return _slice != 0; }
bool is_oop() const { return !is_array_slice(); }
bool is_null() const { return _ptr == nullptr; }

Expand Down
1 change: 0 additions & 1 deletion src/hotspot/share/gc/g1/g1YoungCollector.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -859,7 +859,6 @@ class G1CopyingKeepAliveClosure: public OopClosure {
// When the queue is drained (after each phase of reference processing)
// the object and its followers will be copied, the reference field set
// to point to the new location, and the RSet updated.
log_trace(gc)("push obj task");
_par_scan_state->push_on_queue(G1TaskQueueEntry(p));
}
}
Expand Down
2 changes: 1 addition & 1 deletion src/hotspot/share/gc/shared/gc_globals.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -201,7 +201,7 @@
"Maximum size of marking stack in bytes.") \
range(1, (INT_MAX - 1)) \
\
product(size_t, MarkStackSize, NOT_LP64(512*K) LP64_ONLY(4*M), \
product(size_t, MarkStackSize, NOT_LP64(64*K) LP64_ONLY(4*M), \
"Size of marking stack in bytes.") \
constraint(MarkStackSizeConstraintFunc,AfterErgo) \
range(1, (INT_MAX - 1)) \
Expand Down

0 comments on commit 6852d00

Please sign in to comment.