Skip to content

Commit

Permalink
Make triggering GC and resizing the heap consistent.
Browse files Browse the repository at this point in the history
We include the size of side metadata when reporting the pending
allocation size to the GC trigger.  This ensures the MemBalancer
increases the heap size by the same amount as (or a larger amount than,
due to over-estimation) the amount estimated by `Space::reserved_pages`.
  • Loading branch information
wks committed Jan 20, 2025
1 parent 8b22d08 commit 3ef3429
Show file tree
Hide file tree
Showing 4 changed files with 29 additions and 4 deletions.
6 changes: 5 additions & 1 deletion src/policy/lockfreeimmortalspace.rs
Original file line number Diff line number Diff line change
Expand Up @@ -129,10 +129,14 @@ impl<VM: VMBinding> Space<VM> for LockFreeImmortalSpace<VM> {
unsafe { sft_map.eager_initialize(self.as_sft(), self.start, self.total_bytes) };
}

/// Estimate the side metadata pages needed for `data_pages` data pages.
/// This space reads its own `metadata` field directly rather than using the
/// trait default (which goes through `self.common().metadata`) — presumably
/// because this space does not have a `CommonSpace`; confirm against the
/// full struct definition.
fn estimate_side_meta_pages(&self, data_pages: usize) -> usize {
self.metadata.calculate_reserved_pages(data_pages)
}

/// Pages reserved by this space: data pages derived from the bytes between
/// the current allocation cursor and `self.limit`, plus the
/// (over-)estimated side metadata pages for that amount of data.
fn reserved_pages(&self) -> usize {
let cursor = self.cursor.load(Ordering::Relaxed);
let data_pages = conversions::bytes_to_pages_up(self.limit - cursor);
// NOTE(review): the next two `let meta_pages` lines are the old/new pair
// from the rendered diff; the second shadows the first, so only the
// `estimate_side_meta_pages` result is used.
let meta_pages = self.metadata.calculate_reserved_pages(data_pages);
let meta_pages = self.estimate_side_meta_pages(data_pages);
data_pages + meta_pages
}

Expand Down
6 changes: 5 additions & 1 deletion src/policy/marksweepspace/malloc_ms/global.rs
Original file line number Diff line number Diff line change
Expand Up @@ -215,14 +215,18 @@ impl<VM: VMBinding> Space<VM> for MallocSpace<VM> {
"MallocSpace"
}

/// Estimate the side metadata pages needed for `data_pages` data pages.
/// Reads this space's own `metadata` field directly instead of the trait
/// default (which goes through `self.common().metadata`) — presumably
/// because `MallocSpace` does not use `CommonSpace`; confirm against the
/// full struct definition.
fn estimate_side_meta_pages(&self, data_pages: usize) -> usize {
self.metadata.calculate_reserved_pages(data_pages)
}

/// Pages reserved by this space: active malloc pages converted to 4K-page
/// units, plus the (over-)estimated side metadata pages for them.
#[allow(clippy::assertions_on_constants)]
fn reserved_pages(&self) -> usize {
use crate::util::constants::LOG_BYTES_IN_PAGE;
// Assume malloc pages are no smaller than 4K pages. Otherwise the subtraction below will fail.
debug_assert!(LOG_BYTES_IN_MALLOC_PAGE >= LOG_BYTES_IN_PAGE);
// Convert the active malloc-page count into 4K-page units by shifting by
// the (non-negative) difference of the two page-size logarithms.
let data_pages = self.active_pages.load(Ordering::SeqCst)
<< (LOG_BYTES_IN_MALLOC_PAGE - LOG_BYTES_IN_PAGE);
// NOTE(review): the next two `let meta_pages` lines are the old/new pair
// from the rendered diff; the second shadows the first, so only the
// `estimate_side_meta_pages` result is used.
let meta_pages = self.metadata.calculate_reserved_pages(data_pages);
let meta_pages = self.estimate_side_meta_pages(data_pages);
data_pages + meta_pages
}

Expand Down
18 changes: 16 additions & 2 deletions src/policy/space.rs
Original file line number Diff line number Diff line change
Expand Up @@ -109,9 +109,12 @@ pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {

// Clear the request, and inform GC trigger about the pending allocation.
pr.clear_request(pages_reserved);

let meta_pages_reserved = self.estimate_side_meta_pages(pages_reserved);
let total_pages_reserved = pages_reserved + meta_pages_reserved;
self.get_gc_trigger()
.policy
.on_pending_allocation(pages_reserved);
.on_pending_allocation(total_pages_reserved);

VM::VMCollection::block_for_gc(VMMutatorThread(tls)); // We have checked that this is mutator
unsafe { Address::zero() }
Expand Down Expand Up @@ -313,9 +316,20 @@ pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
.mark_as_mapped(self.common().start, self.common().extent);
}

/// Estimate the amount of side metadata memory needed for a given data memory size in pages.
/// The result will over-estimate the number of metadata pages needed, reserving at least one
/// page per side metadata spec. This describes, reasonably accurately, the number of side
/// metadata pages the space actually consumes.
///
/// This function is used for both triggering GC (via [`Space::reserved_pages`]) and resizing
/// the heap (via [`crate::util::heap::GCTriggerPolicy::on_pending_allocation`]), so that both
/// paths account for side metadata consistently.
fn estimate_side_meta_pages(&self, data_pages: usize) -> usize {
self.common().metadata.calculate_reserved_pages(data_pages)
}

/// Total pages reserved by this space: data pages as reported by the page
/// resource, plus the (over-)estimated side metadata pages for them.
fn reserved_pages(&self) -> usize {
let data_pages = self.get_page_resource().reserved_pages();
// NOTE(review): the next two `let meta_pages` lines are the old/new pair
// from the rendered diff; the second shadows the first, so only the
// `estimate_side_meta_pages` result is used.
let meta_pages = self.common().metadata.calculate_reserved_pages(data_pages);
let meta_pages = self.estimate_side_meta_pages(data_pages);
data_pages + meta_pages
}

Expand Down
3 changes: 3 additions & 0 deletions src/util/metadata/side_metadata/global.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1362,6 +1362,9 @@ impl SideMetadataContext {
pub fn calculate_reserved_pages(&self, data_pages: usize) -> usize {
let mut total = 0;
for spec in self.global.iter() {
// This rounds up. No matter how small `data_pages` is, the side metadata size will be
// at least one page. This behavior is *intended*. The over-estimated amount is used
// for triggering GC and resizing the heap.
total += data_to_meta_size_round_up(spec, data_pages);
}
for spec in self.local.iter() {
Expand Down

0 comments on commit 3ef3429

Please sign in to comment.