From 3ef3429743d36cd4b1bc8855ab5c9cfc94fa48ff Mon Sep 17 00:00:00 2001
From: Kunshan Wang
Date: Mon, 20 Jan 2025 16:31:16 +0800
Subject: [PATCH] Make triggering GC and resizing heap consistent.

We count side metadata in when reporting the pending allocation size to
the GC trigger.  This ensures the MemBalancer increases the heap size by
the same amount as estimated by `Space::reserved_pages` (or more, due to
over-estimation).
---
 src/policy/lockfreeimmortalspace.rs           |  6 +++++-
 src/policy/marksweepspace/malloc_ms/global.rs |  6 +++++-
 src/policy/space.rs                           | 18 ++++++++++++++++--
 src/util/metadata/side_metadata/global.rs     |  3 +++
 4 files changed, 29 insertions(+), 4 deletions(-)

diff --git a/src/policy/lockfreeimmortalspace.rs b/src/policy/lockfreeimmortalspace.rs
index 3863934414..7a7160d8bc 100644
--- a/src/policy/lockfreeimmortalspace.rs
+++ b/src/policy/lockfreeimmortalspace.rs
@@ -129,10 +129,14 @@ impl<VM: VMBinding> Space<VM> for LockFreeImmortalSpace<VM> {
         unsafe { sft_map.eager_initialize(self.as_sft(), self.start, self.total_bytes) };
     }
 
+    fn estimate_side_meta_pages(&self, data_pages: usize) -> usize {
+        self.metadata.calculate_reserved_pages(data_pages)
+    }
+
     fn reserved_pages(&self) -> usize {
         let cursor = self.cursor.load(Ordering::Relaxed);
         let data_pages = conversions::bytes_to_pages_up(self.limit - cursor);
-        let meta_pages = self.metadata.calculate_reserved_pages(data_pages);
+        let meta_pages = self.estimate_side_meta_pages(data_pages);
         data_pages + meta_pages
     }
 
diff --git a/src/policy/marksweepspace/malloc_ms/global.rs b/src/policy/marksweepspace/malloc_ms/global.rs
index 7327d80d67..83ec1e369b 100644
--- a/src/policy/marksweepspace/malloc_ms/global.rs
+++ b/src/policy/marksweepspace/malloc_ms/global.rs
@@ -215,6 +215,10 @@ impl<VM: VMBinding> Space<VM> for MallocSpace<VM> {
         "MallocSpace"
     }
 
+    fn estimate_side_meta_pages(&self, data_pages: usize) -> usize {
+        self.metadata.calculate_reserved_pages(data_pages)
+    }
+
     #[allow(clippy::assertions_on_constants)]
     fn reserved_pages(&self) -> usize {
         use crate::util::constants::LOG_BYTES_IN_PAGE;
@@ -222,7 +226,7 @@
         debug_assert!(LOG_BYTES_IN_MALLOC_PAGE >= LOG_BYTES_IN_PAGE);
         let data_pages = self.active_pages.load(Ordering::SeqCst)
             << (LOG_BYTES_IN_MALLOC_PAGE - LOG_BYTES_IN_PAGE);
-        let meta_pages = self.metadata.calculate_reserved_pages(data_pages);
+        let meta_pages = self.estimate_side_meta_pages(data_pages);
         data_pages + meta_pages
     }
 
diff --git a/src/policy/space.rs b/src/policy/space.rs
index 7057638dde..3ce1ee3fe1 100644
--- a/src/policy/space.rs
+++ b/src/policy/space.rs
@@ -109,9 +109,12 @@ pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
 
         // Clear the request, and inform GC trigger about the pending allocation.
         pr.clear_request(pages_reserved);
+
+        let meta_pages_reserved = self.estimate_side_meta_pages(pages_reserved);
+        let total_pages_reserved = pages_reserved + meta_pages_reserved;
         self.get_gc_trigger()
             .policy
-            .on_pending_allocation(pages_reserved);
+            .on_pending_allocation(total_pages_reserved);
         VM::VMCollection::block_for_gc(VMMutatorThread(tls)); // We have checked that this is mutator
         unsafe { Address::zero() }
     }
@@ -313,9 +316,20 @@ pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
             .mark_as_mapped(self.common().start, self.common().extent);
     }
 
+    /// Estimate the amount of side metadata memory needed for a given data memory size in
+    /// pages. The result will over-estimate the number of metadata pages needed, reserving
+    /// at least one page per side metadata spec. Despite the over-estimation, this still
+    /// closely tracks the number of side metadata pages the space actually consumes.
+    ///
+    /// This function is used both for triggering GC (via [`Space::reserved_pages`]) and for
+    /// resizing the heap (via [`crate::util::heap::GCTriggerPolicy::on_pending_allocation`]).
+    fn estimate_side_meta_pages(&self, data_pages: usize) -> usize {
+        self.common().metadata.calculate_reserved_pages(data_pages)
+    }
+
     fn reserved_pages(&self) -> usize {
         let data_pages = self.get_page_resource().reserved_pages();
-        let meta_pages = self.common().metadata.calculate_reserved_pages(data_pages);
+        let meta_pages = self.estimate_side_meta_pages(data_pages);
         data_pages + meta_pages
     }
 
diff --git a/src/util/metadata/side_metadata/global.rs b/src/util/metadata/side_metadata/global.rs
index 1eea859832..ffb3fb8f44 100644
--- a/src/util/metadata/side_metadata/global.rs
+++ b/src/util/metadata/side_metadata/global.rs
@@ -1362,6 +1362,9 @@ impl SideMetadataContext {
     pub fn calculate_reserved_pages(&self, data_pages: usize) -> usize {
         let mut total = 0;
        for spec in self.global.iter() {
+            // This rounds up, so no matter how small `data_pages` is, each side metadata
+            // spec will be counted as at least one page. This over-estimation is *intended*:
+            // the amount is used both for triggering GC and for resizing the heap.
             total += data_to_meta_size_round_up(spec, data_pages);
         }
         for spec in self.local.iter() {
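
A reviewer's note, not part of the applied patch: the self-contained Rust
sketch below illustrates the invariant this change establishes. The names
`meta_pages_round_up` and `log_ratio`, and the 4 KiB page size, are
assumptions made up for the example, not MMTk's actual API (the real entry
point is `SideMetadataContext::calculate_reserved_pages`). It shows why
rounding up to at least one page per spec over-estimates for small
`data_pages`, and why the GC trigger and `Space::reserved_pages` must add
the same estimate.

    const LOG_BYTES_IN_PAGE: usize = 12; // assume 4 KiB pages

    /// Hypothetical stand-in for one side metadata spec: `log_ratio` is
    /// the log2 of data bytes covered per metadata byte.
    fn meta_pages_round_up(data_pages: usize, log_ratio: usize) -> usize {
        let data_bytes = data_pages << LOG_BYTES_IN_PAGE;
        // Metadata bytes needed to cover the data, rounded up.
        let meta_bytes = (data_bytes + (1 << log_ratio) - 1) >> log_ratio;
        // Round up to whole pages, reserving at least one page per spec,
        // mirroring the intentional over-estimation in this patch.
        ((meta_bytes + (1 << LOG_BYTES_IN_PAGE) - 1) >> LOG_BYTES_IN_PAGE).max(1)
    }

    fn main() {
        let data_pages = 3;
        // E.g. 1 metadata byte per 64 data bytes (one bit per 8-byte granule).
        let meta_pages = meta_pages_round_up(data_pages, 6);
        // The same estimate must flow into both code paths:
        let reserved = data_pages + meta_pages; // as in Space::reserved_pages
        let pending = data_pages + meta_pages;  // as reported to on_pending_allocation
        assert_eq!(reserved, pending);
        println!("data={data_pages} meta={meta_pages} total={reserved}");
    }

With `data_pages = 3` and one metadata byte per 64 data bytes, the exact
metadata need is only 192 bytes, but the estimate still reserves a whole
page; both paths then add that same page, which is exactly the consistency
between triggering GC and resizing the heap that this patch is after.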