Skip to content

Commit 3cfdf75

Browse files
authored
Call Collection::out_of_memory if the allocation size is larger than max heap size (#896)
This closes #867.
1 parent c9ea320 commit 3cfdf75

11 files changed

+175
-18
lines changed

src/plan/global.rs

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -278,7 +278,10 @@ pub trait Plan: 'static + Sync + Downcast {
278278

279279
/// Get the total number of pages for the heap.
280280
fn get_total_pages(&self) -> usize {
281-
self.base().gc_trigger.policy.get_heap_size_in_pages()
281+
self.base()
282+
.gc_trigger
283+
.policy
284+
.get_current_heap_size_in_pages()
282285
}
283286

284287
/// Get the number of pages that are still available for use. The available pages

src/policy/space.rs

Lines changed: 35 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@ use crate::util::heap::{PageResource, VMRequest};
1313
use crate::util::options::Options;
1414
use crate::vm::{ActivePlan, Collection};
1515

16-
use crate::util::constants::LOG_BYTES_IN_MBYTE;
16+
use crate::util::constants::{LOG_BYTES_IN_MBYTE, LOG_BYTES_IN_PAGE};
1717
use crate::util::conversions;
1818
use crate::util::opaque_pointer::*;
1919

@@ -46,8 +46,42 @@ pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
4646
/// Currently after we create a boxed plan, spaces in the plan have a non-moving address.
4747
fn initialize_sft(&self);
4848

49+
/// A check for the obvious out-of-memory case: if the requested size is larger than
50+
/// the heap size, it is definitely an OOM. We would like to identify that, and
51+
/// allow the binding to deal with OOM. Without this check, we will attempt
52+
/// to allocate from the page resource. If the requested size is unrealistically large
53+
/// (such as `usize::MAX`), it breaks the assumptions of our implementation of
54+
/// page resource, vm map, etc. This check prevents that, and allows us to
55+
/// handle the OOM case.
56+
/// Each allocator that may request an arbitrary size should call this method before
57+
/// acquiring memory from the space. For example, bump pointer allocator and large object
58+
/// allocator need to call this method. On the other hand, allocators that only allocate
59+
/// memory in fixed size blocks do not need to call this method.
60+
/// An allocator should call this method before doing any computation on the size to
61+
/// avoid arithmetic overflow. If we have to do computation in the allocation fastpath and
62+
/// overflow happens there, there is nothing we can do about it.
63+
/// Return a boolean to indicate if we will be out of memory, determined by the check.
64+
fn will_oom_on_acquire(&self, tls: VMThread, size: usize) -> bool {
65+
let max_pages = self.get_gc_trigger().policy.get_max_heap_size_in_pages();
66+
let requested_pages = size >> LOG_BYTES_IN_PAGE;
67+
if requested_pages > max_pages {
68+
VM::VMCollection::out_of_memory(
69+
tls,
70+
crate::util::alloc::AllocationError::HeapOutOfMemory,
71+
);
72+
return true;
73+
}
74+
false
75+
}
76+
4977
fn acquire(&self, tls: VMThread, pages: usize) -> Address {
5078
trace!("Space.acquire, tls={:?}", tls);
79+
80+
debug_assert!(
81+
!self.will_oom_on_acquire(tls, pages << LOG_BYTES_IN_PAGE),
82+
"The requested number of pages is larger than the max heap size. Is will_oom_on_acquire used before acquiring memory?"
83+
);
84+
5185
// Should we poll to attempt to GC?
5286
// - If tls is collector, we cannot attempt a GC.
5387
// - If gc is disabled, we cannot attempt a GC.

src/util/alloc/bumpallocator.rs

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -158,6 +158,10 @@ impl<VM: VMBinding> BumpAllocator<VM> {
158158
offset: usize,
159159
stress_test: bool,
160160
) -> Address {
161+
if self.space.will_oom_on_acquire(self.tls, size) {
162+
return Address::ZERO;
163+
}
164+
161165
let block_size = (size + BLOCK_MASK) & (!BLOCK_MASK);
162166
let acquired_start = self.space.acquire(self.tls, bytes_to_pages(block_size));
163167
if acquired_start.is_zero() {

src/util/alloc/large_object_allocator.rs

Lines changed: 6 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -45,15 +45,13 @@ impl<VM: VMBinding> Allocator<VM> for LargeObjectAllocator<VM> {
4545
}
4646

4747
fn alloc_slow_once(&mut self, size: usize, align: usize, _offset: usize) -> Address {
48-
let header = 0; // HashSet is used instead of DoublyLinkedList
49-
let maxbytes = allocator::get_maximum_aligned_size::<VM>(size + header, align);
50-
let pages = crate::util::conversions::bytes_to_pages_up(maxbytes);
51-
let sp = self.space.allocate_pages(self.tls, pages);
52-
if sp.is_zero() {
53-
sp
54-
} else {
55-
sp + header
48+
if self.space.will_oom_on_acquire(self.tls, size) {
49+
return Address::ZERO;
5650
}
51+
52+
let maxbytes = allocator::get_maximum_aligned_size::<VM>(size, align);
53+
let pages = crate::util::conversions::bytes_to_pages_up(maxbytes);
54+
self.space.allocate_pages(self.tls, pages)
5755
}
5856
}
5957

src/util/heap/gc_trigger.rs

Lines changed: 13 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -105,7 +105,9 @@ pub trait GCTriggerPolicy<VM: VMBinding>: Sync + Send {
105105
/// Is current heap full?
106106
fn is_heap_full(&self, plan: &'static dyn Plan<VM = VM>) -> bool;
107107
/// Return the current heap size (in pages)
108-
fn get_heap_size_in_pages(&self) -> usize;
108+
fn get_current_heap_size_in_pages(&self) -> usize;
109+
/// Return the upper bound of heap size
110+
fn get_max_heap_size_in_pages(&self) -> usize;
109111
/// Can the heap size grow?
110112
fn can_heap_size_grow(&self) -> bool;
111113
}
@@ -130,7 +132,11 @@ impl<VM: VMBinding> GCTriggerPolicy<VM> for FixedHeapSizeTrigger {
130132
plan.get_reserved_pages() > self.total_pages
131133
}
132134

133-
fn get_heap_size_in_pages(&self) -> usize {
135+
fn get_current_heap_size_in_pages(&self) -> usize {
136+
self.total_pages
137+
}
138+
139+
fn get_max_heap_size_in_pages(&self) -> usize {
134140
self.total_pages
135141
}
136142

@@ -394,10 +400,14 @@ impl<VM: VMBinding> GCTriggerPolicy<VM> for MemBalancerTrigger {
394400
plan.get_reserved_pages() > self.current_heap_pages.load(Ordering::Relaxed)
395401
}
396402

397-
fn get_heap_size_in_pages(&self) -> usize {
403+
fn get_current_heap_size_in_pages(&self) -> usize {
398404
self.current_heap_pages.load(Ordering::Relaxed)
399405
}
400406

407+
fn get_max_heap_size_in_pages(&self) -> usize {
408+
self.max_heap_pages
409+
}
410+
401411
fn can_heap_size_grow(&self) -> bool {
402412
self.current_heap_pages.load(Ordering::Relaxed) < self.max_heap_pages
403413
}

vmbindings/dummyvm/src/tests/allocate_with_initialize_collection.rs

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,9 @@ pub fn allocate_with_initialize_collection() {
1212
mmtk_init(MB);
1313
mmtk_initialize_collection(VMThread::UNINITIALIZED);
1414
let handle = mmtk_bind_mutator(VMMutatorThread(VMThread::UNINITIALIZED));
15-
// Attempt to allocate 2MB. This will trigger GC.
16-
let addr = mmtk_alloc(handle, 2 * MB, 8, 0, AllocationSemantics::Default);
15+
// Fill up the heap
16+
let _ = mmtk_alloc(handle, MB, 8, 0, AllocationSemantics::Default);
17+
// Attempt another allocation. This will trigger GC.
18+
let addr = mmtk_alloc(handle, MB, 8, 0, AllocationSemantics::Default);
1719
assert!(!addr.is_zero());
1820
}

vmbindings/dummyvm/src/tests/allocate_without_initialize_collection.rs

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,9 @@ pub fn allocate_without_initialize_collection() {
1111
// 1MB heap
1212
mmtk_init(MB);
1313
let handle = mmtk_bind_mutator(VMMutatorThread(VMThread::UNINITIALIZED));
14-
// Attempt to allocate 2MB memory. This should trigger a GC, but as we never call initialize_collection(), we cannot do GC.
15-
let addr = mmtk_alloc(handle, 2 * MB, 8, 0, AllocationSemantics::Default);
14+
// Fill up the heap.
15+
let _ = mmtk_alloc(handle, MB, 8, 0, AllocationSemantics::Default);
16+
// Attempt another allocation. This should trigger a GC, but as we never call initialize_collection(), we cannot do GC.
17+
let addr = mmtk_alloc(handle, MB, 8, 0, AllocationSemantics::Default);
1618
assert!(!addr.is_zero());
1719
}

vmbindings/dummyvm/src/tests/fixtures/mod.rs

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -62,6 +62,27 @@ impl<T: FixtureContent> SerialFixture<T> {
6262
}
6363
func(c.as_ref().unwrap())
6464
}
65+
66+
pub fn with_fixture_expect_benign_panic<
67+
F: Fn(&T) + std::panic::UnwindSafe + std::panic::RefUnwindSafe,
68+
>(
69+
&self,
70+
func: F,
71+
) {
72+
let res = {
73+
// The lock will be dropped at the end of the block. So panic won't poison the lock.
74+
let mut c = self.content.lock().unwrap();
75+
if c.is_none() {
76+
*c = Some(Box::new(T::create()));
77+
}
78+
79+
std::panic::catch_unwind(|| func(c.as_ref().unwrap()))
80+
};
81+
// We do not hold the lock now. It is safe to resume now.
82+
if let Err(e) = res {
83+
std::panic::resume_unwind(e);
84+
}
85+
}
6586
}
6687

6788
pub struct SingleObject {
Lines changed: 82 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,82 @@
1+
// GITHUB-CI: MMTK_PLAN=all
2+
3+
use crate::api;
4+
use crate::tests::fixtures::{MutatorFixture, SerialFixture};
5+
use mmtk::plan::AllocationSemantics;
6+
7+
lazy_static! {
8+
static ref MUTATOR: SerialFixture<MutatorFixture> = SerialFixture::new();
9+
}
10+
11+
#[test]
12+
#[should_panic(expected = "Out of memory with HeapOutOfMemory!")]
13+
pub fn allocate_max_size_object() {
14+
let (size, align) = (usize::MAX, 8);
15+
16+
MUTATOR.with_fixture_expect_benign_panic(|fixture| {
17+
api::mmtk_alloc(
18+
fixture.mutator,
19+
size,
20+
align,
21+
0,
22+
AllocationSemantics::Default,
23+
);
24+
})
25+
}
26+
27+
#[test]
28+
// This test panics with 'attempt to add with overflow', as we do computation with the size
29+
// in the fastpath. I don't think we want to do any extra check in the fastpath. There is
30+
// nothing we can do with it without sacrificing performance.
31+
#[should_panic(expected = "Out of memory with HeapOutOfMemory!")]
32+
#[ignore]
33+
pub fn allocate_max_size_object_after_succeed() {
34+
MUTATOR.with_fixture_expect_benign_panic(|fixture| {
35+
// Allocate something so we have a thread local allocation buffer
36+
api::mmtk_alloc(fixture.mutator, 8, 8, 0, AllocationSemantics::Default);
37+
// Allocate an unrealistically large object
38+
api::mmtk_alloc(
39+
fixture.mutator,
40+
usize::MAX,
41+
8,
42+
0,
43+
AllocationSemantics::Default,
44+
);
45+
})
46+
}
47+
48+
#[test]
49+
#[should_panic(expected = "Out of memory with HeapOutOfMemory!")]
50+
pub fn allocate_unrealistically_large_object() {
51+
const CHUNK: usize = 4 * 1024 * 1024; // 4MB
52+
// Leave some room, so we won't have arithmetic overflow when we compute size and do alignment.
53+
let (size, align) = (
54+
mmtk::util::conversions::raw_align_down(usize::MAX - CHUNK, 4096),
55+
8,
56+
);
57+
58+
MUTATOR.with_fixture_expect_benign_panic(|fixture| {
59+
api::mmtk_alloc(
60+
fixture.mutator,
61+
size,
62+
align,
63+
0,
64+
AllocationSemantics::Default,
65+
);
66+
})
67+
}
68+
69+
#[test]
70+
#[should_panic(expected = "Out of memory with HeapOutOfMemory!")]
71+
pub fn allocate_more_than_heap_size() {
72+
// The heap has 1 MB. Allocating 2MB will cause OOM.
73+
MUTATOR.with_fixture_expect_benign_panic(|fixture| {
74+
api::mmtk_alloc(
75+
fixture.mutator,
76+
2 * 1024 * 1024,
77+
8,
78+
0,
79+
AllocationSemantics::Default,
80+
);
81+
})
82+
}

vmbindings/dummyvm/src/tests/mod.rs

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,8 @@ mod fixtures;
1818
mod handle_mmap_conflict;
1919
mod handle_mmap_oom;
2020
mod is_in_mmtk_spaces;
21-
mod issue139;
21+
mod issue139_allocate_unaligned_object_size;
22+
mod issue867_allocate_unrealistically_large_object;
2223
#[cfg(not(feature = "malloc_counted_size"))]
2324
mod malloc_api;
2425
#[cfg(feature = "malloc_counted_size")]

0 commit comments

Comments
 (0)