Skip to content

Commit dd84218

Browse files
authored
Use SFTDenseChunkMap on 64bits when vm_space is enabled (#1094)
This PR resolves the issues found in mmtk/mmtk-julia#143. ```console [2024-03-19T05:23:33Z INFO mmtk::policy::vmspace] Set [78624DC00000, 786258000000) as VM region (heap available range [20000000000, 220000000000)) thread '<unnamed>' panicked at 'start = 78624DC00000 + bytes = 171966464 should be smaller than space_start 380000000000 + max extent 2199023255552, index 28, table size 31', /home/runner/.cargo/git/checkouts/mmtk-core-89cdc7bf360cce7f/46b0abe/src/policy/sft_map.rs:202:21 note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace fatal runtime error: failed to initiate panic, error 5 ``` Basically we should not use `SFTSpaceMap` when we have off-heap memory. Julia tries to set VM space at an address range outside our heap range, and that causes issues when we compute index for `SFTSpaceMap`. Using `SFTDenseChunkMap` will solve the issue. Changes: * Use `SFTDenseChunkMap` if `vm_space` is enabled. * Add a range check for `SFTSpaceMap::has_sft_entry()`. The address/index computation is only correct if the address is in the range. * Add a simple test case for mmtk/mmtk-julia#143.
1 parent 00d316f commit dd84218

10 files changed

+118
-76
lines changed

src/policy/sft_map.rs

Lines changed: 34 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -67,17 +67,23 @@ pub trait SFTMap {
6767

6868
pub(crate) fn create_sft_map() -> Box<dyn SFTMap> {
6969
cfg_if::cfg_if! {
70-
if #[cfg(all(feature = "malloc_mark_sweep", target_pointer_width = "64"))] {
71-
// 64-bit malloc mark sweep needs a chunk-based SFT map, but the sparse map is not suitable for 64bits.
72-
Box::new(dense_chunk_map::SFTDenseChunkMap::new())
73-
} else if #[cfg(target_pointer_width = "64")] {
70+
if #[cfg(target_pointer_width = "64")] {
71+
// For 64bits, we generally want to use the space map, which requires using contiguous space and no off-heap memory.
72+
// If the requirements are not met, we have to choose a different SFT map implementation.
7473
use crate::util::heap::layout::vm_layout::vm_layout;
75-
if vm_layout().force_use_contiguous_spaces {
76-
Box::new(space_map::SFTSpaceMap::new())
77-
} else {
74+
if !vm_layout().force_use_contiguous_spaces {
75+
// This is usually the case for compressed pointer. Use the 32bits implementation.
7876
Box::new(sparse_chunk_map::SFTSparseChunkMap::new())
77+
} else if cfg!(any(feature = "malloc_mark_sweep", feature = "vm_space")) {
78+
// We have off-heap memory (malloc'd objects, or VM space). We have to use a chunk-based map.
79+
Box::new(dense_chunk_map::SFTDenseChunkMap::new())
80+
} else {
81+
// We can use space map.
82+
Box::new(space_map::SFTSpaceMap::new())
7983
}
8084
} else if #[cfg(target_pointer_width = "32")] {
85+
// Use sparse chunk map. As we have limited virtual address range on 32 bits,
86+
// it is okay to have a sparse chunk map which maps every chunk into an index in the array.
8187
Box::new(sparse_chunk_map::SFTSparseChunkMap::new())
8288
} else {
8389
compile_err!("Cannot figure out which SFT map to use.");
@@ -154,15 +160,17 @@ mod space_map {
154160
/// Space map is a small table, and it has one entry for each MMTk space.
155161
pub struct SFTSpaceMap {
156162
sft: Vec<SFTRefStorage>,
163+
space_address_start: Address,
164+
space_address_end: Address,
157165
}
158166

159167
unsafe impl Sync for SFTSpaceMap {}
160168

161169
impl SFTMap for SFTSpaceMap {
162-
fn has_sft_entry(&self, _addr: Address) -> bool {
163-
// Address::ZERO is mapped to index 0, and Address::MAX is mapped to index 31 (TABLE_SIZE-1)
164-
// So any address has an SFT entry.
165-
true
170+
fn has_sft_entry(&self, addr: Address) -> bool {
171+
// An arbitrary address from Address::ZERO to Address::MAX will be cyclically mapped to an index between 0 and 31
172+
// Only addresses between the virtual address range we use have valid entries.
173+
addr >= self.space_address_start && addr < self.space_address_end
166174
}
167175

168176
fn get_side_metadata(&self) -> Option<&SideMetadataSpec> {
@@ -186,21 +194,23 @@ mod space_map {
186194
start: Address,
187195
bytes: usize,
188196
) {
189-
let table_size = Self::addr_to_index(Address::MAX) + 1;
190197
let index = Self::addr_to_index(start);
191198
if cfg!(debug_assertions) {
192199
// Make sure we only update from empty to a valid space, or overwrite the space
193200
let old = self.sft[index].load();
194201
assert!((*old).name() == EMPTY_SFT_NAME || (*old).name() == (*space).name());
195202
// Make sure the range is in the space
196203
let space_start = Self::index_to_space_start(index);
197-
// FIXME: Currently skip the check for the last space. The following works fine for MMTk internal spaces,
198-
// but the VM space is an exception. Any address after the last space is considered as the last space,
199-
// based on our indexing function. In that case, we cannot assume the end of the region is within the last space (with MAX_SPACE_EXTENT).
200-
if index != table_size - 1 {
201-
assert!(start >= space_start);
202-
assert!(start + bytes <= space_start + vm_layout().max_space_extent());
203-
}
204+
assert!(start >= space_start);
205+
assert!(
206+
start + bytes <= space_start + vm_layout().max_space_extent(),
207+
"The range of {} + {} bytes does not fall into the space range {} and {}, \
208+
and it is probably outside the address range we use.",
209+
start,
210+
bytes,
211+
space_start,
212+
space_start + vm_layout().max_space_extent()
213+
);
204214
}
205215

206216
self.sft.get_unchecked(index).store(space);
@@ -216,12 +226,15 @@ mod space_map {
216226
/// Create a new space map.
217227
#[allow(clippy::assertions_on_constants)] // We assert to make sure the constants
218228
pub fn new() -> Self {
229+
use crate::util::heap::layout::heap_parameters::MAX_SPACES;
219230
let table_size = Self::addr_to_index(Address::MAX) + 1;
220-
debug_assert!(table_size >= crate::util::heap::layout::heap_parameters::MAX_SPACES);
231+
debug_assert!(table_size >= MAX_SPACES);
221232
Self {
222233
sft: std::iter::repeat_with(SFTRefStorage::default)
223234
.take(table_size)
224235
.collect(),
236+
space_address_start: Self::index_to_space_range(1).0, // the start of the first space
237+
space_address_end: Self::index_to_space_range(MAX_SPACES - 1).1, // the end of the last space
225238
}
226239
}
227240

@@ -261,7 +274,7 @@ mod space_map {
261274

262275
let assert_for_index = |i: usize| {
263276
let (start, end) = SFTSpaceMap::index_to_space_range(i);
264-
debug!("Space: Index#{} = [{}, {})", i, start, end);
277+
println!("Space: Index#{} = [{}, {})", i, start, end);
265278
assert_eq!(SFTSpaceMap::addr_to_index(start), i);
266279
assert_eq!(SFTSpaceMap::addr_to_index(end - 1), i);
267280
};

src/policy/vmspace.rs

Lines changed: 0 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -127,20 +127,6 @@ impl<VM: VMBinding> Space<VM> for VMSpace<VM> {
127127
// The default implementation checks with vm map. But vm map has some assumptions about
128128
// the address range for spaces and the VM space may break those assumptions (as the space is
129129
// mmapped by the runtime rather than us). So we use SFT here.
130-
131-
// However, SFT map may not be an ideal solution either for 64 bits. The default
132-
// implementation of SFT map on 64 bits is `SFTSpaceMap`, which maps the entire address
133-
// space into an index between 0 and 31, and assumes any address with the same index
134-
// is in the same space (with the same SFT). MMTk spaces uses 1-16. We guarantee that
135-
// VM space does not overlap with the address range that MMTk spaces may use. So
136-
// any region used as VM space will have an index of 0, or 17-31, and all the addresses
137-
// that are mapped to the same index will be considered as in the VM space. That means,
138-
// after we map a region as VM space, the nearby addresses will also be considered
139-
// as in the VM space if we use the default `SFTSpaceMap`. We can guarantee the nearby
140-
// addresses are not MMTk spaces, but we cannot tell whether they are really in the VM space
141-
// or not.
142-
// A solution to this is to use `SFTDenseChunkMap` if `vm_space` is enabled on 64 bits.
143-
// `SFTDenseChunkMap` has an overhead of a few percentages (~3%) compared to `SFTSpaceMap`.
144130
SFT_MAP.get_checked(start).name() == self.name()
145131
}
146132
}

src/util/test_util/fixtures.rs

Lines changed: 14 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -115,7 +115,7 @@ impl<T: FixtureContent> Default for SerialFixture<T> {
115115
}
116116

117117
pub struct MMTKFixture {
118-
pub mmtk: &'static MMTK<MockVM>,
118+
mmtk: *mut MMTK<MockVM>,
119119
}
120120

121121
impl FixtureContent for MMTKFixture {
@@ -143,13 +143,21 @@ impl MMTKFixture {
143143

144144
let mmtk = memory_manager::mmtk_init(&builder);
145145
let mmtk_ptr = Box::into_raw(mmtk);
146-
let mmtk_static: &'static MMTK<MockVM> = unsafe { &*mmtk_ptr };
147146

148147
if initialize_collection {
148+
let mmtk_static: &'static MMTK<MockVM> = unsafe { &*mmtk_ptr };
149149
memory_manager::initialize_collection(mmtk_static, VMThread::UNINITIALIZED);
150150
}
151151

152-
MMTKFixture { mmtk: mmtk_static }
152+
MMTKFixture { mmtk: mmtk_ptr }
153+
}
154+
155+
pub fn get_mmtk(&self) -> &'static MMTK<MockVM> {
156+
unsafe { &*self.mmtk }
157+
}
158+
159+
pub fn get_mmtk_mut(&mut self) -> &'static mut MMTK<MockVM> {
160+
unsafe { &mut *self.mmtk }
153161
}
154162
}
155163

@@ -186,7 +194,7 @@ impl MutatorFixture {
186194
true,
187195
);
188196
let mutator =
189-
memory_manager::bind_mutator(mmtk.mmtk, VMMutatorThread(VMThread::UNINITIALIZED));
197+
memory_manager::bind_mutator(mmtk.get_mmtk(), VMMutatorThread(VMThread::UNINITIALIZED));
190198
Self { mmtk, mutator }
191199
}
192200

@@ -196,12 +204,12 @@ impl MutatorFixture {
196204
{
197205
let mmtk = MMTKFixture::create_with_builder(with_builder, true);
198206
let mutator =
199-
memory_manager::bind_mutator(mmtk.mmtk, VMMutatorThread(VMThread::UNINITIALIZED));
207+
memory_manager::bind_mutator(mmtk.get_mmtk(), VMMutatorThread(VMThread::UNINITIALIZED));
200208
Self { mmtk, mutator }
201209
}
202210

203211
pub fn mmtk(&self) -> &'static MMTK<MockVM> {
204-
self.mmtk.mmtk
212+
self.mmtk.get_mmtk()
205213
}
206214
}
207215

src/vm/tests/mock_tests/mock_test_allocate_without_initialize_collection.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@ pub fn allocate_without_initialize_collection() {
2525

2626
// Build mutator
2727
let mut mutator = memory_manager::bind_mutator(
28-
fixture.mmtk,
28+
fixture.get_mmtk(),
2929
VMMutatorThread(VMThread::UNINITIALIZED),
3030
);
3131

src/vm/tests/mock_tests/mock_test_allocator_info.rs

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -14,12 +14,14 @@ pub fn test_allocator_info() {
1414
|| {
1515
let fixture = MMTKFixture::create();
1616

17-
let selector =
18-
memory_manager::get_allocator_mapping(fixture.mmtk, AllocationSemantics::Default);
17+
let selector = memory_manager::get_allocator_mapping(
18+
fixture.get_mmtk(),
19+
AllocationSemantics::Default,
20+
);
1921
let base_offset = crate::plan::Mutator::<MockVM>::get_allocator_base_offset(selector);
2022
let allocator_info = AllocatorInfo::new::<MockVM>(selector);
2123

22-
match *fixture.mmtk.get_options().plan {
24+
match *fixture.get_mmtk().get_options().plan {
2325
PlanSelector::NoGC
2426
| PlanSelector::Immix
2527
| PlanSelector::SemiSpace

src/vm/tests/mock_tests/mock_test_doc_avoid_resolving_allocator.rs

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -22,13 +22,15 @@ pub fn acquire_typed_allocator() {
2222

2323
// ANCHOR: avoid_resolving_allocator
2424
// At boot time
25-
let selector =
26-
memory_manager::get_allocator_mapping(fixture.mmtk, AllocationSemantics::Default);
25+
let selector = memory_manager::get_allocator_mapping(
26+
fixture.get_mmtk(),
27+
AllocationSemantics::Default,
28+
);
2729
unsafe {
2830
DEFAULT_ALLOCATOR_OFFSET =
2931
crate::plan::Mutator::<MockVM>::get_allocator_base_offset(selector);
3032
}
31-
let mutator = memory_manager::bind_mutator(fixture.mmtk, tls_opaque_pointer);
33+
let mutator = memory_manager::bind_mutator(fixture.get_mmtk(), tls_opaque_pointer);
3234

3335
// At run time: allocate with the default semantics without resolving allocator
3436
let default_allocator: &mut BumpAllocator<MockVM> = {

src/vm/tests/mock_tests/mock_test_doc_mutator_storage.rs

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@ pub fn boxed_pointer() {
2828
}
2929

3030
// Bind an MMTk mutator
31-
let mutator = memory_manager::bind_mutator(fixture.mmtk, tls_opaque_pointer);
31+
let mutator = memory_manager::bind_mutator(fixture.get_mmtk(), tls_opaque_pointer);
3232
// Store the pointer in TLS
3333
let mut storage = MutatorInTLS { ptr: mutator };
3434

@@ -58,7 +58,7 @@ pub fn embed_mutator_struct() {
5858
}
5959

6060
// Bind an MMTk mutator
61-
let mutator = memory_manager::bind_mutator(fixture.mmtk, tls_opaque_pointer);
61+
let mutator = memory_manager::bind_mutator(fixture.get_mmtk(), tls_opaque_pointer);
6262
// Store the struct (or use memcpy for non-Rust code)
6363
let mut storage = MutatorInTLS { embed: *mutator };
6464
// Allocate
@@ -94,7 +94,7 @@ pub fn embed_fastpath_struct() {
9494
}
9595

9696
// Bind an MMTk mutator
97-
let mutator = memory_manager::bind_mutator(fixture.mmtk, tls_opaque_pointer);
97+
let mutator = memory_manager::bind_mutator(fixture.get_mmtk(), tls_opaque_pointer);
9898
// Create a fastpath BumpPointer with default(). The BumpPointer from default() will guarantee to fail on the first allocation
9999
// so the allocation goes to the slowpath and we will get an allocation buffer from MMTk.
100100
let default_bump_pointer = BumpPointer::default();
@@ -116,7 +116,7 @@ pub fn embed_fastpath_struct() {
116116
} else {
117117
use crate::util::alloc::Allocator;
118118
let selector = memory_manager::get_allocator_mapping(
119-
fixture.mmtk,
119+
fixture.get_mmtk(),
120120
AllocationSemantics::Default,
121121
);
122122
let default_allocator = unsafe {

src/vm/tests/mock_tests/mock_test_malloc_counted.rs

Lines changed: 24 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -12,15 +12,15 @@ pub fn malloc_free() {
1212
default_setup,
1313
|| {
1414
MMTK.with_fixture(|fixture| {
15-
let bytes_before = memory_manager::get_malloc_bytes(&fixture.mmtk);
15+
let bytes_before = memory_manager::get_malloc_bytes(fixture.get_mmtk());
1616

17-
let res = memory_manager::counted_malloc(&fixture.mmtk, 8);
17+
let res = memory_manager::counted_malloc(fixture.get_mmtk(), 8);
1818
assert!(!res.is_zero());
19-
let bytes_after_alloc = memory_manager::get_malloc_bytes(&fixture.mmtk);
19+
let bytes_after_alloc = memory_manager::get_malloc_bytes(fixture.get_mmtk());
2020
assert_eq!(bytes_before + 8, bytes_after_alloc);
2121

22-
memory_manager::free_with_size(&fixture.mmtk, res, 8);
23-
let bytes_after_free = memory_manager::get_malloc_bytes(&fixture.mmtk);
22+
memory_manager::free_with_size(fixture.get_mmtk(), res, 8);
23+
let bytes_after_free = memory_manager::get_malloc_bytes(fixture.get_mmtk());
2424
assert_eq!(bytes_before, bytes_after_free);
2525
});
2626
},
@@ -34,15 +34,15 @@ pub fn calloc_free() {
3434
default_setup,
3535
|| {
3636
MMTK.with_fixture(|fixture| {
37-
let bytes_before = memory_manager::get_malloc_bytes(&fixture.mmtk);
37+
let bytes_before = memory_manager::get_malloc_bytes(fixture.get_mmtk());
3838

39-
let res = memory_manager::counted_calloc(&fixture.mmtk, 1, 8);
39+
let res = memory_manager::counted_calloc(fixture.get_mmtk(), 1, 8);
4040
assert!(!res.is_zero());
41-
let bytes_after_alloc = memory_manager::get_malloc_bytes(&fixture.mmtk);
41+
let bytes_after_alloc = memory_manager::get_malloc_bytes(fixture.get_mmtk());
4242
assert_eq!(bytes_before + 8, bytes_after_alloc);
4343

44-
memory_manager::free_with_size(&fixture.mmtk, res, 8);
45-
let bytes_after_free = memory_manager::get_malloc_bytes(&fixture.mmtk);
44+
memory_manager::free_with_size(fixture.get_mmtk(), res, 8);
45+
let bytes_after_free = memory_manager::get_malloc_bytes(fixture.get_mmtk());
4646
assert_eq!(bytes_before, bytes_after_free);
4747
});
4848
},
@@ -56,21 +56,21 @@ pub fn realloc_grow() {
5656
default_setup,
5757
|| {
5858
MMTK.with_fixture(|fixture| {
59-
let bytes_before = memory_manager::get_malloc_bytes(&fixture.mmtk);
59+
let bytes_before = memory_manager::get_malloc_bytes(fixture.get_mmtk());
6060

61-
let res1 = memory_manager::counted_malloc(&fixture.mmtk, 8);
61+
let res1 = memory_manager::counted_malloc(&fixture.get_mmtk(), 8);
6262
assert!(!res1.is_zero());
63-
let bytes_after_alloc = memory_manager::get_malloc_bytes(&fixture.mmtk);
63+
let bytes_after_alloc = memory_manager::get_malloc_bytes(fixture.get_mmtk());
6464
assert_eq!(bytes_before + 8, bytes_after_alloc);
6565

6666
// grow to 16 bytes
67-
let res2 = memory_manager::realloc_with_old_size(&fixture.mmtk, res1, 16, 8);
67+
let res2 = memory_manager::realloc_with_old_size(fixture.get_mmtk(), res1, 16, 8);
6868
assert!(!res2.is_zero());
69-
let bytes_after_realloc = memory_manager::get_malloc_bytes(&fixture.mmtk);
69+
let bytes_after_realloc = memory_manager::get_malloc_bytes(fixture.get_mmtk());
7070
assert_eq!(bytes_before + 16, bytes_after_realloc);
7171

72-
memory_manager::free_with_size(&fixture.mmtk, res2, 16);
73-
let bytes_after_free = memory_manager::get_malloc_bytes(&fixture.mmtk);
72+
memory_manager::free_with_size(&fixture.get_mmtk(), res2, 16);
73+
let bytes_after_free = memory_manager::get_malloc_bytes(fixture.get_mmtk());
7474
assert_eq!(bytes_before, bytes_after_free);
7575
});
7676
},
@@ -84,21 +84,21 @@ pub fn realloc_shrink() {
8484
default_setup,
8585
|| {
8686
MMTK.with_fixture(|fixture| {
87-
let bytes_before = memory_manager::get_malloc_bytes(&fixture.mmtk);
87+
let bytes_before = memory_manager::get_malloc_bytes(fixture.get_mmtk());
8888

89-
let res1 = memory_manager::counted_malloc(&fixture.mmtk, 16);
89+
let res1 = memory_manager::counted_malloc(fixture.get_mmtk(), 16);
9090
assert!(!res1.is_zero());
91-
let bytes_after_alloc = memory_manager::get_malloc_bytes(&fixture.mmtk);
91+
let bytes_after_alloc = memory_manager::get_malloc_bytes(fixture.get_mmtk());
9292
assert_eq!(bytes_before + 16, bytes_after_alloc);
9393

9494
// shrink to 8 bytes
95-
let res2 = memory_manager::realloc_with_old_size(&fixture.mmtk, res1, 8, 16);
95+
let res2 = memory_manager::realloc_with_old_size(fixture.get_mmtk(), res1, 8, 16);
9696
assert!(!res2.is_zero());
97-
let bytes_after_realloc = memory_manager::get_malloc_bytes(&fixture.mmtk);
97+
let bytes_after_realloc = memory_manager::get_malloc_bytes(fixture.get_mmtk());
9898
assert_eq!(bytes_before + 8, bytes_after_realloc);
9999

100-
memory_manager::free_with_size(&fixture.mmtk, res2, 8);
101-
let bytes_after_free = memory_manager::get_malloc_bytes(&fixture.mmtk);
100+
memory_manager::free_with_size(fixture.get_mmtk(), res2, 8);
101+
let bytes_after_free = memory_manager::get_malloc_bytes(fixture.get_mmtk());
102102
assert_eq!(bytes_before, bytes_after_free);
103103
});
104104
},

0 commit comments

Comments
 (0)