From 658bce8971d14c6cbd522ca9d3371f0b760b9c90 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Mon, 18 Dec 2023 16:33:38 +0800 Subject: [PATCH] Introduce MockVM (#1049) This PR introduces `MockVM`, a type that implements `VMBinding` and allows users to control the behavior of each method for testing. This PR also moves all the tests in the current `DummyVM` to `MockVM`, and removes the current `DummyVM`. This PR closes https://github.com/mmtk/mmtk-core/issues/99. Note that the current `MockVM` implementation does not allow changing constants or associated types in `VMBinding` -- I would suggest we create another issue to track this problem. Changes: * Introduce `MockVM`, and write all the current `DummyVM` tests with `MockVM`. * Remove `DummyVM`, and remove `./examples` which uses `DummyVM`. * Change CI scripts to run tests with `MockVM`. * Remove `pub` visibility for some modules. Those modules were exposed so we can test them from `DummyVM`. As now `MockVM` is a part of the main crate, we can test private items. We no longer need `pub` for those modules. --- .github/scripts/ci-style.sh | 6 +- .github/scripts/ci-test.sh | 22 +- Cargo.toml | 9 + benches/alloc.rs | 17 + benches/main.rs | 37 ++ benches/sft.rs | 19 + .../src/portingguide/perf_tuning/alloc.md | 8 +- examples/allocation_benchmark.c | 17 - examples/bench.sh | 23 - examples/build.py | 102 --- examples/main.c | 27 - examples/reference_bump_allocator.c | 70 -- src/lib.rs | 2 - src/scheduler/gc_work.rs | 6 +- src/util/malloc/malloc_ms_util.rs | 4 +- src/util/malloc/mod.rs | 3 +- src/util/metadata/header_metadata.rs | 2 +- .../side_metadata/side_metadata_tests.rs | 2 +- src/util/mod.rs | 6 +- src/util/test_util/fixtures.rs | 276 ++++++++ src/util/test_util/mock_method.rs | 173 +++++ src/util/test_util/mock_vm.rs | 615 ++++++++++++++++++ src/util/{test_util.rs => test_util/mod.rs} | 7 + src/vm/mod.rs | 3 + src/vm/tests/malloc_api.rs | 24 + .../mock_test_allocate_align_offset.rs | 80 +++ ...k_test_allocate_with_disable_collection.rs | 36 + ...est_allocate_with_initialize_collection.rs | 53 ++ ...test_allocate_with_re_enable_collection.rs | 60 ++ ..._allocate_without_initialize_collection.rs | 52 ++ .../mock_tests/mock_test_allocator_info.rs | 58 ++ .../mock_test_barrier_slow_path_assertion.rs | 62 ++ .../mock_tests/mock_test_conservatism.rs | 214 ++++++ ...mock_test_doc_avoid_resolving_allocator.rs | 47 ++ .../mock_test_doc_mutator_storage.rs | 151 +++++ src/vm/tests/mock_tests/mock_test_edges.rs | 429 ++++++++++++ .../mock_test_handle_mmap_conflict.rs | 36 + .../mock_tests/mock_test_handle_mmap_oom.rs | 45 ++ .../mock_tests/mock_test_is_in_mmtk_spaces.rs | 117 ++++ ..._allocate_non_multiple_of_min_alignment.rs | 24 + ...7_allocate_unrealistically_large_object.rs | 140 ++++ .../mock_tests/mock_test_malloc_counted.rs | 107 +++ .../tests/mock_tests/mock_test_malloc_ms.rs | 47 ++ .../mock_tests/mock_test_nogc_lock_free.rs | 40 ++ .../mock_test_vm_layout_compressed_pointer.rs | 39 ++ .../mock_tests/mock_test_vm_layout_default.rs | 41 ++ .../mock_test_vm_layout_heap_start.rs | 32 + .../mock_test_vm_layout_log_address_space.rs | 24 + src/vm/tests/mock_tests/mod.rs | 51 ++ src/vm/tests/mod.rs | 5 + vmbindings/dummyvm/Cargo.toml | 43 -- vmbindings/dummyvm/api/mmtk.h | 125 ---- vmbindings/dummyvm/benches/bench_alloc.rs | 18 - vmbindings/dummyvm/benches/bench_sft.rs | 20 - vmbindings/dummyvm/benches/main.rs | 15 - vmbindings/dummyvm/src/active_plan.rs | 25 - vmbindings/dummyvm/src/api.rs | 266 -------- vmbindings/dummyvm/src/collection.rs | 26 - 
vmbindings/dummyvm/src/edges.rs | 231 ------- vmbindings/dummyvm/src/lib.rs | 54 -- vmbindings/dummyvm/src/object_model.rs | 80 --- vmbindings/dummyvm/src/reference_glue.rs | 20 - vmbindings/dummyvm/src/scanning.rs | 39 -- vmbindings/dummyvm/src/test_fixtures.rs | 237 ------- .../src/tests/allocate_align_offset.rs | 64 -- .../tests/allocate_with_disable_collection.rs | 22 - .../allocate_with_initialize_collection.rs | 20 - .../allocate_with_re_enable_collection.rs | 26 - .../allocate_without_initialize_collection.rs | 19 - .../dummyvm/src/tests/allocator_info.rs | 58 -- .../src/tests/barrier_slow_path_assertion.rs | 64 -- vmbindings/dummyvm/src/tests/conservatism.rs | 167 ----- .../tests/doc_avoid_resolving_allocator.rs | 47 -- .../dummyvm/src/tests/doc_mutator_storage.rs | 127 ---- vmbindings/dummyvm/src/tests/edges_test.rs | 216 ------ .../dummyvm/src/tests/handle_mmap_conflict.rs | 24 - .../dummyvm/src/tests/handle_mmap_oom.rs | 30 - .../dummyvm/src/tests/is_in_mmtk_spaces.rs | 81 --- ...issue139_allocate_unaligned_object_size.rs | 17 - ...7_allocate_unrealistically_large_object.rs | 82 --- vmbindings/dummyvm/src/tests/malloc_api.rs | 24 - .../dummyvm/src/tests/malloc_counted.rs | 84 --- vmbindings/dummyvm/src/tests/malloc_ms.rs | 40 -- vmbindings/dummyvm/src/tests/mod.rs | 38 -- .../dummyvm/src/tests/nogc_lock_free.rs | 35 - .../tests/vm_layout_compressed_pointer_64.rs | 33 - .../dummyvm/src/tests/vm_layout_default.rs | 25 - .../dummyvm/src/tests/vm_layout_heap_start.rs | 25 - .../src/tests/vm_layout_log_address_space.rs | 17 - 89 files changed, 3198 insertions(+), 2856 deletions(-) create mode 100644 benches/alloc.rs create mode 100644 benches/main.rs create mode 100644 benches/sft.rs delete mode 100644 examples/allocation_benchmark.c delete mode 100755 examples/bench.sh delete mode 100755 examples/build.py delete mode 100644 examples/main.c delete mode 100644 examples/reference_bump_allocator.c create mode 100644 src/util/test_util/fixtures.rs create mode 100644 src/util/test_util/mock_method.rs create mode 100644 src/util/test_util/mock_vm.rs rename src/util/{test_util.rs => test_util/mod.rs} (96%) create mode 100644 src/vm/tests/malloc_api.rs create mode 100644 src/vm/tests/mock_tests/mock_test_allocate_align_offset.rs create mode 100644 src/vm/tests/mock_tests/mock_test_allocate_with_disable_collection.rs create mode 100644 src/vm/tests/mock_tests/mock_test_allocate_with_initialize_collection.rs create mode 100644 src/vm/tests/mock_tests/mock_test_allocate_with_re_enable_collection.rs create mode 100644 src/vm/tests/mock_tests/mock_test_allocate_without_initialize_collection.rs create mode 100644 src/vm/tests/mock_tests/mock_test_allocator_info.rs create mode 100644 src/vm/tests/mock_tests/mock_test_barrier_slow_path_assertion.rs create mode 100644 src/vm/tests/mock_tests/mock_test_conservatism.rs create mode 100644 src/vm/tests/mock_tests/mock_test_doc_avoid_resolving_allocator.rs create mode 100644 src/vm/tests/mock_tests/mock_test_doc_mutator_storage.rs create mode 100644 src/vm/tests/mock_tests/mock_test_edges.rs create mode 100644 src/vm/tests/mock_tests/mock_test_handle_mmap_conflict.rs create mode 100644 src/vm/tests/mock_tests/mock_test_handle_mmap_oom.rs create mode 100644 src/vm/tests/mock_tests/mock_test_is_in_mmtk_spaces.rs create mode 100644 src/vm/tests/mock_tests/mock_test_issue139_allocate_non_multiple_of_min_alignment.rs create mode 100644 src/vm/tests/mock_tests/mock_test_issue867_allocate_unrealistically_large_object.rs create mode 100644 
src/vm/tests/mock_tests/mock_test_malloc_counted.rs create mode 100644 src/vm/tests/mock_tests/mock_test_malloc_ms.rs create mode 100644 src/vm/tests/mock_tests/mock_test_nogc_lock_free.rs create mode 100644 src/vm/tests/mock_tests/mock_test_vm_layout_compressed_pointer.rs create mode 100644 src/vm/tests/mock_tests/mock_test_vm_layout_default.rs create mode 100644 src/vm/tests/mock_tests/mock_test_vm_layout_heap_start.rs create mode 100644 src/vm/tests/mock_tests/mock_test_vm_layout_log_address_space.rs create mode 100644 src/vm/tests/mock_tests/mod.rs create mode 100644 src/vm/tests/mod.rs delete mode 100644 vmbindings/dummyvm/Cargo.toml delete mode 100644 vmbindings/dummyvm/api/mmtk.h delete mode 100644 vmbindings/dummyvm/benches/bench_alloc.rs delete mode 100644 vmbindings/dummyvm/benches/bench_sft.rs delete mode 100644 vmbindings/dummyvm/benches/main.rs delete mode 100644 vmbindings/dummyvm/src/active_plan.rs delete mode 100644 vmbindings/dummyvm/src/api.rs delete mode 100644 vmbindings/dummyvm/src/collection.rs delete mode 100644 vmbindings/dummyvm/src/edges.rs delete mode 100644 vmbindings/dummyvm/src/lib.rs delete mode 100644 vmbindings/dummyvm/src/object_model.rs delete mode 100644 vmbindings/dummyvm/src/reference_glue.rs delete mode 100644 vmbindings/dummyvm/src/scanning.rs delete mode 100644 vmbindings/dummyvm/src/test_fixtures.rs delete mode 100644 vmbindings/dummyvm/src/tests/allocate_align_offset.rs delete mode 100644 vmbindings/dummyvm/src/tests/allocate_with_disable_collection.rs delete mode 100644 vmbindings/dummyvm/src/tests/allocate_with_initialize_collection.rs delete mode 100644 vmbindings/dummyvm/src/tests/allocate_with_re_enable_collection.rs delete mode 100644 vmbindings/dummyvm/src/tests/allocate_without_initialize_collection.rs delete mode 100644 vmbindings/dummyvm/src/tests/allocator_info.rs delete mode 100644 vmbindings/dummyvm/src/tests/barrier_slow_path_assertion.rs delete mode 100644 vmbindings/dummyvm/src/tests/conservatism.rs delete mode 100644 vmbindings/dummyvm/src/tests/doc_avoid_resolving_allocator.rs delete mode 100644 vmbindings/dummyvm/src/tests/doc_mutator_storage.rs delete mode 100644 vmbindings/dummyvm/src/tests/edges_test.rs delete mode 100644 vmbindings/dummyvm/src/tests/handle_mmap_conflict.rs delete mode 100644 vmbindings/dummyvm/src/tests/handle_mmap_oom.rs delete mode 100644 vmbindings/dummyvm/src/tests/is_in_mmtk_spaces.rs delete mode 100644 vmbindings/dummyvm/src/tests/issue139_allocate_unaligned_object_size.rs delete mode 100644 vmbindings/dummyvm/src/tests/issue867_allocate_unrealistically_large_object.rs delete mode 100644 vmbindings/dummyvm/src/tests/malloc_api.rs delete mode 100644 vmbindings/dummyvm/src/tests/malloc_counted.rs delete mode 100644 vmbindings/dummyvm/src/tests/malloc_ms.rs delete mode 100644 vmbindings/dummyvm/src/tests/mod.rs delete mode 100644 vmbindings/dummyvm/src/tests/nogc_lock_free.rs delete mode 100644 vmbindings/dummyvm/src/tests/vm_layout_compressed_pointer_64.rs delete mode 100644 vmbindings/dummyvm/src/tests/vm_layout_default.rs delete mode 100644 vmbindings/dummyvm/src/tests/vm_layout_heap_start.rs delete mode 100644 vmbindings/dummyvm/src/tests/vm_layout_log_address_space.rs diff --git a/.github/scripts/ci-style.sh b/.github/scripts/ci-style.sh index 077c08e599..617660bd02 100755 --- a/.github/scripts/ci-style.sh +++ b/.github/scripts/ci-style.sh @@ -27,6 +27,11 @@ if [[ $arch == "x86_64" && $os == "linux" ]]; then cargo clippy --tests --features perf_counter fi +# mock tests +cargo clippy --features 
mock_test +cargo clippy --features mock_test --tests +cargo clippy --features mock_test --benches + # --- Check auxiliary crate --- style_check_auxiliary_crate() { @@ -37,4 +42,3 @@ style_check_auxiliary_crate() { } style_check_auxiliary_crate macros -style_check_auxiliary_crate vmbindings/dummyvm diff --git a/.github/scripts/ci-test.sh b/.github/scripts/ci-test.sh index b6f201dc5b..42bca18c91 100755 --- a/.github/scripts/ci-test.sh +++ b/.github/scripts/ci-test.sh @@ -11,23 +11,18 @@ if [[ $arch == "x86_64" && $os == "linux" ]]; then cargo test --features perf_counter fi -./examples/build.py - ALL_PLANS=$(sed -n '/enum PlanSelector/,/}/p' src/util/options.rs | sed -e 's;//.*;;g' -e '/^$/d' -e 's/,//g' | xargs | grep -o '{.*}' | grep -o '\w\+') -# Test with DummyVM (each test in a separate run) -cd vmbindings/dummyvm -for fn in $(ls src/tests/*.rs); do - t=$(basename -s .rs $fn) - - if [[ $t == "mod" ]]; then - continue - fi +# Test with mock VM: +# - Find all the files that start with mock_test_ +# - Run each file separately with cargo test, with the feature 'mock_test' +find ./src ./tests -type f -name "mock_test_*" | while read -r file; do + t=$(basename -s .rs $file) # Get the required plans. # Some tests need to be run with multiple plans because # some bugs can only be reproduced in some plans but not others. - PLANS=$(sed -n 's/^\/\/ *GITHUB-CI: *MMTK_PLAN=//p' $fn) + PLANS=$(sed -n 's/^\/\/ *GITHUB-CI: *MMTK_PLAN=//p' $file) if [[ $PLANS == 'all' ]]; then PLANS=$ALL_PLANS elif [[ -z $PLANS ]]; then @@ -35,11 +30,10 @@ for fn in $(ls src/tests/*.rs); do fi # Some tests need some features enabled. - FEATURES=$(sed -n 's/^\/\/ *GITHUB-CI: *FEATURES=//p' $fn) + FEATURES=$(sed -n 's/^\/\/ *GITHUB-CI: *FEATURES=//p' $file) # Run the test with each plan it needs. for MMTK_PLAN in $PLANS; do - env MMTK_PLAN=$MMTK_PLAN cargo test --features "$FEATURES" -- $t; + env MMTK_PLAN=$MMTK_PLAN cargo test --features mock_test,"$FEATURES" -- $t; done done - diff --git a/Cargo.toml b/Cargo.toml index 643d75c3ef..551226ac18 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -54,10 +54,15 @@ sysinfo = "0.29" [dev-dependencies] paste = "1.0.8" rand = "0.8.5" +criterion = "0.4" [build-dependencies] built = { version = "0.7.1", features = ["git2"] } +[[bench]] +name = "main" +harness = false + [features] default = [] @@ -65,6 +70,10 @@ default = [] # It's manually added to CI scripts perf_counter = ["pfm"] +# This feature is only used for tests with MockVM. +# CI scripts run those tests with this feature. +mock_test = [] + # .github/scripts/ci-common.sh extracts features from the following part (including from comments). # So be careful when editing or adding stuff to the section below. 
diff --git a/benches/alloc.rs b/benches/alloc.rs
new file mode 100644
index 0000000000..adf388efdd
--- /dev/null
+++ b/benches/alloc.rs
@@ -0,0 +1,17 @@
+use criterion::Criterion;
+
+use mmtk::memory_manager;
+use mmtk::util::test_util::fixtures::*;
+use mmtk::AllocationSemantics;
+
+pub fn bench(c: &mut Criterion) {
+    // Disable GC so the allocations won't trigger a collection.
+    let mut fixture = MutatorFixture::create_with_heapsize(1 << 30);
+    memory_manager::disable_collection(fixture.mmtk());
+    c.bench_function("alloc", |b| {
+        b.iter(|| {
+            let _addr =
+                memory_manager::alloc(&mut fixture.mutator, 8, 8, 0, AllocationSemantics::Default);
+        })
+    });
+}
diff --git a/benches/main.rs b/benches/main.rs
new file mode 100644
index 0000000000..92b836a40c
--- /dev/null
+++ b/benches/main.rs
@@ -0,0 +1,37 @@
+use criterion::criterion_group;
+use criterion::criterion_main;
+use criterion::Criterion;
+
+// As we can only initialize one MMTk instance, we have to run each benchmark in a separate process.
+// So we only register one benchmark with criterion ('bench_main'), and based on the env var MMTK_BENCH,
+// we pick the right benchmark to run.
+
+// The benchmarks can be executed with the following commands. The feature `mock_test` is required, as the benchmarks use MockVM.
+// MMTK_BENCH=alloc cargo bench --features mock_test
+// MMTK_BENCH=sft   cargo bench --features mock_test
+
+// [Yi] I am not sure if these benchmarks are still helpful after the MockVM refactoring. MockVM is really slow, as it
+// is accessed with a lock, and it dispatches every call to function pointers in a struct. These benchmarks use MockVM,
+// so they become slower as well, and the slowdown
+// from MockVM may hide the actual performance difference when we change the functions that are benchmarked.
+// We may want to improve the MockVM implementation so we can skip dispatching for benchmarking, or introduce another MockVM
+// implementation for benchmarking.
+// However, I will just keep these benchmarks here. If we find them not useful, and we do not plan to improve MockVM, we can delete
+// them.
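+// To add a new benchmark, say `foo` (the name is just an illustration): create `benches/foo.rs`
+// with a `pub fn bench(c: &mut Criterion)`, declare `mod foo;` below, and add a "foo" arm to the
+// match in `bench_main`.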
+
+mod alloc;
+mod sft;
+
+fn bench_main(c: &mut Criterion) {
+    match std::env::var("MMTK_BENCH") {
+        Ok(bench) => match bench.as_str() {
+            "alloc" => alloc::bench(c),
+            "sft" => sft::bench(c),
+            _ => panic!("Unknown benchmark {:?}", bench),
+        },
+        Err(_) => panic!("Need to name a benchmark by the env var MMTK_BENCH"),
+    }
+}
+
+criterion_group!(benches, bench_main);
+criterion_main!(benches);
diff --git a/benches/sft.rs b/benches/sft.rs
new file mode 100644
index 0000000000..9788779011
--- /dev/null
+++ b/benches/sft.rs
@@ -0,0 +1,19 @@
+use criterion::black_box;
+use criterion::Criterion;
+
+use mmtk::memory_manager;
+use mmtk::util::test_util::fixtures::*;
+use mmtk::util::test_util::mock_vm::*;
+use mmtk::vm::ObjectModel;
+use mmtk::vm::VMBinding;
+use mmtk::AllocationSemantics;
+
+pub fn bench(c: &mut Criterion) {
+    let mut fixture = MutatorFixture::create();
+    let addr = memory_manager::alloc(&mut fixture.mutator, 8, 8, 0, AllocationSemantics::Default);
+    let obj = <MockVM as VMBinding>::VMObjectModel::address_to_ref(addr);
+
+    c.bench_function("sft read", |b| {
+        b.iter(|| memory_manager::is_in_mmtk_spaces::<MockVM>(black_box(obj)))
+    });
+}
diff --git a/docs/userguide/src/portingguide/perf_tuning/alloc.md b/docs/userguide/src/portingguide/perf_tuning/alloc.md
index 201c2464e4..8c372111fa 100644
--- a/docs/userguide/src/portingguide/perf_tuning/alloc.md
+++ b/docs/userguide/src/portingguide/perf_tuning/alloc.md
@@ -33,7 +33,7 @@ If the VM is not implemented in Rust, the binding needs to turn the boxed
 pointer into a raw pointer before storing it.
 
 ```rust
-{{#include ../../../../../vmbindings/dummyvm/src/tests/doc_mutator_storage.rs:mutator_storage_boxed_pointer}}
+{{#include ../../../../../src/vm/tests/mock_tests/mock_test_doc_mutator_storage.rs:mutator_storage_boxed_pointer}}
 ```
 
 ### Option 2: Embed the `Mutator` struct
@@ -44,7 +44,7 @@ If the implementation language is not Rust, the developer needs to create a type
 have an assertion to ensure that the native type has the exact same layout as the Rust type `Mutator`.
 
 ```rust
-{{#include ../../../../../vmbindings/dummyvm/src/tests/doc_mutator_storage.rs:mutator_storage_embed_mutator_struct}}
+{{#include ../../../../../src/vm/tests/mock_tests/mock_test_doc_mutator_storage.rs:mutator_storage_embed_mutator_struct}}
 ```
 
 ### Option 3: Embed the fast-path struct
@@ -78,7 +78,7 @@ which includes (but not limited to) `NoGC`, `SemiSpace`, `Immix`, generational plans, etc.
 If a plan does not do bump-pointer allocation, we may still implement fast-paths, but we need to embed different data structures instead of `BumpPointer`.
 
 ```rust
-{{#include ../../../../../vmbindings/dummyvm/src/tests/doc_mutator_storage.rs:mutator_storage_embed_fast-path_struct}}
+{{#include ../../../../../src/vm/tests/mock_tests/mock_test_doc_mutator_storage.rs:mutator_storage_embed_fastpath_struct}}
 ```
 
 And pseudo-code for how you would reset the `BumpPointer`s for all mutators in `resume_mutators`. Note that these mutators are the runtime's actual mutator threads (i.e. where the cached bump pointers are stored) and are different from MMTk's `Mutator` struct.
@@ -120,7 +120,7 @@ Once MMTk is initialized, a binding can get the memory offset for the default allocator. Then, when we allocate
 with the default allocation semantics, we can use the offset to get a reference to the actual allocator (with unsafe code), and allocate with the allocator.
 ```rust
-{{#include ../../../../../vmbindings/dummyvm/src/tests/doc_avoid_resolving_allocator.rs:avoid_resolving_allocator}}
+{{#include ../../../../../src/vm/tests/mock_tests/mock_test_doc_avoid_resolving_allocator.rs:avoid_resolving_allocator}}
 ```
 
 ## Emitting Allocation Sequence in a JIT Compiler
diff --git a/examples/allocation_benchmark.c b/examples/allocation_benchmark.c
deleted file mode 100644
index e113aa8763..0000000000
--- a/examples/allocation_benchmark.c
+++ /dev/null
@@ -1,17 +0,0 @@
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include "mmtk.h"
-
-int main() {
-  volatile uint64_t * tmp;
-  mmtk_init(1024*1024*1024);
-  MMTk_Mutator handle = mmtk_bind_mutator(0);
-
-  for (int i=0; i<1024*1024*100; i++) {
-    tmp = mmtk_alloc(handle, 8, 1, 0, 0);
-    #ifdef STORE
-    *tmp = 42;
-    #endif
-  }
-}
diff --git a/examples/bench.sh b/examples/bench.sh
deleted file mode 100755
index afe73d9a5e..0000000000
--- a/examples/bench.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/bin/bash
-
-echo "Using Rust bump pointer allocator"
-clang -O3 -lmmtk -L../target/release -o bench-exe -I../api ./allocation_benchmark.c
-export LD_LIBRARY_PATH=../target/release
-time ./bench-exe
-
-echo "Using C bump pointer allocator"
-clang -O3 -shared -fPIC -o ./libmmtk.so ./reference_bump_allocator.c
-clang -O3 -lmmtk -L. -o bench-exe -I../api ./allocation_benchmark.c
-export LD_LIBRARY_PATH=.
-time ./bench-exe
-
-echo "Using Rust bump pointer allocator with storing"
-clang -O3 -lmmtk -L../target/release -o bench-exe -D STORE -I../api ./allocation_benchmark.c
-export LD_LIBRARY_PATH=../target/release
-time ./bench-exe
-
-echo "Using C bump pointer allocator with storing"
-clang -O3 -shared -fPIC -o ./libmmtk.so ./reference_bump_allocator.c
-clang -O3 -lmmtk -L. -o bench-exe -D STORE -I../api ./allocation_benchmark.c
-export LD_LIBRARY_PATH=.
-time ./bench-exe
diff --git a/examples/build.py b/examples/build.py
deleted file mode 100755
index 480023c6a4..0000000000
--- a/examples/build.py
+++ /dev/null
@@ -1,102 +0,0 @@
-#!/usr/bin/env python3
-
-import platform
-import subprocess
-import shutil
-import os
-import sys
-
-MMTk_ROOT = os.path.join(__file__, "..", "..")
-
-PLANS = []
-
-# Find all plans from options.rs
-options = ""
-with open(os.path.abspath(os.path.join(MMTk_ROOT, "src", "util", "options.rs")), 'r') as file:
-    options = file.read().replace('\n', '')
-import re
-search = re.search("enum PlanSelector \{([^\}]*)\}", options)
-if search:
-    raw_plans = search.group(1)
-    # Python split() results in an empty string as the last element. Use filter() to remove it.
-    PLANS = list(filter(None, [x.strip() for x in raw_plans.split(",")]))
-else:
-    print("cannot find PlanSelector in options.rs")
-    sys.exit(1)
-
-os.chdir(os.path.abspath(MMTk_ROOT))
-
-extra_features = ""
-if len(sys.argv) > 1:
-    extra_features = sys.argv[1]
-
-
-def exec_and_redirect(args, env=None):
-    print("[exec_and_redirect] {} (env = {})".format(args, env))
-    p = subprocess.Popen(args,
-                         env=env)
-    p.wait()
-    if p.returncode != 0:
-        exit(p.returncode)
-
-# Get the active toolchain, something like this: stable-x86_64-unknown-linux-gnu
-active_toolchain = str(subprocess.check_output(["rustup", "show", "active-toolchain"]).decode('utf-8')).split(' ')[0]
-print("Active rust toolchain: " + active_toolchain)
-if "x86_64" in active_toolchain:
-    m32 = False
-elif "i686" in active_toolchain:
-    m32 = True
-else:
-    print("Unknown toolchain: " + active_toolchain)
-    sys.exit(1)
-
-system = platform.system()
-assert system == "Darwin" or system == "Linux"
-
-SUFFIX = ".so"
-if system == "Darwin":
-    SUFFIX = ".dylib"
-elif system == "Linux":
-    SUFFIX = ".so"
-
-LIBRARY_PATH = "LD_LIBRARY_PATH"
-if system == "Darwin":
-    LIBRARY_PATH = "DYLD_LIBRARY_PATH"
-elif system == "Linux":
-    LIBRARY_PATH = "LD_LIBRARY_PATH"
-
-vmbinding = "vmbindings/dummyvm"
-
-cmd = []
-cmd.append("cargo")
-cmd.extend([
-    "build",
-    "--manifest-path",
-    "vmbindings/dummyvm/Cargo.toml",
-    "--features", " ".join(extra_features)
-])
-
-exec_and_redirect(cmd)
-exec_and_redirect(cmd + ["--release"])
-shutil.copyfile("{}/target/release/libmmtk_dummyvm{}".format(vmbinding, SUFFIX),
-                "./libmmtk{}".format(SUFFIX))
-
-cmd = [
-    "gcc",
-    "./examples/main.c",
-    "-lmmtk",
-    "-L.",
-    "-I{}/api".format(vmbinding),
-    "-O3",
-    "-o",
-    "test_mmtk",
-]
-if m32:
-    cmd.append("-m32")
-
-exec_and_redirect(cmd)
-
-for plan in PLANS:
-    exec_and_redirect(["./test_mmtk"], env={LIBRARY_PATH: ".", "MMTK_PLAN": plan})
-
-os.remove("./test_mmtk")
diff --git a/examples/main.c b/examples/main.c
deleted file mode 100644
index be9820d2c9..0000000000
--- a/examples/main.c
+++ /dev/null
@@ -1,27 +0,0 @@
-#include <stdio.h>
-#include "mmtk.h"
-
-int main(int argc, char* argv[]){
-    mmtk_init(1024*1024);
-
-    MMTk_Mutator handle = mmtk_bind_mutator(0);
-
-    for (int i=0;i<4;i++){
-        int arr_size = 10000;
-        int* my_arr = mmtk_alloc(handle, sizeof(int)*arr_size, 8, 0, 0);
-        if (!my_arr){
-            printf("OOM\n");
-            break;
-        }
-        for (int j=0;j
diff --git a/examples/reference_bump_allocator.c b/examples/reference_bump_allocator.c
deleted file mode 100644
--- a/examples/reference_bump_allocator.c
+++ /dev/null
@@ -1,70 +0,0 @@
-#include <stdio.h>
-#include <stdlib.h>
-#include <sys/mman.h>
-#include "../api/mmtk.h"
-
-typedef struct {
-    void* heap_start;
-    void* heap_end;
-    void* heap_cursor;
-} Space;
-
-Space IMMORTAL_SPACE;
-
-size_t align_up (size_t addr, size_t align) {
-    return (addr + align - 1) & ~(align - 1);
-}
-
-extern void gc_init(size_t heap_size) {
-    size_t SPACE_ALIGN = 1 << 19;
-    void* alloced = mmap(NULL, heap_size + SPACE_ALIGN, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
-    if (!alloced) {
-        printf("Unable to allocate memory\n");
-        exit(1);
-    }
-    IMMORTAL_SPACE.heap_start = (void*) align_up((size_t) alloced, SPACE_ALIGN);
-    IMMORTAL_SPACE.heap_end = (void*) ((size_t) IMMORTAL_SPACE.heap_start + heap_size);
-    IMMORTAL_SPACE.heap_cursor = IMMORTAL_SPACE.heap_start;
-}
-
-extern MMTk_Mutator bind_mutator(void *tls) {
-    return NULL;
-}
-
-extern void* align_allocation(void* region, size_t align, size_t offset) {
-    ssize_t region_signed = (ssize_t) region;
-
-    ssize_t mask = (ssize_t) (align - 1);
-    ssize_t neg_off = -offset;
-    ssize_t delta = (neg_off - region_signed) & mask;
-
-    return (void*) ((ssize_t)region + delta);
-}
-
-extern void* alloc(MMTk_Mutator mutator,
size_t size,
-                size_t align, size_t offset, int allocator) {
-
-    void* result = align_allocation(IMMORTAL_SPACE.heap_cursor, align, offset);
-    void* new_cursor = (void*)((size_t) result + size);
-    if (new_cursor > IMMORTAL_SPACE.heap_end) {
-        return NULL;
-    }
-    IMMORTAL_SPACE.heap_cursor = new_cursor;
-    return (void*) result;
-}
-
-extern void* alloc_slow(MMTk_Mutator mutator, size_t size,
-                size_t align, size_t offset, int allocator) {
-
-    perror("Not implemented\n");
-    exit(1);
-    return NULL;
-}
-
-void* mmtk_malloc(size_t size) {
-    return alloc(NULL, size, 1, 0, 0);
-}
-
-void mmtk_free(void* ptr) {
-    return;
-}
\ No newline at end of file
diff --git a/src/lib.rs b/src/lib.rs
index 1b6f8b422f..c13550076e 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -58,9 +58,7 @@ pub mod build_info;
 pub mod memory_manager;
 pub mod plan;
 pub mod scheduler;
-#[deny(missing_docs)]
 pub mod util;
-#[deny(missing_docs)]
 pub mod vm;
 
 pub use crate::plan::{
diff --git a/src/scheduler/gc_work.rs b/src/scheduler/gc_work.rs
index 7d0add88a4..1de11db2b3 100644
--- a/src/scheduler/gc_work.rs
+++ b/src/scheduler/gc_work.rs
@@ -265,7 +265,7 @@ impl<VM: VMBinding> GCWork<VM> for EndOfGC
 
 /// This implements `ObjectTracer` by forwarding the `trace_object` calls to the wrapped
 /// `ProcessEdgesWork` instance.
-struct ProcessEdgesWorkTracer<E: ProcessEdgesWork> {
+pub(crate) struct ProcessEdgesWorkTracer<E: ProcessEdgesWork> {
     process_edges_work: E,
     stage: WorkBucketStage,
 }
@@ -310,7 +310,7 @@ impl<E: ProcessEdgesWork> ProcessEdgesWorkTracer<E>
 /// the call to `with_tracer`, making use of its `trace_object` method. It then creates work
 /// packets using the methods of the `ProcessEdgesWork` and add the work packet into the given
 /// `stage`.
-struct ProcessEdgesWorkTracerContext<E: ProcessEdgesWork> {
+pub(crate) struct ProcessEdgesWorkTracerContext<E: ProcessEdgesWork> {
     stage: WorkBucketStage,
     phantom_data: PhantomData<E>,
 }
@@ -737,7 +737,7 @@ impl<VM: VMBinding> ProcessEdgesWork for SFTProcessEdges<VM>
         ScanObjects::<Self>::new(nodes, false, roots, self.bucket)
     }
 }
-struct ProcessEdgesWorkRootsWorkFactory<
+pub(crate) struct ProcessEdgesWorkRootsWorkFactory<
     VM: VMBinding,
     E: ProcessEdgesWork,
     I: ProcessEdgesWork,
diff --git a/src/util/malloc/malloc_ms_util.rs b/src/util/malloc/malloc_ms_util.rs
index a744111557..68839d8d58 100644
--- a/src/util/malloc/malloc_ms_util.rs
+++ b/src/util/malloc/malloc_ms_util.rs
@@ -4,7 +4,7 @@ use crate::util::Address;
 use crate::vm::VMBinding;
 
 /// Allocate with alignment. This also guarantees the memory is zero initialized.
-pub fn align_alloc<VM: VMBinding>(size: usize, align: usize) -> Address {
+pub fn align_alloc(size: usize, align: usize) -> Address {
     let mut ptr = std::ptr::null_mut::<libc::c_void>();
     let ptr_ptr = std::ptr::addr_of_mut!(ptr);
     let result = unsafe { posix_memalign(ptr_ptr, align, size) };
@@ -76,7 +76,7 @@ pub fn alloc<VM: VMBinding>(size: usize, align: usize, offset: usize) -> (Address, bool)
         address = Address::from_mut_ptr(raw);
         debug_assert!(address.is_aligned_to(align));
     } else if align > 16 && offset == 0 {
-        address = align_alloc::<VM>(size, align);
+        address = align_alloc(size, align);
         #[cfg(feature = "malloc_hoard")]
         {
             is_offset_malloc = true;
diff --git a/src/util/malloc/mod.rs b/src/util/malloc/mod.rs
index 247e765975..63d0495df5 100644
--- a/src/util/malloc/mod.rs
+++ b/src/util/malloc/mod.rs
@@ -11,8 +11,7 @@
 /// Malloc provided by libraries
 pub(crate) mod library;
 /// Using malloc as mark sweep free-list allocator.
-// This module is made public so we can test it from dummyvm. It should be pub(crate).
-pub mod malloc_ms_util;
+pub(crate) mod malloc_ms_util;
 
 use crate::util::Address;
 #[cfg(feature = "malloc_counted_size")]
diff --git a/src/util/metadata/header_metadata.rs b/src/util/metadata/header_metadata.rs
index b1aa406d60..176a508fbb 100644
--- a/src/util/metadata/header_metadata.rs
+++ b/src/util/metadata/header_metadata.rs
@@ -454,7 +454,7 @@ impl fmt::Debug for HeaderMetadataSpec {
     }
 }
 
-#[cfg(test)]
+#[cfg(all(test, debug_assertions))]
 mod tests {
     use super::*;
     use crate::util::address::Address;
diff --git a/src/util/metadata/side_metadata/side_metadata_tests.rs b/src/util/metadata/side_metadata/side_metadata_tests.rs
index 69eeaabd5a..ab1abe01d3 100644
--- a/src/util/metadata/side_metadata/side_metadata_tests.rs
+++ b/src/util/metadata/side_metadata/side_metadata_tests.rs
@@ -1,4 +1,4 @@
-#[cfg(test)]
+#[cfg(all(test, debug_assertions))]
 mod tests {
     use atomic::Ordering;
 
diff --git a/src/util/mod.rs b/src/util/mod.rs
index 9a5ae0bb23..79922a8b06 100644
--- a/src/util/mod.rs
+++ b/src/util/mod.rs
@@ -25,6 +25,9 @@ pub mod memory;
 pub mod opaque_pointer;
 /// MMTk command line options.
 pub mod options;
+/// Test utilities. We need this module for `MockVM` in criterion benches, which do not compile code gated by `cfg(test)`.
+#[cfg(any(test, feature = "mock_test"))]
+pub mod test_util;
 
 // The following modules are only public in the mmtk crate. They should only be used in MMTk core.
 /// An analysis framework for collecting data and profiling in GC.
@@ -59,9 +62,6 @@ pub(crate) mod rust_util;
 pub(crate) mod sanity;
 /// Utils for collecting statistics.
 pub(crate) mod statistics;
-/// Test utilities.
-#[cfg(test)]
-pub(crate) mod test_util;
 /// A treadmill implementation.
 pub(crate) mod treadmill;
diff --git a/src/util/test_util/fixtures.rs b/src/util/test_util/fixtures.rs
new file mode 100644
index 0000000000..1867330fad
--- /dev/null
+++ b/src/util/test_util/fixtures.rs
@@ -0,0 +1,276 @@
+// Some tests are conditionally compiled, so not all the code in this module will be used. We simply allow dead code in this module.
+#![allow(dead_code)]
+
+use atomic_refcell::AtomicRefCell;
+use std::sync::Mutex;
+use std::sync::Once;
+
+use crate::memory_manager;
+use crate::util::test_util::mock_vm::MockVM;
+use crate::util::{ObjectReference, VMMutatorThread, VMThread};
+use crate::AllocationSemantics;
+use crate::MMTKBuilder;
+use crate::MMTK;
+
+pub trait FixtureContent {
+    fn create() -> Self;
+}
+
+pub struct Fixture<T: FixtureContent> {
+    content: AtomicRefCell<Option<Box<T>>>,
+    once: Once,
+}
+
+unsafe impl<T: FixtureContent> Sync for Fixture<T> {}
+
+impl<T: FixtureContent> Fixture<T> {
+    pub fn new() -> Self {
+        Self {
+            content: AtomicRefCell::new(None),
+            once: Once::new(),
+        }
+    }
+
+    pub fn with_fixture<F: Fn(&T)>(&self, func: F) {
+        self.once.call_once(|| {
+            let content = Box::new(T::create());
+            let mut borrow = self.content.borrow_mut();
+            *borrow = Some(content);
+        });
+        let borrow = self.content.borrow();
+        func(borrow.as_ref().unwrap())
+    }
+
+    pub fn with_fixture_mut<F: Fn(&mut T)>(&self, func: F) {
+        self.once.call_once(|| {
+            let content = Box::new(T::create());
+            let mut borrow = self.content.borrow_mut();
+            *borrow = Some(content);
+        });
+        let mut borrow = self.content.borrow_mut();
+        func(borrow.as_mut().unwrap())
+    }
+}
+
+impl<T: FixtureContent> Default for Fixture<T> {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+/// SerialFixture ensures all `with_fixture()` calls will be executed serially.
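+/// This matters for tests that mutate shared global state, such as the singleton
+/// `MockVM` instance, which must not be accessed by two tests at the same time.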
+pub struct SerialFixture<T: FixtureContent> {
+    content: Mutex<Option<Box<T>>>,
+}
+
+impl<T: FixtureContent> SerialFixture<T> {
+    pub fn new() -> Self {
+        Self {
+            content: Mutex::new(None),
+        }
+    }
+
+    pub fn with_fixture<F: Fn(&T)>(&self, func: F) {
+        let mut c = self.content.lock().unwrap();
+        if c.is_none() {
+            *c = Some(Box::new(T::create()));
+        }
+        func(c.as_ref().unwrap())
+    }
+
+    pub fn with_fixture_mut<F: Fn(&mut T)>(&self, func: F) {
+        let mut c = self.content.lock().unwrap();
+        if c.is_none() {
+            *c = Some(Box::new(T::create()));
+        }
+        func(c.as_mut().unwrap())
+    }
+
+    pub fn with_fixture_expect_benign_panic<
+        F: Fn(&T) + std::panic::UnwindSafe + std::panic::RefUnwindSafe,
+    >(
+        &self,
+        func: F,
+    ) {
+        let res = {
+            // The lock will be dropped at the end of the block, so a panic won't poison the lock.
+            let mut c = self.content.lock().unwrap();
+            if c.is_none() {
+                *c = Some(Box::new(T::create()));
+            }
+
+            std::panic::catch_unwind(|| func(c.as_ref().unwrap()))
+        };
+        // We do not hold the lock now. It is safe to resume unwinding.
+        if let Err(e) = res {
+            std::panic::resume_unwind(e);
+        }
+    }
+}
+
+impl<T: FixtureContent> Default for SerialFixture<T> {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+pub struct MMTKFixture {
+    pub mmtk: &'static MMTK<MockVM>,
+}
+
+impl FixtureContent for MMTKFixture {
+    fn create() -> Self {
+        Self::create_with_builder(
+            |builder| {
+                const MB: usize = 1024 * 1024;
+                builder
+                    .options
+                    .gc_trigger
+                    .set(crate::util::options::GCTriggerSelector::FixedHeapSize(MB));
+            },
+            true,
+        )
+    }
+}
+
+impl MMTKFixture {
+    pub fn create_with_builder<F>(with_builder: F, initialize_collection: bool) -> Self
+    where
+        F: FnOnce(&mut MMTKBuilder),
+    {
+        let mut builder = MMTKBuilder::new();
+        with_builder(&mut builder);
+
+        let mmtk = memory_manager::mmtk_init(&builder);
+        let mmtk_ptr = Box::into_raw(mmtk);
+        let mmtk_static: &'static MMTK<MockVM> = unsafe { &*mmtk_ptr };
+
+        if initialize_collection {
+            memory_manager::initialize_collection(mmtk_static, VMThread::UNINITIALIZED);
+        }
+
+        MMTKFixture { mmtk: mmtk_static }
+    }
+}
+
+impl Drop for MMTKFixture {
+    fn drop(&mut self) {
+        let mmtk_ptr: *const MMTK<MockVM> = self.mmtk as _;
+        let _ = unsafe { Box::from_raw(mmtk_ptr as *mut MMTK<MockVM>) };
+    }
+}
+
+use crate::plan::Mutator;
+
+pub struct MutatorFixture {
+    mmtk: MMTKFixture,
+    pub mutator: Box<Mutator<MockVM>>,
+}
+
+impl FixtureContent for MutatorFixture {
+    fn create() -> Self {
+        const MB: usize = 1024 * 1024;
+        Self::create_with_heapsize(MB)
+    }
+}
+
+impl MutatorFixture {
+    pub fn create_with_heapsize(size: usize) -> Self {
+        let mmtk = MMTKFixture::create_with_builder(
+            |builder| {
+                builder
+                    .options
+                    .gc_trigger
+                    .set(crate::util::options::GCTriggerSelector::FixedHeapSize(size));
+            },
+            true,
+        );
+        let mutator =
+            memory_manager::bind_mutator(mmtk.mmtk, VMMutatorThread(VMThread::UNINITIALIZED));
+        Self { mmtk, mutator }
+    }
+
+    pub fn create_with_builder<F>(with_builder: F) -> Self
+    where
+        F: FnOnce(&mut MMTKBuilder),
+    {
+        let mmtk = MMTKFixture::create_with_builder(with_builder, true);
+        let mutator =
+            memory_manager::bind_mutator(mmtk.mmtk, VMMutatorThread(VMThread::UNINITIALIZED));
+        Self { mmtk, mutator }
+    }
+
+    pub fn mmtk(&self) -> &'static MMTK<MockVM> {
+        self.mmtk.mmtk
+    }
+}
+
+unsafe impl Send for MutatorFixture {}
+
+pub struct SingleObject {
+    pub objref: ObjectReference,
+    mutator: MutatorFixture,
+}
+
+impl FixtureContent for SingleObject {
+    fn create() -> Self {
+        use crate::vm::object_model::ObjectModel;
+        let mut mutator = MutatorFixture::create();
+
+        // A relatively small object, typical for Ruby.
+        let size = 40;
+        let semantics = AllocationSemantics::Default;
+
+        let addr = memory_manager::alloc(&mut mutator.mutator, size, 8, 0, semantics);
+        assert!(!addr.is_zero());
+
+        let objref = MockVM::address_to_ref(addr);
+        memory_manager::post_alloc(&mut mutator.mutator, objref, size, semantics);
+
+        SingleObject { objref, mutator }
+    }
+}
+
+impl SingleObject {
+    pub fn mutator(&self) -> &Mutator<MockVM> {
+        &self.mutator.mutator
+    }
+
+    pub fn mutator_mut(&mut self) -> &mut Mutator<MockVM> {
+        &mut self.mutator.mutator
+    }
+}
+
+pub struct TwoObjects {
+    pub objref1: ObjectReference,
+    pub objref2: ObjectReference,
+    mutator: MutatorFixture,
+}
+
+impl FixtureContent for TwoObjects {
+    fn create() -> Self {
+        use crate::vm::object_model::ObjectModel;
+        let mut mutator = MutatorFixture::create();
+
+        let size = 128;
+        let semantics = AllocationSemantics::Default;
+
+        let addr1 = memory_manager::alloc(&mut mutator.mutator, size, 8, 0, semantics);
+        assert!(!addr1.is_zero());
+
+        let objref1 = MockVM::address_to_ref(addr1);
+        memory_manager::post_alloc(&mut mutator.mutator, objref1, size, semantics);
+
+        let addr2 = memory_manager::alloc(&mut mutator.mutator, size, 8, 0, semantics);
+        assert!(!addr2.is_zero());
+
+        let objref2 = MockVM::address_to_ref(addr2);
+        memory_manager::post_alloc(&mut mutator.mutator, objref2, size, semantics);
+
+        TwoObjects {
+            objref1,
+            objref2,
+            mutator,
+        }
+    }
+}
diff --git a/src/util/test_util/mock_method.rs b/src/util/test_util/mock_method.rs
new file mode 100644
index 0000000000..e1ffa02739
--- /dev/null
+++ b/src/util/test_util/mock_method.rs
@@ -0,0 +1,173 @@
+use std::any::Any;
+
+/// `MockAny` hides any type information. It is useful when we want to create
+/// a mock method for methods with generic type parameters.
+/// When `MockAny` is used for a method, the user needs to make sure that the types in the
+/// actual [`MockMethod`] behind the `MockAny` match the arguments that will be passed to the method;
+/// otherwise, the downcast from `Any` will fail. We use `MockAny` for some methods in [`super::mock_vm::MockVM`].
+/// The user should check if their intended arguments match the default `MockMethod` type, and if not,
+/// they should create their own `MockMethod`s for those methods.
+pub trait MockAny {
+    fn call_any(&mut self, args: Box<dyn Any>) -> Box<dyn Any>;
+}
+
+impl<I: 'static, R: 'static> MockAny for MockMethod<I, R> {
+    fn call_any(&mut self, args: Box<dyn Any>) -> Box<dyn Any> {
+        let typed_args: Box<I> = args.downcast().unwrap();
+        let typed_args_inner: I = *typed_args;
+        let typed_ret = self.call(typed_args_inner);
+        Box::new(typed_ret)
+    }
+}
+
+/// Mocking a method. The type parameters are the types of the arguments
+/// and the return values of the method, grouped as tuples.
+pub struct MockMethod<I, R> {
+    imp: MockImpl<I, R>,
+}
+
+/// The actual implementation of the mock method.
+pub enum MockImpl<I, R> {
+    /// Each invocation of the method calls the next closure in turn, wrapping around after the last one.
+    Sequence(Vec<MockClosure<I, R>>),
+    /// Every invocation of the method calls the same closure.
+    Fixed(MockClosure<I, R>),
+}
+
+/// The boxed closure type used by a mock method.
+pub type MockClosureSignature<I, R> = Box<dyn Fn(I) -> R + Send + Sync>;
+
+/// A mock closure: the boxed closure to execute, plus some metadata.
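+/// The metadata is currently a single call count, which lets tests assert whether,
+/// and how many times, a mocked method was invoked.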
+pub struct MockClosure<I, R> {
+    closure: MockClosureSignature<I, R>,
+    call_count: usize,
+}
+
+impl<I, R> MockClosure<I, R> {
+    fn new(closure: MockClosureSignature<I, R>) -> Self {
+        Self {
+            closure,
+            call_count: 0,
+        }
+    }
+    fn call(&mut self, args: I) -> R {
+        self.call_count += 1;
+        (self.closure)(args)
+    }
+}
+
+impl<I, R> std::default::Default for MockMethod<I, R> {
+    fn default() -> Self {
+        Self::new_unimplemented()
+    }
+}
+
+impl<I, R> MockMethod<I, R> {
+    /// The method will panic with `unimplemented!()` when called.
+    pub fn new_unimplemented() -> Self {
+        Self {
+            imp: MockImpl::Fixed(MockClosure::new(Box::new(|_| unimplemented!()))),
+        }
+    }
+
+    /// The method will return the default value for the return type.
+    pub fn new_default() -> Self
+    where
+        R: Default,
+    {
+        Self {
+            imp: MockImpl::Fixed(MockClosure::new(Box::new(|_| R::default()))),
+        }
+    }
+
+    /// The method will execute the given closure when called.
+    pub fn new_fixed(closure: MockClosureSignature<I, R>) -> Self {
+        Self {
+            imp: MockImpl::Fixed(MockClosure::new(closure)),
+        }
+    }
+
+    /// The method will execute the next closure in the sequence when called.
+    pub fn new_sequence(closures: Vec<MockClosureSignature<I, R>>) -> Self {
+        Self {
+            imp: MockImpl::Sequence(closures.into_iter().map(MockClosure::new).collect()),
+        }
+    }
+
+    /// Call the mock method.
+    pub fn call(&mut self, args: I) -> R {
+        let cur_call = self.call_count();
+
+        match &mut self.imp {
+            MockImpl::Sequence(closures) => {
+                let len = closures.len();
+                closures[cur_call % len].call(args)
+            }
+            MockImpl::Fixed(closure) => closure.call(args),
+        }
+    }
+
+    /// Has the method been called?
+    pub fn is_called(&self) -> bool {
+        self.call_count() > 0
+    }
+
+    /// How many times has the method been called?
+    pub fn call_count(&self) -> usize {
+        match &self.imp {
+            MockImpl::Fixed(c) => c.call_count,
+            MockImpl::Sequence(vec) => vec.iter().map(|c| c.call_count).sum(),
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn mock_fixed_single_arg() {
+        let mut mock = MockMethod::new_fixed(Box::new(|a: usize| -> usize { a + 1 }));
+        assert_eq!(mock.call_count(), 0);
+        let ret = mock.call(0);
+        assert_eq!(ret, 1);
+        assert_eq!(mock.call_count(), 1);
+    }
+
+    #[test]
+    fn mock_fixed_multi_args() {
+        let mut mock = MockMethod::new_fixed(Box::new(|(a, b): (usize, usize)| -> usize { a + b }));
+        assert_eq!(mock.call_count(), 0);
+        let ret = mock.call((1, 1));
+        assert_eq!(ret, 2);
+        assert_eq!(mock.call_count(), 1);
+    }
+
+    #[test]
+    fn mock_fixed_no_arg() {
+        let mut mock = MockMethod::new_fixed(Box::new(|()| -> usize { 42 }));
+        assert_eq!(mock.call_count(), 0);
+        let ret = mock.call(());
+        assert_eq!(ret, 42);
+        assert_eq!(mock.call_count(), 1);
+    }
+
+    #[test]
+    fn mock_sequence() {
+        let mut mock = MockMethod::new_sequence(vec![
+            Box::new(|()| -> usize { 0 }),
+            Box::new(|()| -> usize { 1 }),
+        ]);
+        assert_eq!(mock.call_count(), 0);
+
+        assert_eq!(mock.call(()), 0);
+        assert_eq!(mock.call_count(), 1);
+        assert_eq!(mock.call(()), 1);
+        assert_eq!(mock.call_count(), 2);
+
+        assert_eq!(mock.call(()), 0);
+        assert_eq!(mock.call_count(), 3);
+        assert_eq!(mock.call(()), 1);
+        assert_eq!(mock.call_count(), 4);
+    }
+}
diff --git a/src/util/test_util/mock_vm.rs b/src/util/test_util/mock_vm.rs
new file mode 100644
index 0000000000..8dc3c5b783
--- /dev/null
+++ b/src/util/test_util/mock_vm.rs
@@ -0,0 +1,615 @@
+// Some mock methods may get really complex
+#![allow(clippy::type_complexity)]
+
+use crate::plan::ObjectQueue;
+use crate::scheduler::gc_work::ProcessEdgesWorkRootsWorkFactory;
+use crate::scheduler::gc_work::ProcessEdgesWorkTracerContext;
+use crate::scheduler::gc_work::SFTProcessEdges;
+use crate::scheduler::*;
+use crate::util::alloc::AllocationError;
+use crate::util::copy::*;
+use crate::util::opaque_pointer::*;
+use crate::util::{Address, ObjectReference};
+use crate::vm::object_model::specs::*;
+use crate::vm::EdgeVisitor;
+use crate::vm::GCThreadContext;
+use crate::vm::ObjectTracer;
+use crate::vm::ObjectTracerContext;
+use crate::vm::RootsWorkFactory;
+use crate::vm::VMBinding;
+use crate::Mutator;
+
+use super::mock_method::*;
+
+use std::default::Default;
+use std::ops::Range;
+use std::sync::Mutex;
+
+/// The offset between object reference and the allocation address if we use
+/// the default mock VM.
+pub const DEFAULT_OBJECT_REF_OFFSET: usize = 4;
+
+// To mock static methods, we have to create a static instance of `MockVM`.
+lazy_static! {
+    // The mutex may get poisoned at any time. Accessing this mutex needs to deal with the poisoned case.
+    // One can use read/write_mockvm to access the mock VM.
+    static ref MOCK_VM_INSTANCE: Mutex<MockVM> = Mutex::new(MockVM::default());
+}
+
+// MockVM only allows mock methods with references of no lifetime or static lifetime.
+// If `VMBinding` methods have references with a specific lifetime,
+// the references need to be turned into `'static` before we can call the mock methods.
+// This is correct as long as we only use the references within the mock methods, and
+// we do not store them for access after the mock method returns.
+macro_rules! lifetime {
+    ($e: expr) => {
+        unsafe { std::mem::transmute($e) }
+    };
+}
+
+/// Call `MockMethod`.
+macro_rules! mock {
+    ($fn: ident($($arg:expr),*)) => {
+        write_mockvm(|mock| mock.$fn.call(($($arg),*)))
+    };
+}
+/// Call `MockAny`.
+macro_rules! mock_any {
+    ($fn: ident($($arg:expr),*)) => {
+        *write_mockvm(|mock| mock.$fn.call_any(Box::new(($($arg),*)))).downcast().unwrap()
+    };
+}
+
+/// Read from the static MockVM instance. It deals with the case of a poisoned lock.
+pub fn read_mockvm<F, R>(func: F) -> R
+where
+    F: FnOnce(&MockVM) -> R,
+{
+    let lock = MOCK_VM_INSTANCE
+        .lock()
+        .unwrap_or_else(|poisoned| poisoned.into_inner());
+    func(&lock)
+}
+/// Write to the static MockVM instance. It deals with the case of a poisoned lock.
+pub fn write_mockvm<F, R>(func: F) -> R
+where
+    F: FnOnce(&mut MockVM) -> R,
+{
+    let mut lock = MOCK_VM_INSTANCE
+        .lock()
+        .unwrap_or_else(|poisoned| poisoned.into_inner());
+    func(&mut lock)
+}
+
+/// A test that uses `MockVM` should use this method to wrap the entire test
+/// that may use `MockVM`.
+///
+/// # Arguments
+/// * `setup`: Create a `MockVM`. Most tests can just use the default `MockVM::default()`.
+///   A test may also overwrite some methods for its own testing purpose.
+/// * `test`: The actual test. All the code that may access `MockVM`/`VMBinding` should be
+///   wrapped in the test closure.
+/// * `cleanup`: Any clean up or post check when the test finishes or aborts.
+pub fn with_mockvm<S, T, C>(setup: S, test: T, cleanup: C)
+where
+    S: FnOnce() -> MockVM,
+    T: FnOnce() + std::panic::UnwindSafe,
+    C: FnOnce(),
+{
+    super::serial_test(|| {
+        // Setup
+        {
+            write_mockvm(|mock| *mock = setup());
+        }
+        super::with_cleanup(test, cleanup);
+    })
+}
+
+/// Set up a default `MockVM`.
+pub fn default_setup() -> MockVM {
+    MockVM::default()
+}
+
+/// No extra clean up after the test.
+pub fn no_cleanup() {}
+
+/// A struct that allows us to mock the behavior of a `VMBinding` and the VM traits for testing.
+/// For simplicity, we implement `VMBinding` as well as `ActivePlan`, `Collection`,
+/// `ObjectModel`, `ReferenceGlue`, `Scanning` on the `MockVM` type, and forward each
+/// method to the mock methods in the static `MockVM` instance.
+/// By changing the mock closures in the struct, we can control the behavior of the `VMBinding`.
+/// Use [`with_mockvm`] in the tests that need `MockVM`.
+///
+/// # Mocking methods
+///
+/// The struct includes one mock method for each method in the VM traits.
+///
+/// ## Methods with only value types
+///
+/// It is straightforward to mock methods with only value types. Just group the argument types
+/// and the return types into two tuples (e.g. `I` and `R`), and create a `MockMethod<I, R>`.
+/// For example, [`crate::vm::ActivePlan::is_mutator`] has a signature of `fn(VMThread) -> bool`,
+/// so we can just create a `MockMethod<VMThread, bool>` for it.
+///
+/// ## Methods with reference types
+///
+/// As we cannot have extra type parameters (including generic lifetime parameters) on `MockVM`, `MockVM` can only
+/// have `MockMethod`s with types of `'static` lifetime. To create a mock method for methods with
+/// reference types, just replace the lifetime specifier in the reference with the `'static` lifetime.
+/// For example, [`crate::vm::ActivePlan::mutators`] has a signature of `fn<'a>() -> Box<dyn Iterator<Item = &'a mut Mutator<VM>> + 'a>`,
+/// so we just replace all the lifetime specifiers with `'static`, and create
+/// `MockMethod<(), Box<dyn Iterator<Item = &'static mut Mutator<MockVM>> + 'static>>`.
+/// When we invoke the `MockMethod`, we can use the `lifetime!` macro to hack the lifetime.
+/// Though this is unsafe, it is correct as long as we only use the reference within the mock implementation.
+///
+/// ## Methods with generic type parameters
+///
+/// As we cannot have extra type parameters on `MockVM`, there are two ways
+/// to mock methods with generic type parameters.
+///
+/// ### Use trait objects
+///
+/// We can use trait objects if the trait is object safe. For example,
+/// [`crate::vm::ActivePlan::vm_trace_object`] has a signature of
+/// `fn(&mut Q, ObjectReference, &mut GCWorker<VM>) -> ObjectReference`,
+/// so we can mock `&mut Q` as `&mut dyn ObjectQueue`, and use
+/// `MockMethod<(&'static mut dyn ObjectQueue, ObjectReference, &'static mut GCWorker<MockVM>), ObjectReference>`
+/// for the method.
+///
+/// ### Use `MockAny`
+///
+/// For cases where we cannot use trait objects, we can use `MockAny`.
+/// We simply use `Box<dyn MockAny>` and instantiate it with a `MockMethod` of
+/// concrete types. For example, [`crate::vm::Scanning::process_weak_refs`]
+/// has a signature of `fn(&mut GCWorker<VM>, impl ObjectTracerContext<VM>) -> bool`.
+/// `ObjectTracerContext` is not object safe, so we just use `Box<dyn MockAny>`
+/// in `MockVM`, and instantiate it with a concrete type of `ObjectTracerContext`, such as
+/// `Box::new(MockMethod::<(&'static mut GCWorker<MockVM>, ProcessEdgesWorkTracerContext<SFTProcessEdges<MockVM>>), bool>::new_unimplemented())`.
+///
+/// Note that when `MockAny` is used, one needs to make sure that the types of the actual arguments match the argument types used for creating the `MockMethod`.
+/// We provide a default implementation for those `MockAny` methods, and it is very possible that the types in the default implementation do not
+/// match the arguments you would like to test with. You should overwrite the default `MockMethod` during the `MockVM` setup.
+///
+/// # Mock constants and associated types
+///
+/// These are not supported at the moment. As those would change the `MockVM` type, we would have
+/// to use macros to generate a new `MockVM` type when we customize constants or associated types.
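+///
+/// # Example
+///
+/// An illustrative sketch (not a real test in this crate) of overriding one mock
+/// method while keeping the defaults for everything else:
+///
+/// ```ignore
+/// use crate::vm::ActivePlan;
+/// with_mockvm(
+///     // Setup: override `is_mutator` so it always returns false.
+///     || MockVM {
+///         is_mutator: MockMethod::new_fixed(Box::new(|_| false)),
+///         ..MockVM::default()
+///     },
+///     // Test: code in this closure observes the overridden behavior.
+///     || {
+///         assert!(!MockVM::is_mutator(VMThread::UNINITIALIZED));
+///     },
+///     // Cleanup: check that the mocked method was actually called.
+///     || read_mockvm(|mock| assert!(mock.is_mutator.is_called())),
+/// );
+/// ```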
+
+// The current implementation is not perfect, but at least it works, and it is easy enough to debug with.
+// I have tried different third-party libraries for mocking, and each has its own limitation. And
+// none of the libraries I tried can mock `VMBinding` and the associated traits out of the box. Even after I attempted
+// to remove all those VM traits and had all the methods in `VMBinding`, the libraries still did not
+// work out.
+pub struct MockVM {
+    // active plan
+    pub number_of_mutators: MockMethod<(), usize>,
+    pub is_mutator: MockMethod<VMThread, bool>,
+    pub mutator: MockMethod<VMMutatorThread, &'static mut Mutator<MockVM>>,
+    pub mutators: MockMethod<(), Box<dyn Iterator<Item = &'static mut Mutator<MockVM>> + 'static>>,
+    pub vm_trace_object: MockMethod<
+        (
+            &'static mut dyn ObjectQueue,
+            ObjectReference,
+            &'static mut GCWorker<MockVM>,
+        ),
+        ObjectReference,
+    >,
+    // collection
+    pub stop_all_mutators:
+        MockMethod<(VMWorkerThread, Box<dyn FnMut(&'static mut Mutator<MockVM>)>), ()>,
+    pub resume_mutators: MockMethod<VMWorkerThread, ()>,
+    pub block_for_gc: MockMethod<VMMutatorThread, ()>,
+    pub spawn_gc_thread: MockMethod<(VMThread, GCThreadContext<MockVM>), ()>,
+    pub out_of_memory: MockMethod<(VMThread, AllocationError), ()>,
+    pub schedule_finalization: MockMethod<VMWorkerThread, ()>,
+    pub post_forwarding: MockMethod<VMWorkerThread, ()>,
+    pub vm_live_bytes: MockMethod<(), usize>,
+    // object model
+    pub copy_object: MockMethod<
+        (
+            ObjectReference,
+            CopySemantics,
+            &'static GCWorkerCopyContext<MockVM>,
+        ),
+        ObjectReference,
+    >,
+    pub copy_object_to: MockMethod<(ObjectReference, ObjectReference, Address), Address>,
+    pub get_object_size: MockMethod<ObjectReference, usize>,
+    pub get_object_size_when_copied: MockMethod<ObjectReference, usize>,
+    pub get_object_align_when_copied: MockMethod<ObjectReference, usize>,
+    pub get_object_align_offset_when_copied: MockMethod<ObjectReference, usize>,
+    pub get_type_descriptor: MockMethod<(), &'static [i8]>,
+    pub get_object_reference_when_copied_to:
+        MockMethod<(ObjectReference, Address), ObjectReference>,
+    pub ref_to_object_start: MockMethod<ObjectReference, Address>,
+    pub ref_to_header: MockMethod<ObjectReference, Address>,
+    pub ref_to_address: MockMethod<ObjectReference, Address>,
+    pub address_to_ref: MockMethod<Address, ObjectReference>,
+    pub dump_object: MockMethod<ObjectReference, ()>,
+    // reference glue
+    pub weakref_clear_referent: MockMethod<ObjectReference, ()>,
+    pub weakref_set_referent: MockMethod<(ObjectReference, ObjectReference), ()>,
+    pub weakref_get_referent: MockMethod<ObjectReference, ObjectReference>,
+    pub weakref_is_referent_cleared: MockMethod<ObjectReference, bool>,
+    pub weakref_enqueue_references: MockMethod<(&'static [ObjectReference], VMWorkerThread), ()>,
+    // scanning
+    pub support_edge_enqueuing: MockMethod<(VMWorkerThread, ObjectReference), bool>,
+    pub scan_object: MockMethod<
+        (
+            VMWorkerThread,
+            ObjectReference,
+            &'static mut dyn EdgeVisitor<<MockVM as VMBinding>::VMEdge>,
+        ),
+        (),
+    >,
+    pub scan_object_and_trace_edges: MockMethod<
+        (
+            VMWorkerThread,
+            ObjectReference,
+            &'static mut dyn ObjectTracer,
+        ),
+        (),
+    >,
+    pub scan_roots_in_mutator_thread: Box<dyn MockAny>,
+    pub scan_vm_specific_roots: Box<dyn MockAny>,
+    pub notify_initial_thread_scan_complete: MockMethod<(bool, VMWorkerThread), ()>,
+    pub supports_return_barrier: MockMethod<(), bool>,
+    pub prepare_for_roots_re_scanning: MockMethod<(), ()>,
+    pub process_weak_refs: Box<dyn MockAny>,
+    pub forward_weak_refs: Box<dyn MockAny>,
+}
+
+impl Default for MockVM {
+    fn default() -> Self {
+        Self {
+            number_of_mutators: MockMethod::new_unimplemented(),
+            is_mutator: MockMethod::new_fixed(Box::new(|_| true)),
+            mutator: MockMethod::new_unimplemented(),
+            mutators: MockMethod::new_unimplemented(),
+            vm_trace_object: MockMethod::new_fixed(Box::new(|(_, object, _)| {
+                panic!("MMTk cannot trace object {:?} as it does not belong to any MMTk space.
If the object is known to the VM, the binding can override this method and handle its tracing.", object)
+            })),
+
+            stop_all_mutators: MockMethod::new_unimplemented(),
+            resume_mutators: MockMethod::new_unimplemented(),
+            block_for_gc: MockMethod::new_unimplemented(),
+            spawn_gc_thread: MockMethod::new_default(),
+            out_of_memory: MockMethod::new_fixed(Box::new(|(_, err)| {
+                panic!("Out of memory with {:?}!", err)
+            })),
+            schedule_finalization: MockMethod::new_default(),
+            post_forwarding: MockMethod::new_default(),
+            vm_live_bytes: MockMethod::new_default(),
+
+            copy_object: MockMethod::new_unimplemented(),
+            copy_object_to: MockMethod::new_unimplemented(),
+            get_object_size: MockMethod::new_unimplemented(),
+            get_object_size_when_copied: MockMethod::new_unimplemented(),
+            get_object_align_when_copied: MockMethod::new_fixed(Box::new(|_| {
+                std::mem::size_of::<usize>()
+            })),
+            get_object_align_offset_when_copied: MockMethod::new_fixed(Box::new(|_| 0)),
+            get_type_descriptor: MockMethod::new_unimplemented(),
+            get_object_reference_when_copied_to: MockMethod::new_unimplemented(),
+            ref_to_object_start: MockMethod::new_fixed(Box::new(|object| {
+                object.to_raw_address().sub(DEFAULT_OBJECT_REF_OFFSET)
+            })),
+            ref_to_header: MockMethod::new_fixed(Box::new(|object| object.to_raw_address())),
+            ref_to_address: MockMethod::new_fixed(Box::new(|object| {
+                object.to_raw_address().sub(DEFAULT_OBJECT_REF_OFFSET)
+            })),
+            address_to_ref: MockMethod::new_fixed(Box::new(|addr| {
+                ObjectReference::from_raw_address(addr.add(DEFAULT_OBJECT_REF_OFFSET))
+            })),
+            dump_object: MockMethod::new_unimplemented(),
+
+            weakref_clear_referent: MockMethod::new_unimplemented(),
+            weakref_get_referent: MockMethod::new_unimplemented(),
+            weakref_set_referent: MockMethod::new_unimplemented(),
+            weakref_is_referent_cleared: MockMethod::new_fixed(Box::new(|r| r.is_null())),
+            weakref_enqueue_references: MockMethod::new_unimplemented(),
+
+            support_edge_enqueuing: MockMethod::new_fixed(Box::new(|_| true)),
+            scan_object: MockMethod::new_unimplemented(),
+            scan_object_and_trace_edges: MockMethod::new_unimplemented(),
+            // We instantiate a `MockMethod` with the arguments as `ProcessEdgesWorkRootsWorkFactory<..., SFTProcessEdges<MockVM>, ...>`,
+            // thus the mock method expects the actual call arguments to match the type.
+            // In most cases, this won't work and this `MockMethod` is just a placeholder. It is
+            // fine as long as the method is not actually called.
+            // If a user needs this method, and would like to mock it in their particular test,
+            // they are expected to provide their own
+            // `MockMethod` that matches the argument types they will pass in that test case.
+            // See the documentation in the section about `MockAny` on the `MockVM` type.
+            scan_roots_in_mutator_thread: Box::new(MockMethod::<
+                (
+                    VMWorkerThread,
+                    &'static mut Mutator<MockVM>,
+                    ProcessEdgesWorkRootsWorkFactory<
+                        MockVM,
+                        SFTProcessEdges<MockVM>,
+                        SFTProcessEdges<MockVM>,
+                    >,
+                ),
+                (),
+            >::new_unimplemented()),
+            // Same here: the `MockMethod` is just a placeholder. See the comments above.
+            scan_vm_specific_roots: Box::new(MockMethod::<
+                (
+                    VMWorkerThread,
+                    ProcessEdgesWorkRootsWorkFactory<
+                        MockVM,
+                        SFTProcessEdges<MockVM>,
+                        SFTProcessEdges<MockVM>,
+                    >,
+                ),
+                (),
+            >::new_unimplemented()),
+            notify_initial_thread_scan_complete: MockMethod::new_unimplemented(),
+            supports_return_barrier: MockMethod::new_unimplemented(),
+            prepare_for_roots_re_scanning: MockMethod::new_unimplemented(),
+            // Same here: the `MockMethod` is just a placeholder. See the comments above.
+            process_weak_refs: Box::new(MockMethod::<
+                (
+                    &'static mut GCWorker<MockVM>,
+                    ProcessEdgesWorkTracerContext<SFTProcessEdges<MockVM>>,
+                ),
+                bool,
+            >::new_unimplemented()),
+            // Same here: the `MockMethod` is just a placeholder. See the comments above.
+            forward_weak_refs: Box::new(MockMethod::<
+                (
+                    &'static mut GCWorker<MockVM>,
+                    ProcessEdgesWorkTracerContext<SFTProcessEdges<MockVM>>,
+                ),
+                (),
+            >::new_default()),
+        }
+    }
+}
+
+unsafe impl Sync for MockVM {}
+unsafe impl Send for MockVM {}
+
+impl VMBinding for MockVM {
+    type VMEdge = Address;
+    type VMMemorySlice = Range<Address>
;
+
+    type VMActivePlan = MockVM;
+    type VMCollection = MockVM;
+    type VMObjectModel = MockVM;
+    type VMReferenceGlue = MockVM;
+    type VMScanning = MockVM;
+
+    /// Allowed maximum alignment in bytes.
+    const MAX_ALIGNMENT: usize = 1 << 6;
+}
+
+impl crate::vm::ActivePlan<MockVM> for MockVM {
+    fn number_of_mutators() -> usize {
+        mock!(number_of_mutators())
+    }
+
+    fn is_mutator(tls: VMThread) -> bool {
+        mock!(is_mutator(tls))
+    }
+
+    fn mutator(tls: VMMutatorThread) -> &'static mut Mutator<MockVM> {
+        mock!(mutator(tls))
+    }
+
+    fn mutators<'a>() -> Box<dyn Iterator<Item = &'a mut Mutator<MockVM>> + 'a> {
+        let ret = mock!(mutators());
+        lifetime!(ret)
+    }
+
+    fn vm_trace_object<Q: ObjectQueue>(
+        queue: &mut Q,
+        object: ObjectReference,
+        worker: &mut GCWorker<MockVM>,
+    ) -> ObjectReference {
+        mock!(vm_trace_object(
+            lifetime!(queue as &mut dyn ObjectQueue),
+            object,
+            lifetime!(worker)
+        ))
+    }
+}
+
+impl crate::vm::Collection<MockVM> for MockVM {
+    fn stop_all_mutators<F>(tls: VMWorkerThread, mutator_visitor: F)
+    where
+        F: FnMut(&'static mut Mutator<MockVM>),
+    {
+        mock!(stop_all_mutators(
+            tls,
+            lifetime!(Box::new(mutator_visitor) as Box<dyn FnMut(&'static mut Mutator<MockVM>)>)
+        ))
+    }
+
+    fn resume_mutators(tls: VMWorkerThread) {
+        mock!(resume_mutators(tls))
+    }
+
+    fn block_for_gc(tls: VMMutatorThread) {
+        mock!(block_for_gc(tls))
+    }
+
+    fn spawn_gc_thread(tls: VMThread, ctx: GCThreadContext<MockVM>) {
+        mock!(spawn_gc_thread(tls, ctx))
+    }
+
+    fn out_of_memory(tls: VMThread, err_kind: AllocationError) {
+        mock!(out_of_memory(tls, err_kind))
+    }
+
+    fn schedule_finalization(tls: VMWorkerThread) {
+        mock!(schedule_finalization(tls))
+    }
+
+    fn post_forwarding(tls: VMWorkerThread) {
+        mock!(post_forwarding(tls))
+    }
+
+    fn vm_live_bytes() -> usize {
+        mock!(vm_live_bytes())
+    }
+}
+
+impl crate::vm::ObjectModel<MockVM> for MockVM {
+    const GLOBAL_LOG_BIT_SPEC: VMGlobalLogBitSpec = VMGlobalLogBitSpec::in_header(0);
+    const LOCAL_FORWARDING_POINTER_SPEC: VMLocalForwardingPointerSpec =
+        VMLocalForwardingPointerSpec::in_header(0);
+    const LOCAL_FORWARDING_BITS_SPEC: VMLocalForwardingBitsSpec =
+        VMLocalForwardingBitsSpec::in_header(0);
+    const LOCAL_MARK_BIT_SPEC: VMLocalMarkBitSpec = VMLocalMarkBitSpec::in_header(0);
+    const LOCAL_LOS_MARK_NURSERY_SPEC: VMLocalLOSMarkNurserySpec =
+        VMLocalLOSMarkNurserySpec::in_header(0);
+
+    const OBJECT_REF_OFFSET_LOWER_BOUND: isize = DEFAULT_OBJECT_REF_OFFSET as isize;
+
+    fn copy(
+        from: ObjectReference,
+        semantics: CopySemantics,
+        copy_context: &mut GCWorkerCopyContext<MockVM>,
+    ) -> ObjectReference {
+        mock!(copy_object(from, semantics, lifetime!(copy_context)))
+    }
+
+    fn copy_to(from: ObjectReference, to: ObjectReference, region: Address) -> Address {
+        mock!(copy_object_to(from, to, region))
+    }
+
+    fn get_current_size(object: ObjectReference) -> usize {
+        mock!(get_object_size(object))
+    }
+
+    fn get_size_when_copied(object: ObjectReference) -> usize {
+        mock!(get_object_size_when_copied(object))
+    }
+
+    fn get_align_when_copied(object: ObjectReference) -> usize {
+        mock!(get_object_align_when_copied(object))
+    }
+
+    fn get_align_offset_when_copied(object: ObjectReference) -> usize {
+        mock!(get_object_align_offset_when_copied(object))
+    }
+
+    fn get_type_descriptor(_reference: ObjectReference) -> &'static [i8] {
+        // We do not use this method, and it will be removed.
+ unreachable!() + } + + fn get_reference_when_copied_to(from: ObjectReference, to: Address) -> ObjectReference { + mock!(get_object_reference_when_copied_to(from, to)) + } + + fn ref_to_object_start(object: ObjectReference) -> Address { + mock!(ref_to_object_start(object)) + } + + fn ref_to_header(object: ObjectReference) -> Address { + mock!(ref_to_header(object)) + } + + fn ref_to_address(object: ObjectReference) -> Address { + mock!(ref_to_address(object)) + } + + fn address_to_ref(addr: Address) -> ObjectReference { + mock!(address_to_ref(addr)) + } + + fn dump_object(object: ObjectReference) { + mock!(dump_object(object)) + } +} + +impl crate::vm::ReferenceGlue for MockVM { + type FinalizableType = ObjectReference; + + fn clear_referent(new_reference: ObjectReference) { + mock!(weakref_clear_referent(new_reference)) + } + + fn set_referent(reference: ObjectReference, referent: ObjectReference) { + mock!(weakref_set_referent(reference, referent)) + } + fn get_referent(object: ObjectReference) -> ObjectReference { + mock!(weakref_get_referent(object)) + } + fn is_referent_cleared(referent: ObjectReference) -> bool { + mock!(weakref_is_referent_cleared(referent)) + } + fn enqueue_references(references: &[ObjectReference], tls: VMWorkerThread) { + mock!(weakref_enqueue_references(lifetime!(references), tls)) + } +} + +impl crate::vm::Scanning for MockVM { + fn support_edge_enqueuing(tls: VMWorkerThread, object: ObjectReference) -> bool { + mock!(support_edge_enqueuing(tls, object)) + } + fn scan_object::VMEdge>>( + tls: VMWorkerThread, + object: ObjectReference, + edge_visitor: &mut EV, + ) { + mock!(scan_object( + tls, + object, + lifetime!(edge_visitor as &mut dyn EdgeVisitor<::VMEdge>) + )) + } + fn scan_object_and_trace_edges( + tls: VMWorkerThread, + object: ObjectReference, + object_tracer: &mut OT, + ) { + mock!(scan_object_and_trace_edges( + tls, + object, + lifetime!(object_tracer as &mut dyn ObjectTracer) + )) + } + fn scan_roots_in_mutator_thread( + tls: VMWorkerThread, + mutator: &'static mut Mutator, + factory: impl RootsWorkFactory<::VMEdge>, + ) { + mock_any!(scan_roots_in_mutator_thread( + tls, + mutator, + Box::new(factory) + )) + } + fn scan_vm_specific_roots( + tls: VMWorkerThread, + factory: impl RootsWorkFactory<::VMEdge>, + ) { + mock_any!(scan_vm_specific_roots(tls, Box::new(factory))) + } + fn notify_initial_thread_scan_complete(partial_scan: bool, tls: VMWorkerThread) { + mock!(notify_initial_thread_scan_complete(partial_scan, tls)) + } + fn supports_return_barrier() -> bool { + mock!(supports_return_barrier()) + } + fn prepare_for_roots_re_scanning() { + mock!(prepare_for_roots_re_scanning()) + } + fn process_weak_refs( + worker: &mut GCWorker, + tracer_context: impl ObjectTracerContext, + ) -> bool { + let worker: &'static mut GCWorker = lifetime!(worker); + mock_any!(process_weak_refs(worker, tracer_context)) + } + fn forward_weak_refs( + worker: &mut GCWorker, + tracer_context: impl ObjectTracerContext, + ) { + let worker: &'static mut GCWorker = lifetime!(worker); + mock_any!(forward_weak_refs(worker, tracer_context)) + } +} diff --git a/src/util/test_util.rs b/src/util/test_util/mod.rs similarity index 96% rename from src/util/test_util.rs rename to src/util/test_util/mod.rs index 8401d79c4d..56348b276e 100644 --- a/src/util/test_util.rs +++ b/src/util/test_util/mod.rs @@ -6,6 +6,13 @@ use std::sync::Mutex; use std::thread; use std::time::Duration; +#[cfg(feature = "mock_test")] +pub mod fixtures; +#[cfg(feature = "mock_test")] +pub mod mock_method; 
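+// As an illustrative sketch of how these testing modules fit together (the API
+// names are the ones introduced by this patch; the closures are made up), a
+// mock test overrides a `MockMethod` during setup and inspects it afterwards:
+//
+//     with_mockvm(
+//         || MockVM {
+//             block_for_gc: MockMethod::new_fixed(Box::new(|_| panic!("GC!"))),
+//             ..MockVM::default()
+//         },
+//         || { /* exercise MMTk; an allocation past the heap limit triggers GC */ },
+//         || read_mockvm(|mock| assert!(mock.block_for_gc.is_called())),
+//     );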
+#[cfg(feature = "mock_test")] +pub mod mock_vm; + // Sometimes we need to mmap for tests. We want to ensure that the mmapped addresses do not overlap // for different tests, so we organize them here. diff --git a/src/vm/mod.rs b/src/vm/mod.rs index 223a02e5cc..e9b3003664 100644 --- a/src/vm/mod.rs +++ b/src/vm/mod.rs @@ -35,6 +35,9 @@ pub use self::scanning::ObjectTracerContext; pub use self::scanning::RootsWorkFactory; pub use self::scanning::Scanning; +#[cfg(test)] +mod tests; + /// Default min alignment 4 bytes const DEFAULT_LOG_MIN_ALIGNMENT: usize = 2; /// Default max alignment 8 bytes diff --git a/src/vm/tests/malloc_api.rs b/src/vm/tests/malloc_api.rs new file mode 100644 index 0000000000..b86fcb7c94 --- /dev/null +++ b/src/vm/tests/malloc_api.rs @@ -0,0 +1,24 @@ +use crate::memory_manager; + +#[test] +pub fn malloc_free() { + let res = memory_manager::malloc(8); + assert!(!res.is_zero()); + memory_manager::free(res); +} + +#[test] +pub fn calloc_free() { + let res = memory_manager::calloc(1, 8); + assert!(!res.is_zero()); + memory_manager::free(res); +} + +#[test] +pub fn realloc_free() { + let res1 = memory_manager::malloc(8); + assert!(!res1.is_zero()); + let res2 = memory_manager::realloc(res1, 16); + assert!(!res2.is_zero()); + memory_manager::free(res2); +} diff --git a/src/vm/tests/mock_tests/mock_test_allocate_align_offset.rs b/src/vm/tests/mock_tests/mock_test_allocate_align_offset.rs new file mode 100644 index 0000000000..bef39dcab0 --- /dev/null +++ b/src/vm/tests/mock_tests/mock_test_allocate_align_offset.rs @@ -0,0 +1,80 @@ +// GITHUB-CI: MMTK_PLAN=all + +use lazy_static::lazy_static; + +use super::mock_test_prelude::*; +use crate::plan::AllocationSemantics; + +lazy_static! { + static ref MUTATOR: Fixture = Fixture::new(); +} + +#[test] +pub fn allocate_alignment() { + with_mockvm( + default_setup, + || { + MUTATOR.with_fixture_mut(|fixture| { + let min = MockVM::MIN_ALIGNMENT; + let max = MockVM::MAX_ALIGNMENT; + info!("Allowed alignment between {} and {}", min, max); + let mut align = min; + while align <= max { + info!("Test allocation with alignment {}", align); + let addr = memory_manager::alloc( + &mut fixture.mutator, + 8, + align, + 0, + AllocationSemantics::Default, + ); + assert!( + addr.is_aligned_to(align), + "Expected allocation alignment {}, returned address is {:?}", + align, + addr + ); + align *= 2; + } + }) + }, + no_cleanup, + ) +} + +#[test] +pub fn allocate_offset() { + with_mockvm( + default_setup, + || { + MUTATOR.with_fixture_mut(|fixture| { + const OFFSET: usize = 4; + let min = MockVM::MIN_ALIGNMENT; + let max = MockVM::MAX_ALIGNMENT; + info!("Allowed alignment between {} and {}", min, max); + let mut align = min; + while align <= max { + info!( + "Test allocation with alignment {} and offset {}", + align, OFFSET + ); + let addr = memory_manager::alloc( + &mut fixture.mutator, + 8, + align, + OFFSET, + AllocationSemantics::Default, + ); + assert!( + (addr + OFFSET).is_aligned_to(align), + "Expected allocation alignment {}, returned address is {:?}", + align, + addr + ); + align *= 2; + } + }); + }, + no_cleanup, + ) +} diff --git a/src/vm/tests/mock_tests/mock_test_allocate_with_disable_collection.rs b/src/vm/tests/mock_tests/mock_test_allocate_with_disable_collection.rs new file mode 100644 index 0000000000..e6f8170a3a --- /dev/null +++ b/src/vm/tests/mock_tests/mock_test_allocate_with_disable_collection.rs @@ -0,0 +1,36 @@ +use crate::memory_manager; +use crate::util::test_util::fixtures::*; +use crate::util::test_util::mock_vm::*; +use 
crate::AllocationSemantics;
+
+/// This test allocates after calling disable_collection(). When we exceed the heap limit, MMTk will NOT trigger a GC,
+/// and the allocation will succeed.
+#[test]
+pub fn allocate_with_disable_collection() {
+    with_mockvm(
+        default_setup,
+        || {
+            // 1MB heap
+            const MB: usize = 1024 * 1024;
+            let mut fixture = MutatorFixture::create_with_heapsize(MB);
+
+            // Allocate half MB. It should be fine.
+            let addr = memory_manager::alloc(
+                &mut fixture.mutator,
+                MB >> 1,
+                8,
+                0,
+                AllocationSemantics::Default,
+            );
+            assert!(!addr.is_zero());
+
+            // Disable GC
+            memory_manager::disable_collection(fixture.mmtk());
+            // Allocate another MB. This exceeds the heap size. But as we have disabled GC, MMTk will not trigger a GC and will allow this allocation.
+            let addr =
+                memory_manager::alloc(&mut fixture.mutator, MB, 8, 0, AllocationSemantics::Default);
+            assert!(!addr.is_zero());
+        },
+        no_cleanup,
+    )
+}
diff --git a/src/vm/tests/mock_tests/mock_test_allocate_with_initialize_collection.rs b/src/vm/tests/mock_tests/mock_test_allocate_with_initialize_collection.rs
new file mode 100644
index 0000000000..2ec8ab95e0
--- /dev/null
+++ b/src/vm/tests/mock_tests/mock_test_allocate_with_initialize_collection.rs
@@ -0,0 +1,53 @@
+use super::mock_test_prelude::*;
+use crate::AllocationSemantics;
+
+// This test allocates after calling initialize_collection(). When we exceed the heap limit, MMTk will trigger a GC, and block_for_gc will be called.
+// We haven't implemented block_for_gc, so it will panic.
+#[test]
+#[should_panic(expected = "block_for_gc is called")]
+pub fn allocate_with_initialize_collection() {
+    // 1MB heap
+    with_mockvm(
+        || -> MockVM {
+            MockVM {
+                block_for_gc: MockMethod::new_fixed(Box::new(|_| panic!("block_for_gc is called"))),
+                ..MockVM::default()
+            }
+        },
+        || {
+            const MB: usize = 1024 * 1024;
+            let mut fixture = MutatorFixture::create_with_heapsize(MB);
+
+            // Allocate half MB. It should be fine.
+            let addr = memory_manager::alloc(
+                &mut fixture.mutator,
+                MB >> 1,
+                8,
+                0,
+                AllocationSemantics::Default,
+            );
+            assert!(!addr.is_zero());
+
+            // Fill up the heap
+            let _ = memory_manager::alloc(
+                &mut fixture.mutator,
+                MB >> 1,
+                8,
+                0,
+                AllocationSemantics::Default,
+            );
+
+            // Attempt another allocation. This will trigger GC.
+            let addr =
+                memory_manager::alloc(&mut fixture.mutator, MB, 8, 0, AllocationSemantics::Default);
+            assert!(!addr.is_zero());
+        },
+        || {
+            // This is actually redundant, as we defined block_for_gc for this test.
+            // This just demonstrates that we can check whether the method is called.
+            read_mockvm(|mock| {
+                assert!(mock.block_for_gc.is_called());
+            });
+        },
+    )
+}
diff --git a/src/vm/tests/mock_tests/mock_test_allocate_with_re_enable_collection.rs b/src/vm/tests/mock_tests/mock_test_allocate_with_re_enable_collection.rs
new file mode 100644
index 0000000000..0547cf35e3
--- /dev/null
+++ b/src/vm/tests/mock_tests/mock_test_allocate_with_re_enable_collection.rs
@@ -0,0 +1,60 @@
+use crate::memory_manager;
+use crate::util::test_util::fixtures::*;
+use crate::util::test_util::mock_method::*;
+use crate::util::test_util::mock_vm::*;
+use crate::AllocationSemantics;
+
+// This test allocates after calling initialize_collection(). When we exceed the heap limit, MMTk will trigger a GC, and block_for_gc will be called.
+// We haven't implemented block_for_gc, so it will panic. This test is similar to allocate_with_initialize_collection, except that GC is disabled and then re-enabled during the test.
+#[test]
+#[should_panic(expected = "block_for_gc is called")]
+pub fn allocate_with_re_enable_collection() {
+    // 1MB heap
+    with_mockvm(
+        || -> MockVM {
+            MockVM {
+                block_for_gc: MockMethod::new_fixed(Box::new(|_| panic!("block_for_gc is called"))),
+                ..MockVM::default()
+            }
+        },
+        || {
+            const MB: usize = 1024 * 1024;
+            let mut fixture = MutatorFixture::create_with_heapsize(MB);
+
+            // Allocate half MB. It should be fine.
+            let addr = memory_manager::alloc(
+                &mut fixture.mutator,
+                MB >> 1,
+                8,
+                0,
+                AllocationSemantics::Default,
+            );
+            assert!(!addr.is_zero());
+
+            // Disable GC so that we can keep allocating without triggering a GC.
+            memory_manager::disable_collection(fixture.mmtk());
+            // Fill up the heap
+            let _ = memory_manager::alloc(
+                &mut fixture.mutator,
+                MB >> 1,
+                8,
+                0,
+                AllocationSemantics::Default,
+            );
+
+            // Enable GC again.
+            memory_manager::enable_collection(fixture.mmtk());
+            // Attempt another allocation. This will trigger GC.
+            let addr =
+                memory_manager::alloc(&mut fixture.mutator, MB, 8, 0, AllocationSemantics::Default);
+            assert!(!addr.is_zero());
+        },
+        || {
+            // This is actually redundant, as we defined block_for_gc for this test.
+            // This just demonstrates that we can check whether the method is called.
+            read_mockvm(|mock| {
+                assert!(mock.block_for_gc.is_called());
+            });
+        },
+    )
+}
diff --git a/src/vm/tests/mock_tests/mock_test_allocate_without_initialize_collection.rs b/src/vm/tests/mock_tests/mock_test_allocate_without_initialize_collection.rs
new file mode 100644
index 0000000000..423c3a299c
--- /dev/null
+++ b/src/vm/tests/mock_tests/mock_test_allocate_without_initialize_collection.rs
@@ -0,0 +1,52 @@
+use super::mock_test_prelude::*;
+
+use crate::util::opaque_pointer::*;
+use crate::AllocationSemantics;
+
+/// This test allocates without calling initialize_collection(). When we exceed the heap limit, a GC should be triggered by MMTk.
+/// But as we haven't enabled collection, GC is not initialized, so MMTk will panic.
+#[test]
+#[should_panic(expected = "GC is not allowed here")]
+pub fn allocate_without_initialize_collection() {
+    // 1MB heap
+    with_mockvm(
+        default_setup,
+        || {
+            const MB: usize = 1024 * 1024;
+            let fixture = MMTKFixture::create_with_builder(
+                |builder| {
+                    builder
+                        .options
+                        .gc_trigger
+                        .set(crate::util::options::GCTriggerSelector::FixedHeapSize(MB));
+                },
+                false,
+            ); // Do not initialize collection
+
+            // Build mutator
+            let mut mutator = memory_manager::bind_mutator(
+                fixture.mmtk,
+                VMMutatorThread(VMThread::UNINITIALIZED),
+            );
+
+            // Allocate half MB. It should be fine.
+            let addr =
+                memory_manager::alloc(&mut mutator, MB >> 1, 8, 0, AllocationSemantics::Default);
+            assert!(!addr.is_zero());
+
+            // Fill up the heap
+            let _ =
+                memory_manager::alloc(&mut mutator, MB >> 1, 8, 0, AllocationSemantics::Default);
+
+            // Attempt another allocation.
+            let addr = memory_manager::alloc(&mut mutator, MB, 8, 0, AllocationSemantics::Default);
+            assert!(!addr.is_zero());
+        },
+        || {
+            // We panic before calling block_for_gc.
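+            // (The cleanup closure is the third argument to `with_mockvm`; `read_mockvm`
+            // lets us assert on the recorded calls -- here, that block_for_gc was never reached.)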
+            read_mockvm(|mock| {
+                assert!(!mock.block_for_gc.is_called());
+            });
+        },
+    )
+}
diff --git a/src/vm/tests/mock_tests/mock_test_allocator_info.rs b/src/vm/tests/mock_tests/mock_test_allocator_info.rs
new file mode 100644
index 0000000000..9b03f7c9c1
--- /dev/null
+++ b/src/vm/tests/mock_tests/mock_test_allocator_info.rs
@@ -0,0 +1,58 @@
+// GITHUB-CI: MMTK_PLAN=all
+
+use crate::memory_manager;
+use crate::util::alloc::AllocatorInfo;
+use crate::util::options::PlanSelector;
+use crate::util::test_util::fixtures::*;
+use crate::util::test_util::mock_vm::*;
+use crate::AllocationSemantics;
+
+#[test]
+pub fn test_allocator_info() {
+    with_mockvm(
+        default_setup,
+        || {
+            let fixture = MMTKFixture::create();
+
+            let selector =
+                memory_manager::get_allocator_mapping(fixture.mmtk, AllocationSemantics::Default);
+            let base_offset = crate::plan::Mutator::<MockVM>::get_allocator_base_offset(selector);
+            let allocator_info = AllocatorInfo::new::<MockVM>(selector);
+
+            match *fixture.mmtk.get_options().plan {
+                PlanSelector::NoGC
+                | PlanSelector::Immix
+                | PlanSelector::SemiSpace
+                | PlanSelector::GenCopy
+                | PlanSelector::GenImmix
+                | PlanSelector::MarkCompact
+                | PlanSelector::StickyImmix => {
+                    // These plans all use the bump pointer allocator.
+                    let AllocatorInfo::BumpPointer {
+                        bump_pointer_offset,
+                    } = allocator_info
+                    else {
+                        panic!("Expected AllocatorInfo for a bump pointer allocator");
+                    };
+                    // In all of those plans, the first field at the base offset is the tls, and the second field is the BumpPointer struct.
+                    assert_eq!(
+                        base_offset + crate::util::constants::BYTES_IN_ADDRESS,
+                        bump_pointer_offset
+                    );
+                }
+                PlanSelector::MarkSweep => {
+                    if cfg!(feature = "malloc_mark_sweep") {
+                        // We provide no info for a malloc allocator
+                        assert!(matches!(allocator_info, AllocatorInfo::None))
+                    } else {
+                        // We haven't implemented it for a free list allocator
+                        assert!(matches!(allocator_info, AllocatorInfo::Unimplemented))
+                    }
+                }
+                // We provide no info for a large object allocator
+                PlanSelector::PageProtect => assert!(matches!(allocator_info, AllocatorInfo::None)),
+            }
+        },
+        no_cleanup,
+    )
+}
diff --git a/src/vm/tests/mock_tests/mock_test_barrier_slow_path_assertion.rs b/src/vm/tests/mock_tests/mock_test_barrier_slow_path_assertion.rs
new file mode 100644
index 0000000000..bdbabec5fb
--- /dev/null
+++ b/src/vm/tests/mock_tests/mock_test_barrier_slow_path_assertion.rs
@@ -0,0 +1,62 @@
+// GITHUB-CI: MMTK_PLAN=GenImmix
+// GITHUB-CI: FEATURES=vo_bit,extreme_assertions
+
+use super::mock_test_prelude::*;
+
+use crate::util::{Address, ObjectReference};
+use atomic::Atomic;
+
+lazy_static! {
+    static ref FIXTURE: Fixture<SingleObject> = Fixture::new();
+}
+
+#[test]
+#[should_panic(expected = "object bit is unset")]
+fn test_assertion_barrier_invalid_ref() {
+    with_mockvm(
+        default_setup,
+        || {
+            FIXTURE.with_fixture_mut(|fixture| {
+                let objref = fixture.objref;
+
+                // Create an edge
+                let slot = Atomic::new(objref);
+                let edge = Address::from_ref(&slot);
+
+                // Create an invalid object reference (offset 8 bytes on the original object ref), and invoke barrier slowpath with it
+                // The invalid object ref has no VO bit, and the assertion should fail.
+ let invalid_objref = + ObjectReference::from_raw_address(objref.to_raw_address() + 8usize); + fixture.mutator_mut().barrier.object_reference_write_slow( + invalid_objref, + edge, + objref, + ); + }); + }, + no_cleanup, + ); +} + +#[test] +fn test_assertion_barrier_valid_ref() { + with_mockvm( + default_setup, + || { + FIXTURE.with_fixture_mut(|fixture| { + let objref = fixture.objref; + + // Create an edge + let slot = Atomic::new(objref); + let edge = Address::from_ref(&slot); + + // Invoke barrier slowpath with the valid object ref + fixture + .mutator_mut() + .barrier + .object_reference_write_slow(objref, edge, objref); + }); + }, + no_cleanup, + ) +} diff --git a/src/vm/tests/mock_tests/mock_test_conservatism.rs b/src/vm/tests/mock_tests/mock_test_conservatism.rs new file mode 100644 index 0000000000..9f3d47db04 --- /dev/null +++ b/src/vm/tests/mock_tests/mock_test_conservatism.rs @@ -0,0 +1,214 @@ +// GITHUB-CI: MMTK_PLAN=all +// GITHUB-CI: FEATURES=is_mmtk_object + +use super::mock_test_prelude::*; + +use crate::util::constants::LOG_BITS_IN_WORD; +use crate::util::is_mmtk_object::VO_BIT_REGION_SIZE; +use crate::util::*; + +lazy_static! { + static ref SINGLE_OBJECT: Fixture = Fixture::new(); +} + +fn basic_filter(addr: Address) -> bool { + !addr.is_zero() + && addr.as_usize() % VO_BIT_REGION_SIZE == (DEFAULT_OBJECT_REF_OFFSET % VO_BIT_REGION_SIZE) +} + +fn assert_filter_pass(addr: Address) { + assert!( + basic_filter(addr), + "{} should pass basic filter, but failed.", + addr, + ); +} + +fn assert_filter_fail(addr: Address) { + assert!( + !basic_filter(addr), + "{} should fail basic filter, but passed.", + addr, + ); +} + +fn assert_valid_objref(addr: Address) { + assert!( + memory_manager::is_mmtk_object(addr), + "mmtk_is_mmtk_object({}) should return true. Got false.", + addr, + ); +} + +fn assert_invalid_objref(addr: Address, real: Address) { + assert!( + !memory_manager::is_mmtk_object(addr), + "mmtk_is_mmtk_object({}) should return false. Got true. Real object: {}", + addr, + real, + ); +} + +#[test] +pub fn null() { + with_mockvm( + default_setup, + || { + SINGLE_OBJECT.with_fixture(|fixture| { + let addr = Address::ZERO; + assert_filter_fail(addr); + assert_invalid_objref(addr, fixture.objref.to_raw_address()); + }); + }, + no_cleanup, + ) +} + +// This should be small enough w.r.t `HEAP_START` and `HEAP_END`. 
+const SMALL_OFFSET: usize = 16384; + +#[test] +pub fn too_small() { + with_mockvm( + default_setup, + || { + SINGLE_OBJECT.with_fixture(|fixture| { + for offset in 1usize..SMALL_OFFSET { + let addr = Address::ZERO + offset; + assert_invalid_objref(addr, fixture.objref.to_raw_address()); + } + }); + }, + no_cleanup, + ) +} + +#[test] +pub fn max() { + with_mockvm( + default_setup, + || { + SINGLE_OBJECT.with_fixture(|fixture| { + let addr = Address::MAX; + assert_invalid_objref(addr, fixture.objref.to_raw_address()); + }); + }, + no_cleanup, + ) +} + +#[test] +pub fn too_big() { + with_mockvm( + default_setup, + || { + SINGLE_OBJECT.with_fixture(|fixture| { + for offset in 1usize..SMALL_OFFSET { + let addr = Address::MAX - offset; + assert_invalid_objref(addr, fixture.objref.to_raw_address()); + } + }); + }, + no_cleanup, + ) +} + +#[test] +pub fn direct_hit() { + with_mockvm( + default_setup, + || { + SINGLE_OBJECT.with_fixture(|fixture| { + let addr = fixture.objref.to_raw_address(); + assert_filter_pass(addr); + assert_valid_objref(addr); + }); + }, + no_cleanup, + ) +} + +const SEVERAL_PAGES: usize = 4 * crate::util::constants::BYTES_IN_PAGE; + +#[test] +pub fn small_offsets() { + with_mockvm( + default_setup, + || { + SINGLE_OBJECT.with_fixture(|fixture| { + for offset in 1usize..SEVERAL_PAGES { + let addr = fixture.objref.to_raw_address() + offset; + if basic_filter(addr) { + assert_invalid_objref(addr, fixture.objref.to_raw_address()); + } + } + }); + }, + no_cleanup, + ) +} + +#[test] +pub fn medium_offsets_aligned() { + SINGLE_OBJECT.with_fixture(|fixture| { + let alignment = std::mem::align_of::
(); + for offset in (alignment..(alignment * SEVERAL_PAGES)).step_by(alignment) { + let addr = fixture.objref.to_raw_address() + offset; + assert_filter_pass(addr); + assert_invalid_objref(addr, fixture.objref.to_raw_address()); + } + }); +} + +#[test] +pub fn large_offsets_aligned() { + with_mockvm( + default_setup, + || { + SINGLE_OBJECT.with_fixture(|fixture| { + for log_offset in 12usize..(usize::BITS as usize) { + let offset = 1usize << log_offset; + let addr = match fixture + .objref + .to_raw_address() + .as_usize() + .checked_add(offset) + { + Some(n) => unsafe { Address::from_usize(n) }, + None => break, + }; + assert_filter_pass(addr); + assert_invalid_objref(addr, fixture.objref.to_raw_address()); + } + }); + }, + no_cleanup, + ) +} + +#[test] +pub fn negative_offsets() { + with_mockvm( + default_setup, + || { + SINGLE_OBJECT.with_fixture(|fixture| { + for log_offset in LOG_BITS_IN_WORD..(usize::BITS as usize) { + let offset = 1usize << log_offset; + let addr = match fixture + .objref + .to_raw_address() + .as_usize() + .checked_sub(offset) + { + Some(0) => break, + Some(n) => unsafe { Address::from_usize(n) }, + None => break, + }; + assert_filter_pass(addr); + assert_invalid_objref(addr, fixture.objref.to_raw_address()); + } + }); + }, + no_cleanup, + ) +} diff --git a/src/vm/tests/mock_tests/mock_test_doc_avoid_resolving_allocator.rs b/src/vm/tests/mock_tests/mock_test_doc_avoid_resolving_allocator.rs new file mode 100644 index 0000000000..789b06c99e --- /dev/null +++ b/src/vm/tests/mock_tests/mock_test_doc_avoid_resolving_allocator.rs @@ -0,0 +1,47 @@ +use super::mock_test_prelude::*; + +use crate::util::alloc::Allocator; +use crate::util::alloc::BumpAllocator; +use crate::util::Address; +use crate::util::OpaquePointer; +use crate::util::{VMMutatorThread, VMThread}; +use crate::AllocationSemantics; + +lazy_static! { + static ref FIXTURE: Fixture = Fixture::new(); +} + +#[test] +pub fn acquire_typed_allocator() { + with_mockvm( + default_setup, + || { + let fixture = MMTKFixture::create(); + let tls_opaque_pointer = VMMutatorThread(VMThread(OpaquePointer::UNINITIALIZED)); + static mut DEFAULT_ALLOCATOR_OFFSET: usize = 0; + + // ANCHOR: avoid_resolving_allocator + // At boot time + let selector = + memory_manager::get_allocator_mapping(fixture.mmtk, AllocationSemantics::Default); + unsafe { + DEFAULT_ALLOCATOR_OFFSET = + crate::plan::Mutator::::get_allocator_base_offset(selector); + } + let mutator = memory_manager::bind_mutator(fixture.mmtk, tls_opaque_pointer); + + // At run time: allocate with the default semantics without resolving allocator + let default_allocator: &mut BumpAllocator = { + let mutator_addr = Address::from_ref(&*mutator); + unsafe { + (mutator_addr + DEFAULT_ALLOCATOR_OFFSET).as_mut_ref::>() + } + }; + let addr = default_allocator.alloc(8, 8, 0); + // ANCHOR_END: avoid_resolving_allocator + + assert!(!addr.is_zero()); + }, + no_cleanup, + ) +} diff --git a/src/vm/tests/mock_tests/mock_test_doc_mutator_storage.rs b/src/vm/tests/mock_tests/mock_test_doc_mutator_storage.rs new file mode 100644 index 0000000000..6d367c875d --- /dev/null +++ b/src/vm/tests/mock_tests/mock_test_doc_mutator_storage.rs @@ -0,0 +1,151 @@ +// GITHUB-CI: MMTK_PLAN=NoGC,SemiSpace,Immix,GenImmix,StickyImmix + +use super::mock_test_prelude::*; + +use crate::util::Address; +use crate::util::OpaquePointer; +use crate::util::{VMMutatorThread, VMThread}; +use crate::AllocationSemantics; +use crate::Mutator; + +lazy_static! 
{ + static ref FIXTURE: Fixture = Fixture::new(); +} + +#[test] +pub fn boxed_pointer() { + with_mockvm( + default_setup, + || { + FIXTURE.with_fixture(|fixture| { + let tls_opaque_pointer = VMMutatorThread(VMThread(OpaquePointer::UNINITIALIZED)); + + // ANCHOR: mutator_storage_boxed_pointer + struct MutatorInTLS { + // Store the mutator as a boxed pointer. + // Accessing any value in the mutator will need a dereferencing of the boxed pointer. + ptr: Box>, + } + + // Bind an MMTk mutator + let mutator = memory_manager::bind_mutator(fixture.mmtk, tls_opaque_pointer); + // Store the pointer in TLS + let mut storage = MutatorInTLS { ptr: mutator }; + + // Allocate + let addr = + memory_manager::alloc(&mut storage.ptr, 8, 8, 0, AllocationSemantics::Default); + // ANCHOR_END: mutator_storage_boxed_pointer + + assert!(!addr.is_zero()); + }); + }, + no_cleanup, + ) +} + +#[test] +pub fn embed_mutator_struct() { + with_mockvm( + default_setup, + || { + FIXTURE.with_fixture(|fixture| { + let tls_opaque_pointer = VMMutatorThread(VMThread(OpaquePointer::UNINITIALIZED)); + + // ANCHOR: mutator_storage_embed_mutator_struct + struct MutatorInTLS { + embed: Mutator, + } + + // Bind an MMTk mutator + let mutator = memory_manager::bind_mutator(fixture.mmtk, tls_opaque_pointer); + // Store the struct (or use memcpy for non-Rust code) + let mut storage = MutatorInTLS { embed: *mutator }; + // Allocate + let addr = memory_manager::alloc( + &mut storage.embed, + 8, + 8, + 0, + AllocationSemantics::Default, + ); + // ANCHOR_END: mutator_storage_embed_mutator_struct + + assert!(!addr.is_zero()); + }) + }, + no_cleanup, + ) +} + +#[test] +pub fn embed_fastpath_struct() { + with_mockvm( + default_setup, + || { + FIXTURE.with_fixture(|fixture| { + let tls_opaque_pointer = VMMutatorThread(VMThread(OpaquePointer::UNINITIALIZED)); + + // ANCHOR: mutator_storage_embed_fastpath_struct + use crate::util::alloc::BumpPointer; + struct MutatorInTLS { + default_bump_pointer: BumpPointer, + mutator: Box>, + } + + // Bind an MMTk mutator + let mutator = memory_manager::bind_mutator(fixture.mmtk, tls_opaque_pointer); + // Create a fastpath BumpPointer with default(). The BumpPointer from default() will guarantee to fail on the first allocation + // so the allocation goes to the slowpath and we will get an allocation buffer from MMTk. + let default_bump_pointer = BumpPointer::default(); + // Store the fastpath BumpPointer along with the mutator + let mut storage = MutatorInTLS { + default_bump_pointer, + mutator, + }; + + // Allocate + let mut allocate_default = |size: usize| -> Address { + // Alignment code is omitted here to make the code simpler to read. + // In an actual implementation, alignment and offset need to be considered by the bindings. 
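+                    // For example (an illustrative sketch only, not used below), an
+                    // `align` request could be honored by rounding the cursor up first:
+                    //     let aligned = (cursor_as_usize + align - 1) & !(align - 1);
+                    // and then bumping from `aligned` instead of the raw cursor.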
+ let new_cursor = storage.default_bump_pointer.cursor + size; + if new_cursor < storage.default_bump_pointer.limit { + let addr = storage.default_bump_pointer.cursor; + storage.default_bump_pointer.cursor = new_cursor; + addr + } else { + use crate::util::alloc::Allocator; + let selector = memory_manager::get_allocator_mapping( + fixture.mmtk, + AllocationSemantics::Default, + ); + let default_allocator = unsafe { + storage + .mutator + .allocator_impl_mut::>( + selector, + ) + }; + // Copy bump pointer values to the allocator in the mutator + default_allocator.bump_pointer = storage.default_bump_pointer; + // Do slow path allocation with MMTk + let addr = default_allocator.alloc_slow(size, 8, 0); + // Copy bump pointer values to the fastpath BumpPointer so we will have an allocation buffer. + storage.default_bump_pointer = default_allocator.bump_pointer; + addr + } + }; + + // Allocate: this will fail in the fastpath, and will get an allocation buffer from the slowpath + let addr1 = allocate_default(8); + // Allocate: this will allocate from the fastpath + let addr2 = allocate_default(8); + // ANCHOR_END: mutator_storage_embed_fastpath_struct + + assert!(!addr1.is_zero()); + assert!(!addr2.is_zero()); + }) + }, + no_cleanup, + ) +} diff --git a/src/vm/tests/mock_tests/mock_test_edges.rs b/src/vm/tests/mock_tests/mock_test_edges.rs new file mode 100644 index 0000000000..b443ca3a69 --- /dev/null +++ b/src/vm/tests/mock_tests/mock_test_edges.rs @@ -0,0 +1,429 @@ +// GITHUB-CI: MMTK_PLAN=NoGC + +#![allow(unused)] + +use super::mock_test_prelude::*; +use crate::{ + util::{Address, ObjectReference}, + vm::edge_shape::{Edge, SimpleEdge}, +}; +use atomic::{Atomic, Ordering}; + +lazy_static! { + static ref FIXTURE: Fixture = Fixture::new(); +} + +mod simple_edges { + use super::*; + + #[test] + pub fn load_simple() { + with_mockvm( + default_setup, + || { + FIXTURE.with_fixture(|fixture| { + let mut slot: Atomic = Atomic::new(fixture.objref1); + + let edge = SimpleEdge::from_address(Address::from_ref(&slot)); + let objref = edge.load(); + + assert_eq!(objref, fixture.objref1); + }); + }, + no_cleanup, + ) + } + + #[test] + pub fn store_simple() { + with_mockvm( + default_setup, + || { + FIXTURE.with_fixture(|fixture| { + let mut slot: Atomic = Atomic::new(fixture.objref1); + + let edge = SimpleEdge::from_address(Address::from_ref(&slot)); + edge.store(fixture.objref2); + assert_eq!(slot.load(Ordering::SeqCst), fixture.objref2); + + let objref = edge.load(); + assert_eq!(objref, fixture.objref2); + }); + }, + no_cleanup, + ) + } +} + +#[cfg(target_pointer_width = "64")] +mod compressed_oop { + use super::*; + + /// This represents a location that holds a 32-bit pointer on a 64-bit machine. + /// + /// OpenJDK uses this kind of edge to store compressed OOPs on 64-bit machines. 
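+    ///
+    /// For example, with the 3-bit shift used here (and no heap base, which this
+    /// simplified version assumes), the 8-byte-aligned address 0x1_0000_0008 is
+    /// stored as the 32-bit value 0x2000_0001 and expanded back by the shift on load.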
+ #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] + pub struct CompressedOopEdge { + slot_addr: *mut Atomic, + } + + unsafe impl Send for CompressedOopEdge {} + + impl CompressedOopEdge { + pub fn from_address(address: Address) -> Self { + Self { + slot_addr: address.to_mut_ptr(), + } + } + pub fn as_address(&self) -> Address { + Address::from_mut_ptr(self.slot_addr) + } + } + + impl Edge for CompressedOopEdge { + fn load(&self) -> ObjectReference { + let compressed = unsafe { (*self.slot_addr).load(atomic::Ordering::Relaxed) }; + let expanded = (compressed as usize) << 3; + ObjectReference::from_raw_address(unsafe { Address::from_usize(expanded) }) + } + + fn store(&self, object: ObjectReference) { + let expanded = object.to_raw_address().as_usize(); + let compressed = (expanded >> 3) as u32; + unsafe { (*self.slot_addr).store(compressed, atomic::Ordering::Relaxed) } + } + } + + // Two 35-bit addresses aligned to 8 bytes (3 zeros in the lowest bits). + const COMPRESSABLE_ADDR1: usize = 0b101_10111011_11011111_01111110_11111000usize; + const COMPRESSABLE_ADDR2: usize = 0b110_11110111_01101010_11011101_11101000usize; + + #[test] + pub fn load_compressed() { + // Note: We cannot guarantee GC will allocate an object in the low address region. + // So we make up addresses just for testing the bit operations of compressed OOP edges. + let compressed1 = (COMPRESSABLE_ADDR1 >> 3) as u32; + let objref1 = + ObjectReference::from_raw_address(unsafe { Address::from_usize(COMPRESSABLE_ADDR1) }); + + let mut slot: Atomic = Atomic::new(compressed1); + + let edge = CompressedOopEdge::from_address(Address::from_ref(&slot)); + let objref = edge.load(); + + assert_eq!(objref, objref1); + } + + #[test] + pub fn store_compressed() { + // Note: We cannot guarantee GC will allocate an object in the low address region. + // So we make up addresses just for testing the bit operations of compressed OOP edges. + let compressed1 = (COMPRESSABLE_ADDR1 >> 3) as u32; + let compressed2 = (COMPRESSABLE_ADDR2 >> 3) as u32; + let objref2 = + ObjectReference::from_raw_address(unsafe { Address::from_usize(COMPRESSABLE_ADDR2) }); + + let mut slot: Atomic = Atomic::new(compressed1); + + let edge = CompressedOopEdge::from_address(Address::from_ref(&slot)); + edge.store(objref2); + assert_eq!(slot.load(Ordering::SeqCst), compressed2); + + let objref = edge.load(); + assert_eq!(objref, objref2); + } +} + +mod offset_edge { + use super::*; + + /// This represents an edge that holds a pointer to the *middle* of an object, and the offset is known. + /// + /// Julia uses this trick to facilitate deleting array elements from the front. + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] + pub struct OffsetEdge { + slot_addr: *mut Atomic
, + offset: usize, + } + + unsafe impl Send for OffsetEdge {} + + impl OffsetEdge { + pub fn new_no_offset(address: Address) -> Self { + Self { + slot_addr: address.to_mut_ptr(), + offset: 0, + } + } + + pub fn new_with_offset(address: Address, offset: usize) -> Self { + Self { + slot_addr: address.to_mut_ptr(), + offset, + } + } + + pub fn slot_address(&self) -> Address { + Address::from_mut_ptr(self.slot_addr) + } + + pub fn offset(&self) -> usize { + self.offset + } + } + + impl Edge for OffsetEdge { + fn load(&self) -> ObjectReference { + let middle = unsafe { (*self.slot_addr).load(atomic::Ordering::Relaxed) }; + let begin = middle - self.offset; + ObjectReference::from_raw_address(begin) + } + + fn store(&self, object: ObjectReference) { + let begin = object.to_raw_address(); + let middle = begin + self.offset; + unsafe { (*self.slot_addr).store(middle, atomic::Ordering::Relaxed) } + } + } + + pub const OFFSET: usize = 48; + + #[test] + pub fn load_offset() { + with_mockvm( + default_setup, + || { + FIXTURE.with_fixture(|fixture| { + let addr1 = fixture.objref1.to_raw_address(); + let mut slot: Atomic
= Atomic::new(addr1 + OFFSET); + + let edge = OffsetEdge::new_with_offset(Address::from_ref(&slot), OFFSET); + let objref = edge.load(); + + assert_eq!(objref, fixture.objref1); + }); + }, + no_cleanup, + ) + } + + #[test] + pub fn store_offset() { + with_mockvm( + default_setup, + || { + FIXTURE.with_fixture(|fixture| { + let addr1 = fixture.objref1.to_raw_address(); + let addr2 = fixture.objref2.to_raw_address(); + let mut slot: Atomic
= Atomic::new(addr1 + OFFSET); + + let edge = OffsetEdge::new_with_offset(Address::from_ref(&slot), OFFSET); + edge.store(fixture.objref2); + assert_eq!(slot.load(Ordering::SeqCst), addr2 + OFFSET); + + let objref = edge.load(); + assert_eq!(objref, fixture.objref2); + }); + }, + no_cleanup, + ) + } +} + +mod tagged_edge { + use super::*; + + /// This edge presents the object reference itself to mmtk-core. + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] + pub struct TaggedEdge { + slot_addr: *mut Atomic, + } + + unsafe impl Send for TaggedEdge {} + + impl TaggedEdge { + // The DummyVM has OBJECT_REF_OFFSET = 4. + // Using a two-bit tag should be safe on both 32-bit and 64-bit platforms. + const TAG_BITS_MASK: usize = 0b11; + + pub fn new(address: Address) -> Self { + Self { + slot_addr: address.to_mut_ptr(), + } + } + } + + impl Edge for TaggedEdge { + fn load(&self) -> ObjectReference { + let tagged = unsafe { (*self.slot_addr).load(atomic::Ordering::Relaxed) }; + let untagged = tagged & !Self::TAG_BITS_MASK; + ObjectReference::from_raw_address(unsafe { Address::from_usize(untagged) }) + } + + fn store(&self, object: ObjectReference) { + let old_tagged = unsafe { (*self.slot_addr).load(atomic::Ordering::Relaxed) }; + let new_untagged = object.to_raw_address().as_usize(); + let new_tagged = new_untagged | (old_tagged & Self::TAG_BITS_MASK); + unsafe { (*self.slot_addr).store(new_tagged, atomic::Ordering::Relaxed) } + } + } + + pub const TAG1: usize = 0b01; + pub const TAG2: usize = 0b10; + + #[test] + pub fn load_tagged() { + with_mockvm( + default_setup, + || { + FIXTURE.with_fixture(|fixture| { + let mut slot1: Atomic = + Atomic::new(fixture.objref1.to_raw_address().as_usize() | TAG1); + let mut slot2: Atomic = + Atomic::new(fixture.objref1.to_raw_address().as_usize() | TAG2); + + let edge1 = TaggedEdge::new(Address::from_ref(&slot1)); + let edge2 = TaggedEdge::new(Address::from_ref(&slot2)); + let objref1 = edge1.load(); + let objref2 = edge2.load(); + + // Tags should not affect loaded values. + assert_eq!(objref1, fixture.objref1); + assert_eq!(objref2, fixture.objref1); + }); + }, + no_cleanup, + ) + } + + #[test] + pub fn store_tagged() { + with_mockvm( + default_setup, + || { + FIXTURE.with_fixture(|fixture| { + let mut slot1: Atomic = + Atomic::new(fixture.objref1.to_raw_address().as_usize() | TAG1); + let mut slot2: Atomic = + Atomic::new(fixture.objref1.to_raw_address().as_usize() | TAG2); + + let edge1 = TaggedEdge::new(Address::from_ref(&slot1)); + let edge2 = TaggedEdge::new(Address::from_ref(&slot2)); + edge1.store(fixture.objref2); + edge2.store(fixture.objref2); + + // Tags should be preserved. + assert_eq!( + slot1.load(Ordering::SeqCst), + fixture.objref2.to_raw_address().as_usize() | TAG1 + ); + assert_eq!( + slot2.load(Ordering::SeqCst), + fixture.objref2.to_raw_address().as_usize() | TAG2 + ); + + let objref1 = edge1.load(); + let objref2 = edge2.load(); + + // Tags should not affect loaded values. + assert_eq!(objref1, fixture.objref2); + assert_eq!(objref2, fixture.objref2); + }); + }, + no_cleanup, + ) + } +} + +mod mixed { + #[cfg(target_pointer_width = "64")] + use super::compressed_oop::CompressedOopEdge; + use super::offset_edge::OffsetEdge; + use super::offset_edge::OFFSET; + use super::tagged_edge::TaggedEdge; + use super::tagged_edge::TAG1; + use super::*; + use crate::vm::edge_shape::SimpleEdge; + + /// If a VM supports multiple kinds of edges, we can use tagged union to represent all of them. 
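+    ///
+    /// `load` and `store` dispatch to the active variant with a `match`, so, for
+    /// example, a `DummyVMEdge::Offset(e)` behaves exactly like the wrapped `e`.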
+ #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] + pub enum DummyVMEdge { + Simple(SimpleEdge), + #[cfg(target_pointer_width = "64")] + Compressed(compressed_oop::CompressedOopEdge), + Offset(OffsetEdge), + Tagged(TaggedEdge), + } + + unsafe impl Send for DummyVMEdge {} + + impl Edge for DummyVMEdge { + fn load(&self) -> ObjectReference { + match self { + DummyVMEdge::Simple(e) => e.load(), + #[cfg(target_pointer_width = "64")] + DummyVMEdge::Compressed(e) => e.load(), + DummyVMEdge::Offset(e) => e.load(), + DummyVMEdge::Tagged(e) => e.load(), + } + } + + fn store(&self, object: ObjectReference) { + match self { + DummyVMEdge::Simple(e) => e.store(object), + #[cfg(target_pointer_width = "64")] + DummyVMEdge::Compressed(e) => e.store(object), + DummyVMEdge::Offset(e) => e.store(object), + DummyVMEdge::Tagged(e) => e.store(object), + } + } + } + + #[test] + pub fn mixed() { + with_mockvm( + default_setup, + || { + const OFFSET: usize = 48; + + FIXTURE.with_fixture(|fixture| { + let addr1 = fixture.objref1.to_raw_address(); + let addr2 = fixture.objref2.to_raw_address(); + + let mut slot1: Atomic = Atomic::new(fixture.objref1); + let mut slot3: Atomic
= Atomic::new(addr1 + OFFSET); + let mut slot4: Atomic = Atomic::new(addr1.as_usize() | TAG1); + + let edge1 = SimpleEdge::from_address(Address::from_ref(&slot1)); + let edge3 = OffsetEdge::new_with_offset(Address::from_ref(&slot3), OFFSET); + let edge4 = TaggedEdge::new(Address::from_ref(&slot4)); + + let de1 = DummyVMEdge::Simple(edge1); + let de3 = DummyVMEdge::Offset(edge3); + let de4 = DummyVMEdge::Tagged(edge4); + + let edges = [de1, de3, de4]; + for (i, edge) in edges.iter().enumerate() { + let objref = edge.load(); + assert_eq!(objref, fixture.objref1, "Edge {} is not properly loaded", i); + } + + let mutable_edges = [de1, de3, de4]; + for (i, edge) in mutable_edges.iter().enumerate() { + edge.store(fixture.objref2); + let objref = edge.load(); + assert_eq!( + objref, fixture.objref2, + "Edge {} is not properly loaded after store", + i + ); + } + + assert_eq!(slot1.load(Ordering::SeqCst), fixture.objref2); + assert_eq!(slot3.load(Ordering::SeqCst), addr2 + OFFSET); + }); + }, + no_cleanup, + ) + } +} diff --git a/src/vm/tests/mock_tests/mock_test_handle_mmap_conflict.rs b/src/vm/tests/mock_tests/mock_test_handle_mmap_conflict.rs new file mode 100644 index 0000000000..7d07f7f8e4 --- /dev/null +++ b/src/vm/tests/mock_tests/mock_test_handle_mmap_conflict.rs @@ -0,0 +1,36 @@ +use super::mock_test_prelude::*; + +use crate::util::memory; +use crate::util::opaque_pointer::*; +use crate::util::Address; + +#[test] +pub fn test_handle_mmap_conflict() { + with_mockvm( + default_setup, + || { + let start = unsafe { Address::from_usize(0x100_0000) }; + let one_megabyte = 1000000; + let mmap1_res = + memory::dzmmap_noreplace(start, one_megabyte, memory::MmapStrategy::Normal); + assert!(mmap1_res.is_ok()); + + let panic_res = std::panic::catch_unwind(|| { + let mmap2_res = + memory::dzmmap_noreplace(start, one_megabyte, memory::MmapStrategy::Normal); + assert!(mmap2_res.is_err()); + memory::handle_mmap_error::( + mmap2_res.err().unwrap(), + VMThread::UNINITIALIZED, + ); + }); + + // The error should match the error message in memory::handle_mmap_error() + assert!(panic_res.is_err()); + let err = panic_res.err().unwrap(); + assert!(err.is::<&str>()); + assert_eq!(err.downcast_ref::<&str>().unwrap(), &"Failed to mmap, the address is already mapped. Should MMTk quanrantine the address range first?"); + }, + no_cleanup, + ) +} diff --git a/src/vm/tests/mock_tests/mock_test_handle_mmap_oom.rs b/src/vm/tests/mock_tests/mock_test_handle_mmap_oom.rs new file mode 100644 index 0000000000..c2886d745c --- /dev/null +++ b/src/vm/tests/mock_tests/mock_test_handle_mmap_oom.rs @@ -0,0 +1,45 @@ +use super::mock_test_prelude::*; + +use crate::util::memory; +use crate::util::opaque_pointer::*; +use crate::util::Address; + +#[cfg(target_pointer_width = "32")] +const LARGE_SIZE: usize = 4_294_967_295; +#[cfg(target_pointer_width = "64")] +const LARGE_SIZE: usize = 1_000_000_000_000; + +#[test] +pub fn test_handle_mmap_oom() { + with_mockvm( + default_setup, + || { + let panic_res = std::panic::catch_unwind(move || { + let start = unsafe { Address::from_usize(0x100_0000) }; + // mmap 1 terabyte memory - we expect this will fail due to out of memory. + // If that's not the case, increase the size we mmap. 
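+                // (With the 64-bit LARGE_SIZE above this is 1_000_000_000_000 bytes;
+                // 32-bit targets use 4 GiB - 1 instead.)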
+                let mmap_res =
+                    memory::dzmmap_noreplace(start, LARGE_SIZE, memory::MmapStrategy::Normal);
+
+                memory::handle_mmap_error::<MockVM>(
+                    mmap_res.err().unwrap(),
+                    VMThread::UNINITIALIZED,
+                );
+            });
+            assert!(panic_res.is_err());
+
+            // The error should match the default implementation of Collection::out_of_memory()
+            let err = panic_res.err().unwrap();
+            assert!(err.is::<String>());
+            assert_eq!(
+                err.downcast_ref::<String>().unwrap(),
+                &"Out of memory with MmapOutOfMemory!"
+            );
+        },
+        || {
+            read_mockvm(|mock| {
+                assert!(mock.out_of_memory.is_called());
+            })
+        },
+    )
+}
diff --git a/src/vm/tests/mock_tests/mock_test_is_in_mmtk_spaces.rs b/src/vm/tests/mock_tests/mock_test_is_in_mmtk_spaces.rs
new file mode 100644
index 0000000000..45cf7ce535
--- /dev/null
+++ b/src/vm/tests/mock_tests/mock_test_is_in_mmtk_spaces.rs
@@ -0,0 +1,117 @@
+// GITHUB-CI: MMTK_PLAN=all
+
+use super::mock_test_prelude::*;
+
+use crate::util::*;
+
+lazy_static! {
+    static ref SINGLE_OBJECT: Fixture<SingleObject> = Fixture::new();
+}
+
+#[test]
+pub fn null() {
+    with_mockvm(
+        default_setup,
+        || {
+            SINGLE_OBJECT.with_fixture(|_| {
+                assert!(
+                    !memory_manager::is_in_mmtk_spaces::<MockVM>(ObjectReference::NULL),
+                    "NULL pointer should not be in any MMTk spaces."
+                );
+            });
+        },
+        no_cleanup,
+    )
+}
+
+#[test]
+pub fn max() {
+    with_mockvm(
+        default_setup,
+        || {
+            SINGLE_OBJECT.with_fixture(|_fixture| {
+                assert!(
+                    !memory_manager::is_in_mmtk_spaces::<MockVM>(
+                        ObjectReference::from_raw_address(Address::MAX)
+                    ),
+                    "Address::MAX should not be in any MMTk spaces."
+                );
+            });
+        },
+        no_cleanup,
+    )
+}
+
+#[test]
+pub fn direct_hit() {
+    with_mockvm(
+        default_setup,
+        || {
+            SINGLE_OBJECT.with_fixture(|fixture| {
+                assert!(
+                    memory_manager::is_in_mmtk_spaces::<MockVM>(fixture.objref),
+                    "The address of the allocated object should be in the space"
+                );
+            });
+        },
+        no_cleanup,
+    )
+}
+
+#[test]
+pub fn large_offsets_aligned() {
+    with_mockvm(
+        default_setup,
+        || {
+            SINGLE_OBJECT.with_fixture(|fixture| {
+                for log_offset in 12usize..(usize::BITS as usize) {
+                    let offset = 1usize << log_offset;
+                    let addr = match fixture
+                        .objref
+                        .to_raw_address()
+                        .as_usize()
+                        .checked_add(offset)
+                    {
+                        Some(n) => unsafe { Address::from_usize(n) },
+                        None => break,
+                    };
+                    // It's just a smoke test. It is hard to predict if the addr is still in any space,
+                    // but it must not crash.
+                    let _ = memory_manager::is_in_mmtk_spaces::<MockVM>(
+                        ObjectReference::from_raw_address(addr),
+                    );
+                }
+            });
+        },
+        no_cleanup,
+    )
+}
+
+#[test]
+pub fn negative_offsets() {
+    with_mockvm(
+        default_setup,
+        || {
+            SINGLE_OBJECT.with_fixture(|fixture| {
+                for log_offset in 1usize..(usize::BITS as usize) {
+                    let offset = 1usize << log_offset;
+                    let addr = match fixture
+                        .objref
+                        .to_raw_address()
+                        .as_usize()
+                        .checked_sub(offset)
+                    {
+                        Some(n) => unsafe { Address::from_usize(n) },
+                        None => break,
+                    };
+                    // It's just a smoke test. It is hard to predict if the addr is still in any space,
+                    // but it must not crash.
+                    let _ = memory_manager::is_in_mmtk_spaces::<MockVM>(
+                        ObjectReference::from_raw_address(addr),
+                    );
+                }
+            });
+        },
+        no_cleanup,
+    )
+}
diff --git a/src/vm/tests/mock_tests/mock_test_issue139_allocate_non_multiple_of_min_alignment.rs b/src/vm/tests/mock_tests/mock_test_issue139_allocate_non_multiple_of_min_alignment.rs
new file mode 100644
index 0000000000..04061cb98e
--- /dev/null
+++ b/src/vm/tests/mock_tests/mock_test_issue139_allocate_non_multiple_of_min_alignment.rs
@@ -0,0 +1,24 @@
+use super::mock_test_prelude::*;
+
+use crate::AllocationSemantics;
+
+#[test]
+pub fn issue139_alloc_non_multiple_of_min_alignment() {
+    with_mockvm(
+        default_setup,
+        || {
+            let mut fixture = MutatorFixture::create();
+
+            // Allocate 14 bytes with an 8-byte alignment requirement
+            let addr =
+                memory_manager::alloc(&mut fixture.mutator, 14, 8, 0, AllocationSemantics::Default);
+            assert!(addr.is_aligned_to(8));
+            // After the allocation, the cursor is not MIN_ALIGNMENT aligned. If we have the assertion in the next allocation to check if the cursor is aligned to MIN_ALIGNMENT, it fails.
+            // We have to remove that assertion.
+            let addr2 =
+                memory_manager::alloc(&mut fixture.mutator, 14, 8, 0, AllocationSemantics::Default);
+            assert!(addr2.is_aligned_to(8));
+        },
+        no_cleanup,
+    )
+}
diff --git a/src/vm/tests/mock_tests/mock_test_issue867_allocate_unrealistically_large_object.rs b/src/vm/tests/mock_tests/mock_test_issue867_allocate_unrealistically_large_object.rs
new file mode 100644
index 0000000000..5db0a65774
--- /dev/null
+++ b/src/vm/tests/mock_tests/mock_test_issue867_allocate_unrealistically_large_object.rs
@@ -0,0 +1,140 @@
+// GITHUB-CI: MMTK_PLAN=all
+
+use super::mock_test_prelude::*;
+
+use crate::plan::AllocationSemantics;
+use crate::util::Address;
+use crate::Mutator;
+use crate::MMTK;
+
+lazy_static! {
+    static ref MUTATOR: Fixture<MutatorFixture> = Fixture::new();
+}
+
+// Checks if the allocation should be an LOS allocation.
+fn alloc_default_or_large(
+    mmtk: &MMTK<MockVM>,
+    mutator: &mut Mutator<MockVM>,
+    size: usize,
+    align: usize,
+    offset: usize,
+    semantic: AllocationSemantics,
+) -> Address {
+    let max_non_los_size = mmtk
+        .get_plan()
+        .constraints()
+        .max_non_los_default_alloc_bytes;
+    if size >= max_non_los_size {
+        memory_manager::alloc(mutator, size, align, offset, AllocationSemantics::Los)
+    } else {
+        memory_manager::alloc(mutator, size, align, offset, semantic)
+    }
+}
+
+#[test]
+#[should_panic(expected = "Out of memory with HeapOutOfMemory!")]
+pub fn allocate_max_size_object() {
+    with_mockvm(
+        default_setup,
+        || {
+            let (size, align) = (usize::MAX, 8);
+
+            MUTATOR.with_fixture_mut(|fixture| {
+                alloc_default_or_large(
+                    fixture.mmtk(),
+                    &mut fixture.mutator,
+                    size,
+                    align,
+                    0,
+                    AllocationSemantics::Default,
+                );
+            })
+        },
+        no_cleanup,
+    )
+}
+
+#[test]
+// This test panics with 'attempt to add with overflow', as we do computation with the size
+// in the fastpath. I don't think we want to do any extra check in the fastpath. There is
+// nothing we can do about it without sacrificing performance.
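+// As an illustration (hypothetical, mirroring a typical bump-pointer fastpath):
+//     let new_cursor = cursor + size; // overflows when size == usize::MAX
+// so the addition itself panics in debug builds before any OOM path is reached,
+// which is why this test is ignored rather than "fixed" in the fastpath.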
+#[should_panic(expected = "Out of memory with HeapOutOfMemory!")] +#[ignore] +pub fn allocate_max_size_object_after_succeed() { + with_mockvm( + default_setup, + || { + MUTATOR.with_fixture_mut(|fixture| { + // Allocate something so we have a thread local allocation buffer + alloc_default_or_large( + fixture.mmtk(), + &mut fixture.mutator, + 8, + 8, + 0, + AllocationSemantics::Default, + ); + // Allocate an unrealistically large object + alloc_default_or_large( + fixture.mmtk(), + &mut fixture.mutator, + usize::MAX, + 8, + 0, + AllocationSemantics::Default, + ); + }) + }, + no_cleanup, + ) +} + +#[test] +#[should_panic(expected = "Out of memory with HeapOutOfMemory!")] +pub fn allocate_unrealistically_large_object() { + with_mockvm( + default_setup, + || { + const CHUNK: usize = 4 * 1024 * 1024; // 4MB + // Leave some room, so we won't have arithmetic overflow when we compute size and do alignment. + let (size, align) = ( + crate::util::conversions::raw_align_down(usize::MAX - CHUNK, 4096), + 8, + ); + + MUTATOR.with_fixture_mut(|fixture| { + alloc_default_or_large( + fixture.mmtk(), + &mut fixture.mutator, + size, + align, + 0, + AllocationSemantics::Default, + ); + }) + }, + no_cleanup, + ) +} + +#[test] +#[should_panic(expected = "Out of memory with HeapOutOfMemory!")] +pub fn allocate_more_than_heap_size() { + with_mockvm( + default_setup, + || { + // The heap has 1 MB. Allocating with 2MB will cause OOM. + MUTATOR.with_fixture_mut(|fixture| { + alloc_default_or_large( + fixture.mmtk(), + &mut fixture.mutator, + 2 * 1024 * 1024, + 8, + 0, + AllocationSemantics::Default, + ); + }) + }, + no_cleanup, + ) +} diff --git a/src/vm/tests/mock_tests/mock_test_malloc_counted.rs b/src/vm/tests/mock_tests/mock_test_malloc_counted.rs new file mode 100644 index 0000000000..8dd50b6c60 --- /dev/null +++ b/src/vm/tests/mock_tests/mock_test_malloc_counted.rs @@ -0,0 +1,107 @@ +// GITHUB-CI: FEATURES=malloc_counted_size + +use super::mock_test_prelude::*; + +lazy_static! 
{
+    static ref MMTK: Fixture<MMTKFixture> = Fixture::new();
+}
+
+#[test]
+pub fn malloc_free() {
+    with_mockvm(
+        default_setup,
+        || {
+            MMTK.with_fixture(|fixture| {
+                let bytes_before = memory_manager::get_malloc_bytes(&fixture.mmtk);
+
+                let res = memory_manager::counted_malloc(&fixture.mmtk, 8);
+                assert!(!res.is_zero());
+                let bytes_after_alloc = memory_manager::get_malloc_bytes(&fixture.mmtk);
+                assert_eq!(bytes_before + 8, bytes_after_alloc);
+
+                memory_manager::free_with_size(&fixture.mmtk, res, 8);
+                let bytes_after_free = memory_manager::get_malloc_bytes(&fixture.mmtk);
+                assert_eq!(bytes_before, bytes_after_free);
+            });
+        },
+        no_cleanup,
+    )
+}
+
+#[test]
+pub fn calloc_free() {
+    with_mockvm(
+        default_setup,
+        || {
+            MMTK.with_fixture(|fixture| {
+                let bytes_before = memory_manager::get_malloc_bytes(&fixture.mmtk);
+
+                let res = memory_manager::counted_calloc(&fixture.mmtk, 1, 8);
+                assert!(!res.is_zero());
+                let bytes_after_alloc = memory_manager::get_malloc_bytes(&fixture.mmtk);
+                assert_eq!(bytes_before + 8, bytes_after_alloc);
+
+                memory_manager::free_with_size(&fixture.mmtk, res, 8);
+                let bytes_after_free = memory_manager::get_malloc_bytes(&fixture.mmtk);
+                assert_eq!(bytes_before, bytes_after_free);
+            });
+        },
+        no_cleanup,
+    )
+}
+
+#[test]
+pub fn realloc_grow() {
+    with_mockvm(
+        default_setup,
+        || {
+            MMTK.with_fixture(|fixture| {
+                let bytes_before = memory_manager::get_malloc_bytes(&fixture.mmtk);
+
+                let res1 = memory_manager::counted_malloc(&fixture.mmtk, 8);
+                assert!(!res1.is_zero());
+                let bytes_after_alloc = memory_manager::get_malloc_bytes(&fixture.mmtk);
+                assert_eq!(bytes_before + 8, bytes_after_alloc);
+
+                // grow to 16 bytes
+                let res2 = memory_manager::realloc_with_old_size(&fixture.mmtk, res1, 16, 8);
+                assert!(!res2.is_zero());
+                let bytes_after_realloc = memory_manager::get_malloc_bytes(&fixture.mmtk);
+                assert_eq!(bytes_before + 16, bytes_after_realloc);
+
+                memory_manager::free_with_size(&fixture.mmtk, res2, 16);
+                let bytes_after_free = memory_manager::get_malloc_bytes(&fixture.mmtk);
+                assert_eq!(bytes_before, bytes_after_free);
+            });
+        },
+        no_cleanup,
+    )
+}
+
+#[test]
+pub fn realloc_shrink() {
+    with_mockvm(
+        default_setup,
+        || {
+            MMTK.with_fixture(|fixture| {
+                let bytes_before = memory_manager::get_malloc_bytes(&fixture.mmtk);
+
+                let res1 = memory_manager::counted_malloc(&fixture.mmtk, 16);
+                assert!(!res1.is_zero());
+                let bytes_after_alloc = memory_manager::get_malloc_bytes(&fixture.mmtk);
+                assert_eq!(bytes_before + 16, bytes_after_alloc);
+
+                // shrink to 8 bytes
+                let res2 = memory_manager::realloc_with_old_size(&fixture.mmtk, res1, 8, 16);
+                assert!(!res2.is_zero());
+                let bytes_after_realloc = memory_manager::get_malloc_bytes(&fixture.mmtk);
+                assert_eq!(bytes_before + 8, bytes_after_realloc);
+
+                memory_manager::free_with_size(&fixture.mmtk, res2, 8);
+                let bytes_after_free = memory_manager::get_malloc_bytes(&fixture.mmtk);
+                assert_eq!(bytes_before, bytes_after_free);
+            });
+        },
+        no_cleanup,
+    )
+}
diff --git a/src/vm/tests/mock_tests/mock_test_malloc_ms.rs b/src/vm/tests/mock_tests/mock_test_malloc_ms.rs
new file mode 100644
index 0000000000..2046535783
--- /dev/null
+++ b/src/vm/tests/mock_tests/mock_test_malloc_ms.rs
@@ -0,0 +1,47 @@
+use super::mock_test_prelude::*;
+
+use crate::util::malloc::malloc_ms_util;
+
+#[test]
+fn test_malloc() {
+    with_mockvm(
+        default_setup,
+        || {
+            let (address1, bool1) = malloc_ms_util::alloc::<MockVM>(16, 8, 0);
+            let (address2, bool2) = malloc_ms_util::alloc::<MockVM>(16, 32, 0);
+            let (address3, bool3) = malloc_ms_util::alloc::<MockVM>(16, 8, 4);
+            let (address4, bool4) = malloc_ms_util::alloc::<MockVM>(32, 64, 4);
+
+            assert!(address1.is_aligned_to(8));
+            assert!(address2.is_aligned_to(32));
+            assert!((address3 + 4_isize).is_aligned_to(8));
+            assert!((address4 + 4_isize).is_aligned_to(64));
+
+            assert!(!bool1);
+            #[cfg(feature = "malloc_hoard")]
+            assert!(bool2);
+            #[cfg(not(feature = "malloc_hoard"))]
+            assert!(!bool2);
+            assert!(bool3);
+            assert!(bool4);
+
+            assert!(malloc_ms_util::get_malloc_usable_size(address1, bool1) >= 16);
+            assert!(malloc_ms_util::get_malloc_usable_size(address2, bool2) >= 16);
+            assert!(malloc_ms_util::get_malloc_usable_size(address3, bool3) >= 16);
+            assert!(malloc_ms_util::get_malloc_usable_size(address4, bool4) >= 32);
+
+            unsafe {
+                malloc_ms_util::free(address1.to_mut_ptr());
+            }
+            #[cfg(feature = "malloc_hoard")]
+            malloc_ms_util::offset_free(address2);
+            #[cfg(not(feature = "malloc_hoard"))]
+            unsafe {
+                malloc_ms_util::free(address2.to_mut_ptr());
+            }
+            malloc_ms_util::offset_free(address3);
+            malloc_ms_util::offset_free(address4);
+        },
+        no_cleanup,
+    )
+}
diff --git a/src/vm/tests/mock_tests/mock_test_nogc_lock_free.rs b/src/vm/tests/mock_tests/mock_test_nogc_lock_free.rs
new file mode 100644
index 0000000000..e78d5e2169
--- /dev/null
+++ b/src/vm/tests/mock_tests/mock_test_nogc_lock_free.rs
@@ -0,0 +1,40 @@
+// GITHUB-CI: MMTK_PLAN=NoGC
+// GITHUB-CI: FEATURES=nogc_lock_free
+
+use super::mock_test_prelude::*;
+
+use crate::plan::AllocationSemantics;
+use crate::vm::VMBinding;
+
+#[test]
+pub fn nogc_lock_free_allocate() {
+    with_mockvm(
+        default_setup,
+        || {
+            let mut fixture = MutatorFixture::create();
+            let min = MockVM::MIN_ALIGNMENT;
+            let max = MockVM::MAX_ALIGNMENT;
+            info!("Allowed alignment between {} and {}", min, max);
+            let mut align = min;
+            while align <= max {
+                info!("Test allocation with alignment {}", align);
+                let addr = memory_manager::alloc(
+                    &mut fixture.mutator,
+                    8,
+                    align,
+                    0,
+                    AllocationSemantics::Default,
+                );
+                info!("addr = {}", addr);
+                assert!(
+                    addr.is_aligned_to(align),
+                    "Expected allocation alignment {}, returned address is {:?}",
+                    align,
+                    addr
+                );
+                align *= 2;
+            }
+        },
+        no_cleanup,
+    )
+}
diff --git a/src/vm/tests/mock_tests/mock_test_vm_layout_compressed_pointer.rs b/src/vm/tests/mock_tests/mock_test_vm_layout_compressed_pointer.rs
new file mode 100644
index 0000000000..64146591db
--- /dev/null
+++ b/src/vm/tests/mock_tests/mock_test_vm_layout_compressed_pointer.rs
@@ -0,0 +1,39 @@
+// GITHUB-CI: MMTK_PLAN=all
+
+use super::mock_test_prelude::*;
+use super::mock_test_vm_layout_default::test_with_vm_layout;
+use crate::util::conversions::*;
+use crate::util::heap::vm_layout::VMLayout;
+use crate::util::Address;
+
+// This test only runs on 64-bit platforms.
+
+#[test]
+fn test_vm_layout_compressed_pointer() {
+    with_mockvm(
+        default_setup,
+        || {
+            let start = if cfg!(target_os = "macos") {
+                // It is impossible to map 0x4000_0000 on macOS, so choose a different address.
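+                // (Presumably because the default __PAGEZERO segment of a 64-bit
+                // Mach-O executable reserves the low 4 GiB of the address space.)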
+                0x40_0000_0000
+            } else {
+                0x4000_0000
+            };
+            let heap_size = 1024 * 1024;
+            let end = match start + heap_size {
+                end if end <= (4usize << 30) => 4usize << 30,
+                end if end <= (32usize << 30) => 32usize << 30,
+                _ => start + (32usize << 30),
+            };
+            let layout = VMLayout {
+                log_address_space: 35,
+                heap_start: chunk_align_down(unsafe { Address::from_usize(start) }),
+                heap_end: chunk_align_up(unsafe { Address::from_usize(end) }),
+                log_space_extent: 31,
+                force_use_contiguous_spaces: false,
+            };
+            test_with_vm_layout(Some(layout));
+        },
+        no_cleanup,
+    )
+}
diff --git a/src/vm/tests/mock_tests/mock_test_vm_layout_default.rs b/src/vm/tests/mock_tests/mock_test_vm_layout_default.rs
new file mode 100644
index 0000000000..f4f4ac5696
--- /dev/null
+++ b/src/vm/tests/mock_tests/mock_test_vm_layout_default.rs
@@ -0,0 +1,41 @@
+// GITHUB-CI: MMTK_PLAN=all
+
+use super::mock_test_prelude::*;
+use crate::util::heap::vm_layout::VMLayout;
+
+pub fn test_with_vm_layout(layout: Option<VMLayout>) {
+    use crate::plan::AllocationSemantics;
+
+    let mut fixture = MutatorFixture::create_with_builder(|builder| {
+        // 1MB
+        builder
+            .options
+            .gc_trigger
+            .set(crate::util::options::GCTriggerSelector::FixedHeapSize(
+                1024 * 1024,
+            ));
+        // Set layout
+        if let Some(layout) = layout {
+            builder.set_vm_layout(layout);
+        }
+    });
+
+    // Test allocation
+    let addr = memory_manager::alloc(&mut fixture.mutator, 8, 8, 0, AllocationSemantics::Default);
+    let obj = <MockVM as VMBinding>::VMObjectModel::address_to_ref(addr);
+    // Test SFT
+    assert!(memory_manager::is_in_mmtk_spaces::<MockVM>(obj));
+    // Test mmapper
+    assert!(memory_manager::is_mapped_address(addr));
+}
+
+#[test]
+fn test_vm_layout_default() {
+    with_mockvm(
+        default_setup,
+        || {
+            test_with_vm_layout(None);
+        },
+        no_cleanup,
+    )
+}
diff --git a/src/vm/tests/mock_tests/mock_test_vm_layout_heap_start.rs b/src/vm/tests/mock_tests/mock_test_vm_layout_heap_start.rs
new file mode 100644
index 0000000000..fbf7573bb5
--- /dev/null
+++ b/src/vm/tests/mock_tests/mock_test_vm_layout_heap_start.rs
@@ -0,0 +1,32 @@
+// GITHUB-CI: MMTK_PLAN=all
+
+use super::mock_test_prelude::*;
+use super::mock_test_vm_layout_default::test_with_vm_layout;
+use crate::util::heap::vm_layout::VMLayout;
+use crate::util::Address;
+
+#[test]
+fn test_vm_layout_heap_start() {
+    with_mockvm(
+        default_setup,
+        || {
+            let default = VMLayout::default();
+
+            // Test with a start address that is different from the default heap start
+            #[cfg(target_pointer_width = "32")]
+            let heap_start = unsafe { Address::from_usize(0x7000_0000) };
+            #[cfg(target_pointer_width = "64")]
+            let heap_start = unsafe { Address::from_usize(0x0000_0400_0000_0000usize) };
+            #[cfg(target_pointer_width = "64")]
+            assert!(heap_start.is_aligned_to(default.max_space_extent()));
+
+            let layout = VMLayout {
+                heap_start,
+                // Use default for the rest.
+                ..default
+            };
+            test_with_vm_layout(Some(layout));
+        },
+        no_cleanup,
+    )
+}
diff --git a/src/vm/tests/mock_tests/mock_test_vm_layout_log_address_space.rs b/src/vm/tests/mock_tests/mock_test_vm_layout_log_address_space.rs
new file mode 100644
index 0000000000..d6feac320e
--- /dev/null
+++ b/src/vm/tests/mock_tests/mock_test_vm_layout_log_address_space.rs
@@ -0,0 +1,24 @@
+// GITHUB-CI: MMTK_PLAN=all
+
+use super::mock_test_prelude::*;
+use super::mock_test_vm_layout_default::test_with_vm_layout;
+use crate::util::heap::vm_layout::VMLayout;
+
+#[test]
+fn test_vm_layout_log_address_space() {
+    with_mockvm(
+        default_setup,
+        || {
+            let layout = VMLayout {
+                #[cfg(target_pointer_width = "32")]
+                log_address_space: 31,
+                #[cfg(target_pointer_width = "64")]
+                log_address_space: 45,
+                // Use default for the rest.
+                ..VMLayout::default()
+            };
+            test_with_vm_layout(Some(layout));
+        },
+        no_cleanup,
+    )
+}
diff --git a/src/vm/tests/mock_tests/mod.rs b/src/vm/tests/mock_tests/mod.rs
new file mode 100644
index 0000000000..6255b1fb8a
--- /dev/null
+++ b/src/vm/tests/mock_tests/mod.rs
@@ -0,0 +1,51 @@
+// NOTE: MMTk will panic if MMTk is initialized more than once per process (this is a bug and we should fix it).
+// To work around the problem, we run each of the following modules in a separate test process
+// if the test initializes an MMTk instance.
+
+// All the tests with the prefix 'mock_test_' and with the feature 'mock_test' will use MockVM, and will initialize MMTk.
+// To avoid re-initialization, one can have only one #[test] per module,
+// or use fixtures in `crate::util::test_util::fixtures` to create one MMTk instance
+// per module and reuse the instance in multiple tests.
+
+// Mock tests can be placed anywhere in the source directory `src` or the test directory `tests`.
+// * They need to be conditionally compiled when the feature `mock_test` is enabled. Otherwise they cannot access `MockVM`.
+// * They should have the prefix 'mock_test_' in their file name so they will be picked up by the CI testing scripts.
+
+// Common includes for mock tests.
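+//
+// A typical mock test built on these includes looks like the sketch below
+// (an editorial example distilled from the tests in this module; `with_mockvm`,
+// `default_setup`, `no_cleanup` and `MutatorFixture` all come from the prelude
+// that follows):
+//
+//     #[test]
+//     fn mock_test_simple_alloc() {
+//         with_mockvm(
+//             default_setup, // install a MockVM with the default mocked methods
+//             || {
+//                 // Boots MMTk (once per test process) and binds a mutator.
+//                 let mut fixture = MutatorFixture::create();
+//                 let addr = memory_manager::alloc(
+//                     &mut fixture.mutator,
+//                     16, // size
+//                     8,  // alignment
+//                     0,  // offset
+//                     AllocationSemantics::Default,
+//                 );
+//                 assert!(!addr.is_zero());
+//             },
+//             no_cleanup, // nothing to check on the mock afterwards
+//         )
+//     }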
+pub(crate) mod mock_test_prelude { + pub use crate::memory_manager; + pub use crate::util::test_util::fixtures::*; + pub use crate::util::test_util::mock_method::*; + pub use crate::util::test_util::mock_vm::*; + pub use crate::vm::*; +} + +mod mock_test_allocate_align_offset; +mod mock_test_allocate_with_disable_collection; +mod mock_test_allocate_with_initialize_collection; +mod mock_test_allocate_with_re_enable_collection; +mod mock_test_allocate_without_initialize_collection; +mod mock_test_allocator_info; +mod mock_test_barrier_slow_path_assertion; +#[cfg(feature = "is_mmtk_object")] +mod mock_test_conservatism; +mod mock_test_edges; +#[cfg(target_os = "linux")] +mod mock_test_handle_mmap_conflict; +mod mock_test_handle_mmap_oom; +mod mock_test_is_in_mmtk_spaces; +mod mock_test_issue139_allocate_non_multiple_of_min_alignment; +mod mock_test_issue867_allocate_unrealistically_large_object; +#[cfg(feature = "malloc_counted_size")] +mod mock_test_malloc_counted; +mod mock_test_malloc_ms; +#[cfg(feature = "nogc_lock_free")] +mod mock_test_nogc_lock_free; +#[cfg(target_pointer_width = "64")] +mod mock_test_vm_layout_compressed_pointer; +mod mock_test_vm_layout_default; +mod mock_test_vm_layout_heap_start; +mod mock_test_vm_layout_log_address_space; + +mod mock_test_doc_avoid_resolving_allocator; +mod mock_test_doc_mutator_storage; diff --git a/src/vm/tests/mod.rs b/src/vm/tests/mod.rs new file mode 100644 index 0000000000..1cb2e0bc9a --- /dev/null +++ b/src/vm/tests/mod.rs @@ -0,0 +1,5 @@ +#[cfg(not(feature = "malloc_counted_size"))] +mod malloc_api; + +#[cfg(feature = "mock_test")] +mod mock_tests; diff --git a/vmbindings/dummyvm/Cargo.toml b/vmbindings/dummyvm/Cargo.toml deleted file mode 100644 index 13def039a5..0000000000 --- a/vmbindings/dummyvm/Cargo.toml +++ /dev/null @@ -1,43 +0,0 @@ -[package] -name = "mmtk_dummyvm" -version = "0.0.1" -authors = [" <>"] -edition = "2021" - -[lib] -name = "mmtk_dummyvm" -# be careful - LTO is only allowed for certain crate types -# We know that cdylib should work for LTO. -# We keep rlib here as we need to use the crate from benches. -crate-type = ["cdylib", "rlib"] - -[profile.release] -lto = true - -[dependencies] -mmtk = { path = "../../", version = "*" } -libc = "0.2" -lazy_static = "1.1" -atomic_refcell = "0.1.7" -atomic = "0.4.6" -log = "0.4" - -[dev-dependencies] -criterion = "0.4" - -[[bench]] -name = "main" -harness = false - -[features] -default = [] -is_mmtk_object = ["mmtk/is_mmtk_object"] -malloc_counted_size = ["mmtk/malloc_counted_size"] -malloc_mark_sweep = ["mmtk/malloc_mark_sweep"] -vo_bit = ["mmtk/vo_bit"] -extreme_assertions = ["mmtk/extreme_assertions"] -nogc_lock_free=["mmtk/nogc_lock_free"] - -# Feature to control which benchmarks to run. See benches/main.rs -bench_sft = [] -bench_alloc = [] diff --git a/vmbindings/dummyvm/api/mmtk.h b/vmbindings/dummyvm/api/mmtk.h deleted file mode 100644 index fcb6e5dd7d..0000000000 --- a/vmbindings/dummyvm/api/mmtk.h +++ /dev/null @@ -1,125 +0,0 @@ -// This is an example of native API for the single instance MMTk. - -// Note: the mmtk core does not directly provide this API. However, it provides -// a similar multi-instance Rust API. A VM binding should write their own C -// header file (possibly based on this example with their own extension and -// modification), and expose the Rust API based on their native API. 
-
-#ifndef MMTK_H
-#define MMTK_H
-
-#include <stdbool.h>
-#include <stddef.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-typedef void* MMTk_Mutator;
-
-// Initialize an MMTk instance
-extern void mmtk_init(size_t heap_size);
-
-// Request MMTk to create a new mutator for the given `tls` thread
-extern MMTk_Mutator mmtk_bind_mutator(void* tls);
-
-// Reclaim mutator that is no longer needed
-extern void mmtk_destroy_mutator(MMTk_Mutator mutator);
-
-// Flush mutator local state
-extern void mmtk_flush_mutator(MMTk_Mutator mutator);
-
-// Initialize MMTk scheduler and GC workers
-extern void mmtk_initialize_collection(void* tls);
-
-// Allow MMTk to perform a GC when the heap is full
-extern void mmtk_enable_collection();
-
-// Disallow MMTk to perform a GC when the heap is full
-extern void mmtk_disable_collection();
-
-// Allocate memory for an object
-extern void* mmtk_alloc(MMTk_Mutator mutator,
-                        size_t size,
-                        size_t align,
-                        size_t offset,
-                        int allocator);
-
-// Slowpath allocation for an object
-extern void* mmtk_alloc_slow(MMTk_Mutator mutator,
-                             size_t size,
-                             size_t align,
-                             size_t offset,
-                             int allocator);
-
-// Perform post-allocation hooks or actions such as initializing object metadata
-extern void mmtk_post_alloc(MMTk_Mutator mutator,
-                            void* refer,
-                            int bytes,
-                            int allocator);
-
-// Return if the object pointed to by `ref` is live
-extern bool mmtk_is_live_object(void* ref);
-
-// Return if the object pointed to by `ref` is in mapped memory
-extern bool mmtk_is_mapped_object(void* ref);
-
-// Return if the address pointed to by `addr` is in mapped memory
-extern bool mmtk_is_mapped_address(void* addr);
-
-// Return if object pointed to by `object` will never move
-extern bool mmtk_will_never_move(void* object);
-
-// Process an MMTk option. Return true if option was processed successfully
-extern bool mmtk_process(char* name, char* value);
-
-// Process MMTk options. Return true if all options were processed successfully
-extern bool mmtk_process_bulk(char* options);
-
-// Sanity only. Scan heap for discrepancies and errors
-extern void mmtk_scan_region();
-
-// Request MMTk to trigger a GC. Note that this may not actually trigger a GC
-extern void mmtk_handle_user_collection_request(void* tls);
-
-// Run the main loop for the GC controller thread. Does not return
-extern void mmtk_start_control_collector(void* tls, void* worker);
-
-// Run the main loop for a GC worker.
Does not return -extern void mmtk_start_worker(void* tls, void* worker); - -// Return the current amount of free memory in bytes -extern size_t mmtk_free_bytes(); - -// Return the current amount of used memory in bytes -extern size_t mmtk_used_bytes(); - -// Return the current amount of total memory in bytes -extern size_t mmtk_total_bytes(); - -// Return the starting address of MMTk's heap -extern void* mmtk_starting_heap_address(); - -// Return the ending address of MMTk's heap -extern void* mmtk_last_heap_address(); - -// Add a reference to the list of weak references -extern void mmtk_add_weak_candidate(void* ref); - -// Add a reference to the list of soft references -extern void mmtk_add_soft_candidate(void* ref); - -// Add a reference to the list of phantom references -extern void mmtk_add_phantom_candidate(void* ref); - -// Generic hook to allow benchmarks to be harnessed -extern void mmtk_harness_begin(void* tls); - -// Generic hook to allow benchmarks to be harnessed -extern void mmtk_harness_end(); - -#ifdef __cplusplus -} -#endif - -#endif // MMTK_H diff --git a/vmbindings/dummyvm/benches/bench_alloc.rs b/vmbindings/dummyvm/benches/bench_alloc.rs deleted file mode 100644 index 482cfee33f..0000000000 --- a/vmbindings/dummyvm/benches/bench_alloc.rs +++ /dev/null @@ -1,18 +0,0 @@ -use criterion::{criterion_group, Criterion}; - -use mmtk::plan::AllocationSemantics; -use mmtk_dummyvm::api; -use mmtk_dummyvm::test_fixtures::MutatorFixture; - -fn alloc(c: &mut Criterion) { - println!("Init MMTK in alloc bench"); - // 1GB so we are unlikely to OOM - let fixture = MutatorFixture::create_with_heapsize(1 << 30); - c.bench_function("alloc", |b| { - b.iter(|| { - let _addr = api::mmtk_alloc(fixture.mutator, 8, 8, 0, AllocationSemantics::Default); - }) - }); -} - -criterion_group!(benches, alloc); diff --git a/vmbindings/dummyvm/benches/bench_sft.rs b/vmbindings/dummyvm/benches/bench_sft.rs deleted file mode 100644 index 40ccf01406..0000000000 --- a/vmbindings/dummyvm/benches/bench_sft.rs +++ /dev/null @@ -1,20 +0,0 @@ -use criterion::{black_box, criterion_group, Criterion}; - -use mmtk::plan::AllocationSemantics; -use mmtk::vm::ObjectModel; -use mmtk_dummyvm::api; -use mmtk_dummyvm::test_fixtures::FixtureContent; -use mmtk_dummyvm::test_fixtures::MutatorFixture; - -fn sft(c: &mut Criterion) { - println!("Init MMTK in sft bench"); - let fixture = MutatorFixture::create(); - let addr = api::mmtk_alloc(fixture.mutator, 8, 8, 0, AllocationSemantics::Default); - let obj = mmtk_dummyvm::object_model::VMObjectModel::address_to_ref(addr); - - c.bench_function("sft read", |b| { - b.iter(|| api::mmtk_is_in_mmtk_spaces(black_box(obj))) - }); -} - -criterion_group!(benches, sft); diff --git a/vmbindings/dummyvm/benches/main.rs b/vmbindings/dummyvm/benches/main.rs deleted file mode 100644 index 804379ee1b..0000000000 --- a/vmbindings/dummyvm/benches/main.rs +++ /dev/null @@ -1,15 +0,0 @@ -use criterion::criterion_main; - -// As we can only initialize one MMTk instance, we have to run each benchmark separately. -// Filtering like `cargo bench -- sft` won't work, as it still evalutes all the benchmark groups (which initialize MMTk). -// We can use conditional compilation, and run with `cargo bench --features bench_sft`. The features are defined in the dummy VM crate. 
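-// (Editorial usage example: `cargo bench --features bench_alloc` builds and
-// runs only the allocation benchmark group below, while
-// `cargo bench --features bench_sft` runs only the SFT group.)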
- -#[cfg(feature = "bench_sft")] -mod bench_sft; -#[cfg(feature = "bench_sft")] -criterion_main!(bench_sft::benches); - -#[cfg(feature = "bench_alloc")] -mod bench_alloc; -#[cfg(feature = "bench_alloc")] -criterion_main!(bench_alloc::benches); diff --git a/vmbindings/dummyvm/src/active_plan.rs b/vmbindings/dummyvm/src/active_plan.rs deleted file mode 100644 index 80144f96f5..0000000000 --- a/vmbindings/dummyvm/src/active_plan.rs +++ /dev/null @@ -1,25 +0,0 @@ -use crate::DummyVM; -use mmtk::util::opaque_pointer::*; -use mmtk::vm::ActivePlan; -use mmtk::Mutator; - -pub struct VMActivePlan {} - -impl ActivePlan for VMActivePlan { - fn number_of_mutators() -> usize { - unimplemented!() - } - - fn is_mutator(_tls: VMThread) -> bool { - // FIXME - true - } - - fn mutator(_tls: VMMutatorThread) -> &'static mut Mutator { - unimplemented!() - } - - fn mutators<'a>() -> Box> + 'a> { - unimplemented!() - } -} diff --git a/vmbindings/dummyvm/src/api.rs b/vmbindings/dummyvm/src/api.rs deleted file mode 100644 index 94e6a96337..0000000000 --- a/vmbindings/dummyvm/src/api.rs +++ /dev/null @@ -1,266 +0,0 @@ -// All functions here are extern function. There is no point for marking them as unsafe. -#![allow(clippy::not_unsafe_ptr_arg_deref)] - -use crate::DummyVM; -use crate::BUILDER; -use crate::SINGLETON; -use libc::c_char; -use mmtk::memory_manager; -use mmtk::scheduler::{GCController, GCWorker}; -use mmtk::util::heap::vm_layout::VMLayout; -use mmtk::util::opaque_pointer::*; -use mmtk::util::{Address, ObjectReference}; -use mmtk::AllocationSemantics; -use mmtk::Mutator; -use std::ffi::CStr; -use std::sync::atomic::Ordering; - -#[no_mangle] -pub fn mmtk_init(heap_size: usize) { - mmtk_init_with_layout(heap_size, None) -} - -#[no_mangle] -pub fn mmtk_init_with_layout(heap_size: usize, layout: Option) { - // set heap size first - { - let mut builder = BUILDER.lock().unwrap(); - if let Some(layout) = layout { - builder.set_vm_layout(layout); - } - let success = - builder - .options - .gc_trigger - .set(mmtk::util::options::GCTriggerSelector::FixedHeapSize( - heap_size, - )); - assert!(success, "Failed to set heap size to {}", heap_size); - } - - // Make sure MMTk has not yet been initialized - assert!(!crate::MMTK_INITIALIZED.load(Ordering::SeqCst)); - // Initialize MMTk here - lazy_static::initialize(&SINGLETON); -} - -#[no_mangle] -pub extern "C" fn mmtk_bind_mutator(tls: VMMutatorThread) -> *mut Mutator { - Box::into_raw(memory_manager::bind_mutator(&SINGLETON, tls)) -} - -#[no_mangle] -pub extern "C" fn mmtk_destroy_mutator(mutator: *mut Mutator) { - // notify mmtk-core about destroyed mutator - memory_manager::destroy_mutator(unsafe { &mut *mutator }); - // turn the ptr back to a box, and let Rust properly reclaim it - let _ = unsafe { Box::from_raw(mutator) }; -} - -#[no_mangle] -pub extern "C" fn mmtk_alloc( - mutator: *mut Mutator, - size: usize, - align: usize, - offset: usize, - mut semantics: AllocationSemantics, -) -> Address { - if size - >= SINGLETON - .get_plan() - .constraints() - .max_non_los_default_alloc_bytes - { - semantics = AllocationSemantics::Los; - } - memory_manager::alloc::(unsafe { &mut *mutator }, size, align, offset, semantics) -} - -#[no_mangle] -pub extern "C" fn mmtk_post_alloc( - mutator: *mut Mutator, - refer: ObjectReference, - bytes: usize, - mut semantics: AllocationSemantics, -) { - if bytes - >= SINGLETON - .get_plan() - .constraints() - .max_non_los_default_alloc_bytes - { - semantics = AllocationSemantics::Los; - } - memory_manager::post_alloc::(unsafe { &mut 
*mutator }, refer, bytes, semantics) -} - -#[no_mangle] -pub extern "C" fn mmtk_will_never_move(object: ObjectReference) -> bool { - !object.is_movable() -} - -#[no_mangle] -pub extern "C" fn mmtk_start_control_collector( - tls: VMWorkerThread, - controller: &'static mut GCController, -) { - memory_manager::start_control_collector(&SINGLETON, tls, controller); -} - -#[no_mangle] -pub extern "C" fn mmtk_start_worker(tls: VMWorkerThread, worker: &'static mut GCWorker) { - memory_manager::start_worker::(&SINGLETON, tls, worker) -} - -#[no_mangle] -pub extern "C" fn mmtk_initialize_collection(tls: VMThread) { - memory_manager::initialize_collection(&SINGLETON, tls) -} - -#[no_mangle] -pub extern "C" fn mmtk_disable_collection() { - memory_manager::disable_collection(&SINGLETON) -} - -#[no_mangle] -pub extern "C" fn mmtk_enable_collection() { - memory_manager::enable_collection(&SINGLETON) -} - -#[no_mangle] -pub extern "C" fn mmtk_used_bytes() -> usize { - memory_manager::used_bytes(&SINGLETON) -} - -#[no_mangle] -pub extern "C" fn mmtk_free_bytes() -> usize { - memory_manager::free_bytes(&SINGLETON) -} - -#[no_mangle] -pub extern "C" fn mmtk_total_bytes() -> usize { - memory_manager::total_bytes(&SINGLETON) -} - -#[no_mangle] -pub extern "C" fn mmtk_is_live_object(object: ObjectReference) -> bool { - memory_manager::is_live_object(object) -} - -#[cfg(feature = "is_mmtk_object")] -#[no_mangle] -pub extern "C" fn mmtk_is_mmtk_object(addr: Address) -> bool { - memory_manager::is_mmtk_object(addr) -} - -#[no_mangle] -pub extern "C" fn mmtk_is_in_mmtk_spaces(object: ObjectReference) -> bool { - memory_manager::is_in_mmtk_spaces::(object) -} - -#[no_mangle] -pub extern "C" fn mmtk_is_mapped_address(address: Address) -> bool { - memory_manager::is_mapped_address(address) -} - -#[no_mangle] -pub extern "C" fn mmtk_handle_user_collection_request(tls: VMMutatorThread) { - memory_manager::handle_user_collection_request::(&SINGLETON, tls); -} - -#[no_mangle] -pub extern "C" fn mmtk_add_weak_candidate(reff: ObjectReference) { - memory_manager::add_weak_candidate(&SINGLETON, reff) -} - -#[no_mangle] -pub extern "C" fn mmtk_add_soft_candidate(reff: ObjectReference) { - memory_manager::add_soft_candidate(&SINGLETON, reff) -} - -#[no_mangle] -pub extern "C" fn mmtk_add_phantom_candidate(reff: ObjectReference) { - memory_manager::add_phantom_candidate(&SINGLETON, reff) -} - -#[no_mangle] -pub extern "C" fn mmtk_harness_begin(tls: VMMutatorThread) { - memory_manager::harness_begin(&SINGLETON, tls) -} - -#[no_mangle] -pub extern "C" fn mmtk_harness_end() { - memory_manager::harness_end(&SINGLETON) -} - -#[no_mangle] -pub extern "C" fn mmtk_process(name: *const c_char, value: *const c_char) -> bool { - let name_str: &CStr = unsafe { CStr::from_ptr(name) }; - let value_str: &CStr = unsafe { CStr::from_ptr(value) }; - let mut builder = BUILDER.lock().unwrap(); - memory_manager::process( - &mut builder, - name_str.to_str().unwrap(), - value_str.to_str().unwrap(), - ) -} - -#[no_mangle] -pub extern "C" fn mmtk_starting_heap_address() -> Address { - memory_manager::starting_heap_address() -} - -#[no_mangle] -pub extern "C" fn mmtk_last_heap_address() -> Address { - memory_manager::last_heap_address() -} - -#[no_mangle] -#[cfg(feature = "malloc_counted_size")] -pub extern "C" fn mmtk_counted_malloc(size: usize) -> Address { - memory_manager::counted_malloc::(&SINGLETON, size) -} -#[no_mangle] -pub extern "C" fn mmtk_malloc(size: usize) -> Address { - memory_manager::malloc(size) -} - -#[no_mangle] -#[cfg(feature = 
"malloc_counted_size")] -pub extern "C" fn mmtk_counted_calloc(num: usize, size: usize) -> Address { - memory_manager::counted_calloc::(&SINGLETON, num, size) -} -#[no_mangle] -pub extern "C" fn mmtk_calloc(num: usize, size: usize) -> Address { - memory_manager::calloc(num, size) -} - -#[no_mangle] -#[cfg(feature = "malloc_counted_size")] -pub extern "C" fn mmtk_realloc_with_old_size( - addr: Address, - size: usize, - old_size: usize, -) -> Address { - memory_manager::realloc_with_old_size::(&SINGLETON, addr, size, old_size) -} -#[no_mangle] -pub extern "C" fn mmtk_realloc(addr: Address, size: usize) -> Address { - memory_manager::realloc(addr, size) -} - -#[no_mangle] -#[cfg(feature = "malloc_counted_size")] -pub extern "C" fn mmtk_free_with_size(addr: Address, old_size: usize) { - memory_manager::free_with_size::(&SINGLETON, addr, old_size) -} -#[no_mangle] -pub extern "C" fn mmtk_free(addr: Address) { - memory_manager::free(addr) -} - -#[no_mangle] -#[cfg(feature = "malloc_counted_size")] -pub extern "C" fn mmtk_get_malloc_bytes() -> usize { - memory_manager::get_malloc_bytes(&SINGLETON) -} diff --git a/vmbindings/dummyvm/src/collection.rs b/vmbindings/dummyvm/src/collection.rs deleted file mode 100644 index f82e793fd3..0000000000 --- a/vmbindings/dummyvm/src/collection.rs +++ /dev/null @@ -1,26 +0,0 @@ -use crate::DummyVM; -use mmtk::util::opaque_pointer::*; -use mmtk::vm::Collection; -use mmtk::vm::GCThreadContext; -use mmtk::Mutator; - -pub struct VMCollection {} - -impl Collection for VMCollection { - fn stop_all_mutators(_tls: VMWorkerThread, _mutator_visitor: F) - where - F: FnMut(&'static mut Mutator), - { - unimplemented!() - } - - fn resume_mutators(_tls: VMWorkerThread) { - unimplemented!() - } - - fn block_for_gc(_tls: VMMutatorThread) { - panic!("block_for_gc is not implemented") - } - - fn spawn_gc_thread(_tls: VMThread, _ctx: GCThreadContext) {} -} diff --git a/vmbindings/dummyvm/src/edges.rs b/vmbindings/dummyvm/src/edges.rs deleted file mode 100644 index bd4f6388b4..0000000000 --- a/vmbindings/dummyvm/src/edges.rs +++ /dev/null @@ -1,231 +0,0 @@ -use atomic::Atomic; -use mmtk::util::constants::LOG_BYTES_IN_ADDRESS; -use mmtk::{ - util::{Address, ObjectReference}, - vm::edge_shape::{Edge, MemorySlice, SimpleEdge}, -}; - -/// If a VM supports multiple kinds of edges, we can use tagged union to represent all of them. 
-#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] -pub enum DummyVMEdge { - Simple(SimpleEdge), - #[cfg(target_pointer_width = "64")] - Compressed(only_64_bit::CompressedOopEdge), - Offset(OffsetEdge), - Tagged(TaggedEdge), -} - -unsafe impl Send for DummyVMEdge {} - -impl Edge for DummyVMEdge { - fn load(&self) -> ObjectReference { - match self { - DummyVMEdge::Simple(e) => e.load(), - #[cfg(target_pointer_width = "64")] - DummyVMEdge::Compressed(e) => e.load(), - DummyVMEdge::Offset(e) => e.load(), - DummyVMEdge::Tagged(e) => e.load(), - } - } - - fn store(&self, object: ObjectReference) { - match self { - DummyVMEdge::Simple(e) => e.store(object), - #[cfg(target_pointer_width = "64")] - DummyVMEdge::Compressed(e) => e.store(object), - DummyVMEdge::Offset(e) => e.store(object), - DummyVMEdge::Tagged(e) => e.store(object), - } - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub struct DummyVMMemorySlice(*mut [ObjectReference]); - -unsafe impl Send for DummyVMMemorySlice {} - -impl MemorySlice for DummyVMMemorySlice { - type Edge = DummyVMEdge; - type EdgeIterator = DummyVMMemorySliceIterator; - - fn iter_edges(&self) -> Self::EdgeIterator { - DummyVMMemorySliceIterator { - cursor: unsafe { (*self.0).as_mut_ptr_range().start }, - limit: unsafe { (*self.0).as_mut_ptr_range().end }, - } - } - - fn object(&self) -> Option { - None - } - - fn start(&self) -> Address { - Address::from_ptr(unsafe { (*self.0).as_ptr_range().start }) - } - - fn bytes(&self) -> usize { - unsafe { std::mem::size_of_val(&*self.0) } - } - - fn copy(src: &Self, tgt: &Self) { - debug_assert_eq!(src.bytes(), tgt.bytes()); - debug_assert_eq!( - src.bytes() & ((1 << LOG_BYTES_IN_ADDRESS) - 1), - 0, - "bytes are not a multiple of words" - ); - // Raw memory copy - unsafe { - let words = tgt.bytes() >> LOG_BYTES_IN_ADDRESS; - let src = src.start().to_ptr::(); - let tgt = tgt.start().to_mut_ptr::(); - std::ptr::copy(src, tgt, words) - } - } -} - -pub struct DummyVMMemorySliceIterator { - cursor: *mut ObjectReference, - limit: *mut ObjectReference, -} - -impl Iterator for DummyVMMemorySliceIterator { - type Item = DummyVMEdge; - - fn next(&mut self) -> Option { - if self.cursor >= self.limit { - None - } else { - let edge = self.cursor; - self.cursor = unsafe { self.cursor.add(1) }; - Some(DummyVMEdge::Simple(SimpleEdge::from_address( - Address::from_ptr(edge), - ))) - } - } -} - -/// Compressed OOP edge only makes sense on 64-bit architectures. -#[cfg(target_pointer_width = "64")] -pub mod only_64_bit { - use super::*; - - /// This represents a location that holds a 32-bit pointer on a 64-bit machine. - /// - /// OpenJDK uses this kind of edge to store compressed OOPs on 64-bit machines. 
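-    // (Editorial note: the arithmetic below stores `address >> 3` in a 32-bit
-    // slot, so any 8-byte-aligned object in the low 32 GiB can be encoded;
-    // `load` shifts left by 3 to decompress, and `store` shifts right to
-    // compress.)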
- #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] - pub struct CompressedOopEdge { - slot_addr: *mut Atomic, - } - - unsafe impl Send for CompressedOopEdge {} - - impl CompressedOopEdge { - pub fn from_address(address: Address) -> Self { - Self { - slot_addr: address.to_mut_ptr(), - } - } - pub fn as_address(&self) -> Address { - Address::from_mut_ptr(self.slot_addr) - } - } - - impl Edge for CompressedOopEdge { - fn load(&self) -> ObjectReference { - let compressed = unsafe { (*self.slot_addr).load(atomic::Ordering::Relaxed) }; - let expanded = (compressed as usize) << 3; - ObjectReference::from_raw_address(unsafe { Address::from_usize(expanded) }) - } - - fn store(&self, object: ObjectReference) { - let expanded = object.to_raw_address().as_usize(); - let compressed = (expanded >> 3) as u32; - unsafe { (*self.slot_addr).store(compressed, atomic::Ordering::Relaxed) } - } - } -} - -/// This represents an edge that holds a pointer to the *middle* of an object, and the offset is known. -/// -/// Julia uses this trick to facilitate deleting array elements from the front. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] -pub struct OffsetEdge { - slot_addr: *mut Atomic
, - offset: usize, -} - -unsafe impl Send for OffsetEdge {} - -impl OffsetEdge { - pub fn new_no_offset(address: Address) -> Self { - Self { - slot_addr: address.to_mut_ptr(), - offset: 0, - } - } - - pub fn new_with_offset(address: Address, offset: usize) -> Self { - Self { - slot_addr: address.to_mut_ptr(), - offset, - } - } - - pub fn slot_address(&self) -> Address { - Address::from_mut_ptr(self.slot_addr) - } - - pub fn offset(&self) -> usize { - self.offset - } -} - -impl Edge for OffsetEdge { - fn load(&self) -> ObjectReference { - let middle = unsafe { (*self.slot_addr).load(atomic::Ordering::Relaxed) }; - let begin = middle - self.offset; - ObjectReference::from_raw_address(begin) - } - - fn store(&self, object: ObjectReference) { - let begin = object.to_raw_address(); - let middle = begin + self.offset; - unsafe { (*self.slot_addr).store(middle, atomic::Ordering::Relaxed) } - } -} - -/// This edge presents the object reference itself to mmtk-core. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] -pub struct TaggedEdge { - slot_addr: *mut Atomic, -} - -unsafe impl Send for TaggedEdge {} - -impl TaggedEdge { - // The DummyVM has OBJECT_REF_OFFSET = 4. - // Using a two-bit tag should be safe on both 32-bit and 64-bit platforms. - const TAG_BITS_MASK: usize = 0b11; - - pub fn new(address: Address) -> Self { - Self { - slot_addr: address.to_mut_ptr(), - } - } -} - -impl Edge for TaggedEdge { - fn load(&self) -> ObjectReference { - let tagged = unsafe { (*self.slot_addr).load(atomic::Ordering::Relaxed) }; - let untagged = tagged & !Self::TAG_BITS_MASK; - ObjectReference::from_raw_address(unsafe { Address::from_usize(untagged) }) - } - - fn store(&self, object: ObjectReference) { - let old_tagged = unsafe { (*self.slot_addr).load(atomic::Ordering::Relaxed) }; - let new_untagged = object.to_raw_address().as_usize(); - let new_tagged = new_untagged | (old_tagged & Self::TAG_BITS_MASK); - unsafe { (*self.slot_addr).store(new_tagged, atomic::Ordering::Relaxed) } - } -} diff --git a/vmbindings/dummyvm/src/lib.rs b/vmbindings/dummyvm/src/lib.rs deleted file mode 100644 index 57e630f222..0000000000 --- a/vmbindings/dummyvm/src/lib.rs +++ /dev/null @@ -1,54 +0,0 @@ -extern crate libc; -extern crate mmtk; -#[macro_use] -extern crate lazy_static; - -use mmtk::vm::VMBinding; -use mmtk::MMTKBuilder; -use mmtk::MMTK; - -pub mod active_plan; -pub mod api; -pub mod collection; -pub mod object_model; -pub mod reference_glue; -pub mod scanning; - -pub mod test_fixtures; - -mod edges; -#[cfg(test)] -mod tests; - -#[derive(Default)] -pub struct DummyVM; - -impl VMBinding for DummyVM { - type VMObjectModel = object_model::VMObjectModel; - type VMScanning = scanning::VMScanning; - type VMCollection = collection::VMCollection; - type VMActivePlan = active_plan::VMActivePlan; - type VMReferenceGlue = reference_glue::VMReferenceGlue; - type VMEdge = edges::DummyVMEdge; - type VMMemorySlice = edges::DummyVMMemorySlice; - - /// Allowed maximum alignment in bytes. - const MAX_ALIGNMENT: usize = 1 << 6; -} - -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::Mutex; - -/// This is used to ensure we initialize MMTk at a specified timing. -pub static MMTK_INITIALIZED: AtomicBool = AtomicBool::new(false); - -lazy_static! 
{ - pub static ref BUILDER: Mutex = Mutex::new(MMTKBuilder::new()); - pub static ref SINGLETON: MMTK = { - let builder = BUILDER.lock().unwrap(); - debug_assert!(!MMTK_INITIALIZED.load(Ordering::SeqCst)); - let ret = mmtk::memory_manager::mmtk_init(&builder); - MMTK_INITIALIZED.store(true, std::sync::atomic::Ordering::Relaxed); - *ret - }; -} diff --git a/vmbindings/dummyvm/src/object_model.rs b/vmbindings/dummyvm/src/object_model.rs deleted file mode 100644 index c666b1e7b1..0000000000 --- a/vmbindings/dummyvm/src/object_model.rs +++ /dev/null @@ -1,80 +0,0 @@ -use crate::DummyVM; -use mmtk::util::copy::{CopySemantics, GCWorkerCopyContext}; -use mmtk::util::{Address, ObjectReference}; -use mmtk::vm::*; - -pub struct VMObjectModel {} - -// This is intentionally set to a non-zero value to see if it breaks. -// Change this if you want to test other values. -pub const OBJECT_REF_OFFSET: usize = 4; - -impl ObjectModel for VMObjectModel { - const GLOBAL_LOG_BIT_SPEC: VMGlobalLogBitSpec = VMGlobalLogBitSpec::in_header(0); - const LOCAL_FORWARDING_POINTER_SPEC: VMLocalForwardingPointerSpec = - VMLocalForwardingPointerSpec::in_header(0); - const LOCAL_FORWARDING_BITS_SPEC: VMLocalForwardingBitsSpec = - VMLocalForwardingBitsSpec::in_header(0); - const LOCAL_MARK_BIT_SPEC: VMLocalMarkBitSpec = VMLocalMarkBitSpec::in_header(0); - const LOCAL_LOS_MARK_NURSERY_SPEC: VMLocalLOSMarkNurserySpec = - VMLocalLOSMarkNurserySpec::in_header(0); - - const OBJECT_REF_OFFSET_LOWER_BOUND: isize = OBJECT_REF_OFFSET as isize; - - fn copy( - _from: ObjectReference, - _semantics: CopySemantics, - _copy_context: &mut GCWorkerCopyContext, - ) -> ObjectReference { - unimplemented!() - } - - fn copy_to(_from: ObjectReference, _to: ObjectReference, _region: Address) -> Address { - unimplemented!() - } - - fn get_current_size(_object: ObjectReference) -> usize { - unimplemented!() - } - - fn get_size_when_copied(object: ObjectReference) -> usize { - Self::get_current_size(object) - } - - fn get_align_when_copied(_object: ObjectReference) -> usize { - ::std::mem::size_of::() - } - - fn get_align_offset_when_copied(_object: ObjectReference) -> usize { - 0 - } - - fn get_reference_when_copied_to(_from: ObjectReference, _to: Address) -> ObjectReference { - unimplemented!() - } - - fn get_type_descriptor(_reference: ObjectReference) -> &'static [i8] { - unimplemented!() - } - - fn ref_to_object_start(object: ObjectReference) -> Address { - object.to_raw_address().sub(OBJECT_REF_OFFSET) - } - - fn ref_to_header(object: ObjectReference) -> Address { - object.to_raw_address() - } - - fn ref_to_address(object: ObjectReference) -> Address { - // Just use object start. 
- Self::ref_to_object_start(object) - } - - fn address_to_ref(addr: Address) -> ObjectReference { - ObjectReference::from_raw_address(addr.add(OBJECT_REF_OFFSET)) - } - - fn dump_object(_object: ObjectReference) { - unimplemented!() - } -} diff --git a/vmbindings/dummyvm/src/reference_glue.rs b/vmbindings/dummyvm/src/reference_glue.rs deleted file mode 100644 index 66e09c5e5a..0000000000 --- a/vmbindings/dummyvm/src/reference_glue.rs +++ /dev/null @@ -1,20 +0,0 @@ -use crate::DummyVM; -use mmtk::util::opaque_pointer::VMWorkerThread; -use mmtk::util::ObjectReference; -use mmtk::vm::ReferenceGlue; - -pub struct VMReferenceGlue {} - -impl ReferenceGlue for VMReferenceGlue { - type FinalizableType = ObjectReference; - - fn set_referent(_reference: ObjectReference, _referent: ObjectReference) { - unimplemented!() - } - fn get_referent(_object: ObjectReference) -> ObjectReference { - unimplemented!() - } - fn enqueue_references(_references: &[ObjectReference], _tls: VMWorkerThread) { - unimplemented!() - } -} diff --git a/vmbindings/dummyvm/src/scanning.rs b/vmbindings/dummyvm/src/scanning.rs deleted file mode 100644 index 960f9d642b..0000000000 --- a/vmbindings/dummyvm/src/scanning.rs +++ /dev/null @@ -1,39 +0,0 @@ -use crate::edges::DummyVMEdge; -use crate::DummyVM; -use mmtk::util::opaque_pointer::*; -use mmtk::util::ObjectReference; -use mmtk::vm::EdgeVisitor; -use mmtk::vm::RootsWorkFactory; -use mmtk::vm::Scanning; -use mmtk::Mutator; - -pub struct VMScanning {} - -impl Scanning for VMScanning { - fn scan_roots_in_mutator_thread( - _tls: VMWorkerThread, - _mutator: &'static mut Mutator, - _factory: impl RootsWorkFactory, - ) { - unimplemented!() - } - fn scan_vm_specific_roots(_tls: VMWorkerThread, _factory: impl RootsWorkFactory) { - unimplemented!() - } - fn scan_object>( - _tls: VMWorkerThread, - _object: ObjectReference, - _edge_visitor: &mut EV, - ) { - unimplemented!() - } - fn notify_initial_thread_scan_complete(_partial_scan: bool, _tls: VMWorkerThread) { - unimplemented!() - } - fn supports_return_barrier() -> bool { - unimplemented!() - } - fn prepare_for_roots_re_scanning() { - unimplemented!() - } -} diff --git a/vmbindings/dummyvm/src/test_fixtures.rs b/vmbindings/dummyvm/src/test_fixtures.rs deleted file mode 100644 index 4e400bc6d2..0000000000 --- a/vmbindings/dummyvm/src/test_fixtures.rs +++ /dev/null @@ -1,237 +0,0 @@ -// Some tests are conditionally compiled. So not all the code in this module will be used. We simply allow dead code in this module. -#![allow(dead_code)] - -use atomic_refcell::AtomicRefCell; -use std::sync::Mutex; -use std::sync::Once; - -use mmtk::util::{ObjectReference, VMMutatorThread, VMThread}; -use mmtk::AllocationSemantics; -use mmtk::MMTK; - -use crate::api::*; -use crate::object_model::OBJECT_REF_OFFSET; -use crate::DummyVM; - -pub trait FixtureContent { - fn create() -> Self; -} - -pub struct Fixture { - content: AtomicRefCell>>, - once: Once, -} - -unsafe impl Sync for Fixture {} - -impl Fixture { - pub fn new() -> Self { - Self { - content: AtomicRefCell::new(None), - once: Once::new(), - } - } - - pub fn with_fixture(&self, func: F) { - self.once.call_once(|| { - let content = Box::new(T::create()); - let mut borrow = self.content.borrow_mut(); - *borrow = Some(content); - }); - let borrow = self.content.borrow(); - func(borrow.as_ref().unwrap()) - } -} - -impl Default for Fixture { - fn default() -> Self { - Self::new() - } -} - -/// SerialFixture ensures all `with_fixture()` calls will be executed serially. 
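-// (Editorial note: the serialization matters because all tests share one MMTk
-// instance; a Mutex around the content, instead of the Once + AtomicRefCell
-// pair used by `Fixture` above, keeps concurrent #[test] threads from entering
-// `func` at the same time.)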
-pub struct SerialFixture { - content: Mutex>>, -} - -impl SerialFixture { - pub fn new() -> Self { - Self { - content: Mutex::new(None), - } - } - - pub fn with_fixture(&self, func: F) { - let mut c = self.content.lock().unwrap(); - if c.is_none() { - *c = Some(Box::new(T::create())); - } - func(c.as_ref().unwrap()) - } - - pub fn with_fixture_expect_benign_panic< - F: Fn(&T) + std::panic::UnwindSafe + std::panic::RefUnwindSafe, - >( - &self, - func: F, - ) { - let res = { - // The lock will be dropped at the end of the block. So panic won't poison the lock. - let mut c = self.content.lock().unwrap(); - if c.is_none() { - *c = Some(Box::new(T::create())); - } - - std::panic::catch_unwind(|| func(c.as_ref().unwrap())) - }; - // We do not hold the lock now. It is safe to resume now. - if let Err(e) = res { - std::panic::resume_unwind(e); - } - } -} - -impl Default for SerialFixture { - fn default() -> Self { - Self::new() - } -} - -pub struct SingleObject { - pub objref: ObjectReference, -} - -impl FixtureContent for SingleObject { - fn create() -> Self { - const MB: usize = 1024 * 1024; - // 1MB heap - mmtk_init(MB); - mmtk_initialize_collection(VMThread::UNINITIALIZED); - // Make sure GC does not run during test. - mmtk_disable_collection(); - let handle = mmtk_bind_mutator(VMMutatorThread(VMThread::UNINITIALIZED)); - - // A relatively small object, typical for Ruby. - let size = 40; - let semantics = AllocationSemantics::Default; - - let addr = mmtk_alloc(handle, size, 8, 0, semantics); - assert!(!addr.is_zero()); - - let objref = ObjectReference::from_raw_address(addr.add(OBJECT_REF_OFFSET)); - mmtk_post_alloc(handle, objref, size, semantics); - - SingleObject { objref } - } -} - -pub struct MMTKSingleton { - pub mmtk: &'static MMTK, -} - -impl FixtureContent for MMTKSingleton { - fn create() -> Self { - const MB: usize = 1024 * 1024; - // 1MB heap - mmtk_init(MB); - mmtk_initialize_collection(VMThread::UNINITIALIZED); - - MMTKSingleton { - mmtk: &crate::SINGLETON, - } - } -} - -pub struct TwoObjects { - pub objref1: ObjectReference, - pub objref2: ObjectReference, -} - -impl FixtureContent for TwoObjects { - fn create() -> Self { - const MB: usize = 1024 * 1024; - // 1MB heap - mmtk_init(MB); - mmtk_initialize_collection(VMThread::UNINITIALIZED); - // Make sure GC does not run during test. - mmtk_disable_collection(); - let handle = mmtk_bind_mutator(VMMutatorThread(VMThread::UNINITIALIZED)); - - let size = 128; - let semantics = AllocationSemantics::Default; - - let addr = mmtk_alloc(handle, size, 8, 0, semantics); - assert!(!addr.is_zero()); - - let objref1 = ObjectReference::from_raw_address(addr.add(OBJECT_REF_OFFSET)); - mmtk_post_alloc(handle, objref1, size, semantics); - - let objref2 = ObjectReference::from_raw_address(addr.add(OBJECT_REF_OFFSET)); - mmtk_post_alloc(handle, objref2, size, semantics); - - TwoObjects { objref1, objref2 } - } -} - -use mmtk::plan::Mutator; - -pub struct MutatorFixture { - pub mmtk: &'static MMTK, - pub mutator: *mut Mutator, -} - -impl FixtureContent for MutatorFixture { - fn create() -> Self { - const MB: usize = 1024 * 1024; - Self::create_with_heapsize(MB) - } -} - -impl MutatorFixture { - pub fn create_with_heapsize(size: usize) -> Self { - mmtk_init(size); - mmtk_initialize_collection(VMThread::UNINITIALIZED); - // Make sure GC does not run during test. 
- mmtk_disable_collection(); - let handle = mmtk_bind_mutator(VMMutatorThread(VMThread::UNINITIALIZED)); - - MutatorFixture { - mmtk: &crate::SINGLETON, - mutator: handle, - } - } -} - -unsafe impl Send for MutatorFixture {} - -use mmtk::util::heap::vm_layout::VMLayout; - -pub struct VMLayoutFixture { - pub mmtk: &'static MMTK, - pub mutator: *mut Mutator, -} - -impl VMLayoutFixture { - pub fn create_with_layout(layout: Option) -> Self { - const MB: usize = 1024 * 1024; - // 1MB heap - mmtk_init_with_layout(MB, layout); - mmtk_initialize_collection(VMThread::UNINITIALIZED); - // Make sure GC does not run during test. - mmtk_disable_collection(); - let handle = mmtk_bind_mutator(VMMutatorThread(VMThread::UNINITIALIZED)); - - VMLayoutFixture { - mmtk: &crate::SINGLETON, - mutator: handle, - } - } -} - -impl FixtureContent for VMLayoutFixture { - fn create() -> Self { - Self::create_with_layout(None::) - } -} - -unsafe impl Send for VMLayoutFixture {} diff --git a/vmbindings/dummyvm/src/tests/allocate_align_offset.rs b/vmbindings/dummyvm/src/tests/allocate_align_offset.rs deleted file mode 100644 index ed39c5ce20..0000000000 --- a/vmbindings/dummyvm/src/tests/allocate_align_offset.rs +++ /dev/null @@ -1,64 +0,0 @@ -// GITHUB-CI: MMTK_PLAN=all - -use crate::api; -use crate::test_fixtures::{MutatorFixture, SerialFixture}; -use crate::DummyVM; -use log::info; -use mmtk::plan::AllocationSemantics; -use mmtk::vm::VMBinding; - -lazy_static! { - static ref MUTATOR: SerialFixture = SerialFixture::new(); -} - -#[test] -pub fn allocate_alignment() { - MUTATOR.with_fixture(|fixture| { - let min = DummyVM::MIN_ALIGNMENT; - let max = DummyVM::MAX_ALIGNMENT; - info!("Allowed alignment between {} and {}", min, max); - let mut align = min; - while align <= max { - info!("Test allocation with alignment {}", align); - let addr = api::mmtk_alloc(fixture.mutator, 8, align, 0, AllocationSemantics::Default); - assert!( - addr.is_aligned_to(align), - "Expected allocation alignment {}, returned address is {:?}", - align, - addr - ); - align *= 2; - } - }) -} - -#[test] -pub fn allocate_offset() { - MUTATOR.with_fixture(|fixture| { - const OFFSET: usize = 4; - let min = DummyVM::MIN_ALIGNMENT; - let max = DummyVM::MAX_ALIGNMENT; - info!("Allowed alignment between {} and {}", min, max); - let mut align = min; - while align <= max { - info!( - "Test allocation with alignment {} and offset {}", - align, OFFSET - ); - let addr = api::mmtk_alloc( - fixture.mutator, - 8, - align, - OFFSET, - AllocationSemantics::Default, - ); - assert!( - (addr + OFFSET).is_aligned_to(align), - "Expected allocation alignment {}, returned address is {:?}", - align, - addr - ); - align *= 2; - } - }) -} diff --git a/vmbindings/dummyvm/src/tests/allocate_with_disable_collection.rs b/vmbindings/dummyvm/src/tests/allocate_with_disable_collection.rs deleted file mode 100644 index 37094ff370..0000000000 --- a/vmbindings/dummyvm/src/tests/allocate_with_disable_collection.rs +++ /dev/null @@ -1,22 +0,0 @@ -use crate::api::*; -use mmtk::util::opaque_pointer::*; -use mmtk::AllocationSemantics; - -/// This test allocates after calling disable_collection(). When we exceed the heap limit, MMTk will NOT trigger a GC. -/// And the allocation will succeed. -#[test] -pub fn allocate_with_disable_collection() { - const MB: usize = 1024 * 1024; - // 1MB heap - mmtk_init(MB); - mmtk_initialize_collection(VMThread::UNINITIALIZED); - let handle = mmtk_bind_mutator(VMMutatorThread(VMThread::UNINITIALIZED)); - // Allocate 1MB. It should be fine. 
- let addr = mmtk_alloc(handle, MB, 8, 0, AllocationSemantics::Default); - assert!(!addr.is_zero()); - // Disable GC - mmtk_disable_collection(); - // Allocate another MB. This exceeds the heap size. But as we have disabled GC, MMTk will not trigger a GC, and allow this allocation. - let addr = mmtk_alloc(handle, MB, 8, 0, AllocationSemantics::Default); - assert!(!addr.is_zero()); -} diff --git a/vmbindings/dummyvm/src/tests/allocate_with_initialize_collection.rs b/vmbindings/dummyvm/src/tests/allocate_with_initialize_collection.rs deleted file mode 100644 index e69daff813..0000000000 --- a/vmbindings/dummyvm/src/tests/allocate_with_initialize_collection.rs +++ /dev/null @@ -1,20 +0,0 @@ -use crate::api::*; -use mmtk::util::opaque_pointer::*; -use mmtk::AllocationSemantics; - -/// This test allocates after calling initialize_collection(). When we exceed the heap limit, MMTk will trigger a GC. And block_for_gc will be called. -/// We havent implemented block_for_gc so it will panic. -#[test] -#[should_panic(expected = "block_for_gc is not implemented")] -pub fn allocate_with_initialize_collection() { - const MB: usize = 1024 * 1024; - // 1MB heap - mmtk_init(MB); - mmtk_initialize_collection(VMThread::UNINITIALIZED); - let handle = mmtk_bind_mutator(VMMutatorThread(VMThread::UNINITIALIZED)); - // Fill up the heap - let _ = mmtk_alloc(handle, MB, 8, 0, AllocationSemantics::Default); - // Attempt another allocation. This will trigger GC. - let addr = mmtk_alloc(handle, MB, 8, 0, AllocationSemantics::Default); - assert!(!addr.is_zero()); -} diff --git a/vmbindings/dummyvm/src/tests/allocate_with_re_enable_collection.rs b/vmbindings/dummyvm/src/tests/allocate_with_re_enable_collection.rs deleted file mode 100644 index cf49532577..0000000000 --- a/vmbindings/dummyvm/src/tests/allocate_with_re_enable_collection.rs +++ /dev/null @@ -1,26 +0,0 @@ -use crate::api::*; -use mmtk::util::opaque_pointer::*; -use mmtk::AllocationSemantics; - -/// This test allocates after calling initialize_collection(). When we exceed the heap limit, MMTk will trigger a GC. And block_for_gc will be called. -/// We havent implemented block_for_gc so it will panic. This test is similar to allocate_with_initialize_collection, except that we once disabled GC in the test. -#[test] -#[should_panic(expected = "block_for_gc is not implemented")] -pub fn allocate_with_re_enable_collection() { - const MB: usize = 1024 * 1024; - // 1MB heap - mmtk_init(MB); - mmtk_initialize_collection(VMThread::UNINITIALIZED); - let handle = mmtk_bind_mutator(VMMutatorThread(VMThread::UNINITIALIZED)); - // Allocate 1MB. It should be fine. - let addr = mmtk_alloc(handle, MB, 8, 0, AllocationSemantics::Default); - assert!(!addr.is_zero()); - // Disable GC. So we can keep allocate without triggering a GC. - mmtk_disable_collection(); - let addr = mmtk_alloc(handle, MB, 8, 0, AllocationSemantics::Default); - assert!(!addr.is_zero()); - // Enable GC again. When we allocate, we should see a GC triggered immediately. 
- mmtk_enable_collection(); - let addr = mmtk_alloc(handle, MB, 8, 0, AllocationSemantics::Default); - assert!(!addr.is_zero()); -} diff --git a/vmbindings/dummyvm/src/tests/allocate_without_initialize_collection.rs b/vmbindings/dummyvm/src/tests/allocate_without_initialize_collection.rs deleted file mode 100644 index 8d6b31e2e3..0000000000 --- a/vmbindings/dummyvm/src/tests/allocate_without_initialize_collection.rs +++ /dev/null @@ -1,19 +0,0 @@ -use crate::api::*; -use mmtk::util::opaque_pointer::*; -use mmtk::AllocationSemantics; - -/// This test allocates without calling initialize_collection(). When we exceed the heap limit, a GC should be triggered by MMTk. -/// But as we haven't enabled collection, GC is not initialized, so MMTk will panic. -#[test] -#[should_panic(expected = "GC is not allowed here")] -pub fn allocate_without_initialize_collection() { - const MB: usize = 1024 * 1024; - // 1MB heap - mmtk_init(MB); - let handle = mmtk_bind_mutator(VMMutatorThread(VMThread::UNINITIALIZED)); - // Fill up the heap. - let _ = mmtk_alloc(handle, MB, 8, 0, AllocationSemantics::Default); - // Attempt another memory. This should trigger a GC, but as we never call initialize_collection(), we cannot do GC. - let addr = mmtk_alloc(handle, MB, 8, 0, AllocationSemantics::Default); - assert!(!addr.is_zero()); -} diff --git a/vmbindings/dummyvm/src/tests/allocator_info.rs b/vmbindings/dummyvm/src/tests/allocator_info.rs deleted file mode 100644 index 4b59434314..0000000000 --- a/vmbindings/dummyvm/src/tests/allocator_info.rs +++ /dev/null @@ -1,58 +0,0 @@ -// GITHUB-CI: MMTK_PLAN=all - -use mmtk::util::alloc::AllocatorInfo; -use mmtk::util::options::PlanSelector; -use mmtk::AllocationSemantics; - -use crate::test_fixtures::{Fixture, MMTKSingleton}; -use crate::DummyVM; - -lazy_static! { - static ref MMTK_SINGLETON: Fixture = Fixture::new(); -} - -#[test] -fn test_allocator_info() { - MMTK_SINGLETON.with_fixture(|fixture| { - let selector = mmtk::memory_manager::get_allocator_mapping( - &fixture.mmtk, - AllocationSemantics::Default, - ); - let base_offset = mmtk::plan::Mutator::::get_allocator_base_offset(selector); - let allocator_info = AllocatorInfo::new::(selector); - - match *fixture.mmtk.get_options().plan { - PlanSelector::NoGC - | PlanSelector::Immix - | PlanSelector::SemiSpace - | PlanSelector::GenCopy - | PlanSelector::GenImmix - | PlanSelector::MarkCompact - | PlanSelector::StickyImmix => { - // These plans all use bump pointer allocator. - let AllocatorInfo::BumpPointer { - bump_pointer_offset, - } = allocator_info - else { - panic!("Expected AllocatorInfo for a bump pointer allocator"); - }; - // In all of those plans, the first field at base offset is tls, and the second field is the BumpPointer struct. 
- assert_eq!( - base_offset + mmtk::util::constants::BYTES_IN_ADDRESS, - bump_pointer_offset - ); - } - PlanSelector::MarkSweep => { - if cfg!(feature = "malloc_mark_sweep") { - // We provide no info for a malloc allocator - assert!(matches!(allocator_info, AllocatorInfo::None)) - } else { - // We haven't implemented for a free list allocator - assert!(matches!(allocator_info, AllocatorInfo::Unimplemented)) - } - } - // We provide no info for a large object allocator - PlanSelector::PageProtect => assert!(matches!(allocator_info, AllocatorInfo::None)), - } - }) -} diff --git a/vmbindings/dummyvm/src/tests/barrier_slow_path_assertion.rs b/vmbindings/dummyvm/src/tests/barrier_slow_path_assertion.rs deleted file mode 100644 index 063c053dcd..0000000000 --- a/vmbindings/dummyvm/src/tests/barrier_slow_path_assertion.rs +++ /dev/null @@ -1,64 +0,0 @@ -// GITHUB-CI: MMTK_PLAN=GenImmix -// GITHUB-CI: FEATURES=vo_bit,extreme_assertions - -// Run the test with any plan that uses object barrier, and we also need both VO bit and extreme assertions. - -use crate::object_model::OBJECT_REF_OFFSET; -use crate::test_fixtures::FixtureContent; -use crate::test_fixtures::MMTKSingleton; -use crate::{api::*, edges}; -use atomic::Atomic; -use mmtk::util::{Address, ObjectReference}; -use mmtk::util::{VMMutatorThread, VMThread}; -use mmtk::vm::edge_shape::SimpleEdge; -use mmtk::AllocationSemantics; - -lazy_static! { - static ref MMTK_SINGLETON: MMTKSingleton = MMTKSingleton::create(); -} - -#[test] -#[should_panic(expected = "object bit is unset")] -fn test_assertion_barrier_invalid_ref() { - let mutator = mmtk_bind_mutator(VMMutatorThread(VMThread::UNINITIALIZED)); - - // Allocate - let size = 24; - let addr = mmtk_alloc(mutator, size, 8, 0, AllocationSemantics::Default); - let objref: ObjectReference = ObjectReference::from_raw_address(addr.add(OBJECT_REF_OFFSET)); - mmtk_post_alloc(mutator, objref, size, AllocationSemantics::Default); - // Create an edge - let mut slot: Atomic = Atomic::new(objref); - let edge = SimpleEdge::from_address(Address::from_ref(&mut slot)); - // Create an invalid object reference (offset 8 bytes on the original object ref), and invoke barrier slowpath with it - // The invalid object ref has no VO bit, and the assertion should fail. 
- let invalid_objref = ObjectReference::from_raw_address(objref.to_raw_address() + 8usize); - unsafe { - let mu = &mut *mutator; - mu.barrier.object_reference_write_slow( - invalid_objref, - edges::DummyVMEdge::Simple(edge), - objref, - ); - } -} - -#[test] -fn test_assertion_barrier_valid_ref() { - let mutator = mmtk_bind_mutator(VMMutatorThread(VMThread::UNINITIALIZED)); - - // Allocate - let size = 24; - let addr = mmtk_alloc(mutator, size, 8, 0, AllocationSemantics::Default); - let objref: ObjectReference = ObjectReference::from_raw_address(addr.add(OBJECT_REF_OFFSET)); - mmtk_post_alloc(mutator, objref, size, AllocationSemantics::Default); - // Create an edge - let mut slot: Atomic = Atomic::new(objref); - let edge = SimpleEdge::from_address(Address::from_ref(&mut slot)); - // Invoke barrier slowpath with the valid object ref - unsafe { - let mu = &mut *mutator; - mu.barrier - .object_reference_write_slow(objref, edges::DummyVMEdge::Simple(edge), objref); - } -} diff --git a/vmbindings/dummyvm/src/tests/conservatism.rs b/vmbindings/dummyvm/src/tests/conservatism.rs deleted file mode 100644 index 6262769888..0000000000 --- a/vmbindings/dummyvm/src/tests/conservatism.rs +++ /dev/null @@ -1,167 +0,0 @@ -// GITHUB-CI: MMTK_PLAN=all -// GITHUB-CI: FEATURES=is_mmtk_object - -use crate::api::*; -use crate::object_model::OBJECT_REF_OFFSET; -use crate::test_fixtures::{Fixture, SingleObject}; -use mmtk::util::constants::LOG_BITS_IN_WORD; -use mmtk::util::is_mmtk_object::VO_BIT_REGION_SIZE; -use mmtk::util::*; - -lazy_static! { - static ref SINGLE_OBJECT: Fixture = Fixture::new(); -} - -fn basic_filter(addr: Address) -> bool { - !addr.is_zero() - && addr.as_usize() % VO_BIT_REGION_SIZE == (OBJECT_REF_OFFSET % VO_BIT_REGION_SIZE) -} - -fn assert_filter_pass(addr: Address) { - assert!( - basic_filter(addr), - "{} should pass basic filter, but failed.", - addr, - ); -} - -fn assert_filter_fail(addr: Address) { - assert!( - !basic_filter(addr), - "{} should fail basic filter, but passed.", - addr, - ); -} - -fn assert_valid_objref(addr: Address) { - assert!( - mmtk_is_mmtk_object(addr), - "mmtk_is_mmtk_object({}) should return true. Got false.", - addr, - ); -} - -fn assert_invalid_objref(addr: Address, real: Address) { - assert!( - !mmtk_is_mmtk_object(addr), - "mmtk_is_mmtk_object({}) should return false. Got true. Real object: {}", - addr, - real, - ); -} - -#[test] -pub fn null() { - SINGLE_OBJECT.with_fixture(|fixture| { - let addr = Address::ZERO; - assert_filter_fail(addr); - assert_invalid_objref(addr, fixture.objref.to_raw_address()); - }); -} - -// This should be small enough w.r.t `HEAP_START` and `HEAP_END`. 
-const SMALL_OFFSET: usize = 16384; - -#[test] -pub fn too_small() { - SINGLE_OBJECT.with_fixture(|fixture| { - for offset in 1usize..SMALL_OFFSET { - let addr = Address::ZERO + offset; - assert_invalid_objref(addr, fixture.objref.to_raw_address()); - } - }); -} - -#[test] -pub fn max() { - SINGLE_OBJECT.with_fixture(|fixture| { - let addr = Address::MAX; - assert_invalid_objref(addr, fixture.objref.to_raw_address()); - }); -} - -#[test] -pub fn too_big() { - SINGLE_OBJECT.with_fixture(|fixture| { - for offset in 1usize..SMALL_OFFSET { - let addr = Address::MAX - offset; - assert_invalid_objref(addr, fixture.objref.to_raw_address()); - } - }); -} - -#[test] -pub fn direct_hit() { - SINGLE_OBJECT.with_fixture(|fixture| { - let addr = fixture.objref.to_raw_address(); - assert_filter_pass(addr); - assert_valid_objref(addr); - }); -} - -const SEVERAL_PAGES: usize = 4 * mmtk::util::constants::BYTES_IN_PAGE; - -#[test] -pub fn small_offsets() { - SINGLE_OBJECT.with_fixture(|fixture| { - for offset in 1usize..SEVERAL_PAGES { - let addr = fixture.objref.to_raw_address() + offset; - if basic_filter(addr) { - assert_invalid_objref(addr, fixture.objref.to_raw_address()); - } - } - }); -} - -#[test] -pub fn medium_offsets_aligned() { - SINGLE_OBJECT.with_fixture(|fixture| { - let alignment = std::mem::align_of::
(); - for offset in (alignment..(alignment * SEVERAL_PAGES)).step_by(alignment) { - let addr = fixture.objref.to_raw_address() + offset; - assert_filter_pass(addr); - assert_invalid_objref(addr, fixture.objref.to_raw_address()); - } - }); -} - -#[test] -pub fn large_offsets_aligned() { - SINGLE_OBJECT.with_fixture(|fixture| { - for log_offset in 12usize..(usize::BITS as usize) { - let offset = 1usize << log_offset; - let addr = match fixture - .objref - .to_raw_address() - .as_usize() - .checked_add(offset) - { - Some(n) => unsafe { Address::from_usize(n) }, - None => break, - }; - assert_filter_pass(addr); - assert_invalid_objref(addr, fixture.objref.to_raw_address()); - } - }); -} - -#[test] -pub fn negative_offsets() { - SINGLE_OBJECT.with_fixture(|fixture| { - for log_offset in LOG_BITS_IN_WORD..(usize::BITS as usize) { - let offset = 1usize << log_offset; - let addr = match fixture - .objref - .to_raw_address() - .as_usize() - .checked_sub(offset) - { - Some(0) => break, - Some(n) => unsafe { Address::from_usize(n) }, - None => break, - }; - assert_filter_pass(addr); - assert_invalid_objref(addr, fixture.objref.to_raw_address()); - } - }); -}
diff --git a/vmbindings/dummyvm/src/tests/doc_avoid_resolving_allocator.rs b/vmbindings/dummyvm/src/tests/doc_avoid_resolving_allocator.rs deleted file mode 100644 index 24dc07a4b1..0000000000 --- a/vmbindings/dummyvm/src/tests/doc_avoid_resolving_allocator.rs +++ /dev/null @@ -1,47 +0,0 @@ -// GITHUB-CI: MMTK_PLAN=NoGC,SemiSpace,Immix,GenImmix,StickyImmix - -use crate::test_fixtures::{MMTKSingleton, SerialFixture}; -use crate::DummyVM; - -use mmtk::util::alloc::Allocator; -use mmtk::util::alloc::BumpAllocator; -use mmtk::util::Address; -use mmtk::util::OpaquePointer; -use mmtk::util::{VMMutatorThread, VMThread}; -use mmtk::AllocationSemantics; - -lazy_static! { - static ref MMTK_SINGLETON: SerialFixture<MMTKSingleton> = SerialFixture::new(); -} - -#[test] -pub fn acquire_typed_allocator() { - MMTK_SINGLETON.with_fixture(|fixture| { - let tls_opaque_pointer = VMMutatorThread(VMThread(OpaquePointer::UNINITIALIZED)); - static mut DEFAULT_ALLOCATOR_OFFSET: usize = 0; - - // ANCHOR: avoid_resolving_allocator - // At boot time - let selector = mmtk::memory_manager::get_allocator_mapping( - &fixture.mmtk, - AllocationSemantics::Default, - ); - unsafe { - DEFAULT_ALLOCATOR_OFFSET = - mmtk::plan::Mutator::<DummyVM>::get_allocator_base_offset(selector); - } - let mutator = mmtk::memory_manager::bind_mutator(&fixture.mmtk, tls_opaque_pointer); - - // At run time: allocate with the default semantics without resolving the allocator - let default_allocator: &mut BumpAllocator<DummyVM> = { - let mutator_addr = Address::from_ref(&*mutator); - unsafe { - (mutator_addr + DEFAULT_ALLOCATOR_OFFSET).as_mut_ref::<BumpAllocator<DummyVM>>() - } - }; - let addr = default_allocator.alloc(8, 8, 0); - // ANCHOR_END: avoid_resolving_allocator - - assert!(!addr.is_zero()); - }); -}
diff --git a/vmbindings/dummyvm/src/tests/doc_mutator_storage.rs b/vmbindings/dummyvm/src/tests/doc_mutator_storage.rs deleted file mode 100644 index 6d4b60917b..0000000000 --- a/vmbindings/dummyvm/src/tests/doc_mutator_storage.rs +++ /dev/null @@ -1,127 +0,0 @@ -// GITHUB-CI: MMTK_PLAN=NoGC,SemiSpace,Immix,GenImmix,StickyImmix - -use crate::test_fixtures::{MMTKSingleton, SerialFixture}; -use crate::DummyVM; - -use mmtk::util::Address; -use mmtk::util::OpaquePointer; -use mmtk::util::{VMMutatorThread, VMThread}; -use mmtk::AllocationSemantics; -use mmtk::Mutator; - -lazy_static! { - static ref MMTK_SINGLETON: SerialFixture<MMTKSingleton> = SerialFixture::new(); -} - -#[test] -pub fn boxed_pointer() { - MMTK_SINGLETON.with_fixture(|fixture| { - let tls_opaque_pointer = VMMutatorThread(VMThread(OpaquePointer::UNINITIALIZED)); - - // ANCHOR: mutator_storage_boxed_pointer - struct MutatorInTLS { - // Store the mutator as a boxed pointer. - // Accessing any value in the mutator requires dereferencing the boxed pointer. - ptr: Box<Mutator<DummyVM>>, - } - - // Bind an MMTk mutator - let mutator = mmtk::memory_manager::bind_mutator(&fixture.mmtk, tls_opaque_pointer); - // Store the pointer in TLS - let mut storage = MutatorInTLS { ptr: mutator }; - - // Allocate - let addr = - mmtk::memory_manager::alloc(&mut storage.ptr, 8, 8, 0, AllocationSemantics::Default); - // ANCHOR_END: mutator_storage_boxed_pointer - - assert!(!addr.is_zero()); - }); -} - -#[test] -pub fn embed_mutator_struct() { - MMTK_SINGLETON.with_fixture(|fixture| { - let tls_opaque_pointer = VMMutatorThread(VMThread(OpaquePointer::UNINITIALIZED)); - - // ANCHOR: mutator_storage_embed_mutator_struct - struct MutatorInTLS { - embed: Mutator<DummyVM>, - } - - // Bind an MMTk mutator - let mutator = mmtk::memory_manager::bind_mutator(&fixture.mmtk, tls_opaque_pointer); - // Store the struct (or use memcpy for non-Rust code) - let mut storage = MutatorInTLS { embed: *mutator }; - // Allocate - let addr = - mmtk::memory_manager::alloc(&mut storage.embed, 8, 8, 0, AllocationSemantics::Default); - // ANCHOR_END: mutator_storage_embed_mutator_struct - - assert!(!addr.is_zero()); - }) -} - -#[test] -pub fn embed_fastpath_struct() { - MMTK_SINGLETON.with_fixture(|fixture| { - let tls_opaque_pointer = VMMutatorThread(VMThread(OpaquePointer::UNINITIALIZED)); - - // ANCHOR: mutator_storage_embed_fastpath_struct - use mmtk::util::alloc::BumpPointer; - struct MutatorInTLS { - default_bump_pointer: BumpPointer, - mutator: Box<Mutator<DummyVM>>, - } - - // Bind an MMTk mutator - let mutator = mmtk::memory_manager::bind_mutator(&fixture.mmtk, tls_opaque_pointer); - // Create a fastpath BumpPointer with default(). A BumpPointer from default() is guaranteed to fail on the first allocation, - // so the allocation goes to the slowpath and we will get an allocation buffer from MMTk. - let default_bump_pointer = BumpPointer::default(); - // Store the fastpath BumpPointer along with the mutator - let mut storage = MutatorInTLS { - default_bump_pointer, - mutator, - }; - - // Allocate - let mut allocate_default = |size: usize| -> Address { - // Alignment code is omitted here to make the code simpler to read. - // In an actual implementation, alignment and offset need to be considered by the bindings. - let new_cursor = storage.default_bump_pointer.cursor + size; - if new_cursor < storage.default_bump_pointer.limit { - let addr = storage.default_bump_pointer.cursor; - storage.default_bump_pointer.cursor = new_cursor; - addr - } else { - use crate::mmtk::util::alloc::Allocator; - let selector = mmtk::memory_manager::get_allocator_mapping( - &fixture.mmtk, - AllocationSemantics::Default, - ); - let default_allocator = unsafe { - storage - .mutator - .allocator_impl_mut::<BumpAllocator<DummyVM>>(selector) - }; - // Copy bump pointer values to the allocator in the mutator - default_allocator.bump_pointer = storage.default_bump_pointer; - // Do slow-path allocation with MMTk - let addr = default_allocator.alloc_slow(size, 8, 0); - // Copy bump pointer values back to the fastpath BumpPointer so we will have an allocation buffer. - storage.default_bump_pointer = default_allocator.bump_pointer; - addr - } - }; - - // Allocate: the first allocation fails in the fastpath and gets an allocation buffer from the slowpath - let addr1 = allocate_default(8); - // Allocate: this allocation is served from the fastpath - let addr2 = allocate_default(8); - // ANCHOR_END: mutator_storage_embed_fastpath_struct - - assert!(!addr1.is_zero()); - assert!(!addr2.is_zero()); - }) -}
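Note: the fastpath above deliberately omits alignment handling. A minimal sketch of what that step could look like in a binding; `align_up` is a hypothetical helper, not part of the MMTk API:

use mmtk::util::Address;

// Hypothetical helper: round `addr` up to the next multiple of `align`
// (`align` must be a power of two, as MMTk alignments are).
fn align_up(addr: Address, align: usize) -> Address {
    unsafe { Address::from_usize((addr.as_usize() + align - 1) & !(align - 1)) }
}

// In the fastpath, the cursor would be aligned before bumping:
//   let aligned = align_up(storage.default_bump_pointer.cursor, align);
//   let new_cursor = aligned + size;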
diff --git a/vmbindings/dummyvm/src/tests/edges_test.rs b/vmbindings/dummyvm/src/tests/edges_test.rs deleted file mode 100644 index f927e420a6..0000000000 --- a/vmbindings/dummyvm/src/tests/edges_test.rs +++ /dev/null @@ -1,216 +0,0 @@ -// GITHUB-CI: MMTK_PLAN=NoGC - -use atomic::{Atomic, Ordering}; -use mmtk::{ - util::{Address, ObjectReference}, - vm::edge_shape::{Edge, SimpleEdge}, -}; - -use crate::{ - edges::{DummyVMEdge, OffsetEdge, TaggedEdge}, - test_fixtures::{Fixture, TwoObjects}, -}; - -#[cfg(target_pointer_width = "64")] -use crate::edges::only_64_bit::CompressedOopEdge; - -lazy_static! { - static ref FIXTURE: Fixture<TwoObjects> = Fixture::new(); -} - -#[test] -pub fn load_simple() { - FIXTURE.with_fixture(|fixture| { - let mut slot: Atomic<ObjectReference> = Atomic::new(fixture.objref1); - - let edge = SimpleEdge::from_address(Address::from_ref(&mut slot)); - let objref = edge.load(); - - assert_eq!(objref, fixture.objref1); - }); -} - -#[test] -pub fn store_simple() { - FIXTURE.with_fixture(|fixture| { - let mut slot: Atomic<ObjectReference> = Atomic::new(fixture.objref1); - - let edge = SimpleEdge::from_address(Address::from_ref(&mut slot)); - edge.store(fixture.objref2); - assert_eq!(slot.load(Ordering::SeqCst), fixture.objref2); - - let objref = edge.load(); - assert_eq!(objref, fixture.objref2); - }); -} - -#[cfg(target_pointer_width = "64")] -mod only_64_bit { - use super::*; - - // Two 35-bit addresses aligned to 8 bytes (3 zeros in the lowest bits). - const COMPRESSABLE_ADDR1: usize = 0b101_10111011_11011111_01111110_11111000usize; - const COMPRESSABLE_ADDR2: usize = 0b110_11110111_01101010_11011101_11101000usize; - - #[test] - pub fn load_compressed() { - // Note: We cannot guarantee GC will allocate an object in the low address region. - // So we make up addresses just for testing the bit operations of compressed OOP edges. - let compressed1 = (COMPRESSABLE_ADDR1 >> 3) as u32; - let objref1 = - ObjectReference::from_raw_address(unsafe { Address::from_usize(COMPRESSABLE_ADDR1) }); - - let mut slot: Atomic<u32> = Atomic::new(compressed1); - - let edge = CompressedOopEdge::from_address(Address::from_ref(&mut slot)); - let objref = edge.load(); - - assert_eq!(objref, objref1); - } - - #[test] - pub fn store_compressed() { - // Note: We cannot guarantee GC will allocate an object in the low address region. - // So we make up addresses just for testing the bit operations of compressed OOP edges. - let compressed1 = (COMPRESSABLE_ADDR1 >> 3) as u32; - let compressed2 = (COMPRESSABLE_ADDR2 >> 3) as u32; - let objref2 = - ObjectReference::from_raw_address(unsafe { Address::from_usize(COMPRESSABLE_ADDR2) }); - - let mut slot: Atomic<u32> = Atomic::new(compressed1); - - let edge = CompressedOopEdge::from_address(Address::from_ref(&mut slot)); - edge.store(objref2); - assert_eq!(slot.load(Ordering::SeqCst), compressed2); - - let objref = edge.load(); - assert_eq!(objref, objref2); - } -} - -#[test] -pub fn load_offset() { - const OFFSET: usize = 48; - FIXTURE.with_fixture(|fixture| { - let addr1 = fixture.objref1.to_raw_address(); - let mut slot: Atomic<Address> = Atomic::new(addr1 + OFFSET); - - let edge = OffsetEdge::new_with_offset(Address::from_ref(&mut slot), OFFSET); - let objref = edge.load(); - - assert_eq!(objref, fixture.objref1); - }); -} - -#[test] -pub fn store_offset() { - const OFFSET: usize = 48; - FIXTURE.with_fixture(|fixture| { - let addr1 = fixture.objref1.to_raw_address(); - let addr2 = fixture.objref2.to_raw_address(); - let mut slot: Atomic<Address> = Atomic::new(addr1 + OFFSET); - - let edge = OffsetEdge::new_with_offset(Address::from_ref(&mut slot), OFFSET); - edge.store(fixture.objref2); - assert_eq!(slot.load(Ordering::SeqCst), addr2 + OFFSET); - - let objref = edge.load(); - assert_eq!(objref, fixture.objref2); - }); -} - -const TAG1: usize = 0b01; -const TAG2: usize = 0b10; - -#[test] -pub fn load_tagged() { - FIXTURE.with_fixture(|fixture| { - let mut slot1: Atomic<usize> = - Atomic::new(fixture.objref1.to_raw_address().as_usize() | TAG1); - let mut slot2: Atomic<usize> = - Atomic::new(fixture.objref1.to_raw_address().as_usize() | TAG2); - - let edge1 = TaggedEdge::new(Address::from_ref(&mut slot1)); - let edge2 = TaggedEdge::new(Address::from_ref(&mut slot2)); - let objref1 = edge1.load(); - let objref2 = edge2.load(); - - // Tags should not affect loaded values. - assert_eq!(objref1, fixture.objref1); - assert_eq!(objref2, fixture.objref1); - }); -} - -#[test] -pub fn store_tagged() { - FIXTURE.with_fixture(|fixture| { - let mut slot1: Atomic<usize> = - Atomic::new(fixture.objref1.to_raw_address().as_usize() | TAG1); - let mut slot2: Atomic<usize> = - Atomic::new(fixture.objref1.to_raw_address().as_usize() | TAG2); - - let edge1 = TaggedEdge::new(Address::from_ref(&mut slot1)); - let edge2 = TaggedEdge::new(Address::from_ref(&mut slot2)); - edge1.store(fixture.objref2); - edge2.store(fixture.objref2); - - // Tags should be preserved. - assert_eq!( - slot1.load(Ordering::SeqCst), - fixture.objref2.to_raw_address().as_usize() | TAG1 - ); - assert_eq!( - slot2.load(Ordering::SeqCst), - fixture.objref2.to_raw_address().as_usize() | TAG2 - ); - - let objref1 = edge1.load(); - let objref2 = edge2.load(); - - // Tags should not affect loaded values. - assert_eq!(objref1, fixture.objref2); - assert_eq!(objref2, fixture.objref2); - }); -} - -#[test] -pub fn mixed() { - const OFFSET: usize = 48; - - FIXTURE.with_fixture(|fixture| { - let addr1 = fixture.objref1.to_raw_address(); - let addr2 = fixture.objref2.to_raw_address(); - - let mut slot1: Atomic<ObjectReference> = Atomic::new(fixture.objref1); - let mut slot3: Atomic<Address> = Atomic::new(addr1 + OFFSET); - let mut slot4: Atomic<usize> = Atomic::new(addr1.as_usize() | TAG1); - - let edge1 = SimpleEdge::from_address(Address::from_ref(&mut slot1)); - let edge3 = OffsetEdge::new_with_offset(Address::from_ref(&mut slot3), OFFSET); - let edge4 = TaggedEdge::new(Address::from_ref(&mut slot4)); - - let de1 = DummyVMEdge::Simple(edge1); - let de3 = DummyVMEdge::Offset(edge3); - let de4 = DummyVMEdge::Tagged(edge4); - - let edges = vec![de1, de3, de4]; - for (i, edge) in edges.iter().enumerate() { - let objref = edge.load(); - assert_eq!(objref, fixture.objref1, "Edge {} is not properly loaded", i); - } - - let mutable_edges = vec![de1, de3, de4]; - for (i, edge) in mutable_edges.iter().enumerate() { - edge.store(fixture.objref2); - let objref = edge.load(); - assert_eq!( - objref, fixture.objref2, - "Edge {} is not properly loaded after store", - i - ); - } - - assert_eq!(slot1.load(Ordering::SeqCst), fixture.objref2); - assert_eq!(slot3.load(Ordering::SeqCst), addr2 + OFFSET); - }); -}
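The tagged tests above rely on loads stripping the tag bits and stores preserving them. A sketch of that logic, assuming a two-bit tag in the low bits of the slot; this is illustrative only, not the actual implementation in the deleted edges.rs:

use atomic::{Atomic, Ordering};
use mmtk::util::{Address, ObjectReference};

const TAG_MASK: usize = 0b11;

// Load: mask the tag bits off before forming an object reference.
fn load_tagged_slot(slot: &Atomic<usize>) -> ObjectReference {
    let raw = slot.load(Ordering::Relaxed) & !TAG_MASK;
    ObjectReference::from_raw_address(unsafe { Address::from_usize(raw) })
}

// Store: keep the old tag bits, replace only the pointer bits.
fn store_tagged_slot(slot: &Atomic<usize>, object: ObjectReference) {
    let tag = slot.load(Ordering::Relaxed) & TAG_MASK;
    slot.store(object.to_raw_address().as_usize() | tag, Ordering::Relaxed);
}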
diff --git a/vmbindings/dummyvm/src/tests/handle_mmap_conflict.rs b/vmbindings/dummyvm/src/tests/handle_mmap_conflict.rs deleted file mode 100644 index 5a702a37f0..0000000000 --- a/vmbindings/dummyvm/src/tests/handle_mmap_conflict.rs +++ /dev/null @@ -1,24 +0,0 @@ -use crate::DummyVM; -use mmtk::util::memory; -use mmtk::util::opaque_pointer::*; -use mmtk::util::Address; - -#[test] -pub fn test_handle_mmap_conflict() { - let start = unsafe { Address::from_usize(0x100_0000) }; - let one_megabyte = 1000000; - let mmap1_res = memory::dzmmap_noreplace(start, one_megabyte, memory::MmapStrategy::Normal); - assert!(mmap1_res.is_ok()); - - let panic_res = std::panic::catch_unwind(|| { - let mmap2_res = memory::dzmmap_noreplace(start, one_megabyte, memory::MmapStrategy::Normal); - assert!(mmap2_res.is_err()); - memory::handle_mmap_error::<DummyVM>(mmap2_res.err().unwrap(), VMThread::UNINITIALIZED); - }); - - // The error should match the error message in memory::handle_mmap_error() - assert!(panic_res.is_err()); - let err = panic_res.err().unwrap(); - assert!(err.is::<&str>()); - assert_eq!(err.downcast_ref::<&str>().unwrap(), &"Failed to mmap, the address is already mapped. Should MMTk quanrantine the address range first?"); -}
diff --git a/vmbindings/dummyvm/src/tests/handle_mmap_oom.rs b/vmbindings/dummyvm/src/tests/handle_mmap_oom.rs deleted file mode 100644 index 043f621fea..0000000000 --- a/vmbindings/dummyvm/src/tests/handle_mmap_oom.rs +++ /dev/null @@ -1,30 +0,0 @@ -use crate::DummyVM; -use mmtk::util::memory; -use mmtk::util::opaque_pointer::*; -use mmtk::util::Address; - -#[cfg(target_pointer_width = "32")] -const LARGE_SIZE: usize = 4_294_967_295; -#[cfg(target_pointer_width = "64")] -const LARGE_SIZE: usize = 1_000_000_000_000; - -#[test] -pub fn test_handle_mmap_oom() { - let panic_res = std::panic::catch_unwind(move || { - let start = unsafe { Address::from_usize(0x100_0000) }; - // mmap 1 terabyte of memory - we expect this to fail due to out of memory. - // If that's not the case, increase the size we mmap. - let mmap_res = memory::dzmmap_noreplace(start, LARGE_SIZE, memory::MmapStrategy::Normal); - - memory::handle_mmap_error::<DummyVM>(mmap_res.err().unwrap(), VMThread::UNINITIALIZED); - }); - assert!(panic_res.is_err()); - - // The error should match the default implementation of Collection::out_of_memory() - let err = panic_res.err().unwrap(); - assert!(err.is::<String>()); - assert_eq!( - err.downcast_ref::<String>().unwrap(), - &"Out of memory with MmapOutOfMemory!" - ); -}
diff --git a/vmbindings/dummyvm/src/tests/is_in_mmtk_spaces.rs b/vmbindings/dummyvm/src/tests/is_in_mmtk_spaces.rs deleted file mode 100644 index 8a41f4116f..0000000000 --- a/vmbindings/dummyvm/src/tests/is_in_mmtk_spaces.rs +++ /dev/null @@ -1,81 +0,0 @@ -// GITHUB-CI: MMTK_PLAN=all - -use crate::api::mmtk_is_in_mmtk_spaces as is_in_mmtk_spaces; -use crate::test_fixtures::{Fixture, SingleObject}; -use mmtk::util::*; - -lazy_static! { - static ref SINGLE_OBJECT: Fixture<SingleObject> = Fixture::new(); -} - -#[test] -pub fn null() { - SINGLE_OBJECT.with_fixture(|_fixture| { - assert!( - !is_in_mmtk_spaces(ObjectReference::NULL), - "NULL pointer should not be in any MMTk spaces." - ); - }); -} - -#[test] -pub fn max() { - SINGLE_OBJECT.with_fixture(|_fixture| { - assert!( - !is_in_mmtk_spaces(ObjectReference::from_raw_address(Address::MAX)), - "Address::MAX should not be in any MMTk spaces." - ); - }); -} - -#[test] -pub fn direct_hit() { - SINGLE_OBJECT.with_fixture(|fixture| { - assert!( - is_in_mmtk_spaces(fixture.objref), - "The address of the allocated object should be in the space" - ); - }); -} - -#[test] -pub fn large_offsets_aligned() { - SINGLE_OBJECT.with_fixture(|fixture| { - for log_offset in 12usize..(usize::BITS as usize) { - let offset = 1usize << log_offset; - let addr = match fixture - .objref - .to_raw_address() - .as_usize() - .checked_add(offset) - { - Some(n) => unsafe { Address::from_usize(n) }, - None => break, - }; - // It's just a smoke test. It is hard to predict if the addr is still in any space, - // but it must not crash. - let _ = is_in_mmtk_spaces(ObjectReference::from_raw_address(addr)); - } - }); -} - -#[test] -pub fn negative_offsets() { - SINGLE_OBJECT.with_fixture(|fixture| { - for log_offset in 1usize..(usize::BITS as usize) { - let offset = 1usize << log_offset; - let addr = match fixture - .objref - .to_raw_address() - .as_usize() - .checked_sub(offset) - { - Some(n) => unsafe { Address::from_usize(n) }, - None => break, - }; - // It's just a smoke test. It is hard to predict if the addr is still in any space, - // but it must not crash. - let _ = is_in_mmtk_spaces(ObjectReference::from_raw_address(addr)); - } - }); -}
diff --git a/vmbindings/dummyvm/src/tests/issue139_allocate_unaligned_object_size.rs b/vmbindings/dummyvm/src/tests/issue139_allocate_unaligned_object_size.rs deleted file mode 100644 index f62cab5ca1..0000000000 --- a/vmbindings/dummyvm/src/tests/issue139_allocate_unaligned_object_size.rs +++ /dev/null @@ -1,17 +0,0 @@ -use crate::api::*; -use mmtk::util::opaque_pointer::*; -use mmtk::AllocationSemantics; - -#[test] -pub fn issue139_alloc_non_multiple_of_min_alignment() { - mmtk_init(200 * 1024 * 1024); - let handle = mmtk_bind_mutator(VMMutatorThread(VMThread::UNINITIALIZED)); - - // Allocate 14 bytes (not a multiple of the 8-byte minimum alignment) with an 8-byte alignment requirement - let addr = mmtk_alloc(handle, 14, 8, 0, AllocationSemantics::Default); - assert!(addr.is_aligned_to(8)); - // After this allocation, the cursor is not aligned to MIN_ALIGNMENT. If the next allocation - // asserted that the cursor is aligned to MIN_ALIGNMENT, it would fail; we had to remove that assertion. - let addr2 = mmtk_alloc(handle, 14, 8, 0, AllocationSemantics::Default); - assert!(addr2.is_aligned_to(8)); -}
diff --git a/vmbindings/dummyvm/src/tests/issue867_allocate_unrealistically_large_object.rs b/vmbindings/dummyvm/src/tests/issue867_allocate_unrealistically_large_object.rs deleted file mode 100644 index 8822506413..0000000000 --- a/vmbindings/dummyvm/src/tests/issue867_allocate_unrealistically_large_object.rs +++ /dev/null @@ -1,82 +0,0 @@ -// GITHUB-CI: MMTK_PLAN=all - -use crate::api; -use crate::test_fixtures::{MutatorFixture, SerialFixture}; -use mmtk::plan::AllocationSemantics; - -lazy_static! { - static ref MUTATOR: SerialFixture<MutatorFixture> = SerialFixture::new(); -} - -#[test] -#[should_panic(expected = "Out of memory with HeapOutOfMemory!")] -pub fn allocate_max_size_object() { - let (size, align) = (usize::MAX, 8); - - MUTATOR.with_fixture_expect_benign_panic(|fixture| { - api::mmtk_alloc( - fixture.mutator, - size, - align, - 0, - AllocationSemantics::Default, - ); - }) -} - -#[test] -// This test panics with 'attempt to add with overflow', as we do arithmetic with the size -// in the fastpath. We don't want any extra check in the fastpath; there is -// nothing we can do about it without sacrificing performance. -#[should_panic(expected = "Out of memory with HeapOutOfMemory!")] -#[ignore] -pub fn allocate_max_size_object_after_succeed() { - MUTATOR.with_fixture_expect_benign_panic(|fixture| { - // Allocate something so we have a thread local allocation buffer - api::mmtk_alloc(fixture.mutator, 8, 8, 0, AllocationSemantics::Default); - // Allocate an unrealistically large object - api::mmtk_alloc( - fixture.mutator, - usize::MAX, - 8, - 0, - AllocationSemantics::Default, - ); - }) -} - -#[test] -#[should_panic(expected = "Out of memory with HeapOutOfMemory!")] -pub fn allocate_unrealistically_large_object() { - const CHUNK: usize = 4 * 1024 * 1024; // 4MB - // Leave some room, so we won't have arithmetic overflow when we compute size and do alignment. - let (size, align) = ( - mmtk::util::conversions::raw_align_down(usize::MAX - CHUNK, 4096), - 8, - ); - - MUTATOR.with_fixture_expect_benign_panic(|fixture| { - api::mmtk_alloc( - fixture.mutator, - size, - align, - 0, - AllocationSemantics::Default, - ); - }) -} - -#[test] -#[should_panic(expected = "Out of memory with HeapOutOfMemory!")] -pub fn allocate_more_than_heap_size() { - // The heap is 1 MB. Allocating 2 MB will cause OOM. - MUTATOR.with_fixture_expect_benign_panic(|fixture| { - api::mmtk_alloc( - fixture.mutator, - 2 * 1024 * 1024, - 8, - 0, - AllocationSemantics::Default, - ); - }) -}
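These OOM tests depend on the default Collection::out_of_memory behavior, which is (roughly) a panic carrying the error kind; this is what produces the "Out of memory with HeapOutOfMemory!" and "Out of memory with MmapOutOfMemory!" strings asserted above. A sketch of that default, with the import paths as assumptions for this era of mmtk-core:

use mmtk::util::alloc::AllocationError;
use mmtk::util::opaque_pointer::VMThread;

// Roughly the default out_of_memory a binding inherits; a real binding
// would typically override this in its Collection impl to raise a
// VM-level OutOfMemoryError instead of panicking.
fn out_of_memory(_tls: VMThread, err_kind: AllocationError) {
    panic!("Out of memory with {:?}!", err_kind);
}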
diff --git a/vmbindings/dummyvm/src/tests/malloc_api.rs b/vmbindings/dummyvm/src/tests/malloc_api.rs deleted file mode 100644 index 4b31fd8773..0000000000 --- a/vmbindings/dummyvm/src/tests/malloc_api.rs +++ /dev/null @@ -1,24 +0,0 @@ -use crate::api::*; - -#[test] -pub fn malloc_free() { - let res = mmtk_malloc(8); - assert!(!res.is_zero()); - mmtk_free(res); -} - -#[test] -pub fn calloc_free() { - let res = mmtk_calloc(1, 8); - assert!(!res.is_zero()); - mmtk_free(res); -} - -#[test] -pub fn realloc_free() { - let res1 = mmtk_malloc(8); - assert!(!res1.is_zero()); - let res2 = mmtk_realloc(res1, 16); - assert!(!res2.is_zero()); - mmtk_free(res2); -}
diff --git a/vmbindings/dummyvm/src/tests/malloc_counted.rs b/vmbindings/dummyvm/src/tests/malloc_counted.rs deleted file mode 100644 index 00ffe77ed9..0000000000 --- a/vmbindings/dummyvm/src/tests/malloc_counted.rs +++ /dev/null @@ -1,84 +0,0 @@ -// GITHUB-CI: FEATURES=malloc_counted_size - -use crate::api::*; -use crate::test_fixtures::{MMTKSingleton, SerialFixture}; - -lazy_static! { - static ref MMTK_SINGLETON: SerialFixture<MMTKSingleton> = SerialFixture::new(); -} - -#[test] -pub fn malloc_free() { - MMTK_SINGLETON.with_fixture(|_| { - let bytes_before = mmtk_get_malloc_bytes(); - - let res = mmtk_counted_malloc(8); - assert!(!res.is_zero()); - let bytes_after_alloc = mmtk_get_malloc_bytes(); - assert_eq!(bytes_before + 8, bytes_after_alloc); - - mmtk_free_with_size(res, 8); - let bytes_after_free = mmtk_get_malloc_bytes(); - assert_eq!(bytes_before, bytes_after_free); - }); -} - -#[test] -pub fn calloc_free() { - MMTK_SINGLETON.with_fixture(|_| { - let bytes_before = mmtk_get_malloc_bytes(); - - let res = mmtk_counted_calloc(1, 8); - assert!(!res.is_zero()); - let bytes_after_alloc = mmtk_get_malloc_bytes(); - assert_eq!(bytes_before + 8, bytes_after_alloc); - - mmtk_free_with_size(res, 8); - let bytes_after_free = mmtk_get_malloc_bytes(); - assert_eq!(bytes_before, bytes_after_free); - }); -} - -#[test] -pub fn realloc_grow() { - MMTK_SINGLETON.with_fixture(|_| { - let bytes_before = mmtk_get_malloc_bytes(); - - let res1 = mmtk_counted_malloc(8); - assert!(!res1.is_zero()); - let bytes_after_alloc = mmtk_get_malloc_bytes(); - assert_eq!(bytes_before + 8, bytes_after_alloc); - - // grow to 16 bytes - let res2 = mmtk_realloc_with_old_size(res1, 16, 8); - assert!(!res2.is_zero()); - let bytes_after_realloc = mmtk_get_malloc_bytes(); - assert_eq!(bytes_before + 16, bytes_after_realloc); - - mmtk_free_with_size(res2, 16); - let bytes_after_free = mmtk_get_malloc_bytes(); - assert_eq!(bytes_before, bytes_after_free); - }); -} - -#[test] -pub fn realloc_shrink() { - MMTK_SINGLETON.with_fixture(|_| { - let bytes_before = mmtk_get_malloc_bytes(); - - let res1 = mmtk_counted_malloc(16); - assert!(!res1.is_zero()); - let bytes_after_alloc = mmtk_get_malloc_bytes(); - assert_eq!(bytes_before + 16, bytes_after_alloc); - - // shrink to 8 bytes - let res2 = mmtk_realloc_with_old_size(res1, 8, 16); - assert!(!res2.is_zero()); - let bytes_after_realloc = mmtk_get_malloc_bytes(); - assert_eq!(bytes_before + 8, bytes_after_realloc); - - mmtk_free_with_size(res2, 8); - let bytes_after_free = mmtk_get_malloc_bytes(); - assert_eq!(bytes_before, bytes_after_free); - }); -}
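The counted variants differ from plain mmtk_malloc by updating a process-wide byte counter, which is what mmtk_get_malloc_bytes reads back. A sketch of that bookkeeping, assuming a simple atomic counter; the names here are illustrative, not mmtk-core internals:

use std::sync::atomic::{AtomicUsize, Ordering};

static MALLOC_BYTES: AtomicUsize = AtomicUsize::new(0);

// Called by the counted allocation paths with the requested size.
fn on_counted_malloc(size: usize) {
    MALLOC_BYTES.fetch_add(size, Ordering::SeqCst);
}

// Called by mmtk_free_with_size / realloc_with_old_size with the old size.
fn on_free_with_size(size: usize) {
    MALLOC_BYTES.fetch_sub(size, Ordering::SeqCst);
}

// mmtk_get_malloc_bytes would then return MALLOC_BYTES.load(Ordering::SeqCst).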
diff --git a/vmbindings/dummyvm/src/tests/malloc_ms.rs b/vmbindings/dummyvm/src/tests/malloc_ms.rs deleted file mode 100644 index db08f65bbb..0000000000 --- a/vmbindings/dummyvm/src/tests/malloc_ms.rs +++ /dev/null @@ -1,40 +0,0 @@ -use crate::DummyVM; -use mmtk::util::malloc::malloc_ms_util; - -#[test] -fn test_malloc() { - let (address1, bool1) = malloc_ms_util::alloc::<DummyVM>(16, 8, 0); - let (address2, bool2) = malloc_ms_util::alloc::<DummyVM>(16, 32, 0); - let (address3, bool3) = malloc_ms_util::alloc::<DummyVM>(16, 8, 4); - let (address4, bool4) = malloc_ms_util::alloc::<DummyVM>(32, 64, 4); - - assert!(address1.is_aligned_to(8)); - assert!(address2.is_aligned_to(32)); - assert!((address3 + 4 as isize).is_aligned_to(8)); - assert!((address4 + 4 as isize).is_aligned_to(64)); - - assert!(!bool1); - #[cfg(feature = "malloc_hoard")] - assert!(bool2); - #[cfg(not(feature = "malloc_hoard"))] - assert!(!bool2); - assert!(bool3); - assert!(bool4); - - assert!(malloc_ms_util::get_malloc_usable_size(address1, bool1) >= 16); - assert!(malloc_ms_util::get_malloc_usable_size(address2, bool2) >= 16); - assert!(malloc_ms_util::get_malloc_usable_size(address3, bool3) >= 16); - assert!(malloc_ms_util::get_malloc_usable_size(address4, bool4) >= 32); - - unsafe { - malloc_ms_util::free(address1.to_mut_ptr()); - } - #[cfg(feature = "malloc_hoard")] - malloc_ms_util::offset_free(address2); - #[cfg(not(feature = "malloc_hoard"))] - unsafe { - malloc_ms_util::free(address2.to_mut_ptr()); - } - malloc_ms_util::offset_free(address3); - malloc_ms_util::offset_free(address4); -}
diff --git a/vmbindings/dummyvm/src/tests/mod.rs b/vmbindings/dummyvm/src/tests/mod.rs deleted file mode 100644 index c65b0aff43..0000000000 --- a/vmbindings/dummyvm/src/tests/mod.rs +++ /dev/null @@ -1,38 +0,0 @@ -// NOTE: Since the dummyvm uses a global MMTK instance, -// it will panic if MMTk is initialized more than once per process. -// We run each of the following modules in a separate test process. -// -// One way to avoid re-initialization is to have only one #[test] per module. -// There are also helpers for creating fixtures in `test_fixtures.rs`. -mod allocate_align_offset; -mod allocate_with_disable_collection; -mod allocate_with_initialize_collection; -mod allocate_with_re_enable_collection; -mod allocate_without_initialize_collection; -mod allocator_info; -mod barrier_slow_path_assertion; -#[cfg(feature = "is_mmtk_object")] -mod conservatism; -mod edges_test; -#[cfg(target_os = "linux")] -mod handle_mmap_conflict; -mod handle_mmap_oom; -mod is_in_mmtk_spaces; -mod issue139_allocate_unaligned_object_size; -mod issue867_allocate_unrealistically_large_object; -#[cfg(not(feature = "malloc_counted_size"))] -mod malloc_api; -#[cfg(feature = "malloc_counted_size")] -mod malloc_counted; -mod malloc_ms; -#[cfg(feature = "nogc_lock_free")] -mod nogc_lock_free; -#[cfg(target_pointer_width = "64")] -mod vm_layout_compressed_pointer_64; -mod vm_layout_default; -mod vm_layout_heap_start; -mod vm_layout_log_address_space; - -// The code snippets of these tests are also referenced in our docs. -mod doc_avoid_resolving_allocator; -mod doc_mutator_storage;
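The note above refers to the fixture helpers that serialize tests sharing one MMTk instance. A simplified sketch of that pattern (lazily initialized, mutex-guarded); the details are assumptions, not a copy of the deleted test_fixtures.rs:

use std::sync::Mutex;

// Holds fixture content of type T, created at most once per process.
struct SerialFixture<T> {
    content: Mutex<Option<T>>,
}

impl<T> SerialFixture<T> {
    const fn new() -> Self {
        Self { content: Mutex::new(None) }
    }

    // Holding the lock for the whole test body serializes tests that
    // share the same (process-wide) MMTk instance.
    fn with_fixture(&self, init: impl FnOnce() -> T, f: impl FnOnce(&T)) {
        let mut c = self.content.lock().unwrap();
        if c.is_none() {
            *c = Some(init()); // e.g. initialize MMTk and bind a mutator
        }
        f(c.as_ref().unwrap());
    }
}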
diff --git a/vmbindings/dummyvm/src/tests/nogc_lock_free.rs b/vmbindings/dummyvm/src/tests/nogc_lock_free.rs deleted file mode 100644 index f69bee7178..0000000000 --- a/vmbindings/dummyvm/src/tests/nogc_lock_free.rs +++ /dev/null @@ -1,35 +0,0 @@ -// GITHUB-CI: MMTK_PLAN=NoGC -// GITHUB-CI: FEATURES=nogc_lock_free - -use crate::api; -use crate::test_fixtures::{MutatorFixture, SerialFixture}; -use crate::DummyVM; -use log::info; -use mmtk::plan::AllocationSemantics; -use mmtk::vm::VMBinding; - -lazy_static! { - static ref MUTATOR: SerialFixture<MutatorFixture> = SerialFixture::new(); -} - -#[test] -pub fn nogc_lock_free_allocate() { - MUTATOR.with_fixture(|fixture| { - let min = DummyVM::MIN_ALIGNMENT; - let max = DummyVM::MAX_ALIGNMENT; - info!("Allowed alignment between {} and {}", min, max); - let mut align = min; - while align <= max { - info!("Test allocation with alignment {}", align); - let addr = api::mmtk_alloc(fixture.mutator, 8, align, 0, AllocationSemantics::Default); - info!("addr = {}", addr); - assert!( - addr.is_aligned_to(align), - "Expected allocation alignment {}, returned address is {:?}", - align, - addr - ); - align *= 2; - } - }) -}
diff --git a/vmbindings/dummyvm/src/tests/vm_layout_compressed_pointer_64.rs b/vmbindings/dummyvm/src/tests/vm_layout_compressed_pointer_64.rs deleted file mode 100644 index 9abc782a41..0000000000 --- a/vmbindings/dummyvm/src/tests/vm_layout_compressed_pointer_64.rs +++ /dev/null @@ -1,33 +0,0 @@ -// GITHUB-CI: MMTK_PLAN=all - -use mmtk::util::conversions::*; -use mmtk::util::heap::vm_layout::VMLayout; -use mmtk::util::Address; - -use crate::tests::vm_layout_default::test_with_vm_layout; - -// This test only runs on 64-bit platforms. - -#[test] -fn test_vm_layout_compressed_pointer() { - let start = if cfg!(target_os = "macos") { - // It is impossible to map 0x4000_0000 on macOS, so choose a different address. - 0x40_0000_0000 - } else { - 0x4000_0000 - }; - let heap_size = 1024 * 1024; - let end = match start + heap_size { - end if end <= (4usize << 30) => 4usize << 30, - end if end <= (32usize << 30) => 32usize << 30, - _ => start + (32usize << 30), - }; - let layout = VMLayout { - log_address_space: 35, - heap_start: chunk_align_down(unsafe { Address::from_usize(start) }), - heap_end: chunk_align_up(unsafe { Address::from_usize(end) }), - log_space_extent: 31, - force_use_contiguous_spaces: false, - }; - test_with_vm_layout(Some(layout)); -}
diff --git a/vmbindings/dummyvm/src/tests/vm_layout_default.rs b/vmbindings/dummyvm/src/tests/vm_layout_default.rs deleted file mode 100644 index 38fa24a10e..0000000000 --- a/vmbindings/dummyvm/src/tests/vm_layout_default.rs +++ /dev/null @@ -1,25 +0,0 @@ -// GITHUB-CI: MMTK_PLAN=all - -use mmtk::util::heap::vm_layout::VMLayout; - -pub fn test_with_vm_layout(layout: Option<VMLayout>) { - use crate::api; - use crate::test_fixtures::VMLayoutFixture; - use mmtk::plan::AllocationSemantics; - use mmtk::vm::ObjectModel; - - let fixture = VMLayoutFixture::create_with_layout(layout); - - // Test allocation - let addr = api::mmtk_alloc(fixture.mutator, 8, 8, 0, AllocationSemantics::Default); - let obj = crate::object_model::VMObjectModel::address_to_ref(addr); - // Test SFT - assert!(api::mmtk_is_in_mmtk_spaces(obj)); - // Test mmapper - assert!(api::mmtk_is_mapped_address(addr)); -} - -#[test] -fn test_vm_layout_default() { - test_with_vm_layout(None::<VMLayout>); -}
diff --git a/vmbindings/dummyvm/src/tests/vm_layout_heap_start.rs b/vmbindings/dummyvm/src/tests/vm_layout_heap_start.rs deleted file mode 100644 index ec64a4413e..0000000000 --- a/vmbindings/dummyvm/src/tests/vm_layout_heap_start.rs +++ /dev/null @@ -1,25 +0,0 @@ -// GITHUB-CI: MMTK_PLAN=all - -use crate::tests::vm_layout_default::test_with_vm_layout; -use mmtk::util::heap::vm_layout::VMLayout; -use mmtk::util::Address; - -#[test] -fn test_vm_layout_heap_start() { - let default = VMLayout::default(); - - // Test with a start address that is different from the default heap start - #[cfg(target_pointer_width = "32")] - let heap_start = unsafe { Address::from_usize(0x7000_0000) }; - #[cfg(target_pointer_width = "64")] - let heap_start = unsafe { Address::from_usize(0x0000_0400_0000_0000usize) }; - #[cfg(target_pointer_width = "64")] - assert!(heap_start.is_aligned_to(default.max_space_extent())); - - let layout = VMLayout { - heap_start, - // Use default for the rest. - ..default - }; - test_with_vm_layout(Some(layout)); -}
diff --git a/vmbindings/dummyvm/src/tests/vm_layout_log_address_space.rs b/vmbindings/dummyvm/src/tests/vm_layout_log_address_space.rs deleted file mode 100644 index 504e66b30f..0000000000 --- a/vmbindings/dummyvm/src/tests/vm_layout_log_address_space.rs +++ /dev/null @@ -1,17 +0,0 @@ -// GITHUB-CI: MMTK_PLAN=all - -use crate::tests::vm_layout_default::test_with_vm_layout; -use mmtk::util::heap::vm_layout::VMLayout; - -#[test] -fn test_vm_layout_log_address_space() { - let layout = VMLayout { - #[cfg(target_pointer_width = "32")] - log_address_space: 31, - #[cfg(target_pointer_width = "64")] - log_address_space: 45, - // Use default for the rest. - ..VMLayout::default() - }; - test_with_vm_layout(Some(layout)); -}
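For context, a binding applies such a custom VMLayout when building its MMTk instance. A sketch using MMTKBuilder, assuming the set_vm_layout entry point these fixtures wrap; `MyVM` is a stand-in for the binding's VMBinding type:

use mmtk::util::heap::vm_layout::VMLayout;
use mmtk::MMTKBuilder;

// Sketch: apply a custom layout before MMTk is initialized.
fn init_with_layout(layout: VMLayout) -> Box<mmtk::MMTK<MyVM>> {
    let mut builder = MMTKBuilder::new();
    builder.set_vm_layout(layout);
    mmtk::memory_manager::mmtk_init::<MyVM>(&builder)
}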