diff --git a/macros/src/plan_trace_object_impl.rs b/macros/src/plan_trace_object_impl.rs
index 7be2e44c9a..7a01361a2c 100644
--- a/macros/src/plan_trace_object_impl.rs
+++ b/macros/src/plan_trace_object_impl.rs
@@ -86,7 +86,7 @@ pub(crate) fn generate_trace_object<'a>(
         }
     } else {
         quote! {
-            >::vm_trace_object::(__mmtk_queue, __mmtk_objref, __mmtk_worker)
+            VM::vm_trace_object::(__mmtk_queue, __mmtk_objref, __mmtk_worker)
         }
     };
diff --git a/src/memory_manager.rs b/src/memory_manager.rs
index de2cab8ace..af7d62749b 100644
--- a/src/memory_manager.rs
+++ b/src/memory_manager.rs
@@ -23,7 +23,6 @@ use crate::util::heap::layout::vm_layout::vm_layout;
 use crate::util::opaque_pointer::*;
 use crate::util::{Address, ObjectReference};
 use crate::vm::edge_shape::MemorySlice;
-use crate::vm::ReferenceGlue;
 use crate::vm::VMBinding;
 use std::sync::atomic::Ordering;
 /// Initialize an MMTk instance. A VM should call this method after creating an [`crate::MMTK`]
@@ -446,16 +445,15 @@ pub fn get_malloc_bytes(mmtk: &MMTK) -> usize {
 /// However, if a binding uses counted malloc (which won't poll for GC), they may want to poll for GC manually.
 /// This function should only be used by mutator threads.
 pub fn gc_poll(mmtk: &MMTK, tls: VMMutatorThread) {
-    use crate::vm::{ActivePlan, Collection};
     debug_assert!(
-        VM::VMActivePlan::is_mutator(tls.0),
+        VM::is_mutator(tls.0),
         "gc_poll() can only be called by a mutator thread."
     );
     if mmtk.state.should_trigger_gc_when_heap_is_full() && mmtk.gc_trigger.poll(false, None) {
         debug!("Collection required");
         assert!(mmtk.state.is_initialized(), "GC is not allowed here: collection is not initialized (did you call initialize_collection()?).");
-        VM::VMCollection::block_for_gc(tls);
+        VM::block_for_gc(tls);
     }
 }
@@ -785,10 +783,7 @@ pub fn harness_end(mmtk: &'static MMTK) {
 /// Arguments:
 /// * `mmtk`: A reference to an MMTk instance
 /// * `object`: The object that has a finalizer
-pub fn add_finalizer(
-    mmtk: &'static MMTK,
-    object: >::FinalizableType,
-) {
+pub fn add_finalizer(mmtk: &'static MMTK, object: VM::FinalizableType) {
     if *mmtk.options.no_finalizer {
         warn!("add_finalizer() is called when no_finalizer = true");
     }
@@ -845,9 +840,7 @@ pub fn is_pinned(object: ObjectReference) -> bool {
 ///
 /// Arguments:
 /// * `mmtk`: A reference to an MMTk instance.
-pub fn get_finalized_object(
-    mmtk: &'static MMTK,
-) -> Option<>::FinalizableType> {
+pub fn get_finalized_object(mmtk: &'static MMTK) -> Option {
     if *mmtk.options.no_finalizer {
         warn!("get_finalized_object() is called when no_finalizer = true");
     }
@@ -865,9 +858,7 @@ pub fn get_finalized_object(
 ///
 /// Arguments:
 /// * `mmtk`: A reference to an MMTk instance.
-pub fn get_all_finalizers(
-    mmtk: &'static MMTK,
-) -> Vec<>::FinalizableType> {
+pub fn get_all_finalizers(mmtk: &'static MMTK) -> Vec {
     if *mmtk.options.no_finalizer {
         warn!("get_all_finalizers() is called when no_finalizer = true");
     }
@@ -887,7 +878,7 @@ pub fn get_all_finalizers(
 pub fn get_finalizers_for(
     mmtk: &'static MMTK,
     object: ObjectReference,
-) -> Vec<>::FinalizableType> {
+) -> Vec {
     if *mmtk.options.no_finalizer {
         warn!("get_finalizers() is called when no_finalizer = true");
     }
diff --git a/src/mmtk.rs b/src/mmtk.rs
index fa90270637..0008aa7fa1 100644
--- a/src/mmtk.rs
+++ b/src/mmtk.rs
@@ -21,7 +21,6 @@ use crate::util::reference_processor::ReferenceProcessors;
 #[cfg(feature = "sanity")]
 use crate::util::sanity::sanity_checker::SanityChecker;
 use crate::util::statistics::stats::Stats;
-use crate::vm::ReferenceGlue;
 use crate::vm::VMBinding;
 use std::cell::UnsafeCell;
 use std::default::Default;
@@ -109,8 +108,7 @@ pub struct MMTK {
     pub(crate) state: Arc,
     pub(crate) plan: UnsafeCell>>,
     pub(crate) reference_processors: ReferenceProcessors,
-    pub(crate) finalizable_processor:
-        Mutex>::FinalizableType>>,
+    pub(crate) finalizable_processor: Mutex>,
     pub(crate) scheduler: Arc>,
     #[cfg(feature = "sanity")]
     pub(crate) sanity_checker: Mutex>,
@@ -201,9 +199,7 @@ impl MMTK {
             state,
             plan: UnsafeCell::new(plan),
             reference_processors: ReferenceProcessors::new(),
-            finalizable_processor: Mutex::new(FinalizableProcessor::<
-                >::FinalizableType,
-            >::new()),
+            finalizable_processor: Mutex::new(FinalizableProcessor::::new()),
             scheduler,
             #[cfg(feature = "sanity")]
             sanity_checker: Mutex::new(SanityChecker::new()),
@@ -312,7 +308,6 @@ impl MMTK {
         force: bool,
         exhaustive: bool,
     ) {
-        use crate::vm::Collection;
         if !self.get_plan().constraints().collects_garbage {
             warn!("User attempted a collection request, but the plan can not do GC. The request is ignored.");
             return;
         }
@@ -330,7 +325,7 @@ impl MMTK {
             .user_triggered_collection
             .store(true, Ordering::Relaxed);
         self.gc_requester.request();
-        VM::VMCollection::block_for_gc(tls);
+        VM::block_for_gc(tls);
     }
 }
diff --git a/src/plan/barriers.rs b/src/plan/barriers.rs
index caf3372251..ec1c48a9d4 100644
--- a/src/plan/barriers.rs
+++ b/src/plan/barriers.rs
@@ -1,7 +1,6 @@
 //! Read/Write barrier implementations.
 use crate::vm::edge_shape::{Edge, MemorySlice};
-use crate::vm::ObjectModel;
 use crate::{
     util::{metadata::MetadataSpec, *},
     vm::VMBinding,
 };
@@ -134,8 +133,7 @@ impl Barrier for NoBarrier {}
 pub trait BarrierSemantics: 'static + Send {
     type VM: VMBinding;
-    const UNLOG_BIT_SPEC: MetadataSpec =
-        *::VMObjectModel::GLOBAL_LOG_BIT_SPEC.as_spec();
+    const UNLOG_BIT_SPEC: MetadataSpec = *::GLOBAL_LOG_BIT_SPEC.as_spec();
     /// Flush thread-local buffers or remembered sets.
     /// Normally this is called by the slow-path implementation whenever the thread-local buffers are full.
diff --git a/src/plan/generational/gc_work.rs b/src/plan/generational/gc_work.rs
index a5c38e84ea..1cb4977bb4 100644
--- a/src/plan/generational/gc_work.rs
+++ b/src/plan/generational/gc_work.rs
@@ -99,7 +99,7 @@ impl GCWork for ProcessModBuf {
     fn do_work(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) {
         // Flip the per-object unlogged bits to "unlogged" state.
for obj in &self.modbuf { - ::VMObjectModel::GLOBAL_LOG_BIT_SPEC.store_atomic::( + ::GLOBAL_LOG_BIT_SPEC.store_atomic::( *obj, 1, None, diff --git a/src/plan/generational/global.rs b/src/plan/generational/global.rs index e12430f576..981428dc49 100644 --- a/src/plan/generational/global.rs +++ b/src/plan/generational/global.rs @@ -11,7 +11,7 @@ use crate::util::statistics::counter::EventCounter; use crate::util::Address; use crate::util::ObjectReference; use crate::util::VMWorkerThread; -use crate::vm::{ObjectModel, VMBinding}; +use crate::vm::VMBinding; use std::sync::atomic::AtomicBool; use std::sync::atomic::Ordering; use std::sync::{Arc, Mutex}; @@ -87,8 +87,7 @@ impl CommonGenPlan { /// Returns `true` if the nursery has grown to the extent that it may not be able to be copied /// into the mature space. fn virtual_memory_exhausted(plan: &dyn GenerationalPlan) -> bool { - ((plan.get_collection_reserved_pages() as f64 - * VM::VMObjectModel::VM_WORST_CASE_COPY_EXPANSION) as usize) + ((plan.get_collection_reserved_pages() as f64 * VM::VM_WORST_CASE_COPY_EXPANSION) as usize) > plan.get_mature_physical_pages_available() } diff --git a/src/plan/generational/mod.rs b/src/plan/generational/mod.rs index 95aaa48928..bf19efc5a8 100644 --- a/src/plan/generational/mod.rs +++ b/src/plan/generational/mod.rs @@ -11,7 +11,6 @@ use crate::policy::space::Space; use crate::util::alloc::AllocatorSelector; use crate::util::metadata::side_metadata::SideMetadataContext; use crate::util::metadata::side_metadata::SideMetadataSpec; -use crate::vm::ObjectModel; use crate::vm::VMBinding; use crate::Plan; @@ -60,7 +59,7 @@ pub const GEN_CONSTRAINTS: PlanConstraints = PlanConstraints { /// So if a plan calls this, it should not call SideMetadataContext::new_global_specs() again. pub fn new_generational_global_metadata_specs() -> Vec { let specs = if ACTIVE_BARRIER == BarrierSelector::ObjectBarrier { - crate::util::metadata::extract_side_metadata(&[*VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC]) + crate::util::metadata::extract_side_metadata(&[*VM::GLOBAL_LOG_BIT_SPEC]) } else { vec![] }; diff --git a/src/plan/global.rs b/src/plan/global.rs index 66560fe0c9..e9ef6bbd52 100644 --- a/src/plan/global.rs +++ b/src/plan/global.rs @@ -25,7 +25,6 @@ use crate::util::options::PlanSelector; use crate::util::statistics::stats::Stats; use crate::util::{conversions, ObjectReference}; use crate::util::{VMMutatorThread, VMWorkerThread}; -use crate::vm::*; use downcast_rs::Downcast; use enum_map::EnumMap; use std::sync::atomic::Ordering; @@ -225,7 +224,7 @@ pub trait Plan: 'static + HasSpaces + Sync + Downcast { fn get_reserved_pages(&self) -> usize { let used_pages = self.get_used_pages(); let collection_reserve = self.get_collection_reserved_pages(); - let vm_live_bytes = ::VMCollection::vm_live_bytes(); + let vm_live_bytes = ::vm_live_bytes(); // Note that `vm_live_bytes` may not be the exact number of bytes in whole pages. The VM // binding is allowed to return an approximate value if it is expensive or impossible to // compute the exact number of pages occupied. 
@@ -495,7 +494,7 @@ impl BasePlan { return self.vm_space.trace_object(queue, object); } - VM::VMActivePlan::vm_trace_object::(queue, object, worker) + VM::vm_trace_object::(queue, object, worker) } pub fn prepare(&mut self, _tls: VMWorkerThread, _full_heap: bool) { diff --git a/src/plan/markcompact/gc_work.rs b/src/plan/markcompact/gc_work.rs index 4b60cafa9b..10b06caeda 100644 --- a/src/plan/markcompact/gc_work.rs +++ b/src/plan/markcompact/gc_work.rs @@ -6,8 +6,6 @@ use crate::scheduler::gc_work::*; use crate::scheduler::GCWork; use crate::scheduler::GCWorker; use crate::scheduler::WorkBucketStage; -use crate::vm::ActivePlan; -use crate::vm::Scanning; use crate::vm::VMBinding; use crate::MMTK; use std::marker::PhantomData; @@ -41,7 +39,7 @@ unsafe impl Send for UpdateReferences {} impl GCWork for UpdateReferences { fn do_work(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { // The following needs to be done right before the second round of root scanning - VM::VMScanning::prepare_for_roots_re_scanning(); + VM::prepare_for_roots_re_scanning(); mmtk.state.prepare_for_stack_scanning(); // Prepare common and base spaces for the 2nd round of transitive closure let plan_mut = unsafe { &mut *(self.plan as *mut MarkCompact) }; @@ -56,7 +54,7 @@ impl GCWork for UpdateReferences { .worker_group .get_and_clear_worker_live_bytes(); - for mutator in VM::VMActivePlan::mutators() { + for mutator in VM::mutators() { mmtk.scheduler.work_buckets[WorkBucketStage::SecondRoots].add(ScanMutatorRoots::< MarkCompactForwardingGCWorkContext, >(mutator)); diff --git a/src/plan/sticky/immix/global.rs b/src/plan/sticky/immix/global.rs index 44e6ad6153..11f1cc3689 100644 --- a/src/plan/sticky/immix/global.rs +++ b/src/plan/sticky/immix/global.rs @@ -12,7 +12,6 @@ use crate::util::copy::CopySelector; use crate::util::copy::CopySemantics; use crate::util::metadata::side_metadata::SideMetadataContext; use crate::util::statistics::counter::EventCounter; -use crate::vm::ObjectModel; use crate::vm::VMBinding; use crate::Plan; @@ -166,7 +165,7 @@ impl Plan for StickyImmix { fn sanity_check_object(&self, object: crate::util::ObjectReference) -> bool { if self.is_current_gc_nursery() { // Every reachable object should be logged - if !VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.is_unlogged::(object, Ordering::SeqCst) { + if !VM::GLOBAL_LOG_BIT_SPEC.is_unlogged::(object, Ordering::SeqCst) { error!("Object {} is not unlogged (all objects that have been traced should be unlogged/mature)", object); return false; } diff --git a/src/policy/copyspace.rs b/src/policy/copyspace.rs index 8d08ec1507..31842a2399 100644 --- a/src/policy/copyspace.rs +++ b/src/policy/copyspace.rs @@ -149,8 +149,8 @@ impl CopySpace { true, false, extract_side_metadata(&[ - *VM::VMObjectModel::LOCAL_FORWARDING_BITS_SPEC, - *VM::VMObjectModel::LOCAL_FORWARDING_POINTER_SPEC, + *VM::LOCAL_FORWARDING_BITS_SPEC, + *VM::LOCAL_FORWARDING_POINTER_SPEC, ]), )); CopySpace { @@ -169,8 +169,7 @@ impl CopySpace { // Clear the metadata if we are using side forwarding status table. Otherwise // objects may inherit forwarding status from the previous GC. // TODO: Fix performance. 
- if let MetadataSpec::OnSide(side_forwarding_status_table) = - *>::LOCAL_FORWARDING_BITS_SPEC + if let MetadataSpec::OnSide(side_forwarding_status_table) = *VM::LOCAL_FORWARDING_BITS_SPEC { side_forwarding_status_table .bzero_metadata(self.common.start, self.pr.cursor() - self.common.start); diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs index 3809f7bd24..fb65e81303 100644 --- a/src/policy/immix/immixspace.rs +++ b/src/policy/immix/immixspace.rs @@ -116,15 +116,15 @@ impl SFT for ImmixSpace { } #[cfg(feature = "object_pinning")] fn pin_object(&self, object: ObjectReference) -> bool { - VM::VMObjectModel::LOCAL_PINNING_BIT_SPEC.pin_object::(object) + VM::LOCAL_PINNING_BIT_SPEC.pin_object::(object) } #[cfg(feature = "object_pinning")] fn unpin_object(&self, object: ObjectReference) -> bool { - VM::VMObjectModel::LOCAL_PINNING_BIT_SPEC.unpin_object::(object) + VM::LOCAL_PINNING_BIT_SPEC.unpin_object::(object) } #[cfg(feature = "object_pinning")] fn is_object_pinned(&self, object: ObjectReference) -> bool { - VM::VMObjectModel::LOCAL_PINNING_BIT_SPEC.is_object_pinned::(object) + VM::LOCAL_PINNING_BIT_SPEC.is_object_pinned::(object) } fn is_movable(&self) -> bool { !super::NEVER_MOVE_OBJECTS @@ -241,11 +241,11 @@ impl ImmixSpace { MetadataSpec::OnSide(Block::DEFRAG_STATE_TABLE), MetadataSpec::OnSide(Block::MARK_TABLE), MetadataSpec::OnSide(ChunkMap::ALLOC_TABLE), - *VM::VMObjectModel::LOCAL_MARK_BIT_SPEC, - *VM::VMObjectModel::LOCAL_FORWARDING_BITS_SPEC, - *VM::VMObjectModel::LOCAL_FORWARDING_POINTER_SPEC, + *VM::LOCAL_MARK_BIT_SPEC, + *VM::LOCAL_FORWARDING_BITS_SPEC, + *VM::LOCAL_FORWARDING_POINTER_SPEC, #[cfg(feature = "object_pinning")] - *VM::VMObjectModel::LOCAL_PINNING_BIT_SPEC, + *VM::LOCAL_PINNING_BIT_SPEC, ] } else { vec![ @@ -253,11 +253,11 @@ impl ImmixSpace { MetadataSpec::OnSide(Block::DEFRAG_STATE_TABLE), MetadataSpec::OnSide(Block::MARK_TABLE), MetadataSpec::OnSide(ChunkMap::ALLOC_TABLE), - *VM::VMObjectModel::LOCAL_MARK_BIT_SPEC, - *VM::VMObjectModel::LOCAL_FORWARDING_BITS_SPEC, - *VM::VMObjectModel::LOCAL_FORWARDING_POINTER_SPEC, + *VM::LOCAL_MARK_BIT_SPEC, + *VM::LOCAL_FORWARDING_BITS_SPEC, + *VM::LOCAL_FORWARDING_POINTER_SPEC, #[cfg(feature = "object_pinning")] - *VM::VMObjectModel::LOCAL_PINNING_BIT_SPEC, + *VM::LOCAL_PINNING_BIT_SPEC, ] }) } @@ -362,7 +362,7 @@ impl ImmixSpace { pub fn prepare(&mut self, major_gc: bool, plan_stats: StatsForDefrag) { if major_gc { // Update mark_state - if VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.is_on_side() { + if VM::LOCAL_MARK_BIT_SPEC.is_on_side() { self.mark_state = Self::MARKED_STATE; } else { // For header metadata, we use cyclic mark bits. @@ -666,15 +666,14 @@ impl ImmixSpace { + crate::util::constants::LOG_MIN_OBJECT_SIZE)) ); const_assert_eq!( - crate::vm::object_model::specs::VMGlobalLogBitSpec::LOG_NUM_BITS, + crate::vm::metadata_specs::VMGlobalLogBitSpec::LOG_NUM_BITS, 0 ); // We should put this to the addition, but type casting is not allowed in constant assertions. // Every immix line is 256 bytes, which is mapped to 4 bytes in the side metadata. // If we have one object in the line that is mature, we can assume all the objects in the line are mature objects. // So we can just mark the byte. - VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC - .mark_byte_as_unlogged::(object, Ordering::Relaxed); + VM::GLOBAL_LOG_BIT_SPEC.mark_byte_as_unlogged::(object, Ordering::Relaxed); } } @@ -688,16 +687,13 @@ impl ImmixSpace { /// Atomically mark an object. 
fn attempt_mark(&self, object: ObjectReference, mark_state: u8) -> bool { loop { - let old_value = VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.load_atomic::( - object, - None, - Ordering::SeqCst, - ); + let old_value = + VM::LOCAL_MARK_BIT_SPEC.load_atomic::(object, None, Ordering::SeqCst); if old_value == mark_state { return false; } - if VM::VMObjectModel::LOCAL_MARK_BIT_SPEC + if VM::LOCAL_MARK_BIT_SPEC .compare_exchange_metadata::( object, old_value, @@ -716,11 +712,8 @@ impl ImmixSpace { /// Check if an object is marked. fn is_marked_with(&self, object: ObjectReference, mark_state: u8) -> bool { - let old_value = VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.load_atomic::( - object, - None, - Ordering::SeqCst, - ); + let old_value = + VM::LOCAL_MARK_BIT_SPEC.load_atomic::(object, None, Ordering::SeqCst); old_value == mark_state } @@ -795,7 +788,7 @@ impl ImmixSpace { /// Post copy routine for Immix copy contexts fn post_copy(&self, object: ObjectReference, _bytes: usize) { // Mark the object - VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.store_atomic::( + VM::LOCAL_MARK_BIT_SPEC.store_atomic::( object, self.mark_state, None, @@ -821,11 +814,11 @@ impl PrepareBlockState { fn reset_object_mark(&self) { // NOTE: We reset the mark bits because cyclic mark bit is currently not supported, yet. // See `ImmixSpace::prepare`. - if let MetadataSpec::OnSide(side) = *VM::VMObjectModel::LOCAL_MARK_BIT_SPEC { + if let MetadataSpec::OnSide(side) = *VM::LOCAL_MARK_BIT_SPEC { side.bzero_metadata(self.chunk.start(), Chunk::BYTES); } if self.space.space_args.reset_log_bit_in_major_gc { - if let MetadataSpec::OnSide(side) = *VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC { + if let MetadataSpec::OnSide(side) = *VM::GLOBAL_LOG_BIT_SPEC { // We zero all the log bits in major GC, and for every object we trace, we will mark the log bit again. side.bzero_metadata(self.chunk.start(), Chunk::BYTES); } else { @@ -836,7 +829,7 @@ impl PrepareBlockState { } } // If the forwarding bits are on the side, we need to clear them, too. - if let MetadataSpec::OnSide(side) = *VM::VMObjectModel::LOCAL_FORWARDING_BITS_SPEC { + if let MetadataSpec::OnSide(side) = *VM::LOCAL_FORWARDING_BITS_SPEC { side.bzero_metadata(self.chunk.start(), Chunk::BYTES); } } diff --git a/src/policy/immix/line.rs b/src/policy/immix/line.rs index 94036ecc65..6b5a17cc19 100644 --- a/src/policy/immix/line.rs +++ b/src/policy/immix/line.rs @@ -65,7 +65,7 @@ impl Line { pub fn mark_lines_for_object(object: ObjectReference, state: u8) -> usize { debug_assert!(!super::BLOCK_ONLY); let start = object.to_object_start::(); - let end = start + VM::VMObjectModel::get_current_size(object); + let end = start + VM::get_object_size(object); let start_line = Line::from_unaligned_address(start); let mut end_line = Line::from_unaligned_address(end); if !Line::is_aligned(end) { diff --git a/src/policy/immortalspace.rs b/src/policy/immortalspace.rs index 5eeebd58c9..20cb4d6c46 100644 --- a/src/policy/immortalspace.rs +++ b/src/policy/immortalspace.rs @@ -11,7 +11,7 @@ use crate::util::{metadata, ObjectReference}; use crate::plan::{ObjectQueue, VectorObjectQueue}; use crate::policy::sft::GCWorkerMutRef; -use crate::vm::{ObjectModel, VMBinding}; +use crate::vm::VMBinding; /// This type implements a simple immortal collection /// policy. 
Under this policy all that is required is for the @@ -58,7 +58,7 @@ impl SFT for ImmortalSpace { self.mark_state .on_object_metadata_initialization::(object); if self.common.needs_log_bit { - VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.mark_as_unlogged::(object, Ordering::SeqCst); + VM::GLOBAL_LOG_BIT_SPEC.mark_as_unlogged::(object, Ordering::SeqCst); } #[cfg(feature = "vo_bit")] crate::util::metadata::vo_bit::set_vo_bit::(object); @@ -125,7 +125,7 @@ impl ImmortalSpace { let common = CommonSpace::new(args.into_policy_args( false, true, - metadata::extract_side_metadata(&[*VM::VMObjectModel::LOCAL_MARK_BIT_SPEC]), + metadata::extract_side_metadata(&[*VM::LOCAL_MARK_BIT_SPEC]), )); ImmortalSpace { mark_state: MarkState::new(), @@ -152,7 +152,7 @@ impl ImmortalSpace { common: CommonSpace::new(args.into_policy_args( false, true, - metadata::extract_side_metadata(&[*VM::VMObjectModel::LOCAL_MARK_BIT_SPEC]), + metadata::extract_side_metadata(&[*VM::LOCAL_MARK_BIT_SPEC]), )), vm_space: true, } diff --git a/src/policy/largeobjectspace.rs b/src/policy/largeobjectspace.rs index ec6b2f7506..c39317b477 100644 --- a/src/policy/largeobjectspace.rs +++ b/src/policy/largeobjectspace.rs @@ -11,7 +11,6 @@ use crate::util::metadata; use crate::util::opaque_pointer::*; use crate::util::treadmill::TreadMill; use crate::util::{Address, ObjectReference}; -use crate::vm::ObjectModel; use crate::vm::VMBinding; #[allow(unused)] @@ -57,16 +56,13 @@ impl SFT for LargeObjectSpace { true } fn initialize_object_metadata(&self, object: ObjectReference, alloc: bool) { - let old_value = VM::VMObjectModel::LOCAL_LOS_MARK_NURSERY_SPEC.load_atomic::( - object, - None, - Ordering::SeqCst, - ); + let old_value = + VM::LOCAL_LOS_MARK_NURSERY_SPEC.load_atomic::(object, None, Ordering::SeqCst); let mut new_value = (old_value & (!LOS_BIT_MASK)) | self.mark_state; if alloc { new_value |= NURSERY_BIT; } - VM::VMObjectModel::LOCAL_LOS_MARK_NURSERY_SPEC.store_atomic::( + VM::LOCAL_LOS_MARK_NURSERY_SPEC.store_atomic::( object, new_value, None, @@ -75,7 +71,7 @@ impl SFT for LargeObjectSpace { // If this object is freshly allocated, we do not set it as unlogged if !alloc && self.common.needs_log_bit { - VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.mark_as_unlogged::(object, Ordering::SeqCst); + VM::GLOBAL_LOG_BIT_SPEC.mark_as_unlogged::(object, Ordering::SeqCst); } #[cfg(feature = "vo_bit")] @@ -148,7 +144,7 @@ impl LargeObjectSpace { let common = CommonSpace::new(args.into_policy_args( false, false, - metadata::extract_side_metadata(&[*VM::VMObjectModel::LOCAL_LOS_MARK_NURSERY_SPEC]), + metadata::extract_side_metadata(&[*VM::LOCAL_LOS_MARK_NURSERY_SPEC]), )); let mut pr = if is_discontiguous { FreeListPageResource::new_discontiguous(vm_map) @@ -209,8 +205,7 @@ impl LargeObjectSpace { self.treadmill.copy(object, nursery_object); // We just moved the object out of the logical nursery, mark it as unlogged. 
if nursery_object && self.common.needs_log_bit { - VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC - .mark_as_unlogged::(object, Ordering::SeqCst); + VM::GLOBAL_LOG_BIT_SPEC.mark_as_unlogged::(object, Ordering::SeqCst); } queue.enqueue(object); } else { @@ -257,7 +252,7 @@ impl LargeObjectSpace { } else { MARK_BIT }; - let old_value = VM::VMObjectModel::LOCAL_LOS_MARK_NURSERY_SPEC.load_atomic::( + let old_value = VM::LOCAL_LOS_MARK_NURSERY_SPEC.load_atomic::( object, None, Ordering::SeqCst, @@ -267,7 +262,7 @@ impl LargeObjectSpace { return false; } // using LOS_BIT_MASK have side effects of clearing nursery bit - if VM::VMObjectModel::LOCAL_LOS_MARK_NURSERY_SPEC + if VM::LOCAL_LOS_MARK_NURSERY_SPEC .compare_exchange_metadata::( object, old_value, @@ -285,21 +280,15 @@ impl LargeObjectSpace { } fn test_mark_bit(&self, object: ObjectReference, value: u8) -> bool { - VM::VMObjectModel::LOCAL_LOS_MARK_NURSERY_SPEC.load_atomic::( - object, - None, - Ordering::SeqCst, - ) & MARK_BIT + VM::LOCAL_LOS_MARK_NURSERY_SPEC.load_atomic::(object, None, Ordering::SeqCst) + & MARK_BIT == value } /// Check if a given object is in nursery fn is_in_nursery(&self, object: ObjectReference) -> bool { - VM::VMObjectModel::LOCAL_LOS_MARK_NURSERY_SPEC.load_atomic::( - object, - None, - Ordering::Relaxed, - ) & NURSERY_BIT + VM::LOCAL_LOS_MARK_NURSERY_SPEC.load_atomic::(object, None, Ordering::Relaxed) + & NURSERY_BIT == NURSERY_BIT } } diff --git a/src/policy/markcompactspace.rs b/src/policy/markcompactspace.rs index 693218b492..f511dd5de6 100644 --- a/src/policy/markcompactspace.rs +++ b/src/policy/markcompactspace.rs @@ -202,7 +202,7 @@ impl MarkCompactSpace { pub fn new(args: crate::policy::space::PlanCreateSpaceArgs) -> Self { let vm_map = args.vm_map; let is_discontiguous = args.vmrequest.is_discontiguous(); - let local_specs = extract_side_metadata(&[*VM::VMObjectModel::LOCAL_MARK_BIT_SPEC]); + let local_specs = extract_side_metadata(&[*VM::LOCAL_MARK_BIT_SPEC]); let common = CommonSpace::new(args.into_policy_args(true, false, local_specs)); MarkCompactSpace { pr: if is_discontiguous { @@ -255,16 +255,13 @@ impl MarkCompactSpace { pub fn test_and_mark(object: ObjectReference) -> bool { loop { - let old_value = VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.load_atomic::( - object, - None, - Ordering::SeqCst, - ); + let old_value = + VM::LOCAL_MARK_BIT_SPEC.load_atomic::(object, None, Ordering::SeqCst); let mark_bit = old_value & GC_MARK_BIT_MASK; if mark_bit != 0 { return false; } - if VM::VMObjectModel::LOCAL_MARK_BIT_SPEC + if VM::LOCAL_MARK_BIT_SPEC .compare_exchange_metadata::( object, old_value, @@ -283,17 +280,14 @@ impl MarkCompactSpace { pub fn test_and_clear_mark(object: ObjectReference) -> bool { loop { - let old_value = VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.load_atomic::( - object, - None, - Ordering::SeqCst, - ); + let old_value = + VM::LOCAL_MARK_BIT_SPEC.load_atomic::(object, None, Ordering::SeqCst); let mark_bit = old_value & GC_MARK_BIT_MASK; if mark_bit == 0 { return false; } - if VM::VMObjectModel::LOCAL_MARK_BIT_SPEC + if VM::LOCAL_MARK_BIT_SPEC .compare_exchange_metadata::( object, old_value, @@ -311,11 +305,8 @@ impl MarkCompactSpace { } pub fn is_marked(object: ObjectReference) -> bool { - let old_value = VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.load_atomic::( - object, - None, - Ordering::SeqCst, - ); + let old_value = + VM::LOCAL_MARK_BIT_SPEC.load_atomic::(object, None, Ordering::SeqCst); let mark_bit = old_value & GC_MARK_BIT_MASK; mark_bit != 0 } @@ -346,9 +337,9 @@ impl MarkCompactSpace { 
.filter(Self::to_be_compacted) { let copied_size = - VM::VMObjectModel::get_size_when_copied(obj) + Self::HEADER_RESERVED_IN_BYTES; - let align = VM::VMObjectModel::get_align_when_copied(obj); - let offset = VM::VMObjectModel::get_align_offset_when_copied(obj); + VM::get_object_size_when_copied(obj) + Self::HEADER_RESERVED_IN_BYTES; + let align = VM::get_object_align_when_copied(obj); + let offset = VM::get_object_align_offset_when_copied(obj); // move to_cursor to aliged start address to_cursor = align_allocation_no_fill::(to_cursor, align, offset); // move to next to-block if there is no sufficient memory in current region @@ -359,7 +350,7 @@ impl MarkCompactSpace { debug_assert!(to_cursor + copied_size <= to_end); } // Get copied object - let new_obj = VM::VMObjectModel::get_reference_when_copied_to( + let new_obj = VM::get_object_reference_when_copied_to( obj, to_cursor + Self::HEADER_RESERVED_IN_BYTES, ); @@ -368,7 +359,7 @@ impl MarkCompactSpace { trace!( "Calculate forward: {} (size when copied = {}) ~> {} (size = {})", obj, - VM::VMObjectModel::get_size_when_copied(obj), + VM::get_object_size_when_copied(obj), to_cursor, copied_size ); @@ -383,7 +374,7 @@ impl MarkCompactSpace { for (from_start, size) in self.pr.iterate_allocated_regions() { let from_end = from_start + size; for obj in self.linear_scan_objects(from_start..from_end) { - let copied_size = VM::VMObjectModel::get_size_when_copied(obj); + let copied_size = VM::get_object_size_when_copied(obj); // clear the VO bit vo_bit::unset_vo_bit::(obj); @@ -396,8 +387,7 @@ impl MarkCompactSpace { // copy object trace!(" copy from {} to {}", obj, new_object); - let end_of_new_object = - VM::VMObjectModel::copy_to(obj, new_object, Address::ZERO); + let end_of_new_object = VM::copy_object_to(obj, new_object, Address::ZERO); // update VO bit, vo_bit::set_vo_bit::(new_object); to = new_object.to_object_start::() + copied_size; @@ -416,6 +406,6 @@ impl MarkCompactSpace { struct MarkCompactObjectSize(std::marker::PhantomData); impl crate::util::linear_scan::LinearScanObjectSize for MarkCompactObjectSize { fn size(object: ObjectReference) -> usize { - VM::VMObjectModel::get_current_size(object) + VM::get_object_size(object) } } diff --git a/src/policy/marksweepspace/malloc_ms/global.rs b/src/policy/marksweepspace/malloc_ms/global.rs index 7454fbb287..472ba5c2c0 100644 --- a/src/policy/marksweepspace/malloc_ms/global.rs +++ b/src/policy/marksweepspace/malloc_ms/global.rs @@ -20,7 +20,6 @@ use crate::util::Address; use crate::util::ObjectReference; use crate::util::{conversions, metadata}; use crate::vm::VMBinding; -use crate::vm::{ActivePlan, Collection, ObjectModel}; use crate::{policy::space::Space, util::heap::layout::vm_layout::BYTES_IN_CHUNK}; #[cfg(debug_assertions)] use std::collections::HashMap; @@ -264,7 +263,7 @@ impl MallocSpace { local: metadata::extract_side_metadata(&[ MetadataSpec::OnSide(ACTIVE_PAGE_METADATA_SPEC), MetadataSpec::OnSide(OFFSET_MALLOC_METADATA_SPEC), - *VM::VMObjectModel::LOCAL_MARK_BIT_SPEC, + *VM::LOCAL_MARK_BIT_SPEC, ]), }, scheduler: args.scheduler.clone(), @@ -330,8 +329,8 @@ impl MallocSpace { pub fn alloc(&self, tls: VMThread, size: usize, align: usize, offset: usize) -> Address { // TODO: Should refactor this and Space.acquire() if self.get_gc_trigger().poll(false, Some(self)) { - assert!(VM::VMActivePlan::is_mutator(tls), "Polling in GC worker"); - VM::VMCollection::block_for_gc(VMMutatorThread(tls)); + assert!(VM::is_mutator(tls), "Polling in GC worker"); + VM::block_for_gc(VMMutatorThread(tls)); 
return unsafe { Address::zero() }; } @@ -496,7 +495,7 @@ impl MallocSpace { pub fn sweep_chunk(&self, chunk_start: Address) { // Call the relevant sweep function depending on the location of the mark bits - match *VM::VMObjectModel::LOCAL_MARK_BIT_SPEC { + match *VM::LOCAL_MARK_BIT_SPEC { MetadataSpec::OnSide(local_mark_bit_side_spec) => { self.sweep_chunk_mark_on_side(chunk_start, local_mark_bit_side_spec); } diff --git a/src/policy/marksweepspace/malloc_ms/metadata.rs b/src/policy/marksweepspace/malloc_ms/metadata.rs index 1216c50120..7121c94ad5 100644 --- a/src/policy/marksweepspace/malloc_ms/metadata.rs +++ b/src/policy/marksweepspace/malloc_ms/metadata.rs @@ -6,7 +6,7 @@ use crate::util::metadata::side_metadata::SideMetadataSpec; use crate::util::metadata::vo_bit; use crate::util::Address; use crate::util::ObjectReference; -use crate::vm::{ObjectModel, VMBinding}; +use crate::vm::VMBinding; use std::sync::atomic::Ordering; use std::sync::Mutex; @@ -171,11 +171,11 @@ pub fn has_object_alloced_by_malloc(addr: Address) -> Option(object: ObjectReference, ordering: Ordering) -> bool { - VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.load_atomic::(object, None, ordering) == 1 + VM::LOCAL_MARK_BIT_SPEC.load_atomic::(object, None, ordering) == 1 } pub unsafe fn is_marked_unsafe(object: ObjectReference) -> bool { - VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.load::(object, None) == 1 + VM::LOCAL_MARK_BIT_SPEC.load::(object, None) == 1 } /// Set the page mark from 0 to 1. Return true if we set it successfully in this call. @@ -223,7 +223,7 @@ pub fn set_vo_bit(object: ObjectReference) { } pub fn set_mark_bit(object: ObjectReference, ordering: Ordering) { - VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.store_atomic::(object, 1, None, ordering); + VM::LOCAL_MARK_BIT_SPEC.store_atomic::(object, 1, None, ordering); } #[allow(unused)] @@ -261,7 +261,7 @@ pub unsafe fn unset_vo_bit_unsafe(object: ObjectReference) { #[allow(unused)] pub unsafe fn unset_mark_bit(object: ObjectReference) { - VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.store::(object, 0, None); + VM::LOCAL_MARK_BIT_SPEC.store::(object, 0, None); } #[allow(unused)] diff --git a/src/policy/marksweepspace/native_ms/block.rs b/src/policy/marksweepspace/native_ms/block.rs index 625a82d851..2ec0d8dec1 100644 --- a/src/policy/marksweepspace/native_ms/block.rs +++ b/src/policy/marksweepspace/native_ms/block.rs @@ -6,7 +6,6 @@ use super::BlockList; use super::MarkSweepSpace; use crate::util::heap::chunk_map::*; use crate::util::linear_scan::Region; -use crate::vm::ObjectModel; use crate::{ util::{ metadata::side_metadata::SideMetadataSpec, Address, ObjectReference, OpaquePointer, @@ -267,7 +266,7 @@ impl Block { self.load_block_cell_size(), VM::MAX_ALIGNMENT, ) - && VM::VMObjectModel::UNIFIED_OBJECT_REFERENCE_ADDRESS + && VM::UNIFIED_OBJECT_REFERENCE_ADDRESS { // In this case, we can use the simplest and the most efficicent sweep. self.simple_sweep::() @@ -289,9 +288,7 @@ impl Block { // We may not really have an object in this cell, but if we do, this object reference is correct. let potential_object = ObjectReference::from_raw_address(cell); - if !VM::VMObjectModel::LOCAL_MARK_BIT_SPEC - .is_marked::(potential_object, Ordering::SeqCst) - { + if !VM::LOCAL_MARK_BIT_SPEC.is_marked::(potential_object, Ordering::SeqCst) { // clear VO bit if it is ever set. It is possible that the VO bit is never set for this cell (i.e. there was no object in this cell before this GC), // we unset the bit anyway. 
#[cfg(feature = "vo_bit")] @@ -327,9 +324,8 @@ impl Block { while cell + cell_size <= self.end() { // possible object ref - let potential_object_ref = ObjectReference::from_raw_address( - cursor + VM::VMObjectModel::OBJECT_REF_OFFSET_LOWER_BOUND, - ); + let potential_object_ref = + ObjectReference::from_raw_address(cursor + VM::OBJECT_REF_OFFSET_LOWER_BOUND); trace!( "{:?}: cell = {}, last cell in free list = {}, cursor = {}, potential object = {}", self, @@ -339,9 +335,7 @@ impl Block { potential_object_ref ); - if VM::VMObjectModel::LOCAL_MARK_BIT_SPEC - .is_marked::(potential_object_ref, Ordering::SeqCst) - { + if VM::LOCAL_MARK_BIT_SPEC.is_marked::(potential_object_ref, Ordering::SeqCst) { debug!("{:?} Live cell: {}", self, cell); // If the mark bit is set, the cell is alive. // We directly jump to the end of the cell. diff --git a/src/policy/marksweepspace/native_ms/global.rs b/src/policy/marksweepspace/native_ms/global.rs index 8d8eae7d0e..19bccbc95a 100644 --- a/src/policy/marksweepspace/native_ms/global.rs +++ b/src/policy/marksweepspace/native_ms/global.rs @@ -25,7 +25,6 @@ use crate::util::constants::LOG_BYTES_IN_PAGE; use crate::util::heap::chunk_map::*; use crate::util::linear_scan::Region; use crate::util::VMThread; -use crate::vm::ObjectModel; use std::sync::Mutex; /// The result for `MarkSweepSpace.acquire_block()`. `MarkSweepSpace` will attempt @@ -96,7 +95,7 @@ impl SFT for MarkSweepSpace { } fn is_live(&self, object: crate::util::ObjectReference) -> bool { - VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.is_marked::(object, Ordering::SeqCst) + VM::LOCAL_MARK_BIT_SPEC.is_marked::(object, Ordering::SeqCst) } #[cfg(feature = "object_pinning")] @@ -215,7 +214,7 @@ impl MarkSweepSpace { MetadataSpec::OnSide(Block::TLS_TABLE), MetadataSpec::OnSide(Block::MARK_TABLE), MetadataSpec::OnSide(ChunkMap::ALLOC_TABLE), - *VM::VMObjectModel::LOCAL_MARK_BIT_SPEC, + *VM::LOCAL_MARK_BIT_SPEC, ]) }; let common = CommonSpace::new(args.into_policy_args(false, false, local_specs)); @@ -249,8 +248,8 @@ impl MarkSweepSpace { "Cannot mark an object {} that was not alloced by free list allocator.", object, ); - if !VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.is_marked::(object, Ordering::SeqCst) { - VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.mark::(object, Ordering::SeqCst); + if !VM::LOCAL_MARK_BIT_SPEC.is_marked::(object, Ordering::SeqCst) { + VM::LOCAL_MARK_BIT_SPEC.mark::(object, Ordering::SeqCst); let block = Block::containing::(object); block.set_state(BlockState::Marked); queue.enqueue(object); @@ -268,7 +267,7 @@ impl MarkSweepSpace { } pub fn prepare(&mut self) { - if let MetadataSpec::OnSide(side) = *VM::VMObjectModel::LOCAL_MARK_BIT_SPEC { + if let MetadataSpec::OnSide(side) = *VM::LOCAL_MARK_BIT_SPEC { for chunk in self.chunk_map.all_chunks() { side.bzero_metadata(chunk.start(), Chunk::BYTES); } diff --git a/src/policy/space.rs b/src/policy/space.rs index 63bae58b52..db294e5b91 100644 --- a/src/policy/space.rs +++ b/src/policy/space.rs @@ -11,7 +11,6 @@ use crate::util::ObjectReference; use crate::util::heap::layout::vm_layout::{vm_layout, LOG_BYTES_IN_CHUNK}; use crate::util::heap::{PageResource, VMRequest}; use crate::util::options::Options; -use crate::vm::{ActivePlan, Collection}; use crate::util::constants::{LOG_BYTES_IN_MBYTE, LOG_BYTES_IN_PAGE}; use crate::util::conversions; @@ -66,10 +65,7 @@ pub trait Space: 'static + SFT + Sync + Downcast { let max_pages = self.get_gc_trigger().policy.get_max_heap_size_in_pages(); let requested_pages = size >> LOG_BYTES_IN_PAGE; if requested_pages > max_pages 
{ - VM::VMCollection::out_of_memory( - tls, - crate::util::alloc::AllocationError::HeapOutOfMemory, - ); + VM::out_of_memory(tls, crate::util::alloc::AllocationError::HeapOutOfMemory); return true; } false @@ -86,7 +82,7 @@ pub trait Space: 'static + SFT + Sync + Downcast { // Should we poll to attempt to GC? // - If tls is collector, we cannot attempt a GC. // - If gc is disabled, we cannot attempt a GC. - let should_poll = VM::VMActivePlan::is_mutator(tls) + let should_poll = VM::is_mutator(tls) && self .common() .global_state @@ -111,7 +107,7 @@ pub trait Space: 'static + SFT + Sync + Downcast { .policy .on_pending_allocation(pages_reserved); - VM::VMCollection::block_for_gc(VMMutatorThread(tls)); // We have checked that this is mutator + VM::block_for_gc(VMMutatorThread(tls)); // We have checked that this is mutator unsafe { Address::zero() } } else { debug!("Collection not required"); @@ -224,7 +220,7 @@ pub trait Space: 'static + SFT + Sync + Downcast { .policy .on_pending_allocation(pages_reserved); - VM::VMCollection::block_for_gc(VMMutatorThread(tls)); // We asserted that this is mutator. + VM::block_for_gc(VMMutatorThread(tls)); // We asserted that this is mutator. unsafe { Address::zero() } } } diff --git a/src/policy/vmspace.rs b/src/policy/vmspace.rs index 7aa1eb9903..8057213da9 100644 --- a/src/policy/vmspace.rs +++ b/src/policy/vmspace.rs @@ -11,7 +11,7 @@ use crate::util::heap::PageResource; use crate::util::metadata::mark_bit::MarkState; use crate::util::opaque_pointer::*; use crate::util::ObjectReference; -use crate::vm::{ObjectModel, VMBinding}; +use crate::vm::VMBinding; use std::sync::atomic::Ordering; @@ -58,7 +58,7 @@ impl SFT for VMSpace { self.mark_state .on_object_metadata_initialization::(object); if self.common.needs_log_bit { - VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.mark_as_unlogged::(object, Ordering::SeqCst); + VM::GLOBAL_LOG_BIT_SPEC.mark_as_unlogged::(object, Ordering::SeqCst); } #[cfg(feature = "vo_bit")] crate::util::metadata::vo_bit::set_vo_bit::(object); @@ -173,9 +173,7 @@ impl VMSpace { common: CommonSpace::new(args.into_policy_args( false, true, - crate::util::metadata::extract_side_metadata(&[ - *VM::VMObjectModel::LOCAL_MARK_BIT_SPEC, - ]), + crate::util::metadata::extract_side_metadata(&[*VM::LOCAL_MARK_BIT_SPEC]), )), }; diff --git a/src/scheduler/gc_work.rs b/src/scheduler/gc_work.rs index 7d0add88a4..4908c12062 100644 --- a/src/scheduler/gc_work.rs +++ b/src/scheduler/gc_work.rs @@ -60,7 +60,7 @@ impl GCWork for Prepare { plan_mut.prepare(worker.tls); if plan_mut.constraints().needs_prepare_mutator { - for mutator in ::VMActivePlan::mutators() { + for mutator in ::mutators() { mmtk.scheduler.work_buckets[WorkBucketStage::Prepare] .add(PrepareMutator::::new(mutator)); } @@ -133,7 +133,7 @@ impl GCWork for Release { let plan_mut: &mut C::PlanType = unsafe { &mut *(self.plan as *const _ as *mut _) }; plan_mut.release(worker.tls); - for mutator in ::VMActivePlan::mutators() { + for mutator in ::mutators() { mmtk.scheduler.work_buckets[WorkBucketStage::Release] .add(ReleaseMutator::::new(mutator)); } @@ -200,7 +200,7 @@ impl GCWork for StopMutators { fn do_work(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { trace!("stop_all_mutators start"); mmtk.state.prepare_for_stack_scanning(); - ::VMCollection::stop_all_mutators(worker.tls, |mutator| { + ::stop_all_mutators(worker.tls, |mutator| { // TODO: The stack scanning work won't start immediately, as the `Prepare` bucket is not opened yet (the bucket is opened in notify_mutators_paused). 
// Should we push to Unconstrained instead? mmtk.scheduler.work_buckets[WorkBucketStage::Prepare] @@ -259,7 +259,7 @@ impl GCWork for EndOfGC { // Set to NotInGC after everything, and right before resuming mutators. mmtk.set_gc_status(GcStatus::NotInGC); - ::VMCollection::resume_mutators(worker.tls); + ::resume_mutators(worker.tls); } } @@ -382,7 +382,7 @@ impl GCWork for VMProcessWeakRefs { stage, phantom_data: PhantomData, }; - ::VMScanning::process_weak_refs(worker, tracer_factory) + ::process_weak_refs(worker, tracer_factory) }; if need_to_repeat { @@ -424,7 +424,7 @@ impl GCWork for VMForwardWeakRefs { stage, phantom_data: PhantomData, }; - ::VMScanning::forward_weak_refs(worker, tracer_factory) + ::forward_weak_refs(worker, tracer_factory) } } @@ -442,7 +442,7 @@ pub struct VMPostForwarding { impl GCWork for VMPostForwarding { fn do_work(&mut self, worker: &mut GCWorker, _mmtk: &'static MMTK) { trace!("VMPostForwarding start"); - ::VMCollection::post_forwarding(worker.tls); + ::post_forwarding(worker.tls); trace!("VMPostForwarding end"); } } @@ -452,13 +452,13 @@ pub struct ScanMutatorRoots(pub &'static mut Mutator); impl GCWork for ScanMutatorRoots { fn do_work(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { trace!("ScanMutatorRoots for mutator {:?}", self.0.get_tls()); - let mutators = ::VMActivePlan::number_of_mutators(); + let mutators = ::number_of_mutators(); let factory = ProcessEdgesWorkRootsWorkFactory::< C::VM, C::ProcessEdgesWorkType, C::TPProcessEdges, >::new(mmtk); - ::VMScanning::scan_roots_in_mutator_thread( + ::scan_roots_in_mutator_thread( worker.tls, unsafe { &mut *(self.0 as *mut _) }, factory, @@ -466,9 +466,7 @@ impl GCWork for ScanMutatorRoots { self.0.flush(); if mmtk.state.inform_stack_scanned(mutators) { - ::VMScanning::notify_initial_thread_scan_complete( - false, worker.tls, - ); + ::notify_initial_thread_scan_complete(false, worker.tls); mmtk.set_gc_status(GcStatus::GcProper); } } @@ -491,7 +489,7 @@ impl GCWork for ScanVMSpecificRoots { C::ProcessEdgesWorkType, C::TPProcessEdges, >::new(mmtk); - ::VMScanning::scan_vm_specific_roots(worker.tls, factory); + ::scan_vm_specific_roots(worker.tls, factory); } } @@ -852,12 +850,12 @@ pub trait ScanObjectsWork: GCWork + Sized { closure .worker .shared - .increase_live_bytes(VM::VMObjectModel::get_current_size(object)); + .increase_live_bytes(VM::get_object_size(object)); - if ::VMScanning::support_edge_enqueuing(tls, object) { + if ::support_edge_enqueuing(tls, object) { trace!("Scan object (edge) {}", object); // If an object supports edge-enqueuing, we enqueue its edges. - ::VMScanning::scan_object(tls, object, &mut closure); + ::scan_object(tls, object, &mut closure); self.post_scan_object(object); } else { // If an object does not support edge-enqueuing, we have to use @@ -882,11 +880,7 @@ pub trait ScanObjectsWork: GCWork + Sized { // Scan objects and trace their edges at the same time. 
for object in scan_later.iter().copied() { trace!("Scan object (node) {}", object); - ::VMScanning::scan_object_and_trace_edges( - tls, - object, - object_tracer, - ); + ::scan_object_and_trace_edges(tls, object, object_tracer); self.post_scan_object(object); } }); diff --git a/src/scheduler/scheduler.rs b/src/scheduler/scheduler.rs index 5c71296eca..bb2c62f938 100644 --- a/src/scheduler/scheduler.rs +++ b/src/scheduler/scheduler.rs @@ -6,7 +6,6 @@ use crate::mmtk::MMTK; use crate::util::opaque_pointer::*; use crate::util::options::AffinityKind; use crate::util::rust_util::array_from_fn; -use crate::vm::Collection; use crate::vm::{GCThreadContext, VMBinding}; use crossbeam::deque::{self, Steal}; use enum_map::{Enum, EnumMap}; @@ -97,7 +96,7 @@ impl GCWorkScheduler { self.clone(), coordinator_worker, ); - VM::VMCollection::spawn_gc_thread(tls, GCThreadContext::::Controller(gc_controller)); + VM::spawn_gc_thread(tls, GCThreadContext::::Controller(gc_controller)); self.worker_group.spawn(mmtk, tls) } diff --git a/src/scheduler/worker.rs b/src/scheduler/worker.rs index 07f631ace0..d59cd88600 100644 --- a/src/scheduler/worker.rs +++ b/src/scheduler/worker.rs @@ -4,7 +4,7 @@ use super::*; use crate::mmtk::MMTK; use crate::util::copy::GCWorkerCopyContext; use crate::util::opaque_pointer::*; -use crate::vm::{Collection, GCThreadContext, VMBinding}; +use crate::vm::{GCThreadContext, VMBinding}; use atomic::Atomic; use atomic_refcell::{AtomicRef, AtomicRefCell, AtomicRefMut}; use crossbeam::deque::{self, Stealer}; @@ -425,7 +425,7 @@ impl WorkerGroup { shared.clone(), unspawned_local_work_queues.pop().unwrap(), )); - VM::VMCollection::spawn_gc_thread(tls, GCThreadContext::::Worker(worker)); + VM::spawn_gc_thread(tls, GCThreadContext::::Worker(worker)); } debug_assert!(unspawned_local_work_queues.is_empty()); } diff --git a/src/util/address.rs b/src/util/address.rs index 0f860a4436..c895740d8d 100644 --- a/src/util/address.rs +++ b/src/util/address.rs @@ -474,7 +474,7 @@ use crate::vm::VMBinding; /// /// We currently do not allow an opaque `ObjectReference` type for which a binding can define /// their layout. We now only allow a binding to define their semantics through a set of -/// methods in [`crate::vm::ObjectModel`]. Major refactoring is needed in MMTk to allow +/// methods in [`crate::vm::VMBinding`]. Major refactoring is needed in MMTk to allow /// the opaque `ObjectReference` type, and we haven't seen a use case for now. #[repr(transparent)] #[derive(Copy, Clone, Eq, Hash, PartialOrd, Ord, PartialEq)] @@ -488,7 +488,7 @@ impl ObjectReference { /// /// MMTk should not make any assumption on the actual location of the address with the object reference. /// MMTk should not assume the address returned by this method is in our allocation. For the purposes of - /// setting object metadata, MMTk should use [`crate::vm::ObjectModel::ref_to_address()`] or [`crate::vm::ObjectModel::ref_to_header()`]. + /// setting object metadata, MMTk should use [`crate::vm::VMBinding::ref_to_address()`] or [`crate::vm::VMBinding::ref_to_header()`]. pub fn to_raw_address(self) -> Address { Address(self.0) } @@ -496,47 +496,43 @@ impl ObjectReference { /// Cast a raw address to an object reference. This method is mostly for the convinience of a binding. /// This is how a binding creates `ObjectReference` instances. /// - /// MMTk should not assume an arbitrary address can be turned into an object reference. 
MMTk can use [`crate::vm::ObjectModel::address_to_ref()`] - /// to turn addresses that are from [`crate::vm::ObjectModel::ref_to_address()`] back to object. + /// MMTk should not assume an arbitrary address can be turned into an object reference. MMTk can use [`crate::vm::VMBinding::address_to_ref()`] + /// to turn addresses that are from [`crate::vm::VMBinding::ref_to_address()`] back to object. pub fn from_raw_address(addr: Address) -> ObjectReference { ObjectReference(addr.0) } /// Get the in-heap address from an object reference. This method is used by MMTk to get an in-heap address - /// for an object reference. This method is syntactic sugar for [`crate::vm::ObjectModel::ref_to_address`]. See the - /// comments on [`crate::vm::ObjectModel::ref_to_address`]. + /// for an object reference. This method is syntactic sugar for [`crate::vm::VMBinding::ref_to_address`]. See the + /// comments on [`crate::vm::VMBinding::ref_to_address`]. pub fn to_address(self) -> Address { - use crate::vm::ObjectModel; - let to_address = VM::VMObjectModel::ref_to_address(self); - debug_assert!(!VM::VMObjectModel::UNIFIED_OBJECT_REFERENCE_ADDRESS || to_address == self.to_raw_address(), "The binding claims unified object reference address, but for object reference {}, ref_to_address() returns {}", self, to_address); + let to_address = VM::ref_to_address(self); + debug_assert!(!VM::UNIFIED_OBJECT_REFERENCE_ADDRESS || to_address == self.to_raw_address(), "The binding claims unified object reference address, but for object reference {}, ref_to_address() returns {}", self, to_address); to_address } /// Get the header base address from an object reference. This method is used by MMTk to get a base address for the - /// object header, and access the object header. This method is syntactic sugar for [`crate::vm::ObjectModel::ref_to_header`]. - /// See the comments on [`crate::vm::ObjectModel::ref_to_header`]. + /// object header, and access the object header. This method is syntactic sugar for [`crate::vm::VMBinding::ref_to_header`]. + /// See the comments on [`crate::vm::VMBinding::ref_to_header`]. pub fn to_header(self) -> Address { - use crate::vm::ObjectModel; - VM::VMObjectModel::ref_to_header(self) + VM::ref_to_header(self) } /// Get the start of the allocation address for the object. This method is used by MMTk to get the start of the allocation /// address originally returned from [`crate::memory_manager::alloc`] for the object. - /// This method is syntactic sugar for [`crate::vm::ObjectModel::ref_to_object_start`]. See comments on [`crate::vm::ObjectModel::ref_to_object_start`]. + /// This method is syntactic sugar for [`crate::vm::VMBinding::ref_to_object_start`]. See comments on [`crate::vm::VMBinding::ref_to_object_start`]. 
pub fn to_object_start(self) -> Address { - use crate::vm::ObjectModel; - let object_start = VM::VMObjectModel::ref_to_object_start(self); - debug_assert!(!VM::VMObjectModel::UNIFIED_OBJECT_REFERENCE_ADDRESS || object_start == self.to_raw_address(), "The binding claims unified object reference address, but for object reference {}, ref_to_address() returns {}", self, object_start); + let object_start = VM::ref_to_object_start(self); + debug_assert!(!VM::UNIFIED_OBJECT_REFERENCE_ADDRESS || object_start == self.to_raw_address(), "The binding claims unified object reference address, but for object reference {}, ref_to_address() returns {}", self, object_start); object_start } /// Get the object reference from an address that is returned from [`crate::util::address::ObjectReference::to_address`] - /// or [`crate::vm::ObjectModel::ref_to_address`]. This method is syntactic sugar for [`crate::vm::ObjectModel::address_to_ref`]. - /// See the comments on [`crate::vm::ObjectModel::address_to_ref`]. + /// or [`crate::vm::VMBinding::ref_to_address`]. This method is syntactic sugar for [`crate::vm::VMBinding::address_to_ref`]. + /// See the comments on [`crate::vm::VMBinding::address_to_ref`]. pub fn from_address(addr: Address) -> ObjectReference { - use crate::vm::ObjectModel; - let obj = VM::VMObjectModel::address_to_ref(addr); - debug_assert!(!VM::VMObjectModel::UNIFIED_OBJECT_REFERENCE_ADDRESS || addr == obj.to_raw_address(), "The binding claims unified object reference address, but for address {}, address_to_ref() returns {}", addr, obj); + let obj = VM::address_to_ref(addr); + debug_assert!(!VM::UNIFIED_OBJECT_REFERENCE_ADDRESS || addr == obj.to_raw_address(), "The binding claims unified object reference address, but for address {}, address_to_ref() returns {}", addr, obj); obj } diff --git a/src/util/alloc/allocator.rs b/src/util/alloc/allocator.rs index a60d26935c..879d66b0c5 100644 --- a/src/util/alloc/allocator.rs +++ b/src/util/alloc/allocator.rs @@ -13,7 +13,6 @@ use crate::policy::space::Space; use crate::util::constants::*; use crate::util::opaque_pointer::*; use crate::vm::VMBinding; -use crate::vm::{ActivePlan, Collection}; use downcast_rs::Downcast; #[repr(C)] @@ -181,7 +180,7 @@ pub trait Allocator: Downcast { /// from its TLAB, otherwise it will default to using the slowpath, i.e. [`alloc_slow`](Allocator::alloc_slow). /// /// Note that in the case where the VM is out of memory, we invoke - /// [`Collection::out_of_memory`] to inform the binding and then return a null pointer back to + /// [`VMBinding::out_of_memory`] to inform the binding and then return a null pointer back to /// it. We have no assumptions on whether the VM will continue executing or abort immediately. /// /// An allocator needs to make sure the object reference for the returned address is in the same @@ -213,7 +212,7 @@ pub trait Allocator: Downcast { /// being used, the [`alloc_slow_once_precise_stress`](Allocator::alloc_slow_once_precise_stress) function is used instead. /// /// Note that in the case where the VM is out of memory, we invoke - /// [`Collection::out_of_memory`] with a [`AllocationError::HeapOutOfMemory`] error to inform + /// [`VMBinding::out_of_memory`] with a [`AllocationError::HeapOutOfMemory`] error to inform /// the binding and then return a null pointer back to it. We have no assumptions on whether /// the VM will continue executing or abort immediately on a /// [`AllocationError::HeapOutOfMemory`] error. 
@@ -224,7 +223,7 @@ pub trait Allocator: Downcast { /// * `offset` the required offset in bytes. fn alloc_slow_inline(&mut self, size: usize, align: usize, offset: usize) -> Address { let tls = self.get_tls(); - let is_mutator = VM::VMActivePlan::is_mutator(tls); + let is_mutator = VM::is_mutator(tls); let stress_test = self.get_context().options.is_stress_test_gc_enabled(); // Information about the previous collection. @@ -330,7 +329,7 @@ pub trait Allocator: Downcast { if fail_with_oom { // Note that we throw a `HeapOutOfMemory` error here and return a null ptr back to the VM trace!("Throw HeapOutOfMemory!"); - VM::VMCollection::out_of_memory(tls, AllocationError::HeapOutOfMemory); + VM::out_of_memory(tls, AllocationError::HeapOutOfMemory); self.get_context() .state .allocation_success diff --git a/src/util/copy/mod.rs b/src/util/copy/mod.rs index 7d256f830b..dabf1b589d 100644 --- a/src/util/copy/mod.rs +++ b/src/util/copy/mod.rs @@ -11,7 +11,6 @@ use crate::policy::space::Space; use crate::util::object_forwarding; use crate::util::opaque_pointer::VMWorkerThread; use crate::util::{Address, ObjectReference}; -use crate::vm::ObjectModel; use crate::vm::VMBinding; use crate::MMTK; use std::sync::atomic::Ordering; @@ -115,8 +114,7 @@ impl GCWorkerCopyContext { // If we are copying objects in mature space, we would need to mark the object as mature. if semantics.is_mature() && self.config.constraints.needs_log_bit { // If the plan uses unlogged bit, we set the unlogged bit (the object is unlogged/mature) - VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC - .mark_byte_as_unlogged::(object, Ordering::Relaxed); + VM::GLOBAL_LOG_BIT_SPEC.mark_byte_as_unlogged::(object, Ordering::Relaxed); } // Policy specific post copy. match self.config.copy_mapping[semantics] { diff --git a/src/util/finalizable_processor.rs b/src/util/finalizable_processor.rs index d341a4694b..f938adbabd 100644 --- a/src/util/finalizable_processor.rs +++ b/src/util/finalizable_processor.rs @@ -4,7 +4,7 @@ use crate::scheduler::{GCWork, GCWorker, WorkBucketStage}; use crate::util::ObjectReference; use crate::util::VMWorkerThread; use crate::vm::Finalizable; -use crate::vm::{Collection, VMBinding}; +use crate::vm::VMBinding; use crate::MMTK; use std::marker::PhantomData; @@ -75,7 +75,7 @@ impl FinalizableProcessor { // Set nursery_index to the end of the candidates (the candidates before the index are scanned) self.nursery_index = self.candidates.len(); - <::VM as VMBinding>::VMCollection::schedule_finalization(tls); + <::VM as VMBinding>::schedule_finalization(tls); } pub fn forward_candidate(&mut self, e: &mut E, _nursery: bool) { diff --git a/src/util/heap/pageresource.rs b/src/util/heap/pageresource.rs index 9154620e0d..908ba7357d 100644 --- a/src/util/heap/pageresource.rs +++ b/src/util/heap/pageresource.rs @@ -1,7 +1,6 @@ use crate::util::address::Address; use crate::util::conversions; use crate::util::opaque_pointer::*; -use crate::vm::ActivePlan; use std::sync::Mutex; use super::layout::VMMap; @@ -73,7 +72,7 @@ pub trait PageResource: 'static { let delta = actual_pages - reserved_pages; self.common().accounting.reserve(delta); self.common().accounting.commit(actual_pages); - if VM::VMActivePlan::is_mutator(tls) { + if VM::is_mutator(tls) { self.vm_map() .add_to_cumulative_committed_pages(actual_pages); } diff --git a/src/util/linear_scan.rs b/src/util/linear_scan.rs index bf391c785d..8a9623aadf 100644 --- a/src/util/linear_scan.rs +++ b/src/util/linear_scan.rs @@ -1,7 +1,6 @@ use crate::util::metadata::vo_bit; use 
crate::util::Address; use crate::util::ObjectReference; -use crate::vm::ObjectModel; use crate::vm::VMBinding; use std::marker::PhantomData; @@ -68,11 +67,11 @@ pub trait LinearScanObjectSize { fn size(object: ObjectReference) -> usize; } -/// Default object size as ObjectModel::get_current_size() +/// Default object size as ObjectModel::get_object_size() pub struct DefaultObjectSize(PhantomData); impl LinearScanObjectSize for DefaultObjectSize { fn size(object: ObjectReference) -> usize { - VM::VMObjectModel::get_current_size(object) + VM::get_object_size(object) } } diff --git a/src/util/memory.rs b/src/util/memory.rs index 4bd68faef4..5260acbd6d 100644 --- a/src/util/memory.rs +++ b/src/util/memory.rs @@ -1,7 +1,7 @@ use crate::util::alloc::AllocationError; use crate::util::opaque_pointer::*; use crate::util::Address; -use crate::vm::{Collection, VMBinding}; +use crate::vm::VMBinding; use libc::{PROT_EXEC, PROT_NONE, PROT_READ, PROT_WRITE}; use std::io::{Error, Result}; use sysinfo::{RefreshKind, System, SystemExt}; @@ -140,7 +140,7 @@ pub fn handle_mmap_error(error: Error, tls: VMThread) -> ! { ErrorKind::OutOfMemory => { // Signal `MmapOutOfMemory`. Expect the VM to abort immediately. trace!("Signal MmapOutOfMemory!"); - VM::VMCollection::out_of_memory(tls, AllocationError::MmapOutOfMemory); + VM::out_of_memory(tls, AllocationError::MmapOutOfMemory); unreachable!() } // Before Rust had ErrorKind::OutOfMemory, this is how we capture OOM from OS calls. @@ -152,7 +152,7 @@ pub fn handle_mmap_error(error: Error, tls: VMThread) -> ! { if os_errno == libc::ENOMEM { // Signal `MmapOutOfMemory`. Expect the VM to abort immediately. trace!("Signal MmapOutOfMemory!"); - VM::VMCollection::out_of_memory(tls, AllocationError::MmapOutOfMemory); + VM::out_of_memory(tls, AllocationError::MmapOutOfMemory); unreachable!() } } diff --git a/src/util/metadata/global.rs b/src/util/metadata/global.rs index 7583009489..fcfa24100c 100644 --- a/src/util/metadata/global.rs +++ b/src/util/metadata/global.rs @@ -2,7 +2,6 @@ use super::header_metadata::HeaderMetadataSpec; use crate::util::metadata::metadata_val_traits::*; use crate::util::metadata::side_metadata::SideMetadataSpec; use crate::util::ObjectReference; -use crate::vm::ObjectModel; use crate::vm::VMBinding; use atomic::Ordering; @@ -57,7 +56,7 @@ impl MetadataSpec { match self { MetadataSpec::OnSide(metadata_spec) => metadata_spec.load(object.to_address::()), MetadataSpec::InHeader(metadata_spec) => { - VM::VMObjectModel::load_metadata::(metadata_spec, object, mask) + VM::load_metadata::(metadata_spec, object, mask) } } } @@ -82,7 +81,7 @@ impl MetadataSpec { metadata_spec.load_atomic(object.to_address::(), ordering) } MetadataSpec::InHeader(metadata_spec) => { - VM::VMObjectModel::load_metadata_atomic::(metadata_spec, object, mask, ordering) + VM::load_metadata_atomic::(metadata_spec, object, mask, ordering) } } } @@ -108,7 +107,7 @@ impl MetadataSpec { metadata_spec.store(object.to_address::(), val); } MetadataSpec::InHeader(metadata_spec) => { - VM::VMObjectModel::store_metadata::(metadata_spec, object, val, mask) + VM::store_metadata::(metadata_spec, object, val, mask) } } } @@ -132,13 +131,9 @@ impl MetadataSpec { MetadataSpec::OnSide(metadata_spec) => { metadata_spec.store_atomic(object.to_address::(), val, ordering); } - MetadataSpec::InHeader(metadata_spec) => VM::VMObjectModel::store_metadata_atomic::( - metadata_spec, - object, - val, - mask, - ordering, - ), + MetadataSpec::InHeader(metadata_spec) => { + 
VM::store_metadata_atomic::(metadata_spec, object, val, mask, ordering) + } } } @@ -171,17 +166,15 @@ impl MetadataSpec { success_order, failure_order, ), - MetadataSpec::InHeader(metadata_spec) => { - VM::VMObjectModel::compare_exchange_metadata::( - metadata_spec, - object, - old_val, - new_val, - mask, - success_order, - failure_order, - ) - } + MetadataSpec::InHeader(metadata_spec) => VM::compare_exchange_metadata::( + metadata_spec, + object, + old_val, + new_val, + mask, + success_order, + failure_order, + ), } } @@ -205,7 +198,7 @@ impl MetadataSpec { metadata_spec.fetch_add_atomic(object.to_address::(), val, order) } MetadataSpec::InHeader(metadata_spec) => { - VM::VMObjectModel::fetch_add_metadata::(metadata_spec, object, val, order) + VM::fetch_add_metadata::(metadata_spec, object, val, order) } } } @@ -230,7 +223,7 @@ impl MetadataSpec { metadata_spec.fetch_sub_atomic(object.to_address::(), val, order) } MetadataSpec::InHeader(metadata_spec) => { - VM::VMObjectModel::fetch_sub_metadata::(metadata_spec, object, val, order) + VM::fetch_sub_metadata::(metadata_spec, object, val, order) } } } @@ -255,7 +248,7 @@ impl MetadataSpec { metadata_spec.fetch_and_atomic(object.to_address::(), val, order) } MetadataSpec::InHeader(metadata_spec) => { - VM::VMObjectModel::fetch_and_metadata::(metadata_spec, object, val, order) + VM::fetch_and_metadata::(metadata_spec, object, val, order) } } } @@ -280,7 +273,7 @@ impl MetadataSpec { metadata_spec.fetch_or_atomic(object.to_address::(), val, order) } MetadataSpec::InHeader(metadata_spec) => { - VM::VMObjectModel::fetch_or_metadata::(metadata_spec, object, val, order) + VM::fetch_or_metadata::(metadata_spec, object, val, order) } } } @@ -313,13 +306,9 @@ impl MetadataSpec { fetch_order, f, ), - MetadataSpec::InHeader(metadata_spec) => VM::VMObjectModel::fetch_update_metadata( - metadata_spec, - object, - set_order, - fetch_order, - f, - ), + MetadataSpec::InHeader(metadata_spec) => { + VM::fetch_update_metadata(metadata_spec, object, set_order, fetch_order, f) + } } } } diff --git a/src/util/metadata/mark_bit.rs b/src/util/metadata/mark_bit.rs index 6e155eac5a..2ab6208b35 100644 --- a/src/util/metadata/mark_bit.rs +++ b/src/util/metadata/mark_bit.rs @@ -1,6 +1,5 @@ use crate::util::Address; use crate::util::ObjectReference; -use crate::vm::ObjectModel; use crate::vm::VMBinding; use crate::vm::VMLocalMarkBitSpec; use std::sync::atomic::Ordering; @@ -52,11 +51,7 @@ impl MarkState { /// Check if the object is marked pub fn is_marked(&self, object: ObjectReference) -> bool { - let state = VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.load_atomic::( - object, - None, - Ordering::SeqCst, - ); + let state = VM::LOCAL_MARK_BIT_SPEC.load_atomic::(object, None, Ordering::SeqCst); state == self.state } @@ -64,16 +59,13 @@ impl MarkState { /// Otherwise return false -- the object was marked by others. pub fn test_and_mark(&self, object: ObjectReference) -> bool { loop { - let old_value = VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.load_atomic::( - object, - None, - Ordering::SeqCst, - ); + let old_value = + VM::LOCAL_MARK_BIT_SPEC.load_atomic::(object, None, Ordering::SeqCst); if old_value == self.state { return false; } - if VM::VMObjectModel::LOCAL_MARK_BIT_SPEC + if VM::LOCAL_MARK_BIT_SPEC .compare_exchange_metadata::( object, old_value, @@ -93,8 +85,8 @@ impl MarkState { /// This has to be called during object initialization. 
pub fn on_object_metadata_initialization(&self, object: ObjectReference) { // If it is in header, we have to set the mark bit for every newly allocated object - if VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.is_in_header() { - VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.store_atomic::( + if VM::LOCAL_MARK_BIT_SPEC.is_in_header() { + VM::LOCAL_MARK_BIT_SPEC.store_atomic::( object, self.unmarked_state(), None, @@ -110,16 +102,14 @@ impl MarkState { /// after a GC tracing (eagerly). This method will reset the mark bit. The policy should not use the mark bit before /// doing another tracing. pub fn on_block_reset(&self, start: Address, size: usize) { - if let crate::util::metadata::MetadataSpec::OnSide(side) = - *VM::VMObjectModel::LOCAL_MARK_BIT_SPEC - { + if let crate::util::metadata::MetadataSpec::OnSide(side) = *VM::LOCAL_MARK_BIT_SPEC { side.bzero_metadata(start, size); } } /// This has to be called in the global release of a space pub fn on_global_release(&mut self) { - if VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.is_in_header() { + if VM::LOCAL_MARK_BIT_SPEC.is_in_header() { // If it is in header, we flip it. In this case, we do not need to reset the bits for marked objects self.state = self.unmarked_state() } diff --git a/src/util/metadata/mod.rs b/src/util/metadata/mod.rs index 4b64f38bcc..0f61df4930 100644 --- a/src/util/metadata/mod.rs +++ b/src/util/metadata/mod.rs @@ -4,7 +4,7 @@ //! //! The new metadata design differentiates per-object metadata (e.g. forwarding-bits and marking-bit) from other types of metadata including per-address (e.g. VO bit) and per-X (where X != object size), because the per-object metadata can optionally be kept in the object headers. //! -//! MMTk acknowledges the VM-dependant nature of the in-object metadata, and asks the VM bindings to contribute by implementing the related parts in the ['ObjectModel'](crate::vm::ObjectModel). +//! MMTk acknowledges the VM-dependant nature of the in-object metadata, and asks the VM bindings to contribute by implementing the related parts in the [`crate::vm::VMBinding`]. //! //! //! # Side Metadata @@ -121,7 +121,7 @@ //! For each global metadata bit-set, a constant instance of the `MetadataSpec` struct should be created. //! //! If the metadata is per-object and may possibly reside in objects, the constant instance should be created in the VM's ObjectModel. -//! For instance, the forwarding-bits metadata spec should be assigned to `LOCAL_FORWARDING_BITS_SPEC` in [`ObjectModel`](crate::vm::ObjectModel). +//! For instance, the forwarding-bits metadata spec should be assigned to `LOCAL_FORWARDING_BITS_SPEC` in [`crate::vm::VMBinding`]. //! The VM binding decides whether to put these metadata bit-sets in-objects or on-side. //! //! For other metadata bit-sets, constant `MetadataSpec` instances, created inside MMTk by plans/policies, are used in conjunction with the access functions from the current module. diff --git a/src/util/metadata/vo_bit/helper.rs b/src/util/metadata/vo_bit/helper.rs index c9c992693d..c67741e2f1 100644 --- a/src/util/metadata/vo_bit/helper.rs +++ b/src/util/metadata/vo_bit/helper.rs @@ -31,7 +31,7 @@ use crate::{ metadata::{vo_bit, MetadataSpec}, ObjectReference, }, - vm::{ObjectModel, VMBinding}, + vm::VMBinding, }; /// The strategy to update the valid object (VO) bits. @@ -85,7 +85,7 @@ const fn strategy() -> VOBitUpdateStrategy { // VO bits during tracing. We use it as the default strategy. 
// TODO: Revisit this choice in the future if non-trivial changes are made and the performance // characterestics may change for the strategies. - match VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.as_spec() { + match VM::LOCAL_MARK_BIT_SPEC.as_spec() { // Note that currently ImmixSpace doesn't support in-header mark bits, // but the DummyVM for testing declares mark bits to be "in header" as a place holder // because it never runs GC. @@ -96,10 +96,7 @@ const fn strategy() -> VOBitUpdateStrategy { pub(crate) fn validate_config() { assert!( - !(VM::VMObjectModel::NEED_VO_BITS_DURING_TRACING - && VM::VMObjectModel::LOCAL_MARK_BIT_SPEC - .as_spec() - .is_in_header()), + !(VM::NEED_VO_BITS_DURING_TRACING && VM::LOCAL_MARK_BIT_SPEC.as_spec().is_in_header()), "The VM binding needs VO bits during tracing but also has in-header mark bits. \ We currently don't have an appropriate strategy for this case." ); @@ -110,7 +107,7 @@ We currently don't have an appropriate strategy for this case." // Always valid } VOBitUpdateStrategy::CopyFromMarkBits => { - let mark_bit_spec = VM::VMObjectModel::LOCAL_MARK_BIT_SPEC; + let mark_bit_spec = VM::LOCAL_MARK_BIT_SPEC; assert!( mark_bit_spec.is_on_side(), "The {s:?} strategy requires the mark bits to be on the side." @@ -170,12 +167,7 @@ pub(crate) fn on_object_forwarded(new_object: ObjectReference) { VOBitUpdateStrategy::CopyFromMarkBits => { // In this strategy, we will copy mark bits to VO bits. // We need to set mark bits for to-space objects, too. - VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.store_atomic::( - new_object, - 1, - None, - Ordering::SeqCst, - ); + VM::LOCAL_MARK_BIT_SPEC.store_atomic::(new_object, 1, None, Ordering::SeqCst); // We set the VO bit for the to-space object eagerly. vo_bit::set_vo_bit::(new_object); diff --git a/src/util/metadata/vo_bit/mod.rs b/src/util/metadata/vo_bit/mod.rs index 0382d933d0..ae2e59494e 100644 --- a/src/util/metadata/vo_bit/mod.rs +++ b/src/util/metadata/vo_bit/mod.rs @@ -21,7 +21,7 @@ //! visible to the VM binding. By default, if the "vo_bit" cargo feature is enabled, the VO bits //! metadata will be available to the VM binding during stack scanning time. The VM binding can //! further require the VO bits to be available during tracing (for object scanning) by setting -//! [`crate::vm::ObjectModel::NEED_VO_BITS_DURING_TRACING`] to `true`. mmtk-core does not +//! [`crate::vm::VMBinding::NEED_VO_BITS_DURING_TRACING`] to `true`. mmtk-core does not //! guarantee the VO bits are available to the VM binding during other time. //! //! Internally, mmtk-core will also make the VO bits available when necessary if mmtk-core needs to @@ -51,7 +51,6 @@ use atomic::Ordering; use crate::util::metadata::side_metadata::SideMetadataSpec; use crate::util::Address; use crate::util::ObjectReference; -use crate::vm::object_model::ObjectModel; use crate::vm::VMBinding; /// A VO bit is required per min-object-size aligned address, rather than per object, and can only exist as side metadata. @@ -150,7 +149,7 @@ pub fn bzero_vo_bit(start: Address, size: usize) { /// The caller needs to ensure the mark bits are set exactly wherever VO bits need to be set before /// calling this function. pub fn bcopy_vo_bit_from_mark_bit(start: Address, size: usize) { - let mark_bit_spec = VM::VMObjectModel::LOCAL_MARK_BIT_SPEC; + let mark_bit_spec = VM::LOCAL_MARK_BIT_SPEC; debug_assert!( mark_bit_spec.is_on_side(), "bcopy_vo_bit_from_mark_bits can only be used with on-the-side mark bits." 
diff --git a/src/util/object_forwarding.rs b/src/util/object_forwarding.rs index 01b3fec447..d28cb70515 100644 --- a/src/util/object_forwarding.rs +++ b/src/util/object_forwarding.rs @@ -1,7 +1,6 @@ use crate::util::copy::*; use crate::util::metadata::MetadataSpec; use crate::util::{constants, ObjectReference}; -use crate::vm::ObjectModel; use crate::vm::VMBinding; use std::sync::atomic::Ordering; @@ -24,7 +23,7 @@ pub fn attempt_to_forward(object: ObjectReference) -> u8 { loop { let old_value = get_forwarding_status::(object); if old_value != FORWARDING_NOT_TRIGGERED_YET - || VM::VMObjectModel::LOCAL_FORWARDING_BITS_SPEC + || VM::LOCAL_FORWARDING_BITS_SPEC .compare_exchange_metadata::( object, old_value, @@ -79,9 +78,9 @@ pub fn forward_object( semantics: CopySemantics, copy_context: &mut GCWorkerCopyContext, ) -> ObjectReference { - let new_object = VM::VMObjectModel::copy(object, semantics, copy_context); + let new_object = VM::copy_object(object, semantics, copy_context); if let Some(shift) = forwarding_bits_offset_in_forwarding_pointer::() { - VM::VMObjectModel::LOCAL_FORWARDING_POINTER_SPEC.store_atomic::( + VM::LOCAL_FORWARDING_POINTER_SPEC.store_atomic::( object, new_object.to_raw_address().as_usize() | ((FORWARDED as usize) << shift), None, @@ -89,7 +88,7 @@ pub fn forward_object( ) } else { write_forwarding_pointer::(object, new_object); - VM::VMObjectModel::LOCAL_FORWARDING_BITS_SPEC.store_atomic::( + VM::LOCAL_FORWARDING_BITS_SPEC.store_atomic::( object, FORWARDED, None, @@ -101,11 +100,7 @@ pub fn forward_object( /// Return the forwarding bits for a given `ObjectReference`. pub fn get_forwarding_status(object: ObjectReference) -> u8 { - VM::VMObjectModel::LOCAL_FORWARDING_BITS_SPEC.load_atomic::( - object, - None, - Ordering::SeqCst, - ) + VM::LOCAL_FORWARDING_BITS_SPEC.load_atomic::(object, None, Ordering::SeqCst) } pub fn is_forwarded(object: ObjectReference) -> bool { @@ -131,12 +126,7 @@ pub fn state_is_being_forwarded(forwarding_bits: u8) -> bool { /// Zero the forwarding bits of an object. /// This function is used on new objects. pub fn clear_forwarding_bits(object: ObjectReference) { - VM::VMObjectModel::LOCAL_FORWARDING_BITS_SPEC.store_atomic::( - object, - 0, - None, - Ordering::SeqCst, - ) + VM::LOCAL_FORWARDING_BITS_SPEC.store_atomic::(object, 0, None, Ordering::SeqCst) } /// Read the forwarding pointer of an object. @@ -151,7 +141,7 @@ pub fn read_forwarding_pointer(object: ObjectReference) -> Object // We write the forwarding poiner. We know it is an object reference. 
unsafe { ObjectReference::from_raw_address(crate::util::Address::from_usize( - VM::VMObjectModel::LOCAL_FORWARDING_POINTER_SPEC.load_atomic::( + VM::LOCAL_FORWARDING_POINTER_SPEC.load_atomic::( object, Some(FORWARDING_POINTER_MASK), Ordering::SeqCst, @@ -174,7 +164,7 @@ pub fn write_forwarding_pointer( ); trace!("write_forwarding_pointer({}, {})", object, new_object); - VM::VMObjectModel::LOCAL_FORWARDING_POINTER_SPEC.store_atomic::( + VM::LOCAL_FORWARDING_POINTER_SPEC.store_atomic::( object, new_object.to_raw_address().as_usize(), Some(FORWARDING_POINTER_MASK), @@ -194,8 +184,8 @@ pub(super) fn forwarding_bits_offset_in_forwarding_pointer() -> O use std::ops::Deref; // if both forwarding bits and forwarding pointer are in-header match ( - VM::VMObjectModel::LOCAL_FORWARDING_POINTER_SPEC.deref(), - VM::VMObjectModel::LOCAL_FORWARDING_BITS_SPEC.deref(), + VM::LOCAL_FORWARDING_POINTER_SPEC.deref(), + VM::LOCAL_FORWARDING_BITS_SPEC.deref(), ) { (MetadataSpec::InHeader(fp), MetadataSpec::InHeader(fb)) => { let maybe_shift = fb.bit_offset - fp.bit_offset; diff --git a/src/util/reference_processor.rs b/src/util/reference_processor.rs index 9683048881..076c74e2f2 100644 --- a/src/util/reference_processor.rs +++ b/src/util/reference_processor.rs @@ -9,7 +9,6 @@ use crate::scheduler::ProcessEdgesWork; use crate::scheduler::WorkBucketStage; use crate::util::ObjectReference; use crate::util::VMWorkerThread; -use crate::vm::ReferenceGlue; use crate::vm::VMBinding; /// Holds all reference processors for each weak reference Semantics. @@ -244,8 +243,8 @@ impl ReferenceProcessor { sync.references.iter().for_each(|reff| { debug_assert!(!reff.is_null()); debug_assert!(reff.is_in_any_space()); - let referent = VM::VMReferenceGlue::get_referent(*reff); - if !VM::VMReferenceGlue::is_referent_cleared(referent) { + let referent = VM::weakref_get_referent(*reff); + if !VM::weakref_is_referent_cleared(referent) { debug_assert!( referent.is_in_any_space(), "Referent {:?} (of reference {:?}) is not in any space", @@ -258,14 +257,14 @@ impl ReferenceProcessor { sync.enqueued_references.iter().for_each(|reff| { debug_assert!(!reff.is_null()); debug_assert!(reff.is_in_any_space()); - let referent = VM::VMReferenceGlue::get_referent(*reff); - debug_assert!(VM::VMReferenceGlue::is_referent_cleared(referent)); + let referent = VM::weakref_get_referent(*reff); + debug_assert!(VM::weakref_is_referent_cleared(referent)); }); } if !sync.enqueued_references.is_empty() { trace!("enqueue: {:?}", sync.enqueued_references); - VM::VMReferenceGlue::enqueue_references(&sync.enqueued_references, tls); + VM::weakref_enqueue_references(&sync.enqueued_references, tls); sync.enqueued_references.clear(); } @@ -284,16 +283,15 @@ impl ReferenceProcessor { trace: &mut E, reference: ObjectReference, ) -> ObjectReference { - let old_referent = ::VMReferenceGlue::get_referent(reference); + let old_referent = ::weakref_get_referent(reference); let new_referent = ReferenceProcessor::get_forwarded_referent(trace, old_referent); - ::VMReferenceGlue::set_referent(reference, new_referent); + ::weakref_set_referent(reference, new_referent); let new_reference = ReferenceProcessor::get_forwarded_reference(trace, reference); { - use crate::vm::ObjectModel; trace!( "Forwarding reference: {} (size: {})", reference, - ::VMObjectModel::get_current_size(reference) + ::get_object_size(reference) ); trace!( " referent: {} (forwarded to {})", @@ -395,8 +393,8 @@ impl ReferenceProcessor { } // Reference is definitely reachable. Retain the referent. 
- let referent = ::VMReferenceGlue::get_referent(*reference); - if !::VMReferenceGlue::is_referent_cleared(referent) { + let referent = ::weakref_get_referent(*reference); + if !::weakref_is_referent_cleared(referent) { Self::keep_referent_alive(trace, referent); } trace!(" ~> {:?} (retained)", referent); @@ -425,7 +423,7 @@ impl ReferenceProcessor { // If the reference is dead, we're done with it. Let it (and // possibly its referent) be garbage-collected. if !reference.is_live() { - ::VMReferenceGlue::clear_referent(reference); + ::weakref_clear_referent(reference); trace!(" UNREACHABLE reference: {}", reference); trace!(" (unreachable)"); return None; @@ -433,14 +431,14 @@ impl ReferenceProcessor { // The reference object is live let new_reference = Self::get_forwarded_reference(trace, reference); - let old_referent = ::VMReferenceGlue::get_referent(reference); + let old_referent = ::weakref_get_referent(reference); trace!(" ~> {}", old_referent); // If the application has cleared the referent the Java spec says // this does not cause the Reference object to be enqueued. We // simply allow the Reference object to fall out of our // waiting list. - if ::VMReferenceGlue::is_referent_cleared(old_referent) { + if ::weakref_is_referent_cleared(old_referent) { trace!(" (cleared referent) "); return None; } @@ -461,13 +459,13 @@ impl ReferenceProcessor { // copying collector. // Update the referent - ::VMReferenceGlue::set_referent(new_reference, new_referent); + ::weakref_set_referent(new_reference, new_referent); Some(new_reference) } else { // Referent is unreachable. Clear the referent and enqueue the reference object. trace!(" UNREACHABLE referent: {}", old_referent); - ::VMReferenceGlue::clear_referent(new_reference); + ::weakref_clear_referent(new_reference); enqueued_references.push(new_reference); None } diff --git a/src/util/sanity/sanity_checker.rs b/src/util/sanity/sanity_checker.rs index c3436470d8..18a25a2b28 100644 --- a/src/util/sanity/sanity_checker.rs +++ b/src/util/sanity/sanity_checker.rs @@ -132,7 +132,7 @@ impl GCWork for SanityPrepare

{ let mut sanity_checker = mmtk.sanity_checker.lock().unwrap(); sanity_checker.refs.clear(); } - for mutator in ::VMActivePlan::mutators() { + for mutator in ::mutators() { mmtk.scheduler.work_buckets[WorkBucketStage::Prepare] .add(PrepareMutator::::new(mutator)); } @@ -157,7 +157,7 @@ impl GCWork for SanityRelease

{ fn do_work(&mut self, _worker: &mut GCWorker, mmtk: &'static MMTK) { info!("Sanity GC release"); mmtk.sanity_checker.lock().unwrap().clear_roots_cache(); - for mutator in ::VMActivePlan::mutators() { + for mutator in ::mutators() { mmtk.scheduler.work_buckets[WorkBucketStage::Release] .add(ReleaseMutator::::new(mutator)); } @@ -221,11 +221,7 @@ impl ProcessEdgesWork for SanityGCProcessEdges { ); // Let VM check object - assert!( - VM::VMObjectModel::is_object_sane(object), - "Invalid reference {:?}", - object - ); + assert!(VM::is_object_sane(object), "Invalid reference {:?}", object); // Object is not "marked" sanity_checker.refs.insert(object); // "Mark" it diff --git a/src/vm/active_plan.rs b/src/vm/active_plan.rs deleted file mode 100644 index 1828a1352a..0000000000 --- a/src/vm/active_plan.rs +++ /dev/null @@ -1,59 +0,0 @@ -use crate::plan::Mutator; -use crate::scheduler::GCWorker; -use crate::util::opaque_pointer::*; -use crate::util::ObjectReference; -use crate::vm::VMBinding; -use crate::ObjectQueue; - -/// VM-specific methods for the current plan. -pub trait ActivePlan { - /// Return whether there is a mutator created and associated with the thread. - /// - /// Arguments: - /// * `tls`: The thread to query. - /// - /// # Safety - /// The caller needs to make sure that the thread is valid (a value passed in by the VM binding through API). - fn is_mutator(tls: VMThread) -> bool; - - /// Return a `Mutator` reference for the thread. - /// - /// Arguments: - /// * `tls`: The thread to query. - /// - /// # Safety - /// The caller needs to make sure that the thread is a mutator thread. - fn mutator(tls: VMMutatorThread) -> &'static mut Mutator; - - /// Return an iterator that includes all the mutators at the point of invocation. - fn mutators<'a>() -> Box> + 'a>; - - /// Return the total count of mutators. - fn number_of_mutators() -> usize; - - /// The fallback for object tracing. MMTk generally expects to find an object in one of MMTk's spaces (if it is allocated by MMTK), - /// and apply the corresponding policy to trace the object. Tracing in MMTk means identifying whether we have encountered this object in the - /// current GC. For example, for mark sweep, we will check if an object is marked, and if it is not yet marked, mark and enqueue the object - /// for later scanning. For copying policies, copying also happens in this step. For example for MMTk's copying space, we will - /// copy an object if it is in 'from space', and enqueue the copied object for later scanning. - /// - /// If a binding would like to trace objects that are not allocated by MMTk and are not in any MMTk space, they can override this method. - /// They should check whether the object is encountered before in this current GC. If not, they should record the object as encountered themselves, - /// and enqueue the object reference to the object queue provided by the argument. If a binding moves objects, they should do the copying in the method, - /// and enqueue the new object reference instead. - /// - /// The method should return the new object reference if the method moves the object, otherwise return the original object reference. - /// - /// Arguments: - /// * `queue`: The object queue. If an object is encountered for the first time in this GC, we expect the implementation to call `queue.enqueue()` - /// for the object. If the object is moved during the tracing, the new object reference (after copying) should be enqueued instead. - /// * `object`: The object to trace. 
- /// * `worker`: The GC worker that is doing this tracing. This is used to copy object (see [`crate::vm::ObjectModel::copy`]) - fn vm_trace_object( - _queue: &mut Q, - object: ObjectReference, - _worker: &mut GCWorker, - ) -> ObjectReference { - panic!("MMTk cannot trace object {:?} as it does not belong to any MMTk space. If the object is known to the VM, the binding can override this method and handle its tracing.", object) - } -} diff --git a/src/vm/collection.rs b/src/vm/collection.rs deleted file mode 100644 index 5420224ab0..0000000000 --- a/src/vm/collection.rs +++ /dev/null @@ -1,142 +0,0 @@ -use crate::util::alloc::AllocationError; -use crate::util::opaque_pointer::*; -use crate::vm::VMBinding; -use crate::{scheduler::*, Mutator}; - -/// Thread context for the spawned GC thread. It is used by spawn_gc_thread. -pub enum GCThreadContext { - /// The GC thread to spawn is a controller thread. There is only one controller thread. - Controller(Box>), - /// The GC thread to spawn is a worker thread. There can be multiple worker threads. - Worker(Box>), -} - -/// VM-specific methods for garbage collection. -pub trait Collection { - /// Stop all the mutator threads. MMTk calls this method when it requires all the mutator to yield for a GC. - /// This method should not return until all the threads are yielded. - /// The actual thread synchronization mechanism is up to the VM, and MMTk does not make assumptions on that. - /// MMTk provides a callback function and expects the binding to use the callback for each mutator when it - /// is ready for stack scanning. Usually a stack can be scanned as soon as the thread stops in the yieldpoint. - /// - /// Arguments: - /// * `tls`: The thread pointer for the GC worker. - /// * `mutator_visitor`: A callback. Call it with a mutator as argument to notify MMTk that the mutator is ready to be scanned. - fn stop_all_mutators(tls: VMWorkerThread, mutator_visitor: F) - where - F: FnMut(&'static mut Mutator); - - /// Resume all the mutator threads, the opposite of the above. When a GC is finished, MMTk calls this method. - /// - /// This method may not be called by the same GC thread that called `stop_all_mutators`. - /// - /// Arguments: - /// * `tls`: The thread pointer for the GC worker. Currently it is the tls of the embedded `GCWorker` instance - /// of the coordinator thread, but it is subject to change, and should not be depended on. - fn resume_mutators(tls: VMWorkerThread); - - /// Block the current thread for GC. This is called when an allocation request cannot be fulfilled and a GC - /// is needed. MMTk calls this method to inform the VM that the current thread needs to be blocked as a GC - /// is going to happen. Then MMTk starts a GC. For a stop-the-world GC, MMTk will then call `stop_all_mutators()` - /// before the GC, and call `resume_mutators()` after the GC. - /// - /// Arguments: - /// * `tls`: The current thread pointer that should be blocked. The VM can optionally check if the current thread matches `tls`. - fn block_for_gc(tls: VMMutatorThread); - - /// Ask the VM to spawn a GC thread for MMTk. A GC thread may later call into the VM through these VM traits. Some VMs - /// have assumptions that those calls needs to be within VM internal threads. - /// As a result, MMTk does not spawn GC threads itself to avoid breaking this kind of assumptions. - /// MMTk calls this method to spawn GC threads during [`initialize_collection()`](../memory_manager/fn.initialize_collection.html). 
- /// - /// Arguments: - /// * `tls`: The thread pointer for the parent thread that we spawn new threads from. This is the same `tls` when the VM - /// calls `initialize_collection()` and passes as an argument. - /// * `ctx`: The context for the GC thread. - /// * If `Controller` is passed, it means spawning a thread to run as the GC controller. - /// The spawned thread shall call `memory_manager::start_control_collector`. - /// * If `Worker` is passed, it means spawning a thread to run as a GC worker. - /// The spawned thread shall call `memory_manager::start_worker`. - /// In either case, the `Box` inside should be passed back to the called function. - fn spawn_gc_thread(tls: VMThread, ctx: GCThreadContext); - - /// Inform the VM of an out-of-memory error. The binding should hook into the VM's error - /// routine for OOM. Note that there are two different categories of OOM: - /// * Critical OOM: This is the case where the OS is unable to mmap or acquire more memory. - /// MMTk expects the VM to abort immediately if such an error is thrown. - /// * Heap OOM: This is the case where the specified heap size is insufficient to execute the - /// application. MMTk expects the binding to notify the VM about this OOM. MMTk makes no - /// assumptions about whether the VM will continue executing or abort immediately. - /// - /// See [`AllocationError`] for more information. - /// - /// Arguments: - /// * `tls`: The thread pointer for the mutator which failed the allocation and triggered the OOM. - /// * `err_kind`: The type of OOM error that was encountered. - fn out_of_memory(_tls: VMThread, err_kind: AllocationError) { - panic!("Out of memory with {:?}!", err_kind); - } - - /// Inform the VM to schedule finalization threads. - /// - /// Arguments: - /// * `tls`: The thread pointer for the current GC thread. - fn schedule_finalization(_tls: VMWorkerThread) {} - - /// A hook for the VM to do work after forwarding objects. - /// - /// This function is called after all of the following have finished: - /// - The life and death of objects are determined. Objects determined to be live will not - /// be reclaimed in this GC. - /// - Live objects have been moved to their destinations. (copying GC only) - /// - References in objects have been updated to point to new addresses. (copying GC only) - /// - /// And this function may run concurrently with the release work of GC, i.e. freeing the space - /// occupied by dead objects. - /// - /// It is safe for the VM to read and write object fields at this time, although GC has not - /// finished yet. GC will be reclaiming spaces of dead objects, but will not damage live - /// objects. However, the VM cannot allocate new objects at this time. - /// - /// One possible use of this hook is enqueuing `{Soft,Weak,Phantom}Reference` instances to - /// reference queues (for Java). VMs (including JVM implementations) do not have to handle - /// weak references this way, but mmtk-core provides this opportunity. - /// - /// Arguments: - /// * `tls_worker`: The thread pointer for the worker thread performing this call. - fn post_forwarding(_tls: VMWorkerThread) {} - - /// Return the amount of memory (in bytes) which the VM allocated outside the MMTk heap but - /// wants to include into the current MMTk heap size. MMTk core will consider the reported - /// memory as part of MMTk heap for the purpose of heap size accounting. 
- /// - /// This amount should include memory that is kept alive by heap objects and can be released by - /// executing finalizers (or other language-specific cleaning-up routines) that are executed - /// when the heap objects are dead. For example, if a language implementation allocates array - /// headers in the MMTk heap, but allocates their underlying buffers that hold the actual - /// elements using `malloc`, then those buffers should be included in this amount. When the GC - /// finds such an array dead, its finalizer shall `free` the buffer and reduce this amount. - /// - /// If possible, the VM should account off-heap memory in pages. That is, count the number of - /// pages occupied by off-heap objects, and report the number of bytes of those whole pages - /// instead of individual objects. Because the underlying operating system manages memory at - /// page granularity, the occupied pages (instead of individual objects) determine the memory - /// footprint of a process, and how much memory MMTk spaces can obtain from the OS. - /// - /// However, if the VM is incapable of accounting off-heap memory in pages (for example, if the - /// VM uses `malloc` and the implementation of `malloc` is opaque to the VM), the VM binding - /// can simply return the total number of bytes of those off-heap objects as an approximation. - /// - /// # Performance note - /// - /// This function will be called when MMTk polls for GC. It happens every time the MMTk - /// allocators have allocated a certain amount of memory, usually one or a few blocks. Because - /// this function is called very frequently, its implementation must be efficient. If it is - /// too expensive to compute the exact amount, an approximate value should be sufficient for - /// MMTk to trigger GC promptly in order to release off-heap memory, and keep the memory - /// footprint under control. - fn vm_live_bytes() -> usize { - // By default, MMTk assumes the amount of memory the VM allocates off-heap is negligible. - 0 - } -} diff --git a/src/vm/finalizable.rs new file mode 100644 index 0000000000..a68fc7ff77 --- /dev/null +++ b/src/vm/finalizable.rs @@ -0,0 +1,33 @@ +use crate::scheduler::gc_work::ProcessEdgesWork; +use crate::util::ObjectReference; + +/// A finalizable object for MMTk. MMTk needs to know the actual object reference in the type, +/// while a binding can use this type to store some runtime information about finalizable objects. +/// For example, for bindings that allow multiple finalizer methods with one object, they can define +/// the type as a tuple of `(object, finalize method)`, and register different finalizer methods to MMTk +/// for the same object. +/// The implementation should mark their method implementations as inline for performance. +pub trait Finalizable: std::fmt::Debug + Send { + /// Load the object reference. + fn get_reference(&self) -> ObjectReference; + /// Store the object reference. + fn set_reference(&mut self, object: ObjectReference); + /// Keep the heap references in the finalizable object alive. For example, the reference itself needs to be traced. However, + /// if the finalizable object includes other heap references, the implementation should trace them as well. + /// Note that trace_object() may move objects so we need to write the new reference in case it is moved. + fn keep_alive(&mut self, trace: &mut E); +} + +/// This provides an implementation of `Finalizable` for `ObjectReference`. 
Most bindings +/// should be able to use `ObjectReference` as `VMBinding::FinalizableType`. +impl Finalizable for ObjectReference { + fn get_reference(&self) -> ObjectReference { + *self + } + fn set_reference(&mut self, object: ObjectReference) { + *self = object; + } + fn keep_alive(&mut self, trace: &mut E) { + *self = trace.trace_object(*self); + } +} diff --git a/src/vm/metadata_specs.rs new file mode 100644 index 0000000000..5b6a2b18c3 --- /dev/null +++ b/src/vm/metadata_specs.rs @@ -0,0 +1,186 @@ +//! Side Specs Layout +//! +//! Short version +//! +//! * For *global* side metadata: +//! * The first spec: VMGlobalXXXSpec::side_first() +//! * The following specs: VMGlobalXXXSpec::side_after(FIRST_GLOBAL.as_spec()) +//! * For *local* side metadata: +//! * The first spec: VMLocalXXXSpec::side_first() +//! * The following specs: VMLocalXXXSpec::side_after(FIRST_LOCAL.as_spec()) +//! +//! Detailed explanation +//! +//! There are two types of side metadata layout in MMTk: +//! +//! 1. Contiguous layout: is the layout in which the whole metadata space for a SideMetadataSpec is contiguous. +//! 2. Chunked layout: is the layout in which the whole metadata memory space, that is shared between MMTk policies, is divided into metadata-chunks. Each metadata-chunk stores all of the metadata for all `SideMetadataSpec`s which apply to a source-data chunk. +//! +//! In 64-bits targets, both Global and PolicySpecific side metadata are contiguous. +//! Also, in 32-bits targets, the Global side metadata is contiguous. +//! This means if the starting address (variable named `offset`) of the metadata space for a SideMetadataSpec (`SPEC1`) is `BASE1`, the starting address (`offset`) of the next SideMetadataSpec (`SPEC2`) will be `BASE1 + total_metadata_space_size(SPEC1)`, which is located immediately after the end of the whole metadata space of `SPEC1`. +//! Now, if we add a third SideMetadataSpec (`SPEC3`), its starting address (`offset`) will be `BASE2 + total_metadata_space_size(SPEC2)`, which is located immediately after the end of the whole metadata space of `SPEC2`. +//! +//! In 32-bits targets, the PolicySpecific side metadata is chunked. +//! This means for each chunk (2^22 Bytes) of data, which, by definition, is managed by exactly one MMTk policy, there is a metadata chunk (2^22 * some_fixed_ratio Bytes) that contains all of its PolicySpecific metadata. +//! This means if a policy has one SideMetadataSpec (`LS1`), the `offset` of that spec will be `0` (= at the start of a metadata chunk). +//! If there is a second SideMetadataSpec (`LS2`) for this specific policy, the `offset` for that spec will be `0 + required_metadata_space_per_chunk(LS1)`, +//! and for a third SideMetadataSpec (`LS3`), the `offset` will be `BASE(LS2) + required_metadata_space_per_chunk(LS2)`. +//! +//! For all other policies, the `offset` starts from zero. This is safe because no two policies ever manage one chunk, so there will be no overlap. + +use crate::util::constants::LOG_BITS_IN_WORD; +use crate::util::constants::LOG_BYTES_IN_PAGE; +use crate::util::constants::LOG_MIN_OBJECT_SIZE; +use crate::util::metadata::side_metadata::*; +use crate::util::metadata::{ + header_metadata::HeaderMetadataSpec, + side_metadata::{SideMetadataOffset, SideMetadataSpec}, + MetadataSpec, +}; + +// This macro is invoked in define_vm_metadata_global_spec or define_vm_metadata_local_spec. +// Use those two to define a new VM metadata spec. +macro_rules! 
define_vm_metadata_spec { + ($(#[$outer:meta])*$spec_name: ident, $is_global: expr, $log_num_bits: expr, $side_min_obj_size: expr) => { + $(#[$outer])* + pub struct $spec_name(MetadataSpec); + impl $spec_name { + /// The number of bits (in log2) that are needed for the spec. + pub const LOG_NUM_BITS: usize = $log_num_bits; + + /// Whether this spec is global or local. For side metadata, the binding needs to make sure + /// global specs are laid out after another global spec, and local specs are laid + /// out after another local spec. Otherwise, there will be an assertion failure. + pub const IS_GLOBAL: bool = $is_global; + + /// Declare that the VM uses in-header metadata for this metadata type. + /// For the specification of the `bit_offset` argument, please refer to + /// the document of `[crate::util::metadata::header_metadata::HeaderMetadataSpec.bit_offset]`. + /// The binding needs to make sure that the bits used for a spec in the header do not conflict with + /// the bits of another spec (unless it is specified that some bits may be reused). + pub const fn in_header(bit_offset: isize) -> Self { + Self(MetadataSpec::InHeader(HeaderMetadataSpec { + bit_offset, + num_of_bits: 1 << Self::LOG_NUM_BITS, + })) + } + + /// Declare that the VM uses side metadata for this metadata type, + /// and the side metadata is the first of its kind (global or local). + /// The first global or local side metadata should be declared with `side_first()`, + /// and the rest side metadata should be declared with `side_after()` after a defined + /// side metadata of the same kind (global or local). Logically, all the declarations + /// create two list of side metadata, one for global, and one for local. + pub const fn side_first() -> Self { + if Self::IS_GLOBAL { + Self(MetadataSpec::OnSide(SideMetadataSpec { + name: stringify!($spec_name), + is_global: Self::IS_GLOBAL, + offset: GLOBAL_SIDE_METADATA_VM_BASE_OFFSET, + log_num_of_bits: Self::LOG_NUM_BITS, + log_bytes_in_region: $side_min_obj_size as usize, + })) + } else { + Self(MetadataSpec::OnSide(SideMetadataSpec { + name: stringify!($spec_name), + is_global: Self::IS_GLOBAL, + offset: LOCAL_SIDE_METADATA_VM_BASE_OFFSET, + log_num_of_bits: Self::LOG_NUM_BITS, + log_bytes_in_region: $side_min_obj_size as usize, + })) + } + } + + /// Declare that the VM uses side metadata for this metadata type, + /// and the side metadata should be laid out after the given side metadata spec. + /// The first global or local side metadata should be declared with `side_first()`, + /// and the rest side metadata should be declared with `side_after()` after a defined + /// side metadata of the same kind (global or local). Logically, all the declarations + /// create two list of side metadata, one for global, and one for local. + pub const fn side_after(spec: &MetadataSpec) -> Self { + assert!(spec.is_on_side()); + let side_spec = spec.extract_side_spec(); + assert!(side_spec.is_global == Self::IS_GLOBAL); + Self(MetadataSpec::OnSide(SideMetadataSpec { + name: stringify!($spec_name), + is_global: Self::IS_GLOBAL, + offset: SideMetadataOffset::layout_after(side_spec), + log_num_of_bits: Self::LOG_NUM_BITS, + log_bytes_in_region: $side_min_obj_size as usize, + })) + } + + /// Return the inner `[crate::util::metadata::MetadataSpec]` for the metadata type. + pub const fn as_spec(&self) -> &MetadataSpec { + &self.0 + } + + /// Return the number of bits for the metadata type. 
+ pub const fn num_bits(&self) -> usize { + 1 << $log_num_bits + } + } + impl std::ops::Deref for $spec_name { + type Target = MetadataSpec; + fn deref(&self) -> &Self::Target { + self.as_spec() + } + } + }; +} + +// Log bit: 1 bit per object, global +define_vm_metadata_spec!( + /// 1-bit global metadata to log an object. + VMGlobalLogBitSpec, + true, + 0, + LOG_MIN_OBJECT_SIZE +); +// Forwarding pointer: word size per object, local +define_vm_metadata_spec!( + /// 1-word local metadata for spaces that may copy objects. + /// This metadata has to be stored in the header. + /// This metadata can be defined at a position within the object payload. + /// As a forwarding pointer is only stored in dead objects which is not + /// accessible by the language, it is okay that store a forwarding pointer overwrites object payload + VMLocalForwardingPointerSpec, + false, + LOG_BITS_IN_WORD, + LOG_MIN_OBJECT_SIZE +); +// Forwarding bits: 2 bits per object, local +define_vm_metadata_spec!( + /// 2-bit local metadata for spaces that store a forwarding state for objects. + /// If this spec is defined in the header, it can be defined with a position of the lowest 2 bits in the forwarding pointer. + VMLocalForwardingBitsSpec, + false, + 1, + LOG_MIN_OBJECT_SIZE +); +// Mark bit: 1 bit per object, local +define_vm_metadata_spec!( + /// 1-bit local metadata for spaces that need to mark an object. + VMLocalMarkBitSpec, + false, + 0, + LOG_MIN_OBJECT_SIZE +); +// Pinning bit: 1 bit per object, local +define_vm_metadata_spec!( + /// 1-bit local metadata for spaces that support pinning. + VMLocalPinningBitSpec, + false, + 0, + LOG_MIN_OBJECT_SIZE +); +// Mark&nursery bits for LOS: 2 bit per page, local +define_vm_metadata_spec!( + /// 2-bits local metadata for the large object space. The two bits serve as + /// the mark bit and the nursery bit. + VMLocalLOSMarkNurserySpec, + false, + 1, + LOG_BYTES_IN_PAGE +); diff --git a/src/vm/mod.rs b/src/vm/mod.rs index 223a02e5cc..3b1703d187 100644 --- a/src/vm/mod.rs +++ b/src/vm/mod.rs @@ -15,66 +15,20 @@ //! 2. Make sure that the crate type for a VM binding supports LTO. To our knowledge, `staticlib` and `cdylib` support LTO, and //! `rlib` does *not* support LTO. -mod active_plan; -mod collection; /// Allows MMTk to access edges in a VM-defined way. pub mod edge_shape; -pub(crate) mod object_model; -mod reference_glue; -mod scanning; -pub use self::active_plan::ActivePlan; -pub use self::collection::Collection; -pub use self::collection::GCThreadContext; -pub use self::object_model::specs::*; -pub use self::object_model::ObjectModel; -pub use self::reference_glue::Finalizable; -pub use self::reference_glue::ReferenceGlue; -pub use self::scanning::EdgeVisitor; -pub use self::scanning::ObjectTracer; -pub use self::scanning::ObjectTracerContext; -pub use self::scanning::RootsWorkFactory; -pub use self::scanning::Scanning; +mod finalizable; +pub(crate) mod metadata_specs; +/// Imports needed for VMBinding. +pub mod prelude; +mod scan_utils; +mod vmbinding; -/// Default min alignment 4 bytes -const DEFAULT_LOG_MIN_ALIGNMENT: usize = 2; -/// Default max alignment 8 bytes -const DEFAULT_LOG_MAX_ALIGNMENT: usize = 3; - -/// The `VMBinding` trait associates with each trait, and provides VM-specific constants. -pub trait VMBinding -where - Self: Sized + 'static + Send + Sync + Default, -{ - /// The binding's implementation of [`crate::vm::ObjectModel`]. - type VMObjectModel: ObjectModel; - /// The binding's implementation of [`crate::vm::Scanning`]. 
- type VMScanning: Scanning; - /// The binding's implementation of [`crate::vm::Collection`]. - type VMCollection: Collection; - /// The binding's implementation of [`crate::vm::ActivePlan`]. - type VMActivePlan: ActivePlan; - /// The binding's implementation of [`crate::vm::ReferenceGlue`]. - type VMReferenceGlue: ReferenceGlue; - - /// The type of edges in this VM. - type VMEdge: edge_shape::Edge; - /// The type of heap memory slice in this VM. - type VMMemorySlice: edge_shape::MemorySlice; - - /// A value to fill in alignment gaps. This value can be used for debugging. - const ALIGNMENT_VALUE: usize = 0xdead_beef; - /// Allowed minimal alignment in bytes. - const MIN_ALIGNMENT: usize = 1 << DEFAULT_LOG_MIN_ALIGNMENT; - /// Allowed maximum alignment in bytes. - const MAX_ALIGNMENT: usize = 1 << DEFAULT_LOG_MAX_ALIGNMENT; - /// Does the binding use a non-zero allocation offset? If this is false, we expect the binding - /// to always use offset === 0 for allocation, and we are able to do some optimization if we know - /// offset === 0. - const USE_ALLOCATION_OFFSET: bool = true; - - /// This value is used to assert if the cursor is reasonable after allocations. - /// At the end of an allocation, the allocation cursor should be aligned to this value. - /// Note that MMTk does not attempt to do anything to align the cursor to this value, but - /// it merely asserts with this constant. - const ALLOC_END_ALIGNMENT: usize = 1; -} +pub use self::finalizable::Finalizable; +pub use self::metadata_specs::*; +pub use self::scan_utils::EdgeVisitor; +pub use self::scan_utils::ObjectTracer; +pub use self::scan_utils::ObjectTracerContext; +pub use self::scan_utils::RootsWorkFactory; +pub use self::vmbinding::GCThreadContext; +pub use self::vmbinding::VMBinding; diff --git a/src/vm/object_model.rs b/src/vm/object_model.rs deleted file mode 100644 index a185268e8c..0000000000 --- a/src/vm/object_model.rs +++ /dev/null @@ -1,633 +0,0 @@ -use atomic::Ordering; - -use self::specs::*; -use crate::util::copy::*; -use crate::util::metadata::header_metadata::HeaderMetadataSpec; -use crate::util::metadata::MetadataValue; -use crate::util::{Address, ObjectReference}; -use crate::vm::VMBinding; - -/// VM-specific methods for object model. -/// -/// This trait includes 3 parts: -/// -/// 1. Specifications for per object metadata: a binding needs to specify the location for each per object metadata spec. -/// A binding can choose between `in_header()` or `side()`, e.g. `VMGlobalLogBitSpec::side()`. -/// * in_header: a binding needs to specify the bit offset to an object reference that can be used for the per object metadata spec. -/// The actual number of bits required for a spec can be obtained from the `num_bits()` method of the spec type. -/// * side: a binding does not need to provide any specific storage for metadata in the header. Instead, MMTk -/// will use side tables to store the metadata. The following section Side Specs Layout will discuss how to correctly create -/// side metadata specs. -/// 2. In header metadata access: A binding -/// need to further define the functions with suffix _metadata about how to access the bits in the header. We provide default implementations -/// for those methods, assuming the bits in the spec are always available to MMTk. A binding could implement their -/// own routines to access the bits if VM specific treatment is needed (e.g. some bits are not always available to MMTk). -/// 3. VM-specific object info needed by MMTk: MMTk does not know object info as it is VM specific. 
However, MMTk needs -/// some object information for GC. A binding needs to implement them correctly. -/// -/// Note that depending on the selected GC plan, only a subset of the methods provided here will be used. -/// -/// Side Specs Layout -/// -/// Short version -/// -/// * For *global* side metadata: -/// * The first spec: VMGlobalXXXSpec::side_first() -/// * The following specs: VMGlobalXXXSpec::side_after(FIRST_GLOAL.as_spec()) -/// * For *local* side metadata: -/// * The first spec: VMLocalXXXSpec::side_first() -/// * The following specs: VMLocalXXXSpec::side_after(FIRST_LOCAL.as_spec()) -/// -/// Detailed explanation -/// -/// There are two types of side metadata layout in MMTk: -/// -/// 1. Contiguous layout: is the layout in which the whole metadata space for a SideMetadataSpec is contiguous. -/// 2. Chunked layout: is the layout in which the whole metadata memory space, that is shared between MMTk policies, is divided into metadata-chunks. Each metadata-chunk stores all of the metadata for all `SideMetadataSpec`s which apply to a source-data chunk. -/// -/// In 64-bits targets, both Global and PolicySpecific side metadata are contiguous. -/// Also, in 32-bits targets, the Global side metadata is contiguous. -/// This means if the starting address (variable named `offset`) of the metadata space for a SideMetadataSpec (`SPEC1`) is `BASE1`, the starting address (`offset`) of the next SideMetadataSpec (`SPEC2`) will be `BASE1 + total_metadata_space_size(SPEC1)`, which is located immediately after the end of the whole metadata space of `SPEC1`. -/// Now, if we add a third SideMetadataSpec (`SPEC3`), its starting address (`offset`) will be `BASE2 + total_metadata_space_size(SPEC2)`, which is located immediately after the end of the whole metadata space of `SPEC2`. -/// -/// In 32-bits targets, the PolicySpecific side metadata is chunked. -/// This means for each chunk (2^22 Bytes) of data, which, by definition, is managed by exactly one MMTk policy, there is a metadata chunk (2^22 * some_fixed_ratio Bytes) that contains all of its PolicySpecific metadata. -/// This means if a policy has one SideMetadataSpec (`LS1`), the `offset` of that spec will be `0` (= at the start of a metadata chunk). -/// If there is a second SideMetadataSpec (`LS2`) for this specific policy, the `offset` for that spec will be `0 + required_metadata_space_per_chunk(LS1)`, -/// and for a third SideMetadataSpec (`LS3`), the `offset` will be `BASE(LS2) + required_metadata_space_per_chunk(LS2)`. -/// -/// For all other policies, the `offset` starts from zero. This is safe because no two policies ever manage one chunk, so there will be no overlap. -pub trait ObjectModel { - // Per-object Metadata Spec definitions go here - // - // Note a number of Global and PolicySpecific side metadata specifications are already reserved by mmtk-core. - // Any side metadata offset calculation must consider these to prevent overlaps. A binding should start their - // side metadata from GLOBAL_SIDE_METADATA_VM_BASE_ADDRESS or LOCAL_SIDE_METADATA_VM_BASE_ADDRESS. - - /// A global 1-bit metadata used by generational plans to track cross-generational pointers. It is generally - /// located in side metadata. - /// - /// Note that for this bit, 0 represents logged (default), and 1 represents unlogged. - /// This bit is also referred to as unlogged bit in Java MMTk for this reason. - const GLOBAL_LOG_BIT_SPEC: VMGlobalLogBitSpec; - - /// A local word-size metadata for the forwarding pointer, used by copying plans. 
It is almost always - /// located in the object header as it is fine to destroy an object header in order to copy it. - const LOCAL_FORWARDING_POINTER_SPEC: VMLocalForwardingPointerSpec; - - /// A local 2-bit metadata for the forwarding status bits, used by copying plans. If your runtime requires - /// word-aligned addresses (i.e. 4- or 8-bytes), you can use the last two bits in the object header to store - /// the forwarding bits. Note that you must be careful if you place this in the header as the runtime may - /// be using those bits for some other reason. - const LOCAL_FORWARDING_BITS_SPEC: VMLocalForwardingBitsSpec; - - /// A local 1-bit metadata for the mark bit, used by most plans that need to mark live objects. Like with the - /// [forwarding bits](crate::vm::ObjectModel::LOCAL_FORWARDING_BITS_SPEC), you can often steal the last bit in - /// the object header (due to alignment requirements) for the mark bit. Though some bindings such as the - /// OpenJDK binding prefer to have the mark bits in side metadata to allow for bulk operations. - const LOCAL_MARK_BIT_SPEC: VMLocalMarkBitSpec; - - #[cfg(feature = "object_pinning")] - /// A local 1-bit metadata specification for the pinning bit, used by plans that need to pin objects. It is - /// generally in side metadata. - const LOCAL_PINNING_BIT_SPEC: VMLocalPinningBitSpec; - - /// A local 2-bit metadata used by the large object space to mark objects and set objects as "newly allocated". - /// Used by any plan with large object allocation. It is generally in the header as we can add an extra word - /// before the large object to store this metadata. This is fine as the metadata size is insignificant in - /// comparison to the object size. - // - // TODO: Cleanup and place the LOS mark and nursery bits in the header. See here: https://github.com/mmtk/mmtk-core/issues/847 - const LOCAL_LOS_MARK_NURSERY_SPEC: VMLocalLOSMarkNurserySpec; - - /// Set this to true if the VM binding requires the valid object (VO) bits to be available - /// during tracing. If this constant is set to `false`, it is undefined behavior if the binding - /// attempts to access VO bits during tracing. - /// - /// Note that the VO bits is always available during root scanning even if this flag is false, - /// which is suitable for using VO bits (and the `is_mmtk_object()` method) for conservative - /// stack scanning. However, if a binding is also conservative in finding references during - /// object scanning, they need to set this constant to `true`. See the comments of individual - /// methods in the `Scanning` trait. - /// - /// Depending on the internal implementation of mmtk-core, different strategies for handling - /// VO bits have different time/space overhead. mmtk-core will choose the best strategy - /// according to the configuration of the VM binding, including this flag. Currently, setting - /// this flag to true does not impose any additional overhead. - #[cfg(feature = "vo_bit")] - const NEED_VO_BITS_DURING_TRACING: bool = false; - - /// A function to non-atomically load the specified per-object metadata's content. - /// The default implementation assumes the bits defined by the spec are always avilable for MMTk to use. If that is not the case, a binding should override this method, and provide their implementation. - /// Returns the metadata value. - /// - /// # Arguments: - /// - /// * `metadata_spec`: is the header metadata spec that tries to perform the operation. - /// * `object`: is a reference to the target object. 
- /// * `mask`: is an optional mask value for the metadata. This value is used in cases like the forwarding pointer metadata, where some of the bits are reused by other metadata such as the forwarding bits. - /// - /// # Safety - /// This is a non-atomic load, thus not thread-safe. - unsafe fn load_metadata( - metadata_spec: &HeaderMetadataSpec, - object: ObjectReference, - mask: Option, - ) -> T { - metadata_spec.load::(object.to_header::(), mask) - } - - /// A function to atomically load the specified per-object metadata's content. - /// The default implementation assumes the bits defined by the spec are always avilable for MMTk to use. If that is not the case, a binding should override this method, and provide their implementation. - /// Returns the metadata value. - /// - /// # Arguments: - /// - /// * `metadata_spec`: is the header metadata spec that tries to perform the operation. - /// * `object`: is a reference to the target object. - /// * `mask`: is an optional mask value for the metadata. This value is used in cases like the forwarding pointer metadata, where some of the bits are reused by other metadata such as the forwarding bits. - /// * `atomic_ordering`: is the atomic ordering for the load operation. - fn load_metadata_atomic( - metadata_spec: &HeaderMetadataSpec, - object: ObjectReference, - mask: Option, - ordering: Ordering, - ) -> T { - metadata_spec.load_atomic::(object.to_header::(), mask, ordering) - } - - /// A function to non-atomically store a value to the specified per-object metadata. - /// The default implementation assumes the bits defined by the spec are always avilable for MMTk to use. If that is not the case, a binding should override this method, and provide their implementation. - /// - /// # Arguments: - /// - /// * `metadata_spec`: is the header metadata spec that tries to perform the operation. - /// * `object`: is a reference to the target object. - /// * `val`: is the new metadata value to be stored. - /// * `mask`: is an optional mask value for the metadata. This value is used in cases like the forwarding pointer metadata, where some of the bits are reused by other metadata such as the forwarding bits. - /// - /// # Safety - /// This is a non-atomic store, thus not thread-safe. - unsafe fn store_metadata( - metadata_spec: &HeaderMetadataSpec, - object: ObjectReference, - val: T, - mask: Option, - ) { - metadata_spec.store::(object.to_header::(), val, mask) - } - - /// A function to atomically store a value to the specified per-object metadata. - /// The default implementation assumes the bits defined by the spec are always avilable for MMTk to use. If that is not the case, a binding should override this method, and provide their implementation. - /// - /// # Arguments: - /// - /// * `metadata_spec`: is the header metadata spec that tries to perform the operation. - /// * `object`: is a reference to the target object. - /// * `val`: is the new metadata value to be stored. - /// * `mask`: is an optional mask value for the metadata. This value is used in cases like the forwarding pointer metadata, where some of the bits are reused by other metadata such as the forwarding bits. - /// * `atomic_ordering`: is the optional atomic ordering for the store operation. 
- fn store_metadata_atomic( - metadata_spec: &HeaderMetadataSpec, - object: ObjectReference, - val: T, - mask: Option, - ordering: Ordering, - ) { - metadata_spec.store_atomic::(object.to_header::(), val, mask, ordering) - } - - /// A function to atomically compare-and-exchange the specified per-object metadata's content. - /// The default implementation assumes the bits defined by the spec are always avilable for MMTk to use. If that is not the case, a binding should override this method, and provide their implementation. - /// Returns `true` if the operation is successful, and `false` otherwise. - /// - /// # Arguments: - /// - /// * `metadata_spec`: is the header metadata spec that tries to perform the operation. - /// * `object`: is a reference to the target object. - /// * `old_val`: is the expected current value of the metadata. - /// * `new_val`: is the new metadata value to be stored if the compare-and-exchange operation is successful. - /// * `mask`: is an optional mask value for the metadata. This value is used in cases like the forwarding pointer metadata, where some of the bits are reused by other metadata such as the forwarding bits. - /// * `success_order`: is the atomic ordering used if the operation is successful. - /// * `failure_order`: is the atomic ordering used if the operation fails. - fn compare_exchange_metadata( - metadata_spec: &HeaderMetadataSpec, - object: ObjectReference, - old_val: T, - new_val: T, - mask: Option, - success_order: Ordering, - failure_order: Ordering, - ) -> std::result::Result { - metadata_spec.compare_exchange::( - object.to_header::(), - old_val, - new_val, - mask, - success_order, - failure_order, - ) - } - - /// A function to atomically perform an add operation on the specified per-object metadata's content. - /// The default implementation assumes the bits defined by the spec are always avilable for MMTk to use. If that is not the case, a binding should override this method, and provide their implementation. - /// This is a wrapping add. - /// # Returns the old metadata value. - /// - /// # Arguments: - /// - /// * `metadata_spec`: is the header metadata spec that tries to perform the operation. - /// * `object`: is a reference to the target object. - /// * `val`: is the value to be added to the current value of the metadata. - /// * `order`: is the atomic ordering of the fetch-and-add operation. - fn fetch_add_metadata( - metadata_spec: &HeaderMetadataSpec, - object: ObjectReference, - val: T, - order: Ordering, - ) -> T { - metadata_spec.fetch_add::(object.to_header::(), val, order) - } - - /// A function to atomically perform a subtract operation on the specified per-object metadata's content. - /// The default implementation assumes the bits defined by the spec are always avilable for MMTk to use. If that is not the case, a binding should override this method, and provide their implementation. - /// This is a wrapping sub. - /// Returns the old metadata value. - /// - /// # Arguments: - /// - /// * `metadata_spec`: is the header metadata spec that tries to perform the operation. - /// * `object`: is a reference to the target object. - /// * `val`: is the value to be subtracted from the current value of the metadata. - /// * `order`: is the atomic ordering of the fetch-and-add operation. 
- fn fetch_sub_metadata( - metadata_spec: &HeaderMetadataSpec, - object: ObjectReference, - val: T, - order: Ordering, - ) -> T { - metadata_spec.fetch_sub::(object.to_header::(), val, order) - } - - /// A function to atomically perform a bit-and operation on the specified per-object metadata's content. - /// The default implementation assumes the bits defined by the spec are always avilable for MMTk to use. If that is not the case, a binding should override this method, and provide their implementation. - /// Returns the old metadata value. - /// - /// # Arguments: - /// - /// * `metadata_spec`: is the header metadata spec that tries to perform the operation. - /// * `object`: is a reference to the target object. - /// * `val`: is the value to bit-and with the current value of the metadata. - /// * `order`: is the atomic ordering of the fetch-and-add operation. - fn fetch_and_metadata( - metadata_spec: &HeaderMetadataSpec, - object: ObjectReference, - val: T, - order: Ordering, - ) -> T { - metadata_spec.fetch_and::(object.to_header::(), val, order) - } - - /// A function to atomically perform a bit-or operation on the specified per-object metadata's content. - /// The default implementation assumes the bits defined by the spec are always avilable for MMTk to use. If that is not the case, a binding should override this method, and provide their implementation. - /// Returns the old metadata value. - /// - /// # Arguments: - /// - /// * `metadata_spec`: is the header metadata spec that tries to perform the operation. - /// * `object`: is a reference to the target object. - /// * `val`: is the value to bit-or with the current value of the metadata. - /// * `order`: is the atomic ordering of the fetch-and-add operation. - fn fetch_or_metadata( - metadata_spec: &HeaderMetadataSpec, - object: ObjectReference, - val: T, - order: Ordering, - ) -> T { - metadata_spec.fetch_or::(object.to_header::(), val, order) - } - - /// A function to atomically perform an update operation on the specified per-object metadata's content. - /// The default implementation assumes the bits defined by the spec are always avilable for MMTk to use. If that is not the case, a binding should override this method, and provide their implementation. - /// The semantics of this method are the same as the `fetch_update()` on Rust atomic types. - /// - /// # Arguments: - /// - /// * `metadata_spec`: is the header metadata spec that tries to perform the operation. - /// * `object`: is a reference to the target object. - /// * `val`: is the value to bit-and with the current value of the metadata. - /// * `order`: is the atomic ordering of the fetch-and-add operation. - /// - /// # Returns the old metadata value. - fn fetch_update_metadata Option + Copy>( - metadata_spec: &HeaderMetadataSpec, - object: ObjectReference, - set_order: Ordering, - fetch_order: Ordering, - f: F, - ) -> std::result::Result { - metadata_spec.fetch_update::(object.to_header::(), set_order, fetch_order, f) - } - - /// Copy an object and return the address of the new object. Usually in the implementation of this method, - /// `alloc_copy()` and `post_copy()` from [`GCWorkerCopyContext`](util/copy/struct.GCWorkerCopyContext.html) - /// are used for copying. - /// - /// Arguments: - /// * `from`: The address of the object to be copied. - /// * `semantics`: The copy semantic to use. - /// * `copy_context`: The `GCWorkerCopyContext` for the GC thread. 
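As the comment above notes, implementations of `copy` typically pair `alloc_copy()` with `post_copy()` on the `GCWorkerCopyContext`. Below is a hedged sketch of that shape: the alignment constant is made up, the byte count would normally come from the VM's size query, the object reference is assumed to equal the allocation start, and the exact signatures should be checked against the mmtk-core version in use.

```rust
use mmtk::util::copy::{CopySemantics, GCWorkerCopyContext};
use mmtk::util::ObjectReference;
use mmtk::vm::VMBinding;

// Assumed VM-side constant; a real binding would consult its own object model.
const OBJECT_ALIGNMENT: usize = 8;

fn copy_sketch<VM: VMBinding>(
    from: ObjectReference,
    semantics: CopySemantics,
    copy_context: &mut GCWorkerCopyContext<VM>,
    bytes: usize, // normally the VM's current size of `from`
) -> ObjectReference {
    // Reserve space in the destination space selected by `semantics`.
    let dst = copy_context.alloc_copy(from, bytes, OBJECT_ALIGNMENT, 0, semantics);
    // Copy the object payload byte-for-byte (assumes object ref == allocation start).
    unsafe {
        std::ptr::copy_nonoverlapping::<u8>(
            from.to_raw_address().to_ptr(),
            dst.to_mut_ptr(),
            bytes,
        );
    }
    let to_obj = ObjectReference::from_raw_address(dst);
    // Let the destination policy initialize its per-object state (mark/log bits, etc.).
    copy_context.post_copy(to_obj, bytes, semantics);
    to_obj
}
```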
- fn copy( - from: ObjectReference, - semantics: CopySemantics, - copy_context: &mut GCWorkerCopyContext, - ) -> ObjectReference; - - /// Copy an object. This is required - /// for delayed-copy collectors such as compacting collectors. During the - /// collection, MMTk reserves a region in the heap for an object as per - /// requirements found from `ObjectModel` and then asks `ObjectModel` to - /// determine what the object's reference will be post-copy. Return the address - /// past the end of the copied object. - /// - /// Arguments: - /// * `from`: The address of the object to be copied. - /// * `to`: The target location. - /// * `region: The start of the region that was reserved for this object. - fn copy_to(from: ObjectReference, to: ObjectReference, region: Address) -> Address; - - /// Return the reference that an object will be referred to after it is copied - /// to the specified region. Used in delayed-copy collectors such as compacting - /// collectors. - /// - /// Arguments: - /// * `from`: The object to be copied. - /// * `to`: The region to be copied to. - fn get_reference_when_copied_to(from: ObjectReference, to: Address) -> ObjectReference; - - /// Return the size used by an object. - /// - /// Arguments: - /// * `object`: The object to be queried. - fn get_current_size(object: ObjectReference) -> usize; - - /// Return the size when an object is copied. - /// - /// Arguments: - /// * `object`: The object to be queried. - fn get_size_when_copied(object: ObjectReference) -> usize; - - /// Return the alignment when an object is copied. - /// - /// Arguments: - /// * `object`: The object to be queried. - fn get_align_when_copied(object: ObjectReference) -> usize; - - /// Return the alignment offset when an object is copied. - /// - /// Arguments: - /// * `object`: The object to be queried. - fn get_align_offset_when_copied(object: ObjectReference) -> usize; - - /// Get the type descriptor for an object. - /// - /// FIXME: Do we need this? If so, determine lifetime, return byte[] - /// - /// Arguments: - /// * `reference`: The object to be queried. - fn get_type_descriptor(reference: ObjectReference) -> &'static [i8]; - - /// This is the worst case expansion that can occur due to object size increasing while - /// copying. This constant is used to calculate whether a nursery has grown larger than the - /// mature space for generational plans. - const VM_WORST_CASE_COPY_EXPANSION: f64 = 1.5; - - /// If this is true, the binding guarantees that an object reference's raw address is always equal to the return value of the `ref_to_address` method - /// and the return value of the `ref_to_object_start` method. This is a very strong guarantee, but it is also helpful for MMTk to - /// make some assumptions and optimize for this case. - /// If a binding sets this to true, and the related methods return inconsistent results, this is an undefined behavior. MMTk may panic - /// if any assertion catches this error, but may also fail silently. - const UNIFIED_OBJECT_REFERENCE_ADDRESS: bool = false; - - /// For our allocation result (object_start), the binding may have an offset between the allocation result - /// and the raw address of their object reference, i.e. object ref's raw address = object_start + offset. - /// The offset could be zero. The offset is not necessary to be - /// constant for all the objects. This constant defines the smallest possible offset. - /// - /// This is used as an indication for MMTk to predict where object references may point to in some algorithms. 
- /// - /// We should have the invariant: - /// * object ref >= object_start + OBJECT_REF_OFFSET_LOWER_BOUND - const OBJECT_REF_OFFSET_LOWER_BOUND: isize; - - /// Return the lowest address of the storage associated with an object. This should be - /// the address that a binding gets by an allocation call ([`crate::memory_manager::alloc`]). - /// - /// Arguments: - /// * `object`: The object to be queried. It should not be null. - fn ref_to_object_start(object: ObjectReference) -> Address; - - /// Return the header base address from an object reference. Any object header metadata - /// in the [`crate::vm::ObjectModel`] declares a piece of header metadata with an offset - /// from this address. If a binding does not use any header metadata for MMTk, this method - /// will not be called, and the binding can simply use `unreachable!()` for the method. - /// - /// Arguments: - /// * `object`: The object to be queried. It should not be null. - fn ref_to_header(object: ObjectReference) -> Address; - - /// Return an address guaranteed to be inside the storage associated - /// with an object. The returned address needs to be deterministic - /// for an given object. For a given object, the returned address - /// should be a constant offset from the object reference address. - /// - /// Note that MMTk may forge an arbitrary address - /// directly into a potential object reference, and call this method on the 'object reference'. - /// In that case, the argument `object` may not be a valid object reference, - /// and the implementation of this method should not use any object metadata. - /// - /// MMTk uses this method more frequently than [`crate::vm::ObjectModel::ref_to_object_start`]. - /// - /// Arguments: - /// * `object`: The object to be queried. It should not be null. - fn ref_to_address(object: ObjectReference) -> Address; - - /// Return an object for a given address returned by `ref_to_address()`. - /// This does exactly the opposite of `ref_to_address()`. The argument `addr` has - /// to be an address that is previously returned from `ref_to_address()`. Invoking this method - /// with an unexpected address is undefined behavior. - /// - /// Arguments: - /// * `addr`: An address that is returned from `ref_to_address()` - fn address_to_ref(addr: Address) -> ObjectReference; - - /// Dump debugging information for an object. - /// - /// Arguments: - /// * `object`: The object to be dumped. - fn dump_object(object: ObjectReference); - - /// Return if an object is valid from the runtime point of view. This is used - /// to debug MMTk. - fn is_object_sane(_object: ObjectReference) -> bool { - true - } -} - -pub mod specs { - use crate::util::constants::LOG_BITS_IN_WORD; - use crate::util::constants::LOG_BYTES_IN_PAGE; - use crate::util::constants::LOG_MIN_OBJECT_SIZE; - use crate::util::metadata::side_metadata::*; - use crate::util::metadata::{ - header_metadata::HeaderMetadataSpec, - side_metadata::{SideMetadataOffset, SideMetadataSpec}, - MetadataSpec, - }; - - // This macro is invoked in define_vm_metadata_global_spec or define_vm_metadata_local_spec. - // Use those two to define a new VM metadata spec. - macro_rules! define_vm_metadata_spec { - ($(#[$outer:meta])*$spec_name: ident, $is_global: expr, $log_num_bits: expr, $side_min_obj_size: expr) => { - $(#[$outer])* - pub struct $spec_name(MetadataSpec); - impl $spec_name { - /// The number of bits (in log2) that are needed for the spec. - pub const LOG_NUM_BITS: usize = $log_num_bits; - - /// Whether this spec is global or local. 
For side metadata, the binding needs to make sure - /// global specs are laid out after another global spec, and local specs are laid - /// out after another local spec. Otherwise, there will be an assertion failure. - pub const IS_GLOBAL: bool = $is_global; - - /// Declare that the VM uses in-header metadata for this metadata type. - /// For the specification of the `bit_offset` argument, please refer to - /// the document of `[crate::util::metadata::header_metadata::HeaderMetadataSpec.bit_offset]`. - /// The binding needs to make sure that the bits used for a spec in the header do not conflict with - /// the bits of another spec (unless it is specified that some bits may be reused). - pub const fn in_header(bit_offset: isize) -> Self { - Self(MetadataSpec::InHeader(HeaderMetadataSpec { - bit_offset, - num_of_bits: 1 << Self::LOG_NUM_BITS, - })) - } - - /// Declare that the VM uses side metadata for this metadata type, - /// and the side metadata is the first of its kind (global or local). - /// The first global or local side metadata should be declared with `side_first()`, - /// and the rest side metadata should be declared with `side_after()` after a defined - /// side metadata of the same kind (global or local). Logically, all the declarations - /// create two list of side metadata, one for global, and one for local. - pub const fn side_first() -> Self { - if Self::IS_GLOBAL { - Self(MetadataSpec::OnSide(SideMetadataSpec { - name: stringify!($spec_name), - is_global: Self::IS_GLOBAL, - offset: GLOBAL_SIDE_METADATA_VM_BASE_OFFSET, - log_num_of_bits: Self::LOG_NUM_BITS, - log_bytes_in_region: $side_min_obj_size as usize, - })) - } else { - Self(MetadataSpec::OnSide(SideMetadataSpec { - name: stringify!($spec_name), - is_global: Self::IS_GLOBAL, - offset: LOCAL_SIDE_METADATA_VM_BASE_OFFSET, - log_num_of_bits: Self::LOG_NUM_BITS, - log_bytes_in_region: $side_min_obj_size as usize, - })) - } - } - - /// Declare that the VM uses side metadata for this metadata type, - /// and the side metadata should be laid out after the given side metadata spec. - /// The first global or local side metadata should be declared with `side_first()`, - /// and the rest side metadata should be declared with `side_after()` after a defined - /// side metadata of the same kind (global or local). Logically, all the declarations - /// create two list of side metadata, one for global, and one for local. - pub const fn side_after(spec: &MetadataSpec) -> Self { - assert!(spec.is_on_side()); - let side_spec = spec.extract_side_spec(); - assert!(side_spec.is_global == Self::IS_GLOBAL); - Self(MetadataSpec::OnSide(SideMetadataSpec { - name: stringify!($spec_name), - is_global: Self::IS_GLOBAL, - offset: SideMetadataOffset::layout_after(side_spec), - log_num_of_bits: Self::LOG_NUM_BITS, - log_bytes_in_region: $side_min_obj_size as usize, - })) - } - - /// Return the inner `[crate::util::metadata::MetadataSpec]` for the metadata type. - pub const fn as_spec(&self) -> &MetadataSpec { - &self.0 - } - - /// Return the number of bits for the metadata type. - pub const fn num_bits(&self) -> usize { - 1 << $log_num_bits - } - } - impl std::ops::Deref for $spec_name { - type Target = MetadataSpec; - fn deref(&self) -> &Self::Target { - self.as_spec() - } - } - }; - } - - // Log bit: 1 bit per object, global - define_vm_metadata_spec!( - /// 1-bit global metadata to log an object. 
- VMGlobalLogBitSpec, - true, - 0, - LOG_MIN_OBJECT_SIZE - ); - // Forwarding pointer: word size per object, local - define_vm_metadata_spec!( - /// 1-word local metadata for spaces that may copy objects. - /// This metadata has to be stored in the header. - /// This metadata can be defined at a position within the object payload. - /// As a forwarding pointer is only stored in dead objects which is not - /// accessible by the language, it is okay that store a forwarding pointer overwrites object payload - VMLocalForwardingPointerSpec, - false, - LOG_BITS_IN_WORD, - LOG_MIN_OBJECT_SIZE - ); - // Forwarding bits: 2 bits per object, local - define_vm_metadata_spec!( - /// 2-bit local metadata for spaces that store a forwarding state for objects. - /// If this spec is defined in the header, it can be defined with a position of the lowest 2 bits in the forwarding pointer. - VMLocalForwardingBitsSpec, - false, - 1, - LOG_MIN_OBJECT_SIZE - ); - // Mark bit: 1 bit per object, local - define_vm_metadata_spec!( - /// 1-bit local metadata for spaces that need to mark an object. - VMLocalMarkBitSpec, - false, - 0, - LOG_MIN_OBJECT_SIZE - ); - // Pinning bit: 1 bit per object, local - define_vm_metadata_spec!( - /// 1-bit local metadata for spaces that support pinning. - VMLocalPinningBitSpec, - false, - 0, - LOG_MIN_OBJECT_SIZE - ); - // Mark&nursery bits for LOS: 2 bit per page, local - define_vm_metadata_spec!( - /// 2-bits local metadata for the large object space. The two bits serve as - /// the mark bit and the nursery bit. - VMLocalLOSMarkNurserySpec, - false, - 1, - LOG_BYTES_IN_PAGE - ); -} diff --git a/src/vm/prelude.rs b/src/vm/prelude.rs new file mode 100644 index 0000000000..27f2ea3fda --- /dev/null +++ b/src/vm/prelude.rs @@ -0,0 +1,15 @@ +pub use crate::plan::Mutator; +pub use crate::scheduler::{GCController, GCWorker}; +pub use crate::util::alloc::AllocationError; +pub use crate::util::copy::*; +pub use crate::util::metadata::header_metadata::HeaderMetadataSpec; +pub use crate::util::metadata::MetadataValue; +pub use crate::util::opaque_pointer::*; +pub use crate::util::{Address, ObjectReference}; +pub use crate::vm::edge_shape; +pub use crate::vm::finalizable::*; +pub use crate::vm::metadata_specs::*; +pub use crate::vm::scan_utils::*; +pub use crate::ObjectQueue; + +pub use atomic::Ordering; diff --git a/src/vm/reference_glue.rs b/src/vm/reference_glue.rs deleted file mode 100644 index 3b1ef98852..0000000000 --- a/src/vm/reference_glue.rs +++ /dev/null @@ -1,92 +0,0 @@ -use crate::util::ObjectReference; -use crate::util::VMWorkerThread; -use crate::vm::VMBinding; - -/// VM-specific methods for reference processing, including weak references, and finalizers. -/// We handle weak references and finalizers differently: -/// * for weak references, we assume they are implemented as normal reference objects (also known as weak objects) -/// with a referent that is actually weakly reachable. This trait provides a few methods to access -/// the referent of such an reference object. -/// * for finalizers, we provide a `Finalizable` trait, and require bindings to specify a type -/// that implements `Finalizable`. When the binding registers or pops a finalizable object -/// from MMTk, the specified type is used for the finalizable objects. For most languages, -/// they can just use `ObjectReference` for the finalizable type, meaning that they are registering -/// and popping a normal object reference as finalizable objects. -pub trait ReferenceGlue { - /// The type of finalizable objects. 
This type is used when the binding registers and pops finalizable objects. - type FinalizableType: Finalizable; - - // TODO: Should we also move the following methods about weak references to a trait (similar to the `Finalizable` trait)? - - /// Weak and soft references always clear the referent - /// before enqueueing. - /// - /// Arguments: - /// * `new_reference`: The reference whose referent is to be cleared. - fn clear_referent(new_reference: ObjectReference) { - Self::set_referent(new_reference, ObjectReference::NULL); - } - - /// Get the referent from a weak reference object. - /// - /// Arguments: - /// * `object`: The object reference. - fn get_referent(object: ObjectReference) -> ObjectReference; - - /// Set the referent in a weak reference object. - /// - /// Arguments: - /// * `reff`: The object reference for the reference. - /// * `referent`: The referent object reference. - fn set_referent(reff: ObjectReference, referent: ObjectReference); - - /// Check if the referent has been cleared. - /// - /// Arguments: - /// * `referent`: The referent object reference. - fn is_referent_cleared(referent: ObjectReference) -> bool { - referent.is_null() - } - - /// For weak reference types, if the referent is cleared during GC, the reference - /// will be added to a queue, and MMTk will call this method to inform - /// the VM about the changes for those references. This method is used - /// to implement Java's ReferenceQueue. - /// Note that this method is called for each type of weak references during GC, and - /// the references slice will be cleared after this call is returned. That means - /// MMTk will no longer keep these references alive once this method is returned. - fn enqueue_references(references: &[ObjectReference], tls: VMWorkerThread); -} - -use crate::scheduler::gc_work::ProcessEdgesWork; - -/// A finalizable object for MMTk. MMTk needs to know the actual object reference in the type, -/// while a binding can use this type to store some runtime information about finalizable objects. -/// For example, for bindings that allows multiple finalizer methods with one object, they can define -/// the type as a tuple of `(object, finalize method)`, and register different finalizer methods to MMTk -/// for the same object. -/// The implementation should mark theird method implementations as inline for performance. -pub trait Finalizable: std::fmt::Debug + Send { - /// Load the object reference. - fn get_reference(&self) -> ObjectReference; - /// Store the object reference. - fn set_reference(&mut self, object: ObjectReference); - /// Keep the heap references in the finalizable object alive. For example, the reference itself needs to be traced. However, - /// if the finalizable object includes other heap references, the implementation should trace them as well. - /// Note that trace_object() may move objects so we need to write the new reference in case that it is moved. - fn keep_alive(&mut self, trace: &mut E); -} - -/// This provides an implementation of `Finalizable` for `ObjectReference`. Most bindings -/// should be able to use `ObjectReference` as `ReferenceGlue::FinalizableType`. 
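For bindings that want more than a bare `ObjectReference`, the sketch below pairs the object with a finalizer selector, following the tuple idea described above. The type name, the `finalize_method` field, and the import paths are illustrative and may differ across mmtk-core versions.

```rust
use mmtk::scheduler::ProcessEdgesWork;
use mmtk::util::ObjectReference;
use mmtk::vm::Finalizable; // exact module path may differ across versions

/// Hypothetical finalizable item: the object plus which of several VM-level
/// finalize methods should run for it.
#[derive(Debug)]
pub struct VMFinalizer {
    object: ObjectReference,
    finalize_method: u32, // hypothetical selector, opaque to MMTk
}

impl Finalizable for VMFinalizer {
    fn get_reference(&self) -> ObjectReference {
        self.object
    }

    fn set_reference(&mut self, object: ObjectReference) {
        self.object = object;
    }

    fn keep_alive<E: ProcessEdgesWork>(&mut self, trace: &mut E) {
        // trace_object may move the object, so write the (possibly new) reference back.
        self.object = trace.trace_object(self.object);
    }
}
```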
-impl Finalizable for ObjectReference { - fn get_reference(&self) -> ObjectReference { - *self - } - fn set_reference(&mut self, object: ObjectReference) { - *self = object; - } - fn keep_alive(&mut self, trace: &mut E) { - *self = trace.trace_object(*self); - } -} diff --git a/src/vm/scan_utils.rs b/src/vm/scan_utils.rs new file mode 100644 index 0000000000..21398d23c2 --- /dev/null +++ b/src/vm/scan_utils.rs @@ -0,0 +1,127 @@ +use crate::scheduler::GCWorker; +use crate::util::ObjectReference; +use crate::vm::edge_shape::Edge; +use crate::vm::VMBinding; + +/// Callback trait of scanning functions that report edges. +pub trait EdgeVisitor { + /// Call this function for each edge. + fn visit_edge(&mut self, edge: ES); +} + +/// This lets us use closures as EdgeVisitor. +impl EdgeVisitor for F { + fn visit_edge(&mut self, edge: ES) { + #[cfg(debug_assertions)] + trace!( + "(FunctionClosure) Visit edge {:?} (pointing to {})", + edge, + edge.load() + ); + self(edge) + } +} + +/// Callback trait of scanning functions that directly trace through edges. +pub trait ObjectTracer { + /// Call this function for the content of each edge, + /// and assign the returned value back to the edge. + /// + /// Note: This function is performance-critical. + /// Implementations should consider inlining if necessary. + fn trace_object(&mut self, object: ObjectReference) -> ObjectReference; +} + +/// This lets us use closures as ObjectTracer. +impl ObjectReference> ObjectTracer for F { + fn trace_object(&mut self, object: ObjectReference) -> ObjectReference { + self(object) + } +} + +/// An `ObjectTracerContext` gives a GC worker temporary access to an `ObjectTracer`, allowing +/// the GC worker to trace objects. This trait is intended to abstract out the implementation +/// details of tracing objects, enqueuing objects, and creating work packets that expand the +/// transitive closure, allowing the VM binding to focus on VM-specific parts. +/// +/// This trait is used during root scanning and binding-side weak reference processing. +pub trait ObjectTracerContext: Clone + Send + 'static { + /// The concrete `ObjectTracer` type. + /// + /// FIXME: The current code works because of the unsafe method `ProcessEdgesWork::set_worker`. + /// The tracer should borrow the worker passed to `with_queuing_tracer` during its lifetime. + /// For this reason, `TracerType` should have a `<'w>` lifetime parameter. + /// Generic Associated Types (GAT) is already stablized in Rust 1.65. + /// We should update our toolchain version, too. + type TracerType: ObjectTracer; + + /// Create a temporary `ObjectTracer` and provide access in the scope of `func`. + /// + /// When the `ObjectTracer::trace_object` is called, if the traced object is first visited + /// in this transitive closure, it will be enqueued. After `func` returns, the implememtation + /// will create work packets to continue computing the transitive closure from the newly + /// enqueued objects. + /// + /// API functions that provide `QueuingTracerFactory` should document + /// 1. on which fields the user is supposed to call `ObjectTracer::trace_object`, and + /// 2. which work bucket the generated work packet will be added to. Sometimes the user needs + /// to know when the computing of transitive closure finishes. + /// + /// Arguments: + /// - `worker`: The current GC worker. + /// - `func`: A caller-supplied closure in which the created `ObjectTracer` can be used. + /// + /// Returns: The return value of `func`. 
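To make the `with_tracer` contract concrete, here is a hedged sketch of binding-side code retaining a batch of objects (for example, ephemeron values gathered by the VM) during weak-reference processing. The slice of objects is assumed to be supplied by the VM, and the trait paths may vary by version.

```rust
use mmtk::scheduler::GCWorker;
use mmtk::util::ObjectReference;
use mmtk::vm::{ObjectTracer, ObjectTracerContext, VMBinding};

fn retain_objects<VM: VMBinding>(
    worker: &mut GCWorker<VM>,
    tracer_context: &impl ObjectTracerContext<VM>,
    objects: &mut [ObjectReference], // assumed: collected by the VM beforehand
) {
    // Objects traced inside `with_tracer` are enqueued if newly reached; the work
    // packets that expand the transitive closure are created when the closure returns.
    tracer_context.with_tracer(worker, |tracer| {
        for obj in objects.iter_mut() {
            // Keep the object alive and record its possibly forwarded address.
            *obj = tracer.trace_object(*obj);
        }
    });
}
```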
+ fn with_tracer(&self, worker: &mut GCWorker, func: F) -> R + where + F: FnOnce(&mut Self::TracerType) -> R; +} + +/// Root-scanning methods use this trait to create work packets for processing roots. +/// +/// Notes on the required traits: +/// +/// - `Clone`: The VM may divide one root-scanning call (such as `scan_vm_specific_roots`) into +/// multiple work packets to scan roots in parallel. In this case, the factory shall be cloned +/// to be given to multiple work packets. +/// +/// Cloning may be expensive if a factory contains many states. If the states are immutable, a +/// `RootsWorkFactory` implementation may hold those states in an `Arc` field so that multiple +/// factory instances can still share the part held in the `Arc` even after cloning. +/// +/// - `Send` + 'static: The factory will be given to root-scanning work packets. +/// Because work packets are distributed to and executed on different GC workers, +/// it needs `Send` to be sent between threads. `'static` means it must not have +/// references to variables with limited lifetime (such as local variables), because +/// it needs to be moved between threads. +pub trait RootsWorkFactory: Clone + Send + 'static { + /// Create work packets to handle root edges. + /// + /// The work packet may update the edges. + /// + /// Arguments: + /// * `edges`: A vector of edges. + fn create_process_edge_roots_work(&mut self, edges: Vec); + + /// Create work packets to handle non-transitively pinning roots. + /// + /// The work packet will prevent the objects in `nodes` from moving, + /// i.e. they will be pinned for the duration of the GC. + /// But it will not prevent the children of those objects from moving. + /// + /// This method is useful for conservative stack scanning, or VMs that cannot update some + /// of the root edges. + /// + /// Arguments: + /// * `nodes`: A vector of references to objects pointed by root edges. + fn create_process_pinning_roots_work(&mut self, nodes: Vec); + + /// Create work packets to handle transitively pinning (TP) roots. + /// + /// Similar to `create_process_pinning_roots_work`, this work packet will not move objects in `nodes`. + /// Unlike ``create_process_pinning_roots_work`, no objects in the transitive closure of `nodes` will be moved, either. + /// + /// Arguments: + /// * `nodes`: A vector of references to objects pointed by root edges. + fn create_process_tpinning_roots_work(&mut self, nodes: Vec); +} diff --git a/src/vm/scanning.rs b/src/vm/scanning.rs deleted file mode 100644 index 44ae056190..0000000000 --- a/src/vm/scanning.rs +++ /dev/null @@ -1,337 +0,0 @@ -use crate::plan::Mutator; -use crate::scheduler::GCWorker; -use crate::util::ObjectReference; -use crate::util::VMWorkerThread; -use crate::vm::edge_shape::Edge; -use crate::vm::VMBinding; - -/// Callback trait of scanning functions that report edges. -pub trait EdgeVisitor { - /// Call this function for each edge. - fn visit_edge(&mut self, edge: ES); -} - -/// This lets us use closures as EdgeVisitor. -impl EdgeVisitor for F { - fn visit_edge(&mut self, edge: ES) { - #[cfg(debug_assertions)] - trace!( - "(FunctionClosure) Visit edge {:?} (pointing to {})", - edge, - edge.load() - ); - self(edge) - } -} - -/// Callback trait of scanning functions that directly trace through edges. -pub trait ObjectTracer { - /// Call this function for the content of each edge, - /// and assign the returned value back to the edge. - /// - /// Note: This function is performance-critical. 
- /// Implementations should consider inlining if necessary. - fn trace_object(&mut self, object: ObjectReference) -> ObjectReference; -} - -/// This lets us use closures as ObjectTracer. -impl ObjectReference> ObjectTracer for F { - fn trace_object(&mut self, object: ObjectReference) -> ObjectReference { - self(object) - } -} - -/// An `ObjectTracerContext` gives a GC worker temporary access to an `ObjectTracer`, allowing -/// the GC worker to trace objects. This trait is intended to abstract out the implementation -/// details of tracing objects, enqueuing objects, and creating work packets that expand the -/// transitive closure, allowing the VM binding to focus on VM-specific parts. -/// -/// This trait is used during root scanning and binding-side weak reference processing. -pub trait ObjectTracerContext: Clone + Send + 'static { - /// The concrete `ObjectTracer` type. - /// - /// FIXME: The current code works because of the unsafe method `ProcessEdgesWork::set_worker`. - /// The tracer should borrow the worker passed to `with_queuing_tracer` during its lifetime. - /// For this reason, `TracerType` should have a `<'w>` lifetime parameter. - /// Generic Associated Types (GAT) is already stablized in Rust 1.65. - /// We should update our toolchain version, too. - type TracerType: ObjectTracer; - - /// Create a temporary `ObjectTracer` and provide access in the scope of `func`. - /// - /// When the `ObjectTracer::trace_object` is called, if the traced object is first visited - /// in this transitive closure, it will be enqueued. After `func` returns, the implememtation - /// will create work packets to continue computing the transitive closure from the newly - /// enqueued objects. - /// - /// API functions that provide `QueuingTracerFactory` should document - /// 1. on which fields the user is supposed to call `ObjectTracer::trace_object`, and - /// 2. which work bucket the generated work packet will be added to. Sometimes the user needs - /// to know when the computing of transitive closure finishes. - /// - /// Arguments: - /// - `worker`: The current GC worker. - /// - `func`: A caller-supplied closure in which the created `ObjectTracer` can be used. - /// - /// Returns: The return value of `func`. - fn with_tracer(&self, worker: &mut GCWorker, func: F) -> R - where - F: FnOnce(&mut Self::TracerType) -> R; -} - -/// Root-scanning methods use this trait to create work packets for processing roots. -/// -/// Notes on the required traits: -/// -/// - `Clone`: The VM may divide one root-scanning call (such as `scan_vm_specific_roots`) into -/// multiple work packets to scan roots in parallel. In this case, the factory shall be cloned -/// to be given to multiple work packets. -/// -/// Cloning may be expensive if a factory contains many states. If the states are immutable, a -/// `RootsWorkFactory` implementation may hold those states in an `Arc` field so that multiple -/// factory instances can still share the part held in the `Arc` even after cloning. -/// -/// - `Send` + 'static: The factory will be given to root-scanning work packets. -/// Because work packets are distributed to and executed on different GC workers, -/// it needs `Send` to be sent between threads. `'static` means it must not have -/// references to variables with limited lifetime (such as local variables), because -/// it needs to be moved between threads. -pub trait RootsWorkFactory: Clone + Send + 'static { - /// Create work packets to handle root edges. - /// - /// The work packet may update the edges. 
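A hedged sketch of how a binding might feed root edges to this factory from its own stack walker; the slot-collection call is hypothetical and the edge type is the stock `SimpleEdge` for illustration.

```rust
use mmtk::util::Address;
use mmtk::vm::edge_shape::SimpleEdge;
use mmtk::vm::RootsWorkFactory;

// `slot_addresses` is assumed to come from a VM-specific stack walk: the addresses
// of stack slots that currently hold object references.
fn report_stack_roots(
    slot_addresses: Vec<Address>,
    mut factory: impl RootsWorkFactory<SimpleEdge>,
) {
    let edges: Vec<SimpleEdge> = slot_addresses
        .into_iter()
        .map(SimpleEdge::from_address)
        .collect();
    // MMTk creates work packets that trace through (and may update) these slots.
    factory.create_process_edge_roots_work(edges);
}
```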
- /// - /// Arguments: - /// * `edges`: A vector of edges. - fn create_process_edge_roots_work(&mut self, edges: Vec); - - /// Create work packets to handle non-transitively pinning roots. - /// - /// The work packet will prevent the objects in `nodes` from moving, - /// i.e. they will be pinned for the duration of the GC. - /// But it will not prevent the children of those objects from moving. - /// - /// This method is useful for conservative stack scanning, or VMs that cannot update some - /// of the root edges. - /// - /// Arguments: - /// * `nodes`: A vector of references to objects pointed by root edges. - fn create_process_pinning_roots_work(&mut self, nodes: Vec); - - /// Create work packets to handle transitively pinning (TP) roots. - /// - /// Similar to `create_process_pinning_roots_work`, this work packet will not move objects in `nodes`. - /// Unlike ``create_process_pinning_roots_work`, no objects in the transitive closure of `nodes` will be moved, either. - /// - /// Arguments: - /// * `nodes`: A vector of references to objects pointed by root edges. - fn create_process_tpinning_roots_work(&mut self, nodes: Vec); -} - -/// VM-specific methods for scanning roots/objects. -pub trait Scanning { - /// Return true if the given object supports edge enqueuing. - /// - /// - If this returns true, MMTk core will call `scan_object` on the object. - /// - Otherwise, MMTk core will call `scan_object_and_trace_edges` on the object. - /// - /// For maximum performance, the VM should support edge-enqueuing for as many objects as - /// practical. Also note that this method is called for every object to be scanned, so it - /// must be fast. The VM binding should avoid expensive checks and keep it as efficient as - /// possible. - /// - /// Arguments: - /// * `tls`: The VM-specific thread-local storage for the current worker. - /// * `object`: The object to be scanned. - fn support_edge_enqueuing(_tls: VMWorkerThread, _object: ObjectReference) -> bool { - true - } - - /// Delegated scanning of a object, visiting each reference field encountered. - /// - /// The VM shall call `edge_visitor.visit_edge` on each reference field. - /// - /// The VM may skip a reference field if it holds a null reference. If the VM supports tagged - /// references, it must skip tagged reference fields which are not holding references. - /// - /// The `memory_manager::is_mmtk_object` function can be used in this function if - /// - the "is_mmtk_object" feature is enabled, and - /// - `VM::VMObjectModel::NEED_VO_BITS_DURING_TRACING` is true. - /// - /// Arguments: - /// * `tls`: The VM-specific thread-local storage for the current worker. - /// * `object`: The object to be scanned. - /// * `edge_visitor`: Called back for each edge. - fn scan_object>( - tls: VMWorkerThread, - object: ObjectReference, - edge_visitor: &mut EV, - ); - - /// Delegated scanning of a object, visiting each reference field encountered, and trace the - /// objects pointed by each field. - /// - /// The VM shall call `object_tracer.trace_object` on the value held in each reference field, - /// and assign the returned value back to the field. If the VM uses tagged references, the - /// value passed to `object_tracer.trace_object` shall be the `ObjectReference` to the object - /// without any tag bits. - /// - /// The VM may skip a reference field if it holds a null reference. If the VM supports tagged - /// references, it must skip tagged reference fields which are not holding references. 
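The edge-enqueuing contract just described can be sketched as follows. The layout query is hypothetical (it stands in for the VM's type information), and a real binding would use its own edge type rather than `SimpleEdge`.

```rust
use mmtk::util::{Address, ObjectReference, VMWorkerThread};
use mmtk::vm::edge_shape::SimpleEdge;
use mmtk::vm::EdgeVisitor;

/// Hypothetical layout query: byte offsets of the reference fields of `object`.
fn reference_field_offsets(_object: ObjectReference) -> Vec<usize> {
    unimplemented!("VM-specific")
}

// Sketch of an edge-enqueuing scan_object: report the address of every reference
// field, skipping fields that do not currently hold a reference.
fn scan_object_sketch<EV: EdgeVisitor<SimpleEdge>>(
    _tls: VMWorkerThread,
    object: ObjectReference,
    edge_visitor: &mut EV,
) {
    for offset in reference_field_offsets(object) {
        let slot: Address = object.to_raw_address() + offset;
        let value: ObjectReference = unsafe { slot.load() };
        if !value.is_null() {
            edge_visitor.visit_edge(SimpleEdge::from_address(slot));
        }
    }
}
```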
- /// - /// The `memory_manager::is_mmtk_object` function can be used in this function if - /// - the "is_mmtk_object" feature is enabled, and - /// - `VM::VMObjectModel::NEED_VO_BITS_DURING_TRACING` is true. - /// - /// Arguments: - /// * `tls`: The VM-specific thread-local storage for the current worker. - /// * `object`: The object to be scanned. - /// * `object_tracer`: Called back for the content of each edge. - fn scan_object_and_trace_edges( - _tls: VMWorkerThread, - _object: ObjectReference, - _object_tracer: &mut OT, - ) { - unreachable!("scan_object_and_trace_edges() will not be called when support_edge_enqueue() is always true.") - } - - /// MMTk calls this method at the first time during a collection that thread's stacks - /// have been scanned. This can be used (for example) to clean up - /// obsolete compiled methods that are no longer being executed. - /// - /// Arguments: - /// * `partial_scan`: Whether the scan was partial or full-heap. - /// * `tls`: The GC thread that is performing the thread scan. - fn notify_initial_thread_scan_complete(partial_scan: bool, tls: VMWorkerThread); - - /// Scan one mutator for stack roots. - /// - /// Some VM bindings may not be able to implement this method. - /// For example, the VM binding may only be able to enumerate all threads and - /// scan them while enumerating, but cannot scan stacks individually when given - /// the references of threads. - /// In that case, it can leave this method empty, and deal with stack - /// roots in [`Scanning::scan_vm_specific_roots`]. However, in that case, MMTk - /// does not know those roots are stack roots, and cannot perform any possible - /// optimization for the stack roots. - /// - /// The `memory_manager::is_mmtk_object` function can be used in this function if - /// - the "is_mmtk_object" feature is enabled. - /// - /// Arguments: - /// * `tls`: The GC thread that is performing this scanning. - /// * `mutator`: The reference to the mutator whose roots will be scanned. - /// * `factory`: The VM uses it to create work packets for scanning roots. - fn scan_roots_in_mutator_thread( - tls: VMWorkerThread, - mutator: &'static mut Mutator, - factory: impl RootsWorkFactory, - ); - - /// Scan VM-specific roots. The creation of all root scan tasks (except thread scanning) - /// goes here. - /// - /// The `memory_manager::is_mmtk_object` function can be used in this function if - /// - the "is_mmtk_object" feature is enabled. - /// - /// Arguments: - /// * `tls`: The GC thread that is performing this scanning. - /// * `factory`: The VM uses it to create work packets for scanning roots. - fn scan_vm_specific_roots(tls: VMWorkerThread, factory: impl RootsWorkFactory); - - /// Return whether the VM supports return barriers. This is unused at the moment. - fn supports_return_barrier() -> bool; - - /// Prepare for another round of root scanning in the same GC. Some GC algorithms - /// need multiple transitive closures, and each transitive closure starts from - /// root scanning. We expect the binding to provide the same root set for every - /// round of root scanning in the same GC. Bindings can use this call to get - /// ready for another round of root scanning to make sure that the same root - /// set will be returned in the upcoming calls of root scanning methods, - /// such as [`crate::vm::Scanning::scan_roots_in_mutator_thread`] and - /// [`crate::vm::Scanning::scan_vm_specific_roots`]. - fn prepare_for_roots_re_scanning(); - - /// Process weak references. 
- /// - /// This function is called after a transitive closure is completed. - /// - /// MMTk core enables the VM binding to do the following in this function: - /// - /// 1. Query if an object is already reached in this transitive closure. - /// 2. Get the new address of an object if it is already reached. - /// 3. Keep an object and its descendents alive if not yet reached. - /// 4. Request this function to be called again after transitive closure is finished again. - /// - /// The VM binding can query if an object is currently reached by calling - /// `ObjectReference::is_reachable()`. - /// - /// If an object is already reached, the VM binding can get its new address by calling - /// `ObjectReference::get_forwarded_object()` as the object may have been moved. - /// - /// If an object is not yet reached, the VM binding can keep that object and its descendents - /// alive. To do this, the VM binding should use `tracer_context.with_tracer` to get access to - /// an `ObjectTracer`, and then call its `trace_object(object)` method. The `trace_object` - /// method will return the new address of the `object` if it moved the object, or its original - /// address if not moved. Implementation-wise, the `ObjectTracer` may contain an internal - /// queue for newly traced objects, and will flush the queue when `tracer_context.with_tracer` - /// returns. Therefore, it is recommended to reuse the `ObjectTracer` instance to trace - /// multiple objects. - /// - /// *Note that if `trace_object` is called on an already reached object, the behavior will be - /// equivalent to `ObjectReference::get_forwarded_object()`. It will return the new address if - /// the GC already moved the object when tracing that object, or the original address if the GC - /// did not move the object when tracing it. In theory, the VM binding can use `trace_object` - /// wherever `ObjectReference::get_forwarded_object()` is needed. However, if a VM never - /// resurrects objects, it should completely avoid touching `tracer_context`, and exclusively - /// use `ObjectReference::get_forwarded_object()` to get new addresses of objects. By doing - /// so, the VM binding can avoid accidentally resurrecting objects.* - /// - /// The VM binding can return `true` from `process_weak_refs` to request `process_weak_refs` - /// to be called again after the MMTk core finishes transitive closure again from the objects - /// newly visited by `ObjectTracer::trace_object`. This is useful if a VM supports multiple - /// levels of reachabilities (such as Java) or ephemerons. - /// - /// Implementation-wise, this function is called as the "sentinel" of the `VMRefClosure` work - /// bucket, which means it is called when all work packets in that bucket have finished. The - /// `tracer_context` expands the transitive closure by adding more work packets in the same - /// bucket. This means if `process_weak_refs` returns true, those work packets will have - /// finished (completing the transitive closure) by the time `process_weak_refs` is called - /// again. The VM binding can make use of this by adding custom work packets into the - /// `VMRefClosure` bucket. The bucket will be `VMRefForwarding`, instead, when forwarding. - /// See below. - /// - /// The `memory_manager::is_mmtk_object` function can be used in this function if - /// - the "is_mmtk_object" feature is enabled, and - /// - `VM::VMObjectModel::NEED_VO_BITS_DURING_TRACING` is true. - /// - /// Arguments: - /// * `worker`: The current GC worker. 
- /// * `tracer_context`: Use this to get access an `ObjectTracer` and use it to retain and - /// update weak references. - /// - /// This function shall return true if this function needs to be called again after the GC - /// finishes expanding the transitive closure from the objects kept alive. - fn process_weak_refs( - _worker: &mut GCWorker, - _tracer_context: impl ObjectTracerContext, - ) -> bool { - false - } - - /// Forward weak references. - /// - /// This function will only be called in the forwarding stage when using the mark-compact GC - /// algorithm. Mark-compact computes transive closure twice during each GC. It marks objects - /// in the first transitive closure, and forward references in the second transitive closure. - /// - /// Arguments: - /// * `worker`: The current GC worker. - /// * `tracer_context`: Use this to get access an `ObjectTracer` and use it to update weak - /// references. - fn forward_weak_refs( - _worker: &mut GCWorker, - _tracer_context: impl ObjectTracerContext, - ) { - } -} diff --git a/src/vm/vmbinding.rs b/src/vm/vmbinding.rs new file mode 100644 index 0000000000..52dd85ade2 --- /dev/null +++ b/src/vm/vmbinding.rs @@ -0,0 +1,887 @@ +use super::prelude::*; + +/// Default min alignment 4 bytes +const DEFAULT_LOG_MIN_ALIGNMENT: usize = 2; +/// Default max alignment 8 bytes +const DEFAULT_LOG_MAX_ALIGNMENT: usize = 3; + +/// Thread context for the spawned GC thread. It is used by spawn_gc_thread. +pub enum GCThreadContext { + /// The GC thread to spawn is a controller thread. There is only one controller thread. + Controller(Box>), + /// The GC thread to spawn is a worker thread. There can be multiple worker threads. + Worker(Box>), +} + +/// The `VMBinding` trait associates with each trait, and provides VM-specific constants. +pub trait VMBinding +where + Self: Sized + 'static + Send + Sync + Default, +{ + /// The type of edges in this VM. + type VMEdge: edge_shape::Edge; + /// The type of heap memory slice in this VM. + type VMMemorySlice: edge_shape::MemorySlice; + + /// A value to fill in alignment gaps. This value can be used for debugging. + const ALIGNMENT_VALUE: usize = 0xdead_beef; + /// Allowed minimal alignment in bytes. + const MIN_ALIGNMENT: usize = 1 << DEFAULT_LOG_MIN_ALIGNMENT; + /// Allowed maximum alignment in bytes. + const MAX_ALIGNMENT: usize = 1 << DEFAULT_LOG_MAX_ALIGNMENT; + /// Does the binding use a non-zero allocation offset? If this is false, we expect the binding + /// to always use offset === 0 for allocation, and we are able to do some optimization if we know + /// offset === 0. + const USE_ALLOCATION_OFFSET: bool = true; + + /// This value is used to assert if the cursor is reasonable after allocations. + /// At the end of an allocation, the allocation cursor should be aligned to this value. + /// Note that MMTk does not attempt to do anything to align the cursor to this value, but + /// it merely asserts with this constant. + const ALLOC_END_ALIGNMENT: usize = 1; + + // --- Active Plan --- + + /// Return whether there is a mutator created and associated with the thread. + /// + /// Arguments: + /// * `tls`: The thread to query. + /// + /// # Safety + /// The caller needs to make sure that the thread is valid (a value passed in by the VM binding through API). + fn is_mutator(tls: VMThread) -> bool; + + /// Return a `Mutator` reference for the thread. + /// + /// Arguments: + /// * `tls`: The thread to query. + /// + /// # Safety + /// The caller needs to make sure that the thread is a mutator thread. 
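Since this trait now also hosts the constants that used to live on separate VM traits, a binding implementation starts with a fragment like the one below. `MyVM` and the constant choices are illustrative; the elided items are the required methods and metadata specs discussed throughout this file.

```rust
use mmtk::vm::edge_shape::{SimpleEdge, UnimplementedMemorySlice};
use mmtk::vm::VMBinding;

#[derive(Default)]
pub struct MyVM;

impl VMBinding for MyVM {
    type VMEdge = SimpleEdge;
    type VMMemorySlice = UnimplementedMemorySlice;

    // Illustrative choices: the VM never needs more than 16-byte alignment and
    // always allocates with a zero offset.
    const MAX_ALIGNMENT: usize = 16;
    const USE_ALLOCATION_OFFSET: bool = false;

    // ... the required methods (is_mutator, mutator, stop_all_mutators, ...) and
    // the per-object metadata specs follow; see the sketches elsewhere in this change.
}
```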
+ fn mutator(tls: VMMutatorThread) -> &'static mut Mutator; + + /// Return an iterator that includes all the mutators at the point of invocation. + fn mutators<'a>() -> Box> + 'a>; + + /// Return the total count of mutators. + fn number_of_mutators() -> usize; + + /// The fallback for object tracing. MMTk generally expects to find an object in one of MMTk's spaces (if it is allocated by MMTK), + /// and apply the corresponding policy to trace the object. Tracing in MMTk means identifying whether we have encountered this object in the + /// current GC. For example, for mark sweep, we will check if an object is marked, and if it is not yet marked, mark and enqueue the object + /// for later scanning. For copying policies, copying also happens in this step. For example for MMTk's copying space, we will + /// copy an object if it is in 'from space', and enqueue the copied object for later scanning. + /// + /// If a binding would like to trace objects that are not allocated by MMTk and are not in any MMTk space, they can override this method. + /// They should check whether the object is encountered before in this current GC. If not, they should record the object as encountered themselves, + /// and enqueue the object reference to the object queue provided by the argument. If a binding moves objects, they should do the copying in the method, + /// and enqueue the new object reference instead. + /// + /// The method should return the new object reference if the method moves the object, otherwise return the original object reference. + /// + /// Arguments: + /// * `queue`: The object queue. If an object is encountered for the first time in this GC, we expect the implementation to call `queue.enqueue()` + /// for the object. If the object is moved during the tracing, the new object reference (after copying) should be enqueued instead. + /// * `object`: The object to trace. + /// * `worker`: The GC worker that is doing this tracing. This is used to copy object (see [`crate::vm::VMBinding::copy_object`]) + fn vm_trace_object( + _queue: &mut Q, + object: ObjectReference, + _worker: &mut GCWorker, + ) -> ObjectReference { + panic!("MMTk cannot trace object {:?} as it does not belong to any MMTk space. If the object is known to the VM, the binding can override this method and handle its tracing.", object) + } + + // --- Collection --- + + /// Stop all the mutator threads. MMTk calls this method when it requires all the mutator to yield for a GC. + /// This method should not return until all the threads are yielded. + /// The actual thread synchronization mechanism is up to the VM, and MMTk does not make assumptions on that. + /// MMTk provides a callback function and expects the binding to use the callback for each mutator when it + /// is ready for stack scanning. Usually a stack can be scanned as soon as the thread stops in the yieldpoint. + /// + /// Arguments: + /// * `tls`: The thread pointer for the GC worker. + /// * `mutator_visitor`: A callback. Call it with a mutator as argument to notify MMTk that the mutator is ready to be scanned. + fn stop_all_mutators(tls: VMWorkerThread, mutator_visitor: F) + where + F: FnMut(&'static mut Mutator); + + /// Resume all the mutator threads, the opposite of the above. When a GC is finished, MMTk calls this method. + /// + /// This method may not be called by the same GC thread that called `stop_all_mutators`. + /// + /// Arguments: + /// * `tls`: The thread pointer for the GC worker. 
Currently it is the tls of the embedded `GCWorker` instance + /// of the coordinator thread, but it is subject to change, and should not be depended on. + fn resume_mutators(tls: VMWorkerThread); + + /// Block the current thread for GC. This is called when an allocation request cannot be fulfilled and a GC + /// is needed. MMTk calls this method to inform the VM that the current thread needs to be blocked as a GC + /// is going to happen. Then MMTk starts a GC. For a stop-the-world GC, MMTk will then call `stop_all_mutators()` + /// before the GC, and call `resume_mutators()` after the GC. + /// + /// Arguments: + /// * `tls`: The current thread pointer that should be blocked. The VM can optionally check if the current thread matches `tls`. + fn block_for_gc(tls: VMMutatorThread); + + /// Ask the VM to spawn a GC thread for MMTk. A GC thread may later call into the VM through these VM traits. Some VMs + /// have assumptions that those calls needs to be within VM internal threads. + /// As a result, MMTk does not spawn GC threads itself to avoid breaking this kind of assumptions. + /// MMTk calls this method to spawn GC threads during [`initialize_collection()`](../memory_manager/fn.initialize_collection.html). + /// + /// Arguments: + /// * `tls`: The thread pointer for the parent thread that we spawn new threads from. This is the same `tls` when the VM + /// calls `initialize_collection()` and passes as an argument. + /// * `ctx`: The context for the GC thread. + /// * If `Controller` is passed, it means spawning a thread to run as the GC controller. + /// The spawned thread shall call `memory_manager::start_control_collector`. + /// * If `Worker` is passed, it means spawning a thread to run as a GC worker. + /// The spawned thread shall call `memory_manager::start_worker`. + /// In either case, the `Box` inside should be passed back to the called function. + fn spawn_gc_thread(tls: VMThread, ctx: GCThreadContext); + + /// Inform the VM of an out-of-memory error. The binding should hook into the VM's error + /// routine for OOM. Note that there are two different categories of OOM: + /// * Critical OOM: This is the case where the OS is unable to mmap or acquire more memory. + /// MMTk expects the VM to abort immediately if such an error is thrown. + /// * Heap OOM: This is the case where the specified heap size is insufficient to execute the + /// application. MMTk expects the binding to notify the VM about this OOM. MMTk makes no + /// assumptions about whether the VM will continue executing or abort immediately. + /// + /// See [`AllocationError`] for more information. + /// + /// Arguments: + /// * `tls`: The thread pointer for the mutator which failed the allocation and triggered the OOM. + /// * `err_kind`: The type of OOM error that was encountered. + fn out_of_memory(_tls: VMThread, err_kind: AllocationError) { + panic!("Out of memory with {:?}!", err_kind); + } + + /// Inform the VM to schedule finalization threads. + /// + /// Arguments: + /// * `tls`: The thread pointer for the current GC thread. + fn schedule_finalization(_tls: VMWorkerThread) {} + + /// A hook for the VM to do work after forwarding objects. + /// + /// This function is called after all of the following have finished: + /// - The life and death of objects are determined. Objects determined to be live will not + /// be reclaimed in this GC. + /// - Live objects have been moved to their destinations. (copying GC only) + /// - References in objects have been updated to point to new addresses. 
(copying GC only) + /// + /// And this function may run concurrently with the release work of GC, i.e. freeing the space + /// occupied by dead objects. + /// + /// It is safe for the VM to read and write object fields at this time, although GC has not + /// finished yet. GC will be reclaiming spaces of dead objects, but will not damage live + /// objects. However, the VM cannot allocate new objects at this time. + /// + /// One possible use of this hook is enqueuing `{Soft,Weak,Phantom}Reference` instances to + /// reference queues (for Java). VMs (including JVM implementations) do not have to handle + /// weak references this way, but mmtk-core provides this opportunity. + /// + /// Arguments: + /// * `tls_worker`: The thread pointer for the worker thread performing this call. + fn post_forwarding(_tls: VMWorkerThread) {} + + /// Return the amount of memory (in bytes) which the VM allocated outside the MMTk heap but + /// wants to include into the current MMTk heap size. MMTk core will consider the reported + /// memory as part of MMTk heap for the purpose of heap size accounting. + /// + /// This amount should include memory that is kept alive by heap objects and can be released by + /// executing finalizers (or other language-specific cleaning-up routines) that are executed + /// when the heap objects are dead. For example, if a language implementation allocates array + /// headers in the MMTk heap, but allocates their underlying buffers that hold the actual + /// elements using `malloc`, then those buffers should be included in this amount. When the GC + /// finds such an array dead, its finalizer shall `free` the buffer and reduce this amount. + /// + /// If possible, the VM should account off-heap memory in pages. That is, count the number of + /// pages occupied by off-heap objects, and report the number of bytes of those whole pages + /// instead of individual objects. Because the underlying operating system manages memory at + /// page granularity, the occupied pages (instead of individual objects) determine the memory + /// footprint of a process, and how much memory MMTk spaces can obtain from the OS. + /// + /// However, if the VM is incapable of accounting off-heap memory in pages (for example, if the + /// VM uses `malloc` and the implementation of `malloc` is opaque to the VM), the VM binding + /// can simply return the total number of bytes of those off-heap objects as an approximation. + /// + /// # Performance note + /// + /// This function will be called when MMTk polls for GC. It happens every time the MMTk + /// allocators have allocated a certain amount of memory, usually one or a few blocks. Because + /// this function is called very frequently, its implementation must be efficient. If it is + /// too expensive to compute the exact amount, an approximate value should be sufficient for + /// MMTk to trigger GC promptly in order to release off-heap memory, and keep the memory + /// footprint under control. + fn vm_live_bytes() -> usize { + // By default, MMTk assumes the amount of memory the VM allocates off-heap is negligible. + 0 + } + + // --- Object Model --- + + // Per-object Metadata Spec definitions go here + // + // Note a number of Global and PolicySpecific side metadata specifications are already reserved by mmtk-core. + // Any side metadata offset calculation must consider these to prevent overlaps. A binding should start their + // side metadata from GLOBAL_SIDE_METADATA_VM_BASE_ADDRESS or LOCAL_SIDE_METADATA_VM_BASE_ADDRESS. 
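As a concrete, illustrative example of the spec declarations that follow, a binding's `impl VMBinding for MyVM` block might keep the forwarding state in the header and the rest in side metadata, chaining the side specs with `side_first()`/`side_after()` so their offsets do not overlap. The placement choices below are assumptions, not requirements.

```rust
// Inside `impl VMBinding for MyVM { ... }`; the spec types come from mmtk::vm
// (the exact module path may differ across versions).

// Log bit in side metadata, first in the global list.
const GLOBAL_LOG_BIT_SPEC: VMGlobalLogBitSpec = VMGlobalLogBitSpec::side_first();

// Forwarding pointer overlaps the first word of the (dead) object.
const LOCAL_FORWARDING_POINTER_SPEC: VMLocalForwardingPointerSpec =
    VMLocalForwardingPointerSpec::in_header(0);

// Forwarding bits steal the low two bits of that same header word.
const LOCAL_FORWARDING_BITS_SPEC: VMLocalForwardingBitsSpec =
    VMLocalForwardingBitsSpec::in_header(0);

// Mark bit on the side, first in the local list; LOS bits laid out after it.
const LOCAL_MARK_BIT_SPEC: VMLocalMarkBitSpec = VMLocalMarkBitSpec::side_first();
const LOCAL_LOS_MARK_NURSERY_SPEC: VMLocalLOSMarkNurserySpec =
    VMLocalLOSMarkNurserySpec::side_after(Self::LOCAL_MARK_BIT_SPEC.as_spec());

#[cfg(feature = "object_pinning")]
const LOCAL_PINNING_BIT_SPEC: VMLocalPinningBitSpec =
    VMLocalPinningBitSpec::side_after(Self::LOCAL_LOS_MARK_NURSERY_SPEC.as_spec());
```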
+ + /// A global 1-bit metadata used by generational plans to track cross-generational pointers. It is generally + /// located in side metadata. + /// + /// Note that for this bit, 0 represents logged (default), and 1 represents unlogged. + /// This bit is also referred to as the unlogged bit in Java MMTk for this reason. + const GLOBAL_LOG_BIT_SPEC: VMGlobalLogBitSpec; + + /// A local word-size metadata for the forwarding pointer, used by copying plans. It is almost always + /// located in the object header as it is fine to destroy an object header in order to copy it. + const LOCAL_FORWARDING_POINTER_SPEC: VMLocalForwardingPointerSpec; + + /// A local 2-bit metadata for the forwarding status bits, used by copying plans. If your runtime requires + /// word-aligned addresses (i.e. 4 or 8 bytes), you can use the last two bits in the object header to store + /// the forwarding bits. Note that you must be careful if you place this in the header as the runtime may + /// be using those bits for some other reason. + const LOCAL_FORWARDING_BITS_SPEC: VMLocalForwardingBitsSpec; + + /// A local 1-bit metadata for the mark bit, used by most plans that need to mark live objects. Like with the + /// [forwarding bits](crate::vm::VMBinding::LOCAL_FORWARDING_BITS_SPEC), you can often steal the last bit in + /// the object header (due to alignment requirements) for the mark bit. However, some bindings, such as the + /// OpenJDK binding, prefer to have the mark bits in side metadata to allow for bulk operations. + const LOCAL_MARK_BIT_SPEC: VMLocalMarkBitSpec; + + #[cfg(feature = "object_pinning")] + /// A local 1-bit metadata specification for the pinning bit, used by plans that need to pin objects. It is + /// generally in side metadata. + const LOCAL_PINNING_BIT_SPEC: VMLocalPinningBitSpec; + + /// A local 2-bit metadata used by the large object space to mark objects and set objects as "newly allocated". + /// Used by any plan with large object allocation. It is generally in the header as we can add an extra word + /// before the large object to store this metadata. This is fine as the metadata size is insignificant in + /// comparison to the object size. + // + // TODO: Cleanup and place the LOS mark and nursery bits in the header. See here: https://github.com/mmtk/mmtk-core/issues/847 + const LOCAL_LOS_MARK_NURSERY_SPEC: VMLocalLOSMarkNurserySpec; + + /// Set this to true if the VM binding requires the valid object (VO) bits to be available + /// during tracing. If this constant is set to `false`, it is undefined behavior if the binding + /// attempts to access VO bits during tracing. + /// + /// Note that the VO bits are always available during root scanning even if this flag is false, + /// which is suitable for using VO bits (and the `is_mmtk_object()` method) for conservative + /// stack scanning. However, if a binding is also conservative in finding references during + /// object scanning, it needs to set this constant to `true`. See the comments of the individual + /// scanning methods below. + /// + /// Depending on the internal implementation of mmtk-core, different strategies for handling + /// VO bits have different time/space overhead. mmtk-core will choose the best strategy + /// according to the configuration of the VM binding, including this flag. Currently, setting + /// this flag to true does not impose any additional overhead.
+ #[cfg(feature = "vo_bit")] + const NEED_VO_BITS_DURING_TRACING: bool = false; + + /// A function to non-atomically load the specified per-object metadata's content. + /// The default implementation assumes the bits defined by the spec are always available for MMTk to use. If that is not the case, a binding should override this method and provide its own implementation. + /// Returns the metadata value. + /// + /// # Arguments: + /// + /// * `metadata_spec`: is the header metadata spec that tries to perform the operation. + /// * `object`: is a reference to the target object. + /// * `mask`: is an optional mask value for the metadata. This value is used in cases like the forwarding pointer metadata, where some of the bits are reused by other metadata such as the forwarding bits. + /// + /// # Safety + /// This is a non-atomic load, thus not thread-safe. + unsafe fn load_metadata( + metadata_spec: &HeaderMetadataSpec, + object: ObjectReference, + mask: Option, + ) -> T { + metadata_spec.load::(object.to_header::(), mask) + } + + /// A function to atomically load the specified per-object metadata's content. + /// The default implementation assumes the bits defined by the spec are always available for MMTk to use. If that is not the case, a binding should override this method and provide its own implementation. + /// Returns the metadata value. + /// + /// # Arguments: + /// + /// * `metadata_spec`: is the header metadata spec that tries to perform the operation. + /// * `object`: is a reference to the target object. + /// * `mask`: is an optional mask value for the metadata. This value is used in cases like the forwarding pointer metadata, where some of the bits are reused by other metadata such as the forwarding bits. + /// * `ordering`: is the atomic ordering for the load operation. + fn load_metadata_atomic( + metadata_spec: &HeaderMetadataSpec, + object: ObjectReference, + mask: Option, + ordering: Ordering, + ) -> T { + metadata_spec.load_atomic::(object.to_header::(), mask, ordering) + } + + /// A function to non-atomically store a value to the specified per-object metadata. + /// The default implementation assumes the bits defined by the spec are always available for MMTk to use. If that is not the case, a binding should override this method and provide its own implementation. + /// + /// # Arguments: + /// + /// * `metadata_spec`: is the header metadata spec that tries to perform the operation. + /// * `object`: is a reference to the target object. + /// * `val`: is the new metadata value to be stored. + /// * `mask`: is an optional mask value for the metadata. This value is used in cases like the forwarding pointer metadata, where some of the bits are reused by other metadata such as the forwarding bits. + /// + /// # Safety + /// This is a non-atomic store, thus not thread-safe. + unsafe fn store_metadata( + metadata_spec: &HeaderMetadataSpec, + object: ObjectReference, + val: T, + mask: Option, + ) { + metadata_spec.store::(object.to_header::(), val, mask) + } + + /// A function to atomically store a value to the specified per-object metadata. + /// The default implementation assumes the bits defined by the spec are always available for MMTk to use. If that is not the case, a binding should override this method and provide its own implementation. + /// + /// # Arguments: + /// + /// * `metadata_spec`: is the header metadata spec that tries to perform the operation. + /// * `object`: is a reference to the target object. + /// * `val`: is the new metadata value to be stored.
+ /// * `mask`: is an optional mask value for the metadata. This value is used in cases like the forwarding pointer metadata, where some of the bits are reused by other metadata such as the forwarding bits. + /// * `ordering`: is the atomic ordering for the store operation. + fn store_metadata_atomic( + metadata_spec: &HeaderMetadataSpec, + object: ObjectReference, + val: T, + mask: Option, + ordering: Ordering, + ) { + metadata_spec.store_atomic::(object.to_header::(), val, mask, ordering) + } + + /// A function to atomically compare-and-exchange the specified per-object metadata's content. + /// The default implementation assumes the bits defined by the spec are always available for MMTk to use. If that is not the case, a binding should override this method and provide its own implementation. + /// Returns `Ok` with the old metadata value if the operation is successful, and `Err` with the current value otherwise. + /// + /// # Arguments: + /// + /// * `metadata_spec`: is the header metadata spec that tries to perform the operation. + /// * `object`: is a reference to the target object. + /// * `old_val`: is the expected current value of the metadata. + /// * `new_val`: is the new metadata value to be stored if the compare-and-exchange operation is successful. + /// * `mask`: is an optional mask value for the metadata. This value is used in cases like the forwarding pointer metadata, where some of the bits are reused by other metadata such as the forwarding bits. + /// * `success_order`: is the atomic ordering used if the operation is successful. + /// * `failure_order`: is the atomic ordering used if the operation fails. + fn compare_exchange_metadata( + metadata_spec: &HeaderMetadataSpec, + object: ObjectReference, + old_val: T, + new_val: T, + mask: Option, + success_order: Ordering, + failure_order: Ordering, + ) -> std::result::Result { + metadata_spec.compare_exchange::( + object.to_header::(), + old_val, + new_val, + mask, + success_order, + failure_order, + ) + } + + /// A function to atomically perform an add operation on the specified per-object metadata's content. + /// The default implementation assumes the bits defined by the spec are always available for MMTk to use. If that is not the case, a binding should override this method and provide its own implementation. + /// This is a wrapping add. + /// Returns the old metadata value. + /// + /// # Arguments: + /// + /// * `metadata_spec`: is the header metadata spec that tries to perform the operation. + /// * `object`: is a reference to the target object. + /// * `val`: is the value to be added to the current value of the metadata. + /// * `order`: is the atomic ordering of the fetch-and-add operation. + fn fetch_add_metadata( + metadata_spec: &HeaderMetadataSpec, + object: ObjectReference, + val: T, + order: Ordering, + ) -> T { + metadata_spec.fetch_add::(object.to_header::(), val, order) + } + + /// A function to atomically perform a subtract operation on the specified per-object metadata's content. + /// The default implementation assumes the bits defined by the spec are always available for MMTk to use. If that is not the case, a binding should override this method and provide its own implementation. + /// This is a wrapping sub. + /// Returns the old metadata value. + /// + /// # Arguments: + /// + /// * `metadata_spec`: is the header metadata spec that tries to perform the operation. + /// * `object`: is a reference to the target object. + /// * `val`: is the value to be subtracted from the current value of the metadata.
+ /// * `order`: is the atomic ordering of the fetch-and-sub operation. + fn fetch_sub_metadata( + metadata_spec: &HeaderMetadataSpec, + object: ObjectReference, + val: T, + order: Ordering, + ) -> T { + metadata_spec.fetch_sub::(object.to_header::(), val, order) + } + + /// A function to atomically perform a bit-and operation on the specified per-object metadata's content. + /// The default implementation assumes the bits defined by the spec are always available for MMTk to use. If that is not the case, a binding should override this method and provide its own implementation. + /// Returns the old metadata value. + /// + /// # Arguments: + /// + /// * `metadata_spec`: is the header metadata spec that tries to perform the operation. + /// * `object`: is a reference to the target object. + /// * `val`: is the value to bit-and with the current value of the metadata. + /// * `order`: is the atomic ordering of the fetch-and operation. + fn fetch_and_metadata( + metadata_spec: &HeaderMetadataSpec, + object: ObjectReference, + val: T, + order: Ordering, + ) -> T { + metadata_spec.fetch_and::(object.to_header::(), val, order) + } + + /// A function to atomically perform a bit-or operation on the specified per-object metadata's content. + /// The default implementation assumes the bits defined by the spec are always available for MMTk to use. If that is not the case, a binding should override this method and provide its own implementation. + /// Returns the old metadata value. + /// + /// # Arguments: + /// + /// * `metadata_spec`: is the header metadata spec that tries to perform the operation. + /// * `object`: is a reference to the target object. + /// * `val`: is the value to bit-or with the current value of the metadata. + /// * `order`: is the atomic ordering of the fetch-or operation. + fn fetch_or_metadata( + metadata_spec: &HeaderMetadataSpec, + object: ObjectReference, + val: T, + order: Ordering, + ) -> T { + metadata_spec.fetch_or::(object.to_header::(), val, order) + } + + /// A function to atomically perform an update operation on the specified per-object metadata's content. + /// The default implementation assumes the bits defined by the spec are always available for MMTk to use. If that is not the case, a binding should override this method and provide its own implementation. + /// The semantics of this method are the same as `fetch_update()` on Rust atomic types. + /// + /// # Arguments: + /// + /// * `metadata_spec`: is the header metadata spec that tries to perform the operation. + /// * `object`: is a reference to the target object. + /// * `set_order`: is the atomic ordering used when storing the updated value. + /// * `fetch_order`: is the atomic ordering used when loading the current value. + /// * `f`: is the closure that computes the new metadata value from the old value, or returns `None` to abort the update. + /// + /// Returns the old metadata value if the update was applied, following the semantics of `fetch_update()` on Rust atomic types. + fn fetch_update_metadata Option + Copy>( + metadata_spec: &HeaderMetadataSpec, + object: ObjectReference, + set_order: Ordering, + fetch_order: Ordering, + f: F, + ) -> std::result::Result { + metadata_spec.fetch_update::(object.to_header::(), set_order, fetch_order, f) + } + + /// Copy an object and return the address of the new object. Usually in the implementation of this method, + /// `alloc_copy()` and `post_copy()` from [`GCWorkerCopyContext`](util/copy/struct.GCWorkerCopyContext.html) + /// are used for copying. + /// + /// Arguments: + /// * `from`: The address of the object to be copied. + /// * `semantics`: The copy semantic to use. + /// * `copy_context`: The `GCWorkerCopyContext` for the GC thread.
+ fn copy_object( + from: ObjectReference, + semantics: CopySemantics, + copy_context: &mut GCWorkerCopyContext, + ) -> ObjectReference; + + /// Copy an object. This is required + /// for delayed-copy collectors such as compacting collectors. During the + /// collection, MMTk reserves a region in the heap for an object as per + /// requirements found from this trait and then asks the binding to + /// determine what the object's reference will be post-copy. Return the address + /// past the end of the copied object. + /// + /// Arguments: + /// * `from`: The address of the object to be copied. + /// * `to`: The target location. + /// * `region`: The start of the region that was reserved for this object. + fn copy_object_to(from: ObjectReference, to: ObjectReference, region: Address) -> Address; + + /// Return the reference that will be used for an object after it is copied + /// to the specified region. Used in delayed-copy collectors such as compacting + /// collectors. + /// + /// Arguments: + /// * `from`: The object to be copied. + /// * `to`: The region to be copied to. + fn get_object_reference_when_copied_to(from: ObjectReference, to: Address) -> ObjectReference; + + /// Return the size used by an object. + /// + /// Arguments: + /// * `object`: The object to be queried. + fn get_object_size(object: ObjectReference) -> usize; + + /// Return the size when an object is copied. + /// + /// Arguments: + /// * `object`: The object to be queried. + fn get_object_size_when_copied(object: ObjectReference) -> usize; + + /// Return the alignment when an object is copied. + /// + /// Arguments: + /// * `object`: The object to be queried. + fn get_object_align_when_copied(object: ObjectReference) -> usize; + + /// Return the alignment offset when an object is copied. + /// + /// Arguments: + /// * `object`: The object to be queried. + fn get_object_align_offset_when_copied(object: ObjectReference) -> usize; + + /// This is the worst case expansion that can occur due to object size increasing while + /// copying. This constant is used to calculate whether a nursery has grown larger than the + /// mature space for generational plans. + const VM_WORST_CASE_COPY_EXPANSION: f64 = 1.5; + + /// If this is true, the binding guarantees that an object reference's raw address is always equal to the return value of the `ref_to_address` method + /// and the return value of the `ref_to_object_start` method. This is a very strong guarantee, but it is also helpful for MMTk to + /// make some assumptions and optimize for this case. + /// If a binding sets this to true, and the related methods return inconsistent results, this is undefined behavior. MMTk may panic + /// if any assertion catches this error, but may also fail silently. + const UNIFIED_OBJECT_REFERENCE_ADDRESS: bool = false; + + /// For our allocation result (object_start), the binding may have an offset between the allocation result + /// and the raw address of its object reference, i.e. object ref's raw address = object_start + offset. + /// The offset could be zero. The offset does not need to be + /// constant for all objects. This constant defines the smallest possible offset. + /// + /// This is used as an indication for MMTk to predict where object references may point to in some algorithms. + /// + /// We should have the invariant: + /// * object ref >= object_start + OBJECT_REF_OFFSET_LOWER_BOUND + const OBJECT_REF_OFFSET_LOWER_BOUND: isize; + + /// Return the lowest address of the storage associated with an object.
This should be + /// the address that a binding gets by an allocation call ([`crate::memory_manager::alloc`]). + /// + /// Arguments: + /// * `object`: The object to be queried. It should not be null. + fn ref_to_object_start(object: ObjectReference) -> Address; + + /// Return the header base address from an object reference. Any header metadata spec + /// declared in [`crate::vm::VMBinding`] is defined as an offset + /// from this address. If a binding does not use any header metadata for MMTk, this method + /// will not be called, and the binding can simply use `unreachable!()` for the method. + /// + /// Arguments: + /// * `object`: The object to be queried. It should not be null. + fn ref_to_header(object: ObjectReference) -> Address; + + /// Return an address guaranteed to be inside the storage associated + /// with an object. The returned address needs to be deterministic: + /// for a given object, the returned address + /// should be a constant offset from the object reference address. + /// + /// Note that MMTk may forge an arbitrary address + /// directly into a potential object reference, and call this method on the 'object reference'. + /// In that case, the argument `object` may not be a valid object reference, + /// and the implementation of this method should not use any object metadata. + /// + /// MMTk uses this method more frequently than [`crate::vm::VMBinding::ref_to_object_start`]. + /// + /// Arguments: + /// * `object`: The object to be queried. It should not be null. + fn ref_to_address(object: ObjectReference) -> Address; + + /// Return an object for a given address returned by `ref_to_address()`. + /// This does exactly the opposite of `ref_to_address()`. The argument `addr` has + /// to be an address that was previously returned from `ref_to_address()`. Invoking this method + /// with an unexpected address is undefined behavior. + /// + /// Arguments: + /// * `addr`: An address that is returned from `ref_to_address()` + fn address_to_ref(addr: Address) -> ObjectReference; + + /// Dump debugging information for an object. + /// + /// Arguments: + /// * `object`: The object to be dumped. + fn dump_object(object: ObjectReference); + + /// Return whether an object is valid from the runtime's point of view. This is used + /// to debug MMTk. + fn is_object_sane(_object: ObjectReference) -> bool { + true + } + + // --- Reference Glue --- + + /// The type of finalizable objects. This type is used when the binding registers and pops finalizable objects. + /// For most languages, this can just be `ObjectReference`, meaning that the binding registers + /// and pops normal object references as finalizable objects. + type FinalizableType: Finalizable; + + // TODO: Should we also move the following methods about weak references to a trait (similar to the `Finalizable` trait)? + + /// Clear the referent of a weak reference object. Weak and soft references always clear the referent + /// before enqueueing. + /// + /// Arguments: + /// * `new_reference`: The reference whose referent is to be cleared. + fn weakref_clear_referent(new_reference: ObjectReference) { + Self::weakref_set_referent(new_reference, ObjectReference::NULL); + } + + /// Get the referent from a weak reference object. + /// + /// Arguments: + /// * `object`: The object reference. + fn weakref_get_referent(object: ObjectReference) -> ObjectReference; + + /// Set the referent in a weak reference object. + /// + /// Arguments: + /// * `reff`: The object reference for the reference.
+ /// * `referent`: The referent object reference. + fn weakref_set_referent(reff: ObjectReference, referent: ObjectReference); + + /// Check if the referent has been cleared. + /// + /// Arguments: + /// * `referent`: The referent object reference. + fn weakref_is_referent_cleared(referent: ObjectReference) -> bool { + referent.is_null() + } + + /// For weak reference types, if the referent is cleared during GC, the reference + /// will be added to a queue, and MMTk will call this method to inform + /// the VM about the changes for those references. This method is used + /// to implement Java's ReferenceQueue. + /// Note that this method is called for each type of weak reference during GC, and + /// the references slice will be cleared after this call returns. That means + /// MMTk will no longer keep these references alive once this method returns. + fn weakref_enqueue_references(references: &[ObjectReference], tls: VMWorkerThread); + + // --- Scanning --- + + /// Return true if the given object supports edge enqueuing. + /// + /// - If this returns true, MMTk core will call `scan_object` on the object. + /// - Otherwise, MMTk core will call `scan_object_and_trace_edges` on the object. + /// + /// For maximum performance, the VM should support edge-enqueuing for as many objects as + /// practical. Also note that this method is called for every object to be scanned, so it + /// must be fast. The VM binding should avoid expensive checks and keep it as efficient as + /// possible. + /// + /// Arguments: + /// * `tls`: The VM-specific thread-local storage for the current worker. + /// * `object`: The object to be scanned. + fn support_edge_enqueuing(_tls: VMWorkerThread, _object: ObjectReference) -> bool { + true + } + + /// Delegated scanning of an object, visiting each reference field encountered. + /// + /// The VM shall call `edge_visitor.visit_edge` on each reference field. + /// + /// The VM may skip a reference field if it holds a null reference. If the VM supports tagged + /// references, it must skip tagged reference fields which are not holding references. + /// + /// The `memory_manager::is_mmtk_object` function can be used in this function if + /// - the "is_mmtk_object" feature is enabled, and + /// - `VM::NEED_VO_BITS_DURING_TRACING` is true. + /// + /// Arguments: + /// * `tls`: The VM-specific thread-local storage for the current worker. + /// * `object`: The object to be scanned. + /// * `edge_visitor`: Called back for each edge. + fn scan_object>( + tls: VMWorkerThread, + object: ObjectReference, + edge_visitor: &mut EV, + ); + + /// Delegated scanning of an object, visiting each reference field encountered, and tracing the + /// objects pointed to by each field. + /// + /// The VM shall call `object_tracer.trace_object` on the value held in each reference field, + /// and assign the returned value back to the field. If the VM uses tagged references, the + /// value passed to `object_tracer.trace_object` shall be the `ObjectReference` to the object + /// without any tag bits. + /// + /// The VM may skip a reference field if it holds a null reference. If the VM supports tagged + /// references, it must skip tagged reference fields which are not holding references. + /// + /// The `memory_manager::is_mmtk_object` function can be used in this function if + /// - the "is_mmtk_object" feature is enabled, and + /// - `VM::NEED_VO_BITS_DURING_TRACING` is true. + /// + /// Arguments: + /// * `tls`: The VM-specific thread-local storage for the current worker.
+ /// * `object`: The object to be scanned. + /// * `object_tracer`: Called back for the content of each edge. + fn scan_object_and_trace_edges( + _tls: VMWorkerThread, + _object: ObjectReference, + _object_tracer: &mut OT, + ) { + unreachable!("scan_object_and_trace_edges() will not be called when support_edge_enqueuing() is always true.") + } + + /// MMTk calls this method the first time during a collection that threads' stacks + /// have been scanned. This can be used (for example) to clean up + /// obsolete compiled methods that are no longer being executed. + /// + /// Arguments: + /// * `partial_scan`: Whether the scan was partial or full-heap. + /// * `tls`: The GC thread that is performing the thread scan. + fn notify_initial_thread_scan_complete(partial_scan: bool, tls: VMWorkerThread); + + /// Scan one mutator for stack roots. + /// + /// Some VM bindings may not be able to implement this method. + /// For example, the VM binding may only be able to enumerate all threads and + /// scan them while enumerating, but cannot scan stacks individually when given + /// the references of threads. + /// In that case, it can leave this method empty, and deal with stack + /// roots in [`VMBinding::scan_vm_specific_roots`]. However, in that case, MMTk + /// does not know those roots are stack roots, and cannot perform any possible + /// optimization for the stack roots. + /// + /// The `memory_manager::is_mmtk_object` function can be used in this function if + /// - the "is_mmtk_object" feature is enabled. + /// + /// Arguments: + /// * `tls`: The GC thread that is performing this scanning. + /// * `mutator`: The reference to the mutator whose roots will be scanned. + /// * `factory`: The VM uses it to create work packets for scanning roots. + fn scan_roots_in_mutator_thread( + tls: VMWorkerThread, + mutator: &'static mut Mutator, + factory: impl RootsWorkFactory, + ); + + /// Scan VM-specific roots. The creation of all root scan tasks (except thread scanning) + /// goes here. + /// + /// The `memory_manager::is_mmtk_object` function can be used in this function if + /// - the "is_mmtk_object" feature is enabled. + /// + /// Arguments: + /// * `tls`: The GC thread that is performing this scanning. + /// * `factory`: The VM uses it to create work packets for scanning roots. + fn scan_vm_specific_roots(tls: VMWorkerThread, factory: impl RootsWorkFactory); + + /// Return whether the VM supports return barriers. This is unused at the moment. + fn supports_return_barrier() -> bool; + + /// Prepare for another round of root scanning in the same GC. Some GC algorithms + /// need multiple transitive closures, and each transitive closure starts from + /// root scanning. We expect the binding to provide the same root set for every + /// round of root scanning in the same GC. Bindings can use this call to get + /// ready for another round of root scanning to make sure that the same root + /// set will be returned in the upcoming calls of root scanning methods, + /// such as [`crate::vm::VMBinding::scan_roots_in_mutator_thread`] and + /// [`crate::vm::VMBinding::scan_vm_specific_roots`]. + fn prepare_for_roots_re_scanning(); + + /// Process weak references. + /// + /// This function is called after a transitive closure is completed. + /// + /// MMTk core enables the VM binding to do the following in this function: + /// + /// 1. Query if an object is already reached in this transitive closure. + /// 2. Get the new address of an object if it is already reached. + /// 3.
Keep an object and its descendants alive if not yet reached. + /// 4. Request this function to be called again after the transitive closure is finished again. + /// + /// The VM binding can query if an object is currently reached by calling + /// `ObjectReference::is_reachable()`. + /// + /// If an object is already reached, the VM binding can get its new address by calling + /// `ObjectReference::get_forwarded_object()` as the object may have been moved. + /// + /// If an object is not yet reached, the VM binding can keep that object and its descendants + /// alive. To do this, the VM binding should use `tracer_context.with_tracer` to get access to + /// an `ObjectTracer`, and then call its `trace_object(object)` method. The `trace_object` + /// method will return the new address of the `object` if it moved the object, or its original + /// address if not moved. Implementation-wise, the `ObjectTracer` may contain an internal + /// queue for newly traced objects, and will flush the queue when `tracer_context.with_tracer` + /// returns. Therefore, it is recommended to reuse the `ObjectTracer` instance to trace + /// multiple objects. + /// + /// *Note that if `trace_object` is called on an already reached object, the behavior will be + /// equivalent to `ObjectReference::get_forwarded_object()`. It will return the new address if + /// the GC already moved the object when tracing that object, or the original address if the GC + /// did not move the object when tracing it. In theory, the VM binding can use `trace_object` + /// wherever `ObjectReference::get_forwarded_object()` is needed. However, if a VM never + /// resurrects objects, it should completely avoid touching `tracer_context`, and exclusively + /// use `ObjectReference::get_forwarded_object()` to get new addresses of objects. By doing + /// so, the VM binding can avoid accidentally resurrecting objects.* + /// + /// The VM binding can return `true` from `process_weak_refs` to request `process_weak_refs` + /// to be called again after the MMTk core finishes the transitive closure again from the objects + /// newly visited by `ObjectTracer::trace_object`. This is useful if a VM supports multiple + /// levels of reachability (such as Java) or ephemerons. + /// + /// Implementation-wise, this function is called as the "sentinel" of the `VMRefClosure` work + /// bucket, which means it is called when all work packets in that bucket have finished. The + /// `tracer_context` expands the transitive closure by adding more work packets in the same + /// bucket. This means if `process_weak_refs` returns true, those work packets will have + /// finished (completing the transitive closure) by the time `process_weak_refs` is called + /// again. The VM binding can make use of this by adding custom work packets into the + /// `VMRefClosure` bucket. The bucket will be `VMRefForwarding` instead when forwarding. + /// See below. + /// + /// The `memory_manager::is_mmtk_object` function can be used in this function if + /// - the "is_mmtk_object" feature is enabled, and + /// - `VM::NEED_VO_BITS_DURING_TRACING` is true. + /// + /// Arguments: + /// * `worker`: The current GC worker. + /// * `tracer_context`: Use this to get access to an `ObjectTracer` and use it to retain and + /// update weak references. + /// + /// This function shall return true if this function needs to be called again after the GC + /// finishes expanding the transitive closure from the objects kept alive.
+ fn process_weak_refs( + _worker: &mut GCWorker, + _tracer_context: impl ObjectTracerContext, + ) -> bool { + false + } + + /// Forward weak references. + /// + /// This function will only be called in the forwarding stage when using the mark-compact GC + /// algorithm. Mark-compact computes the transitive closure twice during each GC. It marks objects + /// in the first transitive closure, and forwards references in the second transitive closure. + /// + /// Arguments: + /// * `worker`: The current GC worker. + /// * `tracer_context`: Use this to get access to an `ObjectTracer` and use it to update weak + /// references. + fn forward_weak_refs( + _worker: &mut GCWorker, + _tracer_context: impl ObjectTracerContext, + ) { + } +} diff --git a/vmbindings/dummyvm/src/active_plan.rs b/vmbindings/dummyvm/src/active_plan.rs deleted file mode 100644 index 80144f96f5..0000000000 --- a/vmbindings/dummyvm/src/active_plan.rs +++ /dev/null @@ -1,25 +0,0 @@ -use crate::DummyVM; -use mmtk::util::opaque_pointer::*; -use mmtk::vm::ActivePlan; -use mmtk::Mutator; - -pub struct VMActivePlan {} - -impl ActivePlan for VMActivePlan { - fn number_of_mutators() -> usize { - unimplemented!() - } - - fn is_mutator(_tls: VMThread) -> bool { - // FIXME - true - } - - fn mutator(_tls: VMMutatorThread) -> &'static mut Mutator { - unimplemented!() - } - - fn mutators<'a>() -> Box> + 'a> { - unimplemented!() - } -} diff --git a/vmbindings/dummyvm/src/collection.rs b/vmbindings/dummyvm/src/collection.rs deleted file mode 100644 index f82e793fd3..0000000000 --- a/vmbindings/dummyvm/src/collection.rs +++ /dev/null @@ -1,26 +0,0 @@ -use crate::DummyVM; -use mmtk::util::opaque_pointer::*; -use mmtk::vm::Collection; -use mmtk::vm::GCThreadContext; -use mmtk::Mutator; - -pub struct VMCollection {} - -impl Collection for VMCollection { - fn stop_all_mutators(_tls: VMWorkerThread, _mutator_visitor: F) - where - F: FnMut(&'static mut Mutator), - { - unimplemented!() - } - - fn resume_mutators(_tls: VMWorkerThread) { - unimplemented!() - } - - fn block_for_gc(_tls: VMMutatorThread) { - panic!("block_for_gc is not implemented") - } - - fn spawn_gc_thread(_tls: VMThread, _ctx: GCThreadContext) {} -} diff --git a/vmbindings/dummyvm/src/lib.rs b/vmbindings/dummyvm/src/lib.rs index 57e630f222..b498b37bb4 100644 --- a/vmbindings/dummyvm/src/lib.rs +++ b/vmbindings/dummyvm/src/lib.rs @@ -7,33 +7,170 @@ use mmtk::vm::VMBinding; use mmtk::MMTKBuilder; use mmtk::MMTK; -pub mod active_plan; pub mod api; -pub mod collection; -pub mod object_model; -pub mod reference_glue; -pub mod scanning; - pub mod test_fixtures; mod edges; #[cfg(test)] mod tests; +use edges::*; +use mmtk::vm::prelude::*; +use mmtk::vm::GCThreadContext; + +// This is intentionally set to a non-zero value to see if it breaks. +// Change this if you want to test other values. +pub const OBJECT_REF_OFFSET: usize = 4; + #[derive(Default)] pub struct DummyVM; impl VMBinding for DummyVM { - type VMObjectModel = object_model::VMObjectModel; - type VMScanning = scanning::VMScanning; - type VMCollection = collection::VMCollection; - type VMActivePlan = active_plan::VMActivePlan; - type VMReferenceGlue = reference_glue::VMReferenceGlue; type VMEdge = edges::DummyVMEdge; type VMMemorySlice = edges::DummyVMMemorySlice; /// Allowed maximum alignment in bytes.
const MAX_ALIGNMENT: usize = 1 << 6; + + fn number_of_mutators() -> usize { + unimplemented!() + } + + fn is_mutator(_tls: VMThread) -> bool { + // FIXME + true + } + + fn mutator(_tls: VMMutatorThread) -> &'static mut Mutator { + unimplemented!() + } + + fn mutators<'a>() -> Box> + 'a> { + unimplemented!() + } + + fn stop_all_mutators(_tls: VMWorkerThread, _mutator_visitor: F) + where + F: FnMut(&'static mut Mutator), + { + unimplemented!() + } + + fn resume_mutators(_tls: VMWorkerThread) { + unimplemented!() + } + + fn block_for_gc(_tls: VMMutatorThread) { + panic!("block_for_gc is not implemented") + } + + fn spawn_gc_thread(_tls: VMThread, _ctx: GCThreadContext) {} + + const GLOBAL_LOG_BIT_SPEC: VMGlobalLogBitSpec = VMGlobalLogBitSpec::in_header(0); + const LOCAL_FORWARDING_POINTER_SPEC: VMLocalForwardingPointerSpec = + VMLocalForwardingPointerSpec::in_header(0); + const LOCAL_FORWARDING_BITS_SPEC: VMLocalForwardingBitsSpec = + VMLocalForwardingBitsSpec::in_header(0); + const LOCAL_MARK_BIT_SPEC: VMLocalMarkBitSpec = VMLocalMarkBitSpec::in_header(0); + const LOCAL_LOS_MARK_NURSERY_SPEC: VMLocalLOSMarkNurserySpec = + VMLocalLOSMarkNurserySpec::in_header(0); + + const OBJECT_REF_OFFSET_LOWER_BOUND: isize = OBJECT_REF_OFFSET as isize; + + fn copy_object( + _from: ObjectReference, + _semantics: CopySemantics, + _copy_context: &mut GCWorkerCopyContext, + ) -> ObjectReference { + unimplemented!() + } + + fn copy_object_to(_from: ObjectReference, _to: ObjectReference, _region: Address) -> Address { + unimplemented!() + } + + fn get_object_size(_object: ObjectReference) -> usize { + unimplemented!() + } + + fn get_object_size_when_copied(object: ObjectReference) -> usize { + Self::get_object_size(object) + } + + fn get_object_align_when_copied(_object: ObjectReference) -> usize { + ::std::mem::size_of::() + } + + fn get_object_align_offset_when_copied(_object: ObjectReference) -> usize { + 0 + } + + fn get_object_reference_when_copied_to( + _from: ObjectReference, + _to: Address, + ) -> ObjectReference { + unimplemented!() + } + + fn ref_to_object_start(object: ObjectReference) -> Address { + object.to_raw_address().sub(OBJECT_REF_OFFSET) + } + + fn ref_to_header(object: ObjectReference) -> Address { + object.to_raw_address() + } + + fn ref_to_address(object: ObjectReference) -> Address { + // Just use object start. 
+ Self::ref_to_object_start(object) + } + + fn address_to_ref(addr: Address) -> ObjectReference { + ObjectReference::from_raw_address(addr.add(OBJECT_REF_OFFSET)) + } + + fn dump_object(_object: ObjectReference) { + unimplemented!() + } + + type FinalizableType = ObjectReference; + + fn weakref_set_referent(_reference: ObjectReference, _referent: ObjectReference) { + unimplemented!() + } + fn weakref_get_referent(_object: ObjectReference) -> ObjectReference { + unimplemented!() + } + fn weakref_enqueue_references(_references: &[ObjectReference], _tls: VMWorkerThread) { + unimplemented!() + } + + fn scan_roots_in_mutator_thread( + _tls: VMWorkerThread, + _mutator: &'static mut Mutator, + _factory: impl RootsWorkFactory, + ) { + unimplemented!() + } + fn scan_vm_specific_roots(_tls: VMWorkerThread, _factory: impl RootsWorkFactory) { + unimplemented!() + } + fn scan_object>( + _tls: VMWorkerThread, + _object: ObjectReference, + _edge_visitor: &mut EV, + ) { + unimplemented!() + } + fn notify_initial_thread_scan_complete(_partial_scan: bool, _tls: VMWorkerThread) { + unimplemented!() + } + fn supports_return_barrier() -> bool { + unimplemented!() + } + fn prepare_for_roots_re_scanning() { + unimplemented!() + } } use std::sync::atomic::{AtomicBool, Ordering}; diff --git a/vmbindings/dummyvm/src/object_model.rs b/vmbindings/dummyvm/src/object_model.rs deleted file mode 100644 index c666b1e7b1..0000000000 --- a/vmbindings/dummyvm/src/object_model.rs +++ /dev/null @@ -1,80 +0,0 @@ -use crate::DummyVM; -use mmtk::util::copy::{CopySemantics, GCWorkerCopyContext}; -use mmtk::util::{Address, ObjectReference}; -use mmtk::vm::*; - -pub struct VMObjectModel {} - -// This is intentionally set to a non-zero value to see if it breaks. -// Change this if you want to test other values. 
-pub const OBJECT_REF_OFFSET: usize = 4; - -impl ObjectModel for VMObjectModel { - const GLOBAL_LOG_BIT_SPEC: VMGlobalLogBitSpec = VMGlobalLogBitSpec::in_header(0); - const LOCAL_FORWARDING_POINTER_SPEC: VMLocalForwardingPointerSpec = - VMLocalForwardingPointerSpec::in_header(0); - const LOCAL_FORWARDING_BITS_SPEC: VMLocalForwardingBitsSpec = - VMLocalForwardingBitsSpec::in_header(0); - const LOCAL_MARK_BIT_SPEC: VMLocalMarkBitSpec = VMLocalMarkBitSpec::in_header(0); - const LOCAL_LOS_MARK_NURSERY_SPEC: VMLocalLOSMarkNurserySpec = - VMLocalLOSMarkNurserySpec::in_header(0); - - const OBJECT_REF_OFFSET_LOWER_BOUND: isize = OBJECT_REF_OFFSET as isize; - - fn copy( - _from: ObjectReference, - _semantics: CopySemantics, - _copy_context: &mut GCWorkerCopyContext, - ) -> ObjectReference { - unimplemented!() - } - - fn copy_to(_from: ObjectReference, _to: ObjectReference, _region: Address) -> Address { - unimplemented!() - } - - fn get_current_size(_object: ObjectReference) -> usize { - unimplemented!() - } - - fn get_size_when_copied(object: ObjectReference) -> usize { - Self::get_current_size(object) - } - - fn get_align_when_copied(_object: ObjectReference) -> usize { - ::std::mem::size_of::() - } - - fn get_align_offset_when_copied(_object: ObjectReference) -> usize { - 0 - } - - fn get_reference_when_copied_to(_from: ObjectReference, _to: Address) -> ObjectReference { - unimplemented!() - } - - fn get_type_descriptor(_reference: ObjectReference) -> &'static [i8] { - unimplemented!() - } - - fn ref_to_object_start(object: ObjectReference) -> Address { - object.to_raw_address().sub(OBJECT_REF_OFFSET) - } - - fn ref_to_header(object: ObjectReference) -> Address { - object.to_raw_address() - } - - fn ref_to_address(object: ObjectReference) -> Address { - // Just use object start. 
- Self::ref_to_object_start(object) - } - - fn address_to_ref(addr: Address) -> ObjectReference { - ObjectReference::from_raw_address(addr.add(OBJECT_REF_OFFSET)) - } - - fn dump_object(_object: ObjectReference) { - unimplemented!() - } -} diff --git a/vmbindings/dummyvm/src/reference_glue.rs b/vmbindings/dummyvm/src/reference_glue.rs deleted file mode 100644 index 66e09c5e5a..0000000000 --- a/vmbindings/dummyvm/src/reference_glue.rs +++ /dev/null @@ -1,20 +0,0 @@ -use crate::DummyVM; -use mmtk::util::opaque_pointer::VMWorkerThread; -use mmtk::util::ObjectReference; -use mmtk::vm::ReferenceGlue; - -pub struct VMReferenceGlue {} - -impl ReferenceGlue for VMReferenceGlue { - type FinalizableType = ObjectReference; - - fn set_referent(_reference: ObjectReference, _referent: ObjectReference) { - unimplemented!() - } - fn get_referent(_object: ObjectReference) -> ObjectReference { - unimplemented!() - } - fn enqueue_references(_references: &[ObjectReference], _tls: VMWorkerThread) { - unimplemented!() - } -} diff --git a/vmbindings/dummyvm/src/scanning.rs b/vmbindings/dummyvm/src/scanning.rs deleted file mode 100644 index 960f9d642b..0000000000 --- a/vmbindings/dummyvm/src/scanning.rs +++ /dev/null @@ -1,39 +0,0 @@ -use crate::edges::DummyVMEdge; -use crate::DummyVM; -use mmtk::util::opaque_pointer::*; -use mmtk::util::ObjectReference; -use mmtk::vm::EdgeVisitor; -use mmtk::vm::RootsWorkFactory; -use mmtk::vm::Scanning; -use mmtk::Mutator; - -pub struct VMScanning {} - -impl Scanning for VMScanning { - fn scan_roots_in_mutator_thread( - _tls: VMWorkerThread, - _mutator: &'static mut Mutator, - _factory: impl RootsWorkFactory, - ) { - unimplemented!() - } - fn scan_vm_specific_roots(_tls: VMWorkerThread, _factory: impl RootsWorkFactory) { - unimplemented!() - } - fn scan_object>( - _tls: VMWorkerThread, - _object: ObjectReference, - _edge_visitor: &mut EV, - ) { - unimplemented!() - } - fn notify_initial_thread_scan_complete(_partial_scan: bool, _tls: VMWorkerThread) { - unimplemented!() - } - fn supports_return_barrier() -> bool { - unimplemented!() - } - fn prepare_for_roots_re_scanning() { - unimplemented!() - } -} diff --git a/vmbindings/dummyvm/src/test_fixtures.rs b/vmbindings/dummyvm/src/test_fixtures.rs index 4e400bc6d2..67219058a5 100644 --- a/vmbindings/dummyvm/src/test_fixtures.rs +++ b/vmbindings/dummyvm/src/test_fixtures.rs @@ -10,8 +10,8 @@ use mmtk::AllocationSemantics; use mmtk::MMTK; use crate::api::*; -use crate::object_model::OBJECT_REF_OFFSET; use crate::DummyVM; +use crate::OBJECT_REF_OFFSET; pub trait FixtureContent { fn create() -> Self; diff --git a/vmbindings/dummyvm/src/tests/barrier_slow_path_assertion.rs b/vmbindings/dummyvm/src/tests/barrier_slow_path_assertion.rs index 063c053dcd..b6447232da 100644 --- a/vmbindings/dummyvm/src/tests/barrier_slow_path_assertion.rs +++ b/vmbindings/dummyvm/src/tests/barrier_slow_path_assertion.rs @@ -3,9 +3,9 @@ // Run the test with any plan that uses object barrier, and we also need both VO bit and extreme assertions. 
-use crate::object_model::OBJECT_REF_OFFSET; use crate::test_fixtures::FixtureContent; use crate::test_fixtures::MMTKSingleton; +use crate::OBJECT_REF_OFFSET; use crate::{api::*, edges}; use atomic::Atomic; use mmtk::util::{Address, ObjectReference}; diff --git a/vmbindings/dummyvm/src/tests/conservatism.rs b/vmbindings/dummyvm/src/tests/conservatism.rs index 6262769888..2d882133ab 100644 --- a/vmbindings/dummyvm/src/tests/conservatism.rs +++ b/vmbindings/dummyvm/src/tests/conservatism.rs @@ -2,8 +2,8 @@ // GITHUB-CI: FEATURES=is_mmtk_object use crate::api::*; -use crate::object_model::OBJECT_REF_OFFSET; use crate::test_fixtures::{Fixture, SingleObject}; +use crate::OBJECT_REF_OFFSET; use mmtk::util::constants::LOG_BITS_IN_WORD; use mmtk::util::is_mmtk_object::VO_BIT_REGION_SIZE; use mmtk::util::*; diff --git a/vmbindings/dummyvm/src/tests/vm_layout_default.rs b/vmbindings/dummyvm/src/tests/vm_layout_default.rs index 38fa24a10e..d8ad89d9a0 100644 --- a/vmbindings/dummyvm/src/tests/vm_layout_default.rs +++ b/vmbindings/dummyvm/src/tests/vm_layout_default.rs @@ -1,18 +1,18 @@ // GITHUB-CI: MMTK_PLAN=all use mmtk::util::heap::vm_layout::VMLayout; +use mmtk::vm::VMBinding; pub fn test_with_vm_layout(layout: Option) { use crate::api; use crate::test_fixtures::VMLayoutFixture; use mmtk::plan::AllocationSemantics; - use mmtk::vm::ObjectModel; let fixture = VMLayoutFixture::create_with_layout(layout); // Test allocation let addr = api::mmtk_alloc(fixture.mutator, 8, 8, 0, AllocationSemantics::Default); - let obj = crate::object_model::VMObjectModel::address_to_ref(addr); + let obj = crate::DummyVM::address_to_ref(addr); // Test SFT assert!(api::mmtk_is_in_mmtk_spaces(obj)); // Test mmapper
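As a follow-up to the test changes above, here is a sketch of an extra unit test (not part of this patch) that pins down the address/reference round trip implied by the new `OBJECT_REF_OFFSET` constant in the dummy binding's lib.rs. It would live in the dummyvm crate's test module; the concrete address is arbitrary and used only for pointer arithmetic.

```rust
// A sketch only, not part of this patch.
#[test]
fn object_ref_offset_round_trip() {
    use crate::{DummyVM, OBJECT_REF_OFFSET};
    use mmtk::util::Address;
    use mmtk::vm::VMBinding;

    // Pretend this is the address returned by an allocation call.
    let alloc_result = unsafe { Address::from_usize(0x4000_0000) };
    let obj = DummyVM::address_to_ref(alloc_result);
    // The reference sits OBJECT_REF_OFFSET bytes above the allocation result...
    assert_eq!(obj.to_raw_address(), alloc_result.add(OBJECT_REF_OFFSET));
    // ...and both the object start and the "address" view map back to it.
    assert_eq!(DummyVM::ref_to_object_start(obj), alloc_result);
    assert_eq!(DummyVM::ref_to_address(obj), alloc_result);
}
```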