Move methods in multiple VM traits into VMBinding.
qinsoon committed Dec 3, 2023
1 parent a87636c commit efd0770
Showing 61 changed files with 1,637 additions and 1,878 deletions.
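Every file below follows the same pattern: items that used to be reached through `VMBinding`'s associated trait types (`VMActivePlan`, `VMCollection`, `VMObjectModel`, `VMScanning`, `VMReferenceGlue`) are now reached directly on `VMBinding`, dropping one level of indirection at each call site. A minimal before/after sketch of that shape, with simplified signatures and stub types for illustration (`VMBindingBefore`, `VMBindingAfter`, and the stubs are hypothetical names, not MMTk API):

```rust
// Stub types standing in for MMTk's real thread and metadata types.
#[derive(Clone, Copy)]
pub struct VMThread;
#[derive(Clone, Copy)]
pub struct VMMutatorThread(pub VMThread);
pub struct MetadataSpec;

// Before: VMBinding aggregated per-concern traits as associated types,
// so call sites named both the associated type and its trait.
pub trait ActivePlan {
    fn is_mutator(tls: VMThread) -> bool;
}
pub trait Collection {
    fn block_for_gc(tls: VMMutatorThread);
}
pub trait ObjectModel {
    const GLOBAL_LOG_BIT_SPEC: MetadataSpec;
}
pub trait VMBindingBefore {
    type VMActivePlan: ActivePlan;
    type VMCollection: Collection;
    type VMObjectModel: ObjectModel;
}

fn gc_poll_before<VM: VMBindingBefore>(tls: VMMutatorThread) {
    debug_assert!(VM::VMActivePlan::is_mutator(tls.0));
    VM::VMCollection::block_for_gc(tls);
}

// After: the same methods and constants live directly on VMBinding, so
// call sites shorten to VM::is_mutator(..), VM::block_for_gc(..), and
// VM::GLOBAL_LOG_BIT_SPEC.
pub trait VMBindingAfter {
    const GLOBAL_LOG_BIT_SPEC: MetadataSpec;
    fn is_mutator(tls: VMThread) -> bool;
    fn block_for_gc(tls: VMMutatorThread);
}

fn gc_poll_after<VM: VMBindingAfter>(tls: VMMutatorThread) {
    debug_assert!(VM::is_mutator(tls.0));
    VM::block_for_gc(tls);
}
```

The hunks below are the mechanical application of this renaming at every call site.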
2 changes: 1 addition & 1 deletion macros/src/plan_trace_object_impl.rs
@@ -86,7 +86,7 @@ pub(crate) fn generate_trace_object<'a>(
         }
     } else {
         quote! {
-            <VM::VMActivePlan as crate::vm::ActivePlan<VM>>::vm_trace_object::<Q>(__mmtk_queue, __mmtk_objref, __mmtk_worker)
+            VM::vm_trace_object::<Q>(__mmtk_queue, __mmtk_objref, __mmtk_worker)
         }
     };

21 changes: 6 additions & 15 deletions src/memory_manager.rs
@@ -23,7 +23,6 @@ use crate::util::heap::layout::vm_layout::vm_layout;
 use crate::util::opaque_pointer::*;
 use crate::util::{Address, ObjectReference};
 use crate::vm::edge_shape::MemorySlice;
-use crate::vm::ReferenceGlue;
 use crate::vm::VMBinding;
 use std::sync::atomic::Ordering;
 /// Initialize an MMTk instance. A VM should call this method after creating an [`crate::MMTK`]
@@ -446,16 +445,15 @@ pub fn get_malloc_bytes<VM: VMBinding>(mmtk: &MMTK<VM>) -> usize {
 /// However, if a binding uses counted malloc (which won't poll for GC), they may want to poll for GC manually.
 /// This function should only be used by mutator threads.
 pub fn gc_poll<VM: VMBinding>(mmtk: &MMTK<VM>, tls: VMMutatorThread) {
-    use crate::vm::{ActivePlan, Collection};
     debug_assert!(
-        VM::VMActivePlan::is_mutator(tls.0),
+        VM::is_mutator(tls.0),
         "gc_poll() can only be called by a mutator thread."
     );

     if mmtk.state.should_trigger_gc_when_heap_is_full() && mmtk.gc_trigger.poll(false, None) {
         debug!("Collection required");
         assert!(mmtk.state.is_initialized(), "GC is not allowed here: collection is not initialized (did you call initialize_collection()?).");
-        VM::VMCollection::block_for_gc(tls);
+        VM::block_for_gc(tls);
     }
 }

Expand Down Expand Up @@ -785,10 +783,7 @@ pub fn harness_end<VM: VMBinding>(mmtk: &'static MMTK<VM>) {
/// Arguments:
/// * `mmtk`: A reference to an MMTk instance
/// * `object`: The object that has a finalizer
pub fn add_finalizer<VM: VMBinding>(
mmtk: &'static MMTK<VM>,
object: <VM::VMReferenceGlue as ReferenceGlue<VM>>::FinalizableType,
) {
pub fn add_finalizer<VM: VMBinding>(mmtk: &'static MMTK<VM>, object: VM::FinalizableType) {
if *mmtk.options.no_finalizer {
warn!("add_finalizer() is called when no_finalizer = true");
}
@@ -845,9 +840,7 @@ pub fn is_pinned<VM: VMBinding>(object: ObjectReference) -> bool {
 ///
 /// Arguments:
 /// * `mmtk`: A reference to an MMTk instance.
-pub fn get_finalized_object<VM: VMBinding>(
-    mmtk: &'static MMTK<VM>,
-) -> Option<<VM::VMReferenceGlue as ReferenceGlue<VM>>::FinalizableType> {
+pub fn get_finalized_object<VM: VMBinding>(mmtk: &'static MMTK<VM>) -> Option<VM::FinalizableType> {
     if *mmtk.options.no_finalizer {
         warn!("get_finalized_object() is called when no_finalizer = true");
     }
@@ -865,9 +858,7 @@ pub fn get_finalized_object<VM: VMBinding>(
 ///
 /// Arguments:
 /// * `mmtk`: A reference to an MMTk instance.
-pub fn get_all_finalizers<VM: VMBinding>(
-    mmtk: &'static MMTK<VM>,
-) -> Vec<<VM::VMReferenceGlue as ReferenceGlue<VM>>::FinalizableType> {
+pub fn get_all_finalizers<VM: VMBinding>(mmtk: &'static MMTK<VM>) -> Vec<VM::FinalizableType> {
     if *mmtk.options.no_finalizer {
         warn!("get_all_finalizers() is called when no_finalizer = true");
     }
@@ -887,7 +878,7 @@ pub fn get_all_finalizers<VM: VMBinding>(
 pub fn get_finalizers_for<VM: VMBinding>(
     mmtk: &'static MMTK<VM>,
     object: ObjectReference,
-) -> Vec<<VM::VMReferenceGlue as ReferenceGlue<VM>>::FinalizableType> {
+) -> Vec<VM::FinalizableType> {
     if *mmtk.options.no_finalizer {
         warn!("get_finalizers() is called when no_finalizer = true");
     }
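The finalizer API changes above are the associated-type half of the same move: `FinalizableType` migrates from `ReferenceGlue` onto `VMBinding` itself, collapsing `<VM::VMReferenceGlue as ReferenceGlue<VM>>::FinalizableType` to `VM::FinalizableType`. A sketch of just that shape, under the same simplifying assumptions (hypothetical trait names, bodies reduced to the one associated type):

```rust
// Sketch only; both traits are reduced to the single associated type used
// by the finalizer API above. VMBindingBefore/VMBindingAfter are
// illustrative names, not MMTk API.
pub trait ReferenceGlue<VM> {
    type FinalizableType;
}

// Before: every signature spells out the fully qualified path, e.g.
//   object: <VM::VMReferenceGlue as ReferenceGlue<VM>>::FinalizableType
pub trait VMBindingBefore: Sized {
    type VMReferenceGlue: ReferenceGlue<Self>;
}

// After: the associated type sits on the binding itself, so signatures
// shorten to `object: VM::FinalizableType`.
pub trait VMBindingAfter {
    type FinalizableType;
}
```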
11 changes: 3 additions & 8 deletions src/mmtk.rs
@@ -21,7 +21,6 @@ use crate::util::reference_processor::ReferenceProcessors;
 #[cfg(feature = "sanity")]
 use crate::util::sanity::sanity_checker::SanityChecker;
 use crate::util::statistics::stats::Stats;
-use crate::vm::ReferenceGlue;
 use crate::vm::VMBinding;
 use std::cell::UnsafeCell;
 use std::default::Default;
@@ -109,8 +108,7 @@ pub struct MMTK<VM: VMBinding> {
     pub(crate) state: Arc<GlobalState>,
     pub(crate) plan: UnsafeCell<Box<dyn Plan<VM = VM>>>,
     pub(crate) reference_processors: ReferenceProcessors,
-    pub(crate) finalizable_processor:
-        Mutex<FinalizableProcessor<<VM::VMReferenceGlue as ReferenceGlue<VM>>::FinalizableType>>,
+    pub(crate) finalizable_processor: Mutex<FinalizableProcessor<VM::FinalizableType>>,
     pub(crate) scheduler: Arc<GCWorkScheduler<VM>>,
     #[cfg(feature = "sanity")]
     pub(crate) sanity_checker: Mutex<SanityChecker<VM::VMEdge>>,
@@ -201,9 +199,7 @@ impl<VM: VMBinding> MMTK<VM> {
             state,
             plan: UnsafeCell::new(plan),
             reference_processors: ReferenceProcessors::new(),
-            finalizable_processor: Mutex::new(FinalizableProcessor::<
-                <VM::VMReferenceGlue as ReferenceGlue<VM>>::FinalizableType,
-            >::new()),
+            finalizable_processor: Mutex::new(FinalizableProcessor::<VM::FinalizableType>::new()),
             scheduler,
             #[cfg(feature = "sanity")]
             sanity_checker: Mutex::new(SanityChecker::new()),
@@ -312,7 +308,6 @@ impl<VM: VMBinding> MMTK<VM> {
         force: bool,
         exhaustive: bool,
     ) {
-        use crate::vm::Collection;
         if !self.get_plan().constraints().collects_garbage {
             warn!("User attempted a collection request, but the plan can not do GC. The request is ignored.");
             return;
@@ -330,7 +325,7 @@ impl<VM: VMBinding> MMTK<VM> {
             .user_triggered_collection
             .store(true, Ordering::Relaxed);
         self.gc_requester.request();
-        VM::VMCollection::block_for_gc(tls);
+        VM::block_for_gc(tls);
     }
 }

4 changes: 1 addition & 3 deletions src/plan/barriers.rs
@@ -1,7 +1,6 @@
 //! Read/Write barrier implementations.
 use crate::vm::edge_shape::{Edge, MemorySlice};
-use crate::vm::ObjectModel;
 use crate::{
     util::{metadata::MetadataSpec, *},
     vm::VMBinding,
@@ -134,8 +133,7 @@ impl<VM: VMBinding> Barrier<VM> for NoBarrier {}
 pub trait BarrierSemantics: 'static + Send {
     type VM: VMBinding;

-    const UNLOG_BIT_SPEC: MetadataSpec =
-        *<Self::VM as VMBinding>::VMObjectModel::GLOBAL_LOG_BIT_SPEC.as_spec();
+    const UNLOG_BIT_SPEC: MetadataSpec = *<Self::VM as VMBinding>::GLOBAL_LOG_BIT_SPEC.as_spec();

     /// Flush thread-local buffers or remembered sets.
     /// Normally this is called by the slow-path implementation whenever the thread-local buffers are full.
2 changes: 1 addition & 1 deletion src/plan/generational/gc_work.rs
@@ -99,7 +99,7 @@ impl<E: ProcessEdgesWork> GCWork<E::VM> for ProcessModBuf<E> {
     fn do_work(&mut self, worker: &mut GCWorker<E::VM>, mmtk: &'static MMTK<E::VM>) {
         // Flip the per-object unlogged bits to "unlogged" state.
         for obj in &self.modbuf {
-            <E::VM as VMBinding>::VMObjectModel::GLOBAL_LOG_BIT_SPEC.store_atomic::<E::VM, u8>(
+            <E::VM as VMBinding>::GLOBAL_LOG_BIT_SPEC.store_atomic::<E::VM, u8>(
                 *obj,
                 1,
                 None,
5 changes: 2 additions & 3 deletions src/plan/generational/global.rs
@@ -11,7 +11,7 @@ use crate::util::statistics::counter::EventCounter;
 use crate::util::Address;
 use crate::util::ObjectReference;
 use crate::util::VMWorkerThread;
-use crate::vm::{ObjectModel, VMBinding};
+use crate::vm::VMBinding;
 use std::sync::atomic::AtomicBool;
 use std::sync::atomic::Ordering;
 use std::sync::{Arc, Mutex};
@@ -87,8 +87,7 @@ impl<VM: VMBinding> CommonGenPlan<VM> {
     /// Returns `true` if the nursery has grown to the extent that it may not be able to be copied
     /// into the mature space.
     fn virtual_memory_exhausted(plan: &dyn GenerationalPlan<VM = VM>) -> bool {
-        ((plan.get_collection_reserved_pages() as f64
-            * VM::VMObjectModel::VM_WORST_CASE_COPY_EXPANSION) as usize)
+        ((plan.get_collection_reserved_pages() as f64 * VM::VM_WORST_CASE_COPY_EXPANSION) as usize)
             > plan.get_mature_physical_pages_available()
     }

3 changes: 1 addition & 2 deletions src/plan/generational/mod.rs
@@ -11,7 +11,6 @@ use crate::policy::space::Space;
 use crate::util::alloc::AllocatorSelector;
 use crate::util::metadata::side_metadata::SideMetadataContext;
 use crate::util::metadata::side_metadata::SideMetadataSpec;
-use crate::vm::ObjectModel;
 use crate::vm::VMBinding;
 use crate::Plan;

@@ -60,7 +59,7 @@ pub const GEN_CONSTRAINTS: PlanConstraints = PlanConstraints {
 /// So if a plan calls this, it should not call SideMetadataContext::new_global_specs() again.
 pub fn new_generational_global_metadata_specs<VM: VMBinding>() -> Vec<SideMetadataSpec> {
     let specs = if ACTIVE_BARRIER == BarrierSelector::ObjectBarrier {
-        crate::util::metadata::extract_side_metadata(&[*VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC])
+        crate::util::metadata::extract_side_metadata(&[*VM::GLOBAL_LOG_BIT_SPEC])
     } else {
         vec![]
     };
5 changes: 2 additions & 3 deletions src/plan/global.rs
@@ -25,7 +25,6 @@ use crate::util::options::PlanSelector;
 use crate::util::statistics::stats::Stats;
 use crate::util::{conversions, ObjectReference};
 use crate::util::{VMMutatorThread, VMWorkerThread};
-use crate::vm::*;
 use downcast_rs::Downcast;
 use enum_map::EnumMap;
 use std::sync::atomic::Ordering;
@@ -225,7 +224,7 @@ pub trait Plan: 'static + HasSpaces + Sync + Downcast {
     fn get_reserved_pages(&self) -> usize {
         let used_pages = self.get_used_pages();
         let collection_reserve = self.get_collection_reserved_pages();
-        let vm_live_bytes = <Self::VM as VMBinding>::VMCollection::vm_live_bytes();
+        let vm_live_bytes = <Self::VM as VMBinding>::vm_live_bytes();
         // Note that `vm_live_bytes` may not be the exact number of bytes in whole pages. The VM
         // binding is allowed to return an approximate value if it is expensive or impossible to
         // compute the exact number of pages occupied.
@@ -495,7 +494,7 @@ impl<VM: VMBinding> BasePlan<VM> {
             return self.vm_space.trace_object(queue, object);
         }

-        VM::VMActivePlan::vm_trace_object::<Q>(queue, object, worker)
+        VM::vm_trace_object::<Q>(queue, object, worker)
     }

     pub fn prepare(&mut self, _tls: VMWorkerThread, _full_heap: bool) {
6 changes: 2 additions & 4 deletions src/plan/markcompact/gc_work.rs
@@ -6,8 +6,6 @@ use crate::scheduler::gc_work::*;
 use crate::scheduler::GCWork;
 use crate::scheduler::GCWorker;
 use crate::scheduler::WorkBucketStage;
-use crate::vm::ActivePlan;
-use crate::vm::Scanning;
 use crate::vm::VMBinding;
 use crate::MMTK;
 use std::marker::PhantomData;
@@ -41,7 +39,7 @@ unsafe impl<VM: VMBinding> Send for UpdateReferences<VM> {}
 impl<VM: VMBinding> GCWork<VM> for UpdateReferences<VM> {
     fn do_work(&mut self, worker: &mut GCWorker<VM>, mmtk: &'static MMTK<VM>) {
         // The following needs to be done right before the second round of root scanning
-        VM::VMScanning::prepare_for_roots_re_scanning();
+        VM::prepare_for_roots_re_scanning();
         mmtk.state.prepare_for_stack_scanning();
         // Prepare common and base spaces for the 2nd round of transitive closure
         let plan_mut = unsafe { &mut *(self.plan as *mut MarkCompact<VM>) };
@@ -56,7 +54,7 @@ impl<VM: VMBinding> GCWork<VM> for UpdateReferences<VM> {
             .worker_group
             .get_and_clear_worker_live_bytes();

-        for mutator in VM::VMActivePlan::mutators() {
+        for mutator in VM::mutators() {
             mmtk.scheduler.work_buckets[WorkBucketStage::SecondRoots].add(ScanMutatorRoots::<
                 MarkCompactForwardingGCWorkContext<VM>,
             >(mutator));
3 changes: 1 addition & 2 deletions src/plan/sticky/immix/global.rs
@@ -12,7 +12,6 @@ use crate::util::copy::CopySelector;
 use crate::util::copy::CopySemantics;
 use crate::util::metadata::side_metadata::SideMetadataContext;
 use crate::util::statistics::counter::EventCounter;
-use crate::vm::ObjectModel;
 use crate::vm::VMBinding;
 use crate::Plan;

@@ -166,7 +165,7 @@ impl<VM: VMBinding> Plan for StickyImmix<VM> {
     fn sanity_check_object(&self, object: crate::util::ObjectReference) -> bool {
         if self.is_current_gc_nursery() {
             // Every reachable object should be logged
-            if !VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.is_unlogged::<VM>(object, Ordering::SeqCst) {
+            if !VM::GLOBAL_LOG_BIT_SPEC.is_unlogged::<VM>(object, Ordering::SeqCst) {
                 error!("Object {} is not unlogged (all objects that have been traced should be unlogged/mature)", object);
                 return false;
             }
7 changes: 3 additions & 4 deletions src/policy/copyspace.rs
@@ -149,8 +149,8 @@ impl<VM: VMBinding> CopySpace<VM> {
             true,
             false,
             extract_side_metadata(&[
-                *VM::VMObjectModel::LOCAL_FORWARDING_BITS_SPEC,
-                *VM::VMObjectModel::LOCAL_FORWARDING_POINTER_SPEC,
+                *VM::LOCAL_FORWARDING_BITS_SPEC,
+                *VM::LOCAL_FORWARDING_POINTER_SPEC,
             ]),
         ));
         CopySpace {
@@ -169,8 +169,7 @@ impl<VM: VMBinding> CopySpace<VM> {
         // Clear the metadata if we are using side forwarding status table. Otherwise
         // objects may inherit forwarding status from the previous GC.
         // TODO: Fix performance.
-        if let MetadataSpec::OnSide(side_forwarding_status_table) =
-            *<VM::VMObjectModel as ObjectModel<VM>>::LOCAL_FORWARDING_BITS_SPEC
+        if let MetadataSpec::OnSide(side_forwarding_status_table) = *VM::LOCAL_FORWARDING_BITS_SPEC
         {
             side_forwarding_status_table
                 .bzero_metadata(self.common.start, self.pr.cursor() - self.common.start);
(Diff view truncated; the remaining 50 of the 61 changed files are not shown.)
