Skip to content

Commit

Permalink
Post-release dependency version bump for v0.21.0
Browse files Browse the repository at this point in the history
Bump the version of dependencies to their latest version.

The "atomic" crate, since version 0.6.0, requires the `T` in `Atomic<T>`
to implement `bytemuck::NoUninit`.  It is a marker trait for types that
satisfy certain requirements.  One requirement is that `T` must not have
any padding bytes (in the middle or at the end).  The derive macro
`#[derive(NoUninit)]` can be used on custom types to automatically check
that the type satisfies those requirements.  Two notable types are:

1.  `enum MmapStrategy`.  It is missing the representation annotation
    which `NoUninit` requires.  This PR adds `#[repr(u8)]` to fix this.
2.  `WORKER_ORDINAL: Atomic<Option<ThreadId>>`.  The `Option` type does
    have padding bytes, making it ineligible for `Atomic<T>`.  This PR
    changes it to `Atomic<ThreadId>`, with `ThreadId::MAX` representing
    the uninitialized value.
  • Loading branch information
wks committed Nov 13, 2023
1 parent 6a5e5ff commit bd46bef
Show file tree
Hide file tree
Showing 6 changed files with 26 additions and 16 deletions.
14 changes: 7 additions & 7 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -19,17 +19,18 @@ crate-type = ["rlib"]
doctest = false

[dependencies]
atomic = "0.5.1"
atomic = "0.6.0"
atomic_refcell = "0.1.7"
atomic-traits = "0.3.0"
bytemuck = { version = "1.14.0", features = ["derive"] }
cfg-if = "1.0"
crossbeam = "0.8.1"
delegate = "0.9.0"
delegate = "0.10.0"
downcast-rs = "1.1.1"
enum-map = "2.4.2"
env_logger = "0.10.0"
is-terminal = "0.4.7"
itertools = "0.10.5"
itertools = "0.11.0"
jemalloc-sys = { version = "0.5.3", features = ["disable_initial_exec_tls"], optional = true }
lazy_static = "1.1"
libc = "0.2"
Expand All @@ -46,17 +47,16 @@ probe = "0.5"
regex = "1.7.0"
spin = "0.9.5"
static_assertions = "1.1.0"
strum = "0.24"
strum_macros = "0.24"
strum = "0.25"
strum_macros = "0.25"
sysinfo = "0.29"

[dev-dependencies]
paste = "1.0.8"
rand = "0.8.5"

[build-dependencies]
# Fix to 0.6.0. Updating to 0.6.1 requires MSRV 1.64.
built = { version = "=0.6.0", features = ["git2"] }
built = { version = "0.7.1", features = ["git2"] }

[features]
default = []
Expand Down
14 changes: 10 additions & 4 deletions src/scheduler/worker.rs
Original file line number Diff line number Diff line change
Expand Up @@ -19,12 +19,18 @@ pub type ThreadId = usize;

thread_local! {
/// Current worker's ordinal
static WORKER_ORDINAL: Atomic<Option<ThreadId>> = Atomic::new(None);
static WORKER_ORDINAL: Atomic<ThreadId> = Atomic::new(ThreadId::MAX);
}

/// Get current worker ordinal. Return `None` if the current thread is not a worker.
pub fn current_worker_ordinal() -> Option<ThreadId> {
WORKER_ORDINAL.with(|x| x.load(Ordering::Relaxed))
pub fn current_worker_ordinal() -> ThreadId {
let ordinal = WORKER_ORDINAL.with(|x| x.load(Ordering::Relaxed));
debug_assert_ne!(
ordinal,
ThreadId::MAX,
"Thread-local variable WORKER_ORDINAL not set yet."
);
ordinal
}

/// The part shared between a GCWorker and the scheduler.
Expand Down Expand Up @@ -365,7 +371,7 @@ impl<VM: VMBinding> GCWorker<VM> {
/// Each worker will keep polling and executing work packets in a loop.
pub fn run(&mut self, tls: VMWorkerThread, mmtk: &'static MMTK<VM>) {
probe!(mmtk, gcworker_run);
WORKER_ORDINAL.with(|x| x.store(Some(self.ordinal), Ordering::SeqCst));
WORKER_ORDINAL.with(|x| x.store(self.ordinal, Ordering::SeqCst));
self.scheduler.resolve_affinity(self.ordinal);
self.tls = tls;
self.copy = crate::plan::create_gc_worker_context(tls, mmtk);
Expand Down
5 changes: 3 additions & 2 deletions src/util/address.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
use atomic_traits::Atomic;
use bytemuck::NoUninit;

use std::fmt;
use std::mem;
Expand All @@ -18,7 +19,7 @@ pub type ByteOffset = isize;
/// (memory wise and time wise). The idea is from the paper
/// High-level Low-level Programming (VEE09) and JikesRVM.
#[repr(transparent)]
#[derive(Copy, Clone, Eq, Hash, PartialOrd, Ord, PartialEq)]
#[derive(Copy, Clone, Eq, Hash, PartialOrd, Ord, PartialEq, NoUninit)]
pub struct Address(usize);

/// Address + ByteSize (positive)
Expand Down Expand Up @@ -469,7 +470,7 @@ use crate::vm::VMBinding;
/// methods in [`crate::vm::ObjectModel`]. Major refactoring is needed in MMTk to allow
/// the opaque `ObjectReference` type, and we haven't seen a use case for now.
#[repr(transparent)]
#[derive(Copy, Clone, Eq, Hash, PartialOrd, Ord, PartialEq)]
#[derive(Copy, Clone, Eq, Hash, PartialOrd, Ord, PartialEq, NoUninit)]
pub struct ObjectReference(usize);

impl ObjectReference {
Expand Down
2 changes: 1 addition & 1 deletion src/util/heap/blockpageresource.rs
Original file line number Diff line number Diff line change
Expand Up @@ -309,7 +309,7 @@ impl<B: Region> BlockPool<B> {
/// Push a block to the thread-local queue
pub fn push(&self, block: B) {
self.count.fetch_add(1, Ordering::SeqCst);
let id = crate::scheduler::current_worker_ordinal().unwrap();
let id = crate::scheduler::current_worker_ordinal();
let failed = unsafe {
self.worker_local_freed_blocks[id]
.push_relaxed(block)
Expand Down
3 changes: 2 additions & 1 deletion src/util/heap/layout/mmapper.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ use crate::util::memory::*;
use crate::util::rust_util::rev_group::RevisitableGroupByForIterator;
use crate::util::Address;
use atomic::{Atomic, Ordering};
use bytemuck::NoUninit;
use std::io::Result;

/// Generic mmap and protection functionality
Expand Down Expand Up @@ -65,7 +66,7 @@ pub trait Mmapper: Sync {

/// The mmap state of a mmap chunk.
#[repr(u8)]
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[derive(Copy, Clone, PartialEq, Eq, Debug, NoUninit)]
pub(super) enum MapState {
/// The chunk is unmapped and not managed by MMTk.
Unmapped,
Expand Down
4 changes: 3 additions & 1 deletion src/util/memory.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ use crate::util::alloc::AllocationError;
use crate::util::opaque_pointer::*;
use crate::util::Address;
use crate::vm::{Collection, VMBinding};
use bytemuck::NoUninit;
use libc::{PROT_EXEC, PROT_NONE, PROT_READ, PROT_WRITE};
use std::io::{Error, Result};
use sysinfo::{RefreshKind, System, SystemExt};
Expand Down Expand Up @@ -57,7 +58,8 @@ const MMAP_FLAGS: libc::c_int = libc::MAP_ANON | libc::MAP_PRIVATE | libc::MAP_F
/// This currently supports switching between different huge page allocation
/// methods. However, this can later be refactored to reduce other code
/// repetition.
#[derive(Debug, Copy, Clone)]
#[repr(u8)]
#[derive(Debug, Copy, Clone, NoUninit)]
pub enum MmapStrategy {
Normal,
TransparentHugePages,
Expand Down

0 comments on commit bd46bef

Please sign in to comment.