diff --git a/Cargo.toml b/Cargo.toml
index 4048e66098..d8a357f8b5 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -19,17 +19,18 @@ crate-type = ["rlib"]
 doctest = false
 
 [dependencies]
-atomic = "0.5.1"
+atomic = "0.6.0"
 atomic_refcell = "0.1.7"
 atomic-traits = "0.3.0"
+bytemuck = { version = "1.14.0", features = ["derive"] }
 cfg-if = "1.0"
 crossbeam = "0.8.1"
-delegate = "0.9.0"
+delegate = "0.10.0"
 downcast-rs = "1.1.1"
 enum-map = "2.4.2"
 env_logger = "0.10.0"
 is-terminal = "0.4.7"
-itertools = "0.10.5"
+itertools = "0.11.0"
 jemalloc-sys = { version = "0.5.3", features = ["disable_initial_exec_tls"], optional = true }
 lazy_static = "1.1"
 libc = "0.2"
@@ -46,8 +47,8 @@ probe = "0.5"
 regex = "1.7.0"
 spin = "0.9.5"
 static_assertions = "1.1.0"
-strum = "0.24"
-strum_macros = "0.24"
+strum = "0.25"
+strum_macros = "0.25"
 sysinfo = "0.29"
 
 [dev-dependencies]
@@ -55,8 +56,7 @@ paste = "1.0.8"
 rand = "0.8.5"
 
 [build-dependencies]
-# Fix to 0.6.0. Updating to 0.6.1 requires MSRV 1.64.
-built = { version = "=0.6.0", features = ["git2"] }
+built = { version = "0.7.1", features = ["git2"] }
 
 [features]
 default = []
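
Note on the dependency bump above: with atomic 0.6, `Atomic<T>`'s load/store methods bound `T` by `bytemuck::NoUninit` (no uninitialized or padding bytes) rather than plain `Copy`, which is why `bytemuck` with its `derive` feature enters the dependency list. A minimal sketch of the new contract; the `Opaque` type is illustrative, not from this patch:

```rust
use atomic::{Atomic, Ordering};
use bytemuck::NoUninit;

// derive(NoUninit) on a struct requires a fixed layout with no padding bytes;
// #[repr(transparent)] over a single usize field satisfies that.
#[repr(transparent)]
#[derive(Copy, Clone, NoUninit)]
struct Opaque(usize);

fn demo(cell: &Atomic<Opaque>) {
    cell.store(Opaque(42), Ordering::SeqCst); // store/load now require T: NoUninit
    assert_eq!(cell.load(Ordering::SeqCst).0, 42);
}
```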
diff --git a/src/scheduler/worker.rs b/src/scheduler/worker.rs
index 9bb9ca6443..8183f148dc 100644
--- a/src/scheduler/worker.rs
+++ b/src/scheduler/worker.rs
@@ -19,12 +19,18 @@ pub type ThreadId = usize;
 
 thread_local! {
     /// Current worker's ordinal
-    static WORKER_ORDINAL: Atomic<Option<ThreadId>> = Atomic::new(None);
+    static WORKER_ORDINAL: Atomic<ThreadId> = Atomic::new(ThreadId::MAX);
 }
 
-/// Get current worker ordinal. Return `None` if the current thread is not a worker.
-pub fn current_worker_ordinal() -> Option<ThreadId> {
-    WORKER_ORDINAL.with(|x| x.load(Ordering::Relaxed))
+/// Get the current worker ordinal. The caller must be a GC worker thread (checked by a debug assertion).
+pub fn current_worker_ordinal() -> ThreadId {
+    let ordinal = WORKER_ORDINAL.with(|x| x.load(Ordering::Relaxed));
+    debug_assert_ne!(
+        ordinal,
+        ThreadId::MAX,
+        "Thread-local variable WORKER_ORDINAL not set yet."
+    );
+    ordinal
 }
 
 /// The part shared between a GCWorker and the scheduler.
@@ -365,7 +371,7 @@ impl<VM: VMBinding> GCWorker<VM> {
     /// Each worker will keep polling and executing work packets in a loop.
     pub fn run(&mut self, tls: VMWorkerThread, mmtk: &'static MMTK<VM>) {
         probe!(mmtk, gcworker_run);
-        WORKER_ORDINAL.with(|x| x.store(Some(self.ordinal), Ordering::SeqCst));
+        WORKER_ORDINAL.with(|x| x.store(self.ordinal, Ordering::SeqCst));
         self.scheduler.resolve_affinity(self.ordinal);
         self.tls = tls;
         self.copy = crate::plan::create_gc_worker_context(tls, mmtk);
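
The switch from `Atomic<Option<ThreadId>>` to `Atomic<ThreadId>` follows directly from the new bound: `Option<usize>` carries padding bytes after its discriminant, so it cannot implement `NoUninit`, and `ThreadId::MAX` takes over as the in-band "unset" sentinel. A hedged sketch of the same pattern outside thread-local storage:

```rust
use atomic::{Atomic, Ordering};

// usize::MAX stands in for the old `None`; Atomic::new is a const fn, so this
// works in a static just as it does in the thread_local! above.
static ORDINAL: Atomic<usize> = Atomic::new(usize::MAX);

fn get() -> Option<usize> {
    match ORDINAL.load(Ordering::Relaxed) {
        usize::MAX => None, // still unset
        ord => Some(ord),
    }
}
```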
diff --git a/src/util/address.rs b/src/util/address.rs
index 3c52a854c2..254eb5fa87 100644
--- a/src/util/address.rs
+++ b/src/util/address.rs
@@ -1,4 +1,5 @@
 use atomic_traits::Atomic;
+use bytemuck::NoUninit;
 
 use std::fmt;
 use std::mem;
@@ -18,7 +19,7 @@ pub type ByteOffset = isize;
 /// (memory wise and time wise). The idea is from the paper
 /// High-level Low-level Programming (VEE09) and JikesRVM.
 #[repr(transparent)]
-#[derive(Copy, Clone, Eq, Hash, PartialOrd, Ord, PartialEq)]
+#[derive(Copy, Clone, Eq, Hash, PartialOrd, Ord, PartialEq, NoUninit)]
 pub struct Address(usize);
 
 /// Address + ByteSize (positive)
@@ -469,7 +470,7 @@ use crate::vm::VMBinding;
 /// methods in [`crate::vm::ObjectModel`]. Major refactoring is needed in MMTk to allow
 /// the opaque `ObjectReference` type, and we haven't seen a use case for now.
 #[repr(transparent)]
-#[derive(Copy, Clone, Eq, Hash, PartialOrd, Ord, PartialEq)]
+#[derive(Copy, Clone, Eq, Hash, PartialOrd, Ord, PartialEq, NoUninit)]
 pub struct ObjectReference(usize);
 
 impl ObjectReference {
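
`Address` and `ObjectReference` are `#[repr(transparent)]` wrappers over a single `usize`, so they contain no padding and the derive is sound; it keeps existing `Atomic<Address>` cells compiling under the new bound. An illustrative use, where the `Cursor` type is hypothetical rather than taken from this patch:

```rust
use atomic::{Atomic, Ordering};
// assumes: use crate::util::Address;

struct Cursor {
    top: Atomic<Address>, // requires Address: NoUninit under atomic 0.6
}

impl Cursor {
    fn current(&self) -> Address {
        self.top.load(Ordering::Relaxed)
    }
    fn reset(&self, to: Address) {
        self.top.store(to, Ordering::SeqCst);
    }
}
```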
diff --git a/src/util/heap/blockpageresource.rs b/src/util/heap/blockpageresource.rs
index 5b3ffbc2f2..8d74f7dd20 100644
--- a/src/util/heap/blockpageresource.rs
+++ b/src/util/heap/blockpageresource.rs
@@ -309,7 +309,7 @@ impl<B: Region> BlockPool<B> {
     /// Push a block to the thread-local queue
     pub fn push(&self, block: B) {
         self.count.fetch_add(1, Ordering::SeqCst);
-        let id = crate::scheduler::current_worker_ordinal().unwrap();
+        let id = crate::scheduler::current_worker_ordinal();
         let failed = unsafe {
             self.worker_local_freed_blocks[id]
                 .push_relaxed(block)
diff --git a/src/util/heap/layout/mmapper.rs b/src/util/heap/layout/mmapper.rs
index cee9df26e4..ce84f3784c 100644
--- a/src/util/heap/layout/mmapper.rs
+++ b/src/util/heap/layout/mmapper.rs
@@ -3,6 +3,7 @@ use crate::util::memory::*;
 use crate::util::rust_util::rev_group::RevisitableGroupByForIterator;
 use crate::util::Address;
 use atomic::{Atomic, Ordering};
+use bytemuck::NoUninit;
 use std::io::Result;
 
 /// Generic mmap and protection functionality
@@ -65,7 +66,7 @@ pub trait Mmapper: Sync {
 
 /// The mmap state of a mmap chunk.
 #[repr(u8)]
-#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+#[derive(Copy, Clone, PartialEq, Eq, Debug, NoUninit)]
 pub(super) enum MapState {
     /// The chunk is unmapped and not managed by MMTk.
     Unmapped,
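
`MapState` is already a fieldless `#[repr(u8)]` enum, exactly the shape `derive(NoUninit)` accepts, so the derive is all that needs adding. A sketch of an atomic state transition on such an enum; the names are illustrative, not the Mmapper's actual code:

```rust
use atomic::{Atomic, Ordering};
use bytemuck::NoUninit;

#[repr(u8)]
#[derive(Copy, Clone, PartialEq, Eq, Debug, NoUninit)]
enum State { Unmapped, Mapped }

fn map_chunk(slot: &Atomic<State>) {
    // compare_exchange is available because State: NoUninit
    let _ = slot.compare_exchange(State::Unmapped, State::Mapped,
                                  Ordering::SeqCst, Ordering::SeqCst);
}
```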
diff --git a/src/util/memory.rs b/src/util/memory.rs
index f89bd6d6bb..6109ae661b 100644
--- a/src/util/memory.rs
+++ b/src/util/memory.rs
@@ -2,6 +2,7 @@ use crate::util::alloc::AllocationError;
 use crate::util::opaque_pointer::*;
 use crate::util::Address;
 use crate::vm::{Collection, VMBinding};
+use bytemuck::NoUninit;
 use libc::{PROT_EXEC, PROT_NONE, PROT_READ, PROT_WRITE};
 use std::io::{Error, Result};
 use sysinfo::{RefreshKind, System, SystemExt};
@@ -57,7 +58,8 @@ const MMAP_FLAGS: libc::c_int = libc::MAP_ANON | libc::MAP_PRIVATE | libc::MAP_F
 /// This currently supports switching between different huge page allocation
 /// methods. However, this can later be refactored to reduce other code
 /// repetition.
-#[derive(Debug, Copy, Clone)]
+#[repr(u8)]
+#[derive(Debug, Copy, Clone, NoUninit)]
 pub enum MmapStrategy {
     Normal,
     TransparentHugePages,
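
Unlike `MapState`, `MmapStrategy` previously left its representation to the compiler, so `#[repr(u8)]` is added alongside the derive: bytemuck's `NoUninit` derive rejects enums without an explicit representation, since the compiler is otherwise free to pick a layout with undefined bytes. In sketch form:

```rust
use bytemuck::NoUninit;

#[repr(u8)] // required: pins the enum to a fixed, padding-free layout
#[derive(Debug, Copy, Clone, NoUninit)]
enum Strategy {
    Normal,
    TransparentHugePages,
}

// Dropping the #[repr(u8)] makes the derive fail to compile, which is why
// the attribute is added to MmapStrategy in the hunk above.
```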