ASLR: Use OnceLock for storing guest_address
Other functions will, for now, read the guest address from this OnceLock instead
of relying on mem.guest_address.

For now, guest_address is a GuestPhysAddr only internally, since other functions
in places like src/linux/x86_64/kvm_cpu.rs still use it.
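
To make the pattern concrete, here is a minimal, self-contained sketch of the OnceLock approach this commit introduces; it is not the crate's actual code. `GuestPhysAddr` below is a simplified stand-in for `uhyve_interface::GuestPhysAddr`, and the `RAM_START` value is an arbitrary placeholder for `arch::RAM_START`.

```rust
use std::sync::OnceLock;

/// Stand-in for `uhyve_interface::GuestPhysAddr`, so the sketch compiles on its own.
#[derive(Clone, Copy, Debug)]
struct GuestPhysAddr(u64);

impl GuestPhysAddr {
    const fn new(addr: u64) -> Self {
        Self(addr)
    }

    fn as_u64(self) -> u64 {
        self.0
    }
}

/// Global guest base address, written exactly once during VM construction.
static GUEST_ADDRESS: OnceLock<GuestPhysAddr> = OnceLock::new();

/// Placeholder for `arch::RAM_START`; the real value is architecture-specific.
const RAM_START: GuestPhysAddr = GuestPhysAddr::new(0x10_0000);

fn main() {
    // VM construction: the first caller initializes the cell; later callers
    // get the already-stored value back.
    let guest_address = *GUEST_ADDRESS.get_or_init(|| RAM_START);
    println!("guest_address = {:#x}", guest_address.as_u64());

    // Elsewhere (page-table setup, KVM memory regions, ...): read the address
    // without needing a reference to the VM or its memory object.
    let addr = (*GUEST_ADDRESS.get().unwrap()).as_u64();
    assert_eq!(addr, guest_address.as_u64());
}
```

Because get_or_init only runs the closure when the cell is still empty, repeated VM construction keeps the first address, and a reader that calls get().unwrap() before any VM exists panics; the changes below assume UhyveVm::new always runs first.
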
n0toose committed Jul 22, 2024
1 parent 08bfcfd commit 8077f6d
Showing 6 changed files with 45 additions and 24 deletions.
6 changes: 4 additions & 2 deletions src/arch/aarch64/mod.rs
@@ -120,6 +120,8 @@ pub fn virt_to_phys(
return Err(PagetableError::InvalidAddress);
}

let guest_address = (*crate::vm::GUEST_ADDRESS.get().unwrap()).as_u64();

// Assumptions:
// - We use 4KiB granule
// - We use maximum VA length
@@ -132,7 +134,7 @@
// - We are page_aligned, and thus also PageTableEntry aligned.
let mut pagetable: &[PageTableEntry] = unsafe {
std::mem::transmute::<&[u8], &[PageTableEntry]>(
mem.slice_at(mem.guest_address, PAGE_SIZE).unwrap(),
mem.slice_at(guest_address, PAGE_SIZE).unwrap(),
)
};
// TODO: Depending on the virtual address length and granule (defined in TCR register by TG and TxSZ), we could reduce the number of pagetable walks. Hermit doesn't do this at the moment.
@@ -154,7 +156,7 @@ pub fn virt_to_phys(
Ok(pte.address())
}

pub fn init_guest_mem(mem: &mut [u8], _guest_address: u64) {
pub fn init_guest_mem(mem: &mut [u8]) {
let mem_addr = std::ptr::addr_of_mut!(mem[0]);

assert!(mem.len() >= PGT_OFFSET as usize + 512 * size_of::<u64>());
8 changes: 4 additions & 4 deletions src/arch/x86_64/mod.rs
@@ -121,8 +121,9 @@ pub fn virt_to_phys(
/// Number of bits of the index in each table (PML4, PDPT, PDT, PGT).
pub const PAGE_MAP_BITS: usize = 9;

let guest_address = (*crate::vm::GUEST_ADDRESS.get().unwrap()).as_u64();
let mut page_table = unsafe {
(mem.host_address(GuestPhysAddr::new(mem.guest_address.as_u64() + PML4_OFFSET))
(mem.host_address(GuestPhysAddr::new(guest_address + PML4_OFFSET))
.unwrap() as *mut PageTable)
.as_mut()
}
@@ -153,9 +154,9 @@ pub fn virt_to_phys(
Ok(entry.addr() + (addr.as_u64() & !((!0u64) << PAGE_BITS)))
}

pub fn init_guest_mem(mem: &mut [u8], guest_address: u64) {
pub fn init_guest_mem(mem: &mut [u8]) {
// TODO: we should maybe return an error on failure (e.g., the memory is too small)
paging::initialize_pagetables(mem, guest_address);
paging::initialize_pagetables(mem);
}

#[cfg(test)]
@@ -256,7 +257,6 @@ mod tests {
);
init_guest_mem(
unsafe { mem.as_slice_mut() }.try_into().unwrap(),
guest_address,
);

// Get the address of the first entry in PML4 (the address of the PML4 itself)
8 changes: 3 additions & 5 deletions src/arch/x86_64/paging/mod.rs
@@ -12,9 +12,10 @@ use crate::consts::*;
/// Also, the memory `mem` needs to be zeroed for [`PAGE_SIZE`] bytes at the
/// offsets [`BOOT_PML4`] and [`BOOT_PDPTE`], otherwise the integrity of the
/// pagetables and thus the integrity of the guest's memory is not ensured
pub fn initialize_pagetables(mem: &mut [u8], guest_address: u64) {
pub fn initialize_pagetables(mem: &mut [u8]) {
assert!(mem.len() >= MIN_PHYSMEM_SIZE);
let mem_addr = std::ptr::addr_of_mut!(mem[0]);
let guest_address = (*crate::vm::GUEST_ADDRESS.get().unwrap()).as_u64();

let (gdt_entry, pml4, pdpte, pde);
// Safety:
@@ -99,10 +100,7 @@ mod tests {

let mut mem: Vec<u8> = vec![0; MIN_PHYSMEM_SIZE];
// This will return a pagetable setup that we will check.
initialize_pagetables(
(&mut mem[0..MIN_PHYSMEM_SIZE]).try_into().unwrap(),
guest_address,
);
initialize_pagetables((&mut mem[0..MIN_PHYSMEM_SIZE]).try_into().unwrap());

// Check PDPTE address
let addr_pdpte = u64::from_le_bytes(
4 changes: 2 additions & 2 deletions src/linux/x86_64/kvm_cpu.rs
@@ -37,7 +37,7 @@ pub fn initialize_kvm(mem: &MmapMemory, use_pit: bool) -> HypervisorResult<()> {
slot: 0,
flags: mem.flags,
memory_size: sz as u64,
guest_phys_addr: mem.guest_address.as_u64(),
guest_phys_addr: (*crate::vm::GUEST_ADDRESS.get().unwrap()).as_u64(),
userspace_addr: mem.host_address as u64,
};

@@ -50,7 +50,7 @@ pub fn initialize_kvm(mem: &MmapMemory, use_pit: bool) -> HypervisorResult<()> {
slot: 1,
flags: mem.flags,
memory_size: (mem.memory_size - KVM_32BIT_GAP_START - KVM_32BIT_GAP_SIZE) as u64,
guest_phys_addr: mem.guest_address.as_u64()
guest_phys_addr: (*crate::vm::GUEST_ADDRESS.get().unwrap()).as_u64()
+ (KVM_32BIT_GAP_START + KVM_32BIT_GAP_SIZE) as u64,
userspace_addr: (mem.host_address as usize + KVM_32BIT_GAP_START + KVM_32BIT_GAP_SIZE)
as u64,
2 changes: 1 addition & 1 deletion src/macos/x86_64/vcpu.rs
@@ -648,7 +648,7 @@ impl VirtualCPU for XhyveCpu {
vcpu.init(
parent_vm.get_entry_point(),
parent_vm.stack_address(),
parent_vm.guest_address(),
(*crate::vm::GUEST_ADDRESS.get().unwrap()).as_u64(),
id,
)?;

41 changes: 31 additions & 10 deletions src/vm.rs
@@ -5,7 +5,7 @@ use std::{
num::NonZeroU32,
path::PathBuf,
ptr,
sync::{Arc, Mutex},
sync::{Arc, Mutex, OnceLock},
time::SystemTime,
};

@@ -15,6 +15,7 @@ use hermit_entry::{
};
use log::{error, warn};
use thiserror::Error;
use uhyve_interface::GuestPhysAddr;

#[cfg(target_arch = "x86_64")]
use crate::arch::x86_64::{
@@ -29,6 +30,8 @@ use crate::{

pub type HypervisorResult<T> = Result<T, HypervisorError>;

pub static GUEST_ADDRESS: OnceLock<GuestPhysAddr> = OnceLock::new();

#[derive(Error, Debug)]
pub enum LoadKernelError {
#[error(transparent)]
@@ -75,7 +78,7 @@ pub struct UhyveVm<VCpuType: VirtualCPU = VcpuDefault> {
offset: u64,
entry_point: u64,
stack_address: u64,
guest_address: u64,
guest_address: GuestPhysAddr,
pub mem: Arc<MmapMemory>,
num_cpus: u32,
path: PathBuf,
@@ -90,21 +93,40 @@ pub struct UhyveVm<VCpuType: VirtualCPU = VcpuDefault> {
impl<VCpuType: VirtualCPU> UhyveVm<VCpuType> {
pub fn new(kernel_path: PathBuf, params: Params) -> HypervisorResult<UhyveVm<VCpuType>> {
let memory_size = params.memory_size.get();
let guest_address = *GUEST_ADDRESS.get_or_init(|| arch::RAM_START);

// TODO: Move functionality to load_kernel. We don't know whether the binaries are relocatable yet.
// TODO: Use random address instead of arch::RAM_START here.
#[cfg(target_os = "linux")]
#[cfg(target_arch = "x86_64")]
let mem = MmapMemory::new(0, memory_size, arch::RAM_START, params.thp, params.ksm);
let mem = MmapMemory::new(
0,
memory_size,
guest_address,
params.thp,
params.ksm,
);

// TODO: guest_address is only taken into account on Linux platforms.
// TODO: Before changing this, fix init_guest_mem in `src/arch/aarch64/mod.rs`
#[cfg(target_os = "linux")]
#[cfg(not(target_arch = "x86_64"))]
let mem = MmapMemory::new(0, memory_size, arch::RAM_START, params.thp, params.ksm);
let mem = MmapMemory::new(
0,
memory_size,
guest_address,
params.thp,
params.ksm,
);

#[cfg(not(target_os = "linux"))]
let mem = MmapMemory::new(0, memory_size, arch::RAM_START, false, false);
let mem = MmapMemory::new(
0,
memory_size,
guest_address,
false,
false,
);

// create virtio interface
// TODO: Remove allow once fixed:
@@ -130,7 +152,7 @@ impl<VCpuType: VirtualCPU> UhyveVm<VCpuType> {
offset: 0,
entry_point: 0,
stack_address: 0,
guest_address: mem.guest_address.as_u64(),
guest_address,
mem: mem.into(),
num_cpus: cpu_count,
path: kernel_path,
@@ -165,7 +187,7 @@ impl<VCpuType: VirtualCPU> UhyveVm<VCpuType> {
}

pub fn guest_address(&self) -> u64 {
self.guest_address
self.guest_address.as_u64()
}

/// Returns the number of cores for the vm.
@@ -188,7 +210,6 @@ impl<VCpuType: VirtualCPU> UhyveVm<VCpuType> {
unsafe { self.mem.as_slice_mut() } // slice only lives during this fn call
.try_into()
.expect("Guest memory is not large enough for pagetables"),
self.guest_address,
);
}

Expand All @@ -201,7 +222,7 @@ impl<VCpuType: VirtualCPU> UhyveVm<VCpuType> {
// TODO: should be a random start address, if we have a relocatable executable
let kernel_start_address = object
.start_addr()
.unwrap_or(self.mem.guest_address.as_u64() + kernel_offset as u64)
.unwrap_or_else(|| self.mem.guest_address.as_u64() + kernel_offset as u64)
as usize;
let kernel_end_address = kernel_start_address + object.mem_size();
self.offset = kernel_start_address as u64;
@@ -261,7 +282,7 @@ impl<VCpuType: VirtualCPU> fmt::Debug for UhyveVm<VCpuType> {
f.debug_struct("UhyveVm")
.field("entry_point", &self.entry_point)
.field("stack_address", &self.stack_address)
.field("guest_address", &self.guest_address)
.field("guest_address", &self.guest_address.as_u64())
.field("mem", &self.mem)
.field("num_cpus", &self.num_cpus)
.field("path", &self.path)
