ASLR: Use OnceLock for storing guest_address
This will be used by other functions for now, instead of
relying on mem.guest_address.

guest_address should only be a GuestPhysAddr internally for now,
as other functions in places like src/linux/x86_64/kvm_cpu.rs
use it.
n0toose committed Jul 22, 2024
1 parent 08bfcfd commit 3775efa
Showing 5 changed files with 40 additions and 18 deletions.
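For readers unfamiliar with the primitive this commit introduces, here is a minimal, self-contained sketch of how std::sync::OnceLock behaves. The address constant is an arbitrary placeholder, not uhyve's real arch::RAM_START:

use std::sync::OnceLock;

// A global cell that can be written exactly once and read from any thread.
static GUEST_ADDRESS: OnceLock<u64> = OnceLock::new();

fn main() {
    // The first call runs the closure and stores its result; every later
    // call ignores the closure and returns the already-stored value.
    let addr = *GUEST_ADDRESS.get_or_init(|| 0x40_0000);
    assert_eq!(addr, 0x40_0000);

    // `get` returns Some(&value) once the cell is initialized, None before.
    assert_eq!(GUEST_ADDRESS.get(), Some(&0x40_0000));
}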
src/arch/aarch64/mod.rs (4 changes: 3 additions & 1 deletion)

@@ -120,6 +120,8 @@ pub fn virt_to_phys(
         return Err(PagetableError::InvalidAddress);
     }

+    let guest_address = (*crate::vm::GUEST_ADDRESS.get().unwrap()).as_u64();
+
     // Assumptions:
     // - We use 4KiB granule
     // - We use maximum VA length
@@ -132,7 +134,7 @@ pub fn virt_to_phys(
     // - We are page_aligned, and thus also PageTableEntry aligned.
     let mut pagetable: &[PageTableEntry] = unsafe {
         std::mem::transmute::<&[u8], &[PageTableEntry]>(
-            mem.slice_at(mem.guest_address, PAGE_SIZE).unwrap(),
+            mem.slice_at(guest_address, PAGE_SIZE).unwrap(),
         )
     };
     // TODO: Depending on the virtual address length and granule (defined in TCR register by TG and TxSZ), we could reduce the number of pagetable walks. Hermit doesn't do this at the moment.
src/arch/x86_64/mod.rs (5 changes: 3 additions & 2 deletions)

@@ -121,8 +121,9 @@ pub fn virt_to_phys(
     /// Number of bits of the index in each table (PML4, PDPT, PDT, PGT).
     pub const PAGE_MAP_BITS: usize = 9;

+    let guest_address = (*crate::vm::GUEST_ADDRESS.get().unwrap()).as_u64();
     let mut page_table = unsafe {
-        (mem.host_address(GuestPhysAddr::new(mem.guest_address.as_u64() + PML4_OFFSET))
+        (mem.host_address(GuestPhysAddr::new(guest_address + PML4_OFFSET))
             .unwrap() as *mut PageTable)
             .as_mut()
     }
@@ -256,7 +257,7 @@ mod tests {
        );
        init_guest_mem(
            unsafe { mem.as_slice_mut() }.try_into().unwrap(),
-           guest_address,
+           guest_address
        );

        // Get the address of the first entry in PML4 (the address of the PML4 itself)
src/arch/x86_64/paging/mod.rs (5 changes: 1 addition & 4 deletions)

@@ -99,10 +99,7 @@ mod tests {

        let mut mem: Vec<u8> = vec![0; MIN_PHYSMEM_SIZE];
        // This will return a pagetable setup that we will check.
-       initialize_pagetables(
-           (&mut mem[0..MIN_PHYSMEM_SIZE]).try_into().unwrap(),
-           guest_address,
-       );
+       initialize_pagetables((&mut mem[0..MIN_PHYSMEM_SIZE]).try_into().unwrap(), guest_address);

        // Check PDPTE address
        let addr_pdpte = u64::from_le_bytes(
src/linux/x86_64/kvm_cpu.rs (2 changes: 1 addition & 1 deletion)

@@ -50,7 +50,7 @@ pub fn initialize_kvm(mem: &MmapMemory, use_pit: bool) -> HypervisorResult<()> {
        slot: 1,
        flags: mem.flags,
        memory_size: (mem.memory_size - KVM_32BIT_GAP_START - KVM_32BIT_GAP_SIZE) as u64,
-       guest_phys_addr: mem.guest_address.as_u64()
+       guest_phys_addr: (*crate::vm::GUEST_ADDRESS.get().unwrap()).as_u64()
            + (KVM_32BIT_GAP_START + KVM_32BIT_GAP_SIZE) as u64,
        userspace_addr: (mem.host_address as usize + KVM_32BIT_GAP_START + KVM_32BIT_GAP_SIZE)
            as u64,
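For context: slot 1 is the memory region above KVM's 32-bit gap (presumably the PCI/MMIO hole, judging by the KVM_32BIT_GAP_* constants), which is why both the guest-physical base and the host userspace address are offset by KVM_32BIT_GAP_START + KVM_32BIT_GAP_SIZE. Only the source of the guest-physical base changes here: it is now read from the shared GUEST_ADDRESS cell instead of mem.guest_address.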
src/vm.rs (42 changes: 32 additions & 10 deletions)

@@ -5,7 +5,7 @@ use std::{
    num::NonZeroU32,
    path::PathBuf,
    ptr,
-   sync::{Arc, Mutex},
+   sync::{Arc, Mutex, OnceLock},
    time::SystemTime,
 };

@@ -15,6 +15,7 @@ use hermit_entry::{
 };
 use log::{error, warn};
 use thiserror::Error;
+use uhyve_interface::GuestPhysAddr;

 #[cfg(target_arch = "x86_64")]
 use crate::arch::x86_64::{
@@ -29,6 +30,8 @@ use crate::{

 pub type HypervisorResult<T> = Result<T, HypervisorError>;

+pub static GUEST_ADDRESS: OnceLock<GuestPhysAddr> = OnceLock::new();
+
 #[derive(Error, Debug)]
 pub enum LoadKernelError {
    #[error(transparent)]
@@ -75,7 +78,7 @@ pub struct UhyveVm<VCpuType: VirtualCPU = VcpuDefault> {
    offset: u64,
    entry_point: u64,
    stack_address: u64,
-   guest_address: u64,
+   guest_address: GuestPhysAddr,
    pub mem: Arc<MmapMemory>,
    num_cpus: u32,
    path: PathBuf,
@@ -90,21 +93,40 @@ impl<VCpuType: VirtualCPU> UhyveVm<VCpuType> {
    pub fn new(kernel_path: PathBuf, params: Params) -> HypervisorResult<UhyveVm<VCpuType>> {
        let memory_size = params.memory_size.get();
+       let guest_address = *GUEST_ADDRESS.get_or_init(|| arch::RAM_START);

        // TODO: Move functionality to load_kernel. We don't know whether the binaries are relocatable yet.
        // TODO: Use random address instead of arch::RAM_START here.
        #[cfg(target_os = "linux")]
        #[cfg(target_arch = "x86_64")]
-       let mem = MmapMemory::new(0, memory_size, arch::RAM_START, params.thp, params.ksm);
+       let mem = MmapMemory::new(
+           0,
+           memory_size,
+           guest_address,
+           params.thp,
+           params.ksm,
+       );

        // TODO: guest_address is only taken into account on Linux platforms.
        // TODO: Before changing this, fix init_guest_mem in `src/arch/aarch64/mod.rs`
        #[cfg(target_os = "linux")]
        #[cfg(not(target_arch = "x86_64"))]
-       let mem = MmapMemory::new(0, memory_size, arch::RAM_START, params.thp, params.ksm);
+       let mem = MmapMemory::new(
+           0,
+           memory_size,
+           guest_address,
+           params.thp,
+           params.ksm,
+       );

        #[cfg(not(target_os = "linux"))]
-       let mem = MmapMemory::new(0, memory_size, arch::RAM_START, false, false);
+       let mem = MmapMemory::new(
+           0,
+           memory_size,
+           guest_address,
+           false,
+           false,
+       );

        // create virtio interface
        // TODO: Remove allow once fixed:
@@ -130,7 +152,7 @@ impl<VCpuType: VirtualCPU> UhyveVm<VCpuType> {
            offset: 0,
            entry_point: 0,
            stack_address: 0,
-           guest_address: mem.guest_address.as_u64(),
+           guest_address,
            mem: mem.into(),
            num_cpus: cpu_count,
            path: kernel_path,
@@ -165,7 +187,7 @@ impl<VCpuType: VirtualCPU> UhyveVm<VCpuType> {
    }

    pub fn guest_address(&self) -> u64 {
-       self.guest_address
+       self.guest_address.as_u64()
    }

    /// Returns the number of cores for the vm.
@@ -188,7 +210,7 @@ impl<VCpuType: VirtualCPU> UhyveVm<VCpuType> {
            unsafe { self.mem.as_slice_mut() } // slice only lives during this fn call
                .try_into()
                .expect("Guest memory is not large enough for pagetables"),
-           self.guest_address,
+           self.mem.guest_address.as_u64()
        );
    }

@@ -201,7 +223,7 @@ impl<VCpuType: VirtualCPU> UhyveVm<VCpuType> {
        // TODO: should be a random start address, if we have a relocatable executable
        let kernel_start_address = object
            .start_addr()
-           .unwrap_or(self.mem.guest_address.as_u64() + kernel_offset as u64)
+           .unwrap_or_else(|| self.mem.guest_address.as_u64() + kernel_offset as u64)
            as usize;
        let kernel_end_address = kernel_start_address + object.mem_size();
        self.offset = kernel_start_address as u64;
@@ -261,7 +283,7 @@ impl<VCpuType: VirtualCPU> fmt::Debug for UhyveVm<VCpuType> {
        f.debug_struct("UhyveVm")
            .field("entry_point", &self.entry_point)
            .field("stack_address", &self.stack_address)
-           .field("guest_address", &self.guest_address)
+           .field("guest_address", &self.guest_address.as_u64())
            .field("mem", &self.mem)
            .field("num_cpus", &self.num_cpus)
            .field("path", &self.path)
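Taken together, src/vm.rs is now the single writer of GUEST_ADDRESS and the architecture-specific code acts as readers. A condensed sketch of the resulting flow, with both lines lifted from the hunks above (not a complete program):

// Writer, in UhyveVm::new: initializes the cell on first use,
// falling back to the architecture's default RAM start.
let guest_address = *GUEST_ADDRESS.get_or_init(|| arch::RAM_START);

// Readers, e.g. virt_to_phys or initialize_kvm: panic if UhyveVm::new
// has not run yet, because the cell would still be empty.
let guest_address = (*crate::vm::GUEST_ADDRESS.get().unwrap()).as_u64();

One consequence of the get().unwrap() pattern is an implicit initialization order: any code path that reaches a reader before UhyveVm::new has called get_or_init will panic.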
