diff --git a/src/arch/aarch64/mod.rs b/src/arch/aarch64/mod.rs
index 8576a8b2..38e430c7 100644
--- a/src/arch/aarch64/mod.rs
+++ b/src/arch/aarch64/mod.rs
@@ -120,6 +120,8 @@ pub fn virt_to_phys(
 		return Err(PagetableError::InvalidAddress);
 	}
 
+	let guest_address = (*crate::vm::GUEST_ADDRESS.get().unwrap()).as_u64();
+
 	// Assumptions:
 	// - We use 4KiB granule
 	// - We use maximum VA length
@@ -132,7 +134,7 @@ pub fn virt_to_phys(
 	// - We are page_aligned, and thus also PageTableEntry aligned.
 	let mut pagetable: &[PageTableEntry] = unsafe {
 		std::mem::transmute::<&[u8], &[PageTableEntry]>(
-			mem.slice_at(mem.guest_address, PAGE_SIZE).unwrap(),
+			mem.slice_at(guest_address, PAGE_SIZE).unwrap(),
 		)
 	};
 	// TODO: Depending on the virtual address length and granule (defined in TCR register by TG and TxSZ), we could reduce the number of pagetable walks. Hermit doesn't do this at the moment.
diff --git a/src/arch/x86_64/mod.rs b/src/arch/x86_64/mod.rs
index 4d0a13d5..8674e73e 100644
--- a/src/arch/x86_64/mod.rs
+++ b/src/arch/x86_64/mod.rs
@@ -121,8 +121,9 @@ pub fn virt_to_phys(
 	/// Number of bits of the index in each table (PML4, PDPT, PDT, PGT).
 	pub const PAGE_MAP_BITS: usize = 9;
 
+	let guest_address = (*crate::vm::GUEST_ADDRESS.get().unwrap()).as_u64();
 	let mut page_table = unsafe {
-		(mem.host_address(GuestPhysAddr::new(mem.guest_address.as_u64() + PML4_OFFSET))
+		(mem.host_address(GuestPhysAddr::new(guest_address + PML4_OFFSET))
 			.unwrap() as *mut PageTable)
 			.as_mut()
 	}
@@ -256,7 +257,7 @@ mod tests {
 		);
 		init_guest_mem(
 			unsafe { mem.as_slice_mut() }.try_into().unwrap(),
-			guest_address,
+			guest_address
 		);
 
 		// Get the address of the first entry in PML4 (the address of the PML4 itself)
diff --git a/src/arch/x86_64/paging/mod.rs b/src/arch/x86_64/paging/mod.rs
index 1502b1ad..aeeb03bb 100644
--- a/src/arch/x86_64/paging/mod.rs
+++ b/src/arch/x86_64/paging/mod.rs
@@ -99,10 +99,7 @@ mod tests {
 
 		let mut mem: Vec<u8> = vec![0; MIN_PHYSMEM_SIZE];
 		// This will return a pagetable setup that we will check.
-		initialize_pagetables(
-			(&mut mem[0..MIN_PHYSMEM_SIZE]).try_into().unwrap(),
-			guest_address,
-		);
+		initialize_pagetables((&mut mem[0..MIN_PHYSMEM_SIZE]).try_into().unwrap(), guest_address);
 
 		// Check PDPTE address
 		let addr_pdpte = u64::from_le_bytes(
diff --git a/src/linux/x86_64/kvm_cpu.rs b/src/linux/x86_64/kvm_cpu.rs
index a1285a64..64c14875 100644
--- a/src/linux/x86_64/kvm_cpu.rs
+++ b/src/linux/x86_64/kvm_cpu.rs
@@ -50,7 +50,7 @@ pub fn initialize_kvm(mem: &MmapMemory, use_pit: bool) -> HypervisorResult<()> {
 			slot: 1,
 			flags: mem.flags,
 			memory_size: (mem.memory_size - KVM_32BIT_GAP_START - KVM_32BIT_GAP_SIZE) as u64,
-			guest_phys_addr: mem.guest_address.as_u64()
+			guest_phys_addr: (*crate::vm::GUEST_ADDRESS.get().unwrap()).as_u64()
 				+ (KVM_32BIT_GAP_START + KVM_32BIT_GAP_SIZE) as u64,
 			userspace_addr: (mem.host_address as usize + KVM_32BIT_GAP_START + KVM_32BIT_GAP_SIZE)
 				as u64,
diff --git a/src/vm.rs b/src/vm.rs
index 1a9bfd48..fa2f463e 100644
--- a/src/vm.rs
+++ b/src/vm.rs
@@ -5,7 +5,7 @@ use std::{
 	num::NonZeroU32,
 	path::PathBuf,
 	ptr,
-	sync::{Arc, Mutex},
+	sync::{Arc, Mutex, OnceLock},
 	time::SystemTime,
 };
 
@@ -15,6 +15,7 @@ use hermit_entry::{
 };
 use log::{error, warn};
 use thiserror::Error;
+use uhyve_interface::GuestPhysAddr;
 
 #[cfg(target_arch = "x86_64")]
 use crate::arch::x86_64::{
@@ -29,6 +30,8 @@ use crate::{
 
 pub type HypervisorResult = Result;
 
+pub static GUEST_ADDRESS: OnceLock<GuestPhysAddr> = OnceLock::new();
+
 #[derive(Error, Debug)]
 pub enum LoadKernelError {
 	#[error(transparent)]
@@ -75,7 +78,7 @@ pub struct UhyveVm {
 	offset: u64,
 	entry_point: u64,
 	stack_address: u64,
-	guest_address: u64,
+	guest_address: GuestPhysAddr,
 	pub mem: Arc<MmapMemory>,
 	num_cpus: u32,
 	path: PathBuf,
@@ -90,21 +93,40 @@ pub struct UhyveVm {
 impl UhyveVm {
 	pub fn new(kernel_path: PathBuf, params: Params) -> HypervisorResult<UhyveVm> {
 		let memory_size = params.memory_size.get();
+		let guest_address = *GUEST_ADDRESS.get_or_init(|| arch::RAM_START);
 
 		// TODO: Move functionality to load_kernel. We don't know whether the binaries are relocatable yet.
 		// TODO: Use random address instead of arch::RAM_START here.
 		#[cfg(target_os = "linux")]
 		#[cfg(target_arch = "x86_64")]
-		let mem = MmapMemory::new(0, memory_size, arch::RAM_START, params.thp, params.ksm);
+		let mem = MmapMemory::new(
+			0,
+			memory_size,
+			guest_address,
+			params.thp,
+			params.ksm,
+		);
 
 		// TODO: guest_address is only taken into account on Linux platforms.
 		// TODO: Before changing this, fix init_guest_mem in `src/arch/aarch64/mod.rs`
 		#[cfg(target_os = "linux")]
 		#[cfg(not(target_arch = "x86_64"))]
-		let mem = MmapMemory::new(0, memory_size, arch::RAM_START, params.thp, params.ksm);
+		let mem = MmapMemory::new(
+			0,
+			memory_size,
+			guest_address,
+			params.thp,
+			params.ksm,
+		);
 
 		#[cfg(not(target_os = "linux"))]
-		let mem = MmapMemory::new(0, memory_size, arch::RAM_START, false, false);
+		let mem = MmapMemory::new(
+			0,
+			memory_size,
+			guest_address,
+			false,
+			false,
+		);
 
 		// create virtio interface
 		// TODO: Remove allow once fixed:
@@ -130,7 +152,7 @@ impl UhyveVm {
 			offset: 0,
 			entry_point: 0,
 			stack_address: 0,
-			guest_address: mem.guest_address.as_u64(),
+			guest_address,
 			mem: mem.into(),
 			num_cpus: cpu_count,
 			path: kernel_path,
@@ -165,7 +187,7 @@ impl UhyveVm {
 	}
 
 	pub fn guest_address(&self) -> u64 {
-		self.guest_address
+		self.guest_address.as_u64()
 	}
 
 	/// Returns the number of cores for the vm.
@@ -188,7 +210,7 @@ impl UhyveVm {
 			unsafe { self.mem.as_slice_mut() } // slice only lives during this fn call
 				.try_into()
 				.expect("Guest memory is not large enough for pagetables"),
-			self.guest_address,
+			self.mem.guest_address.as_u64()
 		);
 	}
 
@@ -201,7 +223,7 @@ impl UhyveVm {
 		// TODO: should be a random start address, if we have a relocatable executable
 		let kernel_start_address = object
			.start_addr()
-			.unwrap_or(self.mem.guest_address.as_u64() + kernel_offset as u64)
+			.unwrap_or_else(|| self.mem.guest_address.as_u64() + kernel_offset as u64)
 			as usize;
 		let kernel_end_address = kernel_start_address + object.mem_size();
 		self.offset = kernel_start_address as u64;
@@ -261,7 +283,7 @@ impl fmt::Debug for UhyveVm {
 		f.debug_struct("UhyveVm")
 			.field("entry_point", &self.entry_point)
 			.field("stack_address", &self.stack_address)
-			.field("guest_address", &self.guest_address)
+			.field("guest_address", &self.guest_address.as_u64())
 			.field("mem", &self.mem)
 			.field("num_cpus", &self.num_cpus)
 			.field("path", &self.path)
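
The common thread of the patch is that the guest load address now lives in a process-wide OnceLock (crate::vm::GUEST_ADDRESS): UhyveVm::new initializes it with get_or_init(|| arch::RAM_START), and the page-table and KVM setup code reads it back with get().unwrap(). Below is a minimal, self-contained sketch of that pattern; the GuestPhysAddr newtype and the RAM_START value are stand-ins for illustration, not the real uhyve_interface types or the actual architecture constant.

use std::sync::OnceLock;

// Stand-in for uhyve_interface::GuestPhysAddr (assumed shape, for illustration only).
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct GuestPhysAddr(u64);

impl GuestPhysAddr {
    const fn new(addr: u64) -> Self {
        Self(addr)
    }
    fn as_u64(&self) -> u64 {
        self.0
    }
}

// Stand-in for arch::RAM_START (the real value is architecture-specific).
const RAM_START: GuestPhysAddr = GuestPhysAddr::new(0x10_0000);

// Written once while the VM is constructed, read by the page-table walkers.
static GUEST_ADDRESS: OnceLock<GuestPhysAddr> = OnceLock::new();

fn main() {
    // Constructor side: the first call stores RAM_START; later calls return the stored value.
    let guest_address = *GUEST_ADDRESS.get_or_init(|| RAM_START);

    // Reader side (virt_to_phys / initialize_kvm in the patch): panics if the cell
    // was never initialized, which is what the `.get().unwrap()` calls rely on.
    let raw = (*GUEST_ADDRESS.get().unwrap()).as_u64();
    assert_eq!(raw, guest_address.as_u64());
}

One property of this design worth noting: with OnceLock, the first initializer wins, so a second UhyveVm constructed with a different address in the same process would silently keep the value stored by the first.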