Commit 9448b19

n0toose and jounathaen committed
Implement ASLR for uhyve.
- Kernel is loaded to a random physical address
- Pagetables are created for the kernel region instead of just the first gigabyte

Fixes #719.

Co-authored-by: Jonathan <github@jonathanklimt.de>
1 parent 0bfe983 commit 9448b19
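
The change in a nutshell: instead of loading the kernel at a fixed guest-physical address, uhyve now picks a randomized, aligned load address and builds pagetables for exactly that region (see the aarch64 and x86_64 diffs below). As a rough sketch of such an address pick, assuming a hypothetical placement window [min, max) and 4 KiB alignment (illustrative only, not the actual uhyve placement logic, which also has to reserve room for the kernel image, pagetables and boot info):

use rand::Rng;

const PAGE_SIZE: u64 = 0x1000;

/// Illustrative only: pick a page-aligned guest-physical load address in [min, max).
/// Assumes max > min by at least one page.
fn random_load_address(min: u64, max: u64) -> u64 {
    let pages = (max - min) / PAGE_SIZE;
    min + rand::thread_rng().gen_range(0..pages) * PAGE_SIZE
}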

16 files changed: 620 additions & 244 deletions


Cargo.lock

Lines changed: 4 additions & 2 deletions
Some generated files are not rendered by default.

Cargo.toml

Lines changed: 5 additions & 2 deletions
@@ -36,10 +36,12 @@ path = "benches/benchmarks.rs"
 harness = false
 
 [features]
-default = []
+default = ["aslr"]
+aslr = ["dep:rand"]
 instrument = ["rftrace", "rftrace-frontend"]
 
 [dependencies]
+align-address = "0.3.0"
 byte-unit = { version = "5", features = ["byte"] }
 clap = { version = "4.5", features = ["derive", "env"] }
 nix = { version = "0.29", features = ["mman", "pthread", "signal"] }
@@ -48,7 +50,7 @@ either = "1.13"
 env_logger = "0.11"
 gdbstub = "0.7"
 gdbstub_arch = "0.3"
-hermit-entry = { version = "0.10", features = ["loader"] }
+hermit-entry = { version = "0.10.3", features = ["loader"] }
 libc = "0.2"
 log = "0.4"
 mac_address = "1.1"
@@ -59,6 +61,7 @@ uhyve-interface = { version = "0.1.1", path = "uhyve-interface", features = ["st
 virtio-bindings = { version = "0.2", features = ["virtio-v4_14_0"] }
 rftrace = { version = "0.1", optional = true }
 rftrace-frontend = { version = "0.2", optional = true }
+rand = { version = "0.8.5", optional = true }
 shell-words = "1"
 sysinfo = { version = "0.33.1", default-features = false, features = ["system"] }
 vm-fdt = "0.3"
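
Since `aslr` is now part of the default feature set and pulls in the optional `rand` crate via `dep:rand`, the randomization can be compiled out again with `cargo build --no-default-features`. A hypothetical sketch of how such feature gating typically looks in crate code (the function below is illustrative, not taken from this commit):

#[cfg(feature = "aslr")]
fn kernel_offset(max_pages: u64) -> u64 {
    use rand::Rng;
    // Randomize the page offset only when the `aslr` feature is enabled.
    rand::thread_rng().gen_range(0..max_pages) * 0x1000
}

#[cfg(not(feature = "aslr"))]
fn kernel_offset(_max_pages: u64) -> u64 {
    // Deterministic placement when ASLR is compiled out.
    0
}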

src/arch/aarch64/mod.rs

Lines changed: 120 additions & 55 deletions
@@ -1,20 +1,29 @@
 use std::mem::size_of;
 
+use align_address::Align;
 use bitflags::bitflags;
 use uhyve_interface::{GuestPhysAddr, GuestVirtAddr};
 
 use crate::{
-    consts::{BOOT_INFO_ADDR, BOOT_PGT},
+    consts::{PAGETABLES_END, PAGETABLES_OFFSET, PGT_OFFSET},
     mem::MmapMemory,
-    paging::PagetableError,
+    paging::{BumpAllocator, PagetableError},
 };
 
-pub const RAM_START: GuestPhysAddr = GuestPhysAddr::new(0x00);
+pub(crate) const RAM_START: GuestPhysAddr = GuestPhysAddr::new(0x00);
 
-pub const PT_DEVICE: u64 = 0x707;
-pub const PT_PT: u64 = 0x713;
-pub const PT_MEM: u64 = 0x713;
-pub const PT_MEM_CD: u64 = 0x70F;
+const SIZE_4KIB: u64 = 0x1000;
+
+// PageTableEntry Flags
+/// Present + 4KiB + device memory + inner_sharable + accessed
+pub const PT_DEVICE: u64 = 0b11100000111;
+/// Present + 4KiB + normal + inner_sharable + accessed
+pub const PT_PT: u64 = 0b11100010011;
+/// Present + 4KiB + normal + inner_sharable + accessed
+pub const PT_MEM: u64 = 0b11100010011;
+/// Present + 4KiB + device + inner_sharable + accessed
+pub const PT_MEM_CD: u64 = 0b11100001111;
+/// Self reference flag
 pub const PT_SELF: u64 = 1 << 55;
 
 /*
@@ -115,7 +124,7 @@ fn is_valid_address(virtual_address: GuestVirtAddr) -> bool {
 pub fn virt_to_phys(
     addr: GuestVirtAddr,
     mem: &MmapMemory,
-    pagetable_l0: GuestPhysAddr,
+    pgt: GuestPhysAddr,
 ) -> Result<GuestPhysAddr, PagetableError> {
     if !is_valid_address(addr) {
         return Err(PagetableError::InvalidAddress);
@@ -132,9 +141,7 @@ pub fn virt_to_phys(
     // - Our indices can't be larger than 512, so we stay in the borders of the page.
     // - We are page_aligned, and thus also PageTableEntry aligned.
     let mut pagetable: &[PageTableEntry] = unsafe {
-        std::mem::transmute::<&[u8], &[PageTableEntry]>(
-            mem.slice_at(pagetable_l0, PAGE_SIZE).unwrap(),
-        )
+        std::mem::transmute::<&[u8], &[PageTableEntry]>(mem.slice_at(pgt, PAGE_SIZE).unwrap())
     };
     // TODO: Depending on the virtual address length and granule (defined in TCR register by TG and TxSZ), we could reduce the number of pagetable walks. Hermit doesn't do this at the moment.
     for level in 0..3 {
@@ -155,71 +162,129 @@ pub fn virt_to_phys(
     Ok(pte.address())
 }
 
-pub fn init_guest_mem(mem: &mut [u8]) {
+pub fn init_guest_mem(
+    mem: &mut [u8],
+    guest_address: GuestPhysAddr,
+    length: u64,
+    _legacy_mapping: bool,
+) {
+    warn!("aarch64 pagetable initialization is untested!");
+
     let mem_addr = std::ptr::addr_of_mut!(mem[0]);
 
-    assert!(mem.len() >= BOOT_PGT.as_u64() as usize + 512 * size_of::<u64>());
-    let pgt_slice = unsafe {
-        std::slice::from_raw_parts_mut(mem_addr.offset(BOOT_PGT.as_u64() as isize) as *mut u64, 512)
-    };
-    pgt_slice.fill(0);
-    pgt_slice[0] = BOOT_PGT.as_u64() + 0x1000 + PT_PT;
-    pgt_slice[511] = BOOT_PGT.as_u64() + PT_PT + PT_SELF;
+    assert!(mem.len() >= PGT_OFFSET as usize + 512 * size_of::<u64>());
 
-    assert!(mem.len() >= BOOT_PGT.as_u64() as usize + 0x1000 + 512 * size_of::<u64>());
     let pgt_slice = unsafe {
-        std::slice::from_raw_parts_mut(
-            mem_addr.offset(BOOT_PGT.as_u64() as isize + 0x1000) as *mut u64,
-            512,
-        )
+        std::slice::from_raw_parts_mut(mem_addr.offset(PGT_OFFSET as isize) as *mut u64, 512)
     };
     pgt_slice.fill(0);
-    pgt_slice[0] = BOOT_PGT.as_u64() + 0x2000 + PT_PT;
+    pgt_slice[511] = (guest_address + PGT_OFFSET) | PT_PT | PT_SELF;
 
-    assert!(mem.len() >= BOOT_PGT.as_u64() as usize + 0x2000 + 512 * size_of::<u64>());
-    let pgt_slice = unsafe {
-        std::slice::from_raw_parts_mut(
-            mem_addr.offset(BOOT_PGT.as_u64() as isize + 0x2000) as *mut u64,
-            512,
-        )
-    };
-    pgt_slice.fill(0);
-    pgt_slice[0] = BOOT_PGT.as_u64() + 0x3000 + PT_PT;
-    pgt_slice[1] = BOOT_PGT.as_u64() + 0x4000 + PT_PT;
-    pgt_slice[2] = BOOT_PGT.as_u64() + 0x5000 + PT_PT;
+    let mut boot_frame_allocator = BumpAllocator::<SIZE_4KIB>::new(
+        guest_address + PAGETABLES_OFFSET,
+        (PAGETABLES_END - PAGETABLES_OFFSET) / SIZE_4KIB,
+    );
 
-    assert!(mem.len() >= BOOT_PGT.as_u64() as usize + 0x3000 + 512 * size_of::<u64>());
-    let pgt_slice = unsafe {
+    // Hypercalls are MMIO reads/writes in the lowest 4KiB of address space. Thus, we need to provide pagetable entries for this region.
+    let pgd0_addr = boot_frame_allocator.allocate().unwrap().as_u64();
+    pgt_slice[0] = pgd0_addr | PT_PT;
+    let pgd0_slice = unsafe {
         std::slice::from_raw_parts_mut(
-            mem_addr.offset(BOOT_PGT.as_u64() as isize + 0x3000) as *mut u64,
+            mem_addr.offset((pgd0_addr - guest_address.as_u64()) as isize) as *mut u64,
             512,
         )
     };
-    pgt_slice.fill(0);
-    // map Uhyve ports into the virtual address space
-    pgt_slice[0] = PT_MEM_CD;
-    // map BootInfo into the virtual address space
-    pgt_slice[BOOT_INFO_ADDR.as_u64() as usize / PAGE_SIZE] = BOOT_INFO_ADDR.as_u64() + PT_MEM;
+    pgd0_slice.fill(0);
+    let pud0_addr = boot_frame_allocator.allocate().unwrap().as_u64();
+    pgd0_slice[0] = pud0_addr | PT_PT;
 
-    assert!(mem.len() >= BOOT_PGT.as_u64() as usize + 0x4000 + 512 * size_of::<u64>());
-    let pgt_slice = unsafe {
+    let pud0_slice = unsafe {
         std::slice::from_raw_parts_mut(
-            mem_addr.offset(BOOT_PGT.as_u64() as isize + 0x4000) as *mut u64,
+            mem_addr.offset((pud0_addr - guest_address.as_u64()) as isize) as *mut u64,
            512,
         )
     };
-    for (idx, i) in pgt_slice.iter_mut().enumerate() {
-        *i = 0x200000u64 + (idx * PAGE_SIZE) as u64 + PT_MEM;
-    }
+    pud0_slice.fill(0);
+    let pmd0_addr = boot_frame_allocator.allocate().unwrap().as_u64();
+    pud0_slice[0] = pmd0_addr | PT_PT;
 
-    assert!(mem.len() >= BOOT_PGT.as_u64() as usize + 0x5000 + 512 * size_of::<u64>());
-    let pgt_slice = unsafe {
+    let pmd0_slice = unsafe {
         std::slice::from_raw_parts_mut(
-            mem_addr.offset(BOOT_PGT.as_u64() as isize + 0x5000) as *mut u64,
+            mem_addr.offset((pmd0_addr - guest_address.as_u64()) as isize) as *mut u64,
             512,
         )
     };
-    for (idx, i) in pgt_slice.iter_mut().enumerate() {
-        *i = 0x400000u64 + (idx * PAGE_SIZE) as u64 + PT_MEM;
+    pmd0_slice.fill(0);
+    // Hypercall/IO mapping
+    pmd0_slice[0] = PT_MEM;
+
+    for frame_addr in (guest_address.align_down(SIZE_4KIB).as_u64()
+        ..(guest_address + length).align_up(SIZE_4KIB).as_u64())
+        .step_by(SIZE_4KIB as usize)
+    {
+        let idx_l4 = (frame_addr as usize / (0x80_0000_0000)) & (0xFFF);
+        let idx_l3 = (frame_addr as usize / (0x4000_0000)) & (0xFFF);
+        let idx_l2 = (frame_addr as usize / (0x20_0000)) & (0xFFF);
+        let idx_l1 = (frame_addr as usize / (0x1000)) & (0xFFF);
+        debug!("mapping frame {frame_addr:x} to pagetable {idx_l4}-{idx_l3}-{idx_l2}-{idx_l1}");
+
+        let (pgd_addr, new) = if pgt_slice[idx_l4] == 0 {
+            (boot_frame_allocator.allocate().unwrap() | PT_PT, true)
+        } else {
+            (
+                PageTableEntry::from(pgt_slice[idx_l4]).address().as_u64(),
+                false,
+            )
+        };
+        let pgd_slice = unsafe {
+            std::slice::from_raw_parts_mut(
+                mem_addr.offset((pgd_addr - guest_address.as_u64()) as isize) as *mut u64,
+                512,
+            )
+        };
+        if new {
+            pgd_slice.fill(0);
+            pgt_slice[idx_l4] = pgd_addr | PT_PT;
+        }
+
+        let (pud_addr, new) = if pgd_slice[idx_l3] == 0 {
+            (boot_frame_allocator.allocate().unwrap() | PT_PT, true)
+        } else {
+            (
+                PageTableEntry::from(pgd_slice[idx_l3]).address().as_u64(),
+                false,
+            )
+        };
+        let pud_slice = unsafe {
+            std::slice::from_raw_parts_mut(
+                mem_addr.offset((pud_addr - guest_address.as_u64()) as isize) as *mut u64,
+                512,
+            )
+        };
+        if new {
+            pud_slice.fill(0);
+            pgd_slice[idx_l3] = pud_addr | PT_PT;
+        }
+
+        let (pmd_addr, new) = if pud_slice[idx_l2] == 0 {
+            (boot_frame_allocator.allocate().unwrap() | PT_PT, true)
+        } else {
+            (
+                PageTableEntry::from(pud_slice[idx_l2]).address().as_u64(),
+                false,
+            )
+        };
+        let pmd_slice = unsafe {
+            std::slice::from_raw_parts_mut(
+                mem_addr.offset((pmd_addr - guest_address.as_u64()) as isize) as *mut u64,
+                512,
+            )
+        };
+        if new {
+            pmd_slice.fill(0);
+            pud_slice[idx_l2] = pmd_addr | PT_PT;
+        }
+
+        pmd_slice[idx_l1] = frame_addr | PT_MEM
     }
 }
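
The rewritten setup above draws a fresh 4 KiB frame for every newly needed table from a `BumpAllocator` imported from `crate::paging`; its implementation is not part of this diff. A simplified sketch of what a bump-style frame allocator of that shape does (working on plain `u64` addresses instead of `GuestPhysAddr`, purely for illustration; the real type may differ):

/// Illustrative bump allocator over a fixed pool of frames; the actual
/// `BumpAllocator` in uhyve's paging module may differ in interface and types.
struct BumpFrameAllocator<const FRAME_SIZE: u64> {
    next: u64,
    frames_left: u64,
}

impl<const FRAME_SIZE: u64> BumpFrameAllocator<FRAME_SIZE> {
    fn new(start: u64, frames: u64) -> Self {
        Self { next: start, frames_left: frames }
    }

    /// Hand out the next frame, or `None` once the pool is exhausted.
    fn allocate(&mut self) -> Option<u64> {
        if self.frames_left == 0 {
            return None;
        }
        let frame = self.next;
        self.next += FRAME_SIZE;
        self.frames_left -= 1;
        Some(frame)
    }
}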

src/arch/x86_64/mod.rs

Lines changed: 36 additions & 24 deletions
@@ -16,7 +16,7 @@ pub const RAM_START: GuestPhysAddr = GuestPhysAddr::new(0x00);
 pub fn virt_to_phys(
     addr: GuestVirtAddr,
     mem: &MmapMemory,
-    pagetable_l0: GuestPhysAddr,
+    pml4: GuestPhysAddr,
 ) -> Result<GuestPhysAddr, PagetableError> {
     /// Number of Offset bits of a virtual address for a 4 KiB page, which are shifted away to get its Page Frame Number (PFN).
     pub const PAGE_BITS: u64 = 12;
@@ -25,7 +25,7 @@ pub fn virt_to_phys(
     pub const PAGE_MAP_BITS: usize = 9;
 
     let mut page_table =
-        unsafe { (mem.host_address(pagetable_l0).unwrap() as *mut PageTable).as_mut() }.unwrap();
+        unsafe { (mem.host_address(pml4).unwrap() as *mut PageTable).as_mut() }.unwrap();
     let mut page_bits = 39;
     let mut entry = PageTableEntry::new();
 
@@ -53,17 +53,22 @@ pub fn virt_to_phys(
     Ok((entry.addr() + (addr.as_u64() & !((!0u64) << PAGE_BITS))).into())
 }
 
-pub fn init_guest_mem(mem: &mut [u8]) {
+pub fn init_guest_mem(
+    mem: &mut [u8],
+    guest_address: GuestPhysAddr,
+    length: u64,
+    legacy_mapping: bool,
+) {
     // TODO: we should maybe return an error on failure (e.g., the memory is too small)
-    initialize_pagetables(mem);
+    initialize_pagetables(mem, guest_address, length, legacy_mapping);
 }
 
 #[cfg(test)]
 mod tests {
     use x86_64::structures::paging::PageTableFlags;
 
     use super::*;
-    use crate::consts::{BOOT_PDE, BOOT_PDPTE, BOOT_PML4};
+    use crate::consts::{MIN_PHYSMEM_SIZE, PAGETABLES_END, PAGETABLES_OFFSET, PML4_OFFSET};
 
     #[test]
     fn test_virt_to_phys() {
@@ -72,38 +77,45 @@ mod tests {
             .is_test(true)
             .try_init();
 
-        let mem = MmapMemory::new(
-            0,
-            align_up!(paging::MIN_PHYSMEM_SIZE * 2, 0x20_0000),
-            GuestPhysAddr::zero(),
-            true,
-            true,
+        let guest_address = GuestPhysAddr::new(0x11111000);
+
+        let mem = MmapMemory::new(0, MIN_PHYSMEM_SIZE * 2, guest_address, true, true);
+        println!("mmap memory created {mem:x?}");
+
+        init_guest_mem(
+            unsafe { mem.as_slice_mut() }.try_into().unwrap(),
+            guest_address,
+            MIN_PHYSMEM_SIZE as u64 * 2,
+            false,
         );
-        println!("mmap memory created {mem:?}");
-        initialize_pagetables(unsafe { mem.as_slice_mut() }.try_into().unwrap());
 
         // Get the address of the first entry in PML4 (the address of the PML4 itself)
         let virt_addr = GuestVirtAddr::new(0xFFFFFFFFFFFFF000);
-        let p_addr = virt_to_phys(virt_addr, &mem, BOOT_PML4).unwrap();
-        assert_eq!(p_addr, BOOT_PML4);
+        let p_addr = virt_to_phys(virt_addr, &mem, guest_address + PML4_OFFSET).unwrap();
+        assert_eq!(p_addr, guest_address + PML4_OFFSET);
 
         // The last entry on the PML4 is the address of the PML4 with flags
         let virt_addr = GuestVirtAddr::new(0xFFFFFFFFFFFFF000 | (4096 - 8));
-        let p_addr = virt_to_phys(virt_addr, &mem, BOOT_PML4).unwrap();
+        let p_addr = virt_to_phys(virt_addr, &mem, guest_address + PML4_OFFSET).unwrap();
         assert_eq!(
             mem.read::<u64>(p_addr).unwrap(),
-            BOOT_PML4.as_u64() | (PageTableFlags::PRESENT | PageTableFlags::WRITABLE).bits()
+            (guest_address + PML4_OFFSET).as_u64()
+                | (PageTableFlags::PRESENT | PageTableFlags::WRITABLE).bits()
         );
 
         // the first entry on the 3rd level entry in the pagetables is the address of the boot pdpte
         let virt_addr = GuestVirtAddr::new(0xFFFFFFFFFFE00000);
-        let p_addr = virt_to_phys(virt_addr, &mem, BOOT_PML4).unwrap();
-        assert_eq!(p_addr, BOOT_PDPTE);
-
-        // the first entry on the 2rd level entry in the pagetables is the address of the boot pde
-        let virt_addr = GuestVirtAddr::new(0xFFFFFFFFC0000000);
-        let p_addr = virt_to_phys(virt_addr, &mem, BOOT_PML4).unwrap();
-        assert_eq!(p_addr, BOOT_PDE);
+        let p_addr = virt_to_phys(virt_addr, &mem, guest_address + PML4_OFFSET).unwrap();
+        assert!(p_addr.as_u64() - guest_address.as_u64() >= PAGETABLES_OFFSET);
+        assert!(p_addr.as_u64() - guest_address.as_u64() <= PAGETABLES_END);
+
+        // the idx2 entry on the 2rd level entry in the pagetables is the address of the boot pde
+        let idx2 = GuestVirtAddr::new(guest_address.as_u64()).p2_index();
+        let virt_addr = GuestVirtAddr::new(0xFFFFFFFFC0000000)
+            + u64::from(idx2) * size_of::<PageTableEntry>() as u64;
+        let p_addr = virt_to_phys(virt_addr, &mem, guest_address + PML4_OFFSET).unwrap();
+        assert!(p_addr.as_u64() - guest_address.as_u64() >= PAGETABLES_OFFSET);
+        assert!(p_addr.as_u64() - guest_address.as_u64() <= PAGETABLES_END);
         // That address points to a huge page
         assert!(
             PageTableFlags::from_bits_truncate(mem.read::<u64>(p_addr).unwrap()).contains(
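
For orientation, the `PAGE_BITS` (12) and `PAGE_MAP_BITS` (9) constants used by `virt_to_phys` reflect the standard x86_64 4 KiB paging layout: a canonical virtual address carries four 9-bit table indices above a 12-bit page offset. A standalone decomposition sketch (not part of the commit):

/// Split a virtual address into (PML4, PDPT, PD, PT) indices and page offset,
/// matching the 9-bit-per-level walk performed by `virt_to_phys`.
fn split_virt_addr(addr: u64) -> (usize, usize, usize, usize, u64) {
    let offset = addr & 0xFFF;                  // bits 0..12: offset within the 4 KiB page
    let pt = ((addr >> 12) & 0x1FF) as usize;   // bits 12..21: page table index
    let pd = ((addr >> 21) & 0x1FF) as usize;   // bits 21..30: page directory index
    let pdpt = ((addr >> 30) & 0x1FF) as usize; // bits 30..39: PDPT index
    let pml4 = ((addr >> 39) & 0x1FF) as usize; // bits 39..48: PML4 index
    (pml4, pdpt, pd, pt, offset)
}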
