@@ -1,20 +1,29 @@
 use std::mem::size_of;
 
+use align_address::Align;
 use bitflags::bitflags;
 use uhyve_interface::{GuestPhysAddr, GuestVirtAddr};
 
 use crate::{
-	consts::{BOOT_INFO_ADDR, BOOT_PGT},
+	consts::{PAGETABLES_END, PAGETABLES_OFFSET, PGT_OFFSET},
 	mem::MmapMemory,
-	paging::PagetableError,
+	paging::{BumpAllocator, PagetableError},
 };
 
-pub const RAM_START: GuestPhysAddr = GuestPhysAddr::new(0x00);
+pub(crate) const RAM_START: GuestPhysAddr = GuestPhysAddr::new(0x00);
 
-pub const PT_DEVICE: u64 = 0x707;
-pub const PT_PT: u64 = 0x713;
-pub const PT_MEM: u64 = 0x713;
-pub const PT_MEM_CD: u64 = 0x70F;
+const SIZE_4KIB: u64 = 0x1000;
+
+// PageTableEntry flags
+/// Present + 4KiB + device memory + inner shareable + accessed
+pub const PT_DEVICE: u64 = 0b11100000111;
+/// Present + 4KiB + normal memory + inner shareable + accessed
+pub const PT_PT: u64 = 0b11100010011;
+/// Present + 4KiB + normal memory + inner shareable + accessed
+pub const PT_MEM: u64 = 0b11100010011;
+/// Present + 4KiB + device memory + inner shareable + accessed
+pub const PT_MEM_CD: u64 = 0b11100001111;
+/// Self-reference flag
 pub const PT_SELF: u64 = 1 << 55;
 
 /*
@@ -115,7 +124,7 @@ fn is_valid_address(virtual_address: GuestVirtAddr) -> bool {
 pub fn virt_to_phys(
 	addr: GuestVirtAddr,
 	mem: &MmapMemory,
-	pagetable_l0: GuestPhysAddr,
+	pgt: GuestPhysAddr,
 ) -> Result<GuestPhysAddr, PagetableError> {
 	if !is_valid_address(addr) {
 		return Err(PagetableError::InvalidAddress);
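A call-site sketch of the renamed signature (the surrounding variables are assumed to be in scope; with the new layout, the root table sits at PGT_OFFSET within guest memory):

// Resolve a guest-virtual address through the guest's root pagetable.
let root_pgt = guest_address + PGT_OFFSET;
let phys = virt_to_phys(GuestVirtAddr::new(0x1000), &mem, root_pgt)?;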
@@ -132,9 +141,7 @@ pub fn virt_to_phys(
 	// - Our indices can't be larger than 512, so we stay in the borders of the page.
 	// - We are page_aligned, and thus also PageTableEntry aligned.
 	let mut pagetable: &[PageTableEntry] = unsafe {
-		std::mem::transmute::<&[u8], &[PageTableEntry]>(
-			mem.slice_at(pagetable_l0, PAGE_SIZE).unwrap(),
-		)
+		std::mem::transmute::<&[u8], &[PageTableEntry]>(mem.slice_at(pgt, PAGE_SIZE).unwrap())
 	};
 	// TODO: Depending on the virtual address length and granule (defined in TCR register by TG and TxSZ), we could reduce the number of pagetable walks. Hermit doesn't do this at the moment.
 	for level in 0..3 {
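For orientation: with a 4KiB granule, each level of the walk decodes 9 bits of the virtual address (512 entries per table) at strides of 1 << 39, 1 << 30, 1 << 21 and 1 << 12 bytes; these are the same strides (0x80_0000_0000, 0x4000_0000, 0x20_0000, 0x1000) that the mapping loop in init_guest_mem below divides by. A sketch (helper name hypothetical):

fn pt_indices(addr: u64) -> [usize; 4] {
	// 9 bits per level: an L0 entry covers 512 GiB, L1 1 GiB, L2 2 MiB, L3 4 KiB
	[39, 30, 21, 12].map(|shift| ((addr >> shift) & 0x1FF) as usize)
}

// e.g. pt_indices(0x8020_3000) == [0, 2, 1, 3]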
@@ -155,71 +162,129 @@ pub fn virt_to_phys(
 	Ok(pte.address())
 }
 
-pub fn init_guest_mem(mem: &mut [u8]) {
+pub fn init_guest_mem(
+	mem: &mut [u8],
+	guest_address: GuestPhysAddr,
+	length: u64,
+	_legacy_mapping: bool,
+) {
+	warn!("aarch64 pagetable initialization is untested!");
+
 	let mem_addr = std::ptr::addr_of_mut!(mem[0]);
 
-	assert!(mem.len() >= BOOT_PGT.as_u64() as usize + 512 * size_of::<u64>());
-	let pgt_slice = unsafe {
-		std::slice::from_raw_parts_mut(mem_addr.offset(BOOT_PGT.as_u64() as isize) as *mut u64, 512)
-	};
-	pgt_slice.fill(0);
-	pgt_slice[0] = BOOT_PGT.as_u64() + 0x1000 + PT_PT;
-	pgt_slice[511] = BOOT_PGT.as_u64() + PT_PT + PT_SELF;
+	assert!(mem.len() >= PGT_OFFSET as usize + 512 * size_of::<u64>());
 
-	assert!(mem.len() >= BOOT_PGT.as_u64() as usize + 0x1000 + 512 * size_of::<u64>());
 	let pgt_slice = unsafe {
-		std::slice::from_raw_parts_mut(
-			mem_addr.offset(BOOT_PGT.as_u64() as isize + 0x1000) as *mut u64,
-			512,
-		)
+		std::slice::from_raw_parts_mut(mem_addr.offset(PGT_OFFSET as isize) as *mut u64, 512)
 	};
 	pgt_slice.fill(0);
-	pgt_slice[0] = BOOT_PGT.as_u64() + 0x2000 + PT_PT;
+	pgt_slice[511] = (guest_address + PGT_OFFSET) | PT_PT | PT_SELF;
 
-	assert!(mem.len() >= BOOT_PGT.as_u64() as usize + 0x2000 + 512 * size_of::<u64>());
-	let pgt_slice = unsafe {
-		std::slice::from_raw_parts_mut(
-			mem_addr.offset(BOOT_PGT.as_u64() as isize + 0x2000) as *mut u64,
-			512,
-		)
-	};
-	pgt_slice.fill(0);
-	pgt_slice[0] = BOOT_PGT.as_u64() + 0x3000 + PT_PT;
-	pgt_slice[1] = BOOT_PGT.as_u64() + 0x4000 + PT_PT;
-	pgt_slice[2] = BOOT_PGT.as_u64() + 0x5000 + PT_PT;
+	let mut boot_frame_allocator = BumpAllocator::<SIZE_4KIB>::new(
+		guest_address + PAGETABLES_OFFSET,
+		(PAGETABLES_END - PAGETABLES_OFFSET) / SIZE_4KIB,
+	);
 
-	assert!(mem.len() >= BOOT_PGT.as_u64() as usize + 0x3000 + 512 * size_of::<u64>());
-	let pgt_slice = unsafe {
+	// Hypercalls are MMIO reads/writes in the lowest 4KiB of address space. Thus, we need to provide pagetable entries for this region.
+	let pgd0_addr = boot_frame_allocator.allocate().unwrap().as_u64();
+	pgt_slice[0] = pgd0_addr | PT_PT;
+	let pgd0_slice = unsafe {
 		std::slice::from_raw_parts_mut(
-			mem_addr.offset(BOOT_PGT.as_u64() as isize + 0x3000) as *mut u64,
+			mem_addr.offset((pgd0_addr - guest_address.as_u64()) as isize) as *mut u64,
 			512,
 		)
 	};
-	pgt_slice.fill(0);
-	// map Uhyve ports into the virtual address space
-	pgt_slice[0] = PT_MEM_CD;
-	// map BootInfo into the virtual address space
-	pgt_slice[BOOT_INFO_ADDR.as_u64() as usize / PAGE_SIZE] = BOOT_INFO_ADDR.as_u64() + PT_MEM;
+	pgd0_slice.fill(0);
+	let pud0_addr = boot_frame_allocator.allocate().unwrap().as_u64();
+	pgd0_slice[0] = pud0_addr | PT_PT;
 
-	assert!(mem.len() >= BOOT_PGT.as_u64() as usize + 0x4000 + 512 * size_of::<u64>());
-	let pgt_slice = unsafe {
+	let pud0_slice = unsafe {
 		std::slice::from_raw_parts_mut(
-			mem_addr.offset(BOOT_PGT.as_u64() as isize + 0x4000) as *mut u64,
+			mem_addr.offset((pud0_addr - guest_address.as_u64()) as isize) as *mut u64,
 			512,
 		)
 	};
-	for (idx, i) in pgt_slice.iter_mut().enumerate() {
-		*i = 0x200000u64 + (idx * PAGE_SIZE) as u64 + PT_MEM;
-	}
+	pud0_slice.fill(0);
+	let pmd0_addr = boot_frame_allocator.allocate().unwrap().as_u64();
+	pud0_slice[0] = pmd0_addr | PT_PT;
 
-	assert!(mem.len() >= BOOT_PGT.as_u64() as usize + 0x5000 + 512 * size_of::<u64>());
-	let pgt_slice = unsafe {
+	let pmd0_slice = unsafe {
 		std::slice::from_raw_parts_mut(
-			mem_addr.offset(BOOT_PGT.as_u64() as isize + 0x5000) as *mut u64,
+			mem_addr.offset((pmd0_addr - guest_address.as_u64()) as isize) as *mut u64,
 			512,
 		)
 	};
-	for (idx, i) in pgt_slice.iter_mut().enumerate() {
-		*i = 0x400000u64 + (idx * PAGE_SIZE) as u64 + PT_MEM;
+	pmd0_slice.fill(0);
+	// Hypercall/IO mapping
+	pmd0_slice[0] = PT_MEM;
+
+	for frame_addr in (guest_address.align_down(SIZE_4KIB).as_u64()
+		..(guest_address + length).align_up(SIZE_4KIB).as_u64())
+		.step_by(SIZE_4KIB as usize)
+	{
+		let idx_l4 = (frame_addr as usize / (0x80_0000_0000)) & (0xFFF);
+		let idx_l3 = (frame_addr as usize / (0x4000_0000)) & (0xFFF);
+		let idx_l2 = (frame_addr as usize / (0x20_0000)) & (0xFFF);
+		let idx_l1 = (frame_addr as usize / (0x1000)) & (0xFFF);
+		debug!("mapping frame {frame_addr:x} to pagetable {idx_l4}-{idx_l3}-{idx_l2}-{idx_l1}");
+
+		let (pgd_addr, new) = if pgt_slice[idx_l4] == 0 {
+			(boot_frame_allocator.allocate().unwrap() | PT_PT, true)
+		} else {
+			(
+				PageTableEntry::from(pgt_slice[idx_l4]).address().as_u64(),
+				false,
+			)
+		};
+		let pgd_slice = unsafe {
+			std::slice::from_raw_parts_mut(
+				mem_addr.offset((pgd_addr - guest_address.as_u64()) as isize) as *mut u64,
+				512,
+			)
+		};
+		if new {
+			pgd_slice.fill(0);
+			pgt_slice[idx_l4] = pgd_addr | PT_PT;
+		}
+
+		let (pud_addr, new) = if pgd_slice[idx_l3] == 0 {
+			(boot_frame_allocator.allocate().unwrap() | PT_PT, true)
+		} else {
+			(
+				PageTableEntry::from(pgd_slice[idx_l3]).address().as_u64(),
+				false,
+			)
+		};
+		let pud_slice = unsafe {
+			std::slice::from_raw_parts_mut(
+				mem_addr.offset((pud_addr - guest_address.as_u64()) as isize) as *mut u64,
+				512,
+			)
+		};
+		if new {
+			pud_slice.fill(0);
+			pgd_slice[idx_l3] = pud_addr | PT_PT;
+		}
+
+		let (pmd_addr, new) = if pud_slice[idx_l2] == 0 {
+			(boot_frame_allocator.allocate().unwrap() | PT_PT, true)
+		} else {
+			(
+				PageTableEntry::from(pud_slice[idx_l2]).address().as_u64(),
+				false,
+			)
+		};
+		let pmd_slice = unsafe {
+			std::slice::from_raw_parts_mut(
+				mem_addr.offset((pmd_addr - guest_address.as_u64()) as isize) as *mut u64,
+				512,
+			)
+		};
+		if new {
+			pmd_slice.fill(0);
+			pud_slice[idx_l2] = pmd_addr | PT_PT;
+		}
+
+		pmd_slice[idx_l1] = frame_addr | PT_MEM;
 	}
 }
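The BumpAllocator backing boot_frame_allocator comes from crate::paging and is not shown in this diff. A minimal sketch consistent with how it is called here (the field names and exact signatures are assumptions; the real implementation may differ):

struct BumpAllocator<const FRAME_SIZE: u64> {
	next: u64,      // next free guest-physical frame address
	remaining: u64, // frames left in the pool
}

impl<const FRAME_SIZE: u64> BumpAllocator<FRAME_SIZE> {
	fn new(start: GuestPhysAddr, frames: u64) -> Self {
		Self {
			next: start.as_u64(),
			remaining: frames,
		}
	}

	/// Hands out FRAME_SIZE-sized frames front to back; None once the pool is exhausted.
	fn allocate(&mut self) -> Option<GuestPhysAddr> {
		if self.remaining == 0 {
			return None;
		}
		let frame = self.next;
		self.next += FRAME_SIZE;
		self.remaining -= 1;
		Some(GuestPhysAddr::new(frame))
	}
}

Since the pool spans guest_address + PAGETABLES_OFFSET up to PAGETABLES_END, (PAGETABLES_END - PAGETABLES_OFFSET) / SIZE_4KIB is the number of 4KiB frames available for pagetables; allocate() returning None (and the unwrap() above panicking) signals that this region was sized too small for the requested mapping.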