 //! User and kernel stack spaces are split into stacks with the size of [`VIRT_STACK_SIZE`].
 
 use alloc::collections::BTreeMap;
-use core::{cmp::Ordering, ops::Range};
+use core::ops::Range;
 
 use hyperion_log::*;
 use hyperion_mem::{
-    from_higher_half, is_higher_half,
-    pmm::{self, PageFrame},
+    from_higher_half,
+    pmm::{self},
     to_higher_half,
     vmm::{Handled, NotHandled, PageFaultResult, PageMapImpl, Privilege},
 };
@@ -36,12 +36,9 @@ use x86_64::{
     instructions::tlb,
     registers::control::{Cr3, Cr3Flags},
     structures::paging::{
-        mapper::{
-            MapToError, MappedFrame, MapperFlush, MapperFlushAll, TranslateResult, UnmapError,
-        },
+        mapper::{MapToError, MappedFrame, MapperFlush, MapperFlushAll},
         page_table::{FrameError, PageTableEntry},
-        MappedPageTable, Mapper, OffsetPageTable, Page, PageSize, PageTable, PageTableFlags,
-        PhysFrame, Size1GiB, Size2MiB, Size4KiB, Translate,
+        Page, PageSize, PageTable, PageTableFlags, PhysFrame, Size1GiB, Size2MiB, Size4KiB,
     },
     PhysAddr, VirtAddr,
 };
@@ -141,7 +138,7 @@ fn page_fault_2mib(entry: &mut PageTableEntry, addr: VirtAddr) -> PageFaultResul
 }
 
 fn page_fault_4kib(entry: &mut PageTableEntry, addr: VirtAddr) -> PageFaultResult {
-    let mut frame;
+    let frame;
     let mut flags = entry.flags();
 
     if flags.contains(COW) {
@@ -172,10 +169,11 @@ fn alloc_table() -> PhysFrame {
     PhysFrame::<Size4KiB>::from_start_address(frame.physical_addr()).unwrap()
 }
 
-fn free_table(f: PhysFrame) {
+// FIXME: vmm dealloc
+/* fn free_table(f: PhysFrame) {
     let frame = unsafe { PageFrame::new(f.start_address(), 1) };
     pmm::PFA.free(frame)
-}
+} */
 
 //
 
@@ -1033,7 +1031,7 @@ impl LockedPageMap {
         };
 
         let l1e = &l1[v_addr.p1_index()];
-        let phys = match l1e.frame() {
+        match l1e.frame() {
             Ok(p) => p,
             Err(FrameError::FrameNotPresent) => return None,
             Err(FrameError::HugeFrame) => unreachable!("huge page at lvl 1"),
@@ -1311,108 +1309,3 @@ pub enum TryMapError<T: PageSize> {
     AlreadyMapped,
     NotMapped,
 }
-
-#[derive(Debug)]
-pub enum TryUnmapError<T: PageSize> {
-    Overflow,
-    NotAligned,
-    MapToError(MapToError<T>),
-    WrongSize,
-    AlreadyMapped,
-}
-
-fn try_map_sized<T>(
-    table: &mut OffsetPageTable,
-    start: VirtAddr,
-    end: VirtAddr,
-    p_addr: PhysAddr,
-    flags: PageTableFlags,
-) -> Result<(), TryMapError<T>>
-where
-    T: PageSize,
-    for<'a> OffsetPageTable<'a>: Mapper<T>,
-{
-    let Some(mapping_end) = start.as_u64().checked_add(T::SIZE - 1) else {
-        return Err(TryMapError::Overflow);
-    };
-
-    if mapping_end > end.as_u64() {
-        return Err(TryMapError::Overflow);
-    }
-
-    if !start.is_aligned(T::SIZE) || !p_addr.is_aligned(T::SIZE) {
-        return Err(TryMapError::NotAligned);
-    }
-
-    let page = Page::<T>::containing_address(start);
-    let frame = PhysFrame::<T>::containing_address(p_addr);
-
-    let result = unsafe {
-        table.map_to_with_table_flags(
-            page,
-            frame,
-            flags,
-            (flags & (PageTableFlags::PRESENT | PageTableFlags::USER_ACCESSIBLE))
-                | PageTableFlags::WRITABLE,
-            &mut Pfa,
-        )
-    };
-
-    if let Err(MapToError::PageAlreadyMapped(old_frame)) = result {
-        if old_frame == frame {
-            return Ok(());
-        }
-    }
-
-    result.map_err(|err| TryMapError::MapToError(err))?.flush();
-
-    /* hyperion_log::debug!("mapped 1GiB at 0x{:016x}", start);
-    crash_after_nth(10); */
-
-    Ok(())
-}
-
-fn try_unmap_sized<T>(
-    table: &mut OffsetPageTable,
-    start: VirtAddr,
-    _end: VirtAddr,
-) -> Result<(), TryMapError<T>>
-where
-    T: PageSize,
-    for<'a> OffsetPageTable<'a>: Mapper<T>,
-{
-    let Some(_mapping_end) = start.as_u64().checked_add(T::SIZE - 1) else {
-        return Err(TryMapError::Overflow);
-    };
-
-    /* if mapping_end > end.as_u64() {
-        return Err(TryMapSizedError::Overflow);
-    } */
-
-    if !start.is_aligned(T::SIZE) {
-        return Err(TryMapError::NotAligned);
-    }
-
-    let page = Page::<T>::containing_address(start);
-
-    match table.unmap(page) {
-        Ok((_, ok)) => {
-            ok.flush();
-            Ok(())
-        }
-        Err(UnmapError::PageNotMapped) => {
-            // hyperion_log::debug!("already not mapped");
-            Ok(())
-        }
-        Err(UnmapError::ParentEntryHugePage) => Err(TryMapError::WrongSize),
-        Err(_err) => panic!("{_err:?}"),
-    }
-}
-
-fn v_addr_checked_add(addr: VirtAddr, rhs: u64) -> Option<VirtAddr> {
-    VirtAddr::try_new(addr.as_u64().checked_add(rhs)?).ok()
-}
-
-fn p_addr_checked_add(addr: PhysAddr, rhs: u64) -> Option<PhysAddr> {
-    PhysAddr::try_new(addr.as_u64().checked_add(rhs)?).ok()
-}