use core::{ops::RangeBounds, slice::IterMut};

use tracing::trace;

use crate::{
    cpu,
    memory_management::{
        memory_layout::{
            align_range, align_up, is_aligned, kernel_elf_rodata_end, physical2virtual,
            virtual2physical, MemSize, EXTENDED_OFFSET, KERNEL_BASE, KERNEL_END, KERNEL_LINK,
            KERNEL_MAPPED_SIZE, PAGE_2M, PAGE_4K,
        },
        physical_page_allocator,
    },
    sync::{once::OnceLock, spin::mutex::Mutex},
};

use super::memory_layout::{
    stack_guard_page_ptr, PROCESS_KERNEL_STACK_BASE, PROCESS_KERNEL_STACK_SIZE,
};

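/// Page-table entry flag bits for x86_64 4-level paging.
///
/// The `pub` flags can be requested by callers of `map`; the `pub(super)` ones
/// are managed inside the memory-management module or updated by the CPU
/// (`ACCESSED`/`DIRTY`).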
#[allow(dead_code)]
pub mod flags {
    pub(super) const PTE_PRESENT: u64 = 1 << 0;
    pub const PTE_WRITABLE: u64 = 1 << 1;
    pub const PTE_USER: u64 = 1 << 2;
    pub const PTE_WRITETHROUGH: u64 = 1 << 3;
    pub const PTE_NOT_CACHEABLE: u64 = 1 << 4;
    pub(super) const PTE_ACCESSED: u64 = 1 << 5;
    pub(super) const PTE_DIRTY: u64 = 1 << 6;
    pub(super) const PTE_HUGE_PAGE: u64 = 1 << 7;
    pub(super) const PTE_GLOBAL: u64 = 1 << 8;
    pub(super) const PTE_NO_EXECUTE: u64 = 1 << 63;
}

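// Mask extracting the physical frame address from a page-table entry. Note
// that this covers bits 12..=31 only, so page tables and mapped frames are
// assumed to live in the first 4 GiB of physical memory.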
const ADDR_MASK: u64 = 0x0000_0000_FFFF_F000;

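// The kernel owns the last L4 slot (0x1FF). Within it, the top L3 slots
// (0x1FE..=0x1FF) hold the kernel image and its fixed mappings, while the
// lower L3 slots (0..=0x1FD) are reserved for per-process kernel data such as
// the process kernel stack.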
const KERNEL_L4_INDEX: usize = 0x1FF;

const KERNEL_L3_INDEX_START: usize = 0x1FE;
#[allow(dead_code)]
const KERNEL_L3_INDEX_END: usize = 0x1FF;

const KERNEL_L3_PROCESS_INDEX_START: usize = 0;
const KERNEL_L3_PROCESS_INDEX_END: usize = KERNEL_L3_INDEX_START - 1;

// Start of the per-process kernel region: L4 slot 0x1FF, L3 slot 0.
pub const KERNEL_PROCESS_VIRTUAL_ADDRESS_START: usize =
    0xFFFF_0000_0000_0000 | KERNEL_L4_INDEX << 39 | KERNEL_L3_PROCESS_INDEX_START << 30;

// All L4 slots below the kernel's are available to user processes.
const NUM_USER_L4_INDEXES: usize = KERNEL_L4_INDEX;

// The last 4 KiB page that a user mapping may reach, i.e. the last page below
// the kernel's L4 slot.
pub const MAX_USER_VIRTUAL_ADDRESS: usize = 0xFFFF_0000_0000_0000
    | (KERNEL_L4_INDEX - 1) << 39
    | (0x1FF << 30)
    | (0x1FF << 21)
    | (0x1FF << 12);

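// An x86_64 virtual address is translated through 4 levels of page tables;
// each level is indexed by 9 bits of the address (L4 uses bits 39..=47, L1
// uses bits 12..=20) and the low 12 bits are the offset inside a 4 KiB page.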
#[inline(always)]
const fn get_l4(addr: usize) -> usize {
    (addr >> 39) & 0x1FF
}

#[inline(always)]
const fn get_l3(addr: usize) -> usize {
    (addr >> 30) & 0x1FF
}

#[inline(always)]
const fn get_l2(addr: usize) -> usize {
    (addr >> 21) & 0x1FF
}

#[inline(always)]
const fn get_l1(addr: usize) -> usize {
    (addr >> 12) & 0x1FF
}

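/// A single mapping request handed to [`VirtualMemoryMapper::map`] /
/// [`VirtualMemoryMapper::unmap`]: a virtual range, an optional backing
/// physical address (`None` means the mapper allocates zeroed frames itself),
/// the size in bytes, and the PTE flags to apply.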
#[repr(C, align(32))]
#[derive(Debug, Copy, Clone)]
pub struct VirtualMemoryMapEntry {
    pub virtual_address: usize,
    pub physical_address: Option<u64>,
    pub size: usize,
    pub flags: u64,
}

#[repr(C, align(4096))]
struct PageDirectoryTable {
    entries: [u64; 512],
}

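/// A page table referenced by its *physical* address. Page-table entries store
/// physical addresses, so every access to the table itself goes through the
/// kernel's physical-to-virtual mapping (`physical2virtual`).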
#[repr(transparent)]
struct PageDirectoryTablePtr {
    physical_addr: u64,
}

impl PageDirectoryTablePtr {
    const fn from_entry(entry: u64) -> Self {
        Self {
            physical_addr: entry & ADDR_MASK,
        }
    }

    fn entries_from_mut_entry(entry: &mut u64) -> &mut PageDirectoryTable {
        let table = physical2virtual(*entry & ADDR_MASK) as *mut PageDirectoryTable;
        unsafe { &mut *table }
    }

    fn as_physical(&self) -> u64 {
        self.physical_addr
    }

    fn as_virtual(&self) -> usize {
        // Tables are expected to lie within the memory covered by the kernel's
        // fixed mapping, otherwise `physical2virtual` would not be valid here.
        assert!(self.physical_addr < KERNEL_END as u64);
        physical2virtual(self.physical_addr)
    }

    fn alloc_new() -> Self {
        Self {
            physical_addr: unsafe {
                virtual2physical(physical_page_allocator::alloc_zeroed() as _)
            },
        }
    }

    fn as_ptr(&self) -> *mut PageDirectoryTable {
        self.as_virtual() as *mut PageDirectoryTable
    }

    fn as_mut(&mut self) -> &mut PageDirectoryTable {
        unsafe { &mut *self.as_ptr() }
    }

    fn as_ref(&self) -> &PageDirectoryTable {
        unsafe { &*self.as_ptr() }
    }

    unsafe fn free(self) {
        unsafe { physical_page_allocator::free(self.as_virtual() as _) };
    }
}

static KERNEL_VIRTUAL_MEMORY_MANAGER: OnceLock<Mutex<VirtualMemoryMapper>> = OnceLock::new();

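/// Creates the kernel's own address space and switches `CR3` to it.
///
/// Must be called exactly once during early boot; a second call panics.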
pub fn init_kernel_vm() {
    if KERNEL_VIRTUAL_MEMORY_MANAGER.try_get().is_some() {
        panic!("Kernel VM already initialized");
    }

    let manager = KERNEL_VIRTUAL_MEMORY_MANAGER
        .get_or_init(|| Mutex::new(VirtualMemoryMapper::new_kernel_vm()))
        .lock();

    unsafe { manager.switch_to_this() };
}

/// Switches `CR3` back to the kernel's own address space.
pub unsafe fn switch_to_kernel() {
    KERNEL_VIRTUAL_MEMORY_MANAGER.get().lock().switch_to_this();
}

/// Maps a range into the kernel's address space; the address must be above `KERNEL_BASE`.
pub fn map_kernel(entry: &VirtualMemoryMapEntry) {
    assert!(entry.virtual_address >= KERNEL_BASE);
    KERNEL_VIRTUAL_MEMORY_MANAGER.get().lock().map(entry);
}

/// Unmaps a range from the kernel's address space. `is_allocated` must be `true`
/// when the backing frames were allocated by the mapper itself (i.e. the range
/// was mapped with `physical_address: None`), so they are returned to the
/// physical page allocator.
pub fn unmap_kernel(entry: &VirtualMemoryMapEntry, is_allocated: bool) {
    assert!(entry.virtual_address >= KERNEL_BASE);
    KERNEL_VIRTUAL_MEMORY_MANAGER
        .get()
        .lock()
        .unmap(entry, is_allocated);
}

#[allow(dead_code)]
pub fn is_address_mapped_in_kernel(addr: usize) -> bool {
    KERNEL_VIRTUAL_MEMORY_MANAGER
        .get()
        .lock()
        .is_address_mapped(addr)
}

/// Clones the kernel half of the currently loaded address space into a fresh
/// page map and marks the result as a user address space. Interrupts are
/// disabled while reading `CR3` and cloning so the address space cannot be
/// switched mid-way.
pub fn clone_current_vm_as_user() -> VirtualMemoryMapper {
    cpu::cpu().push_cli();
    let manager = get_current_vm();
    let mut new_vm = manager.clone_kernel_mem();
    cpu::cpu().pop_cli();
    new_vm.is_user = true;
    new_vm
}

pub fn get_current_vm() -> VirtualMemoryMapper {
    VirtualMemoryMapper::get_current_vm()
}

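/// An address space, identified by the physical address of its root (L4) page
/// table. `is_user` marks address spaces belonging to user processes and
/// enables extra checks when mapping.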
pub struct VirtualMemoryMapper {
    page_map_l4: PageDirectoryTablePtr,
    is_user: bool,
}

impl VirtualMemoryMapper {
    fn new() -> Self {
        Self {
            page_map_l4: PageDirectoryTablePtr::alloc_new(),
            is_user: false,
        }
    }

    /// Creates a new address space that shares the kernel's mappings.
    ///
    /// The kernel L4 slot gets its own copy of the kernel L3 table (so the
    /// per-process L3 slots can later be changed independently), while the
    /// tables below that level are shared with the source address space.
    pub fn clone_kernel_mem(&self) -> Self {
        let this_kernel_l4 =
            PageDirectoryTablePtr::from_entry(self.page_map_l4.as_ref().entries[KERNEL_L4_INDEX]);

        let mut new_vm = Self::new();

        let mut new_kernel_l4 = PageDirectoryTablePtr::alloc_new();

        for i in 0..=0x1FF {
            new_kernel_l4.as_mut().entries[i] = this_kernel_l4.as_ref().entries[i];
        }

        new_vm.page_map_l4.as_mut().entries[KERNEL_L4_INDEX] =
            new_kernel_l4.as_physical() | flags::PTE_PRESENT | flags::PTE_WRITABLE;

        new_vm
    }

    /// Sets up the kernel mappings that are private to this process, currently
    /// just the process kernel stack.
    pub unsafe fn add_process_specific_mappings(&mut self) {
        let mut this_kernel_l4 =
            PageDirectoryTablePtr::from_entry(self.page_map_l4.as_ref().entries[KERNEL_L4_INDEX]);

        // Clear the per-process L3 slots inherited from the cloned kernel table.
        for i in KERNEL_L3_PROCESS_INDEX_START..=KERNEL_L3_PROCESS_INDEX_END {
            this_kernel_l4.as_mut().entries[i] = 0;
        }

        // Temporarily treat this as a kernel address space so `map` accepts a
        // kernel-half address without user flags.
        self.is_user = false;
        self.map(&VirtualMemoryMapEntry {
            virtual_address: PROCESS_KERNEL_STACK_BASE,
            physical_address: None,
            size: PROCESS_KERNEL_STACK_SIZE,
            flags: flags::PTE_WRITABLE,
        });
        self.is_user = true;
    }

    fn load_vm(base: &PageDirectoryTablePtr) {
        trace!(
            "Switching to new page map: {:p}",
            base.as_physical() as *const u8
        );
        unsafe { cpu::set_cr3(base.as_physical()) }
    }

    fn get_current_vm() -> Self {
        let kernel_vm_addr = KERNEL_VIRTUAL_MEMORY_MANAGER
            .get()
            .lock()
            .page_map_l4
            .as_physical();
        let cr3 = unsafe { cpu::get_cr3() };
        // Anything other than the kernel's own page map is a user address space.
        let is_user = cr3 != kernel_vm_addr;
        Self {
            page_map_l4: PageDirectoryTablePtr::from_entry(cr3),
            is_user,
        }
    }

    pub fn is_used_by_me(&self) -> bool {
        let cr3 = unsafe { cpu::get_cr3() };
        cr3 == self.page_map_l4.as_physical()
    }

    /// Loads this address space into `CR3`.
    pub unsafe fn switch_to_this(&self) {
        Self::load_vm(&self.page_map_l4);
    }

    fn new_kernel_vm() -> Self {
        let data_start = align_up(kernel_elf_rodata_end(), PAGE_4K);
        let kernel_vm = [
            // Low physical memory, mapped writable at KERNEL_BASE.
            VirtualMemoryMapEntry {
                virtual_address: KERNEL_BASE,
                physical_address: Some(0),
                size: EXTENDED_OFFSET,
                flags: flags::PTE_WRITABLE,
            },
            // Kernel text and read-only data, mapped read-only.
            VirtualMemoryMapEntry {
                virtual_address: KERNEL_LINK,
                physical_address: Some(virtual2physical(KERNEL_LINK)),
                size: (virtual2physical(data_start) - virtual2physical(KERNEL_LINK)) as usize,
                flags: 0,
            },
            // Kernel data and the rest of the mapped region, writable.
            VirtualMemoryMapEntry {
                virtual_address: data_start,
                physical_address: Some(virtual2physical(data_start)),
                size: KERNEL_MAPPED_SIZE - virtual2physical(data_start) as usize,
                flags: flags::PTE_WRITABLE,
            },
        ];

        let mut s = Self::new();

        for entry in kernel_vm.iter() {
            s.map(entry);
        }

        // Unmap the stack guard page so that overflowing the kernel stack
        // faults instead of silently corrupting memory.
        s.unmap(
            &VirtualMemoryMapEntry {
                virtual_address: stack_guard_page_ptr(),
                physical_address: None,
                size: PAGE_4K,
                flags: 0,
            },
            false,
        );

        s
    }

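    /// Maps the given virtual range into this address space.
    ///
    /// The range is aligned to 4 KiB pages and mapped page by page: the
    /// L4 -> L3 -> L2 -> L1 tables are walked, allocating intermediate tables
    /// on demand, and the requested flags are also OR-ed into the parent
    /// entries (on x86_64 the effective permissions are the intersection of
    /// all levels). If no physical address is given, each page is backed by a
    /// freshly allocated zeroed frame.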
    pub fn map(&mut self, entry: &VirtualMemoryMapEntry) {
        let VirtualMemoryMapEntry {
            mut virtual_address,
            physical_address: mut start_physical_address,
            size: requested_size,
            flags,
        } = entry;

        assert!(!self.page_map_l4.as_ptr().is_null());
        assert!(is_aligned(self.page_map_l4.as_virtual(), PAGE_4K));

        let (aligned_start, mut size, _) = align_range(virtual_address, *requested_size, PAGE_4K);
        virtual_address = aligned_start;

        if self.is_user {
            assert_ne!(*flags & flags::PTE_USER, 0);
            assert_ne!(get_l4(virtual_address), KERNEL_L4_INDEX);
            let end = virtual_address + size;
            assert!(end <= MAX_USER_VIRTUAL_ADDRESS);
        }

        if let Some(start_physical_address) = start_physical_address.as_mut() {
            let (aligned_start, physical_size, _) =
                align_range(*start_physical_address, *requested_size, PAGE_4K);
            assert_eq!(physical_size, size);
            *start_physical_address = aligned_start;
        }

        let mut physical_address = start_physical_address;

        assert!(size > 0);

        trace!(
            "{} {:08X?}",
            MemSize(size),
            VirtualMemoryMapEntry {
                virtual_address,
                physical_address,
                size,
                flags: *flags,
            }
        );

        while size > 0 {
            let current_physical_address = physical_address.unwrap_or_else(|| {
                virtual2physical(unsafe { physical_page_allocator::alloc_zeroed() as _ })
            });
            trace!(
                "[!] Mapping {:p} to {:p}",
                virtual_address as *const u8,
                current_physical_address as *const u8
            );
            let page_map_l4_index = get_l4(virtual_address);
            let page_directory_pointer_index = get_l3(virtual_address);
            let page_directory_index = get_l2(virtual_address);
            let page_table_index = get_l1(virtual_address);

            let page_map_l4_entry = &mut self.page_map_l4.as_mut().entries[page_map_l4_index];

            if *page_map_l4_entry & flags::PTE_PRESENT == 0 {
                let page_directory_pointer_table = PageDirectoryTablePtr::alloc_new();
                *page_map_l4_entry =
                    (page_directory_pointer_table.as_physical() & ADDR_MASK) | flags::PTE_PRESENT;
            }
            // OR the requested flags into the parent entries so they are never
            // more restrictive than the final page's permissions.
            *page_map_l4_entry |= flags;
            trace!(
                "L4[{}]: {:p} = {:x}",
                page_map_l4_index,
                page_map_l4_entry,
                *page_map_l4_entry
            );

            let mut page_directory_pointer_table =
                PageDirectoryTablePtr::from_entry(*page_map_l4_entry);

            let page_directory_pointer_entry =
                &mut page_directory_pointer_table.as_mut().entries[page_directory_pointer_index];

            if *page_directory_pointer_entry & flags::PTE_PRESENT == 0 {
                let page_directory_table = PageDirectoryTablePtr::alloc_new();
                *page_directory_pointer_entry =
                    (page_directory_table.as_physical() & ADDR_MASK) | flags::PTE_PRESENT;
            }

            *page_directory_pointer_entry |= flags;
            trace!(
                "L3[{}]: {:p} = {:x}",
                page_directory_pointer_index,
                page_directory_pointer_entry,
                *page_directory_pointer_entry
            );

            let mut page_directory_table =
                PageDirectoryTablePtr::from_entry(*page_directory_pointer_entry);
            let page_directory_entry =
                &mut page_directory_table.as_mut().entries[page_directory_index];

            // 2 MiB huge-page mapping is disabled for now; every mapping goes
            // through the 4 KiB path below. Enabling it would presumably also
            // require checking that the remaining size and both addresses are
            // 2 MiB aligned.
            let can_map_2mb_page = false;
            if can_map_2mb_page {
                if *page_directory_entry & flags::PTE_PRESENT != 0 {
                    // Replacing a normal page table with a huge page: free the
                    // now-unused L1 table.
                    if *page_directory_entry & flags::PTE_HUGE_PAGE == 0 {
                        let page_table_ptr =
                            PageDirectoryTablePtr::from_entry(*page_directory_entry);

                        unsafe { page_table_ptr.free() };
                    }
                }

                *page_directory_entry = (current_physical_address & ADDR_MASK)
                    | flags
                    | flags::PTE_PRESENT
                    | flags::PTE_HUGE_PAGE;

                trace!(
                    "L2[{}] huge: {:p} = {:x}",
                    page_directory_index,
                    page_directory_entry,
                    *page_directory_entry
                );

                size -= PAGE_2M;
                if size == 0 {
                    break;
                }
                virtual_address += PAGE_2M;
                if let Some(physical_address) = physical_address.as_mut() {
                    *physical_address += PAGE_2M as u64;
                }
            } else {
                if *page_directory_entry & flags::PTE_PRESENT == 0 {
                    let page_table = PageDirectoryTablePtr::alloc_new();
                    *page_directory_entry =
                        (page_table.as_physical() & ADDR_MASK) | flags::PTE_PRESENT;
                }
                *page_directory_entry |= flags;
                trace!(
                    "L2[{}]: {:p} = {:x}",
                    page_directory_index,
                    page_directory_entry,
                    *page_directory_entry
                );

                let mut page_table = PageDirectoryTablePtr::from_entry(*page_directory_entry);
                let page_table_entry = &mut page_table.as_mut().entries[page_table_index];
                *page_table_entry =
                    (current_physical_address & ADDR_MASK) | flags | flags::PTE_PRESENT;
                trace!(
                    "L1[{}]: {:p} = {:x}",
                    page_table_index,
                    page_table_entry,
                    *page_table_entry
                );

                size -= PAGE_4K;
                if size == 0 {
                    break;
                }
                virtual_address += PAGE_4K;
                if let Some(physical_address) = physical_address.as_mut() {
                    *physical_address += PAGE_4K as u64;
                }
            }

            trace!("");
        }
    }

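    /// Unmaps the given range (in 4 KiB pages), invalidating the TLB for each
    /// page and clearing the entry's `flags` from the parent entries along the
    /// walk. When `is_allocated` is `true`, the backing physical frames are
    /// returned to the physical page allocator. Intermediate tables are not
    /// freed here.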
    pub fn unmap(&mut self, entry: &VirtualMemoryMapEntry, is_allocated: bool) {
        let VirtualMemoryMapEntry {
            mut virtual_address,
            physical_address,
            size,
            flags,
        } = entry;

        assert!(physical_address.is_none());

        let (aligned_start, mut size, _) = align_range(virtual_address, *size, PAGE_4K);
        virtual_address = aligned_start;

        assert!(size > 0);

        trace!(
            "{} {:08X?}",
            MemSize(size),
            VirtualMemoryMapEntry {
                virtual_address,
                physical_address: *physical_address,
                size,
                flags: *flags,
            }
        );

        while size > 0 {
            unsafe {
                cpu::invalidate_tlp(virtual_address as _);
            }

            let page_map_l4_index = get_l4(virtual_address);
            let page_directory_pointer_index = get_l3(virtual_address);
            let page_directory_index = get_l2(virtual_address);
            let page_table_index = get_l1(virtual_address);

            let page_map_l4_entry = &mut self.page_map_l4.as_mut().entries[page_map_l4_index];

            if *page_map_l4_entry & flags::PTE_PRESENT == 0 {
                panic!("Trying to unmap a non-mapped address");
            }
            // Clear the requested flags from the parent entries on the way down.
            *page_map_l4_entry &= !flags;
            trace!(
                "L4[{}]: {:p} = {:x}",
                page_map_l4_index,
                page_map_l4_entry,
                *page_map_l4_entry
            );

            let mut page_directory_pointer_table =
                PageDirectoryTablePtr::from_entry(*page_map_l4_entry);

            let page_directory_pointer_entry =
                &mut page_directory_pointer_table.as_mut().entries[page_directory_pointer_index];

            if *page_directory_pointer_entry & flags::PTE_PRESENT == 0 {
                panic!("Trying to unmap a non-mapped address");
            }
            *page_directory_pointer_entry &= !flags;
            trace!(
                "L3[{}]: {:p} = {:x}",
                page_directory_pointer_index,
                page_directory_pointer_entry,
                *page_directory_pointer_entry
            );

            let mut page_directory_table =
                PageDirectoryTablePtr::from_entry(*page_directory_pointer_entry);
            let page_directory_entry =
                &mut page_directory_table.as_mut().entries[page_directory_index];

            if *page_directory_entry & flags::PTE_PRESENT == 0 {
                panic!("Trying to unmap a non-mapped address");
            }
            *page_directory_entry &= !flags;

            let mut page_table = PageDirectoryTablePtr::from_entry(*page_directory_entry);
            let page_table_entry = &mut page_table.as_mut().entries[page_table_index];
            if *page_table_entry & flags::PTE_PRESENT == 0 {
                panic!("Trying to unmap a non-mapped address");
            }
            // Free the backing frame if it was allocated by the mapper.
            let physical_entry = PageDirectoryTablePtr::from_entry(*page_table_entry);
            if is_allocated {
                unsafe { physical_entry.free() };
            }
            *page_table_entry = 0;
            trace!(
                "L1[{}]: {:p} = {:x}",
                page_table_index,
                page_table_entry,
                *page_table_entry
            );

            size -= PAGE_4K;
            if size == 0 {
                break;
            }
            virtual_address += PAGE_4K;
        }
    }

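    /// Returns `true` if `addr` is currently mapped in this address space,
    /// walking the tables without modifying them. A present 2 MiB huge page at
    /// the L2 level counts as mapped.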
    pub fn is_address_mapped(&self, addr: usize) -> bool {
        let page_map_l4_index = get_l4(addr);
        let page_directory_pointer_index = get_l3(addr);
        let page_directory_index = get_l2(addr);
        let page_table_index = get_l1(addr);

        let page_map_l4 = self.page_map_l4.as_ref();
        let page_map_l4_entry = &page_map_l4.entries[page_map_l4_index];

        if *page_map_l4_entry & flags::PTE_PRESENT == 0 {
            return false;
        }
        trace!(
            "L4[{}]: {:p} = {:x}",
            page_map_l4_index,
            page_map_l4_entry,
            *page_map_l4_entry
        );

        let page_directory_pointer_table = PageDirectoryTablePtr::from_entry(*page_map_l4_entry);
        let page_directory_pointer_entry =
            &page_directory_pointer_table.as_ref().entries[page_directory_pointer_index];
        if *page_directory_pointer_entry & flags::PTE_PRESENT == 0 {
            return false;
        }
        trace!(
            "L3[{}]: {:p} = {:x}",
            page_directory_pointer_index,
            page_directory_pointer_entry,
            *page_directory_pointer_entry
        );

        let page_directory_table = PageDirectoryTablePtr::from_entry(*page_directory_pointer_entry);
        let page_directory_entry = &page_directory_table.as_ref().entries[page_directory_index];
        if *page_directory_entry & flags::PTE_PRESENT == 0 {
            return false;
        }
        if *page_directory_entry & flags::PTE_HUGE_PAGE != 0 {
            return true;
        }
        trace!(
            "L2[{}]: {:p} = {:x}",
            page_directory_index,
            page_directory_entry,
            *page_directory_entry
        );

        let page_table = PageDirectoryTablePtr::from_entry(*page_directory_entry);
        let page_table_entry = &page_table.as_ref().entries[page_table_index];
        if *page_table_entry & flags::PTE_PRESENT == 0 {
            return false;
        }
        trace!(
            "L1[{}]: {:p} = {:x}",
            page_table_index,
            page_table_entry,
            *page_table_entry
        );

        true
    }

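    /// Walks the page hierarchy below the selected L4/L3 slots and calls `f`
    /// on every present leaf entry found there: either a 2 MiB huge-page L2
    /// entry or a present L1 (page table) entry.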
    fn do_for_ranges_entries<R1, R2, F>(&mut self, l4_ranges: R1, l3_ranges: R2, mut f: F)
    where
        R1: RangeBounds<usize>,
        R2: RangeBounds<usize>,
        F: FnMut(&mut u64),
    {
        let page_map_l4 = self.page_map_l4.as_mut();

        let present = |entry: &&mut u64| **entry & flags::PTE_PRESENT != 0;

        fn as_page_directory_table_flat(entry: &mut u64) -> IterMut<'_, u64> {
            let page_directory_table = PageDirectoryTablePtr::entries_from_mut_entry(entry);
            page_directory_table.entries.iter_mut()
        }

        let handle_2mb_pages = |page_directory_entry: &mut u64| {
            // Huge pages are leaves themselves; otherwise descend into the L1 table.
            if *page_directory_entry & flags::PTE_HUGE_PAGE != 0 {
                f(page_directory_entry);
            } else {
                as_page_directory_table_flat(page_directory_entry)
                    .filter(present)
                    .for_each(&mut f);
            }
        };

        let l4_start = match l4_ranges.start_bound() {
            core::ops::Bound::Included(&start) => start,
            core::ops::Bound::Unbounded => 0,
            core::ops::Bound::Excluded(_) => unreachable!("Excluded start bound"),
        };
        let l4_end = match l4_ranges.end_bound() {
            core::ops::Bound::Included(&end) => end,
            core::ops::Bound::Excluded(&end) => end - 1,
            core::ops::Bound::Unbounded => 0x1FF,
        };
        let l3_start = match l3_ranges.start_bound() {
            core::ops::Bound::Included(&start) => start,
            core::ops::Bound::Unbounded => 0,
            core::ops::Bound::Excluded(_) => unreachable!("Excluded start bound"),
        };
        let l3_end = match l3_ranges.end_bound() {
            core::ops::Bound::Included(&end) => end,
            core::ops::Bound::Excluded(&end) => end - 1,
            core::ops::Bound::Unbounded => 0x1FF,
        };

        let l4_skip = l4_start;
        let l4_take = l4_end - l4_skip + 1;
        let l3_skip = l3_start;
        let l3_take = l3_end - l3_skip + 1;

        page_map_l4
            .entries
            .iter_mut()
            .skip(l4_skip)
            .take(l4_take)
            .flat_map(as_page_directory_table_flat)
            .skip(l3_skip)
            .take(l3_take)
            .filter(present)
            .flat_map(as_page_directory_table_flat)
            .filter(present)
            .for_each(handle_2mb_pages);
    }

    /// Applies `f` to every present leaf entry in the user part of the address space.
    fn do_for_every_user_entry(&mut self, f: impl FnMut(&mut u64)) {
        self.do_for_ranges_entries(0..NUM_USER_L4_INDEXES, 0..=0x1FF, f)
    }

    /// Applies `f` to every present leaf entry in the per-process kernel L3
    /// slots (e.g. the process kernel stack).
    fn do_for_kernel_process_entry(&mut self, f: impl FnMut(&mut u64)) {
        self.do_for_ranges_entries(
            KERNEL_L4_INDEX..=KERNEL_L4_INDEX,
            KERNEL_L3_PROCESS_INDEX_START..=KERNEL_L3_PROCESS_INDEX_END,
            f,
        );
    }

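    /// Frees all physical frames owned by this (user) address space: every
    /// user mapping plus the process-specific kernel mappings. The leaf
    /// entries are cleared; the intermediate page tables themselves are not
    /// freed here.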
    pub fn unmap_process_memory(&mut self) {
        let free_page = |entry: &mut u64| {
            assert_eq!(
                *entry & flags::PTE_HUGE_PAGE,
                0,
                "We haven't implemented 2MB physical pages for user allocation"
            );
            let page_table_ptr = PageDirectoryTablePtr::from_entry(*entry);
            unsafe { page_table_ptr.free() };
            *entry = 0;
        };

        self.do_for_every_user_entry(free_page);
        self.do_for_kernel_process_entry(free_page);
    }
}