kernel/memory_management/virtual_memory_mapper.rs

//! This is very specific to the 64-bit x86 architecture; if this is ever ported to other
//! architectures, it will need to be changed.

use core::{ops::RangeBounds, slice::IterMut};

use tracing::trace;

use crate::{
    cpu,
    memory_management::{
        memory_layout::{
            align_range, align_up, is_aligned, kernel_elf_rodata_end, physical2virtual,
            virtual2physical, MemSize, EXTENDED_OFFSET, KERNEL_BASE, KERNEL_END, KERNEL_LINK,
            KERNEL_MAPPED_SIZE, PAGE_2M, PAGE_4K,
        },
        physical_page_allocator,
    },
    sync::{once::OnceLock, spin::mutex::Mutex},
};

use super::memory_layout::{
    stack_guard_page_ptr, PROCESS_KERNEL_STACK_BASE, PROCESS_KERNEL_STACK_SIZE,
};

// TODO: replace by some sort of bitfield
#[allow(dead_code)]
pub mod flags {
    pub(super) const PTE_PRESENT: u64 = 1 << 0;
    pub const PTE_WRITABLE: u64 = 1 << 1;
    pub const PTE_USER: u64 = 1 << 2;
    pub const PTE_WRITETHROUGH: u64 = 1 << 3;
    pub const PTE_NOT_CACHEABLE: u64 = 1 << 4;
    pub(super) const PTE_ACCESSED: u64 = 1 << 5;
    pub(super) const PTE_DIRTY: u64 = 1 << 6;
    pub(super) const PTE_HUGE_PAGE: u64 = 1 << 7;
    pub(super) const PTE_GLOBAL: u64 = 1 << 8;
    pub(super) const PTE_NO_EXECUTE: u64 = 1 << 63;
}

const ADDR_MASK: u64 = 0x0000_0000_FFFF_F000;

// only use the last index for the kernel
// all the other indexes are free to use by the user
const KERNEL_L4_INDEX: usize = 0x1FF;

// The L3 positions are used for the non-moving kernel code/data
const KERNEL_L3_INDEX_START: usize = 0x1FE;
#[allow(dead_code)]
const KERNEL_L3_INDEX_END: usize = 0x1FF;

const KERNEL_L3_PROCESS_INDEX_START: usize = 0;
const KERNEL_L3_PROCESS_INDEX_END: usize = KERNEL_L3_INDEX_START - 1;

pub const KERNEL_PROCESS_VIRTUAL_ADDRESS_START: usize =
    // sign extension
    0xFFFF_0000_0000_0000 | KERNEL_L4_INDEX << 39 | KERNEL_L3_PROCESS_INDEX_START << 30;

// the user can use all the indexes except the last one
const NUM_USER_L4_INDEXES: usize = KERNEL_L4_INDEX;

pub const MAX_USER_VIRTUAL_ADDRESS: usize =
    // sign extension
    0xFFFF_0000_0000_0000
        | (KERNEL_L4_INDEX - 1) << 39
        | (0x1FF << 30)
        | (0x1FF << 21)
        | (0x1FF << 12);
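// For reference, the constant above evaluates to 0xFFFF_FF7F_FFFF_F000:
// L4 index 0x1FE with the L3/L2/L1 indexes all at 0x1FF and a zero page offset.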

#[inline(always)]
const fn get_l4(addr: usize) -> usize {
    (addr >> 39) & 0x1FF
}

#[inline(always)]
const fn get_l3(addr: usize) -> usize {
    (addr >> 30) & 0x1FF
}

#[inline(always)]
const fn get_l2(addr: usize) -> usize {
    (addr >> 21) & 0x1FF
}

#[inline(always)]
const fn get_l1(addr: usize) -> usize {
    (addr >> 12) & 0x1FF
}
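
// A quick sanity sketch (not compiled in): decomposing
// `KERNEL_PROCESS_VIRTUAL_ADDRESS_START` (0xFFFF_FF80_0000_0000) with the helpers above:
//
//     assert_eq!(get_l4(KERNEL_PROCESS_VIRTUAL_ADDRESS_START), KERNEL_L4_INDEX); // 0x1FF
//     assert_eq!(get_l3(KERNEL_PROCESS_VIRTUAL_ADDRESS_START), KERNEL_L3_PROCESS_INDEX_START); // 0
//     assert_eq!(get_l2(KERNEL_PROCESS_VIRTUAL_ADDRESS_START), 0);
//     assert_eq!(get_l1(KERNEL_PROCESS_VIRTUAL_ADDRESS_START), 0);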

// have a specific alignment so a whole number of these fit in a page
#[repr(C, align(32))]
#[derive(Debug, Copy, Clone)]
pub struct VirtualMemoryMapEntry {
    /// The virtual address to map; this is constrained by the memory model, and thus `usize`
    pub virtual_address: usize,
    /// The physical address to map; if `None`, it will be allocated.
    /// This can exceed `usize` on `32-bit`, as Intel supports up to `40-bit` physical addresses
    /// with 32-bit paging
    pub physical_address: Option<u64>,
    /// The size of the mapping; this is constrained by the memory model, and thus `usize`
    pub size: usize,
    /// The flags to use for the mapping, see [flags] for more information
    pub flags: u64,
}
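
// A minimal usage sketch (hypothetical addresses, not part of this module): requesting a
// 16 KiB writable user mapping and letting the mapper allocate the physical pages:
//
//     let entry = VirtualMemoryMapEntry {
//         virtual_address: 0x0000_1000_0000,          // hypothetical user address
//         physical_address: None,                     // allocate physical pages for us
//         size: 4 * PAGE_4K,
//         flags: flags::PTE_WRITABLE | flags::PTE_USER,
//     };
//     user_vm.map(&entry);                            // `user_vm` is some user `VirtualMemoryMapper`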

// This is a general structure for all levels
#[repr(C, align(4096))]
struct PageDirectoryTable {
    entries: [u64; 512],
}

#[repr(transparent)]
struct PageDirectoryTablePtr {
    physical_addr: u64,
}

impl PageDirectoryTablePtr {
    const fn from_entry(entry: u64) -> Self {
        Self {
            physical_addr: entry & ADDR_MASK,
        }
    }

    /// An ugly hack used in `do_for_every_user_entry` to get a mutable reference to the page directory table
    fn entries_from_mut_entry(entry: &mut u64) -> &mut PageDirectoryTable {
        let table = physical2virtual(*entry & ADDR_MASK) as *mut PageDirectoryTable;
        unsafe { &mut *table }
    }

    fn as_physical(&self) -> u64 {
        self.physical_addr
    }

    fn as_virtual(&self) -> usize {
        // for now, it must be within the lower kernel memory, easier to support
        assert!(self.physical_addr < KERNEL_END as u64);
        physical2virtual(self.physical_addr)
    }

    fn alloc_new() -> Self {
        // SAFETY: it will panic if it couldn't allocate, so if it returns, it is safe
        Self {
            physical_addr: unsafe {
                virtual2physical(physical_page_allocator::alloc_zeroed() as _)
            },
        }
    }

    fn as_ptr(&self) -> *mut PageDirectoryTable {
        self.as_virtual() as *mut PageDirectoryTable
    }

    fn as_mut(&mut self) -> &mut PageDirectoryTable {
        unsafe { &mut *self.as_ptr() }
    }

    fn as_ref(&self) -> &PageDirectoryTable {
        unsafe { &*self.as_ptr() }
    }

    unsafe fn free(self) {
        unsafe { physical_page_allocator::free(self.as_virtual() as _) };
    }
}
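
// A small sketch (not compiled in) of how an entry value and a table pointer relate:
// `from_entry` masks off the flag bits, so the physical address round-trips through an entry:
//
//     let table = PageDirectoryTablePtr::alloc_new();
//     let entry = table.as_physical() | flags::PTE_PRESENT | flags::PTE_WRITABLE;
//     let again = PageDirectoryTablePtr::from_entry(entry);
//     assert_eq!(again.as_physical(), table.as_physical());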

static KERNEL_VIRTUAL_MEMORY_MANAGER: OnceLock<Mutex<VirtualMemoryMapper>> = OnceLock::new();

pub fn init_kernel_vm() {
    if KERNEL_VIRTUAL_MEMORY_MANAGER.try_get().is_some() {
        panic!("Kernel VM already initialized");
    }

    let manager = KERNEL_VIRTUAL_MEMORY_MANAGER
        .get_or_init(|| Mutex::new(VirtualMemoryMapper::new_kernel_vm()))
        .lock();

    // SAFETY: this is the initial VM switch, so we are sure that we are not inside a process, thus it's safe to switch
    unsafe { manager.switch_to_this() };
}

/// # Safety
/// This must never be called while we are in a process context
/// and using any process specific memory regions
pub unsafe fn switch_to_kernel() {
    KERNEL_VIRTUAL_MEMORY_MANAGER.get().lock().switch_to_this();
}

pub fn map_kernel(entry: &VirtualMemoryMapEntry) {
    // make sure we are only mapping kernel memory
    assert!(entry.virtual_address >= KERNEL_BASE);
    KERNEL_VIRTUAL_MEMORY_MANAGER.get().lock().map(entry);
}

/// `is_allocated` indicates whether the physical pages were allocated by the mapper,
/// i.e. when `map_kernel` was called with `physical_address = None` the pages were allocated for us,
/// so when unmapping that entry you should pass `is_allocated = true`
// TODO: maybe it's better to keep track of this information somewhere in the mapper here
pub fn unmap_kernel(entry: &VirtualMemoryMapEntry, is_allocated: bool) {
    // make sure we are only unmapping kernel memory
    assert!(entry.virtual_address >= KERNEL_BASE);
    KERNEL_VIRTUAL_MEMORY_MANAGER
        .get()
        .lock()
        .unmap(entry, is_allocated);
}
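
// A usage sketch (hypothetical virtual address): mapping a scratch page into kernel space and
// tearing it down again. Since `physical_address` is `None`, the pages are allocated by the
// mapper, so the matching unmap passes `is_allocated = true`:
//
//     let scratch = VirtualMemoryMapEntry {
//         virtual_address: SOME_FREE_KERNEL_VADDR, // hypothetical: any unmapped address >= KERNEL_BASE
//         physical_address: None,
//         size: PAGE_4K,
//         flags: flags::PTE_WRITABLE,
//     };
//     map_kernel(&scratch);
//     // ... use the page ...
//     unmap_kernel(&scratch, true);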

#[allow(dead_code)]
pub fn is_address_mapped_in_kernel(addr: usize) -> bool {
    KERNEL_VIRTUAL_MEMORY_MANAGER
        .get()
        .lock()
        .is_address_mapped(addr)
}

pub fn clone_current_vm_as_user() -> VirtualMemoryMapper {
    // precaution, a sort of manual lock
    cpu::cpu().push_cli();
    let manager = get_current_vm();
    let mut new_vm = manager.clone_kernel_mem();
    cpu::cpu().pop_cli();
    new_vm.is_user = true;
    new_vm
}
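
// A sketch of the intended flow when creating a new process address space (simplified; the
// actual call sites are elsewhere in the kernel):
//
//     let mut process_vm = clone_current_vm_as_user();
//     // SAFETY: the new VM is not switched to until the scheduler does so
//     unsafe { process_vm.add_process_specific_mappings() };
//     // ... map the process binary, stack, heap, etc. via `process_vm.map(...)` ...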

pub fn get_current_vm() -> VirtualMemoryMapper {
    VirtualMemoryMapper::get_current_vm()
}

pub struct VirtualMemoryMapper {
    page_map_l4: PageDirectoryTablePtr,
    is_user: bool,
}

impl VirtualMemoryMapper {
    fn new() -> Self {
        Self {
            page_map_l4: PageDirectoryTablePtr::alloc_new(),
            is_user: false,
        }
    }

    // create a new virtual memory map that contains only the kernel mappings
    pub fn clone_kernel_mem(&self) -> Self {
        let this_kernel_l4 =
            PageDirectoryTablePtr::from_entry(self.page_map_l4.as_ref().entries[KERNEL_L4_INDEX]);

        let mut new_vm = Self::new();

        let mut new_kernel_l4 = PageDirectoryTablePtr::alloc_new();

        // copy the whole kernel mapping (process specific entries will be replaced later)
        for i in 0..=0x1FF {
            new_kernel_l4.as_mut().entries[i] = this_kernel_l4.as_ref().entries[i];
        }

        new_vm.page_map_l4.as_mut().entries[KERNEL_L4_INDEX] =
            new_kernel_l4.as_physical() | flags::PTE_PRESENT | flags::PTE_WRITABLE;

        new_vm
    }

    /// # Safety
    ///
    /// After this call, the VM must never be switched to unless
    /// it's from the scheduler or we are sure that the previous kernel regions are not used
    pub unsafe fn add_process_specific_mappings(&mut self) {
        let mut this_kernel_l4 =
            PageDirectoryTablePtr::from_entry(self.page_map_l4.as_ref().entries[KERNEL_L4_INDEX]);

        // clear out the process specific mappings if we have cloned another process,
        // but of course don't deallocate, just remove the mappings
        for i in KERNEL_L3_PROCESS_INDEX_START..=KERNEL_L3_PROCESS_INDEX_END {
            this_kernel_l4.as_mut().entries[i] = 0;
        }
        // set it temporarily so we can map the kernel range
        // TODO: fix this hack
        self.is_user = false;
        // load a new kernel stack for this process
        self.map(&VirtualMemoryMapEntry {
            virtual_address: PROCESS_KERNEL_STACK_BASE,
            physical_address: None, // allocate
            size: PROCESS_KERNEL_STACK_SIZE,
            flags: flags::PTE_WRITABLE,
        });
        self.is_user = true;
    }

    fn load_vm(base: &PageDirectoryTablePtr) {
        trace!(
            "Switching to new page map: {:p}",
            base.as_physical() as *const u8
        );
        unsafe { cpu::set_cr3(base.as_physical()) }
    }

    fn get_current_vm() -> Self {
        let kernel_vm_addr = KERNEL_VIRTUAL_MEMORY_MANAGER
            .get()
            .lock()
            .page_map_l4
            .as_physical();
        let cr3 = unsafe { cpu::get_cr3() }; // cr3 is a physical address
        let is_user = cr3 != kernel_vm_addr;
        Self {
            page_map_l4: PageDirectoryTablePtr::from_entry(cr3),
            is_user,
        }
    }

    /// Returns `true` if this VM is the one currently used by this CPU
    pub fn is_used_by_me(&self) -> bool {
        let cr3 = unsafe { cpu::get_cr3() };
        cr3 == self.page_map_l4.as_physical()
    }

    /// # Safety
    /// This must be used with caution; it must never be switched to while we are still using
    /// memory from the old VM's regions, e.g. the kernel stack while we are in an interrupt
    pub unsafe fn switch_to_this(&self) {
        Self::load_vm(&self.page_map_l4);
    }

    // This replicates what is done in the assembly boot code,
    // but here the result is stored
    fn new_kernel_vm() -> Self {
        let data_start = align_up(kernel_elf_rodata_end(), PAGE_4K);
        let kernel_vm = [
            // Low memory (has some BIOS stuff): mapped to kernel space
            VirtualMemoryMapEntry {
                virtual_address: KERNEL_BASE,
                physical_address: Some(0),
                size: EXTENDED_OFFSET,
                flags: flags::PTE_WRITABLE,
            },
            // Extended memory: kernel .text and .rodata sections
            VirtualMemoryMapEntry {
                virtual_address: KERNEL_LINK,
                physical_address: Some(virtual2physical(KERNEL_LINK)),
                size: (virtual2physical(data_start) - virtual2physical(KERNEL_LINK)) as usize,
                flags: 0, // read-only
            },
            // Extended memory: kernel .data and .bss sections, plus the rest of the `whole`
            // memory we decided to use in the kernel
            VirtualMemoryMapEntry {
                virtual_address: data_start,
                physical_address: Some(virtual2physical(data_start)),
                size: KERNEL_MAPPED_SIZE - virtual2physical(data_start) as usize,
                flags: flags::PTE_WRITABLE,
            },
        ];

        // create a new fresh page map
        // SAFETY: we are calling the virtual memory manager after initializing the physical page allocator
        let mut s = Self::new();

        for entry in kernel_vm.iter() {
            s.map(entry);
        }

        // unmap the stack guard page
        s.unmap(
            &VirtualMemoryMapEntry {
                virtual_address: stack_guard_page_ptr(),
                physical_address: None,
                size: PAGE_4K,
                flags: 0,
            },
            false,
        );

        s
    }

    pub fn map(&mut self, entry: &VirtualMemoryMapEntry) {
        let VirtualMemoryMapEntry {
            mut virtual_address,
            physical_address: mut start_physical_address,
            size: requested_size,
            flags,
        } = entry;

        assert!(!self.page_map_l4.as_ptr().is_null());
        assert!(is_aligned(self.page_map_l4.as_virtual(), PAGE_4K));

        let (aligned_start, mut size, _) = align_range(virtual_address, *requested_size, PAGE_4K);
        virtual_address = aligned_start;

        if self.is_user {
            assert_ne!(*flags & flags::PTE_USER, 0);
            assert_ne!(get_l4(virtual_address), KERNEL_L4_INDEX);
            let end = virtual_address + size;
            assert!(end <= MAX_USER_VIRTUAL_ADDRESS);
        }

        if let Some(start_physical_address) = start_physical_address.as_mut() {
            let (aligned_start, physical_size, _) =
                align_range(*start_physical_address, *requested_size, PAGE_4K);
            assert_eq!(physical_size, size);
            *start_physical_address = aligned_start;
        }

        // keep track of current address and size
        let mut physical_address = start_physical_address;

        assert!(size > 0);

        trace!(
            "{} {:08X?}",
            MemSize(size),
            VirtualMemoryMapEntry {
                virtual_address,
                physical_address,
                size,
                flags: *flags,
            }
        );

        while size > 0 {
            let current_physical_address = physical_address.unwrap_or_else(|| {
                virtual2physical(unsafe { physical_page_allocator::alloc_zeroed() as _ })
            });
            trace!(
                "[!] Mapping {:p} to {:p}",
                virtual_address as *const u8,
                current_physical_address as *const u8
            );
            let page_map_l4_index = get_l4(virtual_address);
            let page_directory_pointer_index = get_l3(virtual_address);
            let page_directory_index = get_l2(virtual_address);
            let page_table_index = get_l1(virtual_address);

            // Level 4
            let page_map_l4_entry = &mut self.page_map_l4.as_mut().entries[page_map_l4_index];

            if *page_map_l4_entry & flags::PTE_PRESENT == 0 {
                let page_directory_pointer_table = PageDirectoryTablePtr::alloc_new();
                *page_map_l4_entry =
                    (page_directory_pointer_table.as_physical() & ADDR_MASK) | flags::PTE_PRESENT;
            }
            // add new flags if any
            *page_map_l4_entry |= flags;
            trace!(
                "L4[{}]: {:p} = {:x}",
                page_map_l4_index,
                page_map_l4_entry,
                *page_map_l4_entry
            );

            // Level 3
            let mut page_directory_pointer_table =
                PageDirectoryTablePtr::from_entry(*page_map_l4_entry);

            let page_directory_pointer_entry =
                &mut page_directory_pointer_table.as_mut().entries[page_directory_pointer_index];

            if *page_directory_pointer_entry & flags::PTE_PRESENT == 0 {
                let page_directory_table = PageDirectoryTablePtr::alloc_new();
                *page_directory_pointer_entry =
                    (page_directory_table.as_physical() & ADDR_MASK) | flags::PTE_PRESENT;
            }

            // add new flags
            *page_directory_pointer_entry |= flags;
            trace!(
                "L3[{}]: {:p} = {:x}",
                page_directory_pointer_index,
                page_directory_pointer_entry,
                *page_directory_pointer_entry
            );

            // Level 2
            let mut page_directory_table =
                PageDirectoryTablePtr::from_entry(*page_directory_pointer_entry);
            let page_directory_entry =
                &mut page_directory_table.as_mut().entries[page_directory_index];

            // here we choose the page size: if we can map a 2MB page we will, otherwise we map 4K pages
            // if we are allocating the pages ourselves (the caller didn't provide them), then we can't use 2MB pages
            // let can_map_2mb_page = physical_address
            //     .map(|phy_addr| {
            //         is_aligned(phy_addr as _, PAGE_2M)
            //             && is_aligned(virtual_address as _, PAGE_2M)
            //             && size >= PAGE_2M as u64
            //     })
            //     .unwrap_or(false);
            // TODO: 2MB pages are disabled for now, as it's not easy to unmap in the middle of one; all pages must be the same size

            let can_map_2mb_page = false;
            if can_map_2mb_page {
                // we already have an entry here
                if *page_directory_entry & flags::PTE_PRESENT != 0 {
                    // did we have a mapping here that led to 4k pages?
                    // if so, we should free the physical page allocation for them
                    if *page_directory_entry & flags::PTE_HUGE_PAGE == 0 {
                        let page_table_ptr =
                            PageDirectoryTablePtr::from_entry(*page_directory_entry);

                        unsafe { page_table_ptr.free() };
                    }
                }

                // write the leaf (2MB huge page) entry directly at level 2
                *page_directory_entry = (current_physical_address & ADDR_MASK)
                    | flags
                    | flags::PTE_PRESENT
                    | flags::PTE_HUGE_PAGE;

                trace!(
                    "L2[{}] huge: {:p} = {:x}",
                    page_directory_index,
                    page_directory_entry,
                    *page_directory_entry
                );

                size -= PAGE_2M;
                // do not overflow the address
                if size == 0 {
                    break;
                }
                virtual_address += PAGE_2M;
                if let Some(physical_address) = physical_address.as_mut() {
                    *physical_address += PAGE_2M as u64;
                }
            } else {
                // continue mapping 4K pages
                if *page_directory_entry & flags::PTE_PRESENT == 0 {
                    let page_table = PageDirectoryTablePtr::alloc_new();
                    *page_directory_entry =
                        (page_table.as_physical() & ADDR_MASK) | flags::PTE_PRESENT;
                }
                // add new flags
                *page_directory_entry |= flags;
                trace!(
                    "L2[{}]: {:p} = {:x}",
                    page_directory_index,
                    page_directory_entry,
                    *page_directory_entry
                );

                // Level 1
                let mut page_table = PageDirectoryTablePtr::from_entry(*page_directory_entry);
                let page_table_entry = &mut page_table.as_mut().entries[page_table_index];
                *page_table_entry =
                    (current_physical_address & ADDR_MASK) | flags | flags::PTE_PRESENT;
                trace!(
                    "L1[{}]: {:p} = {:x}",
                    page_table_index,
                    page_table_entry,
                    *page_table_entry
                );

                size -= PAGE_4K;
                // do not overflow the address
                if size == 0 {
                    break;
                }
                virtual_address += PAGE_4K;
                if let Some(physical_address) = physical_address.as_mut() {
                    *physical_address += PAGE_4K as u64;
                }
            }

            trace!("");
        }
    }
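
    // A sketch (hypothetical addresses) of mapping a fixed physical frame, e.g. an MMIO register
    // page, rather than letting the mapper allocate one; such a mapping should later be unmapped
    // with `is_allocated = false` since the frame was not taken from the page allocator:
    //
    //     some_vm.map(&VirtualMemoryMapEntry {
    //         virtual_address: SOME_KERNEL_VADDR,       // hypothetical
    //         physical_address: Some(0xFEE0_0000),      // hypothetical MMIO frame (e.g. the local APIC)
    //         size: PAGE_4K,
    //         flags: flags::PTE_WRITABLE | flags::PTE_NOT_CACHEABLE,
    //     });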

    /// Removes the mapping of a virtual entry; it will also free the physical memory if it was allocated by the mapper
    pub fn unmap(&mut self, entry: &VirtualMemoryMapEntry, is_allocated: bool) {
        let VirtualMemoryMapEntry {
            mut virtual_address,
            physical_address,
            size,
            flags,
        } = entry;

        assert!(physical_address.is_none());

        // get the end before alignment
        let (aligned_start, mut size, _) = align_range(virtual_address, *size, PAGE_4K);
        virtual_address = aligned_start;

        assert!(size > 0);

        trace!(
            "{} {:08X?}",
            MemSize(size),
            VirtualMemoryMapEntry {
                virtual_address,
                physical_address: *physical_address,
                size,
                flags: *flags,
            }
        );

        while size > 0 {
            unsafe {
                cpu::invalidate_tlp(virtual_address as _);
            }

            let page_map_l4_index = get_l4(virtual_address);
            let page_directory_pointer_index = get_l3(virtual_address);
            let page_directory_index = get_l2(virtual_address);
            let page_table_index = get_l1(virtual_address);

            // Level 4
            let page_map_l4_entry = &mut self.page_map_l4.as_mut().entries[page_map_l4_index];

            if *page_map_l4_entry & flags::PTE_PRESENT == 0 {
                panic!("Trying to unmap a non-mapped address");
            }
            // remove flags
            *page_map_l4_entry &= !flags;
            trace!(
                "L4[{}]: {:p} = {:x}",
                page_map_l4_index,
                page_map_l4_entry,
                *page_map_l4_entry
            );

            // Level 3
            let mut page_directory_pointer_table =
                PageDirectoryTablePtr::from_entry(*page_map_l4_entry);

            let page_directory_pointer_entry =
                &mut page_directory_pointer_table.as_mut().entries[page_directory_pointer_index];

            if *page_directory_pointer_entry & flags::PTE_PRESENT == 0 {
                panic!("Trying to unmap a non-mapped address");
            }
            // remove flags
            *page_directory_pointer_entry &= !flags;
            trace!(
                "L3[{}]: {:p} = {:x}",
                page_directory_pointer_index,
                page_directory_pointer_entry,
                *page_directory_pointer_entry
            );

            // Level 2
            let mut page_directory_table =
                PageDirectoryTablePtr::from_entry(*page_directory_pointer_entry);
            let page_directory_entry =
                &mut page_directory_table.as_mut().entries[page_directory_index];

            if *page_directory_entry & flags::PTE_PRESENT == 0 {
                panic!("Trying to unmap a non-mapped address");
            }
            // remove flags
            *page_directory_entry &= !flags;

            // Level 1
            let mut page_table = PageDirectoryTablePtr::from_entry(*page_directory_entry);
            let page_table_entry = &mut page_table.as_mut().entries[page_table_index];
            if *page_table_entry & flags::PTE_PRESENT == 0 {
                panic!("Trying to unmap a non-mapped address");
            }
            let physical_entry = PageDirectoryTablePtr::from_entry(*page_table_entry);
            if is_allocated {
                unsafe { physical_entry.free() };
            }
            // remove the whole entry
            *page_table_entry = 0;
            trace!(
                "L1[{}]: {:p} = {:x}",
                page_table_index,
                page_table_entry,
                *page_table_entry
            );

            size -= PAGE_4K;
            // do not overflow the address
            if size == 0 {
                break;
            }
            virtual_address += PAGE_4K;
        }
    }

    pub fn is_address_mapped(&self, addr: usize) -> bool {
        let page_map_l4_index = get_l4(addr);
        let page_directory_pointer_index = get_l3(addr);
        let page_directory_index = get_l2(addr);
        let page_table_index = get_l1(addr);

        // Level 4
        let page_map_l4 = self.page_map_l4.as_ref();
        let page_map_l4_entry = &page_map_l4.entries[page_map_l4_index];

        if *page_map_l4_entry & flags::PTE_PRESENT == 0 {
            return false;
        }
        trace!(
            "L4[{}]: {:p} = {:x}",
            page_map_l4_index,
            page_map_l4_entry,
            *page_map_l4_entry
        );

        // Level 3
        let page_directory_pointer_table = PageDirectoryTablePtr::from_entry(*page_map_l4_entry);
        let page_directory_pointer_entry =
            &page_directory_pointer_table.as_ref().entries[page_directory_pointer_index];
        if *page_directory_pointer_entry & flags::PTE_PRESENT == 0 {
            return false;
        }
        trace!(
            "L3[{}]: {:p} = {:x}",
            page_directory_pointer_index,
            page_directory_pointer_entry,
            *page_directory_pointer_entry
        );

        // Level 2
        let page_directory_table = PageDirectoryTablePtr::from_entry(*page_directory_pointer_entry);
        let page_directory_entry = &page_directory_table.as_ref().entries[page_directory_index];
        if *page_directory_entry & flags::PTE_PRESENT == 0 {
            return false;
        }
        if *page_directory_entry & flags::PTE_HUGE_PAGE != 0 {
            return true;
        }
        trace!(
            "L2[{}]: {:p} = {:x}",
            page_directory_index,
            page_directory_entry,
            *page_directory_entry
        );

        // Level 1
        let page_table = PageDirectoryTablePtr::from_entry(*page_directory_entry);
        let page_table_entry = &page_table.as_ref().entries[page_table_index];
        if *page_table_entry & flags::PTE_PRESENT == 0 {
            return false;
        }
        trace!(
            "L1[{}]: {:p} = {:x}",
            page_table_index,
            page_table_entry,
            *page_table_entry
        );

        true
    }
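
    // A small sketch (not compiled in) of how this can be used as a sanity check, e.g.
    // verifying that the stack guard page unmapped in `new_kernel_vm` is really not mapped:
    //
    //     let kernel_vm = get_current_vm();
    //     debug_assert!(!kernel_vm.is_address_mapped(stack_guard_page_ptr()));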

    // TODO: add tests for this
    fn do_for_ranges_entries<R1, R2, F>(&mut self, l4_ranges: R1, l3_ranges: R2, mut f: F)
    where
        R1: RangeBounds<usize>,
        R2: RangeBounds<usize>,
        F: FnMut(&mut u64),
    {
        let page_map_l4 = self.page_map_l4.as_mut();

        let present = |entry: &&mut u64| **entry & flags::PTE_PRESENT != 0;

        fn as_page_directory_table_flat(entry: &mut u64) -> IterMut<'_, u64> {
            let page_directory_table = PageDirectoryTablePtr::entries_from_mut_entry(entry);
            page_directory_table.entries.iter_mut()
        }

        // handle 2MB pages and below
        let handle_2mb_pages = |page_directory_entry: &mut u64| {
            // handle 2MB pages
            if *page_directory_entry & flags::PTE_HUGE_PAGE != 0 {
                f(page_directory_entry);
            } else {
                as_page_directory_table_flat(page_directory_entry)
                    .filter(present)
                    .for_each(&mut f);
            }
        };

        let l4_start = match l4_ranges.start_bound() {
            core::ops::Bound::Included(&start) => start,
            core::ops::Bound::Unbounded => 0,
            core::ops::Bound::Excluded(_) => unreachable!("Excluded start bound"),
        };
        let l4_end = match l4_ranges.end_bound() {
            core::ops::Bound::Included(&end) => end,
            core::ops::Bound::Excluded(&end) => end - 1,
            core::ops::Bound::Unbounded => 0x1FF, // last index
        };
        let l3_start = match l3_ranges.start_bound() {
            core::ops::Bound::Included(&start) => start,
            core::ops::Bound::Unbounded => 0,
            core::ops::Bound::Excluded(_) => unreachable!("Excluded start bound"),
        };
        let l3_end = match l3_ranges.end_bound() {
            core::ops::Bound::Included(&end) => end,
            core::ops::Bound::Excluded(&end) => end - 1,
            core::ops::Bound::Unbounded => 0x1FF, // last index
        };

        let l4_skip = l4_start;
        let l4_take = l4_end - l4_skip + 1;
        let l3_skip = l3_start;
        let l3_take = l3_end - l3_skip + 1;

        page_map_l4
            .entries
            .iter_mut()
            .skip(l4_skip)
            .take(l4_take) // only walk the requested L4 range
            .filter(present)
            // apply the L3 range within each selected L4 table
            .flat_map(|l4_entry| {
                as_page_directory_table_flat(l4_entry)
                    .skip(l3_skip)
                    .take(l3_take)
            })
            .filter(present)
            .flat_map(as_page_directory_table_flat)
            .filter(present)
            .for_each(handle_2mb_pages);
    }

    // the handler function definition is `fn(page_entry: &mut u64)`
    fn do_for_every_user_entry(&mut self, f: impl FnMut(&mut u64)) {
        self.do_for_ranges_entries(0..NUM_USER_L4_INDEXES, 0..=0x1FF, f)
    }

    // the handler function definition is `fn(page_entry: &mut u64)`
    fn do_for_kernel_process_entry(&mut self, f: impl FnMut(&mut u64)) {
        self.do_for_ranges_entries(
            KERNEL_L4_INDEX..=KERNEL_L4_INDEX,
            KERNEL_L3_PROCESS_INDEX_START..=KERNEL_L3_PROCESS_INDEX_END,
            f,
        );
    }
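
    // A sketch (not compiled in) of how these walkers are used: the closure receives each
    // present leaf entry, so e.g. counting the mapped 4K user pages could look like this
    // (`count` is a hypothetical local):
    //
    //     let mut count = 0;
    //     self.do_for_every_user_entry(|entry| {
    //         if *entry & flags::PTE_HUGE_PAGE == 0 {
    //             count += 1;
    //         }
    //     });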

    // search for all the pages that are mapped in the user ranges, unmap them, and free their memory;
    // also unmap any process specific kernel memory
    pub fn unmap_process_memory(&mut self) {
        let free_page = |entry: &mut u64| {
            assert_eq!(
                *entry & flags::PTE_HUGE_PAGE,
                0,
                "We haven't implemented 2MB physical pages for user allocation"
            );
            let page_table_ptr = PageDirectoryTablePtr::from_entry(*entry);
            unsafe { page_table_ptr.free() };
            *entry = 0;
        };

        self.do_for_every_user_entry(free_page);
        self.do_for_kernel_process_entry(free_page);
    }
}