kernel/memory_management/kernel_heap_allocator.rs

use core::alloc::{GlobalAlloc, Layout};

use increasing_heap_allocator::{HeapAllocator, HeapStats, PageAllocatorProvider};

use crate::{
    memory_management::{
        memory_layout::KERNEL_HEAP_SIZE,
        virtual_memory_mapper::{self, flags, VirtualMemoryMapEntry},
    },
    sync::{once::OnceLock, spin::mutex::Mutex},
};

use super::memory_layout::{KERNEL_HEAP_BASE, PAGE_4K};

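/// The kernel's global heap allocator. The inner heap state is created lazily
/// on first use (see the `OnceLock::get_or_init` calls below), which keeps this
/// `static` constructible in a `const` context.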
#[global_allocator]
pub static ALLOCATOR: LockedKernelHeapAllocator = LockedKernelHeapAllocator::empty();

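/// Backing page provider for the heap: hands out virtual pages from the kernel
/// heap region, starting at `KERNEL_HEAP_BASE` and growing upwards.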
struct PageAllocator {
    heap_start: usize,
    mapped_pages: usize,
}

impl PageAllocator {
    fn new() -> Self {
        Self {
            heap_start: KERNEL_HEAP_BASE,
            mapped_pages: 0,
        }
    }
}

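// Provides pages to `HeapAllocator` on demand. Note that `deallocate_pages` is
// still a `todo!()`, so the heap currently only grows.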
impl PageAllocatorProvider<PAGE_4K> for PageAllocator {
    fn allocate_pages(&mut self, pages: usize) -> Option<*mut u8> {
        eprintln!("Allocating {} pages", pages);
        assert!(pages > 0);

        // The next free virtual address sits just past the pages mapped so far.
        let current_heap_base = self.heap_start + self.mapped_pages * PAGE_4K;

        // Do not exceed the fixed kernel heap size.
        if (self.mapped_pages + pages) * PAGE_4K > KERNEL_HEAP_SIZE {
            return None;
        }

        // Map the new pages into the kernel address space; no specific physical
        // frames are requested.
        virtual_memory_mapper::map_kernel(&VirtualMemoryMapEntry {
            virtual_address: current_heap_base,
            physical_address: None,
            size: PAGE_4K * pages,
            flags: flags::PTE_WRITABLE,
        });

        self.mapped_pages += pages;

        Some(current_heap_base as *mut u8)
    }

    fn deallocate_pages(&mut self, _pages: usize) -> bool {
        todo!()
    }
}

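/// Mutex-protected, lazily initialized wrapper around `HeapAllocator`, exposing
/// the `GlobalAlloc` interface required by `#[global_allocator]`.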
pub struct LockedKernelHeapAllocator {
    inner: OnceLock<Mutex<HeapAllocator<PAGE_4K, PageAllocator>>>,
}

impl LockedKernelHeapAllocator {
    const fn empty() -> Self {
        Self {
            inner: OnceLock::new(),
        }
    }

    fn init_mutex() -> Mutex<HeapAllocator<PAGE_4K, PageAllocator>> {
        Mutex::new(HeapAllocator::new(PageAllocator::new()))
    }

    pub fn stats(&self) -> HeapStats {
        let inner = self.inner.get_or_init(Self::init_mutex).lock();
        inner.stats()
    }
}

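// Every allocation and deallocation initializes the inner heap on first use and
// takes the spin lock around it for the duration of the call.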
unsafe impl GlobalAlloc for LockedKernelHeapAllocator {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        self.inner
            .get_or_init(Self::init_mutex)
            .lock()
            .alloc(layout)
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        self.inner
            .get_or_init(Self::init_mutex)
            .lock()
            .dealloc(ptr, layout)
    }
}