use core::alloc::{GlobalAlloc, Layout};

use increasing_heap_allocator::{HeapAllocator, HeapStats, PageAllocatorProvider};
use kernel_user_link::{
    call_syscall,
    syscalls::{SyscallError, SYS_INC_HEAP},
};

use crate::sync::{once::OnceLock, spin::mutex::Mutex};

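/// Size of a single page requested from the kernel (4 KiB).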
const PAGE_4K: usize = 0x1000;

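/// Grows (or shrinks) the process heap by `increment` bytes via the
/// `SYS_INC_HEAP` syscall; an `increment` of 0 only queries the current heap
/// break. Judging by the assertion in `allocate_pages` below, the returned
/// address is the previous heap top (the base of any newly added region),
/// much like `sbrk`.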
unsafe fn inc_dec_heap(increment: isize) -> Result<*mut u8, SyscallError> {
    unsafe {
        call_syscall!(
            SYS_INC_HEAP,
            increment as u64,
        )
        .map(|addr| addr as *mut u8)
    }
}

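/// The module's heap allocator instance. It starts empty in const context and
/// is initialized lazily on first use, either through the `GlobalAlloc` impl
/// or the free functions at the bottom of this file.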
pub static ALLOCATOR: LockedKernelHeapAllocator = LockedKernelHeapAllocator::empty();

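/// Page provider for the heap: remembers where the heap starts and how many
/// 4 KiB pages have been mapped so far.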
struct PageAllocator {
    heap_start: usize,
    mapped_pages: usize,
}

impl PageAllocator {
    fn new() -> Self {
        Self {
            heap_start: unsafe { inc_dec_heap(0).unwrap() as usize },
            mapped_pages: 0,
        }
    }
}

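// The provider grows the heap one contiguous chunk of 4 KiB pages at a time;
// freshly mapped pages are expected to start exactly where the previous
// mapping ended.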
impl PageAllocatorProvider<PAGE_4K> for PageAllocator {
    fn allocate_pages(&mut self, pages: usize) -> Option<*mut u8> {
        assert!(pages > 0);

        let last_heap_base = self.heap_start + self.mapped_pages * PAGE_4K;
        let new_addr = unsafe { inc_dec_heap((pages * PAGE_4K) as isize) };

        let Ok(new_addr) = new_addr else {
            return None;
        };
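        // The kernel is expected to extend the heap contiguously: the new
        // region must start exactly at the previous heap top.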
        assert_eq!(new_addr as usize, last_heap_base);

        self.mapped_pages += pages;

        Some(new_addr)
    }

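    // Heap shrinking is not implemented yet.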
    fn deallocate_pages(&mut self, _pages: usize) -> bool {
        todo!()
    }
}

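/// Lock-protected heap allocator usable as a `GlobalAlloc`. The inner
/// allocator lives in a `OnceLock` so that `empty()` can be `const` and the
/// first heap syscall only happens on first use.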
pub struct LockedKernelHeapAllocator {
    inner: OnceLock<Mutex<HeapAllocator<PAGE_4K, PageAllocator>>>,
}

impl LockedKernelHeapAllocator {
    const fn empty() -> Self {
        Self {
            inner: OnceLock::new(),
        }
    }

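    /// Builds the inner allocator; called once, on first use, via
    /// `OnceLock::get_or_init`.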
    fn init_mutex() -> Mutex<HeapAllocator<PAGE_4K, PageAllocator>> {
        Mutex::new(HeapAllocator::new(PageAllocator::new()))
    }

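    /// Returns current heap usage statistics.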
    pub fn stats(&self) -> HeapStats {
        let inner = self.inner.get_or_init(Self::init_mutex).lock();
        inner.stats()
    }
}

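// Each operation lazily initializes the allocator if needed and holds the
// spin lock for the duration of the call.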
unsafe impl GlobalAlloc for LockedKernelHeapAllocator {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        self.inner
            .get_or_init(Self::init_mutex)
            .lock()
            .alloc(layout)
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        self.inner
            .get_or_init(Self::init_mutex)
            .lock()
            .dealloc(ptr, layout)
    }
}

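// Convenience wrappers around the global `ALLOCATOR`. `alloc_zeroed` and
// `realloc` go through `GlobalAlloc`'s default implementations, since only
// `alloc` and `dealloc` are overridden above.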
pub unsafe fn alloc(layout: Layout) -> *mut u8 {
    ALLOCATOR.alloc(layout)
}

pub unsafe fn dealloc(ptr: *mut u8, layout: Layout) {
    ALLOCATOR.dealloc(ptr, layout)
}

pub unsafe fn alloc_zeroed(layout: Layout) -> *mut u8 {
    ALLOCATOR.alloc_zeroed(layout)
}

pub unsafe fn realloc(ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
    ALLOCATOR.realloc(ptr, layout, new_size)
}