kernel/memory_management/physical_page_allocator.rs

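//! Physical page allocator: a simple LIFO free list of 4 KiB physical pages,
//! linked through the pages' own memory via the kernel's physical-to-virtual mapping.
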
use core::ptr::NonNull;

use tracing::info;

use super::memory_layout::{align_down, align_up, is_aligned, PAGE_4K};
use crate::{
    memory_management::memory_layout::{
        kernel_elf_end, physical2virtual, virtual2physical, EXTENDED_OFFSET, KERNEL_END,
        KERNEL_LINK,
    },
    multiboot2::{MemoryMapType, MultiBoot2Info},
    sync::{once::OnceLock, spin::mutex::Mutex},
    testing,
};

struct FreePage {
    next: Option<NonNull<FreePage>>,
}

static ALLOCATOR: OnceLock<Mutex<PhysicalPageAllocator>> = OnceLock::new();

pub fn init(multiboot_info: &MultiBoot2Info) {
    if ALLOCATOR.try_get().is_some() {
        panic!("PhysicalPageAllocator already initialized");
    }

    ALLOCATOR.get_or_init(|| Mutex::new(PhysicalPageAllocator::new(multiboot_info)));
}

pub unsafe fn alloc() -> *mut u8 {
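    // Pop a page off the low-memory free list; panics when no pages are left.
    // Note: the returned page is filled with a 1-byte pattern, not zeroed.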
    ALLOCATOR.get().lock().alloc()
}

pub unsafe fn alloc_zeroed() -> *mut u8 {
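    // Allocate, then clear the page; `alloc` hands out pages filled with 1s.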
    let page = alloc();
    page.write_bytes(0, PAGE_4K);
    page
}

pub unsafe fn free(page: *mut u8) {
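    // Take the lock in an inner scope so the guard is dropped before the
    // potential panic below.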
    let r = { ALLOCATOR.get().lock().free(page) };
    r.unwrap_or_else(|| panic!("Page {page:p} not valid"))
}

pub fn stats() -> (usize, usize) {
    let allocator = ALLOCATOR.get().lock();
    (allocator.free_count, allocator.used_count)
}

struct PhysicalPageAllocator {
    low_mem_free_list_head: Option<NonNull<FreePage>>,
    #[allow(dead_code)]
    high_mem_start: usize,
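    // Virtual address bounds of the managed page range; `free` rejects
    // pointers outside [start, end).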
    start: usize,
    end: usize,
    free_count: usize,
    used_count: usize,
}

unsafe impl Send for PhysicalPageAllocator {}

impl PhysicalPageAllocator {
    fn new(multiboot_info: &MultiBoot2Info) -> Self {
        const PHYSICAL_KERNEL_START: u64 = virtual2physical(KERNEL_LINK);
        let mut physical_kernel_end = virtual2physical(align_up(kernel_elf_end(), PAGE_4K));
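        // The multiboot info structure may sit just past the kernel image; it
        // is folded into the reserved region below.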
        let multiboot_end = align_up(
            virtual2physical(multiboot_info.end_address() as usize),
            PAGE_4K,
        );
        info!("multiboot end: {multiboot_end:x}");
        info!(
            "physical_kernel_start: {:p}",
            PHYSICAL_KERNEL_START as *mut u8
        );
        info!("physical_kernel_end: {:p}", physical_kernel_end as *mut u8);

        if multiboot_end > physical_kernel_end {
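            // The multiboot structure lies after the kernel; extend the
            // reserved region to cover it, but only if it is within a few pages.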
            assert!(
                multiboot_end - physical_kernel_end < PAGE_4K as u64 * 5,
                "Multiboot is after the kernel by a lot",
            );
            physical_kernel_end = multiboot_end;
        }

        let mut s = Self {
            low_mem_free_list_head: None,
            high_mem_start: 0,
            start: 0,
            end: 0,
            free_count: 0,
            used_count: 0,
        };

        for memory in multiboot_info.memory_maps().unwrap() {
            if (memory.base_addr + memory.length) < EXTENDED_OFFSET as u64 {
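                // Skip regions that end below EXTENDED_OFFSET entirely.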
                continue;
            }
            if memory.mem_type != MemoryMapType::Available {
                continue;
            }
            let start_physical;
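            // If this region contains the kernel image, hand out pages only
            // after the kernel (and multiboot info) end.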
            let end_physical;
            if memory.base_addr <= PHYSICAL_KERNEL_START
                && (memory.base_addr + memory.length) >= physical_kernel_end
            {
                start_physical = physical_kernel_end;
                end_physical = align_down(memory.base_addr + memory.length, PAGE_4K);
                s.start = physical2virtual(physical_kernel_end);
            } else {
                assert!(memory.base_addr >= physical_kernel_end);

                start_physical = align_up(memory.base_addr, PAGE_4K);
                end_physical = align_down(memory.base_addr + memory.length, PAGE_4K);
            }
            let mut high_mem_start = 0;
            let end_virtual = if end_physical >= virtual2physical(KERNEL_END) {
                high_mem_start = KERNEL_END;
                KERNEL_END
            } else {
                physical2virtual(end_physical)
            };
            let start_virtual = physical2virtual(start_physical);

            if start_virtual < end_virtual {
                s.end = end_virtual;

                s.init_range(start_virtual as _, end_virtual as _);
                if high_mem_start != 0 {
                    s.high_mem_start = high_mem_start;
                    break;
                }
            }
        }
        s
    }

    fn init_range(&mut self, start: *mut u8, end: *mut u8) {
        info!("init physical pages: [{:p}, {:p})", start, end);
        let start = align_up(start as usize, PAGE_4K) as _;
        let end = align_down(end as usize, PAGE_4K) as _;
        assert!(start < end);
        let mut page = start;
        while page < end {
            unsafe { self.free(page).expect("valid page") };
            page = unsafe { page.add(PAGE_4K) };
        }
    }

    unsafe fn alloc(&mut self) -> *mut u8 {
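        // Pop the head of the free list; panic if the list is empty.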
        let Some(low_mem_free_list_head) = self.low_mem_free_list_head else {
            panic!("out of memory");
        };

        let page = low_mem_free_list_head;
        self.low_mem_free_list_head = page.as_ref().next;

        let page = page.as_ptr() as *mut u8;
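        // Fill the page with a 1-byte pattern; callers that need zeroed memory
        // go through `alloc_zeroed`.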
        page.write_bytes(1, PAGE_4K);
        self.used_count += 1;
        page
    }

    #[must_use]
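    // Returns `None` if `page` is not a page this allocator owns: null, not
    // 4K-aligned, or outside the [start, end) range. Otherwise the page is
    // pushed back onto the low-memory free list.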
    unsafe fn free(&mut self, page: *mut u8) -> Option<()> {
        let page = page.cast::<FreePage>();

        if page.is_null()
            || !is_aligned(page as usize, PAGE_4K)
            || page >= self.end as _
            || page < self.start as _
        {
            return None;
        }

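        // Overwrite the freed page with a 2-byte pattern before putting it
        // back on the list, so use-after-free is easier to spot.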
        page.cast::<u8>().write_bytes(2, PAGE_4K);
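        // Only low-memory pages are tracked by this free list.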
        assert!(self.high_mem_start == 0 || page < self.high_mem_start as _);
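        // SAFETY: `page` was null-checked above.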
        let mut page = NonNull::new_unchecked(page);

        page.as_mut().next = self.low_mem_free_list_head;
        self.low_mem_free_list_head = Some(page);
        self.free_count += 1;
        Some(())
    }
}

#[macro_rules_attribute::apply(testing::test)]
fn test_general() {
    let page1 = unsafe { alloc() };
    let page2 = unsafe { alloc() };
    let page3 = unsafe { alloc() };

    assert_eq!(page1 as usize % PAGE_4K, 0);
    assert_eq!(page2 as usize % PAGE_4K, 0);
    assert_eq!(page3 as usize % PAGE_4K, 0);

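    // The free list is LIFO and was filled in ascending order, so successive
    // allocations return adjacent, descending addresses.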
    assert_eq!(page1 as usize, page2 as usize + PAGE_4K);
    assert_eq!(page2 as usize, page3 as usize + PAGE_4K);

    assert!(unsafe { core::slice::from_raw_parts(page1, PAGE_4K) }
        .iter()
        .all(|&x| x == 1));
    assert!(unsafe { core::slice::from_raw_parts(page2, PAGE_4K) }
        .iter()
        .all(|&x| x == 1));
    assert!(unsafe { core::slice::from_raw_parts(page3, PAGE_4K) }
        .iter()
        .all(|&x| x == 1));

    let zeros = unsafe { alloc_zeroed() };
    assert!(unsafe { core::slice::from_raw_parts(zeros, PAGE_4K) }
        .iter()
        .all(|&x| x == 0));

    unsafe {
        free(page1);
        free(page2);
        free(page3);
        free(zeros);
    }
}

#[macro_rules_attribute::apply(testing::test)]
fn test_free_realloc() {
    let page = unsafe { alloc() };
    let addr = page as usize;

    unsafe { free(page) };

    let page2 = unsafe { alloc() };

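    // The LIFO free list should hand the just-freed page straight back.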
    assert_eq!(page2 as usize, addr);

    unsafe { free(page2) };
}

#[macro_rules_attribute::apply(testing::test)]
#[should_panic]
fn test_unaligned_free() {
    let page = unsafe { alloc() };

    let addr_inside_page = unsafe { page.add(1) };

    unsafe { free(addr_inside_page) };
}