kernel/memory_management/virtual_space.rs
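//! Management of the kernel's "extra" virtual memory region: physical ranges are mapped
//! into `KERNEL_EXTRA_MEMORY_BASE..` and handed out as typed [`VirtualSpace`] handles.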
use core::{fmt, mem::MaybeUninit, ptr::NonNull};

use alloc::collections::LinkedList;
use tracing::info;

use crate::{
    memory_management::memory_layout::{
        align_range, is_aligned, MemSize, KERNEL_EXTRA_MEMORY_BASE, KERNEL_EXTRA_MEMORY_SIZE,
        PAGE_4K,
    },
    sync::spin::mutex::Mutex,
};

use super::virtual_memory_mapper::{self, VirtualMemoryMapEntry};

static VIRTUAL_SPACE_ALLOCATOR: Mutex<VirtualSpaceAllocator> =
    Mutex::new(VirtualSpaceAllocator::empty());

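/// Errors that can occur while allocating or freeing virtual space.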
pub enum VirtualSpaceError {
    OutOfSpace,
    AlreadyMapped,
    NotFullRange,
    EntryNotFound,
}

impl fmt::Debug for VirtualSpaceError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            VirtualSpaceError::OutOfSpace => write!(f, "Out of space"),
            VirtualSpaceError::AlreadyMapped => write!(f, "Already mapped"),
            VirtualSpaceError::NotFullRange => write!(f, "Not full range"),
            VirtualSpaceError::EntryNotFound => write!(f, "Entry not found"),
        }
    }
}

type Result<T> = core::result::Result<T, VirtualSpaceError>;

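/// An owned mapping of a physical memory range into kernel virtual space, viewed as a `T`.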
pub struct VirtualSpace<T: ?Sized> {
    size: usize,
    data: NonNull<T>,
}

impl<T> VirtualSpace<T> {
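    /// Maps the physical memory starting at `physical_start` and interprets it as a `T`.
    ///
    /// # Safety
    /// `physical_start` must point to a valid, properly initialized `T` that is not
    /// mutated or aliased elsewhere while this handle is alive.
    ///
    /// Illustrative use (the type and address below are hypothetical):
    /// ```ignore
    /// let regs: VirtualSpace<DeviceRegs> = unsafe { VirtualSpace::new(mmio_phys_addr)? };
    /// ```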
    pub unsafe fn new(physical_start: u64) -> Result<Self> {
        let size = core::mem::size_of::<T>();
        let virtual_start = allocate_and_map_virtual_space(physical_start, size)?;
        let data = NonNull::new(virtual_start as *mut T).unwrap();
        Ok(Self { size, data })
    }

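    /// Like [`Self::new`], but exposes the mapping as [`MaybeUninit<T>`] so the caller
    /// can initialize the memory before use.
    ///
    /// # Safety
    /// `physical_start` must point to a physical range of at least `size_of::<T>()`
    /// bytes that this handle may exclusively access.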
    #[allow(dead_code)]
    pub unsafe fn new_uninit(physical_start: u64) -> Result<VirtualSpace<MaybeUninit<T>>> {
        let size = core::mem::size_of::<T>();
        let virtual_start = allocate_and_map_virtual_space(physical_start, size)?;
        let data = NonNull::new(virtual_start as *mut T).unwrap();
        Ok(VirtualSpace {
            size,
            data: NonNull::new_unchecked(data.as_ptr() as *mut MaybeUninit<T>),
        })
    }

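    /// Maps `len` consecutive `T`s starting at `physical_start` and exposes them as `[T]`.
    ///
    /// # Safety
    /// The whole physical range `physical_start .. physical_start + len * size_of::<T>()`
    /// must contain valid `T`s and must not be aliased mutably elsewhere.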
    pub unsafe fn new_slice(physical_start: u64, len: usize) -> Result<VirtualSpace<[T]>> {
        let size = core::mem::size_of::<T>() * len;
        let virtual_start = allocate_and_map_virtual_space(physical_start, size)?;
        let data = NonNull::new(virtual_start as *mut T).unwrap();
        let slice = core::slice::from_raw_parts_mut(data.as_ptr(), len);

        Ok(VirtualSpace {
            size,
            data: NonNull::new_unchecked(slice as *mut [T]),
        })
    }
}

impl<T: ?Sized> core::ops::Deref for VirtualSpace<T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        unsafe { self.data.as_ref() }
    }
}

impl<T: ?Sized> core::ops::DerefMut for VirtualSpace<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        unsafe { self.data.as_mut() }
    }
}

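// SAFETY: a `VirtualSpace` exclusively owns its mapping, so sending or sharing it across
// threads is as safe as sending or sharing the underlying `T`.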
unsafe impl<T: ?Sized + Send> Send for VirtualSpace<T> {}
unsafe impl<T: ?Sized + Sync> Sync for VirtualSpace<T> {}

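// Dropping the handle unmaps the range and returns it to the virtual-space allocator.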
impl<T: ?Sized> Drop for VirtualSpace<T> {
    fn drop(&mut self) {
        let size = self.size;
        deallocate_virtual_space(self.data.as_ptr() as *mut u8 as usize, size).unwrap();
    }
}

impl<T: ?Sized + fmt::Debug> fmt::Debug for VirtualSpace<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<T: ?Sized + fmt::Display> fmt::Display for VirtualSpace<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}

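/// Reserves a page-aligned range of kernel virtual space, maps it to the given physical
/// range, and returns the virtual address corresponding to `physical_start`.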
fn allocate_and_map_virtual_space(physical_start: u64, size: usize) -> Result<usize> {
    let (aligned_start, size, offset) = align_range(physical_start, size, PAGE_4K);

    let mut allocator = VIRTUAL_SPACE_ALLOCATOR.lock();
    let virtual_addr = allocator.allocate(aligned_start, size)?;

    virtual_memory_mapper::map_kernel(&VirtualMemoryMapEntry {
        virtual_address: virtual_addr,
        physical_address: Some(aligned_start),
        size,
        flags: virtual_memory_mapper::flags::PTE_WRITABLE,
    });
    // Keep the allocator locked until the mapping is installed.
    drop(allocator);

    Ok(virtual_addr + offset)
}

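/// Unmaps a range previously returned by [`allocate_and_map_virtual_space`] and releases
/// it back to the allocator.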
fn deallocate_virtual_space(virtual_start: usize, size: usize) -> Result<()> {
    let (aligned_start, size, _) = align_range(virtual_start, size, PAGE_4K);

    let mut allocator = VIRTUAL_SPACE_ALLOCATOR.lock();
    allocator.deallocate(aligned_start, size)?;
    virtual_memory_mapper::unmap_kernel(
        &VirtualMemoryMapEntry {
            virtual_address: aligned_start,
            physical_address: None,
            size,
            flags: virtual_memory_mapper::flags::PTE_WRITABLE,
        },
        false,
    );

    Ok(())
}

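/// Logs the current blocks of the kernel's extra virtual space (used and free).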
pub fn debug_blocks() {
    let allocator = VIRTUAL_SPACE_ALLOCATOR.lock();
    allocator.debug_blocks();
}

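/// One block of the extra virtual space; `physical_start` is `None` when the block is free.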
struct VirtualSpaceEntry {
    physical_start: Option<u64>,
    virtual_start: usize,
    size: usize,
}

#[allow(dead_code)]
impl VirtualSpaceEntry {
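    /// Returns the virtual address that corresponds to `physical_start`, if that address
    /// falls inside this entry's range.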
    fn virtual_for_physical(&self, physical_start: u64) -> Option<usize> {
        if let Some(current_phy_start) = self.physical_start {
            if current_phy_start <= physical_start
                && current_phy_start + self.size as u64 > physical_start
            {
                return Some(self.virtual_start + (physical_start - current_phy_start) as usize);
            }
        }
        None
    }
}

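/// First-fit allocator over the kernel's extra memory region, kept as an address-ordered
/// list of used and free blocks.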
struct VirtualSpaceAllocator {
    entries: LinkedList<VirtualSpaceEntry>,
}

impl VirtualSpaceAllocator {
    const fn empty() -> Self {
        Self {
            entries: LinkedList::new(),
        }
    }

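    /// Finds the entry whose physical range contains `req_phy_start`. The returned `bool`
    /// is `true` when the whole requested range fits inside that entry.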
    fn get_entry_containing(
        &mut self,
        req_phy_start: u64,
        req_size: usize,
    ) -> Option<(&VirtualSpaceEntry, bool)> {
        assert!(req_size > 0);
        assert!(is_aligned(req_phy_start, PAGE_4K));
        assert!(is_aligned(req_size, PAGE_4K));

        let mut cursor = self.entries.cursor_front();
        while let Some(entry) = cursor.current() {
            if let Some(current_phy_start) = entry.physical_start {
                if current_phy_start <= req_phy_start
                    && current_phy_start + entry.size as u64 > req_phy_start
                {
                    return if current_phy_start + entry.size as u64
                        >= req_phy_start + req_size as u64
                    {
                        Some((entry, true))
                    } else {
                        Some((entry, false))
                    };
                }
            }
            cursor.move_next();
        }
        None
    }

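    /// Reserves `size` bytes of virtual space for the physical range starting at
    /// `phy_start`, returning the chosen virtual address.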
    fn allocate(&mut self, phy_start: u64, size: usize) -> Result<usize> {
        assert!(size > 0);
        assert!(is_aligned(phy_start, PAGE_4K));
        assert!(is_aligned(size, PAGE_4K));

        if self.get_entry_containing(phy_start, size).is_some() {
            return Err(VirtualSpaceError::AlreadyMapped);
        }

        let mut cursor = self.entries.cursor_front_mut();
        while let Some(entry) = cursor.current() {
            // First fit: take the first free entry large enough for the request.
            if entry.physical_start.is_none() && entry.size >= size {
                // Split the entry: the front part becomes the used block,
                // the remainder stays free.
                let new_entry = VirtualSpaceEntry {
                    physical_start: None,
                    virtual_start: entry.virtual_start + size,
                    size: entry.size - size,
                };
                entry.size = size;
                entry.physical_start = Some(phy_start);
                let virtual_address = entry.virtual_start;

                cursor.insert_after(new_entry);
                return Ok(virtual_address);
            }
            cursor.move_next();
        }
        if self.entries.is_empty() {
            // Lazily seed the allocator with one free block covering the whole
            // extra memory region, then retry the allocation.
            assert!(is_aligned(KERNEL_EXTRA_MEMORY_SIZE, PAGE_4K));
            self.entries.push_back(VirtualSpaceEntry {
                physical_start: None,
                virtual_start: KERNEL_EXTRA_MEMORY_BASE,
                size: KERNEL_EXTRA_MEMORY_SIZE,
            });
            self.allocate(phy_start, size)
        } else {
            Err(VirtualSpaceError::OutOfSpace)
        }
    }

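    /// Frees an allocated range. The range must match an allocated entry exactly; the
    /// freed entry is merged with adjacent free blocks.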
    fn deallocate(&mut self, req_virtual_start: usize, req_size: usize) -> Result<()> {
        assert!(req_size > 0);
        assert!(is_aligned(req_virtual_start, PAGE_4K));
        assert!(is_aligned(req_size, PAGE_4K));

        let mut cursor = self.entries.cursor_front_mut();
        while let Some(entry) = cursor.current() {
            if entry.virtual_start <= req_virtual_start
                && entry.virtual_start + entry.size > req_virtual_start
            {
                // Only whole entries can be freed.
                if req_virtual_start != entry.virtual_start || req_size != entry.size {
                    return Err(VirtualSpaceError::NotFullRange);
                }

                assert!(entry.physical_start.is_some());
                entry.physical_start = None;

                // Detach the now-free entry; the cursor moves to the next entry.
                let mut current = cursor.remove_current().unwrap();

                // Merge with the next entry if it is free.
                if let Some(next_entry) = cursor.current() {
                    if next_entry.physical_start.is_none() {
                        current.size += next_entry.size;
                        cursor.remove_current();
                    }
                }
                // Merge into the previous entry if it is free; otherwise re-insert
                // the freed entry after it.
                cursor.move_prev();
                if let Some(prev_entry) = cursor.current() {
                    if prev_entry.physical_start.is_none() {
                        prev_entry.size += current.size;
                        return Ok(());
                    }
                }
                cursor.insert_after(current);
                return Ok(());
            }
            cursor.move_next();
        }
        Err(VirtualSpaceError::EntryNotFound)
    }

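    /// Logs every block with its virtual range, length, and mapped physical start.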
    fn debug_blocks(&self) {
        info!("Virtual space blocks:");
        for entry in self.entries.iter() {
            info!(
                " range={:016x}..{:016x}, len={:4} => {:016X?}",
                entry.virtual_start,
                entry.virtual_start + entry.size,
                MemSize(entry.size),
                entry.physical_start
            );
        }
    }
}