kernel/memory_management/virtual_space.rs

use core::{fmt, mem::MaybeUninit, ptr::NonNull};

use alloc::collections::LinkedList;
use tracing::info;

use crate::{
    memory_management::memory_layout::{
        align_range, is_aligned, MemSize, KERNEL_EXTRA_MEMORY_BASE, KERNEL_EXTRA_MEMORY_SIZE,
        PAGE_4K,
    },
    sync::spin::mutex::Mutex,
};

use super::virtual_memory_mapper::{self, VirtualMemoryMapEntry};

static VIRTUAL_SPACE_ALLOCATOR: Mutex<VirtualSpaceAllocator> =
    Mutex::new(VirtualSpaceAllocator::empty());

pub enum VirtualSpaceError {
    OutOfSpace,
    AlreadyMapped,
    NotFullRange,
    EntryNotFound,
}

impl fmt::Debug for VirtualSpaceError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            VirtualSpaceError::OutOfSpace => write!(f, "Out of space"),
            VirtualSpaceError::AlreadyMapped => write!(f, "Already mapped"),
            VirtualSpaceError::NotFullRange => write!(f, "Not full range"),
            VirtualSpaceError::EntryNotFound => write!(f, "Entry not found"),
        }
    }
}

type Result<T> = core::result::Result<T, VirtualSpaceError>;

/// A wrapper over memory that is identified by its `physical address`.
/// We map this memory into kernel `virtual space` and return a pointer to it.
pub struct VirtualSpace<T: ?Sized> {
    size: usize,
    data: NonNull<T>,
}

impl<T> VirtualSpace<T> {
    /// Create a new virtual space mapping for the given `physical_start` with the given type `T`.
    ///
    /// # Safety
    /// - `physical_start` must be a valid physical address
    /// - The memory must already be initialized. If it's not, use [`new_uninit`](Self::new_uninit) instead
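    ///
    /// # Example
    ///
    /// A minimal usage sketch; the physical address and register layout here are
    /// hypothetical (e.g. some memory-mapped device):
    ///
    /// ```ignore
    /// #[repr(C)]
    /// struct DeviceRegs {
    ///     status: u32,
    ///     control: u32,
    /// }
    ///
    /// // SAFETY: `0xFED0_0000` is assumed to be a valid, already-initialized physical region.
    /// let regs = unsafe { VirtualSpace::<DeviceRegs>::new(0xFED0_0000)? };
    /// let status = regs.status;
    /// // the virtual mapping is released when `regs` is dropped
    /// ```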
    pub unsafe fn new(physical_start: u64) -> Result<Self> {
        let size = core::mem::size_of::<T>();
        let virtual_start = allocate_and_map_virtual_space(physical_start, size)?;
        let data = NonNull::new(virtual_start as *mut T).unwrap();
        Ok(Self { size, data })
    }

    /// Create a new virtual space mapping for the given `physical_start` with the given type `T`.
    /// Unlike [`new`](Self::new), it does not assume the memory is initialized, and returns a
    /// `VirtualSpace<MaybeUninit<T>>` instead.
    ///
    /// # Safety
    /// - `physical_start` must be a valid physical address
    #[allow(dead_code)]
    pub unsafe fn new_uninit(physical_start: u64) -> Result<VirtualSpace<MaybeUninit<T>>> {
        let size = core::mem::size_of::<T>();
        let virtual_start = allocate_and_map_virtual_space(physical_start, size)?;
        let data = NonNull::new(virtual_start as *mut T).unwrap();
        Ok(VirtualSpace {
            size,
            data: NonNull::new_unchecked(data.as_ptr() as *mut MaybeUninit<T>),
        })
    }

    /// Create a new virtual space mapping for the given `physical_start` with the given slice type `[T]`.
    ///
    /// # Safety
    /// - `physical_start` must be a valid physical address
    /// - The memory must already be initialized. Currently, there is no way to create a slice of `MaybeUninit`
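    ///
    /// # Example
    ///
    /// A minimal sketch; the physical address and element count are hypothetical:
    ///
    /// ```ignore
    /// // SAFETY: the physical range starting at `0x8_0000` and spanning
    /// // `512 * size_of::<u64>()` bytes is assumed to be valid and initialized.
    /// let table = unsafe { VirtualSpace::<u64>::new_slice(0x8_0000, 512)? };
    /// assert_eq!(table.len(), 512);
    /// let first = table[0];
    /// ```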
    pub unsafe fn new_slice(physical_start: u64, len: usize) -> Result<VirtualSpace<[T]>> {
        let size = core::mem::size_of::<T>() * len;
        let virtual_start = allocate_and_map_virtual_space(physical_start, size)?;
        let data = NonNull::new(virtual_start as *mut T).unwrap();
        let slice = core::slice::from_raw_parts_mut(data.as_ptr(), len);

        Ok(VirtualSpace {
            size,
            data: NonNull::new_unchecked(slice as *mut [T]),
        })
    }
}

impl<T: ?Sized> core::ops::Deref for VirtualSpace<T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        unsafe { self.data.as_ref() }
    }
}

impl<T: ?Sized> core::ops::DerefMut for VirtualSpace<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        unsafe { self.data.as_mut() }
    }
}

unsafe impl<T: ?Sized + Send> Send for VirtualSpace<T> {}
unsafe impl<T: ?Sized + Sync> Sync for VirtualSpace<T> {}

impl<T: ?Sized> Drop for VirtualSpace<T> {
    fn drop(&mut self) {
        let size = self.size;
        deallocate_virtual_space(self.data.as_ptr() as *mut u8 as usize, size).unwrap();
    }
}

impl<T: ?Sized + fmt::Debug> fmt::Debug for VirtualSpace<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<T: ?Sized + fmt::Display> fmt::Display for VirtualSpace<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}

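/// Reserves a page-aligned range in the kernel's extra virtual memory region and maps it to
/// the physical range containing `physical_start..physical_start + size`.
/// Returns the virtual address corresponding to `physical_start` (the aligned virtual start
/// plus the in-page offset).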
fn allocate_and_map_virtual_space(physical_start: u64, size: usize) -> Result<usize> {
    let (aligned_start, size, offset) = align_range(physical_start, size, PAGE_4K);

    let mut allocator = VIRTUAL_SPACE_ALLOCATOR.lock();
    let virtual_addr = allocator.allocate(aligned_start, size)?;

    virtual_memory_mapper::map_kernel(&VirtualMemoryMapEntry {
        virtual_address: virtual_addr,
        physical_address: Some(aligned_start),
        size,
        flags: virtual_memory_mapper::flags::PTE_WRITABLE,
    });
    // keep the allocator locked until the mapping is done, so that no one else can touch
    // this space while we are mapping it
    drop(allocator);

    Ok(virtual_addr + offset)
}

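/// Releases a range previously returned by [`allocate_and_map_virtual_space`]: the entry is
/// removed from the allocator and the pages are unmapped from the kernel.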
fn deallocate_virtual_space(virtual_start: usize, size: usize) -> Result<()> {
    let (aligned_start, size, _) = align_range(virtual_start, size, PAGE_4K);

    let mut allocator = VIRTUAL_SPACE_ALLOCATOR.lock();
    allocator.deallocate(aligned_start, size)?;
    // unmap it after we deallocate (it will panic if it's not a valid deallocation)
    virtual_memory_mapper::unmap_kernel(
        &VirtualMemoryMapEntry {
            virtual_address: aligned_start,
            physical_address: None,
            size,
            flags: virtual_memory_mapper::flags::PTE_WRITABLE,
        },
        // we specified our own physical address on allocation, so we must set this to false
        false,
    );

    Ok(())
}

pub fn debug_blocks() {
    let allocator = VIRTUAL_SPACE_ALLOCATOR.lock();
    allocator.debug_blocks();
}

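/// A single contiguous range of kernel virtual space tracked by the [`VirtualSpaceAllocator`].
/// `physical_start` is `Some` when the range is currently mapped, and `None` when it is free.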
struct VirtualSpaceEntry {
    physical_start: Option<u64>,
    virtual_start: usize,
    size: usize,
}

#[allow(dead_code)]
impl VirtualSpaceEntry {
    /// Returns `None` if the entry is not mapped, or if `physical_start` is not inside this entry.
    fn virtual_for_physical(&self, physical_start: u64) -> Option<usize> {
        if let Some(current_phy_start) = self.physical_start {
            // is inside?
            if current_phy_start <= physical_start
                && current_phy_start + self.size as u64 > physical_start
            {
                return Some(self.virtual_start + (physical_start - current_phy_start) as usize);
            }
        }
        None
    }
}

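/// Allocator for the kernel's extra virtual memory region
/// (`KERNEL_EXTRA_MEMORY_BASE..KERNEL_EXTRA_MEMORY_BASE + KERNEL_EXTRA_MEMORY_SIZE`).
///
/// The whole region is kept as an ordered list of [`VirtualSpaceEntry`] blocks: allocation
/// splits a free block (first-fit), and deallocation frees a block and merges it with any
/// free neighbors, so adjacent free space coalesces back together.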
struct VirtualSpaceAllocator {
    entries: LinkedList<VirtualSpaceEntry>,
}

impl VirtualSpaceAllocator {
    const fn empty() -> Self {
        Self {
            entries: LinkedList::new(),
        }
    }

    /// Returns `(entry, is_fully_inside)` for the entry that contains `req_phy_start`, if any.
    fn get_entry_containing(
        &mut self,
        req_phy_start: u64,
        req_size: usize,
    ) -> Option<(&VirtualSpaceEntry, bool)> {
        assert!(req_size > 0);
        assert!(is_aligned(req_phy_start, PAGE_4K));
        assert!(is_aligned(req_size, PAGE_4K));

        let mut cursor = self.entries.cursor_front();
        while let Some(entry) = cursor.current() {
            if let Some(current_phy_start) = entry.physical_start {
                // is inside?
                if current_phy_start <= req_phy_start
                    && current_phy_start + entry.size as u64 > req_phy_start
                {
                    // this has parts of it inside
                    // is it fully inside?
                    return if current_phy_start + entry.size as u64
                        >= req_phy_start + req_size as u64
                    {
                        // yes, it is fully inside
                        Some((entry, true))
                    } else {
                        // no, it is not fully inside, but there is an overlap
                        // we can't allocate this and we can't relocate
                        Some((entry, false))
                    };
                }
            }
            cursor.move_next();
        }
        None
    }

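    /// Reserves `size` bytes of virtual space for the physical range starting at `phy_start`
    /// and returns the virtual start address.
    ///
    /// Fails with [`VirtualSpaceError::AlreadyMapped`] if any part of the physical range is
    /// already tracked, and with [`VirtualSpaceError::OutOfSpace`] if no free block is large
    /// enough.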
    fn allocate(&mut self, phy_start: u64, size: usize) -> Result<usize> {
        assert!(size > 0);
        assert!(is_aligned(phy_start, PAGE_4K));
        assert!(is_aligned(size, PAGE_4K));

        if self.get_entry_containing(phy_start, size).is_some() {
            return Err(VirtualSpaceError::AlreadyMapped);
        }

        let mut cursor = self.entries.cursor_front_mut();
        // find the first free entry that is large enough and allocate from it (first-fit)
        while let Some(entry) = cursor.current() {
            if entry.physical_start.is_none() && entry.size >= size {
                // found it, split into two, and add to the list

                // the new entry (after this)
                let new_entry = VirtualSpaceEntry {
                    physical_start: None,
                    virtual_start: entry.virtual_start + size,
                    size: entry.size - size,
                };
                // shrink this entry
                entry.size = size;
                entry.physical_start = Some(phy_start);
                let virtual_address = entry.virtual_start;

                // add the new entry
                cursor.insert_after(new_entry);
                return Ok(virtual_address);
            }
            cursor.move_next();
        }
        // if this is the first allocation, add the initial free entry covering the whole
        // region and try again
        if self.entries.is_empty() {
            assert!(is_aligned(KERNEL_EXTRA_MEMORY_SIZE, PAGE_4K));
            self.entries.push_back(VirtualSpaceEntry {
                physical_start: None,
                virtual_start: KERNEL_EXTRA_MEMORY_BASE,
                size: KERNEL_EXTRA_MEMORY_SIZE,
            });
            self.allocate(phy_start, size)
        } else {
            Err(VirtualSpaceError::OutOfSpace)
        }
    }

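    /// Releases a previously allocated range. The requested range must match an allocated
    /// entry exactly ([`VirtualSpaceError::NotFullRange`] otherwise); the freed entry is then
    /// merged with any adjacent free entries.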
    fn deallocate(&mut self, req_virtual_start: usize, req_size: usize) -> Result<()> {
        assert!(req_size > 0);
        assert!(is_aligned(req_virtual_start, PAGE_4K));
        assert!(is_aligned(req_size, PAGE_4K));

        let mut cursor = self.entries.cursor_front_mut();
        while let Some(entry) = cursor.current() {
            // is inside?
            if entry.virtual_start <= req_virtual_start
                && entry.virtual_start + entry.size > req_virtual_start
            {
                // it must match the whole entry
                if req_virtual_start != entry.virtual_start || req_size != entry.size {
                    // panic!("Requested to deallocate {:016X}..{:016X}, but it's partially inside {:016X}..{:016X}, must match exactly",
                    //     req_virtual_start, req_virtual_start + req_size, entry.virtual_start, entry.virtual_start + entry.size);
                    return Err(VirtualSpaceError::NotFullRange);
                }

                // found it, deallocate it
                assert!(entry.physical_start.is_some());
                entry.physical_start = None;

                // try to merge with the entries after and before;
                // extract the current entry so we can manipulate it easily
                let mut current = cursor.remove_current().unwrap();

                // merge with next
                if let Some(next_entry) = cursor.current() {
                    if next_entry.physical_start.is_none() {
                        // merge with next
                        current.size += next_entry.size;
                        // here `cursor` is pointing to `next_entry`
                        cursor.remove_current();
                    }
                }
                // go to the previous entry (before `current`)
                cursor.move_prev();
                // merge with prev
                if let Some(prev_entry) = cursor.current() {
                    if prev_entry.physical_start.is_none() {
                        // merge `current` into the previous entry; `current` is already out of
                        // the list, so it must not be re-inserted
                        prev_entry.size += current.size;
                        return Ok(());
                    }
                }
                // no free entry before us, add `current` back as its own free entry
                cursor.insert_after(current);
                return Ok(());
            }
            cursor.move_next();
        }
        Err(VirtualSpaceError::EntryNotFound)
    }

    fn debug_blocks(&self) {
        info!("Virtual space blocks:");
        for entry in self.entries.iter() {
            info!(
                "  range={:016x}..{:016x}, len={:4} => {:016X?}",
                entry.virtual_start,
                entry.virtual_start + entry.size,
                MemSize(entry.size),
                entry.physical_start
            );
        }
    }
}