use core::{fmt, mem::MaybeUninit, ptr::NonNull};
use alloc::collections::LinkedList;
use tracing::info;
use crate::{
memory_management::memory_layout::{
align_range, is_aligned, MemSize, KERNEL_EXTRA_MEMORY_BASE, KERNEL_EXTRA_MEMORY_SIZE,
PAGE_4K,
},
sync::spin::mutex::Mutex,
};
use super::virtual_memory_mapper::{self, VirtualMemoryMapEntry};
/// Global allocator handing out ranges of the kernel "extra" virtual region
/// (`KERNEL_EXTRA_MEMORY_BASE..+KERNEL_EXTRA_MEMORY_SIZE`); shared by all
/// `VirtualSpace` mappings and protected by a spin mutex.
static VIRTUAL_SPACE_ALLOCATOR: Mutex<VirtualSpaceAllocator> =
    Mutex::new(VirtualSpaceAllocator::empty());
/// Errors produced by the virtual-space allocator and mapping helpers.
pub enum VirtualSpaceError {
    /// No free virtual range large enough for the request.
    OutOfSpace,
    /// The requested physical range overlaps an existing mapping.
    AlreadyMapped,
    /// Deallocation must cover exactly one previously allocated entry.
    NotFullRange,
    /// No tracked entry contains the given virtual address.
    EntryNotFound,
}
impl fmt::Debug for VirtualSpaceError {
    /// Renders a short human-readable description for each variant.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let text = match self {
            VirtualSpaceError::OutOfSpace => "Out of space",
            VirtualSpaceError::AlreadyMapped => "Already mapped",
            VirtualSpaceError::NotFullRange => "Not full range",
            VirtualSpaceError::EntryNotFound => "Entry not found",
        };
        f.write_str(text)
    }
}
/// Module-local shorthand: `Result` with [`VirtualSpaceError`] as the error type.
type Result<T> = core::result::Result<T, VirtualSpaceError>;
/// An owned kernel virtual mapping of a physical range, typed as `T`.
///
/// Created by the unsafe constructors below; the mapping is released
/// (unmapped and returned to the allocator) in `Drop`.
pub struct VirtualSpace<T: ?Sized> {
    // Unaligned byte size of the mapped object; used on drop to recompute the
    // page-aligned range that was originally allocated.
    size: usize,
    // Pointer into the kernel virtual mapping (includes the in-page offset of
    // the original physical address).
    data: NonNull<T>,
}
impl<T> VirtualSpace<T> {
    /// Maps `size_of::<T>()` bytes starting at `physical_start` into kernel
    /// virtual space and treats the result as an initialized `T`.
    ///
    /// # Safety
    /// The caller must guarantee that `physical_start` designates physical
    /// memory that is valid for reads/writes for `size_of::<T>()` bytes,
    /// contains a properly initialized `T`, and is not mapped elsewhere in a
    /// way that would alias this mapping.
    pub unsafe fn new(physical_start: u64) -> Result<Self> {
        let size = core::mem::size_of::<T>();
        let virtual_start = allocate_and_map_virtual_space(physical_start, size)?;
        // `virtual_start` is inside the kernel-extra region, hence non-null.
        let data = NonNull::new(virtual_start as *mut T).unwrap();
        Ok(Self { size, data })
    }
    /// Like [`Self::new`], but the contents are not assumed initialized; the
    /// result is typed `MaybeUninit<T>` (same size/layout as `T`).
    ///
    /// # Safety
    /// Same as [`Self::new`], except the memory need not hold a valid `T`.
    #[allow(dead_code)]
    pub unsafe fn new_uninit(physical_start: u64) -> Result<VirtualSpace<MaybeUninit<T>>> {
        let size = core::mem::size_of::<T>();
        let virtual_start = allocate_and_map_virtual_space(physical_start, size)?;
        let data = NonNull::new(virtual_start as *mut T).unwrap();
        Ok(VirtualSpace {
            size,
            // Re-typing `*mut T` as `*mut MaybeUninit<T>` is a layout no-op.
            data: NonNull::new_unchecked(data.as_ptr() as *mut MaybeUninit<T>),
        })
    }
    /// Maps `len` consecutive `T`s starting at `physical_start` and exposes
    /// them as `[T]`.
    ///
    /// # Safety
    /// The caller must guarantee the physical range of
    /// `size_of::<T>() * len` bytes is valid, holds `len` initialized `T`s,
    /// and is not aliased by another mutable mapping.
    pub unsafe fn new_slice(physical_start: u64, len: usize) -> Result<VirtualSpace<[T]>> {
        let size = core::mem::size_of::<T>() * len;
        let virtual_start = allocate_and_map_virtual_space(physical_start, size)?;
        let data = NonNull::new(virtual_start as *mut T).unwrap();
        // Build a fat pointer carrying the slice length.
        let slice = core::slice::from_raw_parts_mut(data.as_ptr(), len);
        Ok(VirtualSpace {
            size,
            data: NonNull::new_unchecked(slice as *mut [T]),
        })
    }
}
impl<T: ?Sized> core::ops::Deref for VirtualSpace<T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        // SAFETY: `data` points into the kernel mapping created by the
        // constructors, which stays mapped until this value is dropped, so it
        // is valid for the lifetime of `&self`.
        unsafe { &*self.data.as_ptr() }
    }
}
impl<T: ?Sized> core::ops::DerefMut for VirtualSpace<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: same validity argument as `deref`; `&mut self` guarantees
        // exclusive access through this handle.
        unsafe { &mut *self.data.as_ptr() }
    }
}
// SAFETY: `VirtualSpace` only holds a size and a pointer into a kernel
// mapping owned by this value; transferring/sharing it across threads is
// sound exactly when `T` itself is `Send`/`Sync`.
unsafe impl<T: ?Sized + Send> Send for VirtualSpace<T> {}
unsafe impl<T: ?Sized + Sync> Sync for VirtualSpace<T> {}
impl<T: ?Sized> Drop for VirtualSpace<T> {
    /// Returns the virtual range to the allocator and unmaps it.
    ///
    /// Panics if the allocator no longer tracks this range — that would mean
    /// the bookkeeping is corrupted (e.g. a double free), which is a bug, not
    /// a recoverable condition. The `expect` message makes that panic
    /// diagnosable instead of the bare `unwrap` it replaced.
    fn drop(&mut self) {
        let virtual_start = self.data.as_ptr() as *mut u8 as usize;
        deallocate_virtual_space(virtual_start, self.size)
            .expect("VirtualSpace::drop: range not tracked by the virtual space allocator");
    }
}
impl<T: ?Sized + fmt::Debug> fmt::Debug for VirtualSpace<T> {
    /// Forwards to the mapped value's `Debug` implementation.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let inner: &T = self; // deref-coerce to the mapped value
        fmt::Debug::fmt(inner, f)
    }
}
impl<T: ?Sized + fmt::Display> fmt::Display for VirtualSpace<T> {
    /// Forwards to the mapped value's `Display` implementation.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let inner: &T = self; // deref-coerce to the mapped value
        fmt::Display::fmt(inner, f)
    }
}
/// Reserves a kernel virtual range covering `size` bytes at `physical_start`
/// and maps it writable.
///
/// Inputs may be unaligned; the range is expanded to page granularity and the
/// returned address includes the original in-page offset, so it corresponds
/// exactly to `physical_start`.
fn allocate_and_map_virtual_space(physical_start: u64, size: usize) -> Result<usize> {
    let (aligned_start, aligned_size, offset) = align_range(physical_start, size, PAGE_4K);

    // Keep the allocator locked across the mapping call so the reserved range
    // can never be observed allocated-but-unmapped.
    let mut allocator = VIRTUAL_SPACE_ALLOCATOR.lock();
    let virtual_addr = allocator.allocate(aligned_start, aligned_size)?;
    virtual_memory_mapper::map_kernel(&VirtualMemoryMapEntry {
        virtual_address: virtual_addr,
        physical_address: Some(aligned_start),
        size: aligned_size,
        flags: virtual_memory_mapper::flags::PTE_WRITABLE,
    });

    Ok(virtual_addr + offset)
}
/// Releases and unmaps a range previously produced by
/// [`allocate_and_map_virtual_space`].
///
/// `virtual_start`/`size` may carry the same in-page offset that was returned
/// to the caller; the identical page-aligned range is recomputed here.
fn deallocate_virtual_space(virtual_start: usize, size: usize) -> Result<()> {
    let (aligned_start, aligned_size, _) = align_range(virtual_start, size, PAGE_4K);

    let mut allocator = VIRTUAL_SPACE_ALLOCATOR.lock();
    allocator.deallocate(aligned_start, aligned_size)?;

    let entry = VirtualMemoryMapEntry {
        virtual_address: aligned_start,
        physical_address: None,
        size: aligned_size,
        flags: virtual_memory_mapper::flags::PTE_WRITABLE,
    };
    // NOTE(review): the trailing `false` is forwarded unchanged; its meaning
    // is defined by `unmap_kernel` — confirm there before altering.
    virtual_memory_mapper::unmap_kernel(&entry, false);
    Ok(())
}
/// Logs the allocator's current block list (see
/// `VirtualSpaceAllocator::debug_blocks`).
pub fn debug_blocks() {
    VIRTUAL_SPACE_ALLOCATOR.lock().debug_blocks();
}
/// One contiguous region of the kernel-extra virtual range.
struct VirtualSpaceEntry {
    // `Some(phys)` when the region is mapped to physical memory; `None` when
    // it is free and available for allocation.
    physical_start: Option<u64>,
    // Start of the region in kernel virtual address space (page aligned).
    virtual_start: usize,
    // Region length in bytes (page aligned).
    size: usize,
}
#[allow(dead_code)]
impl VirtualSpaceEntry {
    /// Translates `physical_start` to its virtual address if this entry is
    /// mapped and its physical range contains that address; `None` otherwise.
    fn virtual_for_physical(&self, physical_start: u64) -> Option<usize> {
        let phys = self.physical_start?;
        let phys_end = phys + self.size as u64;
        if (phys..phys_end).contains(&physical_start) {
            Some(self.virtual_start + (physical_start - phys) as usize)
        } else {
            None
        }
    }
}
/// First-fit allocator over the kernel-extra virtual region, kept as an
/// ordered list of adjacent used/free entries.
struct VirtualSpaceAllocator {
    // Entries sorted by `virtual_start`, covering the region without gaps
    // once the list is seeded on first allocation.
    entries: LinkedList<VirtualSpaceEntry>,
}
impl VirtualSpaceAllocator {
    /// Creates an allocator with no tracked regions; the free list is lazily
    /// seeded with the whole kernel-extra range on first `allocate`.
    const fn empty() -> Self {
        Self {
            entries: LinkedList::new(),
        }
    }

    /// Finds the mapped entry whose physical range contains `req_phy_start`.
    ///
    /// Returns the entry together with `true` when all `req_size` bytes fall
    /// inside it, or `false` when the request extends past the entry's end.
    /// All arguments must be page aligned and `req_size` non-zero.
    fn get_entry_containing(
        &self,
        req_phy_start: u64,
        req_size: usize,
    ) -> Option<(&VirtualSpaceEntry, bool)> {
        assert!(req_size > 0);
        assert!(is_aligned(req_phy_start, PAGE_4K));
        assert!(is_aligned(req_size, PAGE_4K));

        self.entries.iter().find_map(|entry| {
            let phy_start = entry.physical_start?;
            let phy_end = phy_start + entry.size as u64;
            if (phy_start..phy_end).contains(&req_phy_start) {
                // `true` iff the whole requested range fits in this entry.
                Some((entry, req_phy_start + req_size as u64 <= phy_end))
            } else {
                None
            }
        })
    }

    /// Reserves a virtual range for the page-aligned physical range
    /// `phy_start..+size` (first-fit) and returns its virtual start address.
    ///
    /// Fails with `AlreadyMapped` if the physical range overlaps (even
    /// partially) an existing mapping, or `OutOfSpace` if no free entry is
    /// large enough.
    fn allocate(&mut self, phy_start: u64, size: usize) -> Result<usize> {
        assert!(size > 0);
        assert!(is_aligned(phy_start, PAGE_4K));
        assert!(is_aligned(size, PAGE_4K));

        if self.get_entry_containing(phy_start, size).is_some() {
            return Err(VirtualSpaceError::AlreadyMapped);
        }

        let mut cursor = self.entries.cursor_front_mut();
        while let Some(entry) = cursor.current() {
            if entry.physical_start.is_none() && entry.size >= size {
                // Carve the allocation out of the front of this free entry.
                let remainder = VirtualSpaceEntry {
                    physical_start: None,
                    virtual_start: entry.virtual_start + size,
                    size: entry.size - size,
                };
                entry.size = size;
                entry.physical_start = Some(phy_start);
                let virtual_address = entry.virtual_start;
                // Exact fit leaves no remainder; don't pollute the list with
                // zero-sized free entries.
                if remainder.size > 0 {
                    cursor.insert_after(remainder);
                }
                return Ok(virtual_address);
            }
            cursor.move_next();
        }

        if self.entries.is_empty() {
            // First use: seed the free list with the whole kernel-extra
            // region, then retry (the retry cannot recurse again since the
            // list is now non-empty).
            assert!(is_aligned(KERNEL_EXTRA_MEMORY_SIZE, PAGE_4K));
            self.entries.push_back(VirtualSpaceEntry {
                physical_start: None,
                virtual_start: KERNEL_EXTRA_MEMORY_BASE,
                size: KERNEL_EXTRA_MEMORY_SIZE,
            });
            self.allocate(phy_start, size)
        } else {
            Err(VirtualSpaceError::OutOfSpace)
        }
    }

    /// Frees the entry that starts exactly at `req_virtual_start` with exactly
    /// `req_size` bytes, coalescing it with adjacent free entries.
    ///
    /// Fails with `NotFullRange` if the request covers only part of an entry,
    /// or `EntryNotFound` if no entry contains `req_virtual_start`.
    fn deallocate(&mut self, req_virtual_start: usize, req_size: usize) -> Result<()> {
        assert!(req_size > 0);
        assert!(is_aligned(req_virtual_start, PAGE_4K));
        assert!(is_aligned(req_size, PAGE_4K));

        let mut cursor = self.entries.cursor_front_mut();
        while let Some(entry) = cursor.current() {
            if entry.virtual_start <= req_virtual_start
                && entry.virtual_start + entry.size > req_virtual_start
            {
                // Only whole entries can be freed.
                if req_virtual_start != entry.virtual_start || req_size != entry.size {
                    return Err(VirtualSpaceError::NotFullRange);
                }
                assert!(entry.physical_start.is_some());
                entry.physical_start = None;

                // Detach the freed entry; the cursor now rests on its
                // successor (or the ghost if it was last).
                let mut current = cursor.remove_current().unwrap();

                // Coalesce a free successor into the freed entry.
                if let Some(next_entry) = cursor.current() {
                    if next_entry.physical_start.is_none() {
                        current.size += next_entry.size;
                        cursor.remove_current();
                    }
                }

                // Coalesce into a free predecessor if there is one.
                cursor.move_prev();
                let merged_into_prev = match cursor.current() {
                    Some(prev_entry) if prev_entry.physical_start.is_none() => {
                        prev_entry.size += current.size;
                        true
                    }
                    _ => false,
                };
                // BUGFIX: the old code re-inserted `current` unconditionally,
                // so after merging into a free predecessor the freed range was
                // tracked twice and could later be allocated twice. Re-insert
                // only when no predecessor merge happened (insert_after on the
                // ghost places it at the front of the list).
                if !merged_into_prev {
                    cursor.insert_after(current);
                }
                return Ok(());
            }
            cursor.move_next();
        }
        Err(VirtualSpaceError::EntryNotFound)
    }

    /// Logs every tracked entry: virtual range, human-readable length, and
    /// the mapped physical start (`None` for free entries).
    fn debug_blocks(&self) {
        info!("Virtual space blocks:");
        for entry in self.entries.iter() {
            info!(
                "  range={:016x}..{:016x}, len={:4} => {:016X?}",
                entry.virtual_start,
                entry.virtual_start + entry.size,
                MemSize(entry.size),
                entry.physical_start
            );
        }
    }
}