// kernel/memory_management/memory_layout.rs

use core::fmt;

use tracing::info;

use super::virtual_memory_mapper;
// Symbols whose *addresses* (not values) mark section boundaries of the
// kernel image — presumably defined by the linker script (TODO confirm).
// The `usize` type is only a placeholder: the code below takes the
// address of each symbol and never reads the data behind it.
extern "C" {
    static begin: usize;
    static end: usize;
    static text_end: usize;
    static rodata_end: usize;
    static data_end: usize;
    static stack_guard_page: usize;
    static __eh_frame: usize;
}
16
/// Base of the kernel's higher-half virtual address space.
pub const KERNEL_BASE: usize = 0xFFFF_FFFF_8000_0000;
/// Offset of the kernel image above `KERNEL_BASE` (1 MiB).
pub const EXTENDED_OFFSET: usize = 0x10_0000;
/// Virtual address the kernel image is linked at.
pub const KERNEL_LINK: usize = KERNEL_BASE + EXTENDED_OFFSET;
/// Size of the initially mapped kernel region (128 MiB).
pub const KERNEL_MAPPED_SIZE: usize = 0x800_0000;
/// End of the initially mapped kernel region.
pub const KERNEL_END: usize = KERNEL_BASE + KERNEL_MAPPED_SIZE;

/// The kernel heap starts directly after the mapped kernel region.
pub const KERNEL_HEAP_BASE: usize = KERNEL_END;
/// Kernel heap size (16 MiB).
pub const KERNEL_HEAP_SIZE: usize = 0x100_0000;
/// Usable size of one interrupt stack (32 pages = 128 KiB).
pub const INTR_STACK_SIZE: usize = PAGE_4K * 32;
/// One-page gap between interrupt stacks — presumably left unmapped as a
/// guard page; confirm against the mapper setup.
pub const INTR_STACK_EMPTY_SIZE: usize = PAGE_4K;
/// Stride of one interrupt-stack slot (stack + gap).
pub const INTR_STACK_ENTRY_SIZE: usize = INTR_STACK_SIZE + INTR_STACK_EMPTY_SIZE;
/// Interrupt stacks are placed directly after the kernel heap.
pub const INTR_STACK_BASE: usize = KERNEL_HEAP_BASE + KERNEL_HEAP_SIZE;
/// Number of interrupt stacks (matches the 7 x86-64 IST slots — TODO confirm).
pub const INTR_STACK_COUNT: usize = 7;
/// Total virtual space reserved for all interrupt-stack slots.
pub const INTR_STACK_TOTAL_SIZE: usize = INTR_STACK_ENTRY_SIZE * INTR_STACK_COUNT;

/// Remaining kernel virtual space, handed out for extra mappings.
pub const KERNEL_EXTRA_MEMORY_BASE: usize = INTR_STACK_BASE + INTR_STACK_TOTAL_SIZE;
/// Start of the last 4 KiB page of the virtual address space.
pub const KERNEL_LAST_POSSIBLE_ADDR: usize = 0xFFFF_FFFF_FFFF_F000;
/// Size of the extra-memory region (everything up to the last page).
pub const KERNEL_EXTRA_MEMORY_SIZE: usize = KERNEL_LAST_POSSIBLE_ADDR - KERNEL_EXTRA_MEMORY_BASE;
50
/// Start of the per-process kernel virtual region, taken from the mapper.
pub const KERNEL_PROCESS_VIRTUAL_ADDRESS_START: usize =
    virtual_memory_mapper::KERNEL_PROCESS_VIRTUAL_ADDRESS_START;
/// One page below the per-process kernel stack — presumably an unmapped
/// guard page; confirm against the process setup code.
pub const PROCESS_KERNEL_STACK_GUARD: usize = PAGE_4K;
/// Bottom of the per-process kernel stack (above the guard).
pub const PROCESS_KERNEL_STACK_BASE: usize =
    KERNEL_PROCESS_VIRTUAL_ADDRESS_START + PROCESS_KERNEL_STACK_GUARD;
/// Per-process kernel stack size (64 pages = 256 KiB).
pub const PROCESS_KERNEL_STACK_SIZE: usize = PAGE_4K * 64;
/// Top of the per-process kernel stack.
pub const PROCESS_KERNEL_STACK_END: usize = PROCESS_KERNEL_STACK_BASE + PROCESS_KERNEL_STACK_SIZE;
62
/// One kibibyte.
#[allow(dead_code)]
pub const KB: usize = 1 << 10;
/// One mebibyte.
pub const MB: usize = 1 << 20;
/// One gibibyte.
pub const GB: usize = 1 << 30;
/// Size of a standard 4 KiB page.
pub const PAGE_4K: usize = 1 << 12;
/// Size of a 2 MiB (huge) page.
pub const PAGE_2M: usize = 1 << 21;
69
70pub fn kernel_elf_end() -> usize {
71 (unsafe { &end } as *const usize as usize)
72}
73
74#[allow(dead_code)]
75pub fn kernel_elf_size() -> usize {
76 (unsafe { &end } as *const usize as usize) - (unsafe { &begin } as *const usize as usize)
77}
78
79pub fn kernel_text_end() -> usize {
80 (unsafe { &text_end } as *const usize as usize)
81}
82
83pub fn kernel_elf_rodata_end() -> usize {
84 (unsafe { &rodata_end } as *const usize as usize)
85}
86
87pub fn kernel_elf_data_end() -> usize {
88 (unsafe { &data_end } as *const usize as usize)
89}
90
91pub fn stack_guard_page_ptr() -> usize {
92 (unsafe { &stack_guard_page } as *const usize as usize)
93}
94
95pub fn eh_frame_start() -> usize {
96 (unsafe { &__eh_frame } as *const usize as usize)
97}
98
99pub fn eh_frame_end() -> usize {
100 (unsafe { &rodata_end } as *const usize as usize)
101}
102
/// Alignment helpers for address-like integer types.
///
/// All operations assume `alignment` is a power of two (the
/// implementations use the `x & !(alignment - 1)` bitmask trick).
pub trait AlignMem: Sized {
    /// Rounds `self` up to the next multiple of `alignment`.
    fn align_up(self, alignment: usize) -> Self;
    /// Rounds `self` down to the previous multiple of `alignment`.
    fn align_down(self, alignment: usize) -> Self;
    /// Returns `true` if `self` is a multiple of `alignment`.
    fn is_aligned(&self, alignment: usize) -> bool;
    /// Expands the range `[self, self + size)` outward to `alignment`
    /// boundaries; returns `(aligned_start, aligned_size, offset)` where
    /// `offset` is `self - aligned_start`.
    fn align_range(self, size: usize, alignment: usize) -> (Self, usize, usize);
}
109
// Implements `AlignMem` for an unsigned integer type using the
// power-of-two bitmask trick; results are wrong if `alignment` is not a
// power of two (no runtime check is performed).
macro_rules! impl_align_mem {
    ($t:ty) => {
        impl AlignMem for $t {
            #[inline(always)]
            fn align_up(self, alignment: usize) -> Self {
                // Add `alignment - 1` then mask the low bits off.
                // NOTE: the addition can overflow (panicking in debug
                // builds) when `self` is within `alignment` of `$t::MAX`.
                (self + (alignment as $t) - 1) & !((alignment as $t) - 1)
            }

            #[inline(always)]
            fn align_down(self, alignment: usize) -> Self {
                // Clear the low `log2(alignment)` bits.
                self & !((alignment as $t) - 1)
            }

            #[inline(always)]
            fn is_aligned(&self, alignment: usize) -> bool {
                // Aligned iff the low bits are all zero.
                (self & ((alignment as $t) - 1)) == 0
            }

            #[inline(always)]
            fn align_range(self, size: usize, alignment: usize) -> (Self, usize, usize) {
                let addr_end = self + size as $t;
                // Expand outward: start rounds down, end rounds up.
                let start_aligned = self.align_down(alignment);
                let end_aligned = addr_end.align_up(alignment);
                // `try_into` guards the $t -> usize narrowing (relevant
                // for u64 on 32-bit targets); panics if it doesn't fit.
                let size: usize = (end_aligned - start_aligned).try_into().unwrap();
                assert!(size > 0);
                assert!(size.is_aligned(alignment));
                // Distance of the original address from the aligned start.
                let offset = (self - start_aligned).try_into().unwrap();

                (start_aligned, size, offset)
            }
        }
    };
}

impl_align_mem!(usize);
impl_align_mem!(u64);
146
/// Rounds `addr` up to the next multiple of `alignment`
/// (free-function convenience wrapper over [`AlignMem::align_up`]).
pub fn align_up<T: AlignMem>(addr: T, alignment: usize) -> T {
    addr.align_up(alignment)
}
150
/// Rounds `addr` down to the previous multiple of `alignment`
/// (free-function convenience wrapper over [`AlignMem::align_down`]).
pub fn align_down<T: AlignMem>(addr: T, alignment: usize) -> T {
    addr.align_down(alignment)
}
154
/// Returns `true` if `addr` is a multiple of `alignment`
/// (free-function convenience wrapper over [`AlignMem::is_aligned`]).
pub fn is_aligned<T: AlignMem>(addr: T, alignment: usize) -> bool {
    addr.is_aligned(alignment)
}
158
/// Expands `[addr, addr + size)` outward to `alignment` boundaries and
/// returns `(aligned_start, aligned_size, offset)`
/// (free-function convenience wrapper over [`AlignMem::align_range`]).
pub fn align_range<T: AlignMem>(addr: T, size: usize, alignment: usize) -> (T, usize, usize) {
    addr.align_range(size, alignment)
}
162
/// Translates a virtual address inside the kernel's low mapped region to
/// its physical address; the mapping is a fixed `KERNEL_BASE` offset.
///
/// Debug builds assert the address is within
/// `KERNEL_BASE..=KERNEL_BASE + KERNEL_MAPPED_SIZE` (upper bound is
/// inclusive, so the one-past-the-end address is accepted).
#[inline(always)]
pub const fn virtual2physical(addr: usize) -> u64 {
    debug_assert!(addr >= KERNEL_BASE && addr <= KERNEL_BASE + KERNEL_MAPPED_SIZE);
    (addr - KERNEL_BASE) as u64
}
168
/// Translates a physical address in the low mapped region back to the
/// kernel virtual address; inverse of [`virtual2physical`].
///
/// NOTE(review): the bound here is strict (`< KERNEL_MAPPED_SIZE`) while
/// `virtual2physical` accepts the inclusive end — confirm the asymmetry
/// is intentional.
#[inline(always)]
pub const fn physical2virtual(addr: u64) -> usize {
    debug_assert!(addr < KERNEL_MAPPED_SIZE as u64);
    addr as usize + KERNEL_BASE
}
174
175pub fn display_kernel_map() {
176 info!("Kernel map:");
177 let nothing = KERNEL_BASE..KERNEL_LINK;
178 let kernel_elf_end = align_up(kernel_elf_end(), PAGE_4K);
179 let kernel_elf = KERNEL_LINK..kernel_elf_end;
180 let kernel_elf_text = KERNEL_LINK..kernel_text_end();
181 let kernel_elf_rodata = kernel_text_end()..kernel_elf_rodata_end();
182 let kernel_elf_data = kernel_elf_rodata_end()..kernel_elf_data_end();
183 let kernel_elf_bss = kernel_elf_data_end()..kernel_elf_end;
184 let kernel_physical_allocator_low = kernel_elf_end..KERNEL_END;
185 let kernel_heap = KERNEL_HEAP_BASE..KERNEL_HEAP_BASE + KERNEL_HEAP_SIZE;
186 let interrupt_stack = INTR_STACK_BASE..INTR_STACK_BASE + INTR_STACK_TOTAL_SIZE;
187 let kernel_extra_memory =
188 KERNEL_EXTRA_MEMORY_BASE..KERNEL_EXTRA_MEMORY_BASE + KERNEL_EXTRA_MEMORY_SIZE;
189
190 info!(
191 " range={:016x}..{:016x}, len={:4} nothing",
192 nothing.start,
193 nothing.end,
194 MemSize(nothing.len())
195 );
196 info!(
197 " range={:016x}..{:016x}, len={:4} kernel elf",
198 kernel_elf.start,
199 kernel_elf.end,
200 MemSize(kernel_elf.len())
201 );
202 info!(
204 " range={:016x}..{:016x}, len={:4} kernel elf text",
205 kernel_elf_text.start,
206 kernel_elf_text.end,
207 MemSize(kernel_elf_text.len())
208 );
209 info!(
210 " range={:016x}..{:016x}, len={:4} kernel elf rodata",
211 kernel_elf_rodata.start,
212 kernel_elf_rodata.end,
213 MemSize(kernel_elf_rodata.len())
214 );
215 info!(
216 " range={:016x}..{:016x}, len={:4} kernel elf data",
217 kernel_elf_data.start,
218 kernel_elf_data.end,
219 MemSize(kernel_elf_data.len())
220 );
221 info!(
222 " range={:016x}..{:016x}, len={:4} kernel elf bss",
223 kernel_elf_bss.start,
224 kernel_elf_bss.end,
225 MemSize(kernel_elf_bss.len())
226 );
227 info!(
228 " range={:016x}..{:016x}, len={:4} kernel physical allocator low",
229 kernel_physical_allocator_low.start,
230 kernel_physical_allocator_low.end,
231 MemSize(kernel_physical_allocator_low.len())
232 );
233 info!(
234 " range={:016x}..{:016x}, len={:4} kernel heap",
235 kernel_heap.start,
236 kernel_heap.end,
237 MemSize(kernel_heap.len())
238 );
239 info!(
240 " range={:016x}..{:016x}, len={:4} interrupt stack",
241 interrupt_stack.start,
242 interrupt_stack.end,
243 MemSize(interrupt_stack.len())
244 );
245 info!(
246 " range={:016x}..{:016x}, len={:4} kernel extra (virtual space)",
247 kernel_extra_memory.start,
248 kernel_extra_memory.end,
249 MemSize(kernel_extra_memory.len())
250 );
251
252 info!(
254 "whole kernel physical size (startup/low): {}",
255 MemSize(KERNEL_END - KERNEL_BASE)
256 );
257 info!(
259 "whole kernel size: {}",
260 MemSize(usize::MAX - KERNEL_BASE + 1)
261 );
262}
263
/// Human-readable memory size for logging, e.g. `1.50KB`.
///
/// Wraps any integer-like value convertible to `u64`.
#[repr(transparent)]
pub struct MemSize<T>(pub T);

impl<T> fmt::Display for MemSize<T>
where
    T: TryInto<u64> + Copy,
    <T as TryInto<u64>>::Error: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // NOTE(review): panics if the value does not fit in `u64`
        // (e.g. a negative signed integer) — TODO confirm acceptable.
        let mut whole: u64 = self.0.try_into().unwrap();
        let mut frac = 0;
        let mut unit = "B";

        // Divide down by 1024 per unit step, remembering the remainder
        // for the two fractional digits. Stops once below 1024 or at PB.
        for next_unit in ["KB", "MB", "GB", "TB", "PB"] {
            if whole < 1024 {
                break;
            }
            frac = whole % 1024;
            whole /= 1024;
            unit = next_unit;
        }

        // Format the integral part through the formatter so width and
        // alignment flags (e.g. `{:4}`) apply to it, then append the
        // fixed two-digit fraction and the unit suffix.
        whole.fmt(f)?;
        write!(f, ".{:02}", frac * 100 / 1024)?;
        f.write_str(unit)
    }
}

impl<T> fmt::Debug for MemSize<T>
where
    T: TryInto<u64> + Copy,
    <T as TryInto<u64>>::Error: fmt::Debug,
{
    /// `Debug` delegates to `Display` so log output stays compact.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(self, f)
    }
}