1use core::{mem, ptr::addr_of};
2
3use crate::{
4 memory_management::{
5 memory_layout::{
6 is_aligned, INTR_STACK_BASE, INTR_STACK_EMPTY_SIZE, INTR_STACK_ENTRY_SIZE,
7 INTR_STACK_SIZE, INTR_STACK_TOTAL_SIZE, PAGE_4K, PROCESS_KERNEL_STACK_END,
8 },
9 virtual_memory_mapper::{self, VirtualMemoryMapEntry},
10 },
11 sync::spin::mutex::Mutex,
12};
13
/// Global descriptor table state, shared behind a spinlock.
static GDT: Mutex<GlobalDescriptorManager> = Mutex::new(GlobalDescriptorManager::empty());
/// The Task State Segment referenced by the GDT's TSS descriptor.
// NOTE(review): `static mut` access is only sound while initialization is
// single-threaded — confirm no other CPU or interrupt path touches this
// during boot.
static mut TSS: TaskStateSegment = TaskStateSegment::empty();

/// Privilege level (DPL/RPL) used for kernel segments.
pub const KERNEL_RING: u8 = 0;
/// Privilege level (DPL/RPL) used for user segments.
pub const USER_RING: u8 = 3;
20
/// A GDT segment selector value, as loaded into a segment register.
///
/// Holds `index << 3`; the low three bits (RPL and the TI flag) are left
/// clear by [`Self::from_index`].
#[repr(transparent)]
#[derive(Clone, Copy, Debug, Default)]
pub struct SegmentSelector(pub u64);

impl SegmentSelector {
    /// Builds a selector referring to GDT slot `index` (RPL = 0, TI = 0).
    ///
    /// # Panics
    /// Panics if `index` does not fit the selector's 13-bit index field.
    pub const fn from_index(index: usize) -> Self {
        assert!(index < 8192);
        SegmentSelector((index as u64) << 3)
    }
}
31
32pub fn init_kernel_gdt() {
34 let mut manager = GDT.lock();
35 if manager.gdt.index != 1 {
36 panic!("GDT already initialized");
37 }
38
39 manager.kernel_code_seg = SegmentSelector::from_index(unsafe {
40 manager.gdt.push_user(UserDescriptorEntry {
41 access: flags::PRESENT | flags::CODE | flags::USER | flags::dpl(KERNEL_RING),
42 flags_and_limit: flags::LONG_MODE,
43 ..UserDescriptorEntry::empty()
44 })
45 });
46 manager.user_code_seg = SegmentSelector::from_index(unsafe {
47 manager.gdt.push_user(UserDescriptorEntry {
48 access: flags::PRESENT | flags::CODE | flags::USER | flags::dpl(USER_RING),
49 flags_and_limit: flags::LONG_MODE,
50 ..UserDescriptorEntry::empty()
51 })
52 });
53 manager.kernel_data_seg = SegmentSelector::from_index(unsafe {
54 manager.gdt.push_user(UserDescriptorEntry {
55 access: flags::PRESENT | flags::USER | flags::WRITE | flags::dpl(KERNEL_RING),
56 ..UserDescriptorEntry::empty()
57 })
58 });
59 manager.user_data_seg = SegmentSelector::from_index(unsafe {
60 manager.gdt.push_user(UserDescriptorEntry {
61 access: flags::PRESENT | flags::USER | flags::WRITE | flags::dpl(USER_RING),
62 ..UserDescriptorEntry::empty()
63 })
64 });
65
66 for i in 0..7 {
71 unsafe {
72 let stack_start_virtual =
74 INTR_STACK_BASE + (i * INTR_STACK_ENTRY_SIZE) + INTR_STACK_EMPTY_SIZE;
75 let stack_end_virtual = stack_start_virtual + INTR_STACK_SIZE;
76 assert!(stack_end_virtual <= INTR_STACK_BASE + INTR_STACK_TOTAL_SIZE);
77 if i == 6 {
78 assert_eq!(stack_end_virtual, INTR_STACK_BASE + INTR_STACK_TOTAL_SIZE);
80 }
81 assert!(
83 is_aligned(INTR_STACK_SIZE, PAGE_4K) && is_aligned(stack_start_virtual, PAGE_4K)
84 );
85
86 virtual_memory_mapper::map_kernel(&VirtualMemoryMapEntry {
88 virtual_address: stack_start_virtual,
89 physical_address: None,
90 size: INTR_STACK_SIZE,
91 flags: virtual_memory_mapper::flags::PTE_WRITABLE,
92 });
93
94 TSS.ist[i] = stack_end_virtual as u64 - 8;
97 }
98
99 unsafe { TSS.rsp[KERNEL_RING as usize] = PROCESS_KERNEL_STACK_END as u64 - 8 };
102 }
103
104 let tss_ptr = addr_of!(TSS) as u64;
105
106 manager.tss_seg = SegmentSelector::from_index(unsafe {
107 manager.gdt.push_system(SystemDescriptorEntry {
108 limit: (mem::size_of::<TaskStateSegment>() - 1) as u16,
109 access: flags::PRESENT | flags::TSS_TYPE,
110 base_low: (tss_ptr & 0xFFFF) as u16,
111 base_middle: ((tss_ptr >> 16) & 0xFF) as u8,
112 base_high: ((tss_ptr >> 24) & 0xFF) as u8,
113 base_upper: ((tss_ptr >> 32) & 0xFFFFFFFF) as u32,
114 ..SystemDescriptorEntry::empty()
115 })
116 });
117 drop(manager);
118 GDT.run_with(|manager| {
120 manager.gdt.apply_lgdt();
121
122 manager.load_kernel_segments();
123 manager.load_tss();
124 });
125}
126
127pub fn get_user_code_seg_index() -> SegmentSelector {
128 GDT.run_with(|manager| manager.user_code_seg)
129}
130
131pub fn get_user_data_seg_index() -> SegmentSelector {
132 GDT.run_with(|manager| manager.user_data_seg)
133}
/// Bit definitions for GDT descriptor bytes (x86-64 descriptor format).
mod flags {
    /// `flags_and_limit` bit 5 ("L"): 64-bit code segment.
    pub const LONG_MODE: u8 = 1 << 5;

    /// Access byte bit 7 ("P"): segment present.
    pub const PRESENT: u8 = 1 << 7;
    /// Access byte bit 3: executable (code) segment.
    pub const CODE: u8 = 1 << 3;
    /// Access byte bit 4 ("S"): code/data descriptor rather than a system
    /// descriptor — set for both kernel and user entries, despite the name.
    pub const USER: u8 = 1 << 4;
    /// Access byte bit 1: writable (data) / readable (code).
    pub const WRITE: u8 = 1 << 1;
    /// System descriptor type 0b1001: available 64-bit TSS.
    pub const TSS_TYPE: u8 = 0b1001;
    /// Places `dpl` into the access byte's DPL field (bits 5-6).
    pub const fn dpl(dpl: u8) -> u8 {
        dpl << 5
    }
}
148
/// A normal (non-system) 8-byte GDT descriptor, used for code and data
/// segments. `repr(C, packed(4))` pins the hardware field layout.
#[repr(C, packed(4))]
#[derive(Default, Clone, Copy)]
struct UserDescriptorEntry {
    limit_low: u16,
    base_low: u16,
    base_middle: u8,
    access: u8,
    flags_and_limit: u8,
    base_high: u8,
}

impl UserDescriptorEntry {
    /// An all-zero descriptor; intended as the base for struct-update
    /// syntax when building real entries.
    pub const fn empty() -> Self {
        UserDescriptorEntry {
            access: 0,
            flags_and_limit: 0,
            limit_low: 0,
            base_low: 0,
            base_middle: 0,
            base_high: 0,
        }
    }
}
175
/// A 16-byte system descriptor (used here for the TSS). In long mode it
/// occupies two consecutive GDT slots; `repr(C, packed(4))` pins the
/// hardware field layout.
#[repr(C, packed(4))]
#[derive(Default, Clone, Copy)]
struct SystemDescriptorEntry {
    limit: u16,
    base_low: u16,
    base_middle: u8,
    access: u8,
    flags_and_limit: u8,
    base_high: u8,
    base_upper: u32,
    zero: u32,
}

impl SystemDescriptorEntry {
    /// An all-zero descriptor; intended as the base for struct-update
    /// syntax when building real entries.
    pub const fn empty() -> Self {
        SystemDescriptorEntry {
            access: 0,
            flags_and_limit: 0,
            limit: 0,
            base_low: 0,
            base_middle: 0,
            base_high: 0,
            base_upper: 0,
            zero: 0,
        }
    }
}
206
/// The 64-bit Task State Segment (104 bytes).
///
/// In long mode the TSS holds no task context; it supplies the
/// privilege-change stack pointers (`rsp`), the Interrupt Stack Table
/// (`ist`), and the I/O permission bitmap offset (`iomap_base`).
#[repr(C, packed(4))]
struct TaskStateSegment {
    reserved: u32,
    /// Stack pointers loaded on a privilege change to rings 0-2.
    rsp: [u64; 3],
    reserved2: u64,
    /// Interrupt Stack Table entries, selectable from IDT descriptors.
    ist: [u64; 7],
    reserved3: u64,
    reserved4: u16,
    /// Offset of the I/O permission bitmap from the TSS base.
    iomap_base: u16,
}

impl TaskStateSegment {
    /// A zeroed TSS with the I/O permission bitmap disabled.
    pub const fn empty() -> Self {
        Self {
            reserved: 0,
            rsp: [0; 3],
            reserved2: 0,
            ist: [0; 7],
            reserved3: 0,
            reserved4: 0,
            // FIX: previously 0, which placed the I/O bitmap at the start
            // of the TSS itself; since those bytes are zero, a CPL-3
            // IN/OUT to a low port would have been *permitted* (a clear
            // bitmap bit grants access). Pointing the bitmap past the TSS
            // limit (limit = size - 1) makes every bitmap lookup fault,
            // denying all user-mode port I/O.
            iomap_base: mem::size_of::<TaskStateSegment>() as u16,
        }
    }
}
234
/// Memory operand for the `lgdt` instruction: a 16-bit limit followed
/// immediately by the 64-bit base address (hence `packed(2)`).
#[repr(C, packed(2))]
pub(super) struct GlobalDescriptorTablePointer {
    limit: u16,
    base: *const GlobalDescriptorTable,
}
240
/// Owns the GDT plus the selectors handed out for each pushed descriptor.
struct GlobalDescriptorManager {
    gdt: GlobalDescriptorTable,
    // The selectors below are 0 (the null selector) until
    // `init_kernel_gdt` pushes the corresponding descriptors.
    kernel_code_seg: SegmentSelector,
    user_code_seg: SegmentSelector,
    kernel_data_seg: SegmentSelector,
    user_data_seg: SegmentSelector,
    tss_seg: SegmentSelector,
}
251
impl GlobalDescriptorManager {
    /// An empty manager: zeroed table (next slot = 1) and null selectors.
    /// Selector 0 doubles as "not yet initialized" in the load methods.
    pub const fn empty() -> Self {
        Self {
            gdt: GlobalDescriptorTable::empty(),
            kernel_code_seg: SegmentSelector::from_index(0),
            kernel_data_seg: SegmentSelector::from_index(0),
            user_code_seg: SegmentSelector::from_index(0),
            user_data_seg: SegmentSelector::from_index(0),
            tss_seg: SegmentSelector::from_index(0),
        }
    }

    /// Loads CS and the data segment registers with the kernel selectors.
    ///
    /// # Panics
    /// Panics if the kernel code selector has not been initialized yet.
    pub fn load_kernel_segments(&self) {
        assert_ne!(self.kernel_code_seg.0, 0);
        // SAFETY: the selectors were pushed into this GDT; the table is
        // expected to be active (`lgdt` applied) before this runs — TODO
        // confirm callers uphold that ordering.
        unsafe {
            super::set_cs(self.kernel_code_seg);

            super::set_data_segments(self.kernel_data_seg);
        }
    }

    /// Loads the task register (`ltr`) with the TSS selector.
    ///
    /// # Panics
    /// Panics if the TSS descriptor has not been pushed yet.
    pub fn load_tss(&self) {
        assert_ne!(self.tss_seg.0, 0);
        // SAFETY: requires a valid TSS descriptor at `tss_seg` in the
        // currently loaded GDT.
        unsafe {
            super::ltr(self.tss_seg);
        }
    }
}
282
/// The raw descriptor table: 8 slots of 8 bytes each, plus the index of
/// the next free slot. Slot 0 always stays the null descriptor.
#[repr(C, packed(16))]
struct GlobalDescriptorTable {
    data: [u64; 8],
    index: usize,
}
288
impl GlobalDescriptorTable {
    /// A table containing only the null descriptor; `index` starts at 1
    /// because slot 0 must remain null.
    const fn empty() -> Self {
        Self {
            data: [0; 8],
            index: 1,
        }
    }

    /// Appends an 8-byte code/data descriptor and returns its slot index.
    ///
    /// # Safety
    /// The caller must ensure `entry` encodes a valid descriptor: its raw
    /// bits are installed verbatim and will be interpreted by the CPU.
    /// Panics (slice index out of bounds) if the table is full.
    unsafe fn push_user(&mut self, entry: UserDescriptorEntry) -> usize {
        assert_eq!(mem::size_of::<UserDescriptorEntry>(), 8);
        let index = self.index;
        self.index += 1;
        // SAFETY: size asserted above; the struct is `repr(C, packed)`,
        // so its bytes are exactly the 8-byte descriptor image.
        self.data[index] = core::mem::transmute::<UserDescriptorEntry, u64>(entry);

        index
    }

    /// Appends a 16-byte system descriptor (e.g. the TSS), consuming two
    /// consecutive slots, and returns the index of the first.
    ///
    /// # Safety
    /// Same contract as [`Self::push_user`].
    unsafe fn push_system(&mut self, entry: SystemDescriptorEntry) -> usize {
        assert_eq!(mem::size_of::<SystemDescriptorEntry>(), 16);
        // SAFETY: size asserted above; `repr(C, packed)` fixes the layout
        // to the two-slot hardware descriptor image.
        let data = core::mem::transmute::<SystemDescriptorEntry, [u64; 2]>(entry);

        let index = self.index;
        self.index += 2;
        self.data[index] = data[0];
        self.data[index + 1] = data[1];
        index
    }

    /// Installs this table into the CPU with `lgdt`. Takes `&'static self`
    /// because the CPU keeps referencing the table after the call returns.
    pub fn apply_lgdt(&'static self) {
        // GDTR limit is the byte size of the used entries, minus 1
        // (limits are inclusive).
        let size_used = self.index * mem::size_of::<u64>() - 1;
        let gdt_ptr = GlobalDescriptorTablePointer {
            limit: size_used as u16,
            base: self,
        };

        // SAFETY: `gdt_ptr` describes a 'static, initialized table.
        unsafe {
            super::lgdt(&gdt_ptr);
        }
    }
}