kernel/devices/net/
e1000.rs

1mod desc;
2
3use core::mem;
4
5use alloc::{collections::vec_deque::VecDeque, sync::Arc, vec::Vec};
6use desc::{Descriptor, DmaRing, ReceiveDescriptor, TransmitDescriptor};
7use tracing::{info, trace, warn};
8
9use crate::{
10    cpu::{
11        self,
12        idt::{BasicInterruptHandler, InterruptStackFrame64},
13        interrupts::apic,
14    },
15    devices::pci::PciDeviceConfig,
16    memory_management::virtual_space::VirtualSpace,
17    net::{NetworkError, NetworkPacket},
18    sync::{once::OnceLock, spin::mutex::Mutex},
19    utils::{
20        vcell::{RO, RW, WO},
21        Pad,
22    },
23};
24
25use super::{MacAddress, NetworkDevice};
26
/// Global singleton driver instance; installed exactly once by `try_register`
/// and shared between the public API and the interrupt handler.
static E1000: OnceLock<Arc<Mutex<E1000>>> = OnceLock::new();
28
29pub fn get_device() -> Option<&'static dyn NetworkDevice> {
30    E1000.try_get().map(|e1000| e1000 as &dyn NetworkDevice)
31}
32
#[allow(dead_code)]
#[allow(clippy::identity_op)]
#[allow(clippy::eq_op)]
pub mod flags {
    //! Register bit/field definitions for the Intel 8254x (e1000) family.

    // EEPROM read register (EERD) fields
    pub const EERD_ADDR_SHIFT: u32 = 8;
    pub const EERD_DATA_SHIFT: u32 = 16;
    pub const EERD_START: u32 = 1 << 0;
    pub const EERD_DONE: u32 = 1 << 4;
    /// EECD bit: set => 256-word EEPROM, clear => 64-word EEPROM.
    pub const EE_SIZE: u32 = 1 << 9;

    // Device Control (CTRL)
    pub const CTRL_FD: u32 = 1 << 0;
    pub const CTRL_SPEED_10MB: u32 = 0 << 8;
    pub const CTRL_SPEED_100MB: u32 = 1 << 8;
    pub const CTRL_SPEED_1000MB: u32 = 2 << 8;
    // misspelled name kept for existing callers
    pub const CRTL_FORCE_DPLX: u32 = 1 << 12;
    /// Correctly spelled alias for [`CRTL_FORCE_DPLX`].
    pub const CTRL_FORCE_DPLX: u32 = CRTL_FORCE_DPLX;

    // Receive Control (RCTL)
    pub const RCTL_EN: u32 = 1 << 1;
    pub const RCTL_SBP: u32 = 1 << 2;
    pub const RCTL_UPE: u32 = 1 << 3;
    pub const RCTL_MPE: u32 = 1 << 4;
    pub const RCTL_LPE: u32 = 1 << 5;
    pub const RCTL_LBM_NO: u32 = 0 << 6;
    pub const RCTL_LBM_YES: u32 = 3 << 6;
    pub const RCTL_RDMTS_HALF: u32 = 0 << 8;
    pub const RCTL_RDMTS_QUARTER: u32 = 1 << 8;
    pub const RCTL_RDMTS_ONE_EIGHTH: u32 = 2 << 8;
    pub const RCTL_MO_36: u32 = 0 << 12;
    pub const RCTL_MO_35: u32 = 1 << 12;
    pub const RCTL_MO_34: u32 = 2 << 12;
    pub const RCTL_MO_33: u32 = 3 << 12;
    pub const RCTL_BAM: u32 = 1 << 15;
    // buffer size: BSEX (bit 25) combined with BSIZE (bits 17:16)
    pub const RCTL_BSIZE_256: u32 = 0 << 25 | 3 << 16;
    pub const RCTL_BSIZE_512: u32 = 0 << 25 | 2 << 16;
    pub const RCTL_BSIZE_1024: u32 = 0 << 25 | 1 << 16;
    pub const RCTL_BSIZE_2048: u32 = 0 << 25 | 0 << 16;
    pub const RCTL_BSIZE_4096: u32 = 1 << 25 | 3 << 16;
    pub const RCTL_BSIZE_8192: u32 = 1 << 25 | 2 << 16;
    pub const RCTL_BSIZE_16384: u32 = 1 << 25 | 1 << 16;
    pub const RCTL_VLAN_FILTER_EN: u32 = 1 << 18;
    pub const RCTL_CFI_EN: u32 = 1 << 19;
    pub const RCTL_CFI: u32 = 1 << 20;
    pub const RCTL_DPF: u32 = 1 << 22;
    pub const RCTL_PMCF: u32 = 1 << 23;
    pub const RCTL_STRIP_ETH_CRC: u32 = 1 << 26;

    // Transmit Control (TCTL)
    pub const TCTL_EN: u32 = 1 << 1;
    pub const TCTL_PSP: u32 = 1 << 3;
    // collision threshold: TCTL bits 11:4, an 8-bit field
    pub const TCTL_CT_SHIFT: u32 = 4;
    // BUGFIX: mask was 0xF (4 bits); the CT field is 8 bits wide
    pub const TCTL_CT_MASK: u32 = 0xFF << TCTL_CT_SHIFT;
    // collision distance: TCTL bits 21:12, a 10-bit field
    pub const TCTL_COLD_SHIFT: u32 = 12;
    // BUGFIX: mask was 0x3F (6 bits); the COLD field is 10 bits wide
    pub const TCTL_COLD_MASK: u32 = 0x3FF << TCTL_COLD_SHIFT;

    // Interrupt cause / mask bits (ICR / IMS / IMC)
    pub const I_TXDW: u32 = 1 << 0;
    pub const I_TXQE: u32 = 1 << 1;
    pub const I_LSC: u32 = 1 << 2;
    pub const I_RXSEQ: u32 = 1 << 3;
    pub const I_RXDMT0: u32 = 1 << 4;
    pub const I_RXO: u32 = 1 << 6;
    pub const I_RXT0: u32 = 1 << 7;
    pub const I_TXD_LOW: u32 = 1 << 15;
}
101
#[allow(dead_code)]
mod pci_cfg {
    //! Bits of the PCI configuration-space Command register.
    pub const CMD_IO_SPACE: u16 = 0x0001;
    pub const CMD_MEM_SPACE: u16 = 0x0002;
    pub const CMD_BUS_MASTER: u16 = 0x0004;
}
109
/// Memory-mapped register layout of the Intel 8254x (e1000) family.
///
/// Each `Pad<N>` field occupies `N` bytes so that the registers land on the
/// offsets documented in the 8254x developer's manual (noted per field,
/// assuming `Pad<N>` is exactly `N` bytes — the pad sums check out against
/// the documented offsets).
///
/// NOTE(review): `_pad4` (0x82 bytes) ends at offset 0xBE; the next `RW<u32>`
/// only reaches its expected 0xC0 offset via the compiler's implicit 2-byte
/// alignment padding. Confirm this is intentional (an explicit `Pad<0x84>`
/// would be less fragile).
#[repr(C, align(8))]
struct E1000Mmio {
    control: RW<u32>, // CTRL 0x0000
    _pad0: Pad<4>,
    status: RO<u32>, // STATUS 0x0008
    _pad1: Pad<4>,
    eecd: RW<u32>, // EECD 0x0010
    eerd: RW<u32>, // EERD 0x0014
    ctrl_ext: RW<u32>, // CTRL_EXT 0x0018
    flash: RW<u32>, // FLA 0x001C
    mdi_control: RW<u32>, // MDIC 0x0020
    _pad2: Pad<4>,
    flow_control_addr_low: RW<u32>, // FCAL 0x0028
    flow_control_addr_high: RW<u32>, // FCAH 0x002C
    flow_control_type: RW<u32>, // FCT 0x0030
    _pad3: Pad<4>,
    vlan_ethertype: RW<u32>, // VET 0x0038
    _pad4: Pad<0x82>, // see struct-level NOTE on implicit alignment
    interrupt_cause_read: RW<u32>, // ICR 0x00C0
    interrupt_throttling: RW<u32>, // ITR 0x00C4
    interrupt_cause_set: RW<u32>, // ICS 0x00C8
    _pad5: Pad<4>,
    interrupt_mask_set: RW<u32>, // IMS 0x00D0
    _pad6: Pad<4>,
    interrupt_mask_clear: WO<u32>, // IMC 0x00D8
    _pad7: Pad<0x24>,
    receive_control: RW<u32>, // RCTL 0x0100
    _pad8: Pad<0x6C>,
    flow_control_transmit_timer: RW<u32>, // FCTTV 0x0170
    _pad9: Pad<4>,
    transmit_config_word: RW<u32>, // TXCW 0x0178
    _pad10: Pad<4>,
    receive_config_word: RO<u32>, // RXCW 0x0180
    _pad11: Pad<0x27C>,
    transmit_control: RW<u32>, // TCTL 0x0400
    _pad12: Pad<0x9FC>,
    led_control: RW<u32>, // LEDCTL 0x0E00
    _pad13: Pad<0x160C>,
    receive_data_fifo_head: RW<u32>, // RDFH 0x2410
    _pad14: Pad<0x4>,
    receive_data_fifo_tail: RW<u32>, // RDFT 0x2418
    _pad15: Pad<0x4>,
    receive_data_fifo_head_saved: RW<u32>, // RDFHS 0x2420
    _pad16: Pad<0x4>,
    receive_data_fifo_tail_saved: RW<u32>, // RDFTS 0x2428
    _pad17: Pad<0x4>,
    receive_data_fifo_packet_count: RW<u32>, // RDFPC 0x2430
    _pad18: Pad<0x3CC>,
    receive_descriptor_base_low: RW<u32>, // RDBAL 0x2800
    receive_descriptor_base_high: RW<u32>, // RDBAH 0x2804
    receive_descriptor_length: RW<u32>, // RDLEN 0x2808
    _pad19: Pad<0x4>,
    receive_descriptor_head: RW<u32>, // RDH 0x2810
    _pad20: Pad<0x4>,
    receive_descriptor_tail: RW<u32>, // RDT 0x2818
    _pad21: Pad<0x4>,
    receive_delay_timer: RW<u32>, // RDTR 0x2820
    _pad22: Pad<0x8>,
    receive_interrupt_abs_delay_timer: RW<u32>, // RADV 0x282C
    _pad23: Pad<0xFD0>,
    transmit_descriptor_base_low: RW<u32>, // TDBAL 0x3800
    transmit_descriptor_base_high: RW<u32>, // TDBAH 0x3804
    transmit_descriptor_length: RW<u32>, // TDLEN 0x3808
    _pad24: Pad<0x4>,
    transmit_descriptor_head: RW<u32>, // TDH 0x3810
    _pad25: Pad<0x4>,
    transmit_descriptor_tail: RW<u32>, // TDT 0x3818
    _pad26: Pad<0x4>,
    transmit_descriptor_interrupt_delay: RW<u32>, // TIDV 0x3820
    _pad27: Pad<0x19DC>,
    multicast_table_array: [RW<u32>; 128], // MTA 0x5200..=0x53FC
    receive_addresses: [(RW<u32>, RW<u32>); 16], // RAL/RAH pairs from 0x5400
}
183
/// Driver state for a single e1000 NIC.
struct E1000 {
    // mapped device register file
    mmio: VirtualSpace<E1000Mmio>,
    // EEPROM size in 16-bit words (256 or 64, derived from EECD)
    eeprom_size: u16,

    // descriptor rings shared with the hardware
    recv_ring: DmaRing<ReceiveDescriptor, 128>,
    transmit_ring: DmaRing<TransmitDescriptor, 128>,

    // reassembled frames waiting to be consumed by `receive_packet`
    received_queue: VecDeque<Vec<u8>>,
    // true while the last queued frame has not yet seen its end-of-packet descriptor
    in_middle_of_packet: bool,
}
194
195#[allow(dead_code)]
196impl E1000 {
197    fn new(mmio: VirtualSpace<E1000Mmio>) -> Self {
198        let eecd = mmio.eecd.read();
199        let eeprom_size = if eecd & flags::EE_SIZE != 0 { 256 } else { 64 };
200
201        let mut recv_ring = DmaRing::new();
202        recv_ring.allocate_all_for_hw();
203
204        Self {
205            mmio,
206            eeprom_size,
207            recv_ring,
208            transmit_ring: DmaRing::new(),
209            received_queue: VecDeque::new(),
210            in_middle_of_packet: false,
211        }
212    }
213
214    pub fn read_eeprom(&self, offset: u16) -> u16 {
215        assert!(offset < self.eeprom_size);
216        assert!(offset <= 0xFF);
217
218        let data = (offset as u32) << flags::EERD_ADDR_SHIFT | flags::EERD_START;
219        unsafe { self.mmio.eerd.write(data) };
220
221        while self.mmio.eerd.read() & flags::EERD_DONE == 0 {
222            core::hint::spin_loop();
223        }
224
225        (self.mmio.eerd.read() >> flags::EERD_DATA_SHIFT) as u16
226    }
227
228    pub fn read_mac_address(&self) -> MacAddress {
229        let low = self.read_eeprom(0);
230        let mid = self.read_eeprom(1);
231        let high = self.read_eeprom(2);
232
233        MacAddress([
234            (low & 0xFF) as u8,
235            (low >> 8) as u8,
236            (mid & 0xFF) as u8,
237            (mid >> 8) as u8,
238            (high & 0xFF) as u8,
239            (high >> 8) as u8,
240        ])
241    }
242
243    pub fn init_recv(&self) {
244        // 14.4 Receive Initialization
245        unsafe {
246            assert_eq!(self.recv_ring.physical_ptr() & 0xF, 0);
247            self.mmio
248                .receive_descriptor_base_low
249                .write(self.recv_ring.physical_ptr() as u32);
250            self.mmio
251                .receive_descriptor_base_high
252                .write((self.recv_ring.physical_ptr() >> 32) as u32);
253            self.mmio
254                .receive_descriptor_length
255                .write(self.recv_ring.bytes_len() as u32);
256            self.mmio
257                .receive_descriptor_head
258                .write(self.recv_ring.head() as u32);
259            self.mmio
260                .receive_descriptor_tail
261                .write(self.recv_ring.tail() as u32);
262
263            self.mmio.receive_delay_timer.write(0);
264            self.mmio.receive_interrupt_abs_delay_timer.write(0);
265
266            for i in 0..128 {
267                self.mmio.multicast_table_array[i].write(0);
268            }
269
270            // Enable
271            self.mmio.receive_control.write(
272                flags::RCTL_EN
273                    | flags::RCTL_LPE
274                    | flags::RCTL_BAM
275                    | flags::RCTL_BSIZE_4096
276                    | flags::RCTL_STRIP_ETH_CRC,
277            )
278        };
279    }
280
281    pub fn init_transmit(&self) {
282        // 14.5 Transmit Initialization
283        unsafe {
284            assert_eq!(self.recv_ring.physical_ptr() & 0xF, 0);
285            self.mmio
286                .transmit_descriptor_base_low
287                .write(self.transmit_ring.physical_ptr() as u32);
288            self.mmio
289                .transmit_descriptor_base_high
290                .write((self.transmit_ring.physical_ptr() >> 32) as u32);
291            self.mmio
292                .transmit_descriptor_length
293                .write(self.transmit_ring.bytes_len() as u32);
294            self.mmio
295                .transmit_descriptor_head
296                .write(self.transmit_ring.head() as u32);
297            self.mmio
298                .transmit_descriptor_tail
299                .write(self.transmit_ring.tail() as u32);
300
301            self.mmio.transmit_descriptor_interrupt_delay.write(0);
302
303            self.mmio.transmit_control.write(
304                flags::TCTL_EN
305                    | flags::TCTL_PSP
306                    | (0xF << flags::TCTL_CT_SHIFT)
307                    | (0x40 << flags::TCTL_COLD_SHIFT),
308            );
309        }
310    }
311
312    pub fn enable_interrupts(&self) {
313        unsafe {
314            self.mmio.interrupt_mask_set.write(
315                flags::I_LSC
316                    | flags::I_RXSEQ
317                    | flags::I_RXDMT0
318                    | flags::I_RXO
319                    | flags::I_RXT0
320                    | flags::I_TXDW
321                    | flags::I_TXD_LOW,
322            );
323            // clear any pending interrupts
324            self.mmio.interrupt_cause_read.read();
325            self.flush_writes();
326        }
327    }
328
329    pub fn flush_writes(&self) {
330        self.mmio.status.read();
331    }
332
333    pub fn handle_recv(&mut self) {
334        let head = self.mmio.receive_descriptor_head.read() as u16;
335
336        let mut count = 0;
337        while let Some(desc) = self.recv_ring.pop_next(head) {
338            count += 1;
339
340            if self.in_middle_of_packet {
341                self.received_queue
342                    .back_mut()
343                    .expect("No packet in queue")
344                    .extend_from_slice(desc.data());
345            } else {
346                self.received_queue.push_back(desc.data().to_vec());
347            }
348            self.in_middle_of_packet = !desc.is_end_of_packet();
349
350            self.recv_ring.allocate_next_for_hw();
351        }
352
353        let new_tail = self.recv_ring.tail();
354        trace!("Processed {count} descriptors, new tail: {new_tail:x}");
355        unsafe { self.mmio.receive_descriptor_tail.write(new_tail as u32) };
356    }
357
358    pub fn handle_transmit_interrupt(&mut self) {
359        let head = self.mmio.transmit_descriptor_head.read() as u16;
360        // just pop all those that are done, so we can allocate them
361        // later, no need to do any processing here
362        while self.transmit_ring.pop_next(head).is_some() {}
363    }
364
365    pub fn transmit_raw(&mut self, data: &[u8]) {
366        assert!(data.len() < 4096);
367
368        let Some(desc) = self.transmit_ring.allocate_next_for_hw() else {
369            todo!("Transmit queue is full, implement dynamic driver queueing");
370        };
371
372        desc.data_mut(data.len()).copy_from_slice(data);
373        desc.prepare_for_transmit();
374
375        unsafe {
376            self.mmio
377                .transmit_descriptor_tail
378                .write(self.transmit_ring.tail() as u32)
379        };
380
381        self.flush_writes();
382    }
383
384    pub fn transmit_packet(&mut self, packet: &NetworkPacket) -> Result<(), NetworkError> {
385        if packet.size() > 4096 {
386            return Err(NetworkError::PacketTooLarge(packet.size()));
387        }
388
389        let Some(desc) = self.transmit_ring.allocate_next_for_hw() else {
390            todo!("Transmit queue is full, implement dynamic driver queueing");
391        };
392
393        let data = desc.data_mut(packet.size());
394        packet.write_into_buffer(data)?;
395
396        desc.prepare_for_transmit();
397
398        unsafe {
399            self.mmio
400                .transmit_descriptor_tail
401                .write(self.transmit_ring.tail() as u32)
402        };
403
404        self.flush_writes();
405
406        Ok(())
407    }
408
409    pub fn receive_packet(&mut self) -> Option<Vec<u8>> {
410        self.received_queue.pop_front()
411    }
412
413    // might not work depending on the network card
414    pub fn enable_loopback(&self) {
415        unsafe {
416            self.mmio
417                .receive_control
418                .write(self.mmio.receive_control.read() | flags::RCTL_LBM_YES);
419        }
420    }
421
422    pub fn enable_full_duplex(&self) {
423        unsafe {
424            self.mmio
425                .control
426                .write(self.mmio.control.read() | flags::CTRL_FD | flags::CRTL_FORCE_DPLX);
427        }
428    }
429}
430
431impl NetworkDevice for Arc<Mutex<E1000>> {
432    fn mac_address(&self) -> MacAddress {
433        self.lock().read_mac_address()
434    }
435
436    fn send(&self, data: &NetworkPacket) -> Result<(), NetworkError> {
437        self.lock().transmit_packet(data)
438    }
439
440    fn receive_into(&self, packet: &mut NetworkPacket) -> Result<bool, NetworkError> {
441        if let Some(data) = self.lock().receive_packet() {
442            packet.read_from_buffer(&data)?;
443            Ok(true)
444        } else {
445            Ok(false)
446        }
447    }
448}
449
/// Probes a PCI device and, if it is a supported e1000-family NIC, maps its
/// MMIO registers, installs the global driver instance, hooks the interrupt
/// line and initializes the hardware.
///
/// Returns `true` when the device was claimed by this driver.
pub fn try_register(pci_device: &PciDeviceConfig) -> bool {
    // only claim Intel devices with a known-compatible device id
    match (pci_device.vendor_id, pci_device.device_id) {
        // TODO: this excludes (82541xx and 82547GI/EI)
        //       they have a lot of special differences from the rest
        (
            0x8086,
            0x100E..=0x1012
            | 0x1015..=0x1017
            | 0x101D
            | 0x1026..=0x1028
            | 0x1079..=0x107B
            | 0x1107
            | 0x1112,
        ) => {} // allow
        _ => return false,
    }

    // BAR0 must be a memory-mapped BAR
    let Some((mem_base, mem_size, _)) = pci_device.base_address[0].get_memory() else {
        warn!("No valid memory base address");
        return false;
    };

    // the NIC DMAs its descriptor rings, so bus mastering must be enabled
    let mut command = pci_device.read_command();
    if command & pci_cfg::CMD_BUS_MASTER == 0 {
        // enable bus master
        command |= pci_cfg::CMD_BUS_MASTER;
        pci_device.write_command(command);
    }

    // the BAR must cover the whole register file and satisfy
    // `E1000Mmio`'s declared align(8)
    assert!(mem_size >= mem::size_of::<E1000Mmio>());
    assert_ne!(mem_base, 0);
    assert_eq!(mem_base % 8, 0);

    let mmio =
        unsafe { VirtualSpace::<E1000Mmio>::new(mem_base as u64) }.expect("Failed to map MMIO");
    // set mmio first
    // `.ok()` discards the non-Debug error payload so `expect` can be used
    E1000
        .set(Arc::new(Mutex::new(E1000::new(mmio))))
        .ok()
        .expect("Should only be called once");

    // TODO: handle overlapping interrupts correctly
    apic::assign_io_irq(
        interrupt as BasicInterruptHandler,
        pci_device.interrupt_line,
        cpu::cpu(),
    );

    let e1000 = E1000.get().lock();

    info!("MAC address: {:?}", e1000.read_mac_address());

    // NOTE(review): interrupts are unmasked before the rings are programmed;
    // presumably safe because the handler contends on the lock held here —
    // confirm the ordering is intentional.
    e1000.enable_interrupts();
    e1000.init_recv();
    e1000.init_transmit();
    e1000.flush_writes();

    true
}
509
/// Interrupt service routine for the e1000 NIC.
///
/// Reads the interrupt cause register (which per `enable_interrupts` clears
/// pending causes on read), logs noteworthy conditions, then drains the
/// receive ring and reclaims completed transmit descriptors.
extern "x86-interrupt" fn interrupt(_stack_frame: InterruptStackFrame64) {
    let mut e1000 = E1000.get().lock();
    // NOTE(review): this sets bit 0 (TXDW) in the interrupt *mask set*
    // register; it mirrors common example drivers, but confirm it is not
    // meant for the mask-clear register instead.
    unsafe { e1000.mmio.interrupt_mask_set.write(0x1) };
    let cause = e1000.mmio.interrupt_cause_read.read();

    if cause & flags::I_RXO != 0 {
        // Receiver FIFO overrun
        warn!("Receiver FIFO overrun");
    }
    if cause & flags::I_LSC != 0 {
        // Link Status Change
        warn!("Link Status Change");
    }
    if cause & flags::I_RXSEQ != 0 {
        // Receiver Sequence Error
        warn!("Receiver Sequence Error");
    }
    if cause & flags::I_TXD_LOW != 0 {
        // Transmit Descriptor Low Ring
        warn!("Transmit Descriptor Low Ring");
    }

    // drain received frames and reclaim finished transmit descriptors
    // regardless of which cause bit fired
    e1000.handle_recv();
    e1000.handle_transmit_interrupt();

    apic::return_from_interrupt();
}