kernel/devices/net/e1000/desc.rs

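//! Legacy receive and transmit descriptors for the e1000 NIC, plus a
//! fixed-size descriptor ring (`DmaRing`) that is shared with the hardware
//! over DMA.
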
use crate::memory_management::{
    memory_layout::{physical2virtual, virtual2physical},
    physical_page_allocator,
};

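/// Status bits (`STATUS` field) of the legacy receive descriptor.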
#[allow(dead_code)]
mod recv_desc {
    pub const STATUS_DD: u8 = 1 << 0;
    pub const STATUS_EOP: u8 = 1 << 1;
    pub const STATUS_IGNORE_CHECKSUM: u8 = 1 << 2;
    pub const STATUS_VLAN: u8 = 1 << 3;
    pub const STATUS_TCP_CHECKSUM: u8 = 1 << 5;
    pub const STATUS_IP_CHECKSUM: u8 = 1 << 6;
    pub const STATUS_PIF: u8 = 1 << 7;
}

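/// Command (`CMD`) and status (`STATUS`) bits of the legacy transmit
/// descriptor.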
#[allow(dead_code)]
mod transmit_desc {
    pub const CMD_EOP: u8 = 1 << 0;
    pub const CMD_INS_FCS: u8 = 1 << 1;
    pub const CMD_INS_CHECKSUM: u8 = 1 << 2;
    pub const CMD_REPORT_STATUS: u8 = 1 << 3;
    pub const CMD_REPORT_PACKET_SENT: u8 = 1 << 4;
    pub const CMD_VLAN_EN: u8 = 1 << 6;
    pub const CMD_INTERRUPT_DELAY: u8 = 1 << 7;

    pub const STATUS_DD: u8 = 1 << 0;
    pub const STATUS_EXCESS_COLLISIONS: u8 = 1 << 1;
    pub const STATUS_LATE_COLLISION: u8 = 1 << 2;
    pub const STATUS_UNDERRUN: u8 = 1 << 3;
}

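/// Common interface over receive and transmit descriptors, letting `DmaRing`
/// own and recycle either kind.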
pub trait Descriptor {
    /// The packet buffer this descriptor currently points to.
    fn data(&self) -> &[u8];
    /// Clear the status bits so the descriptor can be handed back to hardware.
    fn reset(&mut self);
    /// Allocate and attach a fresh packet buffer.
    fn init(&mut self);
    /// Whether the hardware has set the descriptor-done (`DD`) bit.
    fn is_hw_done(&self) -> bool;
}

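/// Legacy receive descriptor, `#[repr(C)]` so its 16-byte layout matches what
/// the hardware writes.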
#[repr(C)]
#[derive(Clone, Copy, Debug)]
pub struct ReceiveDescriptor {
    address: u64,
    len: u16,
    checksum: u16,
    status: u8,
    errors: u8,
    special: u16,
}

impl ReceiveDescriptor {
    /// Whether this descriptor holds the last buffer of a packet (`EOP` set).
    pub fn is_end_of_packet(&self) -> bool {
        self.status & recv_desc::STATUS_EOP != 0
    }
}

impl Descriptor for ReceiveDescriptor {
    fn data(&self) -> &[u8] {
        // the buffer is a single physical page, so `len` can never exceed it
        assert!(self.len <= 4096);
        assert!(self.address != 0);
        unsafe {
            core::slice::from_raw_parts(
                physical2virtual(self.address) as *const u8,
                self.len as usize,
            )
        }
    }

    fn reset(&mut self) {
        assert!(self.address != 0);
        self.status = 0;
    }

    fn init(&mut self) {
        self.status = 0;
        // attach a freshly allocated, zeroed physical page as the packet buffer
        self.address =
            virtual2physical(unsafe { physical_page_allocator::alloc_zeroed() } as usize);
    }

    fn is_hw_done(&self) -> bool {
        self.status & recv_desc::STATUS_DD != 0
    }
}

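/// Legacy transmit descriptor, `#[repr(C)]` so its 16-byte layout matches what
/// the hardware reads.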
#[repr(C)]
#[derive(Clone, Copy, Debug)]
pub struct TransmitDescriptor {
    address: u64,
    len: u16,
    checksum_offset: u8,
    cmd: u8,
    status: u8,
    checksum_start: u8,
    special: u16,
}

#[allow(dead_code)]
impl TransmitDescriptor {
    /// Borrow the first `len` bytes of the packet buffer for writing, and
    /// record `len` as the descriptor's length.
    pub fn data_mut(&mut self, len: usize) -> &mut [u8] {
        assert!(len <= 4096);
        assert!(self.address != 0);

        self.len = len as u16;
        unsafe {
            core::slice::from_raw_parts_mut(
                physical2virtual(self.address) as *mut u8,
                self.len as usize,
            )
        }
    }

    /// Mark the descriptor ready to send: end of packet, report status back,
    /// and let the hardware insert the Ethernet FCS.
    pub fn prepare_for_transmit(&mut self) {
        assert!(self.len <= 4096);
        assert!(self.address != 0);

        self.cmd =
            transmit_desc::CMD_EOP | transmit_desc::CMD_REPORT_STATUS | transmit_desc::CMD_INS_FCS;
    }
}

impl Descriptor for TransmitDescriptor {
    fn data(&self) -> &[u8] {
        assert!(self.len <= 4096);
        assert!(self.address != 0);
        unsafe {
            core::slice::from_raw_parts(
                physical2virtual(self.address) as *const u8,
                self.len as usize,
            )
        }
    }

    fn reset(&mut self) {
        assert!(self.address != 0);
        self.status = 0;
    }

    fn init(&mut self) {
        self.status = 0;
        self.address =
            virtual2physical(unsafe { physical_page_allocator::alloc_zeroed() } as usize);
    }

    fn is_hw_done(&self) -> bool {
        self.status & transmit_desc::STATUS_DD != 0
    }
}

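/// A ring of `N` descriptors shared with the NIC over DMA.
///
/// `head` is the next descriptor the driver expects the hardware to finish,
/// and `tail` is the next descriptor to hand over; one slot is always kept
/// empty so a full ring is distinguishable from an empty one.
///
/// A minimal receive-path sketch; `read_rx_head` (reading the NIC's receive
/// head register) and `process_packet` are hypothetical stand-ins:
///
/// ```ignore
/// let mut rx: DmaRing<ReceiveDescriptor, 64> = DmaRing::new();
/// rx.allocate_all_for_hw(); // give every buffer to the NIC
///
/// // on a receive interrupt:
/// let hw_head = read_rx_head();
/// while let Some(desc) = rx.pop_next(hw_head) {
///     if desc.is_end_of_packet() {
///         process_packet(desc.data());
///     }
/// }
/// ```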
pub struct DmaRing<T: Descriptor + 'static, const N: usize> {
    ring: &'static mut [T],
    head: u16,
    tail: u16,
}

#[allow(dead_code)]
impl<T: Descriptor, const N: usize> DmaRing<T, N> {
    pub fn new() -> Self {
        // the hardware requires the ring length to be a multiple of 8
        // descriptors (128 bytes)
        assert!(N.is_multiple_of(8));
        // the whole ring must fit inside the single page we allocate below
        assert!(N * core::mem::size_of::<T>() <= 4096);

        let ring: &mut [T] = unsafe {
            core::slice::from_raw_parts_mut(physical_page_allocator::alloc_zeroed().cast(), N)
        };

        // allocate a packet buffer for every descriptor
        for elem in ring.iter_mut() {
            elem.init();
        }

        Self {
            ring,
            head: 0,
            tail: 0,
        }
    }

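    /// Free descriptors between the software tail and the hardware head
    /// `hw_head`; one slot is always left unused so a full ring can be told
    /// apart from an empty one.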
    pub fn queue_len(&self, hw_head: u16) -> usize {
        // (hw_head - tail - 1) mod N; `+ N` avoids underflow, since u16
        // wrapping at 65536 is only congruent mod N when N divides 65536
        (hw_head as usize + N - self.tail as usize - 1) % N
    }

    pub const fn bytes_len(&self) -> usize {
        N * core::mem::size_of::<T>()
    }

    /// Physical base address of the ring, suitable for programming into the
    /// NIC's descriptor base-address registers.
    pub fn physical_ptr(&self) -> u64 {
        virtual2physical(self.ring.as_ptr() as usize)
    }

    pub fn head(&self) -> u16 {
        self.head
    }

    pub fn tail(&self) -> u16 {
        self.tail
    }

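    /// Take the next descriptor the hardware has finished with, advancing the
    /// software head past it; `None` if we have caught up with `hw_head` or
    /// the `DD` bit is not yet set.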
    pub fn pop_next(&mut self, hw_head: u16) -> Option<&mut T> {
        // only descriptors between our head and the hardware's head can be
        // ready, and even those only once the hardware has set `DD`
        if hw_head == self.head {
            None
        } else if self.ring[self.head as usize].is_hw_done() {
            let res = Some(&mut self.ring[self.head as usize]);
            self.head = (self.head + 1) % N as u16;
            res
        } else {
            None
        }
    }

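    /// Hand the next free descriptor back to the hardware, advancing the
    /// software tail; `None` if the ring is full.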
    pub fn allocate_next_for_hw(&mut self) -> Option<&mut T> {
        // the ring is full when advancing the tail would run into the head
        if (self.tail + 1) % N as u16 == self.head {
            None
        } else {
            let res = &mut self.ring[self.tail as usize];
            self.tail = (self.tail + 1) % N as u16;
            res.reset();
            Some(res)
        }
    }

    /// Hand every free descriptor to the hardware, e.g. to prime the receive
    /// ring right after creation.
    pub fn allocate_all_for_hw(&mut self) {
        while self.allocate_next_for_hw().is_some() {}
    }
}