// kernel/devices/clock/tsc.rs
use core::sync::atomic::{AtomicU64, Ordering};
2
3use tracing::info;
4
5use crate::{cpu, devices::clock::NANOS_PER_SEC};
6
7use super::ClockDevice;
8
/// Number of fractional bits in the fixed-point `ns/cycle` ratio.
const NS_SCALE_SHIFT: u8 = 32;

/// Converts a TSC cycle count to nanoseconds using a fixed-point
/// nanoseconds-per-cycle factor (scaled by `1 << NS_SCALE_SHIFT`).
///
/// The multiplication is widened to 128 bits so large cycle counts cannot
/// overflow before the scale shift is applied.
const fn cycles_to_ns(cycles: u64, nanos_per_cycle_scaled: u64) -> u64 {
    let wide = cycles as u128 * nanos_per_cycle_scaled as u128;
    (wide >> NS_SCALE_SHIFT) as u64
}
17
/// A paired reading of the reference clock and the TSC, captured as close
/// together as possible so both values describe (approximately) the same
/// instant.
struct SyncPoint {
    // Reference-clock time in nanoseconds (seconds * NANOS_PER_SEC + nanos).
    nanos: u64,
    // TSC cycle count at approximately the same instant as `nanos`.
    cycles: u64,
}
22
/// Clock device backed by the CPU timestamp counter (TSC), calibrated
/// against another, already-working `ClockDevice`.
pub struct Tsc {
    // Nanosecond offset such that `start_time + cycles_to_ns(tsc)` yields the
    // reference clock's time. Stored with wrapping arithmetic (see
    // `calibrate`), read back with `wrapping_add` in `time_nanos_since_start`.
    start_time: AtomicU64,
    // Nanoseconds per TSC cycle in fixed point, scaled by
    // `1 << NS_SCALE_SHIFT`. Written by `calibrate`.
    nanos_per_cycle_scaled: AtomicU64,
    // Minimum observed cost, in cycles, of a single `cpu::read_tsc` call;
    // measured once in `new` and used to de-bias latency measurements.
    rd_tsc_call_latency: u64,
}
33
34impl Tsc {
35 pub fn new(base: &dyn ClockDevice) -> Option<Self> {
36 if unsafe { cpu::cpuid::cpuid!(cpu::cpuid::FN_FEAT).edx } & cpu::cpuid::FEAT_EDX_TSC == 0 {
37 return None;
38 }
39 let mut rd_tsc_call_latency = u64::MAX;
40 for _ in 0..100 {
41 let t1 = unsafe { cpu::read_tsc() };
42 let t2 = unsafe { cpu::read_tsc() };
43 rd_tsc_call_latency = rd_tsc_call_latency.min(t2 - t1);
44 }
45
46 let tsc = Tsc {
47 start_time: AtomicU64::new(0),
48 nanos_per_cycle_scaled: AtomicU64::new(0),
49 rd_tsc_call_latency,
50 };
51 tsc.calibrate(base);
52 Some(tsc)
53 }
54
55 fn get_device_delay(&self, base: &dyn ClockDevice) -> u64 {
56 let mut device_latency = u64::MAX;
58 for _ in 0..100 {
59 let t1 = unsafe { cpu::read_tsc() };
60 let _ = base.get_time();
61 let t2 = unsafe { cpu::read_tsc() };
62 device_latency = device_latency.min(t2 - t1);
63 }
64 device_latency -= self.rd_tsc_call_latency;
66 device_latency
67 }
68
69 fn get_sync_time_point(&self, base: &dyn ClockDevice, device_latency: u64) -> SyncPoint {
70 let good_latency = device_latency + device_latency / 2;
71 let mut min_cycles = u64::MAX;
72
73 let mut ns = 0;
74 let mut cycles = 0;
75
76 for _ in 0..10 {
77 let t1 = unsafe { cpu::read_tsc() };
78 let device_time = base.get_time();
79 let t2 = unsafe { cpu::read_tsc() };
80 let diff_tsc = t2 - t1;
81 if diff_tsc >= min_cycles {
82 continue;
83 }
84 min_cycles = diff_tsc;
85
86 ns = device_time.seconds * NANOS_PER_SEC + device_time.nanoseconds;
87 cycles = t1 + self.rd_tsc_call_latency;
88
89 if diff_tsc <= good_latency {
90 break;
91 }
92 }
93
94 SyncPoint { nanos: ns, cycles }
95 }
96
97 pub fn calibrate(&self, base: &dyn ClockDevice) {
121 let device_latency = self.get_device_delay(base);
122
123 let granularity = base.granularity();
124 assert!(granularity > 0);
125
126 let sleep_time = (granularity * 1000).clamp(1_000_000, NANOS_PER_SEC);
128 info!("Calibrating TSC with sleep time: {}ns", sleep_time);
129
130 let start_point = self.get_sync_time_point(base, device_latency);
131 {
133 let mut time = base.get_time();
134 let start_ns = time.seconds * NANOS_PER_SEC + time.nanoseconds;
135 while time.seconds * NANOS_PER_SEC + time.nanoseconds - start_ns < sleep_time {
136 time = base.get_time();
137 }
138 }
139 let end_point = self.get_sync_time_point(base, device_latency);
140
141 let ns_diff = end_point.nanos - start_point.nanos;
142 let cycles_diff = end_point.cycles - start_point.cycles;
143
144 let scaled_ns_per_cycle = ((ns_diff as u128) << NS_SCALE_SHIFT) / cycles_diff as u128;
145 assert!(scaled_ns_per_cycle.leading_zeros() >= 64,
146 "scaled_ns_per_cycle: {scaled_ns_per_cycle:#X} is too large, i.e. `ns/cycles` is more than `{NS_SCALE_SHIFT}` bits");
147 let scaled_ns_per_cycle = scaled_ns_per_cycle as u64;
148
149 info!(
150 "TSC calibrated, CPU running at {:.1}Hz",
151 1_000_000_000.0 / (scaled_ns_per_cycle as f64 / ((1u128 << NS_SCALE_SHIFT) as f64))
152 );
153
154 let start_ns = start_point
155 .nanos
156 .wrapping_sub(cycles_to_ns(start_point.cycles, scaled_ns_per_cycle));
157 self.start_time.store(start_ns, Ordering::Relaxed);
158 self.nanos_per_cycle_scaled
159 .store(scaled_ns_per_cycle, Ordering::Relaxed);
160 }
161
162 #[allow(dead_code)]
163 fn recalibrate(&self, base: &dyn ClockDevice) {
164 let device_latency = self.get_device_delay(base);
165
166 let end_point = self.get_sync_time_point(base, device_latency);
167
168 let expected_nanos = self.time_nanos_since_start(end_point.cycles);
169 let diff = expected_nanos - end_point.nanos;
170
171 if diff > 50_000_000 {
173 info!("TSC recalibration needed, diff: {}ns", diff);
174 self.calibrate(base);
175 }
176 let start_ns = end_point
177 .nanos
178 .wrapping_sub(self.cycles_to_time_nanos(end_point.cycles));
179 self.start_time.store(start_ns, Ordering::Relaxed);
180 }
181
182 fn time_nanos_since_start(&self, cycles: u64) -> u64 {
183 self.start_time
184 .load(Ordering::Relaxed)
185 .wrapping_add(self.cycles_to_time_nanos(cycles))
186 }
187
188 fn cycles_to_time_nanos(&self, cycles: u64) -> u64 {
189 cycles_to_ns(cycles, self.nanos_per_cycle_scaled.load(Ordering::Relaxed))
190 }
191}
192
193impl ClockDevice for Tsc {
194 fn name(&self) -> &'static str {
195 "TSC"
196 }
197
198 fn get_time(&self) -> super::ClockTime {
199 let tsc = unsafe { cpu::read_tsc() };
200
201 let nanos = self.time_nanos_since_start(tsc);
202 super::ClockTime {
203 seconds: nanos / NANOS_PER_SEC,
204 nanoseconds: nanos % NANOS_PER_SEC,
205 }
206 }
207
208 fn granularity(&self) -> u64 {
209 let n = self.cycles_to_time_nanos(1);
210 if n > 0 {
211 n
212 } else {
213 1
214 }
215 }
216
217 fn require_calibration(&self) -> bool {
218 true
219 }
220
221 fn rating(&self) -> u64 {
222 100
223 }
224}