/*
 * check TSC synchronization.
 *
 * Copyright (C) 2006, Red Hat, Inc., Ingo Molnar
 *
 * We check whether all boot CPUs have their TSCs synchronized,
 * print a warning if not and turn off the TSC clock-source.
 *
 * The warp-check is point-to-point between two CPUs, the CPU
 * initiating the bootup is the 'source CPU', the freshly booting
 * CPU is the 'target CPU'.
 *
 * Only two CPUs may participate - they can enter in any order.
 * ( The serial nature of the boot logic and the CPU hotplug lock
 *   protects against more than 2 CPUs entering this code. )
 */
#include <linux/topology.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/nmi.h>
#include <asm/tsc.h>
struct tsc_adjust {
	s64		bootval;
	s64		adjusted;
	unsigned long	nextcheck;
	bool		warned;
};

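/* Per-CPU state for tracking the boot and adjusted TSC_ADJUST values: */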
static DEFINE_PER_CPU(struct tsc_adjust, tsc_adjust);
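
/*
 * Verify that the TSC_ADJUST MSR still contains the value this kernel
 * last wrote and restore it if something (e.g. firmware) has changed it
 * behind our back. The check is rate limited to once per second, except
 * on resume where it is forced.
 */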
void tsc_verify_tsc_adjust(bool resume)
{
	struct tsc_adjust *adj = this_cpu_ptr(&tsc_adjust);
	s64 curval;

	if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
		return;

	/* Rate limit the MSR check */
	if (!resume && time_before(jiffies, adj->nextcheck))
		return;

	adj->nextcheck = jiffies + HZ;

	rdmsrl(MSR_IA32_TSC_ADJUST, curval);
	if (adj->adjusted == curval)
		return;

	/* Restore the original value */
	wrmsrl(MSR_IA32_TSC_ADJUST, adj->adjusted);

	if (!adj->warned || resume) {
		pr_warn(FW_BUG "TSC ADJUST differs: CPU%u %lld --> %lld. Restoring\n",
			smp_processor_id(), adj->adjusted, curval);
		adj->warned = true;
	}
}

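/*
 * Bring the TSC_ADJUST boot value of the first CPU in a package into a
 * range the TSC deadline timer can cope with. The exact rules are
 * spelled out in the comment inside the function.
 */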
static void tsc_sanitize_first_cpu(struct tsc_adjust *cur, s64 bootval,
				   unsigned int cpu, bool bootcpu)
{
	/*
	 * First online CPU in a package stores the boot value in the
	 * adjustment value. This value might change later via the sync
	 * mechanism. If that fails we still can yell about boot values not
	 * being consistent.
	 *
	 * On the boot cpu we just force set the ADJUST value to 0 if it's
	 * non-zero. We don't do that on non boot cpus because physical
	 * hotplug should have set the ADJUST register to a value > 0 so
	 * the TSC is in sync with the already running cpus.
	 *
	 * But we always force positive ADJUST values. Otherwise the TSC
	 * deadline timer creates an interrupt storm. We also have to
	 * prevent values > 0x7FFFFFFF as those wreck the timer as well.
	 */
	if ((bootcpu && bootval != 0) || (!bootcpu && bootval < 0) ||
	    (bootval > 0x7FFFFFFF)) {
		pr_warn(FW_BUG "TSC ADJUST: CPU%u: %lld force to 0\n", cpu,
			bootval);
		wrmsrl(MSR_IA32_TSC_ADJUST, 0);
		bootval = 0;
	}
	cur->adjusted = bootval;
}

#ifndef CONFIG_SMP
bool __init tsc_store_and_check_tsc_adjust(bool bootcpu)
{
	struct tsc_adjust *cur = this_cpu_ptr(&tsc_adjust);
	s64 bootval;

	if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
		return false;

	rdmsrl(MSR_IA32_TSC_ADJUST, bootval);
	cur->bootval = bootval;
	cur->nextcheck = jiffies + HZ;
	tsc_sanitize_first_cpu(cur, bootval, smp_processor_id(), bootcpu);
	return false;
}

#else /* !CONFIG_SMP */

/*
 * Store and check the TSC ADJUST MSR if available
 */
bool tsc_store_and_check_tsc_adjust(bool bootcpu)
{
	struct tsc_adjust *ref, *cur = this_cpu_ptr(&tsc_adjust);
	unsigned int refcpu, cpu = smp_processor_id();
	struct cpumask *mask;
	s64 bootval;

	if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
		return false;

	rdmsrl(MSR_IA32_TSC_ADJUST, bootval);
	cur->bootval = bootval;
	cur->nextcheck = jiffies + HZ;

	/*
	 * Check whether this CPU is the first in a package to come up. In
	 * this case do not check the boot value against another package
	 * because the new package might have been physically hotplugged,
	 * where TSC_ADJUST is expected to be different. When called on the
	 * boot CPU topology_core_cpumask() might not be available yet.
	 */
	mask = topology_core_cpumask(cpu);
	refcpu = mask ? cpumask_any_but(mask, cpu) : nr_cpu_ids;

	if (refcpu >= nr_cpu_ids) {
		tsc_sanitize_first_cpu(cur, bootval, smp_processor_id(),
				       bootcpu);
		return false;
	}
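
	/* Another CPU in this package is online: use it as reference. */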
	ref = per_cpu_ptr(&tsc_adjust, refcpu);
	/*
	 * Compare the boot value and complain if it differs in the
	 * package.
	 */
	if (bootval != ref->bootval) {
		pr_warn(FW_BUG "TSC ADJUST differs: Reference CPU%u: %lld CPU%u: %lld\n",
			refcpu, ref->bootval, cpu, bootval);
	}
	/*
	 * The TSC_ADJUST values in a package must be the same. If the boot
	 * value on this newly upcoming CPU differs from the adjustment
	 * value of the already online CPU in this package, set it to that
	 * adjusted value.
	 */
	if (bootval != ref->adjusted) {
		pr_warn("TSC ADJUST synchronize: Reference CPU%u: %lld CPU%u: %lld\n",
			refcpu, ref->adjusted, cpu, bootval);
		cur->adjusted = ref->adjusted;
		wrmsrl(MSR_IA32_TSC_ADJUST, ref->adjusted);
	}
	/*
	 * We have the TSCs forced to be in sync on this package. Skip sync
	 * test:
	 */
	return true;
}

/*
 * Entry/exit counters that make sure that both CPUs
 * run the measurement code at once:
 */
static atomic_t start_count;
static atomic_t stop_count;
static atomic_t skip_test;
static atomic_t test_runs;
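
/*
 * The counters above implement a simple handshake: source and target
 * each bump start_count when they are ready and stop_count when they
 * are done. skip_test lets the target tell the source to bail out
 * entirely, and test_runs bounds the number of retries with an
 * adjusted TSC.
 */
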
/*
 * We use a raw spinlock in this exceptional case, because
 * we want to have the fastest, inlined, non-debug version
 * of a critical section, to be able to prove TSC time-warps:
 */
static arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED;
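
/* Warp bookkeeping shared by both CPUs, protected by sync_lock: */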
static cycles_t last_tsc;
static cycles_t max_warp;
static int nr_warps;
static int random_warps;

/*
 * TSC-warp measurement loop running on both CPUs. This is not called
 * if there is no TSC.
 */
static cycles_t check_tsc_warp(unsigned int timeout)
{
	cycles_t start, now, prev, end, cur_max_warp = 0;
	int i, cur_warps = 0;
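	/*
	 * cur_warps snapshots nr_warps after each warp this CPU observes;
	 * if it no longer matches nr_warps later, the other CPU saw a
	 * warp in the meantime as well.
	 */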

	start = rdtsc_ordered();
	/*
	 * The measurement runs for 'timeout' msecs:
	 */
	end = start + (cycles_t) tsc_khz * timeout;
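	/*
	 * tsc_khz is cycles per millisecond, so the millisecond timeout
	 * converts directly to a cycle count.
	 */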
	now = start;

	for (i = 0; ; i++) {
		/*
		 * We take the global lock, measure TSC, save the
		 * previous TSC that was measured (possibly on
		 * another CPU) and update the previous TSC timestamp.
		 */
		arch_spin_lock(&sync_lock);
		prev = last_tsc;
		now = rdtsc_ordered();
		last_tsc = now;
		arch_spin_unlock(&sync_lock);
		/*
		 * Be nice every now and then (and also check whether
		 * measurement is done [we also insert a 10 million
		 * loops safety exit, so we don't lock up in case the
		 * TSC readout is totally broken]):
		 */
		if (unlikely(!(i & 7))) {
			if (now > end || i > 10000000)
				break;
			cpu_relax();
			touch_nmi_watchdog();
		}
		/*
		 * Outside the critical section we can now see whether
		 * we saw a time-warp of the TSC going backwards:
		 */
		if (unlikely(prev > now)) {
			arch_spin_lock(&sync_lock);
			max_warp = max(max_warp, prev - now);
			cur_max_warp = max_warp;
			/*
			 * Check whether this bounces back and forth. Only
			 * one CPU should observe time going backwards. If
			 * both do, the warps are random and retrying with
			 * an adjusted TSC is pointless.
			 */
			if (cur_warps != nr_warps)
				random_warps++;
			nr_warps++;
			cur_warps = nr_warps;
			arch_spin_unlock(&sync_lock);
		}
	}
250 "Warning: zero tsc calibration delta: %Ld [max: %Ld]\n",
251 now-start, end-start);
/*
 * If the target CPU coming online doesn't have any of its core-siblings
 * online, a timeout of 20msec will be used for the TSC-warp measurement
 * loop. Otherwise a smaller timeout of 2msec will be used, as we have some
 * information about this socket already (and this information grows as we
 * have more and more logical-siblings in that socket).
 *
 * Ideally we should be able to skip the TSC sync check on the other
 * core-siblings, if the first logical CPU in a socket passed the sync test.
 * But as the TSC is per-logical CPU and can potentially be modified wrongly
 * by the BIOS, a TSC sync test of smaller duration should still be able
 * to catch such errors. Also this will catch the condition where all the
 * cores in the socket don't get reset at the same time.
 */
static inline unsigned int loop_timeout(int cpu)
{
	return (cpumask_weight(topology_core_cpumask(cpu)) > 1) ? 2 : 20;
}

/*
 * Source CPU calls into this - it waits for the freshly booted
 * target CPU to arrive and then starts the measurement:
 */
void check_tsc_sync_source(int cpu)
{
	int cpus = 2;

	/*
	 * No need to check if we already know that the TSC is not
	 * synchronized or if we have no TSC.
	 */
	if (unsynchronized_tsc())
		return;

	/*
	 * Set the maximum number of test runs to
	 *  1 if the CPU does not provide the TSC_ADJUST MSR
	 *  3 if the MSR is available, so the target can try to adjust
	 */
	if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
		atomic_set(&test_runs, 1);
	else
		atomic_set(&test_runs, 3);
retry:
	/*
	 * Wait for the target to start or to skip the test:
	 */
	while (atomic_read(&start_count) != cpus - 1) {
		if (atomic_read(&skip_test) > 0) {
			atomic_set(&skip_test, 0);
			return;
		}
		cpu_relax();
	}

	/*
	 * Trigger the target to continue into the measurement too:
	 */
	atomic_inc(&start_count);

	check_tsc_warp(loop_timeout(cpu));

	while (atomic_read(&stop_count) != cpus-1)
		cpu_relax();

	/*
	 * If the test was successful set the number of runs to zero and
	 * stop. If not, decrement the number of runs and check if we can
	 * retry. In case of random warps no retry is attempted.
	 */
	if (!nr_warps) {
		atomic_set(&test_runs, 0);

		pr_debug("TSC synchronization [CPU#%d -> CPU#%d]: passed\n",
			smp_processor_id(), cpu);

	} else if (atomic_dec_and_test(&test_runs) || random_warps) {
		/* Force it to 0 if random warps brought us here */
		atomic_set(&test_runs, 0);

		pr_warn("TSC synchronization [CPU#%d -> CPU#%d]:\n",
			smp_processor_id(), cpu);
		pr_warn("Measured %Ld cycles TSC warp between CPUs, turning off TSC clock.\n",
			max_warp);
		if (random_warps)
			pr_warn("TSC warped randomly between CPUs\n");
		mark_tsc_unstable("check_tsc_sync_source failed");
	}

	/*
	 * Reset it - just in case we boot another CPU later:
	 */
	atomic_set(&start_count, 0);
	random_warps = 0;
	nr_warps = 0;
	max_warp = 0;
	last_tsc = 0;

	/*
	 * Let the target continue with the bootup:
	 */
	atomic_inc(&stop_count);

	/*
	 * Retry, if there is a chance to do so.
	 */
	if (atomic_read(&test_runs) > 0)
		goto retry;
}

/*
 * Freshly booted CPUs call into this:
 */
void check_tsc_sync_target(void)
{
	struct tsc_adjust *cur = this_cpu_ptr(&tsc_adjust);
	unsigned int cpu = smp_processor_id();
	cycles_t cur_max_warp, gbl_max_warp;
	int cpus = 2;

	/* Also aborts if there is no TSC. */
	if (unsynchronized_tsc())
		return;

	/*
	 * Store, verify and sanitize the TSC adjust register. If
	 * successful skip the test.
	 *
	 * The test is also skipped when the TSC is marked reliable. This
	 * is true for SoCs which have no fallback clocksource. On these
	 * SoCs the TSC is frequency synchronized, but still the TSC ADJUST
	 * register might have been wrecked by the BIOS.
	 */
	if (tsc_store_and_check_tsc_adjust(false) || tsc_clocksource_reliable) {
		atomic_inc(&skip_test);
		return;
	}

retry:
	/*
	 * Register this CPU's participation and wait for the
	 * source CPU to start the measurement:
	 */
	atomic_inc(&start_count);
	while (atomic_read(&start_count) != cpus)
		cpu_relax();

	cur_max_warp = check_tsc_warp(loop_timeout(cpu));

	/*
	 * Store the maximum observed warp value for a potential retry:
	 */
	gbl_max_warp = max_warp;

	/*
	 * Ok, we are done:
	 */
	atomic_inc(&stop_count);

	/*
	 * Wait for the source CPU to print stuff:
	 */
	while (atomic_read(&stop_count) != cpus)
		cpu_relax();

	/*
	 * Reset it for the next sync test:
	 */
	atomic_set(&stop_count, 0);

	/*
	 * Check the number of remaining test runs. If not zero, the test
	 * failed and a retry with adjusted TSC is possible. If zero the
	 * test was either successful or failed terminally.
	 */
	if (!atomic_read(&test_runs))
		return;

	/*
	 * If the warp value of this CPU is 0, then the other CPU
	 * observed time going backwards so this TSC was ahead and
	 * needs to move backwards.
	 */
	if (!cur_max_warp)
		cur_max_warp = -gbl_max_warp;

	/*
	 * Add the result to the previous adjustment value.
	 *
	 * The adjustment value is slightly off by the overhead of the
	 * sync mechanism (observed values are ~200 TSC cycles), but this
	 * really depends on CPU, node distance and frequency. So
	 * compensating for this is hard to get right. Experiments show
	 * that the warp is no longer detectable when the observed warp
	 * value is used. In the worst case the adjustment needs to go
	 * through a 3rd run for fine tuning.
	 */
	cur->adjusted += cur_max_warp;

	/*
	 * TSC deadline timer stops working or creates an interrupt storm
	 * with adjust values < 0 and > 0x7FFFFFFF.
	 *
	 * To allow adjust values > 0x7FFFFFFF we need to disable the
	 * deadline timer and use the local APIC timer, but that requires
	 * more intrusive changes and we do not have any useful information
	 * from Intel about the underlying HW wreckage yet.
	 */
	if (cur->adjusted < 0)
		cur->adjusted = 0;
	if (cur->adjusted > 0x7FFFFFFF)
		cur->adjusted = 0x7FFFFFFF;
468 pr_warn("TSC ADJUST compensate: CPU%u observed %lld warp. Adjust: %lld\n",
469 cpu, cur_max_warp, cur->adjusted);
471 wrmsrl(MSR_IA32_TSC_ADJUST, cur->adjusted);
#endif /* CONFIG_SMP */