#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/cpufreq.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/dmi.h>
#include <linux/percpu.h>

#include <asm/delay.h>
#include <asm/tsc.h>
#include <asm/io.h>
#include <asm/timer.h>

#include "mach_timer.h"

static int tsc_disabled;

/*
 * On some systems the TSC frequency does not
 * change with the cpu frequency. So we need
 * an extra value to store the TSC freq
 */
unsigned int tsc_khz;
EXPORT_SYMBOL_GPL(tsc_khz);

#ifdef CONFIG_X86_TSC
static int __init tsc_setup(char *str)
{
	printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
			    "cannot disable TSC completely.\n");
	tsc_disabled = 1;
	return 1;
}
#else
/*
 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
 * in cpu/common.c
 */
static int __init tsc_setup(char *str)
{
	setup_clear_cpu_cap(X86_FEATURE_TSC);
	return 1;
}
#endif

__setup("notsc", tsc_setup);

/*
 * code to mark and check if the TSC is unstable
 * due to cpufreq or due to unsynced TSCs
 */
static int tsc_unstable;

int check_tsc_unstable(void)
{
	return tsc_unstable;
}
EXPORT_SYMBOL_GPL(check_tsc_unstable);

/* Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 * basic equation:
 *		ns = cycles / (freq / ns_per_sec)
 *		ns = cycles * (ns_per_sec / freq)
 *		ns = cycles * (10^9 / (cpu_khz * 10^3))
 *		ns = cycles * (10^6 / cpu_khz)
 *
 *	Then we use scaling math (suggested by george@mvista.com) to get:
 *		ns = cycles * (10^6 * SC / cpu_khz) / SC
 *		ns = cycles * cyc2ns_scale / SC
 *
 *	And since SC is a constant power of two, we can convert the div
 *	into a shift.
 *
 *	We can use a khz divisor instead of mhz to keep better precision,
 *	since cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
 *	(mathieu.desnoyers@polymtl.ca)
 *
 *			-johnstul@us.ibm.com "math is hard, lets go shopping!"
 */

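/*
 * A worked example with made-up numbers, assuming SC = 2^10 (the
 * CYC2NS_SCALE_FACTOR shift noted above): for cpu_khz = 2,000,000
 * (a 2 GHz TSC),
 *	cyc2ns_scale = 10^6 * 2^10 / 2,000,000 = 512
 * and so
 *	ns = cycles * 512 >> 10 = cycles / 2
 * which matches the expected 0.5 ns per cycle at 2 GHz.
 */
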
DEFINE_PER_CPU(unsigned long, cyc2ns);

static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
{
	unsigned long long tsc_now, ns_now;
	unsigned long flags, *scale;

	local_irq_save(flags);
	sched_clock_idle_sleep_event();

	scale = &per_cpu(cyc2ns, cpu);

	rdtscll(tsc_now);
	ns_now = __cycles_2_ns(tsc_now);

	if (cpu_khz)
		*scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz;

	/*
	 * Start smoothly with the new frequency:
	 */
	sched_clock_idle_wakeup_event(0);
	local_irq_restore(flags);
}

/*
 * Scheduler clock - returns current time in nanosec units.
 */
unsigned long long native_sched_clock(void)
{
	unsigned long long this_offset;

	/*
	 * Fall back to jiffies if there's no TSC available:
	 * ( But note that we still use it if the TSC is marked
	 *   unstable. We do this because unlike Time Of Day,
	 *   the scheduler clock tolerates small errors and it's
	 *   very important for it to be as fast as the platform
	 *   can achieve it. )
	 */
	if (unlikely(tsc_disabled))
		/* No locking but a rare wrong value is not a big deal: */
		return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);

	/* read the Time Stamp Counter: */
	rdtscll(this_offset);

	/* return the value in ns */
	return cycles_2_ns(this_offset);
}

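/*
 * On the jiffies fallback above (illustrative numbers only): each
 * jiffy accounts for 10^9 / HZ ns, so with HZ = 1000 the fallback
 * clock advances in 1,000,000 ns (1 ms) steps -- coarse, but good
 * enough for the scheduler when no TSC is usable.
 */
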
/*
 * We need to define a real function for sched_clock, to override the
 * weak default version
 */
#ifdef CONFIG_PARAVIRT
unsigned long long sched_clock(void)
{
	return paravirt_sched_clock();
}
#else
unsigned long long sched_clock(void)
	__attribute__((alias("native_sched_clock")));
#endif

unsigned long native_calculate_cpu_khz(void)
{
	unsigned long long start, end;
	unsigned long count;
	u64 delta64 = (u64)ULLONG_MAX;
	int i;
	unsigned long flags;

	local_irq_save(flags);

	/* run 3 times to ensure the cache is warm and to get an accurate reading */
	for (i = 0; i < 3; i++) {
		mach_prepare_counter();
		rdtscll(start);
		mach_countup(&count);
		rdtscll(end);

		/*
		 * Error: ECTCNEVERSET
		 * The CTC wasn't reliable: we got a hit on the very first read,
		 * or the CPU was so fast/slow that the quotient wouldn't fit in
		 * 32 bits..
		 */
		if (count <= 1)
			continue;

		/* cpu freq too slow: */
		if ((end - start) <= CALIBRATE_TIME_MSEC)
			continue;

		/*
		 * We want the minimum time of all runs in case one of them
		 * is inaccurate due to SMI or other delay
		 */
		delta64 = min(delta64, (end - start));
	}

	/* cpu freq too fast (or every run was bad): */
	if (delta64 > (1ULL<<32))
		goto err;

	delta64 += CALIBRATE_TIME_MSEC/2; /* round for do_div */
	do_div(delta64, CALIBRATE_TIME_MSEC);

	local_irq_restore(flags);
	return (unsigned long)delta64;
err:
	local_irq_restore(flags);
	return 0;
}

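/*
 * Worked example with illustrative figures (not from any particular
 * machine): mach_countup() spins for CALIBRATE_TIME_MSEC milliseconds
 * as timed by the PIT, so the TSC delta divided by that interval is
 * the frequency in kHz.  With a 30 ms window and delta64 = 60,000,000
 * cycles, 60,000,000 / 30 = 2,000,000 kHz, i.e. a 2 GHz TSC.  The
 * CALIBRATE_TIME_MSEC/2 added before do_div() rounds the integer
 * division to nearest rather than truncating.
 */
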
int recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
	unsigned long cpu_khz_old = cpu_khz;

	if (cpu_has_tsc) {
		cpu_khz = calculate_cpu_khz();
		tsc_khz = cpu_khz;
		cpu_data(0).loops_per_jiffy =
			cpufreq_scale(cpu_data(0).loops_per_jiffy,
					cpu_khz_old, cpu_khz);
		return 0;
	} else
		return -ENODEV;
#else
	return -ENODEV;
#endif
}

EXPORT_SYMBOL(recalibrate_cpu_khz);

#ifdef CONFIG_CPU_FREQ

/*
 * if the CPU frequency is scaled, TSC-based delays will need a different
 * loops_per_jiffy value to function properly.
 */
static unsigned int ref_freq;
static unsigned long loops_per_jiffy_ref;
static unsigned long cpu_khz_ref;

static int
time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;

	if (!ref_freq) {
		if (!freq->old) {
			ref_freq = freq->new;
			return 0;
		}
		ref_freq = freq->old;
		loops_per_jiffy_ref = cpu_data(freq->cpu).loops_per_jiffy;
		cpu_khz_ref = cpu_khz;
	}

	if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE)) {
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			cpu_data(freq->cpu).loops_per_jiffy =
				cpufreq_scale(loops_per_jiffy_ref,
						ref_freq, freq->new);

		if (cpu_khz) {
			if (num_online_cpus() == 1)
				cpu_khz = cpufreq_scale(cpu_khz_ref,
						ref_freq, freq->new);
			if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
				tsc_khz = cpu_khz;
				set_cyc2ns_scale(cpu_khz, freq->cpu);
				/*
				 * TSC based sched_clock turns
				 * to junk w/ cpufreq
				 */
				mark_tsc_unstable("cpufreq changes");
			}
		}
	}

	return 0;
}

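/*
 * cpufreq_scale(old, div, mult) is a proportional rescale, roughly
 * old * mult / div with overflow-safe fixed-point math.  Illustrative
 * figures: loops_per_jiffy_ref = 4,000,000 recorded at ref_freq =
 * 2,000,000 kHz rescales to 2,000,000 when the CPU drops to
 * 1,000,000 kHz, keeping TSC-based udelay() honest across frequency
 * transitions.
 */
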
static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call	= time_cpufreq_notifier
};

static int __init cpufreq_tsc(void)
{
	return cpufreq_register_notifier(&time_cpufreq_notifier_block,
					 CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(cpufreq_tsc);

#endif

/* clock source code */

static unsigned long current_tsc_khz;
static struct clocksource clocksource_tsc;

/*
 * We compare the TSC to the cycle_last value in the clocksource
 * structure to avoid a nasty time-warp issue. This can be observed in
 * a very small window right after one CPU updated cycle_last under
 * xtime lock and the other CPU reads a TSC value which is smaller
 * than the cycle_last reference value due to a TSC which is slightly
 * behind. This delta is nowhere else observable, but in that case it
 * results in a forward time jump in the range of hours due to the
 * unsigned delta calculation of the time keeping core code, which is
 * necessary to support wrapping clocksources like pm timer.
 */
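/*
 * A concrete illustration with made-up numbers: if cycle_last is 1000
 * and a slightly-behind CPU reads 998, the timekeeping core computes
 * the unsigned delta (u64)(998 - 1000) -- a huge positive cycle count
 * instead of -2 -- hence the forward jump.  read_tsc() below clamps
 * its return value to cycle_last to close this window.
 */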
static cycle_t read_tsc(void)
{
	cycle_t ret;

	rdtscll(ret);

	return ret >= clocksource_tsc.cycle_last ?
		ret : clocksource_tsc.cycle_last;
}

static struct clocksource clocksource_tsc = {
	.name			= "tsc",
	.rating			= 300,
	.read			= read_tsc,
	.mask			= CLOCKSOURCE_MASK(64),
	.mult			= 0, /* to be set */
	.shift			= 22,
	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
				  CLOCK_SOURCE_MUST_VERIFY,
};

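/*
 * How mult/shift play out (illustrative figures): the clocksource core
 * converts cycles to ns as (cycles * mult) >> shift.  With shift = 22,
 * clocksource_khz2mult() picks mult = (10^6 << 22) / tsc_khz, so for a
 * 2,000,000 kHz TSC mult becomes 2^21 and each cycle contributes
 * 2^21 / 2^22 = 0.5 ns -- the expected 2 GHz period.
 */
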
void mark_tsc_unstable(char *reason)
{
	if (!tsc_unstable) {
		tsc_unstable = 1;
		printk("Marking TSC unstable due to: %s.\n", reason);
		/* Can be called before registration */
		if (clocksource_tsc.mult)
			clocksource_change_rating(&clocksource_tsc, 0);
		else
			clocksource_tsc.rating = 0;
	}
}
EXPORT_SYMBOL_GPL(mark_tsc_unstable);

static int __init dmi_mark_tsc_unstable(const struct dmi_system_id *d)
{
	printk(KERN_NOTICE "%s detected: marking TSC unstable.\n",
	       d->ident);
	tsc_unstable = 1;
	return 0;
}

/* List of systems that have known TSC problems */
static struct dmi_system_id __initdata bad_tsc_dmi_table[] = {
	{
		.callback = dmi_mark_tsc_unstable,
		.ident = "IBM Thinkpad 380XD",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
			DMI_MATCH(DMI_BOARD_NAME, "2635FA0"),
		},
	},
	{}
};

/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
__cpuinit int unsynchronized_tsc(void)
{
	if (!cpu_has_tsc || tsc_unstable)
		return 1;

	/* Anything with constant TSC should be synchronized */
	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		return 0;

	/*
	 * Intel systems are normally all synchronized.
	 * Exceptions must mark TSC as unstable:
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
		/* assume multi socket systems are not synchronized: */
		if (num_possible_cpus() > 1)
			tsc_unstable = 1;
	}
	return tsc_unstable;
}

/*
 * Geode_LX - the OLPC CPU possibly has a very reliable TSC
 */
#ifdef CONFIG_MGEODE_LX
/* RTSC counts during suspend */
#define RTSC_SUSP	0x100

static void __init check_geode_tsc_reliable(void)
{
	unsigned long res_low, res_high;

	rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
	if (res_low & RTSC_SUSP)
		clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
}
#else
static inline void check_geode_tsc_reliable(void) { }
#endif

void __init tsc_init(void)
{
	int cpu;
	u64 lpj;

	if (!cpu_has_tsc || tsc_disabled) {
		/* Disable the TSC in case of !cpu_has_tsc */
		tsc_disabled = 1;
		return;
	}

	cpu_khz = calculate_cpu_khz();
	tsc_khz = cpu_khz;

	if (!cpu_khz) {
		mark_tsc_unstable("could not calculate TSC khz");
		/*
		 * We need to disable the TSC completely in this case
		 * to prevent sched_clock() from using it.
		 */
		tsc_disabled = 1;
		return;
	}

	lpj = ((u64)tsc_khz * 1000);
	do_div(lpj, HZ);
	lpj_fine = lpj;

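	/*
	 * Example with made-up numbers: tsc_khz = 2,000,000 gives
	 * 2 * 10^9 cycles/sec; with HZ = 250 that is lpj_fine =
	 * 8,000,000 TSC cycles per jiffy, giving calibrate_delay()
	 * a known-good starting value.
	 */
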
	printk("Detected %lu.%03lu MHz processor.\n",
	       (unsigned long)cpu_khz / 1000,
	       (unsigned long)cpu_khz % 1000);

	/*
	 * Secondary CPUs do not run through tsc_init(), so set up
	 * all the scale factors for all CPUs, assuming the same
	 * speed as the bootup CPU. (cpufreq notifiers will fix this
	 * up if their speed diverges)
	 */
	for_each_possible_cpu(cpu)
		set_cyc2ns_scale(cpu_khz, cpu);

	use_tsc_delay();

	/* Check and install the TSC clocksource */
	dmi_check_system(bad_tsc_dmi_table);

	unsynchronized_tsc();
	check_geode_tsc_reliable();
	current_tsc_khz = tsc_khz;
	clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
						    clocksource_tsc.shift);
	/* lower the rating if we already know it's unstable: */
	if (check_tsc_unstable()) {
		clocksource_tsc.rating = 0;
		clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
	}
	clocksource_register(&clocksource_tsc);
}
john stultz5d0cf412006-06-26 00:25:12 -0700459}