#ifndef _ASM_X86_TIMER_H
#define _ASM_X86_TIMER_H
#include <linux/init.h>
#include <linux/pm.h>
#include <linux/percpu.h>
#include <linux/interrupt.h>

#define TICK_SIZE (tick_nsec / 1000)

unsigned long long native_sched_clock(void);
extern int recalibrate_cpu_khz(void);

extern int no_timer_check;

/* Accelerators for sched_clock()
 * convert from cycles (64 bits) => nanoseconds (64 bits)
 *  basic equation:
 *		ns = cycles / (freq / ns_per_sec)
 *		ns = cycles * (ns_per_sec / freq)
 *		ns = cycles * (10^9 / (cpu_khz * 10^3))
 *		ns = cycles * (10^6 / cpu_khz)
 *
 *	Then we use scaling math (suggested by george@mvista.com) to get:
 *		ns = cycles * (10^6 * SC / cpu_khz) / SC
 *		ns = cycles * cyc2ns_scale / SC
 *
 *	And since SC is a constant power of two, we can convert the div
 *  into a shift.
 *
 *  We can use a kHz divisor instead of MHz for better precision, since
 *  cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
 *  (mathieu.desnoyers@polymtl.ca)
 *
 *			-johnstul@us.ibm.com "math is hard, let's go shopping!"
 *
 * In the expression above,
 *
 *	ns = cycles * cyc2ns_scale / SC
 *
 * we may have enough bits to store the final value of ns, yet in some
 * cases not enough to store the intermediate product cycles * cyc2ns_scale,
 * which leads to an incorrect result.
 *
 * To avoid this, we can decompose 'cycles' into the quotient and remainder
 * of its division by SC.  Then,
 *
 *	ns = (quot * SC + rem) * cyc2ns_scale / SC
 *	   = quot * cyc2ns_scale + (rem * cyc2ns_scale) / SC
 *
 *			- sqazi@google.com
 */

DECLARE_PER_CPU(unsigned long, cyc2ns);
DECLARE_PER_CPU(unsigned long long, cyc2ns_offset);

#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
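
/*
 * Worked example (hypothetical numbers, assuming cyc2ns holds
 * 10^6 * 2^10 / cpu_khz as derived above): on a 2 GHz CPU,
 * cpu_khz = 2,000,000, so
 *
 *	cyc2ns = 10^6 * 2^10 / 2,000,000 = 512
 *
 * and each cycle converts to 512 / 2^10 = 0.5 ns, as expected for a
 * 2 GHz clock.
 */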

static inline unsigned long long __cycles_2_ns(unsigned long long cyc)
{
	int cpu = smp_processor_id();
	unsigned long long ns = per_cpu(cyc2ns_offset, cpu);
	ns += mult_frac(cyc, per_cpu(cyc2ns, cpu),
			(1UL << CYC2NS_SCALE_FACTOR));
	return ns;
}
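
/*
 * mult_frac(), from <linux/kernel.h>, performs the quotient/remainder
 * split described above; for these arguments it expands to roughly
 * (a sketch, not the exact macro text):
 *
 *	quot = cyc / (1UL << CYC2NS_SCALE_FACTOR);
 *	rem  = cyc % (1UL << CYC2NS_SCALE_FACTOR);
 *	ns  += quot * scale + (rem * scale) / (1UL << CYC2NS_SCALE_FACTOR);
 *
 * with scale = per_cpu(cyc2ns, cpu), so the intermediate products stay
 * within 64 bits for realistic cycle counts.
 */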

static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
	unsigned long long ns;
	unsigned long flags;

	local_irq_save(flags);
	ns = __cycles_2_ns(cyc);
	local_irq_restore(flags);

	return ns;
}
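
/*
 * cycles_2_ns() disables interrupts around the conversion, which also
 * prevents preemption, so smp_processor_id() and the per-CPU cyc2ns and
 * cyc2ns_offset values all refer to the same CPU.  A caller holding a
 * raw TSC value might use it along these lines -- a sketch, not the
 * exact kernel code:
 *
 *	u64 tsc;
 *	rdtscll(tsc);
 *	return cycles_2_ns(tsc);
 */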

#endif /* _ASM_X86_TIMER_H */