/*
 * x86 TSC related functions
 */
#ifndef _ASM_X86_TSC_H
#define _ASM_X86_TSC_H

#include <asm/processor.h>

#define NS_SCALE	10 /* 2^10, carefully chosen */
#define US_SCALE	32 /* 2^32, arbitrarily chosen */
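
/*
 * A minimal sketch of the intended use (illustrative only; these
 * helpers are not part of this header). NS_SCALE keeps a cycles ->
 * nanoseconds conversion down to a 64-bit multiply plus a shift:
 *
 *	static unsigned long cyc2ns_scale;
 *
 *	static void set_cyc2ns_scale(unsigned long khz)
 *	{
 *		cyc2ns_scale = (1000000 << NS_SCALE) / khz;
 *	}
 *
 *	static unsigned long long cycles_2_ns(unsigned long long cyc)
 *	{
 *		return (cyc * cyc2ns_scale) >> NS_SCALE;
 *	}
 */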

/*
 * Standard way to access the cycle counter.
 */
typedef unsigned long long cycles_t;

extern unsigned int cpu_khz;	/* CPU core clock rate, in kHz */
extern unsigned int tsc_khz;	/* TSC tick rate, in kHz */

static inline cycles_t get_cycles(void)
{
	unsigned long long ret = 0;

#ifndef CONFIG_X86_TSC
	if (!cpu_has_tsc)
		return 0;
#endif

#if defined(CONFIG_X86_GENERIC) || defined(CONFIG_X86_TSC)
	rdtscll(ret);
#endif
	return ret;
}
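
/*
 * Minimal usage sketch (illustrative only, not part of this header);
 * deltas are meaningful on one CPU and convert to time via tsc_khz:
 *
 *	cycles_t t0, t1;
 *
 *	t0 = get_cycles();
 *	do_work();			// hypothetical code under test
 *	t1 = get_cycles();
 *	// elapsed ticks: t1 - t0; milliseconds: (t1 - t0) / tsc_khz
 */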

/* Like get_cycles, but make sure the CPU is synchronized. */
static __always_inline cycles_t get_cycles_sync(void)
{
	unsigned long long ret;
	unsigned eax, edx;

	/*
	 * Use RDTSCP if possible; it is guaranteed to be synchronous
	 * and does not cause a VM exit on hypervisors.  The .byte
	 * sequence is the RDTSCP opcode (0f 01 f9), spelled out for
	 * assemblers that lack the mnemonic; CPUs without RDTSCP run
	 * the NOPs instead and leave the preloaded zeroes in eax/edx,
	 * so ret ends up 0 and the fallback below is taken.
	 */
	alternative_io(ASM_NOP3, ".byte 0x0f,0x01,0xf9", X86_FEATURE_RDTSCP,
		       ASM_OUTPUT2("=a" (eax), "=d" (edx)),
		       "a" (0U), "d" (0U) : "ecx", "memory");
	ret = (((unsigned long long)edx) << 32) | ((unsigned long long)eax);
	if (ret)
		return ret;

	/*
	 * Don't do an additional sync on CPUs where we know
	 * RDTSC is already synchronous:
	 */
	alternative_io("cpuid", ASM_NOP2, X86_FEATURE_SYNC_RDTSC,
		       "=a" (eax), "0" (1) : "ebx", "ecx", "edx", "memory");
	rdtscll(ret);

	return ret;
}
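
/*
 * Illustrative sketch only (not part of this header): the serialized
 * read matters when work must not be reordered past the counter read,
 * e.g. when timing a region:
 *
 *	cycles_t start, end;
 *
 *	start = get_cycles_sync();
 *	critical_work();		// hypothetical region being timed
 *	end = get_cycles_sync();	// prior work completes before read
 */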

extern void tsc_init(void);
extern void mark_tsc_unstable(char *reason);
extern int unsynchronized_tsc(void);
extern void init_tsc_clocksource(void);
int check_tsc_unstable(void);

/*
 * Boot-time check whether the TSCs are synchronized across
 * all CPUs/cores:
 */
extern void check_tsc_sync_source(int cpu);
extern void check_tsc_sync_target(void);

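/*
 * Usage sketch (illustrative; the real call sites live in the SMP
 * bringup code, not in this header). The two checks run in lock-step
 * on a pair of CPUs, roughly:
 *
 *	// on the controlling CPU, while bringing 'cpu' online:
 *	check_tsc_sync_source(cpu);
 *
 *	// on the newly started CPU itself:
 *	check_tsc_sync_target();
 *
 * The source side marks the TSC unstable if the counters warp.
 */
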
#ifdef CONFIG_X86_64
extern void tsc_calibrate(void);
#endif

#endif /* _ASM_X86_TSC_H */