/*
 * x86 TSC related functions
 */
#ifndef _ASM_X86_TSC_H
#define _ASM_X86_TSC_H

#include <asm/processor.h>

#define NS_SCALE 10 /* 2^10, carefully chosen */
#define US_SCALE 32 /* 2^32, arbitrarily chosen */

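/*
 * Illustrative fixed-point use of these scale factors (a sketch only;
 * the real conversion code lives in arch/x86/kernel/tsc_*.c and the
 * variable names here are hypothetical):
 *
 *	scale = (NSEC_PER_MSEC << NS_SCALE) / cpu_khz;
 *	ns    = (cycles * scale) >> NS_SCALE;
 *
 * A shift of only 2^10 keeps the intermediate product within 64 bits
 * for realistic cycle counts, hence "carefully chosen".
 */
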
/*
 * Standard way to access the cycle counter.
 */
typedef unsigned long long cycles_t;

extern unsigned int cpu_khz;
extern unsigned int tsc_khz;

static inline cycles_t get_cycles(void)
{
	unsigned long long ret = 0;

#ifndef CONFIG_X86_TSC
	if (!cpu_has_tsc)
		return 0;
#endif

#if defined(CONFIG_X86_GENERIC) || defined(CONFIG_X86_TSC)
	rdtscll(ret);
#endif
	return ret;
}

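/*
 * Usage sketch (illustrative, not part of this header): timing a code
 * region with get_cycles(). The delta is only meaningful if the TSC is
 * stable and the task is not migrated between CPUs in between:
 *
 *	cycles_t t0, t1;
 *
 *	t0 = get_cycles();
 *	do_something();		(hypothetical workload)
 *	t1 = get_cycles();
 *	printk(KERN_DEBUG "region took %llu cycles\n", t1 - t0);
 */
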
/* Like get_cycles(), but make sure the CPU is synchronized. */
static __always_inline cycles_t __get_cycles_sync(void)
{
	unsigned long long ret;
	unsigned eax, edx;

	/*
	 * Use RDTSCP if possible; it is guaranteed to be synchronous
	 * and doesn't cause a VMEXIT on hypervisors.
	 * (".byte 0x0f,0x01,0xf9" is the RDTSCP opcode, spelled out
	 * for assemblers that don't know the mnemonic.)
	 */
	alternative_io(ASM_NOP3, ".byte 0x0f,0x01,0xf9", X86_FEATURE_RDTSCP,
		       ASM_OUTPUT2("=a" (eax), "=d" (edx)),
		       "a" (0U), "d" (0U) : "ecx", "memory");
	ret = (((unsigned long long)edx) << 32) | ((unsigned long long)eax);
	if (ret)
		return ret;

	/*
	 * Don't do an additional sync on CPUs where we know
	 * RDTSC is already synchronous:
	 */
	alternative_io("cpuid", ASM_NOP2, X86_FEATURE_SYNC_RDTSC,
		       "=a" (eax), "0" (1) : "ebx", "ecx", "edx", "memory");

	return 0;
}

static __always_inline cycles_t get_cycles_sync(void)
{
	unsigned long long ret;

	ret = __get_cycles_sync();
	if (!ret)
		rdtscll(ret);
	return ret;
}

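/*
 * Sketch of converting a synchronized cycle delta to microseconds,
 * assuming tsc_khz has been filled in by tsc_init() (illustrative
 * only; tsc_khz is the TSC rate in kHz, i.e. cycles per millisecond):
 *
 *	cycles_t t0 = get_cycles_sync();
 *	...
 *	unsigned long long us = (get_cycles_sync() - t0) * 1000ULL;
 *	do_div(us, tsc_khz);
 */
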
#ifdef CONFIG_PARAVIRT
/*
 * For paravirt guests, some functionality is routed through function
 * pointers in the various pvops structures. Those function pointers
 * live inside the kernel and cannot be reached from user space. To
 * avoid that, we provide a copy of get_cycles_sync() (which is used
 * in-kernel) that forces the use of native_read_tsc(). Ideally, the
 * guest should set up its own clock and vread function.
 */
static __always_inline long long vget_cycles_sync(void)
{
	unsigned long long ret;

	ret = __get_cycles_sync();
	if (!ret)
		ret = native_read_tsc();
	return ret;
}
#else
# define vget_cycles_sync() get_cycles_sync()
#endif

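/*
 * Sketch of the intended user-space-visible consumer: a TSC clocksource
 * vread hook along these lines (placement and names illustrative):
 *
 *	static cycle_t __vsyscall_fn vread_tsc(void)
 *	{
 *		return (cycle_t)vget_cycles_sync();
 *	}
 */
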
extern void tsc_init(void);
extern void mark_tsc_unstable(char *reason);
extern int unsynchronized_tsc(void);
extern void init_tsc_clocksource(void);
extern int check_tsc_unstable(void);

/*
 * Boot-time check whether the TSCs are synchronized across
 * all CPUs/cores:
 */
extern void check_tsc_sync_source(int cpu);
extern void check_tsc_sync_target(void);

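/*
 * Sketch of the intended pairing during CPU bringup (simplified; the
 * actual callers live in the SMP boot code):
 *
 *	boot CPU:			new CPU:
 *	check_tsc_sync_source(cpu);	check_tsc_sync_target();
 *
 * Each side waits for the other to arrive, then the TSC readings are
 * compared to decide whether the TSCs can be trusted as synchronized.
 */
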
extern void tsc_calibrate(void);
extern int notsc_setup(char *);

#endif /* _ASM_X86_TSC_H */