#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/clocksource.h>
#include <linux/time.h>
#include <linux/acpi.h>
#include <linux/cpufreq.h>

#include <asm/timex.h>

static int notsc __initdata = 0;

unsigned int cpu_khz;		/* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);
unsigned int tsc_khz;
EXPORT_SYMBOL(tsc_khz);

static unsigned int cyc2ns_scale __read_mostly;

void set_cyc2ns_scale(unsigned long khz)
{
	cyc2ns_scale = (NSEC_PER_MSEC << NS_SCALE) / khz;
}

static unsigned long long cycles_2_ns(unsigned long long cyc)
{
	return (cyc * cyc2ns_scale) >> NS_SCALE;
}
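
/*
 * Worked example of the fixed-point conversion above (editor's sketch,
 * assuming NS_SCALE == 10 as in the x86-64 headers of this era): on a
 * 2 GHz CPU, khz == 2000000, so
 *
 *	cyc2ns_scale = (1000000 << 10) / 2000000 = 512
 *
 * and cycles_2_ns() computes (cyc * 512) >> 10 == cyc / 2, i.e. 0.5 ns
 * per cycle, exactly what a 2 GHz clock should give.
 */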

unsigned long long sched_clock(void)
{
	unsigned long a = 0;

	/*
	 * Could do CPU core sync here. Opteron can execute rdtsc
	 * speculatively, which means it is not completely exact and may
	 * not be monotonic between CPUs. But the errors should be too
	 * small to matter for scheduling purposes.
	 */

	rdtscll(a);
	return cycles_2_ns(a);
}
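
/*
 * Typical use (editor's sketch, not part of this file): callers such as
 * the scheduler take two sched_clock() samples and treat the difference
 * as elapsed nanoseconds, e.g.
 *
 *	u64 t0 = sched_clock();
 *	do_work();	// hypothetical workload
 *	printk("took %llu ns\n", sched_clock() - t0);
 *
 * which is why small cross-CPU monotonicity errors are tolerable here.
 */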

static int tsc_unstable;

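/*
 * Deliberately not static: other kernel code of this era queries the
 * flag through check_tsc_unstable(), while mark_tsc_unstable() below is
 * the only writer. (Editor's note, inferred from the change history.)
 */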
inline int check_tsc_unstable(void)
{
	return tsc_unstable;
}

#ifdef CONFIG_CPU_FREQ

/*
 * Frequency scaling support. Adjust the TSC-based timer when the cpu
 * frequency changes.
 *
 * RED-PEN: On SMP we assume all CPUs run with the same frequency. It's
 * not that important because current Opteron setups do not support
 * scaling on SMP anyway.
 *
 * Should fix up last_tsc too. Currently gettimeofday in the
 * first tick after the change will be slightly wrong.
 */

static unsigned int ref_freq;
static unsigned long loops_per_jiffy_ref;
static unsigned long tsc_khz_ref;

static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				 void *data)
{
	struct cpufreq_freqs *freq = data;
	unsigned long *lpj, dummy;

	if (cpu_has(&cpu_data[freq->cpu], X86_FEATURE_CONSTANT_TSC))
		return 0;

	lpj = &dummy;
	if (!(freq->flags & CPUFREQ_CONST_LOOPS))
#ifdef CONFIG_SMP
		lpj = &cpu_data[freq->cpu].loops_per_jiffy;
#else
		lpj = &boot_cpu_data.loops_per_jiffy;
#endif

	if (!ref_freq) {
		ref_freq = freq->old;
		loops_per_jiffy_ref = *lpj;
		tsc_khz_ref = tsc_khz;
	}
	if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE)) {
		*lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);

		tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			mark_tsc_unstable("cpufreq changes");
	}

	set_cyc2ns_scale(tsc_khz_ref);

	return 0;
}
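
/*
 * Worked example of the rescaling above (editor's sketch): if the CPU
 * steps from ref_freq == 2200000 kHz down to freq->new == 1100000 kHz,
 * cpufreq_scale() multiplies by new/old, so tsc_khz becomes
 * 2200000 * 1100000 / 2200000 == 1100000 and loops_per_jiffy halves as
 * well. Updating on PRECHANGE when speeding up and on POSTCHANGE when
 * slowing down means the recorded frequency never drops below the real
 * one mid-transition, so udelay() and friends never wait too briefly.
 */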

static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call = time_cpufreq_notifier
};

static int __init cpufreq_tsc(void)
{
	cpufreq_register_notifier(&time_cpufreq_notifier_block,
				  CPUFREQ_TRANSITION_NOTIFIER);
	return 0;
}

core_initcall(cpufreq_tsc);

#endif

/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
__cpuinit int unsynchronized_tsc(void)
{
	if (tsc_unstable)
		return 1;

#ifdef CONFIG_SMP
	if (apic_is_clustered_box())
		return 1;
#endif
	/* Most Intel systems have synchronized TSCs, except for
	   multi-node systems */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
#ifdef CONFIG_ACPI
		/* But the TSC doesn't tick in C3, so don't use it there */
		if (acpi_gbl_FADT.header.length > 0 &&
		    acpi_gbl_FADT.C3latency < 1000)
			return 1;
#endif
		return 0;
	}

	/* Assume multi-socket systems are not synchronized */
	return num_present_cpus() > 1;
}

int __init notsc_setup(char *s)
{
	notsc = 1;
	return 1;
}

__setup("notsc", notsc_setup);
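
/*
 * Usage note (editor's addition): booting with "notsc" on the kernel
 * command line sets notsc = 1, and init_tsc_clocksource() at the bottom
 * of this file then never registers the TSC, leaving timekeeping to a
 * lower-rated clocksource such as the HPET or the ACPI PM timer.
 */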

/* clock source code: */
static cycle_t read_tsc(void)
{
	cycle_t ret = (cycle_t)get_cycles_sync();
	return ret;
}

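/*
 * vread_tsc() is the flavor called from the userspace vsyscall page:
 * __vsyscall_fn places it in the vsyscall text section, so it may only
 * touch data mapped into the vsyscall area (here it needs none).
 * (Editor's note; see the .vread hook below.)
 */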
static cycle_t __vsyscall_fn vread_tsc(void)
{
	cycle_t ret = (cycle_t)get_cycles_sync();
	return ret;
}

static struct clocksource clocksource_tsc = {
	.name			= "tsc",
	.rating			= 300,
	.read			= read_tsc,
	.mask			= CLOCKSOURCE_MASK(64),
	.shift			= 22,
	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
				  CLOCK_SOURCE_MUST_VERIFY,
	.vread			= vread_tsc,
};

void mark_tsc_unstable(char *reason)
{
	if (!tsc_unstable) {
		tsc_unstable = 1;
		printk(KERN_INFO "Marking TSC unstable due to %s\n", reason);
		/*
		 * If the clocksource is not yet registered (mult == 0),
		 * only adjust the rating field directly.
		 */
		if (clocksource_tsc.mult)
			clocksource_change_rating(&clocksource_tsc, 0);
		else
			clocksource_tsc.rating = 0;
	}
}
EXPORT_SYMBOL_GPL(mark_tsc_unstable);

void __init init_tsc_clocksource(void)
{
	if (!notsc) {
		clocksource_tsc.mult = clocksource_khz2mult(tsc_khz,
							clocksource_tsc.shift);
		if (check_tsc_unstable())
			clocksource_tsc.rating = 0;

		clocksource_register(&clocksource_tsc);
	}
}
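
/*
 * Worked example of the mult/shift setup above (editor's sketch): the
 * generic timekeeping code computes ns = (cycles * mult) >> shift. With
 * tsc_khz == 2000000 and shift == 22, clocksource_khz2mult() yields
 * roughly (1000000 << 22) / 2000000 == 2097152, so
 * (cycles * 2097152) >> 22 == cycles / 2, again 0.5 ns per 2 GHz cycle,
 * consistent with the cyc2ns_scale math at the top of this file.
 */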