/*
 *  linux/arch/x86_64/nmi.c
 *
 *  NMI watchdog support on APIC systems
 *
 *  Started by Ingo Molnar <mingo@redhat.com>
 *
 *  Fixes:
 *  Mikael Pettersson	: AMD K7 support for local APIC NMI watchdog.
 *  Mikael Pettersson	: Power Management for local APIC NMI watchdog.
 *  Pavel Machek and
 *  Mikael Pettersson	: PM converted to driver model. Disable/enable API.
 */

#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/sysdev.h>
#include <linux/sysctl.h>
#include <linux/kprobes.h>
#include <linux/cpumask.h>

#include <asm/smp.h>
#include <asm/nmi.h>
#include <asm/proto.h>
#include <asm/kdebug.h>
#include <asm/mce.h>
#include <asm/intel_arch_perfmon.h>

int unknown_nmi_panic;
int nmi_watchdog_enabled;
int panic_on_unrecovered_nmi;

/* perfctr_nmi_owner tracks the ownership of the perfctr registers:
 * evntsel_nmi_owner tracks the ownership of the event selection registers
 * - different performance counters / event selection registers may be
 *   reserved by different subsystems; this reservation system just tries
 *   to coordinate things a little
 */
static DEFINE_PER_CPU(unsigned, perfctr_nmi_owner);
static DEFINE_PER_CPU(unsigned, evntsel_nmi_owner[2]);

static cpumask_t backtrace_mask = CPU_MASK_NONE;

/* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and its
 * offset from MSR_P4_BSU_ESCR0. It will be the max for all platforms (for now)
 */
#define NMI_MAX_COUNTER_BITS 66

/* nmi_active:
 * >0: the lapic NMI watchdog is active, but can be disabled
 * <0: the lapic NMI watchdog has not been set up, and cannot
 *     be enabled
 *  0: the lapic NMI watchdog is disabled, but can be enabled
 */
atomic_t nmi_active = ATOMIC_INIT(0);		/* oprofile uses this */
int panic_on_timeout;

unsigned int nmi_watchdog = NMI_DEFAULT;
static unsigned int nmi_hz = HZ;

struct nmi_watchdog_ctlblk {
	int enabled;
	u64 check_bit;
	unsigned int cccr_msr;
	unsigned int perfctr_msr;  /* the MSR to reset in NMI handler */
	unsigned int evntsel_msr;  /* the MSR to select the events to handle */
};
static DEFINE_PER_CPU(struct nmi_watchdog_ctlblk, nmi_watchdog_ctlblk);

/* local prototypes */
static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu);

/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
{
	/* returns the bit offset of the performance counter register */
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return (msr - MSR_K7_PERFCTR0);
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return (msr - MSR_ARCH_PERFMON_PERFCTR0);
		else
			return (msr - MSR_P4_BPU_PERFCTR0);
	}
	return 0;
}

/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
{
	/* returns the bit offset of the event selection register */
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return (msr - MSR_K7_EVNTSEL0);
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return (msr - MSR_ARCH_PERFMON_EVENTSEL0);
		else
			return (msr - MSR_P4_BSU_ESCR0);
	}
	return 0;
}

/* checks for a bit availability (hack for oprofile) */
int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
{
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
}

/* checks an msr for availability */
int avail_to_resrv_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
}

int reserve_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	if (!test_and_set_bit(counter, &__get_cpu_var(perfctr_nmi_owner)))
		return 1;
	return 0;
}

void release_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	clear_bit(counter, &__get_cpu_var(perfctr_nmi_owner));
}

int reserve_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_evntsel_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	if (!test_and_set_bit(counter, &__get_cpu_var(evntsel_nmi_owner)))
		return 1;
	return 0;
}

void release_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_evntsel_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	clear_bit(counter, &__get_cpu_var(evntsel_nmi_owner));
}
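
/*
 * Illustrative sketch (not part of this file's control flow): a perfctr
 * user such as oprofile is expected to pair the reserve/release calls
 * around its use of a counter, e.g.
 *
 *	if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0))
 *		return -EBUSY;
 *	if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0)) {
 *		release_perfctr_nmi(MSR_K7_PERFCTR0);
 *		return -EBUSY;
 *	}
 *	... program and use the counter ...
 *	release_evntsel_nmi(MSR_K7_EVNTSEL0);
 *	release_perfctr_nmi(MSR_K7_PERFCTR0);
 */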

static __cpuinit inline int nmi_known_cpu(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return boot_cpu_data.x86 == 15 || boot_cpu_data.x86 == 16;
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return 1;
		else
			return (boot_cpu_data.x86 == 15);
	}
	return 0;
}

/* Run after command line and cpu_init init, but before all other checks */
void nmi_watchdog_default(void)
{
	if (nmi_watchdog != NMI_DEFAULT)
		return;
	nmi_watchdog = NMI_NONE;
}

static int endflag __initdata = 0;

#ifdef CONFIG_SMP
/* The performance counters used by NMI_LOCAL_APIC don't trigger when
 * the CPU is idle. To make sure the NMI watchdog really ticks on all
 * CPUs during the test make them busy.
 */
static __init void nmi_cpu_busy(void *data)
{
	local_irq_enable_in_hardirq();
	/* Intentionally don't use cpu_relax here. This is
	   to make sure that the performance counter really ticks,
	   even if there is a simulator or similar that catches the
	   pause instruction. On a real HT machine this is fine because
	   all other CPUs are busy with "useless" delay loops and don't
	   care if they get somewhat less cycles. */
	while (endflag == 0)
		mb();
}
#endif

static unsigned int adjust_for_32bit_ctr(unsigned int hz)
{
	unsigned int retval = hz;

	/*
	 * On Intel CPUs with ARCH_PERFMON only 32 bits in the counter
	 * are writable, with higher bits sign extending from bit 31.
	 * So we can only program the counter with 31 bit values; bit 32
	 * must be 1 so that bits 33..63 sign extend to 1 as well.
	 * Find the appropriate nmi_hz.
	 */
	if ((((u64)cpu_khz * 1000) / retval) > 0x7fffffffULL) {
		retval = ((u64)cpu_khz * 1000) / 0x7fffffffUL + 1;
	}
	return retval;
}
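
/*
 * Worked example (illustrative): on a 3GHz CPU (cpu_khz == 3000000) a
 * requested hz of 1 asks for a period of 3*10^9 counter ticks, which
 * exceeds 0x7fffffff; retval then becomes 3*10^9 / 0x7fffffff + 1 == 2,
 * so the watchdog fires twice per second instead of once.
 */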

int __init check_nmi_watchdog (void)
{
	int *counts;
	int cpu;

	if ((nmi_watchdog == NMI_NONE) || (nmi_watchdog == NMI_DEFAULT))
		return 0;

	if (!atomic_read(&nmi_active))
		return 0;

	counts = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
	if (!counts)
		return -1;

	printk(KERN_INFO "testing NMI watchdog ... ");

#ifdef CONFIG_SMP
	if (nmi_watchdog == NMI_LOCAL_APIC)
		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
#endif

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		counts[cpu] = cpu_pda(cpu)->__nmi_count;
	local_irq_enable();
	mdelay((10*1000)/nmi_hz); // wait 10 ticks

	for_each_online_cpu(cpu) {
		if (!per_cpu(nmi_watchdog_ctlblk, cpu).enabled)
			continue;
		if (cpu_pda(cpu)->__nmi_count - counts[cpu] <= 5) {
			printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n",
			       cpu,
			       counts[cpu],
			       cpu_pda(cpu)->__nmi_count);
			per_cpu(nmi_watchdog_ctlblk, cpu).enabled = 0;
			atomic_dec(&nmi_active);
		}
	}
	if (!atomic_read(&nmi_active)) {
		kfree(counts);
		atomic_set(&nmi_active, -1);
		endflag = 1;
		return -1;
	}
	endflag = 1;
	printk("OK.\n");

	/* now that we know it works we can reduce NMI frequency to
	   something more reasonable; makes a difference in some configs */
	if (nmi_watchdog == NMI_LOCAL_APIC) {
		struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

		nmi_hz = 1;
		if (wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0)
			nmi_hz = adjust_for_32bit_ctr(nmi_hz);
	}

	kfree(counts);
	return 0;
}

int __init setup_nmi_watchdog(char *str)
{
	int nmi;

	if (!strncmp(str,"panic",5)) {
		panic_on_timeout = 1;
		str = strchr(str, ',');
		if (!str)
			return 1;
		++str;
	}

	get_option(&str, &nmi);

	if ((nmi >= NMI_INVALID) || (nmi < NMI_NONE))
		return 0;

	nmi_watchdog = nmi;
	return 1;
}

__setup("nmi_watchdog=", setup_nmi_watchdog);

static void disable_lapic_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);

	if (atomic_read(&nmi_active) <= 0)
		return;

	on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);

	BUG_ON(atomic_read(&nmi_active) != 0);
}

static void enable_lapic_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);

	/* are we already enabled */
	if (atomic_read(&nmi_active) != 0)
		return;

	/* are we lapic aware */
	if (nmi_known_cpu() <= 0)
		return;

	on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1);
	touch_nmi_watchdog();
}

void disable_timer_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_IO_APIC);

	if (atomic_read(&nmi_active) <= 0)
		return;

	disable_irq(0);
	on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);

	BUG_ON(atomic_read(&nmi_active) != 0);
}

void enable_timer_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_IO_APIC);

	if (atomic_read(&nmi_active) == 0) {
		touch_nmi_watchdog();
		on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1);
		enable_irq(0);
	}
}

static void __acpi_nmi_disable(void *__unused)
{
	apic_write(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED);
}

/*
 * Disable timer based NMIs on all CPUs:
 */
void acpi_nmi_disable(void)
{
	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
		on_each_cpu(__acpi_nmi_disable, NULL, 0, 1);
}

static void __acpi_nmi_enable(void *__unused)
{
	apic_write(APIC_LVT0, APIC_DM_NMI);
}

/*
 * Enable timer based NMIs on all CPUs:
 */
void acpi_nmi_enable(void)
{
	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
		on_each_cpu(__acpi_nmi_enable, NULL, 0, 1);
}
#ifdef CONFIG_PM

static int nmi_pm_active; /* nmi_active before suspend */

static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state)
{
	/* only CPU0 goes here, other CPUs should be offline */
	nmi_pm_active = atomic_read(&nmi_active);
	stop_apic_nmi_watchdog(NULL);
	BUG_ON(atomic_read(&nmi_active) != 0);
	return 0;
}

static int lapic_nmi_resume(struct sys_device *dev)
{
	/* only CPU0 goes here, other CPUs should be offline */
	if (nmi_pm_active > 0) {
		setup_apic_nmi_watchdog(NULL);
		touch_nmi_watchdog();
	}
	return 0;
}

static struct sysdev_class nmi_sysclass = {
	set_kset_name("lapic_nmi"),
	.resume		= lapic_nmi_resume,
	.suspend	= lapic_nmi_suspend,
};

static struct sys_device device_lapic_nmi = {
	.id	= 0,
	.cls	= &nmi_sysclass,
};

static int __init init_lapic_nmi_sysfs(void)
{
	int error;

	/* should really be a BUG_ON but b/c this is an
	 * init call, it just doesn't work.  -dcz
	 */
	if (nmi_watchdog != NMI_LOCAL_APIC)
		return 0;

	if ( atomic_read(&nmi_active) < 0 )
		return 0;

	error = sysdev_class_register(&nmi_sysclass);
	if (!error)
		error = sysdev_register(&device_lapic_nmi);
	return error;
}
/* must come after the local APIC's device_initcall() */
late_initcall(init_lapic_nmi_sysfs);

#endif	/* CONFIG_PM */

/*
 * Activate the NMI watchdog via the local APIC.
 * Original code written by Keith Owens.
 */

/* Note that these events don't tick when the CPU idles. This means
   the frequency varies with CPU load. */

#define K7_EVNTSEL_ENABLE	(1 << 22)
#define K7_EVNTSEL_INT		(1 << 20)
#define K7_EVNTSEL_OS		(1 << 17)
#define K7_EVNTSEL_USR		(1 << 16)
#define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING	0x76
#define K7_NMI_EVENT		K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING
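
/*
 * Common trick used by the setup_*_watchdog() variants below: each
 * perfctr is programmed with the negated period -(cpu_khz * 1000 / nmi_hz),
 * so it counts up and overflows after roughly that many counted events
 * (unhalted CPU cycles here), raising an NMI about nmi_hz times per
 * second on each CPU.
 */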

static int setup_k7_watchdog(void)
{
	unsigned int perfctr_msr, evntsel_msr;
	unsigned int evntsel;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	perfctr_msr = MSR_K7_PERFCTR0;
	evntsel_msr = MSR_K7_EVNTSEL0;
	if (!reserve_perfctr_nmi(perfctr_msr))
		goto fail;

	if (!reserve_evntsel_nmi(evntsel_msr))
		goto fail1;

	/* Simulator may not support it */
	if (checking_wrmsrl(evntsel_msr, 0UL))
		goto fail2;
	wrmsrl(perfctr_msr, 0UL);

	evntsel = K7_EVNTSEL_INT
		| K7_EVNTSEL_OS
		| K7_EVNTSEL_USR
		| K7_NMI_EVENT;

	/* setup the timer */
	wrmsr(evntsel_msr, evntsel, 0);
	wrmsrl(perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	evntsel |= K7_EVNTSEL_ENABLE;
	wrmsr(evntsel_msr, evntsel, 0);

	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = 0;  //unused
	wd->check_bit = 1ULL<<63;
	return 1;
fail2:
	release_evntsel_nmi(evntsel_msr);
fail1:
	release_perfctr_nmi(perfctr_msr);
fail:
	return 0;
}

static void stop_k7_watchdog(void)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	wrmsr(wd->evntsel_msr, 0, 0);

	release_evntsel_nmi(wd->evntsel_msr);
	release_perfctr_nmi(wd->perfctr_msr);
}

/* Note that these events don't tick when the CPU idles. This means
   the frequency varies with CPU load. */

#define MSR_P4_MISC_ENABLE_PERF_AVAIL	(1<<7)
#define P4_ESCR_EVENT_SELECT(N)	((N)<<25)
#define P4_ESCR_OS		(1<<3)
#define P4_ESCR_USR		(1<<2)
#define P4_CCCR_OVF_PMI0	(1<<26)
#define P4_CCCR_OVF_PMI1	(1<<27)
#define P4_CCCR_THRESHOLD(N)	((N)<<20)
#define P4_CCCR_COMPLEMENT	(1<<19)
#define P4_CCCR_COMPARE		(1<<18)
#define P4_CCCR_REQUIRED	(3<<16)
#define P4_CCCR_ESCR_SELECT(N)	((N)<<13)
#define P4_CCCR_ENABLE		(1<<12)
#define P4_CCCR_OVF		(1<<31)
/* Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter
   CRU_ESCR0 (with any non-null event selector) through a complemented
   max threshold. [IA32-Vol3, Section 14.9.9] */

static int setup_p4_watchdog(void)
{
	unsigned int perfctr_msr, evntsel_msr, cccr_msr;
	unsigned int evntsel, cccr_val;
	unsigned int misc_enable, dummy;
	unsigned int ht_num;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	rdmsr(MSR_IA32_MISC_ENABLE, misc_enable, dummy);
	if (!(misc_enable & MSR_P4_MISC_ENABLE_PERF_AVAIL))
		return 0;

#ifdef CONFIG_SMP
	/* detect which hyperthread we are on */
	if (smp_num_siblings == 2) {
		unsigned int ebx, apicid;

		ebx = cpuid_ebx(1);
		apicid = (ebx >> 24) & 0xff;
		ht_num = apicid & 1;
	} else
#endif
		ht_num = 0;

	/* performance counters are shared resources
	 * assign each hyperthread its own set
	 * (re-use the ESCR0 register, seems safe
	 * and keeps the cccr_val the same)
	 */
	if (!ht_num) {
		/* logical cpu 0 */
		perfctr_msr = MSR_P4_IQ_PERFCTR0;
		evntsel_msr = MSR_P4_CRU_ESCR0;
		cccr_msr = MSR_P4_IQ_CCCR0;
		cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4);
	} else {
		/* logical cpu 1 */
		perfctr_msr = MSR_P4_IQ_PERFCTR1;
		evntsel_msr = MSR_P4_CRU_ESCR0;
		cccr_msr = MSR_P4_IQ_CCCR1;
		cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4);
	}

	if (!reserve_perfctr_nmi(perfctr_msr))
		goto fail;

	if (!reserve_evntsel_nmi(evntsel_msr))
		goto fail1;

	evntsel = P4_ESCR_EVENT_SELECT(0x3F)
		| P4_ESCR_OS
		| P4_ESCR_USR;

	cccr_val |= P4_CCCR_THRESHOLD(15)
		 | P4_CCCR_COMPLEMENT
		 | P4_CCCR_COMPARE
		 | P4_CCCR_REQUIRED;

	wrmsr(evntsel_msr, evntsel, 0);
	wrmsr(cccr_msr, cccr_val, 0);
	wrmsrl(perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	cccr_val |= P4_CCCR_ENABLE;
	wrmsr(cccr_msr, cccr_val, 0);

	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = cccr_msr;
	wd->check_bit = 1ULL<<39;
	return 1;
fail1:
	release_perfctr_nmi(perfctr_msr);
fail:
	return 0;
}

static void stop_p4_watchdog(void)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	wrmsr(wd->cccr_msr, 0, 0);
	wrmsr(wd->evntsel_msr, 0, 0);

	release_evntsel_nmi(wd->evntsel_msr);
	release_perfctr_nmi(wd->perfctr_msr);
}

#define ARCH_PERFMON_NMI_EVENT_SEL	ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
#define ARCH_PERFMON_NMI_EVENT_UMASK	ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK

static int setup_intel_arch_watchdog(void)
{
	unsigned int ebx;
	union cpuid10_eax eax;
	unsigned int unused;
	unsigned int perfctr_msr, evntsel_msr;
	unsigned int evntsel;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	/*
	 * Check whether the Architectural PerfMon supports
	 * Unhalted Core Cycles Event or not.
	 * NOTE: Corresponding bit = 0 in ebx indicates event present.
	 */
	cpuid(10, &(eax.full), &ebx, &unused, &unused);
	if ((eax.split.mask_length < (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) ||
	    (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
		goto fail;

	perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
	evntsel_msr = MSR_ARCH_PERFMON_EVENTSEL0;

	if (!reserve_perfctr_nmi(perfctr_msr))
		goto fail;

	if (!reserve_evntsel_nmi(evntsel_msr))
		goto fail1;

	wrmsrl(perfctr_msr, 0UL);

	evntsel = ARCH_PERFMON_EVENTSEL_INT
		| ARCH_PERFMON_EVENTSEL_OS
		| ARCH_PERFMON_EVENTSEL_USR
		| ARCH_PERFMON_NMI_EVENT_SEL
		| ARCH_PERFMON_NMI_EVENT_UMASK;

	/* setup the timer */
	wrmsr(evntsel_msr, evntsel, 0);

	nmi_hz = adjust_for_32bit_ctr(nmi_hz);
	wrmsr(perfctr_msr, (u32)(-((u64)cpu_khz * 1000 / nmi_hz)), 0);

	apic_write(APIC_LVTPC, APIC_DM_NMI);
	evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
	wrmsr(evntsel_msr, evntsel, 0);

	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = 0;  //unused
	wd->check_bit = 1ULL << (eax.split.bit_width - 1);
	return 1;
fail1:
	release_perfctr_nmi(perfctr_msr);
fail:
	return 0;
}

static void stop_intel_arch_watchdog(void)
{
	unsigned int ebx;
	union cpuid10_eax eax;
	unsigned int unused;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	/*
	 * Check whether the Architectural PerfMon supports
	 * Unhalted Core Cycles Event or not.
	 * NOTE: Corresponding bit = 0 in ebx indicates event present.
	 */
	cpuid(10, &(eax.full), &ebx, &unused, &unused);
	if ((eax.split.mask_length < (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) ||
	    (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
		return;

	wrmsr(wd->evntsel_msr, 0, 0);

	release_evntsel_nmi(wd->evntsel_msr);
	release_perfctr_nmi(wd->perfctr_msr);
}

void setup_apic_nmi_watchdog(void *unused)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	/* only support LOCAL and IO APICs for now */
	if ((nmi_watchdog != NMI_LOCAL_APIC) &&
	    (nmi_watchdog != NMI_IO_APIC))
		return;

	if (wd->enabled == 1)
		return;

	/* cheap hack to support suspend/resume */
	/* if cpu0 is not active neither should the other cpus */
	if ((smp_processor_id() != 0) && (atomic_read(&nmi_active) <= 0))
		return;

	if (nmi_watchdog == NMI_LOCAL_APIC) {
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_AMD:
			if (strstr(boot_cpu_data.x86_model_id, "Screwdriver"))
				return;
			if (!setup_k7_watchdog())
				return;
			break;
		case X86_VENDOR_INTEL:
			if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
				if (!setup_intel_arch_watchdog())
					return;
				break;
			}
			if (!setup_p4_watchdog())
				return;
			break;
		default:
			return;
		}
	}
	wd->enabled = 1;
	atomic_inc(&nmi_active);
}

void stop_apic_nmi_watchdog(void *unused)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	/* only support LOCAL and IO APICs for now */
	if ((nmi_watchdog != NMI_LOCAL_APIC) &&
	    (nmi_watchdog != NMI_IO_APIC))
		return;

	if (wd->enabled == 0)
		return;

	if (nmi_watchdog == NMI_LOCAL_APIC) {
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_AMD:
			if (strstr(boot_cpu_data.x86_model_id, "Screwdriver"))
				return;
			stop_k7_watchdog();
			break;
		case X86_VENDOR_INTEL:
			if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
				stop_intel_arch_watchdog();
				break;
			}
			stop_p4_watchdog();
			break;
		default:
			return;
		}
	}
	wd->enabled = 0;
	atomic_dec(&nmi_active);
}

/*
 * the best way to detect whether a CPU has a 'hard lockup' problem
 * is to check its local APIC timer IRQ counts. If they are not
 * changing then that CPU has some problem.
 *
 * as these watchdog NMI IRQs are generated on every CPU, we only
 * have to check the current processor.
 */

static DEFINE_PER_CPU(unsigned, last_irq_sum);
static DEFINE_PER_CPU(local_t, alert_counter);
static DEFINE_PER_CPU(int, nmi_touch);

void touch_nmi_watchdog (void)
{
	if (nmi_watchdog > 0) {
		unsigned cpu;

		/*
		 * Tell other CPUs to reset their alert counters. We cannot
		 * do it ourselves because the alert count increase is not
		 * atomic.
		 */
		for_each_present_cpu (cpu)
			per_cpu(nmi_touch, cpu) = 1;
	}

	touch_softlockup_watchdog();
}
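
/*
 * Illustrative usage (not part of this file): code that legitimately
 * spins with interrupts off for a long time, e.g. a slow console dump
 * loop, is expected to call touch_nmi_watchdog() from inside the loop
 * so the per-cpu alert counters above keep getting reset:
 *
 *	while (!done) {
 *		touch_nmi_watchdog();
 *		cpu_relax();
 *	}
 */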

int __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
{
	int sum;
	int touched = 0;
	int cpu = smp_processor_id();
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
	u64 dummy;
	int rc = 0;

	/* check for other users first */
	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
			== NOTIFY_STOP) {
		rc = 1;
		touched = 1;
	}

	sum = read_pda(apic_timer_irqs);
	if (__get_cpu_var(nmi_touch)) {
		__get_cpu_var(nmi_touch) = 0;
		touched = 1;
	}

	if (cpu_isset(cpu, backtrace_mask)) {
		static DEFINE_SPINLOCK(lock);	/* Serialise the printks */

		spin_lock(&lock);
		printk("NMI backtrace for cpu %d\n", cpu);
		dump_stack();
		spin_unlock(&lock);
		cpu_clear(cpu, backtrace_mask);
	}

#ifdef CONFIG_X86_MCE
	/* Could check oops_in_progress here too, but it's safer
	   not to */
	if (atomic_read(&mce_entry) > 0)
		touched = 1;
#endif
	/* if the apic timer isn't firing, this cpu isn't doing much */
	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		local_inc(&__get_cpu_var(alert_counter));
		if (local_read(&__get_cpu_var(alert_counter)) == 5*nmi_hz)
			die_nmi("NMI Watchdog detected LOCKUP on CPU %d\n", regs,
				panic_on_timeout);
	} else {
		__get_cpu_var(last_irq_sum) = sum;
		local_set(&__get_cpu_var(alert_counter), 0);
	}

	/* see if the nmi watchdog went off */
	if (wd->enabled) {
		if (nmi_watchdog == NMI_LOCAL_APIC) {
			rdmsrl(wd->perfctr_msr, dummy);
			if (dummy & wd->check_bit) {
				/* this wasn't a watchdog timer interrupt */
				goto done;
			}

			/* only Intel uses the cccr msr */
			if (wd->cccr_msr != 0) {
				/*
				 * P4 quirks:
				 * - An overflown perfctr will assert its interrupt
				 *   until the OVF flag in its CCCR is cleared.
				 * - LVTPC is masked on interrupt and must be
				 *   unmasked by the LVTPC handler.
				 */
				rdmsrl(wd->cccr_msr, dummy);
				dummy &= ~P4_CCCR_OVF;
				wrmsrl(wd->cccr_msr, dummy);
				apic_write(APIC_LVTPC, APIC_DM_NMI);
				/* start the cycle over again */
				wrmsrl(wd->perfctr_msr,
				       -((u64)cpu_khz * 1000 / nmi_hz));
			} else if (wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
				/*
				 * ArchPerfmon/Core Duo needs to re-unmask
				 * the apic vector
				 */
				apic_write(APIC_LVTPC, APIC_DM_NMI);
				/* ARCH_PERFMON has 32 bit counter writes */
				wrmsr(wd->perfctr_msr,
				      (u32)(-((u64)cpu_khz * 1000 / nmi_hz)), 0);
			} else {
				/* start the cycle over again */
				wrmsrl(wd->perfctr_msr,
				       -((u64)cpu_khz * 1000 / nmi_hz));
			}
			rc = 1;
		} else if (nmi_watchdog == NMI_IO_APIC) {
			/* don't know how to accurately check for this.
			 * just assume it was a watchdog timer interrupt
			 * This matches the old behaviour.
			 */
			rc = 1;
		} else
			printk(KERN_WARNING "Unknown enabled NMI hardware?!\n");
	}
done:
	return rc;
}

asmlinkage __kprobes void do_nmi(struct pt_regs * regs, long error_code)
{
	nmi_enter();
	add_pda(__nmi_count,1);
	default_do_nmi(regs);
	nmi_exit();
}

int do_nmi_callback(struct pt_regs * regs, int cpu)
{
#ifdef CONFIG_SYSCTL
	if (unknown_nmi_panic)
		return unknown_nmi_panic_callback(regs, cpu);
#endif
	return 0;
}

#ifdef CONFIG_SYSCTL

static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
{
	unsigned char reason = get_nmi_reason();
	char buf[64];

	sprintf(buf, "NMI received for unknown reason %02x\n", reason);
	die_nmi(buf, regs, 1);	/* Always panic here */
	return 0;
}

/*
 * proc handler for /proc/sys/kernel/nmi
 */
int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	int old_state;

	nmi_watchdog_enabled = (atomic_read(&nmi_active) > 0) ? 1 : 0;
	old_state = nmi_watchdog_enabled;
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (!!old_state == !!nmi_watchdog_enabled)
		return 0;

	if (atomic_read(&nmi_active) < 0) {
		printk(KERN_WARNING "NMI watchdog is permanently disabled\n");
		return -EIO;
	}

	/* if nmi_watchdog is not set yet, then set it */
	nmi_watchdog_default();

	if (nmi_watchdog == NMI_LOCAL_APIC) {
		if (nmi_watchdog_enabled)
			enable_lapic_nmi_watchdog();
		else
			disable_lapic_nmi_watchdog();
	} else {
		printk(KERN_WARNING
			"NMI watchdog doesn't know what hardware to touch\n");
		return -EIO;
	}
	return 0;
}

#endif

void __trigger_all_cpu_backtrace(void)
{
	int i;

	backtrace_mask = cpu_online_map;
	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
	for (i = 0; i < 10 * 1000; i++) {
		if (cpus_empty(backtrace_mask))
			break;
		mdelay(1);
	}
}
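
/*
 * __trigger_all_cpu_backtrace() works by setting a bit for every online
 * CPU in backtrace_mask; each CPU notices its bit from within
 * nmi_watchdog_tick() on its next watchdog NMI, dumps its stack and
 * clears the bit, so the wait loop above normally finishes well before
 * the 10 second timeout.
 */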

EXPORT_SYMBOL(nmi_active);
EXPORT_SYMBOL(nmi_watchdog);
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi);
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
EXPORT_SYMBOL(reserve_perfctr_nmi);
EXPORT_SYMBOL(release_perfctr_nmi);
EXPORT_SYMBOL(reserve_evntsel_nmi);
EXPORT_SYMBOL(release_evntsel_nmi);
EXPORT_SYMBOL(disable_timer_nmi_watchdog);
EXPORT_SYMBOL(enable_timer_nmi_watchdog);
EXPORT_SYMBOL(touch_nmi_watchdog);