/*
 * NMI watchdog support on APIC systems
 *
 * Started by Ingo Molnar <mingo@redhat.com>
 *
 * Fixes:
 *  Mikael Pettersson	: AMD K7 support for local APIC NMI watchdog.
 *  Mikael Pettersson	: Power Management for local APIC NMI watchdog.
 *  Mikael Pettersson	: Pentium 4 support for local APIC NMI watchdog.
 *  Pavel Machek and
 *  Mikael Pettersson	: PM converted to driver model. Disable/enable API.
 */

#include <asm/apic.h>

#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/sysdev.h>
#include <linux/sysctl.h>
#include <linux/percpu.h>
#include <linux/kprobes.h>
#include <linux/cpumask.h>
#include <linux/kernel_stat.h>
#include <linux/kdebug.h>
#include <linux/smp.h>

#include <asm/i8259.h>
#include <asm/io_apic.h>
#include <asm/smp.h>
#include <asm/nmi.h>
#include <asm/proto.h>
#include <asm/timer.h>

#include <asm/mce.h>

#include <mach_traps.h>

int unknown_nmi_panic;
int nmi_watchdog_enabled;

static cpumask_t backtrace_mask = CPU_MASK_NONE;

/* nmi_active:
 * >0: the lapic NMI watchdog is active, but can be disabled
 * <0: the lapic NMI watchdog has not been set up, and cannot
 *     be enabled
 *  0: the lapic NMI watchdog is disabled, but can be enabled
 */
atomic_t nmi_active = ATOMIC_INIT(0);		/* oprofile uses this */
EXPORT_SYMBOL(nmi_active);
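
/*
 * Illustrative sketch (not a call site in this file): code elsewhere that
 * wants the performance counters for itself, e.g. a profiler, typically
 * checks this state before taking the lapic watchdog down:
 *
 *	if (nmi_watchdog == NMI_LOCAL_APIC && atomic_read(&nmi_active) > 0)
 *		disable_lapic_nmi_watchdog();
 *
 * disable_lapic_nmi_watchdog() comes from the lapic watchdog driver; the
 * real callers live outside this file.
 */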

unsigned int nmi_watchdog = NMI_NONE;
EXPORT_SYMBOL(nmi_watchdog);

static int panic_on_timeout;

static unsigned int nmi_hz = HZ;
static DEFINE_PER_CPU(short, wd_enabled);
static int endflag __initdata;

static inline unsigned int get_nmi_count(int cpu)
{
#ifdef CONFIG_X86_64
	return cpu_pda(cpu)->__nmi_count;
#else
	return nmi_count(cpu);
#endif
}

static inline int mce_in_progress(void)
{
#if defined(CONFIG_X86_64) && defined(CONFIG_X86_MCE)
	return atomic_read(&mce_entry) > 0;
#endif
	return 0;
}

/*
 * Take the local apic timer and PIT/HPET into account. We don't know
 * which one is active when highres/dyntick is on.
 */
static inline unsigned int get_timer_irqs(int cpu)
{
#ifdef CONFIG_X86_64
	return read_pda(apic_timer_irqs) + read_pda(irq0_irqs);
#else
	return per_cpu(irq_stat, cpu).apic_timer_irqs +
		per_cpu(irq_stat, cpu).irq0_irqs;
#endif
}
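
/*
 * nmi_watchdog_tick() below samples this sum on each watchdog NMI; a sum
 * that stops changing between samples is what ultimately flags a CPU as
 * locked up.
 */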

#ifdef CONFIG_SMP
/*
 * The performance counters used by NMI_LOCAL_APIC don't trigger when
 * the CPU is idle. To make sure the NMI watchdog really ticks on all
 * CPUs during the test make them busy.
 */
static __init void nmi_cpu_busy(void *data)
{
	local_irq_enable_in_hardirq();
	/*
	 * Intentionally don't use cpu_relax here. This is
	 * to make sure that the performance counter really ticks,
	 * even if there is a simulator or similar that catches the
	 * pause instruction. On a real HT machine this is fine because
	 * all other CPUs are busy with "useless" delay loops and don't
	 * care if they get somewhat less cycles.
	 */
	while (endflag == 0)
		mb();
}
#endif

static void report_broken_nmi(int cpu, int *prev_nmi_count)
{
	printk(KERN_CONT "\n");

	printk(KERN_WARNING
		"WARNING: CPU#%d: NMI appears to be stuck (%d->%d)!\n",
			cpu, prev_nmi_count[cpu], get_nmi_count(cpu));

	printk(KERN_WARNING
		"Please report this to bugzilla.kernel.org,\n");
	printk(KERN_WARNING
		"and attach the output of the 'dmesg' command.\n");

	per_cpu(wd_enabled, cpu) = 0;
	atomic_dec(&nmi_active);
}

int __init check_nmi_watchdog(void)
{
	unsigned int *prev_nmi_count;
	int cpu;

	if (!nmi_watchdog_active() || !atomic_read(&nmi_active))
		return 0;

	prev_nmi_count = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
	if (!prev_nmi_count)
		goto error;

	printk(KERN_INFO "Testing NMI watchdog ... ");

#ifdef CONFIG_SMP
	if (nmi_watchdog == NMI_LOCAL_APIC)
		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0);
#endif

	for_each_possible_cpu(cpu)
		prev_nmi_count[cpu] = get_nmi_count(cpu);
	local_irq_enable();
	mdelay((20 * 1000) / nmi_hz); /* wait 20 ticks */

	for_each_online_cpu(cpu) {
		if (!per_cpu(wd_enabled, cpu))
			continue;
		if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5)
			report_broken_nmi(cpu, prev_nmi_count);
	}
	endflag = 1;
	if (!atomic_read(&nmi_active)) {
		kfree(prev_nmi_count);
		atomic_set(&nmi_active, -1);
		goto error;
	}
	printk("OK.\n");

	/*
	 * now that we know it works we can reduce NMI frequency to
	 * something more reasonable; makes a difference in some configs
	 */
	if (nmi_watchdog == NMI_LOCAL_APIC)
		nmi_hz = lapic_adjust_nmi_hz(1);

	kfree(prev_nmi_count);
	return 0;
error:
	if (nmi_watchdog == NMI_IO_APIC && !timer_through_8259)
		disable_8259A_irq(0);
#ifdef CONFIG_X86_32
	timer_ack = 0;
#endif
	return -1;
}
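
/*
 * check_nmi_watchdog() runs once, late in boot, after the secondary CPUs
 * have been brought up (the SMP boot code is the expected caller). A CPU
 * whose NMI count did not advance gets its watchdog switched off via
 * report_broken_nmi() rather than producing false lockup reports later.
 */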

static int __init setup_nmi_watchdog(char *str)
{
	unsigned int nmi;

	if (!strncmp(str, "panic", 5)) {
		panic_on_timeout = 1;
		str = strchr(str, ',');
		if (!str)
			return 1;
		++str;
	}

	get_option(&str, &nmi);

	if (nmi >= NMI_INVALID)
		return 0;

	nmi_watchdog = nmi;
	return 1;
}
__setup("nmi_watchdog=", setup_nmi_watchdog);
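
/*
 * Boot-line usage (sketch): "nmi_watchdog=panic,2" sets panic_on_timeout and
 * then selects mode 2. With the usual numbering from asm/nmi.h, 1 is the
 * IO-APIC driven watchdog and 2 the local APIC one; anything >= NMI_INVALID
 * is rejected.
 */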

/*
 * Suspend/resume support
 */
#ifdef CONFIG_PM

static int nmi_pm_active; /* nmi_active before suspend */

static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state)
{
	/* only CPU0 goes here, other CPUs should be offline */
	nmi_pm_active = atomic_read(&nmi_active);
	stop_apic_nmi_watchdog(NULL);
	BUG_ON(atomic_read(&nmi_active) != 0);
	return 0;
}

static int lapic_nmi_resume(struct sys_device *dev)
{
	/* only CPU0 goes here, other CPUs should be offline */
	if (nmi_pm_active > 0) {
		setup_apic_nmi_watchdog(NULL);
		touch_nmi_watchdog();
	}
	return 0;
}

static struct sysdev_class nmi_sysclass = {
	.name		= "lapic_nmi",
	.resume		= lapic_nmi_resume,
	.suspend	= lapic_nmi_suspend,
};

static struct sys_device device_lapic_nmi = {
	.id	= 0,
	.cls	= &nmi_sysclass,
};

static int __init init_lapic_nmi_sysfs(void)
{
	int error;

	/*
	 * should really be a BUG_ON but b/c this is an
	 * init call, it just doesn't work.  -dcz
	 */
	if (nmi_watchdog != NMI_LOCAL_APIC)
		return 0;

	if (atomic_read(&nmi_active) < 0)
		return 0;

	error = sysdev_class_register(&nmi_sysclass);
	if (!error)
		error = sysdev_register(&device_lapic_nmi);
	return error;
}

/* must come after the local APIC's device_initcall() */
late_initcall(init_lapic_nmi_sysfs);

#endif	/* CONFIG_PM */

static void __acpi_nmi_enable(void *__unused)
{
	apic_write(APIC_LVT0, APIC_DM_NMI);
}

/*
 * Enable timer based NMIs on all CPUs:
 */
void acpi_nmi_enable(void)
{
	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
		on_each_cpu(__acpi_nmi_enable, NULL, 1);
}

static void __acpi_nmi_disable(void *__unused)
{
	apic_write(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED);
}

/*
 * Disable timer based NMIs on all CPUs:
 */
void acpi_nmi_disable(void)
{
	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
		on_each_cpu(__acpi_nmi_disable, NULL, 1);
}

/*
 * This function is called as soon as the LAPIC NMI watchdog driver has
 * everything in place and is ready to check whether the NMIs belong to
 * the NMI watchdog.
 */
void cpu_nmi_set_wd_enabled(void)
{
	__get_cpu_var(wd_enabled) = 1;
}
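
/*
 * The expected caller is the lapic watchdog driver itself (an assumption;
 * the call sites are outside this file), which flips the flag right before
 * it unmasks its NMI source, roughly:
 *
 *	cpu_nmi_set_wd_enabled();
 *	apic_write(APIC_LVTPC, APIC_DM_NMI);
 */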

void setup_apic_nmi_watchdog(void *unused)
{
	if (__get_cpu_var(wd_enabled))
		return;

	/* cheap hack to support suspend/resume */
	/* if cpu0 is not active neither should the other cpus */
	if (smp_processor_id() != 0 && atomic_read(&nmi_active) <= 0)
		return;

	switch (nmi_watchdog) {
	case NMI_LOCAL_APIC:
		if (lapic_watchdog_init(nmi_hz) < 0) {
			__get_cpu_var(wd_enabled) = 0;
			return;
		}
		/* FALL THROUGH */
	case NMI_IO_APIC:
		__get_cpu_var(wd_enabled) = 1;
		atomic_inc(&nmi_active);
	}
}

void stop_apic_nmi_watchdog(void *unused)
{
	/* only support LOCAL and IO APICs for now */
	if (!nmi_watchdog_active())
		return;
	if (__get_cpu_var(wd_enabled) == 0)
		return;
	if (nmi_watchdog == NMI_LOCAL_APIC)
		lapic_watchdog_stop();
	__get_cpu_var(wd_enabled) = 0;
	atomic_dec(&nmi_active);
}
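
/*
 * Both helpers act on the calling CPU only; the suspend/resume hooks above
 * call them directly on CPU0, while callers that want a global effect are
 * expected to fan them out themselves, roughly:
 *
 *	on_each_cpu(setup_apic_nmi_watchdog, NULL, 1);
 *
 * (sketch of the usual pattern; those call sites live outside this file)
 */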

/*
 * the best way to detect whether a CPU has a 'hard lockup' problem
 * is to check its local APIC timer IRQ counts. If they are not
 * changing then that CPU has some problem.
 *
 * as these watchdog NMI IRQs are generated on every CPU, we only
 * have to check the current processor.
 *
 * since NMIs don't listen to _any_ locks, we have to be extremely
 * careful not to rely on unsafe variables. The printk might lock
 * up though, so we have to break up any console locks first ...
 * [when there will be more tty-related locks, break them up here too!]
 */

static DEFINE_PER_CPU(unsigned, last_irq_sum);
static DEFINE_PER_CPU(local_t, alert_counter);
static DEFINE_PER_CPU(int, nmi_touch);

void touch_nmi_watchdog(void)
{
	if (nmi_watchdog_active()) {
		unsigned cpu;

		/*
		 * Tell other CPUs to reset their alert counters. We cannot
		 * do it ourselves because the alert count increase is not
		 * atomic.
		 */
		for_each_present_cpu(cpu) {
			if (per_cpu(nmi_touch, cpu) != 1)
				per_cpu(nmi_touch, cpu) = 1;
		}
	}

	/*
	 * Tickle the softlockup detector too:
	 */
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);
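
/*
 * Typical use (illustrative; polling_slow_hardware() is a made-up
 * placeholder): code that legitimately keeps a CPU busy with interrupts
 * off for a long time pets the watchdog from inside its loop, so the
 * ~5 second threshold in nmi_watchdog_tick() is never reached:
 *
 *	while (polling_slow_hardware())
 *		touch_nmi_watchdog();
 */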

notrace __kprobes int
nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
{
	/*
	 * Since current_thread_info() is always on the stack, and we
	 * always switch the stack NMI-atomically, it's safe to use
	 * smp_processor_id().
	 */
	unsigned int sum;
	int touched = 0;
	int cpu = smp_processor_id();
	int rc = 0;

	/* check for other users first */
	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
			== NOTIFY_STOP) {
		rc = 1;
		touched = 1;
	}

	sum = get_timer_irqs(cpu);

	if (__get_cpu_var(nmi_touch)) {
		__get_cpu_var(nmi_touch) = 0;
		touched = 1;
	}

	if (cpu_isset(cpu, backtrace_mask)) {
		static DEFINE_SPINLOCK(lock);	/* Serialise the printks */

		spin_lock(&lock);
		printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
		dump_stack();
		spin_unlock(&lock);
		cpu_clear(cpu, backtrace_mask);
	}

	/* Could check oops_in_progress here too, but it's safer not to */
	if (mce_in_progress())
		touched = 1;

	/* if none of the timers is firing, this cpu isn't doing much */
	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		local_inc(&__get_cpu_var(alert_counter));
		if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz)
			/*
			 * die_nmi will return ONLY if NOTIFY_STOP happens..
			 */
			die_nmi("BUG: NMI Watchdog detected LOCKUP",
				regs, panic_on_timeout);
	} else {
		__get_cpu_var(last_irq_sum) = sum;
		local_set(&__get_cpu_var(alert_counter), 0);
	}

	/* see if the nmi watchdog went off */
	if (!__get_cpu_var(wd_enabled))
		return rc;
	switch (nmi_watchdog) {
	case NMI_LOCAL_APIC:
		rc |= lapic_wd_event(nmi_hz);
		break;
	case NMI_IO_APIC:
		/*
		 * don't know how to accurately check for this;
		 * just assume it was a watchdog timer interrupt.
		 * This matches the old behaviour.
		 */
		rc = 1;
		break;
	}
	return rc;
}
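
/*
 * A non-zero return tells the NMI trap handler that this NMI has been
 * accounted for (a watchdog tick or another DIE_NMI consumer), so it should
 * not be reported as an unknown NMI; the exact caller is the arch NMI entry
 * code outside this file.
 */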

#ifdef CONFIG_SYSCTL

static int __init setup_unknown_nmi_panic(char *str)
{
	unknown_nmi_panic = 1;
	return 1;
}
__setup("unknown_nmi_panic", setup_unknown_nmi_panic);

static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
{
	unsigned char reason = get_nmi_reason();
	char buf[64];

	sprintf(buf, "NMI received for unknown reason %02x\n", reason);
	die_nmi(buf, regs, 1); /* Always panic here */
	return 0;
}

/*
 * proc handler for /proc/sys/kernel/nmi_watchdog
 */
int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	int old_state;

	nmi_watchdog_enabled = (atomic_read(&nmi_active) > 0) ? 1 : 0;
	old_state = nmi_watchdog_enabled;
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (!!old_state == !!nmi_watchdog_enabled)
		return 0;

	if (atomic_read(&nmi_active) < 0 || !nmi_watchdog_active()) {
		printk(KERN_WARNING
			"NMI watchdog is permanently disabled\n");
		return -EIO;
	}

	if (nmi_watchdog == NMI_LOCAL_APIC) {
		if (nmi_watchdog_enabled)
			enable_lapic_nmi_watchdog();
		else
			disable_lapic_nmi_watchdog();
	} else {
		printk(KERN_WARNING
			"NMI watchdog doesn't know what hardware to touch\n");
		return -EIO;
	}
	return 0;
}
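
/*
 * Runtime toggle (sketch, assuming the sysctl table in kernel/sysctl.c
 * wires its proc_handler to this function):
 *
 *	echo 0 > /proc/sys/kernel/nmi_watchdog		disable
 *	echo 1 > /proc/sys/kernel/nmi_watchdog		re-enable
 *
 * As the code above shows, only the local APIC mode can be toggled this way.
 */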

#endif /* CONFIG_SYSCTL */

int do_nmi_callback(struct pt_regs *regs, int cpu)
{
#ifdef CONFIG_SYSCTL
	if (unknown_nmi_panic)
		return unknown_nmi_panic_callback(regs, cpu);
#endif
	return 0;
}

void __trigger_all_cpu_backtrace(void)
{
	int i;

	backtrace_mask = cpu_online_map;
	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
	for (i = 0; i < 10 * 1000; i++) {
		if (cpus_empty(backtrace_mask))
			break;
		mdelay(1);
	}
}
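
/*
 * Normally reached through the trigger_all_cpu_backtrace() wrapper declared
 * in asm/nmi.h (the SysRq 'l' handler is one well-known user, assuming the
 * usual wiring); each CPU then dumps its own stack from nmi_watchdog_tick()
 * when it finds itself in backtrace_mask.
 */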