/*
 * NMI watchdog support on APIC systems
 *
 * Started by Ingo Molnar <mingo@redhat.com>
 *
 * Fixes:
 * Mikael Pettersson	: AMD K7 support for local APIC NMI watchdog.
 * Mikael Pettersson	: Power Management for local APIC NMI watchdog.
 * Mikael Pettersson	: Pentium 4 support for local APIC NMI watchdog.
 * Pavel Machek and
 * Mikael Pettersson	: PM converted to driver model. Disable/enable API.
 */

#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/sysdev.h>
#include <linux/sysctl.h>
#include <linux/percpu.h>
#include <linux/kprobes.h>
#include <linux/cpumask.h>
#include <linux/kernel_stat.h>
#include <linux/kdebug.h>

#include <asm/smp.h>
#include <asm/nmi.h>
#include <asm/proto.h>
#include <asm/timer.h>

#include <asm/mce.h>

#include <mach_traps.h>

int unknown_nmi_panic;
int nmi_watchdog_enabled;

static cpumask_t backtrace_mask = CPU_MASK_NONE;

/* nmi_active:
 * >0: the lapic NMI watchdog is active, but can be disabled
 * <0: the lapic NMI watchdog has not been set up, and cannot
 *     be enabled
 *  0: the lapic NMI watchdog is disabled, but can be enabled
 */
atomic_t nmi_active = ATOMIC_INIT(0);	/* oprofile uses this */
static int panic_on_timeout;

unsigned int nmi_watchdog = NMI_DEFAULT;

static unsigned int nmi_hz = HZ;
static DEFINE_PER_CPU(short, wd_enabled);
static int endflag __initdata = 0;

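/*
 * Per-CPU NMI count; the counter is kept in a different place on
 * 32-bit and 64-bit, so hide the difference behind one helper.
 */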
static inline unsigned int get_nmi_count(int cpu)
{
#ifdef CONFIG_X86_64
	return cpu_pda(cpu)->__nmi_count;
#else
	return nmi_count(cpu);
#endif
}

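/*
 * Non-zero while a machine check exception is being handled (64-bit
 * with CONFIG_X86_MCE only); the watchdog treats that CPU as "touched"
 * so the MCE handler is not reported as a lockup.
 */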
static inline int mce_in_progress(void)
{
#if defined(CONFIG_X86_64) && defined(CONFIG_X86_MCE)
	return atomic_read(&mce_entry) > 0;
#endif
	return 0;
}

/*
 * Take the local apic timer and PIT/HPET into account. We don't
 * know which one is active when highres/dyntick is on.
 */
static inline unsigned int get_timer_irqs(int cpu)
{
#ifdef CONFIG_X86_64
	return read_pda(apic_timer_irqs) + read_pda(irq0_irqs);
#else
	return per_cpu(irq_stat, cpu).apic_timer_irqs +
		per_cpu(irq_stat, cpu).irq0_irqs;
#endif
}

#ifdef CONFIG_X86_64
/* Run after command line and cpu_init init, but before all other checks */
void nmi_watchdog_default(void)
{
	if (nmi_watchdog != NMI_DEFAULT)
		return;
	nmi_watchdog = NMI_NONE;
}
#endif

#ifdef CONFIG_SMP
/*
 * The performance counters used by NMI_LOCAL_APIC don't trigger when
 * the CPU is idle. To make sure the NMI watchdog really ticks on all
 * CPUs during the test make them busy.
 */
static __init void nmi_cpu_busy(void *data)
{
	local_irq_enable_in_hardirq();
	/*
	 * Intentionally don't use cpu_relax here. This is
	 * to make sure that the performance counter really ticks,
	 * even if there is a simulator or similar that catches the
	 * pause instruction. On a real HT machine this is fine because
	 * all other CPUs are busy with "useless" delay loops and don't
	 * care if they get somewhat less cycles.
	 */
	while (endflag == 0)
		mb();
}
#endif

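/*
 * Boot-time self test: let the watchdog run for a short while and check
 * that the NMI count on every enabled CPU actually increased.  CPUs
 * whose count stayed flat get their watchdog disabled again.
 */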
int __init check_nmi_watchdog(void)
{
	unsigned int *prev_nmi_count;
	int cpu;

	if (nmi_watchdog == NMI_NONE || nmi_watchdog == NMI_DISABLED)
		return 0;

	if (!atomic_read(&nmi_active))
		return 0;

	prev_nmi_count = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
	if (!prev_nmi_count)
		goto error;

	printk(KERN_INFO "Testing NMI watchdog ... ");

#ifdef CONFIG_SMP
	if (nmi_watchdog == NMI_LOCAL_APIC)
		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
#endif

	for_each_possible_cpu(cpu)
		prev_nmi_count[cpu] = get_nmi_count(cpu);
	local_irq_enable();
	mdelay((20 * 1000) / nmi_hz); /* wait 20 ticks */

	for_each_online_cpu(cpu) {
		if (!per_cpu(wd_enabled, cpu))
			continue;
		if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5) {
			printk(KERN_WARNING "WARNING: CPU#%d: NMI "
				"appears to be stuck (%d->%d)!\n",
				cpu,
				prev_nmi_count[cpu],
				get_nmi_count(cpu));
			per_cpu(wd_enabled, cpu) = 0;
			atomic_dec(&nmi_active);
		}
	}
	endflag = 1;
	if (!atomic_read(&nmi_active)) {
		kfree(prev_nmi_count);
		atomic_set(&nmi_active, -1);
		goto error;
	}
	printk("OK.\n");

	/*
	 * now that we know it works we can reduce NMI frequency to
	 * something more reasonable; makes a difference in some configs
	 */
	if (nmi_watchdog == NMI_LOCAL_APIC)
		nmi_hz = lapic_adjust_nmi_hz(1);

	kfree(prev_nmi_count);
	return 0;

error:
#ifdef CONFIG_X86_32
	timer_ack = !cpu_has_tsc;
#endif
	return -1;
}

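/*
 * Parse the "nmi_watchdog=" boot option.  An optional leading "panic,"
 * requests a panic when the watchdog detects a lockup.
 */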
static int __init setup_nmi_watchdog(char *str)
{
	int nmi;

	if (!strncmp(str, "panic", 5)) {
		panic_on_timeout = 1;
		str = strchr(str, ',');
		if (!str)
			return 1;
		++str;
	}

	get_option(&str, &nmi);

	if (nmi >= NMI_INVALID || nmi < NMI_NONE)
		return 0;

	nmi_watchdog = nmi;
	return 1;
}
__setup("nmi_watchdog=", setup_nmi_watchdog);

/*
 * Suspend/resume support
 */
#ifdef CONFIG_PM

static int nmi_pm_active; /* nmi_active before suspend */

static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state)
{
	/* only CPU0 goes here, other CPUs should be offline */
	nmi_pm_active = atomic_read(&nmi_active);
	stop_apic_nmi_watchdog(NULL);
	BUG_ON(atomic_read(&nmi_active) != 0);
	return 0;
}

static int lapic_nmi_resume(struct sys_device *dev)
{
	/* only CPU0 goes here, other CPUs should be offline */
	if (nmi_pm_active > 0) {
		setup_apic_nmi_watchdog(NULL);
		touch_nmi_watchdog();
	}
	return 0;
}

static struct sysdev_class nmi_sysclass = {
	.name		= "lapic_nmi",
	.resume		= lapic_nmi_resume,
	.suspend	= lapic_nmi_suspend,
};

static struct sys_device device_lapic_nmi = {
	.id	= 0,
	.cls	= &nmi_sysclass,
};

static int __init init_lapic_nmi_sysfs(void)
{
	int error;

	/*
	 * should really be a BUG_ON but b/c this is an
	 * init call, it just doesn't work.  -dcz
	 */
	if (nmi_watchdog != NMI_LOCAL_APIC)
		return 0;

	if (atomic_read(&nmi_active) < 0)
		return 0;

	error = sysdev_class_register(&nmi_sysclass);
	if (!error)
		error = sysdev_register(&device_lapic_nmi);
	return error;
}

/* must come after the local APIC's device_initcall() */
late_initcall(init_lapic_nmi_sysfs);

#endif	/* CONFIG_PM */

static void __acpi_nmi_enable(void *__unused)
{
	apic_write_around(APIC_LVT0, APIC_DM_NMI);
}

/*
 * Enable timer based NMIs on all CPUs:
 */
void acpi_nmi_enable(void)
{
	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
		on_each_cpu(__acpi_nmi_enable, NULL, 0, 1);
}

static void __acpi_nmi_disable(void *__unused)
{
	apic_write_around(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED);
}

/*
 * Disable timer based NMIs on all CPUs:
 */
void acpi_nmi_disable(void)
{
	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
		on_each_cpu(__acpi_nmi_disable, NULL, 0, 1);
}

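/*
 * Enable the configured watchdog (lapic or I/O APIC driven) on the
 * calling CPU and account for it in nmi_active.
 */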
void setup_apic_nmi_watchdog(void *unused)
{
	if (__get_cpu_var(wd_enabled))
		return;

	/* cheap hack to support suspend/resume */
	/* if cpu0 is not active neither should the other cpus */
	if (smp_processor_id() != 0 && atomic_read(&nmi_active) <= 0)
		return;

	switch (nmi_watchdog) {
	case NMI_LOCAL_APIC:
		/* set the flag before init to avoid a race with the handler */
		__get_cpu_var(wd_enabled) = 1;
		if (lapic_watchdog_init(nmi_hz) < 0) {
			__get_cpu_var(wd_enabled) = 0;
			return;
		}
		/* FALL THROUGH */
	case NMI_IO_APIC:
		__get_cpu_var(wd_enabled) = 1;
		atomic_inc(&nmi_active);
	}
}

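/*
 * Disable the watchdog on the calling CPU and drop its reference on
 * nmi_active.
 */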
void stop_apic_nmi_watchdog(void *unused)
{
	/* only support LOCAL and IO APICs for now */
	if (nmi_watchdog != NMI_LOCAL_APIC &&
	    nmi_watchdog != NMI_IO_APIC)
		return;
	if (__get_cpu_var(wd_enabled) == 0)
		return;
	if (nmi_watchdog == NMI_LOCAL_APIC)
		lapic_watchdog_stop();
	__get_cpu_var(wd_enabled) = 0;
	atomic_dec(&nmi_active);
}

/*
 * The best way to detect whether a CPU has a 'hard lockup' problem
 * is to check its local APIC timer IRQ counts. If they are not
 * changing then that CPU has some problem.
 *
 * As these watchdog NMI IRQs are generated on every CPU, we only
 * have to check the current processor.
 *
 * Since NMIs don't listen to _any_ locks, we have to be extremely
 * careful not to rely on unsafe variables. The printk might lock
 * up though, so we have to break up any console locks first ...
 * [when there will be more tty-related locks, break them up here too!]
 */

static DEFINE_PER_CPU(unsigned, last_irq_sum);
static DEFINE_PER_CPU(local_t, alert_counter);
static DEFINE_PER_CPU(int, nmi_touch);

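/*
 * Callers use this to signal "the system is still making progress":
 * every CPU is asked to reset its alert counter on its next NMI tick.
 */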
void touch_nmi_watchdog(void)
{
	if (nmi_watchdog > 0) {
		unsigned cpu;

		/*
		 * Tell other CPUs to reset their alert counters. We cannot
		 * do it ourselves because the alert count increase is not
		 * atomic.
		 */
		for_each_present_cpu(cpu) {
			if (per_cpu(nmi_touch, cpu) != 1)
				per_cpu(nmi_touch, cpu) = 1;
		}
	}

	/*
	 * Tickle the softlockup detector too:
	 */
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);

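/*
 * Per-NMI watchdog tick.  Returns 1 if the NMI was recognized (handled
 * by another die-chain user or accounted as a watchdog event), 0 if the
 * caller should treat it as an unknown NMI.
 */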
notrace __kprobes int
nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
{
	/*
	 * Since current_thread_info()-> is always on the stack, and we
	 * always switch the stack NMI-atomically, it's safe to use
	 * smp_processor_id().
	 */
	unsigned int sum;
	int touched = 0;
	int cpu = smp_processor_id();
	int rc = 0;

	/* check for other users first */
	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
			== NOTIFY_STOP) {
		rc = 1;
		touched = 1;
	}

	sum = get_timer_irqs(cpu);

	if (__get_cpu_var(nmi_touch)) {
		__get_cpu_var(nmi_touch) = 0;
		touched = 1;
	}

	if (cpu_isset(cpu, backtrace_mask)) {
		static DEFINE_SPINLOCK(lock);	/* Serialise the printks */

		spin_lock(&lock);
		printk("NMI backtrace for cpu %d\n", cpu);
		dump_stack();
		spin_unlock(&lock);
		cpu_clear(cpu, backtrace_mask);
	}

	/* Could check oops_in_progress here too, but it's safer not to */
	if (mce_in_progress())
		touched = 1;

	/* if none of the timers is firing, this cpu isn't doing much */
	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		local_inc(&__get_cpu_var(alert_counter));
		if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz)
			/*
			 * die_nmi will return ONLY if NOTIFY_STOP happens..
			 */
			die_nmi("BUG: NMI Watchdog detected LOCKUP",
				regs, panic_on_timeout);
	} else {
		__get_cpu_var(last_irq_sum) = sum;
		local_set(&__get_cpu_var(alert_counter), 0);
	}

	/* see if the nmi watchdog went off */
	if (!__get_cpu_var(wd_enabled))
		return rc;
	switch (nmi_watchdog) {
	case NMI_LOCAL_APIC:
		rc |= lapic_wd_event(nmi_hz);
		break;
	case NMI_IO_APIC:
		/*
		 * Don't know how to accurately check for this.
		 * Just assume it was a watchdog timer interrupt;
		 * this matches the old behaviour.
		 */
		rc = 1;
		break;
	}
	return rc;
}

#ifdef CONFIG_SYSCTL

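/*
 * Used when the unknown_nmi_panic sysctl is set: any NMI with no known
 * cause panics the machine via die_nmi().
 */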
static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
{
	unsigned char reason = get_nmi_reason();
	char buf[64];

	sprintf(buf, "NMI received for unknown reason %02x\n", reason);
	die_nmi(buf, regs, 1); /* Always panic here */
	return 0;
}

/*
 * proc handler for /proc/sys/kernel/nmi
 */
int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	int old_state;

	nmi_watchdog_enabled = (atomic_read(&nmi_active) > 0) ? 1 : 0;
	old_state = nmi_watchdog_enabled;
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (!!old_state == !!nmi_watchdog_enabled)
		return 0;

	if (atomic_read(&nmi_active) < 0 || nmi_watchdog == NMI_DISABLED) {
		printk(KERN_WARNING
			"NMI watchdog is permanently disabled\n");
		return -EIO;
	}

#ifdef CONFIG_X86_64
	/* if nmi_watchdog is not set yet, then set it */
	nmi_watchdog_default();
#else
	if (lapic_watchdog_ok())
		nmi_watchdog = NMI_LOCAL_APIC;
	else
		nmi_watchdog = NMI_IO_APIC;
#endif

	if (nmi_watchdog == NMI_LOCAL_APIC) {
		if (nmi_watchdog_enabled)
			enable_lapic_nmi_watchdog();
		else
			disable_lapic_nmi_watchdog();
	} else {
		printk(KERN_WARNING
			"NMI watchdog doesn't know what hardware to touch\n");
		return -EIO;
	}
	return 0;
}

#endif /* CONFIG_SYSCTL */

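/*
 * Last-chance hook for NMIs nobody claimed; only acts when the
 * unknown_nmi_panic sysctl is enabled.
 */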
int do_nmi_callback(struct pt_regs *regs, int cpu)
{
#ifdef CONFIG_SYSCTL
	if (unknown_nmi_panic)
		return unknown_nmi_panic_callback(regs, cpu);
#endif
	return 0;
}

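/*
 * Ask every online CPU to dump a backtrace from its next watchdog NMI,
 * then wait up to 10 seconds for all of them to finish.
 */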
void __trigger_all_cpu_backtrace(void)
{
	int i;

	backtrace_mask = cpu_online_map;
	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
	for (i = 0; i < 10 * 1000; i++) {
		if (cpus_empty(backtrace_mask))
			break;
		mdelay(1);
	}
}

EXPORT_SYMBOL(nmi_active);
EXPORT_SYMBOL(nmi_watchdog);