/*
 * linux/kernel/irq/spurious.c
 *
 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
 *
 * This file contains spurious interrupt handling.
 */

#include <linux/jiffies.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/timer.h>

#include "internals.h"

static int irqfixup __read_mostly;

#define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
static void poll_spurious_irqs(unsigned long dummy);
static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs, 0, 0);
static int irq_poll_cpu;
static atomic_t irq_poll_active;
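
/*
 * Poller coordination: irq_poll_active counts callers that entered
 * misrouted_irq() or poll_spurious_irqs(); only the first one to
 * increment it actually walks the descriptors. irq_poll_cpu records
 * which CPU the active poller runs on, so that irq_wait_for_poll()
 * can detect (and warn about) waiting for a poll on the same CPU,
 * which would deadlock.
 */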

/*
 * We wait here for a poller to finish.
 *
 * If the poll runs on this CPU, then we yell loudly and return
 * false. That will leave the interrupt line disabled in the worst
 * case, but it should never happen.
 *
 * We wait until the poller is done and then recheck disabled and
 * action (about to be disabled). Only if it's still active, we return
 * true and let the handler run.
 */
bool irq_wait_for_poll(struct irq_desc *desc)
{
	if (WARN_ONCE(irq_poll_cpu == smp_processor_id(),
		      "irq poll in progress on cpu %d for irq %d\n",
		      smp_processor_id(), desc->irq_data.irq))
		return false;

#ifdef CONFIG_SMP
	do {
		raw_spin_unlock(&desc->lock);
		while (desc->status & IRQ_INPROGRESS)
			cpu_relax();
		raw_spin_lock(&desc->lock);
	} while (desc->status & IRQ_INPROGRESS);
	/* Might have been disabled in the meantime */
	return !(desc->status & IRQ_DISABLED) && desc->action;
#else
	return false;
#endif
}
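
/*
 * Callers hold desc->lock. A flow handler that finds the line already
 * marked IRQ_INPROGRESS can check whether that is merely the poller
 * and wait it out, roughly like this (a sketch of the assumed caller
 * shape, cf. kernel/irq/chip.c):
 *
 *	if (unlikely(desc->status & IRQ_INPROGRESS)) {
 *		if (!(desc->status & IRQ_POLL_INPROGRESS) ||
 *		    !irq_wait_for_poll(desc))
 *			goto out_unlock;
 *	}
 */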

/*
 * Recovery handler for misrouted interrupts.
 */
static int try_one_irq(int irq, struct irq_desc *desc, bool force)
{
	struct irqaction *action;
	int ok = 0;

	raw_spin_lock(&desc->lock);

	/* PER_CPU and nested thread interrupts are never polled */
	if (desc->status & (IRQ_PER_CPU | IRQ_NESTED_THREAD))
		goto out;

	/*
	 * Do not poll disabled interrupts unless the spurious
	 * disabled poller asks explicitly.
	 */
	if ((desc->status & IRQ_DISABLED) && !force)
		goto out;

	/*
	 * All handlers must agree on IRQF_SHARED, so we test just the
	 * first. Check for action->next as well.
	 */
	action = desc->action;
	if (!action || !(action->flags & IRQF_SHARED) ||
	    (action->flags & __IRQF_TIMER) || !action->next)
		goto out;

	/* Already running on another processor */
	if (desc->status & IRQ_INPROGRESS) {
		/*
		 * Already running: If it is shared get the other
		 * CPU to go looking for our mystery interrupt too
		 */
		desc->status |= IRQ_PENDING;
		goto out;
	}

	/* Honour the normal IRQ locking and mark it poll in progress */
	desc->status |= IRQ_INPROGRESS | IRQ_POLL_INPROGRESS;
	do {
		desc->status &= ~IRQ_PENDING;
		raw_spin_unlock(&desc->lock);
		if (handle_IRQ_event(irq, action) != IRQ_NONE)
			ok = 1;
		raw_spin_lock(&desc->lock);
		action = desc->action;
	} while ((desc->status & IRQ_PENDING) && action);

	desc->status &= ~(IRQ_INPROGRESS | IRQ_POLL_INPROGRESS);
out:
	raw_spin_unlock(&desc->lock);
	return ok;
}
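
/*
 * The replay loop above runs the handlers with desc->lock dropped, so
 * a real interrupt can arrive meanwhile and set IRQ_PENDING; the loop
 * then polls the line again, mirroring what a regular flow handler
 * does with a pending interrupt, until nothing new arrived or the
 * action was removed.
 */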

static int misrouted_irq(int irq)
{
	struct irq_desc *desc;
	int i, ok = 0;

	if (atomic_inc_return(&irq_poll_active) != 1)
		goto out;

	irq_poll_cpu = smp_processor_id();

	for_each_irq_desc(i, desc) {
		if (!i)
			continue;

		if (i == irq)	/* Already tried */
			continue;

		if (try_one_irq(i, desc, false))
			ok = 1;
	}
out:
	atomic_dec(&irq_poll_active);
	/* So the caller can adjust the irq error counts */
	return ok;
}
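
/*
 * misrouted_irq() is invoked from note_interrupt() when an interrupt
 * arrived on a line whose handlers did not claim it: every other
 * eligible descriptor is polled on the theory that the device really
 * asserted a different, misrouted line. A non-zero return means some
 * handler claimed the poll, and the caller credits that against the
 * unhandled count.
 */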

static void poll_spurious_irqs(unsigned long dummy)
{
	struct irq_desc *desc;
	int i;

	if (atomic_inc_return(&irq_poll_active) != 1)
		goto out;
	irq_poll_cpu = smp_processor_id();

	for_each_irq_desc(i, desc) {
		unsigned int status;

		if (!i)
			continue;

		/* Racy but it doesn't matter */
		status = desc->status;
		barrier();
		if (!(status & IRQ_SPURIOUS_DISABLED))
			continue;

		local_irq_disable();
		try_one_irq(i, desc, true);
		local_irq_enable();
	}
out:
	atomic_dec(&irq_poll_active);
	mod_timer(&poll_spurious_irq_timer,
		  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
}
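
/*
 * The timer-driven poller only touches lines that note_interrupt()
 * shut down with IRQ_SPURIOUS_DISABLED, forcing their handlers to run
 * every POLL_SPURIOUS_IRQ_INTERVAL (HZ/10, i.e. ten times a second)
 * so a device stranded behind a killed line still makes progress. The
 * timer rearms itself unconditionally at the end.
 */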

/*
 * If 99,900 of the previous 100,000 interrupts have not been handled
 * then assume that the IRQ is stuck in some manner. Drop a diagnostic
 * and try to turn the IRQ off.
 *
 * (The other 100-of-100,000 interrupts may have been a correctly
 *  functioning device sharing an IRQ with the failing one)
 */
static void
__report_bad_irq(unsigned int irq, struct irq_desc *desc,
		 irqreturn_t action_ret)
{
	struct irqaction *action;
	unsigned long flags;

	if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
		printk(KERN_ERR "irq event %d: bogus return value %x\n",
				irq, action_ret);
	} else {
		printk(KERN_ERR "irq %d: nobody cared (try booting with "
				"the \"irqpoll\" option)\n", irq);
	}
	dump_stack();
	printk(KERN_ERR "handlers:\n");

	/*
	 * We need to take desc->lock here. note_interrupt() is called
	 * w/o desc->lock held, but IRQ_INPROGRESS set. We might race
	 * with something else removing an action. It's ok to take
	 * desc->lock here. See synchronize_irq().
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	action = desc->action;
	while (action) {
		printk(KERN_ERR "[<%p>]", action->handler);
		print_symbol(" (%s)",
			(unsigned long)action->handler);
		printk("\n");
		action = action->next;
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

static void
report_bad_irq(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret)
{
	static int count = 100;

	if (count > 0) {
		count--;
		__report_bad_irq(irq, desc, action_ret);
	}
}
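
/*
 * report_bad_irq() caps the diagnostics at 100 reports for the whole
 * uptime (the static count is never reset), so a chronically flaky
 * line cannot flood the log with stack dumps.
 */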

static inline int
try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
		  irqreturn_t action_ret)
{
	struct irqaction *action;

	if (!irqfixup)
		return 0;

	/* We didn't actually handle the IRQ - see if it was misrouted? */
	if (action_ret == IRQ_NONE)
		return 1;

	/*
	 * But for 'irqfixup == 2' we also do it for handled interrupts if
	 * they are marked as IRQF_IRQPOLL (or for irq zero, which is the
	 * traditional PC timer interrupt. Legacy.)
	 */
	if (irqfixup < 2)
		return 0;

	if (!irq)
		return 1;

	/*
	 * Since we don't get the descriptor lock, "action" can
	 * change under us. We don't really care, but we don't
	 * want to follow a NULL pointer. So tell the compiler to
	 * just load it once by using a barrier.
	 */
	action = desc->action;
	barrier();
	return action && (action->flags & IRQF_IRQPOLL);
}
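
/*
 * Summary of the irqfixup levels tested above:
 *
 *	irqfixup == 0	never poll (default)
 *	irqfixup == 1	"irqfixup": poll the other lines when an
 *			interrupt went entirely unhandled (IRQ_NONE)
 *	irqfixup == 2	"irqpoll": additionally poll after *handled*
 *			interrupts on irq 0 or on handlers marked
 *			IRQF_IRQPOLL
 */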

void note_interrupt(unsigned int irq, struct irq_desc *desc,
		    irqreturn_t action_ret)
{
	if (desc->status & IRQ_POLL_INPROGRESS)
		return;

	if (unlikely(action_ret != IRQ_HANDLED)) {
		/*
		 * If we are seeing only the odd spurious IRQ caused by
		 * bus asynchronicity then don't eventually trigger an
		 * error; otherwise the counter becomes a doomsday timer
		 * for otherwise working systems.
		 */
		if (time_after(jiffies, desc->last_unhandled + HZ/10))
			desc->irqs_unhandled = 1;
		else
			desc->irqs_unhandled++;
		desc->last_unhandled = jiffies;
		if (unlikely(action_ret != IRQ_NONE))
			report_bad_irq(irq, desc, action_ret);
	}

	if (unlikely(try_misrouted_irq(irq, desc, action_ret))) {
		int ok = misrouted_irq(irq);
		if (action_ret == IRQ_NONE)
			desc->irqs_unhandled -= ok;
	}

	desc->irq_count++;
	if (likely(desc->irq_count < 100000))
		return;

	desc->irq_count = 0;
	if (unlikely(desc->irqs_unhandled > 99900)) {
		/*
		 * The interrupt is stuck
		 */
		__report_bad_irq(irq, desc, action_ret);
		/*
		 * Now kill the IRQ
		 */
		printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
		desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED;
		desc->depth++;
		desc->irq_data.chip->irq_disable(&desc->irq_data);

		mod_timer(&poll_spurious_irq_timer,
			  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
	}
	desc->irqs_unhandled = 0;
}
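
/*
 * Putting the thresholds together: an IRQ is declared stuck only if,
 * within a window of 100,000 interrupts, more than 99,900 arrived
 * less than HZ/10 apart and went unhandled, i.e. better than 99.9%
 * spurious in a sustained burst. Isolated glitches reset
 * irqs_unhandled to 1 via the time_after() check above, so they never
 * accumulate toward the 99,900 cutoff.
 */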

int noirqdebug __read_mostly;

int noirqdebug_setup(char *str)
{
	noirqdebug = 1;
	printk(KERN_INFO "IRQ lockup detection disabled\n");

	return 1;
}

__setup("noirqdebug", noirqdebug_setup);
module_param(noirqdebug, bool, 0644);
MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");
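
/*
 * Booting with "noirqdebug" on the kernel command line skips all of
 * note_interrupt()'s bookkeeping (the flag is consulted by the
 * callers, cf. kernel/irq/handle.c), and the 0644 module_param also
 * exposes it as a runtime-writable parameter under /sys/module/.
 */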

static int __init irqfixup_setup(char *str)
{
	irqfixup = 1;
	printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
	printk(KERN_WARNING "This may impact system performance.\n");

	return 1;
}

__setup("irqfixup", irqfixup_setup);
module_param(irqfixup, int, 0644);

static int __init irqpoll_setup(char *str)
{
	irqfixup = 2;
	printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
		"enabled\n");
	printk(KERN_WARNING "This may significantly impact system "
		"performance\n");
	return 1;
}

__setup("irqpoll", irqpoll_setup);
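
/*
 * Boot option quick reference:
 *
 *	noirqdebug	disable the unhandled-interrupt accounting
 *	irqfixup	poll the other lines when an IRQ goes unhandled
 *	irqpoll		as irqfixup, plus a poll on every timer
 *			interrupt (irq 0 / IRQF_IRQPOLL handlers)
 *
 * e.g. on the kernel command line:  linux ... irqpoll
 */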