/*
 * linux/kernel/irq/spurious.c
 *
 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
 *
 * This file contains spurious interrupt handling.
 */

#include <linux/jiffies.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/timer.h>

#include "internals.h"

static int irqfixup __read_mostly;

#define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
static void poll_spurious_irqs(unsigned long dummy);
static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs, 0, 0);
static int irq_poll_cpu;
static atomic_t irq_poll_active;
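
/*
 * The two variables above serialize the pollers: irq_poll_active
 * ensures that only one poller (misrouted_irq() or
 * poll_spurious_irqs()) runs at a time, and irq_poll_cpu records the
 * CPU it runs on. The poll timer, once armed by note_interrupt(),
 * re-arms itself every POLL_SPURIOUS_IRQ_INTERVAL (HZ/10, i.e. 100ms).
 */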

/*
 * Recovery handler for misrouted interrupts.
 */
static int try_one_irq(int irq, struct irq_desc *desc, bool force)
{
	struct irqaction *action;
	int ok = 0, work = 0;

	raw_spin_lock(&desc->lock);

	/* PER_CPU and nested thread interrupts are never polled */
	if (desc->status & (IRQ_PER_CPU | IRQ_NESTED_THREAD))
		goto out;

	/*
	 * Do not poll disabled interrupts unless the spurious
	 * disabled poller asks explicitly.
	 */
	if ((desc->status & IRQ_DISABLED) && !force)
		goto out;

	/*
	 * All handlers must agree on IRQF_SHARED, so we test just the
	 * first. Check for action->next as well.
	 */
	action = desc->action;
	if (!action || !(action->flags & IRQF_SHARED) ||
	    (action->flags & __IRQF_TIMER) || !action->next)
		goto out;

	/* Already running on another processor */
	if (desc->status & IRQ_INPROGRESS) {
		/*
		 * Already running: If it is shared get the other
		 * CPU to go looking for our mystery interrupt too
		 */
		desc->status |= IRQ_PENDING;
		goto out;
	}

	/* Honour the normal IRQ locking */
	desc->status |= IRQ_INPROGRESS;
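	/*
	 * Call the handlers with desc->lock dropped, and replay as long
	 * as another CPU (see the IRQ_INPROGRESS branch above) marks
	 * the line IRQ_PENDING while we run.
	 */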
	do {
		work++;
		desc->status &= ~IRQ_PENDING;
		raw_spin_unlock(&desc->lock);
		if (handle_IRQ_event(irq, action) != IRQ_NONE)
			ok = 1;
		raw_spin_lock(&desc->lock);
		action = desc->action;
	} while ((desc->status & IRQ_PENDING) && action);

	desc->status &= ~IRQ_INPROGRESS;
	/*
	 * If we did actual work for the real IRQ line we must let the
	 * IRQ controller clean up too
	 */
	if (work > 1)
		irq_end(irq, desc);

out:
	raw_spin_unlock(&desc->lock);
	return ok;
}

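/*
 * try_one_irq() takes desc->lock with raw_spin_lock() and runs the
 * handlers, so it must be called with hard interrupts disabled:
 * misrouted_irq() is reached from hard interrupt context via
 * note_interrupt(), while poll_spurious_irqs() runs from the timer
 * and therefore wraps the call in local_irq_disable()/local_irq_enable().
 */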
static int misrouted_irq(int irq)
{
	struct irq_desc *desc;
	int i, ok = 0;

	if (atomic_inc_return(&irq_poll_active) != 1)
		goto out;

	irq_poll_cpu = smp_processor_id();

	for_each_irq_desc(i, desc) {
		if (!i)
			continue;

		if (i == irq)	/* Already tried */
			continue;

		if (try_one_irq(i, desc, false))
			ok = 1;
	}
out:
	atomic_dec(&irq_poll_active);
	/* So the caller can adjust the irq error counts */
	return ok;
}

static void poll_spurious_irqs(unsigned long dummy)
{
	struct irq_desc *desc;
	int i;

	if (atomic_inc_return(&irq_poll_active) != 1)
		goto out;
	irq_poll_cpu = smp_processor_id();

	for_each_irq_desc(i, desc) {
		unsigned int status;

		if (!i)
			continue;

		/* Racy but it doesn't matter */
		status = desc->status;
		barrier();
		if (!(status & IRQ_SPURIOUS_DISABLED))
			continue;

		local_irq_disable();
		try_one_irq(i, desc, true);
		local_irq_enable();
	}
out:
	atomic_dec(&irq_poll_active);
	mod_timer(&poll_spurious_irq_timer,
		  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
}

/*
 * If 99,900 of the previous 100,000 interrupts have not been handled
 * then assume that the IRQ is stuck in some manner. Drop a diagnostic
 * and try to turn the IRQ off.
 *
 * (The other 100-of-100,000 interrupts may have been a correctly
 *  functioning device sharing an IRQ with the failing one)
 */
static void
__report_bad_irq(unsigned int irq, struct irq_desc *desc,
		 irqreturn_t action_ret)
{
	struct irqaction *action;
	unsigned long flags;

	if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
		printk(KERN_ERR "irq event %d: bogus return value %x\n",
				irq, action_ret);
	} else {
		printk(KERN_ERR "irq %d: nobody cared (try booting with "
				"the \"irqpoll\" option)\n", irq);
	}
	dump_stack();
	printk(KERN_ERR "handlers:\n");

	/*
	 * We need to take desc->lock here. note_interrupt() is called
	 * w/o desc->lock held, but IRQ_INPROGRESS set. We might race
	 * with something else removing an action. It's ok to take
	 * desc->lock here. See synchronize_irq().
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	action = desc->action;
	while (action) {
		printk(KERN_ERR "[<%p>]", action->handler);
		print_symbol(" (%s)",
			(unsigned long)action->handler);
		printk("\n");
		action = action->next;
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

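/*
 * report_bad_irq() below rate-limits the dump above to the first 100
 * reports. A single report looks roughly like this (IRQ number and
 * handler address are illustrative, not from a real log):
 *
 *	irq 7: nobody cared (try booting with the "irqpoll" option)
 *	<stack trace>
 *	handlers:
 *	[<c01a2f10>] (some_driver_interrupt+0x0/0x80)
 */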
static void
report_bad_irq(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret)
{
	static int count = 100;

	if (count > 0) {
		count--;
		__report_bad_irq(irq, desc, action_ret);
	}
}

static inline int
try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
		  irqreturn_t action_ret)
{
	struct irqaction *action;

	if (!irqfixup)
		return 0;

	/* We didn't actually handle the IRQ - was it misrouted? */
	if (action_ret == IRQ_NONE)
		return 1;

	/*
	 * But for 'irqfixup == 2' we also do it for handled interrupts if
	 * they are marked as IRQF_IRQPOLL (or for irq zero, which is the
	 * traditional PC timer interrupt - a legacy special case)
	 */
	if (irqfixup < 2)
		return 0;

	if (!irq)
		return 1;

	/*
	 * Since we don't get the descriptor lock, "action" can
	 * change under us. We don't really care, but we don't
	 * want to follow a NULL pointer. So tell the compiler to
	 * just load it once by using a barrier.
	 */
	action = desc->action;
	barrier();
	return action && (action->flags & IRQF_IRQPOLL);
}
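
/*
 * Summary of the irqfixup levels tested above: 0 (the default) never
 * polls; 1 (boot option "irqfixup") polls the other lines when an
 * interrupt goes unhandled; 2 (boot option "irqpoll") additionally
 * polls on handled interrupts for IRQ 0 and for handlers marked
 * IRQF_IRQPOLL.
 */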

void note_interrupt(unsigned int irq, struct irq_desc *desc,
		    irqreturn_t action_ret)
{
	if (unlikely(action_ret != IRQ_HANDLED)) {
		/*
		 * If we are seeing only the odd spurious IRQ caused by
		 * bus asynchronicity, don't eventually trigger an error;
		 * otherwise the counter becomes a doomsday timer for
		 * otherwise working systems.
		 */
		if (time_after(jiffies, desc->last_unhandled + HZ/10))
			desc->irqs_unhandled = 1;
		else
			desc->irqs_unhandled++;
		desc->last_unhandled = jiffies;
		if (unlikely(action_ret != IRQ_NONE))
			report_bad_irq(irq, desc, action_ret);
	}

	if (unlikely(try_misrouted_irq(irq, desc, action_ret))) {
		int ok = misrouted_irq(irq);
		if (action_ret == IRQ_NONE)
			desc->irqs_unhandled -= ok;
	}

	desc->irq_count++;
	if (likely(desc->irq_count < 100000))
		return;

	desc->irq_count = 0;
	if (unlikely(desc->irqs_unhandled > 99900)) {
		/*
		 * The interrupt is stuck
		 */
		__report_bad_irq(irq, desc, action_ret);
		/*
		 * Now kill the IRQ
		 */
		printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
		desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED;
		desc->depth++;
		desc->irq_data.chip->irq_disable(&desc->irq_data);

		mod_timer(&poll_spurious_irq_timer,
			  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
	}
	desc->irqs_unhandled = 0;
}
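
/*
 * Worked example (numbers illustrative): a line firing at 1000 Hz
 * fills the 100,000-interrupt window above in roughly 100 seconds.
 * If more than 99,900 of those events went unhandled, the line is
 * reported, marked IRQ_SPURIOUS_DISABLED and shut down, and the poll
 * timer takes over to service any well-behaved device sharing it.
 */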
286
Andreas Mohr83d4e6e2006-06-23 02:05:32 -0700287int noirqdebug __read_mostly;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700288
Vivek Goyal343cde52007-01-11 01:52:44 +0100289int noirqdebug_setup(char *str)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700290{
291 noirqdebug = 1;
292 printk(KERN_INFO "IRQ lockup detection disabled\n");
Ingo Molnar06fcb0c2006-06-29 02:24:40 -0700293
Linus Torvalds1da177e2005-04-16 15:20:36 -0700294 return 1;
295}
296
297__setup("noirqdebug", noirqdebug_setup);
Andi Kleen9e094c12008-01-30 13:32:48 +0100298module_param(noirqdebug, bool, 0644);
299MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");

static int __init irqfixup_setup(char *str)
{
	irqfixup = 1;
	printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
	printk(KERN_WARNING "This may impact system performance.\n");

	return 1;
}

__setup("irqfixup", irqfixup_setup);
module_param(irqfixup, int, 0644);

static int __init irqpoll_setup(char *str)
{
	irqfixup = 2;
	printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
			    "enabled\n");
	printk(KERN_WARNING "This may significantly impact system "
			    "performance\n");
	return 1;
}

__setup("irqpoll", irqpoll_setup);
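
/*
 * Usage sketch (boot-time options handled by the __setup() hooks
 * above; append to the kernel command line):
 *
 *	noirqdebug	disable the unhandled-interrupt detector
 *	irqfixup	poll other lines when an interrupt goes unhandled
 *	irqpoll		as irqfixup, but also poll on handled interrupts
 *
 * noirqdebug and irqfixup are also exposed as writable module
 * parameters (mode 0644) via module_param() above.
 */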