blob: c3c89751b327c9cf257c870d046973107be810da [file] [log] [blame]
Andrew Mortonc777ac52006-03-25 03:07:36 -08001
Christoph Hellwigd824e662006-04-10 22:54:04 -07002#include <linux/irq.h>
Yinghai Lu57b150c2009-04-27 17:59:53 -07003#include <linux/interrupt.h>
4
5#include "internals.h"
Andrew Mortonc777ac52006-03-25 03:07:36 -08006
/*
 * irq_move_masked_irq - carry out a pending irq-affinity change.
 * @idata:	irq_data of the interrupt to move
 *
 * Called with desc->lock held (asserted below) and with the interrupt
 * masked by the caller -- the comment block further down explains why
 * masking matters for edge-triggered interrupts.  If an affinity change
 * was queued (setaffinity-pending flag set), program the new mask from
 * desc->pending_mask into the irq chip and clear the pending state.
 */
void irq_move_masked_irq(struct irq_data *idata)
{
	struct irq_desc *desc = irq_data_to_desc(idata);
	struct irq_chip *chip = idata->chip;

	/* Nothing queued: the common fast path. */
	if (likely(!irqd_is_setaffinity_pending(&desc->irq_data)))
		return;

	/*
	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
	 */
	if (!irqd_can_balance(&desc->irq_data)) {
		WARN_ON(1);
		return;
	}

	/* Consume the pending flag before acting on pending_mask. */
	irqd_clr_move_pending(&desc->irq_data);

	/* An empty pending mask means there is nothing to program. */
	if (unlikely(cpumask_empty(desc->pending_mask)))
		return;

	/* Chip cannot change affinity at all: silently drop the request. */
	if (!chip->irq_set_affinity)
		return;

	assert_raw_spin_locked(&desc->lock);

	/*
	 * If there was a valid mask to work with, please
	 * do the disable, re-program, enable sequence.
	 * This is *not* particularly important for level triggered
	 * but in a edge trigger case, we might be setting rte
	 * when an active trigger is coming in. This could
	 * cause some ioapics to mal-function.
	 * Being paranoid i guess!
	 *
	 * For correct operation this depends on the caller
	 * masking the irqs.
	 */
	if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
		   < nr_cpu_ids)) {
		int ret = chip->irq_set_affinity(&desc->irq_data,
						 desc->pending_mask, false);
		switch (ret) {
		case IRQ_SET_MASK_OK:
			/* Chip expects the core to track the new mask. */
			cpumask_copy(desc->irq_data.affinity, desc->pending_mask);
			/* fall through - thread affinity update is common */
		case IRQ_SET_MASK_OK_NOCOPY:
			irq_set_thread_affinity(desc);
		}
	}

	/* Request handled (or dropped): always clear the staging mask. */
	cpumask_clear(desc->pending_mask);
}
Eric W. Biedermane7b946e2006-10-04 02:16:29 -070059
Thomas Gleixnera4395202011-02-04 18:46:16 +010060void irq_move_irq(struct irq_data *idata)
61{
Thomas Gleixnerf1a06392011-01-28 08:47:15 +010062 bool masked;
Eric W. Biedermane7b946e2006-10-04 02:16:29 -070063
Thomas Gleixnera4395202011-02-04 18:46:16 +010064 if (likely(!irqd_is_setaffinity_pending(idata)))
Eric W. Biedermane7b946e2006-10-04 02:16:29 -070065 return;
66
Thomas Gleixner32f41252011-03-28 14:10:52 +020067 if (unlikely(irqd_irq_disabled(idata)))
Eric W. Biederman2a786b42007-02-23 04:46:20 -070068 return;
Eric W. Biedermane7b946e2006-10-04 02:16:29 -070069
Thomas Gleixnerf1a06392011-01-28 08:47:15 +010070 /*
71 * Be careful vs. already masked interrupts. If this is a
72 * threaded interrupt with ONESHOT set, we can end up with an
73 * interrupt storm.
74 */
Thomas Gleixner32f41252011-03-28 14:10:52 +020075 masked = irqd_irq_masked(idata);
Thomas Gleixnerf1a06392011-01-28 08:47:15 +010076 if (!masked)
Thomas Gleixnera4395202011-02-04 18:46:16 +010077 idata->chip->irq_mask(idata);
78 irq_move_masked_irq(idata);
Thomas Gleixnerf1a06392011-01-28 08:47:15 +010079 if (!masked)
Thomas Gleixnera4395202011-02-04 18:46:16 +010080 idata->chip->irq_unmask(idata);
81}