#include <linux/irq.h>
#include <linux/interrupt.h>

#include "internals.h"
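
/*
 * irq_move_masked_irq - apply a pending irq affinity change for an irq
 * which the caller has already masked. The new target mask was parked
 * in desc->pending_mask by an affinity request which could not be
 * applied immediately. (Descriptive summary of the code below.)
 */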
void irq_move_masked_irq(struct irq_data *idata)
{
	struct irq_desc *desc = irq_data_to_desc(idata);
	struct irq_chip *chip = idata->chip;

	if (likely(!irqd_is_setaffinity_pending(&desc->irq_data)))
		return;

	/*
	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
	 */
	if (!irqd_can_balance(&desc->irq_data)) {
		WARN_ON(1);
		return;
	}
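
	/*
	 * Clear the pending bit first: once cleared, the request is
	 * consumed whether or not a usable target is left in
	 * desc->pending_mask.
	 */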
	irqd_clr_move_pending(&desc->irq_data);

	if (unlikely(cpumask_empty(desc->pending_mask)))
		return;

	if (!chip->irq_set_affinity)
		return;

	assert_raw_spin_locked(&desc->lock);

	/*
	 * If there is a valid mask to work with, do the disable,
	 * reprogram, enable sequence. This is not particularly
	 * important for level-triggered interrupts, but in the
	 * edge-triggered case we might otherwise set the RTE while
	 * an active trigger is coming in, which could cause some
	 * I/O APICs to malfunction.
	 *
	 * For correct operation this depends on the caller masking
	 * the irqs.
	 */
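	/*
	 * Move only if at least one CPU in the pending mask is still
	 * online; otherwise the request is dropped and the mask is
	 * cleared below.
	 */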
	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids)
		irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false);

	cpumask_clear(desc->pending_mask);
}
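
/*
 * irq_move_irq - perform a pending affinity change from interrupt
 * context. Unlike irq_move_masked_irq(), this handles the masking
 * requirement itself: the irq is masked around the move unless it is
 * masked already. (Descriptive summary of the code below.)
 */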
void irq_move_irq(struct irq_data *idata)
{
	bool masked;

	if (likely(!irqd_is_setaffinity_pending(idata)))
		return;

	if (unlikely(irqd_irq_disabled(idata)))
		return;

	/*
	 * Be careful with already masked interrupts: unmasking an irq
	 * behind the back of a threaded handler with ONESHOT set can
	 * end in an interrupt storm, so mask and unmask here only if
	 * the irq was unmasked on entry.
	 */
	masked = irqd_irq_masked(idata);
	if (!masked)
		idata->chip->irq_mask(idata);
	irq_move_masked_irq(idata);
	if (!masked)
		idata->chip->irq_unmask(idata);
}
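
/*
 * Usage sketch, illustration only (not part of migration.c): a typical
 * caller is an architecture's edge-flow ->irq_ack() callback, which
 * applies the deferred move right before acknowledging the interrupt.
 * Both functions below are hypothetical.
 */
static void example_chip_ack(struct irq_data *data)
{
	/* Stand-in for a real chip-level acknowledge. */
}

static void example_ack_edge(struct irq_data *data)
{
	/*
	 * Apply a pending affinity change; this masks the irq around
	 * the move if it is not masked already.
	 */
	irq_move_irq(data);

	example_chip_ack(data);
}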