/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "internals.h"

/**
 *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 *	@irq: interrupt number to wait for
 *
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need, you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned int status;

	if (!desc)
		return;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section.  This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (desc->status & IRQ_INPROGRESS)
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		status = desc->status;
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (status & IRQ_INPROGRESS);

	/*
	 * We made sure that no hardirq handler is running. Now verify
	 * that no threaded handlers are active.
	 */
	wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
}
EXPORT_SYMBOL(synchronize_irq);

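/*
 * Illustrative usage sketch (not part of this file): a driver tearing
 * down its device normally stops the hardware from raising the
 * interrupt first, then calls synchronize_irq() before freeing state
 * the handler touches - and, per the warning above, without holding
 * any lock the handler might take. foo_dev, foo_hw_mask_irq() and
 * foo_free_rings() are hypothetical names.
 *
 *	static void foo_shutdown(struct foo_dev *fd)
 *	{
 *		foo_hw_mask_irq(fd);		// no new irqs from the device
 *		synchronize_irq(fd->irq);	// wait out in-flight handlers
 *		foo_free_rings(fd);		// now safe to free shared state
 *	}
 */
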
#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

/**
 *	irq_can_set_affinity - Check if the affinity of a given irq can be set
 *	@irq:		Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (CHECK_IRQ_PER_CPU(desc->status) || !desc->irq_data.chip ||
	    !desc->irq_data.chip->irq_set_affinity)
		return 0;

	return 1;
}

/**
 *	irq_set_thread_affinity - Notify irq threads to adjust affinity
 *	@desc:		irq descriptor which has affinity changed
 *
 *	We just set IRQTF_AFFINITY and delegate the affinity setting
 *	to the interrupt thread itself. We cannot call
 *	set_cpus_allowed_ptr() here as we hold desc->lock and this
 *	code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action = desc->action;

	while (action) {
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
		action = action->next;
	}
}

/**
 *	irq_set_affinity - Set the irq affinity of a given irq
 *	@irq:		Interrupt to set affinity
 *	@cpumask:	cpumask of allowed target CPUs
 *
 */
int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_chip *chip = desc->irq_data.chip;
	unsigned long flags;

	if (!chip->irq_set_affinity)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (desc->status & IRQ_MOVE_PCNTXT) {
		if (!chip->irq_set_affinity(&desc->irq_data, cpumask, false)) {
			cpumask_copy(desc->irq_data.affinity, cpumask);
			irq_set_thread_affinity(desc);
		}
	} else {
		desc->status |= IRQ_MOVE_PENDING;
		cpumask_copy(desc->pending_mask, cpumask);
	}
#else
	if (!chip->irq_set_affinity(&desc->irq_data, cpumask, false)) {
		cpumask_copy(desc->irq_data.affinity, cpumask);
		irq_set_thread_affinity(desc);
	}
#endif
	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		schedule_work(&desc->affinity_notify->work);
	}
	desc->status |= IRQ_AFFINITY_SET;
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}

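/*
 * Illustrative sketch (not part of this file): routing an interrupt to
 * a single CPU, guarded by the capability check above. cpumask_of()
 * builds the single-CPU mask; the irq number and CPU are made up.
 *
 *	if (irq_can_set_affinity(irq))
 *		irq_set_affinity(irq, cpumask_of(2));	// steer irq to CPU 2
 */
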
int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc->affinity_hint = m;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);

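/*
 * Illustrative sketch (not part of this file): a multiqueue driver can
 * publish a preferred CPU per vector, which userspace balancers read
 * from /proc/irq/<n>/affinity_hint. The hinted mask must stay valid
 * until the hint is cleared with NULL before free_irq(). foo_q is a
 * hypothetical per-queue structure.
 *
 *	irq_set_affinity_hint(foo_q->irq, cpumask_of(foo_q->cpu));
 *	...
 *	irq_set_affinity_hint(foo_q->irq, NULL);	// before free_irq()
 *	free_irq(foo_q->irq, foo_q);
 */
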
static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc)
		goto out;

	if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (desc->status & IRQ_MOVE_PENDING)
		cpumask_copy(cpumask, desc->pending_mask);
	else
#endif
		cpumask_copy(cpumask, desc->affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}

/**
 *	irq_set_affinity_notifier - control notification of IRQ affinity changes
 *	@irq:		Interrupt for which to enable/disable notification
 *	@notify:	Context for notification, or %NULL to disable
 *			notification.  Function pointers must be initialised;
 *			the other fields will be initialised by this function.
 *
 *	Must be called in process context.  Notification may only be enabled
 *	after the IRQ is allocated and must be disabled before the IRQ is
 *	freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_affinity_notify *old_notify;
	unsigned long flags;

	/* The release function is promised process context */
	might_sleep();

	if (!desc)
		return -EINVAL;

	/* Complete initialisation of *notify */
	if (notify) {
		notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	old_notify = desc->affinity_notify;
	desc->affinity_notify = notify;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (old_notify)
		kref_put(&old_notify->kref, old_notify->release);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);

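/*
 * Illustrative sketch (not part of this file): a driver that wants to
 * track affinity changes embeds a struct irq_affinity_notify, fills in
 * both callbacks and registers it; release() runs in process context
 * when the last reference goes away. foo_notify(), foo_release() and
 * fd are hypothetical.
 *
 *	static void foo_notify(struct irq_affinity_notify *notify,
 *			       const cpumask_t *mask)
 *	{
 *		// re-steer per-CPU resources to the new mask
 *	}
 *
 *	static void foo_release(struct kref *ref)
 *	{
 *		struct irq_affinity_notify *n =
 *			container_of(ref, struct irq_affinity_notify, kref);
 *		// drop the reference on whatever owns *n
 *	}
 *
 *	fd->notify.notify = foo_notify;
 *	fd->notify.release = foo_release;
 *	irq_set_affinity_notifier(fd->irq, &fd->notify);
 *	...
 *	irq_set_affinity_notifier(fd->irq, NULL);	// before free_irq()
 */
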
#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
static int setup_affinity(unsigned int irq, struct irq_desc *desc)
{
	if (!irq_can_set_affinity(irq))
		return 0;

	/*
	 * Preserve a userspace affinity setup, but make sure that
	 * one of the targets is online.
	 */
	if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
		if (cpumask_any_and(desc->irq_data.affinity, cpu_online_mask)
		    < nr_cpu_ids)
			goto set_affinity;
		else
			desc->status &= ~IRQ_AFFINITY_SET;
	}

	cpumask_and(desc->irq_data.affinity, cpu_online_mask, irq_default_affinity);
set_affinity:
	desc->irq_data.chip->irq_set_affinity(&desc->irq_data, desc->irq_data.affinity, false);

	return 0;
}
#else
static inline int setup_affinity(unsigned int irq, struct irq_desc *d)
{
	return irq_select_affinity(irq);
}
#endif

/*
 * Called when affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = setup_affinity(irq, desc);
	if (!ret)
		irq_set_thread_affinity(desc);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return ret;
}

#else
static inline int setup_affinity(unsigned int irq, struct irq_desc *desc)
{
	return 0;
}
#endif

void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
{
	if (suspend) {
		if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
			return;
		desc->status |= IRQ_SUSPENDED;
	}

	if (!desc->depth++) {
		desc->status |= IRQ_DISABLED;
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	}
}

/**
 *	disable_irq_nosync - disable an irq without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Disables and Enables are
 *	nested.
 *	Unlike disable_irq(), this function does not ensure existing
 *	instances of the IRQ handler have completed before returning.
 *
 *	This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc)
		return;

	chip_bus_lock(desc);
	raw_spin_lock_irqsave(&desc->lock, flags);
	__disable_irq(desc, irq, false);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and Disables are
 *	nested.
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need, you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return;

	disable_irq_nosync(irq);
	if (desc->action)
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);

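/*
 * Illustrative sketch (not part of this file): because disables nest,
 * a driver can bracket a section that must not race with its handler.
 * disable_irq() waits for running handlers, so no lock the handler
 * takes may be held here. foo_reprogram_hw() and fd are hypothetical.
 *
 *	disable_irq(fd->irq);		// waits for in-flight handlers
 *	foo_reprogram_hw(fd);		// the handler cannot run here
 *	enable_irq(fd->irq);		// matching enable re-arms the line
 */
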
void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
{
	if (resume)
		desc->status &= ~IRQ_SUSPENDED;

	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
		break;
	case 1: {
		unsigned int status = desc->status & ~IRQ_DISABLED;

		if (desc->status & IRQ_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		desc->status = status | IRQ_NOPROBE;
		check_irq_resend(desc, irq);
		/* fall-through */
	}
	default:
		desc->depth--;
	}
}

/**
 *	enable_irq - enable handling of an irq
 *	@irq: Interrupt to enable
 *
 *	Undoes the effect of one call to disable_irq().  If this
 *	matches the last disable, processing of interrupts on this
 *	IRQ line is re-enabled.
 *
 *	This function may be called from IRQ context only when
 *	desc->irq_data.chip->bus_lock and desc->irq_data.chip->bus_sync_unlock are NULL !
 */
void enable_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc)
		return;

	if (WARN(!desc->irq_data.chip || !desc->irq_data.chip->irq_enable,
	    KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
		return;

	chip_bus_lock(desc);
	raw_spin_lock_irqsave(&desc->lock, flags);
	__enable_irq(desc, irq, false);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL(enable_irq);

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (desc->irq_data.chip->irq_set_wake)
		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

	return ret;
}

/**
 *	set_irq_wake - control irq power management wakeup
 *	@irq:	interrupt to control
 *	@on:	enable/disable power management wakeup
 *
 *	Enable/disable power management wakeup mode, which is
 *	disabled by default.  Enables and disables must match,
 *	just as they match for non-wakeup mode support.
 *
 *	Wakeup mode lets this IRQ wake the system from sleep
 *	states like "suspend to RAM".
 */
int set_irq_wake(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret = 0;

	/* wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				desc->status |= IRQ_WAKEUP;
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				desc->status &= ~IRQ_WAKEUP;
		}
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}
EXPORT_SYMBOL(set_irq_wake);

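/*
 * Illustrative sketch (not part of this file): a suspend hook can arm
 * its irq as a wakeup source when the device may wake the system, and
 * the resume hook mirrors it so the wake_depth counts stay balanced.
 * foo_suspend()/foo_resume() and fd are hypothetical.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		if (device_may_wakeup(dev))
 *			set_irq_wake(fd->irq, 1);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		if (device_may_wakeup(dev))
 *			set_irq_wake(fd->irq, 0);
 *		return 0;
 *	}
 */
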
/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	if (!desc)
		return 0;

	if (desc->status & IRQ_NOREQUEST)
		return 0;

	raw_spin_lock_irqsave(&desc->lock, flags);
	action = desc->action;
	if (action)
		if (irqflags & action->flags & IRQF_SHARED)
			action = NULL;

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return !action;
}

void compat_irq_chip_set_default_handler(struct irq_desc *desc)
{
	/*
	 * If the architecture still has not overridden
	 * the flow handler then zap the default. This
	 * should catch incorrect flow-type setting.
	 */
	if (desc->handle_irq == &handle_bad_irq)
		desc->handle_irq = NULL;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
		      unsigned long flags)
{
	int ret;
	struct irq_chip *chip = desc->irq_data.chip;

	if (!chip || !chip->irq_set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n", irq,
				chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	/* caller masked out all except trigger mode flags */
	ret = chip->irq_set_type(&desc->irq_data, flags);

	if (ret)
		pr_err("setting trigger mode %lu for irq %u failed (%pF)\n",
		       flags, irq, chip->irq_set_type);
	else {
		if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
			flags |= IRQ_LEVEL;
		/* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
		desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
		desc->status |= flags;

		if (chip != desc->irq_data.chip)
			irq_chip_set_defaults(desc->irq_data.chip);
	}

	return ret;
}

/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

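/*
 * Illustrative sketch (not part of this file): the default primary
 * handler above is what makes the common request_threaded_irq() idiom
 * work - passing handler == NULL together with IRQF_ONESHOT keeps the
 * line masked until the thread function has run. foo_thread_fn() and
 * fd are hypothetical.
 *
 *	ret = request_threaded_irq(irq, NULL, foo_thread_fn,
 *				   IRQF_ONESHOT | IRQF_TRIGGER_LOW,
 *				   "foo", fd);
 */
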
/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
	}
	return -1;
}

/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler has finished. Unmask if the interrupt has not been disabled
 * and is marked MASKED.
 */
static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
{
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Implausible though it may be, we need to protect us against
	 * the following scenario:
	 *
	 * The thread finishes while the hard interrupt handler is
	 * still running on the other CPU. If we unmask the irq line
	 * now, the interrupt can come in again, get masked, and bail
	 * out due to IRQ_INPROGRESS, leaving the irq line masked
	 * forever.
	 */
	if (unlikely(desc->status & IRQ_INPROGRESS)) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(desc);
		cpu_relax();
		goto again;
	}

	if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) {
		desc->status &= ~IRQ_MASKED;
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	}
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(desc);
}

#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
	cpumask_copy(mask, desc->irq_data.affinity);
	raw_spin_unlock_irq(&desc->lock);

	set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
	static const struct sched_param param = {
		.sched_priority = MAX_USER_RT_PRIO/2,
	};
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	int wake, oneshot = desc->status & IRQ_ONESHOT;

	sched_setscheduler(current, SCHED_FIFO, &param);
	current->irqaction = action;

	while (!irq_wait_for_interrupt(action)) {

		irq_thread_check_affinity(desc, action);

		atomic_inc(&desc->threads_active);

		raw_spin_lock_irq(&desc->lock);
		if (unlikely(desc->status & IRQ_DISABLED)) {
			/*
			 * CHECKME: We might need a dedicated
			 * IRQ_THREAD_PENDING flag here, which
			 * retriggers the thread in check_irq_resend()
			 * but AFAICT IRQ_PENDING should be fine as it
			 * retriggers the interrupt itself --- tglx
			 */
			desc->status |= IRQ_PENDING;
			raw_spin_unlock_irq(&desc->lock);
		} else {
			raw_spin_unlock_irq(&desc->lock);

			action->thread_fn(action->irq, action->dev_id);

			if (oneshot)
				irq_finalize_oneshot(action->irq, desc);
		}

		wake = atomic_dec_and_test(&desc->threads_active);

		if (wake && waitqueue_active(&desc->wait_for_threads))
			wake_up(&desc->wait_for_threads);
	}

	/*
	 * Clear irqaction. Otherwise exit_irq_thread() would complain
	 * about an active irq thread going into nirvana.
	 */
	current->irqaction = NULL;
	return 0;
}

/*
 * Called from do_exit()
 */
void exit_irq_thread(void)
{
	struct task_struct *tsk = current;

	if (!tsk->irqaction)
		return;

	printk(KERN_ERR
	       "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);

	/*
	 * Set the THREAD DIED flag to prevent further wakeups of the
	 * soon to be gone threaded handler.
	 */
	set_bit(IRQTF_DIED, &tsk->irqaction->flags);
}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
	struct irqaction *old, **old_ptr;
	const char *old_name = NULL;
	unsigned long flags;
	int nested, shared = 0;
	int ret;

	if (!desc)
		return -EINVAL;

	if (desc->irq_data.chip == &no_irq_chip)
		return -ENOSYS;
	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & IRQF_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, so we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but that is hardly a problem,
		 * as only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/* Oneshot interrupts are not allowed with shared */
	if ((new->flags & IRQF_ONESHOT) && (new->flags & IRQF_SHARED))
		return -EINVAL;

	/*
	 * Check whether the interrupt nests into another interrupt
	 * thread.
	 */
	nested = desc->status & IRQ_NESTED_THREAD;
	if (nested) {
		if (!new->thread_fn)
			return -EINVAL;
		/*
		 * Replace the primary handler, which the driver provided
		 * for non-nested interrupt handling, with the dummy
		 * function that warns when called.
		 */
		new->handler = irq_nested_primary_handler;
	}

	/*
	 * Create a handler thread when a thread function is supplied
	 * and the interrupt does not nest into another interrupt
	 * thread.
	 */
	if (new->thread_fn && !nested) {
		struct task_struct *t;

		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
				   new->name);
		if (IS_ERR(t))
			return PTR_ERR(t);
		/*
		 * We keep the reference to the task struct even if
		 * the thread dies to avoid that the interrupt code
		 * references an already freed task_struct.
		 */
		get_task_struct(t);
		new->thread = t;
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match.
		 */
		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) {
			old_name = old->name;
			goto mismatch;
		}

#if defined(CONFIG_IRQ_PER_CPU)
		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;
#endif

		/* add new interrupt at end of irq queue */
		do {
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		shared = 1;
	}

| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 838 | 	if (!shared) { | 
| Thomas Gleixner | 6b8ff31 | 2010-10-01 12:58:38 +0200 | [diff] [blame] | 839 | 		irq_chip_set_defaults(desc->irq_data.chip); | 
| Thomas Gleixner | e76de9f | 2006-06-29 02:24:56 -0700 | [diff] [blame] | 840 |  | 
| Thomas Gleixner | 3aa551c | 2009-03-23 18:28:15 +0100 | [diff] [blame] | 841 | 		init_waitqueue_head(&desc->wait_for_threads); | 
 | 842 |  | 
| Uwe Kleine-König | 82736f4 | 2008-07-23 21:28:54 -0700 | [diff] [blame] | 843 | 		/* Setup the type (level, edge polarity) if configured: */ | 
 | 844 | 		if (new->flags & IRQF_TRIGGER_MASK) { | 
| David Brownell | f2b662d | 2008-12-01 14:31:38 -0800 | [diff] [blame] | 845 | 			ret = __irq_set_trigger(desc, irq, | 
 | 846 | 					new->flags & IRQF_TRIGGER_MASK); | 
| Uwe Kleine-König | 82736f4 | 2008-07-23 21:28:54 -0700 | [diff] [blame] | 847 |  | 
| Thomas Gleixner | 3aa551c | 2009-03-23 18:28:15 +0100 | [diff] [blame] | 848 | 			if (ret) | 
 | 849 | 				goto out_thread; | 
| Uwe Kleine-König | 82736f4 | 2008-07-23 21:28:54 -0700 | [diff] [blame] | 850 | 		} else | 
 | 851 | 			compat_irq_chip_set_default_handler(desc); | 
| Ahmed S. Darwish | f75d222 | 2007-05-08 00:27:55 -0700 | [diff] [blame] | 852 | #if defined(CONFIG_IRQ_PER_CPU) | 
 | 853 | 		if (new->flags & IRQF_PERCPU) | 
 | 854 | 			desc->status |= IRQ_PER_CPU; | 
 | 855 | #endif | 
 | 856 |  | 
| Thomas Gleixner | b25c340 | 2009-08-13 12:17:22 +0200 | [diff] [blame] | 857 | 		desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING | IRQ_ONESHOT | | 
				  IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED);

		if (new->flags & IRQF_ONESHOT)
			desc->status |= IRQ_ONESHOT;

		if (!(desc->status & IRQ_NOAUTOEN)) {
			desc->depth = 0;
			desc->status &= ~IRQ_DISABLED;
			desc->irq_data.chip->irq_startup(&desc->irq_data);
		} else
			/* Undo nested disables: */
			desc->depth = 1;

		/* Exclude IRQ from balancing if requested */
		if (new->flags & IRQF_NOBALANCING)
			desc->status |= IRQ_NO_BALANCING;

		/* Set default affinity mask once everything is set up */
		setup_affinity(irq, desc);

	} else if ((new->flags & IRQF_TRIGGER_MASK) &&
		   (new->flags & IRQF_TRIGGER_MASK) !=
				(desc->status & IRQ_TYPE_SENSE_MASK)) {
		/* Hope the handler works with the current trigger mode... */
		pr_warning("IRQ %d uses trigger mode %d; requested %d\n",
				irq, (int)(desc->status & IRQ_TYPE_SENSE_MASK),
				(int)(new->flags & IRQF_TRIGGER_MASK));
	}

	new->irq = irq;
	*old_ptr = new;

	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Re-enable it and give it another chance.
	 */
	if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) {
		desc->status &= ~IRQ_SPURIOUS_DISABLED;
		__enable_irq(desc, irq, false);
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	/*
	 * Strictly speaking there is no need to wake the thread up, but
	 * hung_task complains when no hard interrupt ever wakes it.
	 */
	if (new->thread)
		wake_up_process(new->thread);

	register_irq_proc(irq, desc);
	new->dir = NULL;
	register_handler_proc(irq, new);

	return 0;

mismatch:
#ifdef CONFIG_DEBUG_SHIRQ
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq);
		if (old_name)
			printk(KERN_ERR "current handler: %s\n", old_name);
		dump_stack();
	}
#endif
	ret = -EBUSY;

out_thread:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	if (new->thread) {
		struct task_struct *t = new->thread;

		new->thread = NULL;
		if (likely(!test_bit(IRQTF_DIED, &new->thread_flags)))
			kthread_stop(t);
		put_task_struct(t);
	}
	return ret;
}

/**
 *	setup_irq - set up an interrupt
 *	@irq: Interrupt line to set up
 *	@act: irqaction for the interrupt
 *
 *	Used to statically set up interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return __setup_irq(irq, desc, act);
}
EXPORT_SYMBOL_GPL(setup_irq);
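
/*
 * Illustrative sketch, not part of the original file: the typical user
 * of setup_irq() is architecture code registering its timer tick before
 * the allocators that request_irq() depends on are available, hence the
 * static irqaction. All names below (example_timer_interrupt,
 * EXAMPLE_TIMER_IRQ, example_timer_action) are hypothetical.
 */
#if 0	/* example only */
#define EXAMPLE_TIMER_IRQ	0	/* hypothetical hardware irq line */

static irqreturn_t example_timer_interrupt(int irq, void *dev_id)
{
	/* Acknowledge the tick in the timer hardware, update jiffies... */
	return IRQ_HANDLED;
}

static struct irqaction example_timer_action = {
	.handler = example_timer_interrupt,
	.flags	 = IRQF_TIMER,
	.name	 = "timer",
};

void __init example_time_init(void)
{
	setup_irq(EXAMPLE_TIMER_IRQ, &example_timer_action);
}
#endif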

/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			raw_spin_unlock_irqrestore(&desc->lock, flags);

			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	/* Currently used only by UML, might disappear one day: */
#ifdef CONFIG_IRQ_RELEASE_METHOD
	if (desc->irq_data.chip->release)
		desc->irq_data.chip->release(irq, dev_id);
#endif

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action) {
		desc->status |= IRQ_DISABLED;
		if (desc->irq_data.chip->irq_shutdown)
			desc->irq_data.chip->irq_shutdown(&desc->irq_data);
		else
			desc->irq_data.chip->irq_disable(&desc->irq_data);
	}

#ifdef CONFIG_SMP
	/* make sure affinity_hint is cleaned up */
	if (WARN_ON_ONCE(desc->affinity_hint))
		desc->affinity_hint = NULL;
#endif

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	/* Make sure it's not being used on another CPU: */
	synchronize_irq(irq);

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even while it is being freed, so let's make sure
	 * that is so by doing an extra call to the handler...
	 *
	 * ( We do this after actually deregistering it, to make sure that
	 *   a 'real' IRQ doesn't run in parallel with our fake. )
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif

	if (action->thread) {
		if (!test_bit(IRQTF_DIED, &action->thread_flags))
			kthread_stop(action->thread);
		put_task_struct(action->thread);
	}

	return action;
}

/**
 *	remove_irq - free an interrupt
 *	@irq: Interrupt line to free
 *	@act: irqaction for the interrupt
 *
 *	Used to remove interrupts statically set up by the early boot process.
 */
void remove_irq(unsigned int irq, struct irqaction *act)
{
	__free_irq(irq, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);
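
/*
 * Illustrative sketch, continuing the hypothetical timer example after
 * setup_irq() above: remove_irq() is the symmetric teardown and takes
 * the same irqaction, whose ->dev_id identifies the handler to remove.
 */
#if 0	/* example only */
void example_time_exit(void)
{
	remove_irq(EXAMPLE_TIMER_IRQ, &example_timer_action);
}
#endif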

/**
 *	free_irq - free an interrupt allocated with request_irq
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler. The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function. The function
 *	does not return until any executing interrupts for this IRQ
 *	have completed.
 *
 *	This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return;

#ifdef CONFIG_SMP
	if (WARN_ON(desc->affinity_notify))
		desc->affinity_notify = NULL;
#endif

	chip_bus_lock(desc);
	kfree(__free_irq(irq, dev_id));
	chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL(free_irq);
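
/*
 * Illustrative sketch of the teardown ordering the comment above asks
 * for on a shared line: quiesce the device first so it cannot raise the
 * interrupt, then call free_irq(), which also waits for any running
 * handler. The device, its register offset and the struct are all
 * hypothetical placeholders.
 */
#if 0	/* example only */
struct example_dev {
	void __iomem	*regs;
	unsigned int	irq;
};
#define EXAMPLE_INTR_ENABLE	0x10	/* hypothetical register offset */

static void example_shutdown(struct example_dev *dev)
{
	writel(0, dev->regs + EXAMPLE_INTR_ENABLE);	/* mask device irqs */
	free_irq(dev->irq, dev);	/* dev was the dev_id cookie */
}
#endif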

/**
 *	request_threaded_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *		  Primary handler for threaded interrupts.
 *		  If NULL and thread_fn != NULL the default
 *		  primary handler is installed.
 *	@thread_fn: Function called from the irq handler thread.
 *		    If NULL, no irq thread is created.
 *	@irqflags: Interrupt type flags
 *	@devname: An ASCII name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. From the point this
 *	call is made your handler function may be invoked. Since
 *	your handler function must clear any interrupt the board
 *	raises, you must take care both to initialise your hardware
 *	and to set up the interrupt handler in the right order.
 *
 *	If you want to set up a threaded irq handler for your device
 *	then you need to supply @handler and @thread_fn. @handler is
 *	still called in hard interrupt context and has to check
 *	whether the interrupt originates from the device. If yes it
 *	needs to disable the interrupt on the device and return
 *	IRQ_WAKE_THREAD, which will wake up the handler thread and run
 *	@thread_fn. This split handler design is necessary to support
 *	shared interrupts.
 *
 *	Dev_id must be globally unique. Normally the address of the
 *	device data structure is used as the cookie. Since the handler
 *	receives this value it makes sense to use it.
 *
 *	If your interrupt is shared you must pass a non-NULL dev_id
 *	as this is required when freeing the interrupt.
 *
 *	Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_SAMPLE_RANDOM	The interrupt can be used for entropy
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 *
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
			 irq_handler_t thread_fn, unsigned long irqflags,
			 const char *devname, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 */
	if ((irqflags & IRQF_SHARED) && !dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (desc->status & IRQ_NOREQUEST)
		return -EINVAL;

	if (!handler) {
		if (!thread_fn)
			return -EINVAL;
		handler = irq_default_primary_handler;
	}

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->thread_fn = thread_fn;
	action->flags = irqflags;
	action->name = devname;
	action->dev_id = dev_id;

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, action);
	chip_bus_sync_unlock(desc);

	if (retval)
		kfree(action);

#ifdef CONFIG_DEBUG_SHIRQ
	if (!retval && (irqflags & IRQF_SHARED)) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure...
		 * We disable the irq to make sure that a 'real' IRQ doesn't
		 * run in parallel with our fake.
		 */
		unsigned long flags;

		disable_irq(irq);
		local_irq_save(flags);

		handler(irq, dev_id);

		local_irq_restore(flags);
		enable_irq(irq);
	}
#endif
	return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
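
/*
 * Illustrative sketch of the split primary/threaded pattern the
 * kerneldoc above describes, for a hypothetical memory-mapped device
 * whose event processing is too slow for hard interrupt context. It
 * reuses the hypothetical struct example_dev and EXAMPLE_INTR_ENABLE
 * from the sketch after free_irq(); every example_/EXAMPLE_ name,
 * including example_process_events(), is an assumption, not kernel API.
 */
#if 0	/* example only */
#define EXAMPLE_INTR_STATUS	0x14	/* hypothetical register offset */
#define EXAMPLE_INTR_PENDING	0x01

static irqreturn_t example_primary(int irq, void *dev_id)
{
	struct example_dev *dev = dev_id;

	/* Hard irq context: only check "is it ours?" and mask the device */
	if (!(readl(dev->regs + EXAMPLE_INTR_STATUS) & EXAMPLE_INTR_PENDING))
		return IRQ_NONE;	/* not ours - vital on shared lines */

	writel(0, dev->regs + EXAMPLE_INTR_ENABLE);
	return IRQ_WAKE_THREAD;		/* have the core run the thread_fn */
}

static irqreturn_t example_thread_fn(int irq, void *dev_id)
{
	struct example_dev *dev = dev_id;

	example_process_events(dev);	/* sleepable context */
	writel(1, dev->regs + EXAMPLE_INTR_ENABLE);	/* unmask again */
	return IRQ_HANDLED;
}

static int example_probe(struct example_dev *dev)
{
	return request_threaded_irq(dev->irq, example_primary,
				    example_thread_fn, IRQF_SHARED,
				    "example", dev);
}
#endif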

/**
 *	request_any_context_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *		  Threaded handler for threaded interrupts.
 *	@flags: Interrupt type flags
 *	@name: An ASCII name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. It selects either a
 *	hardirq or threaded handling method depending on the
 *	context.
 *
 *	On failure, it returns a negative value. On success,
 *	it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
int request_any_context_irq(unsigned int irq, irq_handler_t handler,
			    unsigned long flags, const char *name, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret;

	if (!desc)
		return -EINVAL;

	if (desc->status & IRQ_NESTED_THREAD) {
		ret = request_threaded_irq(irq, NULL, handler,
					   flags, name, dev_id);
		return !ret ? IRQC_IS_NESTED : ret;
	}

	ret = request_irq(irq, handler, flags, name, dev_id);
	return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);
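
/*
 * Illustrative sketch: a driver for a subdevice of an interrupt
 * controller that may itself be threaded (IRQ_NESTED_THREAD) cannot
 * know which context its handler will run in, so it lets the core
 * choose. example_event_handler is a hypothetical placeholder for a
 * handler like the threaded one sketched above.
 */
#if 0	/* example only */
static int example_any_probe(struct example_dev *dev)
{
	int ret;

	ret = request_any_context_irq(dev->irq, example_event_handler,
				      IRQF_TRIGGER_LOW, "example", dev);
	if (ret < 0)
		return ret;

	/* ret is IRQC_IS_HARDIRQ or IRQC_IS_NESTED; most callers ignore it */
	return 0;
}
#endif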