/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "internals.h"

/**
 *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 *	@irq: interrupt number to wait for
 *
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned int status;

	if (!desc)
		return;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section.  This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (desc->status & IRQ_INPROGRESS)
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		spin_lock_irqsave(&desc->lock, flags);
		status = desc->status;
		spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (status & IRQ_INPROGRESS);

	/*
	 * We made sure that no hardirq handler is running. Now verify
	 * that no threaded handlers are active.
	 */
	wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
}
EXPORT_SYMBOL(synchronize_irq);
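
/*
 * Usage sketch (hypothetical driver code, illustrative only; "foo_dev",
 * foo_stop() and foo_free_dma_rings() are made-up names):
 *
 *	static void foo_stop(struct foo_dev *foo)
 *	{
 *		disable_irq_nosync(foo->irq);
 *		synchronize_irq(foo->irq);
 *		foo_free_dma_rings(foo);
 *	}
 *
 * Because synchronize_irq() does not return until every handler for
 * foo->irq has finished, foo_stop() must not be called while holding a
 * lock which that handler takes, otherwise it deadlocks as noted above.
 */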

#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

/**
 *	irq_can_set_affinity - Check if the affinity of a given irq can be set
 *	@irq:		Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (CHECK_IRQ_PER_CPU(desc->status) || !desc->chip ||
	    !desc->chip->set_affinity)
		return 0;

	return 1;
}

/**
 *	irq_set_thread_affinity - Notify irq threads to adjust affinity
 *	@desc:		irq descriptor which has affinity changed
 *
 *	We just set IRQTF_AFFINITY and delegate the affinity setting
 *	to the interrupt thread itself. We can not call
 *	set_cpus_allowed_ptr() here as we hold desc->lock and this
 *	code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action = desc->action;

	while (action) {
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
		action = action->next;
	}
}

/**
 *	irq_set_affinity - Set the irq affinity of a given irq
 *	@irq:		Interrupt to set affinity
 *	@cpumask:	cpumask
 *
 */
int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc->chip->set_affinity)
		return -EINVAL;

	spin_lock_irqsave(&desc->lock, flags);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (desc->status & IRQ_MOVE_PCNTXT) {
		if (!desc->chip->set_affinity(irq, cpumask)) {
			cpumask_copy(desc->affinity, cpumask);
			irq_set_thread_affinity(desc);
		}
	}
	else {
		desc->status |= IRQ_MOVE_PENDING;
		cpumask_copy(desc->pending_mask, cpumask);
	}
#else
	if (!desc->chip->set_affinity(irq, cpumask)) {
		cpumask_copy(desc->affinity, cpumask);
		irq_set_thread_affinity(desc);
	}
#endif
	desc->status |= IRQ_AFFINITY_SET;
	spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}
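
/*
 * Usage sketch (illustrative only; foo_irq and foo_cpu are made-up names):
 * pinning an interrupt to a single CPU, e.g. from management code.
 *
 *	if (irq_can_set_affinity(foo_irq))
 *		irq_set_affinity(foo_irq, cpumask_of(foo_cpu));
 *
 * With CONFIG_GENERIC_PENDING_IRQ the change may be deferred until the
 * next interrupt (IRQ_MOVE_PENDING) unless the descriptor allows updating
 * the affinity from process context (IRQ_MOVE_PCNTXT).
 */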

#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
static int setup_affinity(unsigned int irq, struct irq_desc *desc)
{
	if (!irq_can_set_affinity(irq))
		return 0;

	/*
	 * Preserve a userspace affinity setup, but make sure that
	 * one of the targets is online.
	 */
	if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
		if (cpumask_any_and(desc->affinity, cpu_online_mask)
		    < nr_cpu_ids)
			goto set_affinity;
		else
			desc->status &= ~IRQ_AFFINITY_SET;
	}

	cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity);
set_affinity:
	desc->chip->set_affinity(irq, desc->affinity);

	return 0;
}
#else
static inline int setup_affinity(unsigned int irq, struct irq_desc *d)
{
	return irq_select_affinity(irq);
}
#endif

/*
 * Called when affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&desc->lock, flags);
	ret = setup_affinity(irq, desc);
	if (!ret)
		irq_set_thread_affinity(desc);
	spin_unlock_irqrestore(&desc->lock, flags);

	return ret;
}

#else
static inline int setup_affinity(unsigned int irq, struct irq_desc *desc)
{
	return 0;
}
#endif

void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
{
	if (suspend) {
		if (!desc->action || (desc->action->flags & IRQF_TIMER))
			return;
		desc->status |= IRQ_SUSPENDED;
	}

	if (!desc->depth++) {
		desc->status |= IRQ_DISABLED;
		desc->chip->disable(irq);
	}
}

/**
 *	disable_irq_nosync - disable an irq without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Disables and Enables are
 *	nested.
 *	Unlike disable_irq(), this function does not ensure existing
 *	instances of the IRQ handler have completed before returning.
 *
 *	This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc)
		return;

	spin_lock_irqsave(&desc->lock, flags);
	__disable_irq(desc, irq, false);
	spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and Disables are
 *	nested.
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return;

	disable_irq_nosync(irq);
	if (desc->action)
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
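
/*
 * Usage sketch (hypothetical driver, illustrative only; the foo_* names
 * are made up). Disables nest, so each disable_irq()/disable_irq_nosync()
 * needs a matching enable_irq():
 *
 *	disable_irq(foo->irq);
 *	foo_reprogram_hw(foo);		handler cannot run here
 *	enable_irq(foo->irq);
 *
 * Use disable_irq_nosync() instead where waiting is not allowed, e.g. in
 * atomic context or from the handler of the same interrupt line, where
 * waiting for the running handler would deadlock.
 */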

void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
{
	if (resume)
		desc->status &= ~IRQ_SUSPENDED;

	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
		break;
	case 1: {
		unsigned int status = desc->status & ~IRQ_DISABLED;

		if (desc->status & IRQ_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		desc->status = status | IRQ_NOPROBE;
		check_irq_resend(desc, irq);
		/* fall-through */
	}
	default:
		desc->depth--;
	}
}

/**
 *	enable_irq - enable handling of an irq
 *	@irq: Interrupt to enable
 *
 *	Undoes the effect of one call to disable_irq().  If this
 *	matches the last disable, processing of interrupts on this
 *	IRQ line is re-enabled.
 *
 *	This function may be called from IRQ context.
 */
void enable_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc)
		return;

	spin_lock_irqsave(&desc->lock, flags);
	__enable_irq(desc, irq, false);
	spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(enable_irq);

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (desc->chip->set_wake)
		ret = desc->chip->set_wake(irq, on);

	return ret;
}

/**
 *	set_irq_wake - control irq power management wakeup
 *	@irq:	interrupt to control
 *	@on:	enable/disable power management wakeup
 *
 *	Enable/disable power management wakeup mode, which is
 *	disabled by default.  Enables and disables must match,
 *	just as they match for non-wakeup mode support.
 *
 *	Wakeup mode lets this IRQ wake the system from sleep
 *	states like "suspend to RAM".
 */
int set_irq_wake(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret = 0;

	/* wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	spin_lock_irqsave(&desc->lock, flags);
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				desc->status |= IRQ_WAKEUP;
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				desc->status &= ~IRQ_WAKEUP;
		}
	}

	spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}
EXPORT_SYMBOL(set_irq_wake);
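
/*
 * Usage sketch (hypothetical driver suspend/resume hooks, illustrative
 * only; foo and the hooks themselves are made up). Wake enables nest like
 * irq disables and must be balanced:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		if (device_may_wakeup(dev))
 *			set_irq_wake(foo->irq, 1);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		if (device_may_wakeup(dev))
 *			set_irq_wake(foo->irq, 0);
 *		return 0;
 *	}
 */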

/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;

	if (!desc)
		return 0;

	if (desc->status & IRQ_NOREQUEST)
		return 0;

	action = desc->action;
	if (action)
		if (irqflags & action->flags & IRQF_SHARED)
			action = NULL;

	return !action;
}

void compat_irq_chip_set_default_handler(struct irq_desc *desc)
{
	/*
	 * If the architecture still has not overridden
	 * the flow handler then zap the default. This
	 * should catch incorrect flow-type setting.
	 */
	if (desc->handle_irq == &handle_bad_irq)
		desc->handle_irq = NULL;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
		unsigned long flags)
{
	int ret;
	struct irq_chip *chip = desc->chip;

	if (!chip || !chip->set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n", irq,
				chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	/* caller masked out all except trigger mode flags */
	ret = chip->set_type(irq, flags);

	if (ret)
		pr_err("setting trigger mode %d for irq %u failed (%pF)\n",
				(int)flags, irq, chip->set_type);
	else {
		if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
			flags |= IRQ_LEVEL;
		/* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
		desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
		desc->status |= flags;
	}

	return ret;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
	}
	return -1;
}

#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	spin_lock_irq(&desc->lock);
	cpumask_copy(mask, desc->affinity);
	spin_unlock_irq(&desc->lock);

	set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
	struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2, };
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	int wake;

	sched_setscheduler(current, SCHED_FIFO, &param);
	current->irqaction = action;

	while (!irq_wait_for_interrupt(action)) {

		irq_thread_check_affinity(desc, action);

		atomic_inc(&desc->threads_active);

		spin_lock_irq(&desc->lock);
		if (unlikely(desc->status & IRQ_DISABLED)) {
			/*
			 * CHECKME: We might need a dedicated
			 * IRQ_THREAD_PENDING flag here, which
			 * retriggers the thread in check_irq_resend()
			 * but AFAICT IRQ_PENDING should be fine as it
			 * retriggers the interrupt itself --- tglx
			 */
			desc->status |= IRQ_PENDING;
			spin_unlock_irq(&desc->lock);
		} else {
			spin_unlock_irq(&desc->lock);

			action->thread_fn(action->irq, action->dev_id);
		}

		wake = atomic_dec_and_test(&desc->threads_active);

		if (wake && waitqueue_active(&desc->wait_for_threads))
			wake_up(&desc->wait_for_threads);
	}

	/*
	 * Clear irqaction. Otherwise exit_irq_thread() would make
	 * fuzz about an active irq thread going into nirvana.
	 */
	current->irqaction = NULL;
	return 0;
}

/*
 * Called from do_exit()
 */
void exit_irq_thread(void)
{
	struct task_struct *tsk = current;

	if (!tsk->irqaction)
		return;

	printk(KERN_ERR
	       "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);

	/*
	 * Set the THREAD DIED flag to prevent further wakeups of the
	 * soon to be gone threaded handler.
	 */
	set_bit(IRQTF_DIED, &tsk->irqaction->flags);
}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
	struct irqaction *old, **old_ptr;
	const char *old_name = NULL;
	unsigned long flags;
	int shared = 0;
	int ret;

	if (!desc)
		return -EINVAL;

	if (desc->chip == &no_irq_chip)
		return -ENOSYS;
	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & IRQF_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem,
		 * only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * Threaded handler ?
	 */
	if (new->thread_fn) {
		struct task_struct *t;

		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
				   new->name);
		if (IS_ERR(t))
			return PTR_ERR(t);
		/*
		 * We keep the reference to the task struct even if
		 * the thread dies to avoid that the interrupt code
		 * references an already freed task_struct.
		 */
		get_task_struct(t);
		new->thread = t;
		wake_up_process(t);
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match.
		 */
		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) {
			old_name = old->name;
			goto mismatch;
		}

#if defined(CONFIG_IRQ_PER_CPU)
		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;
#endif

		/* add new interrupt at end of irq queue */
		do {
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		shared = 1;
	}

	if (!shared) {
		irq_chip_set_defaults(desc->chip);

		init_waitqueue_head(&desc->wait_for_threads);

		/* Setup the type (level, edge polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc, irq,
					new->flags & IRQF_TRIGGER_MASK);

			if (ret)
				goto out_thread;
		} else
			compat_irq_chip_set_default_handler(desc);
#if defined(CONFIG_IRQ_PER_CPU)
		if (new->flags & IRQF_PERCPU)
			desc->status |= IRQ_PER_CPU;
#endif

		desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING |
				  IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED);

		if (!(desc->status & IRQ_NOAUTOEN)) {
			desc->depth = 0;
			desc->status &= ~IRQ_DISABLED;
			desc->chip->startup(irq);
		} else
			/* Undo nested disables: */
			desc->depth = 1;

		/* Exclude IRQ from balancing if requested */
		if (new->flags & IRQF_NOBALANCING)
			desc->status |= IRQ_NO_BALANCING;

		/* Set default affinity mask once everything is setup */
		setup_affinity(irq, desc);

	} else if ((new->flags & IRQF_TRIGGER_MASK)
			&& (new->flags & IRQF_TRIGGER_MASK)
				!= (desc->status & IRQ_TYPE_SENSE_MASK)) {
		/* hope the handler works with the actual trigger mode... */
		pr_warning("IRQ %d uses trigger mode %d; requested %d\n",
				irq, (int)(desc->status & IRQ_TYPE_SENSE_MASK),
				(int)(new->flags & IRQF_TRIGGER_MASK));
	}

	*old_ptr = new;

	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
	if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) {
		desc->status &= ~IRQ_SPURIOUS_DISABLED;
		__enable_irq(desc, irq, false);
	}

	spin_unlock_irqrestore(&desc->lock, flags);

	new->irq = irq;
	register_irq_proc(irq, desc);
	new->dir = NULL;
	register_handler_proc(irq, new);

	return 0;

mismatch:
#ifdef CONFIG_DEBUG_SHIRQ
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq);
		if (old_name)
			printk(KERN_ERR "current handler: %s\n", old_name);
		dump_stack();
	}
#endif
	ret = -EBUSY;

out_thread:
	spin_unlock_irqrestore(&desc->lock, flags);
	if (new->thread) {
		struct task_struct *t = new->thread;

		new->thread = NULL;
		if (likely(!test_bit(IRQTF_DIED, &new->thread_flags)))
			kthread_stop(t);
		put_task_struct(t);
	}
	return ret;
}

/**
 *	setup_irq - setup an interrupt
 *	@irq: Interrupt line to setup
 *	@act: irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return __setup_irq(irq, desc, act);
}
EXPORT_SYMBOL_GPL(setup_irq);
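
/*
 * Usage sketch (illustrative only, loosely modelled on how architecture
 * code wires up early interrupts such as timers; all foo_* names and
 * FOO_TIMER_IRQ are made up):
 *
 *	static struct irqaction foo_timer_irqaction = {
 *		.handler = foo_timer_interrupt,
 *		.flags	 = IRQF_DISABLED | IRQF_TIMER,
 *		.name	 = "foo_timer",
 *	};
 *
 *	void __init foo_time_init(void)
 *	{
 *		setup_irq(FOO_TIMER_IRQ, &foo_timer_irqaction);
 *	}
 *
 * Unlike request_irq(), the irqaction is supplied by the caller and is
 * never kfree()d here; remove_irq() is the matching undo operation.
 */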

/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action, **action_ptr;
	struct task_struct *irqthread;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			spin_unlock_irqrestore(&desc->lock, flags);

			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	/* Currently used only by UML, might disappear one day: */
#ifdef CONFIG_IRQ_RELEASE_METHOD
	if (desc->chip->release)
		desc->chip->release(irq, dev_id);
#endif

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action) {
		desc->status |= IRQ_DISABLED;
		if (desc->chip->shutdown)
			desc->chip->shutdown(irq);
		else
			desc->chip->disable(irq);
	}

	irqthread = action->thread;
	action->thread = NULL;

	spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	/* Make sure it's not being used on another CPU: */
	synchronize_irq(irq);

	if (irqthread) {
		if (!test_bit(IRQTF_DIED, &action->thread_flags))
			kthread_stop(irqthread);
		put_task_struct(irqthread);
	}

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even now it's being freed, so let's make sure that
	 * is so by doing an extra call to the handler ....
	 *
	 * ( We do this after actually deregistering it, to make sure that a
	 *   'real' IRQ doesn't run in parallel with our fake. )
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif
	return action;
}

/**
 *	remove_irq - free an interrupt
 *	@irq: Interrupt line to free
 *	@act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_irq(unsigned int irq, struct irqaction *act)
{
	__free_irq(irq, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);

/**
 *	free_irq - free an interrupt allocated with request_irq
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler. The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function. The function
 *	does not return until any executing interrupts for this IRQ
 *	have completed.
 *
 *	This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	kfree(__free_irq(irq, dev_id));
}
EXPORT_SYMBOL(free_irq);
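
/*
 * Usage sketch (hypothetical driver remove path, illustrative only; the
 * foo_* names are made up). On a shared line the device must be silenced
 * first, since other handlers on the same IRQ keep running while this one
 * is being removed:
 *
 *	foo_mask_device_interrupts(foo);
 *	free_irq(foo->irq, foo);
 *
 * The dev_id passed here must be the same cookie that was passed to
 * request_irq()/request_threaded_irq(), as __free_irq() uses it to find
 * the matching irqaction.
 */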

/**
 *	request_threaded_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *		  Primary handler for threaded interrupts
 *	@thread_fn: Function called from the irq handler thread
 *		    If NULL, no irq thread is created
 *	@irqflags: Interrupt type flags
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. From the point this
 *	call is made your handler function may be invoked. Since
 *	your handler function must clear any interrupt the board
 *	raises, you must take care both to initialise your hardware
 *	and to set up the interrupt handler in the right order.
 *
 *	If you want to set up a threaded irq handler for your device
 *	then you need to supply @handler and @thread_fn. @handler is
 *	still called in hard interrupt context and has to check
 *	whether the interrupt originates from the device. If yes it
 *	needs to disable the interrupt on the device and return
 *	IRQ_WAKE_THREAD which will wake up the handler thread and run
 *	@thread_fn. This split handler design is necessary to support
 *	shared interrupts.
 *
 *	Dev_id must be globally unique. Normally the address of the
 *	device data structure is used as the cookie. Since the handler
 *	receives this value it makes sense to use it.
 *
 *	If your interrupt is shared you must pass a non NULL dev_id
 *	as this is required when freeing the interrupt.
 *
 *	Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_DISABLED	Disable local interrupts while processing
 *	IRQF_SAMPLE_RANDOM	The interrupt can be used for entropy
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 *
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
			 irq_handler_t thread_fn, unsigned long irqflags,
			 const char *devname, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	/*
	 * handle_IRQ_event() always ignores IRQF_DISABLED except for
	 * the _first_ irqaction (sigh).  That can cause oopsing, but
	 * the behavior is classified as "will not fix" so we need to
	 * start nudging drivers away from using that idiom.
	 */
	if ((irqflags & (IRQF_SHARED|IRQF_DISABLED)) ==
					(IRQF_SHARED|IRQF_DISABLED)) {
		pr_warning(
		  "IRQ %d/%s: IRQF_DISABLED is not guaranteed on shared IRQs\n",
			irq, devname);
	}

#ifdef CONFIG_LOCKDEP
	/*
	 * Lockdep wants atomic interrupt handlers:
	 */
	irqflags |= IRQF_DISABLED;
#endif
	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 */
	if ((irqflags & IRQF_SHARED) && !dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (desc->status & IRQ_NOREQUEST)
		return -EINVAL;
	if (!handler)
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->thread_fn = thread_fn;
	action->flags = irqflags;
	action->name = devname;
	action->dev_id = dev_id;

	retval = __setup_irq(irq, desc, action);
	if (retval)
		kfree(action);

#ifdef CONFIG_DEBUG_SHIRQ
	if (irqflags & IRQF_SHARED) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure....
		 * We disable the irq to make sure that a 'real' IRQ doesn't
		 * run in parallel with our fake.
		 */
		unsigned long flags;

		disable_irq(irq);
		local_irq_save(flags);

		handler(irq, dev_id);

		local_irq_restore(flags);
		enable_irq(irq);
	}
#endif
	return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
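
/*
 * Usage sketch (hypothetical driver, illustrative only; all foo_* names
 * are made up): a minimal primary/threaded handler pair as described in
 * the kerneldoc above.
 *
 *	static irqreturn_t foo_quick_check(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *
 *		if (!foo_irq_is_mine(foo))
 *			return IRQ_NONE;
 *		foo_mask_device_irq(foo);
 *		return IRQ_WAKE_THREAD;
 *	}
 *
 *	static irqreturn_t foo_thread_fn(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *
 *		foo_handle_slow_work(foo);
 *		foo_unmask_device_irq(foo);
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_threaded_irq(foo->irq, foo_quick_check, foo_thread_fn,
 *				   IRQF_SHARED, "foo", foo);
 */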