/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#include <linux/irq.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>

#include "internals.h"

#ifdef CONFIG_SMP

cpumask_t irq_default_affinity = CPU_MASK_ALL;

/**
 *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 *	@irq: interrupt number to wait for
 *
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need, you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	unsigned int status;

	if (irq >= NR_IRQS)
		return;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section.  This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (desc->status & IRQ_INPROGRESS)
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		spin_lock_irqsave(&desc->lock, flags);
		status = desc->status;
		spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (status & IRQ_INPROGRESS);
}
EXPORT_SYMBOL(synchronize_irq);
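
/*
 * Usage sketch (illustrative only, not part of this file): a driver that
 * tears down data shared with its handler typically quiesces the line
 * first.  The "dev" pointer below is a hypothetical device structure.
 *
 *	disable_irq_nosync(dev->irq);
 *	synchronize_irq(dev->irq);	// no handler instance still running
 *	// now it is safe to free per-device data the handler touches
 *
 * Never call it while holding a lock the handler itself takes, or the
 * busy-wait above will never terminate.
 */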

/**
 *	irq_can_set_affinity - Check if the affinity of a given irq can be set
 *	@irq:		Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;

	if (CHECK_IRQ_PER_CPU(desc->status) || !desc->chip ||
	    !desc->chip->set_affinity)
		return 0;

	return 1;
}

/**
 *	irq_set_affinity - Set the irq affinity of a given irq
 *	@irq:		Interrupt to set affinity
 *	@cpumask:	cpumask
 *
 */
int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
{
	struct irq_desc *desc = irq_desc + irq;

	if (!desc->chip->set_affinity)
		return -EINVAL;

	set_balance_irq_affinity(irq, cpumask);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	set_pending_irq(irq, cpumask);
#else
	desc->affinity = cpumask;
	desc->chip->set_affinity(irq, cpumask);
#endif
	return 0;
}
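
/*
 * Example (sketch, not kernel-provided code): pinning an interrupt to
 * CPU 0 from architecture or driver setup code.  "dev_irq" is a
 * hypothetical interrupt number.
 *
 *	if (irq_can_set_affinity(dev_irq))
 *		irq_set_affinity(dev_irq, cpumask_of_cpu(0));
 *
 * On CONFIG_GENERIC_PENDING_IRQ kernels the new mask only takes effect
 * the next time the interrupt actually fires (see set_pending_irq()).
 */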

#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
int irq_select_affinity(unsigned int irq)
{
	cpumask_t mask;

	if (!irq_can_set_affinity(irq))
		return 0;

	cpus_and(mask, cpu_online_map, irq_default_affinity);

	irq_desc[irq].affinity = mask;
	irq_desc[irq].chip->set_affinity(irq, mask);

	set_balance_irq_affinity(irq, mask);
	return 0;
}
#endif

#endif

/**
 *	disable_irq_nosync - disable an irq without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Disables and Enables are
 *	nested.
 *	Unlike disable_irq(), this function does not ensure existing
 *	instances of the IRQ handler have completed before returning.
 *
 *	This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	unsigned long flags;

	if (irq >= NR_IRQS)
		return;

	spin_lock_irqsave(&desc->lock, flags);
	if (!desc->depth++) {
		desc->status |= IRQ_DISABLED;
		desc->chip->disable(irq);
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and Disables are
 *	nested.
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need, you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;

	if (irq >= NR_IRQS)
		return;

	disable_irq_nosync(irq);
	if (desc->action)
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);

static void __enable_irq(struct irq_desc *desc, unsigned int irq)
{
	switch (desc->depth) {
	case 0:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
		break;
	case 1: {
		unsigned int status = desc->status & ~IRQ_DISABLED;

		/* Prevent probing on this irq: */
		desc->status = status | IRQ_NOPROBE;
		check_irq_resend(desc, irq);
		/* fall-through */
	}
	default:
		desc->depth--;
	}
}

/**
 *	enable_irq - enable handling of an irq
 *	@irq: Interrupt to enable
 *
 *	Undoes the effect of one call to disable_irq().  If this
 *	matches the last disable, processing of interrupts on this
 *	IRQ line is re-enabled.
 *
 *	This function may be called from IRQ context.
 */
void enable_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	unsigned long flags;

	if (irq >= NR_IRQS)
		return;

	spin_lock_irqsave(&desc->lock, flags);
	__enable_irq(desc, irq);
	spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(enable_irq);
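
/*
 * Nesting sketch (illustrative, hypothetical driver code): disable/enable
 * calls are reference counted via desc->depth, so they must be balanced.
 *
 *	disable_irq(dev->irq);		// depth 0 -> 1, line masked
 *	disable_irq(dev->irq);		// depth 1 -> 2, still masked
 *	enable_irq(dev->irq);		// depth 2 -> 1, still masked
 *	enable_irq(dev->irq);		// depth 1 -> 0, line unmasked
 *
 * One extra enable_irq() here would hit the "Unbalanced enable" warning
 * in __enable_irq() above.
 */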

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_desc + irq;
	int ret = -ENXIO;

	if (desc->chip->set_wake)
		ret = desc->chip->set_wake(irq, on);

	return ret;
}

/**
 *	set_irq_wake - control irq power management wakeup
 *	@irq:	interrupt to control
 *	@on:	enable/disable power management wakeup
 *
 *	Enable/disable power management wakeup mode, which is
 *	disabled by default.  Enables and disables must match,
 *	just as they match for non-wakeup mode support.
 *
 *	Wakeup mode lets this IRQ wake the system from sleep
 *	states like "suspend to RAM".
 */
int set_irq_wake(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_desc + irq;
	unsigned long flags;
	int ret = 0;

	/* wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	spin_lock_irqsave(&desc->lock, flags);
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				desc->status |= IRQ_WAKEUP;
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				desc->status &= ~IRQ_WAKEUP;
		}
	}

	spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}
EXPORT_SYMBOL(set_irq_wake);
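
/*
 * Wakeup sketch (illustrative only): a driver's suspend/resume hooks would
 * bracket a sleep state like this, assuming a hypothetical "wake_enabled"
 * flag from the device's platform data.
 *
 *	// suspend path
 *	if (wake_enabled)
 *		set_irq_wake(dev->irq, 1);
 *	// resume path
 *	if (wake_enabled)
 *		set_irq_wake(dev->irq, 0);
 *
 * The enable/disable calls must balance, exactly as the wake_depth
 * accounting above requires; an unbalanced disable triggers the WARN().
 */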

/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	struct irqaction *action;

	if (irq >= NR_IRQS || irq_desc[irq].status & IRQ_NOREQUEST)
		return 0;

	action = irq_desc[irq].action;
	if (action)
		if (irqflags & action->flags & IRQF_SHARED)
			action = NULL;

	return !action;
}

void compat_irq_chip_set_default_handler(struct irq_desc *desc)
{
	/*
	 * If the architecture still has not overridden
	 * the flow handler then zap the default. This
	 * should catch incorrect flow-type setting.
	 */
	if (desc->handle_irq == &handle_bad_irq)
		desc->handle_irq = NULL;
}

static int __irq_set_trigger(struct irq_chip *chip, unsigned int irq,
		unsigned long flags)
{
	int ret;

	if (!chip || !chip->set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_warning("No set_type function for IRQ %d (%s)\n", irq,
				chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	ret = chip->set_type(irq, flags & IRQF_TRIGGER_MASK);

	if (ret)
		pr_err("setting trigger mode %d for irq %u failed (%pF)\n",
				(int)(flags & IRQF_TRIGGER_MASK),
				irq, chip->set_type);

	return ret;
}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
int setup_irq(unsigned int irq, struct irqaction *new)
{
	struct irq_desc *desc = irq_desc + irq;
	struct irqaction *old, **p;
	const char *old_name = NULL;
	unsigned long flags;
	int shared = 0;
	int ret;

	if (irq >= NR_IRQS)
		return -EINVAL;

	if (desc->chip == &no_irq_chip)
		return -ENOSYS;
	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & IRQF_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but that is not really a
		 * problem, as only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	spin_lock_irqsave(&desc->lock, flags);
	p = &desc->action;
	old = *p;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match.
		 */
		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) {
			old_name = old->name;
			goto mismatch;
		}

#if defined(CONFIG_IRQ_PER_CPU)
		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;
#endif

		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	if (!shared) {
		irq_chip_set_defaults(desc->chip);

		/* Setup the type (level, edge, polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc->chip, irq, new->flags);

			if (ret) {
				spin_unlock_irqrestore(&desc->lock, flags);
				return ret;
			}
		} else
			compat_irq_chip_set_default_handler(desc);
#if defined(CONFIG_IRQ_PER_CPU)
		if (new->flags & IRQF_PERCPU)
			desc->status |= IRQ_PER_CPU;
#endif

		desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING |
				  IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED);

		if (!(desc->status & IRQ_NOAUTOEN)) {
			desc->depth = 0;
			desc->status &= ~IRQ_DISABLED;
			if (desc->chip->startup)
				desc->chip->startup(irq);
			else
				desc->chip->enable(irq);
		} else
			/* Undo nested disables: */
			desc->depth = 1;

		/* Set default affinity mask once everything is setup */
		irq_select_affinity(irq);
	}

	*p = new;

	/* Exclude IRQ from balancing */
	if (new->flags & IRQF_NOBALANCING)
		desc->status |= IRQ_NO_BALANCING;

	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
	if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) {
		desc->status &= ~IRQ_SPURIOUS_DISABLED;
		__enable_irq(desc, irq);
	}

	spin_unlock_irqrestore(&desc->lock, flags);

	new->irq = irq;
	register_irq_proc(irq);
	new->dir = NULL;
	register_handler_proc(irq, new);

	return 0;

mismatch:
#ifdef CONFIG_DEBUG_SHIRQ
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq);
		if (old_name)
			printk(KERN_ERR "current handler: %s\n", old_name);
		dump_stack();
	}
#endif
	spin_unlock_irqrestore(&desc->lock, flags);
	return -EBUSY;
}

/**
 *	free_irq - free an interrupt
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler. The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function. The function
 *	does not return until any executing interrupts for this IRQ
 *	have completed.
 *
 *	This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc;
	struct irqaction **p;
	unsigned long flags;

	WARN_ON(in_interrupt());
	if (irq >= NR_IRQS)
		return;

	desc = irq_desc + irq;
	spin_lock_irqsave(&desc->lock, flags);
	p = &desc->action;
	for (;;) {
		struct irqaction *action = *p;

		if (action) {
			struct irqaction **pp = p;

			p = &action->next;
			if (action->dev_id != dev_id)
				continue;

			/* Found it - now remove it from the list of entries */
			*pp = action->next;

			/* Currently used only by UML, might disappear one day. */
#ifdef CONFIG_IRQ_RELEASE_METHOD
			if (desc->chip->release)
				desc->chip->release(irq, dev_id);
#endif

			if (!desc->action) {
				desc->status |= IRQ_DISABLED;
				if (desc->chip->shutdown)
					desc->chip->shutdown(irq);
				else
					desc->chip->disable(irq);
			}
			spin_unlock_irqrestore(&desc->lock, flags);
			unregister_handler_proc(irq, action);

			/* Make sure it's not being used on another CPU */
			synchronize_irq(irq);
#ifdef CONFIG_DEBUG_SHIRQ
			/*
			 * It's a shared IRQ -- the driver ought to be
			 * prepared for it to happen even now that it's
			 * being freed, so let's make sure....  We do
			 * this after actually deregistering it, to
			 * make sure that a 'real' IRQ doesn't run in
			 * parallel with our fake one.
			 */
			if (action->flags & IRQF_SHARED) {
				local_irq_save(flags);
				action->handler(irq, dev_id);
				local_irq_restore(flags);
			}
#endif
			kfree(action);
			return;
		}
		printk(KERN_ERR "Trying to free already-free IRQ %d\n", irq);
#ifdef CONFIG_DEBUG_SHIRQ
		dump_stack();
#endif
		spin_unlock_irqrestore(&desc->lock, flags);
		return;
	}
}
EXPORT_SYMBOL(free_irq);
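
/*
 * Teardown sketch (illustrative, hypothetical device): the dev_id passed to
 * free_irq() must match the cookie given to request_irq(), which is how the
 * right action is picked off a shared line.
 *
 *	free_irq(dev->irq, dev);	// same dev pointer as at request time
 *
 * Because free_irq() calls synchronize_irq(), it must not run from the
 * handler itself or from any other interrupt context.
 */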

/**
 *	request_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs
 *	@irqflags: Interrupt type flags
 *	@devname: An ASCII name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. From the point this
 *	call is made your handler function may be invoked. Since
 *	your handler function must clear any interrupt the board
 *	raises, you must take care both to initialise your hardware
 *	and to set up the interrupt handler in the right order.
 *
 *	Dev_id must be globally unique. Normally the address of the
 *	device data structure is used as the cookie. Since the handler
 *	receives this value it makes sense to use it.
 *
 *	If your interrupt is shared you must pass a non-NULL dev_id
 *	as this is required when freeing the interrupt.
 *
 *	Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_DISABLED	Disable local interrupts while processing
 *	IRQF_SAMPLE_RANDOM	The interrupt can be used for entropy
 *
 */
int request_irq(unsigned int irq, irq_handler_t handler,
		unsigned long irqflags, const char *devname, void *dev_id)
{
	struct irqaction *action;
	int retval;

#ifdef CONFIG_LOCKDEP
	/*
	 * Lockdep wants atomic interrupt handlers:
	 */
	irqflags |= IRQF_DISABLED;
#endif
	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 */
	if ((irqflags & IRQF_SHARED) && !dev_id)
		return -EINVAL;
	if (irq >= NR_IRQS)
		return -EINVAL;
	if (irq_desc[irq].status & IRQ_NOREQUEST)
		return -EINVAL;
	if (!handler)
		return -EINVAL;

	action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = irqflags;
	cpus_clear(action->mask);
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

#ifdef CONFIG_DEBUG_SHIRQ
	if (irqflags & IRQF_SHARED) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure....
		 * We do this before actually registering it, to make sure that
		 * a 'real' IRQ doesn't run in parallel with our fake one.
		 */
		unsigned long flags;

		local_irq_save(flags);
		handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif

	retval = setup_irq(irq, action);
	if (retval)
		kfree(action);

	return retval;
}
EXPORT_SYMBOL(request_irq);
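
/*
 * Usage sketch (illustrative only, not part of this file): a typical
 * shared-interrupt driver pairing.  The mydev structure, the
 * mydrv_interrupt() handler, the mydev_* helpers and the error label
 * are all hypothetical.
 *
 *	static irqreturn_t mydrv_interrupt(int irq, void *dev_id)
 *	{
 *		struct mydev *dev = dev_id;
 *
 *		if (!mydev_irq_pending(dev))
 *			return IRQ_NONE;	// not ours on a shared line
 *		mydev_ack_irq(dev);
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_irq(dev->irq, mydrv_interrupt,
 *			  IRQF_SHARED, "mydrv", dev);
 *	if (err)
 *		goto fail;
 *	...
 *	free_irq(dev->irq, dev);
 *
 * The dev pointer doubles as the unique dev_id required for IRQF_SHARED.
 */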