/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "internals.h"

#ifdef CONFIG_IRQ_FORCED_THREADING
__read_mostly bool force_irqthreads;

static int __init setup_forced_irqthreads(char *arg)
{
        force_irqthreads = true;
        return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif
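
/*
 * Usage note: forced irq threading is enabled at boot time by adding
 * "threadirqs" to the kernel command line. There is no runtime toggle;
 * the flag is evaluated per request via irq_setup_forced_threading()
 * below.
 */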

/**
 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need, you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned int state;

        if (!desc)
                return;

        do {
                unsigned long flags;

                /*
                 * Wait until we're out of the critical section. This might
                 * give the wrong answer due to the lack of memory barriers.
                 */
                while (desc->istate & IRQS_INPROGRESS)
                        cpu_relax();

                /* Ok, that indicated we're done: double-check carefully. */
                raw_spin_lock_irqsave(&desc->lock, flags);
                state = desc->istate;
                raw_spin_unlock_irqrestore(&desc->lock, flags);

                /* Oops, that failed? */
        } while (state & IRQS_INPROGRESS);

        /*
         * We made sure that no hardirq handler is running. Now verify
         * that no threaded handlers are active.
         */
        wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
}
EXPORT_SYMBOL(synchronize_irq);
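
/*
 * Usage sketch (hypothetical driver teardown path; "struct foo" and its
 * members are illustrative, not a real API):
 *
 *        static void foo_stop(struct foo *foo)
 *        {
 *                disable_irq_nosync(foo->irq);
 *                synchronize_irq(foo->irq);
 *                ... now safe to tear down the state the handler uses ...
 *        }
 *
 * Per the warning above, foo_stop() must not hold any lock that foo's
 * IRQ handler also takes, or this will deadlock.
 */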

#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

/**
 * irq_can_set_affinity - Check if the affinity of a given irq can be set
 * @irq: Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc || !irqd_can_balance(&desc->irq_data) ||
            !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
                return 0;

        return 1;
}

/**
 * irq_set_thread_affinity - Notify irq threads to adjust affinity
 * @desc: irq descriptor whose affinity changed
 *
 * We just set IRQTF_AFFINITY and delegate the affinity setting
 * to the interrupt thread itself. We can not call
 * set_cpus_allowed_ptr() here as we hold desc->lock and this
 * code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
        struct irqaction *action = desc->action;

        while (action) {
                if (action->thread)
                        set_bit(IRQTF_AFFINITY, &action->thread_flags);
                action = action->next;
        }
}

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline bool irq_can_move_pcntxt(struct irq_desc *desc)
{
        return irq_settings_can_move_pcntxt(desc);
}
static inline bool irq_move_pending(struct irq_desc *desc)
{
        return irqd_is_setaffinity_pending(&desc->irq_data);
}
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
{
        cpumask_copy(desc->pending_mask, mask);
}
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
{
        cpumask_copy(mask, desc->pending_mask);
}
#else
static inline bool irq_can_move_pcntxt(struct irq_desc *desc) { return true; }
static inline bool irq_move_pending(struct irq_desc *desc) { return false; }
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
#endif

int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
{
        struct irq_chip *chip = irq_data_get_irq_chip(data);
        struct irq_desc *desc = irq_data_to_desc(data);
        int ret = 0;

        if (!chip || !chip->irq_set_affinity)
                return -EINVAL;

        if (irqd_can_move_in_process_context(data)) {
                ret = chip->irq_set_affinity(data, mask, false);
                switch (ret) {
                case IRQ_SET_MASK_OK:
                        cpumask_copy(data->affinity, mask);
                case IRQ_SET_MASK_OK_NOCOPY:
                        irq_set_thread_affinity(desc);
                        ret = 0;
                }
        } else {
                irqd_set_move_pending(data);
                irq_copy_pending(desc, mask);
        }

        if (desc->affinity_notify) {
                kref_get(&desc->affinity_notify->kref);
                schedule_work(&desc->affinity_notify->work);
        }
        irq_compat_set_affinity(desc);
        irqd_set(data, IRQD_AFFINITY_SET);

        return ret;
}

/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq: Interrupt to set affinity
 * @mask: cpumask
 *
 */
int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
        int ret;

        if (!desc)
                return -EINVAL;

        raw_spin_lock_irqsave(&desc->lock, flags);
        ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return ret;
}
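
/*
 * Usage sketch (hypothetical; pins an irq to CPU 0 and is called from
 * process context):
 *
 *        int foo_pin_irq_to_cpu0(unsigned int irq)
 *        {
 *                if (!irq_can_set_affinity(irq))
 *                        return -EINVAL;
 *                return irq_set_affinity(irq, cpumask_of(0));
 *        }
 *
 * Depending on irqd_can_move_in_process_context() the mask is either
 * applied immediately or recorded via irq_copy_pending() and applied
 * from the next hard interrupt.
 */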

int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_lock(irq, &flags);

        if (!desc)
                return -EINVAL;
        desc->affinity_hint = m;
        irq_put_desc_unlock(desc, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);

static void irq_affinity_notify(struct work_struct *work)
{
        struct irq_affinity_notify *notify =
                container_of(work, struct irq_affinity_notify, work);
        struct irq_desc *desc = irq_to_desc(notify->irq);
        cpumask_var_t cpumask;
        unsigned long flags;

        if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
                goto out;

        raw_spin_lock_irqsave(&desc->lock, flags);
        if (irq_move_pending(desc))
                irq_get_pending(cpumask, desc);
        else
                cpumask_copy(cpumask, desc->irq_data.affinity);
        raw_spin_unlock_irqrestore(&desc->lock, flags);

        notify->notify(notify, cpumask);

        free_cpumask_var(cpumask);
out:
        kref_put(&notify->kref, notify->release);
}

/**
 * irq_set_affinity_notifier - control notification of IRQ affinity changes
 * @irq: Interrupt for which to enable/disable notification
 * @notify: Context for notification, or %NULL to disable
 *          notification. Function pointers must be initialised;
 *          the other fields will be initialised by this function.
 *
 * Must be called in process context. Notification may only be enabled
 * after the IRQ is allocated and must be disabled before the IRQ is
 * freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irq_affinity_notify *old_notify;
        unsigned long flags;

        /* The release function is promised process context */
        might_sleep();

        if (!desc)
                return -EINVAL;

        /* Complete initialisation of *notify */
        if (notify) {
                notify->irq = irq;
                kref_init(&notify->kref);
                INIT_WORK(&notify->work, irq_affinity_notify);
        }

        raw_spin_lock_irqsave(&desc->lock, flags);
        old_notify = desc->affinity_notify;
        desc->affinity_notify = notify;
        raw_spin_unlock_irqrestore(&desc->lock, flags);

        if (old_notify)
                kref_put(&old_notify->kref, old_notify->release);

        return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
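
/*
 * Usage sketch (hypothetical driver that rebalances its queues when the
 * irq affinity changes; "struct foo" is illustrative):
 *
 *        static void foo_affinity_notify(struct irq_affinity_notify *notify,
 *                                        const cpumask_t *mask)
 *        {
 *                struct foo *foo = container_of(notify, struct foo, notify);
 *
 *                ... retarget foo's per-cpu resources to *mask ...
 *        }
 *
 *        static void foo_notify_release(struct kref *ref)
 *        {
 *                ... drop the reference held for the notifier ...
 *        }
 *
 *        foo->notify.notify = foo_affinity_notify;
 *        foo->notify.release = foo_notify_release;
 *        irq_set_affinity_notifier(foo->irq, &foo->notify);
 *
 * As documented above, the notifier must be torn down with
 * irq_set_affinity_notifier(foo->irq, NULL) before free_irq().
 */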

#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
static int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct cpumask *set = irq_default_affinity;
        int ret;

        /* Excludes PER_CPU and NO_BALANCE interrupts */
        if (!irq_can_set_affinity(irq))
                return 0;

        /*
         * Preserve a userspace affinity setup, but make sure that
         * one of the targets is online.
         */
        if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
                if (cpumask_intersects(desc->irq_data.affinity,
                                       cpu_online_mask))
                        set = desc->irq_data.affinity;
                else {
                        irq_compat_clr_affinity(desc);
                        irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
                }
        }

        cpumask_and(mask, cpu_online_mask, set);
        ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
        switch (ret) {
        case IRQ_SET_MASK_OK:
                cpumask_copy(desc->irq_data.affinity, mask);
        case IRQ_SET_MASK_OK_NOCOPY:
                irq_set_thread_affinity(desc);
        }
        return 0;
}
#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
{
        return irq_select_affinity(irq);
}
#endif

/*
 * Called when affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
        int ret;

        raw_spin_lock_irqsave(&desc->lock, flags);
        ret = setup_affinity(irq, desc, mask);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return ret;
}

#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
        return 0;
}
#endif

void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
{
        if (suspend) {
                if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
                        return;
                desc->istate |= IRQS_SUSPENDED;
        }

        if (!desc->depth++)
                irq_disable(desc);
}

static int __disable_irq_nosync(unsigned int irq)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);

        if (!desc)
                return -EINVAL;
        __disable_irq(desc, irq, false);
        irq_put_desc_busunlock(desc, flags);
        return 0;
}

/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and enables are
 * nested.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
        __disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need, you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
        if (!__disable_irq_nosync(irq))
                synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);

void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
{
        if (resume) {
                if (!(desc->istate & IRQS_SUSPENDED)) {
                        if (!desc->action)
                                return;
                        if (!(desc->action->flags & IRQF_FORCE_RESUME))
                                return;
                        /* Pretend that it got disabled ! */
                        desc->depth++;
                }
                desc->istate &= ~IRQS_SUSPENDED;
        }

        switch (desc->depth) {
        case 0:
err_out:
                WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
                break;
        case 1: {
                if (desc->istate & IRQS_SUSPENDED)
                        goto err_out;
                /* Prevent probing on this irq: */
                irq_settings_set_noprobe(desc);
                irq_enable(desc);
                check_irq_resend(desc, irq);
                /* fall-through */
        }
        default:
                desc->depth--;
        }
}

/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 *
 * This function may be called from IRQ context only when
 * desc->irq_data.chip->bus_lock and desc->irq_data.chip->bus_sync_unlock
 * are NULL !
 */
void enable_irq(unsigned int irq)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);

        if (!desc)
                return;
        if (WARN(!desc->irq_data.chip,
                 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
                goto out;

        __enable_irq(desc, irq, false);
out:
        irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);
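
/*
 * The disable/enable calls nest, so independent code paths can each
 * hold their own disable reference. A hypothetical sequence:
 *
 *        disable_irq(dev->irq);        depth 0 -> 1, irq gets disabled
 *        disable_irq(dev->irq);        depth 1 -> 2
 *        enable_irq(dev->irq);         depth 2 -> 1, still disabled
 *        enable_irq(dev->irq);         depth 1 -> 0, irq enabled again
 *
 * One further enable_irq() would hit the "Unbalanced enable" warning
 * in __enable_irq() above.
 */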

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
        struct irq_desc *desc = irq_to_desc(irq);
        int ret = -ENXIO;

        if (desc->irq_data.chip->irq_set_wake)
                ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

        return ret;
}

/**
 * irq_set_irq_wake - control irq power management wakeup
 * @irq: interrupt to control
 * @on: enable/disable power management wakeup
 *
 * Enable/disable power management wakeup mode, which is
 * disabled by default. Enables and disables must match,
 * just as they match for non-wakeup mode support.
 *
 * Wakeup mode lets this IRQ wake the system from sleep
 * states like "suspend to RAM".
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
        int ret = 0;

        /*
         * Wakeup-capable irqs can be shared between drivers that
         * don't need to have the same sleep mode behaviors.
         */
        if (on) {
                if (desc->wake_depth++ == 0) {
                        ret = set_irq_wake_real(irq, on);
                        if (ret)
                                desc->wake_depth = 0;
                        else
                                irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
                }
        } else {
                if (desc->wake_depth == 0) {
                        WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
                } else if (--desc->wake_depth == 0) {
                        ret = set_irq_wake_real(irq, on);
                        if (ret)
                                desc->wake_depth = 1;
                        else
                                irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
                }
        }
        irq_put_desc_busunlock(desc, flags);
        return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);
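
/*
 * Usage sketch (hypothetical wakeup-capable device, typically wired
 * into its dev_pm_ops suspend/resume callbacks):
 *
 *        static int foo_suspend(struct device *dev)
 *        {
 *                struct foo *foo = dev_get_drvdata(dev);
 *
 *                if (device_may_wakeup(dev))
 *                        irq_set_irq_wake(foo->irq, 1);
 *                return 0;
 *        }
 *
 *        static int foo_resume(struct device *dev)
 *        {
 *                struct foo *foo = dev_get_drvdata(dev);
 *
 *                if (device_may_wakeup(dev))
 *                        irq_set_irq_wake(foo->irq, 0);
 *                return 0;
 *        }
 *
 * The enable and disable calls must balance, matching the wake_depth
 * accounting above.
 */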
Thomas Gleixnerba9a2332006-06-29 02:24:55 -0700523
Linus Torvalds1da177e2005-04-16 15:20:36 -0700524/*
525 * Internal function that tells the architecture code whether a
526 * particular irq has been exclusively allocated or is available
527 * for driver use.
528 */
529int can_request_irq(unsigned int irq, unsigned long irqflags)
530{
Thomas Gleixnercc8c3b72010-03-23 22:40:53 +0100531 unsigned long flags;
Thomas Gleixner02725e72011-02-12 10:37:36 +0100532 struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
533 int canrequest = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700534
Yinghai Lu7d94f7c2008-08-19 20:50:14 -0700535 if (!desc)
536 return 0;
537
Thomas Gleixner02725e72011-02-12 10:37:36 +0100538 if (irq_settings_can_request(desc)) {
539 if (desc->action)
540 if (irqflags & desc->action->flags & IRQF_SHARED)
541 canrequest =1;
542 }
543 irq_put_desc_unlock(desc, flags);
544 return canrequest;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700545}

int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
                      unsigned long flags)
{
        struct irq_chip *chip = desc->irq_data.chip;
        int ret, unmask = 0;

        if (!chip || !chip->irq_set_type) {
                /*
                 * IRQF_TRIGGER_* but the PIC does not support multiple
                 * flow-types?
                 */
                pr_debug("No set_type function for IRQ %d (%s)\n", irq,
                         chip ? (chip->name ? : "unknown") : "unknown");
                return 0;
        }

        flags &= IRQ_TYPE_SENSE_MASK;

        if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
                if (!(desc->istate & IRQS_MASKED))
                        mask_irq(desc);
                if (!(desc->istate & IRQS_DISABLED))
                        unmask = 1;
        }

        /* caller masked out all except trigger mode flags */
        ret = chip->irq_set_type(&desc->irq_data, flags);

        switch (ret) {
        case IRQ_SET_MASK_OK:
                irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
                irqd_set(&desc->irq_data, flags);

        case IRQ_SET_MASK_OK_NOCOPY:
                flags = irqd_get_trigger_type(&desc->irq_data);
                irq_settings_set_trigger_mask(desc, flags);
                irqd_clear(&desc->irq_data, IRQD_LEVEL);
                irq_settings_clr_level(desc);
                if (flags & IRQ_TYPE_LEVEL_MASK) {
                        irq_settings_set_level(desc);
                        irqd_set(&desc->irq_data, IRQD_LEVEL);
                }

                if (chip != desc->irq_data.chip)
                        irq_chip_set_defaults(desc->irq_data.chip);
                ret = 0;
                break;
        default:
                pr_err("setting trigger mode %lu for irq %u failed (%pF)\n",
                       flags, irq, chip->irq_set_type);
        }
        if (unmask)
                unmask_irq(desc);
        return ret;
}

/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
        return IRQ_WAKE_THREAD;
}

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
        WARN(1, "Primary handler called for nested irq %d\n", irq);
        return IRQ_NONE;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);

                if (test_and_clear_bit(IRQTF_RUNTHREAD,
                                       &action->thread_flags)) {
                        __set_current_state(TASK_RUNNING);
                        return 0;
                }
                schedule();
        }
        return -1;
}

/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler has finished. Unmask if the interrupt has not been disabled
 * and is marked MASKED.
 */
static void irq_finalize_oneshot(struct irq_desc *desc,
                                 struct irqaction *action, bool force)
{
        if (!(desc->istate & IRQS_ONESHOT))
                return;
again:
        chip_bus_lock(desc);
        raw_spin_lock_irq(&desc->lock);

        /*
         * Implausible though it may be, we need to protect ourselves
         * against the following scenario:
         *
         * The thread finishes faster than the hard interrupt handler
         * on the other CPU. If we unmask the irq line then the
         * interrupt can come in again, mask the line, leave due to
         * IRQS_INPROGRESS, and the irq line stays masked forever.
         *
         * This also serializes the state of shared oneshot handlers
         * versus "desc->threads_oneshot |= action->thread_mask;" in
         * irq_wake_thread(). See the comment there which explains the
         * serialization.
         */
        if (unlikely(desc->istate & IRQS_INPROGRESS)) {
                raw_spin_unlock_irq(&desc->lock);
                chip_bus_sync_unlock(desc);
                cpu_relax();
                goto again;
        }

        /*
         * Now check again, whether the thread should run. Otherwise
         * we would clear the threads_oneshot bit of this thread which
         * was just set.
         */
        if (!force && test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
                goto out_unlock;

        desc->threads_oneshot &= ~action->thread_mask;

        if (!desc->threads_oneshot && !(desc->istate & IRQS_DISABLED) &&
            (desc->istate & IRQS_MASKED)) {
                irq_compat_clr_masked(desc);
                desc->istate &= ~IRQS_MASKED;
                desc->irq_data.chip->irq_unmask(&desc->irq_data);
        }
out_unlock:
        raw_spin_unlock_irq(&desc->lock);
        chip_bus_sync_unlock(desc);
}

#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
        cpumask_var_t mask;

        if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
                return;

        /*
         * In case we are out of memory we set IRQTF_AFFINITY again and
         * try again next time
         */
        if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
                set_bit(IRQTF_AFFINITY, &action->thread_flags);
                return;
        }

        raw_spin_lock_irq(&desc->lock);
        cpumask_copy(mask, desc->irq_data.affinity);
        raw_spin_unlock_irq(&desc->lock);

        set_cpus_allowed_ptr(current, mask);
        free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context. So we need to disable bh here to avoid deadlocks and other
 * side effects.
 */
static void
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
        local_bh_disable();
        action->thread_fn(action->irq, action->dev_id);
        irq_finalize_oneshot(desc, action, false);
        local_bh_enable();
}

/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow busses to
 * complete.
 */
static void irq_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
        action->thread_fn(action->irq, action->dev_id);
        irq_finalize_oneshot(desc, action, false);
}

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
        static const struct sched_param param = {
                .sched_priority = MAX_USER_RT_PRIO/2,
        };
        struct irqaction *action = data;
        struct irq_desc *desc = irq_to_desc(action->irq);
        void (*handler_fn)(struct irq_desc *desc, struct irqaction *action);
        int wake;

        if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
                                         &action->thread_flags))
                handler_fn = irq_forced_thread_fn;
        else
                handler_fn = irq_thread_fn;

        sched_setscheduler(current, SCHED_FIFO, &param);
        current->irqaction = action;

        while (!irq_wait_for_interrupt(action)) {

                irq_thread_check_affinity(desc, action);

                atomic_inc(&desc->threads_active);

                raw_spin_lock_irq(&desc->lock);
                if (unlikely(desc->istate & IRQS_DISABLED)) {
                        /*
                         * CHECKME: We might need a dedicated
                         * IRQ_THREAD_PENDING flag here, which
                         * retriggers the thread in check_irq_resend()
                         * but AFAICT IRQS_PENDING should be fine as it
                         * retriggers the interrupt itself --- tglx
                         */
                        irq_compat_set_pending(desc);
                        desc->istate |= IRQS_PENDING;
                        raw_spin_unlock_irq(&desc->lock);
                } else {
                        raw_spin_unlock_irq(&desc->lock);
                        handler_fn(desc, action);
                }

                wake = atomic_dec_and_test(&desc->threads_active);

                if (wake && waitqueue_active(&desc->wait_for_threads))
                        wake_up(&desc->wait_for_threads);
        }

        /* Prevent a stale desc->threads_oneshot */
        irq_finalize_oneshot(desc, action, true);

        /*
         * Clear irqaction. Otherwise exit_irq_thread() would make
         * fuzz about an active irq thread going into nirvana.
         */
        current->irqaction = NULL;
        return 0;
}

/*
 * Called from do_exit()
 */
void exit_irq_thread(void)
{
        struct task_struct *tsk = current;
        struct irq_desc *desc;

        if (!tsk->irqaction)
                return;

        printk(KERN_ERR
               "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
               tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);

        desc = irq_to_desc(tsk->irqaction->irq);

        /*
         * Prevent a stale desc->threads_oneshot. Must be called
         * before setting the IRQTF_DIED flag.
         */
        irq_finalize_oneshot(desc, tsk->irqaction, true);

        /*
         * Set the THREAD DIED flag to prevent further wakeups of the
         * soon to be gone threaded handler.
         */
        set_bit(IRQTF_DIED, &tsk->irqaction->thread_flags);
}

static void irq_setup_forced_threading(struct irqaction *new)
{
        if (!force_irqthreads)
                return;
        if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
                return;

        new->flags |= IRQF_ONESHOT;

        if (!new->thread_fn) {
                set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
                new->thread_fn = new->handler;
                new->handler = irq_default_primary_handler;
        }
}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
        struct irqaction *old, **old_ptr;
        const char *old_name = NULL;
        unsigned long flags, thread_mask = 0;
        int ret, nested, shared = 0;
        cpumask_var_t mask;

        if (!desc)
                return -EINVAL;

        if (desc->irq_data.chip == &no_irq_chip)
                return -ENOSYS;
        /*
         * Some drivers like serial.c use request_irq() heavily,
         * so we have to be careful not to interfere with a
         * running system.
         */
        if (new->flags & IRQF_SAMPLE_RANDOM) {
                /*
                 * This function might sleep, we want to call it first,
                 * outside of the atomic block.
                 * Yes, this might clear the entropy pool if the wrong
                 * driver is attempted to be loaded, without actually
                 * installing a new handler, but is this really a problem,
                 * only the sysadmin is able to do this.
                 */
                rand_initialize_irq(irq);
        }

        /*
         * Check whether the interrupt nests into another interrupt
         * thread.
         */
        nested = irq_settings_is_nested_thread(desc);
        if (nested) {
                if (!new->thread_fn)
                        return -EINVAL;
                /*
                 * Replace the primary handler which was provided from
                 * the driver for non nested interrupt handling by the
                 * dummy function which warns when called.
                 */
                new->handler = irq_nested_primary_handler;
        } else {
                irq_setup_forced_threading(new);
        }

        /*
         * Create a handler thread when a thread function is supplied
         * and the interrupt does not nest into another interrupt
         * thread.
         */
        if (new->thread_fn && !nested) {
                struct task_struct *t;

                t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
                                   new->name);
                if (IS_ERR(t))
                        return PTR_ERR(t);
                /*
                 * We keep the reference to the task struct even if
                 * the thread dies to avoid that the interrupt code
                 * references an already freed task_struct.
                 */
                get_task_struct(t);
                new->thread = t;
        }

        if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
                ret = -ENOMEM;
                goto out_thread;
        }

        /*
         * The following block of code has to be executed atomically
         */
        raw_spin_lock_irqsave(&desc->lock, flags);
        old_ptr = &desc->action;
        old = *old_ptr;
        if (old) {
                /*
                 * Can't share interrupts unless both agree to and are
                 * the same type (level, edge, polarity). So both flag
                 * fields must have IRQF_SHARED set and the bits which
                 * set the trigger type must match. Also all must
                 * agree on ONESHOT.
                 */
                if (!((old->flags & new->flags) & IRQF_SHARED) ||
                    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
                    ((old->flags ^ new->flags) & IRQF_ONESHOT)) {
                        old_name = old->name;
                        goto mismatch;
                }

                /* All handlers must agree on per-cpuness */
                if ((old->flags & IRQF_PERCPU) !=
                    (new->flags & IRQF_PERCPU))
                        goto mismatch;

                /* add new interrupt at end of irq queue */
                do {
                        thread_mask |= old->thread_mask;
                        old_ptr = &old->next;
                        old = *old_ptr;
                } while (old);
                shared = 1;
        }

        /*
         * Setup the thread mask for this irqaction. Unlikely to have
         * 32 resp. 64 irqs sharing one line, but who knows.
         */
        if (new->flags & IRQF_ONESHOT && thread_mask == ~0UL) {
                ret = -EBUSY;
                goto out_mask;
        }
        new->thread_mask = 1 << ffz(thread_mask);

        if (!shared) {
                irq_chip_set_defaults(desc->irq_data.chip);

                init_waitqueue_head(&desc->wait_for_threads);

                /* Setup the type (level, edge polarity) if configured: */
                if (new->flags & IRQF_TRIGGER_MASK) {
                        ret = __irq_set_trigger(desc, irq,
                                        new->flags & IRQF_TRIGGER_MASK);

                        if (ret)
                                goto out_mask;
                }

                desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED |
                                  IRQS_INPROGRESS | IRQS_ONESHOT |
                                  IRQS_WAITING);

                if (new->flags & IRQF_PERCPU) {
                        irqd_set(&desc->irq_data, IRQD_PER_CPU);
                        irq_settings_set_per_cpu(desc);
                }

                if (new->flags & IRQF_ONESHOT)
                        desc->istate |= IRQS_ONESHOT;

                if (irq_settings_can_autoenable(desc))
                        irq_startup(desc);
                else
                        /* Undo nested disables: */
                        desc->depth = 1;

                /* Exclude IRQ from balancing if requested */
                if (new->flags & IRQF_NOBALANCING) {
                        irq_settings_set_no_balancing(desc);
                        irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
                }

                /* Set default affinity mask once everything is setup */
                setup_affinity(irq, desc, mask);

        } else if (new->flags & IRQF_TRIGGER_MASK) {
                unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
                unsigned int omsk = irq_settings_get_trigger_mask(desc);

                if (nmsk != omsk)
                        /* hope the handler works with current trigger mode */
                        pr_warning("IRQ %d uses trigger mode %u; requested %u\n",
                                   irq, omsk, nmsk);
        }

        new->irq = irq;
        *old_ptr = new;

        /* Reset broken irq detection when installing new handler */
        desc->irq_count = 0;
        desc->irqs_unhandled = 0;

        /*
         * Check whether we disabled the irq via the spurious handler
         * before. Reenable it and give it another chance.
         */
        if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
                desc->istate &= ~IRQS_SPURIOUS_DISABLED;
                __enable_irq(desc, irq, false);
        }

        raw_spin_unlock_irqrestore(&desc->lock, flags);

        /*
         * Strictly no need to wake it up, but hung_task complains
         * when no hard interrupt wakes the thread up.
         */
        if (new->thread)
                wake_up_process(new->thread);

        register_irq_proc(irq, desc);
        new->dir = NULL;
        register_handler_proc(irq, new);

        return 0;

mismatch:
#ifdef CONFIG_DEBUG_SHIRQ
        if (!(new->flags & IRQF_PROBE_SHARED)) {
                printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq);
                if (old_name)
                        printk(KERN_ERR "current handler: %s\n", old_name);
                dump_stack();
        }
#endif
        ret = -EBUSY;

out_mask:
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        free_cpumask_var(mask);

out_thread:
        if (new->thread) {
                struct task_struct *t = new->thread;

                new->thread = NULL;
                if (likely(!test_bit(IRQTF_DIED, &new->thread_flags)))
                        kthread_stop(t);
                put_task_struct(t);
        }
        return ret;
}

/**
 * setup_irq - setup an interrupt
 * @irq: Interrupt line to setup
 * @act: irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
        int retval;
        struct irq_desc *desc = irq_to_desc(irq);

        chip_bus_lock(desc);
        retval = __setup_irq(irq, desc, act);
        chip_bus_sync_unlock(desc);

        return retval;
}
EXPORT_SYMBOL_GPL(setup_irq);
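
/*
 * Usage sketch: architecture code typically hands setup_irq() a static
 * irqaction before the allocators are up. Hypothetical early timer
 * setup (names are illustrative):
 *
 *        static struct irqaction foo_timer_irqaction = {
 *                .handler = foo_timer_interrupt,
 *                .flags   = IRQF_TIMER,
 *                .name    = "timer",
 *        };
 *
 *        setup_irq(FOO_TIMER_IRQ, &foo_timer_irqaction);
 */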

/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irqaction *action, **action_ptr;
        unsigned long flags;

        WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

        if (!desc)
                return NULL;

        raw_spin_lock_irqsave(&desc->lock, flags);

        /*
         * There can be multiple actions per IRQ descriptor, find the right
         * one based on the dev_id:
         */
        action_ptr = &desc->action;
        for (;;) {
                action = *action_ptr;

                if (!action) {
                        WARN(1, "Trying to free already-free IRQ %d\n", irq);
                        raw_spin_unlock_irqrestore(&desc->lock, flags);

                        return NULL;
                }

                if (action->dev_id == dev_id)
                        break;
                action_ptr = &action->next;
        }

        /* Found it - now remove it from the list of entries: */
        *action_ptr = action->next;

        /* Currently used only by UML, might disappear one day: */
#ifdef CONFIG_IRQ_RELEASE_METHOD
        if (desc->irq_data.chip->release)
                desc->irq_data.chip->release(irq, dev_id);
#endif

        /* If this was the last handler, shut down the IRQ line: */
        if (!desc->action)
                irq_shutdown(desc);

#ifdef CONFIG_SMP
        /* make sure affinity_hint is cleaned up */
        if (WARN_ON_ONCE(desc->affinity_hint))
                desc->affinity_hint = NULL;
#endif

        raw_spin_unlock_irqrestore(&desc->lock, flags);

        unregister_handler_proc(irq, action);

        /* Make sure it's not being used on another CPU: */
        synchronize_irq(irq);

#ifdef CONFIG_DEBUG_SHIRQ
        /*
         * It's a shared IRQ -- the driver ought to be prepared for an IRQ
         * event to happen even now it's being freed, so let's make sure that
         * is so by doing an extra call to the handler ....
         *
         * ( We do this after actually deregistering it, to make sure that a
         *   'real' IRQ doesn't run in parallel with our fake. )
         */
        if (action->flags & IRQF_SHARED) {
                local_irq_save(flags);
                action->handler(irq, dev_id);
                local_irq_restore(flags);
        }
#endif

        if (action->thread) {
                if (!test_bit(IRQTF_DIED, &action->thread_flags))
                        kthread_stop(action->thread);
                put_task_struct(action->thread);
        }

        return action;
}

/**
 * remove_irq - free an interrupt
 * @irq: Interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_irq(unsigned int irq, struct irqaction *act)
{
        __free_irq(irq, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);

/**
 * free_irq - free an interrupt allocated with request_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler. The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function. The function
 * does not return until any executing interrupts for this IRQ
 * have completed.
 *
 * This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc)
                return;

#ifdef CONFIG_SMP
        if (WARN_ON(desc->affinity_notify))
                desc->affinity_notify = NULL;
#endif

        chip_bus_lock(desc);
        kfree(__free_irq(irq, dev_id));
        chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL(free_irq);

/**
 * request_threaded_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *           Primary handler for threaded interrupts.
 *           If NULL and thread_fn != NULL the default
 *           primary handler is installed.
 * @thread_fn: Function called from the irq handler thread.
 *             If NULL, no irq thread is created.
 * @irqflags: Interrupt type flags
 * @devname: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. From the point this
 * call is made your handler function may be invoked. Since
 * your handler function must clear any interrupt the board
 * raises, you must take care both to initialise your hardware
 * and to set up the interrupt handler in the right order.
 *
 * If you want to set up a threaded irq handler for your device
 * then you need to supply @handler and @thread_fn. @handler is
 * still called in hard interrupt context and has to check
 * whether the interrupt originates from the device. If yes it
 * needs to disable the interrupt on the device and return
 * IRQ_WAKE_THREAD which will wake up the handler thread and run
 * @thread_fn. This split handler design is necessary to support
 * shared interrupts.
 *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If your interrupt is shared you must pass a non NULL dev_id
 * as this is required when freeing the interrupt.
 *
 * Flags:
 *
 *        IRQF_SHARED          Interrupt is shared
 *        IRQF_SAMPLE_RANDOM   The interrupt can be used for entropy
 *        IRQF_TRIGGER_*       Specify active edge(s) or level
 *
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
                         irq_handler_t thread_fn, unsigned long irqflags,
                         const char *devname, void *dev_id)
{
        struct irqaction *action;
        struct irq_desc *desc;
        int retval;

        /*
         * Sanity-check: shared interrupts must pass in a real dev-ID,
         * otherwise we'll have trouble later trying to figure out
         * which interrupt is which (messes up the interrupt freeing
         * logic etc).
         */
        if ((irqflags & IRQF_SHARED) && !dev_id)
                return -EINVAL;

        desc = irq_to_desc(irq);
        if (!desc)
                return -EINVAL;

        if (!irq_settings_can_request(desc))
                return -EINVAL;

        if (!handler) {
                if (!thread_fn)
                        return -EINVAL;
                handler = irq_default_primary_handler;
        }

        action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
        if (!action)
                return -ENOMEM;

        action->handler = handler;
        action->thread_fn = thread_fn;
        action->flags = irqflags;
        action->name = devname;
        action->dev_id = dev_id;

        chip_bus_lock(desc);
        retval = __setup_irq(irq, desc, action);
        chip_bus_sync_unlock(desc);

        if (retval)
                kfree(action);

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
        if (!retval && (irqflags & IRQF_SHARED)) {
                /*
                 * It's a shared IRQ -- the driver ought to be prepared for it
                 * to happen immediately, so let's make sure....
                 * We disable the irq to make sure that a 'real' IRQ doesn't
                 * run in parallel with our fake.
                 */
                unsigned long flags;

                disable_irq(irq);
                local_irq_save(flags);

                handler(irq, dev_id);

                local_irq_restore(flags);
                enable_irq(irq);
        }
#endif
        return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
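
/*
 * Usage sketch for the split-handler scheme described above
 * (hypothetical device on a slow bus; the foo_* helpers are
 * illustrative):
 *
 *        static irqreturn_t foo_quick_check(int irq, void *dev_id)
 *        {
 *                struct foo *foo = dev_id;
 *
 *                if (!foo_irq_raised(foo))
 *                        return IRQ_NONE;
 *                foo_mask_device_irq(foo);
 *                return IRQ_WAKE_THREAD;
 *        }
 *
 *        static irqreturn_t foo_thread_fn(int irq, void *dev_id)
 *        {
 *                struct foo *foo = dev_id;
 *
 *                foo_handle_event(foo);
 *                foo_unmask_device_irq(foo);
 *                return IRQ_HANDLED;
 *        }
 *
 *        ret = request_threaded_irq(foo->irq, foo_quick_check, foo_thread_fn,
 *                                   IRQF_SHARED, "foo", foo);
 *
 * Passing handler == NULL instead installs irq_default_primary_handler()
 * and is commonly combined with IRQF_ONESHOT, which keeps the line
 * masked until the thread finishes.
 */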

/**
 * request_any_context_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *           Threaded handler for threaded interrupts.
 * @flags: Interrupt type flags
 * @name: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. It selects either a
 * hardirq or threaded handling method depending on the
 * context.
 *
 * On failure, it returns a negative value. On success,
 * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
int request_any_context_irq(unsigned int irq, irq_handler_t handler,
                            unsigned long flags, const char *name, void *dev_id)
{
        struct irq_desc *desc = irq_to_desc(irq);
        int ret;

        if (!desc)
                return -EINVAL;

        if (irq_settings_is_nested_thread(desc)) {
                ret = request_threaded_irq(irq, NULL, handler,
                                           flags, name, dev_id);
                return !ret ? IRQC_IS_NESTED : ret;
        }

        ret = request_irq(irq, handler, flags, name, dev_id);
        return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);
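
/*
 * Usage sketch (hypothetical driver that may sit behind either a normal
 * irqchip or a nested-thread irqchip such as an i2c GPIO expander):
 *
 *        ret = request_any_context_irq(foo->irq, foo_handler, 0, "foo", foo);
 *        if (ret < 0)
 *                return ret;
 *        foo->handler_is_threaded = (ret == IRQC_IS_NESTED);
 *
 * Callers must test for a negative return rather than for !ret, since
 * success is reported as IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */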