/*
 *  linux/arch/arm/kernel/irq.c
 *
 *  Copyright (C) 1992 Linus Torvalds
 *  Modifications for ARM processor Copyright (C) 1995-2000 Russell King.
 *
 *  Support for Dynamic Tick Timer Copyright (C) 2004-2005 Nokia Corporation.
 *  Dynamic Tick Timer written by Tony Lindgren <tony@atomide.com> and
 *  Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * IRQs are in fact implemented a bit like signal handlers for the kernel.
 * Naturally it's not a 1:1 relation, but there are similarities.
 */
#include <linux/config.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/proc_fs.h>

#include <asm/irq.h>
#include <asm/system.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>

/*
 * Maximum IRQ count.  Currently, this is arbitrary.  However, it should
 * not be set too low, or a busy but healthy IRQ will trigger it falsely.
 * Conversely, if it is set too high, a genuinely stuck IRQ may be missed.
 *
 * Maybe we ought to set a timer and re-enable the IRQ at a later time?
 */
#define MAX_IRQ_CNT	100000

static int noirqdebug;
static volatile unsigned long irq_err_count;
static DEFINE_SPINLOCK(irq_controller_lock);
static LIST_HEAD(irq_pending);

struct irqdesc irq_desc[NR_IRQS];
void (*init_arch_irq)(void) __initdata = NULL;

/*
 * Provide a no-op if the architecture does not define an irq_finish
 * function in its asm/arch/irqs.h.
 */
#ifndef irq_finish
#define irq_finish(irq) do { } while (0)
#endif

/*
 * Dummy mask/unmask handler
 */
void dummy_mask_unmask_irq(unsigned int irq)
{
}

irqreturn_t no_action(int irq, void *dev_id, struct pt_regs *regs)
{
	return IRQ_NONE;
}

void do_bad_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
	irq_err_count += 1;
	printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
}

static struct irqchip bad_chip = {
	.ack	= dummy_mask_unmask_irq,
	.mask	= dummy_mask_unmask_irq,
	.unmask = dummy_mask_unmask_irq,
};

static struct irqdesc bad_irq_desc = {
	.chip		= &bad_chip,
	.handle		= do_bad_IRQ,
	.pend		= LIST_HEAD_INIT(bad_irq_desc.pend),
	.disable_depth	= 1,
};

#ifdef CONFIG_SMP
void synchronize_irq(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;

	while (desc->running)
		barrier();
}
EXPORT_SYMBOL(synchronize_irq);

#define smp_set_running(desc)	do { desc->running = 1; } while (0)
#define smp_clear_running(desc)	do { desc->running = 0; } while (0)
#else
#define smp_set_running(desc)	do { } while (0)
#define smp_clear_running(desc)	do { } while (0)
#endif

/**
 *	disable_irq_nosync - disable an irq without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and disables
 *	are nested.  We do this lazily.
 *
 *	This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	desc->disable_depth++;
	list_del_init(&desc->pend);
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and disables
 *	are nested.  This function waits for any pending IRQ
 *	handlers for this interrupt to complete before returning.
 *	If you use this function while holding a resource the IRQ
 *	handler may need, you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;

	disable_irq_nosync(irq);
	if (desc->action)
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);

/**
 *	enable_irq - enable interrupt handling on an irq
 *	@irq: Interrupt to enable
 *
 *	Re-enables the processing of interrupts on this IRQ line.
 *	Note that this may call the interrupt handler, so you may
 *	get unexpected results if you hold IRQs disabled.
 *
 *	This function may be called from IRQ context.
 */
void enable_irq(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	if (unlikely(!desc->disable_depth)) {
		printk("enable_irq(%u) unbalanced from %p\n", irq,
			__builtin_return_address(0));
	} else if (!--desc->disable_depth) {
		desc->probing = 0;
		desc->chip->unmask(irq);

		/*
		 * If the interrupt is waiting to be processed,
		 * try to re-run it.  We can't directly run it
		 * from here since the caller might be in an
		 * interrupt-protected region.
		 */
		if (desc->pending && list_empty(&desc->pend)) {
			desc->pending = 0;
			if (!desc->chip->retrigger ||
			    desc->chip->retrigger(irq))
				list_add(&desc->pend, &irq_pending);
		}
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
EXPORT_SYMBOL(enable_irq);
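
/*
 * A minimal usage sketch (not part of this file): a driver fencing its
 * interrupt off around a reconfiguration.  The IRQ number and helper
 * below are hypothetical.
 *
 *	disable_irq(MYDEV_IRQ);		// waits for running handlers
 *	mydev_reprogram_registers();	// safe: the handler cannot run here
 *	enable_irq(MYDEV_IRQ);		// may immediately re-run a pended IRQ
 *
 * Enables and disables nest: the line is only unmasked again once every
 * disable_irq{,_nosync}() call has been balanced by an enable_irq().
 */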

/*
 * Enable wake on selected irq
 */
void enable_irq_wake(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	if (desc->chip->set_wake)
		desc->chip->set_wake(irq, 1);
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
EXPORT_SYMBOL(enable_irq_wake);

void disable_irq_wake(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	if (desc->chip->set_wake)
		desc->chip->set_wake(irq, 0);
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
EXPORT_SYMBOL(disable_irq_wake);
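
/*
 * Hedged example: marking an interrupt as a wakeup source across
 * suspend.  The hooks and IRQ number are invented for illustration;
 * whether this has any effect depends entirely on the chip's set_wake.
 *
 *	static int mydev_suspend(struct device *dev, pm_message_t state)
 *	{
 *		enable_irq_wake(MYDEV_IRQ);
 *		return 0;
 *	}
 *
 *	static int mydev_resume(struct device *dev)
 *	{
 *		disable_irq_wake(MYDEV_IRQ);
 *		return 0;
 *	}
 */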

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, cpu;
	struct irqaction *action;
	unsigned long flags;

	if (i == 0) {
		char cpuname[12];

		seq_printf(p, "    ");
		for_each_present_cpu(cpu) {
			sprintf(cpuname, "CPU%d", cpu);
			seq_printf(p, " %10s", cpuname);
		}
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		spin_lock_irqsave(&irq_controller_lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto unlock;

		seq_printf(p, "%3d: ", i);
		for_each_present_cpu(cpu)
			seq_printf(p, "%10u ", kstat_cpu(cpu).irqs[i]);
		seq_printf(p, "  %s", action->name);
		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
unlock:
		spin_unlock_irqrestore(&irq_controller_lock, flags);
	} else if (i == NR_IRQS) {
#ifdef CONFIG_ARCH_ACORN
		show_fiq_list(p, v);
#endif
#ifdef CONFIG_SMP
		show_ipi_list(p);
#endif
		seq_printf(p, "Err: %10lu\n", irq_err_count);
	}
	return 0;
}

/*
 * IRQ lock detection.
 *
 * Hopefully, this should get us out of a few locked situations.
 * However, it may take a while for this to happen, since we need
 * a large number of IRQs to appear in the same jiffy with the
 * same instruction pointer (or within 2 instructions).
 */
static int check_irq_lock(struct irqdesc *desc, int irq, struct pt_regs *regs)
{
	unsigned long instr_ptr = instruction_pointer(regs);

	if (desc->lck_jif == jiffies &&
	    desc->lck_pc >= instr_ptr && desc->lck_pc < instr_ptr + 8) {
		desc->lck_cnt += 1;

		if (desc->lck_cnt > MAX_IRQ_CNT) {
			printk(KERN_ERR "IRQ LOCK: IRQ%d is locking the system, disabled\n", irq);
			return 1;
		}
	} else {
		desc->lck_cnt = 0;
		desc->lck_pc  = instruction_pointer(regs);
		desc->lck_jif = jiffies;
	}
	return 0;
}

static void
report_bad_irq(unsigned int irq, struct pt_regs *regs, struct irqdesc *desc, int ret)
{
	static int count = 100;
	struct irqaction *action;

	if (!count || noirqdebug)
		return;

	count--;

	if (ret != IRQ_HANDLED && ret != IRQ_NONE) {
		printk("irq%u: bogus retval mask %x\n", irq, ret);
	} else {
		printk("irq%u: nobody cared\n", irq);
	}
	show_regs(regs);
	dump_stack();
	printk(KERN_ERR "handlers:");
	action = desc->action;
	do {
		printk("\n" KERN_ERR "[<%p>]", action->handler);
		print_symbol(" (%s)", (unsigned long)action->handler);
		action = action->next;
	} while (action);
	printk("\n");
}

static int
__do_irq(unsigned int irq, struct irqaction *action, struct pt_regs *regs)
{
	unsigned int status;
	int ret, retval = 0;

	spin_unlock(&irq_controller_lock);

#ifdef CONFIG_NO_IDLE_HZ
	if (!(action->flags & SA_TIMER) && system_timer->dyn_tick != NULL) {
		write_seqlock(&xtime_lock);
		if (system_timer->dyn_tick->state & DYN_TICK_ENABLED)
			system_timer->dyn_tick->handler(irq, 0, regs);
		write_sequnlock(&xtime_lock);
	}
#endif

	if (!(action->flags & SA_INTERRUPT))
		local_irq_enable();

	status = 0;
	do {
		ret = action->handler(irq, action->dev_id, regs);
		if (ret == IRQ_HANDLED)
			status |= action->flags;
		retval |= ret;
		action = action->next;
	} while (action);

	if (status & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);

	spin_lock_irq(&irq_controller_lock);

	return retval;
}

/*
 * This is for software-decoded IRQs.  The caller is expected to
 * handle the ack, clear, mask and unmask issues.
 */
void
do_simple_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
	struct irqaction *action;
	const unsigned int cpu = smp_processor_id();

	desc->triggered = 1;

	kstat_cpu(cpu).irqs[irq]++;

	smp_set_running(desc);

	action = desc->action;
	if (action) {
		int ret = __do_irq(irq, action, regs);
		if (ret != IRQ_HANDLED)
			report_bad_irq(irq, regs, desc, ret);
	}

	smp_clear_running(desc);
}

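/*
 * Sketch of a typical caller, assuming a hypothetical cascaded expander:
 * a chained handler software-decodes a secondary status register and
 * feeds each decoded source through desc_handle_irq(), handling its own
 * ack/mask/unmask around the decode as the contract above requires.
 *
 *	static void expander_demux(unsigned int irq, struct irqdesc *desc,
 *				   struct pt_regs *regs)
 *	{
 *		unsigned int pending = expander_read_status();
 *
 *		while (pending) {
 *			unsigned int bit = ffs(pending) - 1;
 *			unsigned int child = EXPANDER_IRQ_BASE + bit;
 *
 *			desc_handle_irq(child, irq_desc + child, regs);
 *			pending &= ~(1 << bit);
 *		}
 *	}
 *
 * with each child installed via set_irq_handler(child, do_simple_IRQ).
 */
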
/*
 * Most edge-triggered IRQ implementations seem to take a broken
 * approach to this.  Hence the complexity.
 */
void
do_edge_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
	const unsigned int cpu = smp_processor_id();

	desc->triggered = 1;

	/*
	 * If we're currently running this IRQ, or it's disabled,
	 * we shouldn't process the IRQ.  Instead, turn on the
	 * hardware masks.
	 */
	if (unlikely(desc->running || desc->disable_depth))
		goto running;

	/*
	 * Acknowledge and clear the IRQ, but don't mask it.
	 */
	desc->chip->ack(irq);

	/*
	 * Mark the IRQ currently in progress.
	 */
	desc->running = 1;

	kstat_cpu(cpu).irqs[irq]++;

	do {
		struct irqaction *action;

		action = desc->action;
		if (!action)
			break;

		if (desc->pending && !desc->disable_depth) {
			desc->pending = 0;
			desc->chip->unmask(irq);
		}

		__do_irq(irq, action, regs);
	} while (desc->pending && !desc->disable_depth);

	desc->running = 0;

	/*
	 * If we were disabled or freed, shut down the handler.
	 */
	if (likely(desc->action && !check_irq_lock(desc, irq, regs)))
		return;

 running:
	/*
	 * We got another IRQ while this one was masked or
	 * currently running.  Delay it.
	 */
	desc->pending = 1;
	desc->chip->mask(irq);
	desc->chip->ack(irq);
}

/*
 * Level-based IRQ handler.  Nice and simple.
 */
void
do_level_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
	struct irqaction *action;
	const unsigned int cpu = smp_processor_id();

	desc->triggered = 1;

	/*
	 * Acknowledge, clear _AND_ disable the interrupt.
	 */
	desc->chip->ack(irq);

	if (likely(!desc->disable_depth)) {
		kstat_cpu(cpu).irqs[irq]++;

		smp_set_running(desc);

		/*
		 * Return with this interrupt masked if no action
		 */
		action = desc->action;
		if (action) {
			int ret = __do_irq(irq, desc->action, regs);

			if (ret != IRQ_HANDLED)
				report_bad_irq(irq, regs, desc, ret);

			if (likely(!desc->disable_depth &&
				   !check_irq_lock(desc, irq, regs)))
				desc->chip->unmask(irq);
		}

		smp_clear_running(desc);
	}
}

static void do_pending_irqs(struct pt_regs *regs)
{
	struct list_head head, *l, *n;

	do {
		struct irqdesc *desc;

		/*
		 * First, take the pending interrupts off the list.
		 * The act of calling the handlers may add some IRQs
		 * back onto the list.
		 */
		head = irq_pending;
		INIT_LIST_HEAD(&irq_pending);
		head.next->prev = &head;
		head.prev->next = &head;

		/*
		 * Now run each entry.  We must delete it from our
		 * list before calling the handler.
		 */
		list_for_each_safe(l, n, &head) {
			desc = list_entry(l, struct irqdesc, pend);
			list_del_init(&desc->pend);
			desc_handle_irq(desc - irq_desc, desc, regs);
		}

		/*
		 * The list must be empty.
		 */
		BUG_ON(!list_empty(&head));
	} while (!list_empty(&irq_pending));
}

/*
 * do_IRQ handles all hardware IRQs.  Decoded IRQs should not
 * come via this function.  Instead, they should provide their
 * own 'handler'.
 */
asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct irqdesc *desc = irq_desc + irq;

	/*
	 * Some hardware gives randomly wrong interrupts.  Rather
	 * than crashing, do something sensible.
	 */
	if (irq >= NR_IRQS)
		desc = &bad_irq_desc;

	irq_enter();
	spin_lock(&irq_controller_lock);
	desc_handle_irq(irq, desc, regs);

	/*
	 * Now re-run any pending interrupts.
	 */
	if (!list_empty(&irq_pending))
		do_pending_irqs(regs);

	irq_finish(irq);

	spin_unlock(&irq_controller_lock);
	irq_exit();
}

void __set_irq_handler(unsigned int irq, irq_handler_t handle, int is_chained)
{
	struct irqdesc *desc;
	unsigned long flags;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR "Trying to install handler for IRQ%d\n", irq);
		return;
	}

	if (handle == NULL)
		handle = do_bad_IRQ;

	desc = irq_desc + irq;

	if (is_chained && desc->chip == &bad_chip)
		printk(KERN_WARNING "Trying to install chained handler for IRQ%d\n", irq);

	spin_lock_irqsave(&irq_controller_lock, flags);
	if (handle == do_bad_IRQ) {
		desc->chip->mask(irq);
		desc->chip->ack(irq);
		desc->disable_depth = 1;
	}
	desc->handle = handle;
	if (handle != do_bad_IRQ && is_chained) {
		desc->valid = 0;
		desc->probe_ok = 0;
		desc->disable_depth = 0;
		desc->chip->unmask(irq);
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}

void set_irq_chip(unsigned int irq, struct irqchip *chip)
{
	struct irqdesc *desc;
	unsigned long flags;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR "Trying to install chip for IRQ%d\n", irq);
		return;
	}

	if (chip == NULL)
		chip = &bad_chip;

	desc = irq_desc + irq;
	spin_lock_irqsave(&irq_controller_lock, flags);
	desc->chip = chip;
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}

int set_irq_type(unsigned int irq, unsigned int type)
{
	struct irqdesc *desc;
	unsigned long flags;
	int ret = -ENXIO;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq);
		return -ENODEV;
	}

	desc = irq_desc + irq;
	if (desc->chip->set_type) {
		spin_lock_irqsave(&irq_controller_lock, flags);
		ret = desc->chip->set_type(irq, type);
		spin_unlock_irqrestore(&irq_controller_lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL(set_irq_type);

void set_irq_flags(unsigned int irq, unsigned int iflags)
{
	struct irqdesc *desc;
	unsigned long flags;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR "Trying to set irq flags for IRQ%d\n", irq);
		return;
	}

	desc = irq_desc + irq;
	spin_lock_irqsave(&irq_controller_lock, flags);
	desc->valid = (iflags & IRQF_VALID) != 0;
	desc->probe_ok = (iflags & IRQF_PROBE) != 0;
	desc->noautoenable = (iflags & IRQF_NOAUTOEN) != 0;
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}

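/*
 * How a machine's init code typically wires the above together - a
 * sketch only; the chip, IRQ range and trigger type are invented:
 *
 *	for (i = MYCHIP_IRQ_FIRST; i <= MYCHIP_IRQ_LAST; i++) {
 *		set_irq_chip(i, &mychip_chip);
 *		set_irq_handler(i, do_level_IRQ);
 *		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
 *	}
 *	set_irq_type(MYCHIP_IRQ_UART, IRQT_RISING);
 *
 * set_irq_handler() is the usual wrapper around __set_irq_handler()
 * with is_chained == 0.
 */
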
int setup_irq(unsigned int irq, struct irqaction *new)
{
	int shared = 0;
	struct irqaction *old, **p;
	unsigned long flags;
	struct irqdesc *desc;

	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, so we want to call it
		 * first, outside of the atomic block.  Yes, this might
		 * clear the entropy pool if the wrong driver is loaded
		 * without actually installing a new handler, but is
		 * that really a problem?  Only the sysadmin is able to
		 * do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	desc = irq_desc + irq;
	spin_lock_irqsave(&irq_controller_lock, flags);
	p = &desc->action;
	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to */
		if (!(old->flags & new->flags & SA_SHIRQ)) {
			spin_unlock_irqrestore(&irq_controller_lock, flags);
			return -EBUSY;
		}

		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	*p = new;

	if (!shared) {
		desc->probing = 0;
		desc->running = 0;
		desc->pending = 0;
		desc->disable_depth = 1;
		if (!desc->noautoenable) {
			desc->disable_depth = 0;
			desc->chip->unmask(irq);
		}
	}

	spin_unlock_irqrestore(&irq_controller_lock, flags);
	return 0;
}

/**
 *	request_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs
 *	@irq_flags: Interrupt type flags
 *	@devname: An ASCII name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling.  From the point this
 *	call is made your handler function may be invoked.  Since
 *	your handler function must clear any interrupt the board
 *	raises, you must take care both to initialise your hardware
 *	and to set up the interrupt handler in the right order.
 *
 *	Dev_id must be globally unique.  Normally the address of the
 *	device data structure is used as the cookie.  Since the handler
 *	receives this value it makes sense to use it.
 *
 *	If your interrupt is shared you must pass a non-NULL dev_id
 *	as this is required when freeing the interrupt.
 *
 *	Flags:
 *
 *	SA_SHIRQ		Interrupt is shared
 *
 *	SA_INTERRUPT		Disable local interrupts while processing
 *
 *	SA_SAMPLE_RANDOM	The interrupt can be used for entropy
 *
 */
int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
		unsigned long irq_flags, const char *devname, void *dev_id)
{
	int retval;
	struct irqaction *action;

	if (irq >= NR_IRQS || !irq_desc[irq].valid || !handler ||
	    (irq_flags & SA_SHIRQ && !dev_id))
		return -EINVAL;

	action = kmalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = irq_flags;
	cpus_clear(action->mask);
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

	retval = setup_irq(irq, action);

	if (retval)
		kfree(action);
	return retval;
}

EXPORT_SYMBOL(request_irq);

/**
 *	free_irq - free an interrupt
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler.  The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function.
 *
 *	This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction *action, **p;
	unsigned long flags;

	if (irq >= NR_IRQS || !irq_desc[irq].valid) {
		printk(KERN_ERR "Trying to free IRQ%d\n", irq);
		dump_stack();
		return;
	}

	spin_lock_irqsave(&irq_controller_lock, flags);
	for (p = &irq_desc[irq].action; (action = *p) != NULL; p = &action->next) {
		if (action->dev_id != dev_id)
			continue;

		/* Found it - now free it */
		*p = action->next;
		break;
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);

	if (!action) {
		printk(KERN_ERR "Trying to free already-free IRQ%d\n", irq);
		dump_stack();
	} else {
		synchronize_irq(irq);
		kfree(action);
	}
}

EXPORT_SYMBOL(free_irq);

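/*
 * Hedged example of the request/free pairing documented above, with an
 * invented device and handler.  dev_id doubles as the cookie free_irq()
 * uses to pick this handler off a shared line.
 *
 *	static irqreturn_t mydev_interrupt(int irq, void *dev_id,
 *					   struct pt_regs *regs)
 *	{
 *		struct mydev *dev = dev_id;
 *
 *		if (!mydev_irq_raised(dev))
 *			return IRQ_NONE;	// not ours (shared line)
 *		mydev_clear_irq(dev);
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_irq(dev->irq, mydev_interrupt, SA_SHIRQ,
 *			  "mydev", dev);
 *	...
 *	free_irq(dev->irq, dev);
 */
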
static DECLARE_MUTEX(probe_sem);

/* Start the interrupt probing.  Unlike other architectures,
 * we don't return a mask of interrupts from probe_irq_on,
 * but return the number of interrupts enabled for the probe.
 * The interrupts which have been enabled for probing are
 * instead recorded in the irq_desc structure.
 */
unsigned long probe_irq_on(void)
{
	unsigned int i, irqs = 0;
	unsigned long delay;

	down(&probe_sem);

	/*
	 * first snaffle up any unassigned but
	 * probe-able interrupts
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		if (!irq_desc[i].probe_ok || irq_desc[i].action)
			continue;

		irq_desc[i].probing = 1;
		irq_desc[i].triggered = 0;
		if (irq_desc[i].chip->set_type)
			irq_desc[i].chip->set_type(i, IRQT_PROBE);
		irq_desc[i].chip->unmask(i);
		irqs += 1;
	}
	spin_unlock_irq(&irq_controller_lock);

	/*
	 * wait for spurious interrupts to mask themselves out again
	 */
	for (delay = jiffies + HZ/10; time_before(jiffies, delay); )
		/* min 100ms delay */;

	/*
	 * now filter out any obviously spurious interrupts
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		if (irq_desc[i].probing && irq_desc[i].triggered) {
			irq_desc[i].probing = 0;
			irqs -= 1;
		}
	}
	spin_unlock_irq(&irq_controller_lock);

	return irqs;
}

EXPORT_SYMBOL(probe_irq_on);

unsigned int probe_irq_mask(unsigned long irqs)
{
	unsigned int mask = 0, i;

	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < 16 && i < NR_IRQS; i++)
		if (irq_desc[i].probing && irq_desc[i].triggered)
			mask |= 1 << i;
	spin_unlock_irq(&irq_controller_lock);

	up(&probe_sem);

	return mask;
}
EXPORT_SYMBOL(probe_irq_mask);

/*
 * Possible return values:
 *  >= 0 - interrupt number
 *    -1 - no interrupt/many interrupts
 */
int probe_irq_off(unsigned long irqs)
{
	unsigned int i;
	int irq_found = NO_IRQ;

	/*
	 * look at the interrupts, and find exactly one of those we
	 * were probing that has triggered
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		if (irq_desc[i].probing &&
		    irq_desc[i].triggered) {
			if (irq_found != NO_IRQ) {
				irq_found = NO_IRQ;
				goto out;
			}
			irq_found = i;
		}
	}

	if (irq_found == -1)
		irq_found = NO_IRQ;
out:
	spin_unlock_irq(&irq_controller_lock);

	up(&probe_sem);

	return irq_found;
}

EXPORT_SYMBOL(probe_irq_off);

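/*
 * The classic autoprobe sequence these calls support, with a made-up
 * device poke.  Note the ARM twist documented above probe_irq_on():
 * it returns a count of probed IRQs rather than a mask, so the cookie
 * is only meaningfully tested for being non-zero.
 *
 *	unsigned long probed = probe_irq_on();
 *	int irq;
 *
 *	mydev_force_interrupt();	// make the device raise its IRQ
 *	udelay(100);
 *	irq = probe_irq_off(probed);	// NO_IRQ if none or many fired
 *
 * probe_sem serialises the whole on/off bracket, so only one probe can
 * be in flight at a time.
 */
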
#ifdef CONFIG_SMP
static void route_irq(struct irqdesc *desc, unsigned int irq, unsigned int cpu)
{
	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->cpu, cpu);

	spin_lock_irq(&irq_controller_lock);
	desc->cpu = cpu;
	desc->chip->set_cpu(desc, irq, cpu);
	spin_unlock_irq(&irq_controller_lock);
}

#ifdef CONFIG_PROC_FS
static int
irq_affinity_read_proc(char *page, char **start, off_t off, int count,
		       int *eof, void *data)
{
	struct irqdesc *desc = irq_desc + ((int)data);
	int len = cpumask_scnprintf(page, count, desc->affinity);

	if (count - len < 2)
		return -EINVAL;
	page[len++] = '\n';
	page[len] = '\0';

	return len;
}

static int
irq_affinity_write_proc(struct file *file, const char __user *buffer,
			unsigned long count, void *data)
{
	unsigned int irq = (unsigned int)data;
	struct irqdesc *desc = irq_desc + irq;
	cpumask_t affinity, tmp;
	int ret = -EIO;

	if (!desc->chip->set_cpu)
		goto out;

	ret = cpumask_parse(buffer, count, affinity);
	if (ret)
		goto out;

	cpus_and(tmp, affinity, cpu_online_map);
	if (cpus_empty(tmp)) {
		ret = -EINVAL;
		goto out;
	}

	desc->affinity = affinity;
	route_irq(desc, irq, first_cpu(tmp));
	ret = count;

 out:
	return ret;
}
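
/*
 * For reference, the userspace side of this interface (paths and
 * values illustrative): the buffer is parsed with cpumask_parse(), so
 *
 *	echo 2 > /proc/irq/31/smp_affinity
 *
 * retargets IRQ 31 to CPU1, provided the chip implements set_cpu and
 * the requested mask intersects cpu_online_map.
 */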
#endif
#endif

void __init init_irq_proc(void)
{
#if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS)
	struct proc_dir_entry *dir;
	int irq;

	dir = proc_mkdir("irq", NULL);
	if (!dir)
		return;

	for (irq = 0; irq < NR_IRQS; irq++) {
		struct proc_dir_entry *entry;
		struct irqdesc *desc;
		char name[16];

		desc = irq_desc + irq;
		memset(name, 0, sizeof(name));
		snprintf(name, sizeof(name) - 1, "%u", irq);

		desc->procdir = proc_mkdir(name, dir);
		if (!desc->procdir)
			continue;

		entry = create_proc_entry("smp_affinity", 0600, desc->procdir);
		if (entry) {
			entry->nlink = 1;
			entry->data = (void *)irq;
			entry->read_proc = irq_affinity_read_proc;
			entry->write_proc = irq_affinity_write_proc;
		}
	}
#endif
}

void __init init_IRQ(void)
{
	struct irqdesc *desc;
	extern void init_dma(void);
	int irq;

#ifdef CONFIG_SMP
	bad_irq_desc.affinity = CPU_MASK_ALL;
	bad_irq_desc.cpu = smp_processor_id();
#endif

	for (irq = 0, desc = irq_desc; irq < NR_IRQS; irq++, desc++) {
		*desc = bad_irq_desc;
		INIT_LIST_HEAD(&desc->pend);
	}

	init_arch_irq();
	init_dma();
}

static int __init noirqdebug_setup(char *str)
{
	noirqdebug = 1;
	return 1;
}

__setup("noirqdebug", noirqdebug_setup);

1054#ifdef CONFIG_HOTPLUG_CPU
1055/*
1056 * The CPU has been marked offline. Migrate IRQs off this CPU. If
1057 * the affinity settings do not allow other CPUs, force them onto any
1058 * available CPU.
1059 */
1060void migrate_irqs(void)
1061{
1062 unsigned int i, cpu = smp_processor_id();
1063
1064 for (i = 0; i < NR_IRQS; i++) {
1065 struct irqdesc *desc = irq_desc + i;
1066
1067 if (desc->cpu == cpu) {
1068 unsigned int newcpu = any_online_cpu(desc->affinity);
1069
1070 if (newcpu == NR_CPUS) {
1071 if (printk_ratelimit())
1072 printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
1073 i, cpu);
1074
1075 cpus_setall(desc->affinity);
1076 newcpu = any_online_cpu(desc->affinity);
1077 }
1078
1079 route_irq(desc, i, newcpu);
1080 }
1081 }
1082}
1083#endif /* CONFIG_HOTPLUG_CPU */