Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | /* |
| 2 | * arch/ppc/kernel/irq.c |
| 3 | * |
| 4 | * Derived from arch/i386/kernel/irq.c |
| 5 | * Copyright (C) 1992 Linus Torvalds |
| 6 | * Adapted from arch/i386 by Gary Thomas |
| 7 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) |
| 8 | * Updated and modified by Cort Dougan <cort@fsmlabs.com> |
| 9 | * Copyright (C) 1996-2001 Cort Dougan |
| 10 | * Adapted for Power Macintosh by Paul Mackerras |
| 11 | * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au) |
| 12 | * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk). |
| 13 | * |
| 14 | * This file contains the code used by various IRQ handling routines: |
| 15 | * asking for different IRQ's should be done through these routines |
| 16 | * instead of just grabbing them. Thus setups with different IRQ numbers |
| 17 | * shouldn't result in any weird surprises, and installing new handlers |
| 18 | * should be easier. |
| 19 | * |
| 20 | * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the |
| 21 | * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit |
| 22 | * mask register (of which only 16 are defined), hence the weird shifting |
| 23 | * and complement of the cached_irq_mask. I want to be able to stuff |
| 24 | * this right into the SIU SMASK register. |
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
 * to reduce code space and undefined function references.
| 27 | */ |
| 28 | |
| 29 | #include <linux/errno.h> |
| 30 | #include <linux/module.h> |
| 31 | #include <linux/threads.h> |
| 32 | #include <linux/kernel_stat.h> |
| 33 | #include <linux/signal.h> |
| 34 | #include <linux/sched.h> |
| 35 | #include <linux/ptrace.h> |
| 36 | #include <linux/ioport.h> |
| 37 | #include <linux/interrupt.h> |
| 38 | #include <linux/timex.h> |
| 39 | #include <linux/config.h> |
| 40 | #include <linux/init.h> |
| 41 | #include <linux/slab.h> |
| 42 | #include <linux/pci.h> |
| 43 | #include <linux/delay.h> |
| 44 | #include <linux/irq.h> |
| 45 | #include <linux/proc_fs.h> |
| 46 | #include <linux/random.h> |
| 47 | #include <linux/seq_file.h> |
| 48 | #include <linux/cpumask.h> |
| 49 | #include <linux/profile.h> |
| 50 | #include <linux/bitops.h> |
| 51 | |
| 52 | #include <asm/uaccess.h> |
| 53 | #include <asm/system.h> |
| 54 | #include <asm/io.h> |
| 55 | #include <asm/pgtable.h> |
| 56 | #include <asm/irq.h> |
| 57 | #include <asm/cache.h> |
| 58 | #include <asm/prom.h> |
| 59 | #include <asm/ptrace.h> |
| 60 | |
/* Number of 32-bit words needed to hold one bit per IRQ line. */
#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)

/* Inter-processor interrupt counters; defined in the SMP support code
 * (NOTE(review): assumed — definitions are outside this file). */
extern atomic_t ipi_recv;
extern atomic_t ipi_sent;

#define MAXCOUNT 10000000

/* Count of interrupts seen with nothing pending (see do_IRQ below). */
int ppc_spurious_interrupts = 0;
struct irqaction *ppc_irq_action[NR_IRQS];
/* Cached copy of the interrupt mask register, one bit per IRQ
 * (see the 8xx SIU SMASK discussion in the file header). */
unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
/* Bitmap and count of interrupts that arrived while masked. */
unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
/* Thermal Assist Unit state/counters, provided elsewhere when
 * CONFIG_TAU_INT is set; used only by show_interrupts(). */
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
| 78 | |
/*
 * seq_file show routine for /proc/interrupts.
 *
 * Called once per iteration position; *v is the current index.  For
 * indices below NR_IRQS it prints one line for that IRQ (per-CPU
 * counts, controller name, trigger type, action names); at exactly
 * NR_IRQS it prints the summary rows (TAU, IPI, spurious).  Always
 * returns 0, per seq_file convention.
 */
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction * action;
	unsigned long flags;

	/* First position: emit the header row, one column per online CPU. */
	if (i == 0) {
		seq_puts(p, " ");
		for (j=0; j<NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "CPU%d ", j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		/* Lock the descriptor so the action list cannot change
		 * while we walk and print it. */
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		/* Unused IRQs (no action/handler installed) print nothing. */
		if ( !action || !action->handler )
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifdef CONFIG_SMP
		/* One count column per online CPU. */
		for (j = 0; j < NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "%10u ",
					kstat_cpu(j).irqs[i]);
#else
		seq_printf(p, "%10u ", kstat_irqs(i));
#endif /* CONFIG_SMP */
		/* Interrupt controller (PIC) name, if one is registered. */
		if (irq_desc[i].handler)
			seq_printf(p, " %s ", irq_desc[i].handler->typename);
		else
			seq_puts(p, " None ");
		seq_printf(p, "%s", (irq_desc[i].status & IRQ_LEVEL) ? "Level " : "Edge ");
		/* First action by name, then any shared actions comma-separated. */
		seq_printf(p, " %s", action->name);
		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);
		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == NR_IRQS) {
		/* Past the last IRQ: print the non-per-IRQ summary rows. */
#ifdef CONFIG_TAU_INT
		if (tau_initialized){
			seq_puts(p, "TAU: ");
			for (j = 0; j < NR_CPUS; j++)
				if (cpu_online(j))
					seq_printf(p, "%10u ", tau_interrupts(j));
			seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n");
		}
#endif
#ifdef CONFIG_SMP
		/* should this be per processor send/receive? */
		seq_printf(p, "IPI (recv/sent): %10u/%u\n",
				atomic_read(&ipi_recv), atomic_read(&ipi_sent));
#endif
		seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
	}
	return 0;
}
| 137 | |
| 138 | void do_IRQ(struct pt_regs *regs) |
| 139 | { |
| 140 | int irq, first = 1; |
| 141 | irq_enter(); |
| 142 | |
| 143 | /* |
| 144 | * Every platform is required to implement ppc_md.get_irq. |
| 145 | * This function will either return an irq number or -1 to |
| 146 | * indicate there are no more pending. But the first time |
| 147 | * through the loop this means there wasn't and IRQ pending. |
| 148 | * The value -2 is for buggy hardware and means that this IRQ |
| 149 | * has already been handled. -- Tom |
| 150 | */ |
| 151 | while ((irq = ppc_md.get_irq(regs)) >= 0) { |
| 152 | __do_IRQ(irq, regs); |
| 153 | first = 0; |
| 154 | } |
| 155 | if (irq != -2 && first) |
| 156 | /* That's not SMP safe ... but who cares ? */ |
| 157 | ppc_spurious_interrupts++; |
| 158 | irq_exit(); |
| 159 | } |
| 160 | |
/*
 * Arch-level interrupt initialization: defer entirely to the
 * platform's machine-description hook to set up its controller.
 */
void __init init_IRQ(void)
{
	ppc_md.init_IRQ();
}