/*
 * linux/arch/sh/kernel/irq.c
 *
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * SuperH version:  Copyright (C) 1999  Niibe Yutaka
 */
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <asm/processor.h>
#include <asm/machvec.h>
#include <asm/uaccess.h>
#include <asm/thread_info.h>
#include <cpu/mmu_context.h>

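/* Bumped in ack_bad_irq() and shown as the "ERR" line in /proc/interrupts. */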
atomic_t irq_err_count;

/*
 * 'What should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this for itself; it doesn't deserve
 * a generic callback, I think.
 */
void ack_bad_irq(unsigned int irq)
{
	atomic_inc(&irq_err_count);
	printk("unexpected IRQ trap at vector %02x\n", irq);
}

#if defined(CONFIG_PROC_FS)
/*
 * /proc/interrupts printing:
 */
static int show_other_interrupts(struct seq_file *p, int prec)
{
	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
	return 0;
}

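/*
 * One /proc/interrupts row per IRQ: per-CPU counts, the irq_chip and
 * descriptor names, and every action sharing the line. The CPU column
 * headers are printed ahead of the first entry.
 */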
int show_interrupts(struct seq_file *p, void *v)
{
	unsigned long flags, any_count = 0;
	int i = *(loff_t *)v, j, prec;
	struct irqaction *action;
	struct irq_desc *desc;

	if (i > nr_irqs)
		return 0;

	for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
		j *= 10;

	if (i == nr_irqs)
		return show_other_interrupts(p, prec);

	if (i == 0) {
		seq_printf(p, "%*s", prec + 8, "");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%-8d", j);
		seq_putc(p, '\n');
	}

	desc = irq_to_desc(i);
	if (!desc)
		return 0;

	spin_lock_irqsave(&desc->lock, flags);
	for_each_online_cpu(j)
		any_count |= kstat_irqs_cpu(i, j);
	action = desc->action;
	if (!action && !any_count)
		goto out;

	seq_printf(p, "%*d: ", prec, i);
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
	seq_printf(p, " %14s", desc->chip->name);
	seq_printf(p, "-%-8s", desc->name);

	if (action) {
		seq_printf(p, "  %s", action->name);
		while ((action = action->next) != NULL)
			seq_printf(p, ", %s", action->name);
	}

	seq_putc(p, '\n');
out:
	spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}
#endif

#ifdef CONFIG_IRQSTACKS
/*
 * per-CPU IRQ handling contexts (thread information and stack)
 */
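/*
 * The layout mirrors a regular task stack: the thread_info sits at the
 * bottom of the THREAD_SIZE area and the remainder is the stack itself.
 */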
union irq_ctx {
	struct thread_info	tinfo;
	u32			stack[THREAD_SIZE/sizeof(u32)];
};

static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
#endif

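/*
 * Entry point for external interrupts: demux the vector, switch to the
 * per-CPU hardirq stack when CONFIG_IRQSTACKS is enabled, and hand the
 * IRQ off to the generic layer.
 */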
asmlinkage int do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
#ifdef CONFIG_IRQSTACKS
	union irq_ctx *curctx, *irqctx;
#endif

	irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Debugging check for stack overflow: is there less than 1KB free? */
	{
		long sp;

		__asm__ __volatile__ ("and r15, %0" :
					"=r" (sp) : "0" (THREAD_SIZE - 1));

		if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
			printk("do_IRQ: stack overflow: %ld\n",
			       sp - sizeof(struct thread_info));
			dump_stack();
		}
	}
#endif

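	/*
	 * Translate the hardware INTEVT code into a linux IRQ number and
	 * let the machine vector demux it further if it needs to.
	 */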
	irq = irq_demux(intc_evt2irq(irq));

#ifdef CONFIG_IRQSTACKS
	curctx = (union irq_ctx *)current_thread_info();
	irqctx = hardirq_ctx[smp_processor_id()];

	/*
	 * This is where we switch to the IRQ stack. However, if we are
	 * already using the IRQ stack (because we interrupted a hardirq
	 * handler) we can't do that and just have to keep using the
	 * current stack (which is the IRQ stack already, after all).
	 */
	if (curctx != irqctx) {
		u32 *isp;

		isp = (u32 *)((char *)irqctx + sizeof(*irqctx));
		irqctx->tinfo.task = curctx->tinfo.task;
		irqctx->tinfo.previous_sp = current_stack_pointer;

		/*
		 * Copy the softirq bits in preempt_count so that the
		 * softirq checks work in the hardirq context.
		 */
		irqctx->tinfo.preempt_count =
			(irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
			(curctx->tinfo.preempt_count & SOFTIRQ_MASK);

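		/*
		 * Call generic_handle_irq() on the IRQ stack. In the SH
		 * ABI r4 carries the first argument, and the instruction
		 * in the jsr delay slot executes before the branch, so
		 * the write to r15 switches stacks just ahead of the call.
		 */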
		__asm__ __volatile__ (
			"mov	%0, r4		\n"
			"mov	r15, r8		\n"
			"jsr	@%1		\n"
			/* switch to the irq stack */
			" mov	%2, r15		\n"
			/* restore the stack (ring zero) */
			"mov	r8, r15		\n"
			: /* no outputs */
			: "r" (irq), "r" (generic_handle_irq), "r" (isp)
			: "memory", "r0", "r1", "r2", "r3", "r4",
			  "r5", "r6", "r7", "r8", "t", "pr"
		);
	} else
#endif
		generic_handle_irq(irq);

	irq_exit();

	set_irq_regs(old_regs);
	return 1;
}

#ifdef CONFIG_IRQSTACKS
static char softirq_stack[NR_CPUS * THREAD_SIZE]
		__attribute__((__section__(".bss.page_aligned")));

static char hardirq_stack[NR_CPUS * THREAD_SIZE]
		__attribute__((__section__(".bss.page_aligned")));

/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
void irq_ctx_init(int cpu)
{
	union irq_ctx *irqctx;

	if (hardirq_ctx[cpu])
		return;

	irqctx = (union irq_ctx *)&hardirq_stack[cpu * THREAD_SIZE];
	irqctx->tinfo.task		= NULL;
	irqctx->tinfo.exec_domain	= NULL;
	irqctx->tinfo.cpu		= cpu;
	irqctx->tinfo.preempt_count	= HARDIRQ_OFFSET;
	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);

	hardirq_ctx[cpu] = irqctx;

	irqctx = (union irq_ctx *)&softirq_stack[cpu * THREAD_SIZE];
	irqctx->tinfo.task		= NULL;
	irqctx->tinfo.exec_domain	= NULL;
	irqctx->tinfo.cpu		= cpu;
	irqctx->tinfo.preempt_count	= 0;
	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);

	softirq_ctx[cpu] = irqctx;

	printk("CPU %u irqstacks, hard=%p soft=%p\n",
		cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
}

void irq_ctx_exit(int cpu)
{
	hardirq_ctx[cpu] = NULL;
}

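/*
 * Run pending softirqs on the dedicated per-CPU softirq stack instead
 * of on the current task's stack.
 */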
asmlinkage void do_softirq(void)
{
	unsigned long flags;
	struct thread_info *curctx;
	union irq_ctx *irqctx;
	u32 *isp;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		curctx = current_thread_info();
		irqctx = softirq_ctx[smp_processor_id()];
		irqctx->tinfo.task = curctx->task;
		irqctx->tinfo.previous_sp = current_stack_pointer;

		/* build the stack frame on the softirq stack */
		isp = (u32 *)((char *)irqctx + sizeof(*irqctx));

		__asm__ __volatile__ (
			"mov	r15, r9		\n"
			"jsr	@%0		\n"
			/* switch to the softirq stack */
			" mov	%1, r15		\n"
			/* restore the thread stack */
			"mov	r9, r15		\n"
			: /* no outputs */
			: "r" (__do_softirq), "r" (isp)
			: "memory", "r0", "r1", "r2", "r3", "r4",
			  "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
		);

		/*
		 * Shouldn't happen; we returned above if in_interrupt():
		 */
		WARN_ON_ONCE(softirq_count());
	}

	local_irq_restore(flags);
}
#endif

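/*
 * Boot-time IRQ setup: platform controllers first, then the optional
 * machine vector hook, and finally the IRQ stack(s) for the boot CPU.
 */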
void __init init_IRQ(void)
{
	plat_irq_setup();

	/* Perform the machine specific initialisation */
	if (sh_mv.mv_init_irq)
		sh_mv.mv_init_irq();

	irq_ctx_init(smp_processor_id());
}

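/*
 * With sparse IRQ support, the machine vector supplies the number of
 * IRQs to size for.
 */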
#ifdef CONFIG_SPARSE_IRQ
int __init arch_probe_nr_irqs(void)
{
	nr_irqs = sh_mv.mv_nr_irqs;
	return 0;
}
#endif