/* MN10300 Arch-specific interrupt handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <asm/setup.h>
#include <asm/serial-regs.h>

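/* Template EPSW value for each CPU when interrupts are enabled.  do_IRQ()
 * below temporarily rewrites its CPU's slot so that local_irq_enable() inside
 * a handler restores interrupts at the priority of the interrupt currently
 * being serviced rather than at the default level. */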
unsigned long __mn10300_irq_enabled_epsw[NR_CPUS] __cacheline_aligned_in_smp = {
	[0 ... NR_CPUS - 1] = EPSW_IE | EPSW_IM_7
};
EXPORT_SYMBOL(__mn10300_irq_enabled_epsw);

#ifdef CONFIG_SMP
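/* irq_affinity_online[] records which online CPU each IRQ is currently routed
 * to.  irq_affinity_request is a bitmap of IRQs for which a new affinity has
 * been requested but not yet applied; the actual rerouting is deferred to the
 * next mask_ack/unmask of the IRQ concerned. */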
static char irq_affinity_online[NR_IRQS] = {
	[0 ... NR_IRQS - 1] = 0
};

#define NR_IRQ_WORDS	((NR_IRQS + 31) / 32)
static unsigned long irq_affinity_request[NR_IRQ_WORDS] = {
	[0 ... NR_IRQ_WORDS - 1] = 0
};
#endif  /* CONFIG_SMP */

atomic_t irq_err_count;

/*
 * MN10300 interrupt controller operations
 */
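/* Acknowledge an IRQ at the CPU PIC: write GxICR_DETECT through the byte-wide
 * view of the IRQ's control register to clear the latched detection, then read
 * the register back (the read-back appears to serve as a write flush). */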
static void mn10300_cpupic_ack(struct irq_data *d)
{
	unsigned int irq = d->irq;
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();
	GxICR_u8(irq) = GxICR_DETECT;
	tmp = GxICR(irq);
	arch_local_irq_restore(flags);
}

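/* Update an IRQ's control register with interrupts locally disabled: keep only
 * the bits in @mask and OR in @set.  The trailing read of GxICR appears to
 * flush the write to the ICR before interrupts are restored; the same
 * write-then-read-back pattern is used throughout this file. */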
static void __mask_and_set_icr(unsigned int irq,
			       unsigned int mask, unsigned int set)
{
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();
	tmp = GxICR(irq);
	GxICR(irq) = (tmp & mask) | set;
	tmp = GxICR(irq);
	arch_local_irq_restore(flags);
}

static void mn10300_cpupic_mask(struct irq_data *d)
{
	__mask_and_set_icr(d->irq, GxICR_LEVEL, 0);
}

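/* Mask and acknowledge an IRQ.  On SMP, if a new affinity has been requested
 * for this IRQ (see mn10300_cpupic_setaffinity() below), this is where the
 * reroute actually takes effect: the local copy of the ICR is masked and the
 * IRQ's level and enable state are transferred to the newly chosen online CPU
 * via CROSS_GxICR(). */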
static void mn10300_cpupic_mask_ack(struct irq_data *d)
{
	unsigned int irq = d->irq;
#ifdef CONFIG_SMP
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();

	if (!test_and_clear_bit(irq, irq_affinity_request)) {
		tmp = GxICR(irq);
		GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
		tmp = GxICR(irq);
	} else {
		u16 tmp2;
		tmp = GxICR(irq);
		GxICR(irq) = (tmp & GxICR_LEVEL);
		tmp2 = GxICR(irq);

		irq_affinity_online[irq] =
			any_online_cpu(*d->affinity);
		CROSS_GxICR(irq, irq_affinity_online[irq]) =
			(tmp & (GxICR_LEVEL | GxICR_ENABLE)) | GxICR_DETECT;
		tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
	}

	arch_local_irq_restore(flags);
#else  /* CONFIG_SMP */
	__mask_and_set_icr(irq, GxICR_LEVEL, GxICR_DETECT);
#endif /* CONFIG_SMP */
}

static void mn10300_cpupic_unmask(struct irq_data *d)
{
	__mask_and_set_icr(d->irq, GxICR_LEVEL, GxICR_ENABLE);
}

static void mn10300_cpupic_unmask_clear(struct irq_data *d)
{
	unsigned int irq = d->irq;
	/* the MN10300 PIC latches its interrupt request bit, even after the
	 * device has ceased to assert its interrupt line and the interrupt
	 * channel has been disabled in the PIC, so for level-triggered
	 * interrupts we need to clear the request bit when we re-enable */
#ifdef CONFIG_SMP
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();

	if (!test_and_clear_bit(irq, irq_affinity_request)) {
		tmp = GxICR(irq);
		GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
		tmp = GxICR(irq);
	} else {
		tmp = GxICR(irq);

		irq_affinity_online[irq] = any_online_cpu(*d->affinity);
		CROSS_GxICR(irq, irq_affinity_online[irq]) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
		tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
	}

	arch_local_irq_restore(flags);
#else  /* CONFIG_SMP */
	__mask_and_set_icr(irq, GxICR_LEVEL, GxICR_ENABLE | GxICR_DETECT);
#endif /* CONFIG_SMP */
}

#ifdef CONFIG_SMP
static int
mn10300_cpupic_setaffinity(struct irq_data *d, const struct cpumask *mask,
			   bool force)
{
	unsigned long flags;
	int err;

	flags = arch_local_cli_save();

	/* certain IRQs are bound to a specific CPU and may not be retargeted:
	 * the TMJC timer, the IPIs and the on-chip serial port (ttySM) IRQs
	 * and their baud-rate timers */
	switch (d->irq) {
	case TMJCIRQ:
	case RESCHEDULE_IPI:
	case CALL_FUNC_SINGLE_IPI:
	case LOCAL_TIMER_IPI:
	case FLUSH_CACHE_IPI:
	case CALL_FUNCTION_NMI_IPI:
	case DEBUGGER_NMI_IPI:
#ifdef CONFIG_MN10300_TTYSM0
	case SC0RXIRQ:
	case SC0TXIRQ:
#ifdef CONFIG_MN10300_TTYSM0_TIMER8
	case TM8IRQ:
#elif defined(CONFIG_MN10300_TTYSM0_TIMER2)
	case TM2IRQ:
#endif /* CONFIG_MN10300_TTYSM0_TIMER8 */
#endif /* CONFIG_MN10300_TTYSM0 */

#ifdef CONFIG_MN10300_TTYSM1
	case SC1RXIRQ:
	case SC1TXIRQ:
#ifdef CONFIG_MN10300_TTYSM1_TIMER12
	case TM12IRQ:
#elif defined(CONFIG_MN10300_TTYSM1_TIMER9)
	case TM9IRQ:
#elif defined(CONFIG_MN10300_TTYSM1_TIMER3)
	case TM3IRQ:
#endif /* CONFIG_MN10300_TTYSM1_TIMER12 */
#endif /* CONFIG_MN10300_TTYSM1 */

#ifdef CONFIG_MN10300_TTYSM2
	case SC2RXIRQ:
	case SC2TXIRQ:
	case TM10IRQ:
#endif /* CONFIG_MN10300_TTYSM2 */
		err = -1;
		break;

	default:
		set_bit(d->irq, irq_affinity_request);
		err = 0;
		break;
	}

	arch_local_irq_restore(flags);
	return err;
}
#endif /* CONFIG_SMP */

/*
 * MN10300 PIC level-triggered IRQ handling.
 *
 * The PIC has no 'ACK' function per se.  It is possible to clear individual
 * channel latches, but each latch relatches whether or not the channel is
 * masked, so we need to clear the latch when we unmask the channel.
 *
 * Also for this reason, we don't supply an ack() op (it's unused anyway if
 * mask_ack() is provided), and mask_ack() just masks.
 */
static struct irq_chip mn10300_cpu_pic_level = {
	.name			= "cpu_l",
	.irq_disable		= mn10300_cpupic_mask,
	.irq_enable		= mn10300_cpupic_unmask_clear,
	.irq_ack		= NULL,
	.irq_mask		= mn10300_cpupic_mask,
	.irq_mask_ack		= mn10300_cpupic_mask,
	.irq_unmask		= mn10300_cpupic_unmask_clear,
#ifdef CONFIG_SMP
	.irq_set_affinity	= mn10300_cpupic_setaffinity,
#endif
};

/*
 * MN10300 PIC edge-triggered IRQ handling.
 *
 * We use the latch clearing function of the PIC as the 'ACK' function.
 */
static struct irq_chip mn10300_cpu_pic_edge = {
	.name			= "cpu_e",
	.irq_disable		= mn10300_cpupic_mask,
	.irq_enable		= mn10300_cpupic_unmask,
	.irq_ack		= mn10300_cpupic_ack,
	.irq_mask		= mn10300_cpupic_mask,
	.irq_mask_ack		= mn10300_cpupic_mask_ack,
	.irq_unmask		= mn10300_cpupic_unmask,
#ifdef CONFIG_SMP
	.irq_set_affinity	= mn10300_cpupic_setaffinity,
#endif
};

/*
 * 'What should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this itself.
 */
void ack_bad_irq(int irq)
{
	printk(KERN_WARNING "unexpected IRQ trap at vector %02x\n", irq);
}

/*
 * change the level at which an IRQ executes
 * - must not be called whilst interrupts are being processed!
 */
void set_intr_level(int irq, u16 level)
{
	BUG_ON(in_interrupt());

	__mask_and_set_icr(irq, GxICR_ENABLE, level);
}

/*
 * mark an interrupt to be ACK'd after interrupt handlers have been run rather
 * than before
 * - see Documentation/mn10300/features.txt
 */
void mn10300_set_lateack_irq_type(int irq)
{
	irq_set_chip_and_handler(irq, &mn10300_cpu_pic_level,
				 handle_level_irq);
}

/*
 * initialise the interrupt system
 */
void __init init_IRQ(void)
{
	int irq;

	for (irq = 0; irq < NR_IRQS; irq++)
		if (irq_get_chip(irq) == &no_irq_chip)
			/* due to the PIC latching interrupt requests, even
			 * when the IRQ is disabled, IRQ_PENDING is superfluous
			 * and we can use handle_level_irq() for edge-triggered
			 * interrupts */
			irq_set_chip_and_handler(irq, &mn10300_cpu_pic_edge,
						 handle_level_irq);

	unit_init_IRQ();
}

/*
 * handle normal device IRQs
 */
asmlinkage void do_IRQ(void)
{
	unsigned long sp, epsw, irq_disabled_epsw, old_irq_enabled_epsw;
	unsigned int cpu_id = smp_processor_id();
	int irq;

	sp = current_stack_pointer();
	BUG_ON(sp - (sp & ~(THREAD_SIZE - 1)) < STACK_WARN);

	/* make sure local_irq_enable() doesn't muck up the interrupt priority
	 * setting in EPSW */
	old_irq_enabled_epsw = __mn10300_irq_enabled_epsw[cpu_id];
	local_save_flags(epsw);
	__mn10300_irq_enabled_epsw[cpu_id] = EPSW_IE | (EPSW_IM & epsw);
	irq_disabled_epsw = EPSW_IE | MN10300_CLI_LEVEL;

#ifdef CONFIG_MN10300_WD_TIMER
	__IRQ_STAT(cpu_id, __irq_count)++;
#endif

	irq_enter();

	for (;;) {
		/* ask the interrupt controller for the next IRQ to process
		 * - the result we get depends on EPSW.IM
		 */
		irq = IAGR & IAGR_GN;
		if (!irq)
			break;

		local_irq_restore(irq_disabled_epsw);

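		/* IAGR appears to report the group number pre-shifted by two
		 * bits (as an offset into a table of 4-byte entries), so shift
		 * it back down to get the Linux IRQ number */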
		generic_handle_irq(irq >> 2);

		/* restore IRQ controls for IAGR access */
		local_irq_restore(epsw);
	}

	__mn10300_irq_enabled_epsw[cpu_id] = old_irq_enabled_epsw;

	irq_exit();
}

/*
 * Display interrupt management information through /proc/interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
#ifdef CONFIG_MN10300_WD_TIMER
	int j;

	seq_printf(p, "%*s: ", prec, "NMI");
	for (j = 0; j < NR_CPUS; j++)
		if (cpu_online(j))
			seq_printf(p, "%10u ", nmi_count(j));
	seq_putc(p, '\n');
#endif

	seq_printf(p, "%*s: ", prec, "ERR");
	seq_printf(p, "%10u\n", atomic_read(&irq_err_count));
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
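/* Route interrupts away from the current CPU when it is going offline: IRQs
 * whose affinity would otherwise leave them with no online CPU are given a new
 * target, and any IRQ currently serviced here has its ICR state (including a
 * latched request) copied across to the newly chosen CPU. */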
void migrate_irqs(void)
{
	int irq;
	unsigned int self, new;
	unsigned long flags;

	self = smp_processor_id();
	for (irq = 0; irq < NR_IRQS; irq++) {
		struct irq_data *data = irq_get_irq_data(irq);

		if (irqd_is_per_cpu(data))
			continue;

		if (cpu_isset(self, *data->affinity) &&
		    !cpus_intersects(irq_affinity[irq], cpu_online_map)) {
			int cpu_id;
			cpu_id = first_cpu(cpu_online_map);
			cpu_set(cpu_id, *data->affinity);
		}
		/* We need to operate irq_affinity_online atomically. */
		flags = arch_local_cli_save();
		if (irq_affinity_online[irq] == self) {
			u16 x, tmp;

			x = GxICR(irq);
			GxICR(irq) = x & GxICR_LEVEL;
			tmp = GxICR(irq);

			new = any_online_cpu(*data->affinity);
			irq_affinity_online[irq] = new;

			CROSS_GxICR(irq, new) =
				(x & GxICR_LEVEL) | GxICR_DETECT;
			tmp = CROSS_GxICR(irq, new);

			x &= GxICR_LEVEL | GxICR_ENABLE;
			if (GxICR(irq) & GxICR_REQUEST)
				x |= GxICR_REQUEST | GxICR_DETECT;
			CROSS_GxICR(irq, new) = x;
			tmp = CROSS_GxICR(irq, new);
		}
		arch_local_irq_restore(flags);
	}
}
#endif /* CONFIG_HOTPLUG_CPU */