/*
 * Copyright (C) 2000,2001,2002,2003,2004 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>

#include <asm/errno.h>
#include <asm/irq_regs.h>
#include <asm/signal.h>
#include <asm/system.h>
#include <asm/io.h>

#include <asm/sibyte/bcm1480_regs.h>
#include <asm/sibyte/bcm1480_int.h>
#include <asm/sibyte/bcm1480_scd.h>

#include <asm/sibyte/sb1250_uart.h>
#include <asm/sibyte/sb1250.h>
/*
 * These are the routines that handle all the low level interrupt stuff.
 * Actions handled here are: initialization of the interrupt map, requesting
 * of interrupt lines by handlers, dispatching of interrupts to handlers, and
 * probing for interrupt lines.
 */

#ifdef CONFIG_PCI
extern unsigned long ht_eoi_space;
#endif

/* Store the CPU id (not the logical number) */
int bcm1480_irq_owner[BCM1480_NR_IRQS];

static DEFINE_RAW_SPINLOCK(bcm1480_imr_lock);

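/*
 * The 1480 splits its interrupt sources across two 64-bit mask registers
 * per CPU.  As the code below shows, Linux IRQs below BCM1480_NR_IRQS_HALF
 * use the _H register directly, while IRQs in the upper half are folded
 * onto the _L register, which sits BCM1480_IMR_HL_SPACING bytes above it
 * (e.g. with BCM1480_NR_IRQS_HALF == 64, IRQ 70 becomes bit 6 of _L).
 */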
void bcm1480_mask_irq(int cpu, int irq)
{
	unsigned long flags, hl_spacing;
	u64 cur_ints;

	raw_spin_lock_irqsave(&bcm1480_imr_lock, flags);
	hl_spacing = 0;
	if ((irq >= BCM1480_NR_IRQS_HALF) && (irq <= BCM1480_NR_IRQS)) {
		hl_spacing = BCM1480_IMR_HL_SPACING;
		irq -= BCM1480_NR_IRQS_HALF;
	}
	cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + hl_spacing));
	cur_ints |= (((u64) 1) << irq);
	____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + hl_spacing));
	raw_spin_unlock_irqrestore(&bcm1480_imr_lock, flags);
}

void bcm1480_unmask_irq(int cpu, int irq)
{
	unsigned long flags, hl_spacing;
	u64 cur_ints;

	raw_spin_lock_irqsave(&bcm1480_imr_lock, flags);
	hl_spacing = 0;
	if ((irq >= BCM1480_NR_IRQS_HALF) && (irq <= BCM1480_NR_IRQS)) {
		hl_spacing = BCM1480_IMR_HL_SPACING;
		irq -= BCM1480_NR_IRQS_HALF;
	}
	cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + hl_spacing));
	cur_ints &= ~(((u64) 1) << irq);
	____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + hl_spacing));
	raw_spin_unlock_irqrestore(&bcm1480_imr_lock, flags);
}

#ifdef CONFIG_SMP
static int bcm1480_set_affinity(struct irq_data *d, const struct cpumask *mask,
				bool force)
{
	unsigned int irq_dirty, irq = d->irq;
	int i = 0, old_cpu, cpu, int_on, k;
	u64 cur_ints;
	unsigned long flags;

	i = cpumask_first(mask);

	/* Convert logical CPU to physical CPU */
	cpu = cpu_logical_map(i);

	/* Protect against other affinity changers and IMR manipulation */
	raw_spin_lock_irqsave(&bcm1480_imr_lock, flags);

	/* Swizzle each CPU's IMR (but leave the IP selection alone) */
	old_cpu = bcm1480_irq_owner[irq];
	irq_dirty = irq;
	if ((irq_dirty >= BCM1480_NR_IRQS_HALF) && (irq_dirty <= BCM1480_NR_IRQS)) {
		irq_dirty -= BCM1480_NR_IRQS_HALF;
	}

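	/*
	 * Migrate the interrupt: for each half, if the bit was enabled in
	 * the old CPU's mask register, mask it there, record the new
	 * owner, and unmask it on the new CPU.
	 */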
	for (k = 0; k < 2; k++) { /* Loop through high and low interrupt mask registers */
		cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(old_cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
		int_on = !(cur_ints & (((u64) 1) << irq_dirty));
		if (int_on) {
			/* If it was on, mask it */
			cur_ints |= (((u64) 1) << irq_dirty);
			____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(old_cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
		}
		bcm1480_irq_owner[irq] = cpu;
		if (int_on) {
			/* Unmask for the new CPU */
			cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
			cur_ints &= ~(((u64) 1) << irq_dirty);
			____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
		}
	}
	raw_spin_unlock_irqrestore(&bcm1480_imr_lock, flags);

	return 0;
}
#endif

/*****************************************************************************/

static void disable_bcm1480_irq(struct irq_data *d)
{
	unsigned int irq = d->irq;

	bcm1480_mask_irq(bcm1480_irq_owner[irq], irq);
}

static void enable_bcm1480_irq(struct irq_data *d)
{
	unsigned int irq = d->irq;

	bcm1480_unmask_irq(bcm1480_irq_owner[irq], irq);
}

static void ack_bcm1480_irq(struct irq_data *d)
{
	unsigned int irq_dirty, irq = d->irq;
	u64 pending;
	int k;

	/*
	 * If the interrupt was an HT interrupt, now is the time to
	 * clear it.  NOTE: we assume the HT bridge was set up to
	 * deliver the interrupts to all CPUs (which makes affinity
	 * changing easier for us).
	 */
	irq_dirty = irq;
	if ((irq_dirty >= BCM1480_NR_IRQS_HALF) && (irq_dirty <= BCM1480_NR_IRQS)) {
		irq_dirty -= BCM1480_NR_IRQS_HALF;
	}
	for (k = 0; k < 2; k++) { /* Loop through high and low LDT interrupts */
		pending = __raw_readq(IOADDR(A_BCM1480_IMR_REGISTER(bcm1480_irq_owner[irq],
						R_BCM1480_IMR_LDT_INTERRUPT_H + (k*BCM1480_IMR_HL_SPACING))));
		pending &= ((u64)1 << (irq_dirty));
		if (pending) {
#ifdef CONFIG_SMP
			int i;
			for (i = 0; i < NR_CPUS; i++) {
				/*
				 * Clear for all CPUs so an affinity switch
				 * doesn't find an old status
				 */
				__raw_writeq(pending, IOADDR(A_BCM1480_IMR_REGISTER(cpu_logical_map(i),
								R_BCM1480_IMR_LDT_INTERRUPT_CLR_H + (k*BCM1480_IMR_HL_SPACING))));
			}
#else
			__raw_writeq(pending, IOADDR(A_BCM1480_IMR_REGISTER(0, R_BCM1480_IMR_LDT_INTERRUPT_CLR_H + (k*BCM1480_IMR_HL_SPACING))));
#endif

			/*
			 * Generate EOI.  For Pass 1 parts, EOI is a nop.  For
			 * Pass 2, the LDT world may be edge-triggered, but
			 * this EOI shouldn't hurt.  If they are
			 * level-sensitive, the EOI is required.
			 */
#ifdef CONFIG_PCI
			if (ht_eoi_space)
				*(uint32_t *)(ht_eoi_space+(irq<<16)+(7<<2)) = 0;
#endif
		}
	}
	bcm1480_mask_irq(bcm1480_irq_owner[irq], irq);
}

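/*
 * All IMR interrupts are driven through handle_level_irq (set up in
 * init_bcm1480_irqs() below), so the flow handler masks and acks via
 * irq_mask_ack on entry and unmasks once the handler has run.
 */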
static struct irq_chip bcm1480_irq_type = {
	.name = "BCM1480-IMR",
	.irq_mask_ack = ack_bcm1480_irq,
	.irq_mask = disable_bcm1480_irq,
	.irq_unmask = enable_bcm1480_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity = bcm1480_set_affinity
#endif
};

void __init init_bcm1480_irqs(void)
{
	int i;

	for (i = 0; i < BCM1480_NR_IRQS; i++) {
		irq_set_chip_and_handler(i, &bcm1480_irq_type,
					 handle_level_irq);
		bcm1480_irq_owner[i] = 0;
	}
}

/*
 *  init_IRQ is called early in the boot sequence from init/main.c.  It
 *  is responsible for setting up the interrupt mapper and installing the
 *  handler that will be responsible for dispatching interrupts to the
 *  "right" place.
 */
/*
 * For now, map all interrupts to IP[2].  We could save
 * some cycles by parceling out system interrupts to different
 * IP lines, but keep it simple for bringup.  We'll also direct
 * all interrupts to a single CPU; we should probably route
 * PCI and LDT to one CPU and everything else to the other
 * to balance the load a bit.
 *
 * On the second CPU, everything is set to IP5, which is
 * ignored, EXCEPT the mailbox interrupt.  That one is
 * set to IP[2] so it is handled.  This is needed so we
 * can do cross-CPU function calls, as required by SMP.
 */

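/*
 * Interrupt mapper routing values: writing K_BCM1480_INT_MAP_Ix into a
 * source's map register steers that source to MIPS interrupt pin
 * IP[2+x], as the IMR_IPn_VAL aliases below spell out.
 */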
#define IMR_IP2_VAL	K_BCM1480_INT_MAP_I0
#define IMR_IP3_VAL	K_BCM1480_INT_MAP_I1
#define IMR_IP4_VAL	K_BCM1480_INT_MAP_I2
#define IMR_IP5_VAL	K_BCM1480_INT_MAP_I3
#define IMR_IP6_VAL	K_BCM1480_INT_MAP_I4

void __init arch_init_irq(void)
{
	unsigned int i, cpu;
	u64 tmp;
	unsigned int imask = STATUSF_IP4 | STATUSF_IP3 | STATUSF_IP2 |
		STATUSF_IP1 | STATUSF_IP0;

	/* Default everything to IP2 */
	/* Start with the _high registers, which have no bit 0 interrupt source */
	for (i = 1; i < BCM1480_NR_IRQS_HALF; i++) {	/* was I0 */
		for (cpu = 0; cpu < 4; cpu++) {
			__raw_writeq(IMR_IP2_VAL,
				     IOADDR(A_BCM1480_IMR_REGISTER(cpu,
								   R_BCM1480_IMR_INTERRUPT_MAP_BASE_H) + (i << 3)));
		}
	}

	/* Now do the _low registers */
	for (i = 0; i < BCM1480_NR_IRQS_HALF; i++) {
		for (cpu = 0; cpu < 4; cpu++) {
			__raw_writeq(IMR_IP2_VAL,
				     IOADDR(A_BCM1480_IMR_REGISTER(cpu,
								   R_BCM1480_IMR_INTERRUPT_MAP_BASE_L) + (i << 3)));
		}
	}

	init_bcm1480_irqs();

	/*
	 * Map the high 16 bits of the mailbox_0 registers to IP[3], for
	 * inter-cpu messages
	 */
	/* Was I1 */
	for (cpu = 0; cpu < 4; cpu++) {
		__raw_writeq(IMR_IP3_VAL, IOADDR(A_BCM1480_IMR_REGISTER(cpu, R_BCM1480_IMR_INTERRUPT_MAP_BASE_H) +
						 (K_BCM1480_INT_MBOX_0_0 << 3)));
	}

	/* Clear the mailboxes.  The firmware may leave them dirty */
	for (cpu = 0; cpu < 4; cpu++) {
		__raw_writeq(0xffffffffffffffffULL,
			     IOADDR(A_BCM1480_IMR_REGISTER(cpu, R_BCM1480_IMR_MAILBOX_0_CLR_CPU)));
		__raw_writeq(0xffffffffffffffffULL,
			     IOADDR(A_BCM1480_IMR_REGISTER(cpu, R_BCM1480_IMR_MAILBOX_1_CLR_CPU)));
	}

	/* Mask everything except the high 16 bits of mailbox_0 registers for all CPUs */
	tmp = ~((u64) 0) ^ (((u64) 1) << K_BCM1480_INT_MBOX_0_0);
	for (cpu = 0; cpu < 4; cpu++) {
		__raw_writeq(tmp, IOADDR(A_BCM1480_IMR_REGISTER(cpu, R_BCM1480_IMR_INTERRUPT_MASK_H)));
	}
	tmp = ~((u64) 0);
	for (cpu = 0; cpu < 4; cpu++) {
		__raw_writeq(tmp, IOADDR(A_BCM1480_IMR_REGISTER(cpu, R_BCM1480_IMR_INTERRUPT_MASK_L)));
	}

	/*
	 * Note that the timer interrupts are also mapped, but this is
	 * done in bcm1480_time_init().  Also, the profiling driver
	 * does its own management of IP7.
	 */

	/* Enable necessary IPs, disable the rest */
	change_c0_status(ST0_IM, imask);
}

extern void bcm1480_mailbox_interrupt(void);

static inline void dispatch_ip2(void)
{
	unsigned long long mask_h, mask_l;
	unsigned int cpu = smp_processor_id();
	unsigned long base;

	/*
	 * Default...we've hit an IP[2] interrupt, which means we've got to
	 * check the 1480 interrupt registers to figure out what to do.  Need
	 * to detect which CPU we're on, now that smp_affinity is supported.
	 */
	base = A_BCM1480_IMR_MAPPER(cpu);
	mask_h = __raw_readq(
		IOADDR(base + R_BCM1480_IMR_INTERRUPT_STATUS_BASE_H));
	mask_l = __raw_readq(
		IOADDR(base + R_BCM1480_IMR_INTERRUPT_STATUS_BASE_L));

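	/*
	 * Bit 0 of the _H status register has no interrupt source of its
	 * own (the map loop in arch_init_irq() starts at 1), so a value
	 * of exactly 1 here means the real pending source is in _L.
	 */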
	if (mask_h) {
		if (mask_h ^ 1)
			do_IRQ(fls64(mask_h) - 1);
		else if (mask_l)
			do_IRQ(63 + fls64(mask_l));
	}
}

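/*
 * Dispatch priority: the performance counter (IP7, profiling builds only)
 * comes first, then the per-CPU timer (IP4), the SMP mailbox (IP3), and
 * finally the interrupt mapper (IP2) for everything else.
 */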
asmlinkage void plat_irq_dispatch(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned int pending;

#ifdef CONFIG_SIBYTE_BCM1480_PROF
	/* Set compare to count to silence count/compare timer interrupts */
	write_c0_compare(read_c0_count());
#endif

	pending = read_c0_cause() & read_c0_status();

#ifdef CONFIG_SIBYTE_BCM1480_PROF
	if (pending & CAUSEF_IP7)	/* CPU performance counter interrupt */
		sbprof_cpu_intr();
	else
#endif

	if (pending & CAUSEF_IP4)
		do_IRQ(K_BCM1480_INT_TIMER_0 + cpu);
#ifdef CONFIG_SMP
	else if (pending & CAUSEF_IP3)
		bcm1480_mailbox_interrupt();
#endif

	else if (pending & CAUSEF_IP2)
		dispatch_ip2();
}