/*
 * Copyright 2011 IBM Corporation.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */
#include <linux/types.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/debugfs.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/rtas.h>
#include <asm/xics.h>
#include <asm/firmware.h>

/* Globals common to all ICP/ICS implementations */
const struct icp_ops *icp_ops;

unsigned int xics_default_server = 0xff;
unsigned int xics_default_distrib_server = 0;
unsigned int xics_interrupt_server_size = 8;

DEFINE_PER_CPU(struct xics_cppr, xics_cppr);

struct irq_host *xics_host;

static LIST_HEAD(ics_list);

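/*
 * Work out the default interrupt server numbers from the boot cpu's
 * device-tree node: the boot cpu's hardware id is used as the default
 * (single-cpu) server, and its entry in "ibm,ppc-interrupt-gserver#s",
 * when present, as the default distribution server.
 */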
void xics_update_irq_servers(void)
{
        int i, j;
        struct device_node *np;
        u32 ilen;
        const u32 *ireg;
        u32 hcpuid;

        /* Find the server numbers for the boot cpu. */
        np = of_get_cpu_node(boot_cpuid, NULL);
        BUG_ON(!np);

        hcpuid = get_hard_smp_processor_id(boot_cpuid);
        xics_default_server = xics_default_distrib_server = hcpuid;

        pr_devel("xics: xics_default_server = 0x%x\n", xics_default_server);

        ireg = of_get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen);
        if (!ireg) {
                of_node_put(np);
                return;
        }

        i = ilen / sizeof(int);

        /* The global interrupt distribution server is specified in the
         * "ibm,ppc-interrupt-gserver#s" property, which is parsed as
         * (server, gserver) pairs. Find the entry for the boot cpu and
         * use its gserver as the default distribution server.
         */
        for (j = 0; j < i; j += 2) {
                if (ireg[j] == hcpuid) {
                        xics_default_distrib_server = ireg[j+1];
                        break;
                }
        }
        pr_devel("xics: xics_default_distrib_server = 0x%x\n",
                 xics_default_distrib_server);
        of_node_put(np);
}

/* GIQ (Global Interrupt Queue) handling. Currently only supported on
 * RTAS setups; this will have to be sorted out properly for bare metal.
 */
void xics_set_cpu_giq(unsigned int gserver, unsigned int join)
{
#ifdef CONFIG_PPC_RTAS
        int index;
        int status;

        if (!rtas_indicator_present(GLOBAL_INTERRUPT_QUEUE, NULL))
                return;

        /* The GIQ indicator index counts down from the top of the
         * interrupt server number space.
         */
        index = (1UL << xics_interrupt_server_size) - 1 - gserver;

        status = rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE, index, join);

        WARN(status < 0, "set-indicator(%d, %d, %u) returned %d\n",
             GLOBAL_INTERRUPT_QUEUE, index, join, status);
#endif
}

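/*
 * Bring the presentation layer up on this cpu: accept all interrupt
 * priorities and join the global interrupt queue (where supported).
 */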
void xics_setup_cpu(void)
{
        icp_ops->set_priority(LOWEST_PRIORITY);

        xics_set_cpu_giq(xics_default_distrib_server, 1);
}

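/*
 * Called when an interrupt arrives for a source no handler claims;
 * give every registered ICS a chance to mask the offending vector.
 */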
void xics_mask_unknown_vec(unsigned int vec)
{
        struct ics *ics;

        pr_err("Interrupt 0x%x (real) is invalid, disabling it.\n", vec);

        list_for_each_entry(ics, &ics_list, link)
                ics->mask_unknown(ics, vec);
}

#ifdef CONFIG_SMP

DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, xics_ipi_message);

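/*
 * IPI messages are encoded as bits in a per-cpu word (one bit per
 * PPC_MSG_* type); the dispatch loop below drains every pending
 * message before returning.
 */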
irqreturn_t xics_ipi_dispatch(int cpu)
{
        unsigned long *tgt = &per_cpu(xics_ipi_message, cpu);

        mb();	/* order mmio clearing qirr */
        while (*tgt) {
                if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION, tgt)) {
                        smp_message_recv(PPC_MSG_CALL_FUNCTION);
                }
                if (test_and_clear_bit(PPC_MSG_RESCHEDULE, tgt)) {
                        smp_message_recv(PPC_MSG_RESCHEDULE);
                }
                if (test_and_clear_bit(PPC_MSG_CALL_FUNC_SINGLE, tgt)) {
                        smp_message_recv(PPC_MSG_CALL_FUNC_SINGLE);
                }
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
                if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK, tgt)) {
                        smp_message_recv(PPC_MSG_DEBUGGER_BREAK);
                }
#endif
        }
        return IRQ_HANDLED;
}

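/*
 * Map the XICS_IPI hardware source into the virq space and request it
 * as a per-cpu interrupt handled by the ICP backend's ipi_action.
 */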
static void xics_request_ipi(void)
{
        unsigned int ipi;

        ipi = irq_create_mapping(xics_host, XICS_IPI);
        BUG_ON(ipi == NO_IRQ);

        /*
         * IPIs are marked IRQF_DISABLED as they must run with irqs
         * disabled.
         */
        irq_set_handler(ipi, handle_percpu_irq);
        BUG_ON(request_irq(ipi, icp_ops->ipi_action,
                           IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL));
}

int __init xics_smp_probe(void)
{
        /* Set up the message_pass callback based on which ICP is used */
        smp_ops->message_pass = icp_ops->message_pass;

        /* Register all the IPIs */
        xics_request_ipi();

        return cpumask_weight(cpu_possible_mask);
}

#endif /* CONFIG_SMP */

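/*
 * Shut the presentation layer down on this cpu: reject all interrupts
 * and let the ICP backend do any implementation-specific cleanup.
 */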
void xics_teardown_cpu(void)
{
        struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);

        /*
         * we have to reset the cppr index to 0 because we're
         * not going to return from the IPI
         */
        os_cppr->index = 0;
        icp_ops->set_priority(0);
        icp_ops->teardown_cpu();
}

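/*
 * Tear down this cpu's interrupt state for kexec; secondary cpus are
 * also dropped from the global interrupt queue.
 */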
void xics_kexec_teardown_cpu(int secondary)
{
        xics_teardown_cpu();

        icp_ops->flush_ipi();

        /*
         * Some machines need to have at least one cpu in the GIQ,
         * so leave the master cpu in the group.
         */
        if (secondary)
                xics_set_cpu_giq(xics_default_distrib_server, 0);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Called from the cpu-offline path to re-point interrupts that are
 * currently delivered to this cpu at the remaining online cpus.
 * Interrupts are disabled.
 */
void xics_migrate_irqs_away(void)
{
        int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id();
        unsigned int irq, virq;

        /* If we used to be the default server, move to the new "boot_cpuid" */
        if (hw_cpu == xics_default_server)
                xics_update_irq_servers();

        /* Reject any interrupt that was queued to us... */
        icp_ops->set_priority(0);

        /* Remove ourselves from the global interrupt queue */
        xics_set_cpu_giq(xics_default_distrib_server, 0);

        /* Allow IPIs again... */
        icp_ops->set_priority(DEFAULT_PRIORITY);

        for_each_irq(virq) {
                struct irq_desc *desc;
                struct irq_chip *chip;
                long server;
                unsigned long flags;
                struct ics *ics;

                /* We can't set affinity on ISA interrupts */
                if (virq < NUM_ISA_INTERRUPTS)
                        continue;
                if (virq_to_host(virq) != xics_host)
                        continue;
                irq = (unsigned int)virq_to_hw(virq);
                /* We still need to be able to get IPIs */
                if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
                        continue;
                desc = irq_to_desc(virq);
                /* We only need to migrate enabled IRQs */
                if (!desc || !desc->action)
                        continue;
                chip = irq_desc_get_chip(desc);
                if (!chip || !chip->irq_set_affinity)
                        continue;

                raw_spin_lock_irqsave(&desc->lock, flags);

                /* Locate the interrupt server */
                server = -1;
                ics = irq_get_chip_data(virq);
                if (ics)
                        server = ics->get_server(ics, irq);
                if (server < 0) {
                        printk(KERN_ERR "%s: Can't find server for irq %d\n",
                               __func__, irq);
                        goto unlock;
                }

                /* We only support delivery to all cpus or to one cpu.
                 * The irq has to be migrated only in the single cpu
                 * case.
                 */
                if (server != hw_cpu)
                        goto unlock;

                /* A broken affinity is expected during cpu offline, so
                 * only warn if this cpu is still marked online.
                 */
                if (cpu_online(cpu))
                        pr_warning("IRQ %u affinity broken off cpu %u\n",
                                   virq, cpu);

                /* Reset affinity to all cpus */
                raw_spin_unlock_irqrestore(&desc->lock, flags);
                irq_set_affinity(virq, cpu_all_mask);
                continue;
unlock:
                raw_spin_unlock_irqrestore(&desc->lock, flags);
        }
}
#endif /* CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_SMP
/*
 * For the moment we only implement delivery to all cpus or one cpu.
 *
 * If the requested affinity is cpu_all_mask, we set global affinity.
 * If not we set it to the first online cpu in the mask, even if
 * multiple cpus are set. This is so things like irqbalance (which set
 * core and package wide affinities) do the right thing.
 *
 * We need to fix this to implement support for the links.
 */
int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask,
			unsigned int strict_check)
{
        if (!distribute_irqs)
                return xics_default_server;

        if (!cpumask_subset(cpu_possible_mask, cpumask)) {
                int server = cpumask_first_and(cpu_online_mask, cpumask);

                if (server < nr_cpu_ids)
                        return get_hard_smp_processor_id(server);

                if (strict_check)
                        return -1;
        }

        /*
         * Workaround issue with some versions of JS20 firmware that
         * deliver interrupts to cpus which haven't been started. This
         * happens when using the maxcpus= boot option.
         */
        if (cpumask_equal(cpu_online_mask, cpu_present_mask))
                return xics_default_distrib_server;

        return xics_default_server;
}
#endif /* CONFIG_SMP */

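/* The xics irq_host matches a node if any registered ICS claims it. */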
static int xics_host_match(struct irq_host *h, struct device_node *node)
{
        struct ics *ics;

        list_for_each_entry(ics, &ics_list, link)
                if (ics->host_match(ics, node))
                        return 1;

        return 0;
}

/* Dummies */
static void xics_ipi_unmask(struct irq_data *d) { }
static void xics_ipi_mask(struct irq_data *d) { }

static struct irq_chip xics_ipi_chip = {
        .name = "XICS",
        .irq_eoi = NULL, /* Patched at init time */
        .irq_mask = xics_ipi_mask,
        .irq_unmask = xics_ipi_unmask,
};

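/*
 * Map a hardware interrupt number into the virq space: record the
 * reverse mapping, mark the interrupt level sensitive, and let the
 * first ICS that accepts the source install its chip and chip data
 * (IPIs get the dedicated xics_ipi_chip instead).
 */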
static int xics_host_map(struct irq_host *h, unsigned int virq,
			 irq_hw_number_t hw)
{
        struct ics *ics;

        pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hw);

        /* Insert the interrupt mapping into the radix tree for fast lookup */
        irq_radix_revmap_insert(xics_host, virq, hw);

        /* They aren't all level sensitive but we just don't really know */
        irq_set_status_flags(virq, IRQ_LEVEL);

        /* Don't call into the ICS for IPIs */
        if (hw == XICS_IPI) {
                irq_set_chip_and_handler(virq, &xics_ipi_chip,
                                         handle_fasteoi_irq);
                return 0;
        }

        /* Let the ICS set up the chip data */
        list_for_each_entry(ics, &ics_list, link)
                if (ics->map(ics, virq) == 0)
                        break;
        return 0;
}

static int xics_host_xlate(struct irq_host *h, struct device_node *ct,
			   const u32 *intspec, unsigned int intsize,
			   irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
        /* The current xics implementation translates everything
         * to level. It is not technically right for MSIs but this
         * is irrelevant at this point. We might get smarter in the future.
         */
        *out_hwirq = intspec[0];
        *out_flags = IRQ_TYPE_LEVEL_LOW;

        return 0;
}

static struct irq_host_ops xics_host_ops = {
        .match = xics_host_match,
        .map = xics_host_map,
        .xlate = xics_host_xlate,
};

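/*
 * Allocate the tree-based irq_host, using XICS_IRQ_SPURIOUS as the
 * "invalid" marker, and make it the default host for the platform.
 */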
static void __init xics_init_host(void)
{
        xics_host = irq_alloc_host(NULL, IRQ_HOST_MAP_TREE, 0, &xics_host_ops,
                                   XICS_IRQ_SPURIOUS);
        BUG_ON(xics_host == NULL);
        irq_set_default_host(xics_host);
}

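/*
 * Called by ICS backends (e.g. the RTAS one) to add themselves to the
 * list consulted by the match/map/mask_unknown paths above.
 */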
void __init xics_register_ics(struct ics *ics)
{
        list_add(&ics->link, &ics_list);
}

static void __init xics_get_server_size(void)
{
        struct device_node *np;
        const u32 *isize;

        /* We fetch the interrupt server size from the first ICS node
         * we find, if any.
         */
        np = of_find_compatible_node(NULL, NULL, "ibm,ppc-xics");
        if (!np)
                return;
        isize = of_get_property(np, "ibm,interrupt-server#-size", NULL);
        if (!isize) {
                /* Drop the node reference on the early-return path too */
                of_node_put(np);
                return;
        }
        xics_interrupt_server_size = *isize;
        of_node_put(np);
}

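/*
 * Main entry point: probe for a presentation controller (hypervisor
 * first, then native), hook it into ppc_md and the IPI chip, probe for
 * source controllers, then set up the common state and the boot cpu.
 */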
void __init xics_init(void)
{
        int rc = -1;

        /* First locate the ICP */
#ifdef CONFIG_PPC_ICP_HV
        if (firmware_has_feature(FW_FEATURE_LPAR))
                rc = icp_hv_init();
#endif
#ifdef CONFIG_PPC_ICP_NATIVE
        if (rc < 0)
                rc = icp_native_init();
#endif
        if (rc < 0) {
                pr_warning("XICS: Cannot find a Presentation Controller!\n");
                return;
        }

        /* Copy the get_irq callback over to ppc_md */
        ppc_md.get_irq = icp_ops->get_irq;

        /* Patch up the IPI chip EOI */
        xics_ipi_chip.irq_eoi = icp_ops->eoi;

        /* Now locate the ICS */
#ifdef CONFIG_PPC_ICS_RTAS
        rc = ics_rtas_init();
#endif
        if (rc < 0)
                pr_warning("XICS: Cannot find a Source Controller!\n");

        /* Initialize the common bits */
        xics_get_server_size();
        xics_update_irq_servers();
        xics_init_host();
        xics_setup_cpu();
}