| Ralf Baechle | 41c594a | 2006-04-05 09:45:45 +0100 | [diff] [blame] | 1 | /* | 
 | 2 |  * Malta Platform-specific hooks for SMP operation | 
 | 3 |  */ | 
| Ralf Baechle | 45a98eb | 2007-08-06 16:32:20 +0100 | [diff] [blame] | 4 | #include <linux/irq.h> | 
| Ralf Baechle | 57a2050 | 2007-03-04 18:27:34 +0000 | [diff] [blame] | 5 | #include <linux/init.h> | 
| Ralf Baechle | 41c594a | 2006-04-05 09:45:45 +0100 | [diff] [blame] | 6 |  | 
| Ralf Baechle | 57a2050 | 2007-03-04 18:27:34 +0000 | [diff] [blame] | 7 | #include <asm/mipsregs.h> | 
 | 8 | #include <asm/mipsmtregs.h> | 
 | 9 | #include <asm/smtc.h> | 
| Ralf Baechle | 41c594a | 2006-04-05 09:45:45 +0100 | [diff] [blame] | 10 | #include <asm/smtc_ipi.h> | 
| Ralf Baechle | 41c594a | 2006-04-05 09:45:45 +0100 | [diff] [blame] | 11 |  | 
 | 12 | /* VPE/SMP Prototype implements platform interfaces directly */ | 
| Ralf Baechle | 41c594a | 2006-04-05 09:45:45 +0100 | [diff] [blame] | 13 |  | 
/*
 * Cause the specified action to be performed on a targeted "CPU"
 *
 * Thin wrapper: tags the request as a Linux SMP IPI and hands it to
 * the generic SMTC IPI machinery.
 */

static void msmtc_send_ipi_single(int cpu, unsigned int action)
{
	/* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */
	smtc_send_ipi(cpu, LINUX_SMP_IPI, action);
}
 | 23 |  | 
| Rusty Russell | 48a048f | 2009-09-24 09:34:44 -0600 | [diff] [blame] | 24 | static void msmtc_send_ipi_mask(const struct cpumask *mask, unsigned int action) | 
| Ralf Baechle | 41c594a | 2006-04-05 09:45:45 +0100 | [diff] [blame] | 25 | { | 
| Ralf Baechle | 87353d8 | 2007-11-19 12:23:51 +0000 | [diff] [blame] | 26 | 	unsigned int i; | 
 | 27 |  | 
| Rusty Russell | 48a048f | 2009-09-24 09:34:44 -0600 | [diff] [blame] | 28 | 	for_each_cpu(i, mask) | 
| Ralf Baechle | 87353d8 | 2007-11-19 12:23:51 +0000 | [diff] [blame] | 29 | 		msmtc_send_ipi_single(i, action); | 
| Ralf Baechle | 41c594a | 2006-04-05 09:45:45 +0100 | [diff] [blame] | 30 | } | 
 | 31 |  | 
 | 32 | /* | 
 | 33 |  * Post-config but pre-boot cleanup entry point | 
 | 34 |  */ | 
| Ralf Baechle | 87353d8 | 2007-11-19 12:23:51 +0000 | [diff] [blame] | 35 | static void __cpuinit msmtc_init_secondary(void) | 
| Ralf Baechle | 41c594a | 2006-04-05 09:45:45 +0100 | [diff] [blame] | 36 | { | 
| Ralf Baechle | 41c594a | 2006-04-05 09:45:45 +0100 | [diff] [blame] | 37 | 	int myvpe; | 
 | 38 |  | 
 | 39 | 	/* Don't enable Malta I/O interrupts (IP2) for secondary VPEs */ | 
 | 40 | 	myvpe = read_c0_tcbind() & TCBIND_CURVPE; | 
 | 41 | 	if (myvpe != 0) { | 
 | 42 | 		/* Ideally, this should be done only once per VPE, but... */ | 
| Ralf Baechle | dc0366b | 2007-08-01 19:42:37 +0100 | [diff] [blame] | 43 | 		clear_c0_status(ST0_IM); | 
 | 44 | 		set_c0_status((0x100 << cp0_compare_irq) | 
 | 45 | 				| (0x100 << MIPS_CPU_IPI_IRQ)); | 
 | 46 | 		if (cp0_perfcount_irq >= 0) | 
 | 47 | 			set_c0_status(0x100 << cp0_perfcount_irq); | 
| Ralf Baechle | 41c594a | 2006-04-05 09:45:45 +0100 | [diff] [blame] | 48 | 	} | 
 | 49 |  | 
| Ralf Baechle | 87353d8 | 2007-11-19 12:23:51 +0000 | [diff] [blame] | 50 | 	smtc_init_secondary(); | 
 | 51 | } | 
 | 52 |  | 
/*
 * Platform "CPU" startup hook: defers entirely to the generic SMTC
 * secondary boot code, passing the idle task for the new "CPU" through.
 */
static void __cpuinit msmtc_boot_secondary(int cpu, struct task_struct *idle)
{
	smtc_boot_secondary(cpu, idle);
}
 | 60 |  | 
/*
 * SMP initialization finalization entry point: nothing Malta-specific
 * remains, so defer to the generic SMTC finalization.
 */
static void __cpuinit msmtc_smp_finish(void)
{
	smtc_smp_finish();
}
 | 68 |  | 
/*
 * Hook for after all CPUs are online.  Nothing further is needed on
 * this platform; an empty stub satisfies the plat_smp_ops interface
 * (wired up as .cpus_done below).
 */

static void msmtc_cpus_done(void)
{
}
 | 76 |  | 
/*
 * Platform SMP pre-initialization
 *
 * As noted above, we can assume a single CPU for now
 * but it may be multithreaded.
 */

static void __init msmtc_smp_setup(void)
{
	/*
	 * we won't get the definitive value until
	 * we've run smtc_prepare_cpus later, but
	 * we would appear to need an upper bound now.
	 * (smtc_build_cpu_map(0) builds the logical CPU map starting
	 * at CPU 0 — presumably returning the provisional CPU count;
	 * confirm against its definition.)
	 */
	smp_num_siblings = smtc_build_cpu_map(0);
}
 | 93 |  | 
/*
 * Prepare "CPUs" (TCs/VPEs) for SMP bring-up: forwards max_cpus
 * unchanged to the generic SMTC preparation code.
 */
static void __init msmtc_prepare_cpus(unsigned int max_cpus)
{
	smtc_prepare_cpus(max_cpus);
}
 | 98 |  | 
/*
 * Platform SMP operations vector for Malta/SMTC, binding the generic
 * MIPS SMP hooks to the SMTC-backed implementations above.
 */
struct plat_smp_ops msmtc_smp_ops = {
	.send_ipi_single	= msmtc_send_ipi_single,
	.send_ipi_mask		= msmtc_send_ipi_mask,
	.init_secondary		= msmtc_init_secondary,
	.smp_finish		= msmtc_smp_finish,
	.cpus_done		= msmtc_cpus_done,
	.boot_secondary		= msmtc_boot_secondary,
	.smp_setup		= msmtc_smp_setup,
	.prepare_cpus		= msmtc_prepare_cpus,
};
| Kevin D. Kissell | f571eff | 2007-08-03 19:38:03 +0200 | [diff] [blame] | 109 |  | 
 | 110 | #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF | 
 | 111 | /* | 
 | 112 |  * IRQ affinity hook | 
 | 113 |  */ | 
 | 114 |  | 
 | 115 |  | 
| Thomas Gleixner | 7c8d948 | 2011-03-23 21:08:57 +0000 | [diff] [blame] | 116 | int plat_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity, | 
 | 117 | 			  bool force) | 
| Kevin D. Kissell | f571eff | 2007-08-03 19:38:03 +0200 | [diff] [blame] | 118 | { | 
| Mike Travis | e65e49d | 2009-01-12 15:27:13 -0800 | [diff] [blame] | 119 | 	cpumask_t tmask; | 
| Kevin D. Kissell | f571eff | 2007-08-03 19:38:03 +0200 | [diff] [blame] | 120 | 	int cpu = 0; | 
 | 121 | 	void smtc_set_irq_affinity(unsigned int irq, cpumask_t aff); | 
 | 122 |  | 
 | 123 | 	/* | 
 | 124 | 	 * On the legacy Malta development board, all I/O interrupts | 
 | 125 | 	 * are routed through the 8259 and combined in a single signal | 
 | 126 | 	 * to the CPU daughterboard, and on the CoreFPGA2/3 34K models, | 
 | 127 | 	 * that signal is brought to IP2 of both VPEs. To avoid racing | 
 | 128 | 	 * concurrent interrupt service events, IP2 is enabled only on | 
 | 129 | 	 * one VPE, by convention VPE0.  So long as no bits are ever | 
 | 130 | 	 * cleared in the affinity mask, there will never be any | 
 | 131 | 	 * interrupt forwarding.  But as soon as a program or operator | 
 | 132 | 	 * sets affinity for one of the related IRQs, we need to make | 
 | 133 | 	 * sure that we don't ever try to forward across the VPE boundry, | 
 | 134 | 	 * at least not until we engineer a system where the interrupt | 
 | 135 | 	 * _ack() or _end() function can somehow know that it corresponds | 
 | 136 | 	 * to an interrupt taken on another VPE, and perform the appropriate | 
 | 137 | 	 * restoration of Status.IM state using MFTR/MTTR instead of the | 
 | 138 | 	 * normal local behavior. We also ensure that no attempt will | 
 | 139 | 	 * be made to forward to an offline "CPU". | 
 | 140 | 	 */ | 
 | 141 |  | 
| Mike Travis | e65e49d | 2009-01-12 15:27:13 -0800 | [diff] [blame] | 142 | 	cpumask_copy(&tmask, affinity); | 
| Rusty Russell | 0de2652 | 2008-12-13 21:20:26 +1030 | [diff] [blame] | 143 | 	for_each_cpu(cpu, affinity) { | 
| Kevin D. Kissell | f571eff | 2007-08-03 19:38:03 +0200 | [diff] [blame] | 144 | 		if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu)) | 
 | 145 | 			cpu_clear(cpu, tmask); | 
 | 146 | 	} | 
| Thomas Gleixner | 7c8d948 | 2011-03-23 21:08:57 +0000 | [diff] [blame] | 147 | 	cpumask_copy(d->affinity, &tmask); | 
| Kevin D. Kissell | f571eff | 2007-08-03 19:38:03 +0200 | [diff] [blame] | 148 |  | 
 | 149 | 	if (cpus_empty(tmask)) | 
 | 150 | 		/* | 
 | 151 | 		 * We could restore a default mask here, but the | 
 | 152 | 		 * runtime code can anyway deal with the null set | 
 | 153 | 		 */ | 
 | 154 | 		printk(KERN_WARNING | 
 | 155 | 			"IRQ affinity leaves no legal CPU for IRQ %d\n", irq); | 
 | 156 |  | 
 | 157 | 	/* Do any generic SMTC IRQ affinity setup */ | 
| Thomas Gleixner | 7c8d948 | 2011-03-23 21:08:57 +0000 | [diff] [blame] | 158 | 	smtc_set_irq_affinity(d->irq, tmask); | 
| Yinghai Lu | d5dedd4 | 2009-04-27 17:59:21 -0700 | [diff] [blame] | 159 |  | 
| Thomas Gleixner | 7c8d948 | 2011-03-23 21:08:57 +0000 | [diff] [blame] | 160 | 	return IRQ_SET_MASK_OK_NOCOPY; | 
| Kevin D. Kissell | f571eff | 2007-08-03 19:38:03 +0200 | [diff] [blame] | 161 | } | 
 | 162 | #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ |