/*
 * SMP initialisation and IPI support
 * Based on arch/arm/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/of.h>

#include <linux/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>

/*
 * As of 2.5, kernels no longer have an init_tasks structure, so we need
 * some other way of telling a new secondary core where to place its SVC
 * stack.
 */
struct secondary_data secondary_data;
volatile unsigned long secondary_holding_pen_release = -1;

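/*
 * IPI numbers must stay contiguous, starting at IPI_RESCHEDULE: the
 * statistics accounting in handle_IPI() and the names in ipi_types[]
 * both index by (ipinr - IPI_RESCHEDULE).
 */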
enum ipi_msg_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
};

static DEFINE_RAW_SPINLOCK(boot_lock);

/*
 * Write secondary_holding_pen_release in a way that is guaranteed to be
 * visible to all observers, irrespective of whether they're taking part
 * in coherency or not.  This is necessary for the hotplug code to work
 * reliably.
 */
static void __cpuinit write_pen_release(unsigned long val)
{
	void *start = (void *)&secondary_holding_pen_release;
	unsigned long size = sizeof(secondary_holding_pen_release);

	secondary_holding_pen_release = val;
	__flush_dcache_area(start, size);
}

/*
 * Boot a secondary CPU, and assign it the specified idle task.
 * This also gives us the initial stack to use for this CPU.
 */
static int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one.
	 */
	raw_spin_lock(&boot_lock);

	/*
	 * Update the pen release flag.
	 */
	write_pen_release(cpu);

	/*
	 * Send an event, causing the secondaries to read pen_release.
	 */
	sev();

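	/*
	 * The secondary will write -1 back into the pen (via
	 * write_pen_release() in secondary_start_kernel()) once it has
	 * left it, so poll for that for up to one second.
	 */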
	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		if (secondary_holding_pen_release == -1UL)
			break;
		udelay(10);
	}

	/*
	 * Now the secondary core is starting up, let it run its
	 * calibrations, then wait for it to finish.
	 */
	raw_spin_unlock(&boot_lock);

	return secondary_holding_pen_release != -1UL ? -ENOSYS : 0;
}

static DECLARE_COMPLETION(cpu_running);

int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	/*
	 * We need to tell the secondary core where to find its stack and the
	 * page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
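	/*
	 * As with write_pen_release(), the secondary may read this before
	 * it is participating in coherency, so clean the data to the point
	 * of coherency.
	 */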
	__flush_dcache_area(&secondary_data, sizeof(secondary_data));

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it to come online or
		 * time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	secondary_data.stack = NULL;

	return ret;
}

/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	pr_info("CPU%u: Booted secondary processor\n", cpu);

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to zero page to avoid speculatively fetching new entries.
	 */
	cpu_set_reserved_ttbr0();
	flush_tlb_all();

	preempt_disable();
	trace_hardirqs_off();

	/*
	 * Let the primary processor know we're out of the
	 * pen, then head off into the C entry point.
	 */
	write_pen_release(-1);

	/*
	 * Synchronise with the boot thread: taking boot_lock here means we
	 * cannot proceed until boot_secondary() has dropped it, i.e. the
	 * boot CPU has finished setting us up.
	 */
	raw_spin_lock(&boot_lock);
	raw_spin_unlock(&boot_lock);

	/*
	 * Let the CPU notifiers know we're starting up, then enable local
	 * interrupts.
	 */
	notify_cpu_starting(cpu);
	local_irq_enable();
	local_fiq_enable();

	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue.
	 */
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	/*
	 * OK, it's off to the idle thread for us.
	 */
	cpu_idle();
}

void __init smp_cpus_done(unsigned int max_cpus)
{
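	/*
	 * One BogoMIPS is 500000 loop iterations per second.  bogosum is
	 * in loops per jiffy, so bogosum / (500000 / HZ) yields the integer
	 * part and (bogosum / (5000 / HZ)) % 100 the two decimal digits.
	 */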
	unsigned long bogosum = loops_per_jiffy * num_online_cpus();

	pr_info("SMP: Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
		num_online_cpus(), bogosum / (500000/HZ),
		(bogosum / (5000/HZ)) % 100);
}

void __init smp_prepare_boot_cpu(void)
{
}

static void (*smp_cross_call)(const struct cpumask *, unsigned int);
static phys_addr_t cpu_release_addr[NR_CPUS];

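/*
 * A spin-table CPU node in the device tree looks something like the
 * (hypothetical) example below; the reg and release address values are
 * of course platform-specific:
 *
 *	cpu@1 {
 *		device_type = "cpu";
 *		compatible = "arm,armv8";
 *		reg = <1>;
 *		enable-method = "spin-table";
 *		cpu-release-addr = <0x0 0x8000fff8>;
 *	};
 */
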
/*
 * Enumerate the possible CPU set from the device tree.
 */
void __init smp_init_cpus(void)
{
	const char *enable_method;
	struct device_node *dn = NULL;
	int cpu = 0;

	while ((dn = of_find_node_by_type(dn, "cpu"))) {
		if (cpu >= NR_CPUS)
			goto next;

		/*
		 * We currently support only the "spin-table" enable-method.
		 */
		enable_method = of_get_property(dn, "enable-method", NULL);
		if (!enable_method || strcmp(enable_method, "spin-table")) {
			pr_err("CPU %d: missing or invalid enable-method property: %s\n",
			       cpu, enable_method);
			goto next;
		}

		/*
		 * Determine the address from which the CPU is polling.
		 */
		if (of_property_read_u64(dn, "cpu-release-addr",
					 &cpu_release_addr[cpu])) {
			pr_err("CPU %d: missing or invalid cpu-release-addr property\n",
			       cpu);
			goto next;
		}

		set_cpu_possible(cpu, true);
next:
		cpu++;
	}

	/* sanity check */
	if (cpu > NR_CPUS)
		pr_warning("no. of cores (%d) greater than configured maximum of %d - clipping\n",
			   cpu, NR_CPUS);
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int cpu;
	void **release_addr;
	unsigned int ncores = num_possible_cpus();

	/*
	 * Are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;

	/*
	 * Initialise the present map (which describes the set of CPUs
	 * actually populated at the present time) and release the
	 * secondaries from the bootloader.
	 */
	for_each_possible_cpu(cpu) {
		if (max_cpus == 0)
			break;

		if (!cpu_release_addr[cpu])
			continue;

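		/*
		 * Tell the waiting secondary where to jump: write the
		 * physical address of the holding pen to the release
		 * location advertised by the bootloader, and clean it to
		 * the PoC since the secondary may be polling with its
		 * caches disabled.
		 */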
		release_addr = __va(cpu_release_addr[cpu]);
		release_addr[0] = (void *)__pa(secondary_holding_pen);
		__flush_dcache_area(release_addr, sizeof(release_addr[0]));

		set_cpu_present(cpu, true);
		max_cpus--;
	}

	/*
	 * Send an event to wake up the secondaries.
	 */
	sev();
}
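/*
 * The interrupt controller driver (e.g. the GIC driver) registers its
 * IPI trigger function here during boot.
 */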
void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	smp_cross_call = fn;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

static const char *ipi_types[NR_IPI] = {
#define S(x,s)	[x - IPI_RESCHEDULE] = s
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
};

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i + IPI_RESCHEDULE,
			   prec >= 4 ? " " : "");
		for_each_present_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));
		seq_printf(p, "      %s\n", ipi_types[i]);
	}
}

u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}

static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	if (system_state == SYSTEM_BOOTING ||
	    system_state == SYSTEM_RUNNING) {
		raw_spin_lock(&stop_lock);
		pr_crit("CPU%u: stopping\n", cpu);
		dump_stack();
		raw_spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_fiq_disable();
	local_irq_disable();

	while (1)
		cpu_relax();
}

/*
 * Main handler for inter-processor interrupts.
 */
void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if (ipinr >= IPI_RESCHEDULE && ipinr < IPI_RESCHEDULE + NR_IPI)
		__inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_RESCHEDULE]);

	switch (ipinr) {
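	/*
	 * IPI_RESCHEDULE is handled without an irq_enter()/irq_exit()
	 * pair here; scheduler_ipi() does its own entry accounting when
	 * it actually has work to do.
	 */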
	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CALL_FUNC_SINGLE:
		irq_enter();
		generic_smp_call_function_single_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		ipi_cpu_stop(cpu);
		irq_exit();
		break;

	default:
		pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
		break;
	}
	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
	unsigned long timeout;

	if (num_online_cpus() > 1) {
		cpumask_t mask;

		cpumask_copy(&mask, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), &mask);

		smp_cross_call(&mask, IPI_CPU_STOP);
	}

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warning("SMP: failed to stop secondary CPUs\n");
}

/*
 * Not supported here.
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}