blob: 335be9bcf0dc8b11effd1c78689294ed946af2a9 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License
4 * as published by the Free Software Foundation; either version 2
5 * of the License, or (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
15 *
16 * Copyright (C) 2000, 2001 Kanoj Sarcar
17 * Copyright (C) 2000, 2001 Ralf Baechle
18 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
19 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
20 */
21#include <linux/cache.h>
22#include <linux/delay.h>
23#include <linux/init.h>
24#include <linux/interrupt.h>
25#include <linux/spinlock.h>
26#include <linux/threads.h>
27#include <linux/module.h>
28#include <linux/time.h>
29#include <linux/timex.h>
30#include <linux/sched.h>
31#include <linux/cpumask.h>
Rojhalat Ibrahim1e35aab2006-02-20 13:35:27 +000032#include <linux/cpu.h>
Alexey Dobriyan4e950f62007-07-30 02:36:13 +040033#include <linux/err.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034
35#include <asm/atomic.h>
36#include <asm/cpu.h>
37#include <asm/processor.h>
38#include <asm/system.h>
39#include <asm/mmu_context.h>
40#include <asm/smp.h>
Ralf Baechle7bcf7712007-10-11 23:46:09 +010041#include <asm/time.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070042
Ralf Baechle41c594a2006-04-05 09:45:45 +010043#ifdef CONFIG_MIPS_MT_SMTC
44#include <asm/mipsmtregs.h>
45#endif /* CONFIG_MIPS_MT_SMTC */
46
cpumask_t phys_cpu_present_map;		/* Bitmask of available CPUs */
volatile cpumask_t cpu_callin_map;	/* Bitmask of started secondaries */
cpumask_t cpu_online_map;		/* Bitmask of currently online CPUs */
int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */

/* Exported for modular code that inspects CPU presence/online state. */
EXPORT_SYMBOL(phys_cpu_present_map);
EXPORT_SYMBOL(cpu_online_map);
extern void __init calibrate_delay(void);
extern void cpu_idle(void);

/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;
69
70static inline void set_cpu_sibling_map(int cpu)
71{
72 int i;
73
74 cpu_set(cpu, cpu_sibling_setup_map);
75
76 if (smp_num_siblings > 1) {
77 for_each_cpu_mask(i, cpu_sibling_setup_map) {
78 if (cpu_data[cpu].core == cpu_data[i].core) {
79 cpu_set(i, cpu_sibling_map[cpu]);
80 cpu_set(cpu, cpu_sibling_map[i]);
81 }
82 }
83 } else
84 cpu_set(cpu, cpu_sibling_map[cpu]);
85}
86
/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage __cpuinit void start_secondary(void)
{
	unsigned int cpu;

#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * Only do cpu_probe for first TC of CPU.  Note the bare if with no
	 * braces: on SMTC kernels it guards ONLY the cpu_probe() call below.
	 */
	if ((read_c0_tcbind() & TCBIND_CURTC) == 0)
#endif /* CONFIG_MIPS_MT_SMTC */
	cpu_probe();
	cpu_report();
	per_cpu_trap_init();
	mips_clockevent_init();
	prom_init_secondary();		/* platform-specific secondary setup */

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */

	calibrate_delay();
	preempt_disable();
	cpu = smp_processor_id();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	prom_smp_finish();
	set_cpu_sibling_map(cpu);

	/* Tell __cpu_up() on the boot CPU that we have arrived. */
	cpu_set(cpu, cpu_callin_map);

	/* Never returns: this CPU now runs the idle loop. */
	cpu_idle();
}
122
/* Serializes initiators of cross-CPU function calls. */
DEFINE_SPINLOCK(smp_call_lock);

/* Single shared slot describing the in-flight cross-call; protected by
 * smp_call_lock on the initiator side, read by IPI handlers. */
struct call_data_struct *call_data;
126
/*
 * Run a function on all other CPUs.
 *
 *  <mask>	cpumask_t of all processors to run the function on.
 *  <func>	The function to run. This must be fast and non-blocking.
 *  <info>	An arbitrary pointer to pass to the function.
 *  <retry>	Currently ignored by this implementation.
 *  <wait>	If true, wait until function has completed on other CPUs.
 *  [RETURNS]	0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>
 * or are or have executed.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler:
 *
 * CPU A                               CPU B
 * Disable interrupts
 *                                     smp_call_function()
 *                                     Take call_lock
 *                                     Send IPIs
 *                                     Wait for all cpus to acknowledge IPI
 *                                     CPU A has not responded, spin waiting
 *                                     for cpu A to respond, holding call_lock
 * smp_call_function()
 * Spin waiting for call_lock
 * Deadlock                            Deadlock
 */
int smp_call_function_mask(cpumask_t mask, void (*func) (void *info),
	void *info, int retry, int wait)
{
	struct call_data_struct data;
	int cpu = smp_processor_id();
	int cpus;

	/*
	 * Can die spectacularly if this CPU isn't yet marked online
	 */
	BUG_ON(!cpu_online(cpu));

	/* Never IPI ourselves; count how many targets remain. */
	cpu_clear(cpu, mask);
	cpus = cpus_weight(mask);
	if (!cpus)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	/* data lives on this stack; remote CPUs access it via call_data. */
	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock(&smp_call_lock);
	call_data = &data;
	smp_mb();	/* publish call_data before the IPIs arrive */

	/* Send a message to all other CPUs and wait for them to respond */
	core_send_ipi_mask(mask, SMP_CALL_FUNCTION);

	/* Wait for response */
	/* FIXME: lock-up detection, backtrace on lock-up */
	while (atomic_read(&data.started) != cpus)
		barrier();

	/* With wait set, also spin until every target has run func. */
	if (wait)
		while (atomic_read(&data.finished) != cpus)
			barrier();
	call_data = NULL;
	spin_unlock(&smp_call_lock);

	return 0;
}
202
Ralf Baechlebd6aeef2007-10-03 01:24:16 +0100203int smp_call_function(void (*func) (void *info), void *info, int retry,
204 int wait)
205{
206 return smp_call_function_mask(cpu_online_map, func, info, retry, wait);
207}
Ralf Baechle41c594a2006-04-05 09:45:45 +0100208
/*
 * IPI handler run on each target CPU: execute the function published in
 * call_data by smp_call_function_mask().
 */
void smp_call_function_interrupt(void)
{
	/* Snapshot the payload before acknowledging via data.started. */
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function.
	 */
	smp_mb();	/* reads above must complete before the ack */
	atomic_inc(&call_data->started);

	/*
	 * At this point the info structure may be out of scope unless wait==1.
	 */
	irq_enter();
	(*func)(info);
	irq_exit();

	if (wait) {
		smp_mb();	/* func's effects visible before final ack */
		atomic_inc(&call_data->finished);
	}
}
234
/*
 * Run @func with @info on one specific CPU.  If @cpu is the calling CPU
 * the function is invoked directly with interrupts disabled; otherwise it
 * is dispatched through smp_call_function_mask().
 *
 * Returns 0 on success or the error code from smp_call_function_mask().
 * (Previously the cross-call result was computed into ret and then
 * discarded by an unconditional "return 0".)
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
	int retry, int wait)
{
	int ret, me;

	/*
	 * Can die spectacularly if this CPU isn't yet marked online
	 */
	if (!cpu_online(cpu))
		return 0;

	me = get_cpu();		/* also disables preemption */
	BUG_ON(!cpu_online(me));

	if (cpu == me) {
		/* Just call it locally, mimicking IPI context. */
		local_irq_disable();
		func(info);
		local_irq_enable();
		put_cpu();
		return 0;
	}

	ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, retry,
				     wait);

	put_cpu();
	return ret;
}
263
Linus Torvalds1da177e2005-04-16 15:20:36 -0700264static void stop_this_cpu(void *dummy)
265{
266 /*
267 * Remove this CPU:
268 */
269 cpu_clear(smp_processor_id(), cpu_online_map);
270 local_irq_enable(); /* May need to service _machine_restart IPI */
271 for (;;); /* Wait if available. */
272}
273
/*
 * Halt all other CPUs (e.g. for reboot/panic).  wait=0 because the
 * targets never return from stop_this_cpu().
 */
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 1, 0);
}
278
/* Called by the generic SMP code once all CPUs have been brought up;
 * gives the platform a final hook. */
void __init smp_cpus_done(unsigned int max_cpus)
{
	prom_cpus_done();
}
283
/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	plat_prepare_cpus(max_cpus);	/* platform discovers/preps CPUs */
	set_cpu_sibling_map(0);		/* boot CPU is logical CPU 0 */
#ifndef CONFIG_HOTPLUG_CPU
	/* Without hotplug every possible CPU is present from the start. */
	cpu_present_map = cpu_possible_map;
#endif
}
295
296/* preload SMP state for boot cpu */
297void __devinit smp_prepare_boot_cpu(void)
298{
299 /*
300 * This assumes that bootup is always handled by the processor
301 * with the logic and physical number 0.
302 */
303 __cpu_number_map[0] = 0;
304 __cpu_logical_map[0] = 0;
305 cpu_set(0, phys_cpu_present_map);
306 cpu_set(0, cpu_online_map);
307 cpu_set(0, cpu_callin_map);
308}
309
310/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700311 * Called once for each "cpu_possible(cpu)". Needs to spin up the cpu
312 * and keep control until "cpu_online(cpu)" is set. Note: cpu is
313 * physical, not logical.
314 */
Gautham R Shenoyb282b6f2007-01-10 23:15:34 -0800315int __cpuinit __cpu_up(unsigned int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700316{
Ralf Baechleb727a602005-02-22 21:18:01 +0000317 struct task_struct *idle;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700318
Ralf Baechleb727a602005-02-22 21:18:01 +0000319 /*
320 * Processor goes to start_secondary(), sets online flag
321 * The following code is purely to make sure
322 * Linux can schedule processes on this slave.
323 */
324 idle = fork_idle(cpu);
325 if (IS_ERR(idle))
326 panic(KERN_ERR "Fork failed for CPU %d", cpu);
327
328 prom_boot_secondary(cpu, idle);
329
330 /*
331 * Trust is futile. We should really have timeouts ...
332 */
333 while (!cpu_isset(cpu, cpu_callin_map))
334 udelay(100);
335
336 cpu_set(cpu, cpu_online_map);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700337
338 return 0;
339}
340
/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	/* Changing the profiling rate is not supported; report success. */
	(void) multiplier;

	return 0;
}
346
/* IPI callback: flush the whole TLB on the receiving CPU. */
static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}
351
/* Flush the TLB on every CPU, including the caller, and wait. */
void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, NULL, 1, 1);
}
356
/* IPI callback: flush this mm's TLB entries on the receiving CPU. */
static void flush_tlb_mm_ipi(void *mm)
{
	struct mm_struct *target = mm;	/* no cast needed from void * */

	local_flush_tlb_mm(target);
}
361
/*
 * Special Variant of smp_call_function for use by TLB functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 *  o CONFIG_MIPS_MT_SMTC currently implies there is only one physical core.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
#ifndef CONFIG_MIPS_MT_SMTC
	/* retry=1, wait=1: callers pass stack data, so wait is required. */
	smp_call_function(func, info, 1, 1);
#endif
}
377
/*
 * Run @func on every CPU's TLB including our own.  Preemption is
 * disabled so the local call stays on the CPU not covered by the
 * cross-call.
 */
static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
	preempt_disable();

	smp_on_other_tlbs(func, info);
	func(info);		/* and finally the local CPU */

	preempt_enable();
}
387
/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debugees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		/* mm may be live elsewhere: IPI every other CPU. */
		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
	} else {
		cpumask_t mask = cpu_online_map;
		unsigned int cpu;

		/* Single-threaded and current: just invalidate the ASID
		 * on the other CPUs so switch_mm allocates a fresh one. */
		cpu_clear(smp_processor_id(), mask);
		for_each_cpu_mask(cpu, mask)
			if (cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}
420
/* Argument bundle passed through the void * of the flush IPI callbacks. */
struct flush_tlb_data {
	struct vm_area_struct *vma;	/* unused for kernel-range flushes */
	unsigned long addr1;		/* start address (or single page) */
	unsigned long addr2;		/* end address (unused for one page) */
};
426
427static void flush_tlb_range_ipi(void *info)
428{
Ralf Baechlec50cade2007-10-04 16:57:08 +0100429 struct flush_tlb_data *fd = info;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700430
431 local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
432}
433
/* Flush a user virtual address range on every CPU where it may be cached. */
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		/* fd is on the stack; smp_on_other_tlbs waits, so safe. */
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = start,
			.addr2 = end,
		};

		smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
	} else {
		cpumask_t mask = cpu_online_map;
		unsigned int cpu;

		/* Invalidate the mm's ASID on all other CPUs instead. */
		cpu_clear(smp_processor_id(), mask);
		for_each_cpu_mask(cpu, mask)
			if (cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}
459
460static void flush_tlb_kernel_range_ipi(void *info)
461{
Ralf Baechlec50cade2007-10-04 16:57:08 +0100462 struct flush_tlb_data *fd = info;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700463
464 local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
465}
466
467void flush_tlb_kernel_range(unsigned long start, unsigned long end)
468{
Ralf Baechle89a8a5a2007-10-04 18:18:52 +0100469 struct flush_tlb_data fd = {
470 .addr1 = start,
471 .addr2 = end,
472 };
Linus Torvalds1da177e2005-04-16 15:20:36 -0700473
Ralf Baechlec50cade2007-10-04 16:57:08 +0100474 on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700475}
476
477static void flush_tlb_page_ipi(void *info)
478{
Ralf Baechlec50cade2007-10-04 16:57:08 +0100479 struct flush_tlb_data *fd = info;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700480
481 local_flush_tlb_page(fd->vma, fd->addr1);
482}
483
/* Flush a single user page on every CPU where the mm may be cached. */
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
		/* fd is on the stack; smp_on_other_tlbs waits, so safe. */
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = page,
		};

		smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
	} else {
		cpumask_t mask = cpu_online_map;
		unsigned int cpu;

		/* Invalidate the mm's ASID on all other CPUs instead. */
		cpu_clear(smp_processor_id(), mask);
		for_each_cpu_mask(cpu, mask)
			if (cpu_context(cpu, vma->vm_mm))
				cpu_context(cpu, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}
506
/* IPI callback: the virtual address itself travels in the info pointer. */
static void flush_tlb_one_ipi(void *info)
{
	local_flush_tlb_one((unsigned long) info);
}
513
/* Flush one (kernel) virtual address on all CPUs including the caller. */
void flush_tlb_one(unsigned long vaddr)
{
	/* Pass the address in the pointer argument to avoid a struct. */
	smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);