/*
 * Intel SMP support routines.
 *
 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 * (c) 2002,2003 Andi Kleen, SuSE Labs.
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 */

#include <linux/init.h>

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/interrupt.h>

#include <asm/mtrr.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mach_apic.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/apicdef.h>
#include <asm/idle.h>

/*
 * Smarter SMP flushing macros.
 * c/o Linus Torvalds.
 *
 * These mean you can really definitely utterly forget about
 * writing to user space from interrupts. (It's not allowed anyway).
 *
 * Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 * More scalable flush, from Andi Kleen
 *
 * To avoid global state use 8 different call vectors.
 * Each CPU uses a specific vector to trigger flushes on other
 * CPUs. Depending on the received vector the target CPUs look into
 * the right per-cpu variable for the flush data.
 *
 * With more than 8 CPUs they are hashed to the 8 available
 * vectors. The limited global vector space forces us to this right now.
 * In the future, when interrupts are split into per-CPU domains, this
 * could be fixed, at the cost of triggering multiple IPIs in some cases.
 */

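/*
 * Example: with the 8 vectors above, CPU 9 hashes to sender slot
 * 9 % 8 == 1 and raises INVALIDATE_TLB_VECTOR_START + 1, so CPUs 1
 * and 9 share (and serialize on) the same flush_state entry.
 */
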
union smp_flush_state {
	struct {
		cpumask_t flush_cpumask;
		struct mm_struct *flush_mm;
		unsigned long flush_va;
#define FLUSH_ALL	-1ULL
		spinlock_t tlbstate_lock;
	};
	char pad[SMP_CACHE_BYTES];
} ____cacheline_aligned;

/* State is put into the per CPU data section, but padded
   to a full cache line because other CPUs can access it and we don't
   want false sharing in the per cpu data segment. */
static DEFINE_PER_CPU(union smp_flush_state, flush_state);

/*
 * We cannot call mmdrop() because we are in interrupt context;
 * instead, update mm->cpu_vm_mask.
 */
static inline void leave_mm(int cpu)
{
	if (read_pda(mmu_state) == TLBSTATE_OK)
		BUG();
	cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
	load_cr3(swapper_pg_dir);
}

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but smp_invalidate_interrupt ignores flush ipis
 *	for the wrong mm, and in the worst case we perform a superfluous
 *	tlb flush.
 * 1a2) set cpu mmu_state to TLBSTATE_OK
 *	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
 *	was in lazy tlb mode.
 * 1a3) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1a5) change cr3.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles
 *	flush ipis.
 * 1b1) set cpu mmu_state to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%rsp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%rsp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu mmu_state is local to each cpu, no
 * write/read ordering problems.
 */

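/*
 * For reference, the 1b path above corresponds to this sketch of the
 * lazy-tlb branch of switch_mm() (the real code lives in
 * asm/mmu_context.h); if test_and_set finds the bit clear, leave_mm()
 * ran on this cpu and cr3 must be reloaded:
 *
 *	write_pda(mmu_state, TLBSTATE_OK);
 *	if (!cpu_test_and_set(cpu, next->cpu_vm_mask))
 *		load_cr3(next->pgd);
 */
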
/*
 * TLB flush IPI:
 *
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 *
 * Interrupts are disabled.
 */

asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
{
	int cpu;
	int sender;
	union smp_flush_state *f;

	cpu = smp_processor_id();
	/*
	 * orig_rax contains the interrupt vector - 256.
	 * Use that to determine where the sender put the data.
	 */
	sender = regs->orig_rax + 256 - INVALIDATE_TLB_VECTOR_START;
	f = &per_cpu(flush_state, sender);

	if (!cpu_isset(cpu, f->flush_cpumask))
		goto out;
	/*
	 * This was a BUG() but until someone can quote me the
	 * line from the Intel manual that guarantees an IPI to
	 * multiple CPUs is retried _only_ on the erroring CPUs,
	 * it's staying as a return.
	 *
	 * BUG();
	 */

	if (f->flush_mm == read_pda(active_mm)) {
		if (read_pda(mmu_state) == TLBSTATE_OK) {
			if (f->flush_va == FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(f->flush_va);
		} else
			leave_mm(cpu);
	}
out:
	ack_APIC_irq();
	cpu_clear(cpu, f->flush_cpumask);
}

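/*
 * Flush the tlb entries of mm (all of them if va == FLUSH_ALL, else
 * just the page at va) on every CPU in cpumask. The caller must have
 * disabled preemption and must not include itself in cpumask.
 */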
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
			     unsigned long va)
{
	int sender;
	union smp_flush_state *f;

	/* Caller has disabled preemption */
	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
	f = &per_cpu(flush_state, sender);

	/* Could avoid this lock when
	   num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
	   probably not worth checking this for a cache-hot lock. */
	spin_lock(&f->tlbstate_lock);

	f->flush_mm = mm;
	f->flush_va = va;
	cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);

	/*
	 * We have to send the IPI only to the
	 * CPUs affected.
	 */
	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender);

	while (!cpus_empty(f->flush_cpumask))
		cpu_relax();

	f->flush_mm = NULL;
	f->flush_va = 0;
	spin_unlock(&f->tlbstate_lock);
}

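/* Initialize the per-CPU flush_state locks for every possible CPU. */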
int __cpuinit init_smp_flush(void)
{
	int i;

	for_each_cpu_mask(i, cpu_possible_map) {
		spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);
	}
	return 0;
}

core_initcall(init_smp_flush);

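/*
 * Flush the current task's mm: locally right away, and by IPI on all
 * other CPUs that currently run with this mm loaded.
 */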
void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	local_flush_tlb();
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
	preempt_enable();
}

/* Flush all of mm's tlb entries on every CPU that is using it. */
void flush_tlb_mm(struct mm_struct *mm)
{
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	if (current->active_mm == mm) {
		if (current->mm)
			local_flush_tlb();
		else
			leave_mm(smp_processor_id());
	}
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);

	preempt_enable();
}

/* Flush the single page at va from vma's mm, on every CPU using that mm. */
void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
	struct mm_struct *mm = vma->vm_mm;
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(va);
		else
			leave_mm(smp_processor_id());
	}

	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, va);

	preempt_enable();
}

/* Helper run on each CPU by flush_tlb_all() below. */
static void do_flush_tlb_all(void *info)
{
	unsigned long cpu = smp_processor_id();

	__flush_tlb_all();
	if (read_pda(mmu_state) == TLBSTATE_LAZY)
		leave_mm(cpu);
}

void flush_tlb_all(void)
{
	on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
}

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */

void smp_send_reschedule(int cpu)
{
	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
}

/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t started;	/* # of CPUs that have picked up func/info */
	atomic_t finished;	/* # of CPUs that have finished running func */
	int wait;		/* if set, the caller spins on ->finished */
};

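/* The cross-call currently in flight; only written under call_lock. */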
static struct call_data_struct *call_data;

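/*
 * Let code outside this file (e.g. the CPU bring-up path) hold
 * call_lock so that no function-call IPI can be in flight meanwhile.
 */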
void lock_ipi_call_lock(void)
{
	spin_lock_irq(&call_lock);
}

void unlock_ipi_call_lock(void)
{
	spin_unlock_irq(&call_lock);
}

/*
 * this function sends a 'generic call function' IPI to one other CPU
 * in the system.
 *
 * cpu is a standard Linux logical CPU number.
 */
static void
__smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			   int nonatomic, int wait)
{
	struct call_data_struct data;
	int cpus = 1;

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	call_data = &data;
	wmb();
	/* Send a message to the target CPU and wait for it to respond */
	send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (!wait)
		return;

	while (atomic_read(&data.finished) != cpus)
		cpu_relax();
}

/*
 * smp_call_function_single - Run a function on another CPU
 * @cpu: The target CPU; must not be the calling CPU.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @nonatomic: Currently unused.
 * @wait: If true, wait until the function has completed on the other CPU.
 *
 * Returns 0 on success, else a negative status code.
 *
 * Does not return until the remote CPU is nearly ready to execute <func>,
 * or is executing or has executed it.
 */

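/*
 * Illustrative use (my_ipi_handler and my_arg are hypothetical): ask
 * CPU 2 to run a fast, non-blocking handler and wait for completion:
 *
 *	err = smp_call_function_single(2, my_ipi_handler, &my_arg, 0, 1);
 */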
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int nonatomic, int wait)
{
	/* prevent preemption and reschedule on another processor */
	int me = get_cpu();

	if (cpu == me) {
		WARN_ON(1);
		put_cpu();
		return -EBUSY;
	}
	spin_lock_bh(&call_lock);
	__smp_call_function_single(cpu, func, info, nonatomic, wait);
	spin_unlock_bh(&call_lock);
	put_cpu();
	return 0;
}

/*
 * this function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 */
static void __smp_call_function(void (*func) (void *info), void *info,
				int nonatomic, int wait)
{
	struct call_data_struct data;
	int cpus = num_online_cpus() - 1;

	if (!cpus)
		return;

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	call_data = &data;
	wmb();
	/* Send a message to all other CPUs and wait for them to respond */
	send_IPI_allbutself(CALL_FUNCTION_VECTOR);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (!wait)
		return;

	while (atomic_read(&data.finished) != cpus)
		cpu_relax();
}

/*
 * smp_call_function - run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @nonatomic: currently unused.
 * @wait: If true, wait (atomically) until function has completed on other
 *        CPUs.
 *
 * Returns 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute func, or are executing or have
 * executed it.
 *
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler, or from a bottom half handler.
 * Actually there are a few legal cases, like panic.
 */
int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
		      int wait)
{
	spin_lock(&call_lock);
	__smp_call_function(func, info, nonatomic, wait);
	spin_unlock(&call_lock);
	return 0;
}

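/* Take this CPU out of service: clear it from cpu_online_map and
   disable its local APIC. */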
void smp_stop_cpu(void)
{
	unsigned long flags;

	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	local_irq_save(flags);
	disable_local_APIC();
	local_irq_restore(flags);
}

static void smp_really_stop_cpu(void *dummy)
{
	smp_stop_cpu();
	for (;;)
		halt();
}

void smp_send_stop(void)
{
	int nolock = 0;

	if (reboot_force)
		return;
	/* Don't deadlock on the call lock in panic */
	if (!spin_trylock(&call_lock)) {
		/* ignore locking because we have panicked anyway */
		nolock = 1;
	}
	__smp_call_function(smp_really_stop_cpu, NULL, 0, 0);
	if (!nolock)
		spin_unlock(&call_lock);

	local_irq_disable();
	disable_local_APIC();
	local_irq_enable();
}

/*
 * Reschedule call back. Nothing to do,
 * all the work is done automatically when
 * we return from the interrupt.
 */
asmlinkage void smp_reschedule_interrupt(void)
{
	ack_APIC_irq();
}

asmlinkage void smp_call_function_interrupt(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	ack_APIC_irq();
	/*
	 * Notify the initiating CPU that I've grabbed the data and am
	 * about to execute the function
	 */
	mb();
	atomic_inc(&call_data->started);
	/*
	 * At this point the info structure may be out of scope unless wait==1
	 */
	exit_idle();
	irq_enter();
	(*func)(info);
	irq_exit();
	if (wait) {
		mb();
		atomic_inc(&call_data->finished);
	}
}

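/*
 * Like smp_processor_id(), but safe to use before the per-cpu setup is
 * complete: map the hardware APIC id back to a logical CPU number via
 * x86_cpu_to_apicid, falling back to CPU 0.
 */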
int safe_smp_processor_id(void)
{
	int apicid, i;

	if (disable_apic)
		return 0;

	apicid = hard_smp_processor_id();
	if (x86_cpu_to_apicid[apicid] == apicid)
		return apicid;

	for (i = 0; i < NR_CPUS; ++i) {
		if (x86_cpu_to_apicid[i] == apicid)
			return i;
	}

	/* No entries in x86_cpu_to_apicid? Either no MPS|ACPI,
	 * or called too early. Either way, we must be CPU 0. */
	if (x86_cpu_to_apicid[0] == BAD_APICID)
		return 0;

	return 0; /* Should not happen */
}
543}