#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/cpu.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>

DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
			= { &init_mm, 0, };

/*
 *	Smarter SMP flushing macros.
 *		c/o Linus Torvalds.
 *
 *	These mean you can really definitely utterly forget about
 *	writing to user space from interrupts. (It's not allowed anyway.)
 *
 *	Optimizations by Manfred Spraul <manfred@colorfullife.com>
 *
 *	More scalable flush, from Andi Kleen
 *
 *	To avoid global state, use 8 different call vectors.
 *	Each CPU uses a specific vector to trigger flushes on other
 *	CPUs. Depending on the received vector, the target CPUs look into
 *	the right array slot for the flush data.
 *
 *	With more than 8 CPUs they are hashed to the 8 available
 *	vectors. The limited global vector space forces us into this
 *	right now. In the future, when interrupts are split into per-CPU
 *	domains, this could be fixed, at the cost of triggering multiple
 *	IPIs in some cases.
 */

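/*
 * Each CPU picks its slot in flush_state[] via the per-cpu
 * tlb_vector_offset set up in calculate_tlb_offset() below.
 */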
union smp_flush_state {
	struct {
		struct mm_struct *flush_mm;
		unsigned long flush_start;
		unsigned long flush_end;
		raw_spinlock_t tlbstate_lock;
		DECLARE_BITMAP(flush_cpumask, NR_CPUS);
	};
	char pad[INTERNODE_CACHE_BYTES];
} ____cacheline_internodealigned_in_smp;

/*
 * State is put into the per-CPU data section, but padded
 * to a full cache line because other CPUs can access it and we don't
 * want false sharing in the per-cpu data segment.
 */
static union smp_flush_state flush_state[NUM_INVALIDATE_TLB_VECTORS];

static DEFINE_PER_CPU_READ_MOSTLY(int, tlb_vector_offset);

/*
 * We cannot call mmdrop() because we are in interrupt context;
 * instead, update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
	struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm);
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		BUG();
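	/*
	 * Drop this CPU from the mm's cpumask so it stops receiving flush
	 * IPIs, and switch to the kernel page tables so no user TLB
	 * entries for this mm can be speculatively refilled.
	 */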
	if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
		cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
		load_cr3(swapper_pg_dir);
	}
}
EXPORT_SYMBOL_GPL(leave_mm);

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop IPI delivery for the old mm. This is not synchronized with
 *	the other cpus, but smp_invalidate_interrupt ignores flush IPIs
 *	for the wrong mm, so in the worst case we perform a superfluous
 *	TLB flush.
 * 1a2) set cpu mmu_state to TLBSTATE_OK
 *	Now smp_invalidate_interrupt won't call leave_mm if cpu0 was in
 *	lazy TLB mode.
 * 1a3) update cpu active_mm
 *	Now cpu0 accepts TLB flushes for the new mm.
 * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send TLB flush IPIs.
 * 1a5) change cr3.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles flush IPIs.
 * 1b1) set cpu mmu_state to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush IPIs],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the TLB.
 * 2) switch %esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative TLB reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load TLB entries for user space
 *   pages.
 *
 * The good news is that cpu mmu_state is local to each cpu, so there
 * are no write/read ordering problems.
 */

/*
 * TLB flush IPI:
 *
 * 1) Flush the TLB entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in lazy TLB mode.
 *
 * Interrupts are disabled.
 */

/*
 * FIXME: use of asmlinkage is not consistent. On x86_64 it's a noop
 * but is still kept for documentation purposes, though the usage is
 * slightly inconsistent. On x86_32, asmlinkage is regparm(0), but
 * interrupt entry calls in with the first parameter in %eax. Maybe
 * define intrlinkage?
 */
#ifdef CONFIG_X86_64
asmlinkage
#endif
void smp_invalidate_interrupt(struct pt_regs *regs)
{
	unsigned int cpu;
	unsigned int sender;
	union smp_flush_state *f;

	cpu = smp_processor_id();
	/*
	 * orig_rax contains the negated interrupt vector.
	 * Use that to determine where the sender put the data.
	 */
	sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
	f = &flush_state[sender];

	if (!cpumask_test_cpu(cpu, to_cpumask(f->flush_cpumask)))
		goto out;
	/*
	 * This was a BUG() but until someone can quote me the
	 * line from the Intel manual that guarantees an IPI to
	 * multiple CPUs is retried _only_ on the erroring CPUs,
	 * it's staying as a return.
	 *
	 * BUG();
	 */

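	/*
	 * Three cases: a full-range flush (or a CPU without INVLPG)
	 * flushes the whole local TLB; flush_end == 0 encodes a
	 * single-page flush of flush_start; otherwise the range is
	 * flushed page by page.
	 */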
	if (f->flush_mm == this_cpu_read(cpu_tlbstate.active_mm)) {
		if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
			if (f->flush_end == TLB_FLUSH_ALL
					|| !cpu_has_invlpg)
				local_flush_tlb();
			else if (!f->flush_end)
				__flush_tlb_single(f->flush_start);
			else {
				unsigned long addr;
				addr = f->flush_start;
				while (addr < f->flush_end) {
					__flush_tlb_single(addr);
					addr += PAGE_SIZE;
				}
			}
		} else
			leave_mm(cpu);
	}
out:
	ack_APIC_irq();
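	/*
	 * Clearing our bit in flush_cpumask tells the sender we are done;
	 * the barriers keep the TLB flush above ordered before the clear,
	 * and the clear ordered before any later work.
	 */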
	smp_mb__before_clear_bit();
	cpumask_clear_cpu(cpu, to_cpumask(f->flush_cpumask));
	smp_mb__after_clear_bit();
	inc_irq_stat(irq_tlb_count);
}

static void flush_tlb_others_ipi(const struct cpumask *cpumask,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long end)
{
	unsigned int sender;
	union smp_flush_state *f;

	/* Caller has disabled preemption */
	sender = this_cpu_read(tlb_vector_offset);
	f = &flush_state[sender];

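	/*
	 * When there are more possible CPUs than invalidate vectors,
	 * several senders can share the same flush_state slot, so
	 * serialize with the lock; otherwise each CPU owns its slot.
	 */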
	if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS)
		raw_spin_lock(&f->tlbstate_lock);

	f->flush_mm = mm;
	f->flush_start = start;
	f->flush_end = end;
	if (cpumask_andnot(to_cpumask(f->flush_cpumask), cpumask,
			   cpumask_of(smp_processor_id()))) {
		/*
		 * We have to send the IPI only to
		 * CPUs affected.
		 */
		apic->send_IPI_mask(to_cpumask(f->flush_cpumask),
			      INVALIDATE_TLB_VECTOR_START + sender);

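		/*
		 * Wait until every target CPU has acknowledged the flush
		 * by clearing its bit; only then is the slot safe to
		 * reuse.
		 */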
		while (!cpumask_empty(to_cpumask(f->flush_cpumask)))
			cpu_relax();
	}

	f->flush_mm = NULL;
	f->flush_start = 0;
	f->flush_end = 0;
	if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS)
		raw_spin_unlock(&f->tlbstate_lock);
}

void native_flush_tlb_others(const struct cpumask *cpumask,
			     struct mm_struct *mm, unsigned long start,
			     unsigned long end)
{
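	/*
	 * On SGI UV systems the Broadcast Assist Unit can offload the
	 * shootdown; uv_flush_tlb_others() returns the cpumask of CPUs
	 * that still need an IPI-based flush, or NULL if none do.
	 */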
	if (is_uv_system()) {
		unsigned int cpu;

		cpu = smp_processor_id();
		cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
		if (cpumask)
			flush_tlb_others_ipi(cpumask, mm, start, end);
		return;
	}
	flush_tlb_others_ipi(cpumask, mm, start, end);
}

static void __cpuinit calculate_tlb_offset(void)
{
	int cpu, node, nr_node_vecs, idx = 0;
	/*
	 * We are changing tlb_vector_offset for each CPU at runtime, but
	 * this will not cause inconsistency, as the write is atomic on
	 * x86. We might see more lock contention for a short time, but
	 * after every CPU's tlb_vector_offset has been updated, everything
	 * should go back to normal.
	 *
	 * Note: if NUM_INVALIDATE_TLB_VECTORS % nr_online_nodes != 0, we
	 * might waste some vectors.
	 */
	if (nr_online_nodes > NUM_INVALIDATE_TLB_VECTORS)
		nr_node_vecs = 1;
	else
		nr_node_vecs = NUM_INVALIDATE_TLB_VECTORS / nr_online_nodes;

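	/*
	 * Example: with 8 vectors and 2 online nodes, nr_node_vecs = 4;
	 * node 0 hands out vectors 0-3 and node 1 vectors 4-7, with CPUs
	 * inside each node assigned round-robin over the node's vectors.
	 */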
	for_each_online_node(node) {
		int node_offset = (idx % NUM_INVALIDATE_TLB_VECTORS) *
			nr_node_vecs;
		int cpu_offset = 0;
		for_each_cpu(cpu, cpumask_of_node(node)) {
			per_cpu(tlb_vector_offset, cpu) = node_offset +
				cpu_offset;
			cpu_offset++;
			cpu_offset = cpu_offset % nr_node_vecs;
		}
		idx++;
	}
}

static int __cpuinit tlb_cpuhp_notify(struct notifier_block *n,
				      unsigned long action, void *hcpu)
{
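	/*
	 * Mask off CPU_TASKS_FROZEN so hotplug events during suspend and
	 * resume also recalculate the vector layout.
	 */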
	switch (action & 0xf) {
	case CPU_ONLINE:
	case CPU_DEAD:
		calculate_tlb_offset();
	}
	return NOTIFY_OK;
}

static int __cpuinit init_smp_flush(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(flush_state); i++)
		raw_spin_lock_init(&flush_state[i].tlbstate_lock);

	calculate_tlb_offset();
	hotcpu_notifier(tlb_cpuhp_notify, 0);
	return 0;
}
core_initcall(init_smp_flush);

void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;

	preempt_disable();

	local_flush_tlb();
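	/*
	 * If any CPU besides this one has the mm loaded, ask those CPUs
	 * to flush as well; TLB_FLUSH_ALL requests a full flush.
	 */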
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
	preempt_enable();
}

void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm)
			local_flush_tlb();
		else
			leave_mm(smp_processor_id());
	}
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);

	preempt_enable();
}

#define FLUSHALL_BAR	16
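
/*
 * Heuristic used in flush_tlb_range() below: once a range spans more
 * than 1/FLUSHALL_BAR of the usable TLB entries (capped by the mm's
 * total mapped pages), a full flush is assumed to be cheaper than
 * flushing page by page with INVLPG.
 */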
void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	struct mm_struct *mm;

	if (!cpu_has_invlpg || vma->vm_flags & VM_HUGETLB) {
		flush_tlb_mm(vma->vm_mm);
		return;
	}

	preempt_disable();
	mm = vma->vm_mm;
	if (current->active_mm == mm) {
		if (current->mm) {
			unsigned long addr, vmflag = vma->vm_flags;
			unsigned act_entries, tlb_entries = 0;

			if (vmflag & VM_EXEC)
				tlb_entries = tlb_lli_4k[ENTRIES];
			else
				tlb_entries = tlb_lld_4k[ENTRIES];

			act_entries = tlb_entries > mm->total_vm ?
					mm->total_vm : tlb_entries;

			if ((end - start)/PAGE_SIZE > act_entries/FLUSHALL_BAR)
				local_flush_tlb();
			else {
				for (addr = start; addr < end;
						addr += PAGE_SIZE)
					__flush_tlb_single(addr);

				if (cpumask_any_but(mm_cpumask(mm),
					smp_processor_id()) < nr_cpu_ids)
					flush_tlb_others(mm_cpumask(mm), mm,
								start, end);
				preempt_enable();
				return;
			}
		} else {
			leave_mm(smp_processor_id());
		}
	}
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
	preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(start);
		else
			leave_mm(smp_processor_id());
	}

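	/*
	 * flush_end == 0 tells the IPI handler that only the single page
	 * at 'start' needs to be flushed.
	 */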
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);

	preempt_enable();
}

static void do_flush_tlb_all(void *info)
{
	__flush_tlb_all();
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
		leave_mm(smp_processor_id());
}

void flush_tlb_all(void)
{
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}
Glauber Costac048fdf2008-03-03 14:12:54 -0300399}