/*
 * This file contains the routines for TLB flushing.
 * On machines where the MMU does not use a hash table to store virtual to
 * physical translations (i.e., SW loaded TLBs or Book3E compliant
 * processors; this does -not- include 603, however, which shares the
 * implementation with hash based processors)
 *
 *	-- BenH
 *
 * Copyright 2008 Ben Herrenschmidt <benh@kernel.crashing.org>
 *                IBM Corp.
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>

#include <asm/tlbflush.h>
#include <asm/tlb.h>

#include "mmu_decl.h"

/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */
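
/*
 * Typical caller pattern (an illustrative sketch, not a call site in
 * this file; the real callers are the generic mm code and the ptep
 * helpers):
 *
 *	pte = ptep_get_and_clear(mm, addr, ptep);
 *	flush_tlb_page(vma, addr);
 *
 * or, when an entire address space is being torn down:
 *
 *	flush_tlb_mm(mm);
 */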

/*
 * These are the base non-SMP variants of page and mm flushing
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_pid(pid);
	preempt_enable();
}
EXPORT_SYMBOL(local_flush_tlb_mm);

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	unsigned int pid;

	preempt_disable();
	pid = vma ? vma->vm_mm->context.id : 0;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_va(vmaddr, pid);
	preempt_enable();
}
EXPORT_SYMBOL(local_flush_tlb_page);

/*
 * And here are the SMP non-local implementations
 */
#ifdef CONFIG_SMP

static DEFINE_SPINLOCK(tlbivax_lock);

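/*
 * Return true when the mm is only in use by threads of the current
 * core, in which case a purely local flush is sufficient and no IPI
 * or broadcast invalidation is needed.
 */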
static int mm_is_core_local(struct mm_struct *mm)
{
	return cpumask_subset(mm_cpumask(mm),
			      topology_thread_cpumask(smp_processor_id()));
}

struct tlb_flush_param {
	unsigned long addr;
	unsigned int pid;
};

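/*
 * IPI handlers. A NULL param means "flush the kernel context", which
 * is how flush_tlb_kernel_range() below uses do_flush_tlb_mm_ipi(),
 * hence the pid 0 fallback.
 */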
static void do_flush_tlb_mm_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_pid(p ? p->pid : 0);
}

static void do_flush_tlb_page_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_va(p->addr, p->pid);
}

/* Note on invalidations and PID:
 *
 * We snapshot the PID with preempt disabled. At this point, it can still
 * change because:
 * - our context is being stolen (PID -> NO_CONTEXT) on another CPU
 * - we are invalidating some target that isn't currently running here
 *   and is concurrently acquiring a new PID on another CPU
 * - some other CPU is re-acquiring a lost PID for this mm
 * etc...
 *
 * However, this shouldn't be a problem as we only guarantee
 * invalidation of TLB entries present prior to this call, so we
 * don't care about the PID changing, and invalidating a stale PID
 * is generally harmless.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;
	if (!mm_is_core_local(mm)) {
		struct tlb_flush_param p = { .pid = pid };
		/* Ignores smp_processor_id() even if set. */
		smp_call_function_many(mm_cpumask(mm),
				       do_flush_tlb_mm_ipi, &p, 1);
	}
	_tlbil_pid(pid);
 no_context:
	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);

void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	struct cpumask *cpu_mask;
	unsigned int pid;

	preempt_disable();
	pid = vma ? vma->vm_mm->context.id : 0;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto bail;
	cpu_mask = mm_cpumask(vma->vm_mm);
	if (!mm_is_core_local(vma->vm_mm)) {
		/* If broadcast tlbivax is supported, use it */
		if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
			int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
			if (lock)
				spin_lock(&tlbivax_lock);
			_tlbivax_bcast(vmaddr, pid);
			if (lock)
				spin_unlock(&tlbivax_lock);
			goto bail;
		} else {
			struct tlb_flush_param p = { .pid = pid, .addr = vmaddr };
			/* Ignores smp_processor_id() even if set in cpu_mask */
			smp_call_function_many(cpu_mask,
					       do_flush_tlb_page_ipi, &p, 1);
		}
	}
	_tlbil_va(vmaddr, pid);
 bail:
	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);

#endif /* CONFIG_SMP */

/*
 * Flush kernel TLB entries in the given range
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_SMP
	preempt_disable();
	smp_call_function(do_flush_tlb_mm_ipi, NULL, 1);
	_tlbil_pid(0);
	preempt_enable();
#else
	_tlbil_pid(0);
#endif
}
EXPORT_SYMBOL(flush_tlb_kernel_range);
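
/*
 * Example (illustrative only; the real callers live in generic mm code,
 * e.g. unmap_kernel_range() in mm/vmalloc.c): after the kernel page
 * tables for a vmalloc or ioremap area have been torn down, something
 * like
 *
 *	flush_tlb_kernel_range(addr, addr + size);
 *
 * is issued so that no CPU keeps a stale translation for that range.
 */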

/*
 * Currently, for range flushing, we just do a full mm flush. This should
 * be optimized based on a threshold on the size of the range, since some
 * implementations can stack multiple tlbivax before a tlbsync, but for
 * now we keep it that way (a possible shape of such a thresholded
 * variant is sketched after the function below).
 */
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);
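
/*
 * A possible shape for the threshold optimization mentioned above (an
 * illustrative, untested sketch only; FLUSH_RANGE_THRESHOLD is a
 * hypothetical page-count cutoff that would need benchmarking before
 * anything like this is adopted):
 *
 *	void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 *			     unsigned long end)
 *	{
 *		if (((end - start) >> PAGE_SHIFT) > FLUSH_RANGE_THRESHOLD) {
 *			flush_tlb_mm(vma->vm_mm);
 *			return;
 *		}
 *		for (start &= PAGE_MASK; start < end; start += PAGE_SIZE)
 *			flush_tlb_page(vma, start);
 *	}
 */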