/*
 * This file contains the routines for flushing entries from the
 * TLB and MMU hash table.
 *
 *  Derived from arch/ppc64/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/bug.h>

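/* Per-cpu batch of pending hash-table invalidations, filled by
 * hpte_need_flush() and flushed out by __flush_tlb_pending().
 */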
DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

/* This is declared as we are using the more or less generic
 * include/asm-powerpc/tlb.h file -- tgall
 */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
unsigned long pte_freelist_forced_free;

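/* One page worth of page-table pages queued for deferred freeing via RCU;
 * tables[] is a variable-length array filling the rest of the page.
 */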
struct pte_freelist_batch
{
	struct rcu_head	rcu;
	unsigned int	index;
	pgtable_free_t	tables[0];
};
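/* Number of pgtable_free_t entries that fit in the remainder of the page
 * holding a struct pte_freelist_batch.
 */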
#define PTE_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
	  / sizeof(pgtable_free_t))

static void pte_free_smp_sync(void *arg)
{
	/* Do nothing, just ensure we sync with all CPUs */
}

/* This is only called when we are critically out of memory
 * (and fail to get a page in pte_free_tlb).
 */
static void pgtable_free_now(pgtable_free_t pgf)
{
	pte_freelist_forced_free++;

	smp_call_function(pte_free_smp_sync, NULL, 0, 1);

	pgtable_free(pgf);
}

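/* RCU callback: a grace period has elapsed, so no CPU can still be using
 * the page tables queued in this batch. Free each deferred page-table
 * page, then the page holding the batch itself.
 */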
static void pte_free_rcu_callback(struct rcu_head *head)
{
	struct pte_freelist_batch *batch =
		container_of(head, struct pte_freelist_batch, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		pgtable_free(batch->tables[i]);

	free_page((unsigned long)batch);
}

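/* Hand a batch over to RCU; pte_free_rcu_callback() will release its
 * contents once a grace period has passed.
 */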
static void pte_free_submit(struct pte_freelist_batch *batch)
{
	INIT_RCU_HEAD(&batch->rcu);
	call_rcu(&batch->rcu, pte_free_rcu_callback);
}

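/* Queue a page-table page for freeing. If this mm can only be live on
 * the current CPU (a single user, or cpu_vm_mask is just us), it is safe
 * to free the page right away; otherwise defer it to the per-cpu RCU
 * batch so other CPUs have finished any concurrent walks of these tables
 * before the memory is reused.
 */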
void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
{
	/* This is safe since tlb_gather_mmu has disabled preemption */
	cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
		pgtable_free(pgf);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
		if (*batchp == NULL) {
			pgtable_free_now(pgf);
			return;
		}
		(*batchp)->index = 0;
	}
	(*batchp)->tables[(*batchp)->index++] = pgf;
	if ((*batchp)->index == PTE_FREELIST_SIZE) {
		pte_free_submit(*batchp);
		*batchp = NULL;
	}
}

/*
 * A linux PTE was changed and the corresponding hash table entry
 * needs to be flushed. This function will either perform the flush
 * immediately or will batch it up if the current CPU has an active
 * batch on it.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, unsigned long pte, int huge)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	unsigned long vsid, vaddr;
	unsigned int psize;
	int ssize;
	real_pte_t rpte;
	int i;

	i = batch->index;

	/* We mask the address for the base page size. Huge pages will
	 * have applied their own masking already
	 */
	addr &= PAGE_MASK;

	/* Get page size (maybe move back to caller).
	 *
	 * NOTE: when using special 64K mappings in 4K environment like
	 * for SPEs, we obtain the page size from the slice, which thus
	 * must still exist (and thus the VMA not reused) at the time
	 * of this call
	 */
	if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
		psize = mmu_huge_psize;
#else
		BUG();
		psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
#endif
	} else
		psize = pte_pagesize_index(mm, addr, pte);

	/* Build full vaddr */
	if (!is_kernel_addr(addr)) {
		ssize = user_segment_size(addr);
		vsid = get_vsid(mm->context.id, addr, ssize);
		WARN_ON(vsid == 0);
	} else {
		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
		ssize = mmu_kernel_ssize;
	}
	vaddr = hpt_va(addr, vsid, ssize);
	rpte = __real_pte(__pte(pte), ptep);

	/*
	 * Check if we have an active batch on this CPU. If not, just
	 * flush now and return. For now, we do global invalidates
	 * in that case, might be worth testing the mm cpu mask though
	 * and decide to use local invalidates instead...
	 */
	if (!batch->active) {
		flush_hash_page(vaddr, rpte, psize, ssize, 0);
		return;
	}

	/*
	 * This can happen when we are in the middle of a TLB batch and
	 * we encounter memory pressure (eg copy_page_range when it tries
	 * to allocate a new pte). If we have to reclaim memory and end
	 * up scanning and resetting referenced bits then our batch context
	 * will change mid stream.
	 *
	 * We also need to ensure only one page size is present in a given
	 * batch
	 */
	if (i != 0 && (mm != batch->mm || batch->psize != psize ||
		       batch->ssize != ssize)) {
		__flush_tlb_pending(batch);
		i = 0;
	}
	if (i == 0) {
		batch->mm = mm;
		batch->psize = psize;
		batch->ssize = ssize;
	}
	batch->pte[i] = rpte;
	batch->vaddr[i] = vaddr;
	batch->index = ++i;
	if (i >= PPC64_TLB_BATCH_NR)
		__flush_tlb_pending(batch);
}

/*
 * This function is called when terminating an mmu batch or when a batch
 * is full. It will perform the flush of all the entries currently stored
 * in a batch.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
{
	cpumask_t tmp;
	int i, local = 0;

	i = batch->index;
	tmp = cpumask_of_cpu(smp_processor_id());
	if (cpus_equal(batch->mm->cpu_vm_mask, tmp))
		local = 1;
	if (i == 1)
		flush_hash_page(batch->vaddr[0], batch->pte[0],
				batch->psize, batch->ssize, local);
	else
		flush_hash_range(i, local);
	batch->index = 0;
}

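/* Submit any partially filled per-cpu freelist batch to RCU, typically
 * when an mmu_gather batch is being wound up.
 */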
void pte_free_finish(void)
{
	/* This is safe since tlb_gather_mmu has disabled preemption */
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

	if (*batchp == NULL)
		return;
	pte_free_submit(*batchp);
	*batchp = NULL;
}

/**
 * __flush_hash_table_range - Flush all HPTEs for a given address range
 *                            from the hash table (and the TLB). But keeps
 *                            the linux PTEs intact.
 *
 * @mm		: mm_struct of the target address space (generally init_mm)
 * @start	: starting address
 * @end		: ending address (not included in the flush)
 *
 * This function is mostly to be used by some IO hotplug code in order
 * to remove all hash entries from a given address range used to map IO
 * space on a removed PCI-PCI bridge without tearing down the full mapping
 * since 64K pages may overlap with other bridges when using 64K pages
 * with 4K HW pages on IO space.
 *
 * Because of that usage pattern, it's only available with CONFIG_HOTPLUG
 * and is implemented for small size rather than speed.
 */
#ifdef CONFIG_HOTPLUG

void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
			      unsigned long end)
{
	unsigned long flags;

	start = _ALIGN_DOWN(start, PAGE_SIZE);
	end = _ALIGN_UP(end, PAGE_SIZE);

	BUG_ON(!mm->pgd);

	/* Note: Normally, we should only ever use a batch within a
	 * PTE locked section. This violates the rule, but will work
	 * since we don't actually modify the PTEs, we just flush the
	 * hash while leaving the PTEs intact (including their reference
	 * to being hashed). This is not the most performance oriented
	 * way to do things but is fine for our needs here.
	 */
	local_irq_save(flags);
	arch_enter_lazy_mmu_mode();
	for (; start < end; start += PAGE_SIZE) {
		pte_t *ptep = find_linux_pte(mm->pgd, start);
		unsigned long pte;

		if (ptep == NULL)
			continue;
		pte = pte_val(*ptep);
		if (!(pte & _PAGE_HASHPTE))
			continue;
		hpte_need_flush(mm, start, ptep, pte, 0);
	}
	arch_leave_lazy_mmu_mode();
	local_irq_restore(flags);
}

#endif /* CONFIG_HOTPLUG */