/*
 * This file contains the routines for flushing entries from the
 * TLB and MMU hash table.
 *
 *  Derived from arch/ppc64/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/bug.h>

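/*
 * Per-cpu batch of pending hash table invalidations.  The struct
 * ppc64_tlb_batch (declared in asm/tlbflush.h) collects up to
 * PPC64_TLB_BATCH_NR (vaddr, real pte) pairs along with the owning mm
 * and the page size of the entries, so a whole batch can be flushed
 * with a single flush_hash_range() call.
 */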
DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

/* This is declared as we are using the more or less generic
 * include/asm-ppc64/tlb.h file -- tgall
 */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
struct pte_freelist_batch
{
	struct rcu_head	rcu;
	unsigned int	index;
	pgtable_free_t	tables[0];
};

DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
unsigned long pte_freelist_forced_free;

#define PTE_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
	  / sizeof(pgtable_free_t))
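
/*
 * A rough sizing example (a sketch, assuming 4K pages, a 64-bit build
 * and pgtable_free_t being a single unsigned long): the header is a
 * struct rcu_head (two pointers) plus the padded index, about 24 bytes,
 * so PTE_FREELIST_SIZE comes to (4096 - 24) / 8 = 509 entries per
 * batch page.
 */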

#ifdef CONFIG_SMP
static void pte_free_smp_sync(void *arg)
{
	/* Do nothing, just ensure we sync with all CPUs */
}
#endif

/* This is only called when we are critically out of memory
 * (and fail to get a page in pte_free_tlb).  Waiting for the empty
 * IPI to complete on every other CPU guarantees that none of them
 * is still inside an interrupts-off section that could be walking
 * the page table we are about to free.
 */
static void pgtable_free_now(pgtable_free_t pgf)
{
	pte_freelist_forced_free++;

	smp_call_function(pte_free_smp_sync, NULL, 0, 1);

	pgtable_free(pgf);
}

static void pte_free_rcu_callback(struct rcu_head *head)
{
	struct pte_freelist_batch *batch =
		container_of(head, struct pte_freelist_batch, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		pgtable_free(batch->tables[i]);

	free_page((unsigned long)batch);
}

static void pte_free_submit(struct pte_freelist_batch *batch)
{
	INIT_RCU_HEAD(&batch->rcu);
	call_rcu(&batch->rcu, pte_free_rcu_callback);
}

void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
{
	/* This is safe since tlb_gather_mmu has disabled preemption */
	cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
		pgtable_free(pgf);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
		if (*batchp == NULL) {
			pgtable_free_now(pgf);
			return;
		}
		(*batchp)->index = 0;
	}
	(*batchp)->tables[(*batchp)->index++] = pgf;
	if ((*batchp)->index == PTE_FREELIST_SIZE) {
		pte_free_submit(*batchp);
		*batchp = NULL;
	}
}
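
/*
 * Summary of pgtable_free_tlb() above: a page table page freed by an
 * mm with a single user, or one whose cpu_vm_mask is just this CPU,
 * cannot be under a concurrent walk and is freed immediately.
 * Otherwise it is queued in the per-cpu RCU batch (allocated on
 * demand); if the GFP_ATOMIC allocation fails we fall back to the
 * synchronous pgtable_free_now(), and a full batch of
 * PTE_FREELIST_SIZE entries is handed to RCU straight away.
 */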

/*
 * Update the MMU hash table to correspond with a change to
 * a Linux PTE.  The change is queued in the per-cpu batch; when the
 * stale HPTE is removed, the _PAGE_HPTEFLAGS bits in the Linux PTE
 * should be cleared as well.
 */
void hpte_update(struct mm_struct *mm, unsigned long addr,
		 pte_t *ptep, unsigned long pte, int huge)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	unsigned long vsid;
	unsigned int psize = mmu_virtual_psize;
	int i;

	i = batch->index;

	/* We mask the address for the base page size.  Huge pages will
	 * have applied their own masking already.
	 */
	addr &= PAGE_MASK;

	/* Get the page size (maybe move back to caller) */
	if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
		psize = mmu_huge_psize;
#else
		BUG();
#endif
	}

	/*
	 * This can happen when we are in the middle of a TLB batch and
	 * we encounter memory pressure (e.g. copy_page_range when it tries
	 * to allocate a new pte).  If we have to reclaim memory and end
	 * up scanning and resetting referenced bits then our batch context
	 * will change mid-stream.
	 *
	 * We also need to ensure that only one page size is present in a
	 * given batch.
	 */
	if (i != 0 && (mm != batch->mm || batch->psize != psize)) {
		flush_tlb_pending();
		i = 0;
	}
	if (i == 0) {
		batch->mm = mm;
		batch->psize = psize;
	}
	if (!is_kernel_addr(addr)) {
		vsid = get_vsid(mm->context.id, addr);
		WARN_ON(vsid == 0);
	} else
		vsid = get_kernel_vsid(addr);
	batch->vaddr[i] = (vsid << 28) | (addr & 0x0fffffff);
	batch->pte[i] = __real_pte(__pte(pte), ptep);
	batch->index = ++i;
	if (i >= PPC64_TLB_BATCH_NR)
		flush_tlb_pending();
}
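
/*
 * Callers invoke hpte_update() after modifying a Linux PTE that had
 * _PAGE_HASHPTE set.  A sketch of the pattern (a paraphrase of
 * ptep_get_and_clear() from asm/pgtable.h of this era, not the
 * verbatim source):
 *
 *	old = pte_update(ptep, ~0UL);
 *	if (old & _PAGE_HASHPTE)
 *		hpte_update(mm, addr, ptep, old, 0);
 *
 * The queued entries are invalidated by __flush_tlb_pending() below,
 * either when the batch fills up or when its mm or page size changes.
 */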

void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
{
	int i;
	int cpu;
	cpumask_t tmp;
	int local = 0;

	BUG_ON(in_interrupt());

	cpu = get_cpu();
	i = batch->index;
	tmp = cpumask_of_cpu(cpu);
	if (cpus_equal(batch->mm->cpu_vm_mask, tmp))
		local = 1;

	if (i == 1)
		flush_hash_page(batch->vaddr[0], batch->pte[0],
				batch->psize, local);
	else
		flush_hash_range(i, local);
	batch->index = 0;
	put_cpu();
}
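
/*
 * The usual entry point is the flush_tlb_pending() wrapper used above,
 * which (assuming the definition in asm/tlbflush.h of this era; a
 * sketch, not the verbatim source) grabs this CPU's batch and flushes
 * it if non-empty:
 *
 *	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
 *
 *	if (batch->index)
 *		__flush_tlb_pending(batch);
 *	put_cpu_var(ppc64_tlb_batch);
 */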

void pte_free_finish(void)
{
	/* This is safe since tlb_gather_mmu has disabled preemption */
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

	if (*batchp == NULL)
		return;
	pte_free_submit(*batchp);
	*batchp = NULL;
}
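
/*
 * How this ties together at mm teardown (a sketch, assuming the
 * tlb_flush() definition in include/asm-ppc64/tlb.h of this era):
 *
 *	static inline void tlb_flush(struct mmu_gather *tlb)
 *	{
 *		struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
 *
 *		if (batch->index)
 *			__flush_tlb_pending(batch);
 *		pte_free_finish();
 *	}
 *
 * i.e. any pending hash invalidations are flushed before the freed
 * page tables are submitted to RCU, so no CPU is left holding a stale
 * translation into a page that is about to be reused.
 */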