/* include/asm-generic/tlb.h
 *
 * Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c  Copyright Linus Torvalds and others.
 *
 * Copyright 2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * For UP we don't need to batch page frees or order them against the
 * TLB flush: no other CPU can be caching a stale translation for a page
 * we are about to free. tlb->nr == ~0U marks this "fast mode".
 */
#ifdef CONFIG_SMP
#define tlb_fast_mode(tlb) ((tlb)->nr == ~0U)
#else
#define tlb_fast_mode(tlb) 1
#endif

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
/*
 * Semi RCU freeing of the page directories.
 *
 * This is needed by some architectures to implement software pagetable walkers.
 *
 * gup_fast() and other software pagetable walkers do a lockless page-table
 * walk and therefore need some synchronization with the freeing of the page
 * directories. The chosen means to accomplish that is by disabling IRQs over
 * the walk.
 *
 * Architectures that use IPIs to flush TLBs will then automagically DTRT,
 * since we unlink the page, flush TLBs, then free the page. Because the
 * disabling of IRQs delays the completion of the TLB flush, we can never
 * observe an already freed page.
 *
 * Architectures that do not have this (PPC) need to delay the freeing by
 * some other means; this is that means.
 *
 * What we do is batch the freed directory pages (tables) and RCU free them.
 * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
 * holds off grace periods.
 *
 * However, in order to batch these pages we need to allocate storage; this
 * allocation is deep inside the MM code and can thus easily fail on memory
 * pressure. To guarantee progress we fall back to single table freeing, see
 * the implementation of tlb_remove_table_one().
 */
struct mmu_table_batch {
	struct rcu_head rcu;
	unsigned int nr;
	void *tables[0];
};

#define MAX_TABLE_BATCH \
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#endif
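
/*
 * A minimal sketch of the walker side of the contract described above
 * (hypothetical code, not part of this header): disabling IRQs over the
 * lockless walk either blocks the TLB-flush IPI or, under
 * CONFIG_HAVE_RCU_TABLE_FREE, holds off the RCU-sched grace period, so no
 * page directory observed during the walk can be freed underneath us.
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	... walk the pgd/pud/pmd/pte levels without taking mm->mmap_sem ...
 *	local_irq_restore(flags);
 */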

/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define MMU_GATHER_BUNDLE 8

/* struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct *mm;
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	struct mmu_table_batch *batch;
#endif
	unsigned int nr;	/* set to ~0U means fast mode */
	unsigned int max;	/* nr < max */
	unsigned int need_flush;	/* Really unmapped some ptes? */
	unsigned int fullmm;	/* non-zero means full mm flush */
	struct page **pages;
	struct page *local[MMU_GATHER_BUNDLE];
};

/*
 * Try to allocate an off-stack page to hold the batch of page pointers;
 * on failure the gather keeps using the small on-stack array.
 */
static inline void __tlb_alloc_page(struct mmu_gather *tlb)
{
	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);

	if (addr) {
		tlb->pages = (void *)addr;
		tlb->max = PAGE_SIZE / sizeof(struct page *);
	}
}

/* tlb_gather_mmu
 *	Called to initialize an (on-stack) mmu_gather structure for page-table
 *	tear-down from @mm. The @fullmm argument is used when @mm is without
 *	users and we're going to destroy the full address space (exit/execve).
 */
static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
{
	tlb->mm = mm;

	tlb->max = ARRAY_SIZE(tlb->local);
	tlb->pages = tlb->local;

	if (num_online_cpus() > 1) {
		tlb->nr = 0;
		__tlb_alloc_page(tlb);
	} else /* Use fast mode if only one CPU is online */
		tlb->nr = ~0U;

	tlb->fullmm = fullmm;

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	tlb->batch = NULL;
#endif
}

static inline void
tlb_flush_mmu(struct mmu_gather *tlb)
{
	if (!tlb->need_flush)
		return;
	tlb->need_flush = 0;
	tlb_flush(tlb);
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	tlb_table_flush(tlb);
#endif
	if (!tlb_fast_mode(tlb)) {
		free_pages_and_swap_cache(tlb->pages, tlb->nr);
		tlb->nr = 0;
		/*
		 * If we are using the local on-stack array of pages for MMU
		 * gather, try allocating an off-stack array again as we have
		 * recently freed pages.
		 */
		if (tlb->pages == tlb->local)
			__tlb_alloc_page(tlb);
	}
}

/* tlb_finish_mmu
 *	Called at the end of the shootdown operation to free up any resources
 *	that were required.
 */
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	tlb_flush_mmu(tlb);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	if (tlb->pages != tlb->local)
		free_pages((unsigned long)tlb->pages, 0);
}
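
/*
 * A minimal usage sketch of the gather/flush/finish lifecycle (hypothetical;
 * the real zap paths live in mm/memory.c and mm/mmap.c):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, 0);
 *	for each present pte in [start, end):
 *		tlb_remove_tlb_entry(&tlb, ptep, addr);
 *		tlb_remove_page(&tlb, page);	(both defined below)
 *	tlb_finish_mmu(&tlb, start, end);
 */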

/* __tlb_remove_page
 *	Must perform the equivalent of __free_pte(pte_get_and_clear(ptep)),
 *	while handling the additional races in SMP caused by other CPUs
 *	caching valid mappings in their TLBs. Returns the number of free
 *	page slots left. When out of page slots we must call tlb_flush_mmu().
 */
static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->need_flush = 1;
	if (tlb_fast_mode(tlb)) {
		free_page_and_swap_cache(page);
		return 1; /* avoid calling tlb_flush_mmu() */
	}
	tlb->pages[tlb->nr++] = page;
	VM_BUG_ON(tlb->nr > tlb->max);

	return tlb->max - tlb->nr;
}

/* tlb_remove_page
 *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
 *	required.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (!__tlb_remove_page(tlb, page))
		tlb_flush_mmu(tlb);
}

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that ptes were really unmapped in ->need_flush, so we can
 * later optimise away the tlb invalidate. This helps when userspace is
 * unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb->need_flush = 1;				\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)
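
/*
 * __tlb_remove_tlb_entry() is supplied by the architecture. Architectures
 * that can only flush the whole mm typically make it a no-op, while ones
 * that can invalidate individual entries use @address to narrow the flush.
 * A no-op variant would look like (a sketch, not mandated by this header):
 *
 *	#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
 */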

#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		tlb->need_flush = 1;				\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)

#ifndef __ARCH_HAS_4LEVEL_HACK
#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		tlb->need_flush = 1;				\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif

#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		tlb->need_flush = 1;				\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)
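
/*
 * A rough sketch (hypothetical; the real code is free_pgd_range() in
 * mm/memory.c) of how these nest during page-table tear-down, freeing
 * the tree from the bottom up:
 *
 *	pte_free_tlb(tlb, pte, addr);	for the ptes under each pmd,
 *	pmd_free_tlb(tlb, pmd, addr);	then the pmds under each pud,
 *	pud_free_tlb(tlb, pud, addr);	then the puds under the pgd.
 */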

#define tlb_migrate_finish(mm) do {} while (0)

#endif /* _ASM_GENERIC__TLB_H */