/* include/asm-generic/tlb.h
 *
 *	Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
 *
 * Copyright 2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
/*
 * Semi RCU freeing of the page directories.
 *
 * This is needed by some architectures to implement software pagetable walkers.
 *
 * gup_fast() and other software pagetable walkers do a lockless page-table
 * walk and therefore need some synchronization with the freeing of the page
 * directories. The chosen means to accomplish that is by disabling IRQs over
 * the walk.
 *
 * Architectures that use IPIs to flush TLBs will then automagically DTRT,
 * since we unlink the page, flush TLBs, free the page. Since the disabling of
 * IRQs delays the completion of the TLB flush we can never observe an already
 * freed page.
 *
 * Architectures that do not have this (PPC) need to delay the freeing by some
 * other means; this is that means.
 *
 * What we do is batch the freed directory pages (tables) and RCU free them.
 * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
 * holds off grace periods.
 *
 * However, in order to batch these pages we need to allocate storage; this
 * allocation is deep inside the MM code and can thus easily fail on memory
 * pressure. To guarantee progress we fall back to single table freeing, see
 * the implementation of tlb_remove_table_one().
 */
struct mmu_table_batch {
	struct rcu_head		rcu;
	unsigned int		nr;
	void			*tables[0];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
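
/*
 * For a sense of scale: on a typical 64-bit configuration with 4 KiB pages,
 * sizeof(struct mmu_table_batch) is 24 bytes (a 16-byte rcu_head plus a
 * 4-byte counter padded to pointer alignment), so MAX_TABLE_BATCH works out
 * to (4096 - 24) / 8 = 509 table pointers per batch page. The exact value
 * depends on the architecture's PAGE_SIZE and struct layout.
 */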

extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#endif
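
/*
 * For example, an architecture that selects CONFIG_HAVE_RCU_TABLE_FREE is
 * expected to route page-table freeing through tlb_remove_table() rather
 * than freeing directly; a hypothetical arch hook could look like:
 *
 *	#define __pte_free_tlb(tlb, ptep, address)	\
 *		tlb_remove_table((tlb), (ptep))
 *
 * The gathered tables are then freed only after a sched-RCU grace period,
 * so a concurrent gup_fast() running with IRQs disabled can never observe
 * a freed table.
 */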

/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define MMU_GATHER_BUNDLE	8

struct mmu_gather_batch {
	struct mmu_gather_batch	*next;
	unsigned int		nr;
	unsigned int		max;
	struct page		*pages[0];
};

#define MAX_GATHER_BATCH	\
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))
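
/*
 * Likewise, for illustration: with 4 KiB pages and 8-byte pointers,
 * sizeof(struct mmu_gather_batch) is 16 bytes (one pointer plus two
 * counters), so each overflow page holds (4096 - 16) / 8 = 510 struct
 * page pointers.
 */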

/* struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch-specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	struct mmu_table_batch	*batch;
#endif
	unsigned int		need_flush : 1,	/* Did free PTEs */
				fast_mode  : 1; /* No batching   */

	unsigned int		fullmm;		/* Tearing down the whole mm */

	struct mmu_gather_batch *active;	/* batch currently being filled */
	struct mmu_gather_batch	local;		/* always-available first batch */
	struct page		*__pages[MMU_GATHER_BUNDLE];	/* storage for 'local' */
};

#define HAVE_GENERIC_MMU_GATHER

static inline int tlb_fast_mode(struct mmu_gather *tlb)
{
#ifdef CONFIG_SMP
	return tlb->fast_mode;
#else
	/*
	 * For UP we don't need to worry so much about the ordering
	 * of TLB flushes and page frees.
	 */
	return 1;
#endif
}

void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm);
void tlb_flush_mmu(struct mmu_gather *tlb);
void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end);
int __tlb_remove_page(struct mmu_gather *tlb, struct page *page);

/* tlb_remove_page
 *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
 *	required.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (!__tlb_remove_page(tlb, page))
		tlb_flush_mmu(tlb);
}
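
/*
 * A minimal usage sketch (the real callers live in mm/memory.c; names and
 * ordering here are illustrative only):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, 0);
 *	for each pte in [start, end) {
 *		pteval = ptep_get_and_clear(mm, addr, ptep);
 *		tlb_remove_tlb_entry(&tlb, ptep, addr);
 *		tlb_remove_page(&tlb, pte_page(pteval));
 *	}
 *	tlb_finish_mmu(&tlb, start, end);
 *
 * tlb_remove_page() flushes the TLB and frees the gathered pages whenever
 * a batch fills up; tlb_finish_mmu() does the final flush and teardown.
 */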

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that PTEs were really unmapped in ->need_flush, so we can
 * later optimise away the tlb invalidate.  This helps when userspace is
 * unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb->need_flush = 1;				\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

/**
 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
 * This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)		\
	do {							\
		tlb->need_flush = 1;				\
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);	\
	} while (0)
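
/*
 * The pmd variant serves transparent hugepages: zapping a huge pmd (see
 * zap_huge_pmd() in mm/huge_memory.c) clears the entry and then records it
 * here, much as the pte loop sketched above pairs ptep_get_and_clear()
 * with tlb_remove_tlb_entry().
 */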

#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		tlb->need_flush = 1;				\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)
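
/*
 * By way of contrast with the RCU scheme above: on an architecture whose
 * IPI-based TLB flush already serializes against gup_fast(), a plausible
 * minimal hook (hypothetical; each arch supplies its own) could batch the
 * table page with the other gathered pages, so it is freed only after the
 * flush:
 *
 *	#define __pte_free_tlb(tlb, pte, address)	\
 *		tlb_remove_page((tlb), (pte))
 */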

#ifndef __ARCH_HAS_4LEVEL_HACK
#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		tlb->need_flush = 1;				\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif

#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		tlb->need_flush = 1;				\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)

#define tlb_migrate_finish(mm) do {} while (0)

#endif /* _ASM_GENERIC__TLB_H */