#ifndef __UM_TLB_H
#define __UM_TLB_H

#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/percpu.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
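/*
 * The per-VMA hooks above are deliberately no-ops: rather than flushing as
 * each VMA is torn down, the mmu_gather below accumulates one [start, end)
 * range and flushes it in a single flush_tlb_mm_range() call; tlb_flush()
 * on a full-mm gather simply flushes the whole address space.
 */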

/* struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		need_flush; /* Really unmapped some ptes? */
	unsigned long		start;
	unsigned long		end;
	unsigned int		fullmm; /* non-zero means full mm flush */
};
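/*
 * Typical lifecycle, as driven by the generic mm code (a sketch only; the
 * real unmap loop lives in mm/memory.c):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, 0);		 (start gather, partial flush)
 *	tlb_remove_tlb_entry(&tlb, ptep, addr);	 (per-PTE: widen [start, end))
 *	tlb_remove_page(&tlb, page);		 (free the backing page)
 *	tlb_finish_mmu(&tlb, start, end);	 (flush the range, clean up)
 */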

static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
					  unsigned long address)
{
	if (tlb->start > address)
		tlb->start = address;
	if (tlb->end < address + PAGE_SIZE)
		tlb->end = address + PAGE_SIZE;
}
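/*
 * Example: with 4K pages, unmapping the pages at 0x1000 and then 0x5000
 * grows the range to start = 0x1000, end = 0x6000, so a single
 * flush_tlb_mm_range() call covers both, at the cost of also covering the
 * untouched pages in between.
 */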

static inline void init_tlb_gather(struct mmu_gather *tlb)
{
	tlb->need_flush = 0;

	tlb->start = TASK_SIZE;
	tlb->end = 0;

	if (tlb->fullmm) {
		tlb->start = 0;
		tlb->end = TASK_SIZE;
	}
}
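/*
 * start/end begin inverted (start = TASK_SIZE, end = 0) so that the first
 * __tlb_remove_tlb_entry() snaps both to the first unmapped page; a
 * full-mm gather skips the tracking and just covers all of [0, TASK_SIZE).
 */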

static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
{
	tlb->mm = mm;
	tlb->fullmm = full_mm_flush;

	init_tlb_gather(tlb);
}

extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
			       unsigned long end);

static inline void
tlb_flush_mmu(struct mmu_gather *tlb)
{
	if (!tlb->need_flush)
		return;

	flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
	init_tlb_gather(tlb);
}
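/*
 * tlb_flush_mmu() is where the accumulated range actually reaches
 * flush_tlb_mm_range(); re-running init_tlb_gather() afterwards lets the
 * same gather accumulate a further batch before tlb_finish_mmu().
 */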

/* tlb_finish_mmu
 *	Called at the end of the shootdown operation to free up any resources
 *	that were required.
 */
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	tlb_flush_mmu(tlb);

	/* keep the page table cache within bounds */
	check_pgt_cache();
}

/* tlb_remove_page
 *	Must perform the equivalent of __free_pte(pte_get_and_clear(ptep)),
 *	while handling the additional races in SMP caused by other CPUs
 *	caching valid mappings in their TLBs.
 */
static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->need_flush = 1;
	free_page_and_swap_cache(page);
	return 1; /* avoid calling tlb_flush_mmu */
}
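/*
 * Returning 1 tells the generic mmu_gather contract that the "batch" still
 * has room (UML never batches pages), so callers never need an early
 * tlb_flush_mmu() between removals; the page itself is freed immediately
 * above via free_page_and_swap_cache().
 */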

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	__tlb_remove_page(tlb, page);
}

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that ptes were really unmapped in ->need_flush, so we can
 * later optimise away the tlb invalidate.  This helps when userspace is
 * unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb->need_flush = 1;				\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)

#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)

#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
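/*
 * Page-table pages are handed straight to the corresponding
 * __p*_free_tlb() helpers rather than being batched; only the TLB range
 * tracked above has its flush deferred.
 */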

#define tlb_migrate_finish(mm) do {} while (0)

#endif