#ifndef __UM_TLB_H
#define __UM_TLB_H

#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/percpu.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

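/*
 * UML tracks one flush range per mm rather than per vma, so the per-vma
 * hooks are no-ops and the fallback tlb_flush flushes the whole mm.
 */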
#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)

/* struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		need_flush; /* Really unmapped some ptes? */
	unsigned long		start;
	unsigned long		end;
	unsigned int		fullmm; /* non-zero means full mm flush */
};

/* Users of the generic TLB shootdown code must declare this storage space. */
DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);

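/* Grow the gathered [start, end) range to cover the page at @address. */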
static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
					  unsigned long address)
{
	if (tlb->start > address)
		tlb->start = address;
	if (tlb->end < address + PAGE_SIZE)
		tlb->end = address + PAGE_SIZE;
}

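/*
 * Reset the gather to an empty (inverted) range.  A full-mm flush is
 * primed with the entire address space instead.
 */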
static inline void init_tlb_gather(struct mmu_gather *tlb)
{
	tlb->need_flush = 0;

	tlb->start = TASK_SIZE;
	tlb->end = 0;

	if (tlb->fullmm) {
		tlb->start = 0;
		tlb->end = TASK_SIZE;
	}
}

/* tlb_gather_mmu
 *	Return a pointer to an initialized struct mmu_gather.
 */
static inline struct mmu_gather *
tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
{
	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);

	tlb->mm = mm;
	tlb->fullmm = full_mm_flush;

	init_tlb_gather(tlb);

	return tlb;
}
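
/*
 * Roughly how the generic mm code is expected to drive this interface
 * (an illustrative sketch only, not a verbatim caller):
 *
 *	tlb = tlb_gather_mmu(mm, 0);
 *	for each pte being unmapped {
 *		tlb_remove_tlb_entry(tlb, ptep, address);
 *		tlb_remove_page(tlb, page);
 *	}
 *	tlb_finish_mmu(tlb, start, end);
 */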

extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
			       unsigned long end);

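/*
 * Flush the gathered range if any ptes were actually unmapped, then
 * reset the gather for reuse.
 */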
static inline void
tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	if (!tlb->need_flush)
		return;

	flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
	init_tlb_gather(tlb);
}

/* tlb_finish_mmu
 *	Called at the end of the shootdown operation to free up any resources
 *	that were required.
 */
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	tlb_flush_mmu(tlb, start, end);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	put_cpu_var(mmu_gathers);
}

/* tlb_remove_page
 *	Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
 *	while handling the additional races in SMP caused by other CPUs
 *	caching valid mappings in their TLBs.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->need_flush = 1;
	free_page_and_swap_cache(page);
}

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that ptes were really unmapped in ->need_flush, so we can
 * later optimise away the tlb invalidate.  This helps when userspace is
 * unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb->need_flush = 1;				\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

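/* Page table pages are freed via the arch __*_free_tlb() helpers. */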
#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)

#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)

#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)

#define tlb_migrate_finish(mm) do { } while (0)

#endif