#ifndef __ASM_SH_TLB_H
#define __ASM_SH_TLB_H

#ifdef CONFIG_SUPERH64
# include "tlb_64.h"
#endif

#ifndef __ASSEMBLY__
#include <linux/pagemap.h>

#ifdef CONFIG_MMU
#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

/*
 * TLB handling.  This allows us to remove pages from the page
 * tables, and efficiently handle the TLB issues.
 */
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		fullmm;
	unsigned long		start, end;
};

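/*
 * Reset the gathered range.  A partial unmap starts out with an empty
 * range (start > end) that tlb_remove_tlb_entry() grows; a full mm
 * flush covers the whole user address space from the outset.
 */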
static inline void init_tlb_gather(struct mmu_gather *tlb)
{
	tlb->start = TASK_SIZE;
	tlb->end = 0;

	if (tlb->fullmm) {
		tlb->start = 0;
		tlb->end = TASK_SIZE;
	}
}

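/*
 * Begin a gather sequence: record the mm being torn down and whether
 * this is a full-mm flush, then reset the tracked range.
 */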
static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
{
	tlb->mm = mm;
	tlb->fullmm = full_mm_flush;

	init_tlb_gather(tlb);
}

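/*
 * Finish the gather sequence.  A full-mm teardown is flushed in one go
 * here; partial ranges were already flushed per-VMA by tlb_end_vma(),
 * so the start/end arguments are unused.
 */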
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	if (tlb->fullmm)
		flush_tlb_mm(tlb->mm);

	/* keep the page table cache within bounds */
	check_pgt_cache();
}

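/*
 * Note that the PTE at @address has been cleared, widening the tracked
 * range so tlb_end_vma() knows how much to flush.
 */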
static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
{
	if (tlb->start > address)
		tlb->start = address;
	if (tlb->end < address + PAGE_SIZE)
		tlb->end = address + PAGE_SIZE;
}

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush.  When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm)
		flush_cache_range(vma, vma->vm_start, vma->vm_end);
}

static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm && tlb->end) {
		flush_tlb_range(vma, tlb->start, tlb->end);
		init_tlb_gather(tlb);
	}
}

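/*
 * Nothing to do: pages are freed immediately by __tlb_remove_page(),
 * so there is never a deferred batch to flush.
 */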
static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
}

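/*
 * Free the page (and any associated swap cache entry) right away.
 * Returning 1 signals that there is still room to gather more pages,
 * which keeps callers from ever invoking tlb_flush_mmu().
 */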
static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	free_page_and_swap_cache(page);
	return 1; /* avoid calling tlb_flush_mmu */
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	__tlb_remove_page(tlb, page);
}

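/* Page table pages are freed immediately rather than batched. */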
#define pte_free_tlb(tlb, ptep, addr)	pte_free((tlb)->mm, ptep)
#define pmd_free_tlb(tlb, pmdp, addr)	pmd_free((tlb)->mm, pmdp)
#define pud_free_tlb(tlb, pudp, addr)	pud_free((tlb)->mm, pudp)

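/* No architecture-specific work is needed when a task migrates CPUs. */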
#define tlb_migrate_finish(mm)		do { } while (0)

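/*
 * SH-4 and SH-5 can pin ("wire") a TLB entry so that a translation is
 * never evicted; other parts lack the facility, so any attempt is a bug.
 */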
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SUPERH64)
extern void tlb_wire_entry(struct vm_area_struct *, unsigned long, pte_t);
extern void tlb_unwire_entry(void);
#else
static inline void tlb_wire_entry(struct vm_area_struct *vma,
				  unsigned long addr, pte_t pte)
{
	BUG();
}

static inline void tlb_unwire_entry(void)
{
	BUG();
}
#endif

#else /* CONFIG_MMU */

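/*
 * Without an MMU there is nothing to flush: stub out the hooks and fall
 * back to the generic mmu_gather implementation.
 */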
#define tlb_start_vma(tlb, vma)				do { } while (0)
#define tlb_end_vma(tlb, vma)				do { } while (0)
#define __tlb_remove_tlb_entry(tlb, pte, address)	do { } while (0)
#define tlb_flush(tlb)					do { } while (0)

#include <asm-generic/tlb.h>

#endif /* CONFIG_MMU */
#endif /* __ASSEMBLY__ */
#endif /* __ASM_SH_TLB_H */