#ifndef _ALPHA_TLBFLUSH_H
#define _ALPHA_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/compiler.h>
#include <asm/pgalloc.h>

#ifndef __EXTERN_INLINE
#define __EXTERN_INLINE extern inline
#define __MMU_EXTERN_INLINE
#endif

extern void __load_new_mm_context(struct mm_struct *);
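
/* Defined out of line; it switches this CPU to MM's context,
   allocating a fresh ASN where the CPU supports them.  */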


/* Use a few helper functions to hide the ugly broken ASN
   numbers on early Alphas (ev4 and ev45).  */
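
/* The tbi family below are PALcode TB-invalidate wrappers: tbia()
   invalidates the whole TLB, tbiap() invalidates all per-process
   (non-ASM) entries, and tbi(TYPE, ADDR) invalidates one entry.  */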

__EXTERN_INLINE void
ev4_flush_tlb_current(struct mm_struct *mm)
{
	__load_new_mm_context(mm);
	tbiap();
}
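
/* EV5 and later have working ASNs: loading the new context hands MM
   a fresh ASN, so no explicit invalidate is needed.  */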

__EXTERN_INLINE void
ev5_flush_tlb_current(struct mm_struct *mm)
{
	__load_new_mm_context(mm);
}

/* Flush just one page in the current TLB set.  We need to be very
   careful about the icache here; there is no way to invalidate a
   specific icache page.  */

__EXTERN_INLINE void
ev4_flush_tlb_current_page(struct mm_struct *mm,
			   struct vm_area_struct *vma,
			   unsigned long addr)
{
	int tbi_flag = 2;
	if (vma->vm_flags & VM_EXEC) {
		__load_new_mm_context(mm);
		tbi_flag = 3;
	}
	tbi(tbi_flag, addr);
}
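
/* tbi type 2 invalidates a single data-stream entry, type 3 both the
   I- and D-stream entries for the address (matching the tbisd()/tbis()
   wrappers).  */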

__EXTERN_INLINE void
ev5_flush_tlb_current_page(struct mm_struct *mm,
			   struct vm_area_struct *vma,
			   unsigned long addr)
{
	if (vma->vm_flags & VM_EXEC)
		__load_new_mm_context(mm);
	else
		tbi(2, addr);
}
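
/* With working ASNs the executable case is handled by the context
   roll above; a plain data page needs only the single invalidate.  */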


#ifdef CONFIG_ALPHA_GENERIC
# define flush_tlb_current		alpha_mv.mv_flush_tlb_current
# define flush_tlb_current_page		alpha_mv.mv_flush_tlb_current_page
#else
# ifdef CONFIG_ALPHA_EV4
#  define flush_tlb_current		ev4_flush_tlb_current
#  define flush_tlb_current_page	ev4_flush_tlb_current_page
# else
#  define flush_tlb_current		ev5_flush_tlb_current
#  define flush_tlb_current_page	ev5_flush_tlb_current_page
# endif
#endif
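
/* Generic kernels dispatch through the machine vector at run time;
   CPU-specific kernels bind the ev4/ev5 versions at compile time.  */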

#ifdef __MMU_EXTERN_INLINE
#undef __EXTERN_INLINE
#undef __MMU_EXTERN_INLINE
#endif

/* Flush current user mapping.  */
static inline void
flush_tlb(void)
{
	flush_tlb_current(current->active_mm);
}

/* Flush someone else's user mapping.  */
static inline void
flush_tlb_other(struct mm_struct *mm)
{
	unsigned long *mmc = &mm->context[smp_processor_id()];
	/* Check it's not zero first to avoid cacheline ping pong
	   when possible.  */
	if (*mmc) *mmc = 0;
}
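
/* Zeroing mm->context[cpu] forces a new ASN to be allocated the next
   time MM is activated on that CPU, which invalidates anything it
   still holds under the old ASN.  */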

#ifndef CONFIG_SMP
/* Flush everything (kernel mapping may also have changed
   due to vmalloc/vfree).  */
static inline void flush_tlb_all(void)
{
	tbia();
}

/* Flush a specified user mapping.  */
static inline void
flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		flush_tlb_current(mm);
	else
		flush_tlb_other(mm);
}

/* Page-granular TLB flush.  */
static inline void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm == current->active_mm)
		flush_tlb_current_page(mm, vma, addr);
	else
		flush_tlb_other(mm);
}
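
/* Illustrative sketch only, not part of this interface: a typical
   caller in generic mm code updates a PTE and then flushes the page,

	set_pte(ptep, mk_pte(page, vma->vm_page_prot));
	flush_tlb_page(vma, addr);

   where ptep, page, vma and addr are the caller's own state.  */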

/* Flush a specified range of a user mapping.  On the Alpha we flush
   the whole user TLB.  */
static inline void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}
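
/* PALcode has no ranged invalidate, so rolling to a new ASN via
   flush_tlb_mm() beats issuing tbi() once per page over a large
   range.  */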

#else /* CONFIG_SMP */

extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void flush_tlb_range(struct vm_area_struct *, unsigned long,
			    unsigned long);

#endif /* CONFIG_SMP */

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	flush_tlb_all();
}
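
/* Kernel translations are ASM (address-space-match) entries that
   survive tbiap(), so flushing any kernel range means flushing
   everything.  */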

#endif /* _ALPHA_TLBFLUSH_H */