#ifndef _ALPHA_TLBFLUSH_H
#define _ALPHA_TLBFLUSH_H

#include <linux/config.h>
#include <linux/mm.h>
#include <asm/compiler.h>

#ifndef __EXTERN_INLINE
#define __EXTERN_INLINE extern inline
#define __MMU_EXTERN_INLINE
#endif
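
/* The usual Alpha out-of-line inline idiom: by default these helpers
   are GNU89 "extern inline" (no out-of-line copy emitted); the one
   translation unit that wants real definitions defines __EXTERN_INLINE
   before including this header.  */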

extern void __load_new_mm_context(struct mm_struct *);


/* Use a few helper functions to hide the ugly broken ASN
   numbers on early Alphas (ev4 and ev45).  */
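/* Background: an ASN (address space number) tags each translation
   buffer entry with its owning context, so a context switch need not
   flush the TB.  Where ASN support is broken we have to fall back to
   invalidating the per-process entries wholesale.  */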

__EXTERN_INLINE void
ev4_flush_tlb_current(struct mm_struct *mm)
{
	__load_new_mm_context(mm);
	tbiap();
}
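
/* On EV5 and later the ASNs work: __load_new_mm_context() hands the mm
   a fresh ASN, leaving any stale TB entries unreachable, so no explicit
   invalidate is needed.  */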

__EXTERN_INLINE void
ev5_flush_tlb_current(struct mm_struct *mm)
{
	__load_new_mm_context(mm);
}

/* Flush just one page in the current TLB set.  We need to be very
   careful about the icache here; there is no way to invalidate a
   specific icache page.  */
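/* (For reference: tbi type 2 is TBISD, invalidate a single data-stream
   translation; type 3 is TBIS, invalidate both the I- and D-stream
   translations for the page.)  */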

__EXTERN_INLINE void
ev4_flush_tlb_current_page(struct mm_struct * mm,
			   struct vm_area_struct *vma,
			   unsigned long addr)
{
	int tbi_flag = 2;
	if (vma->vm_flags & VM_EXEC) {
		__load_new_mm_context(mm);
		tbi_flag = 3;
	}
	tbi(tbi_flag, addr);
}

__EXTERN_INLINE void
ev5_flush_tlb_current_page(struct mm_struct * mm,
			   struct vm_area_struct *vma,
			   unsigned long addr)
{
	if (vma->vm_flags & VM_EXEC)
		__load_new_mm_context(mm);
	else
		tbi(2, addr);
}

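/* Generic kernels do not know the CPU family at compile time, so they
   dispatch through the machine vector; CPU-specific kernels bind the
   ev4 or ev5 variant directly.  */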
#ifdef CONFIG_ALPHA_GENERIC
# define flush_tlb_current		alpha_mv.mv_flush_tlb_current
# define flush_tlb_current_page		alpha_mv.mv_flush_tlb_current_page
#else
# ifdef CONFIG_ALPHA_EV4
#  define flush_tlb_current		ev4_flush_tlb_current
#  define flush_tlb_current_page	ev4_flush_tlb_current_page
# else
#  define flush_tlb_current		ev5_flush_tlb_current
#  define flush_tlb_current_page	ev5_flush_tlb_current_page
# endif
#endif
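
/* Undo our default so a later include of this header (or another
   header using the same idiom) starts from a clean slate.  */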
#ifdef __MMU_EXTERN_INLINE
#undef __EXTERN_INLINE
#undef __MMU_EXTERN_INLINE
#endif

/* Flush current user mapping.  */
static inline void
flush_tlb(void)
{
	flush_tlb_current(current->active_mm);
}

/* Flush someone else's user mapping.  */
static inline void
flush_tlb_other(struct mm_struct *mm)
{
	unsigned long *mmc = &mm->context[smp_processor_id()];
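	/* Zeroing the per-cpu context forces __load_new_mm_context() to
	   allocate a fresh ASN the next time this mm runs on that cpu;
	   that reallocation is what actually drops the stale
	   translations.  */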
	/* Check it's not zero first to avoid cacheline ping pong
	   when possible.  */
	if (*mmc) *mmc = 0;
}

/* Flush a specified range of user mapping page tables from TLB.
   Although Alpha uses VPTE caches, this can be a nop: Alpha has no
   fine-grained TLB flushing, so the VPTE entries get flushed during
   the next flush_tlb_range anyway.  */

static inline void
flush_tlb_pgtables(struct mm_struct *mm, unsigned long start,
		   unsigned long end)
{
}

#ifndef CONFIG_SMP
/* Flush everything (kernel mapping may also have changed
   due to vmalloc/vfree).  */
static inline void flush_tlb_all(void)
{
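	/* tbia: PALcode "translation buffer invalidate all"; drops
	   every TB entry, kernel and user alike.  */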
	tbia();
}

/* Flush a specified user mapping.  */
static inline void
flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		flush_tlb_current(mm);
	else
		flush_tlb_other(mm);
}

/* Page-granular tlb flush.  */
static inline void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm == current->active_mm)
		flush_tlb_current_page(mm, vma, addr);
	else
		flush_tlb_other(mm);
}

/* Flush a specified range of user mapping.  On the Alpha we flush
   the whole user tlb.  */
static inline void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}

#else /* CONFIG_SMP */

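/* The SMP versions live out of line (arch/alpha/kernel/smp.c) since
   they must also interrupt the other CPUs to shoot down their TBs.  */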
extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void flush_tlb_range(struct vm_area_struct *, unsigned long,
			    unsigned long);

#endif /* CONFIG_SMP */
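
/* There is no selective way to drop kernel translations for just a
   range, so any change to the kernel map takes the full invalidate.  */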
#define flush_tlb_kernel_range(start, end) flush_tlb_all()

#endif /* _ALPHA_TLBFLUSH_H */