#ifndef _ALPHA_TLBFLUSH_H
#define _ALPHA_TLBFLUSH_H

#include <linux/mm.h>
#include <asm/compiler.h>

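/* These helpers default to old GNU C "extern inline": the body is
   inlined into every includer and no standalone definition is ever
   emitted.  One translation unit in the arch is expected to define
   __EXTERN_INLINE itself before including this header, producing the
   single out-of-line copy that non-inline users (such as the generic
   machine vector below) can reference.  __MMU_EXTERN_INLINE records
   that the macro was defined here, so it can be undefined again at
   the bottom of this header without clobbering an includer's own
   definition.  */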
#ifndef __EXTERN_INLINE
#define __EXTERN_INLINE extern inline
#define __MMU_EXTERN_INLINE
#endif

extern void __load_new_mm_context(struct mm_struct *);


/* Use a few helper functions to hide the ugly broken ASN
   numbers on early Alphas (ev4 and ev45).  */

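/* On EV4/EV45 the ASN hardware cannot be relied on, so flushing the
   current mm means reloading the context and then invalidating every
   translation-buffer entry that is not marked address-space-match
   (tbiap).  EV5 and later tag entries with an ASN, so allocating a
   fresh ASN via __load_new_mm_context() is enough: the stale entries
   can simply never match again.  */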
__EXTERN_INLINE void
ev4_flush_tlb_current(struct mm_struct *mm)
{
	__load_new_mm_context(mm);
	tbiap();
}

__EXTERN_INLINE void
ev5_flush_tlb_current(struct mm_struct *mm)
{
	__load_new_mm_context(mm);
}

/* Flush just one page in the current TLB set.  We need to be very
   careful about the icache here; there is no way to invalidate a
   specific icache page.  */

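/* tbi() selects which translation buffer entries to invalidate for
   the page: 2 hits only the DTB entry, 3 hits both the ITB and DTB
   entries.  For an executable mapping the context must also be
   reloaded, since there is no per-page icache invalidate.  */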
__EXTERN_INLINE void
ev4_flush_tlb_current_page(struct mm_struct *mm,
			   struct vm_area_struct *vma,
			   unsigned long addr)
{
	int tbi_flag = 2;
	if (vma->vm_flags & VM_EXEC) {
		__load_new_mm_context(mm);
		tbi_flag = 3;
	}
	tbi(tbi_flag, addr);
}

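/* The EV5 variant never touches the ITB entry directly: for an
   executable page, taking a fresh ASN leaves the old ITB and
   (ASN-tagged) icache contents unmatchable, and for anything else a
   single DTB invalidate suffices.  */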
__EXTERN_INLINE void
ev5_flush_tlb_current_page(struct mm_struct *mm,
			   struct vm_area_struct *vma,
			   unsigned long addr)
{
	if (vma->vm_flags & VM_EXEC)
		__load_new_mm_context(mm);
	else
		tbi(2, addr);
}


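/* Pick an implementation.  A generic kernel dispatches at runtime
   through the per-platform machine vector (alpha_mv); a kernel built
   for a single CPU family binds the ev4 or ev5 helpers directly,
   with the ev5 versions covering everything from EV5 onwards.  */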
#ifdef CONFIG_ALPHA_GENERIC
# define flush_tlb_current		alpha_mv.mv_flush_tlb_current
# define flush_tlb_current_page		alpha_mv.mv_flush_tlb_current_page
#else
# ifdef CONFIG_ALPHA_EV4
#  define flush_tlb_current		ev4_flush_tlb_current
#  define flush_tlb_current_page	ev4_flush_tlb_current_page
# else
#  define flush_tlb_current		ev5_flush_tlb_current
#  define flush_tlb_current_page	ev5_flush_tlb_current_page
# endif
#endif

#ifdef __MMU_EXTERN_INLINE
#undef __EXTERN_INLINE
#undef __MMU_EXTERN_INLINE
#endif

/* Flush current user mapping.  */
static inline void
flush_tlb(void)
{
	flush_tlb_current(current->active_mm);
}

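/* Flushing an mm other than the current one works through the ASN
   machinery: a context value of zero is never live, so clearing the
   per-CPU context slot forces a fresh ASN to be allocated the next
   time that mm is activated on the CPU, leaving its stale TLB
   entries unmatchable.  */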
/* Flush someone else's user mapping.  */
static inline void
flush_tlb_other(struct mm_struct *mm)
{
	unsigned long *mmc = &mm->context[smp_processor_id()];
	/* Check it's not zero first to avoid cacheline ping pong
	   when possible.  */
	if (*mmc) *mmc = 0;
}

/* Flush a specified range of user mapping page tables from the TLB.
   Although Alpha caches page-table entries (VPTEs) in the TLB, this
   can be a nop: Alpha has no fine-grained TLB flushing, so the VPTE
   entries are swept out by the next flush_tlb_range.  */

static inline void
flush_tlb_pgtables(struct mm_struct *mm, unsigned long start,
		   unsigned long end)
{
}

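/* Uniprocessor builds only ever need to invalidate the local
   translation buffer, so the flush interface reduces to the helpers
   above plus whole-TB invalidates.  */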
#ifndef CONFIG_SMP
/* Flush everything (kernel mapping may also have changed
   due to vmalloc/vfree).  */
static inline void flush_tlb_all(void)
{
	tbia();
}

/* Flush a specified user mapping.  */
static inline void
flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		flush_tlb_current(mm);
	else
		flush_tlb_other(mm);
}

/* Page-granular tlb flush.  */
static inline void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm == current->active_mm)
		flush_tlb_current_page(mm, vma, addr);
	else
		flush_tlb_other(mm);
}

/* Flush a specified range of user mapping.  On the Alpha we flush
   the whole user tlb.  */
static inline void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}

#else /* CONFIG_SMP */

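/* On SMP every flush must also reach the other CPUs' translation
   buffers, so these live out of line in the arch's SMP support code
   and are driven by interprocessor interrupts.  */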
extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void flush_tlb_range(struct vm_area_struct *, unsigned long,
			    unsigned long);

#endif /* CONFIG_SMP */

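/* No ranged kernel-space invalidate is available here, so flushing a
   kernel range falls back to invalidating the whole translation
   buffer.  */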
#define flush_tlb_kernel_range(start, end) flush_tlb_all()

#endif /* _ALPHA_TLBFLUSH_H */