#ifndef __ASM_SH64_MMU_CONTEXT_H
#define __ASM_SH64_MMU_CONTEXT_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * include/asm-sh64/mmu_context.h
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003  Paul Mundt
 *
 * ASID handling idea taken from the MIPS implementation.
 */

#ifndef __ASSEMBLY__

/*
 * Cache of the MMU context last used.
 *
 * The MMU "context" consists of two things:
 *   (a) the TLB cache version (or cycle), the top 24 bits of mmu_context_cache
 *   (b) the ASID (Address Space IDentifier), the bottom 8 bits of mmu_context_cache
 */
extern unsigned long mmu_context_cache;

#include <linux/config.h>
#include <asm/page.h>


/* Current mm's pgd */
extern pgd_t *mmu_pdtp_cache;

#define SR_ASID_MASK		0xffffffffff00ffffULL
#define SR_ASID_SHIFT		16

#define MMU_CONTEXT_ASID_MASK		0x000000ff
#define MMU_CONTEXT_VERSION_MASK	0xffffff00
#define MMU_CONTEXT_FIRST_VERSION	0x00000100
#define NO_CONTEXT			0

/* The ASID is an 8-bit value, so it can never be 0x100 */
#define MMU_NO_ASID			0x100
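
/*
 * Worked example (illustrative only, the values follow from the masks
 * above): if mmu_context_cache holds 0x00000312, the version is
 * 0x00000300 (0x312 & MMU_CONTEXT_VERSION_MASK) and the ASID is 0x12
 * (0x312 & MMU_CONTEXT_ASID_MASK).  The all-zero combination is
 * reserved as NO_CONTEXT, which is why version 0 is never allocated.
 */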


/*
 * Virtual Page Number mask
 */
#define MMU_VPN_MASK	0xfffff000
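
/*
 * Note (an inference from the constant above, not stated elsewhere in
 * this file): 0xfffff000 clears the low 12 bits, i.e. the offset
 * within a 4 kB page, leaving only the virtual page number.
 */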

static inline void
get_new_mmu_context(struct mm_struct *mm)
{
	extern void flush_tlb_all(void);
	extern void flush_cache_all(void);

	unsigned long mc = ++mmu_context_cache;

	if (!(mc & MMU_CONTEXT_ASID_MASK)) {
		/*
		 * We have exhausted the ASIDs of this version.
		 * Flush the whole TLB and start a new cycle.
		 */
		flush_tlb_all();
		/*
		 * We also have to flush all the caches, as ASIDs are
		 * used in the caches.
		 */
		flush_cache_all();
		/*
		 * Fix the version if needed.  Note that we avoid
		 * version #0/ASID #0 so that NO_CONTEXT stays
		 * distinguishable.
		 */
		if (!mc)
			mmu_context_cache = mc = MMU_CONTEXT_FIRST_VERSION;
	}
	mm->context = mc;
}
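
/*
 * Worked example (illustrative only): if mmu_context_cache was
 * 0x000003ff, the increment above yields 0x00000400.  The low eight
 * bits are now zero, so every ASID of version 0x300 has been handed
 * out; the TLB and caches are flushed, and allocation continues with
 * version 0x400, starting again from ASID 0x00.
 */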

/*
 * Get an MMU context if one is needed.
 */
static inline void
get_mmu_context(struct mm_struct *mm)
{
	if (mm) {
		unsigned long mc = mmu_context_cache;
		/*
		 * Check whether this mm carries a context from an old
		 * version.  If so, it needs a new context with the
		 * current version.
		 */
		if ((mm->context ^ mc) & MMU_CONTEXT_VERSION_MASK)
			get_new_mmu_context(mm);
	}
}
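
/*
 * Worked example (illustrative only): with mm->context == 0x00000205
 * and mmu_context_cache == 0x00000341, (0x205 ^ 0x341) == 0x144, and
 * 0x144 & MMU_CONTEXT_VERSION_MASK == 0x100.  The result is non-zero,
 * so the context is stale and get_new_mmu_context() is called.
 */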

/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	mm->context = NO_CONTEXT;

	return 0;
}

/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 */
static inline void destroy_context(struct mm_struct *mm)
{
	extern void flush_tlb_mm(struct mm_struct *mm);

	/* Well, at least free the TLB entries */
	flush_tlb_mm(mm);
}

#endif	/* __ASSEMBLY__ */

/* Common defines */
#define TLB_STEP	0x00000010
#define TLB_PTEH	0x00000000
#define TLB_PTEL	0x00000008

/* PTEH defines */
#define PTEH_ASID_SHIFT	2
#define PTEH_VALID	0x0000000000000001
#define PTEH_SHARED	0x0000000000000002
#define PTEH_MATCH_ASID	0x00000000000003ff
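
/*
 * Taken together, the defines above imply the PTEH layout: bit 0 is
 * the valid bit, bit 1 the shared bit, and the ASID field starts at
 * bit 2, so PTEH_MATCH_ASID (0x3ff) presumably covers all of bits 0-9
 * that take part in a match.
 */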

#ifndef __ASSEMBLY__
/*
 * This has to be a common function because the "next location to
 * fill" information is shared.
 */
extern void __do_tlb_refill(unsigned long address, unsigned long long is_text_not_data, pte_t *pte);

/* Profiling counter. */
#ifdef CONFIG_SH64_PROC_TLB
extern unsigned long long calls_to_do_fast_page_fault;
#endif

static inline unsigned long get_asid(void)
{
	unsigned long long sr;

	/* Read the status register and extract the current ASID field. */
	asm volatile ("getcon	" __SR ", %0\n\t"
		      : "=r" (sr));

	sr = (sr >> SR_ASID_SHIFT) & MMU_CONTEXT_ASID_MASK;
	return (unsigned long) sr;
}

/* Set ASID into SR */
static inline void set_asid(unsigned long asid)
{
	unsigned long long sr, pc;

	asm volatile ("getcon	" __SR ", %0" : "=r" (sr));

	sr = (sr & SR_ASID_MASK) | (asid << SR_ASID_SHIFT);

	/*
	 * This function may be inlined at several call sites, so to
	 * keep the assembler from reporting duplicate symbols we use
	 * the gas trick of local numeric labels with forward
	 * references ("1f" below).
	 */
	asm volatile ("movi	1, %1\n\t"
		      "shlli	%1, 28, %1\n\t"
		      "or	%0, %1, %1\n\t"
		      "putcon	%1, " __SR "\n\t"
		      "putcon	%0, " __SSR "\n\t"
		      "movi	1f, %1\n\t"
		      "ori	%1, 1 , %1\n\t"
		      "putcon	%1, " __SPC "\n\t"
		      "rte\n"
		      "1:\n\t"
		      : "=r" (sr), "=r" (pc) : "0" (sr));
}
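
/*
 * A rough pseudo-C sketch of the sequence above, under the assumption
 * that SR bit 28 blocks exceptions and that bit 0 of a target address
 * selects the SHmedia instruction set (neither fact is stated in this
 * file):
 *
 *	SR  = sr | (1ULL << 28);	// block exceptions while switching
 *	SSR = sr;			// new SR value, carrying the new ASID
 *	SPC = ((u64) &&resume) | 1;	// resume address, SHmedia-tagged
 *	rte();				// SR <- SSR, jump to SPC
 * resume:
 *	;				// now running with the new ASID
 */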

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_context(struct mm_struct *mm)
{
	get_mmu_context(mm);
	set_asid(mm->context & MMU_CONTEXT_ASID_MASK);
}


static inline void switch_mm(struct mm_struct *prev,
			     struct mm_struct *next,
			     struct task_struct *tsk)
{
	if (prev != next) {
		mmu_pdtp_cache = next->pgd;
		activate_context(next);
	}
}

#define deactivate_mm(tsk, mm)	do { } while (0)

#define activate_mm(prev, next) \
	switch_mm((prev), (next), NULL)

static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

#endif	/* __ASSEMBLY__ */

#endif /* __ASM_SH64_MMU_CONTEXT_H */