#ifndef __ASM_SH64_MMU_CONTEXT_H
#define __ASM_SH64_MMU_CONTEXT_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * include/asm-sh64/mmu_context.h
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003  Paul Mundt
 *
 * ASID handling idea taken from MIPS implementation.
 *
 */

#ifndef __ASSEMBLY__

/*
 * Cache of MMU context last used.
 *
 * The MMU "context" consists of two things:
 *   (a) TLB cache version (or cycle, top 24 bits of mmu_context_cache)
 *   (b) ASID (Address Space IDentifier, bottom 8 bits of mmu_context_cache)
 */
extern unsigned long mmu_context_cache;

#include <asm/page.h>
#include <asm-generic/mm_hooks.h>

/* Current mm's pgd */
extern pgd_t *mmu_pdtp_cache;

#define SR_ASID_MASK		0xffffffffff00ffffULL
#define SR_ASID_SHIFT		16

#define MMU_CONTEXT_ASID_MASK		0x000000ff
#define MMU_CONTEXT_VERSION_MASK	0xffffff00
#define MMU_CONTEXT_FIRST_VERSION	0x00000100
#define NO_CONTEXT			0

/* The ASID is an 8-bit value, so it can't be 0x100 */
#define MMU_NO_ASID			0x100

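/*
 * Illustrative sketch -- these helpers are hypothetical and not part of
 * the original interface; they only demonstrate how a context value
 * decomposes under the masks above.  For example, 0x00000305 is version
 * 0x00000300 (cycle 3) combined with ASID 0x05.
 */
static inline unsigned long mmu_context_version(unsigned long ctx)
{
	/* Hypothetical helper: top 24 bits name the TLB flush cycle. */
	return ctx & MMU_CONTEXT_VERSION_MASK;
}

static inline unsigned long mmu_context_asid(unsigned long ctx)
{
	/* Hypothetical helper: bottom 8 bits are the hardware ASID. */
	return ctx & MMU_CONTEXT_ASID_MASK;
}
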
/*
 * Virtual Page Number mask
 */
#define MMU_VPN_MASK	0xfffff000

static inline void
get_new_mmu_context(struct mm_struct *mm)
{
	extern void flush_tlb_all(void);
	extern void flush_cache_all(void);

	unsigned long mc = ++mmu_context_cache;

	if (!(mc & MMU_CONTEXT_ASID_MASK)) {
		/* We've exhausted the ASIDs of this version.
		   Flush the whole TLB and start a new cycle. */
		flush_tlb_all();
		/* We have to flush all the caches as well, since ASIDs
		   are used in the cache. */
		flush_cache_all();
		/* Fix the version if needed.
		   Note that we avoid version #0/ASID #0 to distinguish NO_CONTEXT. */
		if (!mc)
			mmu_context_cache = mc = MMU_CONTEXT_FIRST_VERSION;
	}
	mm->context = mc;
}

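/*
 * Worked example (illustrative): if mmu_context_cache held 0x000001ff,
 * the pre-increment above yields 0x00000200.  The ASID byte is now
 * zero, so every ASID of version 0x100 has been handed out: the TLB
 * and caches are flushed, and 0x00000200 (cycle 2, ASID 0) opens the
 * next cycle.  Only if the counter wraps all the way to zero is it
 * forced to MMU_CONTEXT_FIRST_VERSION, which keeps NO_CONTEXT (0)
 * from ever being a live context.
 */
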
/*
 * Get an MMU context if one is needed.
 */
static __inline__ void
get_mmu_context(struct mm_struct *mm)
{
	if (mm) {
		unsigned long mc = mmu_context_cache;
		/* Check whether we have an old version of the context.
		   If so, get a new context with the current version. */
		if ((mm->context ^ mc) & MMU_CONTEXT_VERSION_MASK)
			get_new_mmu_context(mm);
	}
}

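/*
 * Illustrative note: the XOR above is a branch-free way of asking "does
 * mm->context carry the current version?".  With mmu_context_cache ==
 * 0x00000203, a stale context of 0x00000105 gives
 * (0x105 ^ 0x203) & 0xffffff00 == 0x300, which is non-zero, so a fresh
 * context is allocated; a current one such as 0x00000207 is kept as-is.
 */
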
/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	mm->context = NO_CONTEXT;

	return 0;
}

/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 */
static inline void destroy_context(struct mm_struct *mm)
{
	extern void flush_tlb_mm(struct mm_struct *mm);

	/* Well, at least free the TLB entries */
	flush_tlb_mm(mm);
}

#endif	/* __ASSEMBLY__ */

/* Common defines */
#define TLB_STEP	0x00000010
#define TLB_PTEH	0x00000000
#define TLB_PTEL	0x00000008

/* PTEH defines */
#define PTEH_ASID_SHIFT	2
#define PTEH_VALID	0x0000000000000001
#define PTEH_SHARED	0x0000000000000002
#define PTEH_MATCH_ASID	0x00000000000003ff

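/*
 * Illustrative sketch (an assumption, not stated in this header): a
 * PTEH compare value for a TLB fill would plausibly be assembled by
 * shifting the ASID into place and setting the valid bit, e.g.
 *
 *	pteh = (asid << PTEH_ASID_SHIFT) | PTEH_VALID;
 *
 * PTEH_MATCH_ASID then covers the low bits (the ASID field plus the
 * valid/shared flags) compared when probing for an existing entry.
 */
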
#ifndef __ASSEMBLY__
/* This has to be a common function because the next-location-to-fill
 * information is shared. */
extern void __do_tlb_refill(unsigned long address, unsigned long long is_text_not_data, pte_t *pte);

/* Profiling counter. */
#ifdef CONFIG_SH64_PROC_TLB
extern unsigned long long calls_to_do_fast_page_fault;
#endif

static inline unsigned long get_asid(void)
{
	unsigned long long sr;

	asm volatile ("getcon	" __SR ", %0\n\t"
		      : "=r" (sr));

	sr = (sr >> SR_ASID_SHIFT) & MMU_CONTEXT_ASID_MASK;
	return (unsigned long) sr;
}

/* Set ASID into SR */
static inline void set_asid(unsigned long asid)
{
	unsigned long long sr, pc;

	asm volatile ("getcon	" __SR ", %0" : "=r" (sr));

	sr = (sr & SR_ASID_MASK) | (asid << SR_ASID_SHIFT);

	/*
	 * It is possible that this function may be inlined, so to avoid
	 * the assembler reporting duplicate symbols we make use of the
	 * gas trick of generating symbols using numeric labels and
	 * forward references.
	 */
	asm volatile ("movi	1, %1\n\t"
		      "shlli	%1, 28, %1\n\t"
		      "or	%0, %1, %1\n\t"
		      "putcon	%1, " __SR "\n\t"
		      "putcon	%0, " __SSR "\n\t"
		      "movi	1f, %1\n\t"
		      "ori	%1, 1, %1\n\t"
		      "putcon	%1, " __SPC "\n\t"
		      "rte\n"
		      "1:\n\t"
		      : "=r" (sr), "=r" (pc) : "0" (sr));
}
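
/*
 * Illustrative walk-through of the sequence above; the bit meanings are
 * my reading, not stated in this header.  The first three instructions
 * set bit 28 (presumably SR.BL, blocking exceptions) on top of the
 * updated SR value and install it, so the switch proceeds with
 * exceptions blocked.  The same updated SR, without the block bit, goes
 * into SSR, and the address of local label "1:" (low bit set,
 * presumably to stay in SHmedia mode) goes into SPC.  The rte then
 * loads SSR into SR and SPC into PC in one step, making the ASID change
 * take effect atomically.
 */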

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static __inline__ void activate_context(struct mm_struct *mm)
{
	get_mmu_context(mm);
	set_asid(mm->context & MMU_CONTEXT_ASID_MASK);
}

static __inline__ void switch_mm(struct mm_struct *prev,
				 struct mm_struct *next,
				 struct task_struct *tsk)
{
	if (prev != next) {
		mmu_pdtp_cache = next->pgd;
		activate_context(next);
	}
}
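
/*
 * Illustrative trace tying the pieces together: after fork(),
 * init_new_context() leaves mm->context == NO_CONTEXT (version 0), so
 * the first switch_mm() to that mm sees a version mismatch in
 * get_mmu_context(), allocates a fresh version/ASID pair, and
 * set_asid() loads the low 8 bits into SR.  Later switches within the
 * same TLB cycle reuse the cached ASID with no flush.
 */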

#define deactivate_mm(tsk,mm)	do { } while (0)

#define activate_mm(prev, next) \
	switch_mm((prev),(next),NULL)

static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

#endif	/* __ASSEMBLY__ */

#endif /* __ASM_SH64_MMU_CONTEXT_H */