/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Synthesize TLB refill handlers at runtime.
 *
 * Copyright (C) 2004, 2005, 2006, 2008  Thiemo Seufer
 * Copyright (C) 2005, 2007, 2008, 2009  Maciej W. Rozycki
 * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2008, 2009 Cavium Networks, Inc.
 *
 * ... and the days got worse and worse and now you see
 * I've gone completely out of my mind.
 *
 * They're coming to take me away haha
 * they're coming to take me away hoho hihi haha
 * to the funny farm where code is beautiful all the time ...
 *
 * (Condolences to Napoleon XIV)
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/smp.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/cache.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <asm/war.h>
#include <asm/uasm.h>

/*
 * TLB load/store/modify handlers.
 *
 * Only the fastpath gets synthesized at runtime, the slowpath for
 * do_page_fault remains normal asm.
 */
extern void tlb_do_page_fault_0(void);
extern void tlb_do_page_fault_1(void);

struct work_registers {
	int r1;
	int r2;
	int r3;
};

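/*
 * When no CP0 KScratch register is available, the TLB handlers spill $1
 * and $2 into this CPU's slot of handler_reg_save (see
 * build_get_work_registers() below).  Each slot is cache-line aligned so
 * that CPUs taking refills concurrently do not false-share a line.
 */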
struct tlb_reg_save {
	unsigned long a;
	unsigned long b;
} ____cacheline_aligned_in_smp;

static struct tlb_reg_save handler_reg_save[NR_CPUS];

static inline int r45k_bvahwbug(void)
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}

static inline int r4k_250MHZhwbug(void)
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}

static inline int __maybe_unused bcm1250_m3_war(void)
{
	return BCM1250_M3_WAR;
}

static inline int __maybe_unused r10000_llsc_war(void)
{
	return R10000_LLSC_WAR;
}

static int use_bbit_insns(void)
{
	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
	case CPU_CAVIUM_OCTEON2:
		return 1;
	default:
		return 0;
	}
}

static int use_lwx_insns(void)
{
	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON2:
		return 1;
	default:
		return 0;
	}
}
#if defined(CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE) && \
    CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
static bool scratchpad_available(void)
{
	return true;
}
static int scratchpad_offset(int i)
{
	/*
	 * CVMSEG starts at address -32768 and extends for
	 * CAVIUM_OCTEON_CVMSEG_SIZE 128 byte cache lines.
	 */
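	/*
	 * For example, with CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE == 2 the
	 * region spans [-32768, -32512); slot 0 then lives at
	 * 2 * 128 - 8 - 32768 = -32520, i.e. the topmost 8 bytes of
	 * CVMSEG, and each further slot sits 8 bytes below the
	 * previous one.
	 */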
	i += 1; /* Kernel use starts at the top and works down. */
	return CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128 - (8 * i) - 32768;
}
#else
static bool scratchpad_available(void)
{
	return false;
}
static int scratchpad_offset(int i)
{
	BUG();
	/* Really unreachable, but evidently some GCC versions want this. */
	return 0;
}
#endif
/*
 * Found by experiment: under some circumstances at least some revisions
 * of the 4kc throw a machine check exception, triggered by invalid
 * values in the index register.  Delaying the tlbp instruction until
 * after the next branch, plus adding an additional nop in front of
 * tlbwi/tlbwr, avoids the invalid index register values.  Nobody knows
 * why; it's not an issue caused by the core RTL.
 */
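/*
 * Note: processor_id is the CP0 PRId value; masking with 0xffff00 keeps
 * the company and implementation fields but drops the revision byte, so
 * the check below matches every 4Kc revision.
 */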
static int __cpuinit m4kc_tlbp_war(void)
{
	return (current_cpu_data.processor_id & 0xffff00) ==
	       (PRID_COMP_MIPS | PRID_IMP_4KC);
}

/* Handle labels (which must be positive integers). */
enum label_id {
	label_second_part = 1,
	label_leave,
	label_vmalloc,
	label_vmalloc_done,
	label_tlbw_hazard,
	label_split,
	label_tlbl_goaround1,
	label_tlbl_goaround2,
	label_nopage_tlbl,
	label_nopage_tlbs,
	label_nopage_tlbm,
	label_smp_pgtable_change,
	label_r3000_write_probe_fail,
	label_large_segbits_fault,
#ifdef CONFIG_HUGETLB_PAGE
	label_tlb_huge_update,
#endif
};

UASM_L_LA(_second_part)
UASM_L_LA(_leave)
UASM_L_LA(_vmalloc)
UASM_L_LA(_vmalloc_done)
UASM_L_LA(_tlbw_hazard)
UASM_L_LA(_split)
UASM_L_LA(_tlbl_goaround1)
UASM_L_LA(_tlbl_goaround2)
UASM_L_LA(_nopage_tlbl)
UASM_L_LA(_nopage_tlbs)
UASM_L_LA(_nopage_tlbm)
UASM_L_LA(_smp_pgtable_change)
UASM_L_LA(_r3000_write_probe_fail)
UASM_L_LA(_large_segbits_fault)
#ifdef CONFIG_HUGETLB_PAGE
UASM_L_LA(_tlb_huge_update)
#endif

/*
 * For debug purposes.
 */
static inline void dump_handler(const u32 *handler, int count)
{
	int i;

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");

	for (i = 0; i < count; i++)
		pr_debug("\t%p\t.word 0x%08x\n", &handler[i], handler[i]);

	pr_debug("\t.set pop\n");
}

/* The only general purpose registers allowed in TLB handlers. */
#define K0		26
#define K1		27

/* Some CP0 registers */
#define C0_INDEX	0, 0
#define C0_ENTRYLO0	2, 0
#define C0_TCBIND	2, 2
#define C0_ENTRYLO1	3, 0
#define C0_CONTEXT	4, 0
#define C0_PAGEMASK	5, 0
#define C0_BADVADDR	8, 0
#define C0_ENTRYHI	10, 0
#define C0_EPC		14, 0
#define C0_XCONTEXT	20, 0
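/*
 * Each of the C0_* macros above expands to a "register, select" pair;
 * e.g. UASM_i_MFC0(p, tmp, C0_ENTRYHI) reads CP0 register 10, select 0.
 */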

#ifdef CONFIG_64BIT
# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT)
#else
# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT)
#endif

/* The worst case length of the handler is around 18 instructions for
 * R3000-style TLBs and up to 63 instructions for R4000-style TLBs.
 * Maximum space available is 32 instructions for R3000 and 64
 * instructions for R4000.
 *
 * We deliberately chose a buffer size of 128, so we won't scribble
 * over anything important on overflow before we panic.
 */
static u32 tlb_handler[128] __cpuinitdata;

/* simply assume worst case size for labels and relocs */
static struct uasm_label labels[128] __cpuinitdata;
static struct uasm_reloc relocs[128] __cpuinitdata;

static int check_for_high_segbits __cpuinitdata;

static unsigned int kscratch_used_mask __cpuinitdata;

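/*
 * Hand out CP0 KScratch registers advertised by the CPU, lowest select
 * first.  For example, if cpu_data[0].kscratch_mask is 0x0c (KScratch
 * selects 2 and 3 usable), the first call returns 2, the next returns 3
 * and any further call returns -1.
 */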
static int __cpuinit allocate_kscratch(void)
{
	int r;
	unsigned int a = cpu_data[0].kscratch_mask & ~kscratch_used_mask;

	r = ffs(a);

	if (r == 0)
		return -1;

	r--; /* make it zero based */

	kscratch_used_mask |= (1 << r);

	return r;
}

static int scratch_reg __cpuinitdata;
static int pgd_reg __cpuinitdata;
enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch};

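/*
 * Pick the registers the generated fastpath may clobber.  If a KScratch
 * register is available, $1 is stashed there and k0/k1/$1 are free to
 * use.  Otherwise $1 and $2 are spilled to this CPU's handler_reg_save
 * slot (indexed via the CPU number recovered from Context/XContext) and
 * k1/$1/$2 become the work registers.
 */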
static struct work_registers __cpuinit build_get_work_registers(u32 **p)
{
	struct work_registers r;

	int smp_processor_id_reg;
	int smp_processor_id_sel;
	int smp_processor_id_shift;

	if (scratch_reg > 0) {
		/* Save in CPU local C0_KScratch? */
		UASM_i_MTC0(p, 1, 31, scratch_reg);
		r.r1 = K0;
		r.r2 = K1;
		r.r3 = 1;
		return r;
	}

	if (num_possible_cpus() > 1) {
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
		smp_processor_id_shift = 51;
		smp_processor_id_reg = 20; /* XContext */
		smp_processor_id_sel = 0;
#else
# ifdef CONFIG_32BIT
		smp_processor_id_shift = 25;
		smp_processor_id_reg = 4; /* Context */
		smp_processor_id_sel = 0;
# endif
# ifdef CONFIG_64BIT
		smp_processor_id_shift = 26;
		smp_processor_id_reg = 4; /* Context */
		smp_processor_id_sel = 0;
# endif
#endif
		/* Get smp_processor_id */
		UASM_i_MFC0(p, K0, smp_processor_id_reg, smp_processor_id_sel);
		UASM_i_SRL_SAFE(p, K0, K0, smp_processor_id_shift);

		/* handler_reg_save index in K0 */
		UASM_i_SLL(p, K0, K0, ilog2(sizeof(struct tlb_reg_save)));

		UASM_i_LA(p, K1, (long)&handler_reg_save);
		UASM_i_ADDU(p, K0, K0, K1);
	} else {
		UASM_i_LA(p, K0, (long)&handler_reg_save);
	}
	/* K0 now points to save area, save $1 and $2 */
	UASM_i_SW(p, 1, offsetof(struct tlb_reg_save, a), K0);
	UASM_i_SW(p, 2, offsetof(struct tlb_reg_save, b), K0);

	r.r1 = K1;
	r.r2 = 1;
	r.r3 = 2;
	return r;
}

static void __cpuinit build_restore_work_registers(u32 **p)
{
	if (scratch_reg > 0) {
		UASM_i_MFC0(p, 1, 31, scratch_reg);
		return;
	}
	/* K0 already points to save area, restore $1 and $2 */
	UASM_i_LW(p, 1, offsetof(struct tlb_reg_save, a), K0);
	UASM_i_LW(p, 2, offsetof(struct tlb_reg_save, b), K0);
}

#ifndef CONFIG_MIPS_PGD_C0_CONTEXT

/*
 * CONFIG_MIPS_PGD_C0_CONTEXT implies 64 bit and lack of pgd_current,
 * so we cannot do the R3000 handler under these circumstances.
 *
 * Declare pgd_current here instead of including mmu_context.h to avoid type
 * conflicts for tlbmiss_handler_setup_pgd.
 */
extern unsigned long pgd_current[];

/*
 * The R3000 TLB handler is simple.
 */
static void __cpuinit build_r3000_tlb_refill_handler(void)
{
	long pgdc = (long)pgd_current;
	u32 *p;

	memset(tlb_handler, 0, sizeof(tlb_handler));
	p = tlb_handler;

	uasm_i_mfc0(&p, K0, C0_BADVADDR);
	uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */
	uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1);
	uasm_i_srl(&p, K0, K0, 22); /* load delay */
	uasm_i_sll(&p, K0, K0, 2);
	uasm_i_addu(&p, K1, K1, K0);
	uasm_i_mfc0(&p, K0, C0_CONTEXT);
	uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */
	uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */
	uasm_i_addu(&p, K1, K1, K0);
	uasm_i_lw(&p, K0, 0, K1);
	uasm_i_nop(&p); /* load delay */
	uasm_i_mtc0(&p, K0, C0_ENTRYLO0);
	uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */
	uasm_i_tlbwr(&p); /* cp0 delay */
	uasm_i_jr(&p, K1);
	uasm_i_rfe(&p); /* branch delay */

	if (p > tlb_handler + 32)
		panic("TLB refill handler space exceeded");

	pr_debug("Wrote TLB refill handler (%u instructions).\n",
		 (unsigned int)(p - tlb_handler));

	memcpy((void *)ebase, tlb_handler, 0x80);

	dump_handler((u32 *)ebase, 32);
}
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */

/*
 * The R4000 TLB handler is much more complicated. We have two
 * consecutive handler areas with 32 instructions space each.
 * Since they aren't used at the same time, we can overflow into
 * the other one.  To keep things simple, we first assume linear space,
 * then we relocate it to the final handler layout as needed.
 */
static u32 final_handler[64] __cpuinitdata;

/*
 * Hazards
 *
 * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
 * 2. A timing hazard exists for the TLBP instruction.
 *
 *      stalling_instruction
 *      TLBP
 *
 * The JTLB is being read for the TLBP throughout the stall generated by the
 * previous instruction. This is not really correct as the stalling instruction
 * can modify the address used to access the JTLB.  The failure symptom is that
 * the TLBP instruction will use an address created for the stalling instruction
 * and not the address held in C0_ENHI and thus report the wrong results.
 *
 * The software work-around is to not allow the instruction preceding the TLBP
 * to stall - make it an NOP or some other instruction guaranteed not to stall.
 *
 * Errata 2 will not be fixed.  This errata is also on the R5000.
 *
 * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
 */
static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
{
	switch (current_cpu_type()) {
	/* Found by experiment: R4600 v2.0/R4700 needs this, too.  */
	case CPU_R4600:
	case CPU_R4700:
	case CPU_R5000:
	case CPU_R5000A:
	case CPU_NEVADA:
		uasm_i_nop(p);
		uasm_i_tlbp(p);
		break;

	default:
		uasm_i_tlbp(p);
		break;
	}
}

/*
 * Write random or indexed TLB entry, and care about the hazards from
 * the preceding mtc0 and the following eret.
 */
enum tlb_write_entry { tlb_random, tlb_indexed };

static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
					 struct uasm_reloc **r,
					 enum tlb_write_entry wmode)
{
	void(*tlbw)(u32 **) = NULL;

	switch (wmode) {
	case tlb_random: tlbw = uasm_i_tlbwr; break;
	case tlb_indexed: tlbw = uasm_i_tlbwi; break;
	}

	if (cpu_has_mips_r2) {
		if (cpu_has_mips_r2_exec_hazard)
			uasm_i_ehb(p);
		tlbw(p);
		return;
	}

	switch (current_cpu_type()) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		/*
		 * This branch uses up a mtc0 hazard nop slot and saves
		 * two nops after the tlbw instruction.
		 */
		uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
		tlbw(p);
		uasm_l_tlbw_hazard(l, *p);
		uasm_i_nop(p);
		break;

	case CPU_R4600:
	case CPU_R4700:
	case CPU_R5000:
	case CPU_R5000A:
		uasm_i_nop(p);
		tlbw(p);
		uasm_i_nop(p);
		break;

	case CPU_R4300:
	case CPU_5KC:
	case CPU_TX49XX:
	case CPU_PR4450:
	case CPU_XLR:
		uasm_i_nop(p);
		tlbw(p);
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
	case CPU_4KC:
	case CPU_4KEC:
	case CPU_SB1:
	case CPU_SB1A:
	case CPU_4KSC:
	case CPU_20KC:
	case CPU_25KF:
	case CPU_BMIPS32:
	case CPU_BMIPS3300:
	case CPU_BMIPS4350:
	case CPU_BMIPS4380:
	case CPU_BMIPS5000:
	case CPU_LOONGSON2:
	case CPU_R5500:
		if (m4kc_tlbp_war())
			uasm_i_nop(p);
	case CPU_ALCHEMY:
		tlbw(p);
		break;

	case CPU_NEVADA:
		uasm_i_nop(p); /* QED specifies 2 nops hazard */
		/*
		 * This branch uses up a mtc0 hazard nop slot and saves
		 * a nop after the tlbw instruction.
		 */
		uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
		tlbw(p);
		uasm_l_tlbw_hazard(l, *p);
		break;

	case CPU_RM7000:
		uasm_i_nop(p);
		uasm_i_nop(p);
		uasm_i_nop(p);
		uasm_i_nop(p);
		tlbw(p);
		break;

	case CPU_RM9000:
		/*
		 * When the JTLB is updated by tlbwi or tlbwr, a subsequent
		 * use of the JTLB for instructions should not occur for 4
		 * cpu cycles and use for data translations should not occur
		 * for 3 cpu cycles.
		 */
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		tlbw(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		break;

	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		uasm_i_nop(p);
		uasm_i_nop(p);
		tlbw(p);
		uasm_i_nop(p);
		uasm_i_nop(p);
		break;

	case CPU_VR4131:
	case CPU_VR4133:
	case CPU_R5432:
		uasm_i_nop(p);
		uasm_i_nop(p);
		tlbw(p);
		break;

	case CPU_JZRISC:
		tlbw(p);
		uasm_i_nop(p);
		break;

	default:
		panic("No TLB refill handler yet (CPU type: %d)",
		      current_cpu_data.cputype);
		break;
	}
}

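/*
 * Convert a software PTE into EntryLo format.  With RIXI the SRL strips
 * the software status bits below _PAGE_NO_EXEC and the ROTR then wraps
 * the _PAGE_NO_EXEC/_PAGE_NO_READ bits around into the top of the
 * register, where EntryLo expects its XI/RI bits, while _PAGE_GLOBAL
 * lands at bit 0 as the hardware G bit.  Without RIXI a plain shift by
 * ilog2(_PAGE_GLOBAL) is all that is needed.
 */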
static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
								  unsigned int reg)
{
	if (kernel_uses_smartmips_rixi) {
		UASM_i_SRL(p, reg, reg, ilog2(_PAGE_NO_EXEC));
		UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
	} else {
#ifdef CONFIG_64BIT_PHYS_ADDR
		uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL));
#else
		UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL));
#endif
	}
}

#ifdef CONFIG_HUGETLB_PAGE

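/*
 * Restore the default PageMask and branch to 'lid'.  In the
 * restore_scratch case the branch is emitted after the mtc0 so its delay
 * slot is free to reload $1 from the scratch register or scratchpad; in
 * the plain case the branch is emitted first so the final mtc0 rides in
 * the branch delay slot and an instruction is saved.
 */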
static __cpuinit void build_restore_pagemask(u32 **p,
					     struct uasm_reloc **r,
					     unsigned int tmp,
					     enum label_id lid,
					     int restore_scratch)
{
	if (restore_scratch) {
		/* Reset default page size */
		if (PM_DEFAULT_MASK >> 16) {
			uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
			uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
			uasm_il_b(p, r, lid);
		} else if (PM_DEFAULT_MASK) {
			uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
			uasm_il_b(p, r, lid);
		} else {
			uasm_i_mtc0(p, 0, C0_PAGEMASK);
			uasm_il_b(p, r, lid);
		}
		if (scratch_reg > 0)
			UASM_i_MFC0(p, 1, 31, scratch_reg);
		else
			UASM_i_LW(p, 1, scratchpad_offset(0), 0);
	} else {
		/* Reset default page size */
		if (PM_DEFAULT_MASK >> 16) {
			uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
			uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
			uasm_il_b(p, r, lid);
			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
		} else if (PM_DEFAULT_MASK) {
			uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
			uasm_il_b(p, r, lid);
			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
		} else {
			uasm_il_b(p, r, lid);
			uasm_i_mtc0(p, 0, C0_PAGEMASK);
		}
	}
}

static __cpuinit void build_huge_tlb_write_entry(u32 **p,
						 struct uasm_label **l,
						 struct uasm_reloc **r,
						 unsigned int tmp,
						 enum tlb_write_entry wmode,
						 int restore_scratch)
{
	/* Set huge page tlb entry size */
	uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
	uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
	uasm_i_mtc0(p, tmp, C0_PAGEMASK);

	build_tlb_write_entry(p, l, r, wmode);

	build_restore_pagemask(p, r, tmp, label_leave, restore_scratch);
}

/*
 * Check if a huge PTE is present; if so, jump to LABEL.
 */
static void __cpuinit
build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
		unsigned int pmd, int lid)
{
	UASM_i_LW(p, tmp, 0, pmd);
	if (use_bbit_insns()) {
		uasm_il_bbit1(p, r, tmp, ilog2(_PAGE_HUGE), lid);
	} else {
		uasm_i_andi(p, tmp, tmp, _PAGE_HUGE);
		uasm_il_bnez(p, r, tmp, lid);
	}
}

static __cpuinit void build_huge_update_entries(u32 **p,
						unsigned int pte,
						unsigned int tmp)
{
	int small_sequence;

	/*
	 * A huge PTE describes an area the size of the
	 * configured huge page size. This is twice the
	 * size of the large TLB entry we intend to use.
	 * A TLB entry half the size of the configured
	 * huge page size is configured into entrylo0
	 * and entrylo1 to cover the contiguous huge PTE
	 * address space.
	 */
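	/*
	 * For example, with 2 MB huge pages each of the two TLB entries
	 * maps 1 MB, and the EntryLo values differ by HPAGE_SIZE >> 7 =
	 * 0x4000 (the PFN field starts at bit 6, so a 1 MB physical
	 * offset is (HPAGE_SIZE / 2) >> 12 << 6).  That fits in 16 bits,
	 * so the single-ADDIU "small sequence" below is used.
	 */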
	small_sequence = (HPAGE_SIZE >> 7) < 0x10000;

	/* We can clobber tmp.  It isn't used after this. */
	if (!small_sequence)
		uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));

	build_convert_pte_to_entrylo(p, pte);
	UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */
	/* convert to entrylo1 */
	if (small_sequence)
		UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7);
	else
		UASM_i_ADDU(p, pte, pte, tmp);

	UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */
}

static __cpuinit void build_huge_handler_tail(u32 **p,
					      struct uasm_reloc **r,
					      struct uasm_label **l,
					      unsigned int pte,
					      unsigned int ptr)
{
#ifdef CONFIG_SMP
	UASM_i_SC(p, pte, 0, ptr);
	uasm_il_beqz(p, r, pte, label_tlb_huge_update);
	UASM_i_LW(p, pte, 0, ptr); /* Needed because SC killed our PTE */
#else
	UASM_i_SW(p, pte, 0, ptr);
#endif
	build_huge_update_entries(p, pte, ptr);
	build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
}
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_64BIT
/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pmd entry.
 */
static void __cpuinit
build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
		 unsigned int tmp, unsigned int ptr)
{
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
	long pgdc = (long)pgd_current;
#endif
	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	uasm_i_dmfc0(p, tmp, C0_BADVADDR);

	if (check_for_high_segbits) {
		/*
		 * The kernel currently implicitly assumes that the
		 * MIPS SEGBITS parameter for the processor is
		 * (PGDIR_SHIFT+PGDIR_BITS) or less, and will never
		 * allocate virtual addresses outside the maximum
		 * range for SEGBITS = (PGDIR_SHIFT+PGDIR_BITS). But
		 * that doesn't prevent user code from accessing the
		 * higher xuseg addresses.  Here, we make sure that
		 * everything but the lower xuseg addresses goes down
		 * the module_alloc/vmalloc path.
		 */
		uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
		uasm_il_bnez(p, r, ptr, label_vmalloc);
	} else {
		uasm_il_bltz(p, r, tmp, label_vmalloc);
	}
	/* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */

#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
	if (pgd_reg != -1) {
		/* pgd is in pgd_reg */
		UASM_i_MFC0(p, ptr, 31, pgd_reg);
	} else {
		/*
		 * &pgd << 11 stored in CONTEXT [23..63].
		 */
		UASM_i_MFC0(p, ptr, C0_CONTEXT);

		/* Clear lower 23 bits of context. */
		uasm_i_dins(p, ptr, 0, 0, 23);

		/* 1 0  1 0 1  << 6  xkphys cached */
		uasm_i_ori(p, ptr, ptr, 0x540);
		uasm_i_drotr(p, ptr, ptr, 11);
	}
#elif defined(CONFIG_SMP)
# ifdef  CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC uses TCBind value as "CPU" index
	 */
	uasm_i_mfc0(p, ptr, C0_TCBIND);
	uasm_i_dsrl_safe(p, ptr, ptr, 19);
# else
	/*
	 * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
	 * stored in CONTEXT.
	 */
	uasm_i_dmfc0(p, ptr, C0_CONTEXT);
	uasm_i_dsrl_safe(p, ptr, ptr, 23);
# endif
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_daddu(p, ptr, ptr, tmp);
	uasm_i_dmfc0(p, tmp, C0_BADVADDR);
	uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
#else
	UASM_i_LA_mostly(p, ptr, pgdc);
	uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
#endif

	uasm_l_vmalloc_done(l, *p);

	/* get pgd offset in bytes */
	uasm_i_dsrl_safe(p, tmp, tmp, PGDIR_SHIFT - 3);

	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3);
	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
#ifndef __PAGETABLE_PMD_FOLDED
	uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
	uasm_i_dsrl_safe(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3);
	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
#endif
}

/*
 * BVADDR is the faulting address, PTR is scratch.
 * PTR will hold the pgd for vmalloc.
 */
static void __cpuinit
build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
			unsigned int bvaddr, unsigned int ptr,
			enum vmalloc64_mode mode)
{
	long swpd = (long)swapper_pg_dir;
	int single_insn_swpd;
	int did_vmalloc_branch = 0;

	single_insn_swpd = uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd);

	uasm_l_vmalloc(l, *p);

	if (mode != not_refill && check_for_high_segbits) {
		if (single_insn_swpd) {
			uasm_il_bltz(p, r, bvaddr, label_vmalloc_done);
			uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
			did_vmalloc_branch = 1;
			/* fall through */
		} else {
			uasm_il_bgez(p, r, bvaddr, label_large_segbits_fault);
		}
	}
	if (!did_vmalloc_branch) {
		if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
			uasm_il_b(p, r, label_vmalloc_done);
			uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
		} else {
			UASM_i_LA_mostly(p, ptr, swpd);
			uasm_il_b(p, r, label_vmalloc_done);
			if (uasm_in_compat_space_p(swpd))
				uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
			else
				uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
		}
	}
	if (mode != not_refill && check_for_high_segbits) {
		uasm_l_large_segbits_fault(l, *p);
		/*
		 * We get here if we are an xsseg address, or if we are
		 * an xuseg address above the (PGDIR_SHIFT+PGDIR_BITS)
		 * boundary.
		 *
		 * Ignoring xsseg (assumed disabled, so it would take an
		 * address error anyway), the only remaining possibility
		 * is the upper xuseg addresses.  On processors with
		 * TLB_SEGBITS <= PGDIR_SHIFT+PGDIR_BITS, these
		 * addresses would have taken an address error. We try
		 * to mimic that here by taking a load/istream page
		 * fault.
		 */
		UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0);
		uasm_i_jr(p, ptr);

		if (mode == refill_scratch) {
			if (scratch_reg > 0)
				UASM_i_MFC0(p, 1, 31, scratch_reg);
			else
				UASM_i_LW(p, 1, scratchpad_offset(0), 0);
		} else {
			uasm_i_nop(p);
		}
	}
}

#else /* !CONFIG_64BIT */

/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pgd entry.
 */
static void __cpuinit __maybe_unused
build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	/* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
#ifdef CONFIG_SMP
#ifdef  CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC uses TCBind value as "CPU" index
	 */
	uasm_i_mfc0(p, ptr, C0_TCBIND);
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_srl(p, ptr, ptr, 19);
#else
	/*
	 * smp_processor_id() << 3 is stored in CONTEXT.
	 */
	uasm_i_mfc0(p, ptr, C0_CONTEXT);
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_srl(p, ptr, ptr, 23);
#endif
	uasm_i_addu(p, ptr, tmp, ptr);
#else
	UASM_i_LA_mostly(p, ptr, pgdc);
#endif
	uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
	uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
	uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
	uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
}

#endif /* !CONFIG_64BIT */

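/*
 * Turn the Context/XContext value into a byte offset of the even/odd PTE
 * pair within the page table.  The hardware places BadVPN2 (VA >> 13) at
 * bit 4, so the shift below moves VA bit (PAGE_SHIFT + 1) down to bit
 * (PTE_T_LOG2 + 1): with 4 KB pages and 32-bit PTEs that is
 * 4 - 3 + 12 - 12 = 1.  The mask then keeps just the in-page PTE-pair
 * index.
 */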
static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
{
	unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
	unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);

	switch (current_cpu_type()) {
	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4131:
	case CPU_VR4181:
	case CPU_VR4181A:
	case CPU_VR4133:
		shift += 2;
		break;

	default:
		break;
	}

	if (shift)
		UASM_i_SRL(p, ctx, ctx, shift);
	uasm_i_andi(p, ctx, ctx, mask);
}

static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
{
	/*
	 * Bug workaround for the Nevada. It seems as if under certain
	 * circumstances the move from cp0_context might produce a
	 * bogus result when the mfc0 instruction and its consumer are
	 * in a different cacheline or a load instruction, probably any
	 * memory reference, is between them.
	 */
	switch (current_cpu_type()) {
	case CPU_NEVADA:
		UASM_i_LW(p, ptr, 0, ptr);
		GET_CONTEXT(p, tmp); /* get context reg */
		break;

	default:
		GET_CONTEXT(p, tmp); /* get context reg */
		UASM_i_LW(p, ptr, 0, ptr);
		break;
	}

	build_adjust_context(p, tmp);
	UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
}

static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
					unsigned int ptep)
{
	/*
	 * 64bit address support (36bit on a 32bit CPU) in a 32bit
	 * Kernel is a special case. Only a few CPUs use it.
	 */
#ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits) {
		uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
		uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
		if (kernel_uses_smartmips_rixi) {
			UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
			UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
			UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
			UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
			UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
		} else {
			uasm_i_dsrl_safe(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
			UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
			uasm_i_dsrl_safe(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
		}
		UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
	} else {
		int pte_off_even = sizeof(pte_t) / 2;
		int pte_off_odd = pte_off_even + sizeof(pte_t);

		/* The pte entries are pre-shifted */
		uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
		uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
		UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
	}
#else
	UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
	UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
	if (r45k_bvahwbug())
		build_tlb_probe_entry(p);
	if (kernel_uses_smartmips_rixi) {
		UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
		UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
		if (r4k_250MHZhwbug())
			UASM_i_MTC0(p, 0, C0_ENTRYLO0);
		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
		UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
	} else {
		UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
		if (r4k_250MHZhwbug())
			UASM_i_MTC0(p, 0, C0_ENTRYLO0);
		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
		UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
		if (r45k_bvahwbug())
			uasm_i_mfc0(p, tmp, C0_INDEX);
	}
	if (r4k_250MHZhwbug())
		UASM_i_MTC0(p, 0, C0_ENTRYLO1);
	UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
#endif
}

struct mips_huge_tlb_info {
 | 1041 | 	int huge_pte; | 
 | 1042 | 	int restore_scratch; | 
 | 1043 | }; | 
 | 1044 |  | 
 | 1045 | static struct mips_huge_tlb_info __cpuinit | 
 | 1046 | build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l, | 
 | 1047 | 			       struct uasm_reloc **r, unsigned int tmp, | 
 | 1048 | 			       unsigned int ptr, int c0_scratch) | 
 | 1049 | { | 
 | 1050 | 	struct mips_huge_tlb_info rv; | 
 | 1051 | 	unsigned int even, odd; | 
 | 1052 | 	int vmalloc_branch_delay_filled = 0; | 
 | 1053 | 	const int scratch = 1; /* Our extra working register */ | 
 | 1054 |  | 
 | 1055 | 	rv.huge_pte = scratch; | 
 | 1056 | 	rv.restore_scratch = 0; | 
 | 1057 |  | 
 | 1058 | 	if (check_for_high_segbits) { | 
 | 1059 | 		UASM_i_MFC0(p, tmp, C0_BADVADDR); | 
 | 1060 |  | 
 | 1061 | 		if (pgd_reg != -1) | 
 | 1062 | 			UASM_i_MFC0(p, ptr, 31, pgd_reg); | 
 | 1063 | 		else | 
 | 1064 | 			UASM_i_MFC0(p, ptr, C0_CONTEXT); | 
 | 1065 |  | 
 | 1066 | 		if (c0_scratch >= 0) | 
 | 1067 | 			UASM_i_MTC0(p, scratch, 31, c0_scratch); | 
 | 1068 | 		else | 
 | 1069 | 			UASM_i_SW(p, scratch, scratchpad_offset(0), 0); | 
 | 1070 |  | 
 | 1071 | 		uasm_i_dsrl_safe(p, scratch, tmp, | 
 | 1072 | 				 PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3); | 
 | 1073 | 		uasm_il_bnez(p, r, scratch, label_vmalloc); | 
 | 1074 |  | 
 | 1075 | 		if (pgd_reg == -1) { | 
 | 1076 | 			vmalloc_branch_delay_filled = 1; | 
 | 1077 | 			/* Clear lower 23 bits of context. */ | 
 | 1078 | 			uasm_i_dins(p, ptr, 0, 0, 23); | 
 | 1079 | 		} | 
 | 1080 | 	} else { | 
 | 1081 | 		if (pgd_reg != -1) | 
 | 1082 | 			UASM_i_MFC0(p, ptr, 31, pgd_reg); | 
 | 1083 | 		else | 
 | 1084 | 			UASM_i_MFC0(p, ptr, C0_CONTEXT); | 
 | 1085 |  | 
 | 1086 | 		UASM_i_MFC0(p, tmp, C0_BADVADDR); | 
 | 1087 |  | 
 | 1088 | 		if (c0_scratch >= 0) | 
 | 1089 | 			UASM_i_MTC0(p, scratch, 31, c0_scratch); | 
 | 1090 | 		else | 
 | 1091 | 			UASM_i_SW(p, scratch, scratchpad_offset(0), 0); | 
 | 1092 |  | 
 | 1093 | 		if (pgd_reg == -1) | 
 | 1094 | 			/* Clear lower 23 bits of context. */ | 
 | 1095 | 			uasm_i_dins(p, ptr, 0, 0, 23); | 
 | 1096 |  | 
 | 1097 | 		uasm_il_bltz(p, r, tmp, label_vmalloc); | 
 | 1098 | 	} | 
 | 1099 |  | 
 | 1100 | 	if (pgd_reg == -1) { | 
 | 1101 | 		vmalloc_branch_delay_filled = 1; | 
 | 1102 | 		/* 1 0  1 0 1  << 6  xkphys cached */ | 
 | 1103 | 		uasm_i_ori(p, ptr, ptr, 0x540); | 
 | 1104 | 		uasm_i_drotr(p, ptr, ptr, 11); | 
 | 1105 | 	} | 
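 |  | 	/* | 
 |  | 	 * Sketch of the trick above: with CONFIG_MIPS_PGD_C0_CONTEXT, | 
 |  | 	 * c0_context holds phys(PGD) << 11 (see build_r4000_setup_pgd) | 
 |  | 	 * and its low 23 bits were cleared by the dins.  The ori plants | 
 |  | 	 * the pattern 0b10101 at bits 10..6, and the drotr by 11 rotates | 
 |  | 	 * that pattern up to bits 63..59 while undoing the << 11, so ptr | 
 |  | 	 * becomes a cached xkphys pointer to the PGD. | 
 |  | 	 */ | 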
 | 1106 |  | 
 | 1107 | #ifdef __PAGETABLE_PMD_FOLDED | 
 | 1108 | #define LOC_PTEP scratch | 
 | 1109 | #else | 
 | 1110 | #define LOC_PTEP ptr | 
 | 1111 | #endif | 
 | 1112 |  | 
 | 1113 | 	if (!vmalloc_branch_delay_filled) | 
 | 1114 | 		/* get pgd offset in bytes */ | 
 | 1115 | 		uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3); | 
 | 1116 |  | 
 | 1117 | 	uasm_l_vmalloc_done(l, *p); | 
 | 1118 |  | 
 | 1119 | 	/* | 
 | 1120 | 	 *                         tmp          ptr | 
 | 1121 | 	 * fall-through case =   badvaddr  *pgd_current | 
 | 1122 | 	 * vmalloc case      =   badvaddr  swapper_pg_dir | 
 | 1123 | 	 */ | 
 | 1124 |  | 
 | 1125 | 	if (vmalloc_branch_delay_filled) | 
 | 1126 | 		/* get pgd offset in bytes */ | 
 | 1127 | 		uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3); | 
 | 1128 |  | 
 | 1129 | #ifdef __PAGETABLE_PMD_FOLDED | 
 | 1130 | 	GET_CONTEXT(p, tmp); /* get context reg */ | 
 | 1131 | #endif | 
 | 1132 | 	uasm_i_andi(p, scratch, scratch, (PTRS_PER_PGD - 1) << 3); | 
 | 1133 |  | 
 | 1134 | 	if (use_lwx_insns()) { | 
 | 1135 | 		UASM_i_LWX(p, LOC_PTEP, scratch, ptr); | 
 | 1136 | 	} else { | 
 | 1137 | 		uasm_i_daddu(p, ptr, ptr, scratch); /* add in pgd offset */ | 
 | 1138 | 		uasm_i_ld(p, LOC_PTEP, 0, ptr); /* get pmd pointer */ | 
 | 1139 | 	} | 
 | 1140 |  | 
 | 1141 | #ifndef __PAGETABLE_PMD_FOLDED | 
 | 1142 | 	/* get pmd offset in bytes */ | 
 | 1143 | 	uasm_i_dsrl_safe(p, scratch, tmp, PMD_SHIFT - 3); | 
 | 1144 | 	uasm_i_andi(p, scratch, scratch, (PTRS_PER_PMD - 1) << 3); | 
 | 1145 | 	GET_CONTEXT(p, tmp); /* get context reg */ | 
 | 1146 |  | 
 | 1147 | 	if (use_lwx_insns()) { | 
 | 1148 | 		UASM_i_LWX(p, scratch, scratch, ptr); | 
 | 1149 | 	} else { | 
 | 1150 | 		uasm_i_daddu(p, ptr, ptr, scratch); /* add in pmd offset */ | 
 | 1151 | 		UASM_i_LW(p, scratch, 0, ptr); | 
 | 1152 | 	} | 
 | 1153 | #endif | 
 | 1154 | 	/* Adjust the context during the load latency. */ | 
 | 1155 | 	build_adjust_context(p, tmp); | 
 | 1156 |  | 
 | 1157 | #ifdef CONFIG_HUGETLB_PAGE | 
 | 1158 | 	uasm_il_bbit1(p, r, scratch, ilog2(_PAGE_HUGE), label_tlb_huge_update); | 
 | 1159 | 	/* | 
 | 1160 | 	 * In the LWX case we don't want to do the load in the | 
 | 1161 | 	 * delay slot.  It cannot issue in the same cycle and may be | 
 | 1162 | 	 * speculative and unneeded. | 
 | 1163 | 	 */ | 
 | 1164 | 	if (use_lwx_insns()) | 
 | 1165 | 		uasm_i_nop(p); | 
 | 1166 | #endif /* CONFIG_HUGETLB_PAGE */ | 
 | 1167 |  | 
 | 1168 |  | 
 | 1169 | 	/* build_update_entries */ | 
 | 1170 | 	if (use_lwx_insns()) { | 
 | 1171 | 		even = ptr; | 
 | 1172 | 		odd = tmp; | 
 | 1173 | 		UASM_i_LWX(p, even, scratch, tmp); | 
 | 1174 | 		UASM_i_ADDIU(p, tmp, tmp, sizeof(pte_t)); | 
 | 1175 | 		UASM_i_LWX(p, odd, scratch, tmp); | 
 | 1176 | 	} else { | 
 | 1177 | 		UASM_i_ADDU(p, ptr, scratch, tmp); /* add in offset */ | 
 | 1178 | 		even = tmp; | 
 | 1179 | 		odd = ptr; | 
 | 1180 | 		UASM_i_LW(p, even, 0, ptr); /* get even pte */ | 
 | 1181 | 		UASM_i_LW(p, odd, sizeof(pte_t), ptr); /* get odd pte */ | 
 | 1182 | 	} | 
 | 1183 | 	if (kernel_uses_smartmips_rixi) { | 
 | 1184 | 		uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_NO_EXEC)); | 
 | 1185 | 		uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_NO_EXEC)); | 
 | 1186 | 		uasm_i_drotr(p, even, even, | 
 | 1187 | 			     ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC)); | 
 | 1188 | 		UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */ | 
 | 1189 | 		uasm_i_drotr(p, odd, odd, | 
 | 1190 | 			     ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC)); | 
 | 1191 | 	} else { | 
 | 1192 | 		uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_GLOBAL)); | 
 | 1193 | 		UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */ | 
 | 1194 | 		uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_GLOBAL)); | 
 | 1195 | 	} | 
 | 1196 | 	UASM_i_MTC0(p, odd, C0_ENTRYLO1); /* load it */ | 
 | 1197 |  | 
 | 1198 | 	if (c0_scratch >= 0) { | 
 | 1199 | 		UASM_i_MFC0(p, scratch, 31, c0_scratch); | 
 | 1200 | 		build_tlb_write_entry(p, l, r, tlb_random); | 
 | 1201 | 		uasm_l_leave(l, *p); | 
 | 1202 | 		rv.restore_scratch = 1; | 
 | 1203 | 	} else if (PAGE_SHIFT == 14 || PAGE_SHIFT == 13)  { | 
 | 1204 | 		build_tlb_write_entry(p, l, r, tlb_random); | 
 | 1205 | 		uasm_l_leave(l, *p); | 
 | 1206 | 		UASM_i_LW(p, scratch, scratchpad_offset(0), 0); | 
 | 1207 | 	} else { | 
 | 1208 | 		UASM_i_LW(p, scratch, scratchpad_offset(0), 0); | 
 | 1209 | 		build_tlb_write_entry(p, l, r, tlb_random); | 
 | 1210 | 		uasm_l_leave(l, *p); | 
 | 1211 | 		rv.restore_scratch = 1; | 
 | 1212 | 	} | 
 | 1213 |  | 
 | 1214 | 	uasm_i_eret(p); /* return from trap */ | 
 | 1215 |  | 
 | 1216 | 	return rv; | 
 | 1217 | } | 
 | 1218 |  | 
| David Daney | e6f72d3 | 2009-05-20 11:40:58 -0700 | [diff] [blame] | 1219 | /* | 
 | 1220 |  * For a 64-bit kernel, we are using the 64-bit XTLB refill exception | 
 | 1221 |  * because EXL == 0.  If we wrap, we can also use the 32 instruction | 
 | 1222 |  * slots before the XTLB refill exception handler which belong to the | 
 | 1223 |  * unused TLB refill exception. | 
 | 1224 |  */ | 
 | 1225 | #define MIPS64_REFILL_INSNS 32 | 
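 |  | /* | 
 |  |  * Rough picture of the resulting layout (a sketch; see the fold/split | 
 |  |  * logic in build_r4000_tlb_refill_handler below): | 
 |  |  * | 
 |  |  *   ebase + 0x000: otherwise unused 32-bit TLB refill slot, 32 | 
 |  |  *                  instructions, holds the tail when the handler is split | 
 |  |  *   ebase + 0x080: XTLB refill slot, 32 instructions, holds the head | 
 |  |  *                  of the synthesized handler | 
 |  |  */ | 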
 | 1226 |  | 
| Ralf Baechle | 234fcd1 | 2008-03-08 09:56:28 +0000 | [diff] [blame] | 1227 | static void __cpuinit build_r4000_tlb_refill_handler(void) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1228 | { | 
 | 1229 | 	u32 *p = tlb_handler; | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1230 | 	struct uasm_label *l = labels; | 
 | 1231 | 	struct uasm_reloc *r = relocs; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1232 | 	u32 *f; | 
 | 1233 | 	unsigned int final_len; | 
| Ralf Baechle | 4a9040f | 2011-03-29 10:54:54 +0200 | [diff] [blame] | 1234 | 	struct mips_huge_tlb_info htlb_info __maybe_unused; | 
 | 1235 | 	enum vmalloc64_mode vmalloc_mode __maybe_unused; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1236 |  | 
 | 1237 | 	memset(tlb_handler, 0, sizeof(tlb_handler)); | 
 | 1238 | 	memset(labels, 0, sizeof(labels)); | 
 | 1239 | 	memset(relocs, 0, sizeof(relocs)); | 
 | 1240 | 	memset(final_handler, 0, sizeof(final_handler)); | 
 | 1241 |  | 
| David Daney | 2c8c53e | 2010-12-27 18:07:57 -0800 | [diff] [blame] | 1242 | 	if ((scratch_reg > 0 || scratchpad_available()) && use_bbit_insns()) { | 
 | 1243 | 		htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1, | 
 | 1244 | 							  scratch_reg); | 
 | 1245 | 		vmalloc_mode = refill_scratch; | 
 | 1246 | 	} else { | 
 | 1247 | 		htlb_info.huge_pte = K0; | 
 | 1248 | 		htlb_info.restore_scratch = 0; | 
 | 1249 | 		vmalloc_mode = refill_noscratch; | 
 | 1250 | 		/* | 
 | 1251 | 		 * create the plain linear handler | 
 | 1252 | 		 */ | 
 | 1253 | 		if (bcm1250_m3_war()) { | 
 | 1254 | 			unsigned int segbits = 44; | 
 | 1255 |  | 
 | 1256 | 			uasm_i_dmfc0(&p, K0, C0_BADVADDR); | 
 | 1257 | 			uasm_i_dmfc0(&p, K1, C0_ENTRYHI); | 
 | 1258 | 			uasm_i_xor(&p, K0, K0, K1); | 
 | 1259 | 			uasm_i_dsrl_safe(&p, K1, K0, 62); | 
 | 1260 | 			uasm_i_dsrl_safe(&p, K0, K0, 12 + 1); | 
 | 1261 | 			uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits); | 
 | 1262 | 			uasm_i_or(&p, K0, K0, K1); | 
 | 1263 | 			uasm_il_bnez(&p, &r, K0, label_leave); | 
 | 1264 | 			/* No need for uasm_i_nop */ | 
 | 1265 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1266 |  | 
| Ralf Baechle | 875d43e | 2005-09-03 15:56:16 -0700 | [diff] [blame] | 1267 | #ifdef CONFIG_64BIT | 
| David Daney | 2c8c53e | 2010-12-27 18:07:57 -0800 | [diff] [blame] | 1268 | 		build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1269 | #else | 
| David Daney | 2c8c53e | 2010-12-27 18:07:57 -0800 | [diff] [blame] | 1270 | 		build_get_pgde32(&p, K0, K1); /* get pgd in K1 */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1271 | #endif | 
 | 1272 |  | 
| David Daney | fd062c8 | 2009-05-27 17:47:44 -0700 | [diff] [blame] | 1273 | #ifdef CONFIG_HUGETLB_PAGE | 
| David Daney | 2c8c53e | 2010-12-27 18:07:57 -0800 | [diff] [blame] | 1274 | 		build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update); | 
| David Daney | fd062c8 | 2009-05-27 17:47:44 -0700 | [diff] [blame] | 1275 | #endif | 
 | 1276 |  | 
| David Daney | 2c8c53e | 2010-12-27 18:07:57 -0800 | [diff] [blame] | 1277 | 		build_get_ptep(&p, K0, K1); | 
 | 1278 | 		build_update_entries(&p, K0, K1); | 
 | 1279 | 		build_tlb_write_entry(&p, &l, &r, tlb_random); | 
 | 1280 | 		uasm_l_leave(&l, p); | 
 | 1281 | 		uasm_i_eret(&p); /* return from trap */ | 
 | 1282 | 	} | 
| David Daney | fd062c8 | 2009-05-27 17:47:44 -0700 | [diff] [blame] | 1283 | #ifdef CONFIG_HUGETLB_PAGE | 
 | 1284 | 	uasm_l_tlb_huge_update(&l, p); | 
| David Daney | 2c8c53e | 2010-12-27 18:07:57 -0800 | [diff] [blame] | 1285 | 	build_huge_update_entries(&p, htlb_info.huge_pte, K1); | 
 | 1286 | 	build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random, | 
 | 1287 | 				   htlb_info.restore_scratch); | 
| David Daney | fd062c8 | 2009-05-27 17:47:44 -0700 | [diff] [blame] | 1288 | #endif | 
 | 1289 |  | 
| Ralf Baechle | 875d43e | 2005-09-03 15:56:16 -0700 | [diff] [blame] | 1290 | #ifdef CONFIG_64BIT | 
| David Daney | 2c8c53e | 2010-12-27 18:07:57 -0800 | [diff] [blame] | 1291 | 	build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, vmalloc_mode); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1292 | #endif | 
 | 1293 |  | 
 | 1294 | 	/* | 
 | 1295 | 	 * Overflow check: For the 64bit handler, we need at least one | 
 | 1296 | 	 * free instruction slot for the wrap-around branch. In worst | 
 | 1297 | 	 * case, if the intended insertion point is a delay slot, we | 
| Matt LaPlante | 4b3f686 | 2006-10-03 22:21:02 +0200 | [diff] [blame] | 1298 | 	 * need three, with the second nop'ed and the third being | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1299 | 	 * unused. | 
 | 1300 | 	 */ | 
| Fuxin Zhang | 2a21c73 | 2007-06-06 14:52:43 +0800 | [diff] [blame] | 1301 | 	/* Loongson2 ebase is different than r4k, we have more space */ | 
 | 1302 | #if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1303 | 	if ((p - tlb_handler) > 64) | 
 | 1304 | 		panic("TLB refill handler space exceeded"); | 
 | 1305 | #else | 
| David Daney | e6f72d3 | 2009-05-20 11:40:58 -0700 | [diff] [blame] | 1306 | 	if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1) | 
 | 1307 | 	    || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3) | 
 | 1308 | 		&& uasm_insn_has_bdelay(relocs, | 
 | 1309 | 					tlb_handler + MIPS64_REFILL_INSNS - 3))) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1310 | 		panic("TLB refill handler space exceeded"); | 
 | 1311 | #endif | 
 | 1312 |  | 
 | 1313 | 	/* | 
 | 1314 | 	 * Now fold the handler in the TLB refill handler space. | 
 | 1315 | 	 */ | 
| Fuxin Zhang | 2a21c73 | 2007-06-06 14:52:43 +0800 | [diff] [blame] | 1316 | #if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1317 | 	f = final_handler; | 
 | 1318 | 	/* Simplest case, just copy the handler. */ | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1319 | 	uasm_copy_handler(relocs, labels, tlb_handler, p, f); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1320 | 	final_len = p - tlb_handler; | 
| Ralf Baechle | 875d43e | 2005-09-03 15:56:16 -0700 | [diff] [blame] | 1321 | #else /* CONFIG_64BIT */ | 
| David Daney | e6f72d3 | 2009-05-20 11:40:58 -0700 | [diff] [blame] | 1322 | 	f = final_handler + MIPS64_REFILL_INSNS; | 
 | 1323 | 	if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1324 | 		/* Just copy the handler. */ | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1325 | 		uasm_copy_handler(relocs, labels, tlb_handler, p, f); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1326 | 		final_len = p - tlb_handler; | 
 | 1327 | 	} else { | 
| David Daney | fd062c8 | 2009-05-27 17:47:44 -0700 | [diff] [blame] | 1328 | #if defined(CONFIG_HUGETLB_PAGE) | 
 | 1329 | 		const enum label_id ls = label_tlb_huge_update; | 
| David Daney | 95affdd | 2009-05-20 11:40:59 -0700 | [diff] [blame] | 1330 | #else | 
 | 1331 | 		const enum label_id ls = label_vmalloc; | 
 | 1332 | #endif | 
 | 1333 | 		u32 *split; | 
 | 1334 | 		int ov = 0; | 
 | 1335 | 		int i; | 
 | 1336 |  | 
 | 1337 | 		for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++) | 
 | 1338 | 			; | 
 | 1339 | 		BUG_ON(i == ARRAY_SIZE(labels)); | 
 | 1340 | 		split = labels[i].addr; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1341 |  | 
 | 1342 | 		/* | 
| David Daney | 95affdd | 2009-05-20 11:40:59 -0700 | [diff] [blame] | 1343 | 		 * See if we have overflowed one way or the other. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1344 | 		 */ | 
| David Daney | 95affdd | 2009-05-20 11:40:59 -0700 | [diff] [blame] | 1345 | 		if (split > tlb_handler + MIPS64_REFILL_INSNS || | 
 | 1346 | 		    split < p - MIPS64_REFILL_INSNS) | 
 | 1347 | 			ov = 1; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1348 |  | 
| David Daney | 95affdd | 2009-05-20 11:40:59 -0700 | [diff] [blame] | 1349 | 		if (ov) { | 
 | 1350 | 			/* | 
 | 1351 | 			 * Split two instructions before the end.  One | 
 | 1352 | 			 * for the branch and one for the instruction | 
 | 1353 | 			 * in the delay slot. | 
 | 1354 | 			 */ | 
 | 1355 | 			split = tlb_handler + MIPS64_REFILL_INSNS - 2; | 
 | 1356 |  | 
 | 1357 | 			/* | 
 | 1358 | 			 * If the branch would fall in a delay slot, | 
 | 1359 | 			 * we must back up an additional instruction | 
 | 1360 | 			 * so that it is no longer in a delay slot. | 
 | 1361 | 			 */ | 
 | 1362 | 			if (uasm_insn_has_bdelay(relocs, split - 1)) | 
 | 1363 | 				split--; | 
 | 1364 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1365 | 		/* Copy first part of the handler. */ | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1366 | 		uasm_copy_handler(relocs, labels, tlb_handler, split, f); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1367 | 		f += split - tlb_handler; | 
 | 1368 |  | 
| David Daney | 95affdd | 2009-05-20 11:40:59 -0700 | [diff] [blame] | 1369 | 		if (ov) { | 
 | 1370 | 			/* Insert branch. */ | 
 | 1371 | 			uasm_l_split(&l, final_handler); | 
 | 1372 | 			uasm_il_b(&f, &r, label_split); | 
 | 1373 | 			if (uasm_insn_has_bdelay(relocs, split)) | 
 | 1374 | 				uasm_i_nop(&f); | 
 | 1375 | 			else { | 
 | 1376 | 				uasm_copy_handler(relocs, labels, | 
 | 1377 | 						  split, split + 1, f); | 
 | 1378 | 				uasm_move_labels(labels, f, f + 1, -1); | 
 | 1379 | 				f++; | 
 | 1380 | 				split++; | 
 | 1381 | 			} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1382 | 		} | 
 | 1383 |  | 
 | 1384 | 		/* Copy the rest of the handler. */ | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1385 | 		uasm_copy_handler(relocs, labels, split, p, final_handler); | 
| David Daney | e6f72d3 | 2009-05-20 11:40:58 -0700 | [diff] [blame] | 1386 | 		final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) + | 
 | 1387 | 			    (p - split); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1388 | 	} | 
| Ralf Baechle | 875d43e | 2005-09-03 15:56:16 -0700 | [diff] [blame] | 1389 | #endif /* CONFIG_64BIT */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1390 |  | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1391 | 	uasm_resolve_relocs(relocs, labels); | 
 | 1392 | 	pr_debug("Wrote TLB refill handler (%u instructions).\n", | 
 | 1393 | 		 final_len); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1394 |  | 
| Ralf Baechle | 91b05e6 | 2006-03-29 18:53:00 +0100 | [diff] [blame] | 1395 | 	memcpy((void *)ebase, final_handler, 0x100); | 
| Franck Bui-Huu | 92b1e6a | 2007-10-18 09:11:17 +0200 | [diff] [blame] | 1396 |  | 
 | 1397 | 	dump_handler((u32 *)ebase, 64); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1398 | } | 
 | 1399 |  | 
 | 1400 | /* | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1401 |  * 128 instructions for the fastpath handler is generous and should | 
 | 1402 |  * never be exceeded. | 
 | 1403 |  */ | 
 | 1404 | #define FASTPATH_SIZE 128 | 
 | 1405 |  | 
| Franck Bui-Huu | cbdbe07 | 2007-10-18 09:11:16 +0200 | [diff] [blame] | 1406 | u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned; | 
 | 1407 | u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned; | 
 | 1408 | u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned; | 
| David Daney | 3d8bfdd | 2010-12-21 14:19:11 -0800 | [diff] [blame] | 1409 | #ifdef CONFIG_MIPS_PGD_C0_CONTEXT | 
 | 1410 | u32 tlbmiss_handler_setup_pgd[16] __cacheline_aligned; | 
 | 1411 |  | 
 | 1412 | static void __cpuinit build_r4000_setup_pgd(void) | 
 | 1413 | { | 
 | 1414 | 	const int a0 = 4; | 
 | 1415 | 	const int a1 = 5; | 
 | 1416 | 	u32 *p = tlbmiss_handler_setup_pgd; | 
 | 1417 | 	struct uasm_label *l = labels; | 
 | 1418 | 	struct uasm_reloc *r = relocs; | 
 | 1419 |  | 
 | 1420 | 	memset(tlbmiss_handler_setup_pgd, 0, sizeof(tlbmiss_handler_setup_pgd)); | 
 | 1421 | 	memset(labels, 0, sizeof(labels)); | 
 | 1422 | 	memset(relocs, 0, sizeof(relocs)); | 
 | 1423 |  | 
 | 1424 | 	pgd_reg = allocate_kscratch(); | 
 | 1425 |  | 
 | 1426 | 	if (pgd_reg == -1) { | 
 | 1427 | 		/* PGD << 11 in c0_Context */ | 
 | 1428 | 		/* | 
 | 1429 | 		 * If it is a ckseg0 address, convert to a physical | 
 | 1430 | 		 * address.  Shifting right by 29 and adding 4 will | 
 | 1431 | 		 * result in zero for these addresses. | 
 | 1432 | 		 * | 
 | 1433 | 		 */ | 
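 |  | 		/* | 
 |  | 		 * Worked example (address chosen only for illustration): | 
 |  | 		 * for a PGD at the ckseg0 address 0xffffffff8021c000 the | 
 |  | 		 * sra by 29 yields -4, adding 4 gives 0, so the branch | 
 |  | 		 * falls through and the dinsm clears bits 63..29, leaving | 
 |  | 		 * the physical address 0x0021c000.  Any other address gives | 
 |  | 		 * a nonzero sum and skips the conversion. | 
 |  | 		 */ | 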
 | 1434 | 		UASM_i_SRA(&p, a1, a0, 29); | 
 | 1435 | 		UASM_i_ADDIU(&p, a1, a1, 4); | 
 | 1436 | 		uasm_il_bnez(&p, &r, a1, label_tlbl_goaround1); | 
 | 1437 | 		uasm_i_nop(&p); | 
 | 1438 | 		uasm_i_dinsm(&p, a0, 0, 29, 64 - 29); | 
 | 1439 | 		uasm_l_tlbl_goaround1(&l, p); | 
 | 1440 | 		UASM_i_SLL(&p, a0, a0, 11); | 
 | 1441 | 		uasm_i_jr(&p, 31); | 
 | 1442 | 		UASM_i_MTC0(&p, a0, C0_CONTEXT); | 
 | 1443 | 	} else { | 
 | 1444 | 		/* PGD in c0_KScratch */ | 
 | 1445 | 		uasm_i_jr(&p, 31); | 
 | 1446 | 		UASM_i_MTC0(&p, a0, 31, pgd_reg); | 
 | 1447 | 	} | 
 | 1448 | 	if (p - tlbmiss_handler_setup_pgd > ARRAY_SIZE(tlbmiss_handler_setup_pgd)) | 
 | 1449 | 		panic("tlbmiss_handler_setup_pgd space exceeded"); | 
 | 1450 | 	uasm_resolve_relocs(relocs, labels); | 
 | 1451 | 	pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n", | 
 | 1452 | 		 (unsigned int)(p - tlbmiss_handler_setup_pgd)); | 
 | 1453 |  | 
 | 1454 | 	dump_handler(tlbmiss_handler_setup_pgd, | 
 | 1455 | 		     ARRAY_SIZE(tlbmiss_handler_setup_pgd)); | 
 | 1456 | } | 
 | 1457 | #endif | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1458 |  | 
| Ralf Baechle | 234fcd1 | 2008-03-08 09:56:28 +0000 | [diff] [blame] | 1459 | static void __cpuinit | 
| David Daney | bd1437e | 2009-05-08 15:10:50 -0700 | [diff] [blame] | 1460 | iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1461 | { | 
 | 1462 | #ifdef CONFIG_SMP | 
 | 1463 | # ifdef CONFIG_64BIT_PHYS_ADDR | 
 | 1464 | 	if (cpu_has_64bits) | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1465 | 		uasm_i_lld(p, pte, 0, ptr); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1466 | 	else | 
 | 1467 | # endif | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1468 | 		UASM_i_LL(p, pte, 0, ptr); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1469 | #else | 
 | 1470 | # ifdef CONFIG_64BIT_PHYS_ADDR | 
 | 1471 | 	if (cpu_has_64bits) | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1472 | 		uasm_i_ld(p, pte, 0, ptr); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1473 | 	else | 
 | 1474 | # endif | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1475 | 		UASM_i_LW(p, pte, 0, ptr); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1476 | #endif | 
 | 1477 | } | 
 | 1478 |  | 
| Ralf Baechle | 234fcd1 | 2008-03-08 09:56:28 +0000 | [diff] [blame] | 1479 | static void __cpuinit | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1480 | iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr, | 
| Thiemo Seufer | 63b2d2f | 2005-04-28 08:52:57 +0000 | [diff] [blame] | 1481 | 	unsigned int mode) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1482 | { | 
| Thiemo Seufer | 63b2d2f | 2005-04-28 08:52:57 +0000 | [diff] [blame] | 1483 | #ifdef CONFIG_64BIT_PHYS_ADDR | 
 | 1484 | 	unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY); | 
 | 1485 | #endif | 
 | 1486 |  | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1487 | 	uasm_i_ori(p, pte, pte, mode); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1488 | #ifdef CONFIG_SMP | 
 | 1489 | # ifdef CONFIG_64BIT_PHYS_ADDR | 
 | 1490 | 	if (cpu_has_64bits) | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1491 | 		uasm_i_scd(p, pte, 0, ptr); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1492 | 	else | 
 | 1493 | # endif | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1494 | 		UASM_i_SC(p, pte, 0, ptr); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1495 |  | 
 | 1496 | 	if (r10000_llsc_war()) | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1497 | 		uasm_il_beqzl(p, r, pte, label_smp_pgtable_change); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1498 | 	else | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1499 | 		uasm_il_beqz(p, r, pte, label_smp_pgtable_change); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1500 |  | 
 | 1501 | # ifdef CONFIG_64BIT_PHYS_ADDR | 
 | 1502 | 	if (!cpu_has_64bits) { | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1503 | 		/* no uasm_i_nop needed */ | 
 | 1504 | 		uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr); | 
 | 1505 | 		uasm_i_ori(p, pte, pte, hwmode); | 
 | 1506 | 		uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr); | 
 | 1507 | 		uasm_il_beqz(p, r, pte, label_smp_pgtable_change); | 
 | 1508 | 		/* no uasm_i_nop needed */ | 
 | 1509 | 		uasm_i_lw(p, pte, 0, ptr); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1510 | 	} else | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1511 | 		uasm_i_nop(p); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1512 | # else | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1513 | 	uasm_i_nop(p); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1514 | # endif | 
 | 1515 | #else | 
 | 1516 | # ifdef CONFIG_64BIT_PHYS_ADDR | 
 | 1517 | 	if (cpu_has_64bits) | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1518 | 		uasm_i_sd(p, pte, 0, ptr); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1519 | 	else | 
 | 1520 | # endif | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1521 | 		UASM_i_SW(p, pte, 0, ptr); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1522 |  | 
 | 1523 | # ifdef CONFIG_64BIT_PHYS_ADDR | 
 | 1524 | 	if (!cpu_has_64bits) { | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1525 | 		uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr); | 
 | 1526 | 		uasm_i_ori(p, pte, pte, hwmode); | 
 | 1527 | 		uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr); | 
 | 1528 | 		uasm_i_lw(p, pte, 0, ptr); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1529 | 	} | 
 | 1530 | # endif | 
 | 1531 | #endif | 
 | 1532 | } | 
 | 1533 |  | 
 | 1534 | /* | 
 | 1535 |  * Check if PTE is present; if not, jump to LABEL. PTR points to | 
 | 1536 |  * the page table where this PTE is located; PTE will be reloaded | 
 | 1537 |  * with its original value. | 
 | 1538 |  */ | 
| Ralf Baechle | 234fcd1 | 2008-03-08 09:56:28 +0000 | [diff] [blame] | 1539 | static void __cpuinit | 
| David Daney | bd1437e | 2009-05-08 15:10:50 -0700 | [diff] [blame] | 1540 | build_pte_present(u32 **p, struct uasm_reloc **r, | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 1541 | 		  int pte, int ptr, int scratch, enum label_id lid) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1542 | { | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 1543 | 	int t = scratch >= 0 ? scratch : pte; | 
 | 1544 |  | 
| David Daney | 6dd9344 | 2010-02-10 15:12:47 -0800 | [diff] [blame] | 1545 | 	if (kernel_uses_smartmips_rixi) { | 
| David Daney | cc33ae4 | 2010-12-20 15:54:50 -0800 | [diff] [blame] | 1546 | 		if (use_bbit_insns()) { | 
 | 1547 | 			uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid); | 
 | 1548 | 			uasm_i_nop(p); | 
 | 1549 | 		} else { | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 1550 | 			uasm_i_andi(p, t, pte, _PAGE_PRESENT); | 
 | 1551 | 			uasm_il_beqz(p, r, t, lid); | 
 | 1552 | 			if (pte == t) | 
 | 1553 | 				/* You lose the SMP race :-( */ | 
 | 1554 | 				iPTE_LW(p, pte, ptr); | 
| David Daney | cc33ae4 | 2010-12-20 15:54:50 -0800 | [diff] [blame] | 1555 | 		} | 
| David Daney | 6dd9344 | 2010-02-10 15:12:47 -0800 | [diff] [blame] | 1556 | 	} else { | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 1557 | 		uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_READ); | 
 | 1558 | 		uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_READ); | 
 | 1559 | 		uasm_il_bnez(p, r, t, lid); | 
 | 1560 | 		if (pte == t) | 
 | 1561 | 			/* You lose the SMP race :-( */ | 
 | 1562 | 			iPTE_LW(p, pte, ptr); | 
| David Daney | 6dd9344 | 2010-02-10 15:12:47 -0800 | [diff] [blame] | 1563 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1564 | } | 
 | 1565 |  | 
 | 1566 | /* Make PTE valid, store result in PTR. */ | 
| Ralf Baechle | 234fcd1 | 2008-03-08 09:56:28 +0000 | [diff] [blame] | 1567 | static void __cpuinit | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1568 | build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1569 | 		 unsigned int ptr) | 
 | 1570 | { | 
| Thiemo Seufer | 63b2d2f | 2005-04-28 08:52:57 +0000 | [diff] [blame] | 1571 | 	unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED; | 
 | 1572 |  | 
 | 1573 | 	iPTE_SW(p, r, pte, ptr, mode); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1574 | } | 
 | 1575 |  | 
 | 1576 | /* | 
 | 1577 |  * Check if PTE can be written to; if not, branch to LABEL. Regardless, | 
 | 1578 |  * restore PTE with value from PTR when done. | 
 | 1579 |  */ | 
| Ralf Baechle | 234fcd1 | 2008-03-08 09:56:28 +0000 | [diff] [blame] | 1580 | static void __cpuinit | 
| David Daney | bd1437e | 2009-05-08 15:10:50 -0700 | [diff] [blame] | 1581 | build_pte_writable(u32 **p, struct uasm_reloc **r, | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 1582 | 		   unsigned int pte, unsigned int ptr, int scratch, | 
 | 1583 | 		   enum label_id lid) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1584 | { | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 1585 | 	int t = scratch >= 0 ? scratch : pte; | 
 | 1586 |  | 
 | 1587 | 	uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_WRITE); | 
 | 1588 | 	uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_WRITE); | 
 | 1589 | 	uasm_il_bnez(p, r, t, lid); | 
 | 1590 | 	if (pte == t) | 
 | 1591 | 		/* You lose the SMP race :-( */ | 
| David Daney | cc33ae4 | 2010-12-20 15:54:50 -0800 | [diff] [blame] | 1592 | 		iPTE_LW(p, pte, ptr); | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 1593 | 	else | 
 | 1594 | 		uasm_i_nop(p); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1595 | } | 
 | 1596 |  | 
 | 1597 | /* Make PTE writable, update software status bits as well, then store | 
 | 1598 |  * at PTR. | 
 | 1599 |  */ | 
| Ralf Baechle | 234fcd1 | 2008-03-08 09:56:28 +0000 | [diff] [blame] | 1600 | static void __cpuinit | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1601 | build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1602 | 		 unsigned int ptr) | 
 | 1603 | { | 
| Thiemo Seufer | 63b2d2f | 2005-04-28 08:52:57 +0000 | [diff] [blame] | 1604 | 	unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | 
 | 1605 | 			     | _PAGE_DIRTY); | 
 | 1606 |  | 
 | 1607 | 	iPTE_SW(p, r, pte, ptr, mode); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1608 | } | 
 | 1609 |  | 
 | 1610 | /* | 
 | 1611 |  * Check if PTE can be modified; if not, branch to LABEL. Regardless, | 
 | 1612 |  * restore PTE with value from PTR when done. | 
 | 1613 |  */ | 
| Ralf Baechle | 234fcd1 | 2008-03-08 09:56:28 +0000 | [diff] [blame] | 1614 | static void __cpuinit | 
| David Daney | bd1437e | 2009-05-08 15:10:50 -0700 | [diff] [blame] | 1615 | build_pte_modifiable(u32 **p, struct uasm_reloc **r, | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 1616 | 		     unsigned int pte, unsigned int ptr, int scratch, | 
 | 1617 | 		     enum label_id lid) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1618 | { | 
| David Daney | cc33ae4 | 2010-12-20 15:54:50 -0800 | [diff] [blame] | 1619 | 	if (use_bbit_insns()) { | 
 | 1620 | 		uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid); | 
 | 1621 | 		uasm_i_nop(p); | 
 | 1622 | 	} else { | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 1623 | 		int t = scratch >= 0 ? scratch : pte; | 
 | 1624 | 		uasm_i_andi(p, t, pte, _PAGE_WRITE); | 
 | 1625 | 		uasm_il_beqz(p, r, t, lid); | 
 | 1626 | 		if (pte == t) | 
 | 1627 | 			/* You lose the SMP race :-( */ | 
 | 1628 | 			iPTE_LW(p, pte, ptr); | 
| David Daney | cc33ae4 | 2010-12-20 15:54:50 -0800 | [diff] [blame] | 1629 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1630 | } | 
 | 1631 |  | 
| David Daney | 8262228 | 2009-10-14 12:16:56 -0700 | [diff] [blame] | 1632 | #ifndef CONFIG_MIPS_PGD_C0_CONTEXT | 
| David Daney | 3d8bfdd | 2010-12-21 14:19:11 -0800 | [diff] [blame] | 1633 |  | 
 | 1634 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1635 | /* | 
 | 1636 |  * R3000 style TLB load/store/modify handlers. | 
 | 1637 |  */ | 
 | 1638 |  | 
| Maciej W. Rozycki | fded2e5 | 2005-06-13 20:24:00 +0000 | [diff] [blame] | 1639 | /* | 
 | 1640 |  * This places the pte into ENTRYLO0 and writes it with tlbwi. | 
 | 1641 |  * Then it returns. | 
 | 1642 |  */ | 
| Ralf Baechle | 234fcd1 | 2008-03-08 09:56:28 +0000 | [diff] [blame] | 1643 | static void __cpuinit | 
| Maciej W. Rozycki | fded2e5 | 2005-06-13 20:24:00 +0000 | [diff] [blame] | 1644 | build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1645 | { | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1646 | 	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */ | 
 | 1647 | 	uasm_i_mfc0(p, tmp, C0_EPC); /* cp0 delay */ | 
 | 1648 | 	uasm_i_tlbwi(p); | 
 | 1649 | 	uasm_i_jr(p, tmp); | 
 | 1650 | 	uasm_i_rfe(p); /* branch delay */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1651 | } | 
 | 1652 |  | 
 | 1653 | /* | 
| Maciej W. Rozycki | fded2e5 | 2005-06-13 20:24:00 +0000 | [diff] [blame] | 1654 |  * This places the pte into ENTRYLO0 and writes it with tlbwi | 
 | 1655 |  * or tlbwr as appropriate.  This is because the index register | 
 | 1656 |  * may have the probe fail bit set as a result of a trap on a | 
 | 1657 |  * kseg2 access, i.e. without refill.  Then it returns. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1658 |  */ | 
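 |  | /* | 
 |  |  * (The probe-failure flag is the sign bit, bit 31, of c0_index, which | 
 |  |  * is why a simple bltz on the index value is enough to detect a | 
 |  |  * failed tlbp below.) | 
 |  |  */ | 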
| Ralf Baechle | 234fcd1 | 2008-03-08 09:56:28 +0000 | [diff] [blame] | 1659 | static void __cpuinit | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1660 | build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l, | 
 | 1661 | 			     struct uasm_reloc **r, unsigned int pte, | 
 | 1662 | 			     unsigned int tmp) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1663 | { | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1664 | 	uasm_i_mfc0(p, tmp, C0_INDEX); | 
 | 1665 | 	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */ | 
 | 1666 | 	uasm_il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */ | 
 | 1667 | 	uasm_i_mfc0(p, tmp, C0_EPC); /* branch delay */ | 
 | 1668 | 	uasm_i_tlbwi(p); /* cp0 delay */ | 
 | 1669 | 	uasm_i_jr(p, tmp); | 
 | 1670 | 	uasm_i_rfe(p); /* branch delay */ | 
 | 1671 | 	uasm_l_r3000_write_probe_fail(l, *p); | 
 | 1672 | 	uasm_i_tlbwr(p); /* cp0 delay */ | 
 | 1673 | 	uasm_i_jr(p, tmp); | 
 | 1674 | 	uasm_i_rfe(p); /* branch delay */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1675 | } | 
 | 1676 |  | 
| Ralf Baechle | 234fcd1 | 2008-03-08 09:56:28 +0000 | [diff] [blame] | 1677 | static void __cpuinit | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1678 | build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte, | 
 | 1679 | 				   unsigned int ptr) | 
 | 1680 | { | 
 | 1681 | 	long pgdc = (long)pgd_current; | 
 | 1682 |  | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1683 | 	uasm_i_mfc0(p, pte, C0_BADVADDR); | 
 | 1684 | 	uasm_i_lui(p, ptr, uasm_rel_hi(pgdc)); /* cp0 delay */ | 
 | 1685 | 	uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr); | 
 | 1686 | 	uasm_i_srl(p, pte, pte, 22); /* load delay */ | 
 | 1687 | 	uasm_i_sll(p, pte, pte, 2); | 
 | 1688 | 	uasm_i_addu(p, ptr, ptr, pte); | 
 | 1689 | 	uasm_i_mfc0(p, pte, C0_CONTEXT); | 
 | 1690 | 	uasm_i_lw(p, ptr, 0, ptr); /* cp0 delay */ | 
 | 1691 | 	uasm_i_andi(p, pte, pte, 0xffc); /* load delay */ | 
 | 1692 | 	uasm_i_addu(p, ptr, ptr, pte); | 
 | 1693 | 	uasm_i_lw(p, pte, 0, ptr); | 
 | 1694 | 	uasm_i_tlbp(p); /* load delay */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1695 | } | 
 | 1696 |  | 
| Ralf Baechle | 234fcd1 | 2008-03-08 09:56:28 +0000 | [diff] [blame] | 1697 | static void __cpuinit build_r3000_tlb_load_handler(void) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1698 | { | 
 | 1699 | 	u32 *p = handle_tlbl; | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1700 | 	struct uasm_label *l = labels; | 
 | 1701 | 	struct uasm_reloc *r = relocs; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1702 |  | 
 | 1703 | 	memset(handle_tlbl, 0, sizeof(handle_tlbl)); | 
 | 1704 | 	memset(labels, 0, sizeof(labels)); | 
 | 1705 | 	memset(relocs, 0, sizeof(relocs)); | 
 | 1706 |  | 
 | 1707 | 	build_r3000_tlbchange_handler_head(&p, K0, K1); | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 1708 | 	build_pte_present(&p, &r, K0, K1, -1, label_nopage_tlbl); | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1709 | 	uasm_i_nop(&p); /* load delay */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1710 | 	build_make_valid(&p, &r, K0, K1); | 
| Maciej W. Rozycki | fded2e5 | 2005-06-13 20:24:00 +0000 | [diff] [blame] | 1711 | 	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1712 |  | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1713 | 	uasm_l_nopage_tlbl(&l, p); | 
 | 1714 | 	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); | 
 | 1715 | 	uasm_i_nop(&p); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1716 |  | 
 | 1717 | 	if ((p - handle_tlbl) > FASTPATH_SIZE) | 
 | 1718 | 		panic("TLB load handler fastpath space exceeded"); | 
 | 1719 |  | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1720 | 	uasm_resolve_relocs(relocs, labels); | 
 | 1721 | 	pr_debug("Wrote TLB load handler fastpath (%u instructions).\n", | 
 | 1722 | 		 (unsigned int)(p - handle_tlbl)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1723 |  | 
| Franck Bui-Huu | 92b1e6a | 2007-10-18 09:11:17 +0200 | [diff] [blame] | 1724 | 	dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1725 | } | 
 | 1726 |  | 
| Ralf Baechle | 234fcd1 | 2008-03-08 09:56:28 +0000 | [diff] [blame] | 1727 | static void __cpuinit build_r3000_tlb_store_handler(void) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1728 | { | 
 | 1729 | 	u32 *p = handle_tlbs; | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1730 | 	struct uasm_label *l = labels; | 
 | 1731 | 	struct uasm_reloc *r = relocs; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1732 |  | 
 | 1733 | 	memset(handle_tlbs, 0, sizeof(handle_tlbs)); | 
 | 1734 | 	memset(labels, 0, sizeof(labels)); | 
 | 1735 | 	memset(relocs, 0, sizeof(relocs)); | 
 | 1736 |  | 
 | 1737 | 	build_r3000_tlbchange_handler_head(&p, K0, K1); | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 1738 | 	build_pte_writable(&p, &r, K0, K1, -1, label_nopage_tlbs); | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1739 | 	uasm_i_nop(&p); /* load delay */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1740 | 	build_make_write(&p, &r, K0, K1); | 
| Maciej W. Rozycki | fded2e5 | 2005-06-13 20:24:00 +0000 | [diff] [blame] | 1741 | 	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1742 |  | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1743 | 	uasm_l_nopage_tlbs(&l, p); | 
 | 1744 | 	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); | 
 | 1745 | 	uasm_i_nop(&p); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1746 |  | 
 | 1747 | 	if ((p - handle_tlbs) > FASTPATH_SIZE) | 
 | 1748 | 		panic("TLB store handler fastpath space exceeded"); | 
 | 1749 |  | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1750 | 	uasm_resolve_relocs(relocs, labels); | 
 | 1751 | 	pr_debug("Wrote TLB store handler fastpath (%u instructions).\n", | 
 | 1752 | 		 (unsigned int)(p - handle_tlbs)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1753 |  | 
| Franck Bui-Huu | 92b1e6a | 2007-10-18 09:11:17 +0200 | [diff] [blame] | 1754 | 	dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1755 | } | 
 | 1756 |  | 
| Ralf Baechle | 234fcd1 | 2008-03-08 09:56:28 +0000 | [diff] [blame] | 1757 | static void __cpuinit build_r3000_tlb_modify_handler(void) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1758 | { | 
 | 1759 | 	u32 *p = handle_tlbm; | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1760 | 	struct uasm_label *l = labels; | 
 | 1761 | 	struct uasm_reloc *r = relocs; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1762 |  | 
 | 1763 | 	memset(handle_tlbm, 0, sizeof(handle_tlbm)); | 
 | 1764 | 	memset(labels, 0, sizeof(labels)); | 
 | 1765 | 	memset(relocs, 0, sizeof(relocs)); | 
 | 1766 |  | 
 | 1767 | 	build_r3000_tlbchange_handler_head(&p, K0, K1); | 
| Ralf Baechle | d954ffe | 2011-08-02 22:52:48 +0100 | [diff] [blame] | 1768 | 	build_pte_modifiable(&p, &r, K0, K1,  -1, label_nopage_tlbm); | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1769 | 	uasm_i_nop(&p); /* load delay */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1770 | 	build_make_write(&p, &r, K0, K1); | 
| Maciej W. Rozycki | fded2e5 | 2005-06-13 20:24:00 +0000 | [diff] [blame] | 1771 | 	build_r3000_pte_reload_tlbwi(&p, K0, K1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1772 |  | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1773 | 	uasm_l_nopage_tlbm(&l, p); | 
 | 1774 | 	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); | 
 | 1775 | 	uasm_i_nop(&p); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1776 |  | 
 | 1777 | 	if ((p - handle_tlbm) > FASTPATH_SIZE) | 
 | 1778 | 		panic("TLB modify handler fastpath space exceeded"); | 
 | 1779 |  | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1780 | 	uasm_resolve_relocs(relocs, labels); | 
 | 1781 | 	pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n", | 
 | 1782 | 		 (unsigned int)(p - handle_tlbm)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1783 |  | 
| Franck Bui-Huu | 92b1e6a | 2007-10-18 09:11:17 +0200 | [diff] [blame] | 1784 | 	dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1785 | } | 
| David Daney | 8262228 | 2009-10-14 12:16:56 -0700 | [diff] [blame] | 1786 | #endif /* CONFIG_MIPS_PGD_C0_CONTEXT */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1787 |  | 
 | 1788 | /* | 
 | 1789 |  * R4000 style TLB load/store/modify handlers. | 
 | 1790 |  */ | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 1791 | static struct work_registers __cpuinit | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1792 | build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l, | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 1793 | 				   struct uasm_reloc **r) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1794 | { | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 1795 | 	struct work_registers wr = build_get_work_registers(p); | 
 | 1796 |  | 
| Ralf Baechle | 875d43e | 2005-09-03 15:56:16 -0700 | [diff] [blame] | 1797 | #ifdef CONFIG_64BIT | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 1798 | 	build_get_pmde64(p, l, r, wr.r1, wr.r2); /* get pmd in ptr */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1799 | #else | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 1800 | 	build_get_pgde32(p, wr.r1, wr.r2); /* get pgd in ptr */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1801 | #endif | 
 | 1802 |  | 
| David Daney | fd062c8 | 2009-05-27 17:47:44 -0700 | [diff] [blame] | 1803 | #ifdef CONFIG_HUGETLB_PAGE | 
 | 1804 | 	/* | 
 | 1805 | 	 * For huge tlb entries, pmd doesn't contain an address but | 
 | 1806 | 	 * instead contains the tlb pte. Check the _PAGE_HUGE bit and | 
 | 1807 | 	 * see if we need to jump to huge tlb processing. | 
 | 1808 | 	 */ | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 1809 | 	build_is_huge_pte(p, r, wr.r1, wr.r2, label_tlb_huge_update); | 
| David Daney | fd062c8 | 2009-05-27 17:47:44 -0700 | [diff] [blame] | 1810 | #endif | 
 | 1811 |  | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 1812 | 	UASM_i_MFC0(p, wr.r1, C0_BADVADDR); | 
 | 1813 | 	UASM_i_LW(p, wr.r2, 0, wr.r2); | 
 | 1814 | 	UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2); | 
 | 1815 | 	uasm_i_andi(p, wr.r1, wr.r1, (PTRS_PER_PTE - 1) << PTE_T_LOG2); | 
 | 1816 | 	UASM_i_ADDU(p, wr.r2, wr.r2, wr.r1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1817 |  | 
 | 1818 | #ifdef CONFIG_SMP | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1819 | 	uasm_l_smp_pgtable_change(l, *p); | 
 | 1820 | #endif | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 1821 | 	iPTE_LW(p, wr.r1, wr.r2); /* get even pte */ | 
| Maciej W. Rozycki | 8df5bea | 2006-08-23 14:26:50 +0100 | [diff] [blame] | 1822 | 	if (!m4kc_tlbp_war()) | 
 | 1823 | 		build_tlb_probe_entry(p); | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 1824 | 	return wr; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1825 | } | 
 | 1826 |  | 
| Ralf Baechle | 234fcd1 | 2008-03-08 09:56:28 +0000 | [diff] [blame] | 1827 | static void __cpuinit | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1828 | build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l, | 
 | 1829 | 				   struct uasm_reloc **r, unsigned int tmp, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1830 | 				   unsigned int ptr) | 
 | 1831 | { | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1832 | 	uasm_i_ori(p, ptr, ptr, sizeof(pte_t)); | 
 | 1833 | 	uasm_i_xori(p, ptr, ptr, sizeof(pte_t)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1834 | 	build_update_entries(p, tmp, ptr); | 
 | 1835 | 	build_tlb_write_entry(p, l, r, tlb_indexed); | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1836 | 	uasm_l_leave(l, *p); | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 1837 | 	build_restore_work_registers(p); | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1838 | 	uasm_i_eret(p); /* return from trap */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1839 |  | 
| Ralf Baechle | 875d43e | 2005-09-03 15:56:16 -0700 | [diff] [blame] | 1840 | #ifdef CONFIG_64BIT | 
| David Daney | 1ec5632 | 2010-04-28 12:16:18 -0700 | [diff] [blame] | 1841 | 	build_get_pgd_vmalloc64(p, l, r, tmp, ptr, not_refill); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1842 | #endif | 
 | 1843 | } | 
 | 1844 |  | 
| Ralf Baechle | 234fcd1 | 2008-03-08 09:56:28 +0000 | [diff] [blame] | 1845 | static void __cpuinit build_r4000_tlb_load_handler(void) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1846 | { | 
 | 1847 | 	u32 *p = handle_tlbl; | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1848 | 	struct uasm_label *l = labels; | 
 | 1849 | 	struct uasm_reloc *r = relocs; | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 1850 | 	struct work_registers wr; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1851 |  | 
 | 1852 | 	memset(handle_tlbl, 0, sizeof(handle_tlbl)); | 
 | 1853 | 	memset(labels, 0, sizeof(labels)); | 
 | 1854 | 	memset(relocs, 0, sizeof(relocs)); | 
 | 1855 |  | 
 | 1856 | 	if (bcm1250_m3_war()) { | 
| Ralf Baechle | 3d45285 | 2010-03-23 17:56:38 +0100 | [diff] [blame] | 1857 | 		unsigned int segbits = 44; | 
 | 1858 |  | 
 | 1859 | 		uasm_i_dmfc0(&p, K0, C0_BADVADDR); | 
 | 1860 | 		uasm_i_dmfc0(&p, K1, C0_ENTRYHI); | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1861 | 		uasm_i_xor(&p, K0, K0, K1); | 
| David Daney | 3be6022 | 2010-04-28 12:16:17 -0700 | [diff] [blame] | 1862 | 		uasm_i_dsrl_safe(&p, K1, K0, 62); | 
 | 1863 | 		uasm_i_dsrl_safe(&p, K0, K0, 12 + 1); | 
 | 1864 | 		uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits); | 
| Ralf Baechle | 3d45285 | 2010-03-23 17:56:38 +0100 | [diff] [blame] | 1865 | 		uasm_i_or(&p, K0, K0, K1); | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1866 | 		uasm_il_bnez(&p, &r, K0, label_leave); | 
 | 1867 | 		/* No need for uasm_i_nop */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1868 | 	} | 
 | 1869 |  | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 1870 | 	wr = build_r4000_tlbchange_handler_head(&p, &l, &r); | 
 | 1871 | 	build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl); | 
| Maciej W. Rozycki | 8df5bea | 2006-08-23 14:26:50 +0100 | [diff] [blame] | 1872 | 	if (m4kc_tlbp_war()) | 
 | 1873 | 		build_tlb_probe_entry(&p); | 
| David Daney | 6dd9344 | 2010-02-10 15:12:47 -0800 | [diff] [blame] | 1874 |  | 
 | 1875 | 	if (kernel_uses_smartmips_rixi) { | 
 | 1876 | 		/* | 
 | 1877 | 		 * If the page is not _PAGE_VALID, RI or XI could not | 
 | 1878 | 		 * have triggered it.  Skip the expensive test. | 
 | 1879 | 		 */ | 
| David Daney | cc33ae4 | 2010-12-20 15:54:50 -0800 | [diff] [blame] | 1880 | 		if (use_bbit_insns()) { | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 1881 | 			uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID), | 
| David Daney | cc33ae4 | 2010-12-20 15:54:50 -0800 | [diff] [blame] | 1882 | 				      label_tlbl_goaround1); | 
 | 1883 | 		} else { | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 1884 | 			uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID); | 
 | 1885 | 			uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround1); | 
| David Daney | cc33ae4 | 2010-12-20 15:54:50 -0800 | [diff] [blame] | 1886 | 		} | 
| David Daney | 6dd9344 | 2010-02-10 15:12:47 -0800 | [diff] [blame] | 1887 | 		uasm_i_nop(&p); | 
 | 1888 |  | 
 | 1889 | 		uasm_i_tlbr(&p); | 
 | 1890 | 		/* Examine entrylo 0 or 1 based on ptr. */ | 
| David Daney | cc33ae4 | 2010-12-20 15:54:50 -0800 | [diff] [blame] | 1891 | 		if (use_bbit_insns()) { | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 1892 | 			uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8); | 
| David Daney | cc33ae4 | 2010-12-20 15:54:50 -0800 | [diff] [blame] | 1893 | 		} else { | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 1894 | 			uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t)); | 
 | 1895 | 			uasm_i_beqz(&p, wr.r3, 8); | 
| David Daney | cc33ae4 | 2010-12-20 15:54:50 -0800 | [diff] [blame] | 1896 | 		} | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 1897 | 		/* load it in the delay slot */ | 
 | 1898 | 		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0); | 
 | 1899 | 		/* load it if ptr is odd */ | 
 | 1900 | 		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1); | 
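 |  | 		/* | 
 |  | 		 * The branch above (offset 8) lands just past the EntryLo1 | 
 |  | 		 * read: for an even PTE pointer (sizeof(pte_t) bit clear) | 
 |  | 		 * the EntryLo0 value loaded in the delay slot is kept, for | 
 |  | 		 * an odd pointer we fall through and wr.r3 is overwritten | 
 |  | 		 * with EntryLo1. | 
 |  | 		 */ | 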
| David Daney | 6dd9344 | 2010-02-10 15:12:47 -0800 | [diff] [blame] | 1901 | 		/* | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 1902 | 		 * If the entryLo (now in wr.r3) is valid (bit 1), RI or | 
| David Daney | 6dd9344 | 2010-02-10 15:12:47 -0800 | [diff] [blame] | 1903 | 		 * XI must have triggered it. | 
 | 1904 | 		 */ | 
| David Daney | cc33ae4 | 2010-12-20 15:54:50 -0800 | [diff] [blame] | 1905 | 		if (use_bbit_insns()) { | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 1906 | 			uasm_il_bbit1(&p, &r, wr.r3, 1, label_nopage_tlbl); | 
 | 1907 | 			uasm_i_nop(&p); | 
| David Daney | cc33ae4 | 2010-12-20 15:54:50 -0800 | [diff] [blame] | 1908 | 			uasm_l_tlbl_goaround1(&l, p); | 
 | 1909 | 		} else { | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 1910 | 			uasm_i_andi(&p, wr.r3, wr.r3, 2); | 
 | 1911 | 			uasm_il_bnez(&p, &r, wr.r3, label_nopage_tlbl); | 
 | 1912 | 			uasm_i_nop(&p); | 
| David Daney | cc33ae4 | 2010-12-20 15:54:50 -0800 | [diff] [blame] | 1913 | 		} | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 1914 | 		uasm_l_tlbl_goaround1(&l, p); | 
| David Daney | 6dd9344 | 2010-02-10 15:12:47 -0800 | [diff] [blame] | 1915 | 	} | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 1916 | 	build_make_valid(&p, &r, wr.r1, wr.r2); | 
 | 1917 | 	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1918 |  | 
| David Daney | fd062c8 | 2009-05-27 17:47:44 -0700 | [diff] [blame] | 1919 | #ifdef CONFIG_HUGETLB_PAGE | 
 | 1920 | 	/* | 
 | 1921 | 	 * This is the entry point when build_r4000_tlbchange_handler_head | 
 | 1922 | 	 * spots a huge page. | 
 | 1923 | 	 */ | 
 | 1924 | 	uasm_l_tlb_huge_update(&l, p); | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 1925 | 	iPTE_LW(&p, wr.r1, wr.r2); | 
 | 1926 | 	build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl); | 
| David Daney | fd062c8 | 2009-05-27 17:47:44 -0700 | [diff] [blame] | 1927 | 	build_tlb_probe_entry(&p); | 
| David Daney | 6dd9344 | 2010-02-10 15:12:47 -0800 | [diff] [blame] | 1928 |  | 
 | 1929 | 	if (kernel_uses_smartmips_rixi) { | 
 | 1930 | 		/* | 
 | 1931 | 		 * If the page is not _PAGE_VALID, RI or XI could not | 
 | 1932 | 		 * have triggered it.  Skip the expensive test. | 
 | 1933 | 		 */ | 
| David Daney | cc33ae4 | 2010-12-20 15:54:50 -0800 | [diff] [blame] | 1934 | 		if (use_bbit_insns()) { | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 1935 | 			uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID), | 
| David Daney | cc33ae4 | 2010-12-20 15:54:50 -0800 | [diff] [blame] | 1936 | 				      label_tlbl_goaround2); | 
 | 1937 | 		} else { | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 1938 | 			uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID); | 
 | 1939 | 			uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2); | 
| David Daney | cc33ae4 | 2010-12-20 15:54:50 -0800 | [diff] [blame] | 1940 | 		} | 
| David Daney | 6dd9344 | 2010-02-10 15:12:47 -0800 | [diff] [blame] | 1941 | 		uasm_i_nop(&p); | 
 | 1942 |  | 
 | 1943 | 		uasm_i_tlbr(&p); | 
 | 1944 | 		/* Examine EntryLo0 or EntryLo1 based on ptr. */ | 
| David Daney | cc33ae4 | 2010-12-20 15:54:50 -0800 | [diff] [blame] | 1945 | 		if (use_bbit_insns()) { | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 1946 | 			uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8); | 
| David Daney | cc33ae4 | 2010-12-20 15:54:50 -0800 | [diff] [blame] | 1947 | 		} else { | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 1948 | 			uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t)); | 
 | 1949 | 			uasm_i_beqz(&p, wr.r3, 8); | 
| David Daney | cc33ae4 | 2010-12-20 15:54:50 -0800 | [diff] [blame] | 1950 | 		} | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 1951 | 		/* load it in the delay slot */ | 
 | 1952 | 		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0); | 
 | 1953 | 		/* load it if ptr is odd */ | 
 | 1954 | 		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1); | 
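 |  | 		/* Same even/odd EntryLo selection trick as above. */ | 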
| David Daney | 6dd9344 | 2010-02-10 15:12:47 -0800 | [diff] [blame] | 1955 | 		/* | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 1956 | 		 * If the entryLo (now in wr.r3) is valid (bit 1), RI or | 
| David Daney | 6dd9344 | 2010-02-10 15:12:47 -0800 | [diff] [blame] | 1957 | 		 * XI must have triggered it. | 
 | 1958 | 		 */ | 
| David Daney | cc33ae4 | 2010-12-20 15:54:50 -0800 | [diff] [blame] | 1959 | 		if (use_bbit_insns()) { | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 1960 | 			uasm_il_bbit0(&p, &r, wr.r3, 1, label_tlbl_goaround2); | 
| David Daney | cc33ae4 | 2010-12-20 15:54:50 -0800 | [diff] [blame] | 1961 | 		} else { | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 1962 | 			uasm_i_andi(&p, wr.r3, wr.r3, 2); | 
 | 1963 | 			uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2); | 
| David Daney | cc33ae4 | 2010-12-20 15:54:50 -0800 | [diff] [blame] | 1964 | 		} | 
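 |  | 		/* | 
 |  | 		 * With a zero PM_DEFAULT_MASK, build_restore_pagemask | 
 |  | 		 * emits a branch first; the nop keeps that branch out of | 
 |  | 		 * the delay slot of the branch just above. | 
 |  | 		 */ | 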
| David Daney | 0f4ccbc | 2011-09-16 18:06:02 -0700 | [diff] [blame] | 1965 | 		if (PM_DEFAULT_MASK == 0) | 
 | 1966 | 			uasm_i_nop(&p); | 
| David Daney | 6dd9344 | 2010-02-10 15:12:47 -0800 | [diff] [blame] | 1967 | 		/* | 
 | 1968 | 		 * We clobbered C0_PAGEMASK, restore it.  On the other branch | 
 | 1969 | 		 * it is restored in build_huge_tlb_write_entry. | 
 | 1970 | 		 */ | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 1971 | 		build_restore_pagemask(&p, &r, wr.r3, label_nopage_tlbl, 0); | 
| David Daney | 6dd9344 | 2010-02-10 15:12:47 -0800 | [diff] [blame] | 1972 |  | 
 | 1973 | 		uasm_l_tlbl_goaround2(&l, p); | 
 | 1974 | 	} | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 1975 | 	uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID)); | 
 | 1976 | 	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2); | 
| David Daney | fd062c8 | 2009-05-27 17:47:44 -0700 | [diff] [blame] | 1977 | #endif | 
 | 1978 |  | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1979 | 	uasm_l_nopage_tlbl(&l, p); | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 1980 | 	build_restore_work_registers(&p); | 
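 |  | 	/* | 
 |  | 	 * The j instruction can only reach targets within the current | 
 |  | 	 * 256MB segment, so only the low 28 bits of the slowpath | 
 |  | 	 * address are encoded; tlb_do_page_fault_0 must lie in the | 
 |  | 	 * same segment as this handler for the jump to work. | 
 |  | 	 */ | 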
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1981 | 	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); | 
 | 1982 | 	uasm_i_nop(&p); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1983 |  | 
 | 1984 | 	if ((p - handle_tlbl) > FASTPATH_SIZE) | 
 | 1985 | 		panic("TLB load handler fastpath space exceeded"); | 
 | 1986 |  | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1987 | 	uasm_resolve_relocs(relocs, labels); | 
 | 1988 | 	pr_debug("Wrote TLB load handler fastpath (%u instructions).\n", | 
 | 1989 | 		 (unsigned int)(p - handle_tlbl)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1990 |  | 
| Franck Bui-Huu | 92b1e6a | 2007-10-18 09:11:17 +0200 | [diff] [blame] | 1991 | 	dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1992 | } | 
 | 1993 |  | 
| Ralf Baechle | 234fcd1 | 2008-03-08 09:56:28 +0000 | [diff] [blame] | 1994 | static void __cpuinit build_r4000_tlb_store_handler(void) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1995 | { | 
 | 1996 | 	u32 *p = handle_tlbs; | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 1997 | 	struct uasm_label *l = labels; | 
 | 1998 | 	struct uasm_reloc *r = relocs; | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 1999 | 	struct work_registers wr; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2000 |  | 
 | 2001 | 	memset(handle_tlbs, 0, sizeof(handle_tlbs)); | 
 | 2002 | 	memset(labels, 0, sizeof(labels)); | 
 | 2003 | 	memset(relocs, 0, sizeof(relocs)); | 
 | 2004 |  | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 2005 | 	wr = build_r4000_tlbchange_handler_head(&p, &l, &r); | 
 | 2006 | 	build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs); | 
| Maciej W. Rozycki | 8df5bea | 2006-08-23 14:26:50 +0100 | [diff] [blame] | 2007 | 	if (m4kc_tlbp_war()) | 
 | 2008 | 		build_tlb_probe_entry(&p); | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 2009 | 	build_make_write(&p, &r, wr.r1, wr.r2); | 
 | 2010 | 	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2011 |  | 
| David Daney | fd062c8 | 2009-05-27 17:47:44 -0700 | [diff] [blame] | 2012 | #ifdef CONFIG_HUGETLB_PAGE | 
 | 2013 | 	/* | 
 | 2014 | 	 * This is the entry point when | 
 | 2015 | 	 * build_r4000_tlbchange_handler_head spots a huge page. | 
 | 2016 | 	 */ | 
 | 2017 | 	uasm_l_tlb_huge_update(&l, p); | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 2018 | 	iPTE_LW(&p, wr.r1, wr.r2); | 
 | 2019 | 	build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs); | 
| David Daney | fd062c8 | 2009-05-27 17:47:44 -0700 | [diff] [blame] | 2020 | 	build_tlb_probe_entry(&p); | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 2021 | 	uasm_i_ori(&p, wr.r1, wr.r1, | 
| David Daney | fd062c8 | 2009-05-27 17:47:44 -0700 | [diff] [blame] | 2022 | 		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY); | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 2023 | 	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2); | 
| David Daney | fd062c8 | 2009-05-27 17:47:44 -0700 | [diff] [blame] | 2024 | #endif | 
 | 2025 |  | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 2026 | 	uasm_l_nopage_tlbs(&l, p); | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 2027 | 	build_restore_work_registers(&p); | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 2028 | 	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); | 
 | 2029 | 	uasm_i_nop(&p); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2030 |  | 
 | 2031 | 	if ((p - handle_tlbs) > FASTPATH_SIZE) | 
 | 2032 | 		panic("TLB store handler fastpath space exceeded"); | 
 | 2033 |  | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 2034 | 	uasm_resolve_relocs(relocs, labels); | 
 | 2035 | 	pr_debug("Wrote TLB store handler fastpath (%u instructions).\n", | 
 | 2036 | 		 (unsigned int)(p - handle_tlbs)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2037 |  | 
| Franck Bui-Huu | 92b1e6a | 2007-10-18 09:11:17 +0200 | [diff] [blame] | 2038 | 	dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2039 | } | 
 | 2040 |  | 
| Ralf Baechle | 234fcd1 | 2008-03-08 09:56:28 +0000 | [diff] [blame] | 2041 | static void __cpuinit build_r4000_tlb_modify_handler(void) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2042 | { | 
 | 2043 | 	u32 *p = handle_tlbm; | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 2044 | 	struct uasm_label *l = labels; | 
 | 2045 | 	struct uasm_reloc *r = relocs; | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 2046 | 	struct work_registers wr; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2047 |  | 
 | 2048 | 	memset(handle_tlbm, 0, sizeof(handle_tlbm)); | 
 | 2049 | 	memset(labels, 0, sizeof(labels)); | 
 | 2050 | 	memset(relocs, 0, sizeof(relocs)); | 
 | 2051 |  | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 2052 | 	wr = build_r4000_tlbchange_handler_head(&p, &l, &r); | 
 | 2053 | 	build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm); | 
| Maciej W. Rozycki | 8df5bea | 2006-08-23 14:26:50 +0100 | [diff] [blame] | 2054 | 	if (m4kc_tlbp_war()) | 
 | 2055 | 		build_tlb_probe_entry(&p); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2056 | 	/* Present and writable bits set, set accessed and dirty bits. */ | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 2057 | 	build_make_write(&p, &r, wr.r1, wr.r2); | 
 | 2058 | 	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2059 |  | 
| David Daney | fd062c8 | 2009-05-27 17:47:44 -0700 | [diff] [blame] | 2060 | #ifdef CONFIG_HUGETLB_PAGE | 
 | 2061 | 	/* | 
 | 2062 | 	 * This is the entry point when | 
 | 2063 | 	 * build_r4000_tlbchange_handler_head spots a huge page. | 
 | 2064 | 	 */ | 
 | 2065 | 	uasm_l_tlb_huge_update(&l, p); | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 2066 | 	iPTE_LW(&p, wr.r1, wr.r2); | 
 | 2067 | 	build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm); | 
| David Daney | fd062c8 | 2009-05-27 17:47:44 -0700 | [diff] [blame] | 2068 | 	build_tlb_probe_entry(&p); | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 2069 | 	uasm_i_ori(&p, wr.r1, wr.r1, | 
| David Daney | fd062c8 | 2009-05-27 17:47:44 -0700 | [diff] [blame] | 2070 | 		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY); | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 2071 | 	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2); | 
| David Daney | fd062c8 | 2009-05-27 17:47:44 -0700 | [diff] [blame] | 2072 | #endif | 
 | 2073 |  | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 2074 | 	uasm_l_nopage_tlbm(&l, p); | 
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 2075 | 	build_restore_work_registers(&p); | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 2076 | 	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); | 
 | 2077 | 	uasm_i_nop(&p); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2078 |  | 
 | 2079 | 	if ((p - handle_tlbm) > FASTPATH_SIZE) | 
 | 2080 | 		panic("TLB modify handler fastpath space exceeded"); | 
 | 2081 |  | 
| Thiemo Seufer | e30ec45 | 2008-01-28 20:05:38 +0000 | [diff] [blame] | 2082 | 	uasm_resolve_relocs(relocs, labels); | 
 | 2083 | 	pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n", | 
 | 2084 | 		 (unsigned int)(p - handle_tlbm)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2085 |  | 
| Franck Bui-Huu | 92b1e6a | 2007-10-18 09:11:17 +0200 | [diff] [blame] | 2086 | 	dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2087 | } | 
 | 2088 |  | 
| Ralf Baechle | 234fcd1 | 2008-03-08 09:56:28 +0000 | [diff] [blame] | 2089 | void __cpuinit build_tlb_refill_handler(void) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2090 | { | 
 | 2091 | 	/* | 
 | 2092 | 	 * The refill handler is generated per-CPU; multi-node systems | 
 | 2093 | 	 * may have local storage for it. The other handlers are only | 
 | 2094 | 	 * needed once. | 
 | 2095 | 	 */ | 
 | 2096 | 	static int run_once; | 
 | 2097 |  | 
| David Daney | 1ec5632 | 2010-04-28 12:16:18 -0700 | [diff] [blame] | 2098 | #ifdef CONFIG_64BIT | 
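 |  | 	/* | 
 |  | 	 * PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3 is the number of | 
 |  | 	 * virtual address bits a full PGD can map (8-byte entries, | 
 |  | 	 * hence the -3).  CPUs whose implemented VM bits exceed that | 
 |  | 	 * need extra range checks in the refill handler. | 
 |  | 	 */ | 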
 | 2099 | 	check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3); | 
 | 2100 | #endif | 
 | 2101 |  | 
| Ralf Baechle | 10cc352 | 2007-10-11 23:46:15 +0100 | [diff] [blame] | 2102 | 	switch (current_cpu_type()) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2103 | 	case CPU_R2000: | 
 | 2104 | 	case CPU_R3000: | 
 | 2105 | 	case CPU_R3000A: | 
 | 2106 | 	case CPU_R3081E: | 
 | 2107 | 	case CPU_TX3912: | 
 | 2108 | 	case CPU_TX3922: | 
 | 2109 | 	case CPU_TX3927: | 
| David Daney | 8262228 | 2009-10-14 12:16:56 -0700 | [diff] [blame] | 2110 | #ifndef CONFIG_MIPS_PGD_C0_CONTEXT | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2111 | 		build_r3000_tlb_refill_handler(); | 
 | 2112 | 		if (!run_once) { | 
 | 2113 | 			build_r3000_tlb_load_handler(); | 
 | 2114 | 			build_r3000_tlb_store_handler(); | 
 | 2115 | 			build_r3000_tlb_modify_handler(); | 
 | 2116 | 			run_once++; | 
 | 2117 | 		} | 
| David Daney | 8262228 | 2009-10-14 12:16:56 -0700 | [diff] [blame] | 2118 | #else | 
 | 2119 | 		panic("No R3000 TLB refill handler"); | 
 | 2120 | #endif | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2121 | 		break; | 
 | 2122 |  | 
 | 2123 | 	case CPU_R6000: | 
 | 2124 | 	case CPU_R6000A: | 
 | 2125 | 		panic("No R6000 TLB refill handler yet"); | 
 | 2126 | 		break; | 
 | 2127 |  | 
 | 2128 | 	case CPU_R8000: | 
 | 2129 | 		panic("No R8000 TLB refill handler yet"); | 
 | 2130 | 		break; | 
 | 2131 |  | 
 | 2132 | 	default: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2133 | 		if (!run_once) { | 
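 |  | 			/* | 
 |  | 			 * Use a CP0 KScratch register, if one is available, | 
 |  | 			 * to save the work registers; otherwise | 
 |  | 			 * allocate_kscratch() returns -1 and the handlers | 
 |  | 			 * fall back to the per-CPU handler_reg_save area. | 
 |  | 			 */ | 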
| David Daney | bf28607 | 2011-07-05 16:34:46 -0700 | [diff] [blame] | 2134 | 			scratch_reg = allocate_kscratch(); | 
| David Daney | 3d8bfdd | 2010-12-21 14:19:11 -0800 | [diff] [blame] | 2135 | #ifdef CONFIG_MIPS_PGD_C0_CONTEXT | 
 | 2136 | 			build_r4000_setup_pgd(); | 
 | 2137 | #endif | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2138 | 			build_r4000_tlb_load_handler(); | 
 | 2139 | 			build_r4000_tlb_store_handler(); | 
 | 2140 | 			build_r4000_tlb_modify_handler(); | 
 | 2141 | 			run_once++; | 
 | 2142 | 		} | 
| David Daney | 3d8bfdd | 2010-12-21 14:19:11 -0800 | [diff] [blame] | 2143 | 		build_r4000_tlb_refill_handler(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2144 | 	} | 
 | 2145 | } | 
| Ralf Baechle | 1d40cfc | 2005-07-15 15:23:23 +0000 | [diff] [blame] | 2146 |  | 
| Ralf Baechle | 234fcd1 | 2008-03-08 09:56:28 +0000 | [diff] [blame] | 2147 | void __cpuinit flush_tlb_handlers(void) | 
| Ralf Baechle | 1d40cfc | 2005-07-15 15:23:23 +0000 | [diff] [blame] | 2148 | { | 
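 |  | 	/* | 
 |  | 	 * The handlers were written via the data cache; flush the | 
 |  | 	 * icache over each of them so the new code is actually | 
 |  | 	 * fetched. | 
 |  | 	 */ | 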
| Thomas Bogendoerfer | e0cee3e | 2008-08-04 20:53:57 +0200 | [diff] [blame] | 2149 | 	local_flush_icache_range((unsigned long)handle_tlbl, | 
| Ralf Baechle | 1d40cfc | 2005-07-15 15:23:23 +0000 | [diff] [blame] | 2150 | 			   (unsigned long)handle_tlbl + sizeof(handle_tlbl)); | 
| Thomas Bogendoerfer | e0cee3e | 2008-08-04 20:53:57 +0200 | [diff] [blame] | 2151 | 	local_flush_icache_range((unsigned long)handle_tlbs, | 
| Ralf Baechle | 1d40cfc | 2005-07-15 15:23:23 +0000 | [diff] [blame] | 2152 | 			   (unsigned long)handle_tlbs + sizeof(handle_tlbs)); | 
| Thomas Bogendoerfer | e0cee3e | 2008-08-04 20:53:57 +0200 | [diff] [blame] | 2153 | 	local_flush_icache_range((unsigned long)handle_tlbm, | 
| Ralf Baechle | 1d40cfc | 2005-07-15 15:23:23 +0000 | [diff] [blame] | 2154 | 			   (unsigned long)handle_tlbm + sizeof(handle_tlbm)); | 
| David Daney | 3d8bfdd | 2010-12-21 14:19:11 -0800 | [diff] [blame] | 2155 | #ifdef CONFIG_MIPS_PGD_C0_CONTEXT | 
 | 2156 | 	local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd, | 
 | 2157 | 			   (unsigned long)tlbmiss_handler_setup_pgd + sizeof(tlbmiss_handler_setup_pgd)); | 
 | 2158 | #endif | 
| Ralf Baechle | 1d40cfc | 2005-07-15 15:23:23 +0000 | [diff] [blame] | 2159 | } |