/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * arch/sh64/mm/fault.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003  Richard Curnow (/proc/tlb, bug fixes)
 * Copyright (C) 2003  Paul Mundt
 *
 */

#include <linux/signal.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/registers.h>		/* required by inline asm statements */

#if defined(CONFIG_SH64_PROC_TLB)
#include <linux/init.h>
#include <linux/proc_fs.h>
/* Count numbers of tlb refills in each region */
static unsigned long long calls_to_update_mmu_cache = 0ULL;
static unsigned long long calls_to_flush_tlb_page   = 0ULL;
static unsigned long long calls_to_flush_tlb_range  = 0ULL;
static unsigned long long calls_to_flush_tlb_mm     = 0ULL;
static unsigned long long calls_to_flush_tlb_all    = 0ULL;
unsigned long long calls_to_do_slow_page_fault = 0ULL;
unsigned long long calls_to_do_fast_page_fault = 0ULL;

/* Count size of ranges for flush_tlb_range */
static unsigned long long flush_tlb_range_1         = 0ULL;
static unsigned long long flush_tlb_range_2         = 0ULL;
static unsigned long long flush_tlb_range_3_4       = 0ULL;
static unsigned long long flush_tlb_range_5_7       = 0ULL;
static unsigned long long flush_tlb_range_8_11      = 0ULL;
static unsigned long long flush_tlb_range_12_15     = 0ULL;
static unsigned long long flush_tlb_range_16_up     = 0ULL;

static unsigned long long page_not_present          = 0ULL;

#endif

extern void die(const char *,struct pt_regs *,long);

#define PFLAG(val,flag)   (( (val) & (flag) ) ? #flag : "" )
#define PPROT(flag) PFLAG(pgprot_val(prot),flag)

static inline void print_prots(pgprot_t prot)
{
	printk("prot is 0x%08lx\n",pgprot_val(prot));

	printk("%s %s %s %s %s\n",PPROT(_PAGE_SHARED),PPROT(_PAGE_READ),
	       PPROT(_PAGE_EXECUTE),PPROT(_PAGE_WRITE),PPROT(_PAGE_USER));
}

static inline void print_vma(struct vm_area_struct *vma)
{
	printk("vma start 0x%08lx\n", vma->vm_start);
	printk("vma end   0x%08lx\n", vma->vm_end);

	print_prots(vma->vm_page_prot);
	printk("vm_flags 0x%08lx\n", vma->vm_flags);
}

static inline void print_task(struct task_struct *tsk)
{
	printk("Task pid %d\n", task_pid_nr(tsk));
}

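/*
 * Walk the page tables (pgd -> pmd -> pte) for 'address' in the given mm
 * and return a pointer to the pte, or NULL if any level is absent or the
 * pte is not present.
 */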
static pte_t *lookup_pte(struct mm_struct *mm, unsigned long address)
{
	pgd_t *dir;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;

	dir = pgd_offset(mm, address);
	if (pgd_none(*dir)) {
		return NULL;
	}

	pmd = pmd_offset(dir, address);
	if (pmd_none(*pmd)) {
		return NULL;
	}

	pte = pte_offset_kernel(pmd, address);
	entry = *pte;

	if (pte_none(entry)) {
		return NULL;
	}
	if (!pte_present(entry)) {
		return NULL;
	}

	return pte;
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
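/*
 * 'writeaccess' is non-zero for a fault on a write, 'textaccess' is
 * non-zero for a fault on an instruction fetch, and 'address' is the
 * faulting effective address; these drive the VM_WRITE/VM_READ/VM_EXEC
 * checks below.
 */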
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
			      unsigned long textaccess, unsigned long address)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct * vma;
	const struct exception_table_entry *fixup;
	pte_t *pte;
	int fault;

#if defined(CONFIG_SH64_PROC_TLB)
	++calls_to_do_slow_page_fault;
#endif

	/* SIM
	 * Note this is now called with interrupts still disabled
	 * This is to cope with being called for a missing IO port
	 * address with interrupts disabled. This should be fixed as
	 * soon as we have a better 'fast path' miss handler.
	 *
	 * Plus take care how you try and debug this stuff.
	 * For example, writing debug data to a port which you
	 * have just faulted on is not going to work.
	 */

	tsk = current;
	mm = tsk->mm;

	/* Not an IO address, so reenable interrupts */
	local_irq_enable();

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;

	/* TLB misses upon some cache flushes get done under cli() */
	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);

	if (!vma) {
#ifdef DEBUG_FAULT
		print_task(tsk);
		printk("%s:%d fault, address is 0x%08x PC %016Lx textaccess %d writeaccess %d\n",
		       __FUNCTION__,__LINE__,
		       address,regs->pc,textaccess,writeaccess);
		show_regs(regs);
#endif
		goto bad_area;
	}
	if (vma->vm_start <= address) {
		goto good_area;
	}

	if (!(vma->vm_flags & VM_GROWSDOWN)) {
#ifdef DEBUG_FAULT
		print_task(tsk);
		printk("%s:%d fault, address is 0x%08x PC %016Lx textaccess %d writeaccess %d\n",
		       __FUNCTION__,__LINE__,
		       address,regs->pc,textaccess,writeaccess);
		show_regs(regs);

		print_vma(vma);
#endif
		goto bad_area;
	}
	if (expand_stack(vma, address)) {
#ifdef DEBUG_FAULT
		print_task(tsk);
		printk("%s:%d fault, address is 0x%08x PC %016Lx textaccess %d writeaccess %d\n",
		       __FUNCTION__,__LINE__,
		       address,regs->pc,textaccess,writeaccess);
		show_regs(regs);
#endif
		goto bad_area;
	}
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	if (textaccess) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else {
		if (writeaccess) {
			if (!(vma->vm_flags & VM_WRITE))
				goto bad_area;
		} else {
			if (!(vma->vm_flags & VM_READ))
				goto bad_area;
		}
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
survive:
	fault = handle_mm_fault(mm, vma, address, writeaccess);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;

	/* If we get here, the page fault has been handled.  Do the TLB refill
	   now from the newly-setup PTE, to avoid having to fault again right
	   away on the same instruction. */
	pte = lookup_pte (mm, address);
	if (!pte) {
		/* From empirical evidence, we can get here, due to
		   !pte_present(pte).  (e.g. if a swap-in occurs, and the page
		   is swapped back out again before the process that wanted it
		   gets rescheduled?) */
		goto no_pte;
	}

	__do_tlb_refill(address, textaccess, pte);

no_pte:

	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
#ifdef DEBUG_FAULT
	printk("fault:bad area\n");
#endif
	up_read(&mm->mmap_sem);

	if (user_mode(regs)) {
		static int count=0;
		siginfo_t info;
		if (count < 4) {
			/* This is really to help debug faults when starting
			 * usermode, so only need a few */
			count++;
			printk("user mode bad_area address=%08lx pid=%d (%s) pc=%08lx\n",
				address, task_pid_nr(current), current->comm,
				(unsigned long) regs->pc);
#if 0
			show_regs(regs);
#endif
		}
		if (is_global_init(tsk)) {
			panic("INIT had user mode bad_area\n");
		}
		tsk->thread.address = address;
		tsk->thread.error_code = writeaccess;
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		info.si_addr = (void *) address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:
#ifdef DEBUG_FAULT
	printk("fault:No context\n");
#endif
	/* Are we prepared to handle this kernel fault?  */
	fixup = search_exception_tables(regs->pc);
	if (fixup) {
		regs->pc = fixup->fixup;
		return;
	}

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 *
 */
	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request");
	printk(" at virtual address %08lx\n", address);
	printk(KERN_ALERT "pc = %08Lx%08Lx\n", regs->pc >> 32, regs->pc & 0xffffffff);
	die("Oops", regs, writeaccess);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	if (is_global_init(current)) {
		panic("INIT out of memory\n");
		yield();
		goto survive;
	}
	printk("fault:Out of memory\n");
	up_read(&mm->mmap_sem);
	if (is_global_init(current)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (user_mode(regs))
		do_group_exit(SIGKILL);
	goto no_context;

do_sigbus:
	printk("fault:Do sigbus\n");
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	tsk->thread.address = address;
	tsk->thread.error_code = writeaccess;
	tsk->thread.trap_no = 14;
	force_sig(SIGBUS, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
}


void flush_tlb_all(void);

void update_mmu_cache(struct vm_area_struct * vma,
			unsigned long address, pte_t pte)
{
#if defined(CONFIG_SH64_PROC_TLB)
	++calls_to_update_mmu_cache;
#endif

	/*
	 * This appears to get called once for every pte entry that gets
	 * established => I don't think it's efficient to try refilling the
	 * TLBs with the pages - some may not get accessed even.  Also, for
	 * executable pages, it is impossible to determine reliably here which
	 * TLB they should be mapped into (or both even).
	 *
	 * So, just do nothing here and handle faults on demand.  In the
	 * TLBMISS handling case, the refill is now done anyway after the pte
	 * has been fixed up, so that deals with most useful cases.
	 */
}

static void __flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	unsigned long long match, pteh=0, lpage;
	unsigned long tlb;
	struct mm_struct *mm;

	mm = vma->vm_mm;

	if (mm->context == NO_CONTEXT)
		return;

	/*
	 * Sign-extend based on neff.
	 */
	lpage = (page & NEFF_SIGN) ? (page | NEFF_MASK) : page;
	match = ((mm->context & MMU_CONTEXT_ASID_MASK) << PTEH_ASID_SHIFT) | PTEH_VALID;
	match |= lpage;

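	/*
	 * Each TLB slot's PTEH value (read back with getcfg below) holds the
	 * entry's effective page number plus its ASID and valid bit, so a
	 * full compare against 'match' picks out the one entry (if any)
	 * mapping this page for this address space.
	 */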
	/* Do ITLB : don't bother for pages in non-executable VMAs */
	if (vma->vm_flags & VM_EXEC) {
		for_each_itlb_entry(tlb) {
			asm volatile ("getcfg	%1, 0, %0"
				      : "=r" (pteh)
				      : "r" (tlb) );

			if (pteh == match) {
				__flush_tlb_slot(tlb);
				break;
			}

		}
	}

	/* Do DTLB : any page could potentially be in here. */
	for_each_dtlb_entry(tlb) {
		asm volatile ("getcfg	%1, 0, %0"
			      : "=r" (pteh)
			      : "r" (tlb) );

		if (pteh == match) {
			__flush_tlb_slot(tlb);
			break;
		}

	}
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	unsigned long flags;

#if defined(CONFIG_SH64_PROC_TLB)
	++calls_to_flush_tlb_page;
#endif

	if (vma->vm_mm) {
		page &= PAGE_MASK;
		local_irq_save(flags);
		__flush_tlb_page(vma, page);
		local_irq_restore(flags);
	}
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	unsigned long flags;
	unsigned long long match, pteh=0, pteh_epn, pteh_low;
	unsigned long tlb;
	struct mm_struct *mm;

	mm = vma->vm_mm;

#if defined(CONFIG_SH64_PROC_TLB)
	++calls_to_flush_tlb_range;

	{
		unsigned long size = (end - 1) - start;
		size >>= 12; /* divide by PAGE_SIZE */
		size++; /* end=start+4096 => 1 page */
		switch (size) {
		  case  1        : flush_tlb_range_1++;     break;
		  case  2        : flush_tlb_range_2++;     break;
		  case  3 ...  4 : flush_tlb_range_3_4++;   break;
		  case  5 ...  7 : flush_tlb_range_5_7++;   break;
		  case  8 ... 11 : flush_tlb_range_8_11++;  break;
		  case 12 ... 15 : flush_tlb_range_12_15++; break;
		  default        : flush_tlb_range_16_up++; break;
		}
	}
#endif

	if (mm->context == NO_CONTEXT)
		return;

	local_irq_save(flags);

	start &= PAGE_MASK;
	end &= PAGE_MASK;

	match = ((mm->context & MMU_CONTEXT_ASID_MASK) << PTEH_ASID_SHIFT) | PTEH_VALID;

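	/*
	 * For a range flush the whole PTEH can't be compared at once, so
	 * each entry is split into its effective page number (the PAGE_MASK
	 * part) and its low bits (ASID + valid).  An entry is flushed when
	 * the low bits match this mm and its page number lies within
	 * [start, end].
	 */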
	/* Flush ITLB */
	for_each_itlb_entry(tlb) {
		asm volatile ("getcfg	%1, 0, %0"
			      : "=r" (pteh)
			      : "r" (tlb) );

		pteh_epn = pteh & PAGE_MASK;
		pteh_low = pteh & ~PAGE_MASK;

		if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
			__flush_tlb_slot(tlb);
	}

	/* Flush DTLB */
	for_each_dtlb_entry(tlb) {
		asm volatile ("getcfg	%1, 0, %0"
			      : "=r" (pteh)
			      : "r" (tlb) );

		pteh_epn = pteh & PAGE_MASK;
		pteh_low = pteh & ~PAGE_MASK;

		if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
			__flush_tlb_slot(tlb);
	}

	local_irq_restore(flags);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long flags;

#if defined(CONFIG_SH64_PROC_TLB)
	++calls_to_flush_tlb_mm;
#endif

	if (mm->context == NO_CONTEXT)
		return;

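	/*
	 * Invalidate lazily rather than scanning TLB slots: drop this mm's
	 * context, and re-activate it for the current mm, so stale entries
	 * tagged with the old ASID can no longer match.  (This assumes
	 * activate_context() assigns a fresh ASID once the context is
	 * NO_CONTEXT.)
	 */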
	local_irq_save(flags);

	mm->context=NO_CONTEXT;
	if(mm==current->mm)
		activate_context(mm);

	local_irq_restore(flags);

}

void flush_tlb_all(void)
{
	/* Invalidate all, including shared pages, excluding fixed TLBs */

	unsigned long flags, tlb;

#if defined(CONFIG_SH64_PROC_TLB)
	++calls_to_flush_tlb_all;
#endif

	local_irq_save(flags);

	/* Flush each ITLB entry */
	for_each_itlb_entry(tlb) {
		__flush_tlb_slot(tlb);
	}

	/* Flush each DTLB entry */
	for_each_dtlb_entry(tlb) {
		__flush_tlb_slot(tlb);
	}

	local_irq_restore(flags);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	/* FIXME: Optimize this later.. */
	flush_tlb_all();
}

#if defined(CONFIG_SH64_PROC_TLB)
/* Procfs interface to read the performance information */

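/*
 * The counters above are exported through /proc/tlb (registered by
 * register_proc_tlb() below); reading that file dumps them all.
 */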
static int
tlb_proc_info(char *buf, char **start, off_t fpos, int length, int *eof, void *data)
{
  int len=0;
  len += sprintf(buf+len, "do_fast_page_fault   called %12lld times\n", calls_to_do_fast_page_fault);
  len += sprintf(buf+len, "do_slow_page_fault   called %12lld times\n", calls_to_do_slow_page_fault);
  len += sprintf(buf+len, "update_mmu_cache     called %12lld times\n", calls_to_update_mmu_cache);
  len += sprintf(buf+len, "flush_tlb_page       called %12lld times\n", calls_to_flush_tlb_page);
  len += sprintf(buf+len, "flush_tlb_range      called %12lld times\n", calls_to_flush_tlb_range);
  len += sprintf(buf+len, "flush_tlb_mm         called %12lld times\n", calls_to_flush_tlb_mm);
  len += sprintf(buf+len, "flush_tlb_all        called %12lld times\n", calls_to_flush_tlb_all);
  len += sprintf(buf+len, "flush_tlb_range_sizes\n"
                          " 1      : %12lld\n"
                          " 2      : %12lld\n"
                          " 3 -  4 : %12lld\n"
                          " 5 -  7 : %12lld\n"
                          " 8 - 11 : %12lld\n"
                          "12 - 15 : %12lld\n"
                          "16+     : %12lld\n",
                          flush_tlb_range_1, flush_tlb_range_2, flush_tlb_range_3_4,
                          flush_tlb_range_5_7, flush_tlb_range_8_11, flush_tlb_range_12_15,
                          flush_tlb_range_16_up);
  len += sprintf(buf+len, "page not present           %12lld times\n", page_not_present);
  *eof = 1;
  return len;
}

static int __init register_proc_tlb(void)
{
  create_proc_read_entry("tlb", 0, NULL, tlb_proc_info, NULL);
  return 0;
}

__initcall(register_proc_tlb);

#endif