| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 1 | /* | 
 | 2 |  * This file contains the routines for TLB flushing. | 
 | 3 |  * On machines where the MMU uses a hash table to store virtual to | 
 | 4 |  * physical translations, these routines flush entries from the | 
 | 5 |  * hash table also. | 
 | 6 |  *  -- paulus | 
 | 7 |  * | 
 | 8 |  *  Derived from arch/ppc/mm/init.c: | 
 | 9 |  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | 
 | 10 |  * | 
 | 11 |  *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) | 
 | 12 |  *  and Cort Dougan (PReP) (cort@cs.nmt.edu) | 
 | 13 |  *    Copyright (C) 1996 Paul Mackerras | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 14 |  * | 
 | 15 |  *  Derived from "arch/i386/mm/init.c" | 
 | 16 |  *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds | 
 | 17 |  * | 
 | 18 |  *  This program is free software; you can redistribute it and/or | 
 | 19 |  *  modify it under the terms of the GNU General Public License | 
 | 20 |  *  as published by the Free Software Foundation; either version | 
 | 21 |  *  2 of the License, or (at your option) any later version. | 
 | 22 |  * | 
 | 23 |  */ | 
 | 24 |  | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 25 | #include <linux/kernel.h> | 
 | 26 | #include <linux/mm.h> | 
 | 27 | #include <linux/init.h> | 
 | 28 | #include <linux/highmem.h> | 
| Mariusz Kozlowski | 97d22d2 | 2007-07-21 04:37:44 -0700 | [diff] [blame] | 29 | #include <linux/pagemap.h> | 
 | 30 |  | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 31 | #include <asm/tlbflush.h> | 
 | 32 | #include <asm/tlb.h> | 
 | 33 |  | 
 | 34 | #include "mmu_decl.h" | 
 | 35 |  | 
 | 36 | /* | 
 | 37 |  * Called when unmapping pages to flush entries from the TLB/hash table. | 
 | 38 |  */ | 
 | 39 | void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr) | 
 | 40 | { | 
 | 41 | 	unsigned long ptephys; | 
 | 42 |  | 
 | 43 | 	if (Hash != 0) { | 
 | 44 | 		ptephys = __pa(ptep) & PAGE_MASK; | 
| Paul Mackerras | 6218a76 | 2006-06-11 14:15:17 +1000 | [diff] [blame] | 45 | 		flush_hash_pages(mm->context.id, addr, ptephys, 1); | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 46 | 	} | 
 | 47 | } | 
| Becky Bruce | 4ee7084 | 2008-09-24 11:01:24 -0500 | [diff] [blame] | 48 | EXPORT_SYMBOL(flush_hash_entry); | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 49 |  | 
 | 50 | /* | 
 | 51 |  * Called by ptep_set_access_flags, must flush on CPUs for which the | 
 | 52 |  * DSI handler can't just "fixup" the TLB on a write fault | 
 | 53 |  */ | 
 | 54 | void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr) | 
 | 55 | { | 
 | 56 | 	if (Hash != 0) | 
 | 57 | 		return; | 
 | 58 | 	_tlbie(addr); | 
 | 59 | } | 
 | 60 |  | 
 | 61 | /* | 
 | 62 |  * Called at the end of a mmu_gather operation to make sure the | 
 | 63 |  * TLB flush is completely done. | 
 | 64 |  */ | 
 | 65 | void tlb_flush(struct mmu_gather *tlb) | 
 | 66 | { | 
 | 67 | 	if (Hash == 0) { | 
 | 68 | 		/* | 
 | 69 | 		 * 603 needs to flush the whole TLB here since | 
 | 70 | 		 * it doesn't use a hash table. | 
 | 71 | 		 */ | 
 | 72 | 		_tlbia(); | 
 | 73 | 	} | 
 | 74 | } | 
 | 75 |  | 
 | 76 | /* | 
 | 77 |  * TLB flushing: | 
 | 78 |  * | 
 | 79 |  *  - flush_tlb_mm(mm) flushes the specified mm context TLB's | 
 | 80 |  *  - flush_tlb_page(vma, vmaddr) flushes one page | 
 | 81 |  *  - flush_tlb_range(vma, start, end) flushes a range of pages | 
 | 82 |  *  - flush_tlb_kernel_range(start, end) flushes kernel pages | 
 | 83 |  * | 
 | 84 |  * since the hardware hash table functions as an extension of the | 
 | 85 |  * tlb as far as the linux tables are concerned, flush it too. | 
 | 86 |  *    -- Cort | 
 | 87 |  */ | 
 | 88 |  | 
/*
 * Flush the hash-table (and TLB) entries for a range of user or kernel
 * addresses in the given mm.  On CPUs without a hash table this falls
 * back to flushing the whole TLB.  The walk proceeds one pmd (one
 * page-table page, PGDIR_SIZE of address space) at a time, handing each
 * populated pmd's span to flush_hash_pages().
 */
static void flush_range(struct mm_struct *mm, unsigned long start,
			unsigned long end)
{
	pmd_t *pmd;
	unsigned long pmd_end;	/* last byte covered by the current pmd */
	int count;		/* number of pages to flush in this pmd */
	unsigned int ctx = mm->context.id;

	if (Hash == 0) {
		/* No hash table: a full TLB flush is all we can do. */
		_tlbia();
		return;
	}
	start &= PAGE_MASK;
	if (start >= end)
		return;
	/* Make 'end' inclusive: the last byte of the last page in range. */
	end = (end - 1) | ~PAGE_MASK;
	pmd = pmd_offset(pud_offset(pgd_offset(mm, start), start), start);
	for (;;) {
		/* End of the pmd holding 'start', clamped to the range end. */
		pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1;
		if (pmd_end > end)
			pmd_end = end;
		if (!pmd_none(*pmd)) {
			/* Both bounds are inclusive, hence the +1. */
			count = ((pmd_end - start) >> PAGE_SHIFT) + 1;
			flush_hash_pages(ctx, start, pmd_val(*pmd), count);
		}
		if (pmd_end == end)
			break;
		start = pmd_end + 1;
		++pmd;
	}
}
 | 120 |  | 
 | 121 | /* | 
 | 122 |  * Flush kernel TLB entries in the given range | 
 | 123 |  */ | 
 | 124 | void flush_tlb_kernel_range(unsigned long start, unsigned long end) | 
 | 125 | { | 
 | 126 | 	flush_range(&init_mm, start, end); | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 127 | } | 
| Benjamin Herrenschmidt | f048aac | 2008-12-18 19:13:38 +0000 | [diff] [blame] | 128 | EXPORT_SYMBOL(flush_tlb_kernel_range); | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 129 |  | 
 | 130 | /* | 
 | 131 |  * Flush all the (user) entries for the address space described by mm. | 
 | 132 |  */ | 
 | 133 | void flush_tlb_mm(struct mm_struct *mm) | 
 | 134 | { | 
 | 135 | 	struct vm_area_struct *mp; | 
 | 136 |  | 
 | 137 | 	if (Hash == 0) { | 
 | 138 | 		_tlbia(); | 
 | 139 | 		return; | 
 | 140 | 	} | 
 | 141 |  | 
| Hugh Dickins | 01edcd8 | 2005-11-23 13:37:39 -0800 | [diff] [blame] | 142 | 	/* | 
 | 143 | 	 * It is safe to go down the mm's list of vmas when called | 
 | 144 | 	 * from dup_mmap, holding mmap_sem.  It would also be safe from | 
 | 145 | 	 * unmap_region or exit_mmap, but not from vmtruncate on SMP - | 
 | 146 | 	 * but it seems dup_mmap is the only SMP case which gets here. | 
 | 147 | 	 */ | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 148 | 	for (mp = mm->mmap; mp != NULL; mp = mp->vm_next) | 
 | 149 | 		flush_range(mp->vm_mm, mp->vm_start, mp->vm_end); | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 150 | } | 
| Benjamin Herrenschmidt | f048aac | 2008-12-18 19:13:38 +0000 | [diff] [blame] | 151 | EXPORT_SYMBOL(flush_tlb_mm); | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 152 |  | 
 | 153 | void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) | 
 | 154 | { | 
 | 155 | 	struct mm_struct *mm; | 
 | 156 | 	pmd_t *pmd; | 
 | 157 |  | 
 | 158 | 	if (Hash == 0) { | 
 | 159 | 		_tlbie(vmaddr); | 
 | 160 | 		return; | 
 | 161 | 	} | 
 | 162 | 	mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm; | 
| David Gibson | f1a1eb2 | 2007-05-09 15:20:37 +1000 | [diff] [blame] | 163 | 	pmd = pmd_offset(pud_offset(pgd_offset(mm, vmaddr), vmaddr), vmaddr); | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 164 | 	if (!pmd_none(*pmd)) | 
| Paul Mackerras | 6218a76 | 2006-06-11 14:15:17 +1000 | [diff] [blame] | 165 | 		flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1); | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 166 | } | 
| Benjamin Herrenschmidt | f048aac | 2008-12-18 19:13:38 +0000 | [diff] [blame] | 167 | EXPORT_SYMBOL(flush_tlb_page); | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 168 |  | 
 | 169 | /* | 
 | 170 |  * For each address in the range, find the pte for the address | 
 | 171 |  * and check _PAGE_HASHPTE bit; if it is set, find and destroy | 
 | 172 |  * the corresponding HPTE. | 
 | 173 |  */ | 
 | 174 | void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, | 
 | 175 | 		     unsigned long end) | 
 | 176 | { | 
 | 177 | 	flush_range(vma->vm_mm, start, end); | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 178 | } | 
| Benjamin Herrenschmidt | f048aac | 2008-12-18 19:13:38 +0000 | [diff] [blame] | 179 | EXPORT_SYMBOL(flush_tlb_range); |