/*
 * Page fault handler for SH with an MMU.
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2003 - 2008  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/fault.c:
 *   Copyright (C) 1995  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/marker.h>
#include <asm/io_trapped.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
					unsigned long writeaccess,
					unsigned long address)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int si_code;
	int fault;
	siginfo_t info;

	/*
	 * We don't bother with any notifier callbacks here, as they are
	 * all handled through the __do_page_fault() fast-path.
	 */

	tsk = current;
	si_code = SEGV_MAPERR;

	if (unlikely(address >= TASK_SIZE)) {
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;

		pgd = get_TTB() + offset;
		pgd_k = swapper_pg_dir + offset;

		if (!pgd_present(*pgd)) {
			if (!pgd_present(*pgd_k))
				goto bad_area_nosemaphore;
			set_pgd(pgd, *pgd_k);
			return;
		}

		pud = pud_offset(pgd, address);
		pud_k = pud_offset(pgd_k, address);

		if (!pud_present(*pud)) {
			if (!pud_present(*pud_k))
				goto bad_area_nosemaphore;
			set_pud(pud, *pud_k);
			return;
		}

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_area_nosemaphore;
		set_pmd(pmd, *pmd_k);

		return;
	}

	/* Only enable interrupts if they were on before the fault */
	if ((regs->sr & SR_IMASK) != SR_IMASK) {
		trace_hardirqs_on();
		local_irq_enable();
	}

	mm = tsk->mm;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;

	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	si_code = SEGV_ACCERR;
	if (writeaccess) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
survive:
	fault = handle_mm_fault(mm, vma, address, writeaccess);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;

	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	if (user_mode(regs)) {
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		info.si_code = si_code;
		info.si_addr = (void *)address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault?  */
	if (fixup_exception(regs))
		return;

	if (handle_trapped_io(regs, address))
		return;
	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	if (oops_may_print()) {
		unsigned long page;

		if (address < PAGE_SIZE)
			printk(KERN_ALERT "Unable to handle kernel NULL "
			       "pointer dereference");
		else
			printk(KERN_ALERT "Unable to handle kernel paging "
			       "request");
		printk(" at virtual address %08lx\n", address);
		printk(KERN_ALERT "pc = %08lx\n", regs->pc);
		page = (unsigned long)get_TTB();
		if (page) {
			page = ((__typeof__(page) *)page)[address >> PGDIR_SHIFT];
			printk(KERN_ALERT "*pde = %08lx\n", page);
			if (page & _PAGE_PRESENT) {
				page &= PAGE_MASK;
				address &= 0x003ff000;
				page = ((__typeof__(page) *)
					__va(page))[address >> PAGE_SHIFT];
				printk(KERN_ALERT "*pte = %08lx\n", page);
			}
		}
	}

	die("Oops", regs, writeaccess);
	bust_spinlocks(0);
	do_exit(SIGKILL);

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_global_init(current)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (user_mode(regs))
		do_group_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void *)address;
	force_sig_info(SIGBUS, &info, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
}
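
/*
 * Give any registered kprobes fault handler a chance to consume a
 * kernel-mode fault before the regular fault handling runs.
 */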
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
	int ret = 0;

#ifdef CONFIG_KPROBES
	if (!user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, trap))
			ret = 1;
		preempt_enable();
	}
#endif

	return ret;
}

/*
 * Called with interrupts disabled.
 */
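/*
 * Fast-path handler: try to service the fault directly from the page
 * tables and reload the TLB entry.  Returns 0 when the fault was handled
 * here (or consumed by kprobes), non-zero when it has to be passed on to
 * the do_page_fault() slow path above.
 */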
asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
					 unsigned long writeaccess,
					 unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;
	int ret = 0;

	if (notify_page_fault(regs, lookup_exception_vector()))
		goto out;

	ret = 1;

	/*
	 * We don't take page faults for P1, P2, and parts of P4, these
	 * are always mapped, whether it be due to legacy behaviour in
	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
	 */
	if (address >= P3SEG && address < P3_ADDR_MAX) {
		pgd = pgd_offset_k(address);
	} else {
		if (unlikely(address >= TASK_SIZE || !current->mm))
			goto out;

		pgd = pgd_offset(current->mm, address);
	}

	pud = pud_offset(pgd, address);
	if (pud_none_or_clear_bad(pud))
		goto out;
	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		goto out;
	pte = pte_offset_kernel(pmd, address);
	entry = *pte;
	if (unlikely(pte_none(entry) || pte_not_present(entry)))
		goto out;
	if (unlikely(writeaccess && !pte_write(entry)))
		goto out;

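	/*
	 * Mark the entry dirty on a write fault and young on any access
	 * before it is written back and the TLB is updated.
	 */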
	if (writeaccess)
		entry = pte_mkdirty(entry);
	entry = pte_mkyoung(entry);

#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SMP)
	/*
	 * ITLB is not affected by "ldtlb" instruction.
	 * So, we need to flush the entry by ourselves.
	 */
	local_flush_tlb_one(get_asid(), address & PAGE_MASK);
#endif

	set_pte(pte, entry);
	update_mmu_cache(NULL, address, entry);

	ret = 0;
out:
	return ret;
}