/*
 * Page fault handler for SH with an MMU.
 *
 * Copyright (C) 1999  Niibe Yutaka
 * Copyright (C) 2003  Paul Mundt
 *
 * Based on linux/arch/i386/mm/fault.c:
 *  Copyright (C) 1995  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/kgdb.h>

extern void die(const char *, struct pt_regs *, long);

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
					unsigned long writeaccess,
					unsigned long address)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long page;
	int si_code;
	siginfo_t info;

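	/*
	 * If KGDB deliberately touched this address (it sets kgdb_nofault
	 * around its own memory probes), hand the fault to its bus error
	 * hook instead of treating it as an ordinary page fault.
	 */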
#ifdef CONFIG_SH_KGDB
	if (kgdb_nofault && kgdb_bus_err_hook)
		kgdb_bus_err_hook();
#endif

	tsk = current;
	mm = tsk->mm;
	si_code = SEGV_MAPERR;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;

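	/*
	 * Take mmap_sem for reading: the VMA tree only needs to stay
	 * stable while we look up (and possibly expand) the faulting vma.
	 */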
	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	si_code = SEGV_ACCERR;
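	/*
	 * A write fault requires VM_WRITE; any other access is permitted
	 * as long as the vma is readable, writable or executable.
	 */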
	if (writeaccess) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
survive:
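	/*
	 * handle_mm_fault() does the real work and tells us whether the
	 * fault was serviced without I/O (minor), required I/O (major),
	 * or failed outright.
	 */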
	switch (handle_mm_fault(mm, vma, address, writeaccess)) {
	case VM_FAULT_MINOR:
		tsk->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		tsk->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		goto do_sigbus;
	case VM_FAULT_OOM:
		goto out_of_memory;
	default:
		BUG();
	}

	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

	if (user_mode(regs)) {
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		info.si_code = si_code;
		info.si_addr = (void *)address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
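	/*
	 * fixup_exception() searches the exception table for the faulting
	 * PC and, on a match, rewrites regs->pc to point at the fixup
	 * handler so the kernel access fails gracefully.
	 */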
	if (fixup_exception(regs))
		return;

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */
	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request");
	printk(" at virtual address %08lx\n", address);
	printk(KERN_ALERT "pc = %08lx\n", regs->pc);
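	/*
	 * Dump the offending page table entries for the oops: the MMU_TTB
	 * register holds the base of the current page directory, which we
	 * walk by hand here.
	 */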
	asm volatile("mov.l	%1, %0"
		     : "=r" (page)
		     : "m" (__m(MMU_TTB)));
	if (page) {
		page = ((unsigned long *)page)[address >> 22];
		printk(KERN_ALERT "*pde = %08lx\n", page);
		if (page & _PAGE_PRESENT) {
			page &= PAGE_MASK;
			address &= 0x003ff000;
			page = ((unsigned long *)__va(page))[address >> PAGE_SHIFT];
			printk(KERN_ALERT "*pte = %08lx\n", page);
		}
	}
	die("Oops", regs, writeaccess);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
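	/*
	 * init must not be killed on OOM; let it yield the CPU and retry
	 * the fault in the hope that some memory has been freed.
	 */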
	if (is_init(current)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (user_mode(regs))
		do_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void *)address;
	force_sig_info(SIGBUS, &info, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
}

#ifdef CONFIG_SH_STORE_QUEUES
/*
 * This is a special case for the SH-4 store queues, as pages for this
 * space still need to be faulted in before it's possible to flush the
 * store queue cache for writeout to the remapped region.
 */
#define P3_ADDR_MAX		(P4SEG_STORE_QUE + 0x04000000)
#else
#define P3_ADDR_MAX		P4SEG
#endif

/*
 * Called with interrupts disabled.
 */
asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
					 unsigned long writeaccess,
					 unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;
	struct mm_struct *mm = current->mm;
	spinlock_t *ptl;
	int ret = 1;
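	/*
	 * This is the fast path for TLB misses; a non-zero return tells
	 * the caller to fall back to the full do_page_fault() slow path.
	 */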

#ifdef CONFIG_SH_KGDB
	if (kgdb_nofault && kgdb_bus_err_hook)
		kgdb_bus_err_hook();
#endif

	/*
	 * We don't take page faults for P1, P2, and parts of P4; these
	 * are always mapped, whether it be due to legacy behaviour in
	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
	 */
	if (address >= P3SEG && address < P3_ADDR_MAX) {
		pgd = pgd_offset_k(address);
		mm = NULL;
	} else {
		if (unlikely(address >= TASK_SIZE || !mm))
			return 1;

		pgd = pgd_offset(mm, address);
	}

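	/*
	 * Walk the upper levels; on SH these are folded into the pgd, but
	 * a missing entry still means this fast path cannot resolve the
	 * fault.
	 */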
	pud = pud_offset(pgd, address);
	if (pud_none_or_clear_bad(pud))
		return 1;
	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 1;

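	/*
	 * User page tables must be sampled under the PTE lock; kernel
	 * mappings have no mm here and can be dereferenced directly.
	 */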
	if (mm)
		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	else
		pte = pte_offset_kernel(pmd, address);

	entry = *pte;
	if (unlikely(pte_none(entry) || pte_not_present(entry)))
		goto unlock;
	if (unlikely(writeaccess && !pte_write(entry)))
		goto unlock;

	if (writeaccess)
		entry = pte_mkdirty(entry);
	entry = pte_mkyoung(entry);

#ifdef CONFIG_CPU_SH4
	/*
	 * The ITLB is not affected by the "ldtlb" instruction, so we
	 * need to flush the old entry ourselves.
	 */
	__flush_tlb_page(get_asid(), address & PAGE_MASK);
#endif

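	/*
	 * Commit the young/dirty updates and prime the TLB for the
	 * restarted access; no vma is at hand here, hence the NULL.
	 */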
	set_pte(pte, entry);
	update_mmu_cache(NULL, address, entry);
	ret = 0;
unlock:
	if (mm)
		pte_unmap_unlock(pte, ptl);
	return ret;
}