/*
 * Page fault handler for SH with an MMU.
 *
 * Copyright (C) 1999  Niibe Yutaka
 * Copyright (C) 2003  Paul Mundt
 *
 * Based on linux/arch/i386/mm/fault.c:
 *  Copyright (C) 1995  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hardirq.h>	/* for in_atomic() */
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/kgdb.h>

extern void die(const char *, struct pt_regs *, long);

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
			      unsigned long address)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long page;

#ifdef CONFIG_SH_KGDB
	if (kgdb_nofault && kgdb_bus_err_hook)
		kgdb_bus_err_hook();
#endif

	tsk = current;
	mm = tsk->mm;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault.
	 */
	if (in_atomic() || !mm)
		goto no_context;

	down_read(&mm->mmap_sem);
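
	/*
	 * Look up the vma covering the faulting address; an address just
	 * below a stack (VM_GROWSDOWN) vma is handled by growing the
	 * stack with expand_stack().
	 */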
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it.
 */
good_area:
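	/*
	 * writeaccess comes in from the low-level exception code and is
	 * nonzero when the faulting access was a store, zero for a load
	 * or instruction fetch.
	 */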
	if (writeaccess) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
survive:
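	/*
	 * VM_FAULT_MINOR means the fault was satisfied without disk I/O,
	 * VM_FAULT_MAJOR means the page had to be brought in from the
	 * backing store. Anything else is a hard failure.
	 */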
	switch (handle_mm_fault(mm, vma, address, writeaccess)) {
	case VM_FAULT_MINOR:
		tsk->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		tsk->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		goto do_sigbus;
	case VM_FAULT_OOM:
		goto out_of_memory;
	default:
		BUG();
	}

	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map.
 * Fix it, but check if it's kernel or user first.
 */
bad_area:
	up_read(&mm->mmap_sem);

	if (user_mode(regs)) {
		tsk->thread.address = address;
		tsk->thread.error_code = writeaccess;
		force_sig(SIGSEGV, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
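	/*
	 * fixup_exception() searches the kernel exception table for a
	 * fixup entry matching regs->pc (as registered by the uaccess
	 * helpers); if one exists, execution resumes there instead of
	 * oopsing.
	 */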
	if (fixup_exception(regs))
		return;

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */
	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request");
	printk(" at virtual address %08lx\n", address);
	printk(KERN_ALERT "pc = %08lx\n", regs->pc);
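	/*
	 * Dump the offending page table entries: MMU_TTB holds the base
	 * of the current pgd. With 4K pages and a two-level table, each
	 * pgd entry maps 4MB, so bits 31:22 of the address index the pgd
	 * and bits 21:12 index the pte page.
	 */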
	asm volatile("mov.l	%1, %0"
		     : "=r" (page)
		     : "m" (__m(MMU_TTB)));
	if (page) {
		page = ((unsigned long *) page)[address >> 22];
		printk(KERN_ALERT "*pde = %08lx\n", page);
		if (page & _PAGE_PRESENT) {
			page &= PAGE_MASK;
			address &= 0x003ff000;
			page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
			printk(KERN_ALERT "*pte = %08lx\n", page);
		}
	}
	die("Oops", regs, writeaccess);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
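	/*
	 * init (pid 1) cannot be killed; give up the CPU and retry the
	 * fault in the hope that memory is freed up in the meantime.
	 */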
	if (current->pid == 1) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (user_mode(regs))
		do_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	tsk->thread.address = address;
	tsk->thread.error_code = writeaccess;
	tsk->thread.trap_no = 14;
	force_sig(SIGBUS, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
}

#ifdef CONFIG_SH_STORE_QUEUES
/*
 * This is a special case for the SH-4 store queues, as pages for this
 * space still need to be faulted in before it's possible to flush the
 * store queue cache for writeout to the remapped region.
 */
#define P3_ADDR_MAX	(P4SEG_STORE_QUE + 0x04000000)
#else
#define P3_ADDR_MAX	P4SEG
#endif

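/*
 * Fast-path TLB refill, entered directly from the low-level exception
 * code. It attempts to reload the TLB from the page tables; a nonzero
 * return means no usable translation exists and the fault is retried
 * through the full do_page_fault() slow path above.
 */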
/*
 * Called with interrupts disabled.
 */
asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
			       unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;
	struct mm_struct *mm = current->mm;
	spinlock_t *ptl;
	int ret = 1;

#ifdef CONFIG_SH_KGDB
	if (kgdb_nofault && kgdb_bus_err_hook)
		kgdb_bus_err_hook();
#endif

	/*
	 * We don't take page faults for P1, P2, and parts of P4, these
	 * are always mapped, whether it be due to legacy behaviour in
	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
	 */
	if (address >= P3SEG && address < P3_ADDR_MAX) {
		pgd = pgd_offset_k(address);
		mm = NULL;
	} else {
		if (unlikely(address >= TASK_SIZE || !mm))
			return 1;

		pgd = pgd_offset(mm, address);
	}

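	/*
	 * mm is cleared for kernel addresses so that the pte lookup
	 * below goes through the kernel page tables without taking a
	 * user pte lock.
	 */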
	pud = pud_offset(pgd, address);
	if (pud_none_or_clear_bad(pud))
		return 1;
	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 1;

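	/*
	 * sh uses a two-level page table with the pud and pmd levels
	 * folded, so the offsets above collapse onto the pgd entry. With
	 * split ptlocks, a user pte must be mapped and locked through
	 * pte_offset_map_lock(); kernel ptes are never unmapped, so a
	 * plain pte_offset_kernel() lookup suffices.
	 */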
	if (mm)
		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	else
		pte = pte_offset_kernel(pmd, address);

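	/*
	 * Anything that can't be refilled directly -- a missing or
	 * non-present pte, or a write through a read-only pte -- is left
	 * to the do_page_fault() slow path, which takes mmap_sem and
	 * calls handle_mm_fault().
	 */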
	entry = *pte;
	if (unlikely(pte_none(entry) || pte_not_present(entry)))
		goto unlock;
	if (unlikely(writeaccess && !pte_write(entry)))
		goto unlock;

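	/*
	 * The TLB is refilled by software on sh, so the dirty and
	 * accessed (young) bits are likewise maintained by hand before
	 * the updated entry is written back and loaded into the TLB.
	 */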
	if (writeaccess)
		entry = pte_mkdirty(entry);
	entry = pte_mkyoung(entry);

#ifdef CONFIG_CPU_SH4
	/*
	 * The ITLB is not affected by the "ldtlb" instruction,
	 * so we need to flush the entry ourselves.
	 */
	__flush_tlb_page(get_asid(), address & PAGE_MASK);
#endif
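
	/*
	 * Write the updated pte back and load it into the TLB;
	 * update_mmu_cache() issues the actual "ldtlb" on sh.
	 */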
	set_pte(pte, entry);
	update_mmu_cache(NULL, address, entry);
	ret = 0;
unlock:
	if (mm)
		pte_unmap_unlock(pte, ptl);
	return ret;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700259}