/*
 *  linux/arch/arm/mm/fault.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/page-flags.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/perf_event.h>

#include <asm/exception.h>
#include <asm/pgtable.h>
#include <asm/system_misc.h>
#include <asm/system_info.h>
#include <asm/tlbflush.h>
#include <asm/cputype.h>
#if defined(CONFIG_ARCH_MSM_SCORPION) && !defined(CONFIG_MSM_SMP)
#include <asm/io.h>
#include <mach/msm_iomap.h>
#endif
#include <mach/msm_rtb.h>

#ifdef CONFIG_EMULATE_DOMAIN_MANAGER_V7
#include <asm/domain.h>
#endif

#include "fault.h"
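
/*
 * Accessors for the CP15 fault-status and translation-table registers;
 * their values are appended to the page-table dump in show_pte().
 */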
static inline unsigned int read_DFSR(void)
{
	unsigned int dfsr;
	asm volatile ("mrc p15, 0, %0, c5, c0, 0" : "=r" (dfsr));
	return dfsr;
}

static inline unsigned int read_TTBCR(void)
{
	unsigned int ttbcr;
	asm volatile ("mrc p15, 0, %0, c2, c0, 2" : "=r" (ttbcr));
	return ttbcr;
}

static inline unsigned int read_TTBR0(void)
{
	unsigned int ttbr0;
	asm volatile ("mrc p15, 0, %0, c2, c0, 0" : "=r" (ttbr0));
	return ttbr0;
}

static inline unsigned int read_TTBR1(void)
{
	unsigned int ttbr1;
	asm volatile ("mrc p15, 0, %0, c2, c0, 1" : "=r" (ttbr1));
	return ttbr1;
}

static inline unsigned int read_MAIR0(void)
{
	unsigned int mair0;
	asm volatile ("mrc p15, 0, %0, c10, c2, 0" : "=r" (mair0));
	return mair0;
}

static inline unsigned int read_MAIR1(void)
{
	unsigned int mair1;
	asm volatile ("mrc p15, 0, %0, c10, c2, 1" : "=r" (mair1));
	return mair1;
}

static inline unsigned int read_SCTLR(void)
{
	unsigned int sctlr;
	asm volatile ("mrc p15, 0, %0, c1, c0, 0" : "=r" (sctlr));
	return sctlr;
}
#ifdef CONFIG_MMU

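/*
 * Give a registered kprobe a chance to handle a kernel-mode fault
 * before the normal fault path runs.  Returns non-zero if the fault
 * was consumed by the kprobe handler.
 */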
#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
{
	int ret = 0;

	if (!user_mode(regs)) {
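		/*
		 * To be potentially processing a kprobe fault and to
		 * be allowed to call kprobe_running(), we have to be
		 * non-preemptible.
		 */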
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, fsr))
			ret = 1;
		preempt_enable();
	}

	return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
{
	return 0;
}
#endif

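/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */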
void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (!mm)
		mm = &init_mm;

	printk(KERN_ALERT "pgd = %p\n", mm->pgd);
	pgd = pgd_offset(mm, addr);
	printk(KERN_ALERT "[%08lx] *pgd=%08llx",
			addr, (long long)pgd_val(*pgd));

	do {
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd))
			break;

		if (pgd_bad(*pgd)) {
			printk("(bad)");
			break;
		}

		pud = pud_offset(pgd, addr);
		if (PTRS_PER_PUD != 1)
			printk(", *pud=%08llx", (long long)pud_val(*pud));

		if (pud_none(*pud))
			break;

		if (pud_bad(*pud)) {
			printk("(bad)");
			break;
		}

		pmd = pmd_offset(pud, addr);
		if (PTRS_PER_PMD != 1)
			printk(", *pmd=%08llx", (long long)pmd_val(*pmd));

		if (pmd_none(*pmd))
			break;

		if (pmd_bad(*pmd)) {
			printk("(bad)");
			break;
		}

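		/* We must not map this if we have highmem enabled */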
		if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
			break;

		pte = pte_offset_map(pmd, addr);
		printk(", *pte=%08llx", (long long)pte_val(*pte));
#ifndef CONFIG_ARM_LPAE
		printk(", *ppte=%08llx",
		       (long long)pte_val(pte[PTE_HWTABLE_PTRS]));
#endif
		pte_unmap(pte);
	} while (0);

	printk("\n");
	printk("DFSR=%08x, TTBCR=%08x, TTBR0=%08x, TTBR1=%08x\n",
	       read_DFSR(), read_TTBCR(), read_TTBR0(), read_TTBR1());
	printk("MAIR0=%08x, MAIR1=%08x, SCTLR=%08x\n",
	       read_MAIR0(), read_MAIR1(), read_SCTLR());
}
#else
void show_pte(struct mm_struct *mm, unsigned long addr)
{ }
#endif

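/*
 * Oops.  The kernel tried to access some page that wasn't present.
 */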
static void
__do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
		  struct pt_regs *regs)
{
	static int enable_logk_die = 1;

	if (enable_logk_die) {
		enable_logk_die = 0;
		uncached_logk(LOGK_DIE, (void *)regs->ARM_pc);
		uncached_logk(LOGK_DIE, (void *)regs->ARM_lr);
		uncached_logk(LOGK_DIE, (void *)addr);
	}

	if (fixup_exception(regs))
		return;

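	/*
	 * No handler, we'll have to terminate things with extreme
	 * prejudice.  Stop MSM RTB logging before we die.
	 */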
	msm_rtb_disable();

	bust_spinlocks(1);
	printk(KERN_ALERT
		"Unable to handle kernel %s at virtual address %08lx\n",
		(addr < PAGE_SIZE) ? "NULL pointer dereference" :
		"paging request", addr);

	show_pte(mm, addr);
	die("Oops", regs, fsr);
	bust_spinlocks(0);
	do_exit(SIGKILL);
}

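/*
 * Something tried to access memory that isn't in our memory map..
 * User mode accesses just cause a SIGSEGV
 */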
static void
__do_user_fault(struct task_struct *tsk, unsigned long addr,
		unsigned int fsr, unsigned int sig, int code,
		struct pt_regs *regs)
{
	struct siginfo si;

#ifdef CONFIG_DEBUG_USER
	if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) ||
	    ((user_debug & UDBG_BUS) && (sig == SIGBUS))) {
		printk(KERN_DEBUG "%s: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n",
		       tsk->comm, sig, addr, fsr);
		show_pte(tsk->mm, addr);
		show_regs(regs);
	}
#endif

	tsk->thread.address = addr;
	tsk->thread.error_code = fsr;
	tsk->thread.trap_no = 14;
	si.si_signo = sig;
	si.si_errno = 0;
	si.si_code = code;
	si.si_addr = (void __user *)addr;
	force_sig_info(sig, &si, tsk);
}

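/*
 * Handle a fault on an address that is known not to be mapped:
 * signal the task for user-mode faults, oops for kernel-mode ones.
 */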
void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->active_mm;

	if (user_mode(regs))
		__do_user_fault(tsk, addr, fsr, SIGSEGV, SEGV_MAPERR, regs);
	else
		__do_kernel_fault(mm, addr, fsr, regs);
}

#ifdef CONFIG_MMU
#define VM_FAULT_BADMAP		0x010000
#define VM_FAULT_BADACCESS	0x020000

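/*
 * Check that the permissions on the VMA allow for the fault which
 * occurred.
 */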
static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
{
	unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;

	if (fsr & FSR_WRITE)
		mask = VM_WRITE;
	if (fsr & FSR_LNX_PF)
		mask = VM_EXEC;

	return vma->vm_flags & mask ? false : true;
}

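/*
 * Look up the VMA for the faulting address and, if the access is
 * permitted, hand the fault to handle_mm_fault().  Returns a
 * VM_FAULT_* code, or VM_FAULT_BADMAP/VM_FAULT_BADACCESS on failure.
 */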
static int __kprobes
__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
		unsigned int flags, struct task_struct *tsk)
{
	struct vm_area_struct *vma;
	int fault;

	vma = find_vma(mm, addr);
	fault = VM_FAULT_BADMAP;
	if (unlikely(!vma))
		goto out;
	if (unlikely(vma->vm_start > addr))
		goto check_stack;

good_area:
	if (access_error(fsr, vma)) {
		fault = VM_FAULT_BADACCESS;
		goto out;
	}

	return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);

check_stack:
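	/* Don't allow expansion below FIRST_USER_ADDRESS */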
	if (vma->vm_flags & VM_GROWSDOWN &&
	    addr >= FIRST_USER_ADDRESS && !expand_stack(vma, addr))
		goto good_area;
out:
	return fault;
}

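/*
 * Top-level page fault handling: take mmap_sem, resolve the fault via
 * __do_page_fault(), retry once if asked to, and convert any remaining
 * error into a user signal or a kernel oops.
 */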
static int __kprobes
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	int fault, sig, code;
	int write = fsr & FSR_WRITE;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
				(write ? FAULT_FLAG_WRITE : 0);

	if (notify_page_fault(regs, fsr))
		return 0;

	tsk = current;
	mm = tsk->mm;

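	/* Enable interrupts if they were enabled in the parent context. */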
	if (interrupts_enabled(regs))
		local_irq_enable();
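
	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */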
	if (in_atomic() || !mm)
		goto no_context;

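	/*
	 * As per x86, we may deadlock here.  However, since the kernel only
	 * validly references user space from well defined areas of the code,
	 * we can bug out early if this is from code which shouldn't.
	 */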
	if (!down_read_trylock(&mm->mmap_sem)) {
		if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
			goto no_context;
retry:
		down_read(&mm->mmap_sem);
	} else {
		might_sleep();
#ifdef CONFIG_DEBUG_VM
		if (!user_mode(regs) &&
		    !search_exception_tables(regs->ARM_pc))
			goto no_context;
#endif
	}

	fault = __do_page_fault(mm, addr, fsr, flags, tsk);

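	/* If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_sem because
	 * it would already be released in __lock_page_or_retry in
	 * mm/filemap.c. */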
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return 0;

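	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt.  If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */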
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
	if (!(fault & VM_FAULT_ERROR) && flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
					regs, addr);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
					regs, addr);
		}
		if (fault & VM_FAULT_RETRY) {
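			/*
			 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation.
			 */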
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);

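	/*
	 * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
	 */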
	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
		return 0;

	if (fault & VM_FAULT_OOM) {
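		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed)
		 */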
		pagefault_out_of_memory();
		return 0;
	}

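	/*
	 * If we are in kernel mode at this point, we
	 * have no context to handle this fault with.
	 */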
	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_SIGBUS) {
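		/*
		 * We had some memory, but were unable to
		 * successfully fix up this page fault.
		 */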
		sig = SIGBUS;
		code = BUS_ADRERR;
	} else {
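		/*
		 * Something tried to access memory that
		 * isn't in our memory map..
		 */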
		sig = SIGSEGV;
		code = fault == VM_FAULT_BADACCESS ?
			SEGV_ACCERR : SEGV_MAPERR;
	}

	__do_user_fault(tsk, addr, fsr, sig, code, regs);
	return 0;

no_context:
	__do_kernel_fault(mm, addr, fsr, regs);
	return 0;
}
#else
static int
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	return 0;
}
#endif

#ifdef CONFIG_MMU
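/*
 * First Level Translation Fault Handler
 *
 * We enter here because the first level page table doesn't contain
 * a valid entry for the address.
 *
 * If the address is in kernel space (>= TASK_SIZE), then we are
 * probably faulting in the vmalloc() area.
 *
 * If the init_task's first level page tables contain the relevant
 * entry, we copy it to this task.  If not, we send the process
 * a signal, fix up the exception, or oops the kernel.
 *
 * NOTE! We MUST NOT take any locks for this case.  We may be in an
 * interrupt or a critical region, and must only copy the information
 * from the master page table, nothing more.
 */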
static int __kprobes
do_translation_fault(unsigned long addr, unsigned int fsr,
		     struct pt_regs *regs)
{
	unsigned int index;
	pgd_t *pgd, *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	if (addr < TASK_SIZE)
		return do_page_fault(addr, fsr, regs);

	if (user_mode(regs))
		goto bad_area;

	index = pgd_index(addr);

	pgd = cpu_get_pgd() + index;
	pgd_k = init_mm.pgd + index;

	if (pgd_none(*pgd_k))
		goto bad_area;
	if (!pgd_present(*pgd))
		set_pgd(pgd, *pgd_k);

	pud = pud_offset(pgd, addr);
	pud_k = pud_offset(pgd_k, addr);

	if (pud_none(*pud_k))
		goto bad_area;
	if (!pud_present(*pud))
		set_pud(pud, *pud_k);

	pmd = pmd_offset(pud, addr);
	pmd_k = pmd_offset(pud_k, addr);

#ifdef CONFIG_ARM_LPAE
	index = 0;
#else
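	/*
	 * On ARM one Linux PGD entry contains two hardware entries (see page
	 * tables layout in pgtable.h).  We normally guarantee that we always
	 * fill both L1 entries.  But create_mapping() doesn't follow the rule.
	 * It can create individual L1 entries, so here we have to call
	 * pmd_none() check for the entry index.
	 */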
	index = (addr >> SECTION_SHIFT) & 1;
#endif
	if (pmd_none(pmd_k[index]))
		goto bad_area;

	copy_pmd(pmd, pmd_k);
	return 0;

bad_area:
	do_bad_area(addr, fsr, regs);
	return 0;
}
#else
static int
do_translation_fault(unsigned long addr, unsigned int fsr,
		     struct pt_regs *regs)
{
	return 0;
}
#endif

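/*
 * Some section permission faults need to be handled gracefully.
 * They can happen due to a __{get,put}_user during an oops.
 */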
static int
do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	do_bad_area(addr, fsr, regs);
	return 0;
}

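/*
 * This abort handler always returns "fault".
 */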
static int
do_bad(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	return 1;
}

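/*
 * On Scorpion UP: helper macro to read and log one of the
 * implementation-defined CP15 error status registers dumped by
 * do_imprecise_ext() below.
 */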
#if defined(CONFIG_ARCH_MSM_SCORPION) && !defined(CONFIG_MSM_SMP)
#define __str(x) #x
#define MRC(x, v1, v2, v4, v5, v6) do {					\
	unsigned int __##x;						\
	asm("mrc " __str(v1) ", " __str(v2) ", %0, " __str(v4) ", "	\
	    __str(v5) ", " __str(v6) "\n"				\
	    : "=r" (__##x));						\
	pr_info("%s: %s = 0x%.8x\n", __func__, #x, __##x);		\
} while (0)

#define MSM_TCSR_SPARE2 (MSM_TCSR_BASE + 0x60)

#endif

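/*
 * Imprecise external aborts are asynchronous, so there is no single
 * faulting instruction to report.  On Scorpion UP we dump (and then
 * clear) the CP15 error syndrome registers to aid debugging; the
 * abort itself is still reported as unhandled.
 */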
int
do_imprecise_ext(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
#if defined(CONFIG_ARCH_MSM_SCORPION) && !defined(CONFIG_MSM_SMP)
	MRC(ADFSR,    p15, 0,  c5, c1, 0);
	MRC(DFSR,     p15, 0,  c5, c0, 0);
	MRC(ACTLR,    p15, 0,  c1, c0, 1);
	MRC(EFSR,     p15, 7, c15, c0, 1);
	MRC(L2SR,     p15, 3, c15, c1, 0);
	MRC(L2CR0,    p15, 3, c15, c0, 1);
	MRC(L2CPUESR, p15, 3, c15, c1, 1);
	MRC(L2CPUCR,  p15, 3, c15, c0, 2);
	MRC(SPESR,    p15, 1,  c9, c7, 0);
	MRC(SPCR,     p15, 0,  c9, c7, 0);
	MRC(DMACHSR,  p15, 1, c11, c0, 0);
	MRC(DMACHESR, p15, 1, c11, c0, 1);
	MRC(DMACHCR,  p15, 0, c11, c0, 2);

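	/* Clear out EFSR and ADFSR now that they have been logged */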
	asm volatile ("mcr p15, 7, %0, c15, c0, 1\n\t"
		      "mcr p15, 0, %0, c5, c1, 0"
		      : : "r" (0));

	pr_info("%s: TCSR_SPARE2 = 0x%.8x\n", __func__, readl(MSM_TCSR_SPARE2));
#endif
	return 1;
}

struct fsr_info {
	int (*fn)(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
	int sig;
	int code;
	const char *name;
};

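/*
 * The fsr_info[] and ifsr_info[] tables, mapping fault status codes to
 * handlers, are provided by the include matching the page table format.
 */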
#ifdef CONFIG_ARM_LPAE
#include "fsr-3level.c"
#else
#include "fsr-2level.c"
#endif

void __init
hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
		int sig, int code, const char *name)
{
	if (nr < 0 || nr >= ARRAY_SIZE(fsr_info))
		BUG();

	fsr_info[nr].fn = fn;
	fsr_info[nr].sig = sig;
	fsr_info[nr].code = code;
	fsr_info[nr].name = name;
}

#ifdef CONFIG_MSM_KRAIT_TBB_ABORT_HANDLER
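/*
 * On Krait, a conditional Thumb instruction inside an IT block can
 * report a data abort even though its condition check failed.  Such an
 * instruction should have executed as a NOP, so if the CPSR IT state
 * shows a failed condition, skip the instruction (PC += 2) and resume.
 */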
static int krait_tbb_fixup(unsigned int fsr, struct pt_regs *regs)
{
	int base_cond, cond = 0;
	unsigned int p1, cpsr_z, cpsr_c, cpsr_n, cpsr_v;

	if ((read_cpuid_id() & 0xFFFFFFFC) != 0x510F04D0)
		return 0;

	if (!thumb_mode(regs))
		return 0;

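	/* Only fix up aborts taken inside an IT block */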
	if ((regs->ARM_cpsr & PSR_IT_MASK) == 0)
		return 0;

	cpsr_n = (regs->ARM_cpsr & PSR_N_BIT) ? 1 : 0;
	cpsr_z = (regs->ARM_cpsr & PSR_Z_BIT) ? 1 : 0;
	cpsr_c = (regs->ARM_cpsr & PSR_C_BIT) ? 1 : 0;
	cpsr_v = (regs->ARM_cpsr & PSR_V_BIT) ? 1 : 0;

	p1 = (regs->ARM_cpsr & BIT(12)) ? 1 : 0;

	base_cond = (regs->ARM_cpsr >> 13) & 0x07;

	switch (base_cond) {
	case 0x0:
		cond = cpsr_z;
		break;

	case 0x1:
		cond = cpsr_c;
		break;

	case 0x2:
		cond = cpsr_n;
		break;

	case 0x3:
		cond = cpsr_v;
		break;

	case 0x4:
		cond = (cpsr_c == 1) && (cpsr_z == 0);
		break;

	case 0x5:
		cond = (cpsr_n == cpsr_v);
		break;

	case 0x6:
		cond = (cpsr_z == 0) && (cpsr_n == cpsr_v);
		break;

	case 0x7:
		cond = 1;
		break;
	}

	if (cond == p1) {
		pr_debug("Conditional abort fixup, PC=%08x, base=%d, cond=%d\n",
			 (unsigned int) regs->ARM_pc, base_cond, cond);
		regs->ARM_pc += 2;
		return 1;
	}
	return 0;
}
#endif

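/*
 * Dispatch a data abort to the relevant handler.
 */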
asmlinkage void __exception
do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
	struct siginfo info;

#ifdef CONFIG_EMULATE_DOMAIN_MANAGER_V7
	if (emulate_domain_manager_data_abort(fsr, addr))
		return;
#endif

#ifdef CONFIG_MSM_KRAIT_TBB_ABORT_HANDLER
	if (krait_tbb_fixup(fsr, regs))
		return;
#endif

	if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
		return;

	printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
		inf->name, fsr, addr);

	info.si_signo = inf->sig;
	info.si_errno = 0;
	info.si_code = inf->code;
	info.si_addr = (void __user *)addr;
	arm_notify_die("", regs, &info, fsr, 0);
}

void __init
hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
		 int sig, int code, const char *name)
{
	if (nr < 0 || nr >= ARRAY_SIZE(ifsr_info))
		BUG();

	ifsr_info[nr].fn = fn;
	ifsr_info[nr].sig = sig;
	ifsr_info[nr].code = code;
	ifsr_info[nr].name = name;
}

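/*
 * Dispatch a prefetch abort to the relevant handler.
 */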
asmlinkage void __exception
do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
{
	const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
	struct siginfo info;

#ifdef CONFIG_EMULATE_DOMAIN_MANAGER_V7
	if (emulate_domain_manager_prefetch_abort(ifsr, addr))
		return;
#endif

	if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
		return;

	printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
		inf->name, ifsr, addr);

	info.si_signo = inf->sig;
	info.si_errno = 0;
	info.si_code = inf->code;
	info.si_addr = (void __user *)addr;
	arm_notify_die("", regs, &info, ifsr, 0);
}

#ifndef CONFIG_ARM_LPAE
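/*
 * Hook fault codes whose meaning depends on the architecture revision:
 * I-cache maintenance faults appear on ARMv6 and later, access flag
 * faults on ARMv7 and later.
 */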
static int __init exceptions_init(void)
{
	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
		hook_fault_code(4, do_translation_fault, SIGSEGV, SEGV_MAPERR,
				"I-cache maintenance fault");
	}

	if (cpu_architecture() >= CPU_ARCH_ARMv7) {
		hook_fault_code(3, do_bad, SIGSEGV, SEGV_MAPERR,
				"section access flag fault");
		hook_fault_code(6, do_bad, SIGSEGV, SEGV_MAPERR,
				"section access flag fault");
	}

	return 0;
}

arch_initcall(exceptions_init);
#endif