/*
 *  linux/arch/x86-64/mm/fault.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2001,2002 Andi Kleen, SuSE Labs.
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>		/* For unblank_screen() */
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/kprobes.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
#include <asm/kdebug.h>
#include <asm-generic/sections.h>

void bust_spinlocks(int yes)
{
	int loglevel_save = console_loglevel;
	if (yes) {
		oops_in_progress = 1;
	} else {
#ifdef CONFIG_VT
		unblank_screen();
#endif
		oops_in_progress = 0;
		/*
		 * OK, the message is on the console.  Now we call printk()
		 * without oops_in_progress set so that printk will give klogd
		 * a poke.  Hold onto your hats...
		 */
		console_loglevel = 15;		/* NMI oopser may have shut the console up */
		printk(" ");
		console_loglevel = loglevel_save;
	}
}

/* Sometimes the CPU reports invalid exceptions on prefetch.
   Check for that here and ignore it.
   Opcode checker based on code by Richard Brunner */
static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
				unsigned long error_code)
{
	unsigned char *instr;
	int scan_more = 1;
	int prefetch = 0;
	unsigned char *max_instr;

	/* If it was an exec fault, ignore it. */
	if (error_code & (1<<4))
		return 0;

	instr = (unsigned char *)convert_rip_to_linear(current, regs);
	max_instr = instr + 15;

	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
		return 0;

	while (scan_more && instr < max_instr) {
		unsigned char opcode;
		unsigned char instr_hi;
		unsigned char instr_lo;

		if (__get_user(opcode, instr))
			break;

		instr_hi = opcode & 0xf0;
		instr_lo = opcode & 0x0f;
		instr++;

		switch (instr_hi) {
		case 0x20:
		case 0x30:
			/* Values 0x26,0x2E,0x36,0x3E are valid x86
			   prefixes.  In long mode, the CPU will signal
			   invalid opcode if some of these prefixes are
			   present so we will never get here anyway */
			scan_more = ((instr_lo & 7) == 0x6);
			break;

		case 0x40:
			/* In AMD64 long mode, 0x40 to 0x4F are valid REX prefixes.
			   Need to figure out under what instruction mode the
			   instruction was issued... */
			/* Could check the LDT for lm, but for now it's good
			   enough to assume that long mode only uses well known
			   segments or kernel. */
			scan_more = (!user_mode(regs)) || (regs->cs == __USER_CS);
			break;

		case 0x60:
			/* 0x64 through 0x67 are valid prefixes in all modes. */
			scan_more = (instr_lo & 0xC) == 0x4;
			break;
		case 0xF0:
			/* 0xF0, 0xF2, and 0xF3 are valid prefixes in all modes. */
			scan_more = !instr_lo || (instr_lo>>1) == 1;
			break;
		case 0x00:
			/* Prefetch instruction is 0x0F0D or 0x0F18 */
			scan_more = 0;
			if (__get_user(opcode, instr))
				break;
			prefetch = (instr_lo == 0xF) &&
				(opcode == 0x0D || opcode == 0x18);
			break;
		default:
			scan_more = 0;
			break;
		}
	}
	return prefetch;
}
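/*
 * Worked example for the scan above: "prefetchnta (%rax)" encodes as
 * 0F 18 00, so the leading 0x0F byte lands in the 0x00 case and the
 * following 0x18 byte marks the fault as a harmless prefetch.  The
 * 15-byte scan limit matches the maximum length of one x86 instruction.
 */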

static int bad_address(void *p)
{
	unsigned long dummy;
	return __get_user(dummy, (unsigned long *)p);
}

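/* Walk the four-level page tables for the given address by hand,
   starting from the PGD that CR3 points at, and print each level's raw
   entry.  The walk stops at the first entry that is not present or
   whose slot cannot even be read (reported as "BAD"). */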
void dump_pagetable(unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	asm("movq %%cr3,%0" : "=r" (pgd));

	pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK);
	pgd += pgd_index(address);
	printk("PGD %lx ", pgd_val(*pgd));
	if (bad_address(pgd)) goto bad;
	if (!pgd_present(*pgd)) goto ret;

	pud = __pud_offset_k((pud_t *)pgd_page(*pgd), address);
	if (bad_address(pud)) goto bad;
	printk("PUD %lx ", pud_val(*pud));
	if (!pud_present(*pud)) goto ret;

	pmd = pmd_offset(pud, address);
	if (bad_address(pmd)) goto bad;
	printk("PMD %lx ", pmd_val(*pmd));
	if (!pmd_present(*pmd)) goto ret;

	pte = pte_offset_kernel(pmd, address);
	if (bad_address(pte)) goto bad;
	printk("PTE %lx", pte_val(*pte));
ret:
	printk("\n");
	return;
bad:
	printk("BAD\n");
}

static const char errata93_warning[] =
KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n"
KERN_ERR "******* Please consider a BIOS update.\n"
KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n";

/* Workaround for K8 erratum #93 & buggy BIOS.
   BIOS SMM functions are required to use a specific workaround
   to avoid corruption of the 64-bit RIP register on C stepping K8.
   A lot of BIOSes that didn't get tested properly miss this.
   The OS sees this as a page fault with the upper 32 bits of RIP cleared.
   Try to work around it here.
   Note we only handle faults in the kernel here. */

static int is_errata93(struct pt_regs *regs, unsigned long address)
{
	static int warned;
	if (address != regs->rip)
		return 0;
	if ((address >> 32) != 0)
		return 0;
	address |= 0xffffffffUL << 32;
	if ((address >= (u64)_stext && address <= (u64)_etext) ||
	    (address >= MODULES_VADDR && address <= MODULES_END)) {
		if (!warned) {
			printk(errata93_warning);
			warned = 1;
		}
		regs->rip = address;
		return 1;
	}
	return 0;
}

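/* Returns non-zero when delivering @sig to @tsk would not run a real
   handler: init (pid 1) always counts as unhandled, a ptraced task
   never does (its tracer will see the signal), and otherwise only
   SIG_IGN/SIG_DFL dispositions count.  do_page_fault() uses this to
   decide whether a user segfault is worth logging. */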
int unhandled_signal(struct task_struct *tsk, int sig)
{
	if (tsk->pid == 1)
		return 1;
	if (tsk->ptrace & PT_PTRACED)
		return 0;
	return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) ||
		(tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
}

static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
				 unsigned long error_code)
{
	unsigned long flags = oops_begin();
	struct task_struct *tsk;

	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
	       current->comm, address);
	dump_pagetable(address);
	tsk = current;
	tsk->thread.cr2 = address;
	tsk->thread.trap_no = 14;
	tsk->thread.error_code = error_code;
	__die("Bad pagetable", regs, error_code);
	oops_end(flags);
	do_exit(SIGKILL);
}

/*
 * Handle a fault on the vmalloc or module mapping area
 *
 * This assumes no large pages in there.
 */
static int vmalloc_fault(unsigned long address)
{
	pgd_t *pgd, *pgd_ref;
	pud_t *pud, *pud_ref;
	pmd_t *pmd, *pmd_ref;
	pte_t *pte, *pte_ref;

	/* Copy kernel mappings over when needed. This can also
	   happen within a race in page table update. In the latter
	   case just flush. */

	pgd = pgd_offset(current->mm ?: &init_mm, address);
	pgd_ref = pgd_offset_k(address);
	if (pgd_none(*pgd_ref))
		return -1;
	if (pgd_none(*pgd))
		set_pgd(pgd, *pgd_ref);

	/* Below here mismatches are bugs because these lower tables
	   are shared */

	pud = pud_offset(pgd, address);
	pud_ref = pud_offset(pgd_ref, address);
	if (pud_none(*pud_ref))
		return -1;
	if (pud_none(*pud) || pud_page(*pud) != pud_page(*pud_ref))
		BUG();
	pmd = pmd_offset(pud, address);
	pmd_ref = pmd_offset(pud_ref, address);
	if (pmd_none(*pmd_ref))
		return -1;
	if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
		BUG();
	pte_ref = pte_offset_kernel(pmd_ref, address);
	if (!pte_present(*pte_ref))
		return -1;
	pte = pte_offset_kernel(pmd, address);
	/* Don't use pte_page here, because the mappings can point
	   outside mem_map, and the NUMA hash lookup cannot handle
	   that. */
	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
		BUG();
	__flush_tlb_all();
	return 0;
}

int page_fault_trace = 0;
int exception_trace = 1;

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * error_code:
 *	bit 0 == 0 means no page found, 1 means protection fault
 *	bit 1 == 0 means read, 1 means write
 *	bit 2 == 0 means kernel, 1 means user-mode
 *	bit 3 == 1 means use of reserved bit detected
 *	bit 4 == 1 means fault was an instruction fetch
 */
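/*
 * Example: a user-mode write to an unmapped address arrives with
 * error_code == 0x6 (user | write, page not present), while a
 * user-mode write to a present read-only page arrives with 0x7.
 */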
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
					unsigned long error_code)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct * vma;
	unsigned long address;
	const struct exception_table_entry *fixup;
	int write;
	unsigned long flags;
	siginfo_t info;

	/* get the address */
	__asm__("movq %%cr2,%0":"=r" (address));
	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
					SIGSEGV) == NOTIFY_STOP)
		return;

	if (likely(regs->eflags & X86_EFLAGS_IF))
		local_irq_enable();

	if (unlikely(page_fault_trace))
		printk("pagefault rip:%lx rsp:%lx cs:%lu ss:%lu address %lx error %lx\n",
		       regs->rip, regs->rsp, regs->cs, regs->ss, address, error_code);

	tsk = current;
	mm = tsk->mm;
	info.si_code = SEGV_MAPERR;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * (error_code & 4) == 0, and that the fault was not a
	 * protection error (error_code & 1) == 0.
	 */
	if (unlikely(address >= TASK_SIZE64)) {
		if (!(error_code & 5) &&
		      ((address >= VMALLOC_START && address < VMALLOC_END) ||
		       (address >= MODULES_VADDR && address < MODULES_END))) {
			if (vmalloc_fault(address) < 0)
				goto bad_area_nosemaphore;
			return;
		}
		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock.
		 */
		goto bad_area_nosemaphore;
	}

	if (unlikely(error_code & (1 << 3)))
		pgtable_bad(address, regs, error_code);

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (unlikely(in_atomic() || !mm))
		goto bad_area_nosemaphore;

 again:
	/* When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in the
	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space.  Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source.  If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if ((error_code & 4) == 0 &&
		    !search_exception_tables(regs->rip))
			goto bad_area_nosemaphore;
		down_read(&mm->mmap_sem);
	}

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
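	/*
	 * User-mode accesses below the stack pointer are accepted as
	 * stack growth only if they stay within the 128-byte red zone
	 * the x86-64 ABI reserves below %rsp; anything further down is
	 * treated as a stray access rather than a request to grow the
	 * stack.
	 */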
	if (error_code & 4) {
		// XXX: align red zone size with ABI
		if (address + 128 < regs->rsp)
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	info.si_code = SEGV_ACCERR;
	write = 0;
	switch (error_code & 3) {
	default:	/* 3: write, present */
		/* fall through */
	case 2:		/* write, not present */
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		write++;
		break;
	case 1:		/* read, present */
		goto bad_area;
	case 0:		/* read, not present */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	switch (handle_mm_fault(mm, vma, address, write)) {
	case VM_FAULT_MINOR:
		tsk->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		tsk->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		goto do_sigbus;
	default:
		goto out_of_memory;
	}

	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (error_code & 4) {
		if (is_prefetch(regs, address, error_code))
			return;

		/* Work around K8 erratum #100: K8 in compat mode
		   occasionally jumps to illegal addresses >4GB.  We
		   catch this here in the page fault handler because
		   these addresses are not reachable.  Just detect this
		   case and return.  Any code segment in LDT is
		   compatibility mode. */
		if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) &&
		    (address >> 32))
			return;

		if (exception_trace && unhandled_signal(tsk, SIGSEGV)) {
			printk(
		       "%s%s[%d]: segfault at %016lx rip %016lx rsp %016lx error %lx\n",
					tsk->pid > 1 ? KERN_INFO : KERN_EMERG,
					tsk->comm, tsk->pid, address, regs->rip,
					regs->rsp, error_code);
		}

		tsk->thread.cr2 = address;
		/* Kernel addresses are always protection faults */
		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
		tsk->thread.trap_no = 14;
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void __user *)address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:

	/* Are we prepared to handle this kernel fault? */
	fixup = search_exception_tables(regs->rip);
	if (fixup) {
		regs->rip = fixup->fixup;
		return;
	}

	/*
	 * Hall of shame of CPU/BIOS bugs.
	 */

	if (is_prefetch(regs, address, error_code))
		return;

	if (is_errata93(regs, address))
		return;

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */

	flags = oops_begin();

	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request");
	printk(" at %016lx RIP: \n" KERN_ALERT, address);
	printk_address(regs->rip);
	printk("\n");
	dump_pagetable(address);
	tsk->thread.cr2 = address;
	tsk->thread.trap_no = 14;
	tsk->thread.error_code = error_code;
	__die("Oops", regs, error_code);
	/* Executive summary in case the body of the oops scrolled away */
	printk(KERN_EMERG "CR2: %016lx\n", address);
	oops_end(flags);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (current->pid == 1) {
		yield();
		goto again;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (error_code & 4)
		do_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (!(error_code & 4))
		goto no_context;

	tsk->thread.cr2 = address;
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 14;
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGBUS, &info, tsk);
	return;
}

static int __init enable_pagefaulttrace(char *str)
{
	page_fault_trace = 1;
	return 0;
}
__setup("pagefaulttrace", enable_pagefaulttrace);
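/* Booting with "pagefaulttrace" on the kernel command line sets
   page_fault_trace, which makes do_page_fault() print the RIP, RSP,
   segment registers and error code of every fault it handles. */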