| /* | 
 |  *  Copyright (C) 1995  Linus Torvalds | 
 |  *  Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs. | 
 |  *  Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar | 
 |  */ | 
 | #include <linux/magic.h>		/* STACK_END_MAGIC		*/ | 
 | #include <linux/sched.h>		/* test_thread_flag(), ...	*/ | 
 | #include <linux/kdebug.h>		/* oops_begin/end, ...		*/ | 
 | #include <linux/module.h>		/* search_exception_table	*/ | 
 | #include <linux/bootmem.h>		/* max_low_pfn			*/ | 
 | #include <linux/kprobes.h>		/* __kprobes, ...		*/ | 
 | #include <linux/mmiotrace.h>		/* kmmio_handler, ...		*/ | 
 | #include <linux/perf_event.h>		/* perf_sw_event		*/ | 
 | #include <linux/hugetlb.h>		/* hstate_index_to_shift	*/ | 
 |  | 
 | #include <asm/traps.h>			/* dotraplinkage, ...		*/ | 
 | #include <asm/pgalloc.h>		/* pgd_*(), ...			*/ | 
 | #include <asm/kmemcheck.h>		/* kmemcheck_*(), ...		*/ | 
 |  | 
 | /* | 
 |  * Page fault error code bits: | 
 |  * | 
 |  *   bit 0 ==	 0: no page found	1: protection fault | 
 |  *   bit 1 ==	 0: read access		1: write access | 
 |  *   bit 2 ==	 0: kernel-mode access	1: user-mode access | 
 |  *   bit 3 ==				1: use of reserved bit detected | 
 |  *   bit 4 ==				1: fault was an instruction fetch | 
 |  */ | 
 | enum x86_pf_error_code { | 
 |  | 
 | 	PF_PROT		=		1 << 0, | 
 | 	PF_WRITE	=		1 << 1, | 
 | 	PF_USER		=		1 << 2, | 
 | 	PF_RSVD		=		1 << 3, | 
 | 	PF_INSTR	=		1 << 4, | 
 | }; | 
 |  | 
 | /* | 
 * Returns -1 if mmiotrace handled the fault, or 0 if mmiotrace
 * is disabled or did not handle the fault:
 |  */ | 
 | static inline int __kprobes | 
 | kmmio_fault(struct pt_regs *regs, unsigned long addr) | 
 | { | 
 | 	if (unlikely(is_kmmio_active())) | 
 | 		if (kmmio_handler(regs, addr) == 1) | 
 | 			return -1; | 
 | 	return 0; | 
 | } | 
 |  | 
 | static inline int __kprobes notify_page_fault(struct pt_regs *regs) | 
 | { | 
 | 	int ret = 0; | 
 |  | 
 | 	/* kprobe_running() needs smp_processor_id() */ | 
 | 	if (kprobes_built_in() && !user_mode_vm(regs)) { | 
 | 		preempt_disable(); | 
 | 		if (kprobe_running() && kprobe_fault_handler(regs, 14)) | 
 | 			ret = 1; | 
 | 		preempt_enable(); | 
 | 	} | 
 |  | 
 | 	return ret; | 
 | } | 
 |  | 
 | /* | 
 |  * Prefetch quirks: | 
 |  * | 
 |  * 32-bit mode: | 
 |  * | 
 |  *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch. | 
 |  *   Check that here and ignore it. | 
 |  * | 
 |  * 64-bit mode: | 
 |  * | 
 |  *   Sometimes the CPU reports invalid exceptions on prefetch. | 
 |  *   Check that here and ignore it. | 
 |  * | 
 |  * Opcode checker based on code by Richard Brunner. | 
 |  */ | 
 | static inline int | 
 | check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr, | 
 | 		      unsigned char opcode, int *prefetch) | 
 | { | 
 | 	unsigned char instr_hi = opcode & 0xf0; | 
 | 	unsigned char instr_lo = opcode & 0x0f; | 
 |  | 
 | 	switch (instr_hi) { | 
 | 	case 0x20: | 
 | 	case 0x30: | 
 | 		/* | 
 | 		 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. | 
 | 		 * In X86_64 long mode, the CPU will signal invalid | 
		 * opcode if some of these prefixes are present, so
		 * X86_64 will never get here anyway.
 | 		 */ | 
 | 		return ((instr_lo & 7) == 0x6); | 
 | #ifdef CONFIG_X86_64 | 
 | 	case 0x40: | 
 | 		/* | 
		 * In AMD64 long mode 0x40..0x4F are valid REX prefixes.
 | 		 * Need to figure out under what instruction mode the | 
 | 		 * instruction was issued. Could check the LDT for lm, | 
 | 		 * but for now it's good enough to assume that long | 
 | 		 * mode only uses well known segments or kernel. | 
 | 		 */ | 
 | 		return (!user_mode(regs)) || (regs->cs == __USER_CS); | 
 | #endif | 
 | 	case 0x60: | 
 | 		/* 0x64 thru 0x67 are valid prefixes in all modes. */ | 
 | 		return (instr_lo & 0xC) == 0x4; | 
 | 	case 0xF0: | 
 | 		/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */ | 
		return !instr_lo || (instr_lo >> 1) == 1;
 | 	case 0x00: | 
 | 		/* Prefetch instruction is 0x0F0D or 0x0F18 */ | 
 | 		if (probe_kernel_address(instr, opcode)) | 
 | 			return 0; | 
 |  | 
 | 		*prefetch = (instr_lo == 0xF) && | 
 | 			(opcode == 0x0D || opcode == 0x18); | 
 | 		return 0; | 
 | 	default: | 
 | 		return 0; | 
 | 	} | 
 | } | 
 |  | 
 | static int | 
 | is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr) | 
 | { | 
 | 	unsigned char *max_instr; | 
 | 	unsigned char *instr; | 
 | 	int prefetch = 0; | 
 |  | 
 | 	/* | 
	 * If it was an exec (instruction fetch) fault on NX page, then
 | 	 * do not ignore the fault: | 
 | 	 */ | 
 | 	if (error_code & PF_INSTR) | 
 | 		return 0; | 
 |  | 
 | 	instr = (void *)convert_ip_to_linear(current, regs); | 
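	/* x86 instructions are at most 15 bytes long: */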
 | 	max_instr = instr + 15; | 
 |  | 
 | 	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE) | 
 | 		return 0; | 
 |  | 
 | 	while (instr < max_instr) { | 
 | 		unsigned char opcode; | 
 |  | 
 | 		if (probe_kernel_address(instr, opcode)) | 
 | 			break; | 
 |  | 
 | 		instr++; | 
 |  | 
 | 		if (!check_prefetch_opcode(regs, instr, opcode, &prefetch)) | 
 | 			break; | 
 | 	} | 
 | 	return prefetch; | 
 | } | 
 |  | 
 | static void | 
 | force_sig_info_fault(int si_signo, int si_code, unsigned long address, | 
 | 		     struct task_struct *tsk, int fault) | 
 | { | 
 | 	unsigned lsb = 0; | 
 | 	siginfo_t info; | 
 |  | 
 | 	info.si_signo	= si_signo; | 
 | 	info.si_errno	= 0; | 
 | 	info.si_code	= si_code; | 
 | 	info.si_addr	= (void __user *)address; | 
 | 	if (fault & VM_FAULT_HWPOISON_LARGE) | 
		lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
 | 	if (fault & VM_FAULT_HWPOISON) | 
 | 		lsb = PAGE_SHIFT; | 
 | 	info.si_addr_lsb = lsb; | 
 |  | 
 | 	force_sig_info(si_signo, &info, tsk); | 
 | } | 
 |  | 
 | DEFINE_SPINLOCK(pgd_lock); | 
 | LIST_HEAD(pgd_list); | 
 |  | 
 | #ifdef CONFIG_X86_32 | 
 | static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address) | 
 | { | 
 | 	unsigned index = pgd_index(address); | 
 | 	pgd_t *pgd_k; | 
 | 	pud_t *pud, *pud_k; | 
 | 	pmd_t *pmd, *pmd_k; | 
 |  | 
 | 	pgd += index; | 
 | 	pgd_k = init_mm.pgd + index; | 
 |  | 
 | 	if (!pgd_present(*pgd_k)) | 
 | 		return NULL; | 
 |  | 
 | 	/* | 
 | 	 * set_pgd(pgd, *pgd_k); here would be useless on PAE | 
 | 	 * and redundant with the set_pmd() on non-PAE. As would | 
 | 	 * set_pud. | 
 | 	 */ | 
 | 	pud = pud_offset(pgd, address); | 
 | 	pud_k = pud_offset(pgd_k, address); | 
 | 	if (!pud_present(*pud_k)) | 
 | 		return NULL; | 
 |  | 
 | 	pmd = pmd_offset(pud, address); | 
 | 	pmd_k = pmd_offset(pud_k, address); | 
 | 	if (!pmd_present(*pmd_k)) | 
 | 		return NULL; | 
 |  | 
 | 	if (!pmd_present(*pmd)) | 
 | 		set_pmd(pmd, *pmd_k); | 
 | 	else | 
 | 		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k)); | 
 |  | 
 | 	return pmd_k; | 
 | } | 
 |  | 
 | void vmalloc_sync_all(void) | 
 | { | 
 | 	unsigned long address; | 
 |  | 
 | 	if (SHARED_KERNEL_PMD) | 
 | 		return; | 
 |  | 
 | 	for (address = VMALLOC_START & PMD_MASK; | 
 | 	     address >= TASK_SIZE && address < FIXADDR_TOP; | 
 | 	     address += PMD_SIZE) { | 
 |  | 
 | 		unsigned long flags; | 
 | 		struct page *page; | 
 |  | 
 | 		spin_lock_irqsave(&pgd_lock, flags); | 
 | 		list_for_each_entry(page, &pgd_list, lru) { | 
 | 			spinlock_t *pgt_lock; | 
 | 			pmd_t *ret; | 
 |  | 
 | 			pgt_lock = &pgd_page_get_mm(page)->page_table_lock; | 
 |  | 
 | 			spin_lock(pgt_lock); | 
 | 			ret = vmalloc_sync_one(page_address(page), address); | 
 | 			spin_unlock(pgt_lock); | 
 |  | 
 | 			if (!ret) | 
 | 				break; | 
 | 		} | 
 | 		spin_unlock_irqrestore(&pgd_lock, flags); | 
 | 	} | 
 | } | 
 |  | 
 | /* | 
 |  * 32-bit: | 
 |  * | 
 |  *   Handle a fault on the vmalloc or module mapping area | 
 |  */ | 
 | static noinline __kprobes int vmalloc_fault(unsigned long address) | 
 | { | 
 | 	unsigned long pgd_paddr; | 
 | 	pmd_t *pmd_k; | 
 | 	pte_t *pte_k; | 
 |  | 
 | 	/* Make sure we are in vmalloc area: */ | 
 | 	if (!(address >= VMALLOC_START && address < VMALLOC_END)) | 
 | 		return -1; | 
 |  | 
 | 	WARN_ON_ONCE(in_nmi()); | 
 |  | 
 | 	/* | 
 | 	 * Synchronize this task's top level page-table | 
 | 	 * with the 'reference' page table. | 
 | 	 * | 
 | 	 * Do _not_ use "current" here. We might be inside | 
 | 	 * an interrupt in the middle of a task switch.. | 
 | 	 */ | 
 | 	pgd_paddr = read_cr3(); | 
 | 	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address); | 
 | 	if (!pmd_k) | 
 | 		return -1; | 
 |  | 
 | 	pte_k = pte_offset_kernel(pmd_k, address); | 
 | 	if (!pte_present(*pte_k)) | 
 | 		return -1; | 
 |  | 
 | 	return 0; | 
 | } | 
 |  | 
 | /* | 
 |  * Did it hit the DOS screen memory VA from vm86 mode? | 
 |  */ | 
 | static inline void | 
 | check_v8086_mode(struct pt_regs *regs, unsigned long address, | 
 | 		 struct task_struct *tsk) | 
 | { | 
 | 	unsigned long bit; | 
 |  | 
 | 	if (!v8086_mode(regs)) | 
 | 		return; | 
 |  | 
 | 	bit = (address - 0xA0000) >> PAGE_SHIFT; | 
 | 	if (bit < 32) | 
 | 		tsk->thread.screen_bitmap |= 1 << bit; | 
 | } | 
 |  | 
 | static bool low_pfn(unsigned long pfn) | 
 | { | 
 | 	return pfn < max_low_pfn; | 
 | } | 
 |  | 
 | static void dump_pagetable(unsigned long address) | 
 | { | 
 | 	pgd_t *base = __va(read_cr3()); | 
 | 	pgd_t *pgd = &base[pgd_index(address)]; | 
 | 	pmd_t *pmd; | 
 | 	pte_t *pte; | 
 |  | 
 | #ifdef CONFIG_X86_PAE | 
 | 	printk("*pdpt = %016Lx ", pgd_val(*pgd)); | 
 | 	if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd)) | 
 | 		goto out; | 
 | #endif | 
 | 	pmd = pmd_offset(pud_offset(pgd, address), address); | 
 | 	printk(KERN_CONT "*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd)); | 
 |  | 
 | 	/* | 
 | 	 * We must not directly access the pte in the highpte | 
 | 	 * case if the page table is located in highmem. | 
 | 	 * And let's rather not kmap-atomic the pte, just in case | 
 | 	 * it's allocated already: | 
 | 	 */ | 
 | 	if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd)) | 
 | 		goto out; | 
 |  | 
 | 	pte = pte_offset_kernel(pmd, address); | 
 | 	printk("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte)); | 
 | out: | 
 | 	printk("\n"); | 
 | } | 
 |  | 
 | #else /* CONFIG_X86_64: */ | 
 |  | 
 | void vmalloc_sync_all(void) | 
 | { | 
 | 	sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END); | 
 | } | 
 |  | 
 | /* | 
 |  * 64-bit: | 
 |  * | 
 |  *   Handle a fault on the vmalloc area | 
 |  * | 
 |  * This assumes no large pages in there. | 
 |  */ | 
 | static noinline __kprobes int vmalloc_fault(unsigned long address) | 
 | { | 
 | 	pgd_t *pgd, *pgd_ref; | 
 | 	pud_t *pud, *pud_ref; | 
 | 	pmd_t *pmd, *pmd_ref; | 
 | 	pte_t *pte, *pte_ref; | 
 |  | 
 | 	/* Make sure we are in vmalloc area: */ | 
 | 	if (!(address >= VMALLOC_START && address < VMALLOC_END)) | 
 | 		return -1; | 
 |  | 
 | 	WARN_ON_ONCE(in_nmi()); | 
 |  | 
 | 	/* | 
	 * Copy kernel mappings over when needed. This can also
	 * happen due to a race in page table update. In the latter
	 * case just flush:
 | 	 */ | 
 | 	pgd = pgd_offset(current->active_mm, address); | 
 | 	pgd_ref = pgd_offset_k(address); | 
 | 	if (pgd_none(*pgd_ref)) | 
 | 		return -1; | 
 |  | 
 | 	if (pgd_none(*pgd)) | 
 | 		set_pgd(pgd, *pgd_ref); | 
 | 	else | 
 | 		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); | 
 |  | 
 | 	/* | 
 | 	 * Below here mismatches are bugs because these lower tables | 
 | 	 * are shared: | 
 | 	 */ | 
 |  | 
 | 	pud = pud_offset(pgd, address); | 
 | 	pud_ref = pud_offset(pgd_ref, address); | 
 | 	if (pud_none(*pud_ref)) | 
 | 		return -1; | 
 |  | 
 | 	if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref)) | 
 | 		BUG(); | 
 |  | 
 | 	pmd = pmd_offset(pud, address); | 
 | 	pmd_ref = pmd_offset(pud_ref, address); | 
 | 	if (pmd_none(*pmd_ref)) | 
 | 		return -1; | 
 |  | 
 | 	if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref)) | 
 | 		BUG(); | 
 |  | 
 | 	pte_ref = pte_offset_kernel(pmd_ref, address); | 
 | 	if (!pte_present(*pte_ref)) | 
 | 		return -1; | 
 |  | 
 | 	pte = pte_offset_kernel(pmd, address); | 
 |  | 
 | 	/* | 
 | 	 * Don't use pte_page here, because the mappings can point | 
 | 	 * outside mem_map, and the NUMA hash lookup cannot handle | 
 | 	 * that: | 
 | 	 */ | 
 | 	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref)) | 
 | 		BUG(); | 
 |  | 
 | 	return 0; | 
 | } | 
 |  | 
 | static const char errata93_warning[] = | 
KERN_ERR
 | "******* Your BIOS seems to not contain a fix for K8 errata #93\n" | 
 | "******* Working around it, but it may cause SEGVs or burn power.\n" | 
 | "******* Please consider a BIOS update.\n" | 
 | "******* Disabling USB legacy in the BIOS may also help.\n"; | 
 |  | 
 | /* | 
 |  * No vm86 mode in 64-bit mode: | 
 |  */ | 
 | static inline void | 
 | check_v8086_mode(struct pt_regs *regs, unsigned long address, | 
 | 		 struct task_struct *tsk) | 
 | { | 
 | } | 
 |  | 
 | static int bad_address(void *p) | 
 | { | 
 | 	unsigned long dummy; | 
 |  | 
 | 	return probe_kernel_address((unsigned long *)p, dummy); | 
 | } | 
 |  | 
 | static void dump_pagetable(unsigned long address) | 
 | { | 
 | 	pgd_t *base = __va(read_cr3() & PHYSICAL_PAGE_MASK); | 
 | 	pgd_t *pgd = base + pgd_index(address); | 
 | 	pud_t *pud; | 
 | 	pmd_t *pmd; | 
 | 	pte_t *pte; | 
 |  | 
 | 	if (bad_address(pgd)) | 
 | 		goto bad; | 
 |  | 
 | 	printk("PGD %lx ", pgd_val(*pgd)); | 
 |  | 
 | 	if (!pgd_present(*pgd)) | 
 | 		goto out; | 
 |  | 
 | 	pud = pud_offset(pgd, address); | 
 | 	if (bad_address(pud)) | 
 | 		goto bad; | 
 |  | 
 | 	printk("PUD %lx ", pud_val(*pud)); | 
 | 	if (!pud_present(*pud) || pud_large(*pud)) | 
 | 		goto out; | 
 |  | 
 | 	pmd = pmd_offset(pud, address); | 
 | 	if (bad_address(pmd)) | 
 | 		goto bad; | 
 |  | 
 | 	printk("PMD %lx ", pmd_val(*pmd)); | 
 | 	if (!pmd_present(*pmd) || pmd_large(*pmd)) | 
 | 		goto out; | 
 |  | 
 | 	pte = pte_offset_kernel(pmd, address); | 
 | 	if (bad_address(pte)) | 
 | 		goto bad; | 
 |  | 
 | 	printk("PTE %lx", pte_val(*pte)); | 
 | out: | 
 | 	printk("\n"); | 
 | 	return; | 
 | bad: | 
 | 	printk("BAD\n"); | 
 | } | 
 |  | 
 | #endif /* CONFIG_X86_64 */ | 
 |  | 
 | /* | 
 |  * Workaround for K8 erratum #93 & buggy BIOS. | 
 |  * | 
 * BIOS SMM functions are required to use a specific workaround
 * to avoid corruption of the 64-bit RIP register on C stepping K8.
 *
 * A lot of BIOSes that didn't get tested properly miss this.
 *
 * The OS sees this as a page fault with the upper 32 bits of RIP cleared.
 |  * Try to work around it here. | 
 |  * | 
 |  * Note we only handle faults in kernel here. | 
 |  * Does nothing on 32-bit. | 
 |  */ | 
 | static int is_errata93(struct pt_regs *regs, unsigned long address) | 
 | { | 
 | #ifdef CONFIG_X86_64 | 
 | 	if (address != regs->ip) | 
 | 		return 0; | 
 |  | 
 | 	if ((address >> 32) != 0) | 
 | 		return 0; | 
 |  | 
 | 	address |= 0xffffffffUL << 32; | 
 | 	if ((address >= (u64)_stext && address <= (u64)_etext) || | 
 | 	    (address >= MODULES_VADDR && address <= MODULES_END)) { | 
 | 		printk_once(errata93_warning); | 
 | 		regs->ip = address; | 
 | 		return 1; | 
 | 	} | 
 | #endif | 
 | 	return 0; | 
 | } | 
 |  | 
 | /* | 
 * Work around K8 erratum #100: K8 in compat mode occasionally jumps
 * to illegal addresses >4GB.
 *
 * We catch this in the page fault handler because these addresses
 * are not reachable. Just detect this case and return. Any code
 * segment in the LDT is compatibility mode.
 |  */ | 
 | static int is_errata100(struct pt_regs *regs, unsigned long address) | 
 | { | 
 | #ifdef CONFIG_X86_64 | 
 | 	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32)) | 
 | 		return 1; | 
 | #endif | 
 | 	return 0; | 
 | } | 
 |  | 
 | static int is_f00f_bug(struct pt_regs *regs, unsigned long address) | 
 | { | 
 | #ifdef CONFIG_X86_F00F_BUG | 
 | 	unsigned long nr; | 
 |  | 
 | 	/* | 
 | 	 * Pentium F0 0F C7 C8 bug workaround: | 
 | 	 */ | 
 | 	if (boot_cpu_data.f00f_bug) { | 
 | 		nr = (address - idt_descr.address) >> 3; | 
 |  | 
 | 		if (nr == 6) { | 
 | 			do_invalid_op(regs, 0); | 
 | 			return 1; | 
 | 		} | 
 | 	} | 
 | #endif | 
 | 	return 0; | 
 | } | 
 |  | 
 | static const char nx_warning[] = KERN_CRIT | 
 | "kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n"; | 
 |  | 
 | static void | 
 | show_fault_oops(struct pt_regs *regs, unsigned long error_code, | 
 | 		unsigned long address) | 
 | { | 
 | 	if (!oops_may_print()) | 
 | 		return; | 
 |  | 
 | 	if (error_code & PF_INSTR) { | 
 | 		unsigned int level; | 
 |  | 
 | 		pte_t *pte = lookup_address(address, &level); | 
 |  | 
 | 		if (pte && pte_present(*pte) && !pte_exec(*pte)) | 
 | 			printk(nx_warning, current_uid()); | 
 | 	} | 
 |  | 
 | 	printk(KERN_ALERT "BUG: unable to handle kernel "); | 
 | 	if (address < PAGE_SIZE) | 
 | 		printk(KERN_CONT "NULL pointer dereference"); | 
 | 	else | 
 | 		printk(KERN_CONT "paging request"); | 
 |  | 
 | 	printk(KERN_CONT " at %p\n", (void *) address); | 
 | 	printk(KERN_ALERT "IP:"); | 
 | 	printk_address(regs->ip, 1); | 
 |  | 
 | 	dump_pagetable(address); | 
 | } | 
 |  | 
 | static noinline void | 
 | pgtable_bad(struct pt_regs *regs, unsigned long error_code, | 
 | 	    unsigned long address) | 
 | { | 
 | 	struct task_struct *tsk; | 
 | 	unsigned long flags; | 
 | 	int sig; | 
 |  | 
 | 	flags = oops_begin(); | 
 | 	tsk = current; | 
 | 	sig = SIGKILL; | 
 |  | 
 | 	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n", | 
 | 	       tsk->comm, address); | 
 | 	dump_pagetable(address); | 
 |  | 
 | 	tsk->thread.cr2		= address; | 
 | 	tsk->thread.trap_no	= 14; | 
 | 	tsk->thread.error_code	= error_code; | 
 |  | 
 | 	if (__die("Bad pagetable", regs, error_code)) | 
 | 		sig = 0; | 
 |  | 
 | 	oops_end(flags, regs, sig); | 
 | } | 
 |  | 
 | static noinline void | 
 | no_context(struct pt_regs *regs, unsigned long error_code, | 
 | 	   unsigned long address) | 
 | { | 
 | 	struct task_struct *tsk = current; | 
 | 	unsigned long *stackend; | 
 | 	unsigned long flags; | 
 | 	int sig; | 
 |  | 
 | 	/* Are we prepared to handle this kernel fault? */ | 
 | 	if (fixup_exception(regs)) | 
 | 		return; | 
 |  | 
 | 	/* | 
 | 	 * 32-bit: | 
 | 	 * | 
 | 	 *   Valid to do another page fault here, because if this fault | 
	 *   had been triggered by is_prefetch, fixup_exception would have
 | 	 *   handled it. | 
 | 	 * | 
 | 	 * 64-bit: | 
 | 	 * | 
 | 	 *   Hall of shame of CPU/BIOS bugs. | 
 | 	 */ | 
 | 	if (is_prefetch(regs, error_code, address)) | 
 | 		return; | 
 |  | 
 | 	if (is_errata93(regs, address)) | 
 | 		return; | 
 |  | 
 | 	/* | 
 | 	 * Oops. The kernel tried to access some bad page. We'll have to | 
 | 	 * terminate things with extreme prejudice: | 
 | 	 */ | 
 | 	flags = oops_begin(); | 
 |  | 
 | 	show_fault_oops(regs, error_code, address); | 
 |  | 
 | 	stackend = end_of_stack(tsk); | 
 | 	if (tsk != &init_task && *stackend != STACK_END_MAGIC) | 
 | 		printk(KERN_ALERT "Thread overran stack, or stack corrupted\n"); | 
 |  | 
 | 	tsk->thread.cr2		= address; | 
 | 	tsk->thread.trap_no	= 14; | 
 | 	tsk->thread.error_code	= error_code; | 
 |  | 
 | 	sig = SIGKILL; | 
 | 	if (__die("Oops", regs, error_code)) | 
 | 		sig = 0; | 
 |  | 
 | 	/* Executive summary in case the body of the oops scrolled away */ | 
 | 	printk(KERN_EMERG "CR2: %016lx\n", address); | 
 |  | 
 | 	oops_end(flags, regs, sig); | 
 | } | 
 |  | 
 | /* | 
 |  * Print out info about fatal segfaults, if the show_unhandled_signals | 
 |  * sysctl is set: | 
 |  */ | 
 | static inline void | 
 | show_signal_msg(struct pt_regs *regs, unsigned long error_code, | 
 | 		unsigned long address, struct task_struct *tsk) | 
 | { | 
 | 	if (!unhandled_signal(tsk, SIGSEGV)) | 
 | 		return; | 
 |  | 
 | 	if (!printk_ratelimit()) | 
 | 		return; | 
 |  | 
 | 	printk("%s%s[%d]: segfault at %lx ip %p sp %p error %lx", | 
 | 		task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG, | 
 | 		tsk->comm, task_pid_nr(tsk), address, | 
 | 		(void *)regs->ip, (void *)regs->sp, error_code); | 
 |  | 
 | 	print_vma_addr(KERN_CONT " in ", regs->ip); | 
 |  | 
 | 	printk(KERN_CONT "\n"); | 
 | } | 
 |  | 
 | static void | 
 | __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, | 
 | 		       unsigned long address, int si_code) | 
 | { | 
 | 	struct task_struct *tsk = current; | 
 |  | 
 | 	/* User mode accesses just cause a SIGSEGV */ | 
 | 	if (error_code & PF_USER) { | 
 | 		/* | 
 | 		 * It's possible to have interrupts off here: | 
 | 		 */ | 
 | 		local_irq_enable(); | 
 |  | 
 | 		/* | 
 | 		 * Valid to do another page fault here because this one came | 
 | 		 * from user space: | 
 | 		 */ | 
 | 		if (is_prefetch(regs, error_code, address)) | 
 | 			return; | 
 |  | 
 | 		if (is_errata100(regs, address)) | 
 | 			return; | 
 |  | 
 | 		if (unlikely(show_unhandled_signals)) | 
 | 			show_signal_msg(regs, error_code, address, tsk); | 
 |  | 
 | 		/* Kernel addresses are always protection faults: */ | 
 | 		tsk->thread.cr2		= address; | 
 | 		tsk->thread.error_code	= error_code | (address >= TASK_SIZE); | 
 | 		tsk->thread.trap_no	= 14; | 
 |  | 
 | 		force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0); | 
 |  | 
 | 		return; | 
 | 	} | 
 |  | 
 | 	if (is_f00f_bug(regs, address)) | 
 | 		return; | 
 |  | 
 | 	no_context(regs, error_code, address); | 
 | } | 
 |  | 
 | static noinline void | 
 | bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, | 
 | 		     unsigned long address) | 
 | { | 
 | 	__bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR); | 
 | } | 
 |  | 
 | static void | 
 | __bad_area(struct pt_regs *regs, unsigned long error_code, | 
 | 	   unsigned long address, int si_code) | 
 | { | 
 | 	struct mm_struct *mm = current->mm; | 
 |  | 
 | 	/* | 
 | 	 * Something tried to access memory that isn't in our memory map.. | 
 | 	 * Fix it, but check if it's kernel or user first.. | 
 | 	 */ | 
 | 	up_read(&mm->mmap_sem); | 
 |  | 
 | 	__bad_area_nosemaphore(regs, error_code, address, si_code); | 
 | } | 
 |  | 
 | static noinline void | 
 | bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address) | 
 | { | 
 | 	__bad_area(regs, error_code, address, SEGV_MAPERR); | 
 | } | 
 |  | 
 | static noinline void | 
 | bad_area_access_error(struct pt_regs *regs, unsigned long error_code, | 
 | 		      unsigned long address) | 
 | { | 
 | 	__bad_area(regs, error_code, address, SEGV_ACCERR); | 
 | } | 
 |  | 
 | /* TODO: fixup for "mm-invoke-oom-killer-from-page-fault.patch" */ | 
 | static void | 
 | out_of_memory(struct pt_regs *regs, unsigned long error_code, | 
 | 	      unsigned long address) | 
 | { | 
 | 	/* | 
	 * We ran out of memory, call the OOM killer, and return to userspace
 | 	 * (which will retry the fault, or kill us if we got oom-killed): | 
 | 	 */ | 
	up_read(&current->mm->mmap_sem);
 |  | 
 | 	pagefault_out_of_memory(); | 
 | } | 
 |  | 
 | static void | 
 | do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address, | 
 | 	  unsigned int fault) | 
 | { | 
 | 	struct task_struct *tsk = current; | 
 | 	struct mm_struct *mm = tsk->mm; | 
 | 	int code = BUS_ADRERR; | 
 |  | 
 | 	up_read(&mm->mmap_sem); | 
 |  | 
 | 	/* Kernel mode? Handle exceptions or die: */ | 
 | 	if (!(error_code & PF_USER)) { | 
 | 		no_context(regs, error_code, address); | 
 | 		return; | 
 | 	} | 
 |  | 
 | 	/* User-space => ok to do another page fault: */ | 
 | 	if (is_prefetch(regs, error_code, address)) | 
 | 		return; | 
 |  | 
 | 	tsk->thread.cr2		= address; | 
 | 	tsk->thread.error_code	= error_code; | 
 | 	tsk->thread.trap_no	= 14; | 
 |  | 
 | #ifdef CONFIG_MEMORY_FAILURE | 
 | 	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) { | 
 | 		printk(KERN_ERR | 
 | 	"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n", | 
 | 			tsk->comm, tsk->pid, address); | 
 | 		code = BUS_MCEERR_AR; | 
 | 	} | 
 | #endif | 
 | 	force_sig_info_fault(SIGBUS, code, address, tsk, fault); | 
 | } | 
 |  | 
 | static noinline void | 
 | mm_fault_error(struct pt_regs *regs, unsigned long error_code, | 
 | 	       unsigned long address, unsigned int fault) | 
 | { | 
 | 	if (fault & VM_FAULT_OOM) { | 
 | 		out_of_memory(regs, error_code, address); | 
 | 	} else { | 
 | 		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON| | 
 | 			     VM_FAULT_HWPOISON_LARGE)) | 
 | 			do_sigbus(regs, error_code, address, fault); | 
 | 		else | 
 | 			BUG(); | 
 | 	} | 
 | } | 
 |  | 
 | static int spurious_fault_check(unsigned long error_code, pte_t *pte) | 
 | { | 
 | 	if ((error_code & PF_WRITE) && !pte_write(*pte)) | 
 | 		return 0; | 
 |  | 
 | 	if ((error_code & PF_INSTR) && !pte_exec(*pte)) | 
 | 		return 0; | 
 |  | 
 | 	return 1; | 
 | } | 
 |  | 
 | /* | 
 |  * Handle a spurious fault caused by a stale TLB entry. | 
 |  * | 
 |  * This allows us to lazily refresh the TLB when increasing the | 
 |  * permissions of a kernel page (RO -> RW or NX -> X).  Doing it | 
 |  * eagerly is very expensive since that implies doing a full | 
 |  * cross-processor TLB flush, even if no stale TLB entries exist | 
 |  * on other processors. | 
 |  * | 
 |  * There are no security implications to leaving a stale TLB when | 
 |  * increasing the permissions on a page. | 
 |  */ | 
 | static noinline __kprobes int | 
 | spurious_fault(unsigned long error_code, unsigned long address) | 
 | { | 
 | 	pgd_t *pgd; | 
 | 	pud_t *pud; | 
 | 	pmd_t *pmd; | 
 | 	pte_t *pte; | 
 | 	int ret; | 
 |  | 
 | 	/* Reserved-bit violation or user access to kernel space? */ | 
 | 	if (error_code & (PF_USER | PF_RSVD)) | 
 | 		return 0; | 
 |  | 
 | 	pgd = init_mm.pgd + pgd_index(address); | 
 | 	if (!pgd_present(*pgd)) | 
 | 		return 0; | 
 |  | 
 | 	pud = pud_offset(pgd, address); | 
 | 	if (!pud_present(*pud)) | 
 | 		return 0; | 
 |  | 
 | 	if (pud_large(*pud)) | 
 | 		return spurious_fault_check(error_code, (pte_t *) pud); | 
 |  | 
 | 	pmd = pmd_offset(pud, address); | 
 | 	if (!pmd_present(*pmd)) | 
 | 		return 0; | 
 |  | 
 | 	if (pmd_large(*pmd)) | 
 | 		return spurious_fault_check(error_code, (pte_t *) pmd); | 
 |  | 
 | 	/* | 
 | 	 * Note: don't use pte_present() here, since it returns true | 
 | 	 * if the _PAGE_PROTNONE bit is set.  However, this aliases the | 
	 * _PAGE_GLOBAL bit, which for kernel pages gives false positives
 | 	 * when CONFIG_DEBUG_PAGEALLOC is used. | 
 | 	 */ | 
 | 	pte = pte_offset_kernel(pmd, address); | 
 | 	if (!(pte_flags(*pte) & _PAGE_PRESENT)) | 
 | 		return 0; | 
 |  | 
 | 	ret = spurious_fault_check(error_code, pte); | 
 | 	if (!ret) | 
 | 		return 0; | 
 |  | 
 | 	/* | 
 | 	 * Make sure we have permissions in PMD. | 
 | 	 * If not, then there's a bug in the page tables: | 
 | 	 */ | 
 | 	ret = spurious_fault_check(error_code, (pte_t *) pmd); | 
 | 	WARN_ONCE(!ret, "PMD has incorrect permission bits\n"); | 
 |  | 
 | 	return ret; | 
 | } | 
 |  | 
 | int show_unhandled_signals = 1; | 
 |  | 
 | static inline int | 
 | access_error(unsigned long error_code, struct vm_area_struct *vma) | 
 | { | 
 | 	if (error_code & PF_WRITE) { | 
 | 		/* write, present and write, not present: */ | 
 | 		if (unlikely(!(vma->vm_flags & VM_WRITE))) | 
 | 			return 1; | 
 | 		return 0; | 
 | 	} | 
 |  | 
 | 	/* read, present: */ | 
 | 	if (unlikely(error_code & PF_PROT)) | 
 | 		return 1; | 
 |  | 
 | 	/* read, not present: */ | 
 | 	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))) | 
 | 		return 1; | 
 |  | 
 | 	return 0; | 
 | } | 
 |  | 
 | static int fault_in_kernel_space(unsigned long address) | 
 | { | 
 | 	return address >= TASK_SIZE_MAX; | 
 | } | 
 |  | 
 | /* | 
 |  * This routine handles page faults.  It determines the address, | 
 |  * and the problem, and then passes it off to one of the appropriate | 
 |  * routines. | 
 |  */ | 
 | dotraplinkage void __kprobes | 
 | do_page_fault(struct pt_regs *regs, unsigned long error_code) | 
 | { | 
 | 	struct vm_area_struct *vma; | 
 | 	struct task_struct *tsk; | 
 | 	unsigned long address; | 
 | 	struct mm_struct *mm; | 
 | 	int fault; | 
 | 	int write = error_code & PF_WRITE; | 
 | 	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | | 
 | 					(write ? FAULT_FLAG_WRITE : 0); | 
 |  | 
 | 	tsk = current; | 
 | 	mm = tsk->mm; | 
 |  | 
 | 	/* Get the faulting address: */ | 
 | 	address = read_cr2(); | 
 |  | 
 | 	/* | 
 | 	 * Detect and handle instructions that would cause a page fault for | 
 | 	 * both a tracked kernel page and a userspace page. | 
 | 	 */ | 
 | 	if (kmemcheck_active(regs)) | 
 | 		kmemcheck_hide(regs); | 
 | 	prefetchw(&mm->mmap_sem); | 
 |  | 
 | 	if (unlikely(kmmio_fault(regs, address))) | 
 | 		return; | 
 |  | 
 | 	/* | 
 | 	 * We fault-in kernel-space virtual memory on-demand. The | 
 | 	 * 'reference' page table is init_mm.pgd. | 
 | 	 * | 
 | 	 * NOTE! We MUST NOT take any locks for this case. We may | 
 | 	 * be in an interrupt or a critical region, and should | 
 | 	 * only copy the information from the master page table, | 
 | 	 * nothing more. | 
 | 	 * | 
	 * This verifies that the fault happens in kernel space,
	 * (error_code & PF_USER) == 0, and that the fault was not a
	 * protection or reserved-bit error, (error_code & (PF_PROT | PF_RSVD)) == 0.
 | 	 */ | 
 | 	if (unlikely(fault_in_kernel_space(address))) { | 
 | 		if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) { | 
 | 			if (vmalloc_fault(address) >= 0) | 
 | 				return; | 
 |  | 
 | 			if (kmemcheck_fault(regs, address, error_code)) | 
 | 				return; | 
 | 		} | 
 |  | 
 | 		/* Can handle a stale RO->RW TLB: */ | 
 | 		if (spurious_fault(error_code, address)) | 
 | 			return; | 
 |  | 
 | 		/* kprobes don't want to hook the spurious faults: */ | 
 | 		if (notify_page_fault(regs)) | 
 | 			return; | 
 | 		/* | 
 | 		 * Don't take the mm semaphore here. If we fixup a prefetch | 
 | 		 * fault we could otherwise deadlock: | 
 | 		 */ | 
 | 		bad_area_nosemaphore(regs, error_code, address); | 
 |  | 
 | 		return; | 
 | 	} | 
 |  | 
 | 	/* kprobes don't want to hook the spurious faults: */ | 
 | 	if (unlikely(notify_page_fault(regs))) | 
 | 		return; | 
 | 	/* | 
 | 	 * It's safe to allow irq's after cr2 has been saved and the | 
 | 	 * vmalloc fault has been handled. | 
 | 	 * | 
 | 	 * User-mode registers count as a user access even for any | 
 | 	 * potential system fault or CPU buglet: | 
 | 	 */ | 
 | 	if (user_mode_vm(regs)) { | 
 | 		local_irq_enable(); | 
 | 		error_code |= PF_USER; | 
 | 	} else { | 
 | 		if (regs->flags & X86_EFLAGS_IF) | 
 | 			local_irq_enable(); | 
 | 	} | 
 |  | 
 | 	if (unlikely(error_code & PF_RSVD)) | 
 | 		pgtable_bad(regs, error_code, address); | 
 |  | 
 | 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); | 
 |  | 
 | 	/* | 
 | 	 * If we're in an interrupt, have no user context or are running | 
 | 	 * in an atomic region then we must not take the fault: | 
 | 	 */ | 
 | 	if (unlikely(in_atomic() || !mm)) { | 
 | 		bad_area_nosemaphore(regs, error_code, address); | 
 | 		return; | 
 | 	} | 
 |  | 
 | 	/* | 
 | 	 * When running in the kernel we expect faults to occur only to | 
 | 	 * addresses in user space.  All other faults represent errors in | 
 | 	 * the kernel and should generate an OOPS.  Unfortunately, in the | 
 | 	 * case of an erroneous fault occurring in a code path which already | 
 | 	 * holds mmap_sem we will deadlock attempting to validate the fault | 
 | 	 * against the address space.  Luckily the kernel only validly | 
 | 	 * references user space from well defined areas of code, which are | 
 | 	 * listed in the exceptions table. | 
 | 	 * | 
 | 	 * As the vast majority of faults will be valid we will only perform | 
 | 	 * the source reference check when there is a possibility of a | 
 | 	 * deadlock. Attempt to lock the address space, if we cannot we then | 
 | 	 * validate the source. If this is invalid we can skip the address | 
 | 	 * space check, thus avoiding the deadlock: | 
 | 	 */ | 
 | 	if (unlikely(!down_read_trylock(&mm->mmap_sem))) { | 
 | 		if ((error_code & PF_USER) == 0 && | 
 | 		    !search_exception_tables(regs->ip)) { | 
 | 			bad_area_nosemaphore(regs, error_code, address); | 
 | 			return; | 
 | 		} | 
 | retry: | 
 | 		down_read(&mm->mmap_sem); | 
 | 	} else { | 
 | 		/* | 
 | 		 * The above down_read_trylock() might have succeeded in | 
 | 		 * which case we'll have missed the might_sleep() from | 
 | 		 * down_read(): | 
 | 		 */ | 
 | 		might_sleep(); | 
 | 	} | 
 |  | 
 | 	vma = find_vma(mm, address); | 
 | 	if (unlikely(!vma)) { | 
 | 		bad_area(regs, error_code, address); | 
 | 		return; | 
 | 	} | 
 | 	if (likely(vma->vm_start <= address)) | 
 | 		goto good_area; | 
 | 	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) { | 
 | 		bad_area(regs, error_code, address); | 
 | 		return; | 
 | 	} | 
 | 	if (error_code & PF_USER) { | 
 | 		/* | 
 | 		 * Accessing the stack below %sp is always a bug. | 
 | 		 * The large cushion allows instructions like enter | 
 | 		 * and pusha to work. ("enter $65535, $31" pushes | 
 | 		 * 32 pointers and then decrements %sp by 65535.) | 
 | 		 */ | 
 | 		if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) { | 
 | 			bad_area(regs, error_code, address); | 
 | 			return; | 
 | 		} | 
 | 	} | 
 | 	if (unlikely(expand_stack(vma, address))) { | 
 | 		bad_area(regs, error_code, address); | 
 | 		return; | 
 | 	} | 
 |  | 
 | 	/* | 
 | 	 * Ok, we have a good vm_area for this memory access, so | 
 | 	 * we can handle it.. | 
 | 	 */ | 
 | good_area: | 
 | 	if (unlikely(access_error(error_code, vma))) { | 
 | 		bad_area_access_error(regs, error_code, address); | 
 | 		return; | 
 | 	} | 
 |  | 
 | 	/* | 
 | 	 * If for any reason at all we couldn't handle the fault, | 
 | 	 * make sure we exit gracefully rather than endlessly redo | 
 | 	 * the fault: | 
 | 	 */ | 
 | 	fault = handle_mm_fault(mm, vma, address, flags); | 
 |  | 
 | 	if (unlikely(fault & VM_FAULT_ERROR)) { | 
 | 		mm_fault_error(regs, error_code, address, fault); | 
 | 		return; | 
 | 	} | 
 |  | 
 | 	/* | 
 | 	 * Major/minor page fault accounting is only done on the | 
 | 	 * initial attempt. If we go through a retry, it is extremely | 
 | 	 * likely that the page will be found in page cache at that point. | 
 | 	 */ | 
 | 	if (flags & FAULT_FLAG_ALLOW_RETRY) { | 
 | 		if (fault & VM_FAULT_MAJOR) { | 
 | 			tsk->maj_flt++; | 
 | 			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, | 
 | 				      regs, address); | 
 | 		} else { | 
 | 			tsk->min_flt++; | 
 | 			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, | 
 | 				      regs, address); | 
 | 		} | 
 | 		if (fault & VM_FAULT_RETRY) { | 
			/*
			 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any
			 * risk of starvation.
			 */
 | 			flags &= ~FAULT_FLAG_ALLOW_RETRY; | 
 | 			goto retry; | 
 | 		} | 
 | 	} | 
 |  | 
 | 	check_v8086_mode(regs, address, tsk); | 
 |  | 
 | 	up_read(&mm->mmap_sem); | 
 | } |