/*
 *  Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>		/* For unblank_screen() */
#include <linux/highmem.h>
#include <linux/bootmem.h>		/* for max_low_pfn */
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>

#include <asm/system.h>
#include <asm/desc.h>
#include <asm/segment.h>

/*
 * Page fault error code bits
 *	bit 0 == 0 means no page found, 1 means protection fault
 *	bit 1 == 0 means read, 1 means write
 *	bit 2 == 0 means kernel, 1 means user-mode
 *	bit 3 == 1 means use of reserved bit detected
 *	bit 4 == 1 means fault was an instruction fetch
 */
#define PF_PROT		(1<<0)
#define PF_WRITE	(1<<1)
#define PF_USER		(1<<2)
#define PF_RSVD		(1<<3)
#define PF_INSTR	(1<<4)
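/*
 * Example decoding: an error code of 0x6 (PF_USER|PF_WRITE) is a
 * user-mode write to a not-present page, while 0x11 (PF_INSTR|PF_PROT)
 * is an instruction fetch that hit a protection violation such as an
 * NX-marked page.
 */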

static inline int notify_page_fault(struct pt_regs *regs)
{
#ifdef CONFIG_KPROBES
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (!user_mode_vm(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}

	return ret;
#else
	return 0;
#endif
}

#ifdef CONFIG_X86_32
/*
 * Return EIP plus the CS segment base.  The segment limit is also
 * adjusted, clamped to the kernel/user address space (whichever is
 * appropriate), and returned in *eip_limit.
 *
 * The segment is checked, because it might have been changed by another
 * task between the original faulting instruction and here.
 *
 * If CS is no longer a valid code segment, or if EIP is beyond the
 * limit, or if it is a kernel address when CS is not a kernel segment,
 * then the returned value will be greater than *eip_limit.
 *
 * This is slow, but is very rarely executed.
 */
static inline unsigned long get_segment_eip(struct pt_regs *regs,
					    unsigned long *eip_limit)
{
	unsigned long ip = regs->ip;
	unsigned seg = regs->cs & 0xffff;
	u32 seg_ar, seg_limit, base, *desc;

	/* Unlikely, but must come before segment checks. */
	if (unlikely(regs->flags & VM_MASK)) {
		base = seg << 4;
		*eip_limit = base + 0xffff;
		return base + (ip & 0xffff);
	}

	/* The standard kernel/user address space limit. */
	*eip_limit = user_mode(regs) ? USER_DS.seg : KERNEL_DS.seg;

	/* By far the most common cases. */
	if (likely(SEGMENT_IS_FLAT_CODE(seg)))
		return ip;

	/* Check the segment exists, is within the current LDT/GDT size,
	   that kernel/user (ring 0..3) has the appropriate privilege,
	   that it's a code segment, and get the limit. */
	__asm__ ("larl %3,%0; lsll %3,%1"
		 : "=&r" (seg_ar), "=r" (seg_limit) : "0" (0), "rm" (seg));
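	/*
	 * LAR loads the descriptor's access-rights bytes, LSL its limit.
	 * 0x9800 covers the present bit (0x8000), the S bit (0x1000) and
	 * the executable type bit (0x0800), so anything that is not a
	 * present code segment is rejected below.
	 */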
	if ((~seg_ar & 0x9800) || ip > seg_limit) {
		*eip_limit = 0;
		return 1;	 /* So that returned ip > *eip_limit. */
	}

	/* Get the GDT/LDT descriptor base.
	   When you look for races in this code remember that
	   LDT and other horrors are only used in user space. */
	if (seg & (1<<2)) {
		/* Must lock the LDT while reading it. */
		mutex_lock(&current->mm->context.lock);
		desc = current->mm->context.ldt;
		desc = (void *)desc + (seg & ~7);
	} else {
		/* Must disable preemption while reading the GDT. */
		desc = (u32 *)get_cpu_gdt_table(get_cpu());
		desc = (void *)desc + (seg & ~7);
	}

	/* Decode the code segment base from the descriptor */
	base = get_desc_base((struct desc_struct *)desc);

	if (seg & (1<<2))
		mutex_unlock(&current->mm->context.lock);
	else
		put_cpu();

	/* Adjust EIP and segment limit, and clamp at the kernel limit.
	   It's legitimate for segments to wrap at 0xffffffff. */
	seg_limit += base;
	if (seg_limit < *eip_limit && seg_limit >= base)
		*eip_limit = seg_limit;
	return ip + base;
}
#endif

/*
 * X86_32
 * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 * Check that here and ignore it.
 *
 * X86_64
 * Sometimes the CPU reports invalid exceptions on prefetch.
 * Check that here and ignore it.
 *
 * Opcode checker based on code by Richard Brunner
 */
static int is_prefetch(struct pt_regs *regs, unsigned long addr,
		       unsigned long error_code)
{
	unsigned char *instr;
	int scan_more = 1;
	int prefetch = 0;
	unsigned char *max_instr;

#ifdef CONFIG_X86_32
	unsigned long limit;
	if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		     boot_cpu_data.x86 >= 6)) {
		/* Catch an obscure case of prefetch inside an NX page. */
		if (nx_enabled && (error_code & PF_INSTR))
			return 0;
	} else {
		return 0;
	}
	instr = (unsigned char *)get_segment_eip(regs, &limit);
#else
	/* If it was an exec fault ignore */
	if (error_code & PF_INSTR)
		return 0;
	instr = (unsigned char __user *)convert_rip_to_linear(current, regs);
#endif

	max_instr = instr + 15;
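	/* x86 instructions are at most 15 bytes long, so never scan
	   further than that past the faulting instruction pointer. */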

#ifdef CONFIG_X86_64
	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
		return 0;
#endif

	while (scan_more && instr < max_instr) {
		unsigned char opcode;
		unsigned char instr_hi;
		unsigned char instr_lo;

#ifdef CONFIG_X86_32
		if (instr > (unsigned char *)limit)
			break;
#endif
		if (probe_kernel_address(instr, opcode))
			break;

		instr_hi = opcode & 0xf0;
		instr_lo = opcode & 0x0f;
		instr++;

		switch (instr_hi) {
		case 0x20:
		case 0x30:
			/*
			 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
			 * In X86_64 long mode, the CPU will signal invalid
			 * opcode if some of these prefixes are present so
			 * X86_64 will never get here anyway
			 */
			scan_more = ((instr_lo & 7) == 0x6);
			break;
#ifdef CONFIG_X86_64
		case 0x40:
			/*
			 * In AMD64 long mode 0x40..0x4F are valid REX prefixes
			 * Need to figure out under what instruction mode the
			 * instruction was issued. Could check the LDT for lm,
			 * but for now it's good enough to assume that long
			 * mode only uses well known segments or kernel.
			 */
			scan_more = (!user_mode(regs)) || (regs->cs == __USER_CS);
			break;
#endif
		case 0x60:
			/* 0x64 through 0x67 are valid prefixes in all modes. */
			scan_more = (instr_lo & 0xC) == 0x4;
			break;
		case 0xF0:
			/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
			scan_more = !instr_lo || (instr_lo>>1) == 1;
			break;
		case 0x00:
			/* Prefetch instruction is 0x0F0D or 0x0F18 */
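			/* (0x0F 0x0D is the AMD 3DNow! PREFETCH/PREFETCHW,
			   0x0F 0x18 the SSE PREFETCHNTA/T0/T1/T2 family.) */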
			scan_more = 0;
#ifdef CONFIG_X86_32
			if (instr > (unsigned char *)limit)
				break;
#endif
			if (probe_kernel_address(instr, opcode))
				break;
			prefetch = (instr_lo == 0xF) &&
				(opcode == 0x0D || opcode == 0x18);
			break;
		default:
			scan_more = 0;
			break;
		}
	}
	return prefetch;
}

static void force_sig_info_fault(int si_signo, int si_code,
				 unsigned long address, struct task_struct *tsk)
{
	siginfo_t info;

	info.si_signo = si_signo;
	info.si_errno = 0;
	info.si_code = si_code;
	info.si_addr = (void __user *)address;
	force_sig_info(si_signo, &info, tsk);
}

void do_invalid_op(struct pt_regs *, unsigned long);

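/*
 * On 32-bit, each process has its own pgd, but kernel mappings created
 * by vmalloc initially land only in the "reference" page table of
 * init_mm.  vmalloc_sync_one() copies the pmd entry covering @address
 * from the reference table into @pgd, or returns NULL if the reference
 * entry itself is not present.
 */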
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	/*
	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
	 * and redundant with the set_pmd() on non-PAE. As would
	 * set_pud.
	 */

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;
	if (!pmd_present(*pmd)) {
		set_pmd(pmd, *pmd_k);
		arch_flush_lazy_mmu_mode();
	} else
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
	return pmd_k;
}

#ifdef CONFIG_X86_64
static const char errata93_warning[] =
KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n"
KERN_ERR "******* Please consider a BIOS update.\n"
KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n";

/* Workaround for K8 erratum #93 & buggy BIOS.
   BIOS SMM functions are required to use a specific workaround
   to avoid corruption of the 64bit RIP register on C stepping K8.
   Many BIOSes that weren't tested properly miss this.
   The OS sees this as a page fault with the upper 32bits of RIP cleared.
   Try to work around it here.
   Note we only handle faults in kernel here. */

static int is_errata93(struct pt_regs *regs, unsigned long address)
{
	static int warned;
	if (address != regs->ip)
		return 0;
	if ((address >> 32) != 0)
		return 0;
	address |= 0xffffffffUL << 32;
	if ((address >= (u64)_stext && address <= (u64)_etext) ||
	    (address >= MODULES_VADDR && address <= MODULES_END)) {
		if (!warned) {
			printk(errata93_warning);
			warned = 1;
		}
		regs->ip = address;
		return 1;
	}
	return 0;
}
#endif

/*
 * Handle a fault on the vmalloc or module mapping area
 *
 * This assumes no large pages in there.
 */
static inline int vmalloc_fault(unsigned long address)
{
	unsigned long pgd_paddr;
	pmd_t *pmd_k;
	pte_t *pte_k;
	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_paddr = read_cr3();
	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
	if (!pmd_k)
		return -1;
	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;
	return 0;
}

int show_unhandled_signals = 1;

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long address;
	int write, si_code;
	int fault;

	/*
	 * We can fault from pretty much anywhere, with unknown IRQ state.
	 */
	trace_hardirqs_fixup();

	/* get the address */
	address = read_cr2();

	tsk = current;

	si_code = SEGV_MAPERR;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * ((error_code & PF_USER) == 0), and that the fault was not a
	 * protection error ((error_code & (PF_RSVD|PF_PROT)) == 0).
	 */
	if (unlikely(address >= TASK_SIZE)) {
		if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
		    vmalloc_fault(address) >= 0)
			return;
		if (notify_page_fault(regs))
			return;
		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock.
		 */
		goto bad_area_nosemaphore;
	}

	if (notify_page_fault(regs))
		return;

	/* It's safe to allow irq's after cr2 has been saved and the vmalloc
	   fault has been handled. */
	if (regs->flags & (X86_EFLAGS_IF|VM_MASK))
		local_irq_enable();

	mm = tsk->mm;

	/*
	 * If we're in an interrupt, have no user context or are running in an
	 * atomic region then we must not take the fault.
	 */
	if (in_atomic() || !mm)
		goto bad_area_nosemaphore;

	/* When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in the
	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space.  Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source.  If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if ((error_code & PF_USER) == 0 &&
		    !search_exception_tables(regs->ip))
			goto bad_area_nosemaphore;
		down_read(&mm->mmap_sem);
	}

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (error_code & PF_USER) {
		/*
		 * Accessing the stack below %sp is always a bug.
		 * The large cushion allows instructions like enter
		 * and pusha to work.  ("enter $65535,$31" pushes
		 * 32 pointers and then decrements %sp by 65535.)
		 */
		if (address + 65536 + 32 * sizeof(unsigned long) < regs->sp)
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	si_code = SEGV_ACCERR;
	write = 0;
	switch (error_code & (PF_PROT|PF_WRITE)) {
	default:	/* 3: write, present */
		/* fall through */
	case PF_WRITE:		/* write, not present */
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		write++;
		break;
	case PF_PROT:		/* read, present */
		goto bad_area;
	case 0:			/* read, not present */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto bad_area;
	}

survive:
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, write);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;

	/*
	 * Did it hit the DOS screen memory VA from vm86 mode?
	 */
	if (regs->flags & VM_MASK) {
		unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
		if (bit < 32)
			tsk->thread.screen_bitmap |= 1 << bit;
	}
	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (error_code & PF_USER) {
		/*
		 * It's possible to have interrupts off here.
		 */
		local_irq_enable();

		/*
		 * Valid to do another page fault here because this one came
		 * from user space.
		 */
		if (is_prefetch(regs, address, error_code))
			return;

		if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
		    printk_ratelimit()) {
			printk("%s%s[%d]: segfault at %08lx ip %08lx "
			    "sp %08lx error %lx\n",
			    task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
			    tsk->comm, task_pid_nr(tsk), address, regs->ip,
			    regs->sp, error_code);
		}
		tsk->thread.cr2 = address;
		/* Kernel addresses are always protection faults */
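		/* ((address >= TASK_SIZE) evaluates to 0 or 1, i.e. it
		   ORs in the PF_PROT bit for kernel addresses.) */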
		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
		tsk->thread.trap_no = 14;
		force_sig_info_fault(SIGSEGV, si_code, address, tsk);
		return;
	}

#ifdef CONFIG_X86_F00F_BUG
	/*
	 * Pentium F0 0F C7 C8 bug workaround.
	 */
	if (boot_cpu_data.f00f_bug) {
		unsigned long nr;

		nr = (address - idt_descr.address) >> 3;

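		/*
		 * The workaround maps the IDT read-only, so delivering the
		 * exception for the buggy "f00f" sequence page-faults with
		 * an address inside the IDT.  Descriptors are 8 bytes, so
		 * nr is the vector number; 6 is #UD (invalid opcode).
		 */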
		if (nr == 6) {
			do_invalid_op(regs, 0);
			return;
		}
	}
#endif

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Valid to do another page fault here, because if this fault
	 * had been triggered by is_prefetch fixup_exception would have
	 * handled it.
	 */
	if (is_prefetch(regs, address, error_code))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */

	bust_spinlocks(1);

	if (oops_may_print()) {
		__typeof__(pte_val(__pte(0))) page;

#ifdef CONFIG_X86_PAE
		if (error_code & PF_INSTR) {
			pte_t *pte = lookup_address(address);

			if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
				printk(KERN_CRIT "kernel tried to execute "
					"NX-protected page - exploit attempt? "
					"(uid: %d)\n", current->uid);
		}
#endif
		if (address < PAGE_SIZE)
			printk(KERN_ALERT "BUG: unable to handle kernel NULL "
					"pointer dereference");
		else
			printk(KERN_ALERT "BUG: unable to handle kernel paging"
					" request");
		printk(" at virtual address %08lx\n", address);
		printk(KERN_ALERT "printing ip: %08lx ", regs->ip);

		page = read_cr3();
		page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT];
#ifdef CONFIG_X86_PAE
		printk("*pdpt = %016Lx ", page);
		if ((page >> PAGE_SHIFT) < max_low_pfn
		    && page & _PAGE_PRESENT) {
			page &= PAGE_MASK;
			page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT)
								 & (PTRS_PER_PMD - 1)];
			printk(KERN_CONT "*pde = %016Lx ", page);
			page &= ~_PAGE_NX;
		}
#else
		printk("*pde = %08lx ", page);
#endif

		/*
		 * We must not directly access the pte in the highpte
		 * case if the page table is located in highmem.
		 * And let's rather not kmap-atomic the pte, just in case
		 * it's allocated already.
		 */
		if ((page >> PAGE_SHIFT) < max_low_pfn
		    && (page & _PAGE_PRESENT)
		    && !(page & _PAGE_PSE)) {
			page &= PAGE_MASK;
			page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
								 & (PTRS_PER_PTE - 1)];
			printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page);
		}

		printk("\n");
	}

	tsk->thread.cr2 = address;
	tsk->thread.trap_no = 14;
	tsk->thread.error_code = error_code;
	die("Oops", regs, error_code);
	bust_spinlocks(0);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_global_init(tsk)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (error_code & PF_USER)
		do_group_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (!(error_code & PF_USER))
		goto no_context;

	/* User space => ok to do another page fault */
	if (is_prefetch(regs, address, error_code))
		return;

	tsk->thread.cr2 = address;
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 14;
	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
}

void vmalloc_sync_all(void)
{
	/*
	 * Note that races in the updates of insync and start aren't
	 * problematic: insync can only get set bits added, and updates to
	 * start are only improving performance (without affecting correctness
	 * if undone).
	 */
	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
	static unsigned long start = TASK_SIZE;
	unsigned long address;

	if (SHARED_KERNEL_PMD)
		return;

	BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
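	/* The build assertion above guarantees TASK_SIZE is pgd-aligned,
	   so the loop below walks exactly the kernel portion of the
	   address space in PGDIR_SIZE steps. */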
	for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
		if (!test_bit(pgd_index(address), insync)) {
			unsigned long flags;
			struct page *page;

			spin_lock_irqsave(&pgd_lock, flags);
			for (page = pgd_list; page; page =
			     (struct page *)page->index)
				if (!vmalloc_sync_one(page_address(page),
						      address)) {
					BUG_ON(page != pgd_list);
					break;
				}
			spin_unlock_irqrestore(&pgd_lock, flags);
			if (!page)
				set_bit(pgd_index(address), insync);
		}
		if (address == start && test_bit(pgd_index(address), insync))
			start = address + PGDIR_SIZE;
	}
}