/*
 * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright 2003 Andi Kleen, SuSE Labs.
 *
 * [ NOTE: this mechanism is now deprecated in favor of the vDSO. ]
 *
 * Thanks to hpa@transmeta.com for some useful hints.
 * Special thanks to Ingo Molnar for his early experience with
 * a different vsyscall implementation for Linux/IA32 and for the name.
 *
 * vsyscall 1 is located at -10Mbyte, vsyscall 2 is located
 * at virtual address -10Mbyte+1024bytes etc... There are at most 4
 * vsyscalls. One vsyscall can reserve more than 1 slot to avoid
 * jumping out of line if necessary. We cannot add more with this
 * mechanism because older kernels won't return -ENOSYS.
 *
 * Note: the concept clashes with user mode linux. UML users should
 * use the vDSO.
 */
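/*
 * For reference, a worked example of the layout described above: the
 * vsyscall area starts at -10 MB, i.e. 0xffffffffff600000, and each slot
 * is 1024 bytes, so the three entries emulated below sit at
 * 0xffffffffff600000, 0xffffffffff600400 and 0xffffffffff600800.
 */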

/* Disable profiling for userspace code: */
#define DISABLE_BRANCH_PROFILING

#include <linux/time.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/seqlock.h>
#include <linux/jiffies.h>
#include <linux/sysctl.h>
#include <linux/clocksource.h>
#include <linux/getcpu.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/ratelimit.h>

#include <asm/vsyscall.h>
#include <asm/pgtable.h>
#include <asm/compat.h>
#include <asm/page.h>
#include <asm/unistd.h>
#include <asm/fixmap.h>
#include <asm/errno.h>
#include <asm/io.h>
#include <asm/segment.h>
#include <asm/desc.h>
#include <asm/topology.h>
#include <asm/vgtod.h>
#include <asm/traps.h>

DEFINE_VVAR(int, vgetcpu_mode);
DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
{
        .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
};

void update_vsyscall_tz(void)
{
        unsigned long flags;

        write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
        /* sys_tz has changed */
        vsyscall_gtod_data.sys_tz = sys_tz;
        write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
}

void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
                     struct clocksource *clock, u32 mult)
{
        unsigned long flags;

        write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);

        /* copy vsyscall data */
        vsyscall_gtod_data.clock.vclock_mode = clock->archdata.vclock_mode;
        vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
        vsyscall_gtod_data.clock.mask = clock->mask;
        vsyscall_gtod_data.clock.mult = mult;
        vsyscall_gtod_data.clock.shift = clock->shift;
        vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
        vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
        vsyscall_gtod_data.wall_to_monotonic = *wtm;
        vsyscall_gtod_data.wall_time_coarse = __current_kernel_time();

        write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
}

static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
                              const char *message)
{
        static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
        struct task_struct *tsk;

        if (!show_unhandled_signals || !__ratelimit(&rs))
                return;

        tsk = current;

        printk("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n",
               level, tsk->comm, task_pid_nr(tsk),
               message, regs->ip - 2, regs->cs,
               regs->sp, regs->ax, regs->si, regs->di);
}

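/*
 * Each vsyscall slot is 1024 bytes, so bits 10-11 of the address select
 * the slot; only slots 0-2 are defined.
 */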
static int addr_to_vsyscall_nr(unsigned long addr)
{
        int nr;

        if ((addr & ~0xC00UL) != VSYSCALL_START)
                return -EINVAL;

        nr = (addr & 0xC00UL) >> 10;
        if (nr >= 3)
                return -EINVAL;

        return nr;
}

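/*
 * Emulate a legacy vsyscall: the instructions in the vsyscall page trap
 * here via int 0xcc, and the requested system call is performed on the
 * caller's behalf before control returns to the saved return address.
 */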
void dotraplinkage do_emulate_vsyscall(struct pt_regs *regs, long error_code)
{
        struct task_struct *tsk;
        unsigned long caller;
        int vsyscall_nr;
        long ret;

        local_irq_enable();

        if (!user_64bit_mode(regs)) {
                /*
                 * If we trapped from kernel mode, we might as well OOPS now
                 * instead of returning to some random address and OOPSing
                 * then.
                 */
                BUG_ON(!user_mode(regs));

                /* Compat mode and non-compat 32-bit CS should both segfault. */
                warn_bad_vsyscall(KERN_WARNING, regs,
                                  "illegal int 0xcc from 32-bit mode");
                goto sigsegv;
        }

        /*
         * x86-ism here: regs->ip points to the instruction after the int 0xcc,
         * and int 0xcc is two bytes long.
         */
        vsyscall_nr = addr_to_vsyscall_nr(regs->ip - 2);
        if (vsyscall_nr < 0) {
                warn_bad_vsyscall(KERN_WARNING, regs,
                                  "illegal int 0xcc (exploit attempt?)");
                goto sigsegv;
        }

        if (get_user(caller, (unsigned long __user *)regs->sp) != 0) {
                warn_bad_vsyscall(KERN_WARNING, regs,
                                  "int 0xcc with bad stack (exploit attempt?)");
                goto sigsegv;
        }

        tsk = current;
        if (seccomp_mode(&tsk->seccomp))
                do_exit(SIGKILL);

        switch (vsyscall_nr) {
        case 0:
                ret = sys_gettimeofday(
                        (struct timeval __user *)regs->di,
                        (struct timezone __user *)regs->si);
                break;

        case 1:
                ret = sys_time((time_t __user *)regs->di);
                break;

        case 2:
                ret = sys_getcpu((unsigned __user *)regs->di,
                                 (unsigned __user *)regs->si,
                                 0);
                break;
        }

        if (ret == -EFAULT) {
                /*
                 * Bad news -- userspace fed a bad pointer to a vsyscall.
                 *
                 * With a real vsyscall, that would have caused SIGSEGV.
                 * To make writing reliable exploits using the emulated
                 * vsyscalls harder, generate SIGSEGV here as well.
                 */
                warn_bad_vsyscall(KERN_INFO, regs,
                                  "vsyscall fault (exploit attempt?)");
                goto sigsegv;
        }

        regs->ax = ret;

        /* Emulate a ret instruction. */
        regs->ip = caller;
        regs->sp += 8;

        local_irq_disable();
        return;

sigsegv:
        regs->ip -= 2; /* The faulting instruction should be the int 0xcc. */
        force_sig(SIGSEGV, current);
        local_irq_disable();
}


/*
 * Assume __initcall executes before all user space. Hopefully kmod
 * doesn't violate that. We'll find out if it does.
 */
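/*
 * Publish this CPU's cpu/node numbers where the vgetcpu fast path can read
 * them cheaply: in the TSC_AUX MSR (for rdtscp) and in the limit field of a
 * per-cpu GDT descriptor (for lsl).
 */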
static void __cpuinit vsyscall_set_cpu(int cpu)
{
        unsigned long d;
        unsigned long node = 0;
#ifdef CONFIG_NUMA
        node = cpu_to_node(cpu);
#endif
        if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP))
                write_rdtscp_aux((node << 12) | cpu);

        /*
         * Store cpu number in limit so that it can be loaded quickly
         * in user space in vgetcpu. (12 bits for the CPU and 8 bits for the node)
         */
        d = 0x0f40000000000ULL;
        d |= cpu;
        d |= (node & 0xf) << 12;
        d |= (node >> 4) << 48;

        write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}

static void __cpuinit cpu_vsyscall_init(void *arg)
{
        /* preemption should be already off */
        vsyscall_set_cpu(raw_smp_processor_id());
}

static int __cpuinit
cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
{
        long cpu = (long)arg;

        if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
                smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 1);

        return NOTIFY_DONE;
}

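/*
 * Wire the vsyscall and vvar pages into the fixmap so that they appear at
 * their fixed virtual addresses in every process's address space.
 */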
void __init map_vsyscall(void)
{
        extern char __vsyscall_0;
        unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);
        extern char __vvar_page;
        unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);

        /* Note that VSYSCALL_MAPPED_PAGES must agree with the code below. */
        __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
        __set_fixmap(VVAR_PAGE, physaddr_vvar_page, PAGE_KERNEL_VVAR);
        BUILD_BUG_ON((unsigned long)__fix_to_virt(VVAR_PAGE) !=
                     (unsigned long)VVAR_ADDRESS);
}

static int __init vsyscall_init(void)
{
        BUG_ON(VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE));

        on_each_cpu(cpu_vsyscall_init, NULL, 1);
        /* notifier priority > KVM */
        hotcpu_notifier(cpu_vsyscall_notifier, 30);

        return 0;
}
__initcall(vsyscall_init);