/*
 * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright 2003 Andi Kleen, SuSE Labs.
 *
 * [ NOTE: this mechanism is now deprecated in favor of the vDSO. ]
 *
 * Thanks to hpa@transmeta.com for some useful hints.
 * Special thanks to Ingo Molnar for his early experience with
 * a different vsyscall implementation for Linux/IA32 and for the name.
 *
 * The first vsyscall is located at -10Mbyte, the next one at virtual
 * address -10Mbyte+1024 bytes, and so on.  There are at most 4
 * vsyscalls.  One vsyscall can reserve more than 1 slot to avoid
 * jumping out of line if necessary.  We cannot add more with this
 * mechanism because older kernels won't return -ENOSYS.
 *
 * Note: the concept clashes with User Mode Linux.  UML users should
 * use the vDSO.
 */

#include <linux/time.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/seqlock.h>
#include <linux/jiffies.h>
#include <linux/sysctl.h>
#include <linux/topology.h>
#include <linux/clocksource.h>
#include <linux/getcpu.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/ratelimit.h>

#include <asm/vsyscall.h>
#include <asm/pgtable.h>
#include <asm/compat.h>
#include <asm/page.h>
#include <asm/unistd.h>
#include <asm/fixmap.h>
#include <asm/errno.h>
#include <asm/io.h>
#include <asm/segment.h>
#include <asm/desc.h>
#include <asm/topology.h>
#include <asm/vgtod.h>
#include <asm/traps.h>

#define CREATE_TRACE_POINTS
#include "vsyscall_trace.h"

DEFINE_VVAR(int, vgetcpu_mode);
DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);

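/*
 * Editor's note (summary of the behaviour implemented below): the legacy
 * vsyscall page can be handled in one of three ways.
 *
 *  EMULATE - map the page non-executable; calls into it fault and are
 *            emulated by emulate_vsyscall().  This is the default.
 *  NATIVE  - map the page executable, keeping the traditional (and less
 *            safe) fixed-address entry points.
 *  NONE    - refuse to emulate; any call into the page gets SIGSEGV.
 */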
static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;

static int __init vsyscall_setup(char *str)
{
	if (str) {
		if (!strcmp("emulate", str))
			vsyscall_mode = EMULATE;
		else if (!strcmp("native", str))
			vsyscall_mode = NATIVE;
		else if (!strcmp("none", str))
			vsyscall_mode = NONE;
		else
			return -EINVAL;

		return 0;
	}

	return -EINVAL;
}
early_param("vsyscall", vsyscall_setup);
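/*
 * Illustrative usage: the mode is selected on the kernel command line,
 * e.g. "vsyscall=native" or "vsyscall=none"; without the parameter the
 * EMULATE default above is kept.
 */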

void update_vsyscall_tz(void)
{
	vsyscall_gtod_data.sys_tz = sys_tz;
}

void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
			struct clocksource *clock, u32 mult)
{
	struct timespec monotonic;

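	/*
	 * The update is bracketed by a seqcount so that the user-space
	 * readers (the vDSO and the emulated/native vsyscalls) retry if
	 * they race with an update and never see a half-written snapshot.
	 */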
	write_seqcount_begin(&vsyscall_gtod_data.seq);

	/* copy vsyscall data */
	vsyscall_gtod_data.clock.vclock_mode = clock->archdata.vclock_mode;
	vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
	vsyscall_gtod_data.clock.mask = clock->mask;
	vsyscall_gtod_data.clock.mult = mult;
	vsyscall_gtod_data.clock.shift = clock->shift;

	vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
	vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;

	monotonic = timespec_add(*wall_time, *wtm);
	vsyscall_gtod_data.monotonic_time_sec = monotonic.tv_sec;
	vsyscall_gtod_data.monotonic_time_nsec = monotonic.tv_nsec;

	vsyscall_gtod_data.wall_time_coarse = __current_kernel_time();
	vsyscall_gtod_data.monotonic_time_coarse =
		timespec_add(vsyscall_gtod_data.wall_time_coarse, *wtm);

	write_seqcount_end(&vsyscall_gtod_data.seq);
}

static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
			      const char *message)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
	struct task_struct *tsk;

	if (!show_unhandled_signals || !__ratelimit(&rs))
		return;

	tsk = current;

	printk("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n",
	       level, tsk->comm, task_pid_nr(tsk),
	       message, regs->ip, regs->cs,
	       regs->sp, regs->ax, regs->si, regs->di);
}

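/*
 * Each legacy vsyscall occupies a 1024-byte slot in the vsyscall page, so
 * bits 11:10 of a faulting address select the slot: 0 = gettimeofday(),
 * 1 = time(), 2 = getcpu() (matching the switch in emulate_vsyscall()).
 */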
static int addr_to_vsyscall_nr(unsigned long addr)
{
	int nr;

	if ((addr & ~0xC00UL) != VSYSCALL_START)
		return -EINVAL;

	nr = (addr & 0xC00UL) >> 10;
	if (nr >= 3)
		return -EINVAL;

	return nr;
}

static bool write_ok_or_segv(unsigned long ptr, size_t size)
{
	/*
	 * XXX: if access_ok, get_user, and put_user handled
	 * sig_on_uaccess_error, this could go away.
	 */

	if (!access_ok(VERIFY_WRITE, (void __user *)ptr, size)) {
		siginfo_t info;
		struct thread_struct *thread = &current->thread;

		thread->error_code = 6;	/* user fault, no page, write */
		thread->cr2 = ptr;
		thread->trap_no = 14;

		memset(&info, 0, sizeof(info));
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		info.si_code = SEGV_MAPERR;
		info.si_addr = (void __user *)ptr;

		force_sig_info(SIGSEGV, &info, current);
		return false;
	} else {
		return true;
	}
}

bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
{
	struct task_struct *tsk;
	unsigned long caller;
	int vsyscall_nr;
	int prev_sig_on_uaccess_error;
	long ret;

	/*
	 * No point in checking CS -- the only way to get here is a user mode
	 * trap to a high address, which means that we're in 64-bit user code.
	 */

	WARN_ON_ONCE(address != regs->ip);

	if (vsyscall_mode == NONE) {
		warn_bad_vsyscall(KERN_INFO, regs,
				  "vsyscall attempted with vsyscall=none");
		return false;
	}

	vsyscall_nr = addr_to_vsyscall_nr(address);

	trace_emulate_vsyscall(vsyscall_nr);

	if (vsyscall_nr < 0) {
		warn_bad_vsyscall(KERN_WARNING, regs,
				  "misaligned vsyscall (exploit attempt or buggy program) -- look up the vsyscall kernel parameter if you need a workaround");
		goto sigsegv;
	}

	if (get_user(caller, (unsigned long __user *)regs->sp) != 0) {
		warn_bad_vsyscall(KERN_WARNING, regs,
				  "vsyscall with bad stack (exploit attempt?)");
		goto sigsegv;
	}

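	/*
	 * Emulating a syscall here would bypass any seccomp policy the task
	 * is running under, so if seccomp is enabled we conservatively kill
	 * the task instead (plausibly what a disallowed syscall would get
	 * under strict seccomp anyway).
	 */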
	tsk = current;
	if (seccomp_mode(&tsk->seccomp))
		do_exit(SIGKILL);

	/*
	 * With a real vsyscall, page faults cause SIGSEGV.  We want to
	 * preserve that behavior to make writing exploits harder.
	 */
	prev_sig_on_uaccess_error = current_thread_info()->sig_on_uaccess_error;
	current_thread_info()->sig_on_uaccess_error = 1;

	/*
	 * 0 is a valid user pointer (in the access_ok sense) on 32-bit and
	 * 64-bit, so we don't need to special-case it here.  For all the
	 * vsyscalls, 0 means "don't write anything" not "write it at
	 * address 0".
	 */
	ret = -EFAULT;
	switch (vsyscall_nr) {
	case 0:
		if (!write_ok_or_segv(regs->di, sizeof(struct timeval)) ||
		    !write_ok_or_segv(regs->si, sizeof(struct timezone)))
			break;

		ret = sys_gettimeofday(
			(struct timeval __user *)regs->di,
			(struct timezone __user *)regs->si);
		break;

	case 1:
		if (!write_ok_or_segv(regs->di, sizeof(time_t)))
			break;

		ret = sys_time((time_t __user *)regs->di);
		break;

	case 2:
		if (!write_ok_or_segv(regs->di, sizeof(unsigned)) ||
		    !write_ok_or_segv(regs->si, sizeof(unsigned)))
			break;

		ret = sys_getcpu((unsigned __user *)regs->di,
				 (unsigned __user *)regs->si,
				 0);
		break;
	}

	current_thread_info()->sig_on_uaccess_error = prev_sig_on_uaccess_error;

	if (ret == -EFAULT) {
		/* Bad news -- userspace fed a bad pointer to a vsyscall. */
		warn_bad_vsyscall(KERN_INFO, regs,
				  "vsyscall fault (exploit attempt?)");

		/*
		 * If we failed to generate a signal for any reason,
		 * generate one here.  (This should be impossible.)
		 */
		if (WARN_ON_ONCE(!sigismember(&tsk->pending.signal, SIGBUS) &&
				 !sigismember(&tsk->pending.signal, SIGSEGV)))
			goto sigsegv;

		return true;	/* Don't emulate the ret. */
	}

	regs->ax = ret;

	/* Emulate a ret instruction. */
	regs->ip = caller;
	regs->sp += 8;

	return true;

sigsegv:
	force_sig(SIGSEGV, current);
	return true;
}

/*
 * Assume __initcall executes before all user space. Hopefully kmod
 * doesn't violate that. We'll find out if it does.
 */
static void __cpuinit vsyscall_set_cpu(int cpu)
{
	unsigned long d;
	unsigned long node = 0;
#ifdef CONFIG_NUMA
	node = cpu_to_node(cpu);
#endif
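	/*
	 * On CPUs with RDTSCP, also stash (node << 12) | cpu in the TSC_AUX
	 * MSR so that user space can obtain it directly from the rdtscp
	 * instruction.
	 */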
	if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP))
		write_rdtscp_aux((node << 12) | cpu);

	/*
	 * Store cpu number in limit so that it can be loaded quickly
	 * in user space in vgetcpu. (12 bits for the CPU and 8 bits for the node)
	 */
	d = 0x0f40000000000ULL;
	d |= cpu;
	d |= (node & 0xf) << 12;
	d |= (node >> 4) << 48;

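	/*
	 * The vDSO's vgetcpu() fallback reads this limit back from user
	 * space with the lsl instruction when RDTSCP is not available.
	 */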
	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}

static void __cpuinit cpu_vsyscall_init(void *arg)
{
	/* preemption should be already off */
	vsyscall_set_cpu(raw_smp_processor_id());
}

static int __cpuinit
cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
{
	long cpu = (long)arg;

	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
		smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 1);

	return NOTIFY_DONE;
}

void __init map_vsyscall(void)
{
	extern char __vsyscall_page;
	unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
	extern char __vvar_page;
	unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);

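	/*
	 * In NATIVE mode the vsyscall page is mapped executable so the
	 * legacy entry points run directly; otherwise it is mapped
	 * user-readable but not executable, and any call into it faults
	 * into emulate_vsyscall().
	 */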
	__set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
		     vsyscall_mode == NATIVE
		     ? PAGE_KERNEL_VSYSCALL
		     : PAGE_KERNEL_VVAR);
	BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
		     (unsigned long)VSYSCALL_START);

	__set_fixmap(VVAR_PAGE, physaddr_vvar_page, PAGE_KERNEL_VVAR);
	BUILD_BUG_ON((unsigned long)__fix_to_virt(VVAR_PAGE) !=
		     (unsigned long)VVAR_ADDRESS);
}

static int __init vsyscall_init(void)
{
	BUG_ON(VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE));

	on_each_cpu(cpu_vsyscall_init, NULL, 1);
	/* notifier priority > KVM */
	hotcpu_notifier(cpu_vsyscall_notifier, 30);

	return 0;
}
__initcall(vsyscall_init);