blob: 630036c06c751b39af6e378d98a14a02668c2d62 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * linux/arch/x86_64/kernel/vsyscall.c
3 *
4 * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
5 * Copyright 2003 Andi Kleen, SuSE Labs.
6 *
7 * Thanks to hpa@transmeta.com for some useful hint.
8 * Special thanks to Ingo Molnar for his early experience with
9 * a different vsyscall implementation for Linux/IA32 and for the name.
10 *
11 * vsyscall 1 is located at -10Mbyte, vsyscall 2 is located
12 * at virtual address -10Mbyte+1024bytes etc... There are at max 4
13 * vsyscalls. One vsyscall can reserve more than 1 slot to avoid
14 * jumping out of line if necessary. We cannot add more with this
15 * mechanism because older kernels won't return -ENOSYS.
16 * If we want more than four we need a vDSO.
17 *
18 * Note: the concept clashes with user mode linux. If you use UML and
19 * want per guest time just set the kernel.vsyscall64 sysctl to 0.
20 */
21
22#include <linux/time.h>
23#include <linux/init.h>
24#include <linux/kernel.h>
25#include <linux/timer.h>
26#include <linux/seqlock.h>
27#include <linux/jiffies.h>
28#include <linux/sysctl.h>
Vojtech Pavlikc08c8202006-09-26 10:52:28 +020029#include <linux/getcpu.h>
Andi Kleen8c131af2006-11-14 16:57:46 +010030#include <linux/cpu.h>
31#include <linux/smp.h>
32#include <linux/notifier.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070033
34#include <asm/vsyscall.h>
35#include <asm/pgtable.h>
36#include <asm/page.h>
37#include <asm/fixmap.h>
38#include <asm/errno.h>
39#include <asm/io.h>
Vojtech Pavlikc08c8202006-09-26 10:52:28 +020040#include <asm/segment.h>
41#include <asm/desc.h>
42#include <asm/topology.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070043
44#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
Arnd Bergmannf5738ce2006-12-06 20:37:29 -080045#define __syscall_clobber "r11","rcx","memory"
Linus Torvalds1da177e2005-04-16 15:20:36 -070046
47int __sysctl_vsyscall __section_sysctl_vsyscall = 1;
48seqlock_t __xtime_lock __section_xtime_lock = SEQLOCK_UNLOCKED;
Vojtech Pavlikc08c8202006-09-26 10:52:28 +020049int __vgetcpu_mode __section_vgetcpu_mode;
Linus Torvalds1da177e2005-04-16 15:20:36 -070050
51#include <asm/unistd.h>
52
Andi Kleen2c8bc942006-01-11 22:45:30 +010053static __always_inline void timeval_normalize(struct timeval * tv)
Linus Torvalds1da177e2005-04-16 15:20:36 -070054{
55 time_t __sec;
56
57 __sec = tv->tv_usec / 1000000;
58 if (__sec) {
59 tv->tv_usec %= 1000000;
60 tv->tv_sec += __sec;
61 }
62}
63
/*
 * Fast gettimeofday: runs entirely in the user-mapped vsyscall page.
 *
 * Retries on __xtime_lock until a consistent snapshot of __xtime is
 * read, then interpolates microseconds elapsed since the last tick
 * from either the TSC or the fixmapped HPET, depending on
 * __vxtime.mode.
 */
static __always_inline void do_vgettimeofday(struct timeval * tv)
{
	long sequence, t;
	unsigned long sec, usec;

	do {
		sequence = read_seqbegin(&__xtime_lock);

		sec = __xtime.tv_sec;
		usec = __xtime.tv_nsec / 1000;

		if (__vxtime.mode != VXTIME_HPET) {
			t = get_cycles_sync();
			/* Clamp: a TSC reading slightly behind last_tsc
			   must not make time go backwards. */
			if (t < __vxtime.last_tsc)
				t = __vxtime.last_tsc;
			/* 32.32 fixed-point scale of elapsed cycles to
			   microseconds. */
			usec += ((t - __vxtime.last_tsc) *
				 __vxtime.tsc_quot) >> 32;
			/* See comment in x86_64 do_gettimeofday. */
		} else {
			/* Read the HPET counter register (offset 0xf0 of
			   the fixmapped HPET page — presumably the main
			   counter; confirm against the HPET spec) and
			   scale ticks since __vxtime.last the same way. */
			usec += ((readl((void __iomem *)
				   fix_to_virt(VSYSCALL_HPET) + 0xf0) -
				  __vxtime.last) * __vxtime.quot) >> 32;
		}
	} while (read_seqretry(&__xtime_lock, sequence));

	/* usec may have grown past one second; normalize into tv. */
	tv->tv_sec = sec + usec / 1000000;
	tv->tv_usec = usec % 1000000;
}
92
/* Copy the kernel's cached timezone to user space.  Done locklessly.
   RED-PEN may want to readd seq locking, but then the variable should
   be write-once. */
static __always_inline void do_get_tz(struct timezone * tz)
{
	*tz = __sys_tz;
}
98
/*
 * Real-syscall fallback used when the fast path is disabled via the
 * kernel.vsyscall64 sysctl.  The "vsysc2:" label marks the 2-byte
 * syscall instruction so vsyscall_sysctl_change() can patch it
 * between SYSCALL and NOPs; do not move or alter these bytes.
 */
static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
{
	int ret;
	asm volatile("vsysc2: syscall"
		: "=a" (ret)
		: "0" (__NR_gettimeofday),"D" (tv),"S" (tz) : __syscall_clobber );
	return ret;
}
107
/*
 * Real time(2) syscall fallback for vtime.  The "vsysc1:" label is a
 * patch site for vsyscall_sysctl_change(), like "vsysc2:" above.
 */
static __always_inline long time_syscall(long *t)
{
	long secs;
	asm volatile("vsysc1: syscall"
		: "=a" (secs)
		: "0" (__NR_time),"D" (t) : __syscall_clobber);
	return secs;
}
116
/*
 * vsyscall slot 0: gettimeofday.  Falls back to the real syscall when
 * the fast path is disabled; either of tv/tz may be NULL.
 */
int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz)
{
	if (!__sysctl_vsyscall)
		return gettimeofday(tv,tz);
	if (tv)
		do_vgettimeofday(tv);
	if (tz)
		do_get_tz(tz);
	return 0;
}
127
/* vsyscall slot 1: time(2).  Reads __xtime.tv_sec locklessly.
 * This will break when the xtime seconds get inaccurate, but that is
 * unlikely */
time_t __vsyscall(1) vtime(time_t *t)
{
	if (!__sysctl_vsyscall)
		return time_syscall(t);
	else if (t)
		*t = __xtime.tv_sec;
	return __xtime.tv_sec;
}
138
/* vsyscall slot 2: fast way to get current CPU and node.
   This helps to do per node and per CPU caches in user space.
   The result is not guaranteed without CPU affinity, but usually
   works out because the scheduler tries to keep a thread on the same
   CPU.

   tcache must point to a two element sized long array.
   All arguments can be NULL. */
long __vsyscall(2)
vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
{
	unsigned int dummy, p;
	unsigned long j = 0;

	/* Fast cache - only recompute value once per jiffies and avoid
	   relatively costly rdtscp/cpuid otherwise.
	   This works because the scheduler usually keeps the process
	   on the same CPU and this syscall doesn't guarantee its
	   results anyways.
	   We do this here because otherwise user space would do it on
	   its own in a likely inferior way (no access to jiffies).
	   If you don't like it pass NULL. */
	if (tcache && tcache->blob[0] == (j = __jiffies)) {
		/* Cache hit: reuse the packed cpu/node value. */
		p = tcache->blob[1];
	} else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
		/* Load per CPU data from RDTSCP */
		rdtscp(dummy, dummy, p);
	} else {
		/* Load per CPU data from GDT: the per-CPU descriptor's
		   limit field was packed by vsyscall_set_cpu(). */
		asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
	}
	if (tcache) {
		tcache->blob[0] = j;
		tcache->blob[1] = p;
	}
	/* p packs the CPU in the low 12 bits, the node above them
	   (matches the encoding in vsyscall_set_cpu). */
	if (cpu)
		*cpu = p & 0xfff;
	if (node)
		*node = p >> 12;
	return 0;
}
180
/* vsyscall slot 3: reserved, always fails.  Kept so old binaries
   calling it get -ENOSYS rather than garbage. */
long __vsyscall(3) venosys_1(void)
{
	return -ENOSYS;
}
185
#ifdef CONFIG_SYSCTL

/* Little-endian encodings of the two 2-byte instruction variants
   written at the vsysc1/vsysc2 patch sites. */
#define SYSCALL 0x050f
#define NOP2 0x9090

/*
 * NOP out syscall in vsyscall page when not needed.
 *
 * proc handler for kernel.vsyscall64: after proc_dointvec updates the
 * value, rewrite the fallback syscall instructions in vtime and
 * vgettimeofday — a real SYSCALL when the fast path is off (so the
 * fallbacks work), NOPs when it is on (the fallbacks are unreachable).
 */
static int vsyscall_sysctl_change(ctl_table *ctl, int write, struct file * filp,
                        void __user *buffer, size_t *lenp, loff_t *ppos)
{
	extern u16 vsysc1, vsysc2;
	u16 __iomem *map1;
	u16 __iomem *map2;
	int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
	if (!write)
		return ret;
	/* gcc has some trouble with __va(__pa()), so just do it this
	   way. */
	map1 = ioremap(__pa_symbol(&vsysc1), 2);
	if (!map1)
		return -ENOMEM;
	map2 = ioremap(__pa_symbol(&vsysc2), 2);
	if (!map2) {
		ret = -ENOMEM;
		goto out;
	}
	if (!sysctl_vsyscall) {
		writew(SYSCALL, map1);
		writew(SYSCALL, map2);
	} else {
		writew(NOP2, map1);
		writew(NOP2, map2);
	}
	iounmap(map2);
out:
	iounmap(map1);
	return ret;
}
225
/* No sysctl(2) binary-interface strategy for vsyscall64; only the
   /proc handler above is supported. */
static int vsyscall_sysctl_nostrat(ctl_table *t, int __user *name, int nlen,
				void __user *oldval, size_t __user *oldlenp,
				void __user *newval, size_t newlen,
				void **context)
{
	return -ENOSYS;
}
233
/* kernel.vsyscall64: toggles the fast vsyscall path at runtime. */
static ctl_table kernel_table2[] = {
	{ .ctl_name = 99, .procname = "vsyscall64",
	  .data = &sysctl_vsyscall, .maxlen = sizeof(int), .mode = 0644,
	  .strategy = vsyscall_sysctl_nostrat,
	  .proc_handler = vsyscall_sysctl_change },
	{ 0, }
};
241
/* Root table hanging kernel_table2 under the "kernel" directory. */
static ctl_table kernel_root_table2[] = {
	{ .ctl_name = CTL_KERN, .procname = "kernel", .mode = 0555,
	  .child = kernel_table2 },
	{ 0 },
};

#endif
249
/* Assume __initcall executes before all user space. Hopefully kmod
   doesn't violate that. We'll find out if it does.

   Publish this CPU's (cpu, node) pair where vgetcpu can read it from
   user space: via write_rdtscp_aux() when the CPU has RDTSCP, and
   always in the limit field of the per-CPU GDT descriptor read with
   "lsl". */
static void __cpuinit vsyscall_set_cpu(int cpu)
{
	unsigned long *d;
	unsigned long node = 0;
#ifdef CONFIG_NUMA
	node = cpu_to_node[cpu];
#endif
	if (cpu_has(&cpu_data[cpu], X86_FEATURE_RDTSCP))
		write_rdtscp_aux((node << 12) | cpu);

	/* Store cpu number in limit so that it can be loaded quickly
	   in user space in vgetcpu.
	   12 bits for the CPU and 8 bits for the node.
	   The low nibble of node lands in limit bits 12-15; the high
	   nibble goes to bit 48 of the descriptor (limit bits 16-19). */
	d = (unsigned long *)(cpu_gdt(cpu) + GDT_ENTRY_PER_CPU);
	*d = 0x0f40000000000ULL;
	*d |= cpu;
	*d |= (node & 0xf) << 12;
	*d |= (node >> 4) << 48;
}
271
/* Per-CPU init hook run via on_each_cpu()/smp_call_function_single(). */
static void __cpuinit cpu_vsyscall_init(void *arg)
{
	/* preemption should be already off */
	vsyscall_set_cpu(raw_smp_processor_id());
}
277
#ifdef CONFIG_HOTPLUG_CPU
/* Re-run the per-CPU vsyscall setup on each CPU that comes online. */
static int __cpuinit
cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
{
	long cpu = (long)arg;
	if (action == CPU_ONLINE)
		smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 0, 1);
	return NOTIFY_DONE;
}
#endif
Andi Kleen8c131af2006-11-14 16:57:46 +0100288
/* Map the vsyscall page (the linker-placed __vsyscall_0 text) into the
   fixmap so user space can execute it at the fixed address. */
static void __init map_vsyscall(void)
{
	extern char __vsyscall_0;
	unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);

	__set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
}
296
static int __init vsyscall_init(void)
{
	/* The entry points are placed by linker sections; verify each
	   landed at its ABI-fixed vsyscall address. */
	BUG_ON(((unsigned long) &vgettimeofday !=
			VSYSCALL_ADDR(__NR_vgettimeofday)));
	BUG_ON((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime));
	BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE)));
	BUG_ON((unsigned long) &vgetcpu != VSYSCALL_ADDR(__NR_vgetcpu));
	map_vsyscall();
#ifdef CONFIG_SYSCTL
	register_sysctl_table(kernel_root_table2, 0);
#endif
	/* Seed the per-CPU cpu/node data everywhere now, and keep it
	   current as CPUs are hotplugged in. */
	on_each_cpu(cpu_vsyscall_init, NULL, 0, 1);
	hotcpu_notifier(cpu_vsyscall_notifier, 0);
	return 0;
}

__initcall(vsyscall_init);