/*
 *  linux/arch/x86_64/kernel/vsyscall.c
 *
 *  Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright 2003 Andi Kleen, SuSE Labs.
 *
 *  Thanks to hpa@transmeta.com for some useful hints.
 *  Special thanks to Ingo Molnar for his early experience with
 *  a different vsyscall implementation for Linux/IA32 and for the name.
 *
 *  vsyscall 1 is located at -10Mbyte, vsyscall 2 is located
 *  at virtual address -10Mbyte+1024bytes etc... There are at most 4
 *  vsyscalls. One vsyscall can reserve more than 1 slot to avoid
 *  jumping out of line if necessary. We cannot add more with this
 *  mechanism because older kernels won't return -ENOSYS.
 *  If we want more than four we need a vDSO.
 *
 *  Note: the concept clashes with user mode linux. If you use UML and
 *  want per-guest time just set the kernel.vsyscall64 sysctl to 0.
 */

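/*
 * Illustrative only (not compiled here): because each vsyscall sits at
 * a fixed, well-known address, user space can call one directly with no
 * symbol lookup.  With the layout described above, vgetcpu occupies
 * slot 2 at 0xffffffffff600800, e.g.:
 *
 *	typedef long (*vgetcpu_t)(unsigned *, unsigned *, void *);
 *	vgetcpu_t vgetcpu = (vgetcpu_t)0xffffffffff600800UL;
 *	unsigned cpu, node;
 *	vgetcpu(&cpu, &node, NULL);
 *
 * C libraries typically reach the time vsyscalls the same way from
 * their gettimeofday()/time() wrappers.
 */
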
#include <linux/time.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/seqlock.h>
#include <linux/jiffies.h>
#include <linux/sysctl.h>
#include <linux/getcpu.h>

#include <asm/vsyscall.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/fixmap.h>
#include <asm/errno.h>
#include <asm/io.h>
#include <asm/segment.h>
#include <asm/desc.h>
#include <asm/topology.h>

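/*
 * __vsyscall(nr) drops the function that follows into its own
 * .vsyscall_<nr> section; the x86_64 linker script is expected to place
 * those sections at the fixed 1024-byte slots VSYSCALL_ADDR(nr) that
 * vsyscall_init() below checks.
 */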
#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))

int __sysctl_vsyscall __section_sysctl_vsyscall = 1;
seqlock_t __xtime_lock __section_xtime_lock = SEQLOCK_UNLOCKED;
int __vgetcpu_mode __section_vgetcpu_mode;

#include <asm/unistd.h>

static __always_inline void timeval_normalize(struct timeval * tv)
{
	time_t __sec;

	__sec = tv->tv_usec / 1000000;
	if (__sec) {
		tv->tv_usec %= 1000000;
		tv->tv_sec += __sec;
	}
}

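/*
 * Note on the scaling below: __vxtime.tsc_quot (and .quot for the HPET
 * path) is maintained by the timer code as, in effect, a 32.32
 * fixed-point "microseconds per counter tick" value, so
 * (delta * quot) >> 32 gives the microseconds elapsed since the last
 * timer update recorded in __vxtime.
 */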
static __always_inline void do_vgettimeofday(struct timeval * tv)
{
	long sequence, t;
	unsigned long sec, usec;

	do {
		sequence = read_seqbegin(&__xtime_lock);

		sec = __xtime.tv_sec;
		usec = __xtime.tv_nsec / 1000;

		if (__vxtime.mode != VXTIME_HPET) {
			t = get_cycles_sync();
			if (t < __vxtime.last_tsc)
				t = __vxtime.last_tsc;
			usec += ((t - __vxtime.last_tsc) *
				 __vxtime.tsc_quot) >> 32;
			/* See comment in x86_64 do_gettimeofday. */
		} else {
			usec += ((readl((void __iomem *)
				   fix_to_virt(VSYSCALL_HPET) + 0xf0) -
				  __vxtime.last) * __vxtime.quot) >> 32;
		}
	} while (read_seqretry(&__xtime_lock, sequence));

	tv->tv_sec = sec + usec / 1000000;
	tv->tv_usec = usec % 1000000;
}

/* RED-PEN may want to re-add seq locking, but then the variable should be write-once. */
static __always_inline void do_get_tz(struct timezone * tz)
{
	*tz = __sys_tz;
}

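/*
 * Real-syscall fallbacks, used when the kernel.vsyscall64 sysctl is 0.
 * The vsysc1/vsysc2 labels mark the syscall instructions so that
 * vsyscall_sysctl_change() below can patch them to NOPs while the fast
 * path is enabled and restore them when it is turned off.
 */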
static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
{
	int ret;
	asm volatile("vsysc2: syscall"
		: "=a" (ret)
		: "0" (__NR_gettimeofday),"D" (tv),"S" (tz) : __syscall_clobber );
	return ret;
}

static __always_inline long time_syscall(long *t)
{
	long secs;
	asm volatile("vsysc1: syscall"
		: "=a" (secs)
		: "0" (__NR_time),"D" (t) : __syscall_clobber);
	return secs;
}

int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz)
{
	if (!__sysctl_vsyscall)
		return gettimeofday(tv,tz);
	if (tv)
		do_vgettimeofday(tv);
	if (tz)
		do_get_tz(tz);
	return 0;
}

/* This will break when the xtime seconds get inaccurate, but that is
 * unlikely */
time_t __vsyscall(1) vtime(time_t *t)
{
	if (!__sysctl_vsyscall)
		return time_syscall(t);
	else if (t)
		*t = __xtime.tv_sec;
	return __xtime.tv_sec;
}

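/*
 * The per-CPU word read below packs the CPU number into bits 0-11 and
 * the node number above them; it comes either from the TSC_AUX MSR
 * (RDTSCP) or from a per-CPU GDT segment limit, both of which are
 * programmed by vsyscall_set_cpu() further down.
 */
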
/* Fast way to get the current CPU and node.
   This helps to do per-node and per-CPU caches in user space.
   The result is not guaranteed without CPU affinity, but usually
   works out because the scheduler tries to keep a thread on the same
   CPU.

   tcache must point to a two-element long array.
   All arguments can be NULL. */
long __vsyscall(2)
vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
{
	unsigned int dummy, p;
	unsigned long j = 0;

	/* Fast cache - only recompute the value once per jiffy and avoid
	   the relatively costly rdtscp/cpuid otherwise.
	   This works because the scheduler usually keeps the process
	   on the same CPU and this syscall doesn't guarantee its
	   results anyway.
	   We do this here because otherwise user space would do it on
	   its own in a likely inferior way (no access to jiffies).
	   If you don't like it pass NULL. */
	if (tcache && tcache->blob[0] == (j = __jiffies)) {
		p = tcache->blob[1];
	} else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
		/* Load per CPU data from RDTSCP */
		rdtscp(dummy, dummy, p);
	} else {
		/* Load per CPU data from GDT */
		asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
	}
	if (tcache) {
		tcache->blob[0] = j;
		tcache->blob[1] = p;
	}
	if (cpu)
		*cpu = p & 0xfff;
	if (node)
		*node = p >> 12;
	return 0;
}

long __vsyscall(3) venosys_1(void)
{
	return -ENOSYS;
}

#ifdef CONFIG_SYSCTL

#define SYSCALL 0x050f
#define NOP2    0x9090

/*
 * NOP out syscall in vsyscall page when not needed.
 */
static int vsyscall_sysctl_change(ctl_table *ctl, int write, struct file * filp,
			void __user *buffer, size_t *lenp, loff_t *ppos)
{
	extern u16 vsysc1, vsysc2;
	u16 __iomem *map1;
	u16 __iomem *map2;
	int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
	if (!write)
		return ret;
	/* gcc has some trouble with __va(__pa()), so just do it this
	   way. */
	map1 = ioremap(__pa_symbol(&vsysc1), 2);
	if (!map1)
		return -ENOMEM;
	map2 = ioremap(__pa_symbol(&vsysc2), 2);
	if (!map2) {
		ret = -ENOMEM;
		goto out;
	}
	if (!sysctl_vsyscall) {
		writew(SYSCALL, map1);
		writew(SYSCALL, map2);
	} else {
		writew(NOP2, map1);
		writew(NOP2, map2);
	}
	iounmap(map2);
out:
	iounmap(map1);
	return ret;
}

static int vsyscall_sysctl_nostrat(ctl_table *t, int __user *name, int nlen,
				void __user *oldval, size_t __user *oldlenp,
				void __user *newval, size_t newlen,
				void **context)
{
	return -ENOSYS;
}

static ctl_table kernel_table2[] = {
	{ .ctl_name = 99, .procname = "vsyscall64",
	  .data = &sysctl_vsyscall, .maxlen = sizeof(int), .mode = 0644,
	  .strategy = vsyscall_sysctl_nostrat,
	  .proc_handler = vsyscall_sysctl_change },
	{ 0, }
};

static ctl_table kernel_root_table2[] = {
	{ .ctl_name = CTL_KERN, .procname = "kernel", .mode = 0555,
	  .child = kernel_table2 },
	{ 0 },
};

#endif

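/*
 * Set up the per-CPU data that vgetcpu() reads: the TSC_AUX MSR for the
 * RDTSCP path and the per-CPU GDT descriptor limit for the lsl path.
 */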
static void __cpuinit write_rdtscp_cb(void *info)
{
	write_rdtscp_aux((unsigned long)info);
}

void __cpuinit vsyscall_set_cpu(int cpu)
{
	unsigned long *d;
	unsigned long node = 0;
#ifdef CONFIG_NUMA
	node = cpu_to_node[cpu];
#endif
	if (cpu_has(&cpu_data[cpu], X86_FEATURE_RDTSCP)) {
		void *info = (void *)((node << 12) | cpu);
		/* Can happen on preemptive kernel */
		if (get_cpu() == cpu)
			write_rdtscp_cb(info);
#ifdef CONFIG_SMP
		else {
			/* the notifier is unfortunately not executed on the
			   target CPU */
			smp_call_function_single(cpu,write_rdtscp_cb,info,0,1);
		}
#endif
		put_cpu();
	}

	/* Store cpu number in limit so that it can be loaded quickly
	   in user space in vgetcpu.
	   12 bits for the CPU and 8 bits for the node. */
	d = (unsigned long *)(cpu_gdt(cpu) + GDT_ENTRY_PER_CPU);
	*d = 0x0f40000000000ULL;
	*d |= cpu;
	*d |= (node & 0xf) << 12;
	*d |= (node >> 4) << 48;
}

static void __init map_vsyscall(void)
{
	extern char __vsyscall_0;
	unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);

	__set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
}

static int __init vsyscall_init(void)
{
	BUG_ON(((unsigned long) &vgettimeofday !=
			VSYSCALL_ADDR(__NR_vgettimeofday)));
	BUG_ON((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime));
	BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE)));
	BUG_ON((unsigned long) &vgetcpu != VSYSCALL_ADDR(__NR_vgetcpu));
	map_vsyscall();
#ifdef CONFIG_SYSCTL
	register_sysctl_table(kernel_root_table2, 0);
#endif
	return 0;
}

__initcall(vsyscall_init);