/*
 * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright 2003 Andi Kleen, SuSE Labs.
 *
 * Thanks to hpa@transmeta.com for some useful hints.
 * Special thanks to Ingo Molnar for his early experience with
 * a different vsyscall implementation for Linux/IA32 and for the name.
 *
 * vsyscall 0 is located at -10Mbyte, vsyscall 1 is located at virtual
 * address -10Mbyte+1024 bytes, etc. There are at most 4 vsyscalls.
 * One vsyscall can reserve more than 1 slot to avoid jumping out of
 * line if necessary. We cannot add more with this mechanism because
 * older kernels won't return -ENOSYS.
 * If we want more than four we need a vDSO.
 *
 * Note: the concept clashes with user mode linux. If you use UML and
 * want per-guest time, just set the kernel.vsyscall64 sysctl to 0.
 */
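
/*
 * Illustrative sketch only (not part of this file's code; the cast and the
 * raw address below are assumptions spelled out for illustration): user
 * space reaches these entry points by calling the fixed addresses directly.
 * With the -10Mbyte base described above, slot 0 would be used roughly so:
 *
 *	typedef int (*vgtod_fn)(struct timeval *tv, struct timezone *tz);
 *	vgtod_fn vgtod = (vgtod_fn) 0xffffffffff600000UL;	slot 0 = -10Mbyte
 *	struct timeval tv;
 *	vgtod(&tv, NULL);
 *
 * Real callers such as glibc jump to the same fixed addresses rather than
 * going through a function-pointer dance like this.
 */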

/* Disable profiling for userspace code: */
#define DISABLE_BRANCH_PROFILING

#include <linux/time.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/seqlock.h>
#include <linux/jiffies.h>
#include <linux/sysctl.h>
#include <linux/clocksource.h>
#include <linux/getcpu.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/notifier.h>

#include <asm/vsyscall.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/unistd.h>
#include <asm/fixmap.h>
#include <asm/errno.h>
#include <asm/io.h>
#include <asm/segment.h>
#include <asm/desc.h>
#include <asm/topology.h>
#include <asm/vgtod.h>

#define __vsyscall(nr) \
	__attribute__ ((unused, __section__(".vsyscall_" #nr))) notrace
#define __syscall_clobber "r11","cx","memory"
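
/*
 * A note on the two helpers above: __vsyscall(nr) drops a function into the
 * ".vsyscall_<nr>" section, which the kernel linker script is expected to
 * place at the fixed 1024-byte slot described in the header comment, and
 * marks it notrace so no instrumentation leaks into user-visible code.
 * __syscall_clobber lists rcx and r11 because the syscall instruction
 * itself overwrites them (return RIP in rcx, RFLAGS in r11), plus "memory"
 * since the kernel may write through the pointer arguments.
 */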

/*
 * vsyscall_gtod_data contains data that is:
 * - readonly from vsyscalls
 * - written by timer interrupt or sysctl (/proc/sys/kernel/vsyscall64)
 * Try to keep this structure as small as possible to avoid cache line
 * ping-pongs.
 */
int __vgetcpu_mode __section_vgetcpu_mode;

struct vsyscall_gtod_data __vsyscall_gtod_data __section_vsyscall_gtod_data =
{
	.lock = SEQLOCK_UNLOCKED,
	.sysctl_enabled = 1,
};

void update_vsyscall_tz(void)
{
	unsigned long flags;

	write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
	/* sys_tz has changed */
	vsyscall_gtod_data.sys_tz = sys_tz;
	write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
}

void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
{
	unsigned long flags;

	write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
	/* copy vsyscall data */
	vsyscall_gtod_data.clock.vread = clock->vread;
	vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
	vsyscall_gtod_data.clock.mask = clock->mask;
	vsyscall_gtod_data.clock.mult = clock->mult;
	vsyscall_gtod_data.clock.shift = clock->shift;
	vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
	vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
	vsyscall_gtod_data.wall_to_monotonic = wall_to_monotonic;
	write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
}

/* RED-PEN may want to re-add seq locking, but then the variable should be
 * write-once.
 */
static __always_inline void do_get_tz(struct timezone * tz)
{
	*tz = __vsyscall_gtod_data.sys_tz;
}

static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
{
	int ret;
	asm volatile("syscall"
		: "=a" (ret)
		: "0" (__NR_gettimeofday),"D" (tv),"S" (tz)
		: __syscall_clobber );
	return ret;
}

static __always_inline long time_syscall(long *t)
{
	long secs;
	asm volatile("syscall"
		: "=a" (secs)
		: "0" (__NR_time),"D" (t) : __syscall_clobber);
	return secs;
}

static __always_inline void do_vgettimeofday(struct timeval * tv)
{
	cycle_t now, base, mask, cycle_delta;
	unsigned seq;
	unsigned long mult, shift, nsec;
	cycle_t (*vread)(void);
	do {
		seq = read_seqbegin(&__vsyscall_gtod_data.lock);

		vread = __vsyscall_gtod_data.clock.vread;
		if (unlikely(!__vsyscall_gtod_data.sysctl_enabled || !vread)) {
			gettimeofday(tv,NULL);
			return;
		}

		/*
		 * Surround the RDTSC by barriers, to make sure it's not
		 * speculated to outside the seqlock critical section and
		 * does not cause time warps:
		 */
		rdtsc_barrier();
		now = vread();
		rdtsc_barrier();

		base = __vsyscall_gtod_data.clock.cycle_last;
		mask = __vsyscall_gtod_data.clock.mask;
		mult = __vsyscall_gtod_data.clock.mult;
		shift = __vsyscall_gtod_data.clock.shift;

		tv->tv_sec = __vsyscall_gtod_data.wall_time_sec;
		nsec = __vsyscall_gtod_data.wall_time_nsec;
	} while (read_seqretry(&__vsyscall_gtod_data.lock, seq));

	/* calculate interval: */
	cycle_delta = (now - base) & mask;
	/* convert to nsecs: */
	nsec += (cycle_delta * mult) >> shift;

	while (nsec >= NSEC_PER_SEC) {
		tv->tv_sec += 1;
		nsec -= NSEC_PER_SEC;
	}
	tv->tv_usec = nsec / NSEC_PER_USEC;
}
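
/*
 * Worked example of the conversion above, with illustrative numbers only: a
 * clocksource whose mult/shift pair encodes one nanosecond per cycle might
 * register mult = 4194304 and shift = 22 (4194304 / 2^22 == 1), so a
 * cycle_delta of 1,000,000 cycles adds (1000000 * 4194304) >> 22 ==
 * 1,000,000 ns on top of wall_time_nsec. Real clocksources choose mult and
 * shift so that mult / 2^shift approximates nanoseconds per cycle for their
 * actual frequency.
 */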

int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz)
{
	if (tv)
		do_vgettimeofday(tv);
	if (tz)
		do_get_tz(tz);
	return 0;
}

/* This will break when the xtime seconds get inaccurate, but that is
 * unlikely */
time_t __vsyscall(1) vtime(time_t *t)
{
	struct timeval tv;
	time_t result;
	if (unlikely(!__vsyscall_gtod_data.sysctl_enabled))
		return time_syscall(t);

	vgettimeofday(&tv, NULL);
	result = tv.tv_sec;
	if (t)
		*t = result;
	return result;
}

/* Fast way to get the current CPU and node.
   This helps to do per-node and per-CPU caches in user space.
   The result is not guaranteed without CPU affinity, but usually
   works out because the scheduler tries to keep a thread on the same
   CPU.

   tcache must point to a two-element long array.
   All arguments can be NULL. */
long __vsyscall(2)
vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
{
	unsigned int p;
	unsigned long j = 0;

	/* Fast cache - only recompute the value once per jiffy and avoid
	   the relatively costly rdtscp/cpuid otherwise.
	   This works because the scheduler usually keeps the process
	   on the same CPU and this syscall doesn't guarantee its
	   results anyway.
	   We do this here because otherwise user space would do it on
	   its own in a likely inferior way (no access to jiffies).
	   If you don't like it, pass NULL. */
	if (tcache && tcache->blob[0] == (j = __jiffies)) {
		p = tcache->blob[1];
	} else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
		/* Load per CPU data from RDTSCP */
		native_read_tscp(&p);
	} else {
		/* Load per CPU data from GDT */
		asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
	}
	if (tcache) {
		tcache->blob[0] = j;
		tcache->blob[1] = p;
	}
	if (cpu)
		*cpu = p & 0xfff;
	if (node)
		*node = p >> 12;
	return 0;
}
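
/*
 * Illustrative user-space sketch (the slot address is an assumption spelled
 * out here, and this is not built as part of this file): per the header
 * comment, slot 2 sits at -10Mbyte + 2*1024, i.e. 0xffffffffff600800.
 *
 *	long (*vgetcpu_fn)(unsigned *, unsigned *, struct getcpu_cache *);
 *	static struct getcpu_cache cache;	reused so the jiffies fast path hits
 *	unsigned cpu, node;
 *
 *	vgetcpu_fn = (void *) 0xffffffffff600800UL;
 *	vgetcpu_fn(&cpu, &node, &cache);
 *
 * Passing NULL for the cache forces the RDTSCP/GDT path on every call, as
 * the comment inside the function notes.
 */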

static long __vsyscall(3) venosys_1(void)
{
	return -ENOSYS;
}

#ifdef CONFIG_SYSCTL

static int
vsyscall_sysctl_change(ctl_table *ctl, int write, struct file * filp,
			void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
}

static ctl_table kernel_table2[] = {
	{ .procname = "vsyscall64",
	  .data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int),
	  .mode = 0644,
	  .proc_handler = vsyscall_sysctl_change },
	{}
};

static ctl_table kernel_root_table2[] = {
	{ .ctl_name = CTL_KERN, .procname = "kernel", .mode = 0555,
	  .child = kernel_table2 },
	{}
};
#endif
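
/*
 * The sysctl above is the kernel.vsyscall64 switch mentioned in the header
 * comment: writing 0 to it (e.g. "sysctl -w kernel.vsyscall64=0", or echoing
 * 0 into /proc/sys/kernel/vsyscall64) clears sysctl_enabled, so
 * vgettimeofday() and vtime() fall back to real syscalls instead of reading
 * the vsyscall data.
 */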

/* Assume __initcall executes before all user space. Hopefully kmod
   doesn't violate that. We'll find out if it does. */
static void __cpuinit vsyscall_set_cpu(int cpu)
{
	unsigned long d;
	unsigned long node = 0;
#ifdef CONFIG_NUMA
	node = cpu_to_node(cpu);
#endif
	if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP))
		write_rdtscp_aux((node << 12) | cpu);

	/* Store cpu number in limit so that it can be loaded quickly
	   in user space in vgetcpu.
	   12 bits for the CPU and 8 bits for the node. */
	d = 0x0f40000000000ULL;
	d |= cpu;
	d |= (node & 0xf) << 12;
	d |= (node >> 4) << 48;
	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}
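
/*
 * Worked example of the encoding above, with illustrative values: for cpu 5
 * on node 1, the TSC_AUX value becomes (1 << 12) | 5 = 0x1005, and the
 * per-CPU GDT entry's segment limit encodes the same bits, so the "lsl" in
 * vgetcpu() reads back p = 0x1005 and recovers cpu = p & 0xfff = 5 and
 * node = p >> 12 = 1.
 */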

static void __cpuinit cpu_vsyscall_init(void *arg)
{
	/* preemption should be already off */
	vsyscall_set_cpu(raw_smp_processor_id());
}

static int __cpuinit
cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
{
	long cpu = (long)arg;
	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
		smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 1);
	return NOTIFY_DONE;
}

void __init map_vsyscall(void)
{
	extern char __vsyscall_0;
	unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);

	/* Note that VSYSCALL_MAPPED_PAGES must agree with the code below. */
	__set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
}

static int __init vsyscall_init(void)
{
	BUG_ON(((unsigned long) &vgettimeofday !=
		VSYSCALL_ADDR(__NR_vgettimeofday)));
	BUG_ON((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime));
	BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE)));
	BUG_ON((unsigned long) &vgetcpu != VSYSCALL_ADDR(__NR_vgetcpu));
#ifdef CONFIG_SYSCTL
	register_sysctl_table(kernel_root_table2);
#endif
	on_each_cpu(cpu_vsyscall_init, NULL, 1);
	hotcpu_notifier(cpu_vsyscall_notifier, 0);
	return 0;
}

__initcall(vsyscall_init);