blob: abab42e9f5f8718cfcda91702803e8d797f70544 [file] [log] [blame]
/*
 *  arch/s390/kernel/time.c
 *    Time of day based timer functions.
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
 *
 *  Derived from "arch/i386/kernel/time.c"
 *    Copyright (C) 1991, 1992, 1995  Linus Torvalds
 */
14
Linus Torvalds1da177e2005-04-16 15:20:36 -070015#include <linux/errno.h>
16#include <linux/module.h>
17#include <linux/sched.h>
18#include <linux/kernel.h>
19#include <linux/param.h>
20#include <linux/string.h>
21#include <linux/mm.h>
22#include <linux/interrupt.h>
23#include <linux/time.h>
24#include <linux/delay.h>
25#include <linux/init.h>
26#include <linux/smp.h>
27#include <linux/types.h>
28#include <linux/profile.h>
29#include <linux/timex.h>
30#include <linux/notifier.h>
31
32#include <asm/uaccess.h>
33#include <asm/delay.h>
34#include <asm/s390_ext.h>
35#include <asm/div64.h>
36#include <asm/irq.h>
37#include <asm/timer.h>
38
/* change this if you have some constant time drift */
#define USECS_PER_JIFFY     ((unsigned long) 1000000/HZ)
/* TOD clock ticks in 1/4096 microsecond units, hence the << 12 */
#define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12)

/*
 * Create a small time difference between the timer interrupts
 * on the different cpus to avoid lock contention.
 */
#define CPU_DEVIATION       (smp_processor_id() << 12)

#define TICK_SIZE tick
50
static ext_int_info_t ext_int_info_cc;	/* clock comparator ext. interrupt */
static u64 init_timer_cc;		/* TOD value sampled in time_init() */
static u64 jiffies_timer_cc;		/* TOD base: get_clock()-jiffies_timer_cc
					   counts clock ticks since jiffy 0 */
static u64 xtime_cc;			/* TOD time up to which do_timer()
					   calls have been accounted (SMP) */

extern unsigned long wall_jiffies;
57
58/*
59 * Scheduler clock - returns current time in nanosec units.
60 */
61unsigned long long sched_clock(void)
62{
Jan Glauber9dbafa52006-02-01 03:06:32 -080063 return ((get_clock() - jiffies_timer_cc) * 125) >> 9;
Linus Torvalds1da177e2005-04-16 15:20:36 -070064}
65
/*
 * Monotonic_clock - returns # of nanoseconds passed since time_init()
 *
 * Simply an alias for sched_clock(): both count nanoseconds from the
 * boot-time base jiffies_timer_cc, so the value never jumps backwards
 * when the wall clock is set.
 */
unsigned long long monotonic_clock(void)
{
	return sched_clock();
}
EXPORT_SYMBOL(monotonic_clock);
74
Linus Torvalds1da177e2005-04-16 15:20:36 -070075void tod_to_timeval(__u64 todval, struct timespec *xtime)
76{
77 unsigned long long sec;
78
79 sec = todval >> 12;
80 do_div(sec, 1000000);
81 xtime->tv_sec = sec;
82 todval -= (sec * 1000000) << 12;
83 xtime->tv_nsec = ((todval * 1000) >> 12);
84}
85
86static inline unsigned long do_gettimeoffset(void)
87{
88 __u64 now;
89
90 now = (get_clock() - jiffies_timer_cc) >> 12;
91 /* We require the offset from the latest update of xtime */
92 now -= (__u64) wall_jiffies*USECS_PER_JIFFY;
93 return (unsigned long) now;
94}
95
96/*
97 * This version of gettimeofday has microsecond resolution.
98 */
99void do_gettimeofday(struct timeval *tv)
100{
101 unsigned long flags;
102 unsigned long seq;
103 unsigned long usec, sec;
104
105 do {
106 seq = read_seqbegin_irqsave(&xtime_lock, flags);
107
108 sec = xtime.tv_sec;
109 usec = xtime.tv_nsec / 1000 + do_gettimeoffset();
110 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
111
112 while (usec >= 1000000) {
113 usec -= 1000000;
114 sec++;
115 }
116
117 tv->tv_sec = sec;
118 tv->tv_usec = usec;
119}
120
121EXPORT_SYMBOL(do_gettimeofday);
122
/*
 * Set the wall clock to *tv.  wall_to_monotonic is adjusted by the
 * same delta in the opposite direction so that the monotonic clock
 * (xtime + wall_to_monotonic) does not jump when the wall clock is set.
 * Returns 0 on success, -EINVAL for an out-of-range tv_nsec.
 */
int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irq(&xtime_lock);
	/* This is revolting. We need to set the xtime.tv_nsec
	 * correctly. However, the value in this location is
	 * the value at the last tick.
	 * Discover what correction gettimeofday
	 * would have done, and then undo it!
	 */
	nsec -= do_gettimeoffset() * 1000;

	wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

	set_normalized_timespec(&xtime, sec, nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	/* discard any NTP adjustment state accumulated for the old time */
	ntp_clear();
	write_sequnlock_irq(&xtime_lock);
	clock_was_set();
	return 0;
}

EXPORT_SYMBOL(do_settimeofday);
153
154
/* Per-tick profiling hook; compiles to nothing when CONFIG_PROFILING is off. */
#ifdef CONFIG_PROFILING
#define s390_do_profile(regs)	profile_tick(CPU_PROFILING, regs)
#else
#define s390_do_profile(regs)  do { ; } while(0)
#endif /* CONFIG_PROFILING */
160
161
/*
 * timer_interrupt() needs to keep up the real-time clock,
 * as well as call the "do_timer()" routine every clocktick
 *
 * Works out how many ticks elapsed since the last invocation from the
 * per-cpu jiffy_timer in the lowcore, reprograms the clock comparator
 * (SCKC) for the next tick, and accounts the elapsed ticks to the
 * timekeeping core and to the current process.
 */
void account_ticks(struct pt_regs *regs)
{
	__u64 tmp;
	__u32 ticks;

	/* Calculate how many ticks have passed. */
	if (S390_lowcore.int_clock < S390_lowcore.jiffy_timer) {
		/*
		 * We have to program the clock comparator even if
		 * no tick has passed. That happens if e.g. an i/o
		 * interrupt wakes up an idle processor that has
		 * switched off its hz timer.
		 */
		tmp = S390_lowcore.jiffy_timer + CPU_DEVIATION;
		asm volatile ("SCKC %0" : : "m" (tmp));
		return;
	}
	tmp = S390_lowcore.int_clock - S390_lowcore.jiffy_timer;
	if (tmp >= 2*CLK_TICKS_PER_JIFFY) {  /* more than two ticks ? */
		/* divide only in the (rare) multi-tick case */
		ticks = __div(tmp, CLK_TICKS_PER_JIFFY) + 1;
		S390_lowcore.jiffy_timer +=
			CLK_TICKS_PER_JIFFY * (__u64) ticks;
	} else if (tmp >= CLK_TICKS_PER_JIFFY) {
		ticks = 2;
		S390_lowcore.jiffy_timer += 2*CLK_TICKS_PER_JIFFY;
	} else {
		ticks = 1;
		S390_lowcore.jiffy_timer += CLK_TICKS_PER_JIFFY;
	}

	/* set clock comparator for next tick */
	tmp = S390_lowcore.jiffy_timer + CPU_DEVIATION;
	asm volatile ("SCKC %0" : : "m" (tmp));

#ifdef CONFIG_SMP
	/*
	 * Do not rely on the boot cpu to do the calls to do_timer.
	 * Spread it over all cpus instead.  xtime_cc records how far
	 * the global jiffies count has been advanced already.
	 */
	write_seqlock(&xtime_lock);
	if (S390_lowcore.jiffy_timer > xtime_cc) {
		__u32 xticks;
		tmp = S390_lowcore.jiffy_timer - xtime_cc;
		if (tmp >= 2*CLK_TICKS_PER_JIFFY) {
			xticks = __div(tmp, CLK_TICKS_PER_JIFFY);
			xtime_cc += (__u64) xticks * CLK_TICKS_PER_JIFFY;
		} else {
			xticks = 1;
			xtime_cc += CLK_TICKS_PER_JIFFY;
		}
		do_timer(xticks);
	}
	write_sequnlock(&xtime_lock);
#else
	do_timer(ticks);
#endif

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	account_tick_vtime(current);
#else
	while (ticks--)
		update_process_times(user_mode(regs));
#endif

	s390_do_profile(regs);
}
232
233#ifdef CONFIG_NO_IDLE_HZ
234
/*
 * Sysctl switch: non-zero keeps the HZ tick running on idle cpus
 * (stop_hz_timer() bails out early).  CONFIG_NO_IDLE_HZ_INIT selects
 * whether stopping the tick is enabled (0) or disabled (1) at boot.
 */
#ifdef CONFIG_NO_IDLE_HZ_INIT
int sysctl_hz_timer = 0;
#else
int sysctl_hz_timer = 1;
#endif
240
/*
 * Stop the HZ tick on the current CPU.
 * Only cpu_idle may call this function.
 *
 * Marks the cpu in nohz_cpu_mask and reprograms the clock comparator
 * to the next pending timer expiry instead of the next tick, so the
 * idle cpu is not woken every jiffy.
 */
static inline void stop_hz_timer(void)
{
	unsigned long flags;
	unsigned long seq, next;
	__u64 timer, todval;
	int cpu = smp_processor_id();

	/* sysctl override: keep the periodic tick running */
	if (sysctl_hz_timer != 0)
		return;

	cpu_set(cpu, nohz_cpu_mask);

	/*
	 * Leave the clock comparator set up for the next timer
	 * tick if either rcu or a softirq is pending.
	 */
	if (rcu_needs_cpu(cpu) || local_softirq_pending()) {
		cpu_clear(cpu, nohz_cpu_mask);
		return;
	}

	/*
	 * This cpu is going really idle. Set up the clock comparator
	 * for the next event.
	 */
	next = next_timer_interrupt();
	do {
		seq = read_seqbegin_irqsave(&xtime_lock, flags);
		/* jiffies of the next event, rebased onto jiffies_64 */
		timer = ((__u64) next) - ((__u64) jiffies) + jiffies_64;
	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
	/* all-ones pushes the comparator to the far future (no wakeup) */
	todval = -1ULL;
	/* Be careful about overflows. */
	if (timer < (-1ULL / CLK_TICKS_PER_JIFFY)) {
		timer = jiffies_timer_cc + timer * CLK_TICKS_PER_JIFFY;
		if (timer >= jiffies_timer_cc)
			todval = timer;
	}
	asm volatile ("SCKC %0" : : "m" (todval));
}
284
285/*
286 * Start the HZ tick on the current CPU.
287 * Only cpu_idle may call this function.
288 */
289static inline void start_hz_timer(void)
290{
291 if (!cpu_isset(smp_processor_id(), nohz_cpu_mask))
292 return;
Al Viroc7584fb2006-01-12 01:05:49 -0800293 account_ticks(task_pt_regs(current));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700294 cpu_clear(smp_processor_id(), nohz_cpu_mask);
295}
296
297static int nohz_idle_notify(struct notifier_block *self,
298 unsigned long action, void *hcpu)
299{
300 switch (action) {
301 case CPU_IDLE:
302 stop_hz_timer();
303 break;
304 case CPU_NOT_IDLE:
305 start_hz_timer();
306 break;
307 }
308 return NOTIFY_OK;
309}
310
/* Notifier block hooking nohz_idle_notify into the idle notifier chain. */
static struct notifier_block nohz_idle_nb = {
	.notifier_call = nohz_idle_notify,
};
314
315void __init nohz_init(void)
316{
317 if (register_idle_notifier(&nohz_idle_nb))
318 panic("Couldn't register idle notifier");
319}
320
321#endif
322
/*
 * Start the clock comparator on the current CPU.
 *
 * Seeds the per-cpu jiffy_timer in the lowcore from the current
 * jiffies_64 count, programs the clock comparator (SCKC) for the next
 * tick and enables the clock comparator external interrupt in CR0.
 */
void init_cpu_timer(void)
{
	unsigned long cr0;
	__u64 timer;

	/* TOD value of the current jiffy; next tick is one jiffy later */
	timer = jiffies_timer_cc + jiffies_64 * CLK_TICKS_PER_JIFFY;
	S390_lowcore.jiffy_timer = timer + CLK_TICKS_PER_JIFFY;
	/* CPU_DEVIATION staggers the comparator between cpus to
	 * reduce xtime_lock contention */
	timer += CLK_TICKS_PER_JIFFY + CPU_DEVIATION;
	asm volatile ("SCKC %0" : : "m" (timer));
	/* allow clock comparator timer interrupt (CR0 bit 0x800 is the
	 * clock comparator subclass mask — see Principles of Operation) */
	__ctl_store(cr0, 0, 0);
	cr0 |= 0x800;
	__ctl_load(cr0, 0, 0);
}
340
341extern void vtime_init(void);
342
/*
 * Initialize the TOD clock and the CPU timer of
 * the boot cpu.
 *
 * Samples the TOD clock, derives the jiffies/xtime conversion bases,
 * sets the initial wall clock, registers the clock comparator external
 * interrupt (code 0x1004) and starts the boot cpu's tick.
 */
void __init time_init(void)
{
	__u64 set_time_cc;
	int cc;

	/* kick the TOD clock: STCK stores the clock value and sets the
	 * condition code, which IPM/SRL extract into cc */
	asm volatile(
		"	stck 0(%2)\n"
		"	ipm %0\n"
		"	srl %0,28"
		: "=d" (cc), "=m" (init_timer_cc)
		: "a" (&init_timer_cc) : "cc");
	switch (cc) {
	case 0: /* clock in set state: all is fine */
		break;
	case 1: /* clock in non-set state: FIXME */
		printk("time_init: TOD clock in non-set state\n");
		break;
	case 2: /* clock in error state: FIXME */
		printk("time_init: TOD clock in error state\n");
		break;
	case 3: /* clock in stopped or not-operational state: FIXME */
		printk("time_init: TOD clock stopped/non-operational\n");
		break;
	}
	/* base so that (get_clock() - jiffies_timer_cc) starts at the
	 * current jiffies_64 count */
	jiffies_timer_cc = init_timer_cc - jiffies_64 * CLK_TICKS_PER_JIFFY;

	/* set xtime */
	xtime_cc = init_timer_cc + CLK_TICKS_PER_JIFFY;
	/* NOTE(review): 0x8126d60e46000000 looks like the TOD value of the
	 * Unix epoch (the s390 TOD epoch is 1900-01-01), and 0x3c26700 is
	 * 63072000 seconds (~2 years) in TOD units — confirm against the
	 * Principles of Operation before touching these constants. */
	set_time_cc = init_timer_cc - 0x8126d60e46000000LL +
		(0x3c26700LL*1000000*4096);
	tod_to_timeval(set_time_cc, &xtime);
	set_normalized_timespec(&wall_to_monotonic,
				-xtime.tv_sec, -xtime.tv_nsec);

	/* request the clock comparator external interrupt */
	if (register_early_external_interrupt(0x1004, NULL,
					      &ext_int_info_cc) != 0)
		panic("Couldn't request external interrupt 0x1004");

	init_cpu_timer();

#ifdef CONFIG_NO_IDLE_HZ
	nohz_init();
#endif

#ifdef CONFIG_VIRT_TIMER
	vtime_init();
#endif
}
397