/*
 *  linux/kernel/time/timekeeping.c
 *
 *  Kernel timekeeping code and accessor functions
 *
 *  This code was moved from linux/kernel/timer.c.
 *  Please see that file for copyright and history logs.
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sysdev.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>


/*
 * This seqlock protects us from races in SMP while
 * playing with xtime.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
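
/*
 * Readers sample xtime and the clocksource under the usual seqlock
 * retry pattern (getnstimeofday() below is the canonical instance):
 *
 *	do {
 *		seq = read_seqbegin(&xtime_lock);
 *		...
 *	} while (read_seqretry(&xtime_lock, seq));
 *
 * Writers serialize with write_seqlock_irqsave(&xtime_lock, flags).
 */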


/*
 * The current time
 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
 * for sub-jiffie times) to get to monotonic time. Monotonic is pegged
 * at zero at system boot time, so wall_to_monotonic will be negative;
 * however, we will ALWAYS keep the tv_nsec part positive so we can use
 * the usual normalization.
 *
 * wall_to_monotonic is adjusted after resume from suspend so that the
 * monotonic time does not jump. We need to add total_sleep_time to
 * wall_to_monotonic to get the real boot-based time offset.
 *
 * - wall_to_monotonic is no longer the boot time; getboottime() must be
 *   used instead.
 */
struct timespec xtime __attribute__ ((aligned (16)));
struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
static unsigned long total_sleep_time;		/* seconds */
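
/*
 * Worked example with illustrative numbers: if the persistent clock
 * reads 1000000000s at boot, timekeeping_init() sets xtime.tv_sec to
 * 1000000000 and wall_to_monotonic to -1000000000s, so the monotonic
 * clock (xtime + wall_to_monotonic) starts at 0. If settimeofday()
 * later steps xtime forward by 5s, wall_to_monotonic is stepped back
 * by 5s and the monotonic sum is unchanged.
 */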

/* flag for whether timekeeping is suspended */
int __read_mostly timekeeping_suspended;

static struct timespec xtime_cache __attribute__ ((aligned (16)));
void update_xtime_cache(u64 nsec)
{
	xtime_cache = xtime;
	timespec_add_ns(&xtime_cache, nsec);
}
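
/*
 * xtime_cache is a coarse, tick-granular copy of xtime. It lets
 * get_seconds() and current_kernel_time() below serve readers without
 * touching the clocksource. It is refreshed from update_wall_time()
 * and whenever xtime is stepped.
 */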

struct clocksource *clock;


#ifdef CONFIG_GENERIC_TIME
/**
 * clocksource_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void clocksource_forward_now(void)
{
	cycle_t cycle_now, cycle_delta;
	s64 nsec;

	cycle_now = clocksource_read(clock);
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
	clock->cycle_last = cycle_now;

	nsec = cyc2ns(clock, cycle_delta);

	/* If arch requires, add in gettimeoffset() */
	nsec += arch_gettimeoffset();

	timespec_add_ns(&xtime, nsec);

	nsec = ((s64)cycle_delta * clock->mult_orig) >> clock->shift;
	clock->raw_time.tv_nsec += nsec;
}
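
/*
 * A note on the delta math above, with illustrative numbers: the
 * masked subtraction
 *	(cycle_now - clock->cycle_last) & clock->mask
 * yields the correct delta even when the free-running counter wraps,
 * provided it wrapped at most once. With a 32-bit counter
 * (mask = 0xffffffff), cycle_last = 0xfffffff0 and cycle_now = 0x10
 * give a delta of 0x20. cyc2ns() then scales cycles to nanoseconds as
 * (cycle_delta * clock->mult) >> clock->shift.
 */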

/**
 * getnstimeofday - Returns the time of day in a timespec
 * @ts:	pointer to the timespec to be set
 *
 * Returns the time of day in a timespec.
 */
void getnstimeofday(struct timespec *ts)
{
	cycle_t cycle_now, cycle_delta;
	unsigned long seq;
	s64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&xtime_lock);

		*ts = xtime;

		/* read clocksource: */
		cycle_now = clocksource_read(clock);

		/* calculate the delta since the last update_wall_time: */
		cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

		/* convert to nanoseconds: */
		nsecs = cyc2ns(clock, cycle_delta);

		/* If arch requires, add in gettimeoffset() */
		nsecs += arch_gettimeoffset();

	} while (read_seqretry(&xtime_lock, seq));

	timespec_add_ns(ts, nsecs);
}

EXPORT_SYMBOL(getnstimeofday);
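
/*
 * Usage sketch (illustrative, not taken from this file):
 *
 *	struct timespec ts;
 *
 *	getnstimeofday(&ts);
 *	pr_info("wall time: %ld.%09ld\n", ts.tv_sec, ts.tv_nsec);
 */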

ktime_t ktime_get(void)
{
	cycle_t cycle_now, cycle_delta;
	unsigned int seq;
	s64 secs, nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&xtime_lock);
		secs = xtime.tv_sec + wall_to_monotonic.tv_sec;
		nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec;

		/* read clocksource: */
		cycle_now = clocksource_read(clock);

		/* calculate the delta since the last update_wall_time: */
		cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

		/* convert to nanoseconds: */
		nsecs += cyc2ns(clock, cycle_delta);

	} while (read_seqretry(&xtime_lock, seq));
	/*
	 * Use ktime_set/ktime_add_ns to create a proper ktime on
	 * 32-bit architectures without CONFIG_KTIME_SCALAR.
	 */
	return ktime_add_ns(ktime_set(secs, 0), nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get);

/**
 * ktime_get_ts - get the monotonic clock in timespec format
 * @ts:	pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts(struct timespec *ts)
{
	cycle_t cycle_now, cycle_delta;
	struct timespec tomono;
	unsigned int seq;
	s64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&xtime_lock);
		*ts = xtime;
		tomono = wall_to_monotonic;

		/* read clocksource: */
		cycle_now = clocksource_read(clock);

		/* calculate the delta since the last update_wall_time: */
		cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

		/* convert to nanoseconds: */
		nsecs = cyc2ns(clock, cycle_delta);

	} while (read_seqretry(&xtime_lock, seq));

	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
				ts->tv_nsec + tomono.tv_nsec + nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);

/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv:	pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday()
 */
void do_gettimeofday(struct timeval *tv)
{
	struct timespec now;

	getnstimeofday(&now);
	tv->tv_sec = now.tv_sec;
	tv->tv_usec = now.tv_nsec/1000;
}

EXPORT_SYMBOL(do_gettimeofday);
/**
 * do_settimeofday - Sets the time of day
 * @tv:	pointer to the timespec variable containing the new time
 *
 * Sets the time of day to the new time, updates NTP and notifies hrtimers.
 */
int do_settimeofday(struct timespec *tv)
{
	struct timespec ts_delta;
	unsigned long flags;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&xtime_lock, flags);

	clocksource_forward_now();

	ts_delta.tv_sec = tv->tv_sec - xtime.tv_sec;
	ts_delta.tv_nsec = tv->tv_nsec - xtime.tv_nsec;
	wall_to_monotonic = timespec_sub(wall_to_monotonic, ts_delta);

	xtime = *tv;

	update_xtime_cache(0);

	clock->error = 0;
	ntp_clear();

	update_vsyscall(&xtime, clock);

	write_sequnlock_irqrestore(&xtime_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return 0;
}

EXPORT_SYMBOL(do_settimeofday);
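
/*
 * Worked example for the delta handling above (illustrative numbers):
 * stepping the wall clock from 100.0s to 110.0s gives ts_delta = 10s,
 * and wall_to_monotonic is reduced by the same 10s, so the monotonic
 * clock (xtime + wall_to_monotonic) is unaffected by the step, as
 * required.
 */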

/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static void change_clocksource(void)
{
	struct clocksource *new, *old;

	new = clocksource_get_next();

	if (clock == new)
		return;

	clocksource_forward_now();

	if (clocksource_enable(new))
		return;

	new->raw_time = clock->raw_time;
	old = clock;
	clock = new;
	clocksource_disable(old);

	clock->cycle_last = 0;
	clock->cycle_last = clocksource_read(clock);
	clock->error = 0;
	clock->xtime_nsec = 0;
	clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);

	tick_clock_notify();

	/*
	 * We're holding xtime lock and waking up klogd would deadlock
	 * us on enqueue. So no printing!
	printk(KERN_INFO "Time: %s clocksource has been installed.\n",
	       clock->name);
	 */
}
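
/*
 * Note on the double write to cycle_last above: clearing it before the
 * read is believed to matter on architectures (x86 TSC, for instance)
 * whose clocksource read path clamps the returned value against
 * cycle_last so time cannot go backwards; a stale cycle_last could
 * otherwise defeat the re-basing. The same pattern appears in
 * timekeeping_resume(). (Rationale inferred, not stated in this file.)
 */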
#else
static inline void clocksource_forward_now(void) { }
static inline void change_clocksource(void) { }
#endif

/**
 * getrawmonotonic - Returns the raw monotonic time in a timespec
 * @ts:	pointer to the timespec to be set
 *
 * Returns the raw monotonic time (completely unmodified by NTP)
 */
void getrawmonotonic(struct timespec *ts)
{
	unsigned long seq;
	s64 nsecs;
	cycle_t cycle_now, cycle_delta;

	do {
		seq = read_seqbegin(&xtime_lock);

		/* read clocksource: */
		cycle_now = clocksource_read(clock);

		/* calculate the delta since the last update_wall_time: */
		cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

		/* convert to nanoseconds: */
		nsecs = ((s64)cycle_delta * clock->mult_orig) >> clock->shift;

		*ts = clock->raw_time;

	} while (read_seqretry(&xtime_lock, seq));

	timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getrawmonotonic);
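
/*
 * Note the use of clock->mult_orig above rather than clock->mult:
 * clocksource_adjust() below continually nudges ->mult to track NTP,
 * while ->mult_orig keeps the unadjusted calibration. Scaling raw
 * cycles by mult_orig is what keeps the raw monotonic clock immune to
 * NTP slewing.
 */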


/**
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
 */
int timekeeping_valid_for_hres(void)
{
	unsigned long seq;
	int ret;

	do {
		seq = read_seqbegin(&xtime_lock);

		ret = clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;

	} while (read_seqretry(&xtime_lock, seq));

	return ret;
}

/**
 * read_persistent_clock - Return time in seconds from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Returns seconds from epoch using the battery-backed persistent clock.
 * Returns zero if unsupported.
 *
 * XXX - Be sure to remove it once all arches implement it.
 */
unsigned long __attribute__((weak)) read_persistent_clock(void)
{
	return 0;
}
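
/*
 * An architecture overrides the weak symbol above by supplying its own
 * definition; a minimal sketch (my_rtc_read_seconds() is a hypothetical
 * helper, not a real arch API):
 *
 *	unsigned long read_persistent_clock(void)
 *	{
 *		return my_rtc_read_seconds();	// battery-backed RTC
 *	}
 */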

/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
	unsigned long flags;
	unsigned long sec = read_persistent_clock();

	write_seqlock_irqsave(&xtime_lock, flags);

	ntp_init();

	clock = clocksource_get_next();
	clocksource_enable(clock);
	clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
	clock->cycle_last = clocksource_read(clock);

	xtime.tv_sec = sec;
	xtime.tv_nsec = 0;
	set_normalized_timespec(&wall_to_monotonic,
				-xtime.tv_sec, -xtime.tv_nsec);
	update_xtime_cache(0);
	total_sleep_time = 0;
	write_sequnlock_irqrestore(&xtime_lock, flags);
}

/* time in seconds when suspend began */
static unsigned long timekeeping_suspend_time;

/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 * @dev:	unused
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/etc are
 * still managed by arch-specific suspend/resume code.
 */
static int timekeeping_resume(struct sys_device *dev)
{
	unsigned long flags;
	unsigned long now = read_persistent_clock();

	clocksource_resume();

	write_seqlock_irqsave(&xtime_lock, flags);

	if (now && (now > timekeeping_suspend_time)) {
		unsigned long sleep_length = now - timekeeping_suspend_time;

		xtime.tv_sec += sleep_length;
		wall_to_monotonic.tv_sec -= sleep_length;
		total_sleep_time += sleep_length;
	}
	update_xtime_cache(0);
	/* re-base the last cycle value */
	clock->cycle_last = 0;
	clock->cycle_last = clocksource_read(clock);
	clock->error = 0;
	timekeeping_suspended = 0;
	write_sequnlock_irqrestore(&xtime_lock, flags);

	touch_softlockup_watchdog();

	clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);

	/* Resume hrtimers */
	hres_timers_resume();

	return 0;
}

static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
{
	unsigned long flags;

	timekeeping_suspend_time = read_persistent_clock();

	write_seqlock_irqsave(&xtime_lock, flags);
	clocksource_forward_now();
	timekeeping_suspended = 1;
	write_sequnlock_irqrestore(&xtime_lock, flags);

	clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);

	return 0;
}

/* sysfs resume/suspend bits for timekeeping */
static struct sysdev_class timekeeping_sysclass = {
	.name		= "timekeeping",
	.resume		= timekeeping_resume,
	.suspend	= timekeeping_suspend,
};

static struct sys_device device_timer = {
	.id		= 0,
	.cls		= &timekeeping_sysclass,
};

static int __init timekeeping_init_device(void)
{
	int error = sysdev_class_register(&timekeeping_sysclass);
	if (!error)
		error = sysdev_register(&device_timer);
	return error;
}

device_initcall(timekeeping_init_device);

/*
 * If the error is already larger, we look ahead even further
 * to compensate for late or lost adjustments.
 */
static __always_inline int clocksource_bigadjust(s64 error, s64 *interval,
						 s64 *offset)
{
	s64 tick_error, i;
	u32 look_ahead, adj;
	s32 error2, mult;

	/*
	 * Use the current error value to determine how much to look ahead.
	 * The larger the error the slower we adjust for it to avoid problems
	 * with losing too many ticks, otherwise we would overadjust and
	 * produce an even larger error.  The smaller the adjustment the
	 * faster we try to adjust for it, as lost ticks can do less harm
	 * here.  This is tuned so that an error of about 1 msec is adjusted
	 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
	 */
	error2 = clock->error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
	error2 = abs(error2);
	for (look_ahead = 0; error2 > 0; look_ahead++)
		error2 >>= 2;

	/*
	 * Now calculate the error in (1 << look_ahead) ticks, but first
	 * remove the single look ahead already included in the error.
	 */
	tick_error = tick_length >> (NTP_SCALE_SHIFT - clock->shift + 1);
	tick_error -= clock->xtime_interval >> 1;
	error = ((error - tick_error) >> look_ahead) + tick_error;

	/* Finally calculate the adjustment shift value. */
	i = *interval;
	mult = 1;
	if (error < 0) {
		error = -error;
		*interval = -*interval;
		*offset = -*offset;
		mult = -1;
	}
	for (adj = 0; error > i; adj++)
		error >>= 1;

	*interval <<= adj;
	*offset <<= adj;
	return mult << adj;
}
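
/*
 * Illustration of the final shift search (made-up numbers): with a
 * positive error of 5*i against an interval i, the loop halves the
 * error until it is <= i, taking adj = 3 steps, so *interval and
 * *offset are scaled by 8 and the returned multiplier adjustment is
 * mult << adj = 8.
 */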

/*
 * Adjust the multiplier to reduce the error value. This is optimized
 * for the most common adjustments of -1, 0 and 1; for other values we
 * can do a bit more work.
 */
static void clocksource_adjust(s64 offset)
{
	s64 error, interval = clock->cycle_interval;
	int adj;

	error = clock->error >> (NTP_SCALE_SHIFT - clock->shift - 1);
	if (error > interval) {
		error >>= 2;
		if (likely(error <= interval))
			adj = 1;
		else
			adj = clocksource_bigadjust(error, &interval, &offset);
	} else if (error < -interval) {
		error >>= 2;
		if (likely(error >= -interval)) {
			adj = -1;
			interval = -interval;
			offset = -offset;
		} else
			adj = clocksource_bigadjust(error, &interval, &offset);
	} else
		return;

	clock->mult += adj;
	clock->xtime_interval += interval;
	clock->xtime_nsec -= offset;
	clock->error -= (interval - offset) <<
			(NTP_SCALE_SHIFT - clock->shift);
}
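
/*
 * Rough intuition for the bookkeeping above: bumping clock->mult by
 * adj makes each future cycle worth an extra adj * 2^-shift
 * nanoseconds. The interval, offset and error updates roughly
 * compensate for the cycles already elapsed in the current tick, so
 * the accumulated NTP error shrinks rather than oscillating.
 */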

/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 * Called from the timer interrupt, must hold a write on xtime_lock.
 */
void update_wall_time(void)
{
	cycle_t offset;

	/* Make sure we're fully resumed: */
	if (unlikely(timekeeping_suspended))
		return;

#ifdef CONFIG_GENERIC_TIME
	offset = (clocksource_read(clock) - clock->cycle_last) & clock->mask;
#else
	offset = clock->cycle_interval;
#endif
	clock->xtime_nsec = (s64)xtime.tv_nsec << clock->shift;

	/* normally this loop will run just once, however in the
	 * case of lost or late ticks, it will accumulate correctly.
	 */
	while (offset >= clock->cycle_interval) {
		/* accumulate one interval */
		offset -= clock->cycle_interval;
		clock->cycle_last += clock->cycle_interval;

		clock->xtime_nsec += clock->xtime_interval;
		if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) {
			clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift;
			xtime.tv_sec++;
			second_overflow();
		}

		clock->raw_time.tv_nsec += clock->raw_interval;
		if (clock->raw_time.tv_nsec >= NSEC_PER_SEC) {
			clock->raw_time.tv_nsec -= NSEC_PER_SEC;
			clock->raw_time.tv_sec++;
		}

		/* accumulate error between NTP and clock interval */
		clock->error += tick_length;
		clock->error -= clock->xtime_interval << (NTP_SCALE_SHIFT - clock->shift);
	}

	/* correct the clock when NTP error is too big */
	clocksource_adjust(offset);

	/*
	 * Since in the loop above, we accumulate any amount of time
	 * in xtime_nsec over a second into xtime.tv_sec, it's possible for
	 * xtime_nsec to be fairly small after the loop. Further, if we're
	 * slightly speeding the clocksource up in clocksource_adjust(),
	 * it's possible the required corrective factor to xtime_nsec could
	 * cause it to underflow.
	 *
	 * Now, we cannot simply roll the accumulated second back, since
	 * the NTP subsystem has been notified via second_overflow. So
	 * instead we push xtime_nsec forward by the amount we underflowed,
	 * and add that amount into the error.
	 *
	 * We'll correct this error next time through this function, when
	 * xtime_nsec is not as small.
	 */
	if (unlikely((s64)clock->xtime_nsec < 0)) {
		s64 neg = -(s64)clock->xtime_nsec;
		clock->xtime_nsec = 0;
		clock->error += neg << (NTP_SCALE_SHIFT - clock->shift);
	}

	/* store full nanoseconds into xtime after rounding it up and
	 * add the remainder to the error difference.
	 */
	xtime.tv_nsec = ((s64)clock->xtime_nsec >> clock->shift) + 1;
	clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift;
	clock->error += clock->xtime_nsec << (NTP_SCALE_SHIFT - clock->shift);

	update_xtime_cache(cyc2ns(clock, offset));

	/* check to see if there is a new clocksource to use */
	change_clocksource();
	update_vsyscall(&xtime, clock);
}

/**
 * getboottime - Return the real time of system boot.
 * @ts:	pointer to the timespec to be set
 *
 * Returns the wall time of system boot in a timespec.
 *
 * This is based on the wall_to_monotonic offset and the total suspend
 * time. Calls to settimeofday will affect the value returned (which
 * basically means that however wrong your real time clock is at boot time,
 * you get the right time here).
 */
void getboottime(struct timespec *ts)
{
	set_normalized_timespec(ts,
		- (wall_to_monotonic.tv_sec + total_sleep_time),
		- wall_to_monotonic.tv_nsec);
}
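
/*
 * Worked example (illustrative numbers): if the wall clock read
 * 1000000000s at boot, wall_to_monotonic starts at -1000000000s. After
 * 30s of suspend, timekeeping_resume() moves wall_to_monotonic to
 * -1000000030s and total_sleep_time to 30s, so getboottime() still
 * returns -(-1000000030 + 30) = 1000000000s: the reported boot moment
 * does not drift across suspend cycles.
 */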

/**
 * monotonic_to_bootbased - Convert the monotonic time to boot based.
 * @ts:	pointer to the timespec to be converted
 */
void monotonic_to_bootbased(struct timespec *ts)
{
	ts->tv_sec += total_sleep_time;
}

unsigned long get_seconds(void)
{
	return xtime_cache.tv_sec;
}
EXPORT_SYMBOL(get_seconds);
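
/*
 * No seqlock here: get_seconds() reads a single, naturally aligned
 * word, which cannot tear on the architectures the kernel supports,
 * and tick-granular staleness is acceptable for its callers.
 */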


struct timespec current_kernel_time(void)
{
	struct timespec now;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);

		now = xtime_cache;
	} while (read_seqretry(&xtime_lock, seq));

	return now;
}
EXPORT_SYMBOL(current_kernel_time);