/*
 * linux/kernel/time/timekeeping.c
 *
 * Kernel timekeeping code and accessor functions
 *
 * This code was moved from linux/kernel/timer.c.
 * Please see that file for copyright and history logs.
 *
 */

#include <linux/timekeeper_internal.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
#include <linux/stop_machine.h>
#include <linux/pvclock_gtod.h>


static struct timekeeper timekeeper;

/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;

static inline void tk_normalize_xtime(struct timekeeper *tk)
{
	while (tk->xtime_nsec >= ((u64)NSEC_PER_SEC << tk->shift)) {
		tk->xtime_nsec -= (u64)NSEC_PER_SEC << tk->shift;
		tk->xtime_sec++;
	}
}
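
/*
 * Illustrative note: tk->xtime_nsec holds nanoseconds left-shifted by
 * tk->shift, so sub-nanosecond precision survives accumulation and NTP
 * adjustment.  With a hypothetical shift of 8, 1 ns is stored as 256,
 * and a full second overflows xtime_nsec once it reaches
 * NSEC_PER_SEC << 8; tk_normalize_xtime() folds such whole seconds
 * into tk->xtime_sec.
 */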

static void tk_set_xtime(struct timekeeper *tk, const struct timespec *ts)
{
	tk->xtime_sec = ts->tv_sec;
	tk->xtime_nsec = (u64)ts->tv_nsec << tk->shift;
}

static void tk_xtime_add(struct timekeeper *tk, const struct timespec *ts)
{
	tk->xtime_sec += ts->tv_sec;
	tk->xtime_nsec += (u64)ts->tv_nsec << tk->shift;
	tk_normalize_xtime(tk);
}

static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec wtm)
{
	struct timespec tmp;

	/*
	 * Verify consistency of: offset_real = -wall_to_monotonic
	 * before modifying anything
	 */
	set_normalized_timespec(&tmp, -tk->wall_to_monotonic.tv_sec,
					-tk->wall_to_monotonic.tv_nsec);
	WARN_ON_ONCE(tk->offs_real.tv64 != timespec_to_ktime(tmp).tv64);
	tk->wall_to_monotonic = wtm;
	set_normalized_timespec(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
	tk->offs_real = timespec_to_ktime(tmp);
}

static void tk_set_sleep_time(struct timekeeper *tk, struct timespec t)
{
	/* Verify consistency before modifying */
	WARN_ON_ONCE(tk->offs_boot.tv64 != timespec_to_ktime(tk->total_sleep_time).tv64);

	tk->total_sleep_time = t;
	tk->offs_boot = timespec_to_ktime(t);
}

/**
 * tk_setup_internals - Set up internals to use clocksource clock.
 *
 * @tk:		Pointer to the timekeeper to set up
 * @clock:	Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
{
	cycle_t interval;
	u64 tmp, ntpinterval;
	struct clocksource *old_clock;

	old_clock = tk->clock;
	tk->clock = clock;
	clock->cycle_last = clock->read(clock);

	/* Do the ns -> cycle conversion first, using original mult */
	tmp = NTP_INTERVAL_LENGTH;
	tmp <<= clock->shift;
	ntpinterval = tmp;
	tmp += clock->mult/2;
	do_div(tmp, clock->mult);
	if (tmp == 0)
		tmp = 1;

	interval = (cycle_t) tmp;
	tk->cycle_interval = interval;

	/* Go back from cycles -> shifted ns */
	tk->xtime_interval = (u64) interval * clock->mult;
	tk->xtime_remainder = ntpinterval - tk->xtime_interval;
	tk->raw_interval =
		((u64) interval * clock->mult) >> clock->shift;

	/* if changing clocks, convert xtime_nsec shift units */
	if (old_clock) {
		int shift_change = clock->shift - old_clock->shift;
		if (shift_change < 0)
			tk->xtime_nsec >>= -shift_change;
		else
			tk->xtime_nsec <<= shift_change;
	}
	tk->shift = clock->shift;

	tk->ntp_error = 0;
	tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;

	/*
	 * The timekeeper keeps its own mult values for the currently
	 * active clocksource. These values will be adjusted via NTP
	 * to counteract clock drifting.
	 */
	tk->mult = clock->mult;
}
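
/*
 * A rough worked example of the interval math above, using made-up
 * clocksource parameters (mult == 2500000, shift == 24, i.e. ~0.149 ns
 * per cycle) and HZ == 100 (NTP_INTERVAL_LENGTH == 10000000 ns):
 *
 *	tmp = 10000000 << 24;			ns -> shifted ns
 *	tmp = (tmp + 2500000/2) / 2500000;	-> 67108864 cycles/tick
 *
 * xtime_interval = interval * mult then converts back to shifted ns,
 * and xtime_remainder keeps the rounding error so that per-tick NTP
 * accounting stays exact.
 */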

/* Timekeeper helper functions. */

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
u32 (*arch_gettimeoffset)(void);

u32 get_arch_timeoffset(void)
{
	if (likely(arch_gettimeoffset))
		return arch_gettimeoffset();
	return 0;
}
#else
static inline u32 get_arch_timeoffset(void) { return 0; }
#endif

static inline s64 timekeeping_get_ns(struct timekeeper *tk)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;
	s64 nsec;

	/* read clocksource: */
	clock = tk->clock;
	cycle_now = clock->read(clock);

	/* calculate the delta since the last update_wall_time: */
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

	nsec = cycle_delta * tk->mult + tk->xtime_nsec;
	nsec >>= tk->shift;

	/* If arch requires, add in get_arch_timeoffset() */
	return nsec + get_arch_timeoffset();
}

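/*
 * timekeeping_get_ns() above interpolates with
 * nsec = (delta * mult + xtime_nsec) >> shift.  As an illustrative
 * sketch with made-up values (mult == 2500000, shift == 24), a delta
 * of 1024 cycles contributes (1024 * 2500000) >> 24 ~= 152 ns beyond
 * the shifted remainder already held in xtime_nsec; doing the addition
 * before the final right shift avoids dropping fractional nanoseconds.
 */
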
static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;
	s64 nsec;

	/* read clocksource: */
	clock = tk->clock;
	cycle_now = clock->read(clock);

	/* calculate the delta since the last update_wall_time: */
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

	/* convert delta to nanoseconds. */
	nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);

	/* If arch requires, add in get_arch_timeoffset() */
	return nsec + get_arch_timeoffset();
}

static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);

static void update_pvclock_gtod(struct timekeeper *tk)
{
	raw_notifier_call_chain(&pvclock_gtod_chain, 0, tk);
}

/**
 * pvclock_gtod_register_notifier - register a pvclock timedata update listener
 */
int pvclock_gtod_register_notifier(struct notifier_block *nb)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long flags;
	int ret;

	write_seqlock_irqsave(&tk->lock, flags);
	ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
	/* update timekeeping data */
	update_pvclock_gtod(tk);
	write_sequnlock_irqrestore(&tk->lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);
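
/*
 * Sketch of how a consumer might hook this chain (illustrative only;
 * the callback and notifier_block names below are hypothetical and not
 * part of this file):
 */
#if 0
static int example_gtod_update(struct notifier_block *nb,
			       unsigned long unused, void *priv)
{
	struct timekeeper *tk = priv;

	/* e.g. refresh a paravirt clock page from tk's fields */
	return NOTIFY_OK;
}

static struct notifier_block example_gtod_nb = {
	.notifier_call = example_gtod_update,
};

/* ... pvclock_gtod_register_notifier(&example_gtod_nb); */
#endif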

/**
 * pvclock_gtod_unregister_notifier - unregister a pvclock
 * timedata update listener
 */
int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long flags;
	int ret;

	write_seqlock_irqsave(&tk->lock, flags);
	ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
	write_sequnlock_irqrestore(&tk->lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);

/* must hold write on timekeeper.lock */
static void timekeeping_update(struct timekeeper *tk, bool clearntp)
{
	if (clearntp) {
		tk->ntp_error = 0;
		ntp_clear();
	}
	update_vsyscall(tk);
	update_pvclock_gtod(tk);
}

/**
 * timekeeping_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void timekeeping_forward_now(struct timekeeper *tk)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;
	s64 nsec;

	clock = tk->clock;
	cycle_now = clock->read(clock);
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
	clock->cycle_last = cycle_now;

	tk->xtime_nsec += cycle_delta * tk->mult;

	/* If arch requires, add in get_arch_timeoffset() */
	tk->xtime_nsec += (u64)get_arch_timeoffset() << tk->shift;

	tk_normalize_xtime(tk);

	nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
	timespec_add_ns(&tk->raw_time, nsec);
}

/**
 * getnstimeofday - Returns the time of day in a timespec
 * @ts: pointer to the timespec to be set
 *
 * Returns the time of day in a timespec.
 */
void getnstimeofday(struct timespec *ts)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long seq;
	s64 nsecs = 0;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&tk->lock);

		ts->tv_sec = tk->xtime_sec;
		nsecs = timekeeping_get_ns(tk);

	} while (read_seqretry(&tk->lock, seq));

	ts->tv_nsec = 0;
	timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getnstimeofday);

ktime_t ktime_get(void)
{
	struct timekeeper *tk = &timekeeper;
	unsigned int seq;
	s64 secs, nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&tk->lock);
		secs = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
		nsecs = timekeeping_get_ns(tk) + tk->wall_to_monotonic.tv_nsec;

	} while (read_seqretry(&tk->lock, seq));
	/*
	 * Use ktime_set/ktime_add_ns to create a proper ktime on
	 * 32-bit architectures without CONFIG_KTIME_SCALAR.
	 */
	return ktime_add_ns(ktime_set(secs, 0), nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get);

/**
 * ktime_get_ts - get the monotonic clock in timespec format
 * @ts: pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts(struct timespec *ts)
{
	struct timekeeper *tk = &timekeeper;
	struct timespec tomono;
	s64 nsec;
	unsigned int seq;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&tk->lock);
		ts->tv_sec = tk->xtime_sec;
		nsec = timekeeping_get_ns(tk);
		tomono = tk->wall_to_monotonic;

	} while (read_seqretry(&tk->lock, seq));

	ts->tv_sec += tomono.tv_sec;
	ts->tv_nsec = 0;
	timespec_add_ns(ts, nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);

#ifdef CONFIG_NTP_PPS

/**
 * getnstime_raw_and_real - get day and raw monotonic time in timespec format
 * @ts_raw: pointer to the timespec to be set to raw monotonic time
 * @ts_real: pointer to the timespec to be set to the time of day
 *
 * This function reads both the time of day and raw monotonic time at the
 * same time atomically and stores the resulting timestamps in timespec
 * format.
 */
void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long seq;
	s64 nsecs_raw, nsecs_real;

	WARN_ON_ONCE(timekeeping_suspended);

	do {
		seq = read_seqbegin(&tk->lock);

		*ts_raw = tk->raw_time;
		ts_real->tv_sec = tk->xtime_sec;
		ts_real->tv_nsec = 0;

		nsecs_raw = timekeeping_get_ns_raw(tk);
		nsecs_real = timekeeping_get_ns(tk);

	} while (read_seqretry(&tk->lock, seq));

	timespec_add_ns(ts_raw, nsecs_raw);
	timespec_add_ns(ts_real, nsecs_real);
}
EXPORT_SYMBOL(getnstime_raw_and_real);

#endif /* CONFIG_NTP_PPS */

/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv: pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday()
 */
void do_gettimeofday(struct timeval *tv)
{
	struct timespec now;

	getnstimeofday(&now);
	tv->tv_sec = now.tv_sec;
	tv->tv_usec = now.tv_nsec/1000;
}
EXPORT_SYMBOL(do_gettimeofday);

/**
 * do_settimeofday - Sets the time of day
 * @tv: pointer to the timespec variable containing the new time
 *
 * Sets the time of day to the new time, updates NTP and notifies hrtimers
 */
int do_settimeofday(const struct timespec *tv)
{
	struct timekeeper *tk = &timekeeper;
	struct timespec ts_delta, xt;
	unsigned long flags;

	if (!timespec_valid_strict(tv))
		return -EINVAL;

	write_seqlock_irqsave(&tk->lock, flags);

	timekeeping_forward_now(tk);

	xt = tk_xtime(tk);
	ts_delta.tv_sec = tv->tv_sec - xt.tv_sec;
	ts_delta.tv_nsec = tv->tv_nsec - xt.tv_nsec;

	tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, ts_delta));

	tk_set_xtime(tk, tv);

	timekeeping_update(tk, true);

	write_sequnlock_irqrestore(&tk->lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return 0;
}
EXPORT_SYMBOL(do_settimeofday);

/**
 * timekeeping_inject_offset - Adds or subtracts from the current time.
 * @ts: pointer to the timespec variable containing the offset
 *
 * Adds or subtracts an offset value from the current time.
 */
int timekeeping_inject_offset(struct timespec *ts)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long flags;
	struct timespec tmp;
	int ret = 0;

	if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&tk->lock, flags);

	timekeeping_forward_now(tk);

	/* Make sure the proposed value is valid */
	tmp = timespec_add(tk_xtime(tk), *ts);
	if (!timespec_valid_strict(&tmp)) {
		ret = -EINVAL;
		goto error;
	}

	tk_xtime_add(tk, ts);
	tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *ts));

error: /* even if we error out, we forwarded the time, so call update */
	timekeeping_update(tk, true);

	write_sequnlock_irqrestore(&tk->lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return ret;
}
EXPORT_SYMBOL(timekeeping_inject_offset);

/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static int change_clocksource(void *data)
{
	struct timekeeper *tk = &timekeeper;
	struct clocksource *new, *old;
	unsigned long flags;

	new = (struct clocksource *) data;

	write_seqlock_irqsave(&tk->lock, flags);

	timekeeping_forward_now(tk);
	if (!new->enable || new->enable(new) == 0) {
		old = tk->clock;
		tk_setup_internals(tk, new);
		if (old->disable)
			old->disable(old);
	}
	timekeeping_update(tk, true);

	write_sequnlock_irqrestore(&tk->lock, flags);

	return 0;
}

/**
 * timekeeping_notify - Install a new clock source
 * @clock: pointer to the clock source
 *
 * This function is called from clocksource.c after a new, better clock
 * source has been registered. The caller holds the clocksource_mutex.
 */
void timekeeping_notify(struct clocksource *clock)
{
	struct timekeeper *tk = &timekeeper;

	if (tk->clock == clock)
		return;
	stop_machine(change_clocksource, clock, NULL);
	tick_clock_notify();
}

/**
 * ktime_get_real - get the real (wall-) time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get_real(void)
{
	struct timespec now;

	getnstimeofday(&now);

	return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get_real);

/**
 * getrawmonotonic - Returns the raw monotonic time in a timespec
 * @ts: pointer to the timespec to be set
 *
 * Returns the raw monotonic time (completely un-modified by ntp)
 */
void getrawmonotonic(struct timespec *ts)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long seq;
	s64 nsecs;

	do {
		seq = read_seqbegin(&tk->lock);
		nsecs = timekeeping_get_ns_raw(tk);
		*ts = tk->raw_time;

	} while (read_seqretry(&tk->lock, seq));

	timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getrawmonotonic);

/**
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
 */
int timekeeping_valid_for_hres(void)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long seq;
	int ret;

	do {
		seq = read_seqbegin(&tk->lock);

		ret = tk->clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;

	} while (read_seqretry(&tk->lock, seq));

	return ret;
}

/**
 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
 */
u64 timekeeping_max_deferment(void)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long seq;
	u64 ret;

	do {
		seq = read_seqbegin(&tk->lock);

		ret = tk->clock->max_idle_ns;

	} while (read_seqretry(&tk->lock, seq));

	return ret;
}

/**
 * read_persistent_clock - Return time from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Reads the time from the battery backed persistent clock.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 * XXX - Do be sure to remove it once all arches implement it.
 */
void __attribute__((weak)) read_persistent_clock(struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}

/**
 * read_boot_clock - Return time of the system start.
 *
 * Weak dummy function for arches that do not yet support it.
 * Function to read the exact time the system has been started.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 * XXX - Do be sure to remove it once all arches implement it.
 */
void __attribute__((weak)) read_boot_clock(struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}

/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
	struct timekeeper *tk = &timekeeper;
	struct clocksource *clock;
	unsigned long flags;
	struct timespec now, boot, tmp;

	read_persistent_clock(&now);
	if (!timespec_valid_strict(&now)) {
		pr_warn("WARNING: Persistent clock returned invalid value!\n"
			"         Check your CMOS/BIOS settings.\n");
		now.tv_sec = 0;
		now.tv_nsec = 0;
	}

	read_boot_clock(&boot);
	if (!timespec_valid_strict(&boot)) {
		pr_warn("WARNING: Boot clock returned invalid value!\n"
			"         Check your CMOS/BIOS settings.\n");
		boot.tv_sec = 0;
		boot.tv_nsec = 0;
	}

	seqlock_init(&tk->lock);

	ntp_init();

	write_seqlock_irqsave(&tk->lock, flags);
	clock = clocksource_default_clock();
	if (clock->enable)
		clock->enable(clock);
	tk_setup_internals(tk, clock);

	tk_set_xtime(tk, &now);
	tk->raw_time.tv_sec = 0;
	tk->raw_time.tv_nsec = 0;
	if (boot.tv_sec == 0 && boot.tv_nsec == 0)
		boot = tk_xtime(tk);

	set_normalized_timespec(&tmp, -boot.tv_sec, -boot.tv_nsec);
	tk_set_wall_to_mono(tk, tmp);

	tmp.tv_sec = 0;
	tmp.tv_nsec = 0;
	tk_set_sleep_time(tk, tmp);

	write_sequnlock_irqrestore(&tk->lock, flags);
}

/* time in seconds when suspend began */
static struct timespec timekeeping_suspend_time;

/**
 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
 * @delta: pointer to a timespec delta value
 *
 * Takes a timespec offset measuring a suspend interval and properly
 * adds the sleep offset to the timekeeping variables.
 */
static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
							struct timespec *delta)
{
	if (!timespec_valid_strict(delta)) {
		printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid "
					"sleep delta value!\n");
		return;
	}
	tk_xtime_add(tk, delta);
	tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *delta));
	tk_set_sleep_time(tk, timespec_add(tk->total_sleep_time, *delta));
}
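
/*
 * Bookkeeping sketch for the three updates above, assuming a
 * hypothetical 30 second suspend (delta = {30, 0}):
 *
 *	xtime (CLOCK_REALTIME)	+= 30s	wall clock catches up
 *	wall_to_monotonic	-= 30s	CLOCK_MONOTONIC is unchanged
 *	total_sleep_time	+= 30s	CLOCK_BOOTTIME gains 30s
 *
 * i.e. the wall and boottime clocks reflect the time slept, while the
 * monotonic clock behaves as if the suspend never happened.
 */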

/**
 * timekeeping_inject_sleeptime - Adds suspend interval to timekeeping values
 * @delta: pointer to a timespec delta value
 *
 * This hook is for architectures that cannot support read_persistent_clock
 * because their RTC/persistent clock is only accessible when irqs are enabled.
 *
 * This function should only be called by rtc_resume(), and allows
 * a suspend offset to be injected into the timekeeping values.
 */
void timekeeping_inject_sleeptime(struct timespec *delta)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long flags;
	struct timespec ts;

	/* Make sure we don't set the clock twice */
	read_persistent_clock(&ts);
	if (!(ts.tv_sec == 0 && ts.tv_nsec == 0))
		return;

	write_seqlock_irqsave(&tk->lock, flags);

	timekeeping_forward_now(tk);

	__timekeeping_inject_sleeptime(tk, delta);

	timekeeping_update(tk, true);

	write_sequnlock_irqrestore(&tk->lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();
}

/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
static void timekeeping_resume(void)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long flags;
	struct timespec ts;

	read_persistent_clock(&ts);

	clockevents_resume();
	clocksource_resume();

	write_seqlock_irqsave(&tk->lock, flags);

	if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
		ts = timespec_sub(ts, timekeeping_suspend_time);
		__timekeeping_inject_sleeptime(tk, &ts);
	}
	/* re-base the last cycle value */
	tk->clock->cycle_last = tk->clock->read(tk->clock);
	tk->ntp_error = 0;
	timekeeping_suspended = 0;
	timekeeping_update(tk, false);
	write_sequnlock_irqrestore(&tk->lock, flags);

	touch_softlockup_watchdog();

	clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);

	/* Resume hrtimers */
	hrtimers_resume();
}

static int timekeeping_suspend(void)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long flags;
	struct timespec delta, delta_delta;
	static struct timespec old_delta;

	read_persistent_clock(&timekeeping_suspend_time);

	write_seqlock_irqsave(&tk->lock, flags);
	timekeeping_forward_now(tk);
	timekeeping_suspended = 1;

	/*
	 * To avoid drift caused by repeated suspend/resumes,
	 * which each can add ~1 second drift error,
	 * try to compensate so the difference in system time
	 * and persistent_clock time stays close to constant.
	 */
	delta = timespec_sub(tk_xtime(tk), timekeeping_suspend_time);
	delta_delta = timespec_sub(delta, old_delta);
	if (abs(delta_delta.tv_sec) >= 2) {
		/*
		 * if delta_delta is too large, assume time correction
		 * has occurred and set old_delta to the current delta.
		 */
		old_delta = delta;
	} else {
		/* Otherwise try to adjust timekeeping_suspend_time to compensate */
		timekeeping_suspend_time =
			timespec_add(timekeeping_suspend_time, delta_delta);
	}
	write_sequnlock_irqrestore(&tk->lock, flags);

	clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
	clocksource_suspend();
	clockevents_suspend();

	return 0;
}

/* sysfs resume/suspend bits for timekeeping */
static struct syscore_ops timekeeping_syscore_ops = {
	.resume		= timekeeping_resume,
	.suspend	= timekeeping_suspend,
};

static int __init timekeeping_init_ops(void)
{
	register_syscore_ops(&timekeeping_syscore_ops);
	return 0;
}

device_initcall(timekeeping_init_ops);

/*
 * If the error is already larger, we look ahead even further
 * to compensate for late or lost adjustments.
 */
static __always_inline int timekeeping_bigadjust(struct timekeeper *tk,
						 s64 error, s64 *interval,
						 s64 *offset)
{
	s64 tick_error, i;
	u32 look_ahead, adj;
	s32 error2, mult;

	/*
	 * Use the current error value to determine how much to look ahead.
	 * The larger the error the slower we adjust for it to avoid problems
	 * with losing too many ticks, otherwise we would overadjust and
	 * produce an even larger error.  The smaller the adjustment the
	 * faster we try to adjust for it, as lost ticks can do less harm
	 * here.  This is tuned so that an error of about 1 msec is adjusted
	 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
	 */
	error2 = tk->ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
	error2 = abs(error2);
	for (look_ahead = 0; error2 > 0; look_ahead++)
		error2 >>= 2;

	/*
	 * Now calculate the error in (1 << look_ahead) ticks, but first
	 * remove the single look ahead already included in the error.
	 */
	tick_error = ntp_tick_length() >> (tk->ntp_error_shift + 1);
	tick_error -= tk->xtime_interval >> 1;
	error = ((error - tick_error) >> look_ahead) + tick_error;

	/* Finally calculate the adjustment shift value. */
	i = *interval;
	mult = 1;
	if (error < 0) {
		error = -error;
		*interval = -*interval;
		*offset = -*offset;
		mult = -1;
	}
	for (adj = 0; error > i; adj++)
		error >>= 1;

	*interval <<= adj;
	*offset <<= adj;
	return mult << adj;
}

/*
 * Adjust the multiplier to reduce the error value,
 * this is optimized for the most common adjustments of -1,0,1,
 * for other values we can do a bit more work.
 */
static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
{
	s64 error, interval = tk->cycle_interval;
	int adj;

	/*
	 * The point of this is to check if the error is greater than half
	 * an interval.
	 *
	 * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs.
	 *
	 * Note we subtract one in the shift, so that error is really error*2.
	 * This "saves" dividing(shifting) interval twice, but keeps the
	 * (error > interval) comparison as still measuring if error is
	 * larger than half an interval.
	 *
	 * Note: It does not "save" on aggravation when reading the code.
	 */
	error = tk->ntp_error >> (tk->ntp_error_shift - 1);
	if (error > interval) {
		/*
		 * We now divide error by 4 (via shift), which checks if
		 * the error is greater than twice the interval.
		 * If it is greater, we need a bigadjust, if it's smaller,
		 * we can adjust by 1.
		 */
		error >>= 2;
		/*
		 * XXX - In update_wall_time, we round up to the next
		 * nanosecond, and store the amount rounded up into
		 * the error. This causes the likely below to be unlikely.
		 *
		 * The proper fix is to avoid rounding up by using
		 * the high precision tk->xtime_nsec instead of
		 * xtime.tv_nsec everywhere. Fixing this will take some
		 * time.
		 */
		if (likely(error <= interval))
			adj = 1;
		else
			adj = timekeeping_bigadjust(tk, error, &interval, &offset);
	} else {
		if (error < -interval) {
			/* See comment above, this is just switched for the negative */
			error >>= 2;
			if (likely(error >= -interval)) {
				adj = -1;
				interval = -interval;
				offset = -offset;
			} else {
				adj = timekeeping_bigadjust(tk, error, &interval, &offset);
			}
		} else {
			goto out_adjust;
		}
	}

	if (unlikely(tk->clock->maxadj &&
		(tk->mult + adj > tk->clock->mult + tk->clock->maxadj))) {
		printk_once(KERN_WARNING
			"Adjusting %s more than 11%% (%ld vs %ld)\n",
			tk->clock->name, (long)tk->mult + adj,
			(long)tk->clock->mult + tk->clock->maxadj);
	}
	/*
	 * So the following can be confusing.
	 *
	 * To keep things simple, let's assume adj == 1 for now.
	 *
	 * When adj != 1, remember that the interval and offset values
	 * have been appropriately scaled so the math is the same.
	 *
	 * The basic idea here is that we're increasing the multiplier
	 * by one, this causes the xtime_interval to be incremented by
	 * one cycle_interval.  This is because:
	 *	xtime_interval = cycle_interval * mult
	 * So if mult is being incremented by one:
	 *	xtime_interval = cycle_interval * (mult + 1)
	 * It's the same as:
	 *	xtime_interval = (cycle_interval * mult) + cycle_interval
	 * Which can be shortened to:
	 *	xtime_interval += cycle_interval
	 *
	 * So offset stores the non-accumulated cycles. Thus the current
	 * time (in shifted nanoseconds) is:
	 *	now = (offset * adj) + xtime_nsec
	 * Now, even though we're adjusting the clock frequency, we have
	 * to keep time consistent. In other words, we can't jump back
	 * in time, and we also want to avoid jumping forward in time.
	 *
	 * So given the same offset value, we need the time to be the same
	 * both before and after the freq adjustment.
	 *	now = (offset * adj_1) + xtime_nsec_1
	 *	now = (offset * adj_2) + xtime_nsec_2
	 * So:
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * adj_2) + xtime_nsec_2
	 * And we know:
	 *	adj_2 = adj_1 + 1
	 * So:
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * (adj_1+1)) + xtime_nsec_2
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * adj_1) + offset + xtime_nsec_2
	 * Canceling the sides:
	 *	xtime_nsec_1 = offset + xtime_nsec_2
	 * Which gives us:
	 *	xtime_nsec_2 = xtime_nsec_1 - offset
	 * Which simplifies to:
	 *	xtime_nsec -= offset
	 *
	 * XXX - TODO: Doc ntp_error calculation.
	 */
	tk->mult += adj;
	tk->xtime_interval += interval;
	tk->xtime_nsec -= offset;
	tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;

out_adjust:
	/*
	 * It may be possible that when we entered this function, xtime_nsec
	 * was very small.  Further, if we're slightly speeding the clocksource
	 * in the code above, it's possible the required corrective factor to
	 * xtime_nsec could cause it to underflow.
	 *
	 * Now, since we already accumulated the second, we cannot simply roll
	 * the accumulated second back, since the NTP subsystem has been
	 * notified via second_overflow(). So instead we push xtime_nsec forward
	 * by the amount we underflowed, and add that amount into the error.
	 *
	 * We'll correct this error next time through this function, when
	 * xtime_nsec is not as small.
	 */
	if (unlikely((s64)tk->xtime_nsec < 0)) {
		s64 neg = -(s64)tk->xtime_nsec;
		tk->xtime_nsec = 0;
		tk->ntp_error += neg << tk->ntp_error_shift;
	}
}
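
/*
 * Worked example of the consistency argument above, with made-up
 * numbers: let offset == 1000 unaccumulated cycles, mult == 100 and
 * xtime_nsec == 500000 (all in shifted-ns units), so interpolated
 * "now" is 1000 * 100 + 500000 = 600000.  If NTP asks us to speed up
 * by one (mult becomes 101), leaving xtime_nsec alone would read
 * 1000 * 101 + 500000 = 601000: a forward jump.  Subtracting offset
 * (xtime_nsec = 499000) restores 1000 * 101 + 499000 = 600000, the
 * same instant, with only future cycles accruing at the new rate.
 */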

/**
 * accumulate_nsecs_to_secs - Accumulates nsecs into secs
 *
 * Helper function that accumulates the nsecs greater than a second
 * from the xtime_nsec field to the xtime_secs field.
 * It also calls into the NTP code to handle leapsecond processing.
 *
 */
static inline void accumulate_nsecs_to_secs(struct timekeeper *tk)
{
	u64 nsecps = (u64)NSEC_PER_SEC << tk->shift;

	while (tk->xtime_nsec >= nsecps) {
		int leap;

		tk->xtime_nsec -= nsecps;
		tk->xtime_sec++;

		/* Figure out if it's a leap sec and apply if needed */
		leap = second_overflow(tk->xtime_sec);
		if (unlikely(leap)) {
			struct timespec ts;

			tk->xtime_sec += leap;

			ts.tv_sec = leap;
			ts.tv_nsec = 0;
			tk_set_wall_to_mono(tk,
				timespec_sub(tk->wall_to_monotonic, ts));

			clock_was_set_delayed();
		}
	}
}

/**
 * logarithmic_accumulation - shifted accumulation of cycles
 *
 * This function accumulates a shifted interval of cycles into
 * a shifted interval of nanoseconds, allowing for an O(log)
 * accumulation loop.
 *
 * Returns the unconsumed cycles.
 */
static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
						u32 shift)
{
	u64 raw_nsecs;

	/* If the offset is smaller than a shifted interval, do nothing */
	if (offset < tk->cycle_interval<<shift)
		return offset;

	/* Accumulate one shifted interval */
	offset -= tk->cycle_interval << shift;
	tk->clock->cycle_last += tk->cycle_interval << shift;

	tk->xtime_nsec += tk->xtime_interval << shift;
	accumulate_nsecs_to_secs(tk);

	/* Accumulate raw time */
	raw_nsecs = (u64)tk->raw_interval << shift;
	raw_nsecs += tk->raw_time.tv_nsec;
	if (raw_nsecs >= NSEC_PER_SEC) {
		u64 raw_secs = raw_nsecs;
		raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
		tk->raw_time.tv_sec += raw_secs;
	}
	tk->raw_time.tv_nsec = raw_nsecs;

	/* Accumulate error between NTP and clock interval */
	tk->ntp_error += ntp_tick_length() << shift;
	tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
						(tk->ntp_error_shift + shift);

	return offset;
}
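
/*
 * Illustration of the O(log) accumulation with hypothetical numbers:
 * if a long NO_HZ idle period left offset == 1000 * cycle_interval,
 * update_wall_time() starts at shift == ilog2(1000) == 9 and this
 * function consumes chunks of 512, 256, 128, 64, 32 and finally 8
 * intervals as the caller shrinks the shift, i.e. a handful of passes
 * instead of the 1000 a tick-at-a-time loop would need.
 */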

#ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD
static inline void old_vsyscall_fixup(struct timekeeper *tk)
{
	s64 remainder;

	/*
	 * Store only full nanoseconds into xtime_nsec after rounding
	 * it up and add the remainder to the error difference.
	 * XXX - This is necessary to avoid small 1ns inconsistencies caused
	 * by truncating the remainder in vsyscalls. However, it causes
	 * additional work to be done in timekeeping_adjust(). Once
	 * the vsyscall implementations are converted to use xtime_nsec
	 * (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
	 * users are removed, this can be killed.
	 */
	remainder = tk->xtime_nsec & ((1ULL << tk->shift) - 1);
	tk->xtime_nsec -= remainder;
	tk->xtime_nsec += 1ULL << tk->shift;
	tk->ntp_error += remainder << tk->ntp_error_shift;
}
#else
#define old_vsyscall_fixup(tk)
#endif

john stultz85240702007-05-08 00:27:59 -07001152/**
1153 * update_wall_time - Uses the current clocksource to increment the wall time
1154 *
john stultz85240702007-05-08 00:27:59 -07001155 */
Torben Hohn871cf1e2011-01-27 15:58:55 +01001156static void update_wall_time(void)
john stultz85240702007-05-08 00:27:59 -07001157{
Martin Schwidefsky155ec602009-08-14 15:47:26 +02001158 struct clocksource *clock;
John Stultz4e250fd2012-07-27 14:48:13 -04001159 struct timekeeper *tk = &timekeeper;
john stultz85240702007-05-08 00:27:59 -07001160 cycle_t offset;
john stultza092ff02009-10-02 16:17:53 -07001161 int shift = 0, maxshift;
John Stultz70471f22011-11-14 12:48:10 -08001162 unsigned long flags;
1163
John Stultz4e250fd2012-07-27 14:48:13 -04001164 write_seqlock_irqsave(&tk->lock, flags);
john stultz85240702007-05-08 00:27:59 -07001165
1166 /* Make sure we're fully resumed: */
1167 if (unlikely(timekeeping_suspended))
John Stultz70471f22011-11-14 12:48:10 -08001168 goto out;
john stultz85240702007-05-08 00:27:59 -07001169
John Stultz4e250fd2012-07-27 14:48:13 -04001170 clock = tk->clock;
John Stultz592913e2010-07-13 17:56:20 -07001171
1172#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
John Stultz4e250fd2012-07-27 14:48:13 -04001173 offset = tk->cycle_interval;
John Stultz592913e2010-07-13 17:56:20 -07001174#else
1175 offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
john stultz85240702007-05-08 00:27:59 -07001176#endif
john stultz85240702007-05-08 00:27:59 -07001177
John Stultzbf2ac312012-08-21 20:30:49 -04001178 /* Check if there's really nothing to do */
1179 if (offset < tk->cycle_interval)
1180 goto out;
1181
john stultza092ff02009-10-02 16:17:53 -07001182 /*
1183 * With NO_HZ we may have to accumulate many cycle_intervals
1184 * (think "ticks") worth of time at once. To do this efficiently,
1185 * we calculate the largest doubling multiple of cycle_intervals
Jim Cromie88b28ad2012-03-14 21:28:56 -06001186 * that is smaller than the offset. We then accumulate that
john stultza092ff02009-10-02 16:17:53 -07001187 * chunk in one go, and then try to consume the next smaller
1188 * doubled multiple.
john stultz85240702007-05-08 00:27:59 -07001189 */
        shift = ilog2(offset) - ilog2(tk->cycle_interval);
        shift = max(0, shift);
        /* Bound shift to one less than what overflows tick_length */
        maxshift = (64 - (ilog2(ntp_tick_length()) + 1)) - 1;
        shift = min(shift, maxshift);
        while (offset >= tk->cycle_interval) {
                offset = logarithmic_accumulation(tk, offset, shift);
                if (offset < tk->cycle_interval << shift)
                        shift--;
        }

        /* correct the clock when NTP error is too big */
        timekeeping_adjust(tk, offset);

        /*
         * XXX This can be killed once everyone converts
         * to the new update_vsyscall.
         */
        old_vsyscall_fixup(tk);

        /*
         * Finally, make sure that after the rounding
         * xtime_nsec isn't larger than NSEC_PER_SEC
         */
        accumulate_nsecs_to_secs(tk);

        timekeeping_update(tk, false);

out:
        write_sequnlock_irqrestore(&tk->lock, flags);
}

/**
 * getboottime - Return the real time of system boot.
 * @ts: pointer to the timespec to be set
 *
 * Returns the wall-time of boot in a timespec.
 *
 * This is based on the wall_to_monotonic offset and the total suspend
 * time. Calls to settimeofday will affect the value returned (which
 * basically means that however wrong your real time clock is at boot time,
 * you get the right time here).
 */
void getboottime(struct timespec *ts)
{
        struct timekeeper *tk = &timekeeper;
        struct timespec boottime = {
                .tv_sec = tk->wall_to_monotonic.tv_sec +
                                tk->total_sleep_time.tv_sec,
                .tv_nsec = tk->wall_to_monotonic.tv_nsec +
                                tk->total_sleep_time.tv_nsec
        };

        set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
}
EXPORT_SYMBOL_GPL(getboottime);
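
/*
 * Worked example (illustrative, not part of the original source): if the
 * wall clock read 1000s when the monotonic clock started and the system
 * has since slept 50s, wall_to_monotonic is -1050s (monotonic time
 * excludes suspend) and total_sleep_time is 50s, so boottime above is
 * -1000s and getboottime() reports the original 1000s boot wall time.
 */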

/**
 * get_monotonic_boottime - Returns monotonic time since boot
 * @ts: pointer to the timespec to be set
 *
 * Returns the monotonic time since boot in a timespec.
 *
 * This is similar to CLOCK_MONOTONIC/ktime_get_ts, but also
 * includes the time spent in suspend.
 */
void get_monotonic_boottime(struct timespec *ts)
{
        struct timekeeper *tk = &timekeeper;
        struct timespec tomono, sleep;
        s64 nsec;
        unsigned int seq;

        WARN_ON(timekeeping_suspended);

        do {
                seq = read_seqbegin(&tk->lock);
                ts->tv_sec = tk->xtime_sec;
                nsec = timekeeping_get_ns(tk);
                tomono = tk->wall_to_monotonic;
                sleep = tk->total_sleep_time;
        } while (read_seqretry(&tk->lock, seq));

        ts->tv_sec += tomono.tv_sec + sleep.tv_sec;
        ts->tv_nsec = 0;
        timespec_add_ns(ts, nsec + tomono.tv_nsec + sleep.tv_nsec);
}
EXPORT_SYMBOL_GPL(get_monotonic_boottime);

/**
 * ktime_get_boottime - Returns monotonic time since boot in a ktime
 *
 * Returns the monotonic time since boot in a ktime
 *
 * This is similar to CLOCK_MONOTONIC/ktime_get, but also
 * includes the time spent in suspend.
 */
ktime_t ktime_get_boottime(void)
{
        struct timespec ts;

        get_monotonic_boottime(&ts);
        return timespec_to_ktime(ts);
}
EXPORT_SYMBOL_GPL(ktime_get_boottime);
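
/*
 * Usage sketch (hypothetical caller, not part of this file): a driver
 * wanting suspend-aware uptime might do something like:
 *
 *	struct timespec uptime;
 *
 *	get_monotonic_boottime(&uptime);
 *	pr_info("up %ld.%09ld s including suspend\n",
 *		uptime.tv_sec, uptime.tv_nsec);
 *
 * Unlike ktime_get()/CLOCK_MONOTONIC, these values keep advancing while
 * the system is suspended.
 */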

/**
 * monotonic_to_bootbased - Convert the monotonic time to boot based.
 * @ts: pointer to the timespec to be converted
 */
void monotonic_to_bootbased(struct timespec *ts)
{
        struct timekeeper *tk = &timekeeper;

        *ts = timespec_add(*ts, tk->total_sleep_time);
}
EXPORT_SYMBOL_GPL(monotonic_to_bootbased);

unsigned long get_seconds(void)
{
        struct timekeeper *tk = &timekeeper;

        return tk->xtime_sec;
}
EXPORT_SYMBOL(get_seconds);

struct timespec __current_kernel_time(void)
{
        struct timekeeper *tk = &timekeeper;

        return tk_xtime(tk);
}

struct timespec current_kernel_time(void)
{
        struct timekeeper *tk = &timekeeper;
        struct timespec now;
        unsigned long seq;

        do {
                seq = read_seqbegin(&tk->lock);
                now = tk_xtime(tk);
        } while (read_seqretry(&tk->lock, seq));

        return now;
}
EXPORT_SYMBOL(current_kernel_time);
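
/*
 * Note on the read pattern above (explanatory addition, not from the
 * original source): read_seqbegin() and read_seqretry() form a lockless
 * reader loop against tk->lock. If a writer such as update_wall_time()
 * runs concurrently, the sequence count changes, read_seqretry() returns
 * true, and the reader simply re-snapshots tk_xtime() until it observes
 * a consistent value; readers never block the timekeeping writer.
 */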

struct timespec get_monotonic_coarse(void)
{
        struct timekeeper *tk = &timekeeper;
        struct timespec now, mono;
        unsigned long seq;

        do {
                seq = read_seqbegin(&tk->lock);
                now = tk_xtime(tk);
                mono = tk->wall_to_monotonic;
        } while (read_seqretry(&tk->lock, seq));

        set_normalized_timespec(&now, now.tv_sec + mono.tv_sec,
                                now.tv_nsec + mono.tv_nsec);
        return now;
}

/*
 * Must hold jiffies_lock
 */
void do_timer(unsigned long ticks)
{
        jiffies_64 += ticks;
        update_wall_time();
        calc_global_load(ticks);
}

/**
 * get_xtime_and_monotonic_and_sleep_offset() - get xtime, wall_to_monotonic,
 *    and sleep offsets.
 * @xtim:	pointer to timespec to be set with xtime
 * @wtom:	pointer to timespec to be set with wall_to_monotonic
 * @sleep:	pointer to timespec to be set with time in suspend
 */
void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
                                struct timespec *wtom, struct timespec *sleep)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long seq;

        do {
                seq = read_seqbegin(&tk->lock);
                *xtim = tk_xtime(tk);
                *wtom = tk->wall_to_monotonic;
                *sleep = tk->total_sleep_time;
        } while (read_seqretry(&tk->lock, seq));
}

#ifdef CONFIG_HIGH_RES_TIMERS
/**
 * ktime_get_update_offsets - hrtimer helper
 * @offs_real:	pointer to storage for monotonic -> realtime offset
 * @offs_boot:	pointer to storage for monotonic -> boottime offset
 *
 * Returns current monotonic time and updates the offsets
 * Called from hrtimer_interrupt() or retrigger_next_event()
 */
ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot)
{
        struct timekeeper *tk = &timekeeper;
        ktime_t now;
        unsigned int seq;
        u64 secs, nsecs;

        do {
                seq = read_seqbegin(&tk->lock);

                secs = tk->xtime_sec;
                nsecs = timekeeping_get_ns(tk);

                *offs_real = tk->offs_real;
                *offs_boot = tk->offs_boot;
        } while (read_seqretry(&tk->lock, seq));

        now = ktime_add_ns(ktime_set(secs, 0), nsecs);
        now = ktime_sub(now, *offs_real);
        return now;
}
#endif
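
/*
 * Explanatory note (not from the original source): the value returned by
 * ktime_get_update_offsets() is CLOCK_MONOTONIC, since offs_real is the
 * monotonic -> realtime offset and is subtracted from the xtime-based
 * reading. A caller in the hrtimer code can then derive the other clock
 * bases without taking the seqlock again:
 *
 *	ktime_t mono = ktime_get_update_offsets(&offs_real, &offs_boot);
 *	ktime_t real = ktime_add(mono, offs_real);
 *	ktime_t boot = ktime_add(mono, offs_boot);
 */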

/**
 * ktime_get_monotonic_offset() - get wall_to_monotonic in ktime_t format
 */
ktime_t ktime_get_monotonic_offset(void)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long seq;
        struct timespec wtom;

        do {
                seq = read_seqbegin(&tk->lock);
                wtom = tk->wall_to_monotonic;
        } while (read_seqretry(&tk->lock, seq));

        return timespec_to_ktime(wtom);
}
EXPORT_SYMBOL_GPL(ktime_get_monotonic_offset);

/**
 * xtime_update() - advances the timekeeping infrastructure
 * @ticks:	number of ticks that have elapsed since the last call
 *
 * Must be called with interrupts disabled.
 */
void xtime_update(unsigned long ticks)
{
        write_seqlock(&jiffies_lock);
        do_timer(ticks);
        write_sequnlock(&jiffies_lock);
}
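
/*
 * Usage sketch (hypothetical periodic-tick architecture code, not part
 * of this file): the per-tick timer interrupt would typically look like:
 *
 *	static irqreturn_t timer_interrupt(int irq, void *dev_id)
 *	{
 *		xtime_update(1);	// one tick has elapsed
 *		update_process_times(user_mode(get_irq_regs()));
 *		return IRQ_HANDLED;
 *	}
 *
 * xtime_update() takes jiffies_lock itself, which satisfies the locking
 * requirement documented above do_timer().
 */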