/*
 * linux/kernel/time/timekeeping.c
 *
 * Kernel timekeeping code and accessor functions
 *
 * This code was moved from linux/kernel/timer.c.
 * Please see that file for copyright and history logs.
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
#include <linux/stop_machine.h>

/* Structure holding internal timekeeping values. */
struct timekeeper {
	/* Current clocksource used for timekeeping. */
	struct clocksource *clock;
	/* NTP adjusted clock multiplier */
	u32 mult;
	/* The shift value of the current clocksource. */
	u32 shift;
	/* Number of clock cycles in one NTP interval. */
	cycle_t cycle_interval;
	/* Number of clock shifted nano seconds in one NTP interval. */
	u64 xtime_interval;
	/* shifted nano seconds left over when rounding cycle_interval */
	s64 xtime_remainder;
	/* Raw nano seconds accumulated per NTP interval. */
	u32 raw_interval;

	/* Current CLOCK_REALTIME time in seconds */
	u64 xtime_sec;
	/* Clock shifted nano seconds */
	u64 xtime_nsec;

	/* Difference between accumulated time and NTP time in ntp
	 * shifted nano seconds. */
	s64 ntp_error;
	/* Shift conversion between clock shifted nano seconds and
	 * ntp shifted nano seconds. */
	u32 ntp_error_shift;

	/*
	 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
	 * for sub jiffie times) to get to monotonic time.  Monotonic is pegged
	 * at zero at system boot time, so wall_to_monotonic will be negative,
	 * however, we will ALWAYS keep the tv_nsec part positive so we can use
	 * the usual normalization.
	 *
	 * wall_to_monotonic is moved after resume from suspend for the
	 * monotonic time not to jump. We need to add total_sleep_time to
	 * wall_to_monotonic to get the real boot based time offset.
	 *
	 * - wall_to_monotonic is no longer the boot time, getboottime must be
	 * used instead.
	 */
	struct timespec wall_to_monotonic;
	/* time spent in suspend */
	struct timespec total_sleep_time;
	/* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */
	struct timespec raw_time;
	/* Offset clock monotonic -> clock realtime */
	ktime_t offs_real;
	/* Offset clock monotonic -> clock boottime */
	ktime_t offs_boot;
	/* Seqlock for all timekeeper values */
	seqlock_t lock;
};

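/* The single timekeeper instance used by the core timekeeping code. */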
static struct timekeeper timekeeper;

/*
 * This seqlock protects us from races in SMP while
 * playing with xtime.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);

/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;

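/* Carry whole seconds out of the shifted-nanosecond accumulator into xtime_sec. */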
static inline void tk_normalize_xtime(struct timekeeper *tk)
{
	while (tk->xtime_nsec >= ((u64)NSEC_PER_SEC << tk->shift)) {
		tk->xtime_nsec -= (u64)NSEC_PER_SEC << tk->shift;
		tk->xtime_sec++;
	}
}

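/* Return the current CLOCK_REALTIME time as a timespec built from xtime_sec/xtime_nsec. */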
static struct timespec tk_xtime(struct timekeeper *tk)
{
	struct timespec ts;

	ts.tv_sec = tk->xtime_sec;
	ts.tv_nsec = (long)(tk->xtime_nsec >> tk->shift);
	return ts;
}

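/* Set CLOCK_REALTIME from a timespec, storing the nanoseconds in clock-shifted form. */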
static void tk_set_xtime(struct timekeeper *tk, const struct timespec *ts)
{
	tk->xtime_sec = ts->tv_sec;
	tk->xtime_nsec = ts->tv_nsec << tk->shift;
}

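/* Add a timespec delta to the CLOCK_REALTIME accumulators (does not normalize). */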
static void tk_xtime_add(struct timekeeper *tk, const struct timespec *ts)
{
	tk->xtime_sec += ts->tv_sec;
	tk->xtime_nsec += ts->tv_nsec << tk->shift;
}

/**
 * timekeeper_setup_internals - Set up internals to use clocksource clock.
 *
 * @clock: Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static void timekeeper_setup_internals(struct clocksource *clock)
{
	cycle_t interval;
	u64 tmp, ntpinterval;
	struct clocksource *old_clock;

	old_clock = timekeeper.clock;
	timekeeper.clock = clock;
	clock->cycle_last = clock->read(clock);

	/* Do the ns -> cycle conversion first, using original mult */
	tmp = NTP_INTERVAL_LENGTH;
	tmp <<= clock->shift;
	ntpinterval = tmp;
	tmp += clock->mult/2;
	do_div(tmp, clock->mult);
	if (tmp == 0)
		tmp = 1;

	interval = (cycle_t) tmp;
	timekeeper.cycle_interval = interval;

	/* Go back from cycles -> shifted ns */
	timekeeper.xtime_interval = (u64) interval * clock->mult;
	timekeeper.xtime_remainder = ntpinterval - timekeeper.xtime_interval;
	timekeeper.raw_interval =
		((u64) interval * clock->mult) >> clock->shift;

	/* if changing clocks, convert xtime_nsec shift units */
	if (old_clock) {
		int shift_change = clock->shift - old_clock->shift;
		if (shift_change < 0)
			timekeeper.xtime_nsec >>= -shift_change;
		else
			timekeeper.xtime_nsec <<= shift_change;
	}
	timekeeper.shift = clock->shift;

	timekeeper.ntp_error = 0;
	timekeeper.ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;

	/*
	 * The timekeeper keeps its own mult values for the currently
	 * active clocksource. These values will be adjusted via NTP
	 * to counteract clock drifting.
	 */
	timekeeper.mult = clock->mult;
}

/* Timekeeper helper functions. */
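/*
 * Return the nanosecond part of the current CLOCK_REALTIME time relative to
 * xtime_sec: the stored shifted remainder plus the clocksource delta since
 * the last update, scaled by the NTP-adjusted mult and shifted down.
 */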
static inline s64 timekeeping_get_ns(void)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;
	s64 nsec;

	/* read clocksource: */
	clock = timekeeper.clock;
	cycle_now = clock->read(clock);

	/* calculate the delta since the last update_wall_time: */
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

	nsec = cycle_delta * timekeeper.mult + timekeeper.xtime_nsec;
	return nsec >> timekeeper.shift;
}

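/*
 * Return the raw (NTP-unadjusted) nanoseconds elapsed since the last
 * update_wall_time(), using the clocksource's original mult/shift.
 */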
static inline s64 timekeeping_get_ns_raw(void)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;

	/* read clocksource: */
	clock = timekeeper.clock;
	cycle_now = clock->read(clock);

	/* calculate the delta since the last update_wall_time: */
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

	/* return delta converted to nanoseconds. */
	return clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
}

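/* Recompute offs_real, the CLOCK_MONOTONIC -> CLOCK_REALTIME offset (negated wall_to_monotonic). */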
static void update_rt_offset(void)
{
	struct timespec tmp, *wtm = &timekeeper.wall_to_monotonic;

	set_normalized_timespec(&tmp, -wtm->tv_sec, -wtm->tv_nsec);
	timekeeper.offs_real = timespec_to_ktime(tmp);
}

/* must hold write on timekeeper.lock */
static void timekeeping_update(bool clearntp)
{
	struct timespec xt;

	if (clearntp) {
		timekeeper.ntp_error = 0;
		ntp_clear();
	}
	update_rt_offset();
	xt = tk_xtime(&timekeeper);
	update_vsyscall(&xt, &timekeeper.wall_to_monotonic,
			timekeeper.clock, timekeeper.mult);
}


/**
 * timekeeping_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void timekeeping_forward_now(void)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;
	s64 nsec;

	clock = timekeeper.clock;
	cycle_now = clock->read(clock);
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
	clock->cycle_last = cycle_now;

	timekeeper.xtime_nsec += cycle_delta * timekeeper.mult;

	/* If arch requires, add in gettimeoffset() */
	timekeeper.xtime_nsec += arch_gettimeoffset() << timekeeper.shift;

	tk_normalize_xtime(&timekeeper);

	nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
	timespec_add_ns(&timekeeper.raw_time, nsec);
}

/**
 * getnstimeofday - Returns the time of day in a timespec
 * @ts: pointer to the timespec to be set
 *
 * Returns the time of day in a timespec.
 */
void getnstimeofday(struct timespec *ts)
{
	unsigned long seq;
	s64 nsecs = 0;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&timekeeper.lock);

		ts->tv_sec = timekeeper.xtime_sec;
		ts->tv_nsec = timekeeping_get_ns();

		/* If arch requires, add in gettimeoffset() */
		nsecs += arch_gettimeoffset();

	} while (read_seqretry(&timekeeper.lock, seq));

	timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getnstimeofday);

ktime_t ktime_get(void)
{
	unsigned int seq;
	s64 secs, nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&timekeeper.lock);
		secs = timekeeper.xtime_sec +
				timekeeper.wall_to_monotonic.tv_sec;
		nsecs = timekeeping_get_ns() +
				timekeeper.wall_to_monotonic.tv_nsec;
		/* If arch requires, add in gettimeoffset() */
		nsecs += arch_gettimeoffset();

	} while (read_seqretry(&timekeeper.lock, seq));
	/*
	 * Use ktime_set/ktime_add_ns to create a proper ktime on
	 * 32-bit architectures without CONFIG_KTIME_SCALAR.
	 */
	return ktime_add_ns(ktime_set(secs, 0), nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get);

/**
 * ktime_get_ts - get the monotonic clock in timespec format
 * @ts: pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts(struct timespec *ts)
{
	struct timespec tomono;
	unsigned int seq;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&timekeeper.lock);
		ts->tv_sec = timekeeper.xtime_sec;
		ts->tv_nsec = timekeeping_get_ns();
		tomono = timekeeper.wall_to_monotonic;
		/* If arch requires, add in gettimeoffset() */
		ts->tv_nsec += arch_gettimeoffset();

	} while (read_seqretry(&timekeeper.lock, seq));

	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
				ts->tv_nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);

#ifdef CONFIG_NTP_PPS

/**
 * getnstime_raw_and_real - get day and raw monotonic time in timespec format
 * @ts_raw: pointer to the timespec to be set to raw monotonic time
 * @ts_real: pointer to the timespec to be set to the time of day
 *
 * This function reads both the time of day and raw monotonic time at the
 * same time atomically and stores the resulting timestamps in timespec
 * format.
 */
void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
{
	unsigned long seq;
	s64 nsecs_raw, nsecs_real;

	WARN_ON_ONCE(timekeeping_suspended);

	do {
		u32 arch_offset;

		seq = read_seqbegin(&timekeeper.lock);

		*ts_raw = timekeeper.raw_time;
		ts_real->tv_sec = timekeeper.xtime_sec;
		ts_real->tv_nsec = 0;

		nsecs_raw = timekeeping_get_ns_raw();
		nsecs_real = timekeeping_get_ns();

		/* If arch requires, add in gettimeoffset() */
		arch_offset = arch_gettimeoffset();
		nsecs_raw += arch_offset;
		nsecs_real += arch_offset;

	} while (read_seqretry(&timekeeper.lock, seq));

	timespec_add_ns(ts_raw, nsecs_raw);
	timespec_add_ns(ts_real, nsecs_real);
}
EXPORT_SYMBOL(getnstime_raw_and_real);

#endif /* CONFIG_NTP_PPS */

/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv: pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday()
 */
void do_gettimeofday(struct timeval *tv)
{
	struct timespec now;

	getnstimeofday(&now);
	tv->tv_sec = now.tv_sec;
	tv->tv_usec = now.tv_nsec/1000;
}
EXPORT_SYMBOL(do_gettimeofday);

/**
 * do_settimeofday - Sets the time of day
 * @tv: pointer to the timespec variable containing the new time
 *
 * Sets the time of day to the new time, updates NTP and notifies hrtimers
 */
int do_settimeofday(const struct timespec *tv)
{
	struct timespec ts_delta, xt;
	unsigned long flags;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&timekeeper.lock, flags);

	timekeeping_forward_now();

	xt = tk_xtime(&timekeeper);
	ts_delta.tv_sec = tv->tv_sec - xt.tv_sec;
	ts_delta.tv_nsec = tv->tv_nsec - xt.tv_nsec;

	timekeeper.wall_to_monotonic =
		timespec_sub(timekeeper.wall_to_monotonic, ts_delta);

	tk_set_xtime(&timekeeper, tv);

	timekeeping_update(true);

	write_sequnlock_irqrestore(&timekeeper.lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return 0;
}
EXPORT_SYMBOL(do_settimeofday);


/**
 * timekeeping_inject_offset - Adds or subtracts from the current time.
 * @ts: pointer to the timespec variable containing the offset
 *
 * Adds or subtracts an offset value from the current time.
 */
int timekeeping_inject_offset(struct timespec *ts)
{
	unsigned long flags;

	if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&timekeeper.lock, flags);

	timekeeping_forward_now();

	tk_xtime_add(&timekeeper, ts);
	timekeeper.wall_to_monotonic =
		timespec_sub(timekeeper.wall_to_monotonic, *ts);

	timekeeping_update(true);

	write_sequnlock_irqrestore(&timekeeper.lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return 0;
}
EXPORT_SYMBOL(timekeeping_inject_offset);

/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static int change_clocksource(void *data)
{
	struct clocksource *new, *old;
	unsigned long flags;

	new = (struct clocksource *) data;

	write_seqlock_irqsave(&timekeeper.lock, flags);

	timekeeping_forward_now();
	if (!new->enable || new->enable(new) == 0) {
		old = timekeeper.clock;
		timekeeper_setup_internals(new);
		if (old->disable)
			old->disable(old);
	}
	timekeeping_update(true);

	write_sequnlock_irqrestore(&timekeeper.lock, flags);

	return 0;
}

/**
 * timekeeping_notify - Install a new clock source
 * @clock: pointer to the clock source
 *
 * This function is called from clocksource.c after a new, better clock
 * source has been registered. The caller holds the clocksource_mutex.
 */
void timekeeping_notify(struct clocksource *clock)
{
	if (timekeeper.clock == clock)
		return;
	stop_machine(change_clocksource, clock, NULL);
	tick_clock_notify();
}

/**
 * ktime_get_real - get the real (wall-) time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get_real(void)
{
	struct timespec now;

	getnstimeofday(&now);

	return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get_real);

/**
 * getrawmonotonic - Returns the raw monotonic time in a timespec
 * @ts: pointer to the timespec to be set
 *
 * Returns the raw monotonic time (completely un-modified by ntp)
 */
void getrawmonotonic(struct timespec *ts)
{
	unsigned long seq;
	s64 nsecs;

	do {
		seq = read_seqbegin(&timekeeper.lock);
		nsecs = timekeeping_get_ns_raw();
		*ts = timekeeper.raw_time;

	} while (read_seqretry(&timekeeper.lock, seq));

	timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getrawmonotonic);


/**
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
 */
int timekeeping_valid_for_hres(void)
{
	unsigned long seq;
	int ret;

	do {
		seq = read_seqbegin(&timekeeper.lock);

		ret = timekeeper.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;

	} while (read_seqretry(&timekeeper.lock, seq));

	return ret;
}

/**
 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
 */
u64 timekeeping_max_deferment(void)
{
	unsigned long seq;
	u64 ret;

	do {
		seq = read_seqbegin(&timekeeper.lock);

		ret = timekeeper.clock->max_idle_ns;

	} while (read_seqretry(&timekeeper.lock, seq));

	return ret;
}

/**
 * read_persistent_clock - Return time from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Reads the time from the battery backed persistent clock.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 * XXX - Do be sure to remove it once all arches implement it.
 */
void __attribute__((weak)) read_persistent_clock(struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}

/**
 * read_boot_clock - Return time of the system start.
 *
 * Weak dummy function for arches that do not yet support it.
 * Function to read the exact time the system has been started.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 * XXX - Do be sure to remove it once all arches implement it.
 */
void __attribute__((weak)) read_boot_clock(struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}

/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
	struct clocksource *clock;
	unsigned long flags;
	struct timespec now, boot;

	read_persistent_clock(&now);
	read_boot_clock(&boot);

	seqlock_init(&timekeeper.lock);

	ntp_init();

	write_seqlock_irqsave(&timekeeper.lock, flags);
	clock = clocksource_default_clock();
	if (clock->enable)
		clock->enable(clock);
	timekeeper_setup_internals(clock);

	tk_set_xtime(&timekeeper, &now);
	timekeeper.raw_time.tv_sec = 0;
	timekeeper.raw_time.tv_nsec = 0;
	if (boot.tv_sec == 0 && boot.tv_nsec == 0)
		boot = tk_xtime(&timekeeper);

	set_normalized_timespec(&timekeeper.wall_to_monotonic,
				-boot.tv_sec, -boot.tv_nsec);
	update_rt_offset();
	timekeeper.total_sleep_time.tv_sec = 0;
	timekeeper.total_sleep_time.tv_nsec = 0;
	write_sequnlock_irqrestore(&timekeeper.lock, flags);
}

/* time in seconds when suspend began */
static struct timespec timekeeping_suspend_time;

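/* Record the accumulated suspend time and refresh the monotonic -> boottime offset. */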
static void update_sleep_time(struct timespec t)
{
	timekeeper.total_sleep_time = t;
	timekeeper.offs_boot = timespec_to_ktime(t);
}

/**
 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
 * @delta: pointer to a timespec delta value
 *
 * Takes a timespec offset measuring a suspend interval and properly
 * adds the sleep offset to the timekeeping variables.
 */
static void __timekeeping_inject_sleeptime(struct timespec *delta)
{
	if (!timespec_valid(delta)) {
		printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid "
					"sleep delta value!\n");
		return;
	}

	tk_xtime_add(&timekeeper, delta);
	timekeeper.wall_to_monotonic =
		timespec_sub(timekeeper.wall_to_monotonic, *delta);
	update_sleep_time(timespec_add(timekeeper.total_sleep_time, *delta));
}


/**
 * timekeeping_inject_sleeptime - Adds suspend interval to timekeeping values
 * @delta: pointer to a timespec delta value
 *
 * This hook is for architectures that cannot support read_persistent_clock
 * because their RTC/persistent clock is only accessible when irqs are enabled.
 *
 * This function should only be called by rtc_resume(), and allows
 * a suspend offset to be injected into the timekeeping values.
 */
void timekeeping_inject_sleeptime(struct timespec *delta)
{
	unsigned long flags;
	struct timespec ts;

	/* Make sure we don't set the clock twice */
	read_persistent_clock(&ts);
	if (!(ts.tv_sec == 0 && ts.tv_nsec == 0))
		return;

	write_seqlock_irqsave(&timekeeper.lock, flags);

	timekeeping_forward_now();

	__timekeeping_inject_sleeptime(delta);

	timekeeping_update(true);

	write_sequnlock_irqrestore(&timekeeper.lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();
}


/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
static void timekeeping_resume(void)
{
	unsigned long flags;
	struct timespec ts;

	read_persistent_clock(&ts);

	clocksource_resume();

	write_seqlock_irqsave(&timekeeper.lock, flags);

	if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
		ts = timespec_sub(ts, timekeeping_suspend_time);
		__timekeeping_inject_sleeptime(&ts);
	}
	/* re-base the last cycle value */
	timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
	timekeeper.ntp_error = 0;
	timekeeping_suspended = 0;
	write_sequnlock_irqrestore(&timekeeper.lock, flags);

	touch_softlockup_watchdog();

	clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);

	/* Resume hrtimers */
	hrtimers_resume();
}

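/**
 * timekeeping_suspend - Suspends the generic timekeeping subsystem.
 *
 * Forwards the clock to the moment of suspend, marks timekeeping as
 * suspended and compensates for drift accumulated over repeated
 * suspend/resume cycles before suspending the clocksource code.
 */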
static int timekeeping_suspend(void)
{
	unsigned long flags;
	struct timespec delta, delta_delta;
	static struct timespec old_delta;

	read_persistent_clock(&timekeeping_suspend_time);

	write_seqlock_irqsave(&timekeeper.lock, flags);
	timekeeping_forward_now();
	timekeeping_suspended = 1;

	/*
	 * To avoid drift caused by repeated suspend/resumes,
	 * which each can add ~1 second drift error,
	 * try to compensate so the difference in system time
	 * and persistent_clock time stays close to constant.
	 */
	delta = timespec_sub(tk_xtime(&timekeeper), timekeeping_suspend_time);
	delta_delta = timespec_sub(delta, old_delta);
	if (abs(delta_delta.tv_sec) >= 2) {
		/*
		 * if delta_delta is too large, assume time correction
		 * has occurred and set old_delta to the current delta.
		 */
		old_delta = delta;
	} else {
		/* Otherwise try to adjust timekeeping_suspend_time to compensate */
		timekeeping_suspend_time =
			timespec_add(timekeeping_suspend_time, delta_delta);
	}
	write_sequnlock_irqrestore(&timekeeper.lock, flags);

	clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
	clocksource_suspend();

	return 0;
}

/* sysfs resume/suspend bits for timekeeping */
static struct syscore_ops timekeeping_syscore_ops = {
	.resume		= timekeeping_resume,
	.suspend	= timekeeping_suspend,
};

static int __init timekeeping_init_ops(void)
{
	register_syscore_ops(&timekeeping_syscore_ops);
	return 0;
}

device_initcall(timekeeping_init_ops);

/*
 * If the error is already larger, we look ahead even further
 * to compensate for late or lost adjustments.
 */
static __always_inline int timekeeping_bigadjust(s64 error, s64 *interval,
						 s64 *offset)
{
	s64 tick_error, i;
	u32 look_ahead, adj;
	s32 error2, mult;

	/*
	 * Use the current error value to determine how much to look ahead.
	 * The larger the error the slower we adjust for it to avoid problems
	 * with losing too many ticks, otherwise we would overadjust and
	 * produce an even larger error.  The smaller the adjustment the
	 * faster we try to adjust for it, as lost ticks can do less harm
	 * here.  This is tuned so that an error of about 1 msec is adjusted
	 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
	 */
	error2 = timekeeper.ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
	error2 = abs(error2);
	for (look_ahead = 0; error2 > 0; look_ahead++)
		error2 >>= 2;

	/*
	 * Now calculate the error in (1 << look_ahead) ticks, but first
	 * remove the single look ahead already included in the error.
	 */
	tick_error = ntp_tick_length() >> (timekeeper.ntp_error_shift + 1);
	tick_error -= timekeeper.xtime_interval >> 1;
	error = ((error - tick_error) >> look_ahead) + tick_error;

	/* Finally calculate the adjustment shift value. */
	i = *interval;
	mult = 1;
	if (error < 0) {
		error = -error;
		*interval = -*interval;
		*offset = -*offset;
		mult = -1;
	}
	for (adj = 0; error > i; adj++)
		error >>= 1;

	*interval <<= adj;
	*offset <<= adj;
	return mult << adj;
}

/*
 * Adjust the multiplier to reduce the error value,
 * this is optimized for the most common adjustments of -1,0,1,
 * for other values we can do a bit more work.
 */
static void timekeeping_adjust(s64 offset)
{
	s64 error, interval = timekeeper.cycle_interval;
	int adj;

	/*
	 * The point of this is to check if the error is greater than half
	 * an interval.
	 *
	 * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs.
	 *
	 * Note we subtract one in the shift, so that error is really error*2.
	 * This "saves" dividing(shifting) interval twice, but keeps the
	 * (error > interval) comparison as still measuring if error is
	 * larger than half an interval.
	 *
	 * Note: It does not "save" on aggravation when reading the code.
	 */
	error = timekeeper.ntp_error >> (timekeeper.ntp_error_shift - 1);
	if (error > interval) {
		/*
		 * We now divide error by 4(via shift), which checks if
		 * the error is greater than twice the interval.
		 * If it is greater, we need a bigadjust, if it's smaller,
		 * we can adjust by 1.
		 */
		error >>= 2;
		/*
		 * XXX - In update_wall_time, we round up to the next
		 * nanosecond, and store the amount rounded up into
		 * the error. This causes the likely below to be unlikely.
		 *
		 * The proper fix is to avoid rounding up by using
		 * the high precision timekeeper.xtime_nsec instead of
		 * xtime.tv_nsec everywhere. Fixing this will take some
		 * time.
		 */
		if (likely(error <= interval))
			adj = 1;
		else
			adj = timekeeping_bigadjust(error, &interval, &offset);
	} else if (error < -interval) {
		/* See comment above, this is just switched for the negative */
		error >>= 2;
		if (likely(error >= -interval)) {
			adj = -1;
			interval = -interval;
			offset = -offset;
		} else
			adj = timekeeping_bigadjust(error, &interval, &offset);
	} else /* No adjustment needed */
		return;
| 926 | |
John Stultz | e919cfd | 2012-03-22 19:14:46 -0700 | [diff] [blame] | 927 | if (unlikely(timekeeper.clock->maxadj && |
| 928 | (timekeeper.mult + adj > |
| 929 | timekeeper.clock->mult + timekeeper.clock->maxadj))) { |
| 930 | printk_once(KERN_WARNING |
| 931 | "Adjusting %s more than 11%% (%ld vs %ld)\n", |
John Stultz | d65670a | 2011-10-31 17:06:35 -0400 | [diff] [blame] | 932 | timekeeper.clock->name, (long)timekeeper.mult + adj, |
| 933 | (long)timekeeper.clock->mult + |
| 934 | timekeeper.clock->maxadj); |
John Stultz | e919cfd | 2012-03-22 19:14:46 -0700 | [diff] [blame] | 935 | } |
John Stultz | c2bc111 | 2011-10-27 18:12:42 -0700 | [diff] [blame] | 936 | /* |
| 937 | * So the following can be confusing. |
| 938 | * |
| 939 | * To keep things simple, lets assume adj == 1 for now. |
| 940 | * |
| 941 | * When adj != 1, remember that the interval and offset values |
| 942 | * have been appropriately scaled so the math is the same. |
| 943 | * |
| 944 | * The basic idea here is that we're increasing the multiplier |
| 945 | * by one, this causes the xtime_interval to be incremented by |
| 946 | * one cycle_interval. This is because: |
| 947 | * xtime_interval = cycle_interval * mult |
| 948 | * So if mult is being incremented by one: |
| 949 | * xtime_interval = cycle_interval * (mult + 1) |
| 950 | * It's the same as: |
| 951 | * xtime_interval = (cycle_interval * mult) + cycle_interval |
| 952 | * Which can be shortened to: |
| 953 | * xtime_interval += cycle_interval |
| 954 | * |
| 955 | * So offset stores the non-accumulated cycles. Thus the current |
| 956 | * time (in shifted nanoseconds) is: |
| 957 | * now = (offset * adj) + xtime_nsec |
| 958 | * Now, even though we're adjusting the clock frequency, we have |
| 959 | * to keep time consistent. In other words, we can't jump back |
| 960 | * in time, and we also want to avoid jumping forward in time. |
| 961 | * |
| 962 | * So given the same offset value, we need the time to be the same |
| 963 | * both before and after the freq adjustment. |
| 964 | * now = (offset * adj_1) + xtime_nsec_1 |
| 965 | * now = (offset * adj_2) + xtime_nsec_2 |
| 966 | * So: |
| 967 | * (offset * adj_1) + xtime_nsec_1 = |
| 968 | * (offset * adj_2) + xtime_nsec_2 |
| 969 | * And we know: |
| 970 | * adj_2 = adj_1 + 1 |
| 971 | * So: |
| 972 | * (offset * adj_1) + xtime_nsec_1 = |
| 973 | * (offset * (adj_1+1)) + xtime_nsec_2 |
| 974 | * (offset * adj_1) + xtime_nsec_1 = |
| 975 | * (offset * adj_1) + offset + xtime_nsec_2 |
| 976 | * Canceling the sides: |
| 977 | * xtime_nsec_1 = offset + xtime_nsec_2 |
| 978 | * Which gives us: |
| 979 | * xtime_nsec_2 = xtime_nsec_1 - offset |
| 980 | * Which simplifies to: |
| 981 | * xtime_nsec -= offset |
| 982 | * |
| 983 | * XXX - TODO: Doc ntp_error calculation. |
| 984 | */ |
Martin Schwidefsky | 0a54419 | 2009-08-14 15:47:28 +0200 | [diff] [blame] | 985 | timekeeper.mult += adj; |
Martin Schwidefsky | 155ec60 | 2009-08-14 15:47:26 +0200 | [diff] [blame] | 986 | timekeeper.xtime_interval += interval; |
| 987 | timekeeper.xtime_nsec -= offset; |
| 988 | timekeeper.ntp_error -= (interval - offset) << |
Martin Schwidefsky | 23ce721 | 2009-08-14 15:47:27 +0200 | [diff] [blame] | 989 | timekeeper.ntp_error_shift; |
john stultz | 8524070 | 2007-05-08 00:27:59 -0700 | [diff] [blame] | 990 | } |
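/*
 * Illustrative sketch (userspace-style, not kernel code) of the
 * consistency argument in the comment above: the readable time in shifted
 * nanoseconds is xtime_nsec + offset * mult, so bumping mult by one while
 * subtracting offset from xtime_nsec leaves the readable time unchanged.
 * (For adj != 1, interval and offset were pre-scaled by
 * timekeeping_bigadjust(), so the same argument applies.)  Values are
 * made up for the example.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t offset = 12345;	/* un-accumulated cycles */
	uint64_t mult = 1000;		/* NTP-adjusted multiplier */
	uint64_t xtime_nsec = 987654;	/* clock-shifted nanoseconds */
	uint64_t before, after;

	before = xtime_nsec + offset * mult;

	mult += 1;			/* adj == 1 */
	xtime_nsec -= offset;		/* the "xtime_nsec -= offset" step */

	after = xtime_nsec + offset * mult;
	assert(before == after);	/* time does not jump across the freq change */
	return 0;
}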
| 991 | |
Linus Torvalds | 83f57a1 | 2009-12-22 14:10:37 -0800 | [diff] [blame] | 992 | |
john stultz | 8524070 | 2007-05-08 00:27:59 -0700 | [diff] [blame] | 993 | /** |
john stultz | a092ff0 | 2009-10-02 16:17:53 -0700 | [diff] [blame] | 994 | * logarithmic_accumulation - shifted accumulation of cycles |
| 995 | * |
| 996 | * This function accumulates a shifted interval of cycles |
| 997 | * into a shifted interval of nanoseconds, allowing for an O(log) |
| 998 | * accumulation loop. |
| 999 | * |
| 1000 | * Returns the unconsumed cycles. |
| 1001 | */ |
John Stultz | fee84c4 | 2012-07-13 01:21:52 -0400 | [diff] [blame] | 1002 | static cycle_t logarithmic_accumulation(cycle_t offset, u32 shift) |
john stultz | a092ff0 | 2009-10-02 16:17:53 -0700 | [diff] [blame] | 1003 | { |
| 1004 | u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift; |
Jason Wessel | deda2e8 | 2010-08-09 14:20:09 -0700 | [diff] [blame] | 1005 | u64 raw_nsecs; |
john stultz | a092ff0 | 2009-10-02 16:17:53 -0700 | [diff] [blame] | 1006 | |
Jim Cromie | 88b28ad | 2012-03-14 21:28:56 -0600 | [diff] [blame] | 1007 | /* If the offset is smaller than a shifted interval, do nothing */ |
john stultz | a092ff0 | 2009-10-02 16:17:53 -0700 | [diff] [blame] | 1008 | if (offset < timekeeper.cycle_interval << shift) |
| 1009 | return offset; |
| 1010 | |
| 1011 | /* Accumulate one shifted interval */ |
| 1012 | offset -= timekeeper.cycle_interval << shift; |
| 1013 | timekeeper.clock->cycle_last += timekeeper.cycle_interval << shift; |
| 1014 | |
| 1015 | timekeeper.xtime_nsec += timekeeper.xtime_interval << shift; |
| 1016 | while (timekeeper.xtime_nsec >= nsecps) { |
John Stultz | 6b43ae8 | 2012-03-15 13:04:03 -0700 | [diff] [blame] | 1017 | int leap; |
john stultz | a092ff0 | 2009-10-02 16:17:53 -0700 | [diff] [blame] | 1018 | timekeeper.xtime_nsec -= nsecps; |
John Stultz | 1e75fa8 | 2012-07-13 01:21:53 -0400 | [diff] [blame^] | 1019 | timekeeper.xtime_sec++; |
| 1020 | leap = second_overflow(timekeeper.xtime_sec); |
| 1021 | timekeeper.xtime_sec += leap; |
John Stultz | fad0c66 | 2012-05-30 10:54:57 -0700 | [diff] [blame] | 1022 | timekeeper.wall_to_monotonic.tv_sec -= leap; |
John Stultz | 4873fa0 | 2012-07-10 18:43:20 -0400 | [diff] [blame] | 1023 | if (leap) |
| 1024 | clock_was_set_delayed(); |
john stultz | a092ff0 | 2009-10-02 16:17:53 -0700 | [diff] [blame] | 1025 | } |
| 1026 | |
Jason Wessel | deda2e8 | 2010-08-09 14:20:09 -0700 | [diff] [blame] | 1027 | /* Accumulate raw time */ |
| 1028 | raw_nsecs = timekeeper.raw_interval << shift; |
John Stultz | 01f71b4 | 2011-11-14 11:43:49 -0800 | [diff] [blame] | 1029 | raw_nsecs += timekeeper.raw_time.tv_nsec; |
John Stultz | c7dcf87 | 2010-08-13 11:30:58 -0700 | [diff] [blame] | 1030 | if (raw_nsecs >= NSEC_PER_SEC) { |
| 1031 | u64 raw_secs = raw_nsecs; |
| 1032 | raw_nsecs = do_div(raw_secs, NSEC_PER_SEC); |
John Stultz | 01f71b4 | 2011-11-14 11:43:49 -0800 | [diff] [blame] | 1033 | timekeeper.raw_time.tv_sec += raw_secs; |
john stultz | a092ff0 | 2009-10-02 16:17:53 -0700 | [diff] [blame] | 1034 | } |
John Stultz | 01f71b4 | 2011-11-14 11:43:49 -0800 | [diff] [blame] | 1035 | timekeeper.raw_time.tv_nsec = raw_nsecs; |
john stultz | a092ff0 | 2009-10-02 16:17:53 -0700 | [diff] [blame] | 1036 | |
| 1037 | /* Accumulate error between NTP and clock interval */ |
John Stultz | ea7cf49 | 2011-11-14 13:18:07 -0800 | [diff] [blame] | 1038 | timekeeper.ntp_error += ntp_tick_length() << shift; |
Kasper Pedersen | a386b5a | 2010-10-20 15:55:15 -0700 | [diff] [blame] | 1039 | timekeeper.ntp_error -= |
| 1040 | (timekeeper.xtime_interval + timekeeper.xtime_remainder) << |
john stultz | a092ff0 | 2009-10-02 16:17:53 -0700 | [diff] [blame] | 1041 | (timekeeper.ntp_error_shift + shift); |
| 1042 | |
| 1043 | return offset; |
| 1044 | } |
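/*
 * Illustrative sketch (userspace-style, not kernel code) of the shifted
 * accumulation above, stripped down to the second/nanosecond bookkeeping;
 * leap seconds, raw time and NTP error handling are omitted.  Struct and
 * function names are made up for the example.
 */
#include <stdint.h>

#define DEMO_NSEC_PER_SEC 1000000000ULL

struct demo_tk {
	uint64_t cycle_interval;	/* clock cycles per tick */
	uint64_t xtime_interval;	/* shifted ns per tick */
	uint32_t shift;			/* clocksource shift */
	uint64_t xtime_sec;
	uint64_t xtime_nsec;		/* shifted ns */
};

/* Consume one chunk of (cycle_interval << shift) cycles, if available. */
static uint64_t demo_accumulate(struct demo_tk *tk, uint64_t offset, uint32_t shift)
{
	uint64_t nsecps = DEMO_NSEC_PER_SEC << tk->shift;

	if (offset < tk->cycle_interval << shift)
		return offset;			/* chunk too big; caller lowers shift */

	offset -= tk->cycle_interval << shift;	/* consume 2^shift ticks of cycles */
	tk->xtime_nsec += tk->xtime_interval << shift;
	while (tk->xtime_nsec >= nsecps) {	/* carry whole seconds out */
		tk->xtime_nsec -= nsecps;
		tk->xtime_sec++;
	}
	return offset;				/* unconsumed cycles */
}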
| 1045 | |
Linus Torvalds | 83f57a1 | 2009-12-22 14:10:37 -0800 | [diff] [blame] | 1046 | |
john stultz | 8524070 | 2007-05-08 00:27:59 -0700 | [diff] [blame] | 1047 | /** |
| 1048 | * update_wall_time - Uses the current clocksource to increment the wall time |
| 1049 | * |
john stultz | 8524070 | 2007-05-08 00:27:59 -0700 | [diff] [blame] | 1050 | */ |
Torben Hohn | 871cf1e | 2011-01-27 15:58:55 +0100 | [diff] [blame] | 1051 | static void update_wall_time(void) |
john stultz | 8524070 | 2007-05-08 00:27:59 -0700 | [diff] [blame] | 1052 | { |
Martin Schwidefsky | 155ec60 | 2009-08-14 15:47:26 +0200 | [diff] [blame] | 1053 | struct clocksource *clock; |
john stultz | 8524070 | 2007-05-08 00:27:59 -0700 | [diff] [blame] | 1054 | cycle_t offset; |
john stultz | a092ff0 | 2009-10-02 16:17:53 -0700 | [diff] [blame] | 1055 | int shift = 0, maxshift; |
John Stultz | 70471f2 | 2011-11-14 12:48:10 -0800 | [diff] [blame] | 1056 | unsigned long flags; |
John Stultz | 1e75fa8 | 2012-07-13 01:21:53 -0400 | [diff] [blame^] | 1057 | s64 remainder; |
John Stultz | 70471f2 | 2011-11-14 12:48:10 -0800 | [diff] [blame] | 1058 | |
| 1059 | write_seqlock_irqsave(&timekeeper.lock, flags); |
john stultz | 8524070 | 2007-05-08 00:27:59 -0700 | [diff] [blame] | 1060 | |
| 1061 | /* Make sure we're fully resumed: */ |
| 1062 | if (unlikely(timekeeping_suspended)) |
John Stultz | 70471f2 | 2011-11-14 12:48:10 -0800 | [diff] [blame] | 1063 | goto out; |
john stultz | 8524070 | 2007-05-08 00:27:59 -0700 | [diff] [blame] | 1064 | |
Martin Schwidefsky | 155ec60 | 2009-08-14 15:47:26 +0200 | [diff] [blame] | 1065 | clock = timekeeper.clock; |
John Stultz | 592913e | 2010-07-13 17:56:20 -0700 | [diff] [blame] | 1066 | |
| 1067 | #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET |
Martin Schwidefsky | 155ec60 | 2009-08-14 15:47:26 +0200 | [diff] [blame] | 1068 | offset = timekeeper.cycle_interval; |
John Stultz | 592913e | 2010-07-13 17:56:20 -0700 | [diff] [blame] | 1069 | #else |
| 1070 | offset = (clock->read(clock) - clock->cycle_last) & clock->mask; |
john stultz | 8524070 | 2007-05-08 00:27:59 -0700 | [diff] [blame] | 1071 | #endif |
john stultz | 8524070 | 2007-05-08 00:27:59 -0700 | [diff] [blame] | 1072 | |
john stultz | a092ff0 | 2009-10-02 16:17:53 -0700 | [diff] [blame] | 1073 | /* |
| 1074 | * With NO_HZ we may have to accumulate many cycle_intervals |
| 1075 | * (think "ticks") worth of time at once. To do this efficiently, |
| 1076 | * we calculate the largest doubling multiple of cycle_intervals |
Jim Cromie | 88b28ad | 2012-03-14 21:28:56 -0600 | [diff] [blame] | 1077 | * that is smaller than the offset. We then accumulate that |
john stultz | a092ff0 | 2009-10-02 16:17:53 -0700 | [diff] [blame] | 1078 | * chunk in one go, and then try to consume the next smaller |
| 1079 | * doubled multiple. |
john stultz | 8524070 | 2007-05-08 00:27:59 -0700 | [diff] [blame] | 1080 | */ |
john stultz | a092ff0 | 2009-10-02 16:17:53 -0700 | [diff] [blame] | 1081 | shift = ilog2(offset) - ilog2(timekeeper.cycle_interval); |
| 1082 | shift = max(0, shift); |
Jim Cromie | 88b28ad | 2012-03-14 21:28:56 -0600 | [diff] [blame] | 1083 | /* Bound shift to one less than what overflows tick_length */ |
John Stultz | ea7cf49 | 2011-11-14 13:18:07 -0800 | [diff] [blame] | 1084 | maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1; |
john stultz | a092ff0 | 2009-10-02 16:17:53 -0700 | [diff] [blame] | 1085 | shift = min(shift, maxshift); |
Martin Schwidefsky | 155ec60 | 2009-08-14 15:47:26 +0200 | [diff] [blame] | 1086 | while (offset >= timekeeper.cycle_interval) { |
john stultz | a092ff0 | 2009-10-02 16:17:53 -0700 | [diff] [blame] | 1087 | offset = logarithmic_accumulation(offset, shift); |
John Stultz | 830ec04 | 2010-03-18 14:47:30 -0700 | [diff] [blame] | 1088 | if (offset < timekeeper.cycle_interval << shift) |
| 1089 | shift--; |
john stultz | 8524070 | 2007-05-08 00:27:59 -0700 | [diff] [blame] | 1090 | } |
| 1091 | |
| 1092 | /* correct the clock when NTP error is too big */ |
Martin Schwidefsky | 155ec60 | 2009-08-14 15:47:26 +0200 | [diff] [blame] | 1093 | timekeeping_adjust(offset); |
john stultz | 8524070 | 2007-05-08 00:27:59 -0700 | [diff] [blame] | 1094 | |
john stultz | 6c9bacb | 2008-12-01 18:34:41 -0800 | [diff] [blame] | 1095 | /* |
| 1096 | * Since in the loop above, we accumulate any amount of time |
| 1097 | * in xtime_nsec over a second into xtime_sec, it's possible for |
| 1098 | * xtime_nsec to be fairly small after the loop. Further, if we're |
Martin Schwidefsky | 155ec60 | 2009-08-14 15:47:26 +0200 | [diff] [blame] | 1099 | * slightly speeding the clocksource up in timekeeping_adjust(), |
john stultz | 6c9bacb | 2008-12-01 18:34:41 -0800 | [diff] [blame] | 1100 | * it's possible the required corrective factor to xtime_nsec could |
| 1101 | * cause it to underflow. |
| 1102 | * |
| 1103 | * Now, we cannot simply roll the accumulated second back, since |
| 1104 | * the NTP subsystem has been notified via second_overflow. So |
| 1105 | * instead we push xtime_nsec forward by the amount we underflowed, |
| 1106 | * and add that amount into the error. |
| 1107 | * |
| 1108 | * We'll correct this error next time through this function, when |
| 1109 | * xtime_nsec is not as small. |
| 1110 | */ |
Martin Schwidefsky | 155ec60 | 2009-08-14 15:47:26 +0200 | [diff] [blame] | 1111 | if (unlikely((s64)timekeeper.xtime_nsec < 0)) { |
| 1112 | s64 neg = -(s64)timekeeper.xtime_nsec; |
| 1113 | timekeeper.xtime_nsec = 0; |
Martin Schwidefsky | 23ce721 | 2009-08-14 15:47:27 +0200 | [diff] [blame] | 1114 | timekeeper.ntp_error += neg << timekeeper.ntp_error_shift; |
john stultz | 6c9bacb | 2008-12-01 18:34:41 -0800 | [diff] [blame] | 1115 | } |
| 1116 | |
John Stultz | 6a867a3 | 2010-04-06 14:30:51 -0700 | [diff] [blame] | 1117 | /* |
John Stultz | 1e75fa8 | 2012-07-13 01:21:53 -0400 | [diff] [blame^] | 1118 | * Store only full nanoseconds into xtime_nsec after rounding |
| 1119 | * it up and add the remainder to the error difference. |
| 1120 | * XXX - This is necessary to avoid small 1ns inconsistencies caused |
| 1121 | * by truncating the remainder in vsyscalls. However, it causes |
| 1122 | * additional work to be done in timekeeping_adjust(). Once |
| 1123 | * the vsyscall implementations are converted to use xtime_nsec |
| 1124 | * (shifted nanoseconds), this can be killed. |
| 1125 | */ |
| 1126 | remainder = timekeeper.xtime_nsec & ((1 << timekeeper.shift) - 1); |
| 1127 | timekeeper.xtime_nsec -= remainder; |
| 1128 | timekeeper.xtime_nsec += 1 << timekeeper.shift; |
| 1129 | timekeeper.ntp_error += remainder << timekeeper.ntp_error_shift; |
john stultz | 8524070 | 2007-05-08 00:27:59 -0700 | [diff] [blame] | 1130 | |
John Stultz | 6a867a3 | 2010-04-06 14:30:51 -0700 | [diff] [blame] | 1131 | /* |
| 1132 | * Finally, make sure that after the rounding |
John Stultz | 1e75fa8 | 2012-07-13 01:21:53 -0400 | [diff] [blame^] | 1133 | * xtime_nsec isn't larger than NSEC_PER_SEC << shift |
John Stultz | 6a867a3 | 2010-04-06 14:30:51 -0700 | [diff] [blame] | 1134 | */ |
John Stultz | 1e75fa8 | 2012-07-13 01:21:53 -0400 | [diff] [blame^] | 1135 | if (unlikely(timekeeper.xtime_nsec >= |
| 1136 | ((u64)NSEC_PER_SEC << timekeeper.shift))) { |
John Stultz | 6b43ae8 | 2012-03-15 13:04:03 -0700 | [diff] [blame] | 1137 | int leap; |
John Stultz | 1e75fa8 | 2012-07-13 01:21:53 -0400 | [diff] [blame^] | 1138 | timekeeper.xtime_nsec -= (u64)NSEC_PER_SEC << timekeeper.shift; |
| 1139 | timekeeper.xtime_sec++; |
| 1140 | leap = second_overflow(timekeeper.xtime_sec); |
| 1141 | timekeeper.xtime_sec += leap; |
John Stultz | fad0c66 | 2012-05-30 10:54:57 -0700 | [diff] [blame] | 1142 | timekeeper.wall_to_monotonic.tv_sec -= leap; |
John Stultz | 4873fa0 | 2012-07-10 18:43:20 -0400 | [diff] [blame] | 1143 | if (leap) |
| 1144 | clock_was_set_delayed(); |
John Stultz | 6a867a3 | 2010-04-06 14:30:51 -0700 | [diff] [blame] | 1145 | } |
Linus Torvalds | 83f57a1 | 2009-12-22 14:10:37 -0800 | [diff] [blame] | 1146 | |
Thomas Gleixner | cc06268 | 2011-11-13 23:19:49 +0000 | [diff] [blame] | 1147 | timekeeping_update(false); |
John Stultz | 70471f2 | 2011-11-14 12:48:10 -0800 | [diff] [blame] | 1148 | |
| 1149 | out: |
| 1150 | write_sequnlock_irqrestore(&timekeeper.lock, flags); |
| 1151 | |
john stultz | 8524070 | 2007-05-08 00:27:59 -0700 | [diff] [blame] | 1152 | } |
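/*
 * Illustrative sketch (userspace-style, not kernel code) of the NO_HZ
 * catch-up strategy used in update_wall_time(): start with the largest
 * power-of-two multiple of cycle_interval that fits in the pending offset
 * and shrink the chunk as the offset is consumed, so a long idle period
 * is accumulated in O(log N) steps rather than one step per tick.  The
 * maxshift bound from ntp_tick_length() is omitted; names are made up
 * for the example.
 */
#include <stdint.h>

static int demo_ilog2(uint64_t v)
{
	int log = -1;

	while (v) {
		v >>= 1;
		log++;
	}
	return log;
}

static int demo_catchup_steps(uint64_t offset, uint64_t cycle_interval)
{
	int shift = demo_ilog2(offset) - demo_ilog2(cycle_interval);
	int steps = 0;

	if (shift < 0)
		shift = 0;
	while (offset >= cycle_interval) {
		if (offset >= cycle_interval << shift) {
			offset -= cycle_interval << shift;	/* one 2^shift-tick chunk */
			steps++;
		}
		if (offset < cycle_interval << shift)
			shift--;			/* try the next smaller chunk */
	}
	return steps;
}

/* e.g. demo_catchup_steps(100, 7) == 3: chunks of 8, 4 and 2 ticks. */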
Tomas Janousek | 7c3f1a5 | 2007-07-15 23:39:41 -0700 | [diff] [blame] | 1153 | |
| 1154 | /** |
| 1155 | * getboottime - Return the real time of system boot. |
| 1156 | * @ts: pointer to the timespec to be set |
| 1157 | * |
John Stultz | abb3a4e | 2011-02-14 17:52:09 -0800 | [diff] [blame] | 1158 | * Returns the wall-time of boot in a timespec. |
Tomas Janousek | 7c3f1a5 | 2007-07-15 23:39:41 -0700 | [diff] [blame] | 1159 | * |
| 1160 | * This is based on the wall_to_monotonic offset and the total suspend |
| 1161 | * time. Calls to settimeofday will affect the value returned (which |
| 1162 | * basically means that however wrong your real time clock is at boot time, |
| 1163 | * you get the right time here). |
| 1164 | */ |
| 1165 | void getboottime(struct timespec *ts) |
| 1166 | { |
Hiroshi Shimamoto | 36d4748 | 2009-08-25 15:08:30 +0900 | [diff] [blame] | 1167 | struct timespec boottime = { |
John Stultz | d9f7217 | 2011-11-14 11:29:32 -0800 | [diff] [blame] | 1168 | .tv_sec = timekeeper.wall_to_monotonic.tv_sec + |
John Stultz | 00c5fb7 | 2011-11-14 11:23:15 -0800 | [diff] [blame] | 1169 | timekeeper.total_sleep_time.tv_sec, |
John Stultz | d9f7217 | 2011-11-14 11:29:32 -0800 | [diff] [blame] | 1170 | .tv_nsec = timekeeper.wall_to_monotonic.tv_nsec + |
John Stultz | 00c5fb7 | 2011-11-14 11:23:15 -0800 | [diff] [blame] | 1171 | timekeeper.total_sleep_time.tv_nsec |
Hiroshi Shimamoto | 36d4748 | 2009-08-25 15:08:30 +0900 | [diff] [blame] | 1172 | }; |
Martin Schwidefsky | d4f587c | 2009-08-14 15:47:31 +0200 | [diff] [blame] | 1173 | |
Martin Schwidefsky | d4f587c | 2009-08-14 15:47:31 +0200 | [diff] [blame] | 1174 | set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec); |
Tomas Janousek | 7c3f1a5 | 2007-07-15 23:39:41 -0700 | [diff] [blame] | 1175 | } |
Jason Wang | c93d89f | 2010-01-27 19:13:40 +0800 | [diff] [blame] | 1176 | EXPORT_SYMBOL_GPL(getboottime); |
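/*
 * Illustrative numeric sketch (userspace-style, not kernel code) of the
 * relation used by getboottime(): with monotonic time defined as
 * realtime + wall_to_monotonic, and total_sleep_time counting time spent
 * in suspend, the real time of boot is
 * -(wall_to_monotonic + total_sleep_time).  Example values assume no
 * settimeofday() calls since boot.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	int64_t boot_realtime = 1000000;	/* realtime (s) at boot */
	int64_t realtime = 1000500;		/* 500s of wall time later */
	int64_t sleep = 100;			/* 100s of that in suspend */
	int64_t monotonic, wall_to_monotonic;

	monotonic = (realtime - boot_realtime) - sleep;	/* does not advance in suspend */
	wall_to_monotonic = monotonic - realtime;

	assert(-(wall_to_monotonic + sleep) == boot_realtime);
	return 0;
}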
Tomas Janousek | 7c3f1a5 | 2007-07-15 23:39:41 -0700 | [diff] [blame] | 1177 | |
John Stultz | abb3a4e | 2011-02-14 17:52:09 -0800 | [diff] [blame] | 1178 | |
| 1179 | /** |
| 1180 | * get_monotonic_boottime - Returns monotonic time since boot |
| 1181 | * @ts: pointer to the timespec to be set |
| 1182 | * |
| 1183 | * Returns the monotonic time since boot in a timespec. |
| 1184 | * |
| 1185 | * This is similar to CLOCK_MONOTONIC/ktime_get_ts, but also |
| 1186 | * includes the time spent in suspend. |
| 1187 | */ |
| 1188 | void get_monotonic_boottime(struct timespec *ts) |
| 1189 | { |
| 1190 | struct timespec tomono, sleep; |
| 1191 | unsigned int seq; |
John Stultz | abb3a4e | 2011-02-14 17:52:09 -0800 | [diff] [blame] | 1192 | |
| 1193 | WARN_ON(timekeeping_suspended); |
| 1194 | |
| 1195 | do { |
John Stultz | 70471f2 | 2011-11-14 12:48:10 -0800 | [diff] [blame] | 1196 | seq = read_seqbegin(&timekeeper.lock); |
John Stultz | 1e75fa8 | 2012-07-13 01:21:53 -0400 | [diff] [blame^] | 1197 | ts->tv_sec = timekeeper.xtime_sec; |
| 1198 | ts->tv_nsec = timekeeping_get_ns(); |
John Stultz | d9f7217 | 2011-11-14 11:29:32 -0800 | [diff] [blame] | 1199 | tomono = timekeeper.wall_to_monotonic; |
John Stultz | 00c5fb7 | 2011-11-14 11:23:15 -0800 | [diff] [blame] | 1200 | sleep = timekeeper.total_sleep_time; |
John Stultz | abb3a4e | 2011-02-14 17:52:09 -0800 | [diff] [blame] | 1201 | |
John Stultz | 70471f2 | 2011-11-14 12:48:10 -0800 | [diff] [blame] | 1202 | } while (read_seqretry(&timekeeper.lock, seq)); |
John Stultz | abb3a4e | 2011-02-14 17:52:09 -0800 | [diff] [blame] | 1203 | |
| 1204 | set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec + sleep.tv_sec, |
John Stultz | 1e75fa8 | 2012-07-13 01:21:53 -0400 | [diff] [blame^] | 1205 | ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec); |
John Stultz | abb3a4e | 2011-02-14 17:52:09 -0800 | [diff] [blame] | 1206 | } |
| 1207 | EXPORT_SYMBOL_GPL(get_monotonic_boottime); |
| 1208 | |
| 1209 | /** |
| 1210 | * ktime_get_boottime - Returns monotonic time since boot in a ktime |
| 1211 | * |
| 1212 | * Returns the monotonic time since boot in a ktime |
| 1213 | * |
| 1214 | * This is similar to CLOCK_MONOTONIC/ktime_get, but also |
| 1215 | * includes the time spent in suspend. |
| 1216 | */ |
| 1217 | ktime_t ktime_get_boottime(void) |
| 1218 | { |
| 1219 | struct timespec ts; |
| 1220 | |
| 1221 | get_monotonic_boottime(&ts); |
| 1222 | return timespec_to_ktime(ts); |
| 1223 | } |
| 1224 | EXPORT_SYMBOL_GPL(ktime_get_boottime); |
| 1225 | |
Tomas Janousek | 7c3f1a5 | 2007-07-15 23:39:41 -0700 | [diff] [blame] | 1226 | /** |
| 1227 | * monotonic_to_bootbased - Convert the monotonic time to boot based. |
| 1228 | * @ts: pointer to the timespec to be converted |
| 1229 | */ |
| 1230 | void monotonic_to_bootbased(struct timespec *ts) |
| 1231 | { |
John Stultz | 00c5fb7 | 2011-11-14 11:23:15 -0800 | [diff] [blame] | 1232 | *ts = timespec_add(*ts, timekeeper.total_sleep_time); |
Tomas Janousek | 7c3f1a5 | 2007-07-15 23:39:41 -0700 | [diff] [blame] | 1233 | } |
Jason Wang | c93d89f | 2010-01-27 19:13:40 +0800 | [diff] [blame] | 1234 | EXPORT_SYMBOL_GPL(monotonic_to_bootbased); |
john stultz | 2c6b47d | 2007-07-24 17:47:43 -0700 | [diff] [blame] | 1235 | |
john stultz | 17c38b7 | 2007-07-24 18:38:34 -0700 | [diff] [blame] | 1236 | unsigned long get_seconds(void) |
| 1237 | { |
John Stultz | 1e75fa8 | 2012-07-13 01:21:53 -0400 | [diff] [blame^] | 1238 | return timekeeper.xtime_sec; |
john stultz | 17c38b7 | 2007-07-24 18:38:34 -0700 | [diff] [blame] | 1239 | } |
| 1240 | EXPORT_SYMBOL(get_seconds); |
| 1241 | |
john stultz | da15cfd | 2009-08-19 19:13:34 -0700 | [diff] [blame] | 1242 | struct timespec __current_kernel_time(void) |
| 1243 | { |
John Stultz | 1e75fa8 | 2012-07-13 01:21:53 -0400 | [diff] [blame^] | 1244 | return tk_xtime(&timekeeper); |
john stultz | da15cfd | 2009-08-19 19:13:34 -0700 | [diff] [blame] | 1245 | } |
john stultz | 17c38b7 | 2007-07-24 18:38:34 -0700 | [diff] [blame] | 1246 | |
john stultz | 2c6b47d | 2007-07-24 17:47:43 -0700 | [diff] [blame] | 1247 | struct timespec current_kernel_time(void) |
| 1248 | { |
| 1249 | struct timespec now; |
| 1250 | unsigned long seq; |
| 1251 | |
| 1252 | do { |
John Stultz | 70471f2 | 2011-11-14 12:48:10 -0800 | [diff] [blame] | 1253 | seq = read_seqbegin(&timekeeper.lock); |
Linus Torvalds | 83f57a1 | 2009-12-22 14:10:37 -0800 | [diff] [blame] | 1254 | |
John Stultz | 1e75fa8 | 2012-07-13 01:21:53 -0400 | [diff] [blame^] | 1255 | now = tk_xtime(&timekeeper); |
John Stultz | 70471f2 | 2011-11-14 12:48:10 -0800 | [diff] [blame] | 1256 | } while (read_seqretry(&timekeeper.lock, seq)); |
john stultz | 2c6b47d | 2007-07-24 17:47:43 -0700 | [diff] [blame] | 1257 | |
| 1258 | return now; |
| 1259 | } |
john stultz | 2c6b47d | 2007-07-24 17:47:43 -0700 | [diff] [blame] | 1260 | EXPORT_SYMBOL(current_kernel_time); |
john stultz | da15cfd | 2009-08-19 19:13:34 -0700 | [diff] [blame] | 1261 | |
| 1262 | struct timespec get_monotonic_coarse(void) |
| 1263 | { |
| 1264 | struct timespec now, mono; |
| 1265 | unsigned long seq; |
| 1266 | |
| 1267 | do { |
John Stultz | 70471f2 | 2011-11-14 12:48:10 -0800 | [diff] [blame] | 1268 | seq = read_seqbegin(&timekeeper.lock); |
Linus Torvalds | 83f57a1 | 2009-12-22 14:10:37 -0800 | [diff] [blame] | 1269 | |
John Stultz | 1e75fa8 | 2012-07-13 01:21:53 -0400 | [diff] [blame^] | 1270 | now = tk_xtime(&timekeeper); |
John Stultz | d9f7217 | 2011-11-14 11:29:32 -0800 | [diff] [blame] | 1271 | mono = timekeeper.wall_to_monotonic; |
John Stultz | 70471f2 | 2011-11-14 12:48:10 -0800 | [diff] [blame] | 1272 | } while (read_seqretry(&timekeeper.lock, seq)); |
john stultz | da15cfd | 2009-08-19 19:13:34 -0700 | [diff] [blame] | 1273 | |
| 1274 | set_normalized_timespec(&now, now.tv_sec + mono.tv_sec, |
| 1275 | now.tv_nsec + mono.tv_nsec); |
| 1276 | return now; |
| 1277 | } |
Torben Hohn | 871cf1e | 2011-01-27 15:58:55 +0100 | [diff] [blame] | 1278 | |
| 1279 | /* |
| 1280 | * The 64-bit jiffies value is not atomic - you MUST NOT read it |
| 1281 | * without sampling the sequence number in xtime_lock. |
| 1282 | * jiffies is defined in the linker script... |
| 1283 | */ |
| 1284 | void do_timer(unsigned long ticks) |
| 1285 | { |
| 1286 | jiffies_64 += ticks; |
| 1287 | update_wall_time(); |
| 1288 | calc_global_load(ticks); |
| 1289 | } |
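/*
 * Sketch of the reader side hinted at in the comment above do_timer():
 * on 32-bit machines jiffies_64 must be sampled under the xtime_lock
 * sequence counter.  The kernel's own helper for this is
 * get_jiffies_64(); the pattern looks roughly like:
 */
static inline u64 demo_get_jiffies_64(void)
{
	unsigned long seq;
	u64 ret;

	do {
		seq = read_seqbegin(&xtime_lock);
		ret = jiffies_64;
	} while (read_seqretry(&xtime_lock, seq));
	return ret;
}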
Torben Hohn | 48cf76f7 | 2011-01-27 15:59:05 +0100 | [diff] [blame] | 1290 | |
| 1291 | /** |
John Stultz | 314ac37 | 2011-02-14 18:43:08 -0800 | [diff] [blame] | 1292 | * get_xtime_and_monotonic_and_sleep_offset() - get xtime, wall_to_monotonic, |
| 1293 | * and sleep offsets. |
Torben Hohn | 48cf76f7 | 2011-01-27 15:59:05 +0100 | [diff] [blame] | 1294 | * @xtim: pointer to timespec to be set with xtime |
| 1295 | * @wtom: pointer to timespec to be set with wall_to_monotonic |
John Stultz | 314ac37 | 2011-02-14 18:43:08 -0800 | [diff] [blame] | 1296 | * @sleep: pointer to timespec to be set with time in suspend |
Torben Hohn | 48cf76f7 | 2011-01-27 15:59:05 +0100 | [diff] [blame] | 1297 | */ |
John Stultz | 314ac37 | 2011-02-14 18:43:08 -0800 | [diff] [blame] | 1298 | void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim, |
| 1299 | struct timespec *wtom, struct timespec *sleep) |
Torben Hohn | 48cf76f7 | 2011-01-27 15:59:05 +0100 | [diff] [blame] | 1300 | { |
| 1301 | unsigned long seq; |
| 1302 | |
| 1303 | do { |
John Stultz | 70471f2 | 2011-11-14 12:48:10 -0800 | [diff] [blame] | 1304 | seq = read_seqbegin(&timekeeper.lock); |
John Stultz | 1e75fa8 | 2012-07-13 01:21:53 -0400 | [diff] [blame^] | 1305 | *xtim = tk_xtime(&timekeeper); |
John Stultz | d9f7217 | 2011-11-14 11:29:32 -0800 | [diff] [blame] | 1306 | *wtom = timekeeper.wall_to_monotonic; |
John Stultz | 00c5fb7 | 2011-11-14 11:23:15 -0800 | [diff] [blame] | 1307 | *sleep = timekeeper.total_sleep_time; |
John Stultz | 70471f2 | 2011-11-14 12:48:10 -0800 | [diff] [blame] | 1308 | } while (read_seqretry(&timekeeper.lock, seq)); |
Torben Hohn | 48cf76f7 | 2011-01-27 15:59:05 +0100 | [diff] [blame] | 1309 | } |
Torben Hohn | f0af911a9 | 2011-01-27 15:59:10 +0100 | [diff] [blame] | 1310 | |
Thomas Gleixner | f6c06ab | 2012-07-10 18:43:24 -0400 | [diff] [blame] | 1311 | #ifdef CONFIG_HIGH_RES_TIMERS |
| 1312 | /** |
| 1313 | * ktime_get_update_offsets - hrtimer helper |
| 1314 | * @offs_real: pointer to storage for monotonic -> realtime offset |
| 1315 | * @offs_boot: pointer to storage for monotonic -> boottime offset |
| 1316 | * |
| 1317 | * Returns current monotonic time and updates the offsets |
| 1318 | * Called from hrtimer_interrupt() or retrigger_next_event() |
| 1319 | */ |
| 1320 | ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot) |
| 1321 | { |
| 1322 | ktime_t now; |
| 1323 | unsigned int seq; |
| 1324 | u64 secs, nsecs; |
| 1325 | |
| 1326 | do { |
| 1327 | seq = read_seqbegin(&timekeeper.lock); |
| 1328 | |
John Stultz | 1e75fa8 | 2012-07-13 01:21:53 -0400 | [diff] [blame^] | 1329 | secs = timekeeper.xtime_sec; |
| 1330 | nsecs = timekeeping_get_ns(); |
Thomas Gleixner | f6c06ab | 2012-07-10 18:43:24 -0400 | [diff] [blame] | 1331 | /* If arch requires, add in gettimeoffset() */ |
| 1332 | nsecs += arch_gettimeoffset(); |
| 1333 | |
| 1334 | *offs_real = timekeeper.offs_real; |
| 1335 | *offs_boot = timekeeper.offs_boot; |
| 1336 | } while (read_seqretry(&timekeeper.lock, seq)); |
| 1337 | |
| 1338 | now = ktime_add_ns(ktime_set(secs, 0), nsecs); |
| 1339 | now = ktime_sub(now, *offs_real); |
| 1340 | return now; |
| 1341 | } |
| 1342 | #endif |
| 1343 | |
Torben Hohn | f0af911a9 | 2011-01-27 15:59:10 +0100 | [diff] [blame] | 1344 | /** |
Thomas Gleixner | 99ee531 | 2011-04-27 14:16:42 +0200 | [diff] [blame] | 1345 | * ktime_get_monotonic_offset() - get wall_to_monotonic in ktime_t format |
| 1346 | */ |
| 1347 | ktime_t ktime_get_monotonic_offset(void) |
| 1348 | { |
| 1349 | unsigned long seq; |
| 1350 | struct timespec wtom; |
| 1351 | |
| 1352 | do { |
John Stultz | 70471f2 | 2011-11-14 12:48:10 -0800 | [diff] [blame] | 1353 | seq = read_seqbegin(&timekeeper.lock); |
John Stultz | d9f7217 | 2011-11-14 11:29:32 -0800 | [diff] [blame] | 1354 | wtom = timekeeper.wall_to_monotonic; |
John Stultz | 70471f2 | 2011-11-14 12:48:10 -0800 | [diff] [blame] | 1355 | } while (read_seqretry(&timekeeper.lock, seq)); |
| 1356 | |
Thomas Gleixner | 99ee531 | 2011-04-27 14:16:42 +0200 | [diff] [blame] | 1357 | return timespec_to_ktime(wtom); |
| 1358 | } |
John Stultz | a80b83b | 2012-02-03 00:19:07 -0800 | [diff] [blame] | 1359 | EXPORT_SYMBOL_GPL(ktime_get_monotonic_offset); |
| 1360 | |
Thomas Gleixner | 99ee531 | 2011-04-27 14:16:42 +0200 | [diff] [blame] | 1361 | |
| 1362 | /** |
Torben Hohn | f0af911a9 | 2011-01-27 15:59:10 +0100 | [diff] [blame] | 1363 | * xtime_update() - advances the timekeeping infrastructure |
| 1364 | * @ticks: number of ticks that have elapsed since the last call. |
| 1365 | * |
| 1366 | * Must be called with interrupts disabled. |
| 1367 | */ |
| 1368 | void xtime_update(unsigned long ticks) |
| 1369 | { |
| 1370 | write_seqlock(&xtime_lock); |
| 1371 | do_timer(ticks); |
| 1372 | write_sequnlock(&xtime_lock); |
| 1373 | } |