/*
 * linux/kernel/time/timekeeping.c
 *
 * Kernel timekeeping code and accessor functions
 *
 * This code was moved from linux/kernel/timer.c.
 * Please see that file for copyright and history logs.
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
#include <linux/stop_machine.h>

/* Structure holding internal timekeeping values. */
struct timekeeper {
	/* Current clocksource used for timekeeping. */
	struct clocksource *clock;
	/* The shift value of the current clocksource. */
	int shift;

	/* Number of clock cycles in one NTP interval. */
	cycle_t cycle_interval;
	/* Number of clock-shifted nanoseconds in one NTP interval. */
	u64 xtime_interval;
	/* Shifted nanoseconds left over when rounding cycle_interval. */
	s64 xtime_remainder;
	/* Raw nanoseconds accumulated per NTP interval. */
	u32 raw_interval;

	/* Clock-shifted nanosecond remainder not stored in xtime.tv_nsec. */
	u64 xtime_nsec;
	/* Difference between accumulated time and NTP time, in
	 * NTP-shifted nanoseconds. */
	s64 ntp_error;
	/* Shift conversion between clock-shifted nanoseconds and
	 * NTP-shifted nanoseconds. */
	int ntp_error_shift;
	/* NTP-adjusted clock multiplier */
	u32 mult;
};

static struct timekeeper timekeeper;

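/*
 * Note on the fixed-point arithmetic (a worked example with made-up
 * values, not taken from any real clocksource): nanoseconds are kept
 * "shifted", i.e. scaled by 2^shift, so sub-nanosecond precision
 * survives the repeated cycles->ns conversions.  With a hypothetical
 * 10 MHz clocksource using shift = 22 and mult = 100 << 22 (one cycle
 * is 100ns):
 *
 *	ns = ((u64)cycles * mult) >> shift
 *
 * so 12345 cycles convert to exactly 1234500ns.  ntp_error_shift
 * (NTP_SCALE_SHIFT - shift) rescales such clock-shifted values to the
 * 2^NTP_SCALE_SHIFT fixed point that the NTP code uses.
 */
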
/**
 * timekeeper_setup_internals - Set up internals to use clocksource clock.
 *
 * @clock: Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static void timekeeper_setup_internals(struct clocksource *clock)
{
	cycle_t interval;
	u64 tmp, ntpinterval;

	timekeeper.clock = clock;
	clock->cycle_last = clock->read(clock);

	/* Do the ns -> cycle conversion first, using original mult */
	tmp = NTP_INTERVAL_LENGTH;
	tmp <<= clock->shift;
	ntpinterval = tmp;
	tmp += clock->mult/2;
	do_div(tmp, clock->mult);
	if (tmp == 0)
		tmp = 1;

	interval = (cycle_t) tmp;
	timekeeper.cycle_interval = interval;

	/* Go back from cycles -> shifted ns */
	timekeeper.xtime_interval = (u64) interval * clock->mult;
	timekeeper.xtime_remainder = ntpinterval - timekeeper.xtime_interval;
	timekeeper.raw_interval =
		((u64) interval * clock->mult) >> clock->shift;

	timekeeper.xtime_nsec = 0;
	timekeeper.shift = clock->shift;

	timekeeper.ntp_error = 0;
	timekeeper.ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;

	/*
	 * The timekeeper keeps its own mult value for the currently
	 * active clocksource. This value will be adjusted via NTP
	 * to counteract clock drift.
	 */
	timekeeper.mult = clock->mult;
}

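/*
 * Worked example for timekeeper_setup_internals() (made-up values:
 * assume HZ = 100 so NTP_INTERVAL_LENGTH works out to 10ms, plus the
 * hypothetical 10 MHz clocksource above, mult = 100 << 22, shift = 22):
 *
 *	tmp = 10000000 << 22;		10ms in shifted nanoseconds
 *	do_div(tmp, 100 << 22);		-> 100000 cycles
 *
 * cycle_interval becomes 100000 (10ms worth of 100ns cycles), and
 * xtime_interval = 100000 * (100 << 22) is exactly 10ms in shifted
 * nanoseconds, so xtime_remainder is 0 here; it is nonzero whenever
 * the cycle division has to round.
 */
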
/* Timekeeper helper functions. */
static inline s64 timekeeping_get_ns(void)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;

	/* read clocksource: */
	clock = timekeeper.clock;
	cycle_now = clock->read(clock);

	/* calculate the delta since the last update_wall_time: */
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

	/* return the delta converted to nanoseconds, using the NTP-adjusted mult: */
	return clocksource_cyc2ns(cycle_delta, timekeeper.mult,
				  timekeeper.shift);
}

static inline s64 timekeeping_get_ns_raw(void)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;

	/* read clocksource: */
	clock = timekeeper.clock;
	cycle_now = clock->read(clock);

	/* calculate the delta since the last update_wall_time: */
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

	/* return the delta converted to nanoseconds, using the unadjusted clock mult/shift: */
	return clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
}

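/*
 * The two helpers above differ only in the multiplier they use:
 * timekeeper.mult is steered by NTP via timekeeping_adjust(), while
 * clock->mult is the fixed hardware calibration.  As an illustrative
 * consequence, a clocksource running 1 ppm fast makes the raw clock
 * (CLOCK_MONOTONIC_RAW) gain about 1us per second relative to the
 * NTP-corrected clocks.
 */
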
/*
 * This seqlock protects us from races in SMP while
 * playing with xtime.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);

/*
 * xtime holds the current wall time.
 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
 * for sub-jiffy times) to get to monotonic time.  Monotonic is pegged
 * at zero at system boot time, so wall_to_monotonic will be negative;
 * however, we will ALWAYS keep the tv_nsec part positive so we can use
 * the usual normalization.
 *
 * wall_to_monotonic is adjusted after resume from suspend so that the
 * monotonic time does not jump.  We need to add total_sleep_time to
 * wall_to_monotonic to get the real boot-based time offset.
 *
 * - wall_to_monotonic is no longer the boot time; getboottime must be
 *   used instead.
 */
static struct timespec xtime __attribute__ ((aligned (16)));
static struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
static struct timespec total_sleep_time;

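/*
 * Illustrative example (made-up wall clock values): if the persistent
 * clock said 1000s at boot, timekeeping_init() sets wall_to_monotonic
 * to -1000s, so at a later wall time xtime = 1004.5s:
 *
 *	monotonic = xtime + wall_to_monotonic = 4.5s
 *
 * A settimeofday() that steps xtime forward by 60s subtracts the same
 * 60s from wall_to_monotonic, leaving the monotonic clock unaffected.
 */
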
/*
 * The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock.
 */
static struct timespec raw_time;

/* flag set when timekeeping is suspended */
int __read_mostly timekeeping_suspended;

/* must hold xtime_lock */
void timekeeping_leap_insert(int leapsecond)
{
	xtime.tv_sec += leapsecond;
	wall_to_monotonic.tv_sec -= leapsecond;
	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
			timekeeper.mult);
}

/**
 * timekeeping_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void timekeeping_forward_now(void)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;
	s64 nsec;

	clock = timekeeper.clock;
	cycle_now = clock->read(clock);
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
	clock->cycle_last = cycle_now;

	nsec = clocksource_cyc2ns(cycle_delta, timekeeper.mult,
				  timekeeper.shift);

	/* If arch requires, add in gettimeoffset() */
	nsec += arch_gettimeoffset();

	timespec_add_ns(&xtime, nsec);

	nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
	timespec_add_ns(&raw_time, nsec);
}

/**
 * getnstimeofday - Returns the time of day in a timespec
 * @ts: pointer to the timespec to be set
 *
 * Returns the time of day in a timespec.
 */
void getnstimeofday(struct timespec *ts)
{
	unsigned long seq;
	s64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&xtime_lock);

		*ts = xtime;
		nsecs = timekeeping_get_ns();

		/* If arch requires, add in gettimeoffset() */
		nsecs += arch_gettimeoffset();

	} while (read_seqretry(&xtime_lock, seq));

	timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getnstimeofday);

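/*
 * Typical caller sketch (a hypothetical driver snippet, shown only to
 * illustrate the accessor; not from this file):
 *
 *	struct timespec ts;
 *
 *	getnstimeofday(&ts);
 *	pr_info("wall time: %ld.%09ld\n", ts.tv_sec, ts.tv_nsec);
 */
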
ktime_t ktime_get(void)
{
	unsigned int seq;
	s64 secs, nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&xtime_lock);
		secs = xtime.tv_sec + wall_to_monotonic.tv_sec;
		nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec;
		nsecs += timekeeping_get_ns();
		/* If arch requires, add in gettimeoffset() */
		nsecs += arch_gettimeoffset();

	} while (read_seqretry(&xtime_lock, seq));
	/*
	 * Use ktime_set/ktime_add_ns to create a proper ktime on
	 * 32-bit architectures without CONFIG_KTIME_SCALAR.
	 */
	return ktime_add_ns(ktime_set(secs, 0), nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get);

/**
 * ktime_get_ts - get the monotonic clock in timespec format
 * @ts: pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts(struct timespec *ts)
{
	struct timespec tomono;
	unsigned int seq;
	s64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&xtime_lock);
		*ts = xtime;
		tomono = wall_to_monotonic;
		nsecs = timekeeping_get_ns();
		/* If arch requires, add in gettimeoffset() */
		nsecs += arch_gettimeoffset();

	} while (read_seqretry(&xtime_lock, seq));

	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
				ts->tv_nsec + tomono.tv_nsec + nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);

#ifdef CONFIG_NTP_PPS

/**
 * getnstime_raw_and_real - get day and raw monotonic time in timespec format
 * @ts_raw: pointer to the timespec to be set to raw monotonic time
 * @ts_real: pointer to the timespec to be set to the time of day
 *
 * This function reads both the time of day and raw monotonic time at the
 * same time atomically and stores the resulting timestamps in timespec
 * format.
 */
void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
{
	unsigned long seq;
	s64 nsecs_raw, nsecs_real;

	WARN_ON_ONCE(timekeeping_suspended);

	do {
		u32 arch_offset;

		seq = read_seqbegin(&xtime_lock);

		*ts_raw = raw_time;
		*ts_real = xtime;

		nsecs_raw = timekeeping_get_ns_raw();
		nsecs_real = timekeeping_get_ns();

		/* If arch requires, add in gettimeoffset() */
		arch_offset = arch_gettimeoffset();
		nsecs_raw += arch_offset;
		nsecs_real += arch_offset;

	} while (read_seqretry(&xtime_lock, seq));

	timespec_add_ns(ts_raw, nsecs_raw);
	timespec_add_ns(ts_real, nsecs_real);
}
EXPORT_SYMBOL(getnstime_raw_and_real);

#endif /* CONFIG_NTP_PPS */

/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv: pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday()
 */
void do_gettimeofday(struct timeval *tv)
{
	struct timespec now;

	getnstimeofday(&now);
	tv->tv_sec = now.tv_sec;
	tv->tv_usec = now.tv_nsec/1000;
}
EXPORT_SYMBOL(do_gettimeofday);

/**
 * do_settimeofday - Sets the time of day
 * @tv: pointer to the timespec variable containing the new time
 *
 * Sets the time of day to the new time, updates NTP and notifies hrtimers.
 */
int do_settimeofday(const struct timespec *tv)
{
	struct timespec ts_delta;
	unsigned long flags;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&xtime_lock, flags);

	timekeeping_forward_now();

	ts_delta.tv_sec = tv->tv_sec - xtime.tv_sec;
	ts_delta.tv_nsec = tv->tv_nsec - xtime.tv_nsec;
	wall_to_monotonic = timespec_sub(wall_to_monotonic, ts_delta);

	xtime = *tv;

	timekeeper.ntp_error = 0;
	ntp_clear();

	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
			timekeeper.mult);

	write_sequnlock_irqrestore(&xtime_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return 0;
}
EXPORT_SYMBOL(do_settimeofday);

/**
 * timekeeping_inject_offset - Adds or subtracts from the current time.
 * @ts: pointer to the timespec variable containing the offset
 *
 * Adds or subtracts an offset value from the current time.
 */
int timekeeping_inject_offset(struct timespec *ts)
{
	unsigned long flags;

	if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&xtime_lock, flags);

	timekeeping_forward_now();

	xtime = timespec_add(xtime, *ts);
	wall_to_monotonic = timespec_sub(wall_to_monotonic, *ts);

	timekeeper.ntp_error = 0;
	ntp_clear();

	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
			timekeeper.mult);

	write_sequnlock_irqrestore(&xtime_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return 0;
}
EXPORT_SYMBOL(timekeeping_inject_offset);

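/*
 * Example (hypothetical values): stepping the clock back by half a
 * second without going through do_settimeofday():
 *
 *	struct timespec delta = { .tv_sec = -1, .tv_nsec = 500000000 };
 *
 *	timekeeping_inject_offset(&delta);
 *
 * The timespec stays normalized: -0.5s is expressed as -1s + 0.5s,
 * which also satisfies the tv_nsec range check above.
 */
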
/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static int change_clocksource(void *data)
{
	struct clocksource *new, *old;

	new = (struct clocksource *) data;

	timekeeping_forward_now();
	if (!new->enable || new->enable(new) == 0) {
		old = timekeeper.clock;
		timekeeper_setup_internals(new);
		if (old->disable)
			old->disable(old);
	}
	return 0;
}

/**
 * timekeeping_notify - Install a new clock source
 * @clock: pointer to the clock source
 *
 * This function is called from clocksource.c after a new, better clock
 * source has been registered. The caller holds the clocksource_mutex.
 */
void timekeeping_notify(struct clocksource *clock)
{
	if (timekeeper.clock == clock)
		return;
	stop_machine(change_clocksource, clock, NULL);
	tick_clock_notify();
}

/**
 * ktime_get_real - get the real (wall-) time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get_real(void)
{
	struct timespec now;

	getnstimeofday(&now);

	return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get_real);

/**
 * getrawmonotonic - Returns the raw monotonic time in a timespec
 * @ts: pointer to the timespec to be set
 *
 * Returns the raw monotonic time (completely un-modified by ntp)
 */
void getrawmonotonic(struct timespec *ts)
{
	unsigned long seq;
	s64 nsecs;

	do {
		seq = read_seqbegin(&xtime_lock);
		nsecs = timekeeping_get_ns_raw();
		*ts = raw_time;

	} while (read_seqretry(&xtime_lock, seq));

	timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getrawmonotonic);

/**
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
 */
int timekeeping_valid_for_hres(void)
{
	unsigned long seq;
	int ret;

	do {
		seq = read_seqbegin(&xtime_lock);

		ret = timekeeper.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;

	} while (read_seqretry(&xtime_lock, seq));

	return ret;
}

/**
 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
 *
 * Caller must observe xtime_lock via read_seqbegin/read_seqretry to
 * ensure that the clocksource does not change!
 */
u64 timekeeping_max_deferment(void)
{
	return timekeeper.clock->max_idle_ns;
}

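/*
 * Illustrative numbers (the hypothetical 10 MHz clocksource from the
 * top of the file, with a 32-bit counter): the hardware wraps after
 * 2^32 cycles, i.e. roughly 429 seconds, so max_idle_ns must stay
 * safely below that.  NO_HZ uses this bound to limit how long a CPU
 * may sleep without a timekeeping update.
 */
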
/**
 * read_persistent_clock - Return time from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Reads the time from the battery backed persistent clock.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 * XXX - Do be sure to remove it once all arches implement it.
 */
void __attribute__((weak)) read_persistent_clock(struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}

/**
 * read_boot_clock - Return time of the system start.
 *
 * Weak dummy function for arches that do not yet support it.
 * Function to read the exact time the system has been started.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 * XXX - Do be sure to remove it once all arches implement it.
 */
void __attribute__((weak)) read_boot_clock(struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}

/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
	struct clocksource *clock;
	unsigned long flags;
	struct timespec now, boot;

	read_persistent_clock(&now);
	read_boot_clock(&boot);

	write_seqlock_irqsave(&xtime_lock, flags);

	ntp_init();

	clock = clocksource_default_clock();
	if (clock->enable)
		clock->enable(clock);
	timekeeper_setup_internals(clock);

	xtime.tv_sec = now.tv_sec;
	xtime.tv_nsec = now.tv_nsec;
	raw_time.tv_sec = 0;
	raw_time.tv_nsec = 0;
	if (boot.tv_sec == 0 && boot.tv_nsec == 0) {
		boot.tv_sec = xtime.tv_sec;
		boot.tv_nsec = xtime.tv_nsec;
	}
	set_normalized_timespec(&wall_to_monotonic,
				-boot.tv_sec, -boot.tv_nsec);
	total_sleep_time.tv_sec = 0;
	total_sleep_time.tv_nsec = 0;
	write_sequnlock_irqrestore(&xtime_lock, flags);
}

/* time at which suspend began */
static struct timespec timekeeping_suspend_time;

/**
 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
 * @delta: pointer to a timespec delta value
 *
 * Takes a timespec offset measuring a suspend interval and properly
 * adds the sleep offset to the timekeeping variables.
 */
static void __timekeeping_inject_sleeptime(struct timespec *delta)
{
	xtime = timespec_add(xtime, *delta);
	wall_to_monotonic = timespec_sub(wall_to_monotonic, *delta);
	total_sleep_time = timespec_add(total_sleep_time, *delta);
}

/**
 * timekeeping_inject_sleeptime - Adds suspend interval to timekeeping values
 * @delta: pointer to a timespec delta value
 *
 * This hook is for architectures that cannot support read_persistent_clock
 * because their RTC/persistent clock is only accessible when irqs are enabled.
 *
 * This function should only be called by rtc_resume(), and allows
 * a suspend offset to be injected into the timekeeping values.
 */
void timekeeping_inject_sleeptime(struct timespec *delta)
{
	unsigned long flags;
	struct timespec ts;

	/* Make sure we don't set the clock twice */
	read_persistent_clock(&ts);
	if (!(ts.tv_sec == 0 && ts.tv_nsec == 0))
		return;

	write_seqlock_irqsave(&xtime_lock, flags);
	timekeeping_forward_now();

	__timekeeping_inject_sleeptime(delta);

	timekeeper.ntp_error = 0;
	ntp_clear();
	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
			timekeeper.mult);

	write_sequnlock_irqrestore(&xtime_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();
}

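/*
 * Caller sketch (schematic, following the rtc_resume() flow described
 * above): the RTC driver samples the RTC before suspend and after
 * resume and feeds the difference in here:
 *
 *	struct timespec delta = timespec_sub(time_after, time_before);
 *
 *	timekeeping_inject_sleeptime(&delta);
 *
 * time_after/time_before are hypothetical names for the two RTC
 * readings.
 */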

/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
static void timekeeping_resume(void)
{
	unsigned long flags;
	struct timespec ts;

	read_persistent_clock(&ts);

	clocksource_resume();

	write_seqlock_irqsave(&xtime_lock, flags);

	if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
		ts = timespec_sub(ts, timekeeping_suspend_time);
		__timekeeping_inject_sleeptime(&ts);
	}
	/* re-base the last cycle value */
	timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
	timekeeper.ntp_error = 0;
	timekeeping_suspended = 0;
	write_sequnlock_irqrestore(&xtime_lock, flags);

	touch_softlockup_watchdog();

	clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);

	/* Resume hrtimers */
	hrtimers_resume();
}

static int timekeeping_suspend(void)
{
	unsigned long flags;

	read_persistent_clock(&timekeeping_suspend_time);

	write_seqlock_irqsave(&xtime_lock, flags);
	timekeeping_forward_now();
	timekeeping_suspended = 1;
	write_sequnlock_irqrestore(&xtime_lock, flags);

	clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
	clocksource_suspend();

	return 0;
}

/* syscore resume/suspend bits for timekeeping */
static struct syscore_ops timekeeping_syscore_ops = {
	.resume = timekeeping_resume,
	.suspend = timekeeping_suspend,
};

static int __init timekeeping_init_ops(void)
{
	register_syscore_ops(&timekeeping_syscore_ops);
	return 0;
}

device_initcall(timekeeping_init_ops);

/*
 * If the error is already larger, we look ahead even further
 * to compensate for late or lost adjustments.
 */
static __always_inline int timekeeping_bigadjust(s64 error, s64 *interval,
						 s64 *offset)
{
	s64 tick_error, i;
	u32 look_ahead, adj;
	s32 error2, mult;

	/*
	 * Use the current error value to determine how much to look ahead.
	 * The larger the error the slower we adjust for it to avoid problems
	 * with losing too many ticks, otherwise we would overadjust and
	 * produce an even larger error.  The smaller the adjustment the
	 * faster we try to adjust for it, as lost ticks can do less harm
	 * here.  This is tuned so that an error of about 1 msec is adjusted
	 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
	 */
	error2 = timekeeper.ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
	error2 = abs(error2);
	for (look_ahead = 0; error2 > 0; look_ahead++)
		error2 >>= 2;

	/*
	 * Now calculate the error in (1 << look_ahead) ticks, but first
	 * remove the single look ahead already included in the error.
	 */
	tick_error = tick_length >> (timekeeper.ntp_error_shift + 1);
	tick_error -= timekeeper.xtime_interval >> 1;
	error = ((error - tick_error) >> look_ahead) + tick_error;

	/* Finally calculate the adjustment shift value. */
	i = *interval;
	mult = 1;
	if (error < 0) {
		error = -error;
		*interval = -*interval;
		*offset = -*offset;
		mult = -1;
	}
	for (adj = 0; error > i; adj++)
		error >>= 1;

	*interval <<= adj;
	*offset <<= adj;
	return mult << adj;
}

/*
 * Adjust the multiplier to reduce the error value; this is optimized
 * for the most common adjustments of -1, 0 and 1. For other values
 * we can do a bit more work.
 */
static void timekeeping_adjust(s64 offset)
{
	s64 error, interval = timekeeper.cycle_interval;
	int adj;

	error = timekeeper.ntp_error >> (timekeeper.ntp_error_shift - 1);
	if (error > interval) {
		error >>= 2;
		if (likely(error <= interval))
			adj = 1;
		else
			adj = timekeeping_bigadjust(error, &interval, &offset);
	} else if (error < -interval) {
		error >>= 2;
		if (likely(error >= -interval)) {
			adj = -1;
			interval = -interval;
			offset = -offset;
		} else
			adj = timekeeping_bigadjust(error, &interval, &offset);
	} else
		return;

	timekeeper.mult += adj;
	timekeeper.xtime_interval += interval;
	timekeeper.xtime_nsec -= offset;
	timekeeper.ntp_error -= (interval - offset) <<
				timekeeper.ntp_error_shift;
}

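/*
 * Back-of-the-envelope example (reusing the hypothetical
 * mult = 100 << 22 clocksource): a +1 step of timekeeper.mult changes
 * the clock rate by 1/mult in relative terms, about 2.4 parts per
 * billion here, so the common -1/0/+1 adjustments steer the clock very
 * gently and only large errors fall through to timekeeping_bigadjust().
 */
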
john stultz | 8524070 | 2007-05-08 00:27:59 -0700 | [diff] [blame] | 807 | /** |
john stultz | a092ff0 | 2009-10-02 16:17:53 -0700 | [diff] [blame] | 808 | * logarithmic_accumulation - shifted accumulation of cycles |
| 809 | * |
| 810 | * This functions accumulates a shifted interval of cycles into |
| 811 | * into a shifted interval nanoseconds. Allows for O(log) accumulation |
| 812 | * loop. |
| 813 | * |
| 814 | * Returns the unconsumed cycles. |
| 815 | */ |
| 816 | static cycle_t logarithmic_accumulation(cycle_t offset, int shift) |
| 817 | { |
| 818 | u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift; |
Jason Wessel | deda2e8 | 2010-08-09 14:20:09 -0700 | [diff] [blame] | 819 | u64 raw_nsecs; |
john stultz | a092ff0 | 2009-10-02 16:17:53 -0700 | [diff] [blame] | 820 | |
| 821 | /* If the offset is smaller then a shifted interval, do nothing */ |
| 822 | if (offset < timekeeper.cycle_interval<<shift) |
| 823 | return offset; |
| 824 | |
| 825 | /* Accumulate one shifted interval */ |
| 826 | offset -= timekeeper.cycle_interval << shift; |
| 827 | timekeeper.clock->cycle_last += timekeeper.cycle_interval << shift; |
| 828 | |
| 829 | timekeeper.xtime_nsec += timekeeper.xtime_interval << shift; |
| 830 | while (timekeeper.xtime_nsec >= nsecps) { |
| 831 | timekeeper.xtime_nsec -= nsecps; |
| 832 | xtime.tv_sec++; |
| 833 | second_overflow(); |
| 834 | } |
| 835 | |
Jason Wessel | deda2e8 | 2010-08-09 14:20:09 -0700 | [diff] [blame] | 836 | /* Accumulate raw time */ |
| 837 | raw_nsecs = timekeeper.raw_interval << shift; |
| 838 | raw_nsecs += raw_time.tv_nsec; |
John Stultz | c7dcf87 | 2010-08-13 11:30:58 -0700 | [diff] [blame] | 839 | if (raw_nsecs >= NSEC_PER_SEC) { |
| 840 | u64 raw_secs = raw_nsecs; |
| 841 | raw_nsecs = do_div(raw_secs, NSEC_PER_SEC); |
| 842 | raw_time.tv_sec += raw_secs; |
john stultz | a092ff0 | 2009-10-02 16:17:53 -0700 | [diff] [blame] | 843 | } |
Jason Wessel | deda2e8 | 2010-08-09 14:20:09 -0700 | [diff] [blame] | 844 | raw_time.tv_nsec = raw_nsecs; |
john stultz | a092ff0 | 2009-10-02 16:17:53 -0700 | [diff] [blame] | 845 | |
| 846 | /* Accumulate error between NTP and clock interval */ |
| 847 | timekeeper.ntp_error += tick_length << shift; |
Kasper Pedersen | a386b5a | 2010-10-20 15:55:15 -0700 | [diff] [blame] | 848 | timekeeper.ntp_error -= |
| 849 | (timekeeper.xtime_interval + timekeeper.xtime_remainder) << |
john stultz | a092ff0 | 2009-10-02 16:17:53 -0700 | [diff] [blame] | 850 | (timekeeper.ntp_error_shift + shift); |
| 851 | |
| 852 | return offset; |
| 853 | } |
| 854 | |
Linus Torvalds | 83f57a1 | 2009-12-22 14:10:37 -0800 | [diff] [blame] | 855 | |
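/*
 * Worked example of the logarithmic scheme (made-up tick counts): if a
 * NO_HZ CPU wakes up with offset = 37 cycle_intervals pending, the loop
 * in update_wall_time() below starts at shift ~= ilog2(37) = 5 and
 * accumulates 32, then 4, then 1 intervals as shift steps down (the
 * in-between calls return immediately), instead of taking 37
 * single-interval passes.
 */
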
/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 * Called from the timer interrupt, must hold a write on xtime_lock.
 */
static void update_wall_time(void)
{
	struct clocksource *clock;
	cycle_t offset;
	int shift = 0, maxshift;

	/* Make sure we're fully resumed: */
	if (unlikely(timekeeping_suspended))
		return;

	clock = timekeeper.clock;

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
	offset = timekeeper.cycle_interval;
#else
	offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
#endif
	timekeeper.xtime_nsec = (s64)xtime.tv_nsec << timekeeper.shift;

	/*
	 * With NO_HZ we may have to accumulate many cycle_intervals
	 * (think "ticks") worth of time at once. To do this efficiently,
	 * we calculate the largest doubling multiple of cycle_intervals
	 * that is smaller than the offset.  We then accumulate that
	 * chunk in one go, and then try to consume the next smaller
	 * doubled multiple.
	 */
	shift = ilog2(offset) - ilog2(timekeeper.cycle_interval);
	shift = max(0, shift);
	/* Bound shift to one less than what overflows tick_length */
	maxshift = (8*sizeof(tick_length) - (ilog2(tick_length)+1)) - 1;
	shift = min(shift, maxshift);
	while (offset >= timekeeper.cycle_interval) {
		offset = logarithmic_accumulation(offset, shift);
		if (offset < timekeeper.cycle_interval << shift)
			shift--;
	}

	/* correct the clock when NTP error is too big */
	timekeeping_adjust(offset);

	/*
	 * Since in the loop above, we accumulate any amount of time
	 * in xtime_nsec over a second into xtime.tv_sec, it's possible for
	 * xtime_nsec to be fairly small after the loop. Further, if we're
	 * slightly speeding the clocksource up in timekeeping_adjust(),
	 * it's possible the required corrective factor to xtime_nsec could
	 * cause it to underflow.
	 *
	 * Now, we cannot simply roll the accumulated second back, since
	 * the NTP subsystem has been notified via second_overflow. So
	 * instead we push xtime_nsec forward by the amount we underflowed,
	 * and add that amount into the error.
	 *
	 * We'll correct this error next time through this function, when
	 * xtime_nsec is not as small.
	 */
	if (unlikely((s64)timekeeper.xtime_nsec < 0)) {
		s64 neg = -(s64)timekeeper.xtime_nsec;
		timekeeper.xtime_nsec = 0;
		timekeeper.ntp_error += neg << timekeeper.ntp_error_shift;
	}

	/*
	 * Store full nanoseconds into xtime after rounding it up and
	 * add the remainder to the error difference.
	 */
	xtime.tv_nsec = ((s64) timekeeper.xtime_nsec >> timekeeper.shift) + 1;
	timekeeper.xtime_nsec -= (s64) xtime.tv_nsec << timekeeper.shift;
	timekeeper.ntp_error += timekeeper.xtime_nsec <<
				timekeeper.ntp_error_shift;

	/*
	 * Finally, make sure that after the rounding
	 * xtime.tv_nsec isn't larger than NSEC_PER_SEC
	 */
	if (unlikely(xtime.tv_nsec >= NSEC_PER_SEC)) {
		xtime.tv_nsec -= NSEC_PER_SEC;
		xtime.tv_sec++;
		second_overflow();
	}

	/* update the vsyscall implementations with the new time */
	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
			timekeeper.mult);
}

/**
 * getboottime - Return the real time of system boot.
 * @ts: pointer to the timespec to be set
 *
 * Returns the wall-time of boot in a timespec.
 *
 * This is based on the wall_to_monotonic offset and the total suspend
 * time. Calls to settimeofday will affect the value returned (which
 * basically means that however wrong your real time clock is at boot time,
 * you get the right time here).
 */
void getboottime(struct timespec *ts)
{
	struct timespec boottime = {
		.tv_sec = wall_to_monotonic.tv_sec + total_sleep_time.tv_sec,
		.tv_nsec = wall_to_monotonic.tv_nsec + total_sleep_time.tv_nsec
	};

	set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
}
EXPORT_SYMBOL_GPL(getboottime);

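/*
 * Continuing the illustrative numbers from earlier: a 30s suspend moves
 * wall_to_monotonic from -1000s to -1030s and total_sleep_time to 30s,
 * so getboottime() still reports -(-1030 + 30) = 1000s, the wall time
 * at which the system originally booted.
 */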

/**
 * get_monotonic_boottime - Returns monotonic time since boot
 * @ts: pointer to the timespec to be set
 *
 * Returns the monotonic time since boot in a timespec.
 *
 * This is similar to CLOCK_MONOTONIC/ktime_get_ts, but also
 * includes the time spent in suspend.
 */
void get_monotonic_boottime(struct timespec *ts)
{
	struct timespec tomono, sleep;
	unsigned int seq;
	s64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&xtime_lock);
		*ts = xtime;
		tomono = wall_to_monotonic;
		sleep = total_sleep_time;
		nsecs = timekeeping_get_ns();

	} while (read_seqretry(&xtime_lock, seq));

	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec + sleep.tv_sec,
			ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec + nsecs);
}
EXPORT_SYMBOL_GPL(get_monotonic_boottime);

/**
 * ktime_get_boottime - Returns monotonic time since boot in a ktime
 *
 * Returns the monotonic time since boot in a ktime
 *
 * This is similar to CLOCK_MONOTONIC/ktime_get, but also
 * includes the time spent in suspend.
 */
ktime_t ktime_get_boottime(void)
{
	struct timespec ts;

	get_monotonic_boottime(&ts);
	return timespec_to_ktime(ts);
}
EXPORT_SYMBOL_GPL(ktime_get_boottime);

/**
 * monotonic_to_bootbased - Convert the monotonic time to boot based.
 * @ts: pointer to the timespec to be converted
 */
void monotonic_to_bootbased(struct timespec *ts)
{
	*ts = timespec_add(*ts, total_sleep_time);
}
EXPORT_SYMBOL_GPL(monotonic_to_bootbased);

unsigned long get_seconds(void)
{
	return xtime.tv_sec;
}
EXPORT_SYMBOL(get_seconds);

struct timespec __current_kernel_time(void)
{
	return xtime;
}

struct timespec current_kernel_time(void)
{
	struct timespec now;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);

		now = xtime;
	} while (read_seqretry(&xtime_lock, seq));

	return now;
}
EXPORT_SYMBOL(current_kernel_time);

struct timespec get_monotonic_coarse(void)
{
	struct timespec now, mono;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);

		now = xtime;
		mono = wall_to_monotonic;
	} while (read_seqretry(&xtime_lock, seq));

	set_normalized_timespec(&now, now.tv_sec + mono.tv_sec,
				now.tv_nsec + mono.tv_nsec);
	return now;
}

/*
 * The 64-bit jiffies value is not atomic - you MUST NOT read it
 * without sampling the sequence number in xtime_lock.
 * jiffies is defined in the linker script...
 */
void do_timer(unsigned long ticks)
{
	jiffies_64 += ticks;
	update_wall_time();
	calc_global_load(ticks);
}

/**
 * get_xtime_and_monotonic_and_sleep_offset() - get xtime, wall_to_monotonic,
 *    and sleep offsets.
 * @xtim: pointer to timespec to be set with xtime
 * @wtom: pointer to timespec to be set with wall_to_monotonic
 * @sleep: pointer to timespec to be set with time in suspend
 */
void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
				struct timespec *wtom, struct timespec *sleep)
{
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);
		*xtim = xtime;
		*wtom = wall_to_monotonic;
		*sleep = total_sleep_time;
	} while (read_seqretry(&xtime_lock, seq));
}

/**
 * ktime_get_monotonic_offset() - get wall_to_monotonic in ktime_t format
 */
ktime_t ktime_get_monotonic_offset(void)
{
	unsigned long seq;
	struct timespec wtom;

	do {
		seq = read_seqbegin(&xtime_lock);
		wtom = wall_to_monotonic;
	} while (read_seqretry(&xtime_lock, seq));
	return timespec_to_ktime(wtom);
}

/**
 * xtime_update() - advances the timekeeping infrastructure
 * @ticks: number of ticks, that have elapsed since the last call.
 *
 * Must be called with interrupts disabled.
 */
void xtime_update(unsigned long ticks)
{
	write_seqlock(&xtime_lock);
	do_timer(ticks);
	write_sequnlock(&xtime_lock);
}
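
/*
 * Caller sketch (schematic; the real call sites live in the tick and
 * jiffies code): a tick handler that observed 'ticks' jiffies elapsing
 * calls, with interrupts disabled:
 *
 *	xtime_update(ticks);
 *
 * which takes the xtime_lock write side, so concurrent readers spinning
 * in read_seqbegin()/read_seqretry() simply retry.
 */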