/*
 *  linux/kernel/hrtimer.c
 *
 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
 *
 *  High-resolution kernel timers
 *
 *  In contrast to the low-resolution timeout API implemented in
 *  kernel/timer.c, hrtimers provide finer resolution and accuracy
 *  depending on system configuration and capabilities.
 *
 *  These timers are currently used for:
 *   - itimers
 *   - POSIX timers
 *   - nanosleep
 *   - precise in-kernel timing
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  Credits:
 *	based on kernel/timer.c
 *
 *	Help, testing, suggestions, bugfixes, improvements were
 *	provided by:
 *
 *	George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
 *	et al.
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/cpu.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/tick.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/debugobjects.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/freezer.h>

#include <asm/uaccess.h>

#include <trace/events/timer.h>

/*
 * The timer bases:
 *
 * There are more clockids than hrtimer bases. Thus, we index
 * into the timer bases by the hrtimer_base_type enum. When trying
 * to reach a base using a clockid, hrtimer_clockid_to_base()
 * is used to convert from clockid to the proper hrtimer_base_type.
 */
DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
{

	.lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
	.clock_base =
	{
		{
			.index = HRTIMER_BASE_MONOTONIC,
			.clockid = CLOCK_MONOTONIC,
			.get_time = &ktime_get,
			.resolution = KTIME_LOW_RES,
		},
		{
			.index = HRTIMER_BASE_REALTIME,
			.clockid = CLOCK_REALTIME,
			.get_time = &ktime_get_real,
			.resolution = KTIME_LOW_RES,
		},
		{
			.index = HRTIMER_BASE_BOOTTIME,
			.clockid = CLOCK_BOOTTIME,
			.get_time = &ktime_get_boottime,
			.resolution = KTIME_LOW_RES,
		},
	}
};

static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
	[CLOCK_REALTIME]	= HRTIMER_BASE_REALTIME,
	[CLOCK_MONOTONIC]	= HRTIMER_BASE_MONOTONIC,
	[CLOCK_BOOTTIME]	= HRTIMER_BASE_BOOTTIME,
};

static inline int hrtimer_clockid_to_base(clockid_t clock_id)
{
	return hrtimer_clock_to_base_table[clock_id];
}


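/*
 * Usage sketch (illustrative, not part of the original file): the table
 * above turns clockid -> base resolution into a plain array lookup, e.g.
 *
 *	int idx = hrtimer_clockid_to_base(CLOCK_BOOTTIME);
 *
 * yields HRTIMER_BASE_BOOTTIME without any conditionals.
 */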
/*
 * Get the coarse grained time at the softirq based on xtime and
 * wall_to_monotonic.
 */
static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
{
	ktime_t xtim, mono, boot;
	struct timespec xts, tom, slp;

	get_xtime_and_monotonic_and_sleep_offset(&xts, &tom, &slp);

	xtim = timespec_to_ktime(xts);
	mono = ktime_add(xtim, timespec_to_ktime(tom));
	boot = ktime_add(mono, timespec_to_ktime(slp));
	base->clock_base[HRTIMER_BASE_REALTIME].softirq_time = xtim;
	base->clock_base[HRTIMER_BASE_MONOTONIC].softirq_time = mono;
	base->clock_base[HRTIMER_BASE_BOOTTIME].softirq_time = boot;
}

/*
 * Functions and macros which are different for UP/SMP systems are kept in a
 * single place
 */
#ifdef CONFIG_SMP

/*
 * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on the lists/queues.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static
struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
					     unsigned long *flags)
{
	struct hrtimer_clock_base *base;

	for (;;) {
		base = timer->base;
		if (likely(base != NULL)) {
			raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
			if (likely(base == timer->base))
				return base;
			/* The timer has migrated to another CPU: */
			raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
		}
		cpu_relax();
	}
}


/*
 * Get the preferred target CPU for NOHZ
 */
static int hrtimer_get_target(int this_cpu, int pinned)
{
#ifdef CONFIG_NO_HZ
	if (!pinned && get_sysctl_timer_migration() && idle_cpu(this_cpu))
		return get_nohz_timer_target();
#endif
	return this_cpu;
}

/*
 * With HIGHRES=y we do not migrate the timer when it is expiring
 * before the next event on the target cpu because we cannot reprogram
 * the target cpu hardware and we would cause it to fire late.
 *
 * Called with cpu_base->lock of target cpu held.
 */
static int
hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
{
#ifdef CONFIG_HIGH_RES_TIMERS
	ktime_t expires;

	if (!new_base->cpu_base->hres_active)
		return 0;

	expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
	return expires.tv64 <= new_base->cpu_base->expires_next.tv64;
#else
	return 0;
#endif
}

/*
 * Switch the timer base to the current CPU when possible.
 */
static inline struct hrtimer_clock_base *
switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
		    int pinned)
{
	struct hrtimer_clock_base *new_base;
	struct hrtimer_cpu_base *new_cpu_base;
	int this_cpu = smp_processor_id();
	int cpu = hrtimer_get_target(this_cpu, pinned);
	int basenum = base->index;

again:
	new_cpu_base = &per_cpu(hrtimer_bases, cpu);
	new_base = &new_cpu_base->clock_base[basenum];

	if (base != new_base) {
		/*
		 * We are trying to move timer to new_base.
		 * However we can't change timer's base while it is running,
		 * so we keep it on the same CPU. No hassle vs. reprogramming
		 * the event source in the high resolution case. The softirq
		 * code will take care of this when the timer function has
		 * completed. There is no conflict as we hold the lock until
		 * the timer is enqueued.
		 */
		if (unlikely(hrtimer_callback_running(timer)))
			return base;

		/* See the comment in lock_timer_base() */
		timer->base = NULL;
		raw_spin_unlock(&base->cpu_base->lock);
		raw_spin_lock(&new_base->cpu_base->lock);

		if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
			cpu = this_cpu;
			raw_spin_unlock(&new_base->cpu_base->lock);
			raw_spin_lock(&base->cpu_base->lock);
			timer->base = base;
			goto again;
		}
		timer->base = new_base;
	} else {
		if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
			cpu = this_cpu;
			goto again;
		}
	}
	return new_base;
}

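/*
 * Usage sketch (illustrative; "t" and "my_expiry" are hypothetical): a
 * caller that must keep its timer on the local CPU, e.g. a per-cpu
 * watchdog, opts out of the NOHZ migration above by starting the timer
 * in pinned mode:
 *
 *	hrtimer_start(&t, my_expiry, HRTIMER_MODE_REL_PINNED);
 *
 * hrtimer_get_target() then returns the current CPU unconditionally.
 */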
#else /* CONFIG_SMP */

static inline struct hrtimer_clock_base *
lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	struct hrtimer_clock_base *base = timer->base;

	raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);

	return base;
}

# define switch_hrtimer_base(t, b, p)	(b)

#endif /* !CONFIG_SMP */

/*
 * Functions for the union type storage format of ktime_t which are
 * too large for inlining:
 */
#if BITS_PER_LONG < 64
# ifndef CONFIG_KTIME_SCALAR
/**
 * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
 * @kt:		addend
 * @nsec:	the scalar nsec value to add
 *
 * Returns the sum of kt and nsec in ktime_t format
 */
ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
{
	ktime_t tmp;

	if (likely(nsec < NSEC_PER_SEC)) {
		tmp.tv64 = nsec;
	} else {
		unsigned long rem = do_div(nsec, NSEC_PER_SEC);

		tmp = ktime_set((long)nsec, rem);
	}

	return ktime_add(kt, tmp);
}

EXPORT_SYMBOL_GPL(ktime_add_ns);

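/*
 * Usage sketch (illustrative): on a 32 bit machine without KTIME_SCALAR
 * the helper above splits a large nanosecond count into a sec/nsec pair
 * before the addition, e.g.
 *
 *	ktime_t t = ktime_set(1, 500000000);	(1.5s)
 *	t = ktime_add_ns(t, 2500000000ULL);	(+ 2.5s -> 4.0s)
 *
 * The 2500000000 ns operand is folded into 2 s + 500000000 ns via
 * do_div() before ktime_add() runs.
 */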
/**
 * ktime_sub_ns - Subtract a scalar nanoseconds value from a ktime_t variable
 * @kt:		minuend
 * @nsec:	the scalar nsec value to subtract
 *
 * Returns the subtraction of @nsec from @kt in ktime_t format
 */
ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec)
{
	ktime_t tmp;

	if (likely(nsec < NSEC_PER_SEC)) {
		tmp.tv64 = nsec;
	} else {
		unsigned long rem = do_div(nsec, NSEC_PER_SEC);

		/* Make sure nsec fits into long */
		if (unlikely(nsec > KTIME_SEC_MAX))
			return (ktime_t){ .tv64 = KTIME_MAX };

		tmp = ktime_set((long)nsec, rem);
	}

	return ktime_sub(kt, tmp);
}

EXPORT_SYMBOL_GPL(ktime_sub_ns);
# endif /* !CONFIG_KTIME_SCALAR */

/*
 * Divide a ktime value by a nanosecond value
 */
u64 ktime_divns(const ktime_t kt, s64 div)
{
	u64 dclc;
	int sft = 0;

	dclc = ktime_to_ns(kt);
	/* Make sure the divisor is less than 2^32: */
	while (div >> 32) {
		sft++;
		div >>= 1;
	}
	dclc >>= sft;
	do_div(dclc, (unsigned long) div);

	return dclc;
}
#endif /* BITS_PER_LONG < 64 */

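/*
 * Worked example (illustrative): the loop above halves @div until it
 * fits in 32 bits so that do_div() can be used. Dividing a 10s delta by
 * a 5s period:
 *
 *	div  = 5000000000 does not fit in 32 bits, so it becomes
 *	       2500000000 with sft = 1;
 *	dclc = 10000000000 is shifted right by the same sft, giving
 *	       5000000000, and 5000000000 / 2500000000 = 2.
 *
 * Scaling both operands by the same power of two leaves the quotient
 * unchanged; only remainder precision is lost.
 */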
/*
 * Add two ktime values and do a safety check for overflow:
 */
ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
{
	ktime_t res = ktime_add(lhs, rhs);

	/*
	 * We use KTIME_SEC_MAX here, the maximum timeout which we can
	 * return to user space in a timespec:
	 */
	if (res.tv64 < 0 || res.tv64 < lhs.tv64 || res.tv64 < rhs.tv64)
		res = ktime_set(KTIME_SEC_MAX, 0);

	return res;
}

EXPORT_SYMBOL_GPL(ktime_add_safe);

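/*
 * Usage sketch (illustrative; "user_supplied_delta" is hypothetical):
 * callers prefer this over plain ktime_add() whenever one operand may
 * come from user space, e.g. when turning a relative timeout into an
 * absolute expiry:
 *
 *	expires = ktime_add_safe(ktime_get(), user_supplied_delta);
 *
 * A delta near KTIME_MAX would wrap a plain ktime_add() to a negative
 * value and make the timer fire immediately; the clamp to KTIME_SEC_MAX
 * avoids that. __hrtimer_start_range_ns() below uses this pattern for
 * relative mode.
 */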
#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr hrtimer_debug_descr;

static void *hrtimer_debug_hint(void *addr)
{
	return ((struct hrtimer *) addr)->function;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int hrtimer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct hrtimer *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		hrtimer_cancel(timer);
		debug_object_init(timer, &hrtimer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
{
	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int hrtimer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct hrtimer *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		hrtimer_cancel(timer);
		debug_object_free(timer, &hrtimer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr hrtimer_debug_descr = {
	.name		= "hrtimer",
	.debug_hint	= hrtimer_debug_hint,
	.fixup_init	= hrtimer_fixup_init,
	.fixup_activate	= hrtimer_fixup_activate,
	.fixup_free	= hrtimer_fixup_free,
};

static inline void debug_hrtimer_init(struct hrtimer *timer)
{
	debug_object_init(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_activate(struct hrtimer *timer)
{
	debug_object_activate(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
{
	debug_object_deactivate(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_free(struct hrtimer *timer)
{
	debug_object_free(timer, &hrtimer_debug_descr);
}

static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode);

void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode)
{
	debug_object_init_on_stack(timer, &hrtimer_debug_descr);
	__hrtimer_init(timer, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init_on_stack);

void destroy_hrtimer_on_stack(struct hrtimer *timer)
{
	debug_object_free(timer, &hrtimer_debug_descr);
}

#else
static inline void debug_hrtimer_init(struct hrtimer *timer) { }
static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
#endif

static inline void
debug_init(struct hrtimer *timer, clockid_t clockid,
	   enum hrtimer_mode mode)
{
	debug_hrtimer_init(timer);
	trace_hrtimer_init(timer, clockid, mode);
}

static inline void debug_activate(struct hrtimer *timer)
{
	debug_hrtimer_activate(timer);
	trace_hrtimer_start(timer);
}

static inline void debug_deactivate(struct hrtimer *timer)
{
	debug_hrtimer_deactivate(timer);
	trace_hrtimer_cancel(timer);
}

/* High resolution timer related functions */
#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer enabled?
 */
static int hrtimer_hres_enabled __read_mostly = 1;

/*
 * Enable / Disable high resolution mode
 */
static int __init setup_hrtimer_hres(char *str)
{
	if (!strcmp(str, "off"))
		hrtimer_hres_enabled = 0;
	else if (!strcmp(str, "on"))
		hrtimer_hres_enabled = 1;
	else
		return 0;
	return 1;
}

__setup("highres=", setup_hrtimer_hres);

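/*
 * Usage note (illustrative): the mode can be forced from the kernel
 * command line, e.g. booting with
 *
 *	highres=off
 *
 * keeps the system on the low resolution tick even when the clock event
 * hardware is capable of high resolution mode.
 */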
/*
 * hrtimer_is_hres_enabled - query whether the highres mode is enabled
 */
static inline int hrtimer_is_hres_enabled(void)
{
	return hrtimer_hres_enabled;
}

/*
 * Is the high resolution mode active?
 */
static inline int hrtimer_hres_active(void)
{
	return __this_cpu_read(hrtimer_bases.hres_active);
}

/*
 * Reprogram the event source after checking all clock bases for the
 * next expiring timer.
 * Called with interrupts disabled and base->lock held
 */
static void
hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
{
	int i;
	struct hrtimer_clock_base *base = cpu_base->clock_base;
	ktime_t expires, expires_next;

	expires_next.tv64 = KTIME_MAX;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
		struct hrtimer *timer;
		struct timerqueue_node *next;

		next = timerqueue_getnext(&base->active);
		if (!next)
			continue;
		timer = container_of(next, struct hrtimer, node);

		expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
		/*
		 * clock_was_set() has changed base->offset so the
		 * result might be negative. Fix it up to prevent a
		 * false positive in clockevents_program_event()
		 */
		if (expires.tv64 < 0)
			expires.tv64 = 0;
		if (expires.tv64 < expires_next.tv64)
			expires_next = expires;
	}

	if (skip_equal && expires_next.tv64 == cpu_base->expires_next.tv64)
		return;

	cpu_base->expires_next.tv64 = expires_next.tv64;

	/*
	 * If a hang was detected in the last timer interrupt then we
	 * leave the hang delay active in the hardware. We want the
	 * system to make progress. That also prevents the following
	 * scenario:
	 * T1 expires 50ms from now
	 * T2 expires 5s from now
	 *
	 * T1 is removed, so this code is called and would reprogram
	 * the hardware to 5s from now. Any hrtimer_start after that
	 * will not reprogram the hardware due to hang_detected being
	 * set. So we'd effectively block all timers until the T2 event
	 * fires.
	 */
	if (cpu_base->hang_detected)
		return;

	if (cpu_base->expires_next.tv64 != KTIME_MAX)
		tick_program_event(cpu_base->expires_next, 1);
}

/*
 * Shared reprogramming for clock_realtime and clock_monotonic
 *
 * When a timer is enqueued and expires earlier than the already enqueued
 * timers, we have to check whether it expires earlier than the timer for
 * which the clock event device was armed.
 *
 * Called with interrupts disabled and base->cpu_base.lock held
 */
static int hrtimer_reprogram(struct hrtimer *timer,
			     struct hrtimer_clock_base *base)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
	int res;

	WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);

	/*
	 * When the callback is running, we do not reprogram the clock event
	 * device. The timer callback is either running on a different CPU or
	 * the callback is executed in the hrtimer_interrupt context. The
	 * reprogramming is handled either by the softirq, which called the
	 * callback, or at the end of the hrtimer_interrupt.
	 */
	if (hrtimer_callback_running(timer))
		return 0;

	/*
	 * A CLOCK_REALTIME timer might be requested with an absolute
	 * expiry time which is less than base->offset. Nothing wrong
	 * about that, just avoid calling into the tick code, which
	 * objects to negative expiry values.
	 */
	if (expires.tv64 < 0)
		return -ETIME;

	if (expires.tv64 >= cpu_base->expires_next.tv64)
		return 0;

	/*
	 * If a hang was detected in the last timer interrupt then we
	 * do not schedule a timer which is earlier than the expiry
	 * which we enforced in the hang detection. We want the system
	 * to make progress.
	 */
	if (cpu_base->hang_detected)
		return 0;

	/*
	 * Clockevents returns -ETIME when the event was in the past.
	 */
	res = tick_program_event(expires, 0);
	if (!IS_ERR_VALUE(res))
		cpu_base->expires_next = expires;
	return res;
}

/*
 * Initialize the high resolution related parts of cpu_base
 */
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
{
	base->expires_next.tv64 = KTIME_MAX;
	base->hres_active = 0;
}

/*
 * When high resolution timers are active, try to reprogram. Note that in
 * case the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no
 * expiry check happens. The timer gets enqueued into the rbtree. The
 * reprogramming and expiry check is done in the hrtimer_interrupt or in
 * the softirq.
 */
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
					    struct hrtimer_clock_base *base)
{
	return base->cpu_base->hres_active && hrtimer_reprogram(timer, base);
}

static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
{
	ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
	ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;

	return ktime_get_update_offsets(offs_real, offs_boot);
}

/*
 * Retrigger next event is called after clock was set
 *
 * Called with interrupts disabled via on_each_cpu()
 */
static void retrigger_next_event(void *arg)
{
	struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);

	if (!hrtimer_hres_active())
		return;

	raw_spin_lock(&base->lock);
	hrtimer_update_base(base);
	hrtimer_force_reprogram(base, 0);
	raw_spin_unlock(&base->lock);
}

/*
 * Switch to high resolution mode
 */
static int hrtimer_switch_to_hres(void)
{
	int i, cpu = smp_processor_id();
	struct hrtimer_cpu_base *base = &per_cpu(hrtimer_bases, cpu);
	unsigned long flags;

	if (base->hres_active)
		return 1;

	local_irq_save(flags);

	if (tick_init_highres()) {
		local_irq_restore(flags);
		printk(KERN_WARNING "Could not switch to high resolution "
				    "mode on CPU %d\n", cpu);
		return 0;
	}
	base->hres_active = 1;
	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
		base->clock_base[i].resolution = KTIME_HIGH_RES;

	tick_setup_sched_timer();
	/* "Retrigger" the interrupt to get things going */
	retrigger_next_event(NULL);
	local_irq_restore(flags);
	return 1;
}

static void clock_was_set_work(struct work_struct *work)
{
	clock_was_set();
}

static DECLARE_WORK(hrtimer_work, clock_was_set_work);

/*
 * Called from timekeeping and resume code to reprogram the hrtimer
 * interrupt device on all cpus.
 */
void clock_was_set_delayed(void)
{
	schedule_work(&hrtimer_work);
}

#else

static inline int hrtimer_hres_active(void) { return 0; }
static inline int hrtimer_is_hres_enabled(void) { return 0; }
static inline int hrtimer_switch_to_hres(void) { return 0; }
static inline void
hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
					    struct hrtimer_clock_base *base)
{
	return 0;
}
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
static inline void retrigger_next_event(void *arg) { }

#endif /* CONFIG_HIGH_RES_TIMERS */

/*
 * Clock realtime was set
 *
 * Change the offset of the realtime clock vs. the monotonic
 * clock.
 *
 * We might have to reprogram the high resolution timer interrupt. On
 * SMP we call the architecture specific code to retrigger _all_ high
 * resolution timer interrupts. On UP we just disable interrupts and
 * call the high resolution interrupt code.
 */
void clock_was_set(void)
{
#ifdef CONFIG_HIGH_RES_TIMERS
	/* Retrigger the CPU local events everywhere */
	on_each_cpu(retrigger_next_event, NULL, 1);
#endif
	timerfd_clock_was_set();
}

/*
 * During resume we might have to reprogram the high resolution timer
 * interrupt (on the local CPU):
 */
void hrtimers_resume(void)
{
	WARN_ONCE(!irqs_disabled(),
		  KERN_INFO "hrtimers_resume() called with IRQs enabled!");

	/* Retrigger on the local CPU */
	retrigger_next_event(NULL);
	/* And schedule a retrigger for all others */
	clock_was_set_delayed();
}

static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
{
#ifdef CONFIG_TIMER_STATS
	if (timer->start_site)
		return;
	timer->start_site = __builtin_return_address(0);
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
#endif
}

static inline void timer_stats_hrtimer_clear_start_info(struct hrtimer *timer)
{
#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
#endif
}

static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
{
#ifdef CONFIG_TIMER_STATS
	if (likely(!timer_stats_active))
		return;
	timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
				 timer->function, timer->start_comm, 0);
#endif
}

/*
 * Counterpart to lock_hrtimer_base above:
 */
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
}

/**
 * hrtimer_forward - forward the timer expiry
 * @timer:	hrtimer to forward
 * @now:	forward past this time
 * @interval:	the interval to forward
 *
 * Forward the timer expiry so it will expire in the future.
 * Returns the number of overruns.
 */
u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
	u64 orun = 1;
	ktime_t delta;

	delta = ktime_sub(now, hrtimer_get_expires(timer));

	if (delta.tv64 < 0)
		return 0;

	if (interval.tv64 < timer->base->resolution.tv64)
		interval.tv64 = timer->base->resolution.tv64;

	if (unlikely(delta.tv64 >= interval.tv64)) {
		s64 incr = ktime_to_ns(interval);

		orun = ktime_divns(delta, incr);
		hrtimer_add_expires_ns(timer, incr * orun);
		if (hrtimer_get_expires_tv64(timer) > now.tv64)
			return orun;
		/*
		 * This (and the ktime_add() below) is the
		 * correction for exact:
		 */
		orun++;
	}
	hrtimer_add_expires(timer, interval);

	return orun;
}
EXPORT_SYMBOL_GPL(hrtimer_forward);

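/*
 * Usage sketch (a minimal illustration; "my_period" and the function
 * name are hypothetical): a periodic timer rearms itself from its own
 * callback by forwarding the expiry past now and returning
 * HRTIMER_RESTART:
 *
 *	static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
 *	{
 *		hrtimer_forward(t, t->base->get_time(), my_period);
 *		return HRTIMER_RESTART;
 *	}
 *
 * The overrun count (ignored here) tells a caller how many whole
 * periods were skipped when the callback ran late.
 */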
/*
 * enqueue_hrtimer - internal function to (re)start a timer
 *
 * The timer is inserted in expiry order. Insertion into the
 * red black tree is O(log(n)). Must hold the base lock.
 *
 * Returns 1 when the new timer is the leftmost timer in the tree.
 */
static int enqueue_hrtimer(struct hrtimer *timer,
			   struct hrtimer_clock_base *base)
{
	debug_activate(timer);

	timerqueue_add(&base->active, &timer->node);
	base->cpu_base->active_bases |= 1 << base->index;

	/*
	 * HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the
	 * state of a possibly running callback.
	 */
	timer->state |= HRTIMER_STATE_ENQUEUED;

	return (&timer->node == base->active.next);
}

/*
 * __remove_hrtimer - internal function to remove a timer
 *
 * Caller must hold the base lock.
 *
 * High resolution timer mode reprograms the clock event device when the
 * timer is the one which expires next. The caller can disable this by
 * setting reprogram to zero. This is useful when the context does a
 * reprogramming anyway (e.g. timer interrupt).
 */
static void __remove_hrtimer(struct hrtimer *timer,
			     struct hrtimer_clock_base *base,
			     unsigned long newstate, int reprogram)
{
	struct timerqueue_node *next_timer;

	if (!(timer->state & HRTIMER_STATE_ENQUEUED))
		goto out;

	next_timer = timerqueue_getnext(&base->active);
	timerqueue_del(&base->active, &timer->node);
	if (&timer->node == next_timer) {
#ifdef CONFIG_HIGH_RES_TIMERS
		/* Reprogram the clock event device, if enabled */
		if (reprogram && hrtimer_hres_active()) {
			ktime_t expires;

			expires = ktime_sub(hrtimer_get_expires(timer),
					    base->offset);
			if (base->cpu_base->expires_next.tv64 == expires.tv64)
				hrtimer_force_reprogram(base->cpu_base, 1);
		}
#endif
	}
	if (!timerqueue_getnext(&base->active))
		base->cpu_base->active_bases &= ~(1 << base->index);
out:
	timer->state = newstate;
}

/*
 * remove hrtimer, called with base lock held
 */
static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
{
	if (hrtimer_is_queued(timer)) {
		unsigned long state;
		int reprogram;

		/*
		 * Remove the timer and force reprogramming when high
		 * resolution mode is active and the timer is on the current
		 * CPU. If we remove a timer on another CPU, reprogramming is
		 * skipped. The interrupt event on this CPU is fired and
		 * reprogramming happens in the interrupt handler. This is a
		 * rare case and less expensive than a smp call.
		 */
		debug_deactivate(timer);
		timer_stats_hrtimer_clear_start_info(timer);
		reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
		/*
		 * We must preserve the CALLBACK state flag here,
		 * otherwise we could move the timer base in
		 * switch_hrtimer_base.
		 */
		state = timer->state & HRTIMER_STATE_CALLBACK;
		__remove_hrtimer(timer, base, state, reprogram);
		return 1;
	}
	return 0;
}

int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
		unsigned long delta_ns, const enum hrtimer_mode mode,
		int wakeup)
{
	struct hrtimer_clock_base *base, *new_base;
	unsigned long flags;
	int ret, leftmost;

	base = lock_hrtimer_base(timer, &flags);

	/* Remove an active timer from the queue: */
	ret = remove_hrtimer(timer, base);

	if (mode & HRTIMER_MODE_REL) {
		tim = ktime_add_safe(tim, base->get_time());
		/*
		 * CONFIG_TIME_LOW_RES is a temporary way for architectures
		 * to signal that they simply return xtime in
		 * do_gettimeoffset(). In this case we want to round up by
		 * resolution when starting a relative timer, to avoid short
		 * timeouts. This will go away with the GTOD framework.
		 */
#ifdef CONFIG_TIME_LOW_RES
		tim = ktime_add_safe(tim, base->resolution);
#endif
	}

	hrtimer_set_expires_range_ns(timer, tim, delta_ns);

	/* Switch the timer base, if necessary: */
	new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);

	timer_stats_hrtimer_set_start_info(timer);

	leftmost = enqueue_hrtimer(timer, new_base);

	/*
	 * Only allow reprogramming if the new base is on this CPU.
	 * (it might still be on another CPU if the timer was pending)
	 *
	 * XXX send_remote_softirq() ?
	 */
	if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)
		&& hrtimer_enqueue_reprogram(timer, new_base)) {
		if (wakeup) {
			/*
			 * We need to drop cpu_base->lock to avoid a
			 * lock ordering issue vs. rq->lock.
			 */
			raw_spin_unlock(&new_base->cpu_base->lock);
			raise_softirq_irqoff(HRTIMER_SOFTIRQ);
			local_irq_restore(flags);
			return ret;
		} else {
			__raise_softirq_irqoff(HRTIMER_SOFTIRQ);
		}
	}

	unlock_hrtimer_base(timer, &flags);

	return ret;
}

/**
 * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @delta_ns:	"slack" range for the timer
 * @mode:	expiry mode: absolute (HRTIMER_MODE_ABS) or
 *		relative (HRTIMER_MODE_REL)
 *
 * Returns:
 *  0 on success
 *  1 when the timer was active
 */
int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
		unsigned long delta_ns, const enum hrtimer_mode mode)
{
	return __hrtimer_start_range_ns(timer, tim, delta_ns, mode, 1);
}
EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);

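/*
 * Usage sketch (illustrative; "t" is hypothetical): a timer that should
 * fire 100ms from now but may be coalesced with other events for up to
 * an extra 10ms:
 *
 *	hrtimer_start_range_ns(&t, ktime_set(0, 100 * NSEC_PER_MSEC),
 *			       10 * NSEC_PER_MSEC, HRTIMER_MODE_REL);
 *
 * The expiry may then land anywhere in the [100ms, 110ms] window; see
 * the softexpires handling in hrtimer_interrupt() below.
 */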
/**
 * hrtimer_start - (re)start an hrtimer on the current CPU
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @mode:	expiry mode: absolute (HRTIMER_MODE_ABS) or
 *		relative (HRTIMER_MODE_REL)
 *
 * Returns:
 *  0 on success
 *  1 when the timer was active
 */
int
hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
{
	return __hrtimer_start_range_ns(timer, tim, 0, mode, 1);
}
EXPORT_SYMBOL_GPL(hrtimer_start);

/**
 * hrtimer_try_to_cancel - try to deactivate a timer
 * @timer:	hrtimer to stop
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 * -1 when the timer is currently executing the callback function and
 *    cannot be stopped
 */
int hrtimer_try_to_cancel(struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;
	int ret = -1;

	base = lock_hrtimer_base(timer, &flags);

	if (!hrtimer_callback_running(timer))
		ret = remove_hrtimer(timer, base);

	unlock_hrtimer_base(timer, &flags);

	return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);

/**
 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
 * @timer:	the timer to be cancelled
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 */
int hrtimer_cancel(struct hrtimer *timer)
{
	for (;;) {
		int ret = hrtimer_try_to_cancel(timer);

		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);

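/*
 * Usage note (illustrative; "t" is hypothetical): hrtimer_cancel()
 * spins until a running callback has finished, so it must not be called
 * from the timer's own callback or while holding a lock that the
 * callback takes. In such contexts use the non-blocking variant and
 * handle the -1 case:
 *
 *	if (hrtimer_try_to_cancel(&t) < 0)
 *		(the callback is running: retry later or defer cleanup)
 */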
/**
 * hrtimer_get_remaining - get remaining time for the timer
 * @timer:	the timer to read
 */
ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
	unsigned long flags;
	ktime_t rem;

	lock_hrtimer_base(timer, &flags);
	rem = hrtimer_expires_remaining(timer);
	unlock_hrtimer_base(timer, &flags);

	return rem;
}
EXPORT_SYMBOL_GPL(hrtimer_get_remaining);

#ifdef CONFIG_NO_HZ
/**
 * hrtimer_get_next_event - get the time until next expiry event
 *
 * Returns the delta to the next expiry event or KTIME_MAX if no timer
 * is pending.
 */
ktime_t hrtimer_get_next_event(void)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	struct hrtimer_clock_base *base = cpu_base->clock_base;
	ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
	unsigned long flags;
	int i;

	raw_spin_lock_irqsave(&cpu_base->lock, flags);

	if (!hrtimer_hres_active()) {
		for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
			struct hrtimer *timer;
			struct timerqueue_node *next;

			next = timerqueue_getnext(&base->active);
			if (!next)
				continue;

			timer = container_of(next, struct hrtimer, node);
			delta.tv64 = hrtimer_get_expires_tv64(timer);
			delta = ktime_sub(delta, base->get_time());
			if (delta.tv64 < mindelta.tv64)
				mindelta.tv64 = delta.tv64;
		}
	}

	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);

	if (mindelta.tv64 < 0)
		mindelta.tv64 = 0;
	return mindelta;
}
#endif

static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode)
{
	struct hrtimer_cpu_base *cpu_base;
	int base;

	memset(timer, 0, sizeof(struct hrtimer));

	cpu_base = &__raw_get_cpu_var(hrtimer_bases);

	if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
		clock_id = CLOCK_MONOTONIC;

	base = hrtimer_clockid_to_base(clock_id);
	timer->base = &cpu_base->clock_base[base];
	timerqueue_init(&timer->node);

#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
}

/**
 * hrtimer_init - initialize a timer to the given clock
 * @timer:	the timer to be initialized
 * @clock_id:	the clock to be used
 * @mode:	timer mode abs/rel
 */
void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
		  enum hrtimer_mode mode)
{
	debug_init(timer, clock_id, mode);
	__hrtimer_init(timer, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init);

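/*
 * Usage sketch (a minimal illustration; names prefixed "my_" are
 * hypothetical): the common setup pairs hrtimer_init() with a callback
 * assignment before the first start:
 *
 *	hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	my_timer.function = my_timer_fn;
 *	hrtimer_start(&my_timer, ns_to_ktime(500 * NSEC_PER_USEC),
 *		      HRTIMER_MODE_REL);
 *
 * Note that the CLOCK_REALTIME -> CLOCK_MONOTONIC rewrite in
 * __hrtimer_init() above applies only to relative mode, where wall
 * clock jumps must not affect the timeout.
 */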
/**
 * hrtimer_get_res - get the timer resolution for a clock
 * @which_clock: which clock to query
 * @tp:		 pointer to timespec variable to store the resolution
 *
 * Store the resolution of the clock selected by @which_clock in the
 * variable pointed to by @tp.
 */
int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
{
	struct hrtimer_cpu_base *cpu_base;
	int base = hrtimer_clockid_to_base(which_clock);

	cpu_base = &__raw_get_cpu_var(hrtimer_bases);
	*tp = ktime_to_timespec(cpu_base->clock_base[base].resolution);

	return 0;
}
EXPORT_SYMBOL_GPL(hrtimer_get_res);

static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
{
	struct hrtimer_clock_base *base = timer->base;
	struct hrtimer_cpu_base *cpu_base = base->cpu_base;
	enum hrtimer_restart (*fn)(struct hrtimer *);
	int restart;

	WARN_ON(!irqs_disabled());

	debug_deactivate(timer);
	__remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
	timer_stats_account_hrtimer(timer);
	fn = timer->function;

	/*
	 * Because we run timers from hardirq context, there is no chance
	 * they get migrated to another cpu, therefore it's safe to unlock
	 * the timer base.
	 */
	raw_spin_unlock(&cpu_base->lock);
	trace_hrtimer_expire_entry(timer, now);
	restart = fn(timer);
	trace_hrtimer_expire_exit(timer);
	raw_spin_lock(&cpu_base->lock);

	/*
	 * Note: We clear the CALLBACK bit after enqueue_hrtimer and
	 * we do not reprogram the event hardware. That happens either in
	 * hrtimer_start_range_ns() or in hrtimer_interrupt().
	 */
	if (restart != HRTIMER_NORESTART) {
		BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
		enqueue_hrtimer(timer, base);
	}

	WARN_ON_ONCE(!(timer->state & HRTIMER_STATE_CALLBACK));

	timer->state &= ~HRTIMER_STATE_CALLBACK;
}

#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer interrupt
 * Called with interrupts disabled
 */
void hrtimer_interrupt(struct clock_event_device *dev)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	ktime_t expires_next, now, entry_time, delta;
	int i, retries = 0;

	BUG_ON(!cpu_base->hres_active);
	cpu_base->nr_events++;
	dev->next_event.tv64 = KTIME_MAX;

	raw_spin_lock(&cpu_base->lock);
	entry_time = now = hrtimer_update_base(cpu_base);
retry:
	expires_next.tv64 = KTIME_MAX;
	/*
	 * We set expires_next to KTIME_MAX here with cpu_base->lock
	 * held to prevent a timer from being enqueued into our queue
	 * via the migration code. This does not affect enqueueing of
	 * timers which run their callback and need to be requeued on
	 * this CPU.
	 */
	cpu_base->expires_next.tv64 = KTIME_MAX;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		struct hrtimer_clock_base *base;
		struct timerqueue_node *node;
		ktime_t basenow;

		if (!(cpu_base->active_bases & (1 << i)))
			continue;

		base = cpu_base->clock_base + i;
		basenow = ktime_add(now, base->offset);

		while ((node = timerqueue_getnext(&base->active))) {
			struct hrtimer *timer;

			timer = container_of(node, struct hrtimer, node);

			/*
			 * The immediate goal for using the softexpires is
			 * minimizing wakeups, not running timers at the
			 * earliest interrupt after their soft expiration.
			 * This allows us to avoid using a Priority Search
			 * Tree, which can answer a stabbing query for
			 * overlapping intervals, and instead use the simple
			 * BST we already have.
			 * We don't add extra wakeups by delaying timers that
			 * are right-of a not yet expired timer, because that
			 * timer will have to trigger a wakeup anyway.
			 */

			if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) {
				ktime_t expires;

				expires = ktime_sub(hrtimer_get_expires(timer),
						    base->offset);
				if (expires.tv64 < 0)
					expires.tv64 = KTIME_MAX;
				if (expires.tv64 < expires_next.tv64)
					expires_next = expires;
				break;
			}

			__run_hrtimer(timer, &basenow);
		}
	}

	/*
	 * Store the new expiry value so the migration code can verify
	 * against it.
	 */
	cpu_base->expires_next = expires_next;
	raw_spin_unlock(&cpu_base->lock);

	/* Reprogramming necessary? */
	if (expires_next.tv64 == KTIME_MAX ||
	    !tick_program_event(expires_next, 0)) {
		cpu_base->hang_detected = 0;
		return;
	}

	/*
	 * The next timer was already expired due to:
	 * - tracing
	 * - long lasting callbacks
	 * - being scheduled away when running in a VM
	 *
	 * We need to prevent the hrtimer interrupt routine from looping
	 * forever. We give it 3 attempts to avoid overreacting on some
	 * spurious event.
John Stultz6321a0a2012-07-17 02:39:55 -04001377 *
1378 * Acquire base lock for updating the offsets and retrieving
1379 * the current time.
Thomas Gleixner41d2e492009-11-13 17:05:44 +01001380 */
Thomas Gleixnerdd3cded2012-07-17 02:39:53 -04001381 raw_spin_lock(&cpu_base->lock);
John Stultz6321a0a2012-07-17 02:39:55 -04001382 now = hrtimer_update_base(cpu_base);
Thomas Gleixner41d2e492009-11-13 17:05:44 +01001383 cpu_base->nr_retries++;
1384 if (++retries < 3)
1385 goto retry;
1386 /*
1387 * Give the system a chance to do something else than looping
1388 * here. We stored the entry time, so we know exactly how long
1389 * we spent here. We schedule the next event this amount of
1390 * time away.
1391 */
1392 cpu_base->nr_hangs++;
1393 cpu_base->hang_detected = 1;
Thomas Gleixnerdd3cded2012-07-17 02:39:53 -04001394 raw_spin_unlock(&cpu_base->lock);
Thomas Gleixner41d2e492009-11-13 17:05:44 +01001395 delta = ktime_sub(now, entry_time);
1396 if (delta.tv64 > cpu_base->max_hang_time.tv64)
1397 cpu_base->max_hang_time = delta;
1398 /*
1399 * Limit it to a sensible value as we enforce a longer
1400 * delay. Give the CPU at least 100ms to catch up.
1401 */
1402 if (delta.tv64 > 100 * NSEC_PER_MSEC)
1403 expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
1404 else
1405 expires_next = ktime_add(now, delta);
1406 tick_program_event(expires_next, 1);
1407 printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
1408 ktime_to_ns(delta));
Peter Zijlstrad3d74452008-01-25 21:08:31 +01001409}
1410
Thomas Gleixner8bdec952009-01-05 11:28:19 +01001411/*
1412 * local version of hrtimer_peek_ahead_timers() called with interrupts
1413 * disabled.
1414 */
1415static void __hrtimer_peek_ahead_timers(void)
1416{
1417 struct tick_device *td;
1418
1419 if (!hrtimer_hres_active())
1420 return;
1421
1422 td = &__get_cpu_var(tick_cpu_device);
1423 if (td && td->evtdev)
1424 hrtimer_interrupt(td->evtdev);
1425}
1426
Arjan van de Ven2e94d1f2008-09-10 16:06:00 -07001427/**
1428 * hrtimer_peek_ahead_timers -- run soft-expired timers now
1429 *
1430 * hrtimer_peek_ahead_timers will peek at the timer queue of
1431 * the current cpu and check if there are any timers for which
1432 * the soft expires time has passed. If any such timers exist,
1433 * they are run immediately and then removed from the timer queue.
1434 *
1435 */
1436void hrtimer_peek_ahead_timers(void)
1437{
Thomas Gleixner643bdf62008-10-20 13:38:11 +02001438 unsigned long flags;
Arjan van de Vendc4304f2008-10-13 10:32:15 -04001439
Arjan van de Ven2e94d1f2008-09-10 16:06:00 -07001440 local_irq_save(flags);
Thomas Gleixner8bdec952009-01-05 11:28:19 +01001441 __hrtimer_peek_ahead_timers();
Arjan van de Ven2e94d1f2008-09-10 16:06:00 -07001442 local_irq_restore(flags);
1443}
1444
Peter Zijlstraa6037b62009-01-05 11:28:22 +01001445static void run_hrtimer_softirq(struct softirq_action *h)
1446{
1447 hrtimer_peek_ahead_timers();
1448}
1449
Ingo Molnar82c5b7b2009-01-05 14:11:10 +01001450#else /* CONFIG_HIGH_RES_TIMERS */
1451
1452static inline void __hrtimer_peek_ahead_timers(void) { }
1453
1454#endif /* !CONFIG_HIGH_RES_TIMERS */
Peter Zijlstrad3d74452008-01-25 21:08:31 +01001455
Thomas Gleixnerc0a31322006-01-09 20:52:32 -08001456/*
1457 * Called from timer softirq every jiffy, expire hrtimers:
Thomas Gleixner54cdfdb2007-02-16 01:28:11 -08001458 *
1459 * For HRT its the fall back code to run the softirq in the timer
1460 * softirq context in case the hrtimer initialization failed or has
1461 * not been done yet.
Thomas Gleixnerc0a31322006-01-09 20:52:32 -08001462 */
Peter Zijlstrad3d74452008-01-25 21:08:31 +01001463void hrtimer_run_pending(void)
Thomas Gleixnerc0a31322006-01-09 20:52:32 -08001464{
Thomas Gleixner54cdfdb2007-02-16 01:28:11 -08001465 if (hrtimer_hres_active())
1466 return;
1467
Thomas Gleixner79bf2bb2007-02-16 01:28:03 -08001468 /*
1469 * This _is_ ugly: We have to check in the softirq context,
1470 * whether we can switch to highres and / or nohz mode. The
1471 * clocksource switch happens in the timer interrupt with
1472 * xtime_lock held. Notification from there only sets the
1473 * check bit in the tick_oneshot code, otherwise we might
1474 * deadlock vs. xtime_lock.
1475 */
Thomas Gleixner54cdfdb2007-02-16 01:28:11 -08001476 if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
Peter Zijlstrad3d74452008-01-25 21:08:31 +01001477 hrtimer_switch_to_hres();
Peter Zijlstrad3d74452008-01-25 21:08:31 +01001478}
1479
1480/*
1481 * Called from hardirq context every jiffy
1482 */
Peter Zijlstrad3d74452008-01-25 21:08:31 +01001483void hrtimer_run_queues(void)
1484{
John Stultz998adc32010-09-20 19:19:17 -07001485 struct timerqueue_node *node;
Peter Zijlstrad3d74452008-01-25 21:08:31 +01001486 struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
Dimitri Sivanich833883d2008-04-18 13:39:00 -07001487 struct hrtimer_clock_base *base;
1488 int index, gettime = 1;
Peter Zijlstrad3d74452008-01-25 21:08:31 +01001489
1490 if (hrtimer_hres_active())
1491 return;
Thomas Gleixner79bf2bb2007-02-16 01:28:03 -08001492
Dimitri Sivanich833883d2008-04-18 13:39:00 -07001493 for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
1494 base = &cpu_base->clock_base[index];
John Stultzb007c382010-12-10 22:19:53 -08001495 if (!timerqueue_getnext(&base->active))
Dimitri Sivanich833883d2008-04-18 13:39:00 -07001496 continue;
1497
Mark McLoughlind7cfb602008-09-19 13:13:44 +01001498 if (gettime) {
Dimitri Sivanich833883d2008-04-18 13:39:00 -07001499 hrtimer_get_softirq_time(cpu_base);
1500 gettime = 0;
1501 }
1502
Thomas Gleixnerecb49d12009-11-17 16:36:54 +01001503 raw_spin_lock(&cpu_base->lock);
Dimitri Sivanich833883d2008-04-18 13:39:00 -07001504
John Stultzb007c382010-12-10 22:19:53 -08001505 while ((node = timerqueue_getnext(&base->active))) {
Dimitri Sivanich833883d2008-04-18 13:39:00 -07001506 struct hrtimer *timer;
1507
John Stultz998adc32010-09-20 19:19:17 -07001508 timer = container_of(node, struct hrtimer, node);
Arjan van de Vencc584b22008-09-01 15:02:30 -07001509 if (base->softirq_time.tv64 <=
1510 hrtimer_get_expires_tv64(timer))
Dimitri Sivanich833883d2008-04-18 13:39:00 -07001511 break;
1512
Xiao Guangrongc6a2a172009-08-10 10:51:23 +08001513 __run_hrtimer(timer, &base->softirq_time);
Dimitri Sivanich833883d2008-04-18 13:39:00 -07001514 }
Thomas Gleixnerecb49d12009-11-17 16:36:54 +01001515 raw_spin_unlock(&cpu_base->lock);
Dimitri Sivanich833883d2008-04-18 13:39:00 -07001516 }
Thomas Gleixnerc0a31322006-01-09 20:52:32 -08001517}
1518
1519/*
Thomas Gleixner10c94ec2006-01-09 20:52:35 -08001520 * Sleep related functions:
1521 */
Thomas Gleixnerc9cb2e32007-02-16 01:27:49 -08001522static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
Thomas Gleixner00362e32006-03-31 02:31:17 -08001523{
1524 struct hrtimer_sleeper *t =
1525 container_of(timer, struct hrtimer_sleeper, timer);
1526 struct task_struct *task = t->task;
1527
1528 t->task = NULL;
1529 if (task)
1530 wake_up_process(task);
1531
1532 return HRTIMER_NORESTART;
1533}
1534
Ingo Molnar36c8b582006-07-03 00:25:41 -07001535void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
Thomas Gleixner00362e32006-03-31 02:31:17 -08001536{
1537 sl->timer.function = hrtimer_wakeup;
1538 sl->task = task;
1539}
Stephen Hemminger2bc481c2009-08-28 23:41:29 -07001540EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
Thomas Gleixner00362e32006-03-31 02:31:17 -08001541
Thomas Gleixner669d7862006-03-31 02:31:19 -08001542static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
Thomas Gleixner10c94ec2006-01-09 20:52:35 -08001543{
Thomas Gleixner669d7862006-03-31 02:31:19 -08001544 hrtimer_init_sleeper(t, current);
Thomas Gleixner10c94ec2006-01-09 20:52:35 -08001545
Roman Zippel432569b2006-03-26 01:38:08 -08001546 do {
1547 set_current_state(TASK_INTERRUPTIBLE);
Arjan van de Vencc584b22008-09-01 15:02:30 -07001548 hrtimer_start_expires(&t->timer, mode);
Peter Zijlstra37bb6cb2008-01-25 21:08:32 +01001549 if (!hrtimer_active(&t->timer))
1550 t->task = NULL;
Roman Zippel432569b2006-03-26 01:38:08 -08001551
Thomas Gleixner54cdfdb2007-02-16 01:28:11 -08001552 if (likely(t->task))
Colin Cross437993e2013-05-06 23:50:19 +00001553 freezable_schedule();
Roman Zippel432569b2006-03-26 01:38:08 -08001554
Thomas Gleixner669d7862006-03-31 02:31:19 -08001555 hrtimer_cancel(&t->timer);
Thomas Gleixnerc9cb2e32007-02-16 01:27:49 -08001556 mode = HRTIMER_MODE_ABS;
Roman Zippel432569b2006-03-26 01:38:08 -08001557
Thomas Gleixner669d7862006-03-31 02:31:19 -08001558 } while (t->task && !signal_pending(current));
1559
Peter Zijlstra3588a082008-02-01 17:45:13 +01001560 __set_current_state(TASK_RUNNING);
1561
Thomas Gleixner669d7862006-03-31 02:31:19 -08001562 return t->task == NULL;
Thomas Gleixner10c94ec2006-01-09 20:52:35 -08001563}
1564
Oleg Nesterov080344b2008-02-01 17:29:05 +03001565static int update_rmtp(struct hrtimer *timer, struct timespec __user *rmtp)
1566{
1567 struct timespec rmt;
1568 ktime_t rem;
1569
Arjan van de Vencc584b22008-09-01 15:02:30 -07001570 rem = hrtimer_expires_remaining(timer);
Oleg Nesterov080344b2008-02-01 17:29:05 +03001571 if (rem.tv64 <= 0)
1572 return 0;
1573 rmt = ktime_to_timespec(rem);
1574
1575 if (copy_to_user(rmtp, &rmt, sizeof(*rmtp)))
1576 return -EFAULT;
1577
1578 return 1;
1579}
1580
Toyo Abe1711ef32006-09-29 02:00:28 -07001581long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
Thomas Gleixner10c94ec2006-01-09 20:52:35 -08001582{
Thomas Gleixner669d7862006-03-31 02:31:19 -08001583 struct hrtimer_sleeper t;
Oleg Nesterov080344b2008-02-01 17:29:05 +03001584 struct timespec __user *rmtp;
Thomas Gleixner237fc6e2008-04-30 00:55:04 -07001585 int ret = 0;
Thomas Gleixner10c94ec2006-01-09 20:52:35 -08001586
Thomas Gleixnerab8177b2011-05-20 13:05:15 +02001587 hrtimer_init_on_stack(&t.timer, restart->nanosleep.clockid,
Thomas Gleixner237fc6e2008-04-30 00:55:04 -07001588 HRTIMER_MODE_ABS);
Arjan van de Vencc584b22008-09-01 15:02:30 -07001589 hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
Thomas Gleixner10c94ec2006-01-09 20:52:35 -08001590
Thomas Gleixnerc9cb2e32007-02-16 01:27:49 -08001591 if (do_nanosleep(&t, HRTIMER_MODE_ABS))
Thomas Gleixner237fc6e2008-04-30 00:55:04 -07001592 goto out;
Thomas Gleixner10c94ec2006-01-09 20:52:35 -08001593
Thomas Gleixner029a07e2008-02-10 09:17:43 +01001594 rmtp = restart->nanosleep.rmtp;
Roman Zippel432569b2006-03-26 01:38:08 -08001595 if (rmtp) {
Thomas Gleixner237fc6e2008-04-30 00:55:04 -07001596 ret = update_rmtp(&t.timer, rmtp);
Oleg Nesterov080344b2008-02-01 17:29:05 +03001597 if (ret <= 0)
Thomas Gleixner237fc6e2008-04-30 00:55:04 -07001598 goto out;
Roman Zippel432569b2006-03-26 01:38:08 -08001599 }
Thomas Gleixner10c94ec2006-01-09 20:52:35 -08001600
Thomas Gleixner10c94ec2006-01-09 20:52:35 -08001601 /* The other values in restart are already filled in */
Thomas Gleixner237fc6e2008-04-30 00:55:04 -07001602 ret = -ERESTART_RESTARTBLOCK;
1603out:
1604 destroy_hrtimer_on_stack(&t.timer);
1605 return ret;
Thomas Gleixner10c94ec2006-01-09 20:52:35 -08001606}
1607
Oleg Nesterov080344b2008-02-01 17:29:05 +03001608long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
Thomas Gleixner10c94ec2006-01-09 20:52:35 -08001609 const enum hrtimer_mode mode, const clockid_t clockid)
1610{
1611 struct restart_block *restart;
Thomas Gleixner669d7862006-03-31 02:31:19 -08001612 struct hrtimer_sleeper t;
Thomas Gleixner237fc6e2008-04-30 00:55:04 -07001613 int ret = 0;
Arjan van de Ven3bd01202008-09-08 08:58:59 -07001614 unsigned long slack;
1615
1616 slack = current->timer_slack_ns;
1617 if (rt_task(current))
1618 slack = 0;
Thomas Gleixner10c94ec2006-01-09 20:52:35 -08001619
Thomas Gleixner237fc6e2008-04-30 00:55:04 -07001620 hrtimer_init_on_stack(&t.timer, clockid, mode);
Arjan van de Ven3bd01202008-09-08 08:58:59 -07001621 hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
Roman Zippel432569b2006-03-26 01:38:08 -08001622 if (do_nanosleep(&t, mode))
Thomas Gleixner237fc6e2008-04-30 00:55:04 -07001623 goto out;
Thomas Gleixner10c94ec2006-01-09 20:52:35 -08001624
George Anzinger79786722006-02-01 03:05:11 -08001625 /* Absolute timers do not update the rmtp value and restart: */
Thomas Gleixner237fc6e2008-04-30 00:55:04 -07001626 if (mode == HRTIMER_MODE_ABS) {
1627 ret = -ERESTARTNOHAND;
1628 goto out;
1629 }
Thomas Gleixner10c94ec2006-01-09 20:52:35 -08001630
Roman Zippel432569b2006-03-26 01:38:08 -08001631 if (rmtp) {
Thomas Gleixner237fc6e2008-04-30 00:55:04 -07001632 ret = update_rmtp(&t.timer, rmtp);
Oleg Nesterov080344b2008-02-01 17:29:05 +03001633 if (ret <= 0)
Thomas Gleixner237fc6e2008-04-30 00:55:04 -07001634 goto out;
Roman Zippel432569b2006-03-26 01:38:08 -08001635 }
Thomas Gleixner10c94ec2006-01-09 20:52:35 -08001636
1637 restart = &current_thread_info()->restart_block;
Toyo Abe1711ef32006-09-29 02:00:28 -07001638 restart->fn = hrtimer_nanosleep_restart;
Thomas Gleixnerab8177b2011-05-20 13:05:15 +02001639 restart->nanosleep.clockid = t.timer.base->clockid;
Thomas Gleixner029a07e2008-02-10 09:17:43 +01001640 restart->nanosleep.rmtp = rmtp;
Arjan van de Vencc584b22008-09-01 15:02:30 -07001641 restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);
Thomas Gleixner10c94ec2006-01-09 20:52:35 -08001642
Thomas Gleixner237fc6e2008-04-30 00:55:04 -07001643 ret = -ERESTART_RESTARTBLOCK;
1644out:
1645 destroy_hrtimer_on_stack(&t.timer);
1646 return ret;
Thomas Gleixner10c94ec2006-01-09 20:52:35 -08001647}
1648
Heiko Carstens58fd3aa2009-01-14 14:14:03 +01001649SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
1650 struct timespec __user *, rmtp)
Thomas Gleixner6ba1b912006-01-09 20:52:36 -08001651{
Oleg Nesterov080344b2008-02-01 17:29:05 +03001652 struct timespec tu;
Thomas Gleixner6ba1b912006-01-09 20:52:36 -08001653
1654 if (copy_from_user(&tu, rqtp, sizeof(tu)))
1655 return -EFAULT;
1656
1657 if (!timespec_valid(&tu))
1658 return -EINVAL;
1659
Oleg Nesterov080344b2008-02-01 17:29:05 +03001660 return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
Thomas Gleixner6ba1b912006-01-09 20:52:36 -08001661}
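
/*
 * Illustrative userspace sketch, not part of the original file: the rmtp
 * contract implemented by update_rmtp() above. When nanosleep() is
 * interrupted by a signal, the remaining time is copied back to the
 * caller, which can resume the sleep without losing track of how much
 * time is left:
 *
 *	#include <time.h>
 *	#include <errno.h>
 *
 *	static void sleep_one_second(void)
 *	{
 *		struct timespec req = { .tv_sec = 1, .tv_nsec = 0 }, rem;
 *
 *		while (nanosleep(&req, &rem) == -1 && errno == EINTR)
 *			req = rem;	// resume with the remaining time
 *	}
 */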

/*
 * Functions related to boot-time initialization:
 */
static void __cpuinit init_hrtimers_cpu(int cpu)
{
	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
	int i;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		cpu_base->clock_base[i].cpu_base = cpu_base;
		timerqueue_init_head(&cpu_base->clock_base[i].active);
	}

	hrtimer_init_hres(cpu_base);
}

#ifdef CONFIG_HOTPLUG_CPU

static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
				 struct hrtimer_clock_base *new_base)
{
	struct hrtimer *timer;
	struct timerqueue_node *node;

	while ((node = timerqueue_getnext(&old_base->active))) {
		timer = container_of(node, struct hrtimer, node);
		BUG_ON(hrtimer_callback_running(timer));
		debug_deactivate(timer);

		/*
		 * Mark it as STATE_MIGRATE, not INACTIVE; otherwise the
		 * timer could be seen as !active and just vanish away
		 * under us on another CPU.
		 */
		__remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0);
		timer->base = new_base;
		/*
		 * Enqueue the timers on the new cpu. This does not
		 * reprogram the event device even if the timer expires
		 * before the earliest one on this CPU, but we run
		 * hrtimer_interrupt after we have migrated everything
		 * to sort out already-expired timers and reprogram the
		 * event device.
		 */
		enqueue_hrtimer(timer, new_base);

		/* Clear the migration state bit */
		timer->state &= ~HRTIMER_STATE_MIGRATE;
	}
}

static void migrate_hrtimers(int scpu)
{
	struct hrtimer_cpu_base *old_base, *new_base;
	int i;

	BUG_ON(cpu_online(scpu));
	tick_cancel_sched_timer(scpu);

	local_irq_disable();
	old_base = &per_cpu(hrtimer_bases, scpu);
	new_base = &__get_cpu_var(hrtimer_bases);
	/*
	 * The caller is globally serialized and nobody else
	 * takes two locks at once, so deadlock is not possible.
	 */
	raw_spin_lock(&new_base->lock);
	raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		migrate_hrtimer_list(&old_base->clock_base[i],
				     &new_base->clock_base[i]);
	}

	raw_spin_unlock(&old_base->lock);
	raw_spin_unlock(&new_base->lock);

	/* Check if we have expired work to do */
	__hrtimer_peek_ahead_timers();
	local_irq_enable();
}

#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
					unsigned long action, void *hcpu)
{
	int scpu = (long)hcpu;

	switch (action) {

	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		init_hrtimers_cpu(scpu);
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DYING, &scpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
	{
		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &scpu);
		migrate_hrtimers(scpu);
		break;
	}
#endif

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata hrtimers_nb = {
	.notifier_call = hrtimer_cpu_notify,
};

void __init hrtimers_init(void)
{
	hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
			   (void *)(long)smp_processor_id());
	register_cpu_notifier(&hrtimers_nb);
#ifdef CONFIG_HIGH_RES_TIMERS
	open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
#endif
}

/**
 * schedule_hrtimeout_range_clock - sleep until timeout
 * @expires:	timeout value (ktime_t)
 * @delta:	slack in expires timeout (nanoseconds)
 * @mode:	timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
 * @clock:	timer clock, CLOCK_MONOTONIC or CLOCK_REALTIME
 */
int __sched
schedule_hrtimeout_range_clock(ktime_t *expires, unsigned long delta,
			       const enum hrtimer_mode mode, int clock)
{
	struct hrtimer_sleeper t;

	/*
	 * Optimize when a zero timeout value is given. It does not
	 * matter whether this is an absolute or a relative time.
	 */
	if (expires && !expires->tv64) {
		__set_current_state(TASK_RUNNING);
		return 0;
	}

	/*
	 * A NULL parameter means "infinite"
	 */
	if (!expires) {
		schedule();
		__set_current_state(TASK_RUNNING);
		return -EINTR;
	}

	hrtimer_init_on_stack(&t.timer, clock, mode);
	hrtimer_set_expires_range_ns(&t.timer, *expires, delta);

	hrtimer_init_sleeper(&t, current);

	hrtimer_start_expires(&t.timer, mode);
	if (!hrtimer_active(&t.timer))
		t.task = NULL;

	if (likely(t.task))
		schedule();

	hrtimer_cancel(&t.timer);
	destroy_hrtimer_on_stack(&t.timer);

	__set_current_state(TASK_RUNNING);

	return !t.task ? 0 : -EINTR;
}

/**
 * schedule_hrtimeout_range - sleep until timeout
 * @expires:	timeout value (ktime_t)
 * @delta:	slack in expires timeout (nanoseconds)
 * @mode:	timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
 *
 * Make the current task sleep until the given expiry time has
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * The @delta argument gives the kernel the freedom to schedule the
 * actual wakeup to a time that is both power and performance friendly.
 * The kernel gives the normal best effort behavior for "@expires+@delta",
 * but may decide to fire the timer earlier, though no earlier than
 * @expires.
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @expires time is guaranteed to
 * pass before the routine returns.
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Returns 0 when the timer has expired, otherwise -EINTR.
 */
int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
				     const enum hrtimer_mode mode)
{
	return schedule_hrtimeout_range_clock(expires, delta, mode,
					      CLOCK_MONOTONIC);
}
EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);
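
/*
 * Illustrative sketch, not part of the original file: a caller that waits
 * up to 10ms but grants the kernel 1ms of slack so the wakeup can be
 * coalesced with other timers. The function name and the numbers are
 * made-up example values.
 */
static int __maybe_unused example_wait_with_slack(void)
{
	ktime_t timeout = ktime_set(0, 10 * NSEC_PER_MSEC);

	set_current_state(TASK_INTERRUPTIBLE);
	/* Returns 0 on timer expiry, -EINTR if woken early */
	return schedule_hrtimeout_range(&timeout, NSEC_PER_MSEC,
					HRTIMER_MODE_REL);
}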

/**
 * schedule_hrtimeout - sleep until timeout
 * @expires:	timeout value (ktime_t)
 * @mode:	timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
 *
 * Make the current task sleep until the given expiry time has
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @expires time is guaranteed to
 * pass before the routine returns.
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Returns 0 when the timer has expired, otherwise -EINTR.
 */
int __sched schedule_hrtimeout(ktime_t *expires,
			       const enum hrtimer_mode mode)
{
	return schedule_hrtimeout_range(expires, 0, mode);
}
EXPORT_SYMBOL_GPL(schedule_hrtimeout);
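
/*
 * Illustrative sketch, not part of the original file: an absolute-deadline
 * wait built on schedule_hrtimeout(). Because the deadline is absolute,
 * a wait that is woken early can simply be restarted against the same
 * expiry without drifting. The function name is hypothetical.
 */
static int __maybe_unused example_wait_until(ktime_t deadline)
{
	int ret;

	do {
		set_current_state(TASK_INTERRUPTIBLE);
		ret = schedule_hrtimeout(&deadline, HRTIMER_MODE_ABS);
	} while (ret == -EINTR && !signal_pending(current));

	return ret;
}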