/*
 *  linux/kernel/hrtimer.c
 *
 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
 *
 *  High-resolution kernel timers
 *
 *  In contrast to the low-resolution timeout API implemented in
 *  kernel/timer.c, hrtimers provide finer resolution and accuracy
 *  depending on system configuration and capabilities.
 *
 *  These timers are currently used for:
 *   - itimers
 *   - POSIX timers
 *   - nanosleep
 *   - precise in-kernel timing
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  Credits:
 *	based on kernel/timer.c
 *
 *	Help, testing, suggestions, bugfixes, improvements were
 *	provided by:
 *
 *	George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
 *	et. al.
 *
 *  For licencing details see kernel-base/COPYING
 */

#include <linux/cpu.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/tick.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/debugobjects.h>
#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/rt.h>
#include <linux/timer.h>

#include <asm/uaccess.h>

#include <trace/events/timer.h>

/*
 * The timer bases:
 *
 * There are more clockids than hrtimer bases. Thus, we index
 * into the timer bases by the hrtimer_base_type enum. When trying
 * to reach a base using a clockid, hrtimer_clockid_to_base()
 * is used to convert from clockid to the proper hrtimer_base_type.
 */
DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
{

	.clock_base =
	{
		{
			.index = HRTIMER_BASE_MONOTONIC,
			.clockid = CLOCK_MONOTONIC,
			.get_time = &ktime_get,
			.resolution = KTIME_LOW_RES,
		},
		{
			.index = HRTIMER_BASE_REALTIME,
			.clockid = CLOCK_REALTIME,
			.get_time = &ktime_get_real,
			.resolution = KTIME_LOW_RES,
		},
		{
			.index = HRTIMER_BASE_BOOTTIME,
			.clockid = CLOCK_BOOTTIME,
			.get_time = &ktime_get_boottime,
			.resolution = KTIME_LOW_RES,
		},
	}
};

static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
	[CLOCK_REALTIME]	= HRTIMER_BASE_REALTIME,
	[CLOCK_MONOTONIC]	= HRTIMER_BASE_MONOTONIC,
	[CLOCK_BOOTTIME]	= HRTIMER_BASE_BOOTTIME,
};

static inline int hrtimer_clockid_to_base(clockid_t clock_id)
{
	return hrtimer_clock_to_base_table[clock_id];
}


/*
 * Get the coarse grained time at the softirq based on xtime and
 * wall_to_monotonic.
 */
static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
{
	ktime_t xtim, mono, boot;
	struct timespec xts, tom, slp;

	get_xtime_and_monotonic_and_sleep_offset(&xts, &tom, &slp);

	xtim = timespec_to_ktime(xts);
	mono = ktime_add(xtim, timespec_to_ktime(tom));
	boot = ktime_add(mono, timespec_to_ktime(slp));
	base->clock_base[HRTIMER_BASE_REALTIME].softirq_time = xtim;
	base->clock_base[HRTIMER_BASE_MONOTONIC].softirq_time = mono;
	base->clock_base[HRTIMER_BASE_BOOTTIME].softirq_time = boot;
}

/*
 * Functions and macros which are different for UP/SMP systems are kept in a
 * single place
 */
#ifdef CONFIG_SMP

/*
 * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on the lists/queues.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static
struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
					     unsigned long *flags)
{
	struct hrtimer_clock_base *base;

	for (;;) {
		base = timer->base;
		if (likely(base != NULL)) {
			raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
			if (likely(base == timer->base))
				return base;
			/* The timer has migrated to another CPU: */
			raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
		}
		cpu_relax();
	}
}


/*
 * Get the preferred target CPU for NOHZ
 */
static int hrtimer_get_target(int this_cpu, int pinned)
{
#ifdef CONFIG_NO_HZ
	if (!pinned && get_sysctl_timer_migration() && idle_cpu(this_cpu))
		return get_nohz_timer_target();
#endif
	return this_cpu;
}

/*
 * With HIGHRES=y we do not migrate the timer when it is expiring
 * before the next event on the target cpu because we cannot reprogram
 * the target cpu hardware and we would cause it to fire late.
 *
 * Called with cpu_base->lock of target cpu held.
 */
static int
hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
{
#ifdef CONFIG_HIGH_RES_TIMERS
	ktime_t expires;

	if (!new_base->cpu_base->hres_active)
		return 0;

	expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
	return expires.tv64 <= new_base->cpu_base->expires_next.tv64;
#else
	return 0;
#endif
}

/*
 * Switch the timer base to the current CPU when possible.
 */
static inline struct hrtimer_clock_base *
switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
		    int pinned)
{
	struct hrtimer_clock_base *new_base;
	struct hrtimer_cpu_base *new_cpu_base;
	int this_cpu = smp_processor_id();
	int cpu = hrtimer_get_target(this_cpu, pinned);
	int basenum = base->index;

again:
	new_cpu_base = &per_cpu(hrtimer_bases, cpu);
	new_base = &new_cpu_base->clock_base[basenum];

	if (base != new_base) {
		/*
		 * We are trying to move timer to new_base.
		 * However we can't change timer's base while it is running,
		 * so we keep it on the same CPU. No hassle vs. reprogramming
		 * the event source in the high resolution case. The softirq
		 * code will take care of this when the timer function has
		 * completed. There is no conflict as we hold the lock until
		 * the timer is enqueued.
		 */
		if (unlikely(hrtimer_callback_running(timer)))
			return base;

		/* See the comment in lock_timer_base() */
		timer->base = NULL;
		raw_spin_unlock(&base->cpu_base->lock);
		raw_spin_lock(&new_base->cpu_base->lock);

		if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
			cpu = this_cpu;
			raw_spin_unlock(&new_base->cpu_base->lock);
			raw_spin_lock(&base->cpu_base->lock);
			timer->base = base;
			goto again;
		}
		timer->base = new_base;
	}
	return new_base;
}

#else /* CONFIG_SMP */

static inline struct hrtimer_clock_base *
lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	struct hrtimer_clock_base *base = timer->base;

	raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);

	return base;
}

# define switch_hrtimer_base(t, b, p)	(b)

#endif	/* !CONFIG_SMP */

/*
 * Functions for the union type storage format of ktime_t which are
 * too large for inlining:
 */
#if BITS_PER_LONG < 64
# ifndef CONFIG_KTIME_SCALAR
/**
 * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
 * @kt:		addend
 * @nsec:	the scalar nsec value to add
 *
 * Returns the sum of kt and nsec in ktime_t format
 */
ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
{
	ktime_t tmp;

	if (likely(nsec < NSEC_PER_SEC)) {
		tmp.tv64 = nsec;
	} else {
		unsigned long rem = do_div(nsec, NSEC_PER_SEC);

		tmp = ktime_set((long)nsec, rem);
	}

	return ktime_add(kt, tmp);
}

EXPORT_SYMBOL_GPL(ktime_add_ns);

/**
 * ktime_sub_ns - Subtract a scalar nanoseconds value from a ktime_t variable
 * @kt:		minuend
 * @nsec:	the scalar nsec value to subtract
 *
 * Returns the subtraction of @nsec from @kt in ktime_t format
 */
ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec)
{
	ktime_t tmp;

	if (likely(nsec < NSEC_PER_SEC)) {
		tmp.tv64 = nsec;
	} else {
		unsigned long rem = do_div(nsec, NSEC_PER_SEC);

		tmp = ktime_set((long)nsec, rem);
	}

	return ktime_sub(kt, tmp);
}

EXPORT_SYMBOL_GPL(ktime_sub_ns);
# endif /* !CONFIG_KTIME_SCALAR */

/*
 * Divide a ktime value by a nanosecond value
 */
u64 ktime_divns(const ktime_t kt, s64 div)
{
	u64 dclc;
	int sft = 0;

	dclc = ktime_to_ns(kt);
	/* Make sure the divisor is less than 2^32: */
	while (div >> 32) {
		sft++;
		div >>= 1;
	}
	dclc >>= sft;
	do_div(dclc, (unsigned long) div);

	return dclc;
}
#endif /* BITS_PER_LONG < 64 */

/*
 * Add two ktime values and do a safety check for overflow:
 */
ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
{
	ktime_t res = ktime_add(lhs, rhs);

	/*
	 * We use KTIME_SEC_MAX here, the maximum timeout which we can
	 * return to user space in a timespec:
	 */
	if (res.tv64 < 0 || res.tv64 < lhs.tv64 || res.tv64 < rhs.tv64)
		res = ktime_set(KTIME_SEC_MAX, 0);

	return res;
}

EXPORT_SYMBOL_GPL(ktime_add_safe);

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr hrtimer_debug_descr;

static void *hrtimer_debug_hint(void *addr)
{
	return ((struct hrtimer *) addr)->function;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int hrtimer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct hrtimer *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		hrtimer_cancel(timer);
		debug_object_init(timer, &hrtimer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
{
	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int hrtimer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct hrtimer *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		hrtimer_cancel(timer);
		debug_object_free(timer, &hrtimer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr hrtimer_debug_descr = {
	.name		= "hrtimer",
	.debug_hint	= hrtimer_debug_hint,
	.fixup_init	= hrtimer_fixup_init,
	.fixup_activate	= hrtimer_fixup_activate,
	.fixup_free	= hrtimer_fixup_free,
};

static inline void debug_hrtimer_init(struct hrtimer *timer)
{
	debug_object_init(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_activate(struct hrtimer *timer)
{
	debug_object_activate(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
{
	debug_object_deactivate(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_free(struct hrtimer *timer)
{
	debug_object_free(timer, &hrtimer_debug_descr);
}

static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode);

void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode)
{
	debug_object_init_on_stack(timer, &hrtimer_debug_descr);
	__hrtimer_init(timer, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init_on_stack);

void destroy_hrtimer_on_stack(struct hrtimer *timer)
{
	debug_object_free(timer, &hrtimer_debug_descr);
}
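
/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * the *_on_stack variants above are meant to be paired around the lifetime
 * of a timer that lives on the kernel stack, so that debugobjects can track
 * it without false positives. The variable name below is a hypothetical
 * placeholder:
 *
 *	struct hrtimer_sleeper t;
 *
 *	hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	hrtimer_set_expires(&t.timer, timeout);
 *	...
 *	destroy_hrtimer_on_stack(&t.timer);
 */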

#else
static inline void debug_hrtimer_init(struct hrtimer *timer) { }
static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
#endif

static inline void
debug_init(struct hrtimer *timer, clockid_t clockid,
	   enum hrtimer_mode mode)
{
	debug_hrtimer_init(timer);
	trace_hrtimer_init(timer, clockid, mode);
}

static inline void debug_activate(struct hrtimer *timer)
{
	debug_hrtimer_activate(timer);
	trace_hrtimer_start(timer);
}

static inline void debug_deactivate(struct hrtimer *timer)
{
	debug_hrtimer_deactivate(timer);
	trace_hrtimer_cancel(timer);
}

/* High resolution timer related functions */
#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer enabled ?
 */
static int hrtimer_hres_enabled __read_mostly  = 1;

/*
 * Enable / Disable high resolution mode
 */
static int __init setup_hrtimer_hres(char *str)
{
	if (!strcmp(str, "off"))
		hrtimer_hres_enabled = 0;
	else if (!strcmp(str, "on"))
		hrtimer_hres_enabled = 1;
	else
		return 0;
	return 1;
}

__setup("highres=", setup_hrtimer_hres);
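
/*
 * Editorial note (not part of the original source): high resolution mode
 * can be disabled at boot time by passing "highres=off" on the kernel
 * command line; it is enabled by default (equivalent to "highres=on").
 */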

/*
 * hrtimer_is_hres_enabled - query if the highres mode is enabled
 */
static inline int hrtimer_is_hres_enabled(void)
{
	return hrtimer_hres_enabled;
}

/*
 * Is the high resolution mode active ?
 */
static inline int hrtimer_hres_active(void)
{
	return __this_cpu_read(hrtimer_bases.hres_active);
}

/*
 * Reprogram the event source with checking both queues for the
 * next event
 * Called with interrupts disabled and base->lock held
 */
static void
hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
{
	int i;
	struct hrtimer_clock_base *base = cpu_base->clock_base;
	ktime_t expires, expires_next;

	expires_next.tv64 = KTIME_MAX;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
		struct hrtimer *timer;
		struct timerqueue_node *next;

		next = timerqueue_getnext(&base->active);
		if (!next)
			continue;
		timer = container_of(next, struct hrtimer, node);

		expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
		/*
		 * clock_was_set() has changed base->offset so the
		 * result might be negative. Fix it up to prevent a
		 * false positive in clockevents_program_event()
		 */
		if (expires.tv64 < 0)
			expires.tv64 = 0;
		if (expires.tv64 < expires_next.tv64)
			expires_next = expires;
	}

	if (skip_equal && expires_next.tv64 == cpu_base->expires_next.tv64)
		return;

	cpu_base->expires_next.tv64 = expires_next.tv64;

	if (cpu_base->expires_next.tv64 != KTIME_MAX)
		tick_program_event(cpu_base->expires_next, 1);
}

/*
 * Shared reprogramming for clock_realtime and clock_monotonic
 *
 * When a timer is enqueued and expires earlier than the already enqueued
 * timers, we have to check, whether it expires earlier than the timer for
 * which the clock event device was armed.
 *
 * Called with interrupts disabled and base->cpu_base.lock held
 */
static int hrtimer_reprogram(struct hrtimer *timer,
			     struct hrtimer_clock_base *base)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
	int res;

	WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);

	/*
	 * When the callback is running, we do not reprogram the clock event
	 * device. The timer callback is either running on a different CPU or
	 * the callback is executed in the hrtimer_interrupt context. The
	 * reprogramming is handled either by the softirq, which called the
	 * callback or at the end of the hrtimer_interrupt.
	 */
	if (hrtimer_callback_running(timer))
		return 0;

	/*
	 * CLOCK_REALTIME timer might be requested with an absolute
	 * expiry time which is less than base->offset. Nothing wrong
	 * about that, just avoid calling into the tick code, which
	 * now objects to negative expiry values.
	 */
	if (expires.tv64 < 0)
		return -ETIME;

	if (expires.tv64 >= cpu_base->expires_next.tv64)
		return 0;

	/*
	 * If a hang was detected in the last timer interrupt then we
	 * do not schedule a timer which is earlier than the expiry
	 * which we enforced in the hang detection. We want the system
	 * to make progress.
	 */
	if (cpu_base->hang_detected)
		return 0;

	/*
	 * Clockevents returns -ETIME, when the event was in the past.
	 */
	res = tick_program_event(expires, 0);
	if (!IS_ERR_VALUE(res))
		cpu_base->expires_next = expires;
	return res;
}

/*
 * Initialize the high resolution related parts of cpu_base
 */
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
{
	base->expires_next.tv64 = KTIME_MAX;
	base->hres_active = 0;
}

/*
 * When High resolution timers are active, try to reprogram. Note, that in case
 * the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no expiry
 * check happens. The timer gets enqueued into the rbtree. The reprogramming
 * and expiry check is done in the hrtimer_interrupt or in the softirq.
 */
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
					    struct hrtimer_clock_base *base)
{
	return base->cpu_base->hres_active && hrtimer_reprogram(timer, base);
}

static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
{
	ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
	ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;

	return ktime_get_update_offsets(offs_real, offs_boot);
}

/*
 * Retrigger next event is called after clock was set
 *
 * Called with interrupts disabled via on_each_cpu()
 */
static void retrigger_next_event(void *arg)
{
	struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);

	if (!hrtimer_hres_active())
		return;

	raw_spin_lock(&base->lock);
	hrtimer_update_base(base);
	hrtimer_force_reprogram(base, 0);
	raw_spin_unlock(&base->lock);
}

/*
 * Switch to high resolution mode
 */
static int hrtimer_switch_to_hres(void)
{
	int i, cpu = smp_processor_id();
	struct hrtimer_cpu_base *base = &per_cpu(hrtimer_bases, cpu);
	unsigned long flags;

	if (base->hres_active)
		return 1;

	local_irq_save(flags);

	if (tick_init_highres()) {
		local_irq_restore(flags);
		printk(KERN_WARNING "Could not switch to high resolution "
				    "mode on CPU %d\n", cpu);
		return 0;
	}
	base->hres_active = 1;
	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
		base->clock_base[i].resolution = KTIME_HIGH_RES;

	tick_setup_sched_timer();
	/* "Retrigger" the interrupt to get things going */
	retrigger_next_event(NULL);
	local_irq_restore(flags);
	return 1;
}

/*
 * Called from timekeeping code to reprogram the hrtimer interrupt
 * device. If called from the timer interrupt context we defer it to
 * softirq context.
 */
void clock_was_set_delayed(void)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);

	cpu_base->clock_was_set = 1;
	__raise_softirq_irqoff(HRTIMER_SOFTIRQ);
}

#else

static inline int hrtimer_hres_active(void) { return 0; }
static inline int hrtimer_is_hres_enabled(void) { return 0; }
static inline int hrtimer_switch_to_hres(void) { return 0; }
static inline void
hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
					    struct hrtimer_clock_base *base)
{
	return 0;
}
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
static inline void retrigger_next_event(void *arg) { }

#endif /* CONFIG_HIGH_RES_TIMERS */

/*
 * Clock realtime was set
 *
 * Change the offset of the realtime clock vs. the monotonic
 * clock.
 *
 * We might have to reprogram the high resolution timer interrupt. On
 * SMP we call the architecture specific code to retrigger _all_ high
 * resolution timer interrupts. On UP we just disable interrupts and
 * call the high resolution interrupt code.
 */
void clock_was_set(void)
{
#ifdef CONFIG_HIGH_RES_TIMERS
	/* Retrigger the CPU local events everywhere */
	on_each_cpu(retrigger_next_event, NULL, 1);
#endif
	timerfd_clock_was_set();
}

/*
 * During resume we might have to reprogram the high resolution timer
 * interrupt (on the local CPU):
 */
void hrtimers_resume(void)
{
	WARN_ONCE(!irqs_disabled(),
		  KERN_INFO "hrtimers_resume() called with IRQs enabled!");

	retrigger_next_event(NULL);
	timerfd_clock_was_set();
}

static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
{
#ifdef CONFIG_TIMER_STATS
	if (timer->start_site)
		return;
	timer->start_site = __builtin_return_address(0);
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
#endif
}

static inline void timer_stats_hrtimer_clear_start_info(struct hrtimer *timer)
{
#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
#endif
}

static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
{
#ifdef CONFIG_TIMER_STATS
	if (likely(!timer_stats_active))
		return;
	timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
				 timer->function, timer->start_comm, 0);
#endif
}

/*
 * Counterpart to lock_hrtimer_base above:
 */
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
}

/**
 * hrtimer_forward - forward the timer expiry
 * @timer:	hrtimer to forward
 * @now:	forward past this time
 * @interval:	the interval to forward
 *
 * Forward the timer expiry so it will expire in the future.
 * Returns the number of overruns.
 */
u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
	u64 orun = 1;
	ktime_t delta;

	delta = ktime_sub(now, hrtimer_get_expires(timer));

	if (delta.tv64 < 0)
		return 0;

	if (interval.tv64 < timer->base->resolution.tv64)
		interval.tv64 = timer->base->resolution.tv64;

	if (unlikely(delta.tv64 >= interval.tv64)) {
		s64 incr = ktime_to_ns(interval);

		orun = ktime_divns(delta, incr);
		hrtimer_add_expires_ns(timer, incr * orun);
		if (hrtimer_get_expires_tv64(timer) > now.tv64)
			return orun;
		/*
		 * This (and the ktime_add() below) is the
		 * correction for exact:
		 */
		orun++;
	}
	hrtimer_add_expires(timer, interval);

	return orun;
}
EXPORT_SYMBOL_GPL(hrtimer_forward);
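
/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * hrtimer_forward() is typically used from a timer callback to implement a
 * periodic timer; hrtimer_forward_now() from <linux/hrtimer.h> is the usual
 * shorthand for forwarding relative to the current time. The callback and
 * period names below are hypothetical placeholders:
 *
 *	static enum hrtimer_restart my_periodic_callback(struct hrtimer *timer)
 *	{
 *		hrtimer_forward_now(timer, my_period);
 *		return HRTIMER_RESTART;
 *	}
 */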
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 847 |  | 
 | 848 | /* | 
 | 849 |  * enqueue_hrtimer - internal function to (re)start a timer | 
 | 850 |  * | 
 | 851 |  * The timer is inserted in expiry order. Insertion into the | 
 | 852 |  * red black tree is O(log(n)). Must hold the base lock. | 
| Peter Zijlstra | a6037b6 | 2009-01-05 11:28:22 +0100 | [diff] [blame] | 853 |  * | 
 | 854 |  * Returns 1 when the new timer is the leftmost timer in the tree. | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 855 |  */ | 
| Peter Zijlstra | a6037b6 | 2009-01-05 11:28:22 +0100 | [diff] [blame] | 856 | static int enqueue_hrtimer(struct hrtimer *timer, | 
 | 857 | 			   struct hrtimer_clock_base *base) | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 858 | { | 
| Xiao Guangrong | c6a2a17 | 2009-08-10 10:51:23 +0800 | [diff] [blame] | 859 | 	debug_activate(timer); | 
| Thomas Gleixner | 237fc6e | 2008-04-30 00:55:04 -0700 | [diff] [blame] | 860 |  | 
| John Stultz | 998adc3 | 2010-09-20 19:19:17 -0700 | [diff] [blame] | 861 | 	timerqueue_add(&base->active, &timer->node); | 
| Thomas Gleixner | ab8177b | 2011-05-20 13:05:15 +0200 | [diff] [blame] | 862 | 	base->cpu_base->active_bases |= 1 << base->index; | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 863 |  | 
 | 864 | 	/* | 
| Thomas Gleixner | 303e967 | 2007-02-16 01:27:51 -0800 | [diff] [blame] | 865 | 	 * HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the | 
 | 866 | 	 * state of a possibly running callback. | 
 | 867 | 	 */ | 
 | 868 | 	timer->state |= HRTIMER_STATE_ENQUEUED; | 
| Peter Zijlstra | a6037b6 | 2009-01-05 11:28:22 +0100 | [diff] [blame] | 869 |  | 
| John Stultz | 998adc3 | 2010-09-20 19:19:17 -0700 | [diff] [blame] | 870 | 	return (&timer->node == base->active.next); | 
| Thomas Gleixner | 288867e | 2006-01-12 11:25:54 +0100 | [diff] [blame] | 871 | } | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 872 |  | 
 | 873 | /* | 
 | 874 |  * __remove_hrtimer - internal function to remove a timer | 
 | 875 |  * | 
 | 876 |  * Caller must hold the base lock. | 
| Thomas Gleixner | 54cdfdb | 2007-02-16 01:28:11 -0800 | [diff] [blame] | 877 |  * | 
 | 878 |  * High resolution timer mode reprograms the clock event device when the | 
 | 879 |  * timer is the one which expires next. The caller can disable this by setting | 
 | 880 |  * reprogram to zero. This is useful, when the context does a reprogramming | 
 | 881 |  * anyway (e.g. timer interrupt) | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 882 |  */ | 
| Thomas Gleixner | 3c8aa39 | 2007-02-16 01:27:50 -0800 | [diff] [blame] | 883 | static void __remove_hrtimer(struct hrtimer *timer, | 
| Thomas Gleixner | 303e967 | 2007-02-16 01:27:51 -0800 | [diff] [blame] | 884 | 			     struct hrtimer_clock_base *base, | 
| Thomas Gleixner | 54cdfdb | 2007-02-16 01:28:11 -0800 | [diff] [blame] | 885 | 			     unsigned long newstate, int reprogram) | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 886 | { | 
| Jeff Ohlstein | 27c9cd7 | 2011-11-18 15:47:10 -0800 | [diff] [blame] | 887 | 	struct timerqueue_node *next_timer; | 
| Ashwin Chaugule | 7403f41 | 2009-09-01 23:03:33 -0400 | [diff] [blame] | 888 | 	if (!(timer->state & HRTIMER_STATE_ENQUEUED)) | 
 | 889 | 		goto out; | 
 | 890 |  | 
| Jeff Ohlstein | 27c9cd7 | 2011-11-18 15:47:10 -0800 | [diff] [blame] | 891 | 	next_timer = timerqueue_getnext(&base->active); | 
 | 892 | 	timerqueue_del(&base->active, &timer->node); | 
 | 893 | 	if (&timer->node == next_timer) { | 
| Ashwin Chaugule | 7403f41 | 2009-09-01 23:03:33 -0400 | [diff] [blame] | 894 | #ifdef CONFIG_HIGH_RES_TIMERS | 
 | 895 | 		/* Reprogram the clock event device. if enabled */ | 
 | 896 | 		if (reprogram && hrtimer_hres_active()) { | 
 | 897 | 			ktime_t expires; | 
 | 898 |  | 
 | 899 | 			expires = ktime_sub(hrtimer_get_expires(timer), | 
 | 900 | 					    base->offset); | 
 | 901 | 			if (base->cpu_base->expires_next.tv64 == expires.tv64) | 
 | 902 | 				hrtimer_force_reprogram(base->cpu_base, 1); | 
| Thomas Gleixner | 54cdfdb | 2007-02-16 01:28:11 -0800 | [diff] [blame] | 903 | 		} | 
| Ashwin Chaugule | 7403f41 | 2009-09-01 23:03:33 -0400 | [diff] [blame] | 904 | #endif | 
| Thomas Gleixner | 54cdfdb | 2007-02-16 01:28:11 -0800 | [diff] [blame] | 905 | 	} | 
| Thomas Gleixner | ab8177b | 2011-05-20 13:05:15 +0200 | [diff] [blame] | 906 | 	if (!timerqueue_getnext(&base->active)) | 
 | 907 | 		base->cpu_base->active_bases &= ~(1 << base->index); | 
| Ashwin Chaugule | 7403f41 | 2009-09-01 23:03:33 -0400 | [diff] [blame] | 908 | out: | 
| Thomas Gleixner | 303e967 | 2007-02-16 01:27:51 -0800 | [diff] [blame] | 909 | 	timer->state = newstate; | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 910 | } | 
 | 911 |  | 
 | 912 | /* | 
 | 913 |  * remove hrtimer, called with base lock held | 
 | 914 |  */ | 
 | 915 | static inline int | 
| Thomas Gleixner | 3c8aa39 | 2007-02-16 01:27:50 -0800 | [diff] [blame] | 916 | remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base) | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 917 | { | 
| Thomas Gleixner | 303e967 | 2007-02-16 01:27:51 -0800 | [diff] [blame] | 918 | 	if (hrtimer_is_queued(timer)) { | 
| Salman Qazi | f13d4f9 | 2010-10-12 07:25:19 -0700 | [diff] [blame] | 919 | 		unsigned long state; | 
| Thomas Gleixner | 54cdfdb | 2007-02-16 01:28:11 -0800 | [diff] [blame] | 920 | 		int reprogram; | 
 | 921 |  | 
 | 922 | 		/* | 
 | 923 | 		 * Remove the timer and force reprogramming when high | 
 | 924 | 		 * resolution mode is active and the timer is on the current | 
 | 925 | 		 * CPU. If we remove a timer on another CPU, reprogramming is | 
 | 926 | 		 * skipped. The interrupt event on this CPU is fired and | 
 | 927 | 		 * reprogramming happens in the interrupt handler. This is a | 
 | 928 | 		 * rare case and less expensive than a smp call. | 
 | 929 | 		 */ | 
| Xiao Guangrong | c6a2a17 | 2009-08-10 10:51:23 +0800 | [diff] [blame] | 930 | 		debug_deactivate(timer); | 
| Ingo Molnar | 82f67cd | 2007-02-16 01:28:13 -0800 | [diff] [blame] | 931 | 		timer_stats_hrtimer_clear_start_info(timer); | 
| Thomas Gleixner | 54cdfdb | 2007-02-16 01:28:11 -0800 | [diff] [blame] | 932 | 		reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases); | 
| Salman Qazi | f13d4f9 | 2010-10-12 07:25:19 -0700 | [diff] [blame] | 933 | 		/* | 
 | 934 | 		 * We must preserve the CALLBACK state flag here, | 
 | 935 | 		 * otherwise we could move the timer base in | 
 | 936 | 		 * switch_hrtimer_base. | 
 | 937 | 		 */ | 
 | 938 | 		state = timer->state & HRTIMER_STATE_CALLBACK; | 
 | 939 | 		__remove_hrtimer(timer, base, state, reprogram); | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 940 | 		return 1; | 
 | 941 | 	} | 
 | 942 | 	return 0; | 
 | 943 | } | 
 | 944 |  | 
| Peter Zijlstra | 7f1e2ca | 2009-03-13 12:21:27 +0100 | [diff] [blame] | 945 | int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, | 
 | 946 | 		unsigned long delta_ns, const enum hrtimer_mode mode, | 
 | 947 | 		int wakeup) | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 948 | { | 
| Thomas Gleixner | 3c8aa39 | 2007-02-16 01:27:50 -0800 | [diff] [blame] | 949 | 	struct hrtimer_clock_base *base, *new_base; | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 950 | 	unsigned long flags; | 
| Peter Zijlstra | a6037b6 | 2009-01-05 11:28:22 +0100 | [diff] [blame] | 951 | 	int ret, leftmost; | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 952 |  | 
 | 953 | 	base = lock_hrtimer_base(timer, &flags); | 
 | 954 |  | 
 | 955 | 	/* Remove an active timer from the queue: */ | 
 | 956 | 	ret = remove_hrtimer(timer, base); | 
 | 957 |  | 
 | 958 | 	/* Switch the timer base, if necessary: */ | 
| Arun R Bharadwaj | 597d027 | 2009-04-16 12:13:26 +0530 | [diff] [blame] | 959 | 	new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED); | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 960 |  | 
| Arun R Bharadwaj | 597d027 | 2009-04-16 12:13:26 +0530 | [diff] [blame] | 961 | 	if (mode & HRTIMER_MODE_REL) { | 
| Thomas Gleixner | 5a7780e | 2008-02-13 09:20:43 +0100 | [diff] [blame] | 962 | 		tim = ktime_add_safe(tim, new_base->get_time()); | 
| Ingo Molnar | 06027bd | 2006-02-14 13:53:15 -0800 | [diff] [blame] | 963 | 		/* | 
 | 964 | 		 * CONFIG_TIME_LOW_RES is a temporary way for architectures | 
 | 965 | 		 * to signal that they simply return xtime in | 
 | 966 | 		 * do_gettimeoffset(). In this case we want to round up by | 
 | 967 | 		 * resolution when starting a relative timer, to avoid short | 
 | 968 | 		 * timeouts. This will go away with the GTOD framework. | 
 | 969 | 		 */ | 
 | 970 | #ifdef CONFIG_TIME_LOW_RES | 
| Thomas Gleixner | 5a7780e | 2008-02-13 09:20:43 +0100 | [diff] [blame] | 971 | 		tim = ktime_add_safe(tim, base->resolution); | 
| Ingo Molnar | 06027bd | 2006-02-14 13:53:15 -0800 | [diff] [blame] | 972 | #endif | 
 | 973 | 	} | 
| Thomas Gleixner | 237fc6e | 2008-04-30 00:55:04 -0700 | [diff] [blame] | 974 |  | 
| Arjan van de Ven | da8f2e1 | 2008-09-07 10:47:46 -0700 | [diff] [blame] | 975 | 	hrtimer_set_expires_range_ns(timer, tim, delta_ns); | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 976 |  | 
| Ingo Molnar | 82f67cd | 2007-02-16 01:28:13 -0800 | [diff] [blame] | 977 | 	timer_stats_hrtimer_set_start_info(timer); | 
 | 978 |  | 
| Peter Zijlstra | a6037b6 | 2009-01-05 11:28:22 +0100 | [diff] [blame] | 979 | 	leftmost = enqueue_hrtimer(timer, new_base); | 
 | 980 |  | 
| Ingo Molnar | 935c631 | 2007-03-28 13:17:18 +0200 | [diff] [blame] | 981 | 	/* | 
 | 982 | 	 * Only allow reprogramming if the new base is on this CPU. | 
 | 983 | 	 * (it might still be on another CPU if the timer was pending) | 
| Peter Zijlstra | a6037b6 | 2009-01-05 11:28:22 +0100 | [diff] [blame] | 984 | 	 * | 
 | 985 | 	 * XXX send_remote_softirq() ? | 
| Ingo Molnar | 935c631 | 2007-03-28 13:17:18 +0200 | [diff] [blame] | 986 | 	 */ | 
| Leonid Shatz | b22affe | 2013-02-04 14:33:37 +0200 | [diff] [blame] | 987 | 	if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases) | 
 | 988 | 		&& hrtimer_enqueue_reprogram(timer, new_base)) { | 
 | 989 | 		if (wakeup) { | 
 | 990 | 			/* | 
 | 991 | 			 * We need to drop cpu_base->lock to avoid a | 
 | 992 | 			 * lock ordering issue vs. rq->lock. | 
 | 993 | 			 */ | 
 | 994 | 			raw_spin_unlock(&new_base->cpu_base->lock); | 
 | 995 | 			raise_softirq_irqoff(HRTIMER_SOFTIRQ); | 
 | 996 | 			local_irq_restore(flags); | 
 | 997 | 			return ret; | 
 | 998 | 		} else { | 
 | 999 | 			__raise_softirq_irqoff(HRTIMER_SOFTIRQ); | 
 | 1000 | 		} | 
 | 1001 | 	} | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1002 |  | 
 | 1003 | 	unlock_hrtimer_base(timer, &flags); | 
 | 1004 |  | 
 | 1005 | 	return ret; | 
 | 1006 | } | 
| Peter Zijlstra | 7f1e2ca | 2009-03-13 12:21:27 +0100 | [diff] [blame] | 1007 |  | 
 | 1008 | /** | 
 | 1009 |  * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU | 
 | 1010 |  * @timer:	the timer to be added | 
 | 1011 |  * @tim:	expiry time | 
 | 1012 |  * @delta_ns:	"slack" range for the timer | 
 | 1013 |  * @mode:	expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL) | 
 | 1014 |  * | 
 | 1015 |  * Returns: | 
 | 1016 |  *  0 on success | 
 | 1017 |  *  1 when the timer was active | 
 | 1018 |  */ | 
 | 1019 | int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, | 
 | 1020 | 		unsigned long delta_ns, const enum hrtimer_mode mode) | 
 | 1021 | { | 
 | 1022 | 	return __hrtimer_start_range_ns(timer, tim, delta_ns, mode, 1); | 
 | 1023 | } | 
| Arjan van de Ven | da8f2e1 | 2008-09-07 10:47:46 -0700 | [diff] [blame] | 1024 | EXPORT_SYMBOL_GPL(hrtimer_start_range_ns); | 
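
/*
 * Usage sketch (illustrative only, not part of this file): arming a
 * relative timer with 100 us of slack so the kernel may coalesce the
 * wakeup with other events. The timer and callback names below are
 * hypothetical.
 *
 *	static struct hrtimer slack_timer;
 *
 *	static enum hrtimer_restart slack_timer_fn(struct hrtimer *t)
 *	{
 *		return HRTIMER_NORESTART;
 *	}
 *
 *	hrtimer_init(&slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	slack_timer.function = slack_timer_fn;
 *	hrtimer_start_range_ns(&slack_timer, ktime_set(0, 10 * NSEC_PER_MSEC),
 *			       100 * NSEC_PER_USEC, HRTIMER_MODE_REL);
 */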

/**
 * hrtimer_start - (re)start an hrtimer on the current CPU
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @mode:	expiry mode: absolute (HRTIMER_MODE_ABS) or
 *		relative (HRTIMER_MODE_REL)
 *
 * Returns:
 *  0 on success
 *  1 when the timer was active
 */
int
hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
{
	return __hrtimer_start_range_ns(timer, tim, 0, mode, 1);
}
EXPORT_SYMBOL_GPL(hrtimer_start);


/**
 * hrtimer_try_to_cancel - try to deactivate a timer
 * @timer:	hrtimer to stop
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 * -1 when the timer is currently executing the callback function and
 *    cannot be stopped
 */
int hrtimer_try_to_cancel(struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;
	int ret = -1;

	base = lock_hrtimer_base(timer, &flags);

	if (!hrtimer_callback_running(timer))
		ret = remove_hrtimer(timer, base);

	unlock_hrtimer_base(timer, &flags);

	return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);

/**
 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
 * @timer:	the timer to be cancelled
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 */
int hrtimer_cancel(struct hrtimer *timer)
{
	for (;;) {
		int ret = hrtimer_try_to_cancel(timer);

		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);
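
/*
 * Usage sketch (illustrative only): tearing down the hypothetical timer
 * from the sketch above, e.g. on module exit. hrtimer_cancel() spins
 * until a running callback has finished, so it must not be called from
 * the callback itself or from a context the callback waits on.
 *
 *	static void my_driver_exit(void)
 *	{
 *		hrtimer_cancel(&slack_timer);
 *	}
 */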

/**
 * hrtimer_get_remaining - get remaining time for the timer
 * @timer:	the timer to read
 */
ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
	unsigned long flags;
	ktime_t rem;

	lock_hrtimer_base(timer, &flags);
	rem = hrtimer_expires_remaining(timer);
	unlock_hrtimer_base(timer, &flags);

	return rem;
}
EXPORT_SYMBOL_GPL(hrtimer_get_remaining);

#ifdef CONFIG_NO_HZ
/**
 * hrtimer_get_next_event - get the time until next expiry event
 *
 * Returns the delta to the next expiry event or KTIME_MAX if no timer
 * is pending.
 */
ktime_t hrtimer_get_next_event(void)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	struct hrtimer_clock_base *base = cpu_base->clock_base;
	ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
	unsigned long flags;
	int i;

	raw_spin_lock_irqsave(&cpu_base->lock, flags);

	if (!hrtimer_hres_active()) {
		for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
			struct hrtimer *timer;
			struct timerqueue_node *next;

			next = timerqueue_getnext(&base->active);
			if (!next)
				continue;

			timer = container_of(next, struct hrtimer, node);
			delta.tv64 = hrtimer_get_expires_tv64(timer);
			delta = ktime_sub(delta, base->get_time());
			if (delta.tv64 < mindelta.tv64)
				mindelta.tv64 = delta.tv64;
		}
	}

	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);

	if (mindelta.tv64 < 0)
		mindelta.tv64 = 0;
	return mindelta;
}
#endif

static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode)
{
	struct hrtimer_cpu_base *cpu_base;
	int base;

	memset(timer, 0, sizeof(struct hrtimer));

	cpu_base = &__raw_get_cpu_var(hrtimer_bases);

	if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
		clock_id = CLOCK_MONOTONIC;

	base = hrtimer_clockid_to_base(clock_id);
	timer->base = &cpu_base->clock_base[base];
	timerqueue_init(&timer->node);

#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
}

/**
 * hrtimer_init - initialize a timer to the given clock
 * @timer:	the timer to be initialized
 * @clock_id:	the clock to be used
 * @mode:	timer mode abs/rel
 */
void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
		  enum hrtimer_mode mode)
{
	debug_init(timer, clock_id, mode);
	__hrtimer_init(timer, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init);
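
/*
 * Usage sketch (illustrative only): an hrtimer is typically embedded in
 * a larger per-device structure, and container_of() recovers the
 * enclosing object inside the callback. All names are hypothetical.
 *
 *	struct my_dev {
 *		struct hrtimer timer;
 *		int pending;
 *	};
 *
 *	static enum hrtimer_restart my_dev_timeout(struct hrtimer *t)
 *	{
 *		struct my_dev *dev = container_of(t, struct my_dev, timer);
 *
 *		dev->pending = 0;
 *		return HRTIMER_NORESTART;
 *	}
 *
 *	hrtimer_init(&dev->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	dev->timer.function = my_dev_timeout;
 */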

/**
 * hrtimer_get_res - get the timer resolution for a clock
 * @which_clock: which clock to query
 * @tp:		 pointer to timespec variable to store the resolution
 *
 * Store the resolution of the clock selected by @which_clock in the
 * variable pointed to by @tp.
 */
int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
{
	struct hrtimer_cpu_base *cpu_base;
	int base = hrtimer_clockid_to_base(which_clock);

	cpu_base = &__raw_get_cpu_var(hrtimer_bases);
	*tp = ktime_to_timespec(cpu_base->clock_base[base].resolution);

	return 0;
}
EXPORT_SYMBOL_GPL(hrtimer_get_res);
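
/*
 * Usage sketch (illustrative only): this helper backs the POSIX
 * clock_getres() path for the hrtimer-based clocks; in-kernel code can
 * query it the same way. The reported resolution is typically one
 * nanosecond once high resolution mode is active, otherwise one tick.
 *
 *	struct timespec res;
 *
 *	hrtimer_get_res(CLOCK_MONOTONIC, &res);
 */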

static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
{
	struct hrtimer_clock_base *base = timer->base;
	struct hrtimer_cpu_base *cpu_base = base->cpu_base;
	enum hrtimer_restart (*fn)(struct hrtimer *);
	int restart;

	WARN_ON(!irqs_disabled());

	debug_deactivate(timer);
	__remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
	timer_stats_account_hrtimer(timer);
	fn = timer->function;

	/*
	 * Because we run timers from hardirq context, there is no chance
	 * they get migrated to another cpu, therefore it's safe to unlock
	 * the timer base.
	 */
	raw_spin_unlock(&cpu_base->lock);
	trace_hrtimer_expire_entry(timer, now);
	restart = fn(timer);
	trace_hrtimer_expire_exit(timer);
	raw_spin_lock(&cpu_base->lock);

	/*
	 * Note: We clear the CALLBACK bit after enqueue_hrtimer and
	 * we do not reprogram the event hardware. Happens either in
	 * hrtimer_start_range_ns() or in hrtimer_interrupt()
	 */
	if (restart != HRTIMER_NORESTART) {
		BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
		enqueue_hrtimer(timer, base);
	}

	WARN_ON_ONCE(!(timer->state & HRTIMER_STATE_CALLBACK));

	timer->state &= ~HRTIMER_STATE_CALLBACK;
}
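
/*
 * Usage sketch (illustrative only): a periodic callback re-arms itself
 * by forwarding its expiry past now and returning HRTIMER_RESTART, so
 * the requeue above happens without the timer ever going inactive. The
 * callback name is hypothetical.
 *
 *	static enum hrtimer_restart my_tick(struct hrtimer *t)
 *	{
 *		hrtimer_forward_now(t, ktime_set(0, NSEC_PER_MSEC));
 *		return HRTIMER_RESTART;
 *	}
 */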

#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer interrupt
 * Called with interrupts disabled
 */
void hrtimer_interrupt(struct clock_event_device *dev)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	ktime_t expires_next, now, entry_time, delta;
	int i, retries = 0;

	BUG_ON(!cpu_base->hres_active);
	cpu_base->nr_events++;
	dev->next_event.tv64 = KTIME_MAX;

	raw_spin_lock(&cpu_base->lock);
	entry_time = now = hrtimer_update_base(cpu_base);
retry:
	expires_next.tv64 = KTIME_MAX;
	/*
	 * We set expires_next to KTIME_MAX here with cpu_base->lock
	 * held to prevent that a timer is enqueued in our queue via
	 * the migration code. This does not affect enqueueing of
	 * timers which run their callback and need to be requeued on
	 * this CPU.
	 */
	cpu_base->expires_next.tv64 = KTIME_MAX;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		struct hrtimer_clock_base *base;
		struct timerqueue_node *node;
		ktime_t basenow;

		if (!(cpu_base->active_bases & (1 << i)))
			continue;

		base = cpu_base->clock_base + i;
		basenow = ktime_add(now, base->offset);

		while ((node = timerqueue_getnext(&base->active))) {
			struct hrtimer *timer;

			timer = container_of(node, struct hrtimer, node);

			/*
			 * The immediate goal for using the softexpires is
			 * minimizing wakeups, not running timers at the
			 * earliest interrupt after their soft expiration.
			 * This allows us to avoid using a Priority Search
			 * Tree, which can answer a stabbing query for
			 * overlapping intervals and instead use the simple
			 * BST we already have.
			 * We don't add extra wakeups by delaying timers that
			 * are right-of a not yet expired timer, because that
			 * timer will have to trigger a wakeup anyway.
			 */

			if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) {
				ktime_t expires;

				expires = ktime_sub(hrtimer_get_expires(timer),
						    base->offset);
				if (expires.tv64 < expires_next.tv64)
					expires_next = expires;
				break;
			}

			__run_hrtimer(timer, &basenow);
		}
	}

	/*
	 * Store the new expiry value so the migration code can verify
	 * against it.
	 */
	cpu_base->expires_next = expires_next;
	raw_spin_unlock(&cpu_base->lock);

	/* Reprogramming necessary ? */
	if (expires_next.tv64 == KTIME_MAX ||
	    !tick_program_event(expires_next, 0)) {
		cpu_base->hang_detected = 0;
		return;
	}

	/*
	 * The next timer was already expired due to:
	 * - tracing
	 * - long lasting callbacks
	 * - being scheduled away when running in a VM
	 *
	 * We need to prevent that we loop forever in the hrtimer
	 * interrupt routine. We give it 3 attempts to avoid
	 * overreacting on some spurious event.
	 *
	 * Acquire base lock for updating the offsets and retrieving
	 * the current time.
	 */
	raw_spin_lock(&cpu_base->lock);
	now = hrtimer_update_base(cpu_base);
	cpu_base->nr_retries++;
	if (++retries < 3)
		goto retry;
	/*
	 * Give the system a chance to do something else than looping
	 * here. We stored the entry time, so we know exactly how long
	 * we spent here. We schedule the next event this amount of
	 * time away.
	 */
	cpu_base->nr_hangs++;
	cpu_base->hang_detected = 1;
	raw_spin_unlock(&cpu_base->lock);
	delta = ktime_sub(now, entry_time);
	if (delta.tv64 > cpu_base->max_hang_time.tv64)
		cpu_base->max_hang_time = delta;
	/*
	 * Limit it to a sensible value as we enforce a longer
	 * delay. Give the CPU at least 100ms to catch up.
	 */
	if (delta.tv64 > 100 * NSEC_PER_MSEC)
		expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
	else
		expires_next = ktime_add(now, delta);
	tick_program_event(expires_next, 1);
	printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
		    ktime_to_ns(delta));
}

/*
 * local version of hrtimer_peek_ahead_timers() called with interrupts
 * disabled.
 */
static void __hrtimer_peek_ahead_timers(void)
{
	struct tick_device *td;

	if (!hrtimer_hres_active())
		return;

	td = &__get_cpu_var(tick_cpu_device);
	if (td && td->evtdev)
		hrtimer_interrupt(td->evtdev);
}

/**
 * hrtimer_peek_ahead_timers - run soft-expired timers now
 *
 * hrtimer_peek_ahead_timers will peek at the timer queue of
 * the current cpu and check if there are any timers for which
 * the soft expiry time has passed. If any such timers exist,
 * they are run immediately and then removed from the timer queue.
 */
void hrtimer_peek_ahead_timers(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__hrtimer_peek_ahead_timers();
	local_irq_restore(flags);
}

static void run_hrtimer_softirq(struct softirq_action *h)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);

	if (cpu_base->clock_was_set) {
		cpu_base->clock_was_set = 0;
		clock_was_set();
	}

	hrtimer_peek_ahead_timers();
}

#else /* CONFIG_HIGH_RES_TIMERS */

static inline void __hrtimer_peek_ahead_timers(void) { }

#endif	/* !CONFIG_HIGH_RES_TIMERS */

/*
 * Called from timer softirq every jiffy, expire hrtimers:
 *
 * For HRT it's the fallback code to run the softirq in the timer
 * softirq context in case the hrtimer initialization failed or has
 * not been done yet.
 */
void hrtimer_run_pending(void)
{
	if (hrtimer_hres_active())
		return;

	/*
	 * This _is_ ugly: We have to check in the softirq context,
	 * whether we can switch to highres and / or nohz mode. The
	 * clocksource switch happens in the timer interrupt with
	 * xtime_lock held. Notification from there only sets the
	 * check bit in the tick_oneshot code, otherwise we might
	 * deadlock vs. xtime_lock.
	 */
	if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
		hrtimer_switch_to_hres();
}

/*
 * Called from hardirq context every jiffy
 */
void hrtimer_run_queues(void)
{
	struct timerqueue_node *node;
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	struct hrtimer_clock_base *base;
	int index, gettime = 1;

	if (hrtimer_hres_active())
		return;

	for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
		base = &cpu_base->clock_base[index];
		if (!timerqueue_getnext(&base->active))
			continue;

		if (gettime) {
			hrtimer_get_softirq_time(cpu_base);
			gettime = 0;
		}

		raw_spin_lock(&cpu_base->lock);

		while ((node = timerqueue_getnext(&base->active))) {
			struct hrtimer *timer;

			timer = container_of(node, struct hrtimer, node);
			if (base->softirq_time.tv64 <=
					hrtimer_get_expires_tv64(timer))
				break;

			__run_hrtimer(timer, &base->softirq_time);
		}
		raw_spin_unlock(&cpu_base->lock);
	}
}

/*
 * Sleep related functions:
 */
static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
{
	struct hrtimer_sleeper *t =
		container_of(timer, struct hrtimer_sleeper, timer);
	struct task_struct *task = t->task;

	t->task = NULL;
	if (task)
		wake_up_process(task);

	return HRTIMER_NORESTART;
}

void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
{
	sl->timer.function = hrtimer_wakeup;
	sl->task = task;
}
EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);

static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
{
	hrtimer_init_sleeper(t, current);

	do {
		set_current_state(TASK_INTERRUPTIBLE);
		hrtimer_start_expires(&t->timer, mode);
		if (!hrtimer_active(&t->timer))
			t->task = NULL;

		if (likely(t->task))
			schedule();

		hrtimer_cancel(&t->timer);
		mode = HRTIMER_MODE_ABS;

	} while (t->task && !signal_pending(current));

	__set_current_state(TASK_RUNNING);

	return t->task == NULL;
}

static int update_rmtp(struct hrtimer *timer, struct timespec __user *rmtp)
{
	struct timespec rmt;
	ktime_t rem;

	rem = hrtimer_expires_remaining(timer);
	if (rem.tv64 <= 0)
		return 0;
	rmt = ktime_to_timespec(rem);

	if (copy_to_user(rmtp, &rmt, sizeof(*rmtp)))
		return -EFAULT;

	return 1;
}

long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
{
	struct hrtimer_sleeper t;
	struct timespec __user  *rmtp;
	int ret = 0;

	hrtimer_init_on_stack(&t.timer, restart->nanosleep.clockid,
				HRTIMER_MODE_ABS);
	hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);

	if (do_nanosleep(&t, HRTIMER_MODE_ABS))
		goto out;

	rmtp = restart->nanosleep.rmtp;
	if (rmtp) {
		ret = update_rmtp(&t.timer, rmtp);
		if (ret <= 0)
			goto out;
	}

	/* The other values in restart are already filled in */
	ret = -ERESTART_RESTARTBLOCK;
out:
	destroy_hrtimer_on_stack(&t.timer);
	return ret;
}

long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
		       const enum hrtimer_mode mode, const clockid_t clockid)
{
	struct restart_block *restart;
	struct hrtimer_sleeper t;
	int ret = 0;
	unsigned long slack;

	slack = current->timer_slack_ns;
	if (rt_task(current))
		slack = 0;

	hrtimer_init_on_stack(&t.timer, clockid, mode);
	hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
	if (do_nanosleep(&t, mode))
		goto out;

	/* Absolute timers do not update the rmtp value and restart: */
	if (mode == HRTIMER_MODE_ABS) {
		ret = -ERESTARTNOHAND;
		goto out;
	}

	if (rmtp) {
		ret = update_rmtp(&t.timer, rmtp);
		if (ret <= 0)
			goto out;
	}

	restart = &current_thread_info()->restart_block;
	restart->fn = hrtimer_nanosleep_restart;
	restart->nanosleep.clockid = t.timer.base->clockid;
	restart->nanosleep.rmtp = rmtp;
	restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);

	ret = -ERESTART_RESTARTBLOCK;
out:
	destroy_hrtimer_on_stack(&t.timer);
	return ret;
}

SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
		struct timespec __user *, rmtp)
{
	struct timespec tu;

	if (copy_from_user(&tu, rqtp, sizeof(tu)))
		return -EFAULT;

	if (!timespec_valid(&tu))
		return -EINVAL;

	return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
}

/*
 * Functions related to boot-time initialization:
 */
static void __cpuinit init_hrtimers_cpu(int cpu)
{
	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
	int i;

	raw_spin_lock_init(&cpu_base->lock);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		cpu_base->clock_base[i].cpu_base = cpu_base;
		timerqueue_init_head(&cpu_base->clock_base[i].active);
	}

	hrtimer_init_hres(cpu_base);
}

#ifdef CONFIG_HOTPLUG_CPU

static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
				struct hrtimer_clock_base *new_base)
{
	struct hrtimer *timer;
	struct timerqueue_node *node;

	while ((node = timerqueue_getnext(&old_base->active))) {
		timer = container_of(node, struct hrtimer, node);
		BUG_ON(hrtimer_callback_running(timer));
		debug_deactivate(timer);

		/*
		 * Mark it as STATE_MIGRATE not INACTIVE, otherwise the
		 * timer could be seen as !active and just vanish away
		 * under us on another CPU
		 */
		__remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0);
		timer->base = new_base;
		/*
		 * Enqueue the timers on the new cpu. This does not
		 * reprogram the event device in case the timer
		 * expires before the earliest on this CPU, but we run
		 * hrtimer_interrupt after we migrated everything to
		 * sort out already expired timers and reprogram the
		 * event device.
		 */
		enqueue_hrtimer(timer, new_base);

		/* Clear the migration state bit */
		timer->state &= ~HRTIMER_STATE_MIGRATE;
	}
}

static void migrate_hrtimers(int scpu)
{
	struct hrtimer_cpu_base *old_base, *new_base;
	int i;

	BUG_ON(cpu_online(scpu));
	tick_cancel_sched_timer(scpu);

	local_irq_disable();
	old_base = &per_cpu(hrtimer_bases, scpu);
	new_base = &__get_cpu_var(hrtimer_bases);
	/*
	 * The caller is globally serialized and nobody else
	 * takes two locks at once, deadlock is not possible.
	 */
	raw_spin_lock(&new_base->lock);
	raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		migrate_hrtimer_list(&old_base->clock_base[i],
				     &new_base->clock_base[i]);
	}

	raw_spin_unlock(&old_base->lock);
	raw_spin_unlock(&new_base->lock);

	/* Check if we got expired work to do */
	__hrtimer_peek_ahead_timers();
	local_irq_enable();
}

#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
					unsigned long action, void *hcpu)
{
	int scpu = (long)hcpu;

	switch (action) {

	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		init_hrtimers_cpu(scpu);
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DYING, &scpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
	{
		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &scpu);
		migrate_hrtimers(scpu);
		break;
	}
#endif

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata hrtimers_nb = {
	.notifier_call = hrtimer_cpu_notify,
};

void __init hrtimers_init(void)
{
	hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
			  (void *)(long)smp_processor_id());
	register_cpu_notifier(&hrtimers_nb);
#ifdef CONFIG_HIGH_RES_TIMERS
	open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
#endif
}
 | 1769 |  | 
| Arjan van de Ven | 7bb6743 | 2008-08-31 08:05:58 -0700 | [diff] [blame] | 1770 | /** | 
| Carsten Emde | 351b3f7 | 2010-04-02 22:40:19 +0200 | [diff] [blame] | 1771 |  * schedule_hrtimeout_range_clock - sleep until timeout | 
 | 1772 |  * @expires:	timeout value (ktime_t) | 
 | 1773 |  * @delta:	slack in expires timeout (ktime_t) | 
 | 1774 |  * @mode:	timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL | 
 | 1775 |  * @clock:	timer clock, CLOCK_MONOTONIC or CLOCK_REALTIME | 
 | 1776 |  */ | 
int __sched
schedule_hrtimeout_range_clock(ktime_t *expires, unsigned long delta,
			       const enum hrtimer_mode mode, int clock)
{
	struct hrtimer_sleeper t;

	/*
	 * Optimize when a zero timeout value is given. It does not
	 * matter whether this is an absolute or a relative time.
	 */
	if (expires && !expires->tv64) {
		__set_current_state(TASK_RUNNING);
		return 0;
	}

	/*
	 * A NULL parameter means "infinite"
	 */
	if (!expires) {
		schedule();
		__set_current_state(TASK_RUNNING);
		return -EINTR;
	}

	hrtimer_init_on_stack(&t.timer, clock, mode);
	hrtimer_set_expires_range_ns(&t.timer, *expires, delta);

	hrtimer_init_sleeper(&t, current);

	hrtimer_start_expires(&t.timer, mode);
	/*
	 * If the timer has already expired, there is nothing left to wait
	 * for: clearing t.task skips the schedule() below and makes the
	 * function return 0.
	 */
	if (!hrtimer_active(&t.timer))
		t.task = NULL;

	if (likely(t.task))
		schedule();

	hrtimer_cancel(&t.timer);
	destroy_hrtimer_on_stack(&t.timer);

	__set_current_state(TASK_RUNNING);

	return !t.task ? 0 : -EINTR;
}

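/*
 * Illustrative sketch only (not part of hrtimer.c, never compiled): a
 * caller that wants to sleep until an absolute CLOCK_REALTIME instant
 * could use the helper above roughly like this. The function name and
 * the 10us slack value are made up for the example.
 */
#if 0
static int example_sleep_until_realtime(ktime_t abs_expiry)
{
	set_current_state(TASK_INTERRUPTIBLE);
	return schedule_hrtimeout_range_clock(&abs_expiry, 10 * NSEC_PER_USEC,
					      HRTIMER_MODE_ABS, CLOCK_REALTIME);
}
#endif
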
/**
 * schedule_hrtimeout_range - sleep until timeout
 * @expires:	timeout value (ktime_t)
 * @delta:	slack in expires timeout (nanoseconds)
 * @mode:	timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
 *
 * Make the current task sleep until the given expiry time has
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * The @delta argument gives the kernel the freedom to schedule the
 * actual wakeup to a time that is both power and performance friendly.
 * The kernel gives normal best-effort behavior for "@expires+@delta",
 * but may decide to fire the timer earlier, though never earlier than
 * @expires.
 *
 * You can set the task state as follows:
 *
 * %TASK_UNINTERRUPTIBLE - at least the @expires time is guaranteed to
 * pass before the routine returns.
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Returns 0 when the timer has expired, otherwise -EINTR
 */
int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
				     const enum hrtimer_mode mode)
{
	return schedule_hrtimeout_range_clock(expires, delta, mode,
					      CLOCK_MONOTONIC);
}
EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);

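/*
 * Illustrative sketch only (not part of hrtimer.c, never compiled): an
 * interruptible, relative wait with wakeup slack, e.g. from a polling
 * loop. The function name and the 100us/10us values are made up for
 * the example.
 */
#if 0
static int example_wait_a_while(void)
{
	ktime_t timeout = ktime_set(0, 100 * NSEC_PER_USEC);	/* 100us */

	set_current_state(TASK_INTERRUPTIBLE);
	/* allow the wakeup to be deferred by up to 10us for batching */
	return schedule_hrtimeout_range(&timeout, 10 * NSEC_PER_USEC,
					HRTIMER_MODE_REL);
}
#endif
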
/**
 * schedule_hrtimeout - sleep until timeout
 * @expires:	timeout value (ktime_t)
 * @mode:	timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
 *
 * Make the current task sleep until the given expiry time has
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows:
 *
 * %TASK_UNINTERRUPTIBLE - at least the @expires time is guaranteed to
 * pass before the routine returns.
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Returns 0 when the timer has expired, otherwise -EINTR
 */
int __sched schedule_hrtimeout(ktime_t *expires,
			       const enum hrtimer_mode mode)
{
	return schedule_hrtimeout_range(expires, 0, mode);
}
EXPORT_SYMBOL_GPL(schedule_hrtimeout);
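
/*
 * Illustrative sketch only (not part of hrtimer.c, never compiled): an
 * uninterruptible, precise minimum delay. Because TASK_UNINTERRUPTIBLE
 * is used and no slack is requested, at least delay_ns nanoseconds pass
 * before the call returns. The function name is made up for the example.
 */
#if 0
static void example_precise_delay(u64 delay_ns)
{
	ktime_t timeout = ns_to_ktime(delay_ns);

	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_hrtimeout(&timeout, HRTIMER_MODE_REL);
}
#endif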