/*
 *  linux/kernel/hrtimer.c
 *
 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
 *
 *  High-resolution kernel timers
 *
 *  In contrast to the low-resolution timeout API implemented in
 *  kernel/timer.c, hrtimers provide finer resolution and accuracy
 *  depending on system configuration and capabilities.
 *
 *  These timers are currently used for:
 *   - itimers
 *   - POSIX timers
 *   - nanosleep
 *   - precise in-kernel timing
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  Credits:
 *	based on kernel/timer.c
 *
 *	Help, testing, suggestions, bugfixes and improvements were
 *	provided by:
 *
 *	George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
 *	et al.
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/tick.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/debugobjects.h>
#include <linux/sched.h>
#include <linux/timer.h>

#include <asm/uaccess.h>

#include <trace/events/timer.h>

/*
 * The timer bases:
 *
 * Note: If we want to add new timer bases, we have to skip the two
 * clock ids captured by the cpu-timers. We do this by holding empty
 * entries rather than doing math adjustment of the clock ids.
 * This ensures that we capture erroneous accesses to these clock ids
 * rather than moving them into the range of valid clock id's.
 */
DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
{
	.clock_base =
	{
		{
			.index = CLOCK_REALTIME,
			.get_time = &ktime_get_real,
			.resolution = KTIME_LOW_RES,
		},
		{
			.index = CLOCK_MONOTONIC,
			.get_time = &ktime_get,
			.resolution = KTIME_LOW_RES,
		},
	}
};

/*
 * Get the coarse-grained time at softirq time, based on xtime and
 * wall_to_monotonic.
 */
static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
{
	ktime_t xtim, tomono;
	struct timespec xts, tom;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);
		xts = current_kernel_time();
		tom = wall_to_monotonic;
	} while (read_seqretry(&xtime_lock, seq));

	xtim = timespec_to_ktime(xts);
	tomono = timespec_to_ktime(tom);
	base->clock_base[CLOCK_REALTIME].softirq_time = xtim;
	base->clock_base[CLOCK_MONOTONIC].softirq_time =
		ktime_add(xtim, tomono);
}

/*
 * Functions and macros which are different for UP/SMP systems are kept in a
 * single place
 */
#ifdef CONFIG_SMP

/*
 * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on the lists/queues.
 *
 * When the timer's base is locked, and the timer is removed from the
 * list, it is possible to set timer->base = NULL and drop the lock:
 * the timer remains locked.
 */
static
struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
					     unsigned long *flags)
{
	struct hrtimer_clock_base *base;

	for (;;) {
		base = timer->base;
		if (likely(base != NULL)) {
			spin_lock_irqsave(&base->cpu_base->lock, *flags);
			if (likely(base == timer->base))
				return base;
			/* The timer has migrated to another CPU: */
			spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
		}
		cpu_relax();
	}
}


/*
 * Get the preferred target CPU for NOHZ
 */
static int hrtimer_get_target(int this_cpu, int pinned)
{
#ifdef CONFIG_NO_HZ
	if (!pinned && get_sysctl_timer_migration() && idle_cpu(this_cpu)) {
		int preferred_cpu = get_nohz_load_balancer();

		if (preferred_cpu >= 0)
			return preferred_cpu;
	}
#endif
	return this_cpu;
}

/*
 * With HIGHRES=y we do not migrate the timer when it is expiring
 * before the next event on the target cpu because we cannot reprogram
 * the target cpu hardware and we would cause it to fire late.
 *
 * Called with cpu_base->lock of target cpu held.
 */
static int
hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
{
#ifdef CONFIG_HIGH_RES_TIMERS
	ktime_t expires;

	if (!new_base->cpu_base->hres_active)
		return 0;

	expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
	return expires.tv64 <= new_base->cpu_base->expires_next.tv64;
#else
	return 0;
#endif
}

/*
 * Switch the timer base to the current CPU when possible.
 */
static inline struct hrtimer_clock_base *
switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
		    int pinned)
{
	struct hrtimer_clock_base *new_base;
	struct hrtimer_cpu_base *new_cpu_base;
	int this_cpu = smp_processor_id();
	int cpu = hrtimer_get_target(this_cpu, pinned);

again:
	new_cpu_base = &per_cpu(hrtimer_bases, cpu);
	new_base = &new_cpu_base->clock_base[base->index];

	if (base != new_base) {
		/*
		 * We are trying to move the timer to new_base.
		 * However we can't change the timer's base while it is
		 * running, so we keep it on the same CPU. No hassle vs.
		 * reprogramming the event source in the high resolution
		 * case. The softirq code will take care of this when the
		 * timer function has completed. There is no conflict as
		 * we hold the lock until the timer is enqueued.
		 */
		if (unlikely(hrtimer_callback_running(timer)))
			return base;

		/* See the comment in lock_hrtimer_base() */
		timer->base = NULL;
		spin_unlock(&base->cpu_base->lock);
		spin_lock(&new_base->cpu_base->lock);

		if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
			cpu = this_cpu;
			spin_unlock(&new_base->cpu_base->lock);
			spin_lock(&base->cpu_base->lock);
			timer->base = base;
			goto again;
		}
		timer->base = new_base;
	}
	return new_base;
}

#else /* CONFIG_SMP */

static inline struct hrtimer_clock_base *
lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	struct hrtimer_clock_base *base = timer->base;

	spin_lock_irqsave(&base->cpu_base->lock, *flags);

	return base;
}

# define switch_hrtimer_base(t, b, p)	(b)

#endif	/* !CONFIG_SMP */

/*
 * Functions for the union type storage format of ktime_t which are
 * too large for inlining:
 */
#if BITS_PER_LONG < 64
# ifndef CONFIG_KTIME_SCALAR
/**
 * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
 * @kt:		addend
 * @nsec:	the scalar nsec value to add
 *
 * Returns the sum of @kt and @nsec in ktime_t format
 */
ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
{
	ktime_t tmp;

	if (likely(nsec < NSEC_PER_SEC)) {
		tmp.tv64 = nsec;
	} else {
		unsigned long rem = do_div(nsec, NSEC_PER_SEC);

		tmp = ktime_set((long)nsec, rem);
	}

	return ktime_add(kt, tmp);
}
EXPORT_SYMBOL_GPL(ktime_add_ns);
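
/*
 * A minimal illustrative sketch (guarded out of the build): how the
 * split above behaves once the scalar crosses one second. Assuming a
 * 32-bit configuration without CONFIG_KTIME_SCALAR, adding
 * 1,500,000,000 ns splits into 1 s plus a 500,000,000 ns remainder
 * before the final ktime_add().
 */
#if 0
static void ktime_add_ns_example(void)
{
	ktime_t t = ktime_set(2, 0);		/* 2 s */

	/* nsec >= NSEC_PER_SEC: do_div() yields 1 s, remainder 0.5 s */
	t = ktime_add_ns(t, 1500000000ULL);	/* t is now 3.5 s */
}
#endif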

/**
 * ktime_sub_ns - Subtract a scalar nanoseconds value from a ktime_t variable
 * @kt:		minuend
 * @nsec:	the scalar nsec value to subtract
 *
 * Returns the subtraction of @nsec from @kt in ktime_t format
 */
ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec)
{
	ktime_t tmp;

	if (likely(nsec < NSEC_PER_SEC)) {
		tmp.tv64 = nsec;
	} else {
		unsigned long rem = do_div(nsec, NSEC_PER_SEC);

		tmp = ktime_set((long)nsec, rem);
	}

	return ktime_sub(kt, tmp);
}
EXPORT_SYMBOL_GPL(ktime_sub_ns);
# endif /* !CONFIG_KTIME_SCALAR */

/*
 * Divide a ktime value by a nanosecond value
 */
u64 ktime_divns(const ktime_t kt, s64 div)
{
	u64 dclc;
	int sft = 0;

	dclc = ktime_to_ns(kt);
	/* Make sure the divisor is less than 2^32: */
	while (div >> 32) {
		sft++;
		div >>= 1;
	}
	dclc >>= sft;
	do_div(dclc, (unsigned long) div);

	return dclc;
}
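
/*
 * Worked example for the shift loop above (hypothetical, guarded out
 * of the build): a divisor of 2^33 does not fit in 32 bits, so
 * dividend and divisor are both shifted right until it does (twice
 * here, since the loop runs while div >> 32 is non-zero), and the
 * quotient survives intact up to rounding.
 */
#if 0
static void ktime_divns_example(void)
{
	/* 85899345920 ns == 10 * 2^33 ns */
	ktime_t delta = ktime_set(85, 899345920);

	WARN_ON(ktime_divns(delta, 1LL << 33) != 10);
}
#endif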
#endif /* BITS_PER_LONG < 64 */

/*
 * Add two ktime values and do a safety check for overflow:
 */
ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
{
	ktime_t res = ktime_add(lhs, rhs);

	/*
	 * We use KTIME_SEC_MAX here, the maximum timeout which we can
	 * return to user space in a timespec:
	 */
	if (res.tv64 < 0 || res.tv64 < lhs.tv64 || res.tv64 < rhs.tv64)
		res = ktime_set(KTIME_SEC_MAX, 0);

	return res;
}
EXPORT_SYMBOL_GPL(ktime_add_safe);
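
/*
 * Illustrative sketch (guarded out of the build): without the clamp,
 * adding any positive offset to an expiry near KTIME_SEC_MAX seconds
 * would wrap negative; ktime_add_safe() pins the sum at KTIME_SEC_MAX
 * seconds instead, which callers can treat as "never expires".
 */
#if 0
static void ktime_add_safe_example(void)
{
	ktime_t huge = ktime_set(KTIME_SEC_MAX, 0);
	ktime_t sum = ktime_add_safe(huge, ktime_set(1, 0));

	WARN_ON(ktime_to_ns(sum) < 0);		/* clamped, not wrapped */
}
#endif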

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr hrtimer_debug_descr;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int hrtimer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct hrtimer *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		hrtimer_cancel(timer);
		debug_object_init(timer, &hrtimer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
{
	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int hrtimer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct hrtimer *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		hrtimer_cancel(timer);
		debug_object_free(timer, &hrtimer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr hrtimer_debug_descr = {
	.name		= "hrtimer",
	.fixup_init	= hrtimer_fixup_init,
	.fixup_activate	= hrtimer_fixup_activate,
	.fixup_free	= hrtimer_fixup_free,
};

static inline void debug_hrtimer_init(struct hrtimer *timer)
{
	debug_object_init(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_activate(struct hrtimer *timer)
{
	debug_object_activate(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
{
	debug_object_deactivate(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_free(struct hrtimer *timer)
{
	debug_object_free(timer, &hrtimer_debug_descr);
}

static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode);

void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode)
{
	debug_object_init_on_stack(timer, &hrtimer_debug_descr);
	__hrtimer_init(timer, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init_on_stack);

void destroy_hrtimer_on_stack(struct hrtimer *timer)
{
	debug_object_free(timer, &hrtimer_debug_descr);
}

#else
static inline void debug_hrtimer_init(struct hrtimer *timer) { }
static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
#endif

static inline void
debug_init(struct hrtimer *timer, clockid_t clockid,
	   enum hrtimer_mode mode)
{
	debug_hrtimer_init(timer);
	trace_hrtimer_init(timer, clockid, mode);
}

static inline void debug_activate(struct hrtimer *timer)
{
	debug_hrtimer_activate(timer);
	trace_hrtimer_start(timer);
}

static inline void debug_deactivate(struct hrtimer *timer)
{
	debug_hrtimer_deactivate(timer);
	trace_hrtimer_cancel(timer);
}

/* High resolution timer related functions */
#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer enabled?
 */
static int hrtimer_hres_enabled __read_mostly = 1;

/*
 * Enable / Disable high resolution mode
 */
static int __init setup_hrtimer_hres(char *str)
{
	if (!strcmp(str, "off"))
		hrtimer_hres_enabled = 0;
	else if (!strcmp(str, "on"))
		hrtimer_hres_enabled = 1;
	else
		return 0;
	return 1;
}

__setup("highres=", setup_hrtimer_hres);
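
/*
 * For example, booting with "highres=off" on the kernel command line
 * leaves hrtimer_hres_enabled cleared, so the switch to high
 * resolution mode below is never attempted.
 */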

/*
 * hrtimer_is_hres_enabled - query whether the highres mode is enabled
 */
static inline int hrtimer_is_hres_enabled(void)
{
	return hrtimer_hres_enabled;
}

/*
 * Is the high resolution mode active?
 */
static inline int hrtimer_hres_active(void)
{
	return __get_cpu_var(hrtimer_bases).hres_active;
}

/*
 * Reprogram the event source, checking both queues for the next event.
 * Called with interrupts disabled and base->lock held.
 */
static void
hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
{
	int i;
	struct hrtimer_clock_base *base = cpu_base->clock_base;
	ktime_t expires, expires_next;

	expires_next.tv64 = KTIME_MAX;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
		struct hrtimer *timer;

		if (!base->first)
			continue;
		timer = rb_entry(base->first, struct hrtimer, node);
		expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
		/*
		 * clock_was_set() has changed base->offset so the
		 * result might be negative. Fix it up to prevent a
		 * false positive in clockevents_program_event()
		 */
		if (expires.tv64 < 0)
			expires.tv64 = 0;
		if (expires.tv64 < expires_next.tv64)
			expires_next = expires;
	}

	if (skip_equal && expires_next.tv64 == cpu_base->expires_next.tv64)
		return;

	cpu_base->expires_next.tv64 = expires_next.tv64;

	if (cpu_base->expires_next.tv64 != KTIME_MAX)
		tick_program_event(cpu_base->expires_next, 1);
}

/*
 * Shared reprogramming for clock_realtime and clock_monotonic
 *
 * When a timer is enqueued and expires earlier than the already enqueued
 * timers, we have to check whether it expires earlier than the timer for
 * which the clock event device was armed.
 *
 * Called with interrupts disabled and base->cpu_base.lock held
 */
static int hrtimer_reprogram(struct hrtimer *timer,
			     struct hrtimer_clock_base *base)
{
	ktime_t *expires_next = &__get_cpu_var(hrtimer_bases).expires_next;
	ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
	int res;

	WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);

	/*
	 * When the callback is running, we do not reprogram the clock event
	 * device. The timer callback is either running on a different CPU or
	 * the callback is executed in the hrtimer_interrupt context. The
	 * reprogramming is handled either by the softirq, which called the
	 * callback, or at the end of the hrtimer_interrupt.
	 */
	if (hrtimer_callback_running(timer))
		return 0;

	/*
	 * A CLOCK_REALTIME timer might be requested with an absolute
	 * expiry time which is less than base->offset. Nothing wrong
	 * about that, just avoid calling into the tick code, which
	 * now objects to negative expiry values.
	 */
	if (expires.tv64 < 0)
		return -ETIME;

	if (expires.tv64 >= expires_next->tv64)
		return 0;

	/*
	 * Clockevents returns -ETIME when the event is in the past.
	 */
	res = tick_program_event(expires, 0);
	if (!IS_ERR_VALUE(res))
		*expires_next = expires;
	return res;
}


/*
 * Retrigger next event is called after clock was set
 *
 * Called with interrupts disabled via on_each_cpu()
 */
static void retrigger_next_event(void *arg)
{
	struct hrtimer_cpu_base *base;
	struct timespec realtime_offset;
	unsigned long seq;

	if (!hrtimer_hres_active())
		return;

	do {
		seq = read_seqbegin(&xtime_lock);
		set_normalized_timespec(&realtime_offset,
					-wall_to_monotonic.tv_sec,
					-wall_to_monotonic.tv_nsec);
	} while (read_seqretry(&xtime_lock, seq));

	base = &__get_cpu_var(hrtimer_bases);

	/* Adjust CLOCK_REALTIME offset */
	spin_lock(&base->lock);
	base->clock_base[CLOCK_REALTIME].offset =
		timespec_to_ktime(realtime_offset);

	hrtimer_force_reprogram(base, 0);
	spin_unlock(&base->lock);
}

/*
 * Clock realtime was set
 *
 * Change the offset of the realtime clock vs. the monotonic
 * clock.
 *
 * We might have to reprogram the high resolution timer interrupt. On
 * SMP we call the architecture specific code to retrigger _all_ high
 * resolution timer interrupts. On UP we just disable interrupts and
 * call the high resolution interrupt code.
 */
void clock_was_set(void)
{
	/* Retrigger the CPU local events everywhere */
	on_each_cpu(retrigger_next_event, NULL, 1);
}

/*
 * During resume we might have to reprogram the high resolution timer
 * interrupt (on the local CPU):
 */
void hres_timers_resume(void)
{
	WARN_ONCE(!irqs_disabled(),
		  KERN_INFO "hres_timers_resume() called with IRQs enabled!");

	retrigger_next_event(NULL);
}

/*
 * Initialize the high resolution related parts of cpu_base
 */
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
{
	base->expires_next.tv64 = KTIME_MAX;
	base->hres_active = 0;
}

/*
 * Initialize the high resolution related parts of a hrtimer
 */
static inline void hrtimer_init_timer_hres(struct hrtimer *timer)
{
}

/*
 * When high resolution timers are active, try to reprogram. Note that in case
 * the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no expiry
 * check happens. The timer gets enqueued into the rbtree. The reprogramming
 * and expiry check is done in the hrtimer_interrupt or in the softirq.
 */
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
					    struct hrtimer_clock_base *base,
					    int wakeup)
{
	if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
		if (wakeup) {
			spin_unlock(&base->cpu_base->lock);
			raise_softirq_irqoff(HRTIMER_SOFTIRQ);
			spin_lock(&base->cpu_base->lock);
		} else
			__raise_softirq_irqoff(HRTIMER_SOFTIRQ);

		return 1;
	}

	return 0;
}

/*
 * Switch to high resolution mode
 */
static int hrtimer_switch_to_hres(void)
{
	int cpu = smp_processor_id();
	struct hrtimer_cpu_base *base = &per_cpu(hrtimer_bases, cpu);
	unsigned long flags;

	if (base->hres_active)
		return 1;

	local_irq_save(flags);

	if (tick_init_highres()) {
		local_irq_restore(flags);
		printk(KERN_WARNING "Could not switch to high resolution "
				    "mode on CPU %d\n", cpu);
		return 0;
	}
	base->hres_active = 1;
	base->clock_base[CLOCK_REALTIME].resolution = KTIME_HIGH_RES;
	base->clock_base[CLOCK_MONOTONIC].resolution = KTIME_HIGH_RES;

	tick_setup_sched_timer();

	/* "Retrigger" the interrupt to get things going */
	retrigger_next_event(NULL);
	local_irq_restore(flags);
	return 1;
}

#else

static inline int hrtimer_hres_active(void) { return 0; }
static inline int hrtimer_is_hres_enabled(void) { return 0; }
static inline int hrtimer_switch_to_hres(void) { return 0; }
static inline void
hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
					    struct hrtimer_clock_base *base,
					    int wakeup)
{
	return 0;
}
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
static inline void hrtimer_init_timer_hres(struct hrtimer *timer) { }

#endif /* CONFIG_HIGH_RES_TIMERS */

#ifdef CONFIG_TIMER_STATS
void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer, void *addr)
{
	if (timer->start_site)
		return;

	timer->start_site = addr;
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
}
#endif

/*
 * Counterpart to lock_hrtimer_base above:
 */
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
}

/**
 * hrtimer_forward - forward the timer expiry
 * @timer:	hrtimer to forward
 * @now:	forward past this time
 * @interval:	the interval to forward
 *
 * Forward the timer expiry so it will expire in the future.
 * Returns the number of overruns.
 */
u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
	u64 orun = 1;
	ktime_t delta;

	delta = ktime_sub(now, hrtimer_get_expires(timer));

	if (delta.tv64 < 0)
		return 0;

	if (interval.tv64 < timer->base->resolution.tv64)
		interval.tv64 = timer->base->resolution.tv64;

	if (unlikely(delta.tv64 >= interval.tv64)) {
		s64 incr = ktime_to_ns(interval);

		orun = ktime_divns(delta, incr);
		hrtimer_add_expires_ns(timer, incr * orun);
		if (hrtimer_get_expires_tv64(timer) > now.tv64)
			return orun;
		/*
		 * This (and the ktime_add() below) is the
		 * correction for exact:
		 */
		orun++;
	}
	hrtimer_add_expires(timer, interval);

	return orun;
}
EXPORT_SYMBOL_GPL(hrtimer_forward);
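
/*
 * A minimal sketch of the usual hrtimer_forward() pattern (guarded
 * out of the build; the function name and the 100 ms period are
 * hypothetical): a periodic callback pushes its own expiry past "now"
 * and rearms itself by returning HRTIMER_RESTART.
 */
#if 0
static enum hrtimer_restart example_periodic(struct hrtimer *timer)
{
	ktime_t period = ktime_set(0, 100 * NSEC_PER_MSEC);

	/* An overrun count > 1 means we lagged and skipped periods. */
	hrtimer_forward(timer, hrtimer_cb_get_time(timer), period);

	return HRTIMER_RESTART;
}
#endif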

/*
 * enqueue_hrtimer - internal function to (re)start a timer
 *
 * The timer is inserted in expiry order. Insertion into the
 * red black tree is O(log(n)). Must hold the base lock.
 *
 * Returns 1 when the new timer is the leftmost timer in the tree.
 */
static int enqueue_hrtimer(struct hrtimer *timer,
			   struct hrtimer_clock_base *base)
{
	struct rb_node **link = &base->active.rb_node;
	struct rb_node *parent = NULL;
	struct hrtimer *entry;
	int leftmost = 1;

	debug_activate(timer);

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct hrtimer, node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same expiry time stay together.
		 */
		if (hrtimer_get_expires_tv64(timer) <
				hrtimer_get_expires_tv64(entry)) {
			link = &(*link)->rb_left;
		} else {
			link = &(*link)->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Insert the timer into the rbtree and check whether it
	 * replaces the first pending timer
	 */
	if (leftmost)
		base->first = &timer->node;

	rb_link_node(&timer->node, parent, link);
	rb_insert_color(&timer->node, &base->active);
	/*
	 * HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the
	 * state of a possibly running callback.
	 */
	timer->state |= HRTIMER_STATE_ENQUEUED;

	return leftmost;
}
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 866 |  | 
 | 867 | /* | 
 | 868 |  * __remove_hrtimer - internal function to remove a timer | 
 | 869 |  * | 
 | 870 |  * Caller must hold the base lock. | 
| Thomas Gleixner | 54cdfdb | 2007-02-16 01:28:11 -0800 | [diff] [blame] | 871 |  * | 
 | 872 |  * High resolution timer mode reprograms the clock event device when the | 
 | 873 |  * timer is the one which expires next. The caller can disable this by setting | 
 | 874 |  * reprogram to zero. This is useful, when the context does a reprogramming | 
 | 875 |  * anyway (e.g. timer interrupt) | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 876 |  */ | 
| Thomas Gleixner | 3c8aa39 | 2007-02-16 01:27:50 -0800 | [diff] [blame] | 877 | static void __remove_hrtimer(struct hrtimer *timer, | 
| Thomas Gleixner | 303e967 | 2007-02-16 01:27:51 -0800 | [diff] [blame] | 878 | 			     struct hrtimer_clock_base *base, | 
| Thomas Gleixner | 54cdfdb | 2007-02-16 01:28:11 -0800 | [diff] [blame] | 879 | 			     unsigned long newstate, int reprogram) | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 880 | { | 
| Ashwin Chaugule | 7403f41 | 2009-09-01 23:03:33 -0400 | [diff] [blame] | 881 | 	if (!(timer->state & HRTIMER_STATE_ENQUEUED)) | 
 | 882 | 		goto out; | 
 | 883 |  | 
 | 884 | 	/* | 
 | 885 | 	 * Remove the timer from the rbtree and replace the first | 
 | 886 | 	 * entry pointer if necessary. | 
 | 887 | 	 */ | 
 | 888 | 	if (base->first == &timer->node) { | 
 | 889 | 		base->first = rb_next(&timer->node); | 
 | 890 | #ifdef CONFIG_HIGH_RES_TIMERS | 
 | 891 | 		/* Reprogram the clock event device. if enabled */ | 
 | 892 | 		if (reprogram && hrtimer_hres_active()) { | 
 | 893 | 			ktime_t expires; | 
 | 894 |  | 
 | 895 | 			expires = ktime_sub(hrtimer_get_expires(timer), | 
 | 896 | 					    base->offset); | 
 | 897 | 			if (base->cpu_base->expires_next.tv64 == expires.tv64) | 
 | 898 | 				hrtimer_force_reprogram(base->cpu_base, 1); | 
| Thomas Gleixner | 54cdfdb | 2007-02-16 01:28:11 -0800 | [diff] [blame] | 899 | 		} | 
| Ashwin Chaugule | 7403f41 | 2009-09-01 23:03:33 -0400 | [diff] [blame] | 900 | #endif | 
| Thomas Gleixner | 54cdfdb | 2007-02-16 01:28:11 -0800 | [diff] [blame] | 901 | 	} | 
| Ashwin Chaugule | 7403f41 | 2009-09-01 23:03:33 -0400 | [diff] [blame] | 902 | 	rb_erase(&timer->node, &base->active); | 
 | 903 | out: | 
| Thomas Gleixner | 303e967 | 2007-02-16 01:27:51 -0800 | [diff] [blame] | 904 | 	timer->state = newstate; | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 905 | } | 

/*
 * remove hrtimer, called with base lock held
 */
static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
{
	if (hrtimer_is_queued(timer)) {
		int reprogram;

		/*
		 * Remove the timer and force reprogramming when high
		 * resolution mode is active and the timer is on the current
		 * CPU. If we remove a timer on another CPU, reprogramming is
		 * skipped. The interrupt event on this CPU is fired and
		 * reprogramming happens in the interrupt handler. This is a
		 * rare case and less expensive than an SMP call.
		 */
		debug_deactivate(timer);
		timer_stats_hrtimer_clear_start_info(timer);
		reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
		__remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE,
				 reprogram);
		return 1;
	}
	return 0;
}

int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
		unsigned long delta_ns, const enum hrtimer_mode mode,
		int wakeup)
{
	struct hrtimer_clock_base *base, *new_base;
	unsigned long flags;
	int ret, leftmost;

	base = lock_hrtimer_base(timer, &flags);

	/* Remove an active timer from the queue: */
	ret = remove_hrtimer(timer, base);

	/* Switch the timer base, if necessary: */
	new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);

	if (mode & HRTIMER_MODE_REL) {
		tim = ktime_add_safe(tim, new_base->get_time());
		/*
		 * CONFIG_TIME_LOW_RES is a temporary way for architectures
		 * to signal that they simply return xtime in
		 * do_gettimeoffset(). In this case we want to round up by
		 * resolution when starting a relative timer, to avoid short
		 * timeouts. This will go away with the GTOD framework.
		 */
#ifdef CONFIG_TIME_LOW_RES
		tim = ktime_add_safe(tim, base->resolution);
#endif
	}

	hrtimer_set_expires_range_ns(timer, tim, delta_ns);

	timer_stats_hrtimer_set_start_info(timer);

	leftmost = enqueue_hrtimer(timer, new_base);

	/*
	 * Only allow reprogramming if the new base is on this CPU.
	 * (it might still be on another CPU if the timer was pending)
	 *
	 * XXX send_remote_softirq() ?
	 */
	if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases))
		hrtimer_enqueue_reprogram(timer, new_base, wakeup);

	unlock_hrtimer_base(timer, &flags);

	return ret;
}

/**
 * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @delta_ns:	"slack" range for the timer
 * @mode:	expiry mode: absolute (HRTIMER_MODE_ABS) or relative (HRTIMER_MODE_REL)
 *
 * Returns:
 *  0 on success
 *  1 when the timer was active
 */
int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
		unsigned long delta_ns, const enum hrtimer_mode mode)
{
	return __hrtimer_start_range_ns(timer, tim, delta_ns, mode, 1);
}
EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);

/**
 * hrtimer_start - (re)start an hrtimer on the current CPU
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @mode:	expiry mode: absolute (HRTIMER_MODE_ABS) or relative (HRTIMER_MODE_REL)
 *
 * Returns:
 *  0 on success
 *  1 when the timer was active
 */
int
hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
{
	return __hrtimer_start_range_ns(timer, tim, 0, mode, 1);
}
EXPORT_SYMBOL_GPL(hrtimer_start);
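
/*
 * A minimal usage sketch for the two start variants above (guarded
 * out of the build; the timer, handler and 100 us slack value are
 * hypothetical): arm a relative 5 ms timer whose expiry may be
 * coalesced with others inside the slack window.
 */
#if 0
static struct hrtimer example_timer;

static void example_arm(void)
{
	hrtimer_init(&example_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	example_timer.function = example_periodic;	/* see sketch above */

	/* Fire in 5 ms, with 100 us of slack for expiry batching. */
	hrtimer_start_range_ns(&example_timer,
			       ktime_set(0, 5 * NSEC_PER_MSEC),
			       100 * NSEC_PER_USEC, HRTIMER_MODE_REL);
}
#endif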
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1018 |  | 
| Arjan van de Ven | da8f2e1 | 2008-09-07 10:47:46 -0700 | [diff] [blame] | 1019 |  | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1020 | /** | 
 | 1021 |  * hrtimer_try_to_cancel - try to deactivate a timer | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1022 |  * @timer:	hrtimer to stop | 
 | 1023 |  * | 
 | 1024 |  * Returns: | 
 | 1025 |  *  0 when the timer was not active | 
 | 1026 |  *  1 when the timer was active | 
 | 1027 |  * -1 when the timer is currently executing the callback function and | 
| Randy Dunlap | fa9799e | 2006-06-25 05:49:15 -0700 | [diff] [blame] | 1028 |  *    cannot be stopped | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1029 |  */ | 
 | 1030 | int hrtimer_try_to_cancel(struct hrtimer *timer) | 
 | 1031 | { | 
| Thomas Gleixner | 3c8aa39 | 2007-02-16 01:27:50 -0800 | [diff] [blame] | 1032 | 	struct hrtimer_clock_base *base; | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1033 | 	unsigned long flags; | 
 | 1034 | 	int ret = -1; | 
 | 1035 |  | 
 | 1036 | 	base = lock_hrtimer_base(timer, &flags); | 
 | 1037 |  | 
| Thomas Gleixner | 303e967 | 2007-02-16 01:27:51 -0800 | [diff] [blame] | 1038 | 	if (!hrtimer_callback_running(timer)) | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1039 | 		ret = remove_hrtimer(timer, base); | 
 | 1040 |  | 
 | 1041 | 	unlock_hrtimer_base(timer, &flags); | 
 | 1042 |  | 
 | 1043 | 	return ret; | 
 | 1045 | } | 
| Stephen Hemminger | 8d16b76 | 2006-05-30 21:26:09 -0700 | [diff] [blame] | 1046 | EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel); | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1047 |  | 
 | 1048 | /** | 
 | 1049 |  * hrtimer_cancel - cancel a timer and wait for the handler to finish. | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1050 |  * @timer:	the timer to be cancelled | 
 | 1051 |  * | 
 | 1052 |  * Returns: | 
 | 1053 |  *  0 when the timer was not active | 
 | 1054 |  *  1 when the timer was active | 
 | 1055 |  */ | 
 | 1056 | int hrtimer_cancel(struct hrtimer *timer) | 
 | 1057 | { | 
 | 1058 | 	for (;;) { | 
 | 1059 | 		int ret = hrtimer_try_to_cancel(timer); | 
 | 1060 |  | 
 | 1061 | 		if (ret >= 0) | 
 | 1062 | 			return ret; | 
| Joe Korty | 5ef37b1 | 2006-04-10 22:54:13 -0700 | [diff] [blame] | 1063 | 		cpu_relax(); | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1064 | 	} | 
 | 1065 | } | 
| Stephen Hemminger | 8d16b76 | 2006-05-30 21:26:09 -0700 | [diff] [blame] | 1066 | EXPORT_SYMBOL_GPL(hrtimer_cancel); | 
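/*
 * Usage sketch (illustrative only): a typical teardown path. Note that
 * hrtimer_cancel() spins (cpu_relax) until a running callback finishes,
 * so it must not be called from a context the callback could deadlock
 * with; hrtimer_try_to_cancel() is the non-waiting variant. demo_timer
 * is hypothetical.
 */
#if 0	/* example only */
static void demo_teardown(void)
{
	/* after this returns, neither the timer nor its callback runs */
	hrtimer_cancel(&demo_timer);
}
#endif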
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1067 |  | 
 | 1068 | /** | 
 | 1069 |  * hrtimer_get_remaining - get remaining time for the timer | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1070 |  * @timer:	the timer to read | 
 | 1071 |  */ | 
 | 1072 | ktime_t hrtimer_get_remaining(const struct hrtimer *timer) | 
 | 1073 | { | 
| Thomas Gleixner | 3c8aa39 | 2007-02-16 01:27:50 -0800 | [diff] [blame] | 1074 | 	struct hrtimer_clock_base *base; | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1075 | 	unsigned long flags; | 
 | 1076 | 	ktime_t rem; | 
 | 1077 |  | 
 | 1078 | 	base = lock_hrtimer_base(timer, &flags); | 
| Arjan van de Ven | cc584b2 | 2008-09-01 15:02:30 -0700 | [diff] [blame] | 1079 | 	rem = hrtimer_expires_remaining(timer); | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1080 | 	unlock_hrtimer_base(timer, &flags); | 
 | 1081 |  | 
 | 1082 | 	return rem; | 
 | 1083 | } | 
| Stephen Hemminger | 8d16b76 | 2006-05-30 21:26:09 -0700 | [diff] [blame] | 1084 | EXPORT_SYMBOL_GPL(hrtimer_get_remaining); | 
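/*
 * Usage sketch (illustrative only): query how long until an armed
 * timer fires. demo_timer is hypothetical.
 */
#if 0	/* example only */
static s64 demo_ns_until_expiry(void)
{
	/* result is <= 0 once the timer has expired */
	return ktime_to_ns(hrtimer_get_remaining(&demo_timer));
}
#endif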
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1085 |  | 
| Russell King | ee9c578 | 2008-04-20 13:59:33 +0100 | [diff] [blame] | 1086 | #ifdef CONFIG_NO_HZ | 
| Tony Lindgren | 6923974 | 2006-03-06 15:42:45 -0800 | [diff] [blame] | 1087 | /** | 
 | 1088 |  * hrtimer_get_next_event - get the time until next expiry event | 
 | 1089 |  * | 
 | 1090 |  * Returns the delta to the next expiry event or KTIME_MAX if no timer | 
 | 1091 |  * is pending. | 
 | 1092 |  */ | 
 | 1093 | ktime_t hrtimer_get_next_event(void) | 
 | 1094 | { | 
| Thomas Gleixner | 3c8aa39 | 2007-02-16 01:27:50 -0800 | [diff] [blame] | 1095 | 	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); | 
 | 1096 | 	struct hrtimer_clock_base *base = cpu_base->clock_base; | 
| Tony Lindgren | 6923974 | 2006-03-06 15:42:45 -0800 | [diff] [blame] | 1097 | 	ktime_t delta, mindelta = { .tv64 = KTIME_MAX }; | 
 | 1098 | 	unsigned long flags; | 
 | 1099 | 	int i; | 
 | 1100 |  | 
| Thomas Gleixner | 3c8aa39 | 2007-02-16 01:27:50 -0800 | [diff] [blame] | 1101 | 	spin_lock_irqsave(&cpu_base->lock, flags); | 
 | 1102 |  | 
| Thomas Gleixner | 54cdfdb | 2007-02-16 01:28:11 -0800 | [diff] [blame] | 1103 | 	if (!hrtimer_hres_active()) { | 
 | 1104 | 		for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) { | 
 | 1105 | 			struct hrtimer *timer; | 
| Tony Lindgren | 6923974 | 2006-03-06 15:42:45 -0800 | [diff] [blame] | 1106 |  | 
| Thomas Gleixner | 54cdfdb | 2007-02-16 01:28:11 -0800 | [diff] [blame] | 1107 | 			if (!base->first) | 
 | 1108 | 				continue; | 
| Thomas Gleixner | 3c8aa39 | 2007-02-16 01:27:50 -0800 | [diff] [blame] | 1109 |  | 
| Thomas Gleixner | 54cdfdb | 2007-02-16 01:28:11 -0800 | [diff] [blame] | 1110 | 			timer = rb_entry(base->first, struct hrtimer, node); | 
| Arjan van de Ven | cc584b2 | 2008-09-01 15:02:30 -0700 | [diff] [blame] | 1111 | 			delta.tv64 = hrtimer_get_expires_tv64(timer); | 
| Thomas Gleixner | 54cdfdb | 2007-02-16 01:28:11 -0800 | [diff] [blame] | 1112 | 			delta = ktime_sub(delta, base->get_time()); | 
 | 1113 | 			if (delta.tv64 < mindelta.tv64) | 
 | 1114 | 				mindelta.tv64 = delta.tv64; | 
 | 1115 | 		} | 
| Tony Lindgren | 6923974 | 2006-03-06 15:42:45 -0800 | [diff] [blame] | 1116 | 	} | 
| Thomas Gleixner | 3c8aa39 | 2007-02-16 01:27:50 -0800 | [diff] [blame] | 1117 |  | 
 | 1118 | 	spin_unlock_irqrestore(&cpu_base->lock, flags); | 
 | 1119 |  | 
| Tony Lindgren | 6923974 | 2006-03-06 15:42:45 -0800 | [diff] [blame] | 1120 | 	if (mindelta.tv64 < 0) | 
 | 1121 | 		mindelta.tv64 = 0; | 
 | 1122 | 	return mindelta; | 
 | 1123 | } | 
 | 1124 | #endif | 
 | 1125 |  | 
| Thomas Gleixner | 237fc6e | 2008-04-30 00:55:04 -0700 | [diff] [blame] | 1126 | static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id, | 
 | 1127 | 			   enum hrtimer_mode mode) | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1128 | { | 
| Thomas Gleixner | 3c8aa39 | 2007-02-16 01:27:50 -0800 | [diff] [blame] | 1129 | 	struct hrtimer_cpu_base *cpu_base; | 
| George Anzinger | 7978672 | 2006-02-01 03:05:11 -0800 | [diff] [blame] | 1130 |  | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1131 | 	memset(timer, 0, sizeof(struct hrtimer)); | 
| George Anzinger | 7978672 | 2006-02-01 03:05:11 -0800 | [diff] [blame] | 1132 |  | 
| Thomas Gleixner | 3c8aa39 | 2007-02-16 01:27:50 -0800 | [diff] [blame] | 1133 | 	cpu_base = &__raw_get_cpu_var(hrtimer_bases); | 
| George Anzinger | 7978672 | 2006-02-01 03:05:11 -0800 | [diff] [blame] | 1134 |  | 
| Thomas Gleixner | c9cb2e3 | 2007-02-16 01:27:49 -0800 | [diff] [blame] | 1135 | 	if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS) | 
| George Anzinger | 7978672 | 2006-02-01 03:05:11 -0800 | [diff] [blame] | 1136 | 		clock_id = CLOCK_MONOTONIC; | 
 | 1137 |  | 
| Thomas Gleixner | 3c8aa39 | 2007-02-16 01:27:50 -0800 | [diff] [blame] | 1138 | 	timer->base = &cpu_base->clock_base[clock_id]; | 
| Thomas Gleixner | 54cdfdb | 2007-02-16 01:28:11 -0800 | [diff] [blame] | 1139 | 	hrtimer_init_timer_hres(timer); | 
| Ingo Molnar | 82f67cd | 2007-02-16 01:28:13 -0800 | [diff] [blame] | 1140 |  | 
 | 1141 | #ifdef CONFIG_TIMER_STATS | 
 | 1142 | 	timer->start_site = NULL; | 
 | 1143 | 	timer->start_pid = -1; | 
 | 1144 | 	memset(timer->start_comm, 0, TASK_COMM_LEN); | 
 | 1145 | #endif | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1146 | } | 
| Thomas Gleixner | 237fc6e | 2008-04-30 00:55:04 -0700 | [diff] [blame] | 1147 |  | 
 | 1148 | /** | 
 | 1149 |  * hrtimer_init - initialize a timer to the given clock | 
 | 1150 |  * @timer:	the timer to be initialized | 
 | 1151 |  * @clock_id:	the clock to be used | 
 | 1152 |  * @mode:	timer mode abs/rel | 
 | 1153 |  */ | 
 | 1154 | void hrtimer_init(struct hrtimer *timer, clockid_t clock_id, | 
 | 1155 | 		  enum hrtimer_mode mode) | 
 | 1156 | { | 
| Xiao Guangrong | c6a2a17 | 2009-08-10 10:51:23 +0800 | [diff] [blame] | 1157 | 	debug_init(timer, clock_id, mode); | 
| Thomas Gleixner | 237fc6e | 2008-04-30 00:55:04 -0700 | [diff] [blame] | 1158 | 	__hrtimer_init(timer, clock_id, mode); | 
 | 1159 | } | 
| Stephen Hemminger | 8d16b76 | 2006-05-30 21:26:09 -0700 | [diff] [blame] | 1160 | EXPORT_SYMBOL_GPL(hrtimer_init); | 
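/*
 * Usage sketch (illustrative only, not part of this file): initialize
 * a timer on the monotonic clock, install a callback and start it.
 * All demo_* names are hypothetical.
 */
#if 0	/* example only */
static struct hrtimer demo_timer;

static enum hrtimer_restart demo_fn(struct hrtimer *timer)
{
	/* one-shot: do not re-arm */
	return HRTIMER_NORESTART;
}

static void demo_setup(void)
{
	hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	demo_timer.function = demo_fn;
	/* expire ~10ms from now */
	hrtimer_start(&demo_timer, ktime_set(0, 10 * NSEC_PER_MSEC),
		      HRTIMER_MODE_REL);
}
#endif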
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1161 |  | 
 | 1162 | /** | 
 | 1163 |  * hrtimer_get_res - get the timer resolution for a clock | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1164 |  * @which_clock: which clock to query | 
 | 1165 |  * @tp:		 pointer to timespec variable to store the resolution | 
 | 1166 |  * | 
| Robert P. J. Day | 72fd4a3 | 2007-02-10 01:45:59 -0800 | [diff] [blame] | 1167 |  * Store the resolution of the clock selected by @which_clock in the | 
 | 1168 |  * variable pointed to by @tp. | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1169 |  */ | 
 | 1170 | int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp) | 
 | 1171 | { | 
| Thomas Gleixner | 3c8aa39 | 2007-02-16 01:27:50 -0800 | [diff] [blame] | 1172 | 	struct hrtimer_cpu_base *cpu_base; | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1173 |  | 
| Thomas Gleixner | 3c8aa39 | 2007-02-16 01:27:50 -0800 | [diff] [blame] | 1174 | 	cpu_base = &__raw_get_cpu_var(hrtimer_bases); | 
 | 1175 | 	*tp = ktime_to_timespec(cpu_base->clock_base[which_clock].resolution); | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1176 |  | 
 | 1177 | 	return 0; | 
 | 1178 | } | 
| Stephen Hemminger | 8d16b76 | 2006-05-30 21:26:09 -0700 | [diff] [blame] | 1179 | EXPORT_SYMBOL_GPL(hrtimer_get_res); | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1180 |  | 
| Xiao Guangrong | c6a2a17 | 2009-08-10 10:51:23 +0800 | [diff] [blame] | 1181 | static void __run_hrtimer(struct hrtimer *timer, ktime_t *now) | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1182 | { | 
| Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1183 | 	struct hrtimer_clock_base *base = timer->base; | 
 | 1184 | 	struct hrtimer_cpu_base *cpu_base = base->cpu_base; | 
 | 1185 | 	enum hrtimer_restart (*fn)(struct hrtimer *); | 
 | 1186 | 	int restart; | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1187 |  | 
| Peter Zijlstra | ca10949 | 2008-11-25 12:43:51 +0100 | [diff] [blame] | 1188 | 	WARN_ON(!irqs_disabled()); | 
 | 1189 |  | 
| Xiao Guangrong | c6a2a17 | 2009-08-10 10:51:23 +0800 | [diff] [blame] | 1190 | 	debug_deactivate(timer); | 
| Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1191 | 	__remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0); | 
 | 1192 | 	timer_stats_account_hrtimer(timer); | 
| Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1193 | 	fn = timer->function; | 
| Peter Zijlstra | ca10949 | 2008-11-25 12:43:51 +0100 | [diff] [blame] | 1194 |  | 
 | 1195 | 	/* | 
 | 1196 | 	 * Because we run timers from hardirq context, there is no chance | 
 | 1197 | 	 * they get migrated to another cpu, so it is safe to unlock | 
 | 1198 | 	 * the timer base. | 
 | 1199 | 	 */ | 
 | 1200 | 	spin_unlock(&cpu_base->lock); | 
| Xiao Guangrong | c6a2a17 | 2009-08-10 10:51:23 +0800 | [diff] [blame] | 1201 | 	trace_hrtimer_expire_entry(timer, now); | 
| Peter Zijlstra | ca10949 | 2008-11-25 12:43:51 +0100 | [diff] [blame] | 1202 | 	restart = fn(timer); | 
| Xiao Guangrong | c6a2a17 | 2009-08-10 10:51:23 +0800 | [diff] [blame] | 1203 | 	trace_hrtimer_expire_exit(timer); | 
| Peter Zijlstra | ca10949 | 2008-11-25 12:43:51 +0100 | [diff] [blame] | 1204 | 	spin_lock(&cpu_base->lock); | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1205 |  | 
| Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1206 | 	/* | 
| Thomas Gleixner | e3f1d88 | 2009-01-05 11:28:23 +0100 | [diff] [blame] | 1207 | 	 * Note: We clear the CALLBACK bit after enqueue_hrtimer and | 
 | 1208 | 	 * we do not reprogram the event hardware. Reprogramming happens | 
 | 1209 | 	 * either in hrtimer_start_range_ns() or in hrtimer_interrupt(). | 
| Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1210 | 	 */ | 
 | 1211 | 	if (restart != HRTIMER_NORESTART) { | 
 | 1212 | 		BUG_ON(timer->state != HRTIMER_STATE_CALLBACK); | 
| Peter Zijlstra | a6037b6 | 2009-01-05 11:28:22 +0100 | [diff] [blame] | 1213 | 		enqueue_hrtimer(timer, base); | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1214 | 	} | 
| Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1215 | 	timer->state &= ~HRTIMER_STATE_CALLBACK; | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1216 | } | 
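/*
 * Illustrative sketch (not part of this file): a self-rearming
 * callback. Returning HRTIMER_RESTART after pushing the expiry forward
 * makes __run_hrtimer() re-enqueue the timer, as described above.
 */
#if 0	/* example only */
static enum hrtimer_restart demo_periodic(struct hrtimer *timer)
{
	/* advance the expiry to 1ms past the current time */
	hrtimer_forward_now(timer, ktime_set(0, NSEC_PER_MSEC));
	return HRTIMER_RESTART;
}
#endif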
 | 1217 |  | 
| Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1218 | #ifdef CONFIG_HIGH_RES_TIMERS | 
 | 1219 |  | 
| Frederic Weisbecker | 7f22391 | 2008-12-22 02:24:48 +0100 | [diff] [blame] | 1220 | static int force_clock_reprogram; | 
 | 1221 |  | 
 | 1222 | /* | 
 | 1223 |  * If hrtimer_interrupt() needs more than 5 retry iterations we consider | 
 | 1224 |  * it to be hanging, which can happen when something slows the interrupt | 
 | 1225 |  * down, such as tracing. In that case we force clock reprogramming for | 
 | 1226 |  * every future hrtimer interrupt to avoid infinite loops, overwriting | 
 | 1227 |  * the device's min_delta_ns threshold: the next tick event is scheduled | 
 | 1228 |  * 3 times the duration we just spent in hrtimer_interrupt(). This is a | 
 | 1229 |  * good compromise: with a handler taking T ns and a minimum gap of 3*T | 
 | 1230 |  * ns, a cpu spends at most T / (T + 3*T) = 1/4 of its time in the | 
 | 1231 |  * hrtimer interrupt, enough to keep running without serious starvation. | 
 | 1232 |  */ | 
 | 1233 |  | 
 | 1234 | static inline void | 
 | 1235 | hrtimer_interrupt_hanging(struct clock_event_device *dev, | 
 | 1236 | 			ktime_t try_time) | 
 | 1237 | { | 
 | 1238 | 	force_clock_reprogram = 1; | 
 | 1239 | 	dev->min_delta_ns = (unsigned long)try_time.tv64 * 3; | 
 | 1240 | 	printk(KERN_WARNING "hrtimer: interrupt too slow, " | 
 | 1241 | 		"forcing clock min delta to %lu ns\n", dev->min_delta_ns); | 
 | 1242 | } | 
| Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1243 | /* | 
 | 1244 |  * High resolution timer interrupt | 
 | 1245 |  * Called with interrupts disabled | 
 | 1246 |  */ | 
 | 1247 | void hrtimer_interrupt(struct clock_event_device *dev) | 
 | 1248 | { | 
 | 1249 | 	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); | 
 | 1250 | 	struct hrtimer_clock_base *base; | 
 | 1251 | 	ktime_t expires_next, now; | 
| Frederic Weisbecker | 7f22391 | 2008-12-22 02:24:48 +0100 | [diff] [blame] | 1252 | 	int nr_retries = 0; | 
| Peter Zijlstra | ca10949 | 2008-11-25 12:43:51 +0100 | [diff] [blame] | 1253 | 	int i; | 
| Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1254 |  | 
 | 1255 | 	BUG_ON(!cpu_base->hres_active); | 
 | 1256 | 	cpu_base->nr_events++; | 
 | 1257 | 	dev->next_event.tv64 = KTIME_MAX; | 
 | 1258 |  | 
 | 1259 |  retry: | 
| Frederic Weisbecker | 7f22391 | 2008-12-22 02:24:48 +0100 | [diff] [blame] | 1260 | 	/* 5 retries is enough to notice a hang */ | 
 | 1261 | 	if (!(++nr_retries % 5)) | 
 | 1262 | 		hrtimer_interrupt_hanging(dev, ktime_sub(ktime_get(), now)); | 
 | 1263 |  | 
| Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1264 | 	now = ktime_get(); | 
 | 1265 |  | 
 | 1266 | 	expires_next.tv64 = KTIME_MAX; | 
 | 1267 |  | 
| Thomas Gleixner | 6ff7041 | 2009-07-10 14:57:05 +0200 | [diff] [blame] | 1268 | 	spin_lock(&cpu_base->lock); | 
 | 1269 | 	/* | 
 | 1270 | 	 * We set expires_next to KTIME_MAX here with cpu_base->lock | 
 | 1271 | 	 * held to prevent that a timer is enqueued in our queue via | 
 | 1272 | 	 * the migration code. This does not affect enqueueing of | 
 | 1273 | 	 * timers which run their callback and need to be requeued on | 
 | 1274 | 	 * this CPU. | 
 | 1275 | 	 */ | 
 | 1276 | 	cpu_base->expires_next.tv64 = KTIME_MAX; | 
 | 1277 |  | 
| Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1278 | 	base = cpu_base->clock_base; | 
 | 1279 |  | 
 | 1280 | 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { | 
 | 1281 | 		ktime_t basenow; | 
 | 1282 | 		struct rb_node *node; | 
 | 1283 |  | 
| Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1284 | 		basenow = ktime_add(now, base->offset); | 
 | 1285 |  | 
 | 1286 | 		while ((node = base->first)) { | 
 | 1287 | 			struct hrtimer *timer; | 
 | 1288 |  | 
 | 1289 | 			timer = rb_entry(node, struct hrtimer, node); | 
 | 1290 |  | 
| Arjan van de Ven | 654c8e0 | 2008-09-01 15:47:08 -0700 | [diff] [blame] | 1291 | 			/* | 
 | 1292 | 			 * The immediate goal for using the softexpires is | 
 | 1293 | 			 * minimizing wakeups, not running timers at the | 
 | 1294 | 			 * earliest interrupt after their soft expiration. | 
 | 1295 | 			 * This allows us to avoid using a Priority Search | 
 | 1296 | 			 * Tree, which can answer a stabbing query for | 
 | 1297 | 			 * overlapping intervals, and instead use the simple | 
 | 1298 | 			 * BST we already have. | 
 | 1299 | 			 * We don't add extra wakeups by delaying timers that | 
 | 1300 | 			 * are to the right of a not-yet-expired timer, because that | 
 | 1301 | 			 * timer will have to trigger a wakeup anyway. | 
 | 1302 | 			 */ | 
 | 1303 |  | 
 | 1304 | 			if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) { | 
| Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1305 | 				ktime_t expires; | 
 | 1306 |  | 
| Arjan van de Ven | cc584b2 | 2008-09-01 15:02:30 -0700 | [diff] [blame] | 1307 | 				expires = ktime_sub(hrtimer_get_expires(timer), | 
| Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1308 | 						    base->offset); | 
 | 1309 | 				if (expires.tv64 < expires_next.tv64) | 
 | 1310 | 					expires_next = expires; | 
 | 1311 | 				break; | 
 | 1312 | 			} | 
 | 1313 |  | 
| Xiao Guangrong | c6a2a17 | 2009-08-10 10:51:23 +0800 | [diff] [blame] | 1314 | 			__run_hrtimer(timer, &basenow); | 
| Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1315 | 		} | 
| Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1316 | 		base++; | 
 | 1317 | 	} | 
 | 1318 |  | 
| Thomas Gleixner | 6ff7041 | 2009-07-10 14:57:05 +0200 | [diff] [blame] | 1319 | 	/* | 
 | 1320 | 	 * Store the new expiry value so the migration code can verify | 
 | 1321 | 	 * against it. | 
 | 1322 | 	 */ | 
| Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1323 | 	cpu_base->expires_next = expires_next; | 
| Thomas Gleixner | 6ff7041 | 2009-07-10 14:57:05 +0200 | [diff] [blame] | 1324 | 	spin_unlock(&cpu_base->lock); | 
| Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1325 |  | 
 | 1326 | 	/* Reprogramming necessary ? */ | 
 | 1327 | 	if (expires_next.tv64 != KTIME_MAX) { | 
| Frederic Weisbecker | 7f22391 | 2008-12-22 02:24:48 +0100 | [diff] [blame] | 1328 | 		if (tick_program_event(expires_next, force_clock_reprogram)) | 
| Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1329 | 			goto retry; | 
 | 1330 | 	} | 
| Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1331 | } | 
 | 1332 |  | 
| Thomas Gleixner | 8bdec95 | 2009-01-05 11:28:19 +0100 | [diff] [blame] | 1333 | /* | 
 | 1334 |  * local version of hrtimer_peek_ahead_timers() called with interrupts | 
 | 1335 |  * disabled. | 
 | 1336 |  */ | 
 | 1337 | static void __hrtimer_peek_ahead_timers(void) | 
 | 1338 | { | 
 | 1339 | 	struct tick_device *td; | 
 | 1340 |  | 
 | 1341 | 	if (!hrtimer_hres_active()) | 
 | 1342 | 		return; | 
 | 1343 |  | 
 | 1344 | 	td = &__get_cpu_var(tick_cpu_device); | 
 | 1345 | 	if (td && td->evtdev) | 
 | 1346 | 		hrtimer_interrupt(td->evtdev); | 
 | 1347 | } | 
 | 1348 |  | 
| Arjan van de Ven | 2e94d1f | 2008-09-10 16:06:00 -0700 | [diff] [blame] | 1349 | /** | 
 | 1350 |  * hrtimer_peek_ahead_timers - run soft-expired timers now | 
 | 1351 |  * | 
 | 1352 |  * hrtimer_peek_ahead_timers will peek at the timer queue of | 
 | 1353 |  * the current cpu and check if there are any timers for which | 
 | 1354 |  * the soft expiry time has passed. If any such timers exist, | 
 | 1355 |  * they are run immediately and then removed from the timer queue. | 
 | 1356 |  * | 
 | 1357 |  */ | 
 | 1358 | void hrtimer_peek_ahead_timers(void) | 
 | 1359 | { | 
| Thomas Gleixner | 643bdf6 | 2008-10-20 13:38:11 +0200 | [diff] [blame] | 1360 | 	unsigned long flags; | 
| Arjan van de Ven | dc4304f | 2008-10-13 10:32:15 -0400 | [diff] [blame] | 1361 |  | 
| Arjan van de Ven | 2e94d1f | 2008-09-10 16:06:00 -0700 | [diff] [blame] | 1362 | 	local_irq_save(flags); | 
| Thomas Gleixner | 8bdec95 | 2009-01-05 11:28:19 +0100 | [diff] [blame] | 1363 | 	__hrtimer_peek_ahead_timers(); | 
| Arjan van de Ven | 2e94d1f | 2008-09-10 16:06:00 -0700 | [diff] [blame] | 1364 | 	local_irq_restore(flags); | 
 | 1365 | } | 
 | 1366 |  | 
| Peter Zijlstra | a6037b6 | 2009-01-05 11:28:22 +0100 | [diff] [blame] | 1367 | static void run_hrtimer_softirq(struct softirq_action *h) | 
 | 1368 | { | 
 | 1369 | 	hrtimer_peek_ahead_timers(); | 
 | 1370 | } | 
 | 1371 |  | 
| Ingo Molnar | 82c5b7b | 2009-01-05 14:11:10 +0100 | [diff] [blame] | 1372 | #else /* CONFIG_HIGH_RES_TIMERS */ | 
 | 1373 |  | 
 | 1374 | static inline void __hrtimer_peek_ahead_timers(void) { } | 
 | 1375 |  | 
 | 1376 | #endif	/* !CONFIG_HIGH_RES_TIMERS */ | 
| Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1377 |  | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1378 | /* | 
 | 1379 |  * Called from timer softirq every jiffy, expire hrtimers: | 
| Thomas Gleixner | 54cdfdb | 2007-02-16 01:28:11 -0800 | [diff] [blame] | 1380 |  * | 
 | 1381 |  * For HRT it's the fallback code to run the softirq in the timer | 
 | 1382 |  * softirq context in case the hrtimer initialization failed or has | 
 | 1383 |  * not been done yet. | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1384 |  */ | 
| Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1385 | void hrtimer_run_pending(void) | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1386 | { | 
| Thomas Gleixner | 54cdfdb | 2007-02-16 01:28:11 -0800 | [diff] [blame] | 1387 | 	if (hrtimer_hres_active()) | 
 | 1388 | 		return; | 
 | 1389 |  | 
| Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 1390 | 	/* | 
 | 1391 | 	 * This _is_ ugly: We have to check in the softirq context | 
 | 1392 | 	 * whether we can switch to highres and/or nohz mode. The | 
 | 1393 | 	 * clocksource switch happens in the timer interrupt with | 
 | 1394 | 	 * xtime_lock held. Notification from there only sets the | 
 | 1395 | 	 * check bit in the tick_oneshot code, otherwise we might | 
 | 1396 | 	 * deadlock vs. xtime_lock. | 
 | 1397 | 	 */ | 
| Thomas Gleixner | 54cdfdb | 2007-02-16 01:28:11 -0800 | [diff] [blame] | 1398 | 	if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())) | 
| Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1399 | 		hrtimer_switch_to_hres(); | 
| Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1400 | } | 
 | 1401 |  | 
 | 1402 | /* | 
 | 1403 |  * Called from hardirq context every jiffy | 
 | 1404 |  */ | 
| Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1405 | void hrtimer_run_queues(void) | 
 | 1406 | { | 
| Dimitri Sivanich | 833883d | 2008-04-18 13:39:00 -0700 | [diff] [blame] | 1407 | 	struct rb_node *node; | 
| Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1408 | 	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); | 
| Dimitri Sivanich | 833883d | 2008-04-18 13:39:00 -0700 | [diff] [blame] | 1409 | 	struct hrtimer_clock_base *base; | 
 | 1410 | 	int index, gettime = 1; | 
| Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1411 |  | 
 | 1412 | 	if (hrtimer_hres_active()) | 
 | 1413 | 		return; | 
| Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 1414 |  | 
| Dimitri Sivanich | 833883d | 2008-04-18 13:39:00 -0700 | [diff] [blame] | 1415 | 	for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) { | 
 | 1416 | 		base = &cpu_base->clock_base[index]; | 
| Thomas Gleixner | 92127c7 | 2006-03-26 01:38:05 -0800 | [diff] [blame] | 1417 |  | 
| Dimitri Sivanich | 833883d | 2008-04-18 13:39:00 -0700 | [diff] [blame] | 1418 | 		if (!base->first) | 
 | 1419 | 			continue; | 
 | 1420 |  | 
| Mark McLoughlin | d7cfb60 | 2008-09-19 13:13:44 +0100 | [diff] [blame] | 1421 | 		if (gettime) { | 
| Dimitri Sivanich | 833883d | 2008-04-18 13:39:00 -0700 | [diff] [blame] | 1422 | 			hrtimer_get_softirq_time(cpu_base); | 
 | 1423 | 			gettime = 0; | 
 | 1424 | 		} | 
 | 1425 |  | 
| Dimitri Sivanich | 833883d | 2008-04-18 13:39:00 -0700 | [diff] [blame] | 1426 | 		spin_lock(&cpu_base->lock); | 
 | 1427 |  | 
 | 1428 | 		while ((node = base->first)) { | 
 | 1429 | 			struct hrtimer *timer; | 
 | 1430 |  | 
 | 1431 | 			timer = rb_entry(node, struct hrtimer, node); | 
| Arjan van de Ven | cc584b2 | 2008-09-01 15:02:30 -0700 | [diff] [blame] | 1432 | 			if (base->softirq_time.tv64 <= | 
 | 1433 | 					hrtimer_get_expires_tv64(timer)) | 
| Dimitri Sivanich | 833883d | 2008-04-18 13:39:00 -0700 | [diff] [blame] | 1434 | 				break; | 
 | 1435 |  | 
| Xiao Guangrong | c6a2a17 | 2009-08-10 10:51:23 +0800 | [diff] [blame] | 1436 | 			__run_hrtimer(timer, &base->softirq_time); | 
| Dimitri Sivanich | 833883d | 2008-04-18 13:39:00 -0700 | [diff] [blame] | 1437 | 		} | 
 | 1438 | 		spin_unlock(&cpu_base->lock); | 
 | 1439 | 	} | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1440 | } | 
 | 1441 |  | 
 | 1442 | /* | 
| Thomas Gleixner | 10c94ec | 2006-01-09 20:52:35 -0800 | [diff] [blame] | 1443 |  * Sleep related functions: | 
 | 1444 |  */ | 
| Thomas Gleixner | c9cb2e3 | 2007-02-16 01:27:49 -0800 | [diff] [blame] | 1445 | static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer) | 
| Thomas Gleixner | 00362e3 | 2006-03-31 02:31:17 -0800 | [diff] [blame] | 1446 | { | 
 | 1447 | 	struct hrtimer_sleeper *t = | 
 | 1448 | 		container_of(timer, struct hrtimer_sleeper, timer); | 
 | 1449 | 	struct task_struct *task = t->task; | 
 | 1450 |  | 
 | 1451 | 	t->task = NULL; | 
 | 1452 | 	if (task) | 
 | 1453 | 		wake_up_process(task); | 
 | 1454 |  | 
 | 1455 | 	return HRTIMER_NORESTART; | 
 | 1456 | } | 
 | 1457 |  | 
| Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 1458 | void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task) | 
| Thomas Gleixner | 00362e3 | 2006-03-31 02:31:17 -0800 | [diff] [blame] | 1459 | { | 
 | 1460 | 	sl->timer.function = hrtimer_wakeup; | 
 | 1461 | 	sl->task = task; | 
 | 1462 | } | 
| Stephen Hemminger | 2bc481c | 2009-08-28 23:41:29 -0700 | [diff] [blame] | 1463 | EXPORT_SYMBOL_GPL(hrtimer_init_sleeper); | 
| Thomas Gleixner | 00362e3 | 2006-03-31 02:31:17 -0800 | [diff] [blame] | 1464 |  | 
| Thomas Gleixner | 669d786 | 2006-03-31 02:31:19 -0800 | [diff] [blame] | 1465 | static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode) | 
| Thomas Gleixner | 10c94ec | 2006-01-09 20:52:35 -0800 | [diff] [blame] | 1466 | { | 
| Thomas Gleixner | 669d786 | 2006-03-31 02:31:19 -0800 | [diff] [blame] | 1467 | 	hrtimer_init_sleeper(t, current); | 
| Thomas Gleixner | 10c94ec | 2006-01-09 20:52:35 -0800 | [diff] [blame] | 1468 |  | 
| Roman Zippel | 432569b | 2006-03-26 01:38:08 -0800 | [diff] [blame] | 1469 | 	do { | 
 | 1470 | 		set_current_state(TASK_INTERRUPTIBLE); | 
| Arjan van de Ven | cc584b2 | 2008-09-01 15:02:30 -0700 | [diff] [blame] | 1471 | 		hrtimer_start_expires(&t->timer, mode); | 
| Peter Zijlstra | 37bb6cb | 2008-01-25 21:08:32 +0100 | [diff] [blame] | 1472 | 		if (!hrtimer_active(&t->timer)) | 
 | 1473 | 			t->task = NULL; | 
| Roman Zippel | 432569b | 2006-03-26 01:38:08 -0800 | [diff] [blame] | 1474 |  | 
| Thomas Gleixner | 54cdfdb | 2007-02-16 01:28:11 -0800 | [diff] [blame] | 1475 | 		if (likely(t->task)) | 
 | 1476 | 			schedule(); | 
| Roman Zippel | 432569b | 2006-03-26 01:38:08 -0800 | [diff] [blame] | 1477 |  | 
| Thomas Gleixner | 669d786 | 2006-03-31 02:31:19 -0800 | [diff] [blame] | 1478 | 		hrtimer_cancel(&t->timer); | 
| Thomas Gleixner | c9cb2e3 | 2007-02-16 01:27:49 -0800 | [diff] [blame] | 1479 | 		mode = HRTIMER_MODE_ABS; | 
| Roman Zippel | 432569b | 2006-03-26 01:38:08 -0800 | [diff] [blame] | 1480 |  | 
| Thomas Gleixner | 669d786 | 2006-03-31 02:31:19 -0800 | [diff] [blame] | 1481 | 	} while (t->task && !signal_pending(current)); | 
 | 1482 |  | 
| Peter Zijlstra | 3588a08 | 2008-02-01 17:45:13 +0100 | [diff] [blame] | 1483 | 	__set_current_state(TASK_RUNNING); | 
 | 1484 |  | 
| Thomas Gleixner | 669d786 | 2006-03-31 02:31:19 -0800 | [diff] [blame] | 1485 | 	return t->task == NULL; | 
| Thomas Gleixner | 10c94ec | 2006-01-09 20:52:35 -0800 | [diff] [blame] | 1486 | } | 
 | 1487 |  | 
| Oleg Nesterov | 080344b | 2008-02-01 17:29:05 +0300 | [diff] [blame] | 1488 | static int update_rmtp(struct hrtimer *timer, struct timespec __user *rmtp) | 
 | 1489 | { | 
 | 1490 | 	struct timespec rmt; | 
 | 1491 | 	ktime_t rem; | 
 | 1492 |  | 
| Arjan van de Ven | cc584b2 | 2008-09-01 15:02:30 -0700 | [diff] [blame] | 1493 | 	rem = hrtimer_expires_remaining(timer); | 
| Oleg Nesterov | 080344b | 2008-02-01 17:29:05 +0300 | [diff] [blame] | 1494 | 	if (rem.tv64 <= 0) | 
 | 1495 | 		return 0; | 
 | 1496 | 	rmt = ktime_to_timespec(rem); | 
 | 1497 |  | 
 | 1498 | 	if (copy_to_user(rmtp, &rmt, sizeof(*rmtp))) | 
 | 1499 | 		return -EFAULT; | 
 | 1500 |  | 
 | 1501 | 	return 1; | 
 | 1502 | } | 
 | 1503 |  | 
| Toyo Abe | 1711ef3 | 2006-09-29 02:00:28 -0700 | [diff] [blame] | 1504 | long __sched hrtimer_nanosleep_restart(struct restart_block *restart) | 
| Thomas Gleixner | 10c94ec | 2006-01-09 20:52:35 -0800 | [diff] [blame] | 1505 | { | 
| Thomas Gleixner | 669d786 | 2006-03-31 02:31:19 -0800 | [diff] [blame] | 1506 | 	struct hrtimer_sleeper t; | 
| Oleg Nesterov | 080344b | 2008-02-01 17:29:05 +0300 | [diff] [blame] | 1507 | 	struct timespec __user  *rmtp; | 
| Thomas Gleixner | 237fc6e | 2008-04-30 00:55:04 -0700 | [diff] [blame] | 1508 | 	int ret = 0; | 
| Thomas Gleixner | 10c94ec | 2006-01-09 20:52:35 -0800 | [diff] [blame] | 1509 |  | 
| Thomas Gleixner | 237fc6e | 2008-04-30 00:55:04 -0700 | [diff] [blame] | 1510 | 	hrtimer_init_on_stack(&t.timer, restart->nanosleep.index, | 
 | 1511 | 				HRTIMER_MODE_ABS); | 
| Arjan van de Ven | cc584b2 | 2008-09-01 15:02:30 -0700 | [diff] [blame] | 1512 | 	hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires); | 
| Thomas Gleixner | 10c94ec | 2006-01-09 20:52:35 -0800 | [diff] [blame] | 1513 |  | 
| Thomas Gleixner | c9cb2e3 | 2007-02-16 01:27:49 -0800 | [diff] [blame] | 1514 | 	if (do_nanosleep(&t, HRTIMER_MODE_ABS)) | 
| Thomas Gleixner | 237fc6e | 2008-04-30 00:55:04 -0700 | [diff] [blame] | 1515 | 		goto out; | 
| Thomas Gleixner | 10c94ec | 2006-01-09 20:52:35 -0800 | [diff] [blame] | 1516 |  | 
| Thomas Gleixner | 029a07e | 2008-02-10 09:17:43 +0100 | [diff] [blame] | 1517 | 	rmtp = restart->nanosleep.rmtp; | 
| Roman Zippel | 432569b | 2006-03-26 01:38:08 -0800 | [diff] [blame] | 1518 | 	if (rmtp) { | 
| Thomas Gleixner | 237fc6e | 2008-04-30 00:55:04 -0700 | [diff] [blame] | 1519 | 		ret = update_rmtp(&t.timer, rmtp); | 
| Oleg Nesterov | 080344b | 2008-02-01 17:29:05 +0300 | [diff] [blame] | 1520 | 		if (ret <= 0) | 
| Thomas Gleixner | 237fc6e | 2008-04-30 00:55:04 -0700 | [diff] [blame] | 1521 | 			goto out; | 
| Roman Zippel | 432569b | 2006-03-26 01:38:08 -0800 | [diff] [blame] | 1522 | 	} | 
| Thomas Gleixner | 10c94ec | 2006-01-09 20:52:35 -0800 | [diff] [blame] | 1523 |  | 
| Thomas Gleixner | 10c94ec | 2006-01-09 20:52:35 -0800 | [diff] [blame] | 1524 | 	/* The other values in restart are already filled in */ | 
| Thomas Gleixner | 237fc6e | 2008-04-30 00:55:04 -0700 | [diff] [blame] | 1525 | 	ret = -ERESTART_RESTARTBLOCK; | 
 | 1526 | out: | 
 | 1527 | 	destroy_hrtimer_on_stack(&t.timer); | 
 | 1528 | 	return ret; | 
| Thomas Gleixner | 10c94ec | 2006-01-09 20:52:35 -0800 | [diff] [blame] | 1529 | } | 
 | 1530 |  | 
| Oleg Nesterov | 080344b | 2008-02-01 17:29:05 +0300 | [diff] [blame] | 1531 | long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, | 
| Thomas Gleixner | 10c94ec | 2006-01-09 20:52:35 -0800 | [diff] [blame] | 1532 | 		       const enum hrtimer_mode mode, const clockid_t clockid) | 
 | 1533 | { | 
 | 1534 | 	struct restart_block *restart; | 
| Thomas Gleixner | 669d786 | 2006-03-31 02:31:19 -0800 | [diff] [blame] | 1535 | 	struct hrtimer_sleeper t; | 
| Thomas Gleixner | 237fc6e | 2008-04-30 00:55:04 -0700 | [diff] [blame] | 1536 | 	int ret = 0; | 
| Arjan van de Ven | 3bd0120 | 2008-09-08 08:58:59 -0700 | [diff] [blame] | 1537 | 	unsigned long slack; | 
 | 1538 |  | 
 | 1539 | 	slack = current->timer_slack_ns; | 
 | 1540 | 	if (rt_task(current)) | 
 | 1541 | 		slack = 0; | 
| Thomas Gleixner | 10c94ec | 2006-01-09 20:52:35 -0800 | [diff] [blame] | 1542 |  | 
| Thomas Gleixner | 237fc6e | 2008-04-30 00:55:04 -0700 | [diff] [blame] | 1543 | 	hrtimer_init_on_stack(&t.timer, clockid, mode); | 
| Arjan van de Ven | 3bd0120 | 2008-09-08 08:58:59 -0700 | [diff] [blame] | 1544 | 	hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack); | 
| Roman Zippel | 432569b | 2006-03-26 01:38:08 -0800 | [diff] [blame] | 1545 | 	if (do_nanosleep(&t, mode)) | 
| Thomas Gleixner | 237fc6e | 2008-04-30 00:55:04 -0700 | [diff] [blame] | 1546 | 		goto out; | 
| Thomas Gleixner | 10c94ec | 2006-01-09 20:52:35 -0800 | [diff] [blame] | 1547 |  | 
| George Anzinger | 7978672 | 2006-02-01 03:05:11 -0800 | [diff] [blame] | 1548 | 	/* Absolute timers do not update the rmtp value and restart: */ | 
| Thomas Gleixner | 237fc6e | 2008-04-30 00:55:04 -0700 | [diff] [blame] | 1549 | 	if (mode == HRTIMER_MODE_ABS) { | 
 | 1550 | 		ret = -ERESTARTNOHAND; | 
 | 1551 | 		goto out; | 
 | 1552 | 	} | 
| Thomas Gleixner | 10c94ec | 2006-01-09 20:52:35 -0800 | [diff] [blame] | 1553 |  | 
| Roman Zippel | 432569b | 2006-03-26 01:38:08 -0800 | [diff] [blame] | 1554 | 	if (rmtp) { | 
| Thomas Gleixner | 237fc6e | 2008-04-30 00:55:04 -0700 | [diff] [blame] | 1555 | 		ret = update_rmtp(&t.timer, rmtp); | 
| Oleg Nesterov | 080344b | 2008-02-01 17:29:05 +0300 | [diff] [blame] | 1556 | 		if (ret <= 0) | 
| Thomas Gleixner | 237fc6e | 2008-04-30 00:55:04 -0700 | [diff] [blame] | 1557 | 			goto out; | 
| Roman Zippel | 432569b | 2006-03-26 01:38:08 -0800 | [diff] [blame] | 1558 | 	} | 
| Thomas Gleixner | 10c94ec | 2006-01-09 20:52:35 -0800 | [diff] [blame] | 1559 |  | 
 | 1560 | 	restart = &current_thread_info()->restart_block; | 
| Toyo Abe | 1711ef3 | 2006-09-29 02:00:28 -0700 | [diff] [blame] | 1561 | 	restart->fn = hrtimer_nanosleep_restart; | 
| Thomas Gleixner | 029a07e | 2008-02-10 09:17:43 +0100 | [diff] [blame] | 1562 | 	restart->nanosleep.index = t.timer.base->index; | 
 | 1563 | 	restart->nanosleep.rmtp = rmtp; | 
| Arjan van de Ven | cc584b2 | 2008-09-01 15:02:30 -0700 | [diff] [blame] | 1564 | 	restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer); | 
| Thomas Gleixner | 10c94ec | 2006-01-09 20:52:35 -0800 | [diff] [blame] | 1565 |  | 
| Thomas Gleixner | 237fc6e | 2008-04-30 00:55:04 -0700 | [diff] [blame] | 1566 | 	ret = -ERESTART_RESTARTBLOCK; | 
 | 1567 | out: | 
 | 1568 | 	destroy_hrtimer_on_stack(&t.timer); | 
 | 1569 | 	return ret; | 
| Thomas Gleixner | 10c94ec | 2006-01-09 20:52:35 -0800 | [diff] [blame] | 1570 | } | 
 | 1571 |  | 
| Heiko Carstens | 58fd3aa | 2009-01-14 14:14:03 +0100 | [diff] [blame] | 1572 | SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp, | 
 | 1573 | 		struct timespec __user *, rmtp) | 
| Thomas Gleixner | 6ba1b91 | 2006-01-09 20:52:36 -0800 | [diff] [blame] | 1574 | { | 
| Oleg Nesterov | 080344b | 2008-02-01 17:29:05 +0300 | [diff] [blame] | 1575 | 	struct timespec tu; | 
| Thomas Gleixner | 6ba1b91 | 2006-01-09 20:52:36 -0800 | [diff] [blame] | 1576 |  | 
 | 1577 | 	if (copy_from_user(&tu, rqtp, sizeof(tu))) | 
 | 1578 | 		return -EFAULT; | 
 | 1579 |  | 
 | 1580 | 	if (!timespec_valid(&tu)) | 
 | 1581 | 		return -EINVAL; | 
 | 1582 |  | 
| Oleg Nesterov | 080344b | 2008-02-01 17:29:05 +0300 | [diff] [blame] | 1583 | 	return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC); | 
| Thomas Gleixner | 6ba1b91 | 2006-01-09 20:52:36 -0800 | [diff] [blame] | 1584 | } | 
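/*
 * Userspace view (illustrative only): the syscall above backs the
 * POSIX nanosleep() library call, e.g. sleeping for ~500us:
 *
 *	struct timespec req = { .tv_sec = 0, .tv_nsec = 500000 };
 *	nanosleep(&req, NULL);
 */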
 | 1585 |  | 
| Thomas Gleixner | 10c94ec | 2006-01-09 20:52:35 -0800 | [diff] [blame] | 1586 | /* | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1587 |  * Functions related to boot-time initialization: | 
 | 1588 |  */ | 
| Randy Dunlap | 0ec160d | 2008-01-21 17:18:24 -0800 | [diff] [blame] | 1589 | static void __cpuinit init_hrtimers_cpu(int cpu) | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1590 | { | 
| Thomas Gleixner | 3c8aa39 | 2007-02-16 01:27:50 -0800 | [diff] [blame] | 1591 | 	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu); | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1592 | 	int i; | 
 | 1593 |  | 
| Thomas Gleixner | 3c8aa39 | 2007-02-16 01:27:50 -0800 | [diff] [blame] | 1594 | 	spin_lock_init(&cpu_base->lock); | 
| Thomas Gleixner | 3c8aa39 | 2007-02-16 01:27:50 -0800 | [diff] [blame] | 1595 |  | 
 | 1596 | 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) | 
 | 1597 | 		cpu_base->clock_base[i].cpu_base = cpu_base; | 
 | 1598 |  | 
| Thomas Gleixner | 54cdfdb | 2007-02-16 01:28:11 -0800 | [diff] [blame] | 1599 | 	hrtimer_init_hres(cpu_base); | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1600 | } | 
 | 1601 |  | 
 | 1602 | #ifdef CONFIG_HOTPLUG_CPU | 
 | 1603 |  | 
| Peter Zijlstra | ca10949 | 2008-11-25 12:43:51 +0100 | [diff] [blame] | 1604 | static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base, | 
| Peter Zijlstra | 3781065 | 2008-12-04 11:17:10 +0100 | [diff] [blame] | 1605 | 				struct hrtimer_clock_base *new_base) | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1606 | { | 
 | 1607 | 	struct hrtimer *timer; | 
 | 1608 | 	struct rb_node *node; | 
 | 1609 |  | 
 | 1610 | 	while ((node = rb_first(&old_base->active))) { | 
 | 1611 | 		timer = rb_entry(node, struct hrtimer, node); | 
| Thomas Gleixner | 54cdfdb | 2007-02-16 01:28:11 -0800 | [diff] [blame] | 1612 | 		BUG_ON(hrtimer_callback_running(timer)); | 
| Xiao Guangrong | c6a2a17 | 2009-08-10 10:51:23 +0800 | [diff] [blame] | 1613 | 		debug_deactivate(timer); | 
| Thomas Gleixner | b00c1a9 | 2008-09-29 15:44:46 +0200 | [diff] [blame] | 1614 |  | 
 | 1615 | 		/* | 
 | 1616 | 		 * Mark it as STATE_MIGRATE, not INACTIVE, otherwise the | 
 | 1617 | 		 * timer could be seen as !active and just vanish away | 
 | 1618 | 		 * under us on another CPU | 
 | 1619 | 		 */ | 
 | 1620 | 		__remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0); | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1621 | 		timer->base = new_base; | 
| Thomas Gleixner | 54cdfdb | 2007-02-16 01:28:11 -0800 | [diff] [blame] | 1622 | 		/* | 
| Thomas Gleixner | e3f1d88 | 2009-01-05 11:28:23 +0100 | [diff] [blame] | 1623 | 		 * Enqueue the timers on the new cpu. This does not | 
 | 1624 | 		 * reprogram the event device in case the timer | 
 | 1625 | 		 * expires before the earliest on this CPU, but we run | 
 | 1626 | 		 * hrtimer_interrupt after we migrated everything to | 
 | 1627 | 		 * sort out already expired timers and reprogram the | 
 | 1628 | 		 * event device. | 
| Thomas Gleixner | 54cdfdb | 2007-02-16 01:28:11 -0800 | [diff] [blame] | 1629 | 		 */ | 
| Peter Zijlstra | a6037b6 | 2009-01-05 11:28:22 +0100 | [diff] [blame] | 1630 | 		enqueue_hrtimer(timer, new_base); | 
| Thomas Gleixner | 41e1022 | 2008-09-29 14:09:39 +0200 | [diff] [blame] | 1631 |  | 
| Thomas Gleixner | b00c1a9 | 2008-09-29 15:44:46 +0200 | [diff] [blame] | 1632 | 		/* Clear the migration state bit */ | 
 | 1633 | 		timer->state &= ~HRTIMER_STATE_MIGRATE; | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1634 | 	} | 
 | 1635 | } | 
 | 1636 |  | 
| Thomas Gleixner | d5fd43c | 2009-01-05 11:28:20 +0100 | [diff] [blame] | 1637 | static void migrate_hrtimers(int scpu) | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1638 | { | 
| Thomas Gleixner | 3c8aa39 | 2007-02-16 01:27:50 -0800 | [diff] [blame] | 1639 | 	struct hrtimer_cpu_base *old_base, *new_base; | 
| Thomas Gleixner | 731a55b | 2009-01-05 11:28:21 +0100 | [diff] [blame] | 1640 | 	int i; | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1641 |  | 
| Peter Zijlstra | 3781065 | 2008-12-04 11:17:10 +0100 | [diff] [blame] | 1642 | 	BUG_ON(cpu_online(scpu)); | 
| Peter Zijlstra | 3781065 | 2008-12-04 11:17:10 +0100 | [diff] [blame] | 1643 | 	tick_cancel_sched_timer(scpu); | 
| Thomas Gleixner | 731a55b | 2009-01-05 11:28:21 +0100 | [diff] [blame] | 1644 |  | 
 | 1645 | 	local_irq_disable(); | 
 | 1646 | 	old_base = &per_cpu(hrtimer_bases, scpu); | 
 | 1647 | 	new_base = &__get_cpu_var(hrtimer_bases); | 
| Oleg Nesterov | d82f0b0 | 2008-08-20 16:46:04 -0700 | [diff] [blame] | 1648 | 	/* | 
 | 1649 | 	 * The caller is globally serialized and nobody else | 
 | 1650 | 	 * takes two locks at once, so deadlock is not possible. | 
 | 1651 | 	 */ | 
| Thomas Gleixner | 731a55b | 2009-01-05 11:28:21 +0100 | [diff] [blame] | 1652 | 	spin_lock(&new_base->lock); | 
| Oleg Nesterov | 8e60e05 | 2008-04-04 20:54:10 +0200 | [diff] [blame] | 1653 | 	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1654 |  | 
| Thomas Gleixner | 3c8aa39 | 2007-02-16 01:27:50 -0800 | [diff] [blame] | 1655 | 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { | 
| Peter Zijlstra | ca10949 | 2008-11-25 12:43:51 +0100 | [diff] [blame] | 1656 | 		migrate_hrtimer_list(&old_base->clock_base[i], | 
| Peter Zijlstra | 3781065 | 2008-12-04 11:17:10 +0100 | [diff] [blame] | 1657 | 				     &new_base->clock_base[i]); | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1658 | 	} | 
 | 1659 |  | 
| Oleg Nesterov | 8e60e05 | 2008-04-04 20:54:10 +0200 | [diff] [blame] | 1660 | 	spin_unlock(&old_base->lock); | 
| Thomas Gleixner | 731a55b | 2009-01-05 11:28:21 +0100 | [diff] [blame] | 1661 | 	spin_unlock(&new_base->lock); | 
| Peter Zijlstra | 3781065 | 2008-12-04 11:17:10 +0100 | [diff] [blame] | 1662 |  | 
| Thomas Gleixner | 731a55b | 2009-01-05 11:28:21 +0100 | [diff] [blame] | 1663 | 	/* Check if we got expired work to do */ | 
 | 1664 | 	__hrtimer_peek_ahead_timers(); | 
 | 1665 | 	local_irq_enable(); | 
| Peter Zijlstra | 3781065 | 2008-12-04 11:17:10 +0100 | [diff] [blame] | 1666 | } | 
 | 1667 |  | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1668 | #endif /* CONFIG_HOTPLUG_CPU */ | 
 | 1669 |  | 
| Chandra Seetharaman | 8c78f30 | 2006-07-30 03:03:35 -0700 | [diff] [blame] | 1670 | static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self, | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1671 | 					unsigned long action, void *hcpu) | 
 | 1672 | { | 
| Ingo Molnar | b2e3c0a | 2008-12-19 00:48:27 +0100 | [diff] [blame] | 1673 | 	int scpu = (long)hcpu; | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1674 |  | 
 | 1675 | 	switch (action) { | 
 | 1676 |  | 
 | 1677 | 	case CPU_UP_PREPARE: | 
| Rafael J. Wysocki | 8bb7844 | 2007-05-09 02:35:10 -0700 | [diff] [blame] | 1678 | 	case CPU_UP_PREPARE_FROZEN: | 
| Peter Zijlstra | 3781065 | 2008-12-04 11:17:10 +0100 | [diff] [blame] | 1679 | 		init_hrtimers_cpu(scpu); | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1680 | 		break; | 
 | 1681 |  | 
 | 1682 | #ifdef CONFIG_HOTPLUG_CPU | 
| Sebastien Dugue | 94df7de | 2008-12-01 14:09:07 +0100 | [diff] [blame] | 1683 | 	case CPU_DYING: | 
 | 1684 | 	case CPU_DYING_FROZEN: | 
 | 1685 | 		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DYING, &scpu); | 
 | 1686 | 		break; | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1687 | 	case CPU_DEAD: | 
| Rafael J. Wysocki | 8bb7844 | 2007-05-09 02:35:10 -0700 | [diff] [blame] | 1688 | 	case CPU_DEAD_FROZEN: | 
| Ingo Molnar | b2e3c0a | 2008-12-19 00:48:27 +0100 | [diff] [blame] | 1689 | 	{ | 
| Peter Zijlstra | 3781065 | 2008-12-04 11:17:10 +0100 | [diff] [blame] | 1690 | 		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &scpu); | 
| Thomas Gleixner | d5fd43c | 2009-01-05 11:28:20 +0100 | [diff] [blame] | 1691 | 		migrate_hrtimers(scpu); | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1692 | 		break; | 
| Ingo Molnar | b2e3c0a | 2008-12-19 00:48:27 +0100 | [diff] [blame] | 1693 | 	} | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1694 | #endif | 
 | 1695 |  | 
 | 1696 | 	default: | 
 | 1697 | 		break; | 
 | 1698 | 	} | 
 | 1699 |  | 
 | 1700 | 	return NOTIFY_OK; | 
 | 1701 | } | 
 | 1702 |  | 
| Chandra Seetharaman | 8c78f30 | 2006-07-30 03:03:35 -0700 | [diff] [blame] | 1703 | static struct notifier_block __cpuinitdata hrtimers_nb = { | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1704 | 	.notifier_call = hrtimer_cpu_notify, | 
 | 1705 | }; | 
 | 1706 |  | 
 | 1707 | void __init hrtimers_init(void) | 
 | 1708 | { | 
 | 1709 | 	hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE, | 
 | 1710 | 			  (void *)(long)smp_processor_id()); | 
 | 1711 | 	register_cpu_notifier(&hrtimers_nb); | 
| Peter Zijlstra | a6037b6 | 2009-01-05 11:28:22 +0100 | [diff] [blame] | 1712 | #ifdef CONFIG_HIGH_RES_TIMERS | 
 | 1713 | 	open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq); | 
 | 1714 | #endif | 
| Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1715 | } | 
 | 1716 |  | 
| Arjan van de Ven | 7bb6743 | 2008-08-31 08:05:58 -0700 | [diff] [blame] | 1717 | /** | 
| Arjan van de Ven | 654c8e0 | 2008-09-01 15:47:08 -0700 | [diff] [blame] | 1718 |  * schedule_hrtimeout_range - sleep until timeout | 
 | 1719 |  * @expires:	timeout value (ktime_t) | 
 | 1720 |  * @delta:	slack in expires timeout (ktime_t) | 
 | 1721 |  * @mode:	timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL | 
 | 1722 |  * | 
 | 1723 |  * Make the current task sleep until the given expiry time has | 
 | 1724 |  * elapsed. The routine will return immediately unless | 
 | 1725 |  * the current task state has been set (see set_current_state()). | 
 | 1726 |  * | 
 | 1727 |  * The @delta argument gives the kernel the freedom to schedule the | 
 | 1728 |  * actual wakeup to a time that is both power and performance friendly. | 
 | 1729 |  * The kernel gives normal best-effort behavior for "@expires+@delta", | 
 | 1730 |  * and may decide to fire the timer earlier, but no earlier than @expires. | 
 | 1731 |  * | 
 | 1732 |  * You can set the task state as follows - | 
 | 1733 |  * | 
 | 1734 |  * %TASK_UNINTERRUPTIBLE - at least the @expires time is guaranteed to | 
 | 1735 |  * pass before the routine returns. | 
 | 1736 |  * | 
 | 1737 |  * %TASK_INTERRUPTIBLE - the routine may return early if a signal is | 
 | 1738 |  * delivered to the current task. | 
 | 1739 |  * | 
 | 1740 |  * The current task state is guaranteed to be TASK_RUNNING when this | 
 | 1741 |  * routine returns. | 
 | 1742 |  * | 
 | 1743 |  * Returns 0 when the timer has expired, otherwise -EINTR | 
 | 1744 |  */ | 
 | 1745 | int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta, | 
 | 1746 | 			       const enum hrtimer_mode mode) | 
 | 1747 | { | 
 | 1748 | 	struct hrtimer_sleeper t; | 
 | 1749 |  | 
 | 1750 | 	/* | 
 | 1751 | 	 * Optimize when a zero timeout value is given. It does not | 
 | 1752 | 	 * matter whether this is an absolute or a relative time. | 
 | 1753 | 	 */ | 
 | 1754 | 	if (expires && !expires->tv64) { | 
 | 1755 | 		__set_current_state(TASK_RUNNING); | 
 | 1756 | 		return 0; | 
 | 1757 | 	} | 
 | 1758 |  | 
 | 1759 | 	/* | 
 | 1760 | 	 * A NULL parameter means "infinite" | 
 | 1761 | 	 */ | 
 | 1762 | 	if (!expires) { | 
 | 1763 | 		schedule(); | 
 | 1764 | 		__set_current_state(TASK_RUNNING); | 
 | 1765 | 		return -EINTR; | 
 | 1766 | 	} | 
 | 1767 |  | 
 | 1768 | 	hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, mode); | 
 | 1769 | 	hrtimer_set_expires_range_ns(&t.timer, *expires, delta); | 
 | 1770 |  | 
 | 1771 | 	hrtimer_init_sleeper(&t, current); | 
 | 1772 |  | 
 | 1773 | 	hrtimer_start_expires(&t.timer, mode); | 
 | 1774 | 	if (!hrtimer_active(&t.timer)) | 
 | 1775 | 		t.task = NULL; | 
 | 1776 |  | 
 | 1777 | 	if (likely(t.task)) | 
 | 1778 | 		schedule(); | 
 | 1779 |  | 
 | 1780 | 	hrtimer_cancel(&t.timer); | 
 | 1781 | 	destroy_hrtimer_on_stack(&t.timer); | 
 | 1782 |  | 
 | 1783 | 	__set_current_state(TASK_RUNNING); | 
 | 1784 |  | 
 | 1785 | 	return !t.task ? 0 : -EINTR; | 
 | 1786 | } | 
 | 1787 | EXPORT_SYMBOL_GPL(schedule_hrtimeout_range); | 
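/*
 * Usage sketch (illustrative only): as documented above, the task state
 * must be set before the call, otherwise it returns immediately. The
 * demo_* name is hypothetical.
 */
#if 0	/* example only */
static int demo_wait(void)
{
	ktime_t timeout = ktime_set(1, 0);	/* 1 second */

	set_current_state(TASK_INTERRUPTIBLE);
	/* -EINTR here means a signal woke us before the timeout */
	return schedule_hrtimeout_range(&timeout, 100 * NSEC_PER_USEC,
					HRTIMER_MODE_REL);
}
#endif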
 | 1788 |  | 
 | 1789 | /** | 
| Arjan van de Ven | 7bb6743 | 2008-08-31 08:05:58 -0700 | [diff] [blame] | 1790 |  * schedule_hrtimeout - sleep until timeout | 
 | 1791 |  * @expires:	timeout value (ktime_t) | 
 | 1792 |  * @mode:	timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL | 
 | 1793 |  * | 
 | 1794 |  * Make the current task sleep until the given expiry time has | 
 | 1795 |  * elapsed. The routine will return immediately unless | 
 | 1796 |  * the current task state has been set (see set_current_state()). | 
 | 1797 |  * | 
 | 1798 |  * You can set the task state as follows - | 
 | 1799 |  * | 
 | 1800 |  * %TASK_UNINTERRUPTIBLE - at least the @expires time is guaranteed to | 
 | 1801 |  * pass before the routine returns. | 
 | 1802 |  * | 
 | 1803 |  * %TASK_INTERRUPTIBLE - the routine may return early if a signal is | 
 | 1804 |  * delivered to the current task. | 
 | 1805 |  * | 
 | 1806 |  * The current task state is guaranteed to be TASK_RUNNING when this | 
 | 1807 |  * routine returns. | 
 | 1808 |  * | 
 | 1809 |  * Returns 0 when the timer has expired, otherwise -EINTR | 
 | 1810 |  */ | 
 | 1811 | int __sched schedule_hrtimeout(ktime_t *expires, | 
 | 1812 | 			       const enum hrtimer_mode mode) | 
 | 1813 | { | 
| Arjan van de Ven | 654c8e0 | 2008-09-01 15:47:08 -0700 | [diff] [blame] | 1814 | 	return schedule_hrtimeout_range(expires, 0, mode); | 
| Arjan van de Ven | 7bb6743 | 2008-08-31 08:05:58 -0700 | [diff] [blame] | 1815 | } | 
 | 1816 | EXPORT_SYMBOL_GPL(schedule_hrtimeout); |