/*
 * linux/kernel/timer.c
 *
 * Kernel internal timers, basic process system calls
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better.
 *
 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
 *            "A Kernel Model for Precision Timekeeping" by Dave Mills
 * 1998-12-24 Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *            serialize accesses to xtime/lost_ticks).
 *            Copyright (C) 1998 Andrea Arcangeli
 * 1999-03-10 Improved NTP compatibility by Ulrich Windl
 * 2002-05-31 Move sys_sysinfo here and make its locking sane, Robert Love
 * 2000-10-05 Implemented scalable SMP per-CPU timer handling.
 *            Copyright (C) 2000, 2001, 2002 Ingo Molnar
 *            Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */

#include <linux/kernel_stat.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>
#include <linux/irq_work.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/timer.h>

u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)
#define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1))

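/*
 * Illustrative sketch, assuming the non-CONFIG_BASE_SMALL defaults
 * (TVR_BITS = 8, TVN_BITS = 6): tv1 holds timers due within the next
 * 256 jiffies at single-jiffy resolution, tv2 covers deltas up to
 * 1 << 14 jiffies, tv3 up to 1 << 20, tv4 up to 1 << 26, and tv5 up to
 * MAX_TVAL = (1 << 32) - 1, each level with coarser granularity.
 * internal_add_timer() below picks the level from the distance between
 * timer->expires and base->timer_jiffies.
 */
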
struct tvec {
	struct list_head vec[TVN_SIZE];
};

struct tvec_root {
	struct list_head vec[TVR_SIZE];
};

struct tvec_base {
	spinlock_t lock;
	struct timer_list *running_timer;
	unsigned long timer_jiffies;
	unsigned long next_timer;
	struct tvec_root tv1;
	struct tvec tv2;
	struct tvec tv3;
	struct tvec tv4;
	struct tvec tv5;
} ____cacheline_aligned;

struct tvec_base boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);
static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases;

/* Functions below help us manage 'deferrable' flag */
static inline unsigned int tbase_get_deferrable(struct tvec_base *base)
{
	return ((unsigned int)(unsigned long)base & TBASE_DEFERRABLE_FLAG);
}

static inline struct tvec_base *tbase_get_base(struct tvec_base *base)
{
	return ((struct tvec_base *)((unsigned long)base & ~TBASE_DEFERRABLE_FLAG));
}

static inline void timer_set_deferrable(struct timer_list *timer)
{
	timer->base = TBASE_MAKE_DEFERRED(timer->base);
}

static inline void
timer_set_base(struct timer_list *timer, struct tvec_base *new_base)
{
	timer->base = (struct tvec_base *)((unsigned long)(new_base) |
				      tbase_get_deferrable(timer->base));
}

static unsigned long round_jiffies_common(unsigned long j, int cpu,
		bool force_up)
{
	int rem;
	unsigned long original = j;

	/*
	 * We don't want all cpus firing their timers at once hitting the
	 * same lock or cachelines, so we skew each extra cpu with an extra
	 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
	 * already did this.
	 * The skew is done by adding 3*cpunr, then round, then subtract this
	 * extra offset again.
	 */
	j += cpu * 3;

	rem = j % HZ;

	/*
	 * If the target jiffie is just after a whole second (which can happen
	 * due to delays of the timer irq, long irq off times etc etc) then
	 * we should round down to the whole second, not up. Use 1/4th second
	 * as cutoff for this rounding as an extreme upper bound for this.
	 * But never round down if @force_up is set.
	 */
	if (rem < HZ/4 && !force_up) /* round down */
		j = j - rem;
	else /* round up */
		j = j - rem + HZ;

	/* now that we have rounded, subtract the extra skew again */
	j -= cpu * 3;

	/*
	 * Make sure j is still in the future. Otherwise return the
	 * unmodified value.
	 */
	return time_is_after_jiffies(j) ? j : original;
}

/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, false);
}
EXPORT_SYMBOL_GPL(__round_jiffies);

/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, false) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);

/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), false);
}
EXPORT_SYMBOL_GPL(round_jiffies);

/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
	return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);
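/*
 * Example usage (an illustrative sketch; "mydev" and its poll_timer are
 * hypothetical): a housekeeping timer that only needs roughly ten-second
 * periods can be armed with
 *
 *	mod_timer(&mydev->poll_timer, round_jiffies(jiffies + 10 * HZ));
 *
 * so that it expires on the same whole-second boundary as other rounded
 * timers and an otherwise idle CPU wakes up once instead of many times.
 */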

/**
 * __round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, true);
}
EXPORT_SYMBOL_GPL(__round_jiffies_up);

/**
 * __round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, true) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);

/**
 * round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * This is the same as round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), true);
}
EXPORT_SYMBOL_GPL(round_jiffies_up);

/**
 * round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * This is the same as round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up_relative(unsigned long j)
{
	return __round_jiffies_up_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_up_relative);

/**
 * set_timer_slack - set the allowed slack for a timer
 * @timer: the timer to be modified
 * @slack_hz: the amount of time (in jiffies) allowed for rounding
 *
 * Set the amount of time, in jiffies, that a certain timer has
 * in terms of slack. By setting this value, the timer subsystem
 * will schedule the actual timer somewhere between
 * the time mod_timer() asks for, and that time plus the slack.
 *
 * By setting the slack to -1, a percentage of the delay is used
 * instead.
 */
void set_timer_slack(struct timer_list *timer, int slack_hz)
{
	timer->slack = slack_hz;
}
EXPORT_SYMBOL_GPL(set_timer_slack);
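/*
 * Example usage (an illustrative sketch; "wdev" and the one-second period
 * are hypothetical): a timer that may fire anywhere within HZ/4 jiffies
 * after the requested expiry, letting the wheel batch it with neighbours:
 *
 *	set_timer_slack(&wdev->refresh_timer, HZ / 4);
 *	mod_timer(&wdev->refresh_timer, jiffies + HZ);
 *
 * With the default slack of -1, apply_slack() below grants roughly 0.4%
 * of the remaining delay instead.
 */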

static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
	unsigned long expires = timer->expires;
	unsigned long idx = expires - base->timer_jiffies;
	struct list_head *vec;

	if (idx < TVR_SIZE) {
		int i = expires & TVR_MASK;
		vec = base->tv1.vec + i;
	} else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
		int i = (expires >> TVR_BITS) & TVN_MASK;
		vec = base->tv2.vec + i;
	} else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
		vec = base->tv3.vec + i;
	} else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
		vec = base->tv4.vec + i;
	} else if ((signed long) idx < 0) {
		/*
		 * Can happen if you add a timer with expires == jiffies,
		 * or you set a timer to go off in the past
		 */
		vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
	} else {
		int i;
		/* If the timeout is larger than MAX_TVAL (on 64-bit
		 * architectures or with CONFIG_BASE_SMALL=1) then we
		 * use the maximum timeout.
		 */
		if (idx > MAX_TVAL) {
			idx = MAX_TVAL;
			expires = idx + base->timer_jiffies;
		}
		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
		vec = base->tv5.vec + i;
	}
	/*
	 * Timers are FIFO:
	 */
	list_add_tail(&timer->entry, vec);
}

#ifdef CONFIG_TIMER_STATS
void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
{
	if (timer->start_site)
		return;

	timer->start_site = addr;
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
}

static void timer_stats_account_timer(struct timer_list *timer)
{
	unsigned int flag = 0;

	if (likely(!timer->start_site))
		return;
	if (unlikely(tbase_get_deferrable(timer->base)))
		flag |= TIMER_STATS_FLAG_DEFERRABLE;

	timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
				 timer->function, timer->start_comm, flag);
}

#else
static void timer_stats_account_timer(struct timer_list *timer) {}
#endif

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr timer_debug_descr;

static void *timer_debug_hint(void *addr)
{
	return ((struct timer_list *) addr)->function;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int timer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_init(timer, &timer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/* Stub timer callback for improperly used timers. */
static void stub_timer(unsigned long data)
{
	WARN_ON(1);
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int timer_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The timer was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (timer->entry.next == NULL &&
		    timer->entry.prev == TIMER_ENTRY_STATIC) {
			debug_object_init(timer, &timer_debug_descr);
			debug_object_activate(timer, &timer_debug_descr);
			return 0;
		} else {
			setup_timer(timer, stub_timer, 0);
			return 1;
		}
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int timer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_free(timer, &timer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_assert_init is called when:
 * - an untracked/uninit-ed object is found
 */
static int timer_fixup_assert_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		if (timer->entry.prev == TIMER_ENTRY_STATIC) {
			/*
			 * This is not really a fixup. The timer was
			 * statically initialized. We just make sure that it
			 * is tracked in the object tracker.
			 */
			debug_object_init(timer, &timer_debug_descr);
			return 0;
		} else {
			setup_timer(timer, stub_timer, 0);
			return 1;
		}
	default:
		return 0;
	}
}

static struct debug_obj_descr timer_debug_descr = {
	.name			= "timer_list",
	.debug_hint		= timer_debug_hint,
	.fixup_init		= timer_fixup_init,
	.fixup_activate		= timer_fixup_activate,
	.fixup_free		= timer_fixup_free,
	.fixup_assert_init	= timer_fixup_assert_init,
};

static inline void debug_timer_init(struct timer_list *timer)
{
	debug_object_init(timer, &timer_debug_descr);
}

static inline void debug_timer_activate(struct timer_list *timer)
{
	debug_object_activate(timer, &timer_debug_descr);
}

static inline void debug_timer_deactivate(struct timer_list *timer)
{
	debug_object_deactivate(timer, &timer_debug_descr);
}

static inline void debug_timer_free(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}

static inline void debug_timer_assert_init(struct timer_list *timer)
{
	debug_object_assert_init(timer, &timer_debug_descr);
}

static void __init_timer(struct timer_list *timer,
			 const char *name,
			 struct lock_class_key *key);

void init_timer_on_stack_key(struct timer_list *timer,
			     const char *name,
			     struct lock_class_key *key)
{
	debug_object_init_on_stack(timer, &timer_debug_descr);
	__init_timer(timer, name, key);
}
EXPORT_SYMBOL_GPL(init_timer_on_stack_key);

void destroy_timer_on_stack(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_timer_on_stack);

#else
static inline void debug_timer_init(struct timer_list *timer) { }
static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
static inline void debug_timer_assert_init(struct timer_list *timer) { }
#endif

static inline void debug_init(struct timer_list *timer)
{
	debug_timer_init(timer);
	trace_timer_init(timer);
}

static inline void
debug_activate(struct timer_list *timer, unsigned long expires)
{
	debug_timer_activate(timer);
	trace_timer_start(timer, expires,
			tbase_get_deferrable(timer->base) > 0 ? 'y' : 'n');
}

static inline void debug_deactivate(struct timer_list *timer)
{
	debug_timer_deactivate(timer);
	trace_timer_cancel(timer);
}

static inline void debug_assert_init(struct timer_list *timer)
{
	debug_timer_assert_init(timer);
}

static void __init_timer(struct timer_list *timer,
			 const char *name,
			 struct lock_class_key *key)
{
	timer->entry.next = NULL;
	timer->base = __raw_get_cpu_var(tvec_bases);
	timer->slack = -1;
#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
	lockdep_init_map(&timer->lockdep_map, name, key, 0);
}

void setup_deferrable_timer_on_stack_key(struct timer_list *timer,
					 const char *name,
					 struct lock_class_key *key,
					 void (*function)(unsigned long),
					 unsigned long data)
{
	timer->function = function;
	timer->data = data;
	init_timer_on_stack_key(timer, name, key);
	timer_set_deferrable(timer);
}
EXPORT_SYMBOL_GPL(setup_deferrable_timer_on_stack_key);

/**
 * init_timer_key - initialize a timer
 * @timer: the timer to be initialized
 * @name: name of the timer
 * @key: lockdep class key of the fake lock used for tracking timer
 *       sync lock dependencies
 *
 * init_timer_key() must be done to a timer prior calling *any* of the
 * other timer functions.
 */
void init_timer_key(struct timer_list *timer,
		    const char *name,
		    struct lock_class_key *key)
{
	debug_init(timer);
	__init_timer(timer, name, key);
}
EXPORT_SYMBOL(init_timer_key);
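/*
 * Example usage (an illustrative sketch; "my_timer", "my_func" and
 * "my_data" are hypothetical): most callers reach init_timer_key()
 * through the init_timer()/setup_timer() wrappers in <linux/timer.h>:
 *
 *	struct timer_list my_timer;
 *
 *	setup_timer(&my_timer, my_func, (unsigned long)my_data);
 *
 * which sets up the base, slack and lockdep state before the timer is
 * armed with mod_timer() or add_timer().
 */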

void init_timer_deferrable_key(struct timer_list *timer,
			       const char *name,
			       struct lock_class_key *key)
{
	init_timer_key(timer, name, key);
	timer_set_deferrable(timer);
}
EXPORT_SYMBOL(init_timer_deferrable_key);

static inline void detach_timer(struct timer_list *timer,
				int clear_pending)
{
	struct list_head *entry = &timer->entry;

	debug_deactivate(timer);

	__list_del(entry->prev, entry->next);
	if (clear_pending)
		entry->next = NULL;
	entry->prev = LIST_POISON2;
}

/*
 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static struct tvec_base *lock_timer_base(struct timer_list *timer,
					unsigned long *flags)
	__acquires(timer->base->lock)
{
	struct tvec_base *base;

	for (;;) {
		struct tvec_base *prelock_base = timer->base;
		base = tbase_get_base(prelock_base);
		if (likely(base != NULL)) {
			spin_lock_irqsave(&base->lock, *flags);
			if (likely(prelock_base == timer->base))
				return base;
			/* The timer has migrated to another CPU */
			spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();
	}
}

static inline int
__mod_timer(struct timer_list *timer, unsigned long expires,
						bool pending_only, int pinned)
{
	struct tvec_base *base, *new_base;
	unsigned long flags;
	int ret = 0 , cpu;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(!timer->function);

	base = lock_timer_base(timer, &flags);

	if (timer_pending(timer)) {
		detach_timer(timer, 0);
		if (timer->expires == base->next_timer &&
		    !tbase_get_deferrable(timer->base))
			base->next_timer = base->timer_jiffies;
		ret = 1;
	} else {
		if (pending_only)
			goto out_unlock;
	}

	debug_activate(timer, expires);

	cpu = smp_processor_id();

#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
	if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu))
		cpu = get_nohz_timer_target();
#endif
	new_base = per_cpu(tvec_bases, cpu);

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the local CPU.
		 * However we can't change timer's base while it is running,
		 * otherwise del_timer_sync() can't detect that the timer's
		 * handler yet has not finished. This also guarantees that
		 * the timer is serialized wrt itself.
		 */
		if (likely(base->running_timer != timer)) {
			/* See the comment in lock_timer_base() */
			timer_set_base(timer, NULL);
			spin_unlock(&base->lock);
			base = new_base;
			spin_lock(&base->lock);
			timer_set_base(timer, base);
		}
	}

	timer->expires = expires;
	if (time_before(timer->expires, base->next_timer) &&
	    !tbase_get_deferrable(timer->base))
		base->next_timer = timer->expires;
	internal_add_timer(base, timer);

out_unlock:
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}

/**
 * mod_timer_pending - modify a pending timer's timeout
 * @timer: the pending timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pending() is the same for pending timers as mod_timer(),
 * but will not re-activate and modify already deleted timers.
 *
 * It is useful for unserialized use of timers.
 */
int mod_timer_pending(struct timer_list *timer, unsigned long expires)
{
	return __mod_timer(timer, expires, true, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer_pending);

/*
 * Decide where to put the timer while taking the slack into account
 *
 * Algorithm:
 *   1) calculate the maximum (absolute) time
 *   2) calculate the highest bit where the expires and new max are different
 *   3) use this bit to make a mask
 *   4) use the bitmask to round down the maximum time, so that all last
 *      bits are zeros
 */
static inline
unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
{
	unsigned long expires_limit, mask;
	int bit;

	if (timer->slack >= 0) {
		expires_limit = expires + timer->slack;
	} else {
		long delta = expires - jiffies;

		if (delta < 256)
			return expires;

		expires_limit = expires + delta / 256;
	}
	mask = expires ^ expires_limit;
	if (mask == 0)
		return expires;

	bit = __fls(mask);

	mask = (1UL << bit) - 1;

	expires_limit = expires_limit & ~(mask);

	return expires_limit;
}

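/*
 * Worked example (illustrative, with made-up numbers): for a timer due in
 * 1000 jiffies with the default slack of -1, the allowance is
 * 1000 / 256 = 3 jiffies, so expires_limit = expires + 3. All bits below
 * the highest bit in which expires and expires_limit differ are then
 * cleared from expires_limit, which picks a coarser-aligned expiry inside
 * (expires, expires + 3] and lets nearby timers share a wheel bucket.
 */
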
/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	expires = apply_slack(timer, expires);

	/*
	 * This is a common optimization triggered by the
	 * networking code - if the timer is re-modified
	 * to be the same thing then just return:
	 */
	if (timer_pending(timer) && timer->expires == expires)
		return 1;

	return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer);

/**
 * mod_timer_pinned - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pinned() is a way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 * and not allow the timer to be migrated to a different CPU.
 *
 * mod_timer_pinned(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 */
int mod_timer_pinned(struct timer_list *timer, unsigned long expires)
{
	if (timer->expires == expires && timer_pending(timer))
		return 1;

	return __mod_timer(timer, expires, false, TIMER_PINNED);
}
EXPORT_SYMBOL(mod_timer_pinned);

/**
 * add_timer - start a timer
 * @timer: the timer to be added
 *
 * The kernel will do a ->function(->data) callback from the
 * timer interrupt at the ->expires point in the future. The
 * current time is 'jiffies'.
 *
 * The timer's ->expires, ->function (and if the handler uses it, ->data)
 * fields must be set prior calling this function.
 *
 * Timers with an ->expires field in the past will be executed in the next
 * timer tick.
 */
void add_timer(struct timer_list *timer)
{
	BUG_ON(timer_pending(timer));
	mod_timer(timer, timer->expires);
}
EXPORT_SYMBOL(add_timer);
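/*
 * Example usage (an illustrative sketch; "my_timer" and "my_callback" are
 * hypothetical):
 *
 *	setup_timer(&my_timer, my_callback, 0);
 *	my_timer.expires = jiffies + 2 * HZ;
 *	add_timer(&my_timer);
 *
 * Re-arming a timer that may already be pending must go through
 * mod_timer() instead:
 *
 *	mod_timer(&my_timer, jiffies + 2 * HZ);
 */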

/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
	struct tvec_base *base = per_cpu(tvec_bases, cpu);
	unsigned long flags;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(timer_pending(timer) || !timer->function);
	spin_lock_irqsave(&base->lock, flags);
	timer_set_base(timer, base);
	debug_activate(timer, timer->expires);
	if (time_before(timer->expires, base->next_timer) &&
	    !tbase_get_deferrable(timer->base))
		base->next_timer = timer->expires;
	internal_add_timer(base, timer);
	/*
	 * Check whether the other CPU is idle and needs to be
	 * triggered to reevaluate the timer wheel when nohz is
	 * active. We are protected against the other CPU fiddling
	 * with the timer by holding the timer base lock. This also
	 * makes sure that a CPU on the way to idle can not evaluate
	 * the timer wheel.
	 */
	wake_up_idle_cpu(cpu);
	spin_unlock_irqrestore(&base->lock, flags);
}
EXPORT_SYMBOL_GPL(add_timer_on);

/**
 * del_timer - deactive a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
	struct tvec_base *base;
	unsigned long flags;
	int ret = 0;

	debug_assert_init(timer);

	timer_stats_timer_clear_start_info(timer);
	if (timer_pending(timer)) {
		base = lock_timer_base(timer, &flags);
		if (timer_pending(timer)) {
			detach_timer(timer, 1);
			if (timer->expires == base->next_timer &&
			    !tbase_get_deferrable(timer->base))
				base->next_timer = base->timer_jiffies;
			ret = 1;
		}
		spin_unlock_irqrestore(&base->lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL(del_timer);

/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: timer do del
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
	struct tvec_base *base;
	unsigned long flags;
	int ret = -1;

	debug_assert_init(timer);

	base = lock_timer_base(timer, &flags);

	if (base->running_timer == timer)
		goto out;

	timer_stats_timer_clear_start_info(timer);
	ret = 0;
	if (timer_pending(timer)) {
		detach_timer(timer, 1);
		if (timer->expires == base->next_timer &&
		    !tbase_get_deferrable(timer->base))
			base->next_timer = base->timer_jiffies;
		ret = 1;
	}
out:
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}
EXPORT_SYMBOL(try_to_del_timer_sync);

#ifdef CONFIG_SMP
/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts. The caller must not hold locks which would prevent
 * completion of the timer's handler. The timer's handler must not call
 * add_timer_on(). Upon exit the timer is not queued and the handler is
 * not running on any CPU.
 *
 * Note: You must not hold locks that are held in interrupt context
 *   while calling this function. Even if the lock has nothing to do
 *   with the timer in question. Here's why:
 *
 *    CPU0                             CPU1
 *    ----                             ----
 *                                     <SOFTIRQ>
 *                                     call_timer_fn();
 *                                       base->running_timer = mytimer;
 *    spin_lock_irq(somelock);
 *                                     <IRQ>
 *                                        spin_lock(somelock);
 *    del_timer_sync(mytimer);
 *     while (base->running_timer == mytimer);
 *
 * Now del_timer_sync() will never return and never release somelock.
 * The interrupt on the other CPU is waiting to grab somelock but
 * it has interrupted the softirq that CPU0 is waiting to finish.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
#ifdef CONFIG_LOCKDEP
	unsigned long flags;

	/*
	 * If lockdep gives a backtrace here, please reference
	 * the synchronization rules above.
	 */
	local_irq_save(flags);
	lock_map_acquire(&timer->lockdep_map);
	lock_map_release(&timer->lockdep_map);
	local_irq_restore(flags);
#endif
	/*
	 * don't use it in hardirq context, because it
	 * could lead to deadlock.
	 */
	WARN_ON(in_irq());
	for (;;) {
		int ret = try_to_del_timer_sync(timer);
		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}
EXPORT_SYMBOL(del_timer_sync);
#endif
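/*
 * Example teardown pattern (an illustrative sketch; "mydev" and its
 * poll_timer are hypothetical): before freeing an object whose timer may
 * still be pending or running, a driver's remove path typically does
 *
 *	del_timer_sync(&mydev->poll_timer);
 *	kfree(mydev);
 *
 * observing the synchronization rules above, i.e. without holding any
 * lock that the timer callback itself might take.
 */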
1076
Pavel Macheka6fa8e52008-01-30 13:30:00 +01001077static int cascade(struct tvec_base *base, struct tvec *tv, int index)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001078{
1079 /* cascade all the timers from tv up one level */
Porpoise3439dd82006-06-23 02:05:56 -07001080 struct timer_list *timer, *tmp;
1081 struct list_head tv_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001082
Porpoise3439dd82006-06-23 02:05:56 -07001083 list_replace_init(tv->vec + index, &tv_list);
1084
Linus Torvalds1da177e2005-04-16 15:20:36 -07001085 /*
Porpoise3439dd82006-06-23 02:05:56 -07001086 * We are removing _all_ timers from the list, so we
1087 * don't have to detach them individually.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001088 */
Porpoise3439dd82006-06-23 02:05:56 -07001089 list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
Venki Pallipadi6e453a62007-05-08 00:27:44 -07001090 BUG_ON(tbase_get_base(timer->base) != base);
Porpoise3439dd82006-06-23 02:05:56 -07001091 internal_add_timer(base, timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001092 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001093
1094 return index;
1095}
1096
Thomas Gleixner576da122010-03-12 21:10:29 +01001097static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
1098 unsigned long data)
1099{
1100 int preempt_count = preempt_count();
1101
1102#ifdef CONFIG_LOCKDEP
1103 /*
1104 * It is permissible to free the timer from inside the
1105 * function that is called from it, this we need to take into
1106 * account for lockdep too. To avoid bogus "held lock freed"
1107 * warnings as well as problems when looking into
1108 * timer->lockdep_map, make a copy and use that here.
1109 */
1110 struct lockdep_map lockdep_map = timer->lockdep_map;
1111#endif
1112 /*
1113 * Couple the lock chain with the lock chain at
1114 * del_timer_sync() by acquiring the lock_map around the fn()
1115 * call here and in del_timer_sync().
1116 */
1117 lock_map_acquire(&lockdep_map);
1118
1119 trace_timer_expire_entry(timer);
1120 fn(data);
1121 trace_timer_expire_exit(timer);
1122
1123 lock_map_release(&lockdep_map);
1124
1125 if (preempt_count != preempt_count()) {
Thomas Gleixner802702e2010-03-12 20:13:23 +01001126 WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n",
1127 fn, preempt_count, preempt_count());
1128 /*
1129 * Restore the preempt count. That gives us a decent
1130 * chance to survive and extract information. If the
1131 * callback kept a lock held, bad luck, but not worse
1132 * than the BUG() we had.
1133 */
1134 preempt_count() = preempt_count;
Thomas Gleixner576da122010-03-12 21:10:29 +01001135 }
1136}
1137
Rolf Eike Beer2aae4a12006-09-29 01:59:46 -07001138#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
1139
1140/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001141 * __run_timers - run all expired timers (if any) on this CPU.
1142 * @base: the timer vector to be processed.
1143 *
1144 * This function cascades all vectors and executes all expired timer
1145 * vectors.
1146 */
Pavel Macheka6fa8e52008-01-30 13:30:00 +01001147static inline void __run_timers(struct tvec_base *base)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001148{
1149 struct timer_list *timer;
1150
Oleg Nesterov3691c512006-03-31 02:30:30 -08001151 spin_lock_irq(&base->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001152 while (time_after_eq(jiffies, base->timer_jiffies)) {
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07001153 struct list_head work_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001154 struct list_head *head = &work_list;
Thomas Gleixner68194572007-07-19 01:49:16 -07001155 int index = base->timer_jiffies & TVR_MASK;
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07001156
Linus Torvalds1da177e2005-04-16 15:20:36 -07001157 /*
1158 * Cascade timers:
1159 */
1160 if (!index &&
1161 (!cascade(base, &base->tv2, INDEX(0))) &&
1162 (!cascade(base, &base->tv3, INDEX(1))) &&
1163 !cascade(base, &base->tv4, INDEX(2)))
1164 cascade(base, &base->tv5, INDEX(3));
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07001165 ++base->timer_jiffies;
1166 list_replace_init(base->tv1.vec + index, &work_list);
Oleg Nesterov55c888d2005-06-23 00:08:56 -07001167 while (!list_empty(head)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001168 void (*fn)(unsigned long);
1169 unsigned long data;
1170
Pavel Emelianovb5e61812007-05-08 00:30:19 -07001171 timer = list_first_entry(head, struct timer_list,entry);
Thomas Gleixner68194572007-07-19 01:49:16 -07001172 fn = timer->function;
1173 data = timer->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001174
Ingo Molnar82f67cd2007-02-16 01:28:13 -08001175 timer_stats_account_timer(timer);
1176
Yong Zhang6f1bc452010-10-20 15:57:31 -07001177 base->running_timer = timer;
Oleg Nesterov55c888d2005-06-23 00:08:56 -07001178 detach_timer(timer, 1);
Johannes Berg6f2b9b92009-01-29 16:03:20 +01001179
Oleg Nesterov3691c512006-03-31 02:30:30 -08001180 spin_unlock_irq(&base->lock);
Thomas Gleixner576da122010-03-12 21:10:29 +01001181 call_timer_fn(timer, fn, data);
Oleg Nesterov3691c512006-03-31 02:30:30 -08001182 spin_lock_irq(&base->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001183 }
1184 }
Yong Zhang6f1bc452010-10-20 15:57:31 -07001185 base->running_timer = NULL;
Oleg Nesterov3691c512006-03-31 02:30:30 -08001186 spin_unlock_irq(&base->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001187}
1188
Russell Kingee9c5782008-04-20 13:59:33 +01001189#ifdef CONFIG_NO_HZ
Linus Torvalds1da177e2005-04-16 15:20:36 -07001190/*
1191 * Find out when the next timer event is due to happen. This
Randy Dunlap90cba642009-08-25 14:35:41 -07001192 * is used on S/390 to stop all activity when a CPU is idle.
1193 * This function needs to be called with interrupts disabled.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001194 */
Pavel Macheka6fa8e52008-01-30 13:30:00 +01001195static unsigned long __next_timer_interrupt(struct tvec_base *base)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001196{
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001197 unsigned long timer_jiffies = base->timer_jiffies;
Thomas Gleixnereaad0842007-05-29 23:47:39 +02001198 unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001199 int index, slot, array, found = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001200 struct timer_list *nte;
Pavel Macheka6fa8e52008-01-30 13:30:00 +01001201 struct tvec *varray[4];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001202
1203 /* Look for timer events in tv1. */
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001204 index = slot = timer_jiffies & TVR_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001205 do {
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001206 list_for_each_entry(nte, base->tv1.vec + slot, entry) {
Thomas Gleixner68194572007-07-19 01:49:16 -07001207 if (tbase_get_deferrable(nte->base))
1208 continue;
Venki Pallipadi6e453a62007-05-08 00:27:44 -07001209
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001210 found = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001211 expires = nte->expires;
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001212 /* Look at the cascade bucket(s)? */
1213 if (!index || slot < index)
1214 goto cascade;
1215 return expires;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001216 }
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001217 slot = (slot + 1) & TVR_MASK;
1218 } while (slot != index);
1219
1220cascade:
1221 /* Calculate the next cascade event */
1222 if (index)
1223 timer_jiffies += TVR_SIZE - index;
1224 timer_jiffies >>= TVR_BITS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001225
1226 /* Check tv2-tv5. */
1227 varray[0] = &base->tv2;
1228 varray[1] = &base->tv3;
1229 varray[2] = &base->tv4;
1230 varray[3] = &base->tv5;
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001231
1232 for (array = 0; array < 4; array++) {
Pavel Macheka6fa8e52008-01-30 13:30:00 +01001233 struct tvec *varp = varray[array];
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001234
1235 index = slot = timer_jiffies & TVN_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001236 do {
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001237 list_for_each_entry(nte, varp->vec + slot, entry) {
Jon Huntera04198882009-05-01 13:10:23 -07001238 if (tbase_get_deferrable(nte->base))
1239 continue;
1240
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001241 found = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001242 if (time_before(nte->expires, expires))
1243 expires = nte->expires;
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001244 }
1245 /*
1246 * Do we still search for the first timer or are
1247 * we looking up the cascade buckets ?
1248 */
1249 if (found) {
1250 /* Look at the cascade bucket(s)? */
1251 if (!index || slot < index)
1252 break;
1253 return expires;
1254 }
1255 slot = (slot + 1) & TVN_MASK;
1256 } while (slot != index);
1257
1258 if (index)
1259 timer_jiffies += TVN_SIZE - index;
1260 timer_jiffies >>= TVN_BITS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001261 }
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001262 return expires;
1263}
1264
1265/*
1266 * Check, if the next hrtimer event is before the next timer wheel
1267 * event:
1268 */
1269static unsigned long cmp_next_hrtimer_event(unsigned long now,
1270 unsigned long expires)
1271{
1272 ktime_t hr_delta = hrtimer_get_next_event();
1273 struct timespec tsdelta;
Thomas Gleixner9501b6c2007-03-25 14:31:17 +02001274 unsigned long delta;
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001275
1276 if (hr_delta.tv64 == KTIME_MAX)
1277 return expires;
1278
Thomas Gleixner9501b6c2007-03-25 14:31:17 +02001279 /*
1280 * An expired timer is available; let it expire in the next tick.
1281 */
1282 if (hr_delta.tv64 <= 0)
1283 return now + 1;
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001284
1285 tsdelta = ktime_to_timespec(hr_delta);
Thomas Gleixner9501b6c2007-03-25 14:31:17 +02001286 delta = timespec_to_jiffies(&tsdelta);
Thomas Gleixnereaad0842007-05-29 23:47:39 +02001287
1288 /*
1289 * Limit the delta to the max value, which is checked in
1290 * tick_nohz_stop_sched_tick():
1291 */
1292 if (delta > NEXT_TIMER_MAX_DELTA)
1293 delta = NEXT_TIMER_MAX_DELTA;
1294
Thomas Gleixner9501b6c2007-03-25 14:31:17 +02001295 /*
1296 * Take rounding errors into account and make sure that it
1297 * expires in the next tick. Otherwise we go into an endless
1298 * ping-pong due to tick_nohz_stop_sched_tick() retriggering
1299 * the timer softirq.
1300 */
1301 if (delta < 1)
1302 delta = 1;
1303 now += delta;
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001304 if (time_before(now, expires))
1305 return now;
1306 return expires;
1307}
1308
1309/**
Li Zefan8dce39c2007-11-05 14:51:10 -08001310 * get_next_timer_interrupt - return the jiffy of the next pending timer
Randy Dunlap05fb6bf2007-02-28 20:12:13 -08001311 * @now: current time (in jiffies)
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001312 */
Thomas Gleixnerfd064b92007-02-16 01:27:47 -08001313unsigned long get_next_timer_interrupt(unsigned long now)
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001314{
Christoph Lameter74963512010-11-30 14:05:53 -06001315 struct tvec_base *base = __this_cpu_read(tvec_bases);
Thomas Gleixnerfd064b92007-02-16 01:27:47 -08001316 unsigned long expires;
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001317
Heiko Carstensdbd87b52010-12-01 10:11:09 +01001318 /*
1319 * Pretend that there is no timer pending if the cpu is offline.
1320 * Possible pending timers will be migrated later to an active cpu.
1321 */
1322 if (cpu_is_offline(smp_processor_id()))
1323 return now + NEXT_TIMER_MAX_DELTA;
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001324 spin_lock(&base->lock);
Martin Schwidefsky97fd9ed2009-07-21 20:25:05 +02001325 if (time_before_eq(base->next_timer, base->timer_jiffies))
1326 base->next_timer = __next_timer_interrupt(base);
1327 expires = base->next_timer;
Oleg Nesterov3691c512006-03-31 02:30:30 -08001328 spin_unlock(&base->lock);
Tony Lindgren69239742006-03-06 15:42:45 -08001329
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001330 if (time_before_eq(expires, now))
1331 return now;
Zachary Amsden0662b712006-05-20 15:00:24 -07001332
Thomas Gleixner1cfd6842007-02-16 01:27:46 -08001333 return cmp_next_hrtimer_event(now, expires);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001334}
1335#endif
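
/*
 * Illustrative usage sketch (editorial addition, not from the original
 * file): a NOHZ-style caller typically wants to know how many ticks it may
 * skip before the next timer-wheel or hrtimer event. The helper name below
 * is hypothetical:
 *
 *	static unsigned long example_ticks_until_next_timer(void)
 *	{
 *		unsigned long now = jiffies;
 *
 *		return get_next_timer_interrupt(now) - now;
 *	}
 *
 * A result of 0 means a timer is already due, since the function returns
 * 'now' itself when the next expiry is not in the future.
 */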
1336
Linus Torvalds1da177e2005-04-16 15:20:36 -07001337/*
Daniel Walker5b4db0c2007-10-18 03:06:11 -07001338 * Called from the timer interrupt handler to charge one tick to the current
Linus Torvalds1da177e2005-04-16 15:20:36 -07001339 * process. user_tick is 1 if the tick is user time, 0 for system.
1340 */
1341void update_process_times(int user_tick)
1342{
1343 struct task_struct *p = current;
1344 int cpu = smp_processor_id();
1345
1346 /* Note: this timer irq context must be accounted for as well. */
Paul Mackerrasfa13a5a2007-11-09 22:39:38 +01001347 account_process_tick(p, user_tick);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001348 run_local_timers();
Paul E. McKenneya1572292009-08-22 13:56:51 -07001349 rcu_check_callbacks(cpu, user_tick);
Peter Zijlstrab845b512008-08-08 21:47:09 +02001350 printk_tick();
Peter Zijlstrae360adb2010-10-14 14:01:34 +08001351#ifdef CONFIG_IRQ_WORK
1352 if (in_irq())
1353 irq_work_run();
1354#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001355 scheduler_tick();
Thomas Gleixner68194572007-07-19 01:49:16 -07001356 run_posix_cpu_timers(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001357}
1358
1359/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001360 * This function runs timers and the timer-tq in bottom half context.
1361 */
1362static void run_timer_softirq(struct softirq_action *h)
1363{
Christoph Lameter74963512010-11-30 14:05:53 -06001364 struct tvec_base *base = __this_cpu_read(tvec_bases);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001365
Peter Zijlstrad3d74452008-01-25 21:08:31 +01001366 hrtimer_run_pending();
Ingo Molnar82f67cd2007-02-16 01:28:13 -08001367
Linus Torvalds1da177e2005-04-16 15:20:36 -07001368 if (time_after_eq(jiffies, base->timer_jiffies))
1369 __run_timers(base);
1370}
1371
1372/*
1373 * Called by the local, per-CPU timer interrupt on SMP.
1374 */
1375void run_local_timers(void)
1376{
Peter Zijlstrad3d74452008-01-25 21:08:31 +01001377 hrtimer_run_queues();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001378 raise_softirq(TIMER_SOFTIRQ);
1379}
1380
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381#ifdef __ARCH_WANT_SYS_ALARM
1382
1383/*
1384 * For backwards compatibility? This can be done in libc so Alpha
1385 * and all newer ports shouldn't need it.
1386 */
Heiko Carstens58fd3aa2009-01-14 14:14:03 +01001387SYSCALL_DEFINE1(alarm, unsigned int, seconds)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001388{
Thomas Gleixnerc08b8a42006-03-25 03:06:33 -08001389 return alarm_setitimer(seconds);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001390}
1391
1392#endif
1393
1394#ifndef __alpha__
1395
1396/*
1397 * The Alpha uses getxpid, getxuid, and getxgid instead. Maybe these
1398 * should be moved into arch/i386?
1399 */
1400
1401/**
1402 * sys_getpid - return the thread group id of the current process
1403 *
1404 * Note, despite the name, this returns the tgid not the pid. The tgid and
1405 * the pid are identical unless CLONE_THREAD was specified on clone() in
1406 * which case the tgid is the same in all threads of the same group.
1407 *
1408 * This is SMP safe as current->tgid does not change.
1409 */
Heiko Carstens58fd3aa2009-01-14 14:14:03 +01001410SYSCALL_DEFINE0(getpid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001411{
Pavel Emelyanovb4888932007-10-18 23:40:14 -07001412 return task_tgid_vnr(current);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001413}
1414
1415/*
Kirill Korotaev6997a6f2006-08-13 23:24:23 -07001416 * Accessing ->real_parent is not SMP-safe, it could
1417 * change from under us. However, we can use a stale
1418 * value of ->real_parent under rcu_read_lock(), see
1419 * release_task()->call_rcu(delayed_put_task_struct).
Linus Torvalds1da177e2005-04-16 15:20:36 -07001420 */
Heiko Carstensdbf040d2009-01-14 14:14:04 +01001421SYSCALL_DEFINE0(getppid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001422{
1423 int pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001424
Kirill Korotaev6997a6f2006-08-13 23:24:23 -07001425 rcu_read_lock();
Mandeep Singh Baines031af1652011-12-08 14:34:44 -08001426 pid = task_tgid_vnr(rcu_dereference(current->real_parent));
Kirill Korotaev6997a6f2006-08-13 23:24:23 -07001427 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001428
Linus Torvalds1da177e2005-04-16 15:20:36 -07001429 return pid;
1430}
1431
Heiko Carstensdbf040d2009-01-14 14:14:04 +01001432SYSCALL_DEFINE0(getuid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001433{
1434 /* Only we change this, so it is SMP-safe */
David Howells76aac0e2008-11-14 10:39:12 +11001435 return current_uid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001436}
1437
Heiko Carstensdbf040d2009-01-14 14:14:04 +01001438SYSCALL_DEFINE0(geteuid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001439{
1440 /* Only we change this, so it is SMP-safe */
David Howells76aac0e2008-11-14 10:39:12 +11001441 return current_euid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001442}
1443
Heiko Carstensdbf040d2009-01-14 14:14:04 +01001444SYSCALL_DEFINE0(getgid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001445{
1446 /* Only we change this, so it is SMP-safe */
David Howells76aac0e2008-11-14 10:39:12 +11001447 return current_gid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001448}
1449
Heiko Carstensdbf040d2009-01-14 14:14:04 +01001450SYSCALL_DEFINE0(getegid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001451{
1452 /* Only we change this, so it is SMP-safe */
David Howells76aac0e2008-11-14 10:39:12 +11001453 return current_egid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001454}
1455
1456#endif
1457
1458static void process_timeout(unsigned long __data)
1459{
Ingo Molnar36c8b582006-07-03 00:25:41 -07001460 wake_up_process((struct task_struct *)__data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001461}
1462
1463/**
1464 * schedule_timeout - sleep until timeout
1465 * @timeout: timeout value in jiffies
1466 *
1467 * Make the current task sleep until @timeout jiffies have
1468 * elapsed. The routine will return immediately unless
1469 * the current task state has been set (see set_current_state()).
1470 *
1471 * You can set the task state as follows -
1472 *
1473 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
1474 * pass before the routine returns. The routine will return 0
1475 *
1476 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
1477 * delivered to the current task. In this case the remaining time
1478 * in jiffies will be returned, or 0 if the timer expired in time
1479 *
1480 * The current task state is guaranteed to be TASK_RUNNING when this
1481 * routine returns.
1482 *
1483 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
1484 * the CPU away without a bound on the timeout. In this case the return
1485 * value will be %MAX_SCHEDULE_TIMEOUT.
1486 *
1487 * In all cases the return value is guaranteed to be non-negative.
1488 */
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08001489signed long __sched schedule_timeout(signed long timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001490{
1491 struct timer_list timer;
1492 unsigned long expire;
1493
1494 switch (timeout)
1495 {
1496 case MAX_SCHEDULE_TIMEOUT:
1497 /*
1498 * These two special cases are useful to make life easier
1499 * for the caller. Nothing more. We could take
1500 * MAX_SCHEDULE_TIMEOUT from one of the negative values,
1501 * but I'd like to return a valid offset (>= 0) to allow
1502 * the caller to do everything it wants with the retval.
1503 */
1504 schedule();
1505 goto out;
1506 default:
1507 /*
1508 * Another bit of paranoia. Note that the retval will be
1509 * 0 since no piece of the kernel is supposed to check
1510 * for a negative retval of schedule_timeout() (since it
1511 * should never happen anyway). You just have the printk()
1512 * that will tell you if something has gone wrong and where.
1513 */
Andrew Morton5b149bc2006-12-22 01:10:14 -08001514 if (timeout < 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001515 printk(KERN_ERR "schedule_timeout: wrong timeout "
Andrew Morton5b149bc2006-12-22 01:10:14 -08001516 "value %lx\n", timeout);
1517 dump_stack();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001518 current->state = TASK_RUNNING;
1519 goto out;
1520 }
1521 }
1522
1523 expire = timeout + jiffies;
1524
Thomas Gleixnerc6f3a972008-04-30 00:55:03 -07001525 setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
Arun R Bharadwaj597d0272009-04-16 12:13:26 +05301526 __mod_timer(&timer, expire, false, TIMER_NOT_PINNED);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001527 schedule();
1528 del_singleshot_timer_sync(&timer);
1529
Thomas Gleixnerc6f3a972008-04-30 00:55:03 -07001530 /* Remove the timer from the object tracker */
1531 destroy_timer_on_stack(&timer);
1532
Linus Torvalds1da177e2005-04-16 15:20:36 -07001533 timeout = expire - jiffies;
1534
1535 out:
1536 return timeout < 0 ? 0 : timeout;
1537}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001538EXPORT_SYMBOL(schedule_timeout);
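
/*
 * Illustrative usage (editorial sketch, not part of the original source):
 * the task state must be set before calling schedule_timeout(), e.g. to
 * wait up to one second but wake early on a signal:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(HZ);
 *
 * A non-zero 'remaining' means the task was woken before the timeout
 * expired, for example by a signal or an explicit wake_up_process().
 */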
1539
Andrew Morton8a1c1752005-09-13 01:25:15 -07001540/*
1541 * We can use __set_current_state() here because schedule_timeout() calls
1542 * schedule() unconditionally.
1543 */
Nishanth Aravamudan64ed93a2005-09-10 00:27:21 -07001544signed long __sched schedule_timeout_interruptible(signed long timeout)
1545{
Andrew Mortona5a0d522005-10-30 15:01:42 -08001546 __set_current_state(TASK_INTERRUPTIBLE);
1547 return schedule_timeout(timeout);
Nishanth Aravamudan64ed93a2005-09-10 00:27:21 -07001548}
1549EXPORT_SYMBOL(schedule_timeout_interruptible);
1550
Matthew Wilcox294d5cc2007-12-06 11:59:46 -05001551signed long __sched schedule_timeout_killable(signed long timeout)
1552{
1553 __set_current_state(TASK_KILLABLE);
1554 return schedule_timeout(timeout);
1555}
1556EXPORT_SYMBOL(schedule_timeout_killable);
1557
Nishanth Aravamudan64ed93a2005-09-10 00:27:21 -07001558signed long __sched schedule_timeout_uninterruptible(signed long timeout)
1559{
Andrew Mortona5a0d522005-10-30 15:01:42 -08001560 __set_current_state(TASK_UNINTERRUPTIBLE);
1561 return schedule_timeout(timeout);
Nishanth Aravamudan64ed93a2005-09-10 00:27:21 -07001562}
1563EXPORT_SYMBOL(schedule_timeout_uninterruptible);
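
/*
 * Editorial sketch: the three wrappers above set the task state themselves,
 * so a caller that wants to sleep for roughly 20ms while still reacting to
 * fatal signals could simply do (the value is only an example):
 *
 *	long left = schedule_timeout_killable(msecs_to_jiffies(20));
 *
 * A non-zero 'left' indicates the sleep ended early, either because a fatal
 * signal is pending or because something woke the task explicitly.
 */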
1564
Linus Torvalds1da177e2005-04-16 15:20:36 -07001565/* Thread ID - the internal kernel "pid" */
Heiko Carstens58fd3aa2009-01-14 14:14:03 +01001566SYSCALL_DEFINE0(gettid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001567{
Pavel Emelyanovb4888932007-10-18 23:40:14 -07001568 return task_pid_vnr(current);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001569}
1570
Rolf Eike Beer2aae4a12006-09-29 01:59:46 -07001571/**
Kyle McMartind4d23ad2007-02-10 01:46:00 -08001572 * do_sysinfo - fill in sysinfo struct
Rolf Eike Beer2aae4a12006-09-29 01:59:46 -07001573 * @info: pointer to buffer to fill
Thomas Gleixner68194572007-07-19 01:49:16 -07001574 */
Kyle McMartind4d23ad2007-02-10 01:46:00 -08001575int do_sysinfo(struct sysinfo *info)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001576{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001577 unsigned long mem_total, sav_total;
1578 unsigned int mem_unit, bitcount;
Thomas Gleixner2d024942009-05-02 20:08:52 +02001579 struct timespec tp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001580
Kyle McMartind4d23ad2007-02-10 01:46:00 -08001581 memset(info, 0, sizeof(struct sysinfo));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001582
Thomas Gleixner2d024942009-05-02 20:08:52 +02001583 ktime_get_ts(&tp);
1584 monotonic_to_bootbased(&tp);
1585 info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001586
Thomas Gleixner2d024942009-05-02 20:08:52 +02001587 get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001588
Thomas Gleixner2d024942009-05-02 20:08:52 +02001589 info->procs = nr_threads;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001590
Kyle McMartind4d23ad2007-02-10 01:46:00 -08001591 si_meminfo(info);
1592 si_swapinfo(info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001593
1594 /*
1595 * If the sum of all the available memory (i.e. RAM + swap)
1596 * is less than can be stored in a 32-bit unsigned long, then
1597 * we can be binary compatible with 2.2.x kernels. If not,
1598 * well, in that case 2.2.x was broken anyway...
1599 *
1600 * -Erik Andersen <andersee@debian.org>
1601 */
1602
Kyle McMartind4d23ad2007-02-10 01:46:00 -08001603 mem_total = info->totalram + info->totalswap;
1604 if (mem_total < info->totalram || mem_total < info->totalswap)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001605 goto out;
1606 bitcount = 0;
Kyle McMartind4d23ad2007-02-10 01:46:00 -08001607 mem_unit = info->mem_unit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001608 while (mem_unit > 1) {
1609 bitcount++;
1610 mem_unit >>= 1;
1611 sav_total = mem_total;
1612 mem_total <<= 1;
1613 if (mem_total < sav_total)
1614 goto out;
1615 }
1616
1617 /*
1618 * If mem_total did not overflow, multiply all memory values by
Kyle McMartind4d23ad2007-02-10 01:46:00 -08001619 * info->mem_unit and set it to 1. This leaves things compatible
Linus Torvalds1da177e2005-04-16 15:20:36 -07001620 * with 2.2.x, and also retains compatibility with earlier 2.4.x
1621 * kernels...
1622 */
1623
Kyle McMartind4d23ad2007-02-10 01:46:00 -08001624 info->mem_unit = 1;
1625 info->totalram <<= bitcount;
1626 info->freeram <<= bitcount;
1627 info->sharedram <<= bitcount;
1628 info->bufferram <<= bitcount;
1629 info->totalswap <<= bitcount;
1630 info->freeswap <<= bitcount;
1631 info->totalhigh <<= bitcount;
1632 info->freehigh <<= bitcount;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001633
Kyle McMartind4d23ad2007-02-10 01:46:00 -08001634out:
1635 return 0;
1636}
1637
Heiko Carstens1e7bfb22009-01-14 14:14:29 +01001638SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
Kyle McMartind4d23ad2007-02-10 01:46:00 -08001639{
1640 struct sysinfo val;
1641
1642 do_sysinfo(&val);
1643
Linus Torvalds1da177e2005-04-16 15:20:36 -07001644 if (copy_to_user(info, &val, sizeof(struct sysinfo)))
1645 return -EFAULT;
1646
1647 return 0;
1648}
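
/*
 * Illustrative userspace view (editorial addition, not from the original
 * source): the structure filled in by do_sysinfo() is what a program sees
 * through the sysinfo(2) system call, e.g.:
 *
 *	#include <stdio.h>
 *	#include <sys/sysinfo.h>
 *
 *	struct sysinfo si;
 *
 *	if (sysinfo(&si) == 0)
 *		printf("up %ld s, free ram %lu x %u bytes\n",
 *		       si.uptime, si.freeram, si.mem_unit);
 */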
1649
Adrian Bunkb4be6252007-12-18 18:05:58 +01001650static int __cpuinit init_timers_cpu(int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001651{
1652 int j;
Pavel Macheka6fa8e52008-01-30 13:30:00 +01001653 struct tvec_base *base;
Adrian Bunkb4be6252007-12-18 18:05:58 +01001654 static char __cpuinitdata tvec_base_done[NR_CPUS];
Oleg Nesterov55c888d2005-06-23 00:08:56 -07001655
Andrew Mortonba6edfc2006-04-10 22:53:58 -07001656 if (!tvec_base_done[cpu]) {
Jan Beulicha4a61982006-03-24 03:15:54 -08001657 static char boot_done;
1658
Jan Beulicha4a61982006-03-24 03:15:54 -08001659 if (boot_done) {
Andrew Mortonba6edfc2006-04-10 22:53:58 -07001660 /*
1661 * The APs use this path later in boot
1662 */
Christoph Lameter94f60302007-07-17 04:03:29 -07001663 base = kmalloc_node(sizeof(*base),
1664 GFP_KERNEL | __GFP_ZERO,
Jan Beulicha4a61982006-03-24 03:15:54 -08001665 cpu_to_node(cpu));
1666 if (!base)
1667 return -ENOMEM;
Venki Pallipadi6e453a62007-05-08 00:27:44 -07001668
1669 /* Make sure that tvec_base is 2-byte aligned */
1670 if (tbase_get_deferrable(base)) {
1671 WARN_ON(1);
1672 kfree(base);
1673 return -ENOMEM;
1674 }
Andrew Mortonba6edfc2006-04-10 22:53:58 -07001675 per_cpu(tvec_bases, cpu) = base;
Jan Beulicha4a61982006-03-24 03:15:54 -08001676 } else {
Andrew Mortonba6edfc2006-04-10 22:53:58 -07001677 /*
1678 * This is for the boot CPU - we use compile-time
1679 * static initialisation because per-cpu memory isn't
1680 * ready yet and because the memory allocators are not
1681 * initialised either.
1682 */
Jan Beulicha4a61982006-03-24 03:15:54 -08001683 boot_done = 1;
Andrew Mortonba6edfc2006-04-10 22:53:58 -07001684 base = &boot_tvec_bases;
Jan Beulicha4a61982006-03-24 03:15:54 -08001685 }
Tirupathi Reddy12bc4032013-04-23 20:25:55 +05301686 spin_lock_init(&base->lock);
Andrew Mortonba6edfc2006-04-10 22:53:58 -07001687 tvec_base_done[cpu] = 1;
1688 } else {
1689 base = per_cpu(tvec_bases, cpu);
Jan Beulicha4a61982006-03-24 03:15:54 -08001690 }
Andrew Mortonba6edfc2006-04-10 22:53:58 -07001691
Ingo Molnard730e882006-07-03 00:25:10 -07001692
Linus Torvalds1da177e2005-04-16 15:20:36 -07001693 for (j = 0; j < TVN_SIZE; j++) {
1694 INIT_LIST_HEAD(base->tv5.vec + j);
1695 INIT_LIST_HEAD(base->tv4.vec + j);
1696 INIT_LIST_HEAD(base->tv3.vec + j);
1697 INIT_LIST_HEAD(base->tv2.vec + j);
1698 }
1699 for (j = 0; j < TVR_SIZE; j++)
1700 INIT_LIST_HEAD(base->tv1.vec + j);
1701
1702 base->timer_jiffies = jiffies;
Martin Schwidefsky97fd9ed2009-07-21 20:25:05 +02001703 base->next_timer = base->timer_jiffies;
Jan Beulicha4a61982006-03-24 03:15:54 -08001704 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001705}
1706
1707#ifdef CONFIG_HOTPLUG_CPU
Pavel Macheka6fa8e52008-01-30 13:30:00 +01001708static void migrate_timer_list(struct tvec_base *new_base, struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001709{
1710 struct timer_list *timer;
1711
1712 while (!list_empty(head)) {
Pavel Emelianovb5e61812007-05-08 00:30:19 -07001713 timer = list_first_entry(head, struct timer_list, entry);
Oleg Nesterov55c888d2005-06-23 00:08:56 -07001714 detach_timer(timer, 0);
Venki Pallipadi6e453a62007-05-08 00:27:44 -07001715 timer_set_base(timer, new_base);
Martin Schwidefsky97fd9ed2009-07-21 20:25:05 +02001716 if (time_before(timer->expires, new_base->next_timer) &&
1717 !tbase_get_deferrable(timer->base))
1718 new_base->next_timer = timer->expires;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001719 internal_add_timer(new_base, timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001720 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001721}
1722
Randy Dunlap48ccf3d2008-01-21 17:18:25 -08001723static void __cpuinit migrate_timers(int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001724{
Pavel Macheka6fa8e52008-01-30 13:30:00 +01001725 struct tvec_base *old_base;
1726 struct tvec_base *new_base;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001727 int i;
1728
1729 BUG_ON(cpu_online(cpu));
Jan Beulicha4a61982006-03-24 03:15:54 -08001730 old_base = per_cpu(tvec_bases, cpu);
1731 new_base = get_cpu_var(tvec_bases);
Oleg Nesterovd82f0b02008-08-20 16:46:04 -07001732 /*
1733 * The caller is globally serialized and nobody else
1734 * takes two locks at once, so deadlock is not possible.
1735 */
1736 spin_lock_irq(&new_base->lock);
Oleg Nesterov0d180402008-04-04 20:54:10 +02001737 spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001738
Oleg Nesterov3691c512006-03-31 02:30:30 -08001739 BUG_ON(old_base->running_timer);
1740
Linus Torvalds1da177e2005-04-16 15:20:36 -07001741 for (i = 0; i < TVR_SIZE; i++)
Oleg Nesterov55c888d2005-06-23 00:08:56 -07001742 migrate_timer_list(new_base, old_base->tv1.vec + i);
1743 for (i = 0; i < TVN_SIZE; i++) {
1744 migrate_timer_list(new_base, old_base->tv2.vec + i);
1745 migrate_timer_list(new_base, old_base->tv3.vec + i);
1746 migrate_timer_list(new_base, old_base->tv4.vec + i);
1747 migrate_timer_list(new_base, old_base->tv5.vec + i);
1748 }
1749
Oleg Nesterov0d180402008-04-04 20:54:10 +02001750 spin_unlock(&old_base->lock);
Oleg Nesterovd82f0b02008-08-20 16:46:04 -07001751 spin_unlock_irq(&new_base->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001752 put_cpu_var(tvec_bases);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001753}
1754#endif /* CONFIG_HOTPLUG_CPU */
1755
Chandra Seetharaman8c78f302006-07-30 03:03:35 -07001756static int __cpuinit timer_cpu_notify(struct notifier_block *self,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001757 unsigned long action, void *hcpu)
1758{
1759 long cpu = (long)hcpu;
Akinobu Mita80b51842010-05-26 14:43:32 -07001760 int err;
1761
Linus Torvalds1da177e2005-04-16 15:20:36 -07001762 switch(action) {
1763 case CPU_UP_PREPARE:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07001764 case CPU_UP_PREPARE_FROZEN:
Akinobu Mita80b51842010-05-26 14:43:32 -07001765 err = init_timers_cpu(cpu);
1766 if (err < 0)
1767 return notifier_from_errno(err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001768 break;
1769#ifdef CONFIG_HOTPLUG_CPU
1770 case CPU_DEAD:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07001771 case CPU_DEAD_FROZEN:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001772 migrate_timers(cpu);
1773 break;
1774#endif
1775 default:
1776 break;
1777 }
1778 return NOTIFY_OK;
1779}
1780
Chandra Seetharaman8c78f302006-07-30 03:03:35 -07001781static struct notifier_block __cpuinitdata timers_nb = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001782 .notifier_call = timer_cpu_notify,
1783};
1784
1785
1786void __init init_timers(void)
1787{
Akinobu Mita07dccf32006-09-29 02:00:22 -07001788 int err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001789 (void *)(long)smp_processor_id());
Akinobu Mita07dccf32006-09-29 02:00:22 -07001790
Ingo Molnar82f67cd2007-02-16 01:28:13 -08001791 init_timer_stats();
1792
Akinobu Mita9e506f72010-06-04 14:15:04 -07001793 BUG_ON(err != NOTIFY_OK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794 register_cpu_notifier(&timers_nb);
Carlos R. Mafra962cf362008-05-15 11:15:37 -03001795 open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001796}
1797
Linus Torvalds1da177e2005-04-16 15:20:36 -07001798/**
1799 * msleep - sleep safely even with waitqueue interruptions
1800 * @msecs: Time in milliseconds to sleep for
1801 */
1802void msleep(unsigned int msecs)
1803{
1804 unsigned long timeout = msecs_to_jiffies(msecs) + 1;
1805
Nishanth Aravamudan75bcc8c2005-09-10 00:27:24 -07001806 while (timeout)
1807 timeout = schedule_timeout_uninterruptible(timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001808}
1809
1810EXPORT_SYMBOL(msleep);
1811
1812/**
Domen Puncer96ec3ef2005-06-25 14:58:43 -07001813 * msleep_interruptible - sleep waiting for signals
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814 * @msecs: Time in milliseconds to sleep for
1815 */
1816unsigned long msleep_interruptible(unsigned int msecs)
1817{
1818 unsigned long timeout = msecs_to_jiffies(msecs) + 1;
1819
Nishanth Aravamudan75bcc8c2005-09-10 00:27:24 -07001820 while (timeout && !signal_pending(current))
1821 timeout = schedule_timeout_interruptible(timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001822 return jiffies_to_msecs(timeout);
1823}
1824
1825EXPORT_SYMBOL(msleep_interruptible);
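
/*
 * Editorial usage sketch (not part of the original file):
 *
 *	msleep(50);
 *	left = msleep_interruptible(50);
 *
 * msleep() always sleeps for at least the requested time (it cannot be
 * interrupted), while msleep_interruptible() returns the number of
 * milliseconds that were still outstanding if a signal ended the sleep
 * early, or 0 if the full delay elapsed.
 */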
Patrick Pannuto5e7f5a12010-08-02 15:01:04 -07001826
Jonathan Corbetead13f92013-09-02 02:30:26 -07001827static void do_nsleep(unsigned int msecs, struct hrtimer_sleeper *sleeper,
1828 int sigs)
1829{
1830 enum hrtimer_mode mode = HRTIMER_MODE_REL;
1831 int state = sigs ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
1832
1833 hrtimer_init(&sleeper->timer, CLOCK_MONOTONIC, mode);
1834 sleeper->timer.node.expires = ktime_set(msecs / 1000,
1835 (msecs % 1000) * NSEC_PER_MSEC);
1836 hrtimer_init_sleeper(sleeper, current);
1837
1838 do {
1839 set_current_state(state);
1840 hrtimer_start(&sleeper->timer, sleeper->timer.node.expires, mode);
1841 if (sleeper->task)
1842 schedule();
1843 hrtimer_cancel(&sleeper->timer);
1844 mode = HRTIMER_MODE_ABS;
1845 } while (sleeper->task && !(sigs && signal_pending(current)));
1846}
1847
1848void hr_msleep(unsigned int msecs)
1849{
1850 struct hrtimer_sleeper sleeper;
1851
1852 do_nsleep(msecs, &sleeper, 0);
1853}
1854
1855EXPORT_SYMBOL(hr_msleep);
1856
1857unsigned long hr_msleep_interruptible(unsigned int msecs)
1858{
1859 struct hrtimer_sleeper sleeper;
1860 ktime_t left;
1861
1862 do_nsleep(msecs, &sleeper, 1);
1863
1864 if (!sleeper.task)
1865 return 0;
1866 left = ktime_sub(sleeper.timer.node.expires,
1867 sleeper.timer.base->get_time());
1868 return max(((long) ktime_to_ns(left))/(long)NSEC_PER_MSEC, 1L);
1869}
1870
1871EXPORT_SYMBOL(hr_msleep_interruptible);
1872
Patrick Pannuto5e7f5a12010-08-02 15:01:04 -07001873static int __sched do_usleep_range(unsigned long min, unsigned long max)
1874{
1875 ktime_t kmin;
1876 unsigned long delta;
1877
1878 kmin = ktime_set(0, min * NSEC_PER_USEC);
1879 delta = (max - min) * NSEC_PER_USEC;
1880 return schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
1881}
1882
1883/**
1884 * usleep_range - Drop-in replacement for udelay where wakeup is flexible
1885 * @min: Minimum time in usecs to sleep
1886 * @max: Maximum time in usecs to sleep
1887 */
1888void usleep_range(unsigned long min, unsigned long max)
1889{
1890 __set_current_state(TASK_UNINTERRUPTIBLE);
1891 do_usleep_range(min, max);
1892}
1893EXPORT_SYMBOL(usleep_range);
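
/*
 * Editorial usage sketch (not part of the original source): usleep_range()
 * gives the scheduler slack to coalesce wakeups, so a driver waiting for
 * hardware that settles in roughly 100us might allow up to 200us:
 *
 *	usleep_range(100, 200);
 *
 * Unlike udelay() this actually sleeps, so it may only be used in process
 * context; unlike msleep() it is suitable for sub-millisecond waits.
 */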