/*
 *  linux/kernel/timer.c
 *
 *  Kernel internal timers, basic process system calls
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *
 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 *  1998-12-24  Fixed an xtime SMP race (we need the xtime_lock rw spinlock to
 *              serialize accesses to xtime/lost_ticks).
 *                              Copyright (C) 1998  Andrea Arcangeli
 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 *  2002-05-31  Moved sys_sysinfo here and made its locking sane, Robert Love
 */

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>
#include <linux/irq_work.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/timer.h>

u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)

struct tvec {
	struct list_head vec[TVN_SIZE];
};

struct tvec_root {
	struct list_head vec[TVR_SIZE];
};

struct tvec_base {
	spinlock_t lock;
	struct timer_list *running_timer;
	unsigned long timer_jiffies;
	unsigned long next_timer;
	struct tvec_root tv1;
	struct tvec tv2;
	struct tvec tv3;
	struct tvec tv4;
	struct tvec tv5;
} ____cacheline_aligned;

struct tvec_base boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);
static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases;

/* Functions below help us manage 'deferrable' flag */
static inline unsigned int tbase_get_deferrable(struct tvec_base *base)
{
	return ((unsigned int)(unsigned long)base & TBASE_DEFERRABLE_FLAG);
}

static inline struct tvec_base *tbase_get_base(struct tvec_base *base)
{
	return ((struct tvec_base *)((unsigned long)base & ~TBASE_DEFERRABLE_FLAG));
}

static inline void timer_set_deferrable(struct timer_list *timer)
{
	timer->base = TBASE_MAKE_DEFERRED(timer->base);
}

static inline void
timer_set_base(struct timer_list *timer, struct tvec_base *new_base)
{
	timer->base = (struct tvec_base *)((unsigned long)(new_base) |
				      tbase_get_deferrable(timer->base));
}
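
/*
 * Note: the helpers above implement a tagged pointer. A struct tvec_base
 * is ____cacheline_aligned, so the low bits of its address are always
 * zero and bit 0 (TBASE_DEFERRABLE_FLAG) can carry the "deferrable"
 * attribute without growing struct timer_list. For example (the
 * addresses are illustrative only):
 *
 *	base        = 0xffff880001d10000   real pointer, bit 0 clear
 *	timer->base = 0xffff880001d10001   same pointer, marked deferrable
 *
 * tbase_get_base() masks bit 0 off to recover the real pointer, and
 * tbase_get_deferrable() masks everything else off to read the flag.
 */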
static unsigned long round_jiffies_common(unsigned long j, int cpu,
		bool force_up)
{
	int rem;
	unsigned long original = j;

	/*
	 * We don't want all cpus firing their timers at once hitting the
	 * same lock or cachelines, so we skew each extra cpu with an extra
	 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
	 * already did this.
	 * The skew is done by adding 3*cpunr, then rounding, then subtracting
	 * this extra offset again.
	 */
	j += cpu * 3;

	rem = j % HZ;

	/*
	 * If the target jiffy is just after a whole second (which can happen
	 * due to delays of the timer irq, long irq off times etc.) then
	 * we should round down to the whole second, not up. Use 1/4th second
	 * as cutoff for this rounding as an extreme upper bound for this.
	 * But never round down if @force_up is set.
	 */
	if (rem < HZ/4 && !force_up) /* round down */
		j = j - rem;
	else /* round up */
		j = j - rem + HZ;

	/* now that we have rounded, subtract the extra skew again */
	j -= cpu * 3;

	if (j <= jiffies) /* rounding ate our timeout entirely */
		return original;
	return j;
}
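
/*
 * Worked example (assuming HZ = 1000): for cpu = 2 and j = 5120,
 * round_jiffies_common(j, cpu, false) computes
 *
 *	j += 2 * 3;		j = 5126
 *	rem = 5126 % 1000;	rem = 126
 *	126 < HZ/4, !force_up	round down: j = 5126 - 126 = 5000
 *	j -= 2 * 3;		j = 4994
 *
 * so this CPU fires 6 jiffies before the whole second, skewed away from
 * the boundary that timers on other CPUs are rounded to.
 */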

/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, false);
}
EXPORT_SYMBOL_GPL(__round_jiffies);

/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, false) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);

/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), false);
}
EXPORT_SYMBOL_GPL(round_jiffies);

/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
	return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);
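
/*
 * A minimal usage sketch of the intended pattern (not code taken from
 * this file; all names below are hypothetical). A rearming housekeeping
 * timer that only needs ~5 second granularity rounds its expiry so that
 * many such timers across the system fire together on second boundaries:
 */
static struct timer_list coalesced_timer;		/* hypothetical */

static void coalesced_timer_fn(unsigned long data)
{
	/* ... periodic housekeeping work ... */
	mod_timer(&coalesced_timer, round_jiffies(jiffies + 5 * HZ));
}

static void example_start_coalesced(void)
{
	setup_timer(&coalesced_timer, coalesced_timer_fn, 0);
	mod_timer(&coalesced_timer, round_jiffies(jiffies + 5 * HZ));
}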

/**
 * __round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies() except that it will never
 * round down.  This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, true);
}
EXPORT_SYMBOL_GPL(__round_jiffies_up);

/**
 * __round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies_relative() except that it will never
 * round down.  This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, true) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);

/**
 * round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * This is the same as round_jiffies() except that it will never
 * round down.  This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), true);
}
EXPORT_SYMBOL_GPL(round_jiffies_up);

/**
 * round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * This is the same as round_jiffies_relative() except that it will never
 * round down.  This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up_relative(unsigned long j)
{
	return __round_jiffies_up_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_up_relative);

/**
 * set_timer_slack - set the allowed slack for a timer
 * @timer: the timer to be modified
 * @slack_hz: the amount of time (in jiffies) allowed for rounding
 *
 * Set the amount of time, in jiffies, that a certain timer has
 * in terms of slack. By setting this value, the timer subsystem
 * will schedule the actual timer somewhere between
 * the time mod_timer() asks for, and that time plus the slack.
 *
 * By setting the slack to -1, a percentage of the delay is used
 * instead.
 */
void set_timer_slack(struct timer_list *timer, int slack_hz)
{
	timer->slack = slack_hz;
}
EXPORT_SYMBOL_GPL(set_timer_slack);
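
/*
 * A minimal sketch (names are hypothetical, and the timer is assumed to
 * have been set up elsewhere). A caller with loose latency requirements
 * can trade precision for fewer wakeups by widening the coalescing
 * window beyond the default 0.4%:
 */
static struct timer_list relaxed_poll_timer;		/* hypothetical */

static void example_relaxed_poll(void)
{
	/* Allow the wheel to fire this timer up to one second late. */
	set_timer_slack(&relaxed_poll_timer, HZ);
	mod_timer(&relaxed_poll_timer, jiffies + 10 * HZ);
}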

static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
	unsigned long expires = timer->expires;
	unsigned long idx = expires - base->timer_jiffies;
	struct list_head *vec;

	if (idx < TVR_SIZE) {
		int i = expires & TVR_MASK;
		vec = base->tv1.vec + i;
	} else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
		int i = (expires >> TVR_BITS) & TVN_MASK;
		vec = base->tv2.vec + i;
	} else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
		vec = base->tv3.vec + i;
	} else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
		vec = base->tv4.vec + i;
	} else if ((signed long) idx < 0) {
		/*
		 * Can happen if you add a timer with expires == jiffies,
		 * or you set a timer to go off in the past
		 */
		vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
	} else {
		int i;
		/* If the timeout is larger than 0xffffffff on 64-bit
		 * architectures then we use the maximum timeout:
		 */
		if (idx > 0xffffffffUL) {
			idx = 0xffffffffUL;
			expires = idx + base->timer_jiffies;
		}
		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
		vec = base->tv5.vec + i;
	}
	/*
	 * Timers are FIFO:
	 */
	list_add_tail(&timer->entry, vec);
}
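
/*
 * Worked example (assuming CONFIG_BASE_SMALL=0, i.e. TVR_BITS=8 and
 * TVN_BITS=6), with base->timer_jiffies == 0:
 *
 *	expires =   100: idx < 256	-> tv1.vec[100]
 *	expires =  1000: idx < 1 << 14	-> tv2.vec[(1000 >> 8) & 63]   = tv2.vec[3]
 *	expires = 20000: idx < 1 << 20	-> tv3.vec[(20000 >> 14) & 63] = tv3.vec[1]
 *
 * Each level covers 64 times the span of the one below it; timers in
 * tv2..tv5 are cascaded into finer-grained levels as base->timer_jiffies
 * advances, so tv1 always holds the timers due in the next TVR_SIZE
 * jiffies.
 */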

#ifdef CONFIG_TIMER_STATS
void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
{
	if (timer->start_site)
		return;

	timer->start_site = addr;
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
}

static void timer_stats_account_timer(struct timer_list *timer)
{
	unsigned int flag = 0;

	if (likely(!timer->start_site))
		return;
	if (unlikely(tbase_get_deferrable(timer->base)))
		flag |= TIMER_STATS_FLAG_DEFERRABLE;

	timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
				 timer->function, timer->start_comm, flag);
}

#else
static void timer_stats_account_timer(struct timer_list *timer) {}
#endif

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr timer_debug_descr;

static void *timer_debug_hint(void *addr)
{
	return ((struct timer_list *) addr)->function;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int timer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_init(timer, &timer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int timer_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The timer was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (timer->entry.next == NULL &&
		    timer->entry.prev == TIMER_ENTRY_STATIC) {
			debug_object_init(timer, &timer_debug_descr);
			debug_object_activate(timer, &timer_debug_descr);
			return 0;
		} else {
			WARN_ON_ONCE(1);
		}
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);
		/* fall through */
	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int timer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_free(timer, &timer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr timer_debug_descr = {
	.name		= "timer_list",
	.debug_hint	= timer_debug_hint,
	.fixup_init	= timer_fixup_init,
	.fixup_activate	= timer_fixup_activate,
	.fixup_free	= timer_fixup_free,
};

static inline void debug_timer_init(struct timer_list *timer)
{
	debug_object_init(timer, &timer_debug_descr);
}

static inline void debug_timer_activate(struct timer_list *timer)
{
	debug_object_activate(timer, &timer_debug_descr);
}

static inline void debug_timer_deactivate(struct timer_list *timer)
{
	debug_object_deactivate(timer, &timer_debug_descr);
}

static inline void debug_timer_free(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}

static void __init_timer(struct timer_list *timer,
			 const char *name,
			 struct lock_class_key *key);

void init_timer_on_stack_key(struct timer_list *timer,
			     const char *name,
			     struct lock_class_key *key)
{
	debug_object_init_on_stack(timer, &timer_debug_descr);
	__init_timer(timer, name, key);
}
EXPORT_SYMBOL_GPL(init_timer_on_stack_key);

void destroy_timer_on_stack(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_timer_on_stack);
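
/*
 * Usage sketch (hypothetical names, mirroring what e.g.
 * schedule_timeout() does): an on-stack timer must be announced to the
 * tracker before use and destroyed before its stack frame goes away:
 *
 *	struct timer_list t;
 *
 *	setup_timer_on_stack(&t, my_fn, 0);
 *	mod_timer(&t, jiffies + HZ);
 *	... sleep or poll until my_fn has done its work ...
 *	del_timer_sync(&t);
 *	destroy_timer_on_stack(&t);
 */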

#else
static inline void debug_timer_init(struct timer_list *timer) { }
static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
#endif

static inline void debug_init(struct timer_list *timer)
{
	debug_timer_init(timer);
	trace_timer_init(timer);
}

static inline void
debug_activate(struct timer_list *timer, unsigned long expires)
{
	debug_timer_activate(timer);
	trace_timer_start(timer, expires);
}

static inline void debug_deactivate(struct timer_list *timer)
{
	debug_timer_deactivate(timer);
	trace_timer_cancel(timer);
}

static void __init_timer(struct timer_list *timer,
			 const char *name,
			 struct lock_class_key *key)
{
	timer->entry.next = NULL;
	timer->base = __raw_get_cpu_var(tvec_bases);
	timer->slack = -1;
#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
	lockdep_init_map(&timer->lockdep_map, name, key, 0);
}

void setup_deferrable_timer_on_stack_key(struct timer_list *timer,
					 const char *name,
					 struct lock_class_key *key,
					 void (*function)(unsigned long),
					 unsigned long data)
{
	timer->function = function;
	timer->data = data;
	init_timer_on_stack_key(timer, name, key);
	timer_set_deferrable(timer);
}
EXPORT_SYMBOL_GPL(setup_deferrable_timer_on_stack_key);

/**
 * init_timer_key - initialize a timer
 * @timer: the timer to be initialized
 * @name: name of the timer
 * @key: lockdep class key of the fake lock used for tracking timer
 *       sync lock dependencies
 *
 * init_timer_key() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void init_timer_key(struct timer_list *timer,
		    const char *name,
		    struct lock_class_key *key)
{
	debug_init(timer);
	__init_timer(timer, name, key);
}
EXPORT_SYMBOL(init_timer_key);

void init_timer_deferrable_key(struct timer_list *timer,
			       const char *name,
			       struct lock_class_key *key)
{
	init_timer_key(timer, name, key);
	timer_set_deferrable(timer);
}
EXPORT_SYMBOL(init_timer_deferrable_key);

static inline void detach_timer(struct timer_list *timer,
				int clear_pending)
{
	struct list_head *entry = &timer->entry;

	debug_deactivate(timer);

	__list_del(entry->prev, entry->next);
	if (clear_pending)
		entry->next = NULL;
	entry->prev = LIST_POISON2;
}

/*
 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static struct tvec_base *lock_timer_base(struct timer_list *timer,
					 unsigned long *flags)
	__acquires(timer->base->lock)
{
	struct tvec_base *base;

	for (;;) {
		struct tvec_base *prelock_base = timer->base;
		base = tbase_get_base(prelock_base);
		if (likely(base != NULL)) {
			spin_lock_irqsave(&base->lock, *flags);
			if (likely(prelock_base == timer->base))
				return base;
			/* The timer has migrated to another CPU */
			spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();
	}
}

static inline int
__mod_timer(struct timer_list *timer, unsigned long expires,
	    bool pending_only, int pinned)
{
	struct tvec_base *base, *new_base;
	unsigned long flags;
	int ret = 0, cpu;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(!timer->function);

	base = lock_timer_base(timer, &flags);

	if (timer_pending(timer)) {
		detach_timer(timer, 0);
		if (timer->expires == base->next_timer &&
		    !tbase_get_deferrable(timer->base))
			base->next_timer = base->timer_jiffies;
		ret = 1;
	} else {
		if (pending_only)
			goto out_unlock;
	}

	debug_activate(timer, expires);

	cpu = smp_processor_id();

#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
	if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu))
		cpu = get_nohz_timer_target();
#endif
	new_base = per_cpu(tvec_bases, cpu);

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the local CPU.
		 * However we can't change timer's base while it is running,
		 * otherwise del_timer_sync() can't detect that the timer's
		 * handler has not finished yet. This also guarantees that
		 * the timer is serialized wrt itself.
		 */
		if (likely(base->running_timer != timer)) {
			/* See the comment in lock_timer_base() */
			timer_set_base(timer, NULL);
			spin_unlock(&base->lock);
			base = new_base;
			spin_lock(&base->lock);
			timer_set_base(timer, base);
		}
	}

	timer->expires = expires;
	if (time_before(timer->expires, base->next_timer) &&
	    !tbase_get_deferrable(timer->base))
		base->next_timer = timer->expires;
	internal_add_timer(base, timer);

out_unlock:
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}

/**
 * mod_timer_pending - modify a pending timer's timeout
 * @timer: the pending timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pending() is the same for pending timers as mod_timer(),
 * but will not re-activate and modify already deleted timers.
 *
 * It is useful for unserialized use of timers.
 */
int mod_timer_pending(struct timer_list *timer, unsigned long expires)
{
	return __mod_timer(timer, expires, true, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer_pending);

/*
 * Decide where to put the timer while taking the slack into account
 *
 * Algorithm:
 *   1) calculate the maximum (absolute) time
 *   2) calculate the highest bit where the expires and new max are different
 *   3) use this bit to make a mask
 *   4) use the bitmask to round down the maximum time, so that all last
 *      bits are zeros
 */
static inline
unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
{
	unsigned long expires_limit, mask;
	int bit;

	expires_limit = expires;

	if (timer->slack >= 0) {
		expires_limit = expires + timer->slack;
	} else {
		unsigned long now = jiffies;

		/* No slack, if already expired else auto slack 0.4% */
		if (time_after(expires, now))
			expires_limit = expires + (expires - now)/256;
	}
	mask = expires ^ expires_limit;
	if (mask == 0)
		return expires;

	bit = find_last_bit(&mask, BITS_PER_LONG);

	mask = (1 << bit) - 1;

	expires_limit = expires_limit & ~(mask);

	return expires_limit;
}
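
/*
 * Worked example (auto slack path, timer->slack == -1): with
 * jiffies == 1000 and a requested expiry of 9192 (8192 ticks out):
 *
 *	expires_limit = 9192 + (9192 - 1000)/256 = 9224
 *	mask = 9192 ^ 9224 = 0x7e0	highest differing bit is bit 10
 *	mask = (1 << 10) - 1 = 1023
 *	expires_limit = 9224 & ~1023 = 9216
 *
 * The expiry is pushed from 9192 to the 1024-aligned 9216 (at most 0.4%
 * late), so timers with nearby deadlines collapse onto the same aligned
 * slot and can be expired in a single wakeup.
 */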

/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expires field of an
 * active timer (if the timer is inactive it will be activated).
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	/*
	 * This is a common optimization triggered by the
	 * networking code - if the timer is re-modified
	 * to be the same thing then just return:
	 */
	if (timer_pending(timer) && timer->expires == expires)
		return 1;

	expires = apply_slack(timer, expires);

	return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer);
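
/*
 * A minimal watchdog-style sketch (names are hypothetical, the timer is
 * assumed to have been set up elsewhere). mod_timer() is the one call
 * that is safe against concurrent callers and against the callback
 * firing at the same moment:
 */
static struct timer_list example_watchdog_timer;	/* hypothetical */

static void example_touch_watchdog(void)
{
	/* Activates the timer if inactive, otherwise moves the deadline. */
	mod_timer(&example_watchdog_timer, jiffies + 30 * HZ);
}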

/**
 * mod_timer_pinned - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pinned() is a way to update the expires field of an
 * active timer (if the timer is inactive it will be activated)
 * without allowing the timer to be migrated to a different CPU.
 *
 * mod_timer_pinned(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 */
int mod_timer_pinned(struct timer_list *timer, unsigned long expires)
{
	if (timer->expires == expires && timer_pending(timer))
		return 1;

	return __mod_timer(timer, expires, false, TIMER_PINNED);
}
EXPORT_SYMBOL(mod_timer_pinned);

/**
 * add_timer - start a timer
 * @timer: the timer to be added
 *
 * The kernel will do a ->function(->data) callback from the
 * timer interrupt at the ->expires point in the future. The
 * current time is 'jiffies'.
 *
 * The timer's ->expires, ->function (and if the handler uses it, ->data)
 * fields must be set prior to calling this function.
 *
 * Timers with an ->expires field in the past will be executed in the next
 * timer tick.
 */
void add_timer(struct timer_list *timer)
{
	BUG_ON(timer_pending(timer));
	mod_timer(timer, timer->expires);
}
EXPORT_SYMBOL(add_timer);

/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
	struct tvec_base *base = per_cpu(tvec_bases, cpu);
	unsigned long flags;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(timer_pending(timer) || !timer->function);
	spin_lock_irqsave(&base->lock, flags);
	timer_set_base(timer, base);
	debug_activate(timer, timer->expires);
	if (time_before(timer->expires, base->next_timer) &&
	    !tbase_get_deferrable(timer->base))
		base->next_timer = timer->expires;
	internal_add_timer(base, timer);
	/*
	 * Check whether the other CPU is idle and needs to be
	 * triggered to reevaluate the timer wheel when nohz is
	 * active. We are protected against the other CPU fiddling
	 * with the timer by holding the timer base lock. This also
	 * makes sure that a CPU on the way to idle can not evaluate
	 * the timer wheel.
	 */
	wake_up_idle_cpu(cpu);
	spin_unlock_irqrestore(&base->lock, flags);
}
EXPORT_SYMBOL_GPL(add_timer_on);
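
/*
 * A minimal sketch (names are hypothetical) of arming a timer on a
 * specific CPU, e.g. so the callback runs next to that CPU's data.
 * ->function must already be set and the timer must not be pending:
 */
static struct timer_list example_percpu_timer;		/* hypothetical */

static void example_arm_on_cpu(int cpu)
{
	example_percpu_timer.expires = jiffies + HZ;
	add_timer_on(&example_percpu_timer, cpu);
}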

/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
	struct tvec_base *base;
	unsigned long flags;
	int ret = 0;

	timer_stats_timer_clear_start_info(timer);
	if (timer_pending(timer)) {
		base = lock_timer_base(timer, &flags);
		if (timer_pending(timer)) {
			detach_timer(timer, 1);
			if (timer->expires == base->next_timer &&
			    !tbase_get_deferrable(timer->base))
				base->next_timer = base->timer_jiffies;
			ret = 1;
		}
		spin_unlock_irqrestore(&base->lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL(del_timer);
|  | 922 |  | 
| Rolf Eike Beer | 2aae4a1 | 2006-09-29 01:59:46 -0700 | [diff] [blame] | 923 | /** | 
|  | 924 | * try_to_del_timer_sync - Try to deactivate a timer | 
|  | 925 | * @timer: timer do del | 
|  | 926 | * | 
| Oleg Nesterov | fd450b7 | 2005-06-23 00:08:59 -0700 | [diff] [blame] | 927 | * This function tries to deactivate a timer. Upon successful (ret >= 0) | 
|  | 928 | * exit the timer is not queued and the handler is not running on any CPU. | 
| Oleg Nesterov | fd450b7 | 2005-06-23 00:08:59 -0700 | [diff] [blame] | 929 | */ | 
|  | 930 | int try_to_del_timer_sync(struct timer_list *timer) | 
|  | 931 | { | 
| Pavel Machek | a6fa8e5 | 2008-01-30 13:30:00 +0100 | [diff] [blame] | 932 | struct tvec_base *base; | 
| Oleg Nesterov | fd450b7 | 2005-06-23 00:08:59 -0700 | [diff] [blame] | 933 | unsigned long flags; | 
|  | 934 | int ret = -1; | 
|  | 935 |  | 
|  | 936 | base = lock_timer_base(timer, &flags); | 
|  | 937 |  | 
|  | 938 | if (base->running_timer == timer) | 
|  | 939 | goto out; | 
|  | 940 |  | 
| Andrew Morton | 829b6c1 | 2010-03-11 14:04:30 -0800 | [diff] [blame] | 941 | timer_stats_timer_clear_start_info(timer); | 
| Oleg Nesterov | fd450b7 | 2005-06-23 00:08:59 -0700 | [diff] [blame] | 942 | ret = 0; | 
|  | 943 | if (timer_pending(timer)) { | 
|  | 944 | detach_timer(timer, 1); | 
| Martin Schwidefsky | 97fd9ed | 2009-07-21 20:25:05 +0200 | [diff] [blame] | 945 | if (timer->expires == base->next_timer && | 
|  | 946 | !tbase_get_deferrable(timer->base)) | 
|  | 947 | base->next_timer = base->timer_jiffies; | 
| Oleg Nesterov | fd450b7 | 2005-06-23 00:08:59 -0700 | [diff] [blame] | 948 | ret = 1; | 
|  | 949 | } | 
|  | 950 | out: | 
|  | 951 | spin_unlock_irqrestore(&base->lock, flags); | 
|  | 952 |  | 
|  | 953 | return ret; | 
|  | 954 | } | 
| David Howells | e19dff1 | 2007-04-26 15:46:56 -0700 | [diff] [blame] | 955 | EXPORT_SYMBOL(try_to_del_timer_sync); | 
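/*
 * Illustrative sketch, not part of timer.c: try_to_del_timer_sync() is the
 * non-blocking building block for callers that must not spin, e.g. because
 * they hold a lock the timer handler also takes. It returns -1 while the
 * handler is running, 0 if the timer was not pending and 1 if a pending
 * timer was removed. The hypothetical my_dev names continue the sketch
 * after del_timer() above.
 */
static bool my_dev_try_cancel(struct my_dev *dev)
{
	int ret = try_to_del_timer_sync(&dev->poll_timer);

	if (ret < 0)
		return false;	/* handler is running; caller retries later */

	return true;		/* 0: was not pending, 1: cancelled */
}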
|  | 956 |  | 
| Yong Zhang | 6f1bc45 | 2010-10-20 15:57:31 -0700 | [diff] [blame] | 957 | #ifdef CONFIG_SMP | 
| Rolf Eike Beer | 2aae4a1 | 2006-09-29 01:59:46 -0700 | [diff] [blame] | 958 | /** | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 959 | * del_timer_sync - deactivate a timer and wait for the handler to finish. | 
|  | 960 | * @timer: the timer to be deactivated | 
|  | 961 | * | 
|  | 962 | * This function only differs from del_timer() on SMP: besides deactivating | 
|  | 963 | * the timer it also makes sure the handler has finished executing on other | 
|  | 964 | * CPUs. | 
|  | 965 | * | 
| Robert P. J. Day | 72fd4a3 | 2007-02-10 01:45:59 -0800 | [diff] [blame] | 966 | * Synchronization rules: Callers must prevent restarting of the timer, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 967 | * otherwise this function is meaningless. It must not be called from | 
| Peter Zijlstra | 7ff2079 | 2011-02-08 15:18:00 +0100 | [diff] [blame] | 968 | * interrupt contexts. The caller must not hold locks which would prevent | 
| Oleg Nesterov | 55c888d | 2005-06-23 00:08:56 -0700 | [diff] [blame] | 969 | * completion of the timer's handler. The timer's handler must not call | 
|  | 970 | * add_timer_on(). Upon exit the timer is not queued and the handler is | 
|  | 971 | * not running on any CPU. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 972 | * | 
| Steven Rostedt | 48228f7 | 2011-02-08 12:39:54 -0500 | [diff] [blame] | 973 | * Note: You must not hold locks that are held in interrupt context | 
|  | 974 | *   while calling this function, even if the lock has nothing to do | 
|  | 975 | *   with the timer in question.  Here's why: | 
|  | 976 | * | 
|  | 977 | *    CPU0                             CPU1 | 
|  | 978 | *    ----                             ---- | 
|  | 979 | *                                   <SOFTIRQ> | 
|  | 980 | *                                   call_timer_fn(); | 
|  | 981 | *                                     base->running_timer = mytimer; | 
|  | 982 | *  spin_lock_irq(somelock); | 
|  | 983 | *                                     <IRQ> | 
|  | 984 | *                                        spin_lock(somelock); | 
|  | 985 | *  del_timer_sync(mytimer); | 
|  | 986 | *   while (base->running_timer == mytimer); | 
|  | 987 | * | 
|  | 988 | * Now del_timer_sync() will never return and never release somelock. | 
|  | 989 | * The interrupt on the other CPU is waiting to grab somelock but | 
|  | 990 | * it has interrupted the softirq that CPU0 is waiting to finish. | 
|  | 991 | * | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 992 | * The function returns whether it has deactivated a pending timer or not. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 993 | */ | 
|  | 994 | int del_timer_sync(struct timer_list *timer) | 
|  | 995 | { | 
| Johannes Berg | 6f2b9b9 | 2009-01-29 16:03:20 +0100 | [diff] [blame] | 996 | #ifdef CONFIG_LOCKDEP | 
| Peter Zijlstra | f266a51 | 2011-02-03 15:09:41 +0100 | [diff] [blame] | 997 | unsigned long flags; | 
|  | 998 |  | 
| Steven Rostedt | 48228f7 | 2011-02-08 12:39:54 -0500 | [diff] [blame] | 999 | /* | 
|  | 1000 | * If lockdep gives a backtrace here, please reference | 
|  | 1001 | * the synchronization rules above. | 
|  | 1002 | */ | 
| Peter Zijlstra | 7ff2079 | 2011-02-08 15:18:00 +0100 | [diff] [blame] | 1003 | local_irq_save(flags); | 
| Johannes Berg | 6f2b9b9 | 2009-01-29 16:03:20 +0100 | [diff] [blame] | 1004 | lock_map_acquire(&timer->lockdep_map); | 
|  | 1005 | lock_map_release(&timer->lockdep_map); | 
| Peter Zijlstra | 7ff2079 | 2011-02-08 15:18:00 +0100 | [diff] [blame] | 1006 | local_irq_restore(flags); | 
| Johannes Berg | 6f2b9b9 | 2009-01-29 16:03:20 +0100 | [diff] [blame] | 1007 | #endif | 
| Yong Zhang | 466bd30 | 2010-10-20 15:57:33 -0700 | [diff] [blame] | 1008 | /* | 
|  | 1009 | * del_timer_sync() must not be used in hardirq context: waiting | 
|  | 1010 | * for a running handler there could lead to deadlock. | 
|  | 1011 | */ | 
|  | 1012 | WARN_ON(in_irq()); | 
| Oleg Nesterov | fd450b7 | 2005-06-23 00:08:59 -0700 | [diff] [blame] | 1013 | for (;;) { | 
|  | 1014 | int ret = try_to_del_timer_sync(timer); | 
|  | 1015 | if (ret >= 0) | 
|  | 1016 | return ret; | 
| Andrew Morton | a000965 | 2006-07-14 00:24:06 -0700 | [diff] [blame] | 1017 | cpu_relax(); | 
| Oleg Nesterov | fd450b7 | 2005-06-23 00:08:59 -0700 | [diff] [blame] | 1018 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1019 | } | 
|  | 1020 | EXPORT_SYMBOL(del_timer_sync); | 
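/*
 * Illustrative sketch, not part of timer.c: typical teardown using
 * del_timer_sync(). Per the rules above it runs in process context, holds
 * no lock that the timer handler might also take, and is only called once
 * nothing can re-arm the timer any more (the hypothetical shutting_down
 * flag from the earlier sketch). Assumes <linux/slab.h> for kfree().
 */
static void my_dev_destroy(struct my_dev *dev)
{
	dev->shutting_down = true;	/* my_dev_poll() stops re-arming */
	del_timer_sync(&dev->poll_timer);
	kfree(dev);			/* safe: the handler has finished on all CPUs */
}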
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1021 | #endif | 
|  | 1022 |  | 
| Pavel Machek | a6fa8e5 | 2008-01-30 13:30:00 +0100 | [diff] [blame] | 1023 | static int cascade(struct tvec_base *base, struct tvec *tv, int index) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1024 | { | 
|  | 1025 | /* cascade all the timers from tv up one level */ | 
| Porpoise | 3439dd8 | 2006-06-23 02:05:56 -0700 | [diff] [blame] | 1026 | struct timer_list *timer, *tmp; | 
|  | 1027 | struct list_head tv_list; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1028 |  | 
| Porpoise | 3439dd8 | 2006-06-23 02:05:56 -0700 | [diff] [blame] | 1029 | list_replace_init(tv->vec + index, &tv_list); | 
|  | 1030 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1031 | /* | 
| Porpoise | 3439dd8 | 2006-06-23 02:05:56 -0700 | [diff] [blame] | 1032 | * We are removing _all_ timers from the list, so we | 
|  | 1033 | * don't have to detach them individually. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1034 | */ | 
| Porpoise | 3439dd8 | 2006-06-23 02:05:56 -0700 | [diff] [blame] | 1035 | list_for_each_entry_safe(timer, tmp, &tv_list, entry) { | 
| Venki Pallipadi | 6e453a6 | 2007-05-08 00:27:44 -0700 | [diff] [blame] | 1036 | BUG_ON(tbase_get_base(timer->base) != base); | 
| Porpoise | 3439dd8 | 2006-06-23 02:05:56 -0700 | [diff] [blame] | 1037 | internal_add_timer(base, timer); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1038 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1039 |  | 
|  | 1040 | return index; | 
|  | 1041 | } | 
|  | 1042 |  | 
| Thomas Gleixner | 576da12 | 2010-03-12 21:10:29 +0100 | [diff] [blame] | 1043 | static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long), | 
|  | 1044 | unsigned long data) | 
|  | 1045 | { | 
|  | 1046 | int preempt_count = preempt_count(); | 
|  | 1047 |  | 
|  | 1048 | #ifdef CONFIG_LOCKDEP | 
|  | 1049 | /* | 
|  | 1050 | * It is permissible to free the timer from inside the | 
|  | 1051 | * function that is called from it; we need to take this into | 
|  | 1052 | * account for lockdep too. To avoid bogus "held lock freed" | 
|  | 1053 | * warnings as well as problems when looking into | 
|  | 1054 | * timer->lockdep_map, make a copy and use that here. | 
|  | 1055 | */ | 
|  | 1056 | struct lockdep_map lockdep_map = timer->lockdep_map; | 
|  | 1057 | #endif | 
|  | 1058 | /* | 
|  | 1059 | * Couple the lock chain with the lock chain at | 
|  | 1060 | * del_timer_sync() by acquiring the lock_map around the fn() | 
|  | 1061 | * call here and in del_timer_sync(). | 
|  | 1062 | */ | 
|  | 1063 | lock_map_acquire(&lockdep_map); | 
|  | 1064 |  | 
|  | 1065 | trace_timer_expire_entry(timer); | 
|  | 1066 | fn(data); | 
|  | 1067 | trace_timer_expire_exit(timer); | 
|  | 1068 |  | 
|  | 1069 | lock_map_release(&lockdep_map); | 
|  | 1070 |  | 
|  | 1071 | if (preempt_count != preempt_count()) { | 
| Thomas Gleixner | 802702e | 2010-03-12 20:13:23 +0100 | [diff] [blame] | 1072 | WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n", | 
|  | 1073 | fn, preempt_count, preempt_count()); | 
|  | 1074 | /* | 
|  | 1075 | * Restore the preempt count. That gives us a decent | 
|  | 1076 | * chance to survive and extract information. If the | 
|  | 1077 | * callback kept a lock held, bad luck, but not worse | 
|  | 1078 | * than the BUG() we had. | 
|  | 1079 | */ | 
|  | 1080 | preempt_count() = preempt_count; | 
| Thomas Gleixner | 576da12 | 2010-03-12 21:10:29 +0100 | [diff] [blame] | 1081 | } | 
|  | 1082 | } | 
|  | 1083 |  | 
| Rolf Eike Beer | 2aae4a1 | 2006-09-29 01:59:46 -0700 | [diff] [blame] | 1084 | #define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK) | 
|  | 1085 |  | 
|  | 1086 | /** | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1087 | * __run_timers - run all expired timers (if any) on this CPU. | 
|  | 1088 | * @base: the timer vector to be processed. | 
|  | 1089 | * | 
|  | 1090 | * This function cascades all vectors and executes all expired | 
|  | 1091 | * timers. | 
|  | 1092 | */ | 
| Pavel Machek | a6fa8e5 | 2008-01-30 13:30:00 +0100 | [diff] [blame] | 1093 | static inline void __run_timers(struct tvec_base *base) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1094 | { | 
|  | 1095 | struct timer_list *timer; | 
|  | 1096 |  | 
| Oleg Nesterov | 3691c51 | 2006-03-31 02:30:30 -0800 | [diff] [blame] | 1097 | spin_lock_irq(&base->lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1098 | while (time_after_eq(jiffies, base->timer_jiffies)) { | 
| Oleg Nesterov | 626ab0e | 2006-06-23 02:05:55 -0700 | [diff] [blame] | 1099 | struct list_head work_list; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1100 | struct list_head *head = &work_list; | 
| Thomas Gleixner | 6819457 | 2007-07-19 01:49:16 -0700 | [diff] [blame] | 1101 | int index = base->timer_jiffies & TVR_MASK; | 
| Oleg Nesterov | 626ab0e | 2006-06-23 02:05:55 -0700 | [diff] [blame] | 1102 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1103 | /* | 
|  | 1104 | * Cascade timers: | 
|  | 1105 | */ | 
|  | 1106 | if (!index && | 
|  | 1107 | (!cascade(base, &base->tv2, INDEX(0))) && | 
|  | 1108 | (!cascade(base, &base->tv3, INDEX(1))) && | 
|  | 1109 | !cascade(base, &base->tv4, INDEX(2))) | 
|  | 1110 | cascade(base, &base->tv5, INDEX(3)); | 
| Oleg Nesterov | 626ab0e | 2006-06-23 02:05:55 -0700 | [diff] [blame] | 1111 | ++base->timer_jiffies; | 
|  | 1112 | list_replace_init(base->tv1.vec + index, &work_list); | 
| Oleg Nesterov | 55c888d | 2005-06-23 00:08:56 -0700 | [diff] [blame] | 1113 | while (!list_empty(head)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1114 | void (*fn)(unsigned long); | 
|  | 1115 | unsigned long data; | 
|  | 1116 |  | 
| Pavel Emelianov | b5e6181 | 2007-05-08 00:30:19 -0700 | [diff] [blame] | 1117 | timer = list_first_entry(head, struct timer_list,entry); | 
| Thomas Gleixner | 6819457 | 2007-07-19 01:49:16 -0700 | [diff] [blame] | 1118 | fn = timer->function; | 
|  | 1119 | data = timer->data; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1120 |  | 
| Ingo Molnar | 82f67cd | 2007-02-16 01:28:13 -0800 | [diff] [blame] | 1121 | timer_stats_account_timer(timer); | 
|  | 1122 |  | 
| Yong Zhang | 6f1bc45 | 2010-10-20 15:57:31 -0700 | [diff] [blame] | 1123 | base->running_timer = timer; | 
| Oleg Nesterov | 55c888d | 2005-06-23 00:08:56 -0700 | [diff] [blame] | 1124 | detach_timer(timer, 1); | 
| Johannes Berg | 6f2b9b9 | 2009-01-29 16:03:20 +0100 | [diff] [blame] | 1125 |  | 
| Oleg Nesterov | 3691c51 | 2006-03-31 02:30:30 -0800 | [diff] [blame] | 1126 | spin_unlock_irq(&base->lock); | 
| Thomas Gleixner | 576da12 | 2010-03-12 21:10:29 +0100 | [diff] [blame] | 1127 | call_timer_fn(timer, fn, data); | 
| Oleg Nesterov | 3691c51 | 2006-03-31 02:30:30 -0800 | [diff] [blame] | 1128 | spin_lock_irq(&base->lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1129 | } | 
|  | 1130 | } | 
| Yong Zhang | 6f1bc45 | 2010-10-20 15:57:31 -0700 | [diff] [blame] | 1131 | base->running_timer = NULL; | 
| Oleg Nesterov | 3691c51 | 2006-03-31 02:30:30 -0800 | [diff] [blame] | 1132 | spin_unlock_irq(&base->lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1133 | } | 
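/*
 * Worked example of the cascade above, assuming the usual (non
 * CONFIG_BASE_SMALL) geometry of TVR_BITS = 8 and TVN_BITS = 6: tv1 has
 * 256 one-jiffy buckets, tv2-tv5 have 64 buckets each. Whenever
 * base->timer_jiffies & TVR_MASK wraps to 0, the tv2 bucket selected by
 * INDEX(0) = (timer_jiffies >> 8) & 63 is emptied and its timers are
 * re-hashed into the lower wheels via internal_add_timer(); if that
 * bucket index is itself 0, the tv3 bucket at
 * INDEX(1) = (timer_jiffies >> 14) & 63 is cascaded as well, and so on
 * up to tv5.
 */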
|  | 1134 |  | 
| Russell King | ee9c578 | 2008-04-20 13:59:33 +0100 | [diff] [blame] | 1135 | #ifdef CONFIG_NO_HZ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1136 | /* | 
|  | 1137 | * Find out when the next timer event is due to happen. This | 
| Randy Dunlap | 90cba64 | 2009-08-25 14:35:41 -0700 | [diff] [blame] | 1138 | * is used on S/390 to stop all activity when a CPU is idle. | 
|  | 1139 | * This function needs to be called with interrupts disabled. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1140 | */ | 
| Pavel Machek | a6fa8e5 | 2008-01-30 13:30:00 +0100 | [diff] [blame] | 1141 | static unsigned long __next_timer_interrupt(struct tvec_base *base) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1142 | { | 
| Thomas Gleixner | 1cfd684 | 2007-02-16 01:27:46 -0800 | [diff] [blame] | 1143 | unsigned long timer_jiffies = base->timer_jiffies; | 
| Thomas Gleixner | eaad084 | 2007-05-29 23:47:39 +0200 | [diff] [blame] | 1144 | unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA; | 
| Thomas Gleixner | 1cfd684 | 2007-02-16 01:27:46 -0800 | [diff] [blame] | 1145 | int index, slot, array, found = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1146 | struct timer_list *nte; | 
| Pavel Machek | a6fa8e5 | 2008-01-30 13:30:00 +0100 | [diff] [blame] | 1147 | struct tvec *varray[4]; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1148 |  | 
|  | 1149 | /* Look for timer events in tv1. */ | 
| Thomas Gleixner | 1cfd684 | 2007-02-16 01:27:46 -0800 | [diff] [blame] | 1150 | index = slot = timer_jiffies & TVR_MASK; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1151 | do { | 
| Thomas Gleixner | 1cfd684 | 2007-02-16 01:27:46 -0800 | [diff] [blame] | 1152 | list_for_each_entry(nte, base->tv1.vec + slot, entry) { | 
| Thomas Gleixner | 6819457 | 2007-07-19 01:49:16 -0700 | [diff] [blame] | 1153 | if (tbase_get_deferrable(nte->base)) | 
|  | 1154 | continue; | 
| Venki Pallipadi | 6e453a6 | 2007-05-08 00:27:44 -0700 | [diff] [blame] | 1155 |  | 
| Thomas Gleixner | 1cfd684 | 2007-02-16 01:27:46 -0800 | [diff] [blame] | 1156 | found = 1; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1157 | expires = nte->expires; | 
| Thomas Gleixner | 1cfd684 | 2007-02-16 01:27:46 -0800 | [diff] [blame] | 1158 | /* Look at the cascade bucket(s)? */ | 
|  | 1159 | if (!index || slot < index) | 
|  | 1160 | goto cascade; | 
|  | 1161 | return expires; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1162 | } | 
| Thomas Gleixner | 1cfd684 | 2007-02-16 01:27:46 -0800 | [diff] [blame] | 1163 | slot = (slot + 1) & TVR_MASK; | 
|  | 1164 | } while (slot != index); | 
|  | 1165 |  | 
|  | 1166 | cascade: | 
|  | 1167 | /* Calculate the next cascade event */ | 
|  | 1168 | if (index) | 
|  | 1169 | timer_jiffies += TVR_SIZE - index; | 
|  | 1170 | timer_jiffies >>= TVR_BITS; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1171 |  | 
|  | 1172 | /* Check tv2-tv5. */ | 
|  | 1173 | varray[0] = &base->tv2; | 
|  | 1174 | varray[1] = &base->tv3; | 
|  | 1175 | varray[2] = &base->tv4; | 
|  | 1176 | varray[3] = &base->tv5; | 
| Thomas Gleixner | 1cfd684 | 2007-02-16 01:27:46 -0800 | [diff] [blame] | 1177 |  | 
|  | 1178 | for (array = 0; array < 4; array++) { | 
| Pavel Machek | a6fa8e5 | 2008-01-30 13:30:00 +0100 | [diff] [blame] | 1179 | struct tvec *varp = varray[array]; | 
| Thomas Gleixner | 1cfd684 | 2007-02-16 01:27:46 -0800 | [diff] [blame] | 1180 |  | 
|  | 1181 | index = slot = timer_jiffies & TVN_MASK; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1182 | do { | 
| Thomas Gleixner | 1cfd684 | 2007-02-16 01:27:46 -0800 | [diff] [blame] | 1183 | list_for_each_entry(nte, varp->vec + slot, entry) { | 
| Jon Hunter | a0419888 | 2009-05-01 13:10:23 -0700 | [diff] [blame] | 1184 | if (tbase_get_deferrable(nte->base)) | 
|  | 1185 | continue; | 
|  | 1186 |  | 
| Thomas Gleixner | 1cfd684 | 2007-02-16 01:27:46 -0800 | [diff] [blame] | 1187 | found = 1; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1188 | if (time_before(nte->expires, expires)) | 
|  | 1189 | expires = nte->expires; | 
| Thomas Gleixner | 1cfd684 | 2007-02-16 01:27:46 -0800 | [diff] [blame] | 1190 | } | 
|  | 1191 | /* | 
|  | 1192 | * Are we still searching for the first timer or are | 
|  | 1193 | * we looking up the cascade buckets? | 
|  | 1194 | */ | 
|  | 1195 | if (found) { | 
|  | 1196 | /* Look at the cascade bucket(s)? */ | 
|  | 1197 | if (!index || slot < index) | 
|  | 1198 | break; | 
|  | 1199 | return expires; | 
|  | 1200 | } | 
|  | 1201 | slot = (slot + 1) & TVN_MASK; | 
|  | 1202 | } while (slot != index); | 
|  | 1203 |  | 
|  | 1204 | if (index) | 
|  | 1205 | timer_jiffies += TVN_SIZE - index; | 
|  | 1206 | timer_jiffies >>= TVN_BITS; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1207 | } | 
| Thomas Gleixner | 1cfd684 | 2007-02-16 01:27:46 -0800 | [diff] [blame] | 1208 | return expires; | 
|  | 1209 | } | 
|  | 1210 |  | 
|  | 1211 | /* | 
|  | 1212 | * Check if the next hrtimer event is before the next timer wheel | 
|  | 1213 | * event: | 
|  | 1214 | */ | 
|  | 1215 | static unsigned long cmp_next_hrtimer_event(unsigned long now, | 
|  | 1216 | unsigned long expires) | 
|  | 1217 | { | 
|  | 1218 | ktime_t hr_delta = hrtimer_get_next_event(); | 
|  | 1219 | struct timespec tsdelta; | 
| Thomas Gleixner | 9501b6c | 2007-03-25 14:31:17 +0200 | [diff] [blame] | 1220 | unsigned long delta; | 
| Thomas Gleixner | 1cfd684 | 2007-02-16 01:27:46 -0800 | [diff] [blame] | 1221 |  | 
|  | 1222 | if (hr_delta.tv64 == KTIME_MAX) | 
|  | 1223 | return expires; | 
|  | 1224 |  | 
| Thomas Gleixner | 9501b6c | 2007-03-25 14:31:17 +0200 | [diff] [blame] | 1225 | /* | 
|  | 1226 | * Expired timer available, let it expire in the next tick | 
|  | 1227 | */ | 
|  | 1228 | if (hr_delta.tv64 <= 0) | 
|  | 1229 | return now + 1; | 
| Thomas Gleixner | 1cfd684 | 2007-02-16 01:27:46 -0800 | [diff] [blame] | 1230 |  | 
|  | 1231 | tsdelta = ktime_to_timespec(hr_delta); | 
| Thomas Gleixner | 9501b6c | 2007-03-25 14:31:17 +0200 | [diff] [blame] | 1232 | delta = timespec_to_jiffies(&tsdelta); | 
| Thomas Gleixner | eaad084 | 2007-05-29 23:47:39 +0200 | [diff] [blame] | 1233 |  | 
|  | 1234 | /* | 
|  | 1235 | * Limit the delta to the max value, which is checked in | 
|  | 1236 | * tick_nohz_stop_sched_tick(): | 
|  | 1237 | */ | 
|  | 1238 | if (delta > NEXT_TIMER_MAX_DELTA) | 
|  | 1239 | delta = NEXT_TIMER_MAX_DELTA; | 
|  | 1240 |  | 
| Thomas Gleixner | 9501b6c | 2007-03-25 14:31:17 +0200 | [diff] [blame] | 1241 | /* | 
|  | 1242 | * Take rounding errors into account and make sure that it | 
|  | 1243 | * expires in the next tick. Otherwise we go into an endless | 
|  | 1244 | * ping pong due to tick_nohz_stop_sched_tick() retriggering | 
|  | 1245 | * the timer softirq. | 
|  | 1246 | */ | 
|  | 1247 | if (delta < 1) | 
|  | 1248 | delta = 1; | 
|  | 1249 | now += delta; | 
| Thomas Gleixner | 1cfd684 | 2007-02-16 01:27:46 -0800 | [diff] [blame] | 1250 | if (time_before(now, expires)) | 
|  | 1251 | return now; | 
|  | 1252 | return expires; | 
|  | 1253 | } | 
|  | 1254 |  | 
|  | 1255 | /** | 
| Li Zefan | 8dce39c | 2007-11-05 14:51:10 -0800 | [diff] [blame] | 1256 | * get_next_timer_interrupt - return the jiffy of the next pending timer | 
| Randy Dunlap | 05fb6bf | 2007-02-28 20:12:13 -0800 | [diff] [blame] | 1257 | * @now: current time (in jiffies) | 
| Thomas Gleixner | 1cfd684 | 2007-02-16 01:27:46 -0800 | [diff] [blame] | 1258 | */ | 
| Thomas Gleixner | fd064b9 | 2007-02-16 01:27:47 -0800 | [diff] [blame] | 1259 | unsigned long get_next_timer_interrupt(unsigned long now) | 
| Thomas Gleixner | 1cfd684 | 2007-02-16 01:27:46 -0800 | [diff] [blame] | 1260 | { | 
| Christoph Lameter | 7496351 | 2010-11-30 14:05:53 -0600 | [diff] [blame] | 1261 | struct tvec_base *base = __this_cpu_read(tvec_bases); | 
| Thomas Gleixner | fd064b9 | 2007-02-16 01:27:47 -0800 | [diff] [blame] | 1262 | unsigned long expires; | 
| Thomas Gleixner | 1cfd684 | 2007-02-16 01:27:46 -0800 | [diff] [blame] | 1263 |  | 
| Heiko Carstens | dbd87b5 | 2010-12-01 10:11:09 +0100 | [diff] [blame] | 1264 | /* | 
|  | 1265 | * Pretend that there is no timer pending if the cpu is offline. | 
|  | 1266 | * Possible pending timers will be migrated later to an active cpu. | 
|  | 1267 | */ | 
|  | 1268 | if (cpu_is_offline(smp_processor_id())) | 
|  | 1269 | return now + NEXT_TIMER_MAX_DELTA; | 
| Thomas Gleixner | 1cfd684 | 2007-02-16 01:27:46 -0800 | [diff] [blame] | 1270 | spin_lock(&base->lock); | 
| Martin Schwidefsky | 97fd9ed | 2009-07-21 20:25:05 +0200 | [diff] [blame] | 1271 | if (time_before_eq(base->next_timer, base->timer_jiffies)) | 
|  | 1272 | base->next_timer = __next_timer_interrupt(base); | 
|  | 1273 | expires = base->next_timer; | 
| Oleg Nesterov | 3691c51 | 2006-03-31 02:30:30 -0800 | [diff] [blame] | 1274 | spin_unlock(&base->lock); | 
| Tony Lindgren | 6923974 | 2006-03-06 15:42:45 -0800 | [diff] [blame] | 1275 |  | 
| Thomas Gleixner | 1cfd684 | 2007-02-16 01:27:46 -0800 | [diff] [blame] | 1276 | if (time_before_eq(expires, now)) | 
|  | 1277 | return now; | 
| Zachary Amsden | 0662b71 | 2006-05-20 15:00:24 -0700 | [diff] [blame] | 1278 |  | 
| Thomas Gleixner | 1cfd684 | 2007-02-16 01:27:46 -0800 | [diff] [blame] | 1279 | return cmp_next_hrtimer_event(now, expires); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1280 | } | 
|  | 1281 | #endif | 
|  | 1282 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1283 | /* | 
| Daniel Walker | 5b4db0c | 2007-10-18 03:06:11 -0700 | [diff] [blame] | 1284 | * Called from the timer interrupt handler to charge one tick to the current | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1285 | * process.  user_tick is 1 if the tick is user time, 0 for system. | 
|  | 1286 | */ | 
|  | 1287 | void update_process_times(int user_tick) | 
|  | 1288 | { | 
|  | 1289 | struct task_struct *p = current; | 
|  | 1290 | int cpu = smp_processor_id(); | 
|  | 1291 |  | 
|  | 1292 | /* Note: this timer irq context must be accounted for as well. */ | 
| Paul Mackerras | fa13a5a | 2007-11-09 22:39:38 +0100 | [diff] [blame] | 1293 | account_process_tick(p, user_tick); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1294 | run_local_timers(); | 
| Paul E. McKenney | a157229 | 2009-08-22 13:56:51 -0700 | [diff] [blame] | 1295 | rcu_check_callbacks(cpu, user_tick); | 
| Peter Zijlstra | b845b51 | 2008-08-08 21:47:09 +0200 | [diff] [blame] | 1296 | printk_tick(); | 
| Peter Zijlstra | e360adb | 2010-10-14 14:01:34 +0800 | [diff] [blame] | 1297 | #ifdef CONFIG_IRQ_WORK | 
|  | 1298 | if (in_irq()) | 
|  | 1299 | irq_work_run(); | 
|  | 1300 | #endif | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1301 | scheduler_tick(); | 
| Thomas Gleixner | 6819457 | 2007-07-19 01:49:16 -0700 | [diff] [blame] | 1302 | run_posix_cpu_timers(p); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1303 | } | 
|  | 1304 |  | 
|  | 1305 | /* | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1306 | * This function runs timers and the timer-tq in bottom half context. | 
|  | 1307 | */ | 
|  | 1308 | static void run_timer_softirq(struct softirq_action *h) | 
|  | 1309 | { | 
| Christoph Lameter | 7496351 | 2010-11-30 14:05:53 -0600 | [diff] [blame] | 1310 | struct tvec_base *base = __this_cpu_read(tvec_bases); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1311 |  | 
| Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1312 | hrtimer_run_pending(); | 
| Ingo Molnar | 82f67cd | 2007-02-16 01:28:13 -0800 | [diff] [blame] | 1313 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1314 | if (time_after_eq(jiffies, base->timer_jiffies)) | 
|  | 1315 | __run_timers(base); | 
|  | 1316 | } | 
|  | 1317 |  | 
|  | 1318 | /* | 
|  | 1319 | * Called by the local, per-CPU timer interrupt on SMP. | 
|  | 1320 | */ | 
|  | 1321 | void run_local_timers(void) | 
|  | 1322 | { | 
| Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1323 | hrtimer_run_queues(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1324 | raise_softirq(TIMER_SOFTIRQ); | 
|  | 1325 | } | 
|  | 1326 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1327 | #ifdef __ARCH_WANT_SYS_ALARM | 
|  | 1328 |  | 
|  | 1329 | /* | 
|  | 1330 | * For backwards compatibility?  This can be done in libc so Alpha | 
|  | 1331 | * and all newer ports shouldn't need it. | 
|  | 1332 | */ | 
| Heiko Carstens | 58fd3aa | 2009-01-14 14:14:03 +0100 | [diff] [blame] | 1333 | SYSCALL_DEFINE1(alarm, unsigned int, seconds) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1334 | { | 
| Thomas Gleixner | c08b8a4 | 2006-03-25 03:06:33 -0800 | [diff] [blame] | 1335 | return alarm_setitimer(seconds); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1336 | } | 
|  | 1337 |  | 
|  | 1338 | #endif | 
|  | 1339 |  | 
|  | 1340 | #ifndef __alpha__ | 
|  | 1341 |  | 
|  | 1342 | /* | 
|  | 1343 | * The Alpha uses getxpid, getxuid, and getxgid instead.  Maybe this | 
|  | 1344 | * should be moved into arch/i386? | 
|  | 1345 | */ | 
|  | 1346 |  | 
|  | 1347 | /** | 
|  | 1348 | * sys_getpid - return the thread group id of the current process | 
|  | 1349 | * | 
|  | 1350 | * Note, despite the name, this returns the tgid not the pid.  The tgid and | 
|  | 1351 | * the pid are identical unless CLONE_THREAD was specified on clone() in | 
|  | 1352 | * which case the tgid is the same in all threads of the same group. | 
|  | 1353 | * | 
|  | 1354 | * This is SMP safe as current->tgid does not change. | 
|  | 1355 | */ | 
| Heiko Carstens | 58fd3aa | 2009-01-14 14:14:03 +0100 | [diff] [blame] | 1356 | SYSCALL_DEFINE0(getpid) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1357 | { | 
| Pavel Emelyanov | b488893 | 2007-10-18 23:40:14 -0700 | [diff] [blame] | 1358 | return task_tgid_vnr(current); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1359 | } | 
|  | 1360 |  | 
|  | 1361 | /* | 
| Kirill Korotaev | 6997a6f | 2006-08-13 23:24:23 -0700 | [diff] [blame] | 1362 | * Accessing ->real_parent is not SMP-safe, it could | 
|  | 1363 | * change from under us. However, we can use a stale | 
|  | 1364 | * value of ->real_parent under rcu_read_lock(), see | 
|  | 1365 | * release_task()->call_rcu(delayed_put_task_struct). | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1366 | */ | 
| Heiko Carstens | dbf040d | 2009-01-14 14:14:04 +0100 | [diff] [blame] | 1367 | SYSCALL_DEFINE0(getppid) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1368 | { | 
|  | 1369 | int pid; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1370 |  | 
| Kirill Korotaev | 6997a6f | 2006-08-13 23:24:23 -0700 | [diff] [blame] | 1371 | rcu_read_lock(); | 
| Pavel Emelyanov | 6c5f3e7 | 2008-02-08 04:19:20 -0800 | [diff] [blame] | 1372 | pid = task_tgid_vnr(current->real_parent); | 
| Kirill Korotaev | 6997a6f | 2006-08-13 23:24:23 -0700 | [diff] [blame] | 1373 | rcu_read_unlock(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1374 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1375 | return pid; | 
|  | 1376 | } | 
|  | 1377 |  | 
| Heiko Carstens | dbf040d | 2009-01-14 14:14:04 +0100 | [diff] [blame] | 1378 | SYSCALL_DEFINE0(getuid) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1379 | { | 
|  | 1380 | /* Only we change this so SMP safe */ | 
| David Howells | 76aac0e | 2008-11-14 10:39:12 +1100 | [diff] [blame] | 1381 | return current_uid(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1382 | } | 
|  | 1383 |  | 
| Heiko Carstens | dbf040d | 2009-01-14 14:14:04 +0100 | [diff] [blame] | 1384 | SYSCALL_DEFINE0(geteuid) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1385 | { | 
|  | 1386 | /* Only we change this so SMP safe */ | 
| David Howells | 76aac0e | 2008-11-14 10:39:12 +1100 | [diff] [blame] | 1387 | return current_euid(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1388 | } | 
|  | 1389 |  | 
| Heiko Carstens | dbf040d | 2009-01-14 14:14:04 +0100 | [diff] [blame] | 1390 | SYSCALL_DEFINE0(getgid) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1391 | { | 
|  | 1392 | /* Only we change this so SMP safe */ | 
| David Howells | 76aac0e | 2008-11-14 10:39:12 +1100 | [diff] [blame] | 1393 | return current_gid(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1394 | } | 
|  | 1395 |  | 
| Heiko Carstens | dbf040d | 2009-01-14 14:14:04 +0100 | [diff] [blame] | 1396 | SYSCALL_DEFINE0(getegid) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1397 | { | 
|  | 1398 | /* Only we change this so SMP safe */ | 
| David Howells | 76aac0e | 2008-11-14 10:39:12 +1100 | [diff] [blame] | 1399 | return  current_egid(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1400 | } | 
|  | 1401 |  | 
|  | 1402 | #endif | 
|  | 1403 |  | 
|  | 1404 | static void process_timeout(unsigned long __data) | 
|  | 1405 | { | 
| Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 1406 | wake_up_process((struct task_struct *)__data); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1407 | } | 
|  | 1408 |  | 
|  | 1409 | /** | 
|  | 1410 | * schedule_timeout - sleep until timeout | 
|  | 1411 | * @timeout: timeout value in jiffies | 
|  | 1412 | * | 
|  | 1413 | * Make the current task sleep until @timeout jiffies have | 
|  | 1414 | * elapsed. The routine will return immediately unless | 
|  | 1415 | * the current task state has been set (see set_current_state()). | 
|  | 1416 | * | 
|  | 1417 | * You can set the task state as follows - | 
|  | 1418 | * | 
|  | 1419 | * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to | 
|  | 1420 | * pass before the routine returns. The routine will return 0. | 
|  | 1421 | * | 
|  | 1422 | * %TASK_INTERRUPTIBLE - the routine may return early if a signal is | 
|  | 1423 | * delivered to the current task. In this case the remaining time | 
|  | 1424 | * in jiffies will be returned, or 0 if the timer expired in time. | 
|  | 1425 | * | 
|  | 1426 | * The current task state is guaranteed to be TASK_RUNNING when this | 
|  | 1427 | * routine returns. | 
|  | 1428 | * | 
|  | 1429 | * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule | 
|  | 1430 | * the CPU away without a bound on the timeout. In this case the return | 
|  | 1431 | * value will be %MAX_SCHEDULE_TIMEOUT. | 
|  | 1432 | * | 
|  | 1433 | * In all cases the return value is guaranteed to be non-negative. | 
|  | 1434 | */ | 
| Harvey Harrison | 7ad5b3a | 2008-02-08 04:19:53 -0800 | [diff] [blame] | 1435 | signed long __sched schedule_timeout(signed long timeout) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1436 | { | 
|  | 1437 | struct timer_list timer; | 
|  | 1438 | unsigned long expire; | 
|  | 1439 |  | 
|  | 1440 | switch (timeout) | 
|  | 1441 | { | 
|  | 1442 | case MAX_SCHEDULE_TIMEOUT: | 
|  | 1443 | /* | 
|  | 1444 | * These two special cases exist purely for the caller's | 
|  | 1445 | * convenience. Nothing more. We could take | 
|  | 1446 | * MAX_SCHEDULE_TIMEOUT from one of the negative values, | 
|  | 1447 | * but I'd like to return a valid offset (>=0) to allow | 
|  | 1448 | * the caller to do everything it wants with the retval. | 
|  | 1449 | */ | 
|  | 1450 | schedule(); | 
|  | 1451 | goto out; | 
|  | 1452 | default: | 
|  | 1453 | /* | 
|  | 1454 | * Another bit of paranoia. Note that the retval will be | 
|  | 1455 | * 0 since no piece of the kernel is supposed to check | 
|  | 1456 | * for a negative retval of schedule_timeout() (since it | 
|  | 1457 | * should never happen anyway). You just have the printk() | 
|  | 1458 | * that will tell you if something has gone wrong and where. | 
|  | 1459 | */ | 
| Andrew Morton | 5b149bc | 2006-12-22 01:10:14 -0800 | [diff] [blame] | 1460 | if (timeout < 0) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1461 | printk(KERN_ERR "schedule_timeout: wrong timeout " | 
| Andrew Morton | 5b149bc | 2006-12-22 01:10:14 -0800 | [diff] [blame] | 1462 | "value %lx\n", timeout); | 
|  | 1463 | dump_stack(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1464 | current->state = TASK_RUNNING; | 
|  | 1465 | goto out; | 
|  | 1466 | } | 
|  | 1467 | } | 
|  | 1468 |  | 
|  | 1469 | expire = timeout + jiffies; | 
|  | 1470 |  | 
| Thomas Gleixner | c6f3a97 | 2008-04-30 00:55:03 -0700 | [diff] [blame] | 1471 | setup_timer_on_stack(&timer, process_timeout, (unsigned long)current); | 
| Arun R Bharadwaj | 597d027 | 2009-04-16 12:13:26 +0530 | [diff] [blame] | 1472 | __mod_timer(&timer, expire, false, TIMER_NOT_PINNED); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1473 | schedule(); | 
|  | 1474 | del_singleshot_timer_sync(&timer); | 
|  | 1475 |  | 
| Thomas Gleixner | c6f3a97 | 2008-04-30 00:55:03 -0700 | [diff] [blame] | 1476 | /* Remove the timer from the object tracker */ | 
|  | 1477 | destroy_timer_on_stack(&timer); | 
|  | 1478 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1479 | timeout = expire - jiffies; | 
|  | 1480 |  | 
|  | 1481 | out: | 
|  | 1482 | return timeout < 0 ? 0 : timeout; | 
|  | 1483 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1484 | EXPORT_SYMBOL(schedule_timeout); | 
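/*
 * Illustrative sketch, not part of timer.c: the calling convention
 * documented above - set the task state first, then call
 * schedule_timeout(). This hypothetical helper sleeps for up to 100ms
 * unless it is woken earlier, and returns the jiffies that were left.
 * (Assumes <linux/sched.h> and <linux/jiffies.h>.)
 */
static signed long my_wait_a_while(void)
{
	set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(msecs_to_jiffies(100));
}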
|  | 1485 |  | 
| Andrew Morton | 8a1c175 | 2005-09-13 01:25:15 -0700 | [diff] [blame] | 1486 | /* | 
|  | 1487 | * We can use __set_current_state() here because schedule_timeout() calls | 
|  | 1488 | * schedule() unconditionally. | 
|  | 1489 | */ | 
| Nishanth Aravamudan | 64ed93a | 2005-09-10 00:27:21 -0700 | [diff] [blame] | 1490 | signed long __sched schedule_timeout_interruptible(signed long timeout) | 
|  | 1491 | { | 
| Andrew Morton | a5a0d52 | 2005-10-30 15:01:42 -0800 | [diff] [blame] | 1492 | __set_current_state(TASK_INTERRUPTIBLE); | 
|  | 1493 | return schedule_timeout(timeout); | 
| Nishanth Aravamudan | 64ed93a | 2005-09-10 00:27:21 -0700 | [diff] [blame] | 1494 | } | 
|  | 1495 | EXPORT_SYMBOL(schedule_timeout_interruptible); | 
|  | 1496 |  | 
| Matthew Wilcox | 294d5cc | 2007-12-06 11:59:46 -0500 | [diff] [blame] | 1497 | signed long __sched schedule_timeout_killable(signed long timeout) | 
|  | 1498 | { | 
|  | 1499 | __set_current_state(TASK_KILLABLE); | 
|  | 1500 | return schedule_timeout(timeout); | 
|  | 1501 | } | 
|  | 1502 | EXPORT_SYMBOL(schedule_timeout_killable); | 
|  | 1503 |  | 
| Nishanth Aravamudan | 64ed93a | 2005-09-10 00:27:21 -0700 | [diff] [blame] | 1504 | signed long __sched schedule_timeout_uninterruptible(signed long timeout) | 
|  | 1505 | { | 
| Andrew Morton | a5a0d52 | 2005-10-30 15:01:42 -0800 | [diff] [blame] | 1506 | __set_current_state(TASK_UNINTERRUPTIBLE); | 
|  | 1507 | return schedule_timeout(timeout); | 
| Nishanth Aravamudan | 64ed93a | 2005-09-10 00:27:21 -0700 | [diff] [blame] | 1508 | } | 
|  | 1509 | EXPORT_SYMBOL(schedule_timeout_uninterruptible); | 
|  | 1510 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1511 | /* Thread ID - the internal kernel "pid" */ | 
| Heiko Carstens | 58fd3aa | 2009-01-14 14:14:03 +0100 | [diff] [blame] | 1512 | SYSCALL_DEFINE0(gettid) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1513 | { | 
| Pavel Emelyanov | b488893 | 2007-10-18 23:40:14 -0700 | [diff] [blame] | 1514 | return task_pid_vnr(current); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1515 | } | 
|  | 1516 |  | 
| Rolf Eike Beer | 2aae4a1 | 2006-09-29 01:59:46 -0700 | [diff] [blame] | 1517 | /** | 
| Kyle McMartin | d4d23ad | 2007-02-10 01:46:00 -0800 | [diff] [blame] | 1518 | * do_sysinfo - fill in sysinfo struct | 
| Rolf Eike Beer | 2aae4a1 | 2006-09-29 01:59:46 -0700 | [diff] [blame] | 1519 | * @info: pointer to buffer to fill | 
| Thomas Gleixner | 6819457 | 2007-07-19 01:49:16 -0700 | [diff] [blame] | 1520 | */ | 
| Kyle McMartin | d4d23ad | 2007-02-10 01:46:00 -0800 | [diff] [blame] | 1521 | int do_sysinfo(struct sysinfo *info) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1522 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1523 | unsigned long mem_total, sav_total; | 
|  | 1524 | unsigned int mem_unit, bitcount; | 
| Thomas Gleixner | 2d02494 | 2009-05-02 20:08:52 +0200 | [diff] [blame] | 1525 | struct timespec tp; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1526 |  | 
| Kyle McMartin | d4d23ad | 2007-02-10 01:46:00 -0800 | [diff] [blame] | 1527 | memset(info, 0, sizeof(struct sysinfo)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1528 |  | 
| Thomas Gleixner | 2d02494 | 2009-05-02 20:08:52 +0200 | [diff] [blame] | 1529 | ktime_get_ts(&tp); | 
|  | 1530 | monotonic_to_bootbased(&tp); | 
|  | 1531 | info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1532 |  | 
| Thomas Gleixner | 2d02494 | 2009-05-02 20:08:52 +0200 | [diff] [blame] | 1533 | get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1534 |  | 
| Thomas Gleixner | 2d02494 | 2009-05-02 20:08:52 +0200 | [diff] [blame] | 1535 | info->procs = nr_threads; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1536 |  | 
| Kyle McMartin | d4d23ad | 2007-02-10 01:46:00 -0800 | [diff] [blame] | 1537 | si_meminfo(info); | 
|  | 1538 | si_swapinfo(info); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1539 |  | 
|  | 1540 | /* | 
|  | 1541 | * If the sum of all the available memory (i.e. ram + swap) | 
|  | 1542 | * is less than can be stored in a 32 bit unsigned long then | 
|  | 1543 | * we can be binary compatible with 2.2.x kernels.  If not, | 
|  | 1544 | * well, in that case 2.2.x was broken anyways... | 
|  | 1545 | * | 
|  | 1546 | *  -Erik Andersen <andersee@debian.org> | 
|  | 1547 | */ | 
|  | 1548 |  | 
| Kyle McMartin | d4d23ad | 2007-02-10 01:46:00 -0800 | [diff] [blame] | 1549 | mem_total = info->totalram + info->totalswap; | 
|  | 1550 | if (mem_total < info->totalram || mem_total < info->totalswap) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1551 | goto out; | 
|  | 1552 | bitcount = 0; | 
| Kyle McMartin | d4d23ad | 2007-02-10 01:46:00 -0800 | [diff] [blame] | 1553 | mem_unit = info->mem_unit; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1554 | while (mem_unit > 1) { | 
|  | 1555 | bitcount++; | 
|  | 1556 | mem_unit >>= 1; | 
|  | 1557 | sav_total = mem_total; | 
|  | 1558 | mem_total <<= 1; | 
|  | 1559 | if (mem_total < sav_total) | 
|  | 1560 | goto out; | 
|  | 1561 | } | 
|  | 1562 |  | 
|  | 1563 | /* | 
|  | 1564 | * If mem_total did not overflow, multiply all memory values by | 
| Kyle McMartin | d4d23ad | 2007-02-10 01:46:00 -0800 | [diff] [blame] | 1565 | * info->mem_unit and set it to 1.  This leaves things compatible | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1566 | * with 2.2.x, and also retains compatibility with earlier 2.4.x | 
|  | 1567 | * kernels... | 
|  | 1568 | */ | 
|  | 1569 |  | 
| Kyle McMartin | d4d23ad | 2007-02-10 01:46:00 -0800 | [diff] [blame] | 1570 | info->mem_unit = 1; | 
|  | 1571 | info->totalram <<= bitcount; | 
|  | 1572 | info->freeram <<= bitcount; | 
|  | 1573 | info->sharedram <<= bitcount; | 
|  | 1574 | info->bufferram <<= bitcount; | 
|  | 1575 | info->totalswap <<= bitcount; | 
|  | 1576 | info->freeswap <<= bitcount; | 
|  | 1577 | info->totalhigh <<= bitcount; | 
|  | 1578 | info->freehigh <<= bitcount; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1579 |  | 
| Kyle McMartin | d4d23ad | 2007-02-10 01:46:00 -0800 | [diff] [blame] | 1580 | out: | 
|  | 1581 | return 0; | 
|  | 1582 | } | 
|  | 1583 |  | 
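/*
 * Worked example of the scaling in do_sysinfo() above: with a 4 KiB
 * info->mem_unit the loop halves mem_unit twelve times, so bitcount = 12;
 * provided doubling mem_total twelve times never overflowed, every memory
 * field is then shifted left by 12 and mem_unit is reported as 1, i.e.
 * the counts become plain byte counts compatible with 2.2.x semantics.
 * On overflow the fields are left in units of mem_unit instead.
 */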
| Heiko Carstens | 1e7bfb2 | 2009-01-14 14:14:29 +0100 | [diff] [blame] | 1584 | SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info) | 
| Kyle McMartin | d4d23ad | 2007-02-10 01:46:00 -0800 | [diff] [blame] | 1585 | { | 
|  | 1586 | struct sysinfo val; | 
|  | 1587 |  | 
|  | 1588 | do_sysinfo(&val); | 
|  | 1589 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1590 | if (copy_to_user(info, &val, sizeof(struct sysinfo))) | 
|  | 1591 | return -EFAULT; | 
|  | 1592 |  | 
|  | 1593 | return 0; | 
|  | 1594 | } | 
|  | 1595 |  | 
| Adrian Bunk | b4be625 | 2007-12-18 18:05:58 +0100 | [diff] [blame] | 1596 | static int __cpuinit init_timers_cpu(int cpu) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1597 | { | 
|  | 1598 | int j; | 
| Pavel Machek | a6fa8e5 | 2008-01-30 13:30:00 +0100 | [diff] [blame] | 1599 | struct tvec_base *base; | 
| Adrian Bunk | b4be625 | 2007-12-18 18:05:58 +0100 | [diff] [blame] | 1600 | static char __cpuinitdata tvec_base_done[NR_CPUS]; | 
| Oleg Nesterov | 55c888d | 2005-06-23 00:08:56 -0700 | [diff] [blame] | 1601 |  | 
| Andrew Morton | ba6edfc | 2006-04-10 22:53:58 -0700 | [diff] [blame] | 1602 | if (!tvec_base_done[cpu]) { | 
| Jan Beulich | a4a6198 | 2006-03-24 03:15:54 -0800 | [diff] [blame] | 1603 | static char boot_done; | 
|  | 1604 |  | 
| Jan Beulich | a4a6198 | 2006-03-24 03:15:54 -0800 | [diff] [blame] | 1605 | if (boot_done) { | 
| Andrew Morton | ba6edfc | 2006-04-10 22:53:58 -0700 | [diff] [blame] | 1606 | /* | 
|  | 1607 | * The APs use this path later in boot | 
|  | 1608 | */ | 
| Christoph Lameter | 94f6030 | 2007-07-17 04:03:29 -0700 | [diff] [blame] | 1609 | base = kmalloc_node(sizeof(*base), | 
|  | 1610 | GFP_KERNEL | __GFP_ZERO, | 
| Jan Beulich | a4a6198 | 2006-03-24 03:15:54 -0800 | [diff] [blame] | 1611 | cpu_to_node(cpu)); | 
|  | 1612 | if (!base) | 
|  | 1613 | return -ENOMEM; | 
| Venki Pallipadi | 6e453a6 | 2007-05-08 00:27:44 -0700 | [diff] [blame] | 1614 |  | 
|  | 1615 | /* Make sure that tvec_base is 2 byte aligned */ | 
|  | 1616 | if (tbase_get_deferrable(base)) { | 
|  | 1617 | WARN_ON(1); | 
|  | 1618 | kfree(base); | 
|  | 1619 | return -ENOMEM; | 
|  | 1620 | } | 
| Andrew Morton | ba6edfc | 2006-04-10 22:53:58 -0700 | [diff] [blame] | 1621 | per_cpu(tvec_bases, cpu) = base; | 
| Jan Beulich | a4a6198 | 2006-03-24 03:15:54 -0800 | [diff] [blame] | 1622 | } else { | 
| Andrew Morton | ba6edfc | 2006-04-10 22:53:58 -0700 | [diff] [blame] | 1623 | /* | 
|  | 1624 | * This is for the boot CPU - we use compile-time | 
|  | 1625 | * static initialisation because per-cpu memory isn't | 
|  | 1626 | * ready yet and because the memory allocators are not | 
|  | 1627 | * initialised either. | 
|  | 1628 | */ | 
| Jan Beulich | a4a6198 | 2006-03-24 03:15:54 -0800 | [diff] [blame] | 1629 | boot_done = 1; | 
| Andrew Morton | ba6edfc | 2006-04-10 22:53:58 -0700 | [diff] [blame] | 1630 | base = &boot_tvec_bases; | 
| Jan Beulich | a4a6198 | 2006-03-24 03:15:54 -0800 | [diff] [blame] | 1631 | } | 
| Andrew Morton | ba6edfc | 2006-04-10 22:53:58 -0700 | [diff] [blame] | 1632 | tvec_base_done[cpu] = 1; | 
|  | 1633 | } else { | 
|  | 1634 | base = per_cpu(tvec_bases, cpu); | 
| Jan Beulich | a4a6198 | 2006-03-24 03:15:54 -0800 | [diff] [blame] | 1635 | } | 
| Andrew Morton | ba6edfc | 2006-04-10 22:53:58 -0700 | [diff] [blame] | 1636 |  | 
| Oleg Nesterov | 3691c51 | 2006-03-31 02:30:30 -0800 | [diff] [blame] | 1637 | spin_lock_init(&base->lock); | 
| Ingo Molnar | d730e88 | 2006-07-03 00:25:10 -0700 | [diff] [blame] | 1638 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1639 | for (j = 0; j < TVN_SIZE; j++) { | 
|  | 1640 | INIT_LIST_HEAD(base->tv5.vec + j); | 
|  | 1641 | INIT_LIST_HEAD(base->tv4.vec + j); | 
|  | 1642 | INIT_LIST_HEAD(base->tv3.vec + j); | 
|  | 1643 | INIT_LIST_HEAD(base->tv2.vec + j); | 
|  | 1644 | } | 
|  | 1645 | for (j = 0; j < TVR_SIZE; j++) | 
|  | 1646 | INIT_LIST_HEAD(base->tv1.vec + j); | 
|  | 1647 |  | 
|  | 1648 | base->timer_jiffies = jiffies; | 
| Martin Schwidefsky | 97fd9ed | 2009-07-21 20:25:05 +0200 | [diff] [blame] | 1649 | base->next_timer = base->timer_jiffies; | 
| Jan Beulich | a4a6198 | 2006-03-24 03:15:54 -0800 | [diff] [blame] | 1650 | return 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1651 | } | 
|  | 1652 |  | 
|  | 1653 | #ifdef CONFIG_HOTPLUG_CPU | 
| Pavel Machek | a6fa8e5 | 2008-01-30 13:30:00 +0100 | [diff] [blame] | 1654 | static void migrate_timer_list(struct tvec_base *new_base, struct list_head *head) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1655 | { | 
|  | 1656 | struct timer_list *timer; | 
|  | 1657 |  | 
|  | 1658 | while (!list_empty(head)) { | 
| Pavel Emelianov | b5e6181 | 2007-05-08 00:30:19 -0700 | [diff] [blame] | 1659 | timer = list_first_entry(head, struct timer_list, entry); | 
| Oleg Nesterov | 55c888d | 2005-06-23 00:08:56 -0700 | [diff] [blame] | 1660 | detach_timer(timer, 0); | 
| Venki Pallipadi | 6e453a6 | 2007-05-08 00:27:44 -0700 | [diff] [blame] | 1661 | timer_set_base(timer, new_base); | 
| Martin Schwidefsky | 97fd9ed | 2009-07-21 20:25:05 +0200 | [diff] [blame] | 1662 | if (time_before(timer->expires, new_base->next_timer) && | 
|  | 1663 | !tbase_get_deferrable(timer->base)) | 
|  | 1664 | new_base->next_timer = timer->expires; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1665 | internal_add_timer(new_base, timer); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1666 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1667 | } | 
|  | 1668 |  | 
| Randy Dunlap | 48ccf3d | 2008-01-21 17:18:25 -0800 | [diff] [blame] | 1669 | static void __cpuinit migrate_timers(int cpu) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1670 | { | 
| Pavel Machek | a6fa8e5 | 2008-01-30 13:30:00 +0100 | [diff] [blame] | 1671 | struct tvec_base *old_base; | 
|  | 1672 | struct tvec_base *new_base; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1673 | int i; | 
|  | 1674 |  | 
|  | 1675 | BUG_ON(cpu_online(cpu)); | 
| Jan Beulich | a4a6198 | 2006-03-24 03:15:54 -0800 | [diff] [blame] | 1676 | old_base = per_cpu(tvec_bases, cpu); | 
|  | 1677 | new_base = get_cpu_var(tvec_bases); | 
| Oleg Nesterov | d82f0b0 | 2008-08-20 16:46:04 -0700 | [diff] [blame] | 1678 | /* | 
|  | 1679 | * The caller is globally serialized and nobody else | 
|  | 1680 | * takes two locks at once, deadlock is not possible. | 
|  | 1681 | */ | 
|  | 1682 | spin_lock_irq(&new_base->lock); | 
| Oleg Nesterov | 0d18040 | 2008-04-04 20:54:10 +0200 | [diff] [blame] | 1683 | spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1684 |  | 
| Oleg Nesterov | 3691c51 | 2006-03-31 02:30:30 -0800 | [diff] [blame] | 1685 | BUG_ON(old_base->running_timer); | 
|  | 1686 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1687 | for (i = 0; i < TVR_SIZE; i++) | 
| Oleg Nesterov | 55c888d | 2005-06-23 00:08:56 -0700 | [diff] [blame] | 1688 | migrate_timer_list(new_base, old_base->tv1.vec + i); | 
|  | 1689 | for (i = 0; i < TVN_SIZE; i++) { | 
|  | 1690 | migrate_timer_list(new_base, old_base->tv2.vec + i); | 
|  | 1691 | migrate_timer_list(new_base, old_base->tv3.vec + i); | 
|  | 1692 | migrate_timer_list(new_base, old_base->tv4.vec + i); | 
|  | 1693 | migrate_timer_list(new_base, old_base->tv5.vec + i); | 
|  | 1694 | } | 
|  | 1695 |  | 
| Oleg Nesterov | 0d18040 | 2008-04-04 20:54:10 +0200 | [diff] [blame] | 1696 | spin_unlock(&old_base->lock); | 
| Oleg Nesterov | d82f0b0 | 2008-08-20 16:46:04 -0700 | [diff] [blame] | 1697 | spin_unlock_irq(&new_base->lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1698 | put_cpu_var(tvec_bases); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1699 | } | 
|  | 1700 | #endif /* CONFIG_HOTPLUG_CPU */ | 
|  | 1701 |  | 
| Chandra Seetharaman | 8c78f30 | 2006-07-30 03:03:35 -0700 | [diff] [blame] | 1702 | static int __cpuinit timer_cpu_notify(struct notifier_block *self, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1703 | unsigned long action, void *hcpu) | 
|  | 1704 | { | 
|  | 1705 | long cpu = (long)hcpu; | 
| Akinobu Mita | 80b5184 | 2010-05-26 14:43:32 -0700 | [diff] [blame] | 1706 | int err; | 
|  | 1707 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1708 | switch(action) { | 
|  | 1709 | case CPU_UP_PREPARE: | 
| Rafael J. Wysocki | 8bb7844 | 2007-05-09 02:35:10 -0700 | [diff] [blame] | 1710 | case CPU_UP_PREPARE_FROZEN: | 
| Akinobu Mita | 80b5184 | 2010-05-26 14:43:32 -0700 | [diff] [blame] | 1711 | err = init_timers_cpu(cpu); | 
|  | 1712 | if (err < 0) | 
|  | 1713 | return notifier_from_errno(err); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1714 | break; | 
|  | 1715 | #ifdef CONFIG_HOTPLUG_CPU | 
|  | 1716 | case CPU_DEAD: | 
| Rafael J. Wysocki | 8bb7844 | 2007-05-09 02:35:10 -0700 | [diff] [blame] | 1717 | case CPU_DEAD_FROZEN: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1718 | migrate_timers(cpu); | 
|  | 1719 | break; | 
|  | 1720 | #endif | 
|  | 1721 | default: | 
|  | 1722 | break; | 
|  | 1723 | } | 
|  | 1724 | return NOTIFY_OK; | 
|  | 1725 | } | 
|  | 1726 |  | 
| Chandra Seetharaman | 8c78f30 | 2006-07-30 03:03:35 -0700 | [diff] [blame] | 1727 | static struct notifier_block __cpuinitdata timers_nb = { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1728 | .notifier_call	= timer_cpu_notify, | 
|  | 1729 | }; | 
|  | 1730 |  | 
|  | 1731 |  | 
|  | 1732 | void __init init_timers(void) | 
|  | 1733 | { | 
| Akinobu Mita | 07dccf3 | 2006-09-29 02:00:22 -0700 | [diff] [blame] | 1734 | int err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1735 | (void *)(long)smp_processor_id()); | 
| Akinobu Mita | 07dccf3 | 2006-09-29 02:00:22 -0700 | [diff] [blame] | 1736 |  | 
| Ingo Molnar | 82f67cd | 2007-02-16 01:28:13 -0800 | [diff] [blame] | 1737 | init_timer_stats(); | 
|  | 1738 |  | 
| Akinobu Mita | 9e506f7 | 2010-06-04 14:15:04 -0700 | [diff] [blame] | 1739 | BUG_ON(err != NOTIFY_OK); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1740 | register_cpu_notifier(&timers_nb); | 
| Carlos R. Mafra | 962cf36 | 2008-05-15 11:15:37 -0300 | [diff] [blame] | 1741 | open_softirq(TIMER_SOFTIRQ, run_timer_softirq); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1742 | } | 
|  | 1743 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1744 | /** | 
|  | 1745 | * msleep - sleep safely even with waitqueue interruptions | 
|  | 1746 | * @msecs: Time in milliseconds to sleep for | 
|  | 1747 | */ | 
|  | 1748 | void msleep(unsigned int msecs) | 
|  | 1749 | { | 
|  | 1750 | unsigned long timeout = msecs_to_jiffies(msecs) + 1; | 
|  | 1751 |  | 
| Nishanth Aravamudan | 75bcc8c | 2005-09-10 00:27:24 -0700 | [diff] [blame] | 1752 | while (timeout) | 
|  | 1753 | timeout = schedule_timeout_uninterruptible(timeout); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1754 | } | 
|  | 1755 |  | 
|  | 1756 | EXPORT_SYMBOL(msleep); | 
|  | 1757 |  | 
|  | 1758 | /** | 
| Domen Puncer | 96ec3ef | 2005-06-25 14:58:43 -0700 | [diff] [blame] | 1759 | * msleep_interruptible - sleep waiting for signals | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1760 | * @msecs: Time in milliseconds to sleep for | 
|  | 1761 | */ | 
|  | 1762 | unsigned long msleep_interruptible(unsigned int msecs) | 
|  | 1763 | { | 
|  | 1764 | unsigned long timeout = msecs_to_jiffies(msecs) + 1; | 
|  | 1765 |  | 
| Nishanth Aravamudan | 75bcc8c | 2005-09-10 00:27:24 -0700 | [diff] [blame] | 1766 | while (timeout && !signal_pending(current)) | 
|  | 1767 | timeout = schedule_timeout_interruptible(timeout); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1768 | return jiffies_to_msecs(timeout); | 
|  | 1769 | } | 
|  | 1770 |  | 
|  | 1771 | EXPORT_SYMBOL(msleep_interruptible); | 
| Patrick Pannuto | 5e7f5a1 | 2010-08-02 15:01:04 -0700 | [diff] [blame] | 1772 |  | 
|  | 1773 | static int __sched do_usleep_range(unsigned long min, unsigned long max) | 
|  | 1774 | { | 
|  | 1775 | ktime_t kmin; | 
|  | 1776 | unsigned long delta; | 
|  | 1777 |  | 
|  | 1778 | kmin = ktime_set(0, min * NSEC_PER_USEC); | 
|  | 1779 | delta = (max - min) * NSEC_PER_USEC; | 
|  | 1780 | return schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL); | 
|  | 1781 | } | 
|  | 1782 |  | 
|  | 1783 | /** | 
|  | 1784 | * usleep_range - Drop-in replacement for udelay where wakeup is flexible | 
|  | 1785 | * @min: Minimum time in usecs to sleep | 
|  | 1786 | * @max: Maximum time in usecs to sleep | 
|  | 1787 | */ | 
|  | 1788 | void usleep_range(unsigned long min, unsigned long max) | 
|  | 1789 | { | 
|  | 1790 | __set_current_state(TASK_UNINTERRUPTIBLE); | 
|  | 1791 | do_usleep_range(min, max); | 
|  | 1792 | } | 
|  | 1793 | EXPORT_SYMBOL(usleep_range); |
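/*
 * Illustrative sketch, not part of timer.c: unlike udelay(), usleep_range()
 * actually sleeps and gives the scheduler a window in which wakeups can be
 * coalesced. A hypothetical driver needing roughly 150us for its hardware
 * to settle might allow 100-200us of slack:
 */
static void my_dev_settle(void)
{
	usleep_range(100, 200);		/* anywhere between 100us and 200us */
}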