/*
 *  linux/kernel/timer.c
 *
 *  Kernel internal timers, basic process system calls
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *
 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 *  1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *              serialize accesses to xtime/lost_ticks).
 *                              Copyright (C) 1998  Andrea Arcangeli
 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
 *  2002-05-31  Move sys_sysinfo here and make its locking sane, Robert Love
 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)

struct tvec {
        struct list_head vec[TVN_SIZE];
};

struct tvec_root {
        struct list_head vec[TVR_SIZE];
};

struct tvec_base {
        spinlock_t lock;
        struct timer_list *running_timer;
        unsigned long timer_jiffies;
        struct tvec_root tv1;
        struct tvec tv2;
        struct tvec tv3;
        struct tvec tv4;
        struct tvec tv5;
} ____cacheline_aligned;

struct tvec_base boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);
static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases;

/*
 * Note that all tvec_bases are 2 byte aligned and lower bit of
 * base in timer_list is guaranteed to be zero. Use the LSB for
 * the new flag to indicate whether the timer is deferrable.
 */
#define TBASE_DEFERRABLE_FLAG		(0x1)

/* Functions below help us manage 'deferrable' flag */
static inline unsigned int tbase_get_deferrable(struct tvec_base *base)
{
        return ((unsigned int)(unsigned long)base & TBASE_DEFERRABLE_FLAG);
}

static inline struct tvec_base *tbase_get_base(struct tvec_base *base)
{
        return ((struct tvec_base *)((unsigned long)base & ~TBASE_DEFERRABLE_FLAG));
}

static inline void timer_set_deferrable(struct timer_list *timer)
{
        timer->base = ((struct tvec_base *)((unsigned long)(timer->base) |
                                            TBASE_DEFERRABLE_FLAG));
}

static inline void
timer_set_base(struct timer_list *timer, struct tvec_base *new_base)
{
        timer->base = (struct tvec_base *)((unsigned long)(new_base) |
                                           tbase_get_deferrable(timer->base));
}

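/*
 * Illustrative sketch (hypothetical usage, not an in-tree caller): the
 * helpers above implement a classic pointer-tagging scheme.  Because every
 * tvec_base is at least 2 byte aligned (see the comment above), bit 0 of a
 * timer's base pointer is always zero and can carry the "deferrable" flag
 * without growing struct timer_list:
 *
 *        struct timer_list t;
 *
 *        init_timer(&t);
 *        timer_set_deferrable(&t);
 *        tbase_get_deferrable(t.base);   returns 1: flag stored in bit 0
 *        tbase_get_base(t.base);         recovers the real tvec_base pointer
 *
 * Any code that dereferences timer->base must therefore go through
 * tbase_get_base() first.
 */
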
/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
        int rem;
        unsigned long original = j;

        /*
         * We don't want all cpus firing their timers at once hitting the
         * same lock or cachelines, so we skew each extra cpu with an extra
         * 3 jiffies. This 3 jiffies came originally from the mm/ code which
         * already did this.
         * The skew is done by adding 3*cpunr, then rounding, then subtracting
         * this extra offset again.
         */
        j += cpu * 3;

        rem = j % HZ;

        /*
         * If the target jiffy is just after a whole second (which can happen
         * due to delays of the timer irq, long irq off times etc etc) then
         * we should round down to the whole second, not up. Use 1/4th second
         * as cutoff for this rounding as an extreme upper bound for this.
         */
        if (rem < HZ/4) /* round down */
                j = j - rem;
        else /* round up */
                j = j - rem + HZ;

        /* now that we have rounded, subtract the extra skew again */
        j -= cpu * 3;

        if (j <= jiffies) /* rounding ate our timeout entirely; */
                return original;
        return j;
}
EXPORT_SYMBOL_GPL(__round_jiffies);

/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
        /*
         * In theory the following code can skip a jiffy in case jiffies
         * increments right between the addition and the later subtraction.
         * However since the entire point of this function is to use approximate
         * timeouts, it's entirely ok to not handle that.
         */
        return __round_jiffies(j + jiffies, cpu) - jiffies;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);

/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
        return __round_jiffies(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies);

/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
        return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);
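
/*
 * Illustrative sketch (hypothetical caller, not an in-tree user): a driver
 * that only needs roughly one-second granularity can round its timeout so
 * that many such timers expire together and the CPU wakes up less often:
 *
 *        mod_timer(&my_timer, round_jiffies(jiffies + 5 * HZ));
 *
 * or, for a relative delay:
 *
 *        unsigned long delay = round_jiffies_relative(5 * HZ);
 *
 * "my_timer" is a hypothetical, already initialized struct timer_list used
 * purely for illustration.
 */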


static inline void set_running_timer(struct tvec_base *base,
                                     struct timer_list *timer)
{
#ifdef CONFIG_SMP
        base->running_timer = timer;
#endif
}

static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
        unsigned long expires = timer->expires;
        unsigned long idx = expires - base->timer_jiffies;
        struct list_head *vec;

        if (idx < TVR_SIZE) {
                int i = expires & TVR_MASK;
                vec = base->tv1.vec + i;
        } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
                int i = (expires >> TVR_BITS) & TVN_MASK;
                vec = base->tv2.vec + i;
        } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
                int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
                vec = base->tv3.vec + i;
        } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
                int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
                vec = base->tv4.vec + i;
        } else if ((signed long) idx < 0) {
                /*
                 * Can happen if you add a timer with expires == jiffies,
                 * or you set a timer to go off in the past
                 */
                vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
        } else {
                int i;
                /* If the timeout is larger than 0xffffffff on 64-bit
                 * architectures then we use the maximum timeout:
                 */
                if (idx > 0xffffffffUL) {
                        idx = 0xffffffffUL;
                        expires = idx + base->timer_jiffies;
                }
                i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
                vec = base->tv5.vec + i;
        }
        /*
         * Timers are FIFO:
         */
        list_add_tail(&timer->entry, vec);
}

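/*
 * Illustrative sketch (worked example, not code) of how the wheel above
 * spreads timers, assuming the non-CONFIG_BASE_SMALL values TVR_BITS = 8
 * and TVN_BITS = 6, i.e. bucket-count thresholds of 256, 16384 and 1048576:
 *
 *        idx = expires - timer_jiffies =   100  ->  tv1[ expires        & 255]
 *        idx = expires - timer_jiffies =  5000  ->  tv2[(expires >>  8) &  63]
 *        idx = expires - timer_jiffies = 50000  ->  tv3[(expires >> 14) &  63]
 *
 * Only tv1 is indexed by the exact expiry; the outer wheels hold coarser
 * buckets whose timers are cascaded back towards tv1 as timer_jiffies
 * advances (see cascade() and __run_timers() below).
 */
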
#ifdef CONFIG_TIMER_STATS
void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
{
        if (timer->start_site)
                return;

        timer->start_site = addr;
        memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
        timer->start_pid = current->pid;
}

static void timer_stats_account_timer(struct timer_list *timer)
{
        unsigned int flag = 0;

        if (unlikely(tbase_get_deferrable(timer->base)))
                flag |= TIMER_STATS_FLAG_DEFERRABLE;

        timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
                                 timer->function, timer->start_comm, flag);
}

#else
static void timer_stats_account_timer(struct timer_list *timer) {}
#endif

/**
 * init_timer - initialize a timer.
 * @timer: the timer to be initialized
 *
 * init_timer() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void fastcall init_timer(struct timer_list *timer)
{
        timer->entry.next = NULL;
        timer->base = __raw_get_cpu_var(tvec_bases);
#ifdef CONFIG_TIMER_STATS
        timer->start_site = NULL;
        timer->start_pid = -1;
        memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
}
EXPORT_SYMBOL(init_timer);

void fastcall init_timer_deferrable(struct timer_list *timer)
{
        init_timer(timer);
        timer_set_deferrable(timer);
}
EXPORT_SYMBOL(init_timer_deferrable);
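
/*
 * Illustrative sketch (hypothetical caller): a deferrable timer is serviced
 * like any other timer while the CPU is busy, but it will not by itself wake
 * an idle CPU out of a tickless (NO_HZ) sleep.  It suits housekeeping work
 * that can wait for the next natural wakeup:
 *
 *        static struct timer_list housekeeping_timer;     hypothetical
 *
 *        init_timer_deferrable(&housekeeping_timer);
 *        housekeeping_timer.function = housekeeping_fn;   hypothetical callback
 *        housekeeping_timer.data = 0;
 *        mod_timer(&housekeeping_timer, jiffies + 10 * HZ);
 */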

static inline void detach_timer(struct timer_list *timer,
                                int clear_pending)
{
        struct list_head *entry = &timer->entry;

        __list_del(entry->prev, entry->next);
        if (clear_pending)
                entry->next = NULL;
        entry->prev = LIST_POISON2;
}

/*
 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static struct tvec_base *lock_timer_base(struct timer_list *timer,
                                         unsigned long *flags)
        __acquires(timer->base->lock)
{
        struct tvec_base *base;

        for (;;) {
                struct tvec_base *prelock_base = timer->base;
                base = tbase_get_base(prelock_base);
                if (likely(base != NULL)) {
                        spin_lock_irqsave(&base->lock, *flags);
                        if (likely(prelock_base == timer->base))
                                return base;
                        /* The timer has migrated to another CPU */
                        spin_unlock_irqrestore(&base->lock, *flags);
                }
                cpu_relax();
        }
}

int __mod_timer(struct timer_list *timer, unsigned long expires)
{
        struct tvec_base *base, *new_base;
        unsigned long flags;
        int ret = 0;

        timer_stats_timer_set_start_info(timer);
        BUG_ON(!timer->function);

        base = lock_timer_base(timer, &flags);

        if (timer_pending(timer)) {
                detach_timer(timer, 0);
                ret = 1;
        }

        new_base = __get_cpu_var(tvec_bases);

        if (base != new_base) {
                /*
                 * We are trying to schedule the timer on the local CPU.
                 * However we can't change timer's base while it is running,
                 * otherwise del_timer_sync() can't detect that the timer's
                 * handler has not yet finished. This also guarantees that
                 * the timer is serialized wrt itself.
                 */
                if (likely(base->running_timer != timer)) {
                        /* See the comment in lock_timer_base() */
                        timer_set_base(timer, NULL);
                        spin_unlock(&base->lock);
                        base = new_base;
                        spin_lock(&base->lock);
                        timer_set_base(timer, base);
                }
        }

        timer->expires = expires;
        internal_add_timer(base, timer);
        spin_unlock_irqrestore(&base->lock, flags);

        return ret;
}

EXPORT_SYMBOL(__mod_timer);

/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
        struct tvec_base *base = per_cpu(tvec_bases, cpu);
        unsigned long flags;

        timer_stats_timer_set_start_info(timer);
        BUG_ON(timer_pending(timer) || !timer->function);
        spin_lock_irqsave(&base->lock, flags);
        timer_set_base(timer, base);
        internal_add_timer(base, timer);
        spin_unlock_irqrestore(&base->lock, flags);
}


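/*
 * Illustrative sketch (hypothetical caller): add_timer_on() pins the expiry
 * to one CPU's timer wheel, which is occasionally wanted for strictly
 * per-CPU work:
 *
 *        my_timer.expires = jiffies + HZ;
 *        add_timer_on(&my_timer, 3);       queue on CPU 3's tvec_base
 *
 * Note the BUG_ON() above: unlike mod_timer(), this interface refuses
 * timers that are already pending, so "my_timer" (a hypothetical,
 * initialized struct timer_list) must be inactive when it is passed in.
 */
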
/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expires field of an
 * active timer (if the timer is inactive it will be activated)
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
        BUG_ON(!timer->function);

        timer_stats_timer_set_start_info(timer);
        /*
         * This is a common optimization triggered by the
         * networking code - if the timer is re-modified
         * to be the same thing then just return:
         */
        if (timer->expires == expires && timer_pending(timer))
                return 1;

        return __mod_timer(timer, expires);
}

EXPORT_SYMBOL(mod_timer);

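/*
 * Illustrative sketch (hypothetical caller): the typical life cycle of a
 * timer built on the primitives above.  "my_func" and "my_timer" are
 * hypothetical names used only for this example:
 *
 *        static void my_func(unsigned long data) { ... }
 *
 *        static struct timer_list my_timer;
 *
 *        init_timer(&my_timer);
 *        my_timer.function = my_func;
 *        my_timer.data = 0;
 *        mod_timer(&my_timer, jiffies + msecs_to_jiffies(200));
 *
 * Re-arming from inside my_func() with another mod_timer() call gives a
 * self-restarting periodic timer.
 */
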
/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
        struct tvec_base *base;
        unsigned long flags;
        int ret = 0;

        timer_stats_timer_clear_start_info(timer);
        if (timer_pending(timer)) {
                base = lock_timer_base(timer, &flags);
                if (timer_pending(timer)) {
                        detach_timer(timer, 1);
                        ret = 1;
                }
                spin_unlock_irqrestore(&base->lock, flags);
        }

        return ret;
}

EXPORT_SYMBOL(del_timer);

#ifdef CONFIG_SMP
/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: the timer to deactivate
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 *
 * It must not be called from interrupt contexts.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
        struct tvec_base *base;
        unsigned long flags;
        int ret = -1;

        base = lock_timer_base(timer, &flags);

        if (base->running_timer == timer)
                goto out;

        ret = 0;
        if (timer_pending(timer)) {
                detach_timer(timer, 1);
                ret = 1;
        }
out:
        spin_unlock_irqrestore(&base->lock, flags);

        return ret;
}

EXPORT_SYMBOL(try_to_del_timer_sync);

/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts. The caller must not hold locks which would prevent
 * completion of the timer's handler. The timer's handler must not call
 * add_timer_on(). Upon exit the timer is not queued and the handler is
 * not running on any CPU.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
        for (;;) {
                int ret = try_to_del_timer_sync(timer);
                if (ret >= 0)
                        return ret;
                cpu_relax();
        }
}

EXPORT_SYMBOL(del_timer_sync);
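
/*
 * Illustrative sketch (hypothetical caller): the usual teardown pattern for
 * a driver that owns a timer whose handler may still be running on another
 * CPU:
 *
 *        my_dev->shutting_down = 1;        hypothetical flag so the handler
 *                                          stops re-arming itself
 *        del_timer_sync(&my_dev->timer);
 *        kfree(my_dev);                    now safe: handler cannot run again
 *
 * Calling plain del_timer() here instead would leave a window where the
 * handler still executes after the memory is freed.
 */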
#endif

static int cascade(struct tvec_base *base, struct tvec *tv, int index)
{
        /* cascade all the timers from tv up one level */
        struct timer_list *timer, *tmp;
        struct list_head tv_list;

        list_replace_init(tv->vec + index, &tv_list);

        /*
         * We are removing _all_ timers from the list, so we
         * don't have to detach them individually.
         */
        list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
                BUG_ON(tbase_get_base(timer->base) != base);
                internal_add_timer(base, timer);
        }

        return index;
}

#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)

/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
static inline void __run_timers(struct tvec_base *base)
{
        struct timer_list *timer;

        spin_lock_irq(&base->lock);
        while (time_after_eq(jiffies, base->timer_jiffies)) {
                struct list_head work_list;
                struct list_head *head = &work_list;
                int index = base->timer_jiffies & TVR_MASK;

                /*
                 * Cascade timers:
                 */
                if (!index &&
                        (!cascade(base, &base->tv2, INDEX(0))) &&
                                (!cascade(base, &base->tv3, INDEX(1))) &&
                                        !cascade(base, &base->tv4, INDEX(2)))
                        cascade(base, &base->tv5, INDEX(3));
                ++base->timer_jiffies;
                list_replace_init(base->tv1.vec + index, &work_list);
                while (!list_empty(head)) {
                        void (*fn)(unsigned long);
                        unsigned long data;

                        timer = list_first_entry(head, struct timer_list, entry);
                        fn = timer->function;
                        data = timer->data;

                        timer_stats_account_timer(timer);

                        set_running_timer(base, timer);
                        detach_timer(timer, 1);
                        spin_unlock_irq(&base->lock);
                        {
                                int preempt_count = preempt_count();
                                fn(data);
                                if (preempt_count != preempt_count()) {
                                        printk(KERN_ERR "huh, entered %p "
                                               "with preempt_count %08x, exited"
                                               " with %08x?\n",
                                               fn, preempt_count,
                                               preempt_count());
                                        BUG();
                                }
                        }
                        spin_lock_irq(&base->lock);
                }
        }
        set_running_timer(base, NULL);
        spin_unlock_irq(&base->lock);
}

#if defined(CONFIG_NO_IDLE_HZ) || defined(CONFIG_NO_HZ)
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a CPU is idle.
 * This function needs to be called with interrupts disabled.
 */
static unsigned long __next_timer_interrupt(struct tvec_base *base)
{
        unsigned long timer_jiffies = base->timer_jiffies;
        unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
        int index, slot, array, found = 0;
        struct timer_list *nte;
        struct tvec *varray[4];

        /* Look for timer events in tv1. */
        index = slot = timer_jiffies & TVR_MASK;
        do {
                list_for_each_entry(nte, base->tv1.vec + slot, entry) {
                        if (tbase_get_deferrable(nte->base))
                                continue;

                        found = 1;
                        expires = nte->expires;
                        /* Look at the cascade bucket(s)? */
                        if (!index || slot < index)
                                goto cascade;
                        return expires;
                }
                slot = (slot + 1) & TVR_MASK;
        } while (slot != index);

cascade:
        /* Calculate the next cascade event */
        if (index)
                timer_jiffies += TVR_SIZE - index;
        timer_jiffies >>= TVR_BITS;

        /* Check tv2-tv5. */
        varray[0] = &base->tv2;
        varray[1] = &base->tv3;
        varray[2] = &base->tv4;
        varray[3] = &base->tv5;

        for (array = 0; array < 4; array++) {
                struct tvec *varp = varray[array];

                index = slot = timer_jiffies & TVN_MASK;
                do {
                        list_for_each_entry(nte, varp->vec + slot, entry) {
                                found = 1;
                                if (time_before(nte->expires, expires))
                                        expires = nte->expires;
                        }
                        /*
                         * Do we still search for the first timer or are
                         * we looking up the cascade buckets?
                         */
                        if (found) {
                                /* Look at the cascade bucket(s)? */
                                if (!index || slot < index)
                                        break;
                                return expires;
                        }
                        slot = (slot + 1) & TVN_MASK;
                } while (slot != index);

                if (index)
                        timer_jiffies += TVN_SIZE - index;
                timer_jiffies >>= TVN_BITS;
        }
        return expires;
}

/*
 * Check if the next hrtimer event is before the next timer wheel
 * event:
 */
static unsigned long cmp_next_hrtimer_event(unsigned long now,
                                            unsigned long expires)
{
        ktime_t hr_delta = hrtimer_get_next_event();
        struct timespec tsdelta;
        unsigned long delta;

        if (hr_delta.tv64 == KTIME_MAX)
                return expires;

        /*
         * Expired timer available, let it expire in the next tick
         */
        if (hr_delta.tv64 <= 0)
                return now + 1;

        tsdelta = ktime_to_timespec(hr_delta);
        delta = timespec_to_jiffies(&tsdelta);

        /*
         * Limit the delta to the max value, which is checked in
         * tick_nohz_stop_sched_tick():
         */
        if (delta > NEXT_TIMER_MAX_DELTA)
                delta = NEXT_TIMER_MAX_DELTA;

        /*
         * Take rounding errors into account and make sure that it
         * expires in the next tick. Otherwise we go into an endless
         * ping pong due to tick_nohz_stop_sched_tick() retriggering
         * the timer softirq
         */
        if (delta < 1)
                delta = 1;
        now += delta;
        if (time_before(now, expires))
                return now;
        return expires;
}

/**
 * get_next_timer_interrupt - return the jiffy of the next pending timer
 * @now: current time (in jiffies)
 */
unsigned long get_next_timer_interrupt(unsigned long now)
{
        struct tvec_base *base = __get_cpu_var(tvec_bases);
        unsigned long expires;

        spin_lock(&base->lock);
        expires = __next_timer_interrupt(base);
        spin_unlock(&base->lock);

        if (time_before_eq(expires, now))
                return now;

        return cmp_next_hrtimer_event(now, expires);
}

#ifdef CONFIG_NO_IDLE_HZ
unsigned long next_timer_interrupt(void)
{
        return get_next_timer_interrupt(jiffies);
}
#endif

#endif

#ifndef CONFIG_VIRT_CPU_ACCOUNTING
void account_process_tick(struct task_struct *p, int user_tick)
{
        if (user_tick) {
                account_user_time(p, jiffies_to_cputime(1));
                account_user_time_scaled(p, jiffies_to_cputime(1));
        } else {
                account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1));
                account_system_time_scaled(p, jiffies_to_cputime(1));
        }
}
#endif

/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process.  user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
        struct task_struct *p = current;
        int cpu = smp_processor_id();

        /* Note: this timer irq context must be accounted for as well. */
        account_process_tick(p, user_tick);
        run_local_timers();
        if (rcu_pending(cpu))
                rcu_check_callbacks(cpu, user_tick);
        scheduler_tick();
        run_posix_cpu_timers(p);
}

/*
 * Nr of active tasks - counted in fixed-point numbers
 */
static unsigned long count_active_tasks(void)
{
        return nr_active() * FIXED_1;
}

/*
 * Hmm.. Changed this, as the GNU make sources (load.c) seem to
 * imply that avenrun[] is the standard name for this kind of thing.
 * Nothing else seems to be standardized: the fractional size etc
 * all seem to differ on different machines.
 *
 * Requires xtime_lock to access.
 */
unsigned long avenrun[3];

EXPORT_SYMBOL(avenrun);

/*
 * calc_load - given tick count, update the avenrun load estimates.
 * This is called while holding a write_lock on xtime_lock.
 */
static inline void calc_load(unsigned long ticks)
{
        unsigned long active_tasks; /* fixed-point */
        static int count = LOAD_FREQ;

        count -= ticks;
        if (unlikely(count < 0)) {
                active_tasks = count_active_tasks();
                do {
                        CALC_LOAD(avenrun[0], EXP_1, active_tasks);
                        CALC_LOAD(avenrun[1], EXP_5, active_tasks);
                        CALC_LOAD(avenrun[2], EXP_15, active_tasks);
                        count += LOAD_FREQ;
                } while (count < 0);
        }
}
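
/*
 * Illustrative sketch (worked example, not code): CALC_LOAD maintains an
 * exponentially decaying average in FIXED_1 (1 << FSHIFT) fixed point,
 * roughly
 *
 *        load = (load * exp + active * (FIXED_1 - exp)) >> FSHIFT
 *
 * so with EXP_1 tuned for a one-minute decay, a system holding one runnable
 * task (active = FIXED_1) drifts from a load of 0.00 toward 1.00 a little
 * more each LOAD_FREQ (roughly 5 second) interval.  Dividing avenrun[n] by
 * FIXED_1 yields the familiar load-average numbers reported by top(1).
 */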
|  | 889 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 890 | /* | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 891 | * This function runs timers and the timer-tq in bottom half context. | 
|  | 892 | */ | 
|  | 893 | static void run_timer_softirq(struct softirq_action *h) | 
|  | 894 | { | 
| Pavel Machek | a6fa8e5 | 2008-01-30 13:30:00 +0100 | [diff] [blame] | 895 | struct tvec_base *base = __get_cpu_var(tvec_bases); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 896 |  | 
| Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 897 | hrtimer_run_pending(); | 
| Ingo Molnar | 82f67cd | 2007-02-16 01:28:13 -0800 | [diff] [blame] | 898 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 899 | if (time_after_eq(jiffies, base->timer_jiffies)) | 
|  | 900 | __run_timers(base); | 
|  | 901 | } | 
|  | 902 |  | 
|  | 903 | /* | 
|  | 904 | * Called by the local, per-CPU timer interrupt on SMP. | 
|  | 905 | */ | 
|  | 906 | void run_local_timers(void) | 
|  | 907 | { | 
| Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 908 | hrtimer_run_queues(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 909 | raise_softirq(TIMER_SOFTIRQ); | 
| Ingo Molnar | 6687a97 | 2006-03-24 03:18:41 -0800 | [diff] [blame] | 910 | softlockup_tick(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 911 | } | 
|  | 912 |  | 
|  | 913 | /* | 
|  | 914 | * Called by the timer interrupt. xtime_lock must already be taken | 
|  | 915 | * by the timer IRQ! | 
|  | 916 | */ | 
| Atsushi Nemoto | 3171a03 | 2006-09-29 02:00:32 -0700 | [diff] [blame] | 917 | static inline void update_times(unsigned long ticks) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 918 | { | 
| john stultz | ad59617 | 2006-06-26 00:25:06 -0700 | [diff] [blame] | 919 | update_wall_time(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 920 | calc_load(ticks); | 
|  | 921 | } | 
| Thomas Gleixner | 6819457 | 2007-07-19 01:49:16 -0700 | [diff] [blame] | 922 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 923 | /* | 
|  | 924 | * The 64-bit jiffies value is not atomic - you MUST NOT read it | 
|  | 925 | * without sampling the sequence number in xtime_lock. | 
|  | 926 | * jiffies is defined in the linker script... | 
|  | 927 | */ | 
|  | 928 |  | 
| Atsushi Nemoto | 3171a03 | 2006-09-29 02:00:32 -0700 | [diff] [blame] | 929 | void do_timer(unsigned long ticks) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 930 | { | 
| Atsushi Nemoto | 3171a03 | 2006-09-29 02:00:32 -0700 | [diff] [blame] | 931 | jiffies_64 += ticks; | 
|  | 932 | update_times(ticks); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 933 | } | 
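
/*
 * Editorial example (not from the original file): sampling the 64-bit
 * jiffies value safely on a 32-bit machine, per the warning above - read
 * it under the xtime_lock sequence counter, roughly what the kernel's
 * get_jiffies_64() helper does.  example_read_jiffies_64() is an
 * illustrative name only.
 */
static u64 example_read_jiffies_64(void)
{
	unsigned long seq;
	u64 now;

	do {
		seq = read_seqbegin(&xtime_lock);
		now = jiffies_64;
	} while (read_seqretry(&xtime_lock, seq));

	return now;
}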
|  | 934 |  | 
|  | 935 | #ifdef __ARCH_WANT_SYS_ALARM | 
|  | 936 |  | 
|  | 937 | /* | 
|  | 938 | * For backwards compatibility.  This can be done in libc, so Alpha | 
|  | 939 | * and all newer ports shouldn't need it. | 
|  | 940 | */ | 
|  | 941 | asmlinkage unsigned long sys_alarm(unsigned int seconds) | 
|  | 942 | { | 
| Thomas Gleixner | c08b8a4 | 2006-03-25 03:06:33 -0800 | [diff] [blame] | 943 | return alarm_setitimer(seconds); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 944 | } | 
|  | 945 |  | 
|  | 946 | #endif | 
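
/*
 * Editorial example (not from the original file): the userspace view of
 * sys_alarm() above, through the alarm(2) wrapper.  The return value is
 * the number of seconds that remained on any previously armed alarm.
 * This is a standalone sketch for an ordinary C compiler, not kernel code.
 */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	alarm(30);				/* SIGALRM due in ~30 seconds  */
	sleep(1);				/* ... do some work ...        */
	printf("seconds left: %u\n", alarm(0));	/* cancel and read remainder   */
	return 0;
}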
|  | 947 |  | 
|  | 948 | #ifndef __alpha__ | 
|  | 949 |  | 
|  | 950 | /* | 
|  | 951 | * The Alpha uses getxpid, getxuid, and getxgid instead.  Maybe this | 
|  | 952 | * should be moved into arch/i386 instead? | 
|  | 953 | */ | 
|  | 954 |  | 
|  | 955 | /** | 
|  | 956 | * sys_getpid - return the thread group id of the current process | 
|  | 957 | * | 
|  | 958 | * Note, despite the name, this returns the tgid, not the pid.  The tgid and | 
|  | 959 | * the pid are identical unless CLONE_THREAD was specified on clone(), in | 
|  | 960 | * which case the tgid is the same in all threads of the same group. | 
|  | 961 | * | 
|  | 962 | * This is SMP safe as current->tgid does not change. | 
|  | 963 | */ | 
|  | 964 | asmlinkage long sys_getpid(void) | 
|  | 965 | { | 
| Pavel Emelyanov | b488893 | 2007-10-18 23:40:14 -0700 | [diff] [blame] | 966 | return task_tgid_vnr(current); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 967 | } | 
|  | 968 |  | 
|  | 969 | /* | 
| Kirill Korotaev | 6997a6f | 2006-08-13 23:24:23 -0700 | [diff] [blame] | 970 | * Accessing ->real_parent is not SMP-safe, it could | 
|  | 971 | * change from under us. However, we can use a stale | 
|  | 972 | * value of ->real_parent under rcu_read_lock(), see | 
|  | 973 | * release_task()->call_rcu(delayed_put_task_struct). | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 974 | */ | 
|  | 975 | asmlinkage long sys_getppid(void) | 
|  | 976 | { | 
|  | 977 | int pid; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 978 |  | 
| Kirill Korotaev | 6997a6f | 2006-08-13 23:24:23 -0700 | [diff] [blame] | 979 | rcu_read_lock(); | 
| Roland McGrath | 84427ea | 2008-01-10 12:52:04 -0800 | [diff] [blame] | 980 | pid = task_tgid_nr_ns(current->real_parent, current->nsproxy->pid_ns); | 
| Kirill Korotaev | 6997a6f | 2006-08-13 23:24:23 -0700 | [diff] [blame] | 981 | rcu_read_unlock(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 982 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 983 | return pid; | 
|  | 984 | } | 
|  | 985 |  | 
|  | 986 | asmlinkage long sys_getuid(void) | 
|  | 987 | { | 
|  | 988 | /* Only we change this so SMP safe */ | 
|  | 989 | return current->uid; | 
|  | 990 | } | 
|  | 991 |  | 
|  | 992 | asmlinkage long sys_geteuid(void) | 
|  | 993 | { | 
|  | 994 | /* Only we change this so SMP safe */ | 
|  | 995 | return current->euid; | 
|  | 996 | } | 
|  | 997 |  | 
|  | 998 | asmlinkage long sys_getgid(void) | 
|  | 999 | { | 
|  | 1000 | /* Only we change this so SMP safe */ | 
|  | 1001 | return current->gid; | 
|  | 1002 | } | 
|  | 1003 |  | 
|  | 1004 | asmlinkage long sys_getegid(void) | 
|  | 1005 | { | 
|  | 1006 | /* Only we change this so SMP safe */ | 
|  | 1007 | return  current->egid; | 
|  | 1008 | } | 
|  | 1009 |  | 
|  | 1010 | #endif | 
|  | 1011 |  | 
|  | 1012 | static void process_timeout(unsigned long __data) | 
|  | 1013 | { | 
| Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 1014 | wake_up_process((struct task_struct *)__data); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1015 | } | 
|  | 1016 |  | 
|  | 1017 | /** | 
|  | 1018 | * schedule_timeout - sleep until timeout | 
|  | 1019 | * @timeout: timeout value in jiffies | 
|  | 1020 | * | 
|  | 1021 | * Make the current task sleep until @timeout jiffies have | 
|  | 1022 | * elapsed. The routine will return immediately unless | 
|  | 1023 | * the current task state has been set (see set_current_state()). | 
|  | 1024 | * | 
|  | 1025 | * You can set the task state as follows - | 
|  | 1026 | * | 
|  | 1027 | * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to | 
|  | 1028 | * pass before the routine returns. The routine will return 0. | 
|  | 1029 | * | 
|  | 1030 | * %TASK_INTERRUPTIBLE - the routine may return early if a signal is | 
|  | 1031 | * delivered to the current task. In this case the remaining time | 
|  | 1032 | * in jiffies will be returned, or 0 if the timer expired in time. | 
|  | 1033 | * | 
|  | 1034 | * The current task state is guaranteed to be TASK_RUNNING when this | 
|  | 1035 | * routine returns. | 
|  | 1036 | * | 
|  | 1037 | * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule | 
|  | 1038 | * the CPU away without a bound on the timeout. In this case the return | 
|  | 1039 | * value will be %MAX_SCHEDULE_TIMEOUT. | 
|  | 1040 | * | 
|  | 1041 | * In all cases the return value is guaranteed to be non-negative. | 
|  | 1042 | */ | 
|  | 1043 | fastcall signed long __sched schedule_timeout(signed long timeout) | 
|  | 1044 | { | 
|  | 1045 | struct timer_list timer; | 
|  | 1046 | unsigned long expire; | 
|  | 1047 |  | 
|  | 1048 | switch (timeout) | 
|  | 1049 | { | 
|  | 1050 | case MAX_SCHEDULE_TIMEOUT: | 
|  | 1051 | /* | 
|  | 1052 | * These two special cases are useful to be comfortable | 
|  | 1053 | * in the caller. Nothing more. We could take | 
|  | 1054 | * MAX_SCHEDULE_TIMEOUT from one of the negative values, | 
|  | 1055 | * but I'd like to return a valid offset (>= 0) to allow | 
|  | 1056 | * the caller to do everything it wants with the retval. | 
|  | 1057 | */ | 
|  | 1058 | schedule(); | 
|  | 1059 | goto out; | 
|  | 1060 | default: | 
|  | 1061 | /* | 
|  | 1062 | * Another bit of PARANOID. Note that the retval will be | 
|  | 1063 | * 0, since no piece of kernel is supposed to check | 
|  | 1064 | * for a negative retval of schedule_timeout() (it | 
|  | 1065 | * should never happen anyway). You just have the printk() | 
|  | 1066 | * that will tell you if something has gone wrong, and where. | 
|  | 1067 | */ | 
| Andrew Morton | 5b149bc | 2006-12-22 01:10:14 -0800 | [diff] [blame] | 1068 | if (timeout < 0) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1069 | printk(KERN_ERR "schedule_timeout: wrong timeout " | 
| Andrew Morton | 5b149bc | 2006-12-22 01:10:14 -0800 | [diff] [blame] | 1070 | "value %lx\n", timeout); | 
|  | 1071 | dump_stack(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1072 | current->state = TASK_RUNNING; | 
|  | 1073 | goto out; | 
|  | 1074 | } | 
|  | 1075 | } | 
|  | 1076 |  | 
|  | 1077 | expire = timeout + jiffies; | 
|  | 1078 |  | 
| Oleg Nesterov | a8db2db | 2005-10-30 15:01:38 -0800 | [diff] [blame] | 1079 | setup_timer(&timer, process_timeout, (unsigned long)current); | 
|  | 1080 | __mod_timer(&timer, expire); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1081 | schedule(); | 
|  | 1082 | del_singleshot_timer_sync(&timer); | 
|  | 1083 |  | 
|  | 1084 | timeout = expire - jiffies; | 
|  | 1085 |  | 
|  | 1086 | out: | 
|  | 1087 | return timeout < 0 ? 0 : timeout; | 
|  | 1088 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1089 | EXPORT_SYMBOL(schedule_timeout); | 
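
/*
 * Editorial example (not from the original file): the calling convention
 * documented above.  The task state must be set before calling
 * schedule_timeout(); if it is left at TASK_RUNNING the call returns
 * immediately.  example_wait_for_event() is an illustrative name only.
 */
static signed long example_wait_for_event(void)
{
	set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(5 * HZ);	/* 0 if the full 5 s elapsed,
						 * else the jiffies remaining
						 * when a signal woke us */
}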
|  | 1090 |  | 
| Andrew Morton | 8a1c175 | 2005-09-13 01:25:15 -0700 | [diff] [blame] | 1091 | /* | 
|  | 1092 | * We can use __set_current_state() here because schedule_timeout() calls | 
|  | 1093 | * schedule() unconditionally. | 
|  | 1094 | */ | 
| Nishanth Aravamudan | 64ed93a | 2005-09-10 00:27:21 -0700 | [diff] [blame] | 1095 | signed long __sched schedule_timeout_interruptible(signed long timeout) | 
|  | 1096 | { | 
| Andrew Morton | a5a0d52 | 2005-10-30 15:01:42 -0800 | [diff] [blame] | 1097 | __set_current_state(TASK_INTERRUPTIBLE); | 
|  | 1098 | return schedule_timeout(timeout); | 
| Nishanth Aravamudan | 64ed93a | 2005-09-10 00:27:21 -0700 | [diff] [blame] | 1099 | } | 
|  | 1100 | EXPORT_SYMBOL(schedule_timeout_interruptible); | 
|  | 1101 |  | 
| Matthew Wilcox | 294d5cc | 2007-12-06 11:59:46 -0500 | [diff] [blame] | 1102 | signed long __sched schedule_timeout_killable(signed long timeout) | 
|  | 1103 | { | 
|  | 1104 | __set_current_state(TASK_KILLABLE); | 
|  | 1105 | return schedule_timeout(timeout); | 
|  | 1106 | } | 
|  | 1107 | EXPORT_SYMBOL(schedule_timeout_killable); | 
|  | 1108 |  | 
| Nishanth Aravamudan | 64ed93a | 2005-09-10 00:27:21 -0700 | [diff] [blame] | 1109 | signed long __sched schedule_timeout_uninterruptible(signed long timeout) | 
|  | 1110 | { | 
| Andrew Morton | a5a0d52 | 2005-10-30 15:01:42 -0800 | [diff] [blame] | 1111 | __set_current_state(TASK_UNINTERRUPTIBLE); | 
|  | 1112 | return schedule_timeout(timeout); | 
| Nishanth Aravamudan | 64ed93a | 2005-09-10 00:27:21 -0700 | [diff] [blame] | 1113 | } | 
|  | 1114 | EXPORT_SYMBOL(schedule_timeout_uninterruptible); | 
|  | 1115 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1116 | /* Thread ID - the internal kernel "pid" */ | 
|  | 1117 | asmlinkage long sys_gettid(void) | 
|  | 1118 | { | 
| Pavel Emelyanov | b488893 | 2007-10-18 23:40:14 -0700 | [diff] [blame] | 1119 | return task_pid_vnr(current); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1120 | } | 
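
/*
 * Editorial example (not from the original file): a userspace sketch of the
 * getpid()/gettid() distinction documented at sys_getpid() above.  The two
 * values differ only in threads created with CLONE_THREAD.  glibc of this
 * era has no gettid() wrapper, hence the raw syscall.  Standalone code, not
 * part of the kernel build.
 */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	printf("pid (tgid) = %ld, tid = %ld\n",
	       (long)getpid(), (long)syscall(SYS_gettid));
	return 0;
}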
|  | 1121 |  | 
| Rolf Eike Beer | 2aae4a1 | 2006-09-29 01:59:46 -0700 | [diff] [blame] | 1122 | /** | 
| Kyle McMartin | d4d23ad | 2007-02-10 01:46:00 -0800 | [diff] [blame] | 1123 | * do_sysinfo - fill in sysinfo struct | 
| Rolf Eike Beer | 2aae4a1 | 2006-09-29 01:59:46 -0700 | [diff] [blame] | 1124 | * @info: pointer to buffer to fill | 
| Thomas Gleixner | 6819457 | 2007-07-19 01:49:16 -0700 | [diff] [blame] | 1125 | */ | 
| Kyle McMartin | d4d23ad | 2007-02-10 01:46:00 -0800 | [diff] [blame] | 1126 | int do_sysinfo(struct sysinfo *info) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1127 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1128 | unsigned long mem_total, sav_total; | 
|  | 1129 | unsigned int mem_unit, bitcount; | 
|  | 1130 | unsigned long seq; | 
|  | 1131 |  | 
| Kyle McMartin | d4d23ad | 2007-02-10 01:46:00 -0800 | [diff] [blame] | 1132 | memset(info, 0, sizeof(struct sysinfo)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1133 |  | 
|  | 1134 | do { | 
|  | 1135 | struct timespec tp; | 
|  | 1136 | seq = read_seqbegin(&xtime_lock); | 
|  | 1137 |  | 
|  | 1138 | /* | 
|  | 1139 | * This is annoying.  The below is the same thing | 
|  | 1140 | * posix_get_clock_monotonic() does, but that helper wants | 
|  | 1141 | * to take xtime_lock itself, whereas we want this single | 
|  | 1142 | * locked section to also cover the load averages. | 
|  | 1143 | */ | 
|  | 1144 |  | 
|  | 1145 | getnstimeofday(&tp); | 
|  | 1146 | tp.tv_sec += wall_to_monotonic.tv_sec; | 
|  | 1147 | tp.tv_nsec += wall_to_monotonic.tv_nsec; | 
| Tomas Janousek | d621414 | 2007-07-15 23:39:42 -0700 | [diff] [blame] | 1148 | monotonic_to_bootbased(&tp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1149 | if (tp.tv_nsec - NSEC_PER_SEC >= 0) { | 
|  | 1150 | tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC; | 
|  | 1151 | tp.tv_sec++; | 
|  | 1152 | } | 
| Kyle McMartin | d4d23ad | 2007-02-10 01:46:00 -0800 | [diff] [blame] | 1153 | info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1154 |  | 
| Kyle McMartin | d4d23ad | 2007-02-10 01:46:00 -0800 | [diff] [blame] | 1155 | info->loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT); | 
|  | 1156 | info->loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT); | 
|  | 1157 | info->loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1158 |  | 
| Kyle McMartin | d4d23ad | 2007-02-10 01:46:00 -0800 | [diff] [blame] | 1159 | info->procs = nr_threads; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1160 | } while (read_seqretry(&xtime_lock, seq)); | 
|  | 1161 |  | 
| Kyle McMartin | d4d23ad | 2007-02-10 01:46:00 -0800 | [diff] [blame] | 1162 | si_meminfo(info); | 
|  | 1163 | si_swapinfo(info); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1164 |  | 
|  | 1165 | /* | 
|  | 1166 | * If the sum of all the available memory (i.e. ram + swap) | 
|  | 1167 | * is less than can be stored in a 32 bit unsigned long then | 
|  | 1168 | * we can be binary compatible with 2.2.x kernels.  If not, | 
|  | 1169 | * well, in that case 2.2.x was broken anyway... | 
|  | 1170 | * | 
|  | 1171 | *  -Erik Andersen <andersee@debian.org> | 
|  | 1172 | */ | 
|  | 1173 |  | 
| Kyle McMartin | d4d23ad | 2007-02-10 01:46:00 -0800 | [diff] [blame] | 1174 | mem_total = info->totalram + info->totalswap; | 
|  | 1175 | if (mem_total < info->totalram || mem_total < info->totalswap) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1176 | goto out; | 
|  | 1177 | bitcount = 0; | 
| Kyle McMartin | d4d23ad | 2007-02-10 01:46:00 -0800 | [diff] [blame] | 1178 | mem_unit = info->mem_unit; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1179 | while (mem_unit > 1) { | 
|  | 1180 | bitcount++; | 
|  | 1181 | mem_unit >>= 1; | 
|  | 1182 | sav_total = mem_total; | 
|  | 1183 | mem_total <<= 1; | 
|  | 1184 | if (mem_total < sav_total) | 
|  | 1185 | goto out; | 
|  | 1186 | } | 
|  | 1187 |  | 
|  | 1188 | /* | 
|  | 1189 | * If mem_total did not overflow, multiply all memory values by | 
| Kyle McMartin | d4d23ad | 2007-02-10 01:46:00 -0800 | [diff] [blame] | 1190 | * info->mem_unit and set it to 1.  This leaves things compatible | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1191 | * with 2.2.x, and also retains compatibility with earlier 2.4.x | 
|  | 1192 | * kernels... | 
|  | 1193 | */ | 
|  | 1194 |  | 
| Kyle McMartin | d4d23ad | 2007-02-10 01:46:00 -0800 | [diff] [blame] | 1195 | info->mem_unit = 1; | 
|  | 1196 | info->totalram <<= bitcount; | 
|  | 1197 | info->freeram <<= bitcount; | 
|  | 1198 | info->sharedram <<= bitcount; | 
|  | 1199 | info->bufferram <<= bitcount; | 
|  | 1200 | info->totalswap <<= bitcount; | 
|  | 1201 | info->freeswap <<= bitcount; | 
|  | 1202 | info->totalhigh <<= bitcount; | 
|  | 1203 | info->freehigh <<= bitcount; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1204 |  | 
| Kyle McMartin | d4d23ad | 2007-02-10 01:46:00 -0800 | [diff] [blame] | 1205 | out: | 
|  | 1206 | return 0; | 
|  | 1207 | } | 
|  | 1208 |  | 
|  | 1209 | asmlinkage long sys_sysinfo(struct sysinfo __user *info) | 
|  | 1210 | { | 
|  | 1211 | struct sysinfo val; | 
|  | 1212 |  | 
|  | 1213 | do_sysinfo(&val); | 
|  | 1214 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1215 | if (copy_to_user(info, &val, sizeof(struct sysinfo))) | 
|  | 1216 | return -EFAULT; | 
|  | 1217 |  | 
|  | 1218 | return 0; | 
|  | 1219 | } | 
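
/*
 * Editorial example (not from the original file): consuming the structure
 * that sys_sysinfo() fills.  Loads are fixed point with (1 << SI_LOAD_SHIFT)
 * meaning 1.0, and the memory fields are in units of mem_unit.  The kernel
 * defines SI_LOAD_SHIFT as 16 in <linux/kernel.h>; it is mirrored locally
 * here only to keep the sketch standalone.  Not kernel code.
 */
#include <stdio.h>
#include <sys/sysinfo.h>

#define EXAMPLE_SI_LOAD_SHIFT	16	/* mirrors the kernel's SI_LOAD_SHIFT */

int main(void)
{
	struct sysinfo si;

	if (sysinfo(&si) != 0)
		return 1;

	printf("up %lus, 1-min load %.2f, ram %llu bytes\n",
	       (unsigned long)si.uptime,
	       si.loads[0] / (double)(1 << EXAMPLE_SI_LOAD_SHIFT),
	       (unsigned long long)si.totalram * si.mem_unit);
	return 0;
}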
|  | 1220 |  | 
| Ingo Molnar | d730e88 | 2006-07-03 00:25:10 -0700 | [diff] [blame] | 1221 | /* | 
|  | 1222 | * lockdep: we want to track each per-CPU base as a separate lock-class, | 
|  | 1223 | * but timer-bases are kmalloc()-ed, so we need to attach separate | 
|  | 1224 | * keys to them: | 
|  | 1225 | */ | 
|  | 1226 | static struct lock_class_key base_lock_keys[NR_CPUS]; | 
|  | 1227 |  | 
| Adrian Bunk | b4be625 | 2007-12-18 18:05:58 +0100 | [diff] [blame] | 1228 | static int __cpuinit init_timers_cpu(int cpu) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1229 | { | 
|  | 1230 | int j; | 
| Pavel Machek | a6fa8e5 | 2008-01-30 13:30:00 +0100 | [diff] [blame] | 1231 | struct tvec_base *base; | 
| Adrian Bunk | b4be625 | 2007-12-18 18:05:58 +0100 | [diff] [blame] | 1232 | static char __cpuinitdata tvec_base_done[NR_CPUS]; | 
| Oleg Nesterov | 55c888d | 2005-06-23 00:08:56 -0700 | [diff] [blame] | 1233 |  | 
| Andrew Morton | ba6edfc | 2006-04-10 22:53:58 -0700 | [diff] [blame] | 1234 | if (!tvec_base_done[cpu]) { | 
| Jan Beulich | a4a6198 | 2006-03-24 03:15:54 -0800 | [diff] [blame] | 1235 | static char boot_done; | 
|  | 1236 |  | 
| Jan Beulich | a4a6198 | 2006-03-24 03:15:54 -0800 | [diff] [blame] | 1237 | if (boot_done) { | 
| Andrew Morton | ba6edfc | 2006-04-10 22:53:58 -0700 | [diff] [blame] | 1238 | /* | 
|  | 1239 | * The secondary CPUs (APs) use this path later in boot | 
|  | 1240 | */ | 
| Christoph Lameter | 94f6030 | 2007-07-17 04:03:29 -0700 | [diff] [blame] | 1241 | base = kmalloc_node(sizeof(*base), | 
|  | 1242 | GFP_KERNEL | __GFP_ZERO, | 
| Jan Beulich | a4a6198 | 2006-03-24 03:15:54 -0800 | [diff] [blame] | 1243 | cpu_to_node(cpu)); | 
|  | 1244 | if (!base) | 
|  | 1245 | return -ENOMEM; | 
| Venki Pallipadi | 6e453a6 | 2007-05-08 00:27:44 -0700 | [diff] [blame] | 1246 |  | 
|  | 1247 | /* Make sure that tvec_base is 2 byte aligned: the low bit of the pointer carries the deferrable flag */ | 
|  | 1248 | if (tbase_get_deferrable(base)) { | 
|  | 1249 | WARN_ON(1); | 
|  | 1250 | kfree(base); | 
|  | 1251 | return -ENOMEM; | 
|  | 1252 | } | 
| Andrew Morton | ba6edfc | 2006-04-10 22:53:58 -0700 | [diff] [blame] | 1253 | per_cpu(tvec_bases, cpu) = base; | 
| Jan Beulich | a4a6198 | 2006-03-24 03:15:54 -0800 | [diff] [blame] | 1254 | } else { | 
| Andrew Morton | ba6edfc | 2006-04-10 22:53:58 -0700 | [diff] [blame] | 1255 | /* | 
|  | 1256 | * This is for the boot CPU - we use compile-time | 
|  | 1257 | * static initialisation because per-cpu memory isn't | 
|  | 1258 | * ready yet and because the memory allocators are not | 
|  | 1259 | * initialised either. | 
|  | 1260 | */ | 
| Jan Beulich | a4a6198 | 2006-03-24 03:15:54 -0800 | [diff] [blame] | 1261 | boot_done = 1; | 
| Andrew Morton | ba6edfc | 2006-04-10 22:53:58 -0700 | [diff] [blame] | 1262 | base = &boot_tvec_bases; | 
| Jan Beulich | a4a6198 | 2006-03-24 03:15:54 -0800 | [diff] [blame] | 1263 | } | 
| Andrew Morton | ba6edfc | 2006-04-10 22:53:58 -0700 | [diff] [blame] | 1264 | tvec_base_done[cpu] = 1; | 
|  | 1265 | } else { | 
|  | 1266 | base = per_cpu(tvec_bases, cpu); | 
| Jan Beulich | a4a6198 | 2006-03-24 03:15:54 -0800 | [diff] [blame] | 1267 | } | 
| Andrew Morton | ba6edfc | 2006-04-10 22:53:58 -0700 | [diff] [blame] | 1268 |  | 
| Oleg Nesterov | 3691c51 | 2006-03-31 02:30:30 -0800 | [diff] [blame] | 1269 | spin_lock_init(&base->lock); | 
| Ingo Molnar | d730e88 | 2006-07-03 00:25:10 -0700 | [diff] [blame] | 1270 | lockdep_set_class(&base->lock, base_lock_keys + cpu); | 
|  | 1271 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1272 | for (j = 0; j < TVN_SIZE; j++) { | 
|  | 1273 | INIT_LIST_HEAD(base->tv5.vec + j); | 
|  | 1274 | INIT_LIST_HEAD(base->tv4.vec + j); | 
|  | 1275 | INIT_LIST_HEAD(base->tv3.vec + j); | 
|  | 1276 | INIT_LIST_HEAD(base->tv2.vec + j); | 
|  | 1277 | } | 
|  | 1278 | for (j = 0; j < TVR_SIZE; j++) | 
|  | 1279 | INIT_LIST_HEAD(base->tv1.vec + j); | 
|  | 1280 |  | 
|  | 1281 | base->timer_jiffies = jiffies; | 
| Jan Beulich | a4a6198 | 2006-03-24 03:15:54 -0800 | [diff] [blame] | 1282 | return 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1283 | } | 
|  | 1284 |  | 
|  | 1285 | #ifdef CONFIG_HOTPLUG_CPU | 
| Pavel Machek | a6fa8e5 | 2008-01-30 13:30:00 +0100 | [diff] [blame] | 1286 | static void migrate_timer_list(struct tvec_base *new_base, struct list_head *head) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1287 | { | 
|  | 1288 | struct timer_list *timer; | 
|  | 1289 |  | 
|  | 1290 | while (!list_empty(head)) { | 
| Pavel Emelianov | b5e6181 | 2007-05-08 00:30:19 -0700 | [diff] [blame] | 1291 | timer = list_first_entry(head, struct timer_list, entry); | 
| Oleg Nesterov | 55c888d | 2005-06-23 00:08:56 -0700 | [diff] [blame] | 1292 | detach_timer(timer, 0); | 
| Venki Pallipadi | 6e453a6 | 2007-05-08 00:27:44 -0700 | [diff] [blame] | 1293 | timer_set_base(timer, new_base); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1294 | internal_add_timer(new_base, timer); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1295 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1296 | } | 
|  | 1297 |  | 
| Randy Dunlap | 48ccf3d | 2008-01-21 17:18:25 -0800 | [diff] [blame] | 1298 | static void __cpuinit migrate_timers(int cpu) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1299 | { | 
| Pavel Machek | a6fa8e5 | 2008-01-30 13:30:00 +0100 | [diff] [blame] | 1300 | struct tvec_base *old_base; | 
|  | 1301 | struct tvec_base *new_base; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1302 | int i; | 
|  | 1303 |  | 
|  | 1304 | BUG_ON(cpu_online(cpu)); | 
| Jan Beulich | a4a6198 | 2006-03-24 03:15:54 -0800 | [diff] [blame] | 1305 | old_base = per_cpu(tvec_bases, cpu); | 
|  | 1306 | new_base = get_cpu_var(tvec_bases); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1307 |  | 
|  | 1308 | local_irq_disable(); | 
| Heiko Carstens | e81ce1f | 2007-03-05 00:30:51 -0800 | [diff] [blame] | 1309 | double_spin_lock(&new_base->lock, &old_base->lock, | 
|  | 1310 | smp_processor_id() < cpu); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1311 |  | 
| Oleg Nesterov | 3691c51 | 2006-03-31 02:30:30 -0800 | [diff] [blame] | 1312 | BUG_ON(old_base->running_timer); | 
|  | 1313 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1314 | for (i = 0; i < TVR_SIZE; i++) | 
| Oleg Nesterov | 55c888d | 2005-06-23 00:08:56 -0700 | [diff] [blame] | 1315 | migrate_timer_list(new_base, old_base->tv1.vec + i); | 
|  | 1316 | for (i = 0; i < TVN_SIZE; i++) { | 
|  | 1317 | migrate_timer_list(new_base, old_base->tv2.vec + i); | 
|  | 1318 | migrate_timer_list(new_base, old_base->tv3.vec + i); | 
|  | 1319 | migrate_timer_list(new_base, old_base->tv4.vec + i); | 
|  | 1320 | migrate_timer_list(new_base, old_base->tv5.vec + i); | 
|  | 1321 | } | 
|  | 1322 |  | 
| Heiko Carstens | e81ce1f | 2007-03-05 00:30:51 -0800 | [diff] [blame] | 1323 | double_spin_unlock(&new_base->lock, &old_base->lock, | 
|  | 1324 | smp_processor_id() < cpu); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1325 | local_irq_enable(); | 
|  | 1326 | put_cpu_var(tvec_bases); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1327 | } | 
|  | 1328 | #endif /* CONFIG_HOTPLUG_CPU */ | 
|  | 1329 |  | 
| Chandra Seetharaman | 8c78f30 | 2006-07-30 03:03:35 -0700 | [diff] [blame] | 1330 | static int __cpuinit timer_cpu_notify(struct notifier_block *self, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1331 | unsigned long action, void *hcpu) | 
|  | 1332 | { | 
|  | 1333 | long cpu = (long)hcpu; | 
|  | 1334 | switch(action) { | 
|  | 1335 | case CPU_UP_PREPARE: | 
| Rafael J. Wysocki | 8bb7844 | 2007-05-09 02:35:10 -0700 | [diff] [blame] | 1336 | case CPU_UP_PREPARE_FROZEN: | 
| Jan Beulich | a4a6198 | 2006-03-24 03:15:54 -0800 | [diff] [blame] | 1337 | if (init_timers_cpu(cpu) < 0) | 
|  | 1338 | return NOTIFY_BAD; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1339 | break; | 
|  | 1340 | #ifdef CONFIG_HOTPLUG_CPU | 
|  | 1341 | case CPU_DEAD: | 
| Rafael J. Wysocki | 8bb7844 | 2007-05-09 02:35:10 -0700 | [diff] [blame] | 1342 | case CPU_DEAD_FROZEN: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1343 | migrate_timers(cpu); | 
|  | 1344 | break; | 
|  | 1345 | #endif | 
|  | 1346 | default: | 
|  | 1347 | break; | 
|  | 1348 | } | 
|  | 1349 | return NOTIFY_OK; | 
|  | 1350 | } | 
|  | 1351 |  | 
| Chandra Seetharaman | 8c78f30 | 2006-07-30 03:03:35 -0700 | [diff] [blame] | 1352 | static struct notifier_block __cpuinitdata timers_nb = { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1353 | .notifier_call	= timer_cpu_notify, | 
|  | 1354 | }; | 
|  | 1355 |  | 
|  | 1356 |  | 
|  | 1357 | void __init init_timers(void) | 
|  | 1358 | { | 
| Akinobu Mita | 07dccf3 | 2006-09-29 02:00:22 -0700 | [diff] [blame] | 1359 | int err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1360 | (void *)(long)smp_processor_id()); | 
| Akinobu Mita | 07dccf3 | 2006-09-29 02:00:22 -0700 | [diff] [blame] | 1361 |  | 
| Ingo Molnar | 82f67cd | 2007-02-16 01:28:13 -0800 | [diff] [blame] | 1362 | init_timer_stats(); | 
|  | 1363 |  | 
| Akinobu Mita | 07dccf3 | 2006-09-29 02:00:22 -0700 | [diff] [blame] | 1364 | BUG_ON(err == NOTIFY_BAD); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1365 | register_cpu_notifier(&timers_nb); | 
|  | 1366 | open_softirq(TIMER_SOFTIRQ, run_timer_softirq, NULL); | 
|  | 1367 | } | 
|  | 1368 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1369 | /** | 
|  | 1370 | * msleep - sleep safely even with waitqueue interruptions | 
|  | 1371 | * @msecs: Time in milliseconds to sleep for | 
|  | 1372 | */ | 
|  | 1373 | void msleep(unsigned int msecs) | 
|  | 1374 | { | 
|  | 1375 | unsigned long timeout = msecs_to_jiffies(msecs) + 1; | 
|  | 1376 |  | 
| Nishanth Aravamudan | 75bcc8c | 2005-09-10 00:27:24 -0700 | [diff] [blame] | 1377 | while (timeout) | 
|  | 1378 | timeout = schedule_timeout_uninterruptible(timeout); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1379 | } | 
|  | 1380 |  | 
|  | 1381 | EXPORT_SYMBOL(msleep); | 
|  | 1382 |  | 
|  | 1383 | /** | 
| Domen Puncer | 96ec3ef | 2005-06-25 14:58:43 -0700 | [diff] [blame] | 1384 | * msleep_interruptible - sleep waiting for signals | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1385 | * @msecs: Time in milliseconds to sleep for | 
|  | 1386 | */ | 
|  | 1387 | unsigned long msleep_interruptible(unsigned int msecs) | 
|  | 1388 | { | 
|  | 1389 | unsigned long timeout = msecs_to_jiffies(msecs) + 1; | 
|  | 1390 |  | 
| Nishanth Aravamudan | 75bcc8c | 2005-09-10 00:27:24 -0700 | [diff] [blame] | 1391 | while (timeout && !signal_pending(current)) | 
|  | 1392 | timeout = schedule_timeout_interruptible(timeout); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1393 | return jiffies_to_msecs(timeout); | 
|  | 1394 | } | 
|  | 1395 |  | 
|  | 1396 | EXPORT_SYMBOL(msleep_interruptible); |
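
/*
 * Editorial example (not from the original file): typical driver-style use
 * of the two helpers above.  msleep() always sleeps for at least the full
 * period; msleep_interruptible() returns the remaining milliseconds if a
 * signal cut the sleep short.  example_settle_hardware() is an illustrative
 * name only.
 */
static int example_settle_hardware(void)
{
	msleep(10);			/* uninterruptible ~10 ms settle delay */

	if (msleep_interruptible(500))	/* woken early by a signal?            */
		return -ERESTARTSYS;
	return 0;
}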