/*
 * linux/kernel/time/tick-sched.c
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner
 *
 * No idle tick implementation for low and high resolution timers
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 *
 * Distribute under GPLv2.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/rq_stats.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"

struct rq_data rq_info;
struct workqueue_struct *rq_wq;
spinlock_t rq_lock;

/*
 * Per cpu nohz control structure
 */
static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);

/*
 * The time when the last jiffy update happened. Protected by xtime_lock.
 */
static ktime_t last_jiffies_update;

struct tick_sched *tick_get_tick_sched(int cpu)
{
	return &per_cpu(tick_cpu_sched, cpu);
}

/*
 * Must be called with interrupts disabled !
 */
static void tick_do_update_jiffies64(ktime_t now)
{
	unsigned long ticks = 0;
	ktime_t delta;

	/*
	 * Do a quick check without holding xtime_lock:
	 */
	delta = ktime_sub(now, last_jiffies_update);
	if (delta.tv64 < tick_period.tv64)
		return;

	/* Reevaluate with xtime_lock held */
	write_seqlock(&xtime_lock);

	delta = ktime_sub(now, last_jiffies_update);
	if (delta.tv64 >= tick_period.tv64) {

		delta = ktime_sub(delta, tick_period);
		last_jiffies_update = ktime_add(last_jiffies_update,
						tick_period);

		/* Slow path for long timeouts */
		if (unlikely(delta.tv64 >= tick_period.tv64)) {
			s64 incr = ktime_to_ns(tick_period);

			ticks = ktime_divns(delta, incr);

			last_jiffies_update = ktime_add_ns(last_jiffies_update,
							   incr * ticks);
		}
		do_timer(++ticks);

		/* Keep the tick_next_period variable up to date */
		tick_next_period = ktime_add(last_jiffies_update, tick_period);
	}
	write_sequnlock(&xtime_lock);
}

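/*
 * Worked example for the catch-up path above (illustrative numbers,
 * assuming HZ=1000 so tick_period is 1 ms): if this runs 5.5 ms after
 * last_jiffies_update, the fast path consumes one period, the slow path
 * folds in ticks = 4 more whole periods, and do_timer(++ticks) then
 * advances jiffies by 5 in one call, leaving the 0.5 ms remainder for
 * the next update.
 */
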
/*
 * Initialize and return the jiffies update.
 */
static ktime_t tick_init_jiffy_update(void)
{
	ktime_t period;

	write_seqlock(&xtime_lock);
	/* Did we start the jiffies update yet ? */
	if (last_jiffies_update.tv64 == 0)
		last_jiffies_update = tick_next_period;
	period = last_jiffies_update;
	write_sequnlock(&xtime_lock);
	return period;
}

/*
 * NOHZ - aka dynamic tick functionality
 */
#ifdef CONFIG_NO_HZ
/*
 * NO HZ enabled ?
 */
static int tick_nohz_enabled __read_mostly = 1;

/*
 * Enable / Disable tickless mode
 */
static int __init setup_tick_nohz(char *str)
{
	if (!strcmp(str, "off"))
		tick_nohz_enabled = 0;
	else if (!strcmp(str, "on"))
		tick_nohz_enabled = 1;
	else
		return 0;
	return 1;
}

__setup("nohz=", setup_tick_nohz);

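/*
 * Usage note: tickless operation is controlled from the kernel command
 * line via the handler above, e.g. booting with "nohz=off" disables it
 * and "nohz=on" (the default) enables it; any other value is rejected.
 */
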
/**
 * tick_nohz_update_jiffies - update jiffies when idle was interrupted
 *
 * Called from interrupt entry when the CPU was idle
 *
 * In case the sched_tick was stopped on this CPU, we have to check if jiffies
 * must be updated. Otherwise an interrupt handler could use a stale jiffy
 * value. We do this unconditionally on any cpu, as we don't know whether the
 * cpu which has the update task assigned is in a long sleep.
 */
static void tick_nohz_update_jiffies(ktime_t now)
{
	int cpu = smp_processor_id();
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	unsigned long flags;

	ts->idle_waketime = now;

	local_irq_save(flags);
	tick_do_update_jiffies64(now);
	local_irq_restore(flags);

	touch_softlockup_watchdog();
}

/*
 * Updates the per cpu time idle statistics counters
 */
static void
update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time)
{
	ktime_t delta;

	if (ts->idle_active) {
		delta = ktime_sub(now, ts->idle_entrytime);
		if (nr_iowait_cpu(cpu) > 0)
			ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
		else
			ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
		ts->idle_entrytime = now;
	}

	if (last_update_time)
		*last_update_time = ktime_to_us(now);
}

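/*
 * Note on the accounting split above: idle time spent while at least one
 * task on this cpu is blocked on I/O is charged to iowait_sleeptime,
 * otherwise to idle_sleeptime, so the two counters never overlap.
 */
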
static void tick_nohz_stop_idle(int cpu, ktime_t now)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

	update_ts_time_stats(cpu, ts, now, NULL);
	ts->idle_active = 0;

	sched_clock_idle_wakeup_event(0);
}

static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts)
{
	ktime_t now = ktime_get();

	ts->idle_entrytime = now;
	ts->idle_active = 1;
	sched_clock_idle_sleep_event();
	return now;
}

/**
 * get_cpu_idle_time_us - get the total idle time of a cpu
 * @cpu: CPU number to query
 * @last_update_time: variable to store update time in. Do not update
 * counters if NULL.
 *
 * Return the cumulative idle time (since boot) for a given
 * CPU, in microseconds.
 *
 * This time is measured via accounting rather than sampling,
 * and is as accurate as ktime_get() is.
 *
 * This function returns -1 if NOHZ is not enabled.
 */
u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t now, idle;

	if (!tick_nohz_enabled)
		return -1;

	now = ktime_get();
	if (last_update_time) {
		update_ts_time_stats(cpu, ts, now, last_update_time);
		idle = ts->idle_sleeptime;
	} else {
		if (ts->idle_active && !nr_iowait_cpu(cpu)) {
			ktime_t delta = ktime_sub(now, ts->idle_entrytime);

			idle = ktime_add(ts->idle_sleeptime, delta);
		} else {
			idle = ts->idle_sleeptime;
		}
	}

	return ktime_to_us(idle);
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);

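/*
 * Caller sketch (illustrative; fallback_sample() is a hypothetical
 * helper, not part of this file). Pass a u64 to receive the sampling
 * timestamp and fold the current idle period into the counters, or
 * NULL for a read-only query:
 *
 *	u64 wall;
 *	u64 idle_us = get_cpu_idle_time_us(cpu, &wall);
 *
 *	if (idle_us == (u64)-1)
 *		idle_us = fallback_sample();	// NOHZ off
 */
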
/**
 * get_cpu_iowait_time_us - get the total iowait time of a cpu
 * @cpu: CPU number to query
 * @last_update_time: variable to store update time in. Do not update
 * counters if NULL.
 *
 * Return the cumulative iowait time (since boot) for a given
 * CPU, in microseconds.
 *
 * This time is measured via accounting rather than sampling,
 * and is as accurate as ktime_get() is.
 *
 * This function returns -1 if NOHZ is not enabled.
 */
u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t now, iowait;

	if (!tick_nohz_enabled)
		return -1;

	now = ktime_get();
	if (last_update_time) {
		update_ts_time_stats(cpu, ts, now, last_update_time);
		iowait = ts->iowait_sleeptime;
	} else {
		if (ts->idle_active && nr_iowait_cpu(cpu) > 0) {
			ktime_t delta = ktime_sub(now, ts->idle_entrytime);

			iowait = ktime_add(ts->iowait_sleeptime, delta);
		} else {
			iowait = ts->iowait_sleeptime;
		}
	}

	return ktime_to_us(iowait);
}
EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);

static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
{
	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies;
	ktime_t last_update, expires, now;
	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
	u64 time_delta;
	int cpu;

	cpu = smp_processor_id();
	ts = &per_cpu(tick_cpu_sched, cpu);

	now = tick_nohz_start_idle(cpu, ts);

	/*
	 * If this cpu is offline and it is the one which updates
	 * jiffies, then give up the assignment and let it be taken by
	 * the cpu which runs the tick timer next. If we don't drop
	 * this here the jiffies might be stale and do_timer() is
	 * never invoked.
	 */
	if (unlikely(!cpu_online(cpu))) {
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
	}

	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
		return;

	if (need_resched())
		return;

	if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
		static int ratelimit;

		if (ratelimit < 10) {
			printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
			       (unsigned int) local_softirq_pending());
			ratelimit++;
		}
		return;
	}

	ts->idle_calls++;
	/* Read jiffies and the time when jiffies were updated last */
	do {
		seq = read_seqbegin(&xtime_lock);
		last_update = last_jiffies_update;
		last_jiffies = jiffies;
		time_delta = timekeeping_max_deferment();
	} while (read_seqretry(&xtime_lock, seq));

	if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) ||
	    arch_needs_cpu(cpu)) {
		next_jiffies = last_jiffies + 1;
		delta_jiffies = 1;
	} else {
		/* Get the next timer wheel timer */
		next_jiffies = get_next_timer_interrupt(last_jiffies);
		delta_jiffies = next_jiffies - last_jiffies;
	}
	/*
	 * Do not stop the tick if we are only one off
	 * or if the cpu is required for rcu
	 */
	if (!ts->tick_stopped && delta_jiffies == 1)
		goto out;

	/* Schedule the tick, if we are at least one jiffy off */
	if ((long)delta_jiffies >= 1) {

		/*
		 * If this cpu is the one which updates jiffies, then
		 * give up the assignment and let it be taken by the
		 * cpu which runs the tick timer next, which might be
		 * this cpu as well. If we don't drop this here the
		 * jiffies might be stale and do_timer() is never
		 * invoked. Keep track of the fact that it was the one
		 * which had the do_timer() duty last. If this cpu is
		 * the one which had the do_timer() duty last, we
		 * limit the sleep time to the timekeeping
		 * max_deferment value which we retrieved
		 * above. Otherwise we can sleep as long as we want.
		 */
		if (cpu == tick_do_timer_cpu) {
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
			ts->do_timer_last = 1;
		} else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
			time_delta = KTIME_MAX;
			ts->do_timer_last = 0;
		} else if (!ts->do_timer_last) {
			time_delta = KTIME_MAX;
		}

		/*
		 * Calculate the expiry time for the next timer wheel
		 * timer. delta_jiffies >= NEXT_TIMER_MAX_DELTA signals
		 * that there is no timer pending or at least extremely
		 * far into the future (12 days for HZ=1000). In this
		 * case we set the expiry to the end of time.
		 */
		if (likely(delta_jiffies < NEXT_TIMER_MAX_DELTA)) {
			/*
			 * Calculate the time delta for the next timer event.
			 * If the time delta exceeds the maximum time delta
			 * permitted by the current clocksource then adjust
			 * the time delta accordingly to ensure the
			 * clocksource does not wrap.
			 */
			time_delta = min_t(u64, time_delta,
					   tick_period.tv64 * delta_jiffies);
		}

		if (time_delta < KTIME_MAX)
			expires = ktime_add_ns(last_update, time_delta);
		else
			expires.tv64 = KTIME_MAX;

		/* Skip reprogramming the event if it's not changed */
		if (ts->tick_stopped && ktime_equal(expires, dev->next_event))
			goto out;

		/*
		 * nohz_stop_sched_tick can be called several times before
		 * the nohz_restart_sched_tick is called. This happens when
		 * interrupts arrive which do not cause a reschedule. In the
		 * first call we save the current tick time, so we can restart
		 * the scheduler tick in nohz_restart_sched_tick.
		 */
		if (!ts->tick_stopped) {
			select_nohz_load_balancer(1);
			calc_load_enter_idle();

			ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
			ts->tick_stopped = 1;
			ts->idle_jiffies = last_jiffies;
		}

		ts->idle_sleeps++;

		/* Mark expires */
		ts->idle_expires = expires;

		/*
		 * If the expiration time == KTIME_MAX, we simply stop
		 * the tick timer.
		 */
		if (unlikely(expires.tv64 == KTIME_MAX)) {
			if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
				hrtimer_cancel(&ts->sched_timer);
			goto out;
		}

		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
			hrtimer_start(&ts->sched_timer, expires,
				      HRTIMER_MODE_ABS_PINNED);
			/* Check, if the timer was already in the past */
			if (hrtimer_active(&ts->sched_timer))
				goto out;
		} else if (!tick_program_event(expires, 0))
			goto out;
		/*
		 * We are past the event already. So we crossed a
		 * jiffy boundary. Update jiffies and raise the
		 * softirq.
		 */
		tick_do_update_jiffies64(ktime_get());
	}
	raise_softirq_irqoff(TIMER_SOFTIRQ);
out:
	ts->next_jiffies = next_jiffies;
	ts->last_jiffies = last_jiffies;
}

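/*
 * Summary of the bail-out conditions above: the tick is only stopped
 * when the cpu is online, nohz mode is active, nothing is waiting to be
 * scheduled, no softirqs are pending and the next timer wheel event is
 * more than one jiffy away; the sleep is additionally clamped to
 * timekeeping_max_deferment() when this cpu keeps the do_timer() duty.
 */
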
/**
 * tick_nohz_idle_enter - stop the idle tick from the idle task
 *
 * When the next event is more than a tick into the future, stop the
 * idle tick. Called when we start the idle loop.
 *
 * The arch is responsible for calling:
 *
 * - rcu_idle_enter() after its last use of RCU before the CPU is put
 *  to sleep.
 * - rcu_idle_exit() before the first use of RCU after the CPU is woken up.
 */
void tick_nohz_idle_enter(void)
{
	struct tick_sched *ts;

	WARN_ON_ONCE(irqs_disabled());

	/*
	 * Update the idle state in the scheduler domain hierarchy
	 * when tick_nohz_stop_sched_tick() is called from the idle loop.
	 * State will be updated to busy during the first busy tick after
	 * exiting idle.
	 */
	set_cpu_sd_state_idle();

	local_irq_disable();

	ts = &__get_cpu_var(tick_cpu_sched);
	/*
	 * Set ts->inidle unconditionally. Even if the system did not
	 * switch to nohz mode the cpu frequency governors rely on the
	 * update of the idle time accounting in tick_nohz_start_idle().
	 */
	ts->inidle = 1;
	tick_nohz_stop_sched_tick(ts);

	local_irq_enable();
}

/**
 * tick_nohz_irq_exit - update next tick event from interrupt exit
 *
 * When an interrupt fires while we are idle and it doesn't cause
 * a reschedule, it may still add, modify or delete a timer, enqueue
 * an RCU callback, etc...
 * So we need to re-calculate and reprogram the next tick event.
 */
void tick_nohz_irq_exit(void)
{
	unsigned long flags;
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

	if (!ts->inidle)
		return;

	local_irq_save(flags);

	tick_nohz_stop_sched_tick(ts);

	local_irq_restore(flags);
}

/**
 * tick_nohz_get_sleep_length - return the length of the current sleep
 *
 * Called from power state control code with interrupts disabled
 */
ktime_t tick_nohz_get_sleep_length(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;

	return ktime_sub(dev->next_event, ts->idle_entrytime);
}

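/*
 * Consumer sketch (illustrative): a cpuidle governor typically calls
 * this with interrupts disabled to bound the predicted idle residency:
 *
 *	ktime_t t = tick_nohz_get_sleep_length();
 *	s64 sleep_us = ktime_to_us(t);
 */
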
static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
{
	hrtimer_cancel(&ts->sched_timer);
	hrtimer_set_expires(&ts->sched_timer, ts->idle_tick);

	while (1) {
		/* Forward the time to expire in the future */
		hrtimer_forward(&ts->sched_timer, now, tick_period);

		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
			hrtimer_start_expires(&ts->sched_timer,
					      HRTIMER_MODE_ABS_PINNED);
			/* Check, if the timer was already in the past */
			if (hrtimer_active(&ts->sched_timer))
				break;
		} else {
			if (!tick_program_event(
				hrtimer_get_expires(&ts->sched_timer), 0))
				break;
		}
		/* Reread time and update jiffies */
		now = ktime_get();
		tick_do_update_jiffies64(now);
	}
}

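/*
 * Note on the retry loop above: when the forwarded expiry has already
 * passed by the time it is programmed, time is reread and jiffies are
 * updated, and the expiry is forwarded by whole tick periods until
 * programming succeeds.
 */
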
/**
 * tick_nohz_idle_exit - restart the idle tick from the idle task
 *
 * Restart the idle tick when the CPU is woken up from idle.
 * This also exits the RCU extended quiescent state. The CPU
 * can use RCU again after this function is called.
 */
void tick_nohz_idle_exit(void)
{
	int cpu = smp_processor_id();
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
	unsigned long ticks;
#endif
	ktime_t now;

	local_irq_disable();

	WARN_ON_ONCE(!ts->inidle);

	ts->inidle = 0;

	if (ts->idle_active || ts->tick_stopped)
		now = ktime_get();

	if (ts->idle_active)
		tick_nohz_stop_idle(cpu, now);

	if (!ts->tick_stopped) {
		local_irq_enable();
		return;
	}

	/* Update jiffies first */
	select_nohz_load_balancer(0);
	tick_do_update_jiffies64(now);

#ifndef CONFIG_VIRT_CPU_ACCOUNTING
	/*
	 * We stopped the tick in idle. update_process_times() would miss
	 * the time we slept, as it only does one tick of accounting.
	 * Enforce that this is accounted to idle !
	 */
	ticks = jiffies - ts->idle_jiffies;
	/*
	 * We might be one off. Do not randomly account a huge number of ticks!
	 */
	if (ticks && ticks < LONG_MAX)
		account_idle_ticks(ticks);
#endif

	calc_load_exit_idle();
	touch_softlockup_watchdog();
	/*
	 * Cancel the scheduled timer and restore the tick
	 */
	ts->tick_stopped = 0;
	ts->idle_exittime = now;

	tick_nohz_restart(ts, now);

	local_irq_enable();
}

static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now)
{
	hrtimer_forward(&ts->sched_timer, now, tick_period);
	return tick_program_event(hrtimer_get_expires(&ts->sched_timer), 0);
}

/*
 * The nohz low res interrupt handler
 */
static void tick_nohz_handler(struct clock_event_device *dev)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
	struct pt_regs *regs = get_irq_regs();
	int cpu = smp_processor_id();
	ktime_t now = ktime_get();

	dev->next_event.tv64 = KTIME_MAX;

	/*
	 * Check if the do_timer duty was dropped. We don't care about
	 * concurrency: This happens only when the cpu in charge went
	 * into a long sleep. If two cpus happen to assign themselves to
	 * this duty, then the jiffies update is still serialized by
	 * xtime_lock.
	 */
	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
		tick_do_timer_cpu = cpu;

	/* Check, if the jiffies need an update */
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);

	/*
	 * When we are idle and the tick is stopped, we have to touch
	 * the watchdog as we might not schedule for a really long
	 * time. This happens on complete idle SMP systems while
	 * waiting on the login prompt. We also increment the "start
	 * of idle" jiffy stamp so the idle accounting adjustment we
	 * do when we go busy again does not account too many ticks.
	 */
	if (ts->tick_stopped) {
		touch_softlockup_watchdog();
		ts->idle_jiffies++;
	}

	update_process_times(user_mode(regs));
	profile_tick(CPU_PROFILING);

	while (tick_nohz_reprogram(ts, now)) {
		now = ktime_get();
		tick_do_update_jiffies64(now);
	}
}

/**
 * tick_nohz_switch_to_nohz - switch to nohz mode
 */
static void tick_nohz_switch_to_nohz(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
	ktime_t next;

	if (!tick_nohz_enabled)
		return;

	local_irq_disable();
	if (tick_switch_to_oneshot(tick_nohz_handler)) {
		local_irq_enable();
		return;
	}

	ts->nohz_mode = NOHZ_MODE_LOWRES;

	/*
	 * Recycle the hrtimer in ts, so we can share the
	 * hrtimer_forward with the highres code.
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	/* Get the next period */
	next = tick_init_jiffy_update();

	for (;;) {
		hrtimer_set_expires(&ts->sched_timer, next);
		if (!tick_program_event(next, 0))
			break;
		next = ktime_add(next, tick_period);
	}
	local_irq_enable();
}

/*
 * When NOHZ is enabled and the tick is stopped, we need to kick the
 * tick timer from irq_enter() so that the jiffies update is kept
 * alive during long running softirqs. That's ugly as hell, but
 * correctness is key even if we need to fix the offending softirq in
 * the first place.
 *
 * Note, this is different to tick_nohz_restart. We just kick the
 * timer and do not touch the other magic bits which need to be done
 * when idle is left.
 */
static void tick_nohz_kick_tick(int cpu, ktime_t now)
{
#if 0
	/* Switch back to 2.6.27 behaviour */

	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t delta;

	/*
	 * Do not touch the tick device, when the next expiry is either
	 * already reached or less/equal than the tick period.
	 */
	delta = ktime_sub(hrtimer_get_expires(&ts->sched_timer), now);
	if (delta.tv64 <= tick_period.tv64)
		return;

	tick_nohz_restart(ts, now);
#endif
}

static inline void tick_check_nohz(int cpu)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t now;

	if (!ts->idle_active && !ts->tick_stopped)
		return;
	now = ktime_get();
	if (ts->idle_active)
		tick_nohz_stop_idle(cpu, now);
	if (ts->tick_stopped) {
		tick_nohz_update_jiffies(now);
		tick_nohz_kick_tick(cpu, now);
	}
}

#else

static inline void tick_nohz_switch_to_nohz(void) { }
static inline void tick_check_nohz(int cpu) { }

#endif /* NO_HZ */

/*
 * Called from irq_enter to notify about the possible interruption of idle()
 */
void tick_check_idle(int cpu)
{
	tick_check_oneshot_broadcast(cpu);
	tick_check_nohz(cpu);
}

/*
 * High resolution timer specific code
 */
#ifdef CONFIG_HIGH_RES_TIMERS
static void update_rq_stats(void)
{
	unsigned long jiffy_gap = 0;
	unsigned int rq_avg = 0;
	unsigned long flags = 0;

	jiffy_gap = jiffies - rq_info.rq_poll_last_jiffy;

	if (jiffy_gap >= rq_info.rq_poll_jiffies) {

		spin_lock_irqsave(&rq_lock, flags);

		if (!rq_info.rq_avg)
			rq_info.rq_poll_total_jiffies = 0;

		rq_avg = nr_running() * 10;

		if (rq_info.rq_poll_total_jiffies) {
			rq_avg = (rq_avg * jiffy_gap) +
				 (rq_info.rq_avg *
				  rq_info.rq_poll_total_jiffies);
			do_div(rq_avg,
			       rq_info.rq_poll_total_jiffies + jiffy_gap);
		}

		rq_info.rq_avg = rq_avg;
		rq_info.rq_poll_total_jiffies += jiffy_gap;
		rq_info.rq_poll_last_jiffy = jiffies;

		spin_unlock_irqrestore(&rq_lock, flags);
	}
}

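/*
 * The update above maintains a time-weighted running average of the run
 * queue depth, scaled by 10:
 *
 *	rq_avg = (nr_running() * 10 * gap + old_avg * old_total)
 *			/ (old_total + gap)
 *
 * where gap is the number of jiffies since the last poll and old_total
 * is rq_poll_total_jiffies before this sample.
 */
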
static void wakeup_user(void)
{
	unsigned long jiffy_gap;

	jiffy_gap = jiffies - rq_info.def_timer_last_jiffy;

	if (jiffy_gap >= rq_info.def_timer_jiffies) {
		rq_info.def_timer_last_jiffy = jiffies;
		queue_work(rq_wq, &rq_info.def_timer_work);
	}
}

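/*
 * wakeup_user() above rate-limits the userspace notification: the
 * deferred work is queued at most once every def_timer_jiffies jiffies,
 * driven from the sched timer below.
 */
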
/*
 * We rearm the timer until we get disabled by the idle code.
 * Called with interrupts disabled and timer->base->cpu_base->lock held.
 */
static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
{
	struct tick_sched *ts =
		container_of(timer, struct tick_sched, sched_timer);
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();
	int cpu = smp_processor_id();

#ifdef CONFIG_NO_HZ
	/*
	 * Check if the do_timer duty was dropped. We don't care about
	 * concurrency: This happens only when the cpu in charge went
	 * into a long sleep. If two cpus happen to assign themselves to
	 * this duty, then the jiffies update is still serialized by
	 * xtime_lock.
	 */
	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
		tick_do_timer_cpu = cpu;
#endif

	/* Check, if the jiffies need an update */
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);

	/*
	 * Do not call when we are not in irq context and have
	 * no valid regs pointer
	 */
	if (regs) {
		/*
		 * When we are idle and the tick is stopped, we have to touch
		 * the watchdog as we might not schedule for a really long
		 * time. This happens on complete idle SMP systems while
		 * waiting on the login prompt. We also increment the "start of
		 * idle" jiffy stamp so the idle accounting adjustment we do
		 * when we go busy again does not account too many ticks.
		 */
		if (ts->tick_stopped) {
			touch_softlockup_watchdog();
			ts->idle_jiffies++;
		}
		update_process_times(user_mode(regs));
		profile_tick(CPU_PROFILING);

		if ((rq_info.init == 1) && (tick_do_timer_cpu == cpu)) {

			/*
			 * update run queue statistics
			 */
			update_rq_stats();

			/*
			 * wakeup user if needed
			 */
			wakeup_user();
		}
	}

	hrtimer_forward(timer, now, tick_period);

	return HRTIMER_RESTART;
}

/**
 * tick_setup_sched_timer - setup the tick emulation timer
 */
void tick_setup_sched_timer(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
	ktime_t now = ktime_get();

	/*
	 * Emulate tick processing via per-CPU hrtimers:
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	ts->sched_timer.function = tick_sched_timer;

	/* Get the next period (per cpu) */
	hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());

	for (;;) {
		hrtimer_forward(&ts->sched_timer, now, tick_period);
		hrtimer_start_expires(&ts->sched_timer,
				      HRTIMER_MODE_ABS_PINNED);
		/* Check, if the timer was already in the past */
		if (hrtimer_active(&ts->sched_timer))
			break;
		now = ktime_get();
	}

#ifdef CONFIG_NO_HZ
	if (tick_nohz_enabled)
		ts->nohz_mode = NOHZ_MODE_HIGHRES;
#endif
}
#endif /* HIGH_RES_TIMERS */

#if defined CONFIG_NO_HZ || defined CONFIG_HIGH_RES_TIMERS
void tick_cancel_sched_timer(int cpu)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

# ifdef CONFIG_HIGH_RES_TIMERS
	if (ts->sched_timer.base)
		hrtimer_cancel(&ts->sched_timer);
# endif

	memset(ts, 0, sizeof(*ts));
}
#endif

/**
 * Async notification about clocksource changes
 */
void tick_clock_notify(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks);
}

/*
 * Async notification about clock event changes
 */
void tick_oneshot_notify(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

	set_bit(0, &ts->check_clocks);
}

/**
 * Check, if a change happened, which makes oneshot possible.
 *
 * Called cyclically from the hrtimer softirq (driven by the timer
 * softirq). allow_nohz signals that we can switch into low-res nohz
 * mode, because high resolution timers are disabled (either at compile
 * time or at runtime).
 */
int tick_check_oneshot_change(int allow_nohz)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

	if (!test_and_clear_bit(0, &ts->check_clocks))
		return 0;

	if (ts->nohz_mode != NOHZ_MODE_INACTIVE)
		return 0;

	if (!timekeeping_valid_for_hres() || !tick_is_oneshot_available())
		return 0;

	if (!allow_nohz)
		return 1;

	tick_nohz_switch_to_nohz();
	return 0;
}