/*
 * linux/kernel/time/tick-sched.c
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner
 *
 * No idle tick implementation for low and high resolution timers
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 *
 * Distribute under GPLv2.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/module.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"

/*
 * Per cpu nohz control structure
 */
static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);

/*
 * The time, when the last jiffy update happened. Protected by xtime_lock.
 */
static ktime_t last_jiffies_update;

struct tick_sched *tick_get_tick_sched(int cpu)
{
	return &per_cpu(tick_cpu_sched, cpu);
}

/*
 * Must be called with interrupts disabled !
 */
static void tick_do_update_jiffies64(ktime_t now)
{
	unsigned long ticks = 0;
	ktime_t delta;

	/*
	 * Do a quick check without holding xtime_lock:
	 */
	delta = ktime_sub(now, last_jiffies_update);
	if (delta.tv64 < tick_period.tv64)
		return;

	/* Reevaluate with xtime_lock held */
	write_seqlock(&xtime_lock);

	delta = ktime_sub(now, last_jiffies_update);
	if (delta.tv64 >= tick_period.tv64) {

		delta = ktime_sub(delta, tick_period);
		last_jiffies_update = ktime_add(last_jiffies_update,
						tick_period);

		/* Slow path for long timeouts */
		if (unlikely(delta.tv64 >= tick_period.tv64)) {
			s64 incr = ktime_to_ns(tick_period);

			ticks = ktime_divns(delta, incr);

			last_jiffies_update = ktime_add_ns(last_jiffies_update,
							   incr * ticks);
		}
		do_timer(++ticks);

		/* Keep the tick_next_period variable up to date */
		tick_next_period = ktime_add(last_jiffies_update, tick_period);
	}
	write_sequnlock(&xtime_lock);
}

/*
 * Initialize and retrieve the jiffies update.
 */
static ktime_t tick_init_jiffy_update(void)
{
	ktime_t period;

	write_seqlock(&xtime_lock);
	/* Did we start the jiffies update yet ? */
	if (last_jiffies_update.tv64 == 0)
		last_jiffies_update = tick_next_period;
	period = last_jiffies_update;
	write_sequnlock(&xtime_lock);
	return period;
}


static void tick_sched_do_timer(ktime_t now)
{
	int cpu = smp_processor_id();

#ifdef CONFIG_NO_HZ
	/*
	 * Check if the do_timer duty was dropped. We don't care about
	 * concurrency: This happens only when the cpu in charge went
	 * into a long sleep. If two cpus happen to assign themselves to
	 * this duty, then the jiffies update is still serialized by
	 * xtime_lock.
	 */
	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
		tick_do_timer_cpu = cpu;
#endif

	/* Check, if the jiffies need an update */
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);
}

static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
{
	/*
	 * When we are idle and the tick is stopped, we have to touch
	 * the watchdog as we might not schedule for a really long
	 * time. This happens on complete idle SMP systems while
	 * waiting on the login prompt. We also increment the "start of
	 * idle" jiffy stamp so the idle accounting adjustment we do
	 * when we go busy again does not account too many ticks.
	 */
	if (ts->tick_stopped) {
		touch_softlockup_watchdog();
		if (is_idle_task(current))
			ts->idle_jiffies++;
	}
	update_process_times(user_mode(regs));
	profile_tick(CPU_PROFILING);
}

/*
 * NOHZ - aka dynamic tick functionality
 */
#ifdef CONFIG_NO_HZ
/*
 * NO HZ enabled ?
 */
int tick_nohz_enabled __read_mostly = 1;

/*
 * Enable / Disable tickless mode
 */
static int __init setup_tick_nohz(char *str)
{
	if (!strcmp(str, "off"))
		tick_nohz_enabled = 0;
	else if (!strcmp(str, "on"))
		tick_nohz_enabled = 1;
	else
		return 0;
	return 1;
}

__setup("nohz=", setup_tick_nohz);
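
/*
 * Example (illustrative): the handler above is wired to the "nohz="
 * kernel boot parameter, so dynamic ticks can be toggled on the
 * kernel command line:
 *
 *	nohz=off	keep the periodic tick even when idle
 *	nohz=on		allow the idle tick to be stopped (the default)
 */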

/**
 * tick_nohz_update_jiffies - update jiffies when idle was interrupted
 *
 * Called from interrupt entry when the CPU was idle
 *
 * In case the sched_tick was stopped on this CPU, we have to check if jiffies
 * must be updated. Otherwise an interrupt handler could use a stale jiffy
 * value. We do this unconditionally on any cpu, as we don't know whether the
 * cpu, which has the update task assigned is in a long sleep.
 */
static void tick_nohz_update_jiffies(ktime_t now)
{
	int cpu = smp_processor_id();
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	unsigned long flags;

	ts->idle_waketime = now;

	local_irq_save(flags);
	tick_do_update_jiffies64(now);
	local_irq_restore(flags);

	touch_softlockup_watchdog();
}

/*
 * Updates the per cpu time idle statistics counters
 */
static void
update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time)
{
	ktime_t delta;

	if (ts->idle_active) {
		delta = ktime_sub(now, ts->idle_entrytime);
		if (nr_iowait_cpu(cpu) > 0)
			ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
		else
			ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
		ts->idle_entrytime = now;
	}

	if (last_update_time)
		*last_update_time = ktime_to_us(now);

}

static void tick_nohz_stop_idle(int cpu, ktime_t now)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

	update_ts_time_stats(cpu, ts, now, NULL);
	ts->idle_active = 0;

	sched_clock_idle_wakeup_event(0);
}

static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts)
{
	ktime_t now = ktime_get();

	ts->idle_entrytime = now;
	ts->idle_active = 1;
	sched_clock_idle_sleep_event();
	return now;
}

/**
 * get_cpu_idle_time_us - get the total idle time of a cpu
 * @cpu: CPU number to query
 * @last_update_time: variable to store update time in. Do not update
 * counters if NULL.
 *
 * Return the cumulative idle time (since boot) for a given
Michal Hocko6beea0c2011-08-24 09:37:48 +0200241 * CPU, in microseconds.
Arjan van de Venb1f724c2010-05-09 08:22:08 -0700242 *
243 * This time is measured via accounting rather than sampling,
244 * and is as accurate as ktime_get() is.
245 *
246 * This function returns -1 if NOHZ is not enabled.
247 */
Venki Pallipadi6378ddb2008-01-30 13:30:04 +0100248u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
249{
250 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
Michal Hocko09a1d342011-08-24 09:39:30 +0200251 ktime_t now, idle;
Venki Pallipadi6378ddb2008-01-30 13:30:04 +0100252
venkatesh.pallipadi@intel.com8083e4a2008-08-04 11:59:11 -0700253 if (!tick_nohz_enabled)
254 return -1;
255
Michal Hocko09a1d342011-08-24 09:39:30 +0200256 now = ktime_get();
257 if (last_update_time) {
258 update_ts_time_stats(cpu, ts, now, last_update_time);
259 idle = ts->idle_sleeptime;
260 } else {
261 if (ts->idle_active && !nr_iowait_cpu(cpu)) {
262 ktime_t delta = ktime_sub(now, ts->idle_entrytime);
venkatesh.pallipadi@intel.com8083e4a2008-08-04 11:59:11 -0700263
Michal Hocko09a1d342011-08-24 09:39:30 +0200264 idle = ktime_add(ts->idle_sleeptime, delta);
265 } else {
266 idle = ts->idle_sleeptime;
267 }
268 }
269
270 return ktime_to_us(idle);
271
Venki Pallipadi6378ddb2008-01-30 13:30:04 +0100272}
venkatesh.pallipadi@intel.com8083e4a2008-08-04 11:59:11 -0700273EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);
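
/*
 * Usage sketch (illustration only, not built here): a consumer such as a
 * cpufreq governor can sample the accounted idle time twice and derive an
 * idle ratio for a CPU over the interval between the two samples. The
 * helper name below is made up for the example; it would additionally
 * need <linux/delay.h> and <linux/math64.h>.
 */
#if 0
static unsigned int cpu_idle_permille(int cpu, unsigned int interval_ms)
{
	u64 wall_before, wall_after, idle_before, idle_after;
	u64 wall_delta, idle_delta;

	idle_before = get_cpu_idle_time_us(cpu, &wall_before);
	msleep(interval_ms);			/* measurement interval */
	idle_after = get_cpu_idle_time_us(cpu, &wall_after);

	wall_delta = wall_after - wall_before;	/* microseconds of wall time */
	idle_delta = idle_after - idle_before;	/* microseconds spent idle */

	if (!wall_delta)
		return 0;
	return div64_u64(1000 * idle_delta, wall_delta);
}
#endif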

/**
 * get_cpu_iowait_time_us - get the total iowait time of a cpu
 * @cpu: CPU number to query
 * @last_update_time: variable to store update time in. Do not update
 * counters if NULL.
 *
 * Return the cumulative iowait time (since boot) for a given
 * CPU, in microseconds.
 *
 * This time is measured via accounting rather than sampling,
 * and is as accurate as ktime_get() is.
 *
 * This function returns -1 if NOHZ is not enabled.
 */
u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t now, iowait;

	if (!tick_nohz_enabled)
		return -1;

	now = ktime_get();
	if (last_update_time) {
		update_ts_time_stats(cpu, ts, now, last_update_time);
		iowait = ts->iowait_sleeptime;
	} else {
		if (ts->idle_active && nr_iowait_cpu(cpu) > 0) {
			ktime_t delta = ktime_sub(now, ts->idle_entrytime);

			iowait = ktime_add(ts->iowait_sleeptime, delta);
		} else {
			iowait = ts->iowait_sleeptime;
		}
	}

	return ktime_to_us(iowait);
}
EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);

static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
					 ktime_t now, int cpu)
{
	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies;
	ktime_t last_update, expires, ret = { .tv64 = 0 };
	unsigned long rcu_delta_jiffies;
	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
	u64 time_delta;

	/* Read jiffies and the time when jiffies were updated last */
	do {
		seq = read_seqbegin(&xtime_lock);
		last_update = last_jiffies_update;
		last_jiffies = jiffies;
		time_delta = timekeeping_max_deferment();
	} while (read_seqretry(&xtime_lock, seq));

	if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || printk_needs_cpu(cpu) ||
	    arch_needs_cpu(cpu)) {
		next_jiffies = last_jiffies + 1;
		delta_jiffies = 1;
	} else {
		/* Get the next timer wheel timer */
		next_jiffies = get_next_timer_interrupt(last_jiffies);
		delta_jiffies = next_jiffies - last_jiffies;
		if (rcu_delta_jiffies < delta_jiffies) {
			next_jiffies = last_jiffies + rcu_delta_jiffies;
			delta_jiffies = rcu_delta_jiffies;
		}
	}
	/*
	 * Do not stop the tick, if we are only one off
	 * or if the cpu is required for rcu
	 */
	if (!ts->tick_stopped && delta_jiffies == 1)
		goto out;

	/* Schedule the tick, if we are at least one jiffie off */
	if ((long)delta_jiffies >= 1) {

		/*
		 * If this cpu is the one which updates jiffies, then
		 * give up the assignment and let it be taken by the
		 * cpu which runs the tick timer next, which might be
		 * this cpu as well. If we don't drop this here the
		 * jiffies might be stale and do_timer() never
		 * invoked. Keep track of the fact that it was the one
		 * which had the do_timer() duty last. If this cpu is
		 * the one which had the do_timer() duty last, we
		 * limit the sleep time to the timekeeping
		 * max_deferment value which we retrieved
		 * above. Otherwise we can sleep as long as we want.
		 */
		if (cpu == tick_do_timer_cpu) {
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
			ts->do_timer_last = 1;
		} else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
			time_delta = KTIME_MAX;
			ts->do_timer_last = 0;
		} else if (!ts->do_timer_last) {
			time_delta = KTIME_MAX;
		}

		/*
		 * calculate the expiry time for the next timer wheel
		 * timer. delta_jiffies >= NEXT_TIMER_MAX_DELTA signals
		 * that there is no timer pending or at least extremely
		 * far into the future (12 days for HZ=1000). In this
		 * case we set the expiry to the end of time.
		 */
		if (likely(delta_jiffies < NEXT_TIMER_MAX_DELTA)) {
			/*
			 * Calculate the time delta for the next timer event.
			 * If the time delta exceeds the maximum time delta
			 * permitted by the current clocksource then adjust
			 * the time delta accordingly to ensure the
			 * clocksource does not wrap.
			 */
			time_delta = min_t(u64, time_delta,
					   tick_period.tv64 * delta_jiffies);
		}

		if (time_delta < KTIME_MAX)
			expires = ktime_add_ns(last_update, time_delta);
		else
			expires.tv64 = KTIME_MAX;

		/* Skip reprogram of event if its not changed */
		if (ts->tick_stopped && ktime_equal(expires, dev->next_event))
			goto out;

		ret = expires;

		/*
		 * nohz_stop_sched_tick can be called several times before
		 * the nohz_restart_sched_tick is called. This happens when
		 * interrupts arrive which do not cause a reschedule. In the
		 * first call we save the current tick time, so we can restart
		 * the scheduler tick in nohz_restart_sched_tick.
		 */
		if (!ts->tick_stopped) {
			nohz_balance_enter_idle(cpu);
			calc_load_enter_idle();

			ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
			ts->tick_stopped = 1;
		}

		/*
		 * If the expiration time == KTIME_MAX, then
		 * in this case we simply stop the tick timer.
		 */
		if (unlikely(expires.tv64 == KTIME_MAX)) {
			if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
				hrtimer_cancel(&ts->sched_timer);
			goto out;
		}

		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
			hrtimer_start(&ts->sched_timer, expires,
				      HRTIMER_MODE_ABS_PINNED);
			/* Check, if the timer was already in the past */
			if (hrtimer_active(&ts->sched_timer))
				goto out;
		} else if (!tick_program_event(expires, 0))
				goto out;
		/*
		 * We are past the event already. So we crossed a
		 * jiffie boundary. Update jiffies and raise the
		 * softirq.
		 */
		tick_do_update_jiffies64(ktime_get());
	}
	raise_softirq_irqoff(TIMER_SOFTIRQ);
out:
	ts->next_jiffies = next_jiffies;
	ts->last_jiffies = last_jiffies;
	ts->sleep_length = ktime_sub(dev->next_event, now);

	return ret;
}

static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
{
	/*
	 * If this cpu is offline and it is the one which updates
	 * jiffies, then give up the assignment and let it be taken by
	 * the cpu which runs the tick timer next. If we don't drop
	 * this here the jiffies might be stale and do_timer() never
	 * invoked.
	 */
	if (unlikely(!cpu_online(cpu))) {
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
	}

	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
		return false;

	if (need_resched())
		return false;

	if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
		static int ratelimit;

		if (ratelimit < 10 &&
		    (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
			printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
			       (unsigned int) local_softirq_pending());
			ratelimit++;
		}
		return false;
	}

	return true;
}

static void __tick_nohz_idle_enter(struct tick_sched *ts)
{
	ktime_t now, expires;
	int cpu = smp_processor_id();

	now = tick_nohz_start_idle(cpu, ts);

	if (can_stop_idle_tick(cpu, ts)) {
		int was_stopped = ts->tick_stopped;

		ts->idle_calls++;

		expires = tick_nohz_stop_sched_tick(ts, now, cpu);
		if (expires.tv64 > 0LL) {
			ts->idle_sleeps++;
			ts->idle_expires = expires;
		}

		if (!was_stopped && ts->tick_stopped)
			ts->idle_jiffies = ts->last_jiffies;
	}
}

/**
 * tick_nohz_idle_enter - stop the idle tick from the idle task
 *
 * When the next event is more than a tick into the future, stop the idle tick
 * Called when we start the idle loop.
 *
 * The arch is responsible for calling:
 *
 * - rcu_idle_enter() after its last use of RCU before the CPU is put
 *  to sleep.
 * - rcu_idle_exit() before the first use of RCU after the CPU is woken up.
 */
void tick_nohz_idle_enter(void)
{
	struct tick_sched *ts;

	WARN_ON_ONCE(irqs_disabled());

	/*
	 * Update the idle state in the scheduler domain hierarchy
	 * when tick_nohz_stop_sched_tick() is called from the idle loop.
	 * State will be updated to busy during the first busy tick after
	 * exiting idle.
	 */
	set_cpu_sd_state_idle();

	local_irq_disable();

	ts = &__get_cpu_var(tick_cpu_sched);
	/*
	 * Set ts->inidle unconditionally. Even if the system did not
	 * switch to nohz mode, the cpu frequency governors rely on the
	 * update of the idle time accounting in tick_nohz_start_idle().
	 */
	ts->inidle = 1;
	__tick_nohz_idle_enter(ts);

	local_irq_enable();
}
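
/*
 * Rough sketch of the ordering an architecture's idle loop is expected to
 * follow (the real loop is arch specific; the wait step is a placeholder):
 *
 *	tick_nohz_idle_enter();
 *	rcu_idle_enter();
 *	...				halt/wfi until the next interrupt
 *	rcu_idle_exit();
 *	tick_nohz_idle_exit();
 */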

/**
 * tick_nohz_irq_exit - update next tick event from interrupt exit
 *
 * When an interrupt fires while we are idle and it doesn't cause
 * a reschedule, it may still add, modify or delete a timer, enqueue
 * an RCU callback, etc...
 * So we need to re-calculate and reprogram the next tick event.
 */
void tick_nohz_irq_exit(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

	if (!ts->inidle)
		return;

	__tick_nohz_idle_enter(ts);
}

/**
 * tick_nohz_get_sleep_length - return the length of the current sleep
 *
 * Called from power state control code with interrupts disabled
 */
ktime_t tick_nohz_get_sleep_length(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

	return ts->sleep_length;
}
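
/*
 * Illustrative use: a cpuidle governor, running with interrupts disabled,
 * can bound its predicted idle period by the value returned here, e.g.:
 *
 *	s64 max_ns = ktime_to_ns(tick_nohz_get_sleep_length());
 *
 * and then pick the deepest sleep state whose target residency fits in
 * max_ns.
 */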

static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
{
	hrtimer_cancel(&ts->sched_timer);
	hrtimer_set_expires(&ts->sched_timer, ts->last_tick);

	while (1) {
		/* Forward the time to expire in the future */
		hrtimer_forward(&ts->sched_timer, now, tick_period);

		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
			hrtimer_start_expires(&ts->sched_timer,
					      HRTIMER_MODE_ABS_PINNED);
			/* Check, if the timer was already in the past */
			if (hrtimer_active(&ts->sched_timer))
				break;
		} else {
			if (!tick_program_event(
				hrtimer_get_expires(&ts->sched_timer), 0))
				break;
		}
		/* Reread time and update jiffies */
		now = ktime_get();
		tick_do_update_jiffies64(now);
	}
}

static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
{
	/* Update jiffies first */
	tick_do_update_jiffies64(now);
	update_cpu_load_nohz();

	calc_load_exit_idle();
	touch_softlockup_watchdog();
	/*
	 * Cancel the scheduled timer and restore the tick
	 */
	ts->tick_stopped = 0;
	ts->idle_exittime = now;

	tick_nohz_restart(ts, now);
}

static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
{
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
	unsigned long ticks;
	/*
	 * We stopped the tick in idle. Update process times would miss the
	 * time we slept as update_process_times does only a 1 tick
	 * accounting. Enforce that this is accounted to idle !
	 */
	ticks = jiffies - ts->idle_jiffies;
	/*
	 * We might be one off. Do not randomly account a huge number of ticks!
	 */
	if (ticks && ticks < LONG_MAX)
		account_idle_ticks(ticks);
#endif
}

/**
 * tick_nohz_idle_exit - restart the idle tick from the idle task
 *
 * Restart the idle tick when the CPU is woken up from idle
 * This also exits the RCU extended quiescent state. The CPU
 * can use RCU again after this function is called.
 */
void tick_nohz_idle_exit(void)
{
	int cpu = smp_processor_id();
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t now;

	local_irq_disable();

	WARN_ON_ONCE(!ts->inidle);

	ts->inidle = 0;

	if (ts->idle_active || ts->tick_stopped)
		now = ktime_get();

	if (ts->idle_active)
		tick_nohz_stop_idle(cpu, now);

	if (ts->tick_stopped) {
		tick_nohz_restart_sched_tick(ts, now);
		tick_nohz_account_idle_ticks(ts);
	}

	local_irq_enable();
}

static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now)
{
	hrtimer_forward(&ts->sched_timer, now, tick_period);
	return tick_program_event(hrtimer_get_expires(&ts->sched_timer), 0);
}

/*
 * The nohz low res interrupt handler
 */
static void tick_nohz_handler(struct clock_event_device *dev)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();

	dev->next_event.tv64 = KTIME_MAX;

	tick_sched_do_timer(now);
	tick_sched_handle(ts, regs);

	while (tick_nohz_reprogram(ts, now)) {
		now = ktime_get();
		tick_do_update_jiffies64(now);
	}
}

/**
 * tick_nohz_switch_to_nohz - switch to nohz mode
 */
static void tick_nohz_switch_to_nohz(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
	ktime_t next;

	if (!tick_nohz_enabled)
		return;

	local_irq_disable();
	if (tick_switch_to_oneshot(tick_nohz_handler)) {
		local_irq_enable();
		return;
	}

	ts->nohz_mode = NOHZ_MODE_LOWRES;

	/*
	 * Recycle the hrtimer in ts, so we can share the
	 * hrtimer_forward with the highres code.
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	/* Get the next period */
	next = tick_init_jiffy_update();

	for (;;) {
		hrtimer_set_expires(&ts->sched_timer, next);
		if (!tick_program_event(next, 0))
			break;
		next = ktime_add(next, tick_period);
	}
	local_irq_enable();
}

/*
 * When NOHZ is enabled and the tick is stopped, we need to kick the
 * tick timer from irq_enter() so that the jiffies update is kept
 * alive during long running softirqs. That's ugly as hell, but
 * correctness is key even if we need to fix the offending softirq in
 * the first place.
 *
 * Note, this is different to tick_nohz_restart. We just kick the
 * timer and do not touch the other magic bits which need to be done
 * when idle is left.
 */
static void tick_nohz_kick_tick(int cpu, ktime_t now)
{
#if 0
	/* Switch back to 2.6.27 behaviour */

	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t delta;

	/*
	 * Do not touch the tick device, when the next expiry is either
	 * already reached or less/equal than the tick period.
	 */
	delta = ktime_sub(hrtimer_get_expires(&ts->sched_timer), now);
	if (delta.tv64 <= tick_period.tv64)
		return;

	tick_nohz_restart(ts, now);
#endif
}

static inline void tick_check_nohz(int cpu)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t now;

	if (!ts->idle_active && !ts->tick_stopped)
		return;
	now = ktime_get();
	if (ts->idle_active)
		tick_nohz_stop_idle(cpu, now);
	if (ts->tick_stopped) {
		tick_nohz_update_jiffies(now);
		tick_nohz_kick_tick(cpu, now);
	}
}

#else

static inline void tick_nohz_switch_to_nohz(void) { }
static inline void tick_check_nohz(int cpu) { }

#endif /* NO_HZ */

/*
 * Called from irq_enter to notify about the possible interruption of idle()
 */
void tick_check_idle(int cpu)
{
	tick_check_oneshot_broadcast(cpu);
	tick_check_nohz(cpu);
}

/*
 * High resolution timer specific code
 */
#ifdef CONFIG_HIGH_RES_TIMERS
/*
 * We rearm the timer until we get disabled by the idle code.
 * Called with interrupts disabled and timer->base->cpu_base->lock held.
 */
static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
{
	struct tick_sched *ts =
		container_of(timer, struct tick_sched, sched_timer);
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();

	tick_sched_do_timer(now);

	/*
	 * Do not call, when we are not in irq context and have
	 * no valid regs pointer
	 */
	if (regs)
		tick_sched_handle(ts, regs);

	hrtimer_forward(timer, now, tick_period);

	return HRTIMER_RESTART;
}

static int sched_skew_tick;

static int __init skew_tick(char *str)
{
	get_option(&str, &sched_skew_tick);

	return 0;
}
early_param("skew_tick", skew_tick);
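
/*
 * Example (illustrative): booting with "skew_tick=1" makes
 * tick_setup_sched_timer() below stagger each CPU's tick by
 * (tick_period / 2 / num_possible_cpus()) * cpu, which spreads out the
 * xtime_lock contention caused by all CPUs ticking at the same instant.
 */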

/**
 * tick_setup_sched_timer - setup the tick emulation timer
 */
void tick_setup_sched_timer(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
	ktime_t now = ktime_get();

	/*
	 * Emulate tick processing via per-CPU hrtimers:
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	ts->sched_timer.function = tick_sched_timer;

	/* Get the next period (per cpu) */
	hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());

	/* Offset the tick to avert xtime_lock contention. */
	if (sched_skew_tick) {
		u64 offset = ktime_to_ns(tick_period) >> 1;
		do_div(offset, num_possible_cpus());
		offset *= smp_processor_id();
		hrtimer_add_expires_ns(&ts->sched_timer, offset);
	}

	for (;;) {
		hrtimer_forward(&ts->sched_timer, now, tick_period);
		hrtimer_start_expires(&ts->sched_timer,
				      HRTIMER_MODE_ABS_PINNED);
		/* Check, if the timer was already in the past */
		if (hrtimer_active(&ts->sched_timer))
			break;
		now = ktime_get();
	}

#ifdef CONFIG_NO_HZ
	if (tick_nohz_enabled)
		ts->nohz_mode = NOHZ_MODE_HIGHRES;
#endif
}
#endif /* HIGH_RES_TIMERS */

#if defined CONFIG_NO_HZ || defined CONFIG_HIGH_RES_TIMERS
void tick_cancel_sched_timer(int cpu)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

# ifdef CONFIG_HIGH_RES_TIMERS
	if (ts->sched_timer.base)
		hrtimer_cancel(&ts->sched_timer);
# endif

	ts->nohz_mode = NOHZ_MODE_INACTIVE;
}
#endif

/**
 * Async notification about clocksource changes
 */
void tick_clock_notify(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks);
}

/*
 * Async notification about clock event changes
 */
void tick_oneshot_notify(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

	set_bit(0, &ts->check_clocks);
}

/**
 * Check, if a change happened, which makes oneshot possible.
 *
 * Called cyclically from the hrtimer softirq (driven by the timer
 * softirq). allow_nohz signals that we can switch into low-res nohz
 * mode, because high resolution timers are disabled (either at compile
 * time or at runtime).
 */
int tick_check_oneshot_change(int allow_nohz)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

	if (!test_and_clear_bit(0, &ts->check_clocks))
		return 0;

	if (ts->nohz_mode != NOHZ_MODE_INACTIVE)
		return 0;

	if (!timekeeping_valid_for_hres() || !tick_is_oneshot_available())
		return 0;

	if (!allow_nohz)
		return 1;

	tick_nohz_switch_to_nohz();
	return 0;
}