/*
 * The idle loop for all SuperH platforms.
 *
 *  Copyright (C) 2002 - 2009  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/preempt.h>
#include <linux/thread_info.h>
#include <linux/irqflags.h>
#include <linux/smp.h>
#include <asm/pgalloc.h>
#include <asm/system.h>
#include <asm/atomic.h>
#include <asm/smp.h>

void (*pm_idle)(void) = NULL;

static int hlt_counter;

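/*
 * The "hlt"/"nohlt" kernel command line options toggle hlt_counter:
 * a non-zero count disables use of the sleep instruction in the idle
 * loop in favour of polling.
 */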
static int __init nohlt_setup(char *__unused)
{
	hlt_counter = 1;
	return 1;
}
__setup("nohlt", nohlt_setup);

static int __init hlt_setup(char *__unused)
{
	hlt_counter = 0;
	return 1;
}
__setup("hlt", hlt_setup);

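/* True unless "nohlt" was given on the command line. */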
static inline int hlt_works(void)
{
	return !hlt_counter;
}

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
	local_irq_enable();
	while (!need_resched())
		cpu_relax();
}

void default_idle(void)
{
	if (hlt_works()) {
		clear_thread_flag(TIF_POLLING_NRFLAG);
		smp_mb__after_clear_bit();

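		/*
		 * Block interrupt acceptance with the SR.BL bit while
		 * re-checking need_resched(), so that a wakeup cannot be
		 * lost between the check and the sleep instruction.
		 */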
		set_bl_bit();
		if (!need_resched()) {
			local_irq_enable();
			cpu_sleep();
		} else
			local_irq_enable();

		set_thread_flag(TIF_POLLING_NRFLAG);
		clear_bl_bit();
	} else
		poll_idle();
}

/*
 * The idle thread. There's no useful work to be done, so just try to conserve
 * power and have a low exit latency (i.e. sit in a loop waiting for somebody
 * to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	unsigned int cpu = smp_processor_id();

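	/*
	 * Advertise that this CPU polls need_resched(), so remote
	 * wakeups may skip the reschedule IPI.
	 */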
	set_thread_flag(TIF_POLLING_NRFLAG);

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick(1);

		while (!need_resched()) {
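			/*
			 * Trim the page table quicklists while idle, and
			 * make sure the flags tested below are re-read.
			 */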
			check_pgt_cache();
			rmb();

			if (cpu_is_offline(cpu))
				play_dead();

			local_irq_disable();
			/* Don't trace irqs off for idle */
			stop_critical_timings();
			pm_idle();
			/*
			 * Sanity check to ensure that pm_idle() returns
			 * with IRQs enabled
			 */
			WARN_ON(irqs_disabled());
			start_critical_timings();
		}

		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

void __init select_idle_routine(void)
{
	/*
	 * If a platform has set its own idle routine, leave it alone.
	 */
	if (pm_idle)
		return;

	if (hlt_works())
		pm_idle = default_idle;
	else
		pm_idle = poll_idle;
}

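/* Empty IPI payload: the interrupt itself kicks a CPU out of pm_idle. */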
static void do_nothing(void *unused)
{
}

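/*
 * Take the calling CPU out of the online map and put it to sleep
 * permanently, with interrupts disabled.
 */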
void stop_this_cpu(void *unused)
{
	local_irq_disable();
	set_cpu_online(smp_processor_id(), false);

	for (;;)
		cpu_sleep();
}

/*
 * cpu_idle_wait - Ensure that all CPUs discard the old value of pm_idle
 * and switch to the new one. Required when changing the pm_idle handler
 * on SMP systems.
 *
 * The caller must have changed pm_idle to the new value before the call.
 * The old pm_idle value will not be used by any CPU after this function
 * returns.
 */
void cpu_idle_wait(void)
{
	smp_mb();
	/* kick all the CPUs so that they exit out of pm_idle */
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);