blob: 54c3203839342f7d4ead28f3051692979ef629b4 [file] [log] [blame]
Thomas Gleixnera1a04ec2013-03-21 22:49:34 +01001/*
2 * Generic entry point for the idle threads
3 */
4#include <linux/sched.h>
5#include <linux/cpu.h>
Thomas Gleixnerd1669912013-03-21 22:49:35 +01006#include <linux/tick.h>
7#include <linux/mm.h>
Thomas Gleixnera1a04ec2013-03-21 22:49:34 +01008
Thomas Gleixnerd1669912013-03-21 22:49:35 +01009#include <asm/tlb.h>
10
11#include <trace/events/power.h>
12
13#ifndef CONFIG_GENERIC_IDLE_LOOP
/*
 * !CONFIG_GENERIC_IDLE_LOOP: the architecture supplies its own idle
 * loop, so just hand off to the arch's cpu_idle().  @state is unused
 * in this variant.
 */
void cpu_startup_entry(enum cpuhp_state state)
{
	cpu_idle();
}
Thomas Gleixnerd1669912013-03-21 22:49:35 +010018#else
19
20static int __read_mostly cpu_idle_force_poll;
21
22void cpu_idle_poll_ctrl(bool enable)
23{
24 if (enable) {
25 cpu_idle_force_poll++;
26 } else {
27 cpu_idle_force_poll--;
28 WARN_ON_ONCE(cpu_idle_force_poll < 0);
29 }
30}
31
32#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
/*
 * "nohlt" boot parameter: force the idle loop to poll instead of
 * entering the architecture's low-power idle state.
 */
static int __init cpu_idle_poll_setup(char *__unused)
{
	cpu_idle_force_poll = 1;
	return 1;
}
__setup("nohlt", cpu_idle_poll_setup);
39
/*
 * "hlt" boot parameter: clear any forced polling so the idle loop may
 * use the architecture's low-power idle state.
 */
static int __init cpu_idle_nopoll_setup(char *__unused)
{
	cpu_idle_force_poll = 0;
	return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
46#endif
47
48static inline int cpu_idle_poll(void)
49{
50 trace_cpu_idle_rcuidle(0, smp_processor_id());
51 local_irq_enable();
52 while (!need_resched())
53 cpu_relax();
54 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
55 return 1;
56}
57
/* Weak implementations for optional arch specific functions */
void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { }
void __weak arch_cpu_idle_dead(void) { }

/*
 * Default low-power idle: the architecture provided none, so fall
 * back to polling permanently by latching cpu_idle_force_poll.
 */
void __weak arch_cpu_idle(void)
{
	cpu_idle_force_poll = 1;
}
67
68/*
69 * Generic idle loop implementation
70 */
/*
 * Generic idle loop implementation
 *
 * Entered from cpu_startup_entry() with preemption disabled; never
 * returns.  The outer loop runs once per idle period, the inner loop
 * spins until a reschedule is required.
 */
static void cpu_idle_loop(void)
{
	while (1) {
		/* Stop the periodic tick while this CPU is idle (NOHZ). */
		tick_nohz_idle_enter();

		while (!need_resched()) {
			check_pgt_cache();
			/*
			 * NOTE(review): presumably orders the
			 * need_resched() read against state written by
			 * the wakeup path — confirm against the commit
			 * that introduced it.
			 */
			rmb();

			/* A CPU being unplugged parks itself here. */
			if (cpu_is_offline(smp_processor_id()))
				arch_cpu_idle_dead();

			local_irq_disable();
			arch_cpu_idle_enter();

			/*
			 * Polling is forced by the "nohlt" boot option,
			 * cpu_idle_poll_ctrl() users, or the weak default
			 * arch_cpu_idle().  cpu_idle_poll() re-enables
			 * interrupts itself.
			 */
			if (cpu_idle_force_poll) {
				cpu_idle_poll();
			} else {
				current_clr_polling();
				/*
				 * Re-check need_resched() after clearing
				 * the polling bit: a wakeup may have
				 * slipped in meanwhile, in which case we
				 * must not halt.
				 */
				if (!need_resched()) {
					stop_critical_timings();
					rcu_idle_enter();
					/*
					 * arch_cpu_idle() is expected to
					 * re-enable interrupts before
					 * returning — enforced by the
					 * WARN below.
					 */
					arch_cpu_idle();
					WARN_ON_ONCE(irqs_disabled());
					rcu_idle_exit();
					start_critical_timings();
				} else {
					local_irq_enable();
				}
				current_set_polling();
			}
			arch_cpu_idle_exit();
		}
		/* Reschedule pending: restart the tick and switch away. */
		tick_nohz_idle_exit();
		schedule_preempt_disabled();
	}
}
108
/*
 * Entry point for a CPU's idle thread.  Marks the current task as
 * polling, runs the optional arch prepare hook, then enters the
 * generic idle loop, which never returns.  @state is currently
 * unused here.
 */
void cpu_startup_entry(enum cpuhp_state state)
{
	current_set_polling();
	arch_cpu_idle_prepare();
	cpu_idle_loop();
}
115#endif