/*
 * Copyright (c) 2012 Linaro : Daniel Lezcano <daniel.lezcano@linaro.org> (IBM)
 *
 * Based on the work of Rickard Andersson <rickard.andersson@stericsson.com>
 * and Jonas Aaberg <jonas.aberg@stericsson.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
11
#include <linux/module.h>
#include <linux/cpuidle.h>
#include <linux/clockchips.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/smp.h>
#include <linux/mfd/dbx500-prcmu.h>

#include <asm/cpuidle.h>
#include <asm/proc-fns.h>
22
/* Number of cpus currently inside ux500_enter_idle(); the last cpu to
 * enter (count == num_online_cpus()) tries to become the "master" that
 * powers the whole AP down to retention. */
static atomic_t master = ATOMIC_INIT(0);
/* Serializes the master path of ux500_enter_idle() so only one cpu at a
 * time can attempt the retention sequence. */
static DEFINE_SPINLOCK(master_lock);
/* Per-cpu cpuidle device, registered in ux500_idle_init(). */
static DEFINE_PER_CPU(struct cpuidle_device, ux500_cpuidle_device);
26
/*
 * Enter the "ApIdle" retention state.
 *
 * Every cpu entering this handler increments 'master'.  The last cpu in
 * (atomic_inc_return() == num_online_cpus()) attempts to become the
 * master: it decouples the gic from the A9 cores, verifies the other cpu
 * really is in WFI, hands interrupt watching over to the prcmu and asks
 * for the AP_IDLE power state.  Any failure along that path aborts to a
 * plain WFI or straight to 'out'; 'recouple' tracks whether the gic must
 * be recoupled manually on the failure path (the prcmu recouples it
 * automatically once the power-state request succeeded).
 *
 * Returns the index of the entered state (always 'index').
 */
static inline int ux500_enter_idle(struct cpuidle_device *dev,
				struct cpuidle_driver *drv, int index)
{
	int this_cpu = smp_processor_id();
	bool recouple = false;

	if (atomic_inc_return(&master) == num_online_cpus()) {

		/* With this lock, we prevent the other cpu from exiting and
		 * entering this function again to become the master */
		if (!spin_trylock(&master_lock))
			goto wfi;

		/* decouple the gic from the A9 cores */
		if (prcmu_gic_decouple()) {
			spin_unlock(&master_lock);
			goto out;
		}

		/* If an error occurs from here on, we will have to recouple
		 * the gic manually */
		recouple = true;

		/* At this state, as the gic is decoupled, if the other
		 * cpu is in WFI, we have the guarantee it won't be woken
		 * up, so we can safely go to retention */
		if (!prcmu_is_cpu_in_wfi(this_cpu ? 0 : 1))
			goto out;

		/* The prcmu will be in charge of watching the interrupts
		 * and waking up the cpus */
		if (prcmu_copy_gic_settings())
			goto out;

		/* Check that in the meantime an interrupt did
		 * not occur on the gic ... */
		if (prcmu_gic_pending_irq())
			goto out;

		/* ... and the prcmu */
		if (prcmu_pending_irq())
			goto out;

		/* Go to the retention state, the prcmu will wait for the
		 * cpu to go WFI and this is what happens after exiting this
		 * 'master' critical section */
		if (prcmu_set_power_state(PRCMU_AP_IDLE, true, true))
			goto out;

		/* When we switch to retention, the prcmu is in charge
		 * of recoupling the gic automatically */
		recouple = false;

		spin_unlock(&master_lock);
	}
wfi:
	cpu_do_idle();
out:
	atomic_dec(&master);

	if (recouple) {
		prcmu_gic_recouple();
		spin_unlock(&master_lock);
	}

	return index;
}
94
/*
 * Two states: state 0 is plain WFI (also the safe state), state 1
 * ("ApIdle") is ARM retention entered through ux500_enter_idle().
 * CPUIDLE_FLAG_TIMER_STOP: the local timer stops in ApIdle, so the core
 * migrates timing to the broadcast device set up in ux500_idle_init().
 */
static struct cpuidle_driver ux500_idle_driver = {
	.name = "ux500_idle",
	.owner = THIS_MODULE,
	/* let the cpuidle core handle irq enabling and time keeping */
	.en_core_tk_irqen = 1,
	.states = {
		ARM_CPUIDLE_WFI_STATE,
		{
			.enter = ux500_enter_idle,
			/* latency/residency in microseconds (cpuidle
			 * convention) — platform-measured values */
			.exit_latency = 70,
			.target_residency = 260,
			.flags = CPUIDLE_FLAG_TIME_VALID |
				 CPUIDLE_FLAG_TIMER_STOP,
			.name = "ApIdle",
			.desc = "ARM Retention",
		},
	},
	.safe_state_index = 0,
	.state_count = 2,
};
114
115/*
116 * For each cpu, setup the broadcast timer because we will
117 * need to migrate the timers for the states >= ApIdle.
118 */
119static void ux500_setup_broadcast_timer(void *arg)
120{
121 int cpu = smp_processor_id();
122 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
123}
124
125int __init ux500_idle_init(void)
126{
127 int ret, cpu;
128 struct cpuidle_device *device;
129
130 /* Configure wake up reasons */
131 prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) |
132 PRCMU_WAKEUP(ABB));
133
134 /*
135 * Configure the timer broadcast for each cpu, that must
136 * be done from the cpu context, so we use a smp cross
137 * call with 'on_each_cpu'.
138 */
139 on_each_cpu(ux500_setup_broadcast_timer, NULL, 1);
140
141 ret = cpuidle_register_driver(&ux500_idle_driver);
142 if (ret) {
143 printk(KERN_ERR "failed to register ux500 idle driver\n");
144 return ret;
145 }
146
147 for_each_online_cpu(cpu) {
148 device = &per_cpu(ux500_cpuidle_device, cpu);
149 device->cpu = cpu;
150 ret = cpuidle_register_device(device);
151 if (ret) {
152 printk(KERN_ERR "Failed to register cpuidle "
153 "device for cpu%d\n", cpu);
154 goto out_unregister;
155 }
156 }
157out:
158 return ret;
159
160out_unregister:
161 for_each_online_cpu(cpu) {
162 device = &per_cpu(ux500_cpuidle_device, cpu);
163 cpuidle_unregister_device(device);
164 }
165
166 cpuidle_unregister_driver(&ux500_idle_driver);
167 goto out;
168}
169
170device_initcall(ux500_idle_init);