/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/irq.h>
#include <linux/mfd/core.h>
#include <linux/mfd/wcd9xxx/core.h>
#include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
#include <linux/mfd/wcd9xxx/wcd9310_registers.h>
#include <linux/interrupt.h>

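/*
 * The codec's interrupt status/mask/clear/level registers form a byte
 * array; these helpers map a codec interrupt number to the byte that
 * holds it and to the bit mask within that byte.
 */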
#define BYTE_BIT_MASK(nr) (1UL << ((nr) % BITS_PER_BYTE))
#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)

struct wcd9xxx_irq {
	bool level;
};

static struct wcd9xxx_irq wcd9xxx_irqs[TABLA_NUM_IRQS] = {
	[0] = { .level = 1 },
	/* All other wcd9xxx interrupts are edge triggered */
};

static inline int irq_to_wcd9xxx_irq(struct wcd9xxx *wcd9xxx, int irq)
{
	return irq - wcd9xxx->irq_base;
}

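/*
 * Mask updates are made on the cached copy under irq_lock and are only
 * written out to the codec in the bus sync_unlock callback, so the slow
 * register writes happen outside the per-irq enable/disable callbacks.
 */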
static void wcd9xxx_irq_lock(struct irq_data *data)
{
	struct wcd9xxx *wcd9xxx = irq_data_get_irq_chip_data(data);
	mutex_lock(&wcd9xxx->irq_lock);
}

static void wcd9xxx_irq_sync_unlock(struct irq_data *data)
{
	struct wcd9xxx *wcd9xxx = irq_data_get_irq_chip_data(data);
	int i;

	for (i = 0; i < ARRAY_SIZE(wcd9xxx->irq_masks_cur); i++) {
		/* If there's been a change in the mask write it back
		 * to the hardware.
		 */
		if (wcd9xxx->irq_masks_cur[i] != wcd9xxx->irq_masks_cache[i]) {
			wcd9xxx->irq_masks_cache[i] = wcd9xxx->irq_masks_cur[i];
			wcd9xxx_reg_write(wcd9xxx, TABLA_A_INTR_MASK0 + i,
					  wcd9xxx->irq_masks_cur[i]);
		}
	}

	mutex_unlock(&wcd9xxx->irq_lock);
}

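/*
 * Enable/disable only flip the interrupt's bit in the cached mask; the new
 * mask reaches the hardware when wcd9xxx_irq_sync_unlock() runs.
 */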
static void wcd9xxx_irq_enable(struct irq_data *data)
{
	struct wcd9xxx *wcd9xxx = irq_data_get_irq_chip_data(data);
	int wcd9xxx_irq = irq_to_wcd9xxx_irq(wcd9xxx, data->irq);
	wcd9xxx->irq_masks_cur[BIT_BYTE(wcd9xxx_irq)] &=
		~(BYTE_BIT_MASK(wcd9xxx_irq));
}

static void wcd9xxx_irq_disable(struct irq_data *data)
{
	struct wcd9xxx *wcd9xxx = irq_data_get_irq_chip_data(data);
	int wcd9xxx_irq = irq_to_wcd9xxx_irq(wcd9xxx, data->irq);
	wcd9xxx->irq_masks_cur[BIT_BYTE(wcd9xxx_irq)]
		|= BYTE_BIT_MASK(wcd9xxx_irq);
}

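/* irq_chip installed on every nested codec interrupt in wcd9xxx_irq_init() */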
static struct irq_chip wcd9xxx_irq_chip = {
	.name = "wcd9xxx",
	.irq_bus_lock = wcd9xxx_irq_lock,
	.irq_bus_sync_unlock = wcd9xxx_irq_sync_unlock,
	.irq_disable = wcd9xxx_irq_disable,
	.irq_enable = wcd9xxx_irq_enable,
};

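/*
 * Atomically move pm_state from 'o' to 'n' under pm_lock; the previous
 * state is returned so callers can tell whether the exchange happened.
 */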
enum wcd9xxx_pm_state wcd9xxx_pm_cmpxchg(struct wcd9xxx *wcd9xxx,
					 enum wcd9xxx_pm_state o,
					 enum wcd9xxx_pm_state n)
{
	enum wcd9xxx_pm_state old;
	mutex_lock(&wcd9xxx->pm_lock);
	old = wcd9xxx->pm_state;
	if (old == o)
		wcd9xxx->pm_state = n;
	mutex_unlock(&wcd9xxx->pm_lock);
	return old;
}
EXPORT_SYMBOL_GPL(wcd9xxx_pm_cmpxchg);

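/*
 * Take the wake lock on the first holder and wait (up to 5 seconds) for
 * the system to reach a state where the codec may be accessed. Returns
 * false if resume did not happen in time; the caller must then bail out.
 */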
bool wcd9xxx_lock_sleep(struct wcd9xxx *wcd9xxx)
{
	enum wcd9xxx_pm_state os;

	/* wcd9xxx_{lock,unlock}_sleep are called mostly by wcd9xxx_irq_thread
	 * and its subroutines, but btn0_lpress_fn is not a subroutine of
	 * wcd9xxx_irq_thread and can race with it, so wlock_holders needs
	 * to be protected by the mutex.
	 */
	mutex_lock(&wcd9xxx->pm_lock);
	if (wcd9xxx->wlock_holders++ == 0) {
		pr_debug("%s: holding wake lock\n", __func__);
		wake_lock(&wcd9xxx->wlock);
	}
	mutex_unlock(&wcd9xxx->pm_lock);
	if (!wait_event_timeout(wcd9xxx->pm_wq,
				((os = wcd9xxx_pm_cmpxchg(wcd9xxx,
							  WCD9XXX_PM_SLEEPABLE,
							  WCD9XXX_PM_AWAKE)) ==
				 WCD9XXX_PM_SLEEPABLE ||
				 (os == WCD9XXX_PM_AWAKE)),
				5 * HZ)) {
		pr_err("%s: system didn't resume within 5000ms, state %d, wlock %d\n",
		       __func__, wcd9xxx->pm_state, wcd9xxx->wlock_holders);
		WARN_ON(1);
		wcd9xxx_unlock_sleep(wcd9xxx);
		return false;
	}
	wake_up_all(&wcd9xxx->pm_wq);
	return true;
}
EXPORT_SYMBOL_GPL(wcd9xxx_lock_sleep);

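/*
 * Drop one wake lock reference; when the last holder is gone the codec is
 * marked sleepable again and the wake lock is released.
 */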
void wcd9xxx_unlock_sleep(struct wcd9xxx *wcd9xxx)
{
	mutex_lock(&wcd9xxx->pm_lock);
	if (--wcd9xxx->wlock_holders == 0) {
		wcd9xxx->pm_state = WCD9XXX_PM_SLEEPABLE;
		pr_debug("%s: releasing wake lock\n", __func__);
		wake_unlock(&wcd9xxx->wlock);
	}
	mutex_unlock(&wcd9xxx->pm_lock);
	wake_up_all(&wcd9xxx->pm_wq);
}
EXPORT_SYMBOL_GPL(wcd9xxx_unlock_sleep);

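/*
 * MBHC interrupts are cleared in hardware before their nested handler
 * runs; all other interrupts are handled first and cleared afterwards.
 * On I2C the interrupt mode register is rewritten after each clear.
 */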
static void wcd9xxx_irq_dispatch(struct wcd9xxx *wcd9xxx, int irqbit)
{
	if ((irqbit <= TABLA_IRQ_MBHC_INSERTION) &&
	    (irqbit >= TABLA_IRQ_MBHC_REMOVAL)) {
		wcd9xxx_reg_write(wcd9xxx, TABLA_A_INTR_CLEAR0 +
				  BIT_BYTE(irqbit), BYTE_BIT_MASK(irqbit));
		if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C)
			wcd9xxx_reg_write(wcd9xxx, TABLA_A_INTR_MODE, 0x02);
		handle_nested_irq(wcd9xxx->irq_base + irqbit);
	} else {
		handle_nested_irq(wcd9xxx->irq_base + irqbit);
		wcd9xxx_reg_write(wcd9xxx, TABLA_A_INTR_CLEAR0 +
				  BIT_BYTE(irqbit), BYTE_BIT_MASK(irqbit));
		if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C)
			wcd9xxx_reg_write(wcd9xxx, TABLA_A_INTR_MODE, 0x02);
	}
}

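/*
 * Threaded handler for the codec's single interrupt line: keep the system
 * awake, read the status registers, mask off disabled sources and dispatch
 * each pending interrupt as a nested irq.
 */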
static irqreturn_t wcd9xxx_irq_thread(int irq, void *data)
{
	int ret;
	struct wcd9xxx *wcd9xxx = data;
	u8 status[WCD9XXX_NUM_IRQ_REGS];
	int i;

	if (unlikely(wcd9xxx_lock_sleep(wcd9xxx) == false)) {
		dev_err(wcd9xxx->dev, "Failed to hold suspend\n");
		return IRQ_NONE;
	}
	ret = wcd9xxx_bulk_read(wcd9xxx, TABLA_A_INTR_STATUS0,
				WCD9XXX_NUM_IRQ_REGS, status);
	if (ret < 0) {
		dev_err(wcd9xxx->dev, "Failed to read interrupt status: %d\n",
			ret);
		wcd9xxx_unlock_sleep(wcd9xxx);
		return IRQ_NONE;
	}
	/* Apply masking */
	for (i = 0; i < WCD9XXX_NUM_IRQ_REGS; i++)
		status[i] &= ~wcd9xxx->irq_masks_cur[i];

	/* Find out which interrupts were triggered and call their
	 * handler functions.
	 */
	if (status[BIT_BYTE(TABLA_IRQ_SLIMBUS)] &
	    BYTE_BIT_MASK(TABLA_IRQ_SLIMBUS))
		wcd9xxx_irq_dispatch(wcd9xxx, TABLA_IRQ_SLIMBUS);

	/* Since the codec has only one hardware irq line, shared by all of
	 * its internal interrupts, the master irq handler may dispatch
	 * multiple nested irq handlers out of order. Dispatch the MBHC
	 * interrupts in the order expected by the MBHC state machine.
	 */
	for (i = TABLA_IRQ_MBHC_INSERTION; i >= TABLA_IRQ_MBHC_REMOVAL; i--) {
		if (status[BIT_BYTE(i)] & BYTE_BIT_MASK(i))
			wcd9xxx_irq_dispatch(wcd9xxx, i);
	}
	for (i = TABLA_IRQ_BG_PRECHARGE; i < TABLA_NUM_IRQS; i++) {
		if (status[BIT_BYTE(i)] & BYTE_BIT_MASK(i))
			wcd9xxx_irq_dispatch(wcd9xxx, i);
	}
	wcd9xxx_unlock_sleep(wcd9xxx);

	return IRQ_HANDLED;
}

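/*
 * Set up the nested irq_chip for every codec interrupt, mask all sources,
 * program the level registers and request the shared hardware interrupt
 * as a wakeup-capable threaded irq.
 */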
int wcd9xxx_irq_init(struct wcd9xxx *wcd9xxx)
{
	int ret;
	unsigned int i, cur_irq;

	mutex_init(&wcd9xxx->irq_lock);

	if (!wcd9xxx->irq) {
		dev_warn(wcd9xxx->dev,
			 "No interrupt specified, no interrupts\n");
		wcd9xxx->irq_base = 0;
		return 0;
	}

	if (!wcd9xxx->irq_base) {
		dev_err(wcd9xxx->dev,
			"No interrupt base specified, no interrupts\n");
		return 0;
	}
	/* Mask the individual interrupt sources */
	for (i = 0, cur_irq = wcd9xxx->irq_base; i < TABLA_NUM_IRQS; i++,
	     cur_irq++) {

		irq_set_chip_data(cur_irq, wcd9xxx);

		if (wcd9xxx_irqs[i].level)
			irq_set_chip_and_handler(cur_irq, &wcd9xxx_irq_chip,
						 handle_level_irq);
		else
			irq_set_chip_and_handler(cur_irq, &wcd9xxx_irq_chip,
						 handle_edge_irq);

		irq_set_nested_thread(cur_irq, 1);

		/* ARM needs us to explicitly flag the IRQ as valid
		 * and will set it noprobe when we do so.
		 */
#ifdef CONFIG_ARM
		set_irq_flags(cur_irq, IRQF_VALID);
#else
		set_irq_noprobe(cur_irq);
#endif

		wcd9xxx->irq_masks_cur[BIT_BYTE(i)] |= BYTE_BIT_MASK(i);
		wcd9xxx->irq_masks_cache[BIT_BYTE(i)] |= BYTE_BIT_MASK(i);
		wcd9xxx->irq_level[BIT_BYTE(i)] |= wcd9xxx_irqs[i].level <<
			(i % BITS_PER_BYTE);
	}
	for (i = 0; i < WCD9XXX_NUM_IRQ_REGS; i++) {
		/* Initialize interrupt mask and level registers */
		wcd9xxx_reg_write(wcd9xxx, TABLA_A_INTR_LEVEL0 + i,
				  wcd9xxx->irq_level[i]);
		wcd9xxx_reg_write(wcd9xxx, TABLA_A_INTR_MASK0 + i,
				  wcd9xxx->irq_masks_cur[i]);
	}

	ret = request_threaded_irq(wcd9xxx->irq, NULL, wcd9xxx_irq_thread,
				   IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
				   "wcd9xxx", wcd9xxx);
	if (ret != 0)
		dev_err(wcd9xxx->dev, "Failed to request IRQ %d: %d\n",
			wcd9xxx->irq, ret);
	else {
		ret = enable_irq_wake(wcd9xxx->irq);
		if (ret == 0) {
			ret = device_init_wakeup(wcd9xxx->dev, 1);
			if (ret) {
				dev_err(wcd9xxx->dev,
					"Failed to init device wakeup: %d\n",
					ret);
				disable_irq_wake(wcd9xxx->irq);
			}
		} else
			dev_err(wcd9xxx->dev,
				"Failed to set wake interrupt on IRQ %d: %d\n",
				wcd9xxx->irq, ret);
		if (ret)
			free_irq(wcd9xxx->irq, wcd9xxx);
	}

	if (ret)
		mutex_destroy(&wcd9xxx->irq_lock);

	return ret;
}

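/*
 * Undo wcd9xxx_irq_init(): release the hardware interrupt, drop the wake
 * configuration and destroy the mask lock.
 */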
void wcd9xxx_irq_exit(struct wcd9xxx *wcd9xxx)
{
	if (wcd9xxx->irq) {
		disable_irq_wake(wcd9xxx->irq);
		free_irq(wcd9xxx->irq, wcd9xxx);
		device_init_wakeup(wcd9xxx->dev, 0);
	}
	mutex_destroy(&wcd9xxx->irq_lock);
}