/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/syscore_ops.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/err.h>
#include <linux/platform_device.h>

#include <asm/mach/irq.h>

#include <mach/msm_iomap.h>
#include <mach/gpiomux.h>
#include <mach/mpm.h>
#include "gpio-msm-common.h"

#ifdef CONFIG_GPIO_MSM_V3
enum msm_tlmm_register {
	SDC4_HDRV_PULL_CTL = 0x0, /* NOT USED */
	SDC3_HDRV_PULL_CTL = 0x0, /* NOT USED */
	SDC2_HDRV_PULL_CTL = 0x2048,
	SDC1_HDRV_PULL_CTL = 0x2044,
};
#else
enum msm_tlmm_register {
	SDC4_HDRV_PULL_CTL = 0x20a0,
	SDC3_HDRV_PULL_CTL = 0x20a4,
	SDC2_HDRV_PULL_CTL = 0x0, /* NOT USED */
	SDC1_HDRV_PULL_CTL = 0x20a0,
};
#endif

struct tlmm_field_cfg {
	enum msm_tlmm_register reg;
	u8 off;
};

static const struct tlmm_field_cfg tlmm_hdrv_cfgs[] = {
	{SDC4_HDRV_PULL_CTL, 6},  /* TLMM_HDRV_SDC4_CLK */
	{SDC4_HDRV_PULL_CTL, 3},  /* TLMM_HDRV_SDC4_CMD */
	{SDC4_HDRV_PULL_CTL, 0},  /* TLMM_HDRV_SDC4_DATA */
	{SDC3_HDRV_PULL_CTL, 6},  /* TLMM_HDRV_SDC3_CLK */
	{SDC3_HDRV_PULL_CTL, 3},  /* TLMM_HDRV_SDC3_CMD */
	{SDC3_HDRV_PULL_CTL, 0},  /* TLMM_HDRV_SDC3_DATA */
	{SDC2_HDRV_PULL_CTL, 6},  /* TLMM_HDRV_SDC2_CLK */
	{SDC2_HDRV_PULL_CTL, 3},  /* TLMM_HDRV_SDC2_CMD */
	{SDC2_HDRV_PULL_CTL, 0},  /* TLMM_HDRV_SDC2_DATA */
	{SDC1_HDRV_PULL_CTL, 6},  /* TLMM_HDRV_SDC1_CLK */
	{SDC1_HDRV_PULL_CTL, 3},  /* TLMM_HDRV_SDC1_CMD */
	{SDC1_HDRV_PULL_CTL, 0},  /* TLMM_HDRV_SDC1_DATA */
};

static const struct tlmm_field_cfg tlmm_pull_cfgs[] = {
	{SDC4_HDRV_PULL_CTL, 14}, /* TLMM_PULL_SDC4_CLK */
	{SDC4_HDRV_PULL_CTL, 11}, /* TLMM_PULL_SDC4_CMD */
	{SDC4_HDRV_PULL_CTL, 9},  /* TLMM_PULL_SDC4_DATA */
	{SDC3_HDRV_PULL_CTL, 14}, /* TLMM_PULL_SDC3_CLK */
	{SDC3_HDRV_PULL_CTL, 11}, /* TLMM_PULL_SDC3_CMD */
	{SDC3_HDRV_PULL_CTL, 9},  /* TLMM_PULL_SDC3_DATA */
	{SDC2_HDRV_PULL_CTL, 14}, /* TLMM_PULL_SDC2_CLK */
	{SDC2_HDRV_PULL_CTL, 11}, /* TLMM_PULL_SDC2_CMD */
	{SDC2_HDRV_PULL_CTL, 9},  /* TLMM_PULL_SDC2_DATA */
	{SDC1_HDRV_PULL_CTL, 13}, /* TLMM_PULL_SDC1_CLK */
	{SDC1_HDRV_PULL_CTL, 11}, /* TLMM_PULL_SDC1_CMD */
	{SDC1_HDRV_PULL_CTL, 9},  /* TLMM_PULL_SDC1_DATA */
};

/*
 * Optional arch-specific irq extension hooks.
 * They default to NULL until an SoC layer fills them in.
 */
struct irq_chip msm_gpio_irq_extn = {
	.irq_eoi	= NULL,
	.irq_mask	= NULL,
	.irq_unmask	= NULL,
	.irq_retrigger	= NULL,
	.irq_set_type	= NULL,
	.irq_set_wake	= NULL,
	.irq_disable	= NULL,
};

/**
 * struct msm_gpio_dev - the MSM8660 SoC GPIO device structure
 *
 * @enabled_irqs: a bitmap used to optimize the summary-irq handler. By
 * keeping track of which gpios are unmasked as irq sources, we avoid
 * having to do __raw_readl calls on hundreds of iomapped registers each time
 * the summary interrupt fires in order to locate the active interrupts.
 *
 * @wake_irqs: a bitmap for tracking which interrupt lines are enabled
 * as wakeup sources. When the device is suspended, interrupts which are
 * not wakeup sources are disabled.
 *
 * @dual_edge_irqs: a bitmap used to track which irqs are configured
 * as dual-edge, as this is not supported by the hardware and requires
 * some special handling in the driver.
 *
 * @domain: the irq domain used to map gpio offsets to Linux irq numbers.
 */
struct msm_gpio_dev {
	struct gpio_chip gpio_chip;
	DECLARE_BITMAP(enabled_irqs, NR_MSM_GPIOS);
	DECLARE_BITMAP(wake_irqs, NR_MSM_GPIOS);
	DECLARE_BITMAP(dual_edge_irqs, NR_MSM_GPIOS);
	struct irq_domain *domain;
};

static DEFINE_SPINLOCK(tlmm_lock);

static inline struct msm_gpio_dev *to_msm_gpio_dev(struct gpio_chip *chip)
{
	return container_of(chip, struct msm_gpio_dev, gpio_chip);
}

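/*
 * Basic line accessors. The mb() after each hardware access ensures the
 * read or write has reached the TLMM block before the caller proceeds.
 */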
static int msm_gpio_get(struct gpio_chip *chip, unsigned offset)
{
	int rc;
	rc = __msm_gpio_get_inout(offset);
	mb();
	return rc;
}

static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
{
	__msm_gpio_set_inout(offset, val);
	mb();
}

static int msm_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&tlmm_lock, irq_flags);
	__msm_gpio_set_config_direction(offset, 1, 0);
	mb();
	spin_unlock_irqrestore(&tlmm_lock, irq_flags);
	return 0;
}

static int msm_gpio_direction_output(struct gpio_chip *chip,
				     unsigned offset,
				     int val)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&tlmm_lock, irq_flags);
	__msm_gpio_set_config_direction(offset, 0, val);
	mb();
	spin_unlock_irqrestore(&tlmm_lock, irq_flags);
	return 0;
}

#ifdef CONFIG_OF
static int msm_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
	struct msm_gpio_dev *g_dev = to_msm_gpio_dev(chip);
	struct irq_domain *domain = g_dev->domain;
	return irq_linear_revmap(domain, offset);
}

static inline int msm_irq_to_gpio(struct gpio_chip *chip, unsigned irq)
{
	struct irq_data *irq_data = irq_get_irq_data(irq);
	return irq_data->hwirq;
}
#else
static int msm_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
	return MSM_GPIO_TO_INT(offset - chip->base);
}

static inline int msm_irq_to_gpio(struct gpio_chip *chip, unsigned irq)
{
	return irq - MSM_GPIO_TO_INT(chip->base);
}
#endif

static int msm_gpio_request(struct gpio_chip *chip, unsigned offset)
{
	return msm_gpiomux_get(chip->base + offset);
}

static void msm_gpio_free(struct gpio_chip *chip, unsigned offset)
{
	msm_gpiomux_put(chip->base + offset);
}

static struct msm_gpio_dev msm_gpio = {
	.gpio_chip = {
		.label			= "msmgpio",
		.base			= 0,
		.ngpio			= NR_MSM_GPIOS,
		.direction_input	= msm_gpio_direction_input,
		.direction_output	= msm_gpio_direction_output,
		.get			= msm_gpio_get,
		.set			= msm_gpio_set,
		.to_irq			= msm_gpio_to_irq,
		.request		= msm_gpio_request,
		.free			= msm_gpio_free,
	},
};

static void switch_mpm_config(struct irq_data *d, unsigned val)
{
	/* switch the configuration in the mpm as well */
	if (!msm_gpio_irq_extn.irq_set_type)
		return;

	if (val)
		msm_gpio_irq_extn.irq_set_type(d, IRQF_TRIGGER_FALLING);
	else
		msm_gpio_irq_extn.irq_set_type(d, IRQF_TRIGGER_RISING);
}

/*
 * Dual-edge interrupts are emulated in software, since the hardware
 * has no such support:
 *
 * At appropriate moments, this function may be called to flip the polarity
 * settings of both-edge irq lines to try and catch the next edge.
 *
 * The attempt is considered successful if:
 * - the status bit goes high, indicating that an edge was caught, or
 * - the input value of the gpio doesn't change during the attempt.
 * If the value changes twice during the process, that would cause the first
 * test to fail but would force the second, as two opposite
 * transitions would cause a detection no matter the polarity setting.
 *
 * The do-loop tries to sledge-hammer closed the timing hole between
 * the initial value-read and the polarity-write - if the line value changes
 * during that window, an interrupt is lost, the new polarity setting is
 * incorrect, and the first success test will fail, causing a retry.
 *
 * Algorithm comes from Google's msmgpio driver, see mach-msm/gpio.c.
 */
static void msm_gpio_update_dual_edge_pos(struct irq_data *d, unsigned gpio)
{
	int loop_limit = 100;
	unsigned val, val2, intstat;

	do {
		val = __msm_gpio_get_inout(gpio);
		__msm_gpio_set_polarity(gpio, val);
		val2 = __msm_gpio_get_inout(gpio);
		intstat = __msm_gpio_get_intr_status(gpio);
		if (intstat || val == val2) {
			switch_mpm_config(d, val);
			return;
		}
	} while (loop_limit-- > 0);
	pr_err("%s: dual-edge irq failed to stabilize, %#08x != %#08x\n",
	       __func__, val, val2);
}

static void msm_gpio_irq_ack(struct irq_data *d)
{
	int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);

	__msm_gpio_set_intr_status(gpio);
	if (test_bit(gpio, msm_gpio.dual_edge_irqs))
		msm_gpio_update_dual_edge_pos(d, gpio);
	mb();
}

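/*
 * Mask/unmask track each line in the enabled_irqs bitmap (consulted by
 * the summary handler) in addition to toggling the per-gpio interrupt
 * enable in hardware, and then forward to the optional arch extension.
 */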
static void msm_gpio_irq_mask(struct irq_data *d)
{
	int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);
	unsigned long irq_flags;

	spin_lock_irqsave(&tlmm_lock, irq_flags);
	__msm_gpio_set_intr_cfg_enable(gpio, 0);
	__clear_bit(gpio, msm_gpio.enabled_irqs);
	mb();
	spin_unlock_irqrestore(&tlmm_lock, irq_flags);

	if (msm_gpio_irq_extn.irq_mask)
		msm_gpio_irq_extn.irq_mask(d);
}

static void msm_gpio_irq_unmask(struct irq_data *d)
{
	int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);
	unsigned long irq_flags;

	spin_lock_irqsave(&tlmm_lock, irq_flags);
	__set_bit(gpio, msm_gpio.enabled_irqs);
	if (!__msm_gpio_get_intr_cfg_enable(gpio)) {
		__msm_gpio_set_intr_status(gpio);
		__msm_gpio_set_intr_cfg_enable(gpio, 1);
		mb();
	}
	spin_unlock_irqrestore(&tlmm_lock, irq_flags);

	if (msm_gpio_irq_extn.irq_unmask)
		msm_gpio_irq_extn.irq_unmask(d);
}

static void msm_gpio_irq_disable(struct irq_data *d)
{
	if (msm_gpio_irq_extn.irq_disable)
		msm_gpio_irq_extn.irq_disable(d);
}

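/*
 * Program the requested trigger type: edge triggers use handle_edge_irq,
 * level triggers use handle_level_irq, and both-edge requests are marked
 * in dual_edge_irqs so the polarity can be flipped in software after
 * every detected edge.
 */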
static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
	int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);
	unsigned long irq_flags;

	spin_lock_irqsave(&tlmm_lock, irq_flags);

	if (flow_type & IRQ_TYPE_EDGE_BOTH) {
		__irq_set_handler_locked(d->irq, handle_edge_irq);
		if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
			__set_bit(gpio, msm_gpio.dual_edge_irqs);
		else
			__clear_bit(gpio, msm_gpio.dual_edge_irqs);
	} else {
		__irq_set_handler_locked(d->irq, handle_level_irq);
		__clear_bit(gpio, msm_gpio.dual_edge_irqs);
	}

	__msm_gpio_set_intr_cfg_type(gpio, flow_type);

	if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
		msm_gpio_update_dual_edge_pos(d, gpio);

	mb();
	spin_unlock_irqrestore(&tlmm_lock, irq_flags);

	if (msm_gpio_irq_extn.irq_set_type)
		msm_gpio_irq_extn.irq_set_type(d, flow_type);

	return 0;
}

/*
 * When the summary IRQ is raised, any number of GPIO lines may be high.
 * It is the job of the summary handler to find all those GPIO lines
 * which have been set as summary IRQ lines and which are triggered,
 * and to call their interrupt handlers.
 */
static irqreturn_t msm_summary_irq_handler(int irq, void *data)
{
	unsigned long i;
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_chip *chip = irq_desc_get_chip(desc);

	chained_irq_enter(chip, desc);

	for (i = find_first_bit(msm_gpio.enabled_irqs, NR_MSM_GPIOS);
	     i < NR_MSM_GPIOS;
	     i = find_next_bit(msm_gpio.enabled_irqs, NR_MSM_GPIOS, i + 1)) {
		if (__msm_gpio_get_intr_status(i))
			generic_handle_irq(msm_gpio_to_irq(&msm_gpio.gpio_chip,
							   i));
	}

	chained_irq_exit(chip, desc);
	return IRQ_HANDLED;
}

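/*
 * Wake-enabled lines are tracked in the wake_irqs bitmap; the shared
 * summary interrupt is marked as a wakeup source while at least one
 * gpio interrupt is wake-enabled.
 */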
static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
{
	int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);

	if (on) {
		if (bitmap_empty(msm_gpio.wake_irqs, NR_MSM_GPIOS))
			irq_set_irq_wake(TLMM_MSM_SUMMARY_IRQ, 1);
		set_bit(gpio, msm_gpio.wake_irqs);
	} else {
		clear_bit(gpio, msm_gpio.wake_irqs);
		if (bitmap_empty(msm_gpio.wake_irqs, NR_MSM_GPIOS))
			irq_set_irq_wake(TLMM_MSM_SUMMARY_IRQ, 0);
	}

	if (msm_gpio_irq_extn.irq_set_wake)
		msm_gpio_irq_extn.irq_set_wake(d, on);

	return 0;
}

static struct irq_chip msm_gpio_irq_chip = {
	.name		= "msmgpio",
	.irq_mask	= msm_gpio_irq_mask,
	.irq_unmask	= msm_gpio_irq_unmask,
	.irq_ack	= msm_gpio_irq_ack,
	.irq_set_type	= msm_gpio_irq_set_type,
	.irq_set_wake	= msm_gpio_irq_set_wake,
	.irq_disable	= msm_gpio_irq_disable,
};

#ifdef CONFIG_PM
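/*
 * Across suspend only the interrupts marked as wakeup sources are left
 * enabled; everything else is masked until msm_gpio_resume() restores
 * the normal enabled_irqs set.
 */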
static int msm_gpio_suspend(void)
{
	unsigned long irq_flags;
	unsigned long i;

	spin_lock_irqsave(&tlmm_lock, irq_flags);
	for_each_set_bit(i, msm_gpio.enabled_irqs, NR_MSM_GPIOS)
		__msm_gpio_set_intr_cfg_enable(i, 0);

	for_each_set_bit(i, msm_gpio.wake_irqs, NR_MSM_GPIOS)
		__msm_gpio_set_intr_cfg_enable(i, 1);
	mb();
	spin_unlock_irqrestore(&tlmm_lock, irq_flags);
	return 0;
}

void msm_gpio_show_resume_irq(void)
{
	unsigned long irq_flags;
	int i, irq, intstat;

	if (!msm_show_resume_irq_mask)
		return;

	spin_lock_irqsave(&tlmm_lock, irq_flags);
	for_each_set_bit(i, msm_gpio.wake_irqs, NR_MSM_GPIOS) {
		intstat = __msm_gpio_get_intr_status(i);
		if (intstat) {
			irq = msm_gpio_to_irq(&msm_gpio.gpio_chip, i);
			pr_warning("%s: %d triggered\n",
				   __func__, irq);
		}
	}
	spin_unlock_irqrestore(&tlmm_lock, irq_flags);
}

static void msm_gpio_resume(void)
{
	unsigned long irq_flags;
	unsigned long i;

	msm_gpio_show_resume_irq();

	spin_lock_irqsave(&tlmm_lock, irq_flags);
	for_each_set_bit(i, msm_gpio.wake_irqs, NR_MSM_GPIOS)
		__msm_gpio_set_intr_cfg_enable(i, 0);

	for_each_set_bit(i, msm_gpio.enabled_irqs, NR_MSM_GPIOS)
		__msm_gpio_set_intr_cfg_enable(i, 1);
	mb();
	spin_unlock_irqrestore(&tlmm_lock, irq_flags);
}
#else
#define msm_gpio_suspend NULL
#define msm_gpio_resume NULL
#endif

static struct syscore_ops msm_gpio_syscore_ops = {
	.suspend = msm_gpio_suspend,
	.resume = msm_gpio_resume,
};

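/*
 * Read-modify-write a bit field in one of the SDC HDRV/PULL control
 * registers under tlmm_lock: clear the 'width'-bit field at the
 * configured offset, then write the new value into it.
 */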
static void msm_tlmm_set_field(const struct tlmm_field_cfg *configs,
			       unsigned id, unsigned width, unsigned val)
{
	unsigned long irqflags;
	u32 mask = (1 << width) - 1;
	u32 __iomem *reg = MSM_TLMM_BASE + configs[id].reg;
	u32 reg_val;

	spin_lock_irqsave(&tlmm_lock, irqflags);
	reg_val = __raw_readl(reg);
	reg_val &= ~(mask << configs[id].off);
	reg_val |= (val & mask) << configs[id].off;
	__raw_writel(reg_val, reg);
	mb();
	spin_unlock_irqrestore(&tlmm_lock, irqflags);
}

void msm_tlmm_set_hdrive(enum msm_tlmm_hdrive_tgt tgt, int drv_str)
{
	msm_tlmm_set_field(tlmm_hdrv_cfgs, tgt, 3, drv_str);
}
EXPORT_SYMBOL(msm_tlmm_set_hdrive);

void msm_tlmm_set_pull(enum msm_tlmm_pull_tgt tgt, int pull)
{
	msm_tlmm_set_field(tlmm_pull_cfgs, tgt, 2, pull);
}
EXPORT_SYMBOL(msm_tlmm_set_pull);

int gpio_tlmm_config(unsigned config, unsigned disable)
{
	unsigned gpio = GPIO_PIN(config);

	if (gpio >= NR_MSM_GPIOS)
		return -EINVAL;

	__gpio_tlmm_config(config);
	mb();

	return 0;
}
EXPORT_SYMBOL(gpio_tlmm_config);

int msm_gpio_install_direct_irq(unsigned gpio, unsigned irq,
				unsigned int input_polarity)
{
	unsigned long irq_flags;

	if (gpio >= NR_MSM_GPIOS || irq >= NR_TLMM_MSM_DIR_CONN_IRQ)
		return -EINVAL;

	spin_lock_irqsave(&tlmm_lock, irq_flags);
	__msm_gpio_install_direct_irq(gpio, irq, input_polarity);
	mb();
	spin_unlock_irqrestore(&tlmm_lock, irq_flags);

	return 0;
}
EXPORT_SYMBOL(msm_gpio_install_direct_irq);

/*
 * This lock class tells lockdep that GPIO irqs are in a different
 * category than their parent, so it won't report false recursion.
 */
static struct lock_class_key msm_gpio_lock_class;

static int __devinit msm_gpio_probe(struct platform_device *pdev)
{
	int ret;
#ifndef CONFIG_OF
	int irq, i;
#endif
	msm_gpio.gpio_chip.dev = &pdev->dev;
	spin_lock_init(&tlmm_lock);
	bitmap_zero(msm_gpio.enabled_irqs, NR_MSM_GPIOS);
	bitmap_zero(msm_gpio.wake_irqs, NR_MSM_GPIOS);
	bitmap_zero(msm_gpio.dual_edge_irqs, NR_MSM_GPIOS);
	ret = gpiochip_add(&msm_gpio.gpio_chip);
	if (ret < 0)
		return ret;

#ifndef CONFIG_OF
	for (i = 0; i < msm_gpio.gpio_chip.ngpio; ++i) {
		irq = msm_gpio_to_irq(&msm_gpio.gpio_chip, i);
		irq_set_lockdep_class(irq, &msm_gpio_lock_class);
		irq_set_chip_and_handler(irq, &msm_gpio_irq_chip,
					 handle_level_irq);
		set_irq_flags(irq, IRQF_VALID);
	}
#endif
	ret = request_irq(TLMM_MSM_SUMMARY_IRQ, msm_summary_irq_handler,
			  IRQF_TRIGGER_HIGH, "msmgpio", NULL);
	if (ret) {
		pr_err("Request_irq failed for TLMM_MSM_SUMMARY_IRQ - %d\n",
		       ret);
		return ret;
	}
	register_syscore_ops(&msm_gpio_syscore_ops);
	return 0;
}

#ifdef CONFIG_OF
static struct of_device_id msm_gpio_of_match[] __devinitdata = {
	{.compatible = "qcom,msm-gpio", },
	{ },
};
#endif

static int __devexit msm_gpio_remove(struct platform_device *pdev)
{
	int ret;

	unregister_syscore_ops(&msm_gpio_syscore_ops);
	ret = gpiochip_remove(&msm_gpio.gpio_chip);
	if (ret < 0)
		return ret;
	irq_set_handler(TLMM_MSM_SUMMARY_IRQ, NULL);

	return 0;
}

static struct platform_driver msm_gpio_driver = {
	.probe = msm_gpio_probe,
	.remove = __devexit_p(msm_gpio_remove),
	.driver = {
		.name = "msmgpio",
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(msm_gpio_of_match),
	},
};

static void __exit msm_gpio_exit(void)
{
	platform_driver_unregister(&msm_gpio_driver);
}
module_exit(msm_gpio_exit);

static int __init msm_gpio_init(void)
{
	return platform_driver_register(&msm_gpio_driver);
}
postcore_initcall(msm_gpio_init);

#ifdef CONFIG_OF
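/*
 * Device tree interrupt specifiers for this controller use two cells:
 * cell 0 is the gpio number (hwirq) and cell 1 holds the trigger flags.
 */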
static int msm_gpio_irq_domain_xlate(struct irq_domain *d,
				     struct device_node *controller,
				     const u32 *intspec,
				     unsigned int intsize,
				     unsigned long *out_hwirq,
				     unsigned int *out_type)
{
	if (d->of_node != controller)
		return -EINVAL;
	if (intsize != 2)
		return -EINVAL;

	/* hwirq value */
	*out_hwirq = intspec[0];

	/* irq flags */
	*out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
	return 0;
}

static int msm_gpio_irq_domain_map(struct irq_domain *d, unsigned int irq,
				   irq_hw_number_t hwirq)
{
	irq_set_lockdep_class(irq, &msm_gpio_lock_class);
	irq_set_chip_and_handler(irq, &msm_gpio_irq_chip,
				 handle_level_irq);
	set_irq_flags(irq, IRQF_VALID);

	return 0;
}

static struct irq_domain_ops msm_gpio_irq_domain_ops = {
	.xlate = msm_gpio_irq_domain_xlate,
	.map = msm_gpio_irq_domain_map,
};

int __init msm_gpio_of_init(struct device_node *node,
			    struct device_node *parent)
{
	msm_gpio.domain = irq_domain_add_linear(node, NR_MSM_GPIOS,
						&msm_gpio_irq_domain_ops,
						&msm_gpio);
	if (!msm_gpio.domain) {
		WARN(1, "Cannot allocate irq_domain\n");
		return -ENOMEM;
	}

	return 0;
}
#endif

MODULE_AUTHOR("Gregory Bean <gbean@codeaurora.org>");
MODULE_DESCRIPTION("Driver for Qualcomm MSM TLMMv2 SoC GPIOs");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("sysdev:msmgpio");