blob: cc6059621c0e28ec1785cbf79b77a16c6497f183 [file] [log] [blame]
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -06001/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/types.h>
17#include <linux/init.h>
18#include <linux/bitmap.h>
19#include <linux/bitops.h>
20#include <linux/interrupt.h>
21#include <linux/io.h>
22#include <linux/irq.h>
23#include <linux/irqdomain.h>
24#include <linux/list.h>
25#include <linux/platform_device.h>
26#include <linux/of.h>
27#include <linux/of_address.h>
28#include <linux/slab.h>
29#include <linux/spinlock.h>
30#include <asm/hardware/gic.h>
31#include <mach/gpio.h>
32#include <mach/mpm.h>
33
/* Indices of the irq domains the MPM can monitor on behalf of the Apps CPU. */
enum {
	MSM_MPM_GIC_IRQ_DOMAIN,
	MSM_MPM_GPIO_IRQ_DOMAIN,
	MSM_MPM_NR_IRQ_DOMAINS,		/* count — keep last */
};
39
/*
 * Selectors distinguishing the normal enable mask from the wakeup mask.
 * NOTE(review): not referenced elsewhere in this file — presumably part of
 * the MPM interface used by other translation units; confirm before removal.
 */
enum {
	MSM_MPM_SET_ENABLED,
	MSM_MPM_SET_WAKEUP,
	MSM_NR_IRQS_SET,		/* count — keep last */
};
45
/*
 * One Apps-to-MPM mapping parsed from the device tree in of_mpm_init():
 * hardware irq @hwirq within @domain is mirrored by MPM pin @pin.
 * Nodes are chained into irq_hash[] buckets keyed by hashfn(hwirq).
 */
struct mpm_irqs_a2m {
	struct irq_domain *domain;	/* irq domain the hwirq belongs to */
	struct device_node *parent;	/* DT node of the parent irq controller */
	irq_hw_number_t hwirq;		/* hw irq number within @domain */
	unsigned long pin;		/* MPM pin monitoring this interrupt */
	struct hlist_node node;		/* link in an irq_hash[] bucket */
};
53
/*
 * Per-domain bitmaps (indexed by hwirq) of interrupts that have NO MPM pin
 * ("unlisted").  Allocated in of_mpm_init(); maintained so sleep code can
 * tell which enabled/wake irqs the MPM cannot detect during low power.
 */
struct mpm_irqs {
	struct irq_domain *domain;
	unsigned long *enabled_irqs;	/* enabled but not MPM-monitored */
	unsigned long *wakeup_irqs;	/* wake-enabled but not MPM-monitored */
};
59
60static struct mpm_irqs unlisted_irqs[MSM_MPM_NR_IRQ_DOMAINS];
61
62static struct hlist_head irq_hash[MSM_MPM_NR_MPM_IRQS];
63static unsigned int msm_mpm_irqs_m2a[MSM_MPM_NR_MPM_IRQS];
64#define MSM_MPM_REG_WIDTH DIV_ROUND_UP(MSM_MPM_NR_MPM_IRQS, 32)
65
66#define MSM_MPM_IRQ_INDEX(irq) (irq / 32)
67#define MSM_MPM_IRQ_MASK(irq) BIT(irq % 32)
68
69#define MSM_MPM_DETECT_CTL_INDEX(irq) (irq / 16)
70#define MSM_MPM_DETECT_CTL_SHIFT(irq) ((irq % 16) * 2)
71
72#define hashfn(val) (val % MSM_MPM_NR_MPM_IRQS)
73
74static struct msm_mpm_device_data msm_mpm_dev_data;
75
/*
 * Order of the virtual MPM register banks inside the shared request region;
 * each bank is MSM_MPM_REG_WIDTH 32-bit words wide (see msm_mpm_read/write).
 */
enum mpm_reg_offsets {
	MSM_MPM_REG_WAKEUP,
	MSM_MPM_REG_ENABLE,
	MSM_MPM_REG_DETECT_CTL,
	MSM_MPM_REG_DETECT_CTL1,
	MSM_MPM_REG_POLARITY,
	MSM_MPM_REG_STATUS,
};
84
/* Protects the shadow arrays below and the a2m/m2a mapping tables. */
static DEFINE_SPINLOCK(msm_mpm_lock);

/*
 * Software shadows of the MPM configuration; flushed to hardware by
 * msm_mpm_set() just before entering a low power state.
 */
static uint32_t msm_mpm_enabled_irq[MSM_MPM_REG_WIDTH];
static uint32_t msm_mpm_wake_irq[MSM_MPM_REG_WIDTH];
static uint32_t msm_mpm_detect_ctl[MSM_MPM_REG_WIDTH * 2];	/* 2 bits/pin */
static uint32_t msm_mpm_polarity[MSM_MPM_REG_WIDTH];

/* Bit flags for msm_mpm_debug_mask below. */
enum {
	MSM_MPM_DEBUG_NON_DETECTABLE_IRQ = BIT(0),
	MSM_MPM_DEBUG_PENDING_IRQ = BIT(1),
	MSM_MPM_DEBUG_WRITE = BIT(2),
	MSM_MPM_DEBUG_NON_DETECTABLE_IRQ_IDLE = BIT(3),
};

/* Runtime-tunable debug verbosity, exposed as module param "debug_mask". */
static int msm_mpm_debug_mask = 1;
module_param_named(
	debug_mask, msm_mpm_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
);
103
/* Initialization progress flags, OR-ed into msm_mpm_initialized. */
enum mpm_state {
	MSM_MPM_IRQ_MAPPING_DONE = BIT(0),	/* of_mpm_init() completed */
	MSM_MPM_DEVICE_PROBED = BIT(1),		/* msm_mpm_dev_probe() completed */
};

static enum mpm_state msm_mpm_initialized;
110
111static inline bool msm_mpm_is_initialized(void)
112{
113 return msm_mpm_initialized &
114 (MSM_MPM_IRQ_MAPPING_DONE | MSM_MPM_DEVICE_PROBED);
115
116}
117
/*
 * Read word @subreg_index of virtual register bank @reg (an mpm_reg_offsets
 * value) from the shared MPM request region.  Caller must ensure the device
 * has been probed (mpm_request_reg_base mapped).
 */
static inline uint32_t msm_mpm_read(
	unsigned int reg, unsigned int subreg_index)
{
	unsigned int offset = reg * MSM_MPM_REG_WIDTH + subreg_index;
	return __raw_readl(msm_mpm_dev_data.mpm_request_reg_base + offset * 4);
}
124
/*
 * Write @value to word @subreg_index of virtual register bank @reg in the
 * shared MPM request region.  Uses a raw write; ordering against the IPC
 * doorbell is enforced by the wmb() in msm_mpm_set().
 */
static inline void msm_mpm_write(
	unsigned int reg, unsigned int subreg_index, uint32_t value)
{
	unsigned int offset = reg * MSM_MPM_REG_WIDTH + subreg_index;

	__raw_writel(value, msm_mpm_dev_data.mpm_request_reg_base + offset * 4);
	if (MSM_MPM_DEBUG_WRITE & msm_mpm_debug_mask)
		pr_info("%s: reg %u.%u: 0x%08x\n",
				__func__, reg, subreg_index, value);
}
135
/*
 * Ring the Apps IPC doorbell so the remote processor picks up the freshly
 * written MPM configuration.
 */
static inline void msm_mpm_send_interrupt(void)
{
	__raw_writel(msm_mpm_dev_data.mpm_apps_ipc_val,
			msm_mpm_dev_data.mpm_apps_ipc_reg);
	/* Ensure the write is complete before returning. */
	wmb();
}
143
/* IPC interrupt handler — intentionally a no-op beyond acking. */
static irqreturn_t msm_mpm_irq(int irq, void *dev_id)
{
	/*
	 * When the system resumes from deep sleep mode, the RPM hardware wakes
	 * up the Apps processor by triggering this interrupt. This interrupt
	 * has to be enabled and set as wake for the irq to get SPM out of
	 * sleep. Handle the interrupt here to make sure that it gets cleared.
	 */
	return IRQ_HANDLED;
}
154
155static void msm_mpm_set(bool wakeset)
156{
157 uint32_t *irqs;
158 unsigned int reg;
159 int i;
160
161 irqs = wakeset ? msm_mpm_wake_irq : msm_mpm_enabled_irq;
162 for (i = 0; i < MSM_MPM_REG_WIDTH; i++) {
163 reg = MSM_MPM_REG_ENABLE;
164 msm_mpm_write(reg, i, irqs[i]);
165
166 reg = MSM_MPM_REG_DETECT_CTL;
167 msm_mpm_write(reg, i, msm_mpm_detect_ctl[i]);
168
169 reg = MSM_MPM_REG_DETECT_CTL1;
170 msm_mpm_write(reg, i, msm_mpm_detect_ctl[2+i]);
171
172 reg = MSM_MPM_REG_POLARITY;
173 msm_mpm_write(reg, i, msm_mpm_polarity[i]);
174 }
175
176 /*
177 * Ensure that the set operation is complete before sending the
178 * interrupt
179 */
180 wmb();
181 msm_mpm_send_interrupt();
182}
183
/*
 * MPM pin -> Linux irq number (0 if no mapping has been recorded yet).
 * NOTE(review): no bounds check on @pin — all current callers derive it
 * from 32 * i + k with i < MSM_MPM_REG_WIDTH, which stays in range.
 */
static inline unsigned int msm_mpm_get_irq_m2a(unsigned int pin)
{
	return msm_mpm_irqs_m2a[pin];
}
188
189static inline uint16_t msm_mpm_get_irq_a2m(struct irq_data *d)
190{
191 struct hlist_node *elem;
192 struct mpm_irqs_a2m *node = NULL;
193
194 hlist_for_each_entry(node, elem, &irq_hash[hashfn(d->hwirq)], node) {
195 if ((node->hwirq == d->hwirq)
196 && (d->domain == node->domain)) {
197 /* Update the linux irq mapping */
198 msm_mpm_irqs_m2a[node->pin] = d->irq;
199 break;
200 }
201 }
202 return node ? node->pin : 0;
203}
204
/*
 * Record irq @d as (un)armed in either the sleep-enable or wakeup shadow
 * set, selected by @wakeset.  Irqs with an MPM pin go into the packed
 * uint32_t shadow masks; irqs without one are tracked per-domain in
 * unlisted_irqs so sleep code can see they would be undetectable.
 * Caller must hold msm_mpm_lock.  Always returns 0.
 */
static int msm_mpm_enable_irq_exclusive(
	struct irq_data *d, bool enable, bool wakeset)
{
	uint16_t mpm_pin;

	WARN_ON(!d);
	if (!d)
		return 0;

	mpm_pin = msm_mpm_get_irq_a2m(d);

	/* NOTE(review): 0xff looks like a "dedicated/ignore" sentinel pin —
	 * confirm against the mapping data; the a2m lookup itself returns 0
	 * for unmapped irqs. */
	if (mpm_pin == 0xff)
		return 0;

	if (mpm_pin) {
		uint32_t *mpm_irq_masks = wakeset ?
				msm_mpm_wake_irq : msm_mpm_enabled_irq;
		uint32_t index = MSM_MPM_IRQ_INDEX(mpm_pin);
		uint32_t mask = MSM_MPM_IRQ_MASK(mpm_pin);

		if (enable)
			mpm_irq_masks[index] |= mask;
		else
			mpm_irq_masks[index] &= ~mask;
	} else {
		int i;
		unsigned long *irq_apps;

		/* Find which domain's unlisted bitmap this irq belongs to. */
		for (i = 0; i < MSM_MPM_NR_IRQ_DOMAINS; i++) {
			if (d->domain == unlisted_irqs[i].domain)
				break;
		}

		if (i == MSM_MPM_NR_IRQ_DOMAINS)
			return 0;
		irq_apps = wakeset ? unlisted_irqs[i].wakeup_irqs :
					unlisted_irqs[i].enabled_irqs;

		/* Non-atomic bit ops are fine: msm_mpm_lock is held. */
		if (enable)
			__set_bit(d->hwirq, irq_apps);
		else
			__clear_bit(d->hwirq, irq_apps);

	}

	return 0;
}
252
253static void msm_mpm_set_detect_ctl(int pin, unsigned int flow_type)
254{
255 uint32_t index;
256 uint32_t val = 0;
257 uint32_t shift;
258
259 index = MSM_MPM_DETECT_CTL_INDEX(pin);
260 shift = MSM_MPM_DETECT_CTL_SHIFT(pin);
261
262 if (flow_type & IRQ_TYPE_EDGE_RISING)
263 val |= 0x02;
264
265 if (flow_type & IRQ_TYPE_EDGE_FALLING)
266 val |= 0x01;
267
268 msm_mpm_detect_ctl[index] &= ~(0x3 << shift);
269 msm_mpm_detect_ctl[index] |= (val & 0x03) << shift;
270}
271
/*
 * Program the trigger type (edge bits + polarity) for irq @d's MPM pin in
 * the shadow registers.  No-op for irqs without a pin.  Caller must hold
 * msm_mpm_lock.  Returns 0, or -EFAULT if the pin indexes past the shadow.
 */
static int msm_mpm_set_irq_type_exclusive(
	struct irq_data *d, unsigned int flow_type)
{
	uint32_t mpm_irq;

	mpm_irq = msm_mpm_get_irq_a2m(d);

	/* NOTE(review): 0xff sentinel — see msm_mpm_enable_irq_exclusive. */
	if (mpm_irq == 0xff)
		return 0;

	if (mpm_irq) {
		uint32_t index = MSM_MPM_IRQ_INDEX(mpm_irq);
		uint32_t mask = MSM_MPM_IRQ_MASK(mpm_irq);

		if (index >= MSM_MPM_REG_WIDTH)
			return -EFAULT;

		msm_mpm_set_detect_ctl(mpm_irq, flow_type);

		/* Polarity bit set = active-high / rising. */
		if (flow_type & IRQ_TYPE_LEVEL_HIGH)
			msm_mpm_polarity[index] |= mask;
		else
			msm_mpm_polarity[index] &= ~mask;
	}
	return 0;
}
298
299static int __msm_mpm_enable_irq(struct irq_data *d, bool enable)
300{
301 unsigned long flags;
302 int rc;
303
304 if (!msm_mpm_is_initialized())
305 return -EINVAL;
306
307 spin_lock_irqsave(&msm_mpm_lock, flags);
308
309 rc = msm_mpm_enable_irq_exclusive(d, enable, false);
310 spin_unlock_irqrestore(&msm_mpm_lock, flags);
311
312 return rc;
313}
314
/* irq_chip .irq_unmask hook: arm the irq in the MPM shadow. */
static void msm_mpm_enable_irq(struct irq_data *d)
{
	__msm_mpm_enable_irq(d, true);
}
319
/* irq_chip .irq_mask/.irq_disable hook: disarm the irq in the MPM shadow. */
static void msm_mpm_disable_irq(struct irq_data *d)
{
	__msm_mpm_enable_irq(d, false);
}
324
325static int msm_mpm_set_irq_wake(struct irq_data *d, unsigned int on)
326{
327 unsigned long flags;
328 int rc;
329
330 if (!msm_mpm_is_initialized())
331 return -EINVAL;
332
333 spin_lock_irqsave(&msm_mpm_lock, flags);
334 rc = msm_mpm_enable_irq_exclusive(d, (bool)on, true);
335 spin_unlock_irqrestore(&msm_mpm_lock, flags);
336
337 return rc;
338}
339
/*
 * irq_chip .irq_set_type hook: mirror the trigger type into the MPM shadow
 * registers under msm_mpm_lock.  Returns -EINVAL before initialization.
 */
static int msm_mpm_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
	unsigned long flags;
	int rc;

	if (!msm_mpm_is_initialized())
		return -EINVAL;

	spin_lock_irqsave(&msm_mpm_lock, flags);
	rc = msm_mpm_set_irq_type_exclusive(d, flow_type);
	spin_unlock_irqrestore(&msm_mpm_lock, flags);

	return rc;
}
354
355/******************************************************************************
356 * Public functions
357 *****************************************************************************/
358int msm_mpm_enable_pin(unsigned int pin, unsigned int enable)
359{
360 uint32_t index = MSM_MPM_IRQ_INDEX(pin);
361 uint32_t mask = MSM_MPM_IRQ_MASK(pin);
362 unsigned long flags;
363
364 if (!msm_mpm_is_initialized())
365 return -EINVAL;
366
367 if (pin > MSM_MPM_NR_MPM_IRQS)
368 return -EINVAL;
369
370 spin_lock_irqsave(&msm_mpm_lock, flags);
371
372 if (enable)
373 msm_mpm_enabled_irq[index] |= mask;
374 else
375 msm_mpm_enabled_irq[index] &= ~mask;
376
377 spin_unlock_irqrestore(&msm_mpm_lock, flags);
378 return 0;
379}
380
381int msm_mpm_set_pin_wake(unsigned int pin, unsigned int on)
382{
383 uint32_t index = MSM_MPM_IRQ_INDEX(pin);
384 uint32_t mask = MSM_MPM_IRQ_MASK(pin);
385 unsigned long flags;
386
387 if (!msm_mpm_is_initialized())
388 return -EINVAL;
389
390 if (pin >= MSM_MPM_NR_MPM_IRQS)
391 return -EINVAL;
392
393 spin_lock_irqsave(&msm_mpm_lock, flags);
394
395 if (on)
396 msm_mpm_wake_irq[index] |= mask;
397 else
398 msm_mpm_wake_irq[index] &= ~mask;
399
400 spin_unlock_irqrestore(&msm_mpm_lock, flags);
401 return 0;
402}
403
/*
 * Directly program the trigger type (edge bits + polarity) for MPM pin
 * @pin in the shadow registers.  Returns 0 on success, -EINVAL if the
 * driver is uninitialized or the pin is out of range.
 */
int msm_mpm_set_pin_type(unsigned int pin, unsigned int flow_type)
{
	uint32_t index = MSM_MPM_IRQ_INDEX(pin);
	uint32_t mask = MSM_MPM_IRQ_MASK(pin);
	unsigned long flags;

	if (!msm_mpm_is_initialized())
		return -EINVAL;

	if (pin >= MSM_MPM_NR_MPM_IRQS)
		return -EINVAL;

	spin_lock_irqsave(&msm_mpm_lock, flags);

	msm_mpm_set_detect_ctl(pin, flow_type);

	/* Polarity bit set = active-high / rising. */
	if (flow_type & IRQ_TYPE_LEVEL_HIGH)
		msm_mpm_polarity[index] |= mask;
	else
		msm_mpm_polarity[index] &= ~mask;

	spin_unlock_irqrestore(&msm_mpm_lock, flags);
	return 0;
}
428
429bool msm_mpm_irqs_detectable(bool from_idle)
430{
431 /* TODO:
432 * Return true if unlisted irqs is empty
433 */
434
435 if (!msm_mpm_is_initialized())
436 return false;
437
438 return true;
439}
440
/*
 * Whether every armed GPIO interrupt is visible to the MPM during sleep.
 * Like msm_mpm_irqs_detectable(), currently only reports readiness.
 */
bool msm_mpm_gpio_irqs_detectable(bool from_idle)
{
	/* TODO:
	 * Return true if unlisted irqs is empty
	 */
	if (!msm_mpm_is_initialized())
		return false;
	return true;
}
450
/*
 * Called on the way into a low power state: flush the shadow configuration
 * to the MPM.  Suspend (!from_idle) programs the wakeup mask; idle programs
 * the normal enabled mask.
 */
void msm_mpm_enter_sleep(bool from_idle)
{
	if (!msm_mpm_is_initialized()) {
		pr_err("%s(): MPM not initialized\n", __func__);
		return;
	}

	msm_mpm_set(!from_idle);
}
460
/*
 * Called on wakeup: scan the MPM status registers and, for every pending
 * pin that maps to a Linux edge-triggered irq, mark the irq pending and
 * (when waking from idle) ask the irq core to resend it, since the edge
 * fired while the GIC was asleep and would otherwise be lost.
 */
void msm_mpm_exit_sleep(bool from_idle)
{
	unsigned long pending;
	int i;
	int k;

	if (!msm_mpm_is_initialized()) {
		pr_err("%s(): MPM not initialized\n", __func__);
		return;
	}

	for (i = 0; i < MSM_MPM_REG_WIDTH; i++) {
		pending = msm_mpm_read(MSM_MPM_REG_STATUS, i);

		if (MSM_MPM_DEBUG_PENDING_IRQ & msm_mpm_debug_mask)
			pr_info("%s: pending.%d: 0x%08lx", __func__,
					i, pending);

		/* Walk the set bits of this 32-bit status word. */
		k = find_first_bit(&pending, 32);
		while (k < 32) {
			unsigned int mpm_irq = 32 * i + k;
			unsigned int apps_irq = msm_mpm_get_irq_m2a(mpm_irq);
			struct irq_desc *desc = apps_irq ?
				irq_to_desc(apps_irq) : NULL;

			/* Level irqs re-assert by themselves; only edges
			 * need software replay. */
			if (desc && !irqd_is_level_type(&desc->irq_data)) {
				irq_set_pending(apps_irq);
				if (from_idle) {
					raw_spin_lock(&desc->lock);
					check_irq_resend(desc, apps_irq);
					raw_spin_unlock(&desc->lock);
				}
			}

			k = find_next_bit(&pending, 32, k + 1);
		}
	}
}
499
500static int __devinit msm_mpm_dev_probe(struct platform_device *pdev)
501{
502 struct resource *res = NULL;
503 int offset, ret;
504 struct msm_mpm_device_data *dev = &msm_mpm_dev_data;
505
506 if (msm_mpm_initialized & MSM_MPM_DEVICE_PROBED) {
507 pr_warn("MPM device probed multiple times\n");
508 return 0;
509 }
510
511 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vmpm");
512 if (!res) {
513 pr_err("%s(): Missing RPM memory resource\n", __func__);
514 goto fail;
515 }
516
517 dev->mpm_request_reg_base = devm_request_and_ioremap(&pdev->dev, res);
518
519 if (!dev->mpm_request_reg_base) {
520 pr_err("%s(): Unable to iomap\n", __func__);
521 goto fail;
522 }
523
524 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ipc");
525 if (!res) {
526 pr_err("%s(): Missing GCC memory resource\n", __func__);
527 goto failed_irq_get;
528 }
529
530 dev->mpm_apps_ipc_reg = devm_ioremap(&pdev->dev, res->start,
531 resource_size(res));
532
533 if (of_property_read_u32(pdev->dev.of_node,
534 "qcom,ipc-bit-offset", &offset)) {
535 pr_info("%s(): Cannot read ipc bit offset\n", __func__);
536 goto failed_free_irq;
537 }
538
539 dev->mpm_apps_ipc_val = (1 << offset);
540
541 if (!dev->mpm_apps_ipc_reg)
542 goto failed_irq_get;
543
544 dev->mpm_ipc_irq = platform_get_irq(pdev, 0);
545
546 if (dev->mpm_ipc_irq == -ENXIO) {
547 pr_info("%s(): Cannot find IRQ resource\n", __func__);
548 goto failed_irq_get;
549 }
550 ret = request_irq(dev->mpm_ipc_irq, msm_mpm_irq,
551 IRQF_TRIGGER_RISING, pdev->name, msm_mpm_irq);
552
553 if (ret) {
554 pr_info("%s(): request_irq failed errno: %d\n", __func__, ret);
555 goto failed_irq_get;
556 }
Mahesh Sivasubramanianb9498582012-07-25 11:22:56 -0600557 msm_mpm_initialized |= MSM_MPM_DEVICE_PROBED;
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600558
559 return 0;
560
561failed_free_irq:
562 free_irq(dev->mpm_ipc_irq, msm_mpm_irq);
563failed_irq_get:
564 if (dev->mpm_apps_ipc_reg)
565 devm_iounmap(&pdev->dev, dev->mpm_apps_ipc_reg);
566 if (dev->mpm_request_reg_base)
567 devm_iounmap(&pdev->dev, dev->mpm_request_reg_base);
568fail:
569 return -EINVAL;
570}
571
/* Max irq count of a linear-revmap domain (used for the GIC parent). */
static inline int __init mpm_irq_domain_linear_size(struct irq_domain *d)
{
	return d->revmap_data.linear.size;
}
576
/* Max irq count of a legacy-revmap domain (used for the GPIO parent). */
static inline int __init mpm_irq_domain_legacy_size(struct irq_domain *d)
{
	return d->revmap_data.legacy.size;
}
581
582void __init of_mpm_init(struct device_node *node)
583{
584 const __be32 *list;
585
586 struct mpm_of {
587 char *pkey;
588 char *map;
589 struct irq_chip *chip;
590 int (*get_max_irqs)(struct irq_domain *d);
591 };
592 int i;
593
594 struct mpm_of mpm_of_map[MSM_MPM_NR_IRQ_DOMAINS] = {
595 {
596 "qcom,gic-parent",
597 "qcom,gic-map",
598 &gic_arch_extn,
599 mpm_irq_domain_linear_size,
600 },
601 {
602 "qcom,gpio-parent",
603 "qcom,gpio-map",
604 &msm_gpio_irq_extn,
605 mpm_irq_domain_legacy_size,
606 },
607 };
608
609 if (msm_mpm_initialized & MSM_MPM_IRQ_MAPPING_DONE) {
610 pr_warn("%s(): MPM driver mapping exists\n", __func__);
611 return;
612 }
613
614 for (i = 0; i < MSM_MPM_NR_MPM_IRQS; i++)
615 INIT_HLIST_HEAD(&irq_hash[i]);
616
617 for (i = 0; i < MSM_MPM_NR_IRQ_DOMAINS; i++) {
618 struct device_node *parent = NULL;
619 struct mpm_irqs_a2m *mpm_node = NULL;
620 struct irq_domain *domain = NULL;
621 int size;
622
623 parent = of_parse_phandle(node, mpm_of_map[i].pkey, 0);
624
625 if (!parent) {
626 pr_warn("%s(): %s Not found\n", __func__,
627 mpm_of_map[i].pkey);
628 continue;
629 }
630
631 domain = irq_find_host(parent);
632
633 if (!domain) {
634 pr_warn("%s(): Cannot find irq controller for %s\n",
635 __func__, mpm_of_map[i].pkey);
636 continue;
637 }
638
639 size = mpm_of_map[i].get_max_irqs(domain);
640
641 unlisted_irqs[i].enabled_irqs =
642 kzalloc(BITS_TO_LONGS(size) * sizeof(unsigned long),
643 GFP_KERNEL);
644
645 if (!unlisted_irqs[i].enabled_irqs)
646 goto failed_malloc;
647
648 unlisted_irqs[i].wakeup_irqs =
649 kzalloc(BITS_TO_LONGS(size) * sizeof(unsigned long),
650 GFP_KERNEL);
651
652 if (!unlisted_irqs[i].wakeup_irqs)
653 goto failed_malloc;
654
655 unlisted_irqs[i].domain = domain;
656
657 list = of_get_property(node, mpm_of_map[i].map, &size);
658
659 if (!list || !size) {
660 __WARN();
661 continue;
662 }
663
664 /*
665 * Size is in bytes. Convert to size of uint32_t
666 */
667 size /= sizeof(*list);
668
669 /*
670 * The data is represented by a tuple mapping hwirq to a MPM
671 * pin. The number of mappings in the device tree would be
672 * size/2
673 */
674 mpm_node = kzalloc(sizeof(struct mpm_irqs_a2m) * size / 2,
675 GFP_KERNEL);
676 if (!mpm_node)
677 goto failed_malloc;
678
679 while (size) {
680 unsigned long pin = be32_to_cpup(list++);
681 irq_hw_number_t hwirq = be32_to_cpup(list++);
682
683 mpm_node->pin = pin;
684 mpm_node->hwirq = hwirq;
685 mpm_node->parent = parent;
686 mpm_node->domain = domain;
687 INIT_HLIST_NODE(&mpm_node->node);
688
689 hlist_add_head(&mpm_node->node,
690 &irq_hash[hashfn(mpm_node->hwirq)]);
691 size -= 2;
692 mpm_node++;
693 }
694
695 if (mpm_of_map[i].chip) {
696 mpm_of_map[i].chip->irq_mask = msm_mpm_disable_irq;
697 mpm_of_map[i].chip->irq_unmask = msm_mpm_enable_irq;
698 mpm_of_map[i].chip->irq_disable = msm_mpm_disable_irq;
699 mpm_of_map[i].chip->irq_set_type = msm_mpm_set_irq_type;
700 mpm_of_map[i].chip->irq_set_wake = msm_mpm_set_irq_wake;
701 }
702
703 }
Mahesh Sivasubramanianb9498582012-07-25 11:22:56 -0600704 msm_mpm_initialized |= MSM_MPM_IRQ_MAPPING_DONE;
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600705
706 return;
Mahesh Sivasubramanianb9498582012-07-25 11:22:56 -0600707
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600708failed_malloc:
709 for (i = 0; i < MSM_MPM_NR_MPM_IRQS; i++) {
710 mpm_of_map[i].chip->irq_mask = NULL;
711 mpm_of_map[i].chip->irq_unmask = NULL;
712 mpm_of_map[i].chip->irq_disable = NULL;
713 mpm_of_map[i].chip->irq_set_type = NULL;
714 mpm_of_map[i].chip->irq_set_wake = NULL;
715
716 kfree(unlisted_irqs[i].enabled_irqs);
717 kfree(unlisted_irqs[i].wakeup_irqs);
718
719 }
720}
721
/* Device tree match: this driver binds to "qcom,mpm-v2" nodes. */
static struct of_device_id msm_mpm_match_table[] = {
	{.compatible = "qcom,mpm-v2"},
	{},
};

static struct platform_driver msm_mpm_dev_driver = {
	.probe = msm_mpm_dev_probe,
	.driver = {
		.name = "mpm-v2",
		.owner = THIS_MODULE,
		.of_match_table = msm_mpm_match_table,
	},
};
735
/* Register the platform driver early (arch_initcall) so the MPM is ready
 * before sleep code runs. */
int __init msm_mpm_device_init(void)
{
	return platform_driver_register(&msm_mpm_dev_driver);
}
arch_initcall(msm_mpm_device_init);