irqdomain: Port system to new API

The following merge commit chose the irq_domain implementation
from AU_LINUX_ANDROID_ICS.04.00.04.00.126 instead of the version
in v3.4.

commit f132c6cf77251e011e1dad0ec88c0b1fda16d5aa
Merge: 23016de 3f6240f
Author: Steve Muckle <smuckle@codeaurora.org>
Date:   Wed Jun 6 18:30:57 2012 -0700

    Merge commit 'AU_LINUX_ANDROID_ICS.04.00.04.00.126' into
    msm-3.4

Since this version is inconsistent with upstream, port the
irq_domain framework to the version in v3.4 and make all
necessary changes to clients that are out of spec.

Details of client ports are below.

-Update the qpnp-int driver for the revmap irq_domain API. The revmap
irq_domain implementation introduces a reverse lookup scheme using
a radix tree, which is useful for controllers like qpnp-int that
require a large range of hwirqs.
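
For reference, a condensed sketch of that pattern (the foo_* names are
illustrative; the irq_domain calls are the v3.4 API used by this patch):

  static int foo_irq_domain_map(struct irq_domain *d, unsigned int virq,
                                irq_hw_number_t hwirq)
  {
          /* populate the radix tree so the handler path gets fast lookups */
          irq_radix_revmap_insert(d, virq, hwirq);
          irq_set_chip_and_handler(virq, &foo_chip, handle_level_irq);
          irq_set_chip_data(virq, d->host_data);
          return 0;
  }

  /* one domain per controller; no linear table for the sparse hwirq space */
  domain = irq_domain_add_tree(node, &foo_irq_domain_ops, chip_d);

  /* in the summary handler, translate the hwirq back to a linux irq */
  virq = irq_radix_revmap_lookup(domain, hwirq);
  generic_handle_irq(virq);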

-Bring the ARM GIC driver up to v3.4, being careful
to port existing CAF changes.
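
The GIC now uses a legacy domain: a contiguous irq range is allocated
up front and handed to the core. A condensed view of the gic.c hunks
below:

  irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, numa_node_id());
  gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base,
                                      hwirq_base, &gic_irq_domain_ops, gic);

  /*
   * Per-irq setup moves from gic_dist_init() into the .map callback;
   * the entry handler then uses the core's reverse mapping:
   */
  irqnr = irq_find_mapping(gic->domain, irqnr);
  handle_IRQ(irqnr, regs);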

-Partially port the gpio-msm-common driver to the new irq_domain API,
enabling it to work with a linear revmap. It is not a full port,
since the irq_domain is still only registered for Device Tree
configurations; it should be registered for legacy configurations
as well.
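
A minimal sketch of the linear revmap usage, assuming NR_MSM_GPIOS
hwirqs numbered from 0:

  msm_gpio.domain = irq_domain_add_linear(node, NR_MSM_GPIOS,
                                          &msm_gpio_irq_domain_ops,
                                          &msm_gpio);

  /* gpio offset == hwirq, so gpio_to_irq() is a simple table lookup */
  static int msm_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
  {
          struct msm_gpio_dev *g_dev = to_msm_gpio_dev(chip);
          return irq_linear_revmap(g_dev->domain, offset - chip->base);
  }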

In addition, the irq_domain's .map function should set all the
fields currently configured in msm_gpio_probe(). That is not
currently possible since msm_gpio_probe() is invoked
unconditionally - even for Device Tree configurations.

Finally, gpio-msm-common should be converted into a real
platform_device so that probe() is invoked through driver and
device matching.

Change-Id: I19fa50171bd244759fb6076e3cddc70896d8727b
Signed-off-by: Michael Bohan <mbohan@codeaurora.org>
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index b14ecf8..29d01f3 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -64,9 +64,7 @@
 	u32 __percpu *saved_ppi_enable;
 	u32 __percpu *saved_ppi_conf;
 #endif
-#ifdef CONFIG_IRQ_DOMAIN
-	struct irq_domain domain;
-#endif
+	struct irq_domain *domain;
 	unsigned int gic_irqs;
 #ifdef CONFIG_GIC_NON_BANKED
 	void __iomem *(*get_base)(union gic_base *);
@@ -447,7 +445,7 @@
 		irqnr = irqstat & ~0x1c00;
 
 		if (likely(irqnr > 15 && irqnr < 1021)) {
-			irqnr = irq_domain_to_irq(&gic->domain, irqnr);
+			irqnr = irq_find_mapping(gic->domain, irqnr);
 			handle_IRQ(irqnr, regs);
 			continue;
 		}
@@ -485,8 +483,8 @@
 	if (gic_irq == 1023)
 		goto out;
 
-	cascade_irq = irq_domain_to_irq(&chip_data->domain, gic_irq);
-	if (unlikely(gic_irq < 32 || gic_irq > 1020 || cascade_irq >= NR_IRQS))
+	cascade_irq = irq_find_mapping(chip_data->domain, gic_irq);
+	if (unlikely(gic_irq < 32 || gic_irq > 1020))
 		do_bad_IRQ(cascade_irq, desc);
 	else
 		generic_handle_irq(cascade_irq);
@@ -520,10 +518,9 @@
 
 static void __init gic_dist_init(struct gic_chip_data *gic)
 {
-	unsigned int i, irq;
+	unsigned int i;
 	u32 cpumask;
 	unsigned int gic_irqs = gic->gic_irqs;
-	struct irq_domain *domain = &gic->domain;
 	void __iomem *base = gic_data_dist_base(gic);
 	u32 cpu = cpu_logical_map(smp_processor_id());
 
@@ -566,23 +563,6 @@
 	for (i = 32; i < gic_irqs; i += 32)
 		writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
 
-	/*
-	 * Setup the Linux IRQ subsystem.
-	 */
-	irq_domain_for_each_irq(domain, i, irq) {
-		if (i < 32) {
-			irq_set_percpu_devid(irq);
-			irq_set_chip_and_handler(irq, &gic_chip,
-						 handle_percpu_devid_irq);
-			set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
-		} else {
-			irq_set_chip_and_handler(irq, &gic_chip,
-						 handle_fasteoi_irq);
-			set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
-		}
-		irq_set_chip_data(irq, gic);
-	}
-
 	gic->max_irq = gic_irqs;
 
 	if (is_cpu_secure())
@@ -833,11 +813,27 @@
 }
 #endif
 
-#ifdef CONFIG_OF
-static int gic_irq_domain_dt_translate(struct irq_domain *d,
-				       struct device_node *controller,
-				       const u32 *intspec, unsigned int intsize,
-				       unsigned long *out_hwirq, unsigned int *out_type)
+static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
+				irq_hw_number_t hw)
+{
+	if (hw < 32) {
+		irq_set_percpu_devid(irq);
+		irq_set_chip_and_handler(irq, &gic_chip,
+					 handle_percpu_devid_irq);
+		set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
+	} else {
+		irq_set_chip_and_handler(irq, &gic_chip,
+					 handle_fasteoi_irq);
+		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+	}
+	irq_set_chip_data(irq, d->host_data);
+	return 0;
+}
+
+static int gic_irq_domain_xlate(struct irq_domain *d,
+				struct device_node *controller,
+				const u32 *intspec, unsigned int intsize,
+				unsigned long *out_hwirq, unsigned int *out_type)
 {
 	if (d->of_node != controller)
 		return -EINVAL;
@@ -854,26 +850,23 @@
 	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
 	return 0;
 }
-#endif
 
 const struct irq_domain_ops gic_irq_domain_ops = {
-#ifdef CONFIG_OF
-	.dt_translate = gic_irq_domain_dt_translate,
-#endif
+	.map = gic_irq_domain_map,
+	.xlate = gic_irq_domain_xlate,
 };
 
 void __init gic_init_bases(unsigned int gic_nr, int irq_start,
 			   void __iomem *dist_base, void __iomem *cpu_base,
-			   u32 percpu_offset)
+			   u32 percpu_offset, struct device_node *node)
 {
+	irq_hw_number_t hwirq_base;
 	struct gic_chip_data *gic;
-	struct irq_domain *domain;
-	int gic_irqs, rc;
+	int gic_irqs, irq_base;
 
 	BUG_ON(gic_nr >= MAX_GIC_NR);
 
 	gic = &gic_data[gic_nr];
-	domain = &gic->domain;
 #ifdef CONFIG_GIC_NON_BANKED
 	if (percpu_offset) { /* Frankein-GIC without banked registers... */
 		unsigned int cpu;
@@ -881,8 +874,11 @@
 		gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
 		gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
 		if (WARN_ON(!gic->dist_base.percpu_base ||
-			    !gic->cpu_base.percpu_base))
-			goto init_bases_err;
+			     !gic->cpu_base.percpu_base)) {
+			free_percpu(gic->dist_base.percpu_base);
+			free_percpu(gic->cpu_base.percpu_base);
+			return;
+		}
 
 		for_each_possible_cpu(cpu) {
 			unsigned long offset = percpu_offset * cpu_logical_map(cpu);
@@ -906,13 +902,12 @@
 	 * For primary GICs, skip over SGIs.
 	 * For secondary GICs, skip over PPIs, too.
 	 */
-	domain->hwirq_base = 32;
-	if (gic_nr == 0) {
-		if ((irq_start & 31) > 0) {
-			domain->hwirq_base = 16;
-			if (irq_start != -1)
-				irq_start = (irq_start & ~31) + 16;
-		}
+	if (gic_nr == 0 && (irq_start & 31) > 0) {
+		hwirq_base = 16;
+		if (irq_start != -1)
+			irq_start = (irq_start & ~31) + 16;
+	} else {
+		hwirq_base = 32;
 	}
 
 	/*
@@ -925,33 +920,22 @@
 		gic_irqs = 1020;
 	gic->gic_irqs = gic_irqs;
 
-	domain->nr_irq = gic_irqs - domain->hwirq_base;
-	domain->irq_base = irq_alloc_descs(irq_start, 16, domain->nr_irq,
-					   numa_node_id());
-	if (IS_ERR_VALUE(domain->irq_base)) {
+	gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */
+	irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, numa_node_id());
+	if (IS_ERR_VALUE(irq_base)) {
 		WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
 		     irq_start);
-		domain->irq_base = irq_start;
+		irq_base = irq_start;
 	}
-	domain->priv = gic;
-	domain->ops = &gic_irq_domain_ops;
-	rc = irq_domain_add(domain);
-	if (rc) {
-		WARN(1, "Unable to create irq_domain\n");
-		goto init_bases_err;
-	}
-	irq_domain_register(domain);
+	gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base,
+				    hwirq_base, &gic_irq_domain_ops, gic);
+	if (WARN_ON(!gic->domain))
+		return;
 
 	gic_chip.flags |= gic_arch_extn.flags;
 	gic_dist_init(gic);
 	gic_cpu_init(gic);
 	gic_pm_init(gic);
-
-	return;
-
-init_bases_err:
-	free_percpu(gic->dist_base.percpu_base);
-	free_percpu(gic->cpu_base.percpu_base);
 }
 
 void __cpuinit gic_secondary_init(unsigned int gic_nr)
@@ -1036,7 +1020,6 @@
 	void __iomem *dist_base;
 	u32 percpu_offset;
 	int irq;
-	struct irq_domain *domain = &gic_data[gic_cnt].domain;
 
 	if (WARN_ON(!node))
 		return -ENODEV;
@@ -1050,9 +1033,7 @@
 	if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
 		percpu_offset = 0;
 
-	domain->of_node = of_node_get(node);
-
-	gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset);
+	gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node);
 
 	if (parent) {
 		irq = irq_of_parse_and_map(node, 0);
diff --git a/arch/arm/include/asm/hardware/gic.h b/arch/arm/include/asm/hardware/gic.h
index 3783ff3..e067a08 100644
--- a/arch/arm/include/asm/hardware/gic.h
+++ b/arch/arm/include/asm/hardware/gic.h
@@ -40,7 +40,7 @@
 extern struct irq_chip gic_arch_extn;
 
 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
-		    u32 offset);
+		    u32 offset, struct device_node *);
 int gic_of_init(struct device_node *node, struct device_node *parent);
 void gic_secondary_init(unsigned int);
 void gic_handle_irq(struct pt_regs *regs);
@@ -56,7 +56,7 @@
 static inline void gic_init(unsigned int nr, int start,
 			    void __iomem *dist , void __iomem *cpu)
 {
-	gic_init_bases(nr, start, dist, cpu, 0);
+	gic_init_bases(nr, start, dist, cpu, 0, NULL);
 }
 void gic_set_irq_secure(unsigned int irq);
 #endif
diff --git a/arch/arm/mach-msm/include/mach/qpnp-int.h b/arch/arm/mach-msm/include/mach/qpnp-int.h
index a79d2fc..21d95e6 100644
--- a/arch/arm/mach-msm/include/mach/qpnp-int.h
+++ b/arch/arm/mach-msm/include/mach/qpnp-int.h
@@ -52,7 +52,8 @@
  * Used by the PMIC Arbiter driver or equivalent to register
  * callbacks for interrupt events.
  */
-int qpnpint_register_controller(unsigned int busno,
+int qpnpint_register_controller(struct device_node *node,
+				struct spmi_controller *ctrl,
 				struct qpnp_local_int *li_cb);
 
 /**
@@ -68,8 +69,11 @@
 {
 	return -ENXIO;
 }
-static inline int qpnpint_register_controller(unsigned int busno,
-				struct qpnp_local_int *li_cb)
+
+static inline int qpnpint_register_controller(struct device_node *node,
+					      struct spmi_controller *ctrl,
+					      struct qpnp_local_int *li_cb)
+
 {
 	return -ENXIO;
 }
diff --git a/arch/arm/mach-msm/platsmp-8625.c b/arch/arm/mach-msm/platsmp-8625.c
index b31d94b..700f966 100644
--- a/arch/arm/mach-msm/platsmp-8625.c
+++ b/arch/arm/mach-msm/platsmp-8625.c
@@ -18,6 +18,7 @@
 #include <linux/smp.h>
 #include <linux/io.h>
 #include <linux/interrupt.h>
+#include <linux/irq.h>
 
 #include <asm/cacheflush.h>
 #include <asm/hardware/gic.h>
diff --git a/drivers/gpio/gpio-msm-common.c b/drivers/gpio/gpio-msm-common.c
index 9a9a783..5539950 100644
--- a/drivers/gpio/gpio-msm-common.c
+++ b/drivers/gpio/gpio-msm-common.c
@@ -100,7 +100,7 @@
 	DECLARE_BITMAP(enabled_irqs, NR_MSM_GPIOS);
 	DECLARE_BITMAP(wake_irqs, NR_MSM_GPIOS);
 	DECLARE_BITMAP(dual_edge_irqs, NR_MSM_GPIOS);
-	struct irq_domain domain;
+	struct irq_domain *domain;
 };
 
 static DEFINE_SPINLOCK(tlmm_lock);
@@ -152,15 +152,14 @@
 static int msm_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
 {
 	struct msm_gpio_dev *g_dev = to_msm_gpio_dev(chip);
-	struct irq_domain *domain = &g_dev->domain;
-	return domain->irq_base + (offset - chip->base);
+	struct irq_domain *domain = g_dev->domain;
+	return irq_linear_revmap(domain, offset - chip->base);
 }
 
 static inline int msm_irq_to_gpio(struct gpio_chip *chip, unsigned irq)
 {
-	struct msm_gpio_dev *g_dev = to_msm_gpio_dev(chip);
-	struct irq_domain *domain = &g_dev->domain;
-	return irq - domain->irq_base;
+	struct irq_data *irq_data = irq_get_irq_data(irq);
+	return irq_data->hwirq;
 }
 #else
 static int msm_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
@@ -391,6 +390,7 @@
  */
 static struct lock_class_key msm_gpio_lock_class;
 
+/* TODO: This should be a real platform_driver */
 static int __devinit msm_gpio_probe(void)
 {
 	int i, irq, ret;
@@ -573,12 +573,12 @@
 EXPORT_SYMBOL(msm_gpio_install_direct_irq);
 
 #ifdef CONFIG_OF
-static int msm_gpio_domain_dt_translate(struct irq_domain *d,
-					struct device_node *controller,
-					const u32 *intspec,
-					unsigned int intsize,
-					unsigned long *out_hwirq,
-					unsigned int *out_type)
+static int msm_gpio_irq_domain_xlate(struct irq_domain *d,
+				     struct device_node *controller,
+				     const u32 *intspec,
+				     unsigned int intsize,
+				     unsigned long *out_hwirq,
+				     unsigned int *out_type)
 {
 	if (d->of_node != controller)
 		return -EINVAL;
@@ -593,32 +593,32 @@
 	return 0;
 }
 
+/*
+ * TODO: this really should be doing all the things that msm_gpio_probe() does,
+ * but since the msm_gpio_probe is called unconditionally for DT and non-DT
+ * configs, we can't duplicate it here. This should be fixed.
+ */
+int msm_gpio_irq_domain_map(struct irq_domain *d, unsigned int irq,
+			  irq_hw_number_t hwirq)
+{
+	return 0;
+}
+
 static struct irq_domain_ops msm_gpio_irq_domain_ops = {
-	.dt_translate = msm_gpio_domain_dt_translate,
+	.xlate = msm_gpio_irq_domain_xlate,
+	.map = msm_gpio_irq_domain_map,
 };
 
 int __init msm_gpio_of_init(struct device_node *node,
 			    struct device_node *parent)
 {
-	struct irq_domain *domain = &msm_gpio.domain;
-	int start;
-
-	start = irq_domain_find_free_range(0, NR_MSM_GPIOS);
-	domain->irq_base = irq_alloc_descs(start, 0, NR_MSM_GPIOS,
-							numa_node_id());
-	if (IS_ERR_VALUE(domain->irq_base)) {
-		WARN(1, "Cannot allocate irq_descs @ IRQ%d\n", start);
-		return domain->irq_base;
+	msm_gpio.domain = irq_domain_add_linear(node, NR_MSM_GPIOS,
+			&msm_gpio_irq_domain_ops, &msm_gpio);
+	if (!msm_gpio.domain) {
+		WARN(1, "Cannot allocate irq_domain\n");
+		return -ENOMEM;
 	}
 
-	domain->nr_irq = NR_MSM_GPIOS;
-	domain->of_node = of_node_get(node);
-	domain->priv = &msm_gpio;
-	domain->ops = &msm_gpio_irq_domain_ops;
-	irq_domain_add(domain);
-	msm_gpio.gpio_chip.of_node = of_node_get(node);
-	pr_debug("%s: irq_base = %u\n", __func__, domain->irq_base);
-
 	return 0;
 }
 #endif
diff --git a/drivers/spmi/qpnp-int.c b/drivers/spmi/qpnp-int.c
index 2998c01..f44d782 100644
--- a/drivers/spmi/qpnp-int.c
+++ b/drivers/spmi/qpnp-int.c
@@ -31,8 +31,6 @@
 #include <asm/mach/irq.h>
 #include <mach/qpnp-int.h>
 
-#define QPNPINT_MAX_BUSSES 1
-
 /* 16 slave_ids, 256 per_ids per slave, and 8 ints per per_id */
 #define QPNPINT_NR_IRQS (16 * 256 * 8)
 
@@ -66,13 +64,18 @@
 
 struct q_chip_data {
 	int bus_nr;
-	struct irq_domain domain;
+	struct irq_domain *domain;
 	struct qpnp_local_int cb;
 	struct spmi_controller *spmi_ctrl;
 	struct radix_tree_root per_tree;
+	struct list_head list;
 };
 
-static struct q_chip_data chip_data[QPNPINT_MAX_BUSSES] __read_mostly;
+static LIST_HEAD(qpnpint_chips);
+static DEFINE_MUTEX(qpnpint_chips_mutex);
+
+#define QPNPINT_MAX_BUSSES 4
+struct q_chip_data *chip_lookup[QPNPINT_MAX_BUSSES];
 
 /**
  * qpnpint_encode_hwirq - translate between qpnp_irq_spec and
@@ -138,8 +141,7 @@
 	if (chip_d->cb.mask) {
 		rc = qpnpint_decode_hwirq(d->hwirq, &q_spec);
 		if (rc)
-			pr_err("%s: decode failed on hwirq %lu\n",
-						 __func__, d->hwirq);
+			pr_err("decode failed on hwirq %lu\n", d->hwirq);
 		else
 			chip_d->cb.mask(chip_d->spmi_ctrl, &q_spec,
 								irq_d->priv_d);
@@ -150,8 +152,7 @@
 	rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_EN_CLR,
 					(u8 *)&irq_d->mask_shift, 1);
 	if (rc)
-		pr_err("%s: spmi failure on irq %d\n",
-						 __func__, d->irq);
+		pr_err("spmi failure on irq %d\n", d->irq);
 }
 
 static void qpnpint_irq_mask_ack(struct irq_data *d)
@@ -168,8 +169,7 @@
 	if (chip_d->cb.mask) {
 		rc = qpnpint_decode_hwirq(d->hwirq, &q_spec);
 		if (rc)
-			pr_err("%s: decode failed on hwirq %lu\n",
-						 __func__, d->hwirq);
+			pr_err("decode failed on hwirq %lu\n", d->hwirq);
 		else
 			chip_d->cb.mask(chip_d->spmi_ctrl, &q_spec,
 								irq_d->priv_d);
@@ -180,14 +180,12 @@
 	rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_EN_CLR,
 							&irq_d->mask_shift, 1);
 	if (rc)
-		pr_err("%s: spmi failure on irq %d\n",
-						 __func__, d->irq);
+		pr_err("spmi failure on irq %d\n", d->irq);
 
 	rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_LATCHED_CLR,
 							&irq_d->mask_shift, 1);
 	if (rc)
-		pr_err("%s: spmi failure on irq %d\n",
-						 __func__, d->irq);
+		pr_err("spmi failure on irq %d\n", d->irq);
 }
 
 static void qpnpint_irq_unmask(struct irq_data *d)
@@ -203,8 +201,7 @@
 	if (chip_d->cb.unmask) {
 		rc = qpnpint_decode_hwirq(d->hwirq, &q_spec);
 		if (rc)
-			pr_err("%s: decode failed on hwirq %lu\n",
-						 __func__, d->hwirq);
+			pr_err("decode failed on hwirq %lu\n", d->hwirq);
 		else
 			chip_d->cb.unmask(chip_d->spmi_ctrl, &q_spec,
 								irq_d->priv_d);
@@ -214,8 +211,7 @@
 	rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_EN_SET,
 					&irq_d->mask_shift, 1);
 	if (rc)
-		pr_err("%s: spmi failure on irq %d\n",
-						 __func__, d->irq);
+		pr_err("spmi failure on irq %d\n", d->irq);
 }
 
 static int qpnpint_irq_set_type(struct irq_data *d, unsigned int flow_type)
@@ -253,8 +249,7 @@
 
 	rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_SET_TYPE, &buf, 3);
 	if (rc)
-		pr_err("%s: spmi failure on irq %d\n",
-						 __func__, d->irq);
+		pr_err("spmi failure on irq %d\n", d->irq);
 	return rc;
 }
 
@@ -279,13 +274,16 @@
 		return rc;
 	irq_d->spmi_slave = q_spec.slave;
 	irq_d->spmi_offset = q_spec.per << 8;
-	irq_d->per_d->use_count++;
 	irq_d->chip_d = chip_d;
 
 	if (chip_d->cb.register_priv_data)
 		rc = chip_d->cb.register_priv_data(chip_d->spmi_ctrl, &q_spec,
 							&irq_d->priv_d);
-	return rc;
+		if (rc)
+			return rc;
+
+	irq_d->per_d->use_count++;
+	return 0;
 }
 
 static struct q_irq_data *qpnpint_alloc_irq_data(
@@ -307,8 +305,10 @@
 	per_d = radix_tree_lookup(&chip_d->per_tree, (hwirq & ~0x7));
 	if (!per_d) {
 		per_d = kzalloc(sizeof(struct q_perip_data), GFP_KERNEL);
-		if (!per_d)
+		if (!per_d) {
+			kfree(irq_d);
 			return ERR_PTR(-ENOMEM);
+		}
 		radix_tree_insert(&chip_d->per_tree,
 				  (hwirq & ~0x7), per_d);
 	}
@@ -317,74 +317,6 @@
 	return irq_d;
 }
 
-static int qpnpint_register_int(uint32_t busno, unsigned long hwirq)
-{
-	int irq, rc;
-	struct irq_domain *domain;
-	struct q_irq_data *irq_d;
-
-	pr_debug("busno = %u hwirq = %lu\n", busno, hwirq);
-
-	if (hwirq < 0 || hwirq >= 32768) {
-		pr_err("%s: hwirq %lu out of qpnp interrupt bounds\n",
-							__func__, hwirq);
-		return -EINVAL;
-	}
-
-	if (busno < 0 || busno > QPNPINT_MAX_BUSSES) {
-		pr_err("%s: invalid bus number %d\n", __func__, busno);
-		return -EINVAL;
-	}
-
-	domain = &chip_data[busno].domain;
-	irq = irq_domain_to_irq(domain, hwirq);
-
-	rc = irq_alloc_desc_at(irq, numa_node_id());
-	if (rc < 0) {
-		if (rc != -EEXIST)
-			pr_err("%s: failed to alloc irq at %d with "
-					"rc %d\n", __func__, irq, rc);
-		return rc;
-	}
-	irq_d = qpnpint_alloc_irq_data(&chip_data[busno], hwirq);
-	if (IS_ERR(irq_d)) {
-		pr_err("%s: failed to alloc irq data %d with "
-					"rc %d\n", __func__, irq, rc);
-		rc = PTR_ERR(irq_d);
-		goto register_err_cleanup;
-	}
-	rc = qpnpint_init_irq_data(&chip_data[busno], irq_d, hwirq);
-	if (rc) {
-		pr_err("%s: failed to init irq data %d with "
-					"rc %d\n", __func__, irq, rc);
-		goto register_err_cleanup;
-	}
-
-	irq_domain_register_irq(domain, hwirq);
-
-	irq_set_chip_and_handler(irq,
-			&qpnpint_chip,
-			handle_level_irq);
-	irq_set_chip_data(irq, irq_d);
-#ifdef CONFIG_ARM
-	set_irq_flags(irq, IRQF_VALID);
-#else
-	irq_set_noprobe(irq);
-#endif
-	return 0;
-
-register_err_cleanup:
-	irq_free_desc(irq);
-	if (!IS_ERR(irq_d)) {
-		if (irq_d->per_d->use_count == 1)
-			kfree(irq_d->per_d);
-		else
-			irq_d->per_d->use_count--;
-		kfree(irq_d);
-	}
-	return rc;
-}
-
 static int qpnpint_irq_domain_dt_translate(struct irq_domain *d,
 				       struct device_node *controller,
 				       const u32 *intspec, unsigned int intsize,
@@ -392,11 +324,10 @@
 				       unsigned int *out_type)
 {
 	struct qpnp_irq_spec addr;
-	struct q_chip_data *chip_d = d->priv;
 	int ret;
 
-	pr_debug("%s: intspec[0] 0x%x intspec[1] 0x%x intspec[2] 0x%x\n",
-				__func__, intspec[0], intspec[1], intspec[2]);
+	pr_debug("intspec[0] 0x%x intspec[1] 0x%x intspec[2] 0x%x\n",
+				intspec[0], intspec[1], intspec[2]);
 
 	if (d->of_node != controller)
 		return -EINVAL;
@@ -409,41 +340,102 @@
 
 	ret = qpnpint_encode_hwirq(&addr);
 	if (ret < 0) {
-		pr_err("%s: invalid intspec\n", __func__);
+		pr_err("invalid intspec\n");
 		return ret;
 	}
 	*out_hwirq = ret;
 	*out_type = IRQ_TYPE_NONE;
 
-	/**
-	 * Register the interrupt if it's not already registered.
-	 * This implies that mapping a qpnp interrupt allocates
-	 * resources.
-	 */
-	ret = qpnpint_register_int(chip_d->bus_nr, *out_hwirq);
-	if (ret && ret != -EEXIST) {
-		pr_err("%s: Cannot register hwirq %lu\n", __func__, *out_hwirq);
-		return ret;
-	}
-
 	return 0;
 }
 
+static void qpnpint_free_irq_data(struct q_irq_data *irq_d)
+{
+	if (irq_d->per_d->use_count == 1)
+		kfree(irq_d->per_d);
+	else
+		irq_d->per_d->use_count--;
+	kfree(irq_d);
+}
+
+static int qpnpint_irq_domain_map(struct irq_domain *d,
+				  unsigned int virq, irq_hw_number_t hwirq)
+{
+	struct q_chip_data *chip_d = d->host_data;
+	struct q_irq_data *irq_d;
+	int rc;
+
+	pr_debug("hwirq = %lu\n", hwirq);
+
+	if (hwirq < 0 || hwirq >= 32768) {
+		pr_err("hwirq %lu out of bounds\n", hwirq);
+		return -EINVAL;
+	}
+
+	irq_radix_revmap_insert(d, virq, hwirq);
+
+	irq_d = qpnpint_alloc_irq_data(chip_d, hwirq);
+	if (IS_ERR(irq_d)) {
+		pr_err("failed to alloc irq data for hwirq %lu\n", hwirq);
+		return PTR_ERR(irq_d);
+	}
+
+	rc = qpnpint_init_irq_data(chip_d, irq_d, hwirq);
+	if (rc) {
+		pr_err("failed to init irq data for hwirq %lu\n", hwirq);
+		goto map_err;
+	}
+
+	irq_set_chip_and_handler(virq,
+			&qpnpint_chip,
+			handle_level_irq);
+	irq_set_chip_data(virq, irq_d);
+#ifdef CONFIG_ARM
+	set_irq_flags(virq, IRQF_VALID);
+#else
+	irq_set_noprobe(virq);
+#endif
+	return 0;
+
+map_err:
+	qpnpint_free_irq_data(irq_d);
+	return rc;
+}
+
+void qpnpint_irq_domain_unmap(struct irq_domain *d, unsigned int virq)
+{
+	struct q_irq_data *irq_d = irq_get_chip_data(virq);
+
+	if (WARN_ON(!irq_d))
+		return;
+
+	qpnpint_free_irq_data(irq_d);
+}
+
 const struct irq_domain_ops qpnpint_irq_domain_ops = {
-	.dt_translate = qpnpint_irq_domain_dt_translate,
+	.map = qpnpint_irq_domain_map,
+	.unmap = qpnpint_irq_domain_unmap,
+	.xlate = qpnpint_irq_domain_dt_translate,
 };
 
-int qpnpint_register_controller(unsigned int busno,
+int qpnpint_register_controller(struct device_node *node,
+				struct spmi_controller *ctrl,
 				struct qpnp_local_int *li_cb)
 {
-	if (busno >= QPNPINT_MAX_BUSSES)
-		return -EINVAL;
-	chip_data[busno].cb = *li_cb;
-	chip_data[busno].spmi_ctrl = spmi_busnum_to_ctrl(busno);
-	if (!chip_data[busno].spmi_ctrl)
-		return -ENOENT;
+	struct q_chip_data *chip_d;
 
-	return 0;
+	if (!node || !ctrl || ctrl->nr >= QPNPINT_MAX_BUSSES)
+		return -EINVAL;
+
+	list_for_each_entry(chip_d, &qpnpint_chips, list)
+		if (node == chip_d->domain->of_node) {
+			chip_d->cb = *li_cb;
+			chip_d->spmi_ctrl = ctrl;
+			chip_lookup[ctrl->nr] = chip_d;
+			return 0;
+		}
+
+	return -ENOENT;
 }
 EXPORT_SYMBOL(qpnpint_register_controller);
 
@@ -457,21 +449,18 @@
 	pr_debug("spec slave = %u per = %u irq = %u\n",
 					spec->slave, spec->per, spec->irq);
 
-	if (!spec || !spmi_ctrl)
-		return -EINVAL;
-
 	busno = spmi_ctrl->nr;
-	if (busno >= QPNPINT_MAX_BUSSES)
+	if (!spec || !spmi_ctrl || busno >= QPNPINT_MAX_BUSSES)
 		return -EINVAL;
 
 	hwirq = qpnpint_encode_hwirq(spec);
 	if (hwirq < 0) {
-		pr_err("%s: invalid irq spec passed\n", __func__);
+		pr_err("invalid irq spec passed\n");
 		return -EINVAL;
 	}
 
-	domain = &chip_data[busno].domain;
-	irq = irq_domain_to_irq(domain, hwirq);
+	domain = chip_lookup[busno]->domain;
+	irq = irq_radix_revmap_lookup(domain, hwirq);
 
 	generic_handle_irq(irq);
 
@@ -479,31 +468,24 @@
 }
 EXPORT_SYMBOL(qpnpint_handle_irq);
 
-/**
- * This assumes that there's a relationship between the order of the interrupt
- * controllers specified to of_irq_match() is the SPMI device topology. If
- * this ever turns out to be a bad assumption, then of_irq_init_cb_t should
- * be modified to pass a parameter to this function.
- */
-static int qpnpint_cnt __initdata;
-
 int __init qpnpint_of_init(struct device_node *node, struct device_node *parent)
 {
-	struct q_chip_data *chip_d = &chip_data[qpnpint_cnt];
-	struct irq_domain *domain = &chip_d->domain;
+	struct q_chip_data *chip_d;
+
+	chip_d = kzalloc(sizeof(struct q_chip_data), GFP_KERNEL);
+	if (!chip_d)
+		return -ENOMEM;
+
+	chip_d->domain = irq_domain_add_tree(node,
+					&qpnpint_irq_domain_ops, chip_d);
+	if (!chip_d->domain) {
+		pr_err("Unable to allocate irq_domain\n");
+		kfree(chip_d);
+		return -ENOMEM;
+	}
 
 	INIT_RADIX_TREE(&chip_d->per_tree, GFP_ATOMIC);
-
-	domain->irq_base = irq_domain_find_free_range(0, QPNPINT_NR_IRQS);
-	domain->nr_irq = QPNPINT_NR_IRQS;
-	domain->of_node = of_node_get(node);
-	domain->priv = chip_d;
-	domain->ops = &qpnpint_irq_domain_ops;
-	irq_domain_add(domain);
-
-	pr_info("irq_base = %d\n", domain->irq_base);
-
-	qpnpint_cnt++;
+	list_add(&chip_d->list, &qpnpint_chips);
 
 	return 0;
 }
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index f22b900..422e99e 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -664,7 +664,14 @@
 		goto err_add_controller;
 
 	/* Register the interrupt enable/disable functions */
-	qpnpint_register_controller(cell_index, &spmi_pmic_arb_intr_cb);
+	ret = qpnpint_register_controller(pmic_arb->controller.dev.of_node,
+					  &pmic_arb->controller,
+					  &spmi_pmic_arb_intr_cb);
+	if (ret) {
+		dev_err(&pdev->dev, "Unable to register controller %d\n",
+					cell_index);
+		goto err_reg_controller;
+	}
 
 	/* Register device(s) from the device tree */
 	of_spmi_register_devices(&pmic_arb->controller);
@@ -674,6 +681,8 @@
 
 	return 0;
 
+err_reg_controller:
+	spmi_del_controller(&pmic_arb->controller);
 err_add_controller:
 	platform_set_drvdata(pdev, NULL);
 	return ret;
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index ed6bb39..c65740d 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -9,106 +9,178 @@
  * representation into a hardware irq number that can be mapped back to a
  * Linux irq number without any extra platform support code.
  *
- * irq_domain is expected to be embedded in an interrupt controller's private
- * data structure.
+ * Interrupt controller "domain" data structure. This could be defined as a
+ * irq domain controller. That is, it handles the mapping between hardware
+ * and virtual interrupt numbers for a given interrupt domain. The domain
+ * structure is generally created by the PIC code for a given PIC instance
+ * (though a domain can cover more than one PIC if they have a flat number
+ * model). It's the domain callbacks that are responsible for setting the
+ * irq_chip on a given irq_desc after it's been mapped.
+ *
+ * The host code and data structures are agnostic to whether or not
+ * we use an open firmware device-tree. We do have references to struct
+ * device_node in two places: in irq_find_host() to find the host matching
+ * a given interrupt controller node, and of course as an argument to its
+ * counterpart domain->ops->match() callback. However, those are treated as
+ * generic pointers by the core and the fact that it's actually a device-node
+ * pointer is purely a convention between callers and implementation. This
+ * code could thus be used on other architectures by replacing those two
+ * by some sort of arch-specific void * "token" used to identify interrupt
+ * controllers.
  */
+
 #ifndef _LINUX_IRQDOMAIN_H
 #define _LINUX_IRQDOMAIN_H
 
-#include <linux/irq.h>
-#include <linux/mod_devicetable.h>
+#include <linux/types.h>
+#include <linux/radix-tree.h>
 
-#ifdef CONFIG_IRQ_DOMAIN
 struct device_node;
 struct irq_domain;
+struct of_device_id;
+
+/* Number of irqs reserved for a legacy isa controller */
+#define NUM_ISA_INTERRUPTS	16
 
 /**
  * struct irq_domain_ops - Methods for irq_domain objects
- * @to_irq: (optional) given a local hardware irq number, return the linux
- *          irq number.  If to_irq is not implemented, then the irq_domain
- *          will use this translation: irq = (domain->irq_base + hwirq)
- * @dt_translate: Given a device tree node and interrupt specifier, decode
- *                the hardware irq number and linux irq type value.
+ * @match: Match an interrupt controller device node to a host, returns
+ *         1 on a match
+ * @map: Create or update a mapping between a virtual irq number and a hw
+ *       irq number. This is called only once for a given mapping.
+ * @unmap: Dispose of such a mapping
+ * @xlate: Given a device tree node and interrupt specifier, decode
+ *         the hardware irq number and linux irq type value.
+ *
+ * Functions below are provided by the driver and called whenever a new mapping
+ * is created or an old mapping is disposed. The driver can then proceed to
+ * whatever internal data structures management is required. It also needs
+ * to setup the irq_desc when returning from map().
  */
 struct irq_domain_ops {
-	unsigned int (*to_irq)(struct irq_domain *d, unsigned long hwirq);
-
-#ifdef CONFIG_OF
-	int (*dt_translate)(struct irq_domain *d, struct device_node *node,
-			    const u32 *intspec, unsigned int intsize,
-			    unsigned long *out_hwirq, unsigned int *out_type);
-#endif /* CONFIG_OF */
+	int (*match)(struct irq_domain *d, struct device_node *node);
+	int (*map)(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw);
+	void (*unmap)(struct irq_domain *d, unsigned int virq);
+	int (*xlate)(struct irq_domain *d, struct device_node *node,
+		     const u32 *intspec, unsigned int intsize,
+		     unsigned long *out_hwirq, unsigned int *out_type);
 };
 
 /**
  * struct irq_domain - Hardware interrupt number translation object
- * @list: Element in global irq_domain list.
+ * @link: Element in global irq_domain list.
+ * @revmap_type: Method used for reverse mapping hwirq numbers to linux irq. This
+ *               will be one of the IRQ_DOMAIN_MAP_* values.
+ * @revmap_data: Revmap method specific data.
+ * @ops: pointer to irq_domain methods
+ * @host_data: private data pointer for use by owner.  Not touched by irq_domain
+ *             core code.
  * @irq_base: Start of irq_desc range assigned to the irq_domain.  The creator
  *            of the irq_domain is responsible for allocating the array of
  *            irq_desc structures.
  * @nr_irq: Number of irqs managed by the irq domain
  * @hwirq_base: Starting number for hwirqs managed by the irq domain
- * @ops: pointer to irq_domain methods
- * @priv: private data pointer for use by owner.  Not touched by irq_domain
- *        core code.
  * @of_node: (optional) Pointer to device tree nodes associated with the
  *           irq_domain.  Used when decoding device tree interrupt specifiers.
  */
 struct irq_domain {
-	struct list_head list;
-	unsigned int irq_base;
-	unsigned int nr_irq;
-	unsigned int hwirq_base;
+	struct list_head link;
+
+	/* type of reverse mapping_technique */
+	unsigned int revmap_type;
+	union {
+		struct {
+			unsigned int size;
+			unsigned int first_irq;
+			irq_hw_number_t first_hwirq;
+		} legacy;
+		struct {
+			unsigned int size;
+			unsigned int *revmap;
+		} linear;
+		struct {
+			unsigned int max_irq;
+		} nomap;
+		struct radix_tree_root tree;
+	} revmap_data;
 	const struct irq_domain_ops *ops;
-	void *priv;
+	void *host_data;
+	irq_hw_number_t inval_irq;
+
+	/* Optional device node pointer */
 	struct device_node *of_node;
 };
 
-/**
- * irq_domain_to_irq() - Translate from a hardware irq to a linux irq number
- *
- * Returns the linux irq number associated with a hardware irq.  By default,
- * the mapping is irq == domain->irq_base + hwirq, but this mapping can
- * be overridden if the irq_domain implements a .to_irq() hook.
- */
-static inline unsigned int irq_domain_to_irq(struct irq_domain *d,
-					     unsigned long hwirq)
+#ifdef CONFIG_IRQ_DOMAIN
+struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
+					 unsigned int size,
+					 unsigned int first_irq,
+					 irq_hw_number_t first_hwirq,
+					 const struct irq_domain_ops *ops,
+					 void *host_data);
+struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
+					 unsigned int size,
+					 const struct irq_domain_ops *ops,
+					 void *host_data);
+struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
+					 unsigned int max_irq,
+					 const struct irq_domain_ops *ops,
+					 void *host_data);
+struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
+					 const struct irq_domain_ops *ops,
+					 void *host_data);
+
+extern struct irq_domain *irq_find_host(struct device_node *node);
+extern void irq_set_default_host(struct irq_domain *host);
+
+static inline struct irq_domain *irq_domain_add_legacy_isa(
+				struct device_node *of_node,
+				const struct irq_domain_ops *ops,
+				void *host_data)
 {
-	if (d->ops->to_irq)
-		return d->ops->to_irq(d, hwirq);
-	if (WARN_ON(hwirq < d->hwirq_base))
-		return 0;
-	return d->irq_base + hwirq - d->hwirq_base;
+	return irq_domain_add_legacy(of_node, NUM_ISA_INTERRUPTS, 0, 0, ops,
+				     host_data);
 }
+extern struct irq_domain *irq_find_host(struct device_node *node);
+extern void irq_set_default_host(struct irq_domain *host);
 
-#define irq_domain_for_each_hwirq(d, hw) \
-	for (hw = d->hwirq_base; hw < d->hwirq_base + d->nr_irq; hw++)
 
-#define irq_domain_for_each_irq(d, hw, irq) \
-	for (hw = d->hwirq_base, irq = irq_domain_to_irq(d, hw); \
-	     hw < d->hwirq_base + d->nr_irq; \
-	     hw++, irq = irq_domain_to_irq(d, hw))
-
+extern unsigned int irq_create_mapping(struct irq_domain *host,
+				       irq_hw_number_t hwirq);
 extern void irq_dispose_mapping(unsigned int virq);
+extern unsigned int irq_find_mapping(struct irq_domain *host,
+				     irq_hw_number_t hwirq);
+extern unsigned int irq_create_direct_mapping(struct irq_domain *host);
+extern void irq_radix_revmap_insert(struct irq_domain *host, unsigned int virq,
+				    irq_hw_number_t hwirq);
+extern unsigned int irq_radix_revmap_lookup(struct irq_domain *host,
+					    irq_hw_number_t hwirq);
+extern unsigned int irq_linear_revmap(struct irq_domain *host,
+				      irq_hw_number_t hwirq);
 
-extern int irq_domain_add(struct irq_domain *domain);
-extern void irq_domain_del(struct irq_domain *domain);
-extern void irq_domain_register(struct irq_domain *domain);
-extern void irq_domain_register_irq(struct irq_domain *domain, int hwirq);
-extern void irq_domain_unregister(struct irq_domain *domain);
-extern void irq_domain_unregister_irq(struct irq_domain *domain, int hwirq);
-extern int irq_domain_find_free_range(unsigned int from, unsigned int cnt);
+extern const struct irq_domain_ops irq_domain_simple_ops;
 
-extern struct irq_domain_ops irq_domain_simple_ops;
-#endif /* CONFIG_IRQ_DOMAIN */
+/* stock xlate functions */
+int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
+			const u32 *intspec, unsigned int intsize,
+			irq_hw_number_t *out_hwirq, unsigned int *out_type);
+int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
+			const u32 *intspec, unsigned int intsize,
+			irq_hw_number_t *out_hwirq, unsigned int *out_type);
+int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr,
+			const u32 *intspec, unsigned int intsize,
+			irq_hw_number_t *out_hwirq, unsigned int *out_type);
 
-#if defined(CONFIG_IRQ_DOMAIN) && defined(CONFIG_OF_IRQ)
-extern void irq_domain_add_simple(struct device_node *controller, int irq_base);
+#if defined(CONFIG_OF_IRQ)
 extern void irq_domain_generate_simple(const struct of_device_id *match,
 					u64 phys_base, unsigned int irq_start);
-#else /* CONFIG_IRQ_DOMAIN && CONFIG_OF_IRQ */
+#else /* CONFIG_OF_IRQ */
 static inline void irq_domain_generate_simple(const struct of_device_id *match,
 					u64 phys_base, unsigned int irq_start) { }
-#endif /* CONFIG_IRQ_DOMAIN && CONFIG_OF_IRQ */
+#endif /* !CONFIG_OF_IRQ */
+
+#else /* CONFIG_IRQ_DOMAIN */
+static inline void irq_dispose_mapping(unsigned int virq) { }
+#endif /* !CONFIG_IRQ_DOMAIN */
 
 #endif /* _LINUX_IRQDOMAIN_H */
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 0aa96d3..0e0ba5f 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -1,312 +1,780 @@
+#include <linux/debugfs.h>
+#include <linux/hardirq.h>
+#include <linux/interrupt.h>
 #include <linux/irq.h>
+#include <linux/irqdesc.h>
 #include <linux/irqdomain.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/seq_file.h>
 #include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/fs.h>
+
+#define IRQ_DOMAIN_MAP_LEGACY 0 /* driver allocated fixed range of irqs.
+				 * ie. legacy 8259, gets irqs 1..15 */
+#define IRQ_DOMAIN_MAP_NOMAP 1 /* no fast reverse mapping */
+#define IRQ_DOMAIN_MAP_LINEAR 2 /* linear map of interrupts */
+#define IRQ_DOMAIN_MAP_TREE 3 /* radix tree */
 
 static LIST_HEAD(irq_domain_list);
 static DEFINE_MUTEX(irq_domain_mutex);
 
-/**
- * irq_domain_add() - Register an irq_domain
- * @domain: ptr to initialized irq_domain structure
- *
- * Adds a irq_domain structure.  The irq_domain must at a minimum be
- * initialized with an ops structure pointer, and either a ->to_irq hook or
- * a valid irq_base value.  The irq range must be mutually exclusive with
- * domains already registered. Everything else is optional.
- */
-int irq_domain_add(struct irq_domain *domain)
-{
-	struct irq_domain *curr;
-	uint32_t d_highirq = domain->irq_base + domain->nr_irq - 1;
+static DEFINE_MUTEX(revmap_trees_mutex);
+static struct irq_domain *irq_default_domain;
 
-	if (!domain->nr_irq)
-		return -EINVAL;
+/**
+ * irq_domain_alloc() - Allocate a new irq_domain data structure
+ * @of_node: optional device-tree node of the interrupt controller
+ * @revmap_type: type of reverse mapping to use
+ * @ops: map/unmap domain callbacks
+ * @host_data: Controller private data pointer
+ *
+ * Allocates and initialize and irq_domain structure.  Caller is expected to
+ * register allocated irq_domain with irq_domain_register().  Returns pointer
+ * to IRQ domain, or NULL on failure.
+ */
+static struct irq_domain *irq_domain_alloc(struct device_node *of_node,
+					   unsigned int revmap_type,
+					   const struct irq_domain_ops *ops,
+					   void *host_data)
+{
+	struct irq_domain *domain;
+
+	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+	if (WARN_ON(!domain))
+		return NULL;
+
+	/* Fill structure */
+	domain->revmap_type = revmap_type;
+	domain->ops = ops;
+	domain->host_data = host_data;
+	domain->of_node = of_node_get(of_node);
+
+	return domain;
+}
+
+static void irq_domain_add(struct irq_domain *domain)
+{
+	mutex_lock(&irq_domain_mutex);
+	list_add(&domain->link, &irq_domain_list);
+	mutex_unlock(&irq_domain_mutex);
+	pr_debug("irq: Allocated domain of type %d @0x%p\n",
+		 domain->revmap_type, domain);
+}
+
+static unsigned int irq_domain_legacy_revmap(struct irq_domain *domain,
+					     irq_hw_number_t hwirq)
+{
+	irq_hw_number_t first_hwirq = domain->revmap_data.legacy.first_hwirq;
+	int size = domain->revmap_data.legacy.size;
+
+	if (WARN_ON(hwirq < first_hwirq || hwirq >= first_hwirq + size))
+		return 0;
+	return hwirq - first_hwirq + domain->revmap_data.legacy.first_irq;
+}
+
+/**
+ * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
+ * @of_node: pointer to interrupt controller's device tree node.
+ * @size: total number of irqs in legacy mapping
+ * @first_irq: first number of irq block assigned to the domain
+ * @first_hwirq: first hwirq number to use for the translation. Should normally
+ *               be '0', but a positive integer can be used if the effective
+ *               hwirqs numbering does not begin at zero.
+ * @ops: map/unmap domain callbacks
+ * @host_data: Controller private data pointer
+ *
+ * Note: the map() callback will be called before this function returns
+ * for all legacy interrupts except 0 (which is always the invalid irq for
+ * a legacy controller).
+ */
+struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
+					 unsigned int size,
+					 unsigned int first_irq,
+					 irq_hw_number_t first_hwirq,
+					 const struct irq_domain_ops *ops,
+					 void *host_data)
+{
+	struct irq_domain *domain;
+	unsigned int i;
+
+	domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_LEGACY, ops, host_data);
+	if (!domain)
+		return NULL;
+
+	domain->revmap_data.legacy.first_irq = first_irq;
+	domain->revmap_data.legacy.first_hwirq = first_hwirq;
+	domain->revmap_data.legacy.size = size;
 
 	mutex_lock(&irq_domain_mutex);
-	/* insert in ascending order of domain->irq_base */
-	list_for_each_entry(curr, &irq_domain_list, list) {
-		uint32_t c_highirq = curr->irq_base + curr->nr_irq - 1;
-		if (domain->irq_base < curr->irq_base &&
-		    d_highirq < curr->irq_base) {
-			break;
-		}
-		if (d_highirq <= c_highirq) {
+	/* Verify that all the irqs are available */
+	for (i = 0; i < size; i++) {
+		int irq = first_irq + i;
+		struct irq_data *irq_data = irq_get_irq_data(irq);
+
+		if (WARN_ON(!irq_data || irq_data->domain)) {
 			mutex_unlock(&irq_domain_mutex);
-			return -EINVAL;
+			of_node_put(domain->of_node);
+			kfree(domain);
+			return NULL;
 		}
 	}
-	list_add_tail(&domain->list, &curr->list);
+
+	/* Claim all of the irqs before registering a legacy domain */
+	for (i = 0; i < size; i++) {
+		struct irq_data *irq_data = irq_get_irq_data(first_irq + i);
+		irq_data->hwirq = first_hwirq + i;
+		irq_data->domain = domain;
+	}
 	mutex_unlock(&irq_domain_mutex);
 
+	for (i = 0; i < size; i++) {
+		int irq = first_irq + i;
+		int hwirq = first_hwirq + i;
+
+		/* IRQ0 gets ignored */
+		if (!irq)
+			continue;
+
+		/* Legacy flags are left to default at this point,
+		 * one can then use irq_create_mapping() to
+		 * explicitly change them
+		 */
+		ops->map(domain, irq, hwirq);
+
+		/* Clear norequest flags */
+		irq_clear_status_flags(irq, IRQ_NOREQUEST);
+	}
+
+	irq_domain_add(domain);
+	return domain;
+}
+
+/**
+ * irq_domain_add_linear() - Allocate and register a legacy revmap irq_domain.
+ * @of_node: pointer to interrupt controller's device tree node.
+ * @ops: map/unmap domain callbacks
+ * @host_data: Controller private data pointer
+ */
+struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
+					 unsigned int size,
+					 const struct irq_domain_ops *ops,
+					 void *host_data)
+{
+	struct irq_domain *domain;
+	unsigned int *revmap;
+
+	revmap = kzalloc(sizeof(*revmap) * size, GFP_KERNEL);
+	if (WARN_ON(!revmap))
+		return NULL;
+
+	domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_LINEAR, ops, host_data);
+	if (!domain) {
+		kfree(revmap);
+		return NULL;
+	}
+	domain->revmap_data.linear.size = size;
+	domain->revmap_data.linear.revmap = revmap;
+	irq_domain_add(domain);
+	return domain;
+}
+
+struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
+					 unsigned int max_irq,
+					 const struct irq_domain_ops *ops,
+					 void *host_data)
+{
+	struct irq_domain *domain = irq_domain_alloc(of_node,
+					IRQ_DOMAIN_MAP_NOMAP, ops, host_data);
+	if (domain) {
+		domain->revmap_data.nomap.max_irq = max_irq ? max_irq : ~0;
+		irq_domain_add(domain);
+	}
+	return domain;
+}
+
+/**
+ * irq_domain_add_tree()
+ * @of_node: pointer to interrupt controller's device tree node.
+ * @ops: map/unmap domain callbacks
+ *
+ * Note: The radix tree will be allocated later during boot automatically
+ * (the reverse mapping will use the slow path until that happens).
+ */
+struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
+					 const struct irq_domain_ops *ops,
+					 void *host_data)
+{
+	struct irq_domain *domain = irq_domain_alloc(of_node,
+					IRQ_DOMAIN_MAP_TREE, ops, host_data);
+	if (domain) {
+		INIT_RADIX_TREE(&domain->revmap_data.tree, GFP_KERNEL);
+		irq_domain_add(domain);
+	}
+	return domain;
+}
+
+/**
+ * irq_find_host() - Locates a domain for a given device node
+ * @node: device-tree node of the interrupt controller
+ */
+struct irq_domain *irq_find_host(struct device_node *node)
+{
+	struct irq_domain *h, *found = NULL;
+	int rc;
+
+	/* We might want to match the legacy controller last since
+	 * it might potentially be set to match all interrupts in
+	 * the absence of a device node. This isn't a problem so far
+	 * yet though...
+	 */
+	mutex_lock(&irq_domain_mutex);
+	list_for_each_entry(h, &irq_domain_list, link) {
+		if (h->ops->match)
+			rc = h->ops->match(h, node);
+		else
+			rc = (h->of_node != NULL) && (h->of_node == node);
+
+		if (rc) {
+			found = h;
+			break;
+		}
+	}
+	mutex_unlock(&irq_domain_mutex);
+	return found;
+}
+EXPORT_SYMBOL_GPL(irq_find_host);
+
+/**
+ * irq_set_default_host() - Set a "default" irq domain
+ * @domain: default domain pointer
+ *
+ * For convenience, it's possible to set a "default" domain that will be used
+ * whenever NULL is passed to irq_create_mapping(). It makes life easier for
+ * platforms that want to manipulate a few hard coded interrupt numbers that
+ * aren't properly represented in the device-tree.
+ */
+void irq_set_default_host(struct irq_domain *domain)
+{
+	pr_debug("irq: Default domain set to @0x%p\n", domain);
+
+	irq_default_domain = domain;
+}
+
+static int irq_setup_virq(struct irq_domain *domain, unsigned int virq,
+			    irq_hw_number_t hwirq)
+{
+	struct irq_data *irq_data = irq_get_irq_data(virq);
+
+	irq_data->hwirq = hwirq;
+	irq_data->domain = domain;
+	if (domain->ops->map(domain, virq, hwirq)) {
+		pr_debug("irq: -> mapping failed, freeing\n");
+		irq_data->domain = NULL;
+		irq_data->hwirq = 0;
+		return -1;
+	}
+
+	irq_clear_status_flags(virq, IRQ_NOREQUEST);
+
 	return 0;
 }
 
 /**
- * irq_domain_register() - Register an entire irq_domain
- * @domain: ptr to initialized irq_domain structure
+ * irq_create_direct_mapping() - Allocate an irq for direct mapping
+ * @domain: domain to allocate the irq for or NULL for default domain
  *
- * Registers the entire irq_domain.  The irq_domain must at a minimum be
- * initialized with an ops structure pointer, and either a ->to_irq hook or
- * a valid irq_base value.  Everything else is optional.
+ * This routine is used for irq controllers which can choose the hardware
+ * interrupt numbers they generate. In such a case it's simplest to use
+ * the linux irq as the hardware interrupt number.
  */
-void irq_domain_register(struct irq_domain *domain)
+unsigned int irq_create_direct_mapping(struct irq_domain *domain)
 {
-	struct irq_data *d;
-	int hwirq, irq;
+	unsigned int virq;
 
-	irq_domain_for_each_irq(domain, hwirq, irq) {
-		d = irq_get_irq_data(irq);
-		if (!d) {
-			WARN(1, "error: assigning domain to non existant irq_desc");
-			return;
-		}
-		if (d->domain) {
-			/* things are broken; just report, don't clean up */
-			WARN(1, "error: irq_desc already assigned to a domain");
-			return;
-		}
-		d->domain = domain;
-		d->hwirq = hwirq;
+	if (domain == NULL)
+		domain = irq_default_domain;
+
+	BUG_ON(domain == NULL);
+	WARN_ON(domain->revmap_type != IRQ_DOMAIN_MAP_NOMAP);
+
+	virq = irq_alloc_desc_from(1, 0);
+	if (!virq) {
+		pr_debug("irq: create_direct virq allocation failed\n");
+		return 0;
 	}
+	if (virq >= domain->revmap_data.nomap.max_irq) {
+		pr_err("ERROR: no free irqs available below %i maximum\n",
+			domain->revmap_data.nomap.max_irq);
+		irq_free_desc(virq);
+		return 0;
+	}
+	pr_debug("irq: create_direct obtained virq %d\n", virq);
+
+	if (irq_setup_virq(domain, virq, virq)) {
+		irq_free_desc(virq);
+		return 0;
+	}
+
+	return virq;
 }
 
 /**
- * irq_domain_register_irq() - Register an irq_domain
- * @domain: ptr to initialized irq_domain structure
- * @hwirq: irq_domain hwirq to register
+ * irq_create_mapping() - Map a hardware interrupt into linux irq space
+ * @domain: domain owning this hardware interrupt or NULL for default domain
+ * @hwirq: hardware irq number in that domain space
  *
- * Registers a specific hwirq within the irq_domain.  The irq_domain
- * must at a minimum be initialized with an ops structure pointer, and
- * either a ->to_irq hook or a valid irq_base value.  Everything else is
- * optional.
+ * Only one mapping per hardware interrupt is permitted. Returns a linux
+ * irq number.
+ * If the sense/trigger is to be specified, set_irq_type() should be called
+ * on the number returned from that call.
  */
-void irq_domain_register_irq(struct irq_domain *domain, int hwirq)
+unsigned int irq_create_mapping(struct irq_domain *domain,
+				irq_hw_number_t hwirq)
 {
-	struct irq_data *d;
+	unsigned int hint;
+	int virq;
 
-	d = irq_get_irq_data(irq_domain_to_irq(domain, hwirq));
-	if (!d) {
-		WARN(1, "error: assigning domain to non existant irq_desc");
-		return;
+	pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);
+
+	/* Look for default domain if nececssary */
+	if (domain == NULL)
+		domain = irq_default_domain;
+	if (domain == NULL) {
+		printk(KERN_WARNING "irq_create_mapping called for"
+		       " NULL domain, hwirq=%lx\n", hwirq);
+		WARN_ON(1);
+		return 0;
 	}
-	if (d->domain) {
-		/* things are broken; just report, don't clean up */
-		WARN(1, "error: irq_desc already assigned to a domain");
-		return;
+	pr_debug("irq: -> using domain @%p\n", domain);
+
+	/* Check if mapping already exists */
+	virq = irq_find_mapping(domain, hwirq);
+	if (virq) {
+		pr_debug("irq: -> existing mapping on virq %d\n", virq);
+		return virq;
 	}
-	d->domain = domain;
-	d->hwirq = hwirq;
-}
 
-/**
- * irq_domain_del() - Removes a irq_domain from the system
- * @domain: ptr to registered irq_domain.
- */
-void irq_domain_del(struct irq_domain *domain)
-{
-	mutex_lock(&irq_domain_mutex);
-	list_del(&domain->list);
-	mutex_unlock(&irq_domain_mutex);
-}
+	/* Get a virtual interrupt number */
+	if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
+		return irq_domain_legacy_revmap(domain, hwirq);
 
-/**
- * irq_domain_unregister() - Unregister an irq_domain
- * @domain: ptr to registered irq_domain.
- */
-void irq_domain_unregister(struct irq_domain *domain)
-{
-	struct irq_data *d;
-	int hwirq, irq;
-
-	/* Clear the irq_domain assignments */
-	irq_domain_for_each_irq(domain, hwirq, irq) {
-		d = irq_get_irq_data(irq);
-		d->domain = NULL;
+	/* Allocate a virtual interrupt number */
+	hint = hwirq % nr_irqs;
+	if (hint == 0)
+		hint++;
+	virq = irq_alloc_desc_from(hint, 0);
+	if (virq <= 0)
+		virq = irq_alloc_desc_from(1, 0);
+	if (virq <= 0) {
+		pr_debug("irq: -> virq allocation failed\n");
+		return 0;
 	}
-}
 
-/**
- * irq_domain_unregister_irq() - Unregister a hwirq within a irq_domain
- * @domain: ptr to registered irq_domain.
- * @hwirq: irq_domain hwirq to unregister.
- */
-void irq_domain_unregister_irq(struct irq_domain *domain, int hwirq)
-{
-	struct irq_data *d;
-
-	/* Clear the irq_domain assignment */
-	d = irq_get_irq_data(irq_domain_to_irq(domain, hwirq));
-	d->domain = NULL;
-}
-
-/**
- * irq_domain_find_free_range() - Find an available irq range
- * @from: lowest logical irq number to request from
- * @cnt: number of interrupts to search for
- *
- * Finds an available logical irq range from the domains specified
- * on the system. The from parameter can be used to allocate a range
- * at least as great as the specified irq number.
- */
-int irq_domain_find_free_range(unsigned int from, unsigned int cnt)
-{
-	struct irq_domain *curr, *prev = NULL;
-
-	if (list_empty(&irq_domain_list))
-		return from;
-
-	list_for_each_entry(curr, &irq_domain_list, list) {
-		if (prev == NULL) {
-			if ((from + cnt - 1) < curr->irq_base)
-				return from;
-		} else {
-			uint32_t p_next_irq = prev->irq_base + prev->nr_irq;
-			uint32_t start_irq;
-			if (from >= curr->irq_base)
-				continue;
-			if (from < p_next_irq)
-				start_irq = p_next_irq;
-			else
-				start_irq = from;
-			if ((curr->irq_base - start_irq) >= cnt)
-				return p_next_irq;
-		}
-		prev = curr;
+	if (irq_setup_virq(domain, virq, hwirq)) {
+		if (domain->revmap_type != IRQ_DOMAIN_MAP_LEGACY)
+			irq_free_desc(virq);
+		return 0;
 	}
-	curr = list_entry(curr->list.prev, struct irq_domain, list);
 
-	return from > curr->irq_base + curr->nr_irq ?
-	       from : curr->irq_base + curr->nr_irq;
+	pr_debug("irq: irq %lu on domain %s mapped to virtual irq %u\n",
+		hwirq, domain->of_node ? domain->of_node->full_name : "null", virq);
+
+	return virq;
 }
+EXPORT_SYMBOL_GPL(irq_create_mapping);
 
-#if defined(CONFIG_OF_IRQ)
-/**
- * irq_create_of_mapping() - Map a linux irq number from a DT interrupt spec
- *
- * Used by the device tree interrupt mapping code to translate a device tree
- * interrupt specifier to a valid linux irq number.  Returns either a valid
- * linux IRQ number or 0.
- *
- * When the caller no longer need the irq number returned by this function it
- * should arrange to call irq_dispose_mapping().
- */
 unsigned int irq_create_of_mapping(struct device_node *controller,
 				   const u32 *intspec, unsigned int intsize)
 {
 	struct irq_domain *domain;
-	unsigned long hwirq;
-	unsigned int irq, type;
-	int rc = -EINVAL;
+	irq_hw_number_t hwirq;
+	unsigned int type = IRQ_TYPE_NONE;
+	unsigned int virq;
 
-	/* Find a domain which can translate the irq spec */
-	mutex_lock(&irq_domain_mutex);
-	list_for_each_entry(domain, &irq_domain_list, list) {
-		if (!domain->ops->dt_translate)
-			continue;
-
-		rc = domain->ops->dt_translate(domain, controller,
-					intspec, intsize, &hwirq, &type);
-		if (rc == 0)
-			break;
-	}
-	mutex_unlock(&irq_domain_mutex);
-
-	if (rc != 0)
+	domain = controller ? irq_find_host(controller) : irq_default_domain;
+	if (!domain) {
+#ifdef CONFIG_MIPS
+		/*
+		 * Workaround to avoid breaking interrupt controller drivers
+		 * that don't yet register an irq_domain.  This is temporary
+		 * code. ~~~gcl, Feb 24, 2012
+		 *
+		 * Scheduled for removal in Linux v3.6.  That should be enough
+		 * time.
+		 */
+		if (intsize > 0)
+			return intspec[0];
+#endif
+		printk(KERN_WARNING "irq: no irq domain found for %s !\n",
+		       controller->full_name);
 		return 0;
+	}
 
-	irq = irq_domain_to_irq(domain, hwirq);
-	if (type != IRQ_TYPE_NONE)
-		irq_set_irq_type(irq, type);
-	pr_debug("%s: mapped hwirq=%i to irq=%i, flags=%x\n",
-		 controller->full_name, (int)hwirq, irq, type);
-	return irq;
+	/* If domain has no translation, then we assume interrupt line */
+	if (domain->ops->xlate == NULL)
+		hwirq = intspec[0];
+	else {
+		if (domain->ops->xlate(domain, controller, intspec, intsize,
+				     &hwirq, &type))
+			return 0;
+	}
+
+	/* Create mapping */
+	virq = irq_create_mapping(domain, hwirq);
+	if (!virq)
+		return virq;
+
+	/* Set type if specified and different than the current one */
+	if (type != IRQ_TYPE_NONE &&
+	    type != (irqd_get_trigger_type(irq_get_irq_data(virq))))
+		irq_set_irq_type(virq, type);
+	return virq;
 }
 EXPORT_SYMBOL_GPL(irq_create_of_mapping);
 
 /**
- * irq_dispose_mapping() - Discard a mapping created by irq_create_of_mapping()
- * @irq: linux irq number to be discarded
- *
- * Calling this function indicates the caller no longer needs a reference to
- * the linux irq number returned by a prior call to irq_create_of_mapping().
+ * irq_dispose_mapping() - Unmap an interrupt
+ * @virq: linux irq number of the interrupt to unmap
  */
-void irq_dispose_mapping(unsigned int irq)
+void irq_dispose_mapping(unsigned int virq)
 {
-	/*
-	 * nothing yet; will be filled when support for dynamic allocation of
-	 * irq_descs is added to irq_domain
-	 */
+	struct irq_data *irq_data = irq_get_irq_data(virq);
+	struct irq_domain *domain;
+	irq_hw_number_t hwirq;
+
+	if (!virq || !irq_data)
+		return;
+
+	domain = irq_data->domain;
+	if (WARN_ON(domain == NULL))
+		return;
+
+	/* Never unmap legacy interrupts */
+	if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
+		return;
+
+	irq_set_status_flags(virq, IRQ_NOREQUEST);
+
+	/* remove chip and handler */
+	irq_set_chip_and_handler(virq, NULL, NULL);
+
+	/* Make sure it's completed */
+	synchronize_irq(virq);
+
+	/* Tell the PIC about it */
+	if (domain->ops->unmap)
+		domain->ops->unmap(domain, virq);
+	smp_mb();
+
+	/* Clear reverse map */
+	hwirq = irq_data->hwirq;
+	switch(domain->revmap_type) {
+	case IRQ_DOMAIN_MAP_LINEAR:
+		if (hwirq < domain->revmap_data.linear.size)
+			domain->revmap_data.linear.revmap[hwirq] = 0;
+		break;
+	case IRQ_DOMAIN_MAP_TREE:
+		mutex_lock(&revmap_trees_mutex);
+		radix_tree_delete(&domain->revmap_data.tree, hwirq);
+		mutex_unlock(&revmap_trees_mutex);
+		break;
+	}
+
+	irq_free_desc(virq);
 }
 EXPORT_SYMBOL_GPL(irq_dispose_mapping);
 
-int irq_domain_simple_dt_translate(struct irq_domain *d,
-			    struct device_node *controller,
-			    const u32 *intspec, unsigned int intsize,
-			    unsigned long *out_hwirq, unsigned int *out_type)
+/**
+ * irq_find_mapping() - Find a linux irq from an hw irq number.
+ * @domain: domain owning this hardware interrupt
+ * @hwirq: hardware irq number in that domain space
+ *
+ * This is a slow path, for use by generic code. It's expected that an
+ * irq controller implementation directly calls the appropriate low level
+ * mapping function.
+ */
+unsigned int irq_find_mapping(struct irq_domain *domain,
+			      irq_hw_number_t hwirq)
 {
-	if (d->of_node != controller)
-		return -EINVAL;
-	if (intsize < 1)
-		return -EINVAL;
-	if (d->nr_irq && ((intspec[0] < d->hwirq_base) ||
-	    (intspec[0] >= d->hwirq_base + d->nr_irq)))
-		return -EINVAL;
+	unsigned int i;
+	unsigned int hint = hwirq % nr_irqs;
 
-	*out_hwirq = intspec[0];
-	*out_type = IRQ_TYPE_NONE;
-	if (intsize > 1)
-		*out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
+	/* Look for default domain if necessary */
+	if (domain == NULL)
+		domain = irq_default_domain;
+	if (domain == NULL)
+		return 0;
+
+	/* legacy -> bail early */
+	if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
+		return irq_domain_legacy_revmap(domain, hwirq);
+
+	/* Slow path does a linear search of the map */
+	if (hint == 0)
+		hint = 1;
+	i = hint;
+	do {
+		struct irq_data *data = irq_get_irq_data(i);
+		if (data && (data->domain == domain) && (data->hwirq == hwirq))
+			return i;
+		i++;
+		if (i >= nr_irqs)
+			i = 1;
+	} while(i != hint);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(irq_find_mapping);
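
A typical caller of irq_find_mapping() is a chained flow handler that reads a hardware irq number from a status register and demultiplexes it to the mapped Linux irq. A hypothetical sketch; the structure and register offset are illustrative only:

    #include <linux/io.h>
    #include <linux/irq.h>
    #include <linux/irqdomain.h>

    struct example_chip_data {
    	void __iomem		*base;		/* hypothetical MMIO base */
    	struct irq_domain	*domain;
    };

    #define EXAMPLE_PENDING_REG	0x10		/* hypothetical register */

    static void example_cascade_handler(unsigned int irq, struct irq_desc *desc)
    {
    	struct example_chip_data *cd = irq_desc_get_handler_data(desc);
    	irq_hw_number_t hwirq = readl_relaxed(cd->base + EXAMPLE_PENDING_REG);
    	unsigned int cascade_irq = irq_find_mapping(cd->domain, hwirq);

    	if (cascade_irq)
    		generic_handle_irq(cascade_irq);
    }

Such a handler would be installed on the parent interrupt with irq_set_handler_data() and irq_set_chained_handler().
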
+
+/**
+ * irq_radix_revmap_lookup() - Find a linux irq from a hw irq number.
+ * @domain: domain owning this hardware interrupt
+ * @hwirq: hardware irq number in that domain space
+ *
+ * This is a fast path, for use by irq controller code that uses radix tree
+ * revmaps
+ */
+unsigned int irq_radix_revmap_lookup(struct irq_domain *domain,
+				     irq_hw_number_t hwirq)
+{
+	struct irq_data *irq_data;
+
+	if (WARN_ON_ONCE(domain->revmap_type != IRQ_DOMAIN_MAP_TREE))
+		return irq_find_mapping(domain, hwirq);
+
+	/*
+	 * Freeing an irq can delete nodes along the path to
+	 * do the lookup via call_rcu.
+	 */
+	rcu_read_lock();
+	irq_data = radix_tree_lookup(&domain->revmap_data.tree, hwirq);
+	rcu_read_unlock();
+
+	/*
+	 * If found in radix tree, then fine.
+	 * Else fall back to linear lookup - this should not happen in practice
+	 * as it means that we failed to insert the node in the radix tree.
+	 */
+	return irq_data ? irq_data->irq : irq_find_mapping(domain, hwirq);
+}
+
+/**
+ * irq_radix_revmap_insert() - Insert a hw irq to linux irq number mapping.
+ * @domain: domain owning this hardware interrupt
+ * @virq: linux irq number
+ * @hwirq: hardware irq number in that domain space
+ *
+ * This is for use by irq controllers that use a radix tree reverse
+ * mapping for fast lookup.
+ */
+void irq_radix_revmap_insert(struct irq_domain *domain, unsigned int virq,
+			     irq_hw_number_t hwirq)
+{
+	struct irq_data *irq_data = irq_get_irq_data(virq);
+
+	if (WARN_ON(domain->revmap_type != IRQ_DOMAIN_MAP_TREE))
+		return;
+
+	if (virq) {
+		mutex_lock(&revmap_trees_mutex);
+		radix_tree_insert(&domain->revmap_data.tree, hwirq, irq_data);
+		mutex_unlock(&revmap_trees_mutex);
+	}
+}
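
A controller with a large, sparse hwirq space (IRQ_DOMAIN_MAP_TREE) is expected to populate the radix-tree revmap itself, typically from its .map callback. A hedged sketch with illustrative names:

    static struct irq_chip example_irq_chip = {
    	.name = "example",
    };

    static int example_tree_map(struct irq_domain *d, unsigned int virq,
    			    irq_hw_number_t hwirq)
    {
    	irq_set_chip_and_handler(virq, &example_irq_chip, handle_level_irq);
    	/* Populate the radix tree so irq_radix_revmap_lookup() is fast. */
    	irq_radix_revmap_insert(d, virq, hwirq);
    	return 0;
    }
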
+
+/**
+ * irq_linear_revmap() - Find a linux irq from a hw irq number.
+ * @domain: domain owning this hardware interrupt
+ * @hwirq: hardware irq number in that domain space
+ *
+ * This is a fast path, for use by irq controller code that uses linear
+ * revmaps. It falls back to the slow path if the revmap doesn't exist
+ * yet and will create the revmap entry with appropriate locking.
+ */
+unsigned int irq_linear_revmap(struct irq_domain *domain,
+			       irq_hw_number_t hwirq)
+{
+	unsigned int *revmap;
+
+	if (WARN_ON_ONCE(domain->revmap_type != IRQ_DOMAIN_MAP_LINEAR))
+		return irq_find_mapping(domain, hwirq);
+
+	/* Check revmap bounds */
+	if (unlikely(hwirq >= domain->revmap_data.linear.size))
+		return irq_find_mapping(domain, hwirq);
+
+	/* Check if revmap was allocated */
+	revmap = domain->revmap_data.linear.revmap;
+	if (unlikely(revmap == NULL))
+		return irq_find_mapping(domain, hwirq);
+
+	/* Fill up revmap with slow path if no mapping found */
+	if (unlikely(!revmap[hwirq]))
+		revmap[hwirq] = irq_find_mapping(domain, hwirq);
+
+	return revmap[hwirq];
+}
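
irq_linear_revmap() is the lookup a linear-revmap controller would use on its own interrupt entry path; a hypothetical demux loop over a pending bitmask:

    static void example_handle_pending(struct irq_domain *domain, u32 pending)
    {
    	while (pending) {
    		irq_hw_number_t hwirq = __ffs(pending);
    		unsigned int virq = irq_linear_revmap(domain, hwirq);

    		pending &= ~BIT(hwirq);
    		if (virq)
    			generic_handle_irq(virq);
    	}
    }
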
+
+#ifdef CONFIG_IRQ_DOMAIN_DEBUG
+static int virq_debug_show(struct seq_file *m, void *private)
+{
+	unsigned long flags;
+	struct irq_desc *desc;
+	const char *p;
+	static const char none[] = "none";
+	void *data;
+	int i;
+
+	seq_printf(m, "%-5s  %-7s  %-15s  %-*s  %s\n", "irq", "hwirq",
+		      "chip name", (int)(2 * sizeof(void *) + 2), "chip data",
+		      "domain name");
+
+	for (i = 1; i < nr_irqs; i++) {
+		desc = irq_to_desc(i);
+		if (!desc)
+			continue;
+
+		raw_spin_lock_irqsave(&desc->lock, flags);
+
+		if (desc->action && desc->action->handler) {
+			struct irq_chip *chip;
+
+			seq_printf(m, "%5d  ", i);
+			seq_printf(m, "0x%05lx  ", desc->irq_data.hwirq);
+
+			chip = irq_desc_get_chip(desc);
+			if (chip && chip->name)
+				p = chip->name;
+			else
+				p = none;
+			seq_printf(m, "%-15s  ", p);
+
+			data = irq_desc_get_chip_data(desc);
+			seq_printf(m, data ? "0x%p  " : "  %p  ", data);
+
+			if (desc->irq_data.domain && desc->irq_data.domain->of_node)
+				p = desc->irq_data.domain->of_node->full_name;
+			else
+				p = none;
+			seq_printf(m, "%s\n", p);
+		}
+
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
+	}
+
+	return 0;
+}
+
+static int virq_debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, virq_debug_show, inode->i_private);
+}
+
+static const struct file_operations virq_debug_fops = {
+	.open = virq_debug_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int __init irq_debugfs_init(void)
+{
+	if (debugfs_create_file("irq_domain_mapping", S_IRUGO, NULL,
+				 NULL, &virq_debug_fops) == NULL)
+		return -ENOMEM;
+
+	return 0;
+}
+__initcall(irq_debugfs_init);
+#endif /* CONFIG_IRQ_DOMAIN_DEBUG */
+
+int irq_domain_simple_map(struct irq_domain *d, unsigned int irq,
+			  irq_hw_number_t hwirq)
+{
 	return 0;
 }
 
 /**
- * irq_domain_create_simple() - Set up a 'simple' translation range
+ * irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings
+ *
+ * Device Tree IRQ specifier translation function which works with one cell
+ * bindings where the cell value maps directly to the hwirq number.
  */
-void irq_domain_add_simple(struct device_node *controller, int irq_base)
+int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
+			     const u32 *intspec, unsigned int intsize,
+			     unsigned long *out_hwirq, unsigned int *out_type)
 {
-	struct irq_domain *domain;
-	int rc;
-
-	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
-	if (!domain) {
-		WARN_ON(1);
-		return;
-	}
-
-	domain->irq_base = irq_base;
-	domain->of_node = of_node_get(controller);
-	domain->ops = &irq_domain_simple_ops;
-	rc = irq_domain_add(domain);
-	if (rc) {
-		WARN(1, "Unable to create irq domain\n");
-		return;
-	}
-	irq_domain_register(domain);
+	if (WARN_ON(intsize < 1))
+		return -EINVAL;
+	*out_hwirq = intspec[0];
+	*out_type = IRQ_TYPE_NONE;
+	return 0;
 }
-EXPORT_SYMBOL_GPL(irq_domain_add_simple);
+EXPORT_SYMBOL_GPL(irq_domain_xlate_onecell);
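
A controller whose binding uses one interrupt cell can plug this helper straight into its ops; a minimal, hypothetical example reusing the chip sketched earlier:

    static int example_onecell_map(struct irq_domain *d, unsigned int virq,
    			       irq_hw_number_t hwirq)
    {
    	irq_set_chip_and_handler(virq, &example_irq_chip, handle_level_irq);
    	return 0;
    }

    static const struct irq_domain_ops example_onecell_ops = {
    	.map	= example_onecell_map,
    	.xlate	= irq_domain_xlate_onecell,
    };
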
 
+/**
+ * irq_domain_xlate_twocell() - Generic xlate for direct two cell bindings
+ *
+ * Device Tree IRQ specifier translation function which works with two cell
+ * bindings where the cell values map directly to the hwirq number
+ * and linux irq flags.
+ */
+int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
+			const u32 *intspec, unsigned int intsize,
+			irq_hw_number_t *out_hwirq, unsigned int *out_type)
+{
+	if (WARN_ON(intsize < 2))
+		return -EINVAL;
+	*out_hwirq = intspec[0];
+	*out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(irq_domain_xlate_twocell);
+
+/**
+ * irq_domain_xlate_onetwocell() - Generic xlate for one or two cell bindings
+ *
+ * Device Tree IRQ specifier translation function which works with either one
+ * or two cell bindings where the cell values map directly to the hwirq number
+ * and linux irq flags.
+ *
+ * Note: don't use this function unless your interrupt controller explicitly
+ * supports both one and two cell bindings.  For the majority of controllers
+ * the _onecell() or _twocell() variants above should be used.
+ */
+int irq_domain_xlate_onetwocell(struct irq_domain *d,
+				struct device_node *ctrlr,
+				const u32 *intspec, unsigned int intsize,
+				unsigned long *out_hwirq, unsigned int *out_type)
+{
+	if (WARN_ON(intsize < 1))
+		return -EINVAL;
+	*out_hwirq = intspec[0];
+	*out_type = (intsize > 1) ? intspec[1] : IRQ_TYPE_NONE;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell);
+
+const struct irq_domain_ops irq_domain_simple_ops = {
+	.map = irq_domain_simple_map,
+	.xlate = irq_domain_xlate_onetwocell,
+};
+EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
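
A client then registers a domain of the desired revmap type against an ops structure such as this one; a hedged registration sketch using a linear revmap (the size is illustrative):

    static struct irq_domain *example_add_domain(struct device_node *np)
    {
    	/* 32 hwirqs, linear reverse map, no host_data. */
    	return irq_domain_add_linear(np, 32, &irq_domain_simple_ops, NULL);
    }
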
+
+#ifdef CONFIG_OF_IRQ
 void irq_domain_generate_simple(const struct of_device_id *match,
 				u64 phys_base, unsigned int irq_start)
 {
 	struct device_node *node;
-	pr_info("looking for phys_base=%llx, irq_start=%i\n",
+	pr_debug("looking for phys_base=%llx, irq_start=%i\n",
 		(unsigned long long) phys_base, (int) irq_start);
 	node = of_find_matching_node_by_address(NULL, match, phys_base);
 	if (node)
-		irq_domain_add_simple(node, irq_start);
-	else
-		pr_info("no node found\n");
+		irq_domain_add_legacy(node, 32, irq_start, 0,
+				      &irq_domain_simple_ops, NULL);
 }
 EXPORT_SYMBOL_GPL(irq_domain_generate_simple);
-#endif /* CONFIG_OF_IRQ */
-
-struct irq_domain_ops irq_domain_simple_ops = {
-#ifdef CONFIG_OF_IRQ
-	.dt_translate = irq_domain_simple_dt_translate,
-#endif /* CONFIG_OF_IRQ */
-};
-EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
+#endif