Merge commit 'AU_LINUX_ANDROID_ICS.04.00.04.00.126' into msm-3.4

AU_LINUX_ANDROID_ICS.04.00.04.00.126 from msm-3.0.
First parent is from google/android-3.4.

* commit 'AU_LINUX_ANDROID_ICS.04.00.04.00.126': (8712 commits)
  PRNG: Device tree entry for qrng device.
  vidc:1080p: Set video core timeout value for Thumbnail mode
  msm: sps: improve the debugging support in SPS driver
  board-8064 msm: Overlap secure and non secure video firmware heaps.
  msm: clock: Add handoff ops for 7x30 and copper XO clocks
  msm_fb: display: Wait for external vsync before DTV IOMMU unmap
  msm: Fix circular dependency in debug UART settings
  msm: gdsc: Add GDSC regulator driver for msm-copper
  defconfig: Enable Mobicore Driver.
  mobicore: Add mobicore driver.
  mobicore: rename variable to lower case.
  mobicore: rename folder.
  mobicore: add makefiles
  mobicore: initial import of kernel driver
  ASoC: msm: Add SLIMBUS_2_RX CPU DAI
  board-8064-gpio: Update FUNC for EPM SPI CS
  msm_fb: display: Remove chicken bit config during video playback
  mmc: msm_sdcc: enable the sanitize capability
  msm-fb: display: lm2 writeback support on mpq platforms
  msm_fb: display: Disable LVDS phy & pll during panel off
  ...

Signed-off-by: Steve Muckle <smuckle@codeaurora.org>
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index aa52699..54a8392 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -38,12 +38,14 @@
 #include <linux/interrupt.h>
 #include <linux/percpu.h>
 #include <linux/slab.h>
+#include <linux/syscore_ops.h>
 
 #include <asm/irq.h>
 #include <asm/exception.h>
 #include <asm/smp_plat.h>
 #include <asm/mach/irq.h>
 #include <asm/hardware/gic.h>
+#include <asm/system.h>
 
 union gic_base {
 	void __iomem *common_base;
@@ -51,24 +53,37 @@
 };
 
 struct gic_chip_data {
+	unsigned int irq_offset;
 	union gic_base dist_base;
 	union gic_base cpu_base;
 #ifdef CONFIG_CPU_PM
 	u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
 	u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
 	u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
+	u32 saved_dist_pri[DIV_ROUND_UP(1020, 4)];
 	u32 __percpu *saved_ppi_enable;
 	u32 __percpu *saved_ppi_conf;
 #endif
-	struct irq_domain *domain;
+#ifdef CONFIG_IRQ_DOMAIN
+	struct irq_domain domain;
+#endif
 	unsigned int gic_irqs;
 #ifdef CONFIG_GIC_NON_BANKED
 	void __iomem *(*get_base)(union gic_base *);
 #endif
+	unsigned int max_irq;
+#ifdef CONFIG_PM
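+	/*
+	 * Distributor enable bitmaps: wakeup_irqs[] is the set left enabled
+	 * across suspend, enabled_irqs[] is the saved set restored at resume.
+	 */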
+	unsigned int wakeup_irqs[32];
+	unsigned int enabled_irqs[32];
+#endif
 };
 
 static DEFINE_RAW_SPINLOCK(irq_controller_lock);
 
+#ifdef CONFIG_CPU_PM
+static unsigned int saved_dist_ctrl, saved_cpu_ctrl;
+#endif
+
 /*
  * Supported arch specific GIC irq extension.
  * Default make them NULL.
@@ -80,6 +95,7 @@
 	.irq_retrigger	= NULL,
 	.irq_set_type	= NULL,
 	.irq_set_wake	= NULL,
+	.irq_disable	= NULL,
 };
 
 #ifndef MAX_GIC_NR
@@ -137,6 +153,26 @@
 	return d->hwirq;
 }
 
+#if defined(CONFIG_CPU_V7) && defined(CONFIG_GIC_SECURE)
+static inline bool is_cpu_secure(void)
+{
+	unsigned int dscr;
+
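+	/* Read the CP14 debug status and control register (DSCR) */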
+	asm volatile ("mrc p14, 0, %0, c0, c1, 0" : "=r" (dscr));
+
+	/* BIT(18) - NS bit; 1 = NS; 0 = S */
+	if (BIT(18) & dscr)
+		return false;
+	else
+		return true;
+}
+#else
+static inline bool is_cpu_secure(void)
+{
+	return false;
+}
+#endif
+
 /*
  * Routines to acknowledge, disable and enable interrupts
  */
@@ -162,6 +198,132 @@
 	raw_spin_unlock(&irq_controller_lock);
 }
 
+static void gic_disable_irq(struct irq_data *d)
+{
+	if (gic_arch_extn.irq_disable)
+		gic_arch_extn.irq_disable(d);
+}
+
+#ifdef CONFIG_PM
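+/*
+ * Across suspend, mask everything in the distributor and leave only the
+ * IRQs recorded in wakeup_irqs[] enabled so they can wake the system.
+ */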
+static int gic_suspend_one(struct gic_chip_data *gic)
+{
+	unsigned int i;
+	void __iomem *base = gic_data_dist_base(gic);
+#ifdef CONFIG_ARCH_MSM8625
+	unsigned long flags;
+#endif
+
+	for (i = 0; i * 32 < gic->max_irq; i++) {
+#ifdef CONFIG_ARCH_MSM8625
+		raw_spin_lock_irqsave(&irq_controller_lock, flags);
+#endif
+		gic->enabled_irqs[i]
+			= readl_relaxed(base + GIC_DIST_ENABLE_SET + i * 4);
+		/* disable all of them */
+		writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4);
+		/* enable the wakeup set */
+		writel_relaxed(gic->wakeup_irqs[i],
+			base + GIC_DIST_ENABLE_SET + i * 4);
+#ifdef CONFIG_ARCH_MSM8625
+		raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
+#endif
+	}
+	mb();
+	return 0;
+}
+
+static int gic_suspend(void)
+{
+	int i;
+	for (i = 0; i < MAX_GIC_NR; i++)
+		gic_suspend_one(&gic_data[i]);
+	return 0;
+}
+
+extern int msm_show_resume_irq_mask;
+
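+/*
+ * Log which enabled interrupts were pending at resume, gated by
+ * msm_show_resume_irq_mask.
+ */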
+static void gic_show_resume_irq(struct gic_chip_data *gic)
+{
+	unsigned int i;
+	u32 enabled;
+	unsigned long pending[32];
+	void __iomem *base = gic_data_dist_base(gic);
+#ifdef CONFIG_ARCH_MSM8625
+	unsigned long flags;
+#endif
+
+	if (!msm_show_resume_irq_mask)
+		return;
+
+#ifdef CONFIG_ARCH_MSM8625
+	raw_spin_lock_irqsave(&irq_controller_lock, flags);
+#else
+	raw_spin_lock(&irq_controller_lock);
+#endif
+	for (i = 0; i * 32 < gic->max_irq; i++) {
+		enabled = readl_relaxed(base + GIC_DIST_ENABLE_CLEAR + i * 4);
+		pending[i] = readl_relaxed(base + GIC_DIST_PENDING_SET + i * 4);
+		pending[i] &= enabled;
+	}
+#ifdef CONFIG_ARCH_MSM8625
+	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
+#else
+	raw_spin_unlock(&irq_controller_lock);
+#endif
+
+	for (i = find_first_bit(pending, gic->max_irq);
+	     i < gic->max_irq;
+	     i = find_next_bit(pending, gic->max_irq, i+1)) {
+		pr_warning("%s: %d triggered\n", __func__,
+					i + gic->irq_offset);
+	}
+}
+
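+/* Undo gic_suspend_one(): restore the full enable set recorded at suspend */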
+static void gic_resume_one(struct gic_chip_data *gic)
+{
+	unsigned int i;
+	void __iomem *base = gic_data_dist_base(gic);
+#ifdef CONFIG_ARCH_MSM8625
+	unsigned long flags;
+#endif
+	gic_show_resume_irq(gic);
+	for (i = 0; i * 32 < gic->max_irq; i++) {
+#ifdef CONFIG_ARCH_MSM8625
+		raw_spin_lock_irqsave(&irq_controller_lock, flags);
+#endif
+		/* disable all of them */
+		writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4);
+		/* enable the enabled set */
+		writel_relaxed(gic->enabled_irqs[i],
+			base + GIC_DIST_ENABLE_SET + i * 4);
+#ifdef CONFIG_ARCH_MSM8625
+		raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
+#endif
+	}
+	mb();
+}
+
+static void gic_resume(void)
+{
+	int i;
+	for (i = 0; i < MAX_GIC_NR; i++)
+		gic_resume_one(&gic_data[i]);
+}
+
+static struct syscore_ops gic_syscore_ops = {
+	.suspend = gic_suspend,
+	.resume = gic_resume,
+};
+
+static int __init gic_init_sys(void)
+{
+	register_syscore_ops(&gic_syscore_ops);
+	return 0;
+}
+arch_initcall(gic_init_sys);
+
+#endif
+
 static void gic_eoi_irq(struct irq_data *d)
 {
 	if (gic_arch_extn.irq_eoi) {
@@ -169,8 +331,13 @@
 		gic_arch_extn.irq_eoi(d);
 		raw_spin_unlock(&irq_controller_lock);
 	}
-
+#ifdef CONFIG_ARCH_MSM8625
+	raw_spin_lock(&irq_controller_lock);
+#endif
 	writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
+#ifdef CONFIG_ARCH_MSM8625
+	raw_spin_unlock(&irq_controller_lock);
+#endif
 }
 
 static int gic_set_type(struct irq_data *d, unsigned int type)
@@ -257,6 +424,20 @@
 static int gic_set_wake(struct irq_data *d, unsigned int on)
 {
 	int ret = -ENXIO;
+	unsigned int reg_offset, bit_offset;
+	unsigned int gicirq = gic_irq(d);
+	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
+
+	/* per-cpu interrupts cannot be wakeup interrupts */
+	WARN_ON(gicirq < 32);
+
+	reg_offset = gicirq / 32;
+	bit_offset = gicirq % 32;
+
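+	/*
+	 * Track the wakeup set; gic_suspend_one() programs exactly these
+	 * bits into GIC_DIST_ENABLE_SET before power collapse.
+	 */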
+	if (on)
+		gic_data->wakeup_irqs[reg_offset] |=  1 << bit_offset;
+	else
+		gic_data->wakeup_irqs[reg_offset] &=  ~(1 << bit_offset);
 
 	if (gic_arch_extn.irq_set_wake)
 		ret = gic_arch_extn.irq_set_wake(d, on);
@@ -275,16 +456,28 @@
 	void __iomem *cpu_base = gic_data_cpu_base(gic);
 
 	do {
+#ifdef CONFIG_ARCH_MSM8625
+		raw_spin_lock(&irq_controller_lock);
+#endif
 		irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
+#ifdef CONFIG_ARCH_MSM8625
+		raw_spin_unlock(&irq_controller_lock);
+#endif
 		irqnr = irqstat & ~0x1c00;
 
 		if (likely(irqnr > 15 && irqnr < 1021)) {
-			irqnr = irq_find_mapping(gic->domain, irqnr);
+			irqnr = irq_domain_to_irq(&gic->domain, irqnr);
 			handle_IRQ(irqnr, regs);
 			continue;
 		}
 		if (irqnr < 16) {
+#ifdef CONFIG_ARCH_MSM8625
+			raw_spin_lock(&irq_controller_lock);
+#endif
 			writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
+#ifdef CONFIG_ARCH_MSM8625
+			raw_spin_unlock(&irq_controller_lock);
+#endif
 #ifdef CONFIG_SMP
 			handle_IPI(irqnr, regs);
 #endif
@@ -311,8 +504,8 @@
 	if (gic_irq == 1023)
 		goto out;
 
-	cascade_irq = irq_find_mapping(chip_data->domain, gic_irq);
-	if (unlikely(gic_irq < 32 || gic_irq > 1020))
+	cascade_irq = irq_domain_to_irq(&chip_data->domain, gic_irq);
+	if (unlikely(gic_irq < 32 || gic_irq > 1020 || cascade_irq >= NR_IRQS))
 		do_bad_IRQ(cascade_irq, desc);
 	else
 		generic_handle_irq(cascade_irq);
@@ -331,6 +524,7 @@
 #ifdef CONFIG_SMP
 	.irq_set_affinity	= gic_set_affinity,
 #endif
+	.irq_disable		= gic_disable_irq,
 	.irq_set_wake		= gic_set_wake,
 };
 
@@ -345,9 +539,10 @@
 
 static void __init gic_dist_init(struct gic_chip_data *gic)
 {
-	unsigned int i;
+	unsigned int i, irq;
 	u32 cpumask;
 	unsigned int gic_irqs = gic->gic_irqs;
+	struct irq_domain *domain = &gic->domain;
 	void __iomem *base = gic_data_dist_base(gic);
 	u32 cpu = cpu_logical_map(smp_processor_id());
 
@@ -370,6 +565,14 @@
 		writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
 
 	/*
+	 * Mark all SPIs as non-secure (Group 1) when the kernel is
+	 * running in the secure world.
+	 */
+	if (is_cpu_secure())
+		for (i = 32; i < gic_irqs; i += 32)
+			writel_relaxed(0xFFFFFFFF,
+					base + GIC_DIST_ISR + i * 4 / 32);
+
+	/*
 	 * Set priority on all global interrupts.
 	 */
 	for (i = 32; i < gic_irqs; i += 4)
@@ -382,7 +585,31 @@
 	for (i = 32; i < gic_irqs; i += 32)
 		writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
 
-	writel_relaxed(1, base + GIC_DIST_CTRL);
+	/*
+	 * Setup the Linux IRQ subsystem.
+	 */
+	irq_domain_for_each_irq(domain, i, irq) {
+		if (i < 32) {
+			irq_set_percpu_devid(irq);
+			irq_set_chip_and_handler(irq, &gic_chip,
+						 handle_percpu_devid_irq);
+			set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
+		} else {
+			irq_set_chip_and_handler(irq, &gic_chip,
+						 handle_fasteoi_irq);
+			set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+		}
+		irq_set_chip_data(irq, gic);
+	}
+
+	gic->max_irq = gic_irqs;
+
+	if (is_cpu_secure())
+		writel_relaxed(3, base + GIC_DIST_CTRL);
+	else
+		writel_relaxed(1, base + GIC_DIST_CTRL);
+
+	mb();
 }
 
 static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
@@ -395,9 +622,16 @@
 	 * Deal with the banked PPI and SGI interrupts - disable all
 	 * PPI interrupts, ensure all SGI interrupts are enabled.
 	 */
+#ifdef CONFIG_ARCH_MSM8625
+	raw_spin_lock(&irq_controller_lock);
+#endif
 	writel_relaxed(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
 	writel_relaxed(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);
 
+	/* Mark the banked PPIs/SGIs as non-secure (Group 1) when secure */
+	if (is_cpu_secure())
+		writel_relaxed(0xFFFFFFFF, dist_base + GIC_DIST_ISR);
+
 	/*
 	 * Set priority on PPI and SGI interrupts
 	 */
@@ -405,7 +639,15 @@
 		writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);
 
 	writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
-	writel_relaxed(1, base + GIC_CPU_CTRL);
+
+	if (is_cpu_secure())
+		writel_relaxed(0xF, base + GIC_CPU_CTRL);
+	else
+		writel_relaxed(1, base + GIC_CPU_CTRL);
+#ifdef CONFIG_ARCH_MSM8625
+	raw_spin_unlock(&irq_controller_lock);
+#endif
+	mb();
 }
 
 #ifdef CONFIG_CPU_PM
@@ -430,6 +672,8 @@
 	if (!dist_base)
 		return;
 
+	saved_dist_ctrl = readl_relaxed(dist_base + GIC_DIST_CTRL);
+
 	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
 		gic_data[gic_nr].saved_spi_conf[i] =
 			readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
@@ -438,6 +682,10 @@
 		gic_data[gic_nr].saved_spi_target[i] =
 			readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
 
+	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
+		gic_data[gic_nr].saved_dist_pri[i] =
+			readl_relaxed(dist_base + GIC_DIST_PRI + i * 4);
+
 	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
 		gic_data[gic_nr].saved_spi_enable[i] =
 			readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
@@ -472,7 +720,7 @@
 			dist_base + GIC_DIST_CONFIG + i * 4);
 
 	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
-		writel_relaxed(0xa0a0a0a0,
+		writel_relaxed(gic_data[gic_nr].saved_dist_pri[i],
 			dist_base + GIC_DIST_PRI + i * 4);
 
 	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
@@ -483,7 +731,7 @@
 		writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
 			dist_base + GIC_DIST_ENABLE_SET + i * 4);
 
-	writel_relaxed(1, dist_base + GIC_DIST_CTRL);
+	writel_relaxed(saved_dist_ctrl, dist_base + GIC_DIST_CTRL);
 }
 
 static void gic_cpu_save(unsigned int gic_nr)
@@ -502,6 +750,12 @@
 	if (!dist_base || !cpu_base)
 		return;
 
+	saved_cpu_ctrl = readl_relaxed(cpu_base + GIC_CPU_CTRL);
+
+	for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
+		gic_data[gic_nr].saved_dist_pri[i] = readl_relaxed(dist_base +
+							GIC_DIST_PRI + i * 4);
+
 	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
 	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
 		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
@@ -537,10 +791,11 @@
 		writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);
 
 	for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
-		writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4);
+		writel_relaxed(gic_data[gic_nr].saved_dist_pri[i],
+			dist_base + GIC_DIST_PRI + i * 4);
 
 	writel_relaxed(0xf0, cpu_base + GIC_CPU_PRIMASK);
-	writel_relaxed(1, cpu_base + GIC_CPU_CTRL);
+	writel_relaxed(saved_cpu_ctrl, cpu_base + GIC_CPU_CTRL);
 }
 
 static int gic_notifier(struct notifier_block *self, unsigned long cmd,	void *v)
@@ -597,27 +852,11 @@
 }
 #endif
 
-static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
-				irq_hw_number_t hw)
-{
-	if (hw < 32) {
-		irq_set_percpu_devid(irq);
-		irq_set_chip_and_handler(irq, &gic_chip,
-					 handle_percpu_devid_irq);
-		set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
-	} else {
-		irq_set_chip_and_handler(irq, &gic_chip,
-					 handle_fasteoi_irq);
-		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
-	}
-	irq_set_chip_data(irq, d->host_data);
-	return 0;
-}
-
-static int gic_irq_domain_xlate(struct irq_domain *d,
-				struct device_node *controller,
-				const u32 *intspec, unsigned int intsize,
-				unsigned long *out_hwirq, unsigned int *out_type)
+#ifdef CONFIG_OF
+static int gic_irq_domain_dt_translate(struct irq_domain *d,
+				       struct device_node *controller,
+				       const u32 *intspec, unsigned int intsize,
+				       unsigned long *out_hwirq, unsigned int *out_type)
 {
 	if (d->of_node != controller)
 		return -EINVAL;
@@ -634,23 +873,26 @@
 	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
 	return 0;
 }
+#endif
 
 const struct irq_domain_ops gic_irq_domain_ops = {
-	.map = gic_irq_domain_map,
-	.xlate = gic_irq_domain_xlate,
+#ifdef CONFIG_OF
+	.dt_translate = gic_irq_domain_dt_translate,
+#endif
 };
 
 void __init gic_init_bases(unsigned int gic_nr, int irq_start,
 			   void __iomem *dist_base, void __iomem *cpu_base,
-			   u32 percpu_offset, struct device_node *node)
+			   u32 percpu_offset)
 {
-	irq_hw_number_t hwirq_base;
 	struct gic_chip_data *gic;
-	int gic_irqs, irq_base;
+	struct irq_domain *domain;
+	int gic_irqs, rc;
 
 	BUG_ON(gic_nr >= MAX_GIC_NR);
 
 	gic = &gic_data[gic_nr];
+	domain = &gic->domain;
 #ifdef CONFIG_GIC_NON_BANKED
 	if (percpu_offset) { /* Frankein-GIC without banked registers... */
 		unsigned int cpu;
@@ -658,11 +900,8 @@
 		gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
 		gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
 		if (WARN_ON(!gic->dist_base.percpu_base ||
-			    !gic->cpu_base.percpu_base)) {
-			free_percpu(gic->dist_base.percpu_base);
-			free_percpu(gic->cpu_base.percpu_base);
-			return;
-		}
+			    !gic->cpu_base.percpu_base))
+			goto init_bases_err;
 
 		for_each_possible_cpu(cpu) {
 			unsigned long offset = percpu_offset * cpu_logical_map(cpu);
@@ -686,12 +925,13 @@
 	 * For primary GICs, skip over SGIs.
 	 * For secondary GICs, skip over PPIs, too.
 	 */
-	if (gic_nr == 0 && (irq_start & 31) > 0) {
-		hwirq_base = 16;
-		if (irq_start != -1)
-			irq_start = (irq_start & ~31) + 16;
-	} else {
-		hwirq_base = 32;
+	domain->hwirq_base = 32;
+	if (gic_nr == 0) {
+		if ((irq_start & 31) > 0) {
+			domain->hwirq_base = 16;
+			if (irq_start != -1)
+				irq_start = (irq_start & ~31) + 16;
+		}
 	}
 
 	/*
@@ -704,22 +944,33 @@
 		gic_irqs = 1020;
 	gic->gic_irqs = gic_irqs;
 
-	gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */
-	irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, numa_node_id());
-	if (IS_ERR_VALUE(irq_base)) {
+	domain->nr_irq = gic_irqs - domain->hwirq_base;
+	domain->irq_base = irq_alloc_descs(irq_start, 16, domain->nr_irq,
+					   numa_node_id());
+	if (IS_ERR_VALUE(domain->irq_base)) {
 		WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
 		     irq_start);
-		irq_base = irq_start;
+		domain->irq_base = irq_start;
 	}
-	gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base,
-				    hwirq_base, &gic_irq_domain_ops, gic);
-	if (WARN_ON(!gic->domain))
-		return;
+	domain->priv = gic;
+	domain->ops = &gic_irq_domain_ops;
+	rc = irq_domain_add(domain);
+	if (rc) {
+		WARN(1, "Unable to create irq_domain\n");
+		goto init_bases_err;
+	}
+	irq_domain_register(domain);
 
 	gic_chip.flags |= gic_arch_extn.flags;
 	gic_dist_init(gic);
 	gic_cpu_init(gic);
 	gic_pm_init(gic);
+
+	return;
+
+init_bases_err:
+	free_percpu(gic->dist_base.percpu_base);
+	free_percpu(gic->cpu_base.percpu_base);
 }
 
 void __cpuinit gic_secondary_init(unsigned int gic_nr)
@@ -733,23 +984,68 @@
 void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 {
 	int cpu;
+	unsigned long sgir;
 	unsigned long map = 0;
+#ifdef CONFIG_ARCH_MSM8625
+	unsigned long flags;
+#endif
 
 	/* Convert our logical CPU mask into a physical one. */
 	for_each_cpu(cpu, mask)
 		map |= 1 << cpu_logical_map(cpu);
 
+	sgir = (map << 16) | irq;
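+	/*
+	 * GICD_SGIR bit 15 (NSATT): when running in the secure world, send
+	 * the SGI as a Group 1 (non-secure) interrupt, matching the group
+	 * setup done at GIC init.
+	 */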
+	if (is_cpu_secure())
+		sgir |= (1 << 15);
+
 	/*
 	 * Ensure that stores to Normal memory are visible to the
 	 * other CPUs before issuing the IPI.
 	 */
 	dsb();
 
+#ifdef CONFIG_ARCH_MSM8625
+	raw_spin_lock_irqsave(&irq_controller_lock, flags);
+#endif
 	/* this always happens on GIC0 */
-	writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
+
+	writel_relaxed(sgir,
+			gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
+#ifdef CONFIG_ARCH_MSM8625
+	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
+#endif
+	mb();
 }
 #endif
 
+void gic_set_irq_secure(unsigned int irq)
+{
+	unsigned int gicd_isr_reg, gicd_pri_reg;
+	unsigned int mask = 0xFFFFFF00;
+	struct irq_data *d = irq_get_irq_data(irq);
+
+	if (is_cpu_secure()) {
+		raw_spin_lock(&irq_controller_lock);
+		gicd_isr_reg = readl_relaxed(gic_dist_base(d) +
+				GIC_DIST_ISR + gic_irq(d) / 32 * 4);
+		gicd_isr_reg &= ~BIT(gic_irq(d) % 32);
+		writel_relaxed(gicd_isr_reg, gic_dist_base(d) +
+				GIC_DIST_ISR + gic_irq(d) / 32 * 4);
+		/* Also increase the priority of that irq */
+		gicd_pri_reg = readl_relaxed(gic_dist_base(d) +
+					GIC_DIST_PRI + (gic_irq(d) * 4 / 4));
+		gicd_pri_reg &= mask;
+		gicd_pri_reg |= 0x80; /* 0x80 is a higher GIC priority than 0xa0 */
+		writel_relaxed(gicd_pri_reg, gic_dist_base(d) + GIC_DIST_PRI +
+				gic_irq(d) * 4 / 4);
+		mb();
+		raw_spin_unlock(&irq_controller_lock);
+	} else {
+		WARN(1, "Trying to run secure operation from Non-secure mode");
+	}
+}
+
 #ifdef CONFIG_OF
 static int gic_cnt __initdata = 0;
 
@@ -759,6 +1055,7 @@
 	void __iomem *dist_base;
 	u32 percpu_offset;
 	int irq;
+	struct irq_domain *domain = &gic_data[gic_cnt].domain;
 
 	if (WARN_ON(!node))
 		return -ENODEV;
@@ -772,7 +1069,9 @@
 	if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
 		percpu_offset = 0;
 
-	gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node);
+	domain->of_node = of_node_get(node);
+
+	gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset);
 
 	if (parent) {
 		irq = irq_of_parse_and_map(node, 0);
@@ -782,3 +1081,156 @@
 	return 0;
 }
 #endif
+/*
+ * Before calling this function, local interrupts must be disabled and the
+ * IRQ must be disabled at the GIC to avoid spurious interrupts.
+ */
+bool gic_is_spi_pending(unsigned int irq)
+{
+	struct irq_data *d = irq_get_irq_data(irq);
+	u32 mask, val;
+
+	WARN_ON(!irqs_disabled());
+	raw_spin_lock(&irq_controller_lock);
+	mask = 1 << (gic_irq(d) % 32);
+	val = readl(gic_dist_base(d) +
+			GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
+	/* warn if the interrupt is enabled */
+	WARN_ON(val & mask);
+	val = readl(gic_dist_base(d) +
+			GIC_DIST_PENDING_SET + (gic_irq(d) / 32) * 4);
+	raw_spin_unlock(&irq_controller_lock);
+	return (bool) (val & mask);
+}
+
+/*
+ * Before calling this function, local interrupts must be disabled and the
+ * IRQ must be disabled at the GIC to avoid spurious interrupts.
+ */
+void gic_clear_spi_pending(unsigned int irq)
+{
+	struct irq_data *d = irq_get_irq_data(irq);
+
+	u32 mask, val;
+	WARN_ON(!irqs_disabled());
+	raw_spin_lock(&irq_controller_lock);
+	mask = 1 << (gic_irq(d) % 32);
+	val = readl(gic_dist_base(d) +
+			GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
+	/* warn if the interrupt is enabled */
+	WARN_ON(val & mask);
+	writel(mask, gic_dist_base(d) +
+			GIC_DIST_PENDING_CLEAR + (gic_irq(d) / 32) * 4);
+	raw_spin_unlock(&irq_controller_lock);
+}
+
+#ifdef CONFIG_ARCH_MSM8625
+/*
+ * Check whether any enabled SPI or PPI interrupts are pending.
+ * Return:
+ *	0 : no pending interrupts
+ *	1 : pending interrupts other than A9_M2A_5
+ */
+unsigned int msm_gic_spi_ppi_pending(void)
+{
+	unsigned int i, bit = 0;
+	unsigned int pending_enb = 0, pending = 0;
+	unsigned long value = 0;
+	struct gic_chip_data *gic = &gic_data[0];
+	void __iomem *base = gic_data_dist_base(gic);
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&irq_controller_lock, flags);
+	/*
+	 * PPI and SGI to be included.
+	 * MSM8625_INT_A9_M2A_5 needs to be ignored, as A9_M2A_5
+	 * requesting sleep triggers it
+	 */
+	for (i = 0; (i * 32) < gic->max_irq; i++) {
+		pending = readl_relaxed(base +
+				GIC_DIST_PENDING_SET + i * 4);
+		pending_enb = readl_relaxed(base +
+				GIC_DIST_ENABLE_SET + i * 4);
+		value = pending & pending_enb;
+
+		if (value) {
+			for_each_set_bit(bit, &value, 32) {
+				if ((bit + 32 * i) != MSM8625_INT_A9_M2A_5) {
+					raw_spin_unlock_irqrestore(
+						&irq_controller_lock, flags);
+					return 1;
+				}
+			}
+		}
+	}
+	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
+
+	return 0;
+}
+
+void msm_gic_save(bool modem_wake, int from_idle)
+{
+	unsigned int i;
+	struct gic_chip_data *gic = &gic_data[0];
+	void __iomem *base = gic_data_dist_base(gic);
+
+	gic_cpu_save(0);
+	gic_dist_save(0);
+
+	/* Disable all interrupts before we enter power collapse */
+	for (i = 0; (i * 32) < gic->max_irq; i++) {
+		raw_spin_lock(&irq_controller_lock);
+		writel_relaxed(0xffffffff, base
+				+ GIC_DIST_ENABLE_CLEAR + i * 4);
+		raw_spin_unlock(&irq_controller_lock);
+	}
+}
+
+void msm_gic_restore(void)
+{
+	gic_dist_restore(0);
+	gic_cpu_restore(0);
+}
+
+/*
+ * Configure the GIC after we come out of power collapse.
+ * This configures the GIC registers needed for core1 to receive an SPI
+ * (ACSR_MP_CORE_IPC1, 32 + 8), which brings core1 out of GDFS.
+ */
+void core1_gic_configure_and_raise(void)
+{
+	struct gic_chip_data *gic = &gic_data[0];
+	void __iomem *base = gic_data_dist_base(gic);
+	unsigned int value = 0;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&irq_controller_lock, flags);
+
+	value = __raw_readl(base + GIC_DIST_ACTIVE_BIT + 0x4);
+	value |= BIT(8);
+	__raw_writel(value, base + GIC_DIST_ACTIVE_BIT + 0x4);
+	mb();
+
+	value = __raw_readl(base + GIC_DIST_TARGET + 0x24);
+	value |= BIT(13);
+	__raw_writel(value, base + GIC_DIST_TARGET + 0x24);
+	mb();
+
+	value = __raw_readl(base + GIC_DIST_TARGET + 0x28);
+	value |= BIT(1);
+	__raw_writel(value, base + GIC_DIST_TARGET + 0x28);
+	mb();
+
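+	/* Finally, mark the SPI pending so core1 takes it once it powers up */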
+	value =  __raw_readl(base + GIC_DIST_ENABLE_SET + 0x4);
+	value |= BIT(8);
+	__raw_writel(value, base + GIC_DIST_ENABLE_SET + 0x4);
+	mb();
+
+	value =  __raw_readl(base + GIC_DIST_PENDING_SET + 0x4);
+	value |= BIT(8);
+	__raw_writel(value, base + GIC_DIST_PENDING_SET + 0x4);
+	mb();
+	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
+}
+#endif