Merge commit 'AU_LINUX_ANDROID_ICS.04.00.04.00.126' into msm-3.4

AU_LINUX_ANDROID_ICS.04.00.04.00.126 from msm-3.0.
First parent is from google/android-3.4.

* commit 'AU_LINUX_ANDROID_ICS.04.00.04.00.126': (8712 commits)
  PRNG: Device tree entry for qrng device.
  vidc:1080p: Set video core timeout value for Thumbnail mode
  msm: sps: improve the debugging support in SPS driver
  board-8064 msm: Overlap secure and non secure video firmware heaps.
  msm: clock: Add handoff ops for 7x30 and copper XO clocks
  msm_fb: display: Wait for external vsync before DTV IOMMU unmap
  msm: Fix circular dependency in debug UART settings
  msm: gdsc: Add GDSC regulator driver for msm-copper
  defconfig: Enable Mobicore Driver.
  mobicore: Add mobicore driver.
  mobicore: rename variable to lower case.
  mobicore: rename folder.
  mobicore: add makefiles
  mobicore: initial import of kernel driver
  ASoC: msm: Add SLIMBUS_2_RX CPU DAI
  board-8064-gpio: Update FUNC for EPM SPI CS
  msm_fb: display: Remove chicken bit config during video playback
  mmc: msm_sdcc: enable the sanitize capability
  msm-fb: display: lm2 writeback support on mpq platforms
  msm_fb: display: Disable LVDS phy & pll during panel off
  ...

Signed-off-by: Steve Muckle <smuckle@codeaurora.org>
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
new file mode 100644
index 0000000..2098288
--- /dev/null
+++ b/arch/arm/include/asm/arch_timer.h
@@ -0,0 +1,25 @@
+#ifndef __ASMARM_ARCH_TIMER_H
+#define __ASMARM_ARCH_TIMER_H
+
+#include <linux/ioport.h>
+
+struct arch_timer {
+	struct resource	res[2];
+};
+
+#ifdef CONFIG_ARM_ARCH_TIMER
+int arch_timer_register(struct arch_timer *);
+int arch_timer_of_register(void);
+#else
+static inline int arch_timer_register(struct arch_timer *at)
+{
+	return -ENXIO;
+}
+
+static inline int arch_timer_of_register(void)
+{
+	return -ENXIO;
+}
+#endif
+
+#endif
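
As a usage sketch (not part of this patch; the PPI number and board hook below are illustrative), a platform would register the timer roughly like this:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <asm/arch_timer.h>

/* The interrupt number below is illustrative only. */
static struct arch_timer example_arch_timer = {
	.res = {
		{
			.start	= 2,
			.end	= 2,
			.flags	= IORESOURCE_IRQ,
		},
	},
};

static void __init example_timer_init(void)
{
	/* Returns -ENXIO when CONFIG_ARM_ARCH_TIMER is not enabled. */
	if (arch_timer_register(&example_arch_timer))
		pr_warn("arch timer registration failed\n");
}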
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 5684cbc..d021905 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -88,6 +88,21 @@
  *	DMA Cache Coherency
  *	===================
  *
+ *	dma_inv_range(start, end)
+ *
+ *		Invalidate (discard) the specified virtual address range.
+ *		May not write back any entries.  If 'start' or 'end'
+ *		are not cache line aligned, those lines must be written
+ *		back.
+ *		- start  - virtual start address
+ *		- end    - virtual end address
+ *
+ *	dma_clean_range(start, end)
+ *
+ *		Clean (write back) the specified virtual address range.
+ *		- start  - virtual start address
+ *		- end    - virtual end address
+ *
  *	dma_flush_range(start, end)
  *
  *		Clean and invalidate the specified virtual address range.
@@ -108,6 +123,8 @@
 	void (*dma_map_area)(const void *, size_t, int);
 	void (*dma_unmap_area)(const void *, size_t, int);
 
+	void (*dma_inv_range)(const void *, const void *);
+	void (*dma_clean_range)(const void *, const void *);
 	void (*dma_flush_range)(const void *, const void *);
 };
 
@@ -134,6 +151,8 @@
  */
 #define dmac_map_area			cpu_cache.dma_map_area
 #define dmac_unmap_area			cpu_cache.dma_unmap_area
+#define dmac_inv_range			cpu_cache.dma_inv_range
+#define dmac_clean_range		cpu_cache.dma_clean_range
 #define dmac_flush_range		cpu_cache.dma_flush_range
 
 #else
@@ -154,6 +173,8 @@
  */
 extern void dmac_map_area(const void *, size_t, int);
 extern void dmac_unmap_area(const void *, size_t, int);
+extern void dmac_inv_range(const void *, const void *);
+extern void dmac_clean_range(const void *, const void *);
 extern void dmac_flush_range(const void *, const void *);
 
 #endif
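
Usage sketch for the two new cache hooks (the wrapper functions are hypothetical): dmac_clean_range() before the device reads a CPU-written buffer, dmac_inv_range() before the CPU reads device-written data.

#include <linux/types.h>
#include <asm/cacheflush.h>

/* Buffer owned by a driver outside the streaming DMA API (illustrative). */
static void example_cpu_to_device(void *buf, size_t len)
{
	/* Write dirty lines back so the device sees up-to-date data. */
	dmac_clean_range(buf, (char *)buf + len);
}

static void example_device_to_cpu(void *buf, size_t len)
{
	/* Discard stale lines before the CPU reads what the device wrote. */
	dmac_inv_range(buf, (char *)buf + len);
}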
diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h
index b2deda1..5c6b9a3 100644
--- a/arch/arm/include/asm/delay.h
+++ b/arch/arm/include/asm/delay.h
@@ -8,7 +8,7 @@
 
 #include <asm/param.h>	/* HZ */
 
-extern void __delay(int loops);
+extern void __delay(unsigned long loops);
 
 /*
  * This function intentionally does not exist; if you see references to
@@ -40,5 +40,8 @@
 			__const_udelay((n) * ((2199023U*HZ)>>11))) :	\
 	  __udelay(n))
 
+extern void set_delay_fn(void (*fn)(unsigned long));
+extern void read_current_timer_delay_loop(unsigned long loops);
+
 #endif /* defined(_ARM_DELAY_H) */
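
A plausible use of the new delay hooks, assuming the platform switches __delay() over to a timer-backed loop once its clocksource is up (sketch only):

#include <linux/init.h>
#include <asm/delay.h>

static void __init example_use_timer_delay(void)
{
	/*
	 * From here on, __delay(loops) polls the current timer rather than
	 * spinning the calibrated instruction loop.
	 */
	set_delay_fn(read_current_timer_delay_loop);
}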
 
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index cb3b7c9..dc988ff 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -141,6 +141,46 @@
 {
 }
 
+
+/*
+ * dma_coherent_pre_ops - barrier functions for coherent memory before DMA.
+ * A barrier is required to ensure memory operations are complete before the
+ * initiation of a DMA xfer.
+ * If the coherent memory is Strongly Ordered
+ * - pre ARMv7 and 8x50 guarantees ordering wrt other mem accesses.
+ * - ARMv7 guarantees ordering only within a 1KB block, so we need a barrier.
+ * If coherent memory is normal then we need a barrier to prevent
+ * reordering.
+ */
+static inline void dma_coherent_pre_ops(void)
+{
+#if COHERENT_IS_NORMAL == 1
+	dmb();
+#else
+	if (arch_is_coherent())
+		dmb();
+	else
+		barrier();
+#endif
+}
+/*
+ * dma_coherent_post_ops - barrier functions for coherent memory after DMA.
+ * If the coherent memory is Strongly Ordered we don't need a barrier since
+ * there are no speculative fetches to Strongly Ordered memory.
+ * If coherent memory is normal then we need a barrier to prevent reordering.
+ */
+static inline void dma_coherent_post_ops(void)
+{
+#if COHERENT_IS_NORMAL == 1
+	dmb();
+#else
+	if (arch_is_coherent())
+		dmb();
+	else
+		barrier();
+#endif
+}
+
 /**
  * dma_alloc_coherent - allocate consistent memory for DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -331,6 +371,58 @@
 }
 
 /**
+ * dma_cache_pre_ops - clean or invalidate cache before dma transfer is
+ *                     initiated and perform a barrier operation.
+ * @virtual_addr: A kernel logical or kernel virtual address
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * Ensure that any data held in the cache is appropriately discarded
+ * or written back.
+ *
+ */
+static inline void dma_cache_pre_ops(void *virtual_addr,
+		size_t size, enum dma_data_direction dir)
+{
+	extern void ___dma_single_cpu_to_dev(const void *, size_t,
+		enum dma_data_direction);
+
+	BUG_ON(!valid_dma_direction(dir));
+
+	if (!arch_is_coherent())
+		___dma_single_cpu_to_dev(virtual_addr, size, dir);
+}
+
+/**
+ * dma_cache_post_ops - clean or invalidate cache after the dma transfer
+ *                     completes and perform a barrier operation.
+ * @virtual_addr: A kernel logical or kernel virtual address
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * Ensure that any data held in the cache is appropriately discarded
+ * or written back.
+ *
+ */
+static inline void dma_cache_post_ops(void *virtual_addr,
+		size_t size, enum dma_data_direction dir)
+{
+	extern void ___dma_single_cpu_to_dev(const void *, size_t,
+		enum dma_data_direction);
+
+	BUG_ON(!valid_dma_direction(dir));
+
+	if (arch_has_speculative_dfetch() && !arch_is_coherent()
+	 && dir != DMA_TO_DEVICE)
+		/*
+		 * Treat DMA_BIDIRECTIONAL and DMA_FROM_DEVICE
+		 * identically: invalidate
+		 */
+		___dma_single_cpu_to_dev(virtual_addr,
+					 size, DMA_FROM_DEVICE);
+}
+
+/**
  * dma_map_page - map a portion of a page for streaming DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @page: page that buffer resides in
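
Intended call pattern for the new cache helpers around a driver-managed transfer; start_hw_transfer()/wait_hw_transfer_done() below are hypothetical placeholders, not kernel APIs:

#include <linux/types.h>
#include <linux/dma-mapping.h>

/* Hypothetical device kick/completion helpers. */
extern void start_hw_transfer(void *buf, size_t len);
extern void wait_hw_transfer_done(void);

static void example_raw_dma_transfer(void *buf, size_t len)
{
	/* Clean/invalidate and barrier before the device touches memory. */
	dma_cache_pre_ops(buf, len, DMA_BIDIRECTIONAL);

	start_hw_transfer(buf, len);
	wait_hw_transfer_done();

	/* Re-invalidate afterwards on CPUs that speculatively fetch. */
	dma_cache_post_ops(buf, len, DMA_BIDIRECTIONAL);
}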
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index 3d22204..b216a00 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -2,6 +2,7 @@
  *  arch/arm/include/asm/domain.h
  *
  *  Copyright (C) 1999 Russell King.
+ *  Copyright (c) 2009, Code Aurora Forum. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -31,8 +32,13 @@
  *
  * 36-bit addressing and supersections are only available on
  * CPUs based on ARMv6+ or the Intel XSC3 core.
+ *
+ * We cannot use domain 0 for the kernel on QSD8x50 since the kernel domain
+ * is set to manager mode when set_fs(KERNEL_DS) is called. Setting domain 0
+ * to manager mode will disable the workaround for a cpu bug that can cause an
+ * invalid fault status and/or tlb corruption (CONFIG_VERIFY_PERMISSION_FAULT).
  */
-#ifndef CONFIG_IO_36
+#if !defined(CONFIG_IO_36) && !defined(CONFIG_VERIFY_PERMISSION_FAULT)
 #define DOMAIN_KERNEL	0
 #define DOMAIN_TABLE	0
 #define DOMAIN_USER	1
@@ -60,6 +66,17 @@
 #ifndef __ASSEMBLY__
 
 #ifdef CONFIG_CPU_USE_DOMAINS
+#ifdef CONFIG_EMULATE_DOMAIN_MANAGER_V7
+void emulate_domain_manager_set(u32 domain);
+int emulate_domain_manager_data_abort(u32 dfsr, u32 dfar);
+int emulate_domain_manager_prefetch_abort(u32 ifsr, u32 ifar);
+void emulate_domain_manager_switch_mm(
+	unsigned long pgd_phys,
+	struct mm_struct *mm,
+	void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *));
+
+#define set_domain(x) emulate_domain_manager_set(x)
+#else
 #define set_domain(x)					\
 	do {						\
 	__asm__ __volatile__(				\
@@ -67,6 +84,7 @@
 	  : : "r" (x));					\
 	isb();						\
 	} while (0)
+#endif
 
 #define modify_domain(dom,type)					\
 	do {							\
diff --git a/arch/arm/include/asm/fiq.h b/arch/arm/include/asm/fiq.h
index d493d0b..ec4b8b8 100644
--- a/arch/arm/include/asm/fiq.h
+++ b/arch/arm/include/asm/fiq.h
@@ -33,11 +33,22 @@
 	void *dev_id;
 };
 
+#ifdef CONFIG_FIQ
 extern int claim_fiq(struct fiq_handler *f);
 extern void release_fiq(struct fiq_handler *f);
 extern void set_fiq_handler(void *start, unsigned int length);
 extern void enable_fiq(int fiq);
 extern void disable_fiq(int fiq);
+#else
+static inline int claim_fiq(struct fiq_handler *f)
+{
+	return 0;
+}
+static inline void release_fiq(struct fiq_handler *f) { }
+static inline void set_fiq_handler(void *start, unsigned int length) { }
+static inline void enable_fiq(int fiq) { }
+static inline void disable_fiq(int fiq) { }
+#endif
 
 /* helpers defined in fiqasm.S: */
 extern void __set_fiq_regs(unsigned long const *regs);
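
With the new stubs, FIQ users compile away cleanly when CONFIG_FIQ is off. Rough sketch of a caller (the asm handler symbols are hypothetical):

#include <asm/fiq.h>

static struct fiq_handler example_fh = {
	.name = "example-fiq",
};

/* Hypothetical FIQ handler written in assembly. */
extern unsigned char example_fiq_start, example_fiq_end;

static int example_fiq_setup(int fiq)
{
	int ret = claim_fiq(&example_fh);

	if (ret)
		return ret;

	set_fiq_handler(&example_fiq_start,
			&example_fiq_end - &example_fiq_start);
	enable_fiq(fiq);
	return 0;
}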
diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h
index 2740c2a..3d7351c 100644
--- a/arch/arm/include/asm/hardirq.h
+++ b/arch/arm/include/asm/hardirq.h
@@ -5,7 +5,7 @@
 #include <linux/threads.h>
 #include <asm/irq.h>
 
-#define NR_IPI	6
+#define NR_IPI	7
 
 typedef struct {
 	unsigned int __softirq_pending;
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index bd2c6a5..a244039 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -91,6 +91,7 @@
 #define L2X0_AUX_CTRL_WAY_SIZE_SHIFT		17
 #define L2X0_AUX_CTRL_WAY_SIZE_MASK		(0x7 << 17)
 #define L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT	22
+#define L2X0_AUX_CTRL_L2_FORCE_NWA_SHIFT	23
 #define L2X0_AUX_CTRL_NS_LOCKDOWN_SHIFT		26
 #define L2X0_AUX_CTRL_NS_INT_CTRL_SHIFT		27
 #define L2X0_AUX_CTRL_DATA_PREFETCH_SHIFT	28
@@ -105,7 +106,18 @@
 
 #define REV_PL310_R2P0				4
 
+#define L2X0_LATENCY_CTRL_SETUP_SHIFT	0
+#define L2X0_LATENCY_CTRL_RD_SHIFT	4
+#define L2X0_LATENCY_CTRL_WR_SHIFT	8
+
+#define L2X0_PREFETCH_CTRL_OFFSET_SHIFT		0
+#define L2X0_PREFETCH_CTRL_WRAP8_INC_SHIFT	23
+#define L2X0_PREFETCH_CTRL_WRAP8_SHIFT		30
+
 #ifndef __ASSEMBLY__
+extern void l2cc_suspend(void);
+extern void l2cc_resume(void);
+extern void l2x0_cache_sync(void);
 extern void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask);
 #if defined(CONFIG_CACHE_L2X0) && defined(CONFIG_OF)
 extern int l2x0_of_init(u32 aux_val, u32 aux_mask);
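
Sketch of how the new aux-control shifts would typically be folded into the l2x0_init() call from board code (the way-size value is illustrative):

#include <linux/init.h>
#include <linux/types.h>
#include <asm/hardware/cache-l2x0.h>

static void __init example_l2_init(void __iomem *l2x0_base)
{
	u32 aux_val, aux_mask;

	aux_val  = (0x3 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT)	/* illustrative way size */
		 | (1 << L2X0_AUX_CTRL_L2_FORCE_NWA_SHIFT);	/* force no-write-allocate */
	aux_mask = ~(L2X0_AUX_CTRL_WAY_SIZE_MASK
		 | (1 << L2X0_AUX_CTRL_L2_FORCE_NWA_SHIFT));

	l2x0_init(l2x0_base, aux_val, aux_mask);
}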
diff --git a/arch/arm/include/asm/hardware/gic.h b/arch/arm/include/asm/hardware/gic.h
index 4b1ce6c..3fb0a1c 100644
--- a/arch/arm/include/asm/hardware/gic.h
+++ b/arch/arm/include/asm/hardware/gic.h
@@ -22,6 +22,7 @@
 
 #define GIC_DIST_CTRL			0x000
 #define GIC_DIST_CTR			0x004
+#define GIC_DIST_ISR			0x080
 #define GIC_DIST_ENABLE_SET		0x100
 #define GIC_DIST_ENABLE_CLEAR		0x180
 #define GIC_DIST_PENDING_SET		0x200
@@ -39,19 +40,31 @@
 extern struct irq_chip gic_arch_extn;
 
 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
-		    u32 offset, struct device_node *);
+		    u32 offset);
 int gic_of_init(struct device_node *node, struct device_node *parent);
 void gic_secondary_init(unsigned int);
 void gic_handle_irq(struct pt_regs *regs);
 void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
 void gic_raise_softirq(const struct cpumask *mask, unsigned int irq);
-
+#ifdef CONFIG_ARM_GIC
+void gic_set_irq_secure(unsigned int irq);
+#else
+static inline void gic_set_irq_secure(unsigned int irq) { }
+#endif
 static inline void gic_init(unsigned int nr, int start,
 			    void __iomem *dist , void __iomem *cpu)
 {
-	gic_init_bases(nr, start, dist, cpu, 0, NULL);
+	gic_init_bases(nr, start, dist, cpu, 0);
 }
+bool gic_is_spi_pending(unsigned int irq);
+void gic_clear_spi_pending(unsigned int irq);
+void gic_set_irq_secure(unsigned int irq);
+#endif
 
+#ifdef CONFIG_ARCH_MSM8625
+void msm_gic_save(bool modem_wake, int from_idle);
+void msm_gic_restore(void);
+void core1_gic_configure_and_raise(void);
 #endif
 
 #endif
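
Sketch of how the MSM additions might be used from platform low-power code (the flow and IRQ handling are illustrative, not from this patch):

#include <linux/types.h>
#include <asm/hardware/gic.h>

static int example_power_collapse(unsigned int wakeup_irq, bool modem_wake,
				  int from_idle)
{
	/* Mark the wakeup interrupt as secure (illustrative). */
	gic_set_irq_secure(wakeup_irq);

#ifdef CONFIG_ARCH_MSM8625
	msm_gic_save(modem_wake, from_idle);
	/* ... power collapse / wait-for-interrupt would happen here ... */
	msm_gic_restore();
#endif

	/* Report whether the SPI fired while the core was down. */
	return gic_is_spi_pending(wakeup_irq) ? 1 : 0;
}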
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 9af5563..42fef7c 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -27,6 +27,7 @@
 #include <asm/byteorder.h>
 #include <asm/memory.h>
 #include <asm-generic/pci_iomap.h>
+#include <mach/msm_rtb.h>
 
 /*
  * ISA I/O bus memory addresses are 1:1 with the physical address.
@@ -47,13 +48,52 @@
 extern void __raw_readsw(const void __iomem *addr, void *data, int wordlen);
 extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
 
-#define __raw_writeb(v,a)	(__chk_io_ptr(a), *(volatile unsigned char __force  *)(a) = (v))
-#define __raw_writew(v,a)	(__chk_io_ptr(a), *(volatile unsigned short __force *)(a) = (v))
-#define __raw_writel(v,a)	(__chk_io_ptr(a), *(volatile unsigned int __force   *)(a) = (v))
+/*
+ * There may be cases when clients don't want to support or can't support the
+ * logging. The appropriate functions can be used but clients should carefully
+ * consider why they can't support the logging.
+ */
 
-#define __raw_readb(a)		(__chk_io_ptr(a), *(volatile unsigned char __force  *)(a))
-#define __raw_readw(a)		(__chk_io_ptr(a), *(volatile unsigned short __force *)(a))
-#define __raw_readl(a)		(__chk_io_ptr(a), *(volatile unsigned int __force   *)(a))
+#define __raw_write_logged(v, a, _t)	({ \
+	int _ret; \
+	void *_addr = (void *)(a); \
+	_ret = uncached_logk(LOGK_WRITEL, _addr); \
+	ETB_WAYPOINT; \
+	__raw_write##_t##_no_log((v), _addr); \
+	if (_ret) \
+		LOG_BARRIER; \
+	})
+
+
+#define __raw_writeb_no_log(v, a)	(__chk_io_ptr(a), *(volatile unsigned char __force  *)(a) = (v))
+#define __raw_writew_no_log(v, a)	(__chk_io_ptr(a), *(volatile unsigned short __force *)(a) = (v))
+#define __raw_writel_no_log(v, a)	(__chk_io_ptr(a), *(volatile unsigned int __force *)(a) = (v))
+
+
+#define __raw_writeb(v, a)	__raw_write_logged((v), (a), b)
+#define __raw_writew(v, a)	__raw_write_logged((v), (a), w)
+#define __raw_writel(v, a)	__raw_write_logged((v), (a), l)
+
+#define __raw_readb_no_log(a)		(__chk_io_ptr(a), *(volatile unsigned char __force  *)(a))
+#define __raw_readw_no_log(a)		(__chk_io_ptr(a), *(volatile unsigned short __force *)(a))
+#define __raw_readl_no_log(a)		(__chk_io_ptr(a), *(volatile unsigned int __force *)(a))
+
+#define __raw_read_logged(a, _l, _t)		({ \
+	unsigned _t __a; \
+	void *_addr = (void *)(a); \
+	int _ret; \
+	_ret = uncached_logk(LOGK_READL, _addr); \
+	ETB_WAYPOINT; \
+	__a = __raw_read##_l##_no_log(_addr);\
+	if (_ret) \
+		LOG_BARRIER; \
+	__a; \
+	})
+
+
+#define __raw_readb(a)		__raw_read_logged((a), b, char)
+#define __raw_readw(a)		__raw_read_logged((a), w, short)
+#define __raw_readl(a)		__raw_read_logged((a), l, int)
 
 /*
  * Architecture ioremap implementation.
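
The logged accessors wrap the old behaviour; the *_no_log variants keep it. A sketch of the intended split (the register offset and polling loop are hypothetical):

#include <linux/io.h>

#define EXAMPLE_STATUS_REG	0x04	/* hypothetical register offset */

static u32 example_poll_status(void __iomem *base)
{
	u32 v;

	/*
	 * Tight polling loop: the _no_log accessor skips the RTB/ETB
	 * logging so the trace buffer is not flooded.
	 */
	do {
		v = __raw_readl_no_log(base + EXAMPLE_STATUS_REG);
	} while (!(v & 0x1));

	/* One-off ack goes through the logged accessor (illustrative). */
	__raw_writel(v, base + EXAMPLE_STATUS_REG);

	return v;
}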
diff --git a/arch/arm/include/asm/localtimer.h b/arch/arm/include/asm/localtimer.h
index f77ffc1..87de915 100644
--- a/arch/arm/include/asm/localtimer.h
+++ b/arch/arm/include/asm/localtimer.h
@@ -14,6 +14,11 @@
 
 struct clock_event_device;
 
+/*
+ * Set up a per-cpu timer, whether it be a local timer or dummy broadcast
+ */
+void percpu_timer_setup(void);
+
 struct local_timer_ops {
 	int  (*setup)(struct clock_event_device *);
 	void (*stop)(struct clock_event_device *);
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index d7692ca..0a45dee 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -22,7 +22,7 @@
 	const char *const 	*dt_compat;	/* array of device tree
 						 * 'compatible' strings	*/
 
-	unsigned int		nr_irqs;	/* number of IRQs */
+	int			nr_irqs;	/* number of IRQs */
 
 #ifdef CONFIG_ZONE_DMA
 	unsigned long		dma_zone_size;	/* size of DMA-able area */
@@ -39,6 +39,7 @@
 					 struct meminfo *);
 	void			(*reserve)(void);/* reserve mem blocks	*/
 	void			(*map_io)(void);/* IO mapping function	*/
+	void			(*init_very_early)(void);
 	void			(*init_early)(void);
 	void			(*init_irq)(void);
 	struct sys_timer	*timer;		/* system tick timer	*/
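
Sketch of a machine descriptor picking up the new hook; the machine type and callbacks are hypothetical, not real mach-types entries:

#include <linux/init.h>
#include <asm/mach/arch.h>

static void __init example_init_very_early(void)
{
	/* e.g. map a debug UART or trace buffer before init_early() runs */
}

static void __init example_init_machine(void)
{
}

/* EXAMPLE_MSM is a hypothetical machine type. */
MACHINE_START(EXAMPLE_MSM, "Example MSM board")
	.init_very_early = example_init_very_early,
	.init_machine	 = example_init_machine,
MACHINE_END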
diff --git a/arch/arm/include/asm/mach/flash.h b/arch/arm/include/asm/mach/flash.h
index 4ca69fe..36938ea 100644
--- a/arch/arm/include/asm/mach/flash.h
+++ b/arch/arm/include/asm/mach/flash.h
@@ -17,6 +17,7 @@
  * map_name:	the map probe function name
  * name:	flash device name (eg, as used with mtdparts=)
  * width:	width of mapped device
+ * interleave:  interleave mode feature support
  * init:	method called at driver/device initialisation
  * exit:	method called at driver/device removal
  * set_vpp:	method called to enable or disable VPP
@@ -28,6 +29,7 @@
 	const char	*map_name;
 	const char	*name;
 	unsigned int	width;
+	unsigned int    interleave;
 	int		(*init)(void);
 	void		(*exit)(void);
 	void		(*set_vpp)(int on);
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index b36f365..5f731df 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -9,6 +9,9 @@
  *
  *  Page table mapping constructs and function prototypes
  */
+#ifndef __ASM_ARM_MACH_MAP_H
+#define __ASM_ARM_MACH_MAP_H
+
 #include <asm/io.h>
 
 struct map_desc {
@@ -30,6 +33,9 @@
 #define MT_MEMORY_DTCM		12
 #define MT_MEMORY_ITCM		13
 #define MT_MEMORY_SO		14
+#define MT_MEMORY_R		15
+#define MT_MEMORY_RW		16
+#define MT_MEMORY_RX		17
 
 #ifdef CONFIG_MMU
 extern void iotable_init(struct map_desc *, int);
@@ -41,6 +47,11 @@
  */
 extern int ioremap_page(unsigned long virt, unsigned long phys,
 			const struct mem_type *mtype);
+
+extern int ioremap_pages(unsigned long virt, unsigned long phys,
+			unsigned long size, const struct mem_type *mtype);
 #else
 #define iotable_init(map,num)	do { } while (0)
 #endif
+
+#endif
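
The new memory types plug into the usual static map tables; sketch with made-up addresses:

#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/sizes.h>
#include <asm/mach/map.h>

static struct map_desc example_io_desc[] __initdata = {
	{
		.virtual = 0xfe000000,			/* illustrative VA */
		.pfn	 = __phys_to_pfn(0x02000000),	/* illustrative PA */
		.length	 = SZ_1M,
		.type	 = MT_MEMORY_RX,		/* read-only, executable */
	},
};

static void __init example_map_io(void)
{
	iotable_init(example_io_desc, ARRAY_SIZE(example_io_desc));
}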
diff --git a/arch/arm/include/asm/mach/mmc.h b/arch/arm/include/asm/mach/mmc.h
index bca864a..745a3a4 100644
--- a/arch/arm/include/asm/mach/mmc.h
+++ b/arch/arm/include/asm/mach/mmc.h
@@ -7,6 +7,13 @@
 #include <linux/mmc/host.h>
 #include <linux/mmc/card.h>
 #include <linux/mmc/sdio_func.h>
+#include <mach/gpio.h>
+#include <mach/msm_bus.h>
+
+#define SDC_DAT1_DISABLE 0
+#define SDC_DAT1_ENABLE  1
+#define SDC_DAT1_ENWAKE  2
+#define SDC_DAT1_DISWAKE 3
 
 struct embedded_sdio_data {
         struct sdio_cis cis;
@@ -15,6 +22,103 @@
         int num_funcs;
 };
 
+/* This structure keeps information per regulator */
+struct msm_mmc_reg_data {
+	/* voltage regulator handle */
+	struct regulator *reg;
+	/* regulator name */
+	const char *name;
+	/* voltage level to be set */
+	unsigned int low_vol_level;
+	unsigned int high_vol_level;
+	/* Load values for low power and high power mode */
+	unsigned int lpm_uA;
+	unsigned int hpm_uA;
+	/*
+	 * is set voltage supported for this regulator?
+	 * false => set voltage is not supported
+	 * true  => set voltage is supported
+	 *
+	 * Some regulators (like gpio-regulators, LVS (low voltage switches),
+	 * PMIC regulators) don't have the capability to call
+	 * regulator_set_voltage or regulator_set_optimum_mode.
+	 * Use this variable to indicate whether it is such a regulator.
+	 */
+	bool set_voltage_sup;
+	/* is this regulator enabled? */
+	bool is_enabled;
+	/* does this regulator need to be always on? */
+	bool always_on;
+	/* is low power mode setting required for this regulator? */
+	bool lpm_sup;
+};
+
+/*
+ * This structure keeps information for all the
+ * regulators required for a SDCC slot.
+ */
+struct msm_mmc_slot_reg_data {
+	struct msm_mmc_reg_data *vdd_data; /* keeps VDD/VCC regulator info */
+	struct msm_mmc_reg_data *vccq_data; /* keeps VCCQ regulator info */
+	struct msm_mmc_reg_data *vddp_data; /* keeps VDD Pad regulator info */
+};
+
+struct msm_mmc_gpio {
+	u32 no;
+	const char *name;
+	bool is_always_on;
+	bool is_enabled;
+};
+
+struct msm_mmc_gpio_data {
+	struct msm_mmc_gpio *gpio;
+	u8 size;
+};
+
+struct msm_mmc_pad_pull {
+	enum msm_tlmm_pull_tgt no;
+	u32 val;
+};
+
+struct msm_mmc_pad_pull_data {
+	struct msm_mmc_pad_pull *on;
+	struct msm_mmc_pad_pull *off;
+	u8 size;
+};
+
+struct msm_mmc_pad_drv {
+	enum msm_tlmm_hdrive_tgt no;
+	u32 val;
+};
+
+struct msm_mmc_pad_drv_data {
+	struct msm_mmc_pad_drv *on;
+	struct msm_mmc_pad_drv *off;
+	u8 size;
+};
+
+struct msm_mmc_pad_data {
+	struct msm_mmc_pad_pull_data *pull;
+	struct msm_mmc_pad_drv_data *drv;
+};
+
+struct msm_mmc_pin_data {
+	/*
+	 * = 1 if controller pins are using gpios
+	 * = 0 if controller has dedicated MSM pads
+	 */
+	u8 is_gpio;
+	u8 cfg_sts;
+	struct msm_mmc_gpio_data *gpio_data;
+	struct msm_mmc_pad_data *pad_data;
+};
+
+struct msm_mmc_bus_voting_data {
+	struct msm_bus_scale_pdata *use_cases;
+	unsigned int *bw_vecs;
+	unsigned int bw_vecs_size;
+};
+
 struct mmc_platform_data {
 	unsigned int ocr_mask;			/* available voltages */
 	int built_in;				/* built-in device flag */
@@ -23,6 +127,40 @@
 	unsigned int (*status)(struct device *);
 	struct embedded_sdio_data *embedded_sdio;
 	int (*register_status_notify)(void (*callback)(int card_present, void *dev_id), void *dev_id);
+	/*
+	 * XPC controls the maximum current in the
+	 * default speed mode of SDXC card.
+	 */
+	unsigned int xpc_cap;
+	/* Supported UHS-I Modes */
+	unsigned int uhs_caps;
+	void (*sdio_lpm_gpio_setup)(struct device *, unsigned int);
+	unsigned int status_irq;
+	unsigned int status_gpio;
+	/* Indicates the polarity of the GPIO line when card is inserted */
+	bool is_status_gpio_active_low;
+	unsigned int sdiowakeup_irq;
+	unsigned long irq_flags;
+	unsigned long mmc_bus_width;
+	int (*wpswitch) (struct device *);
+	unsigned int msmsdcc_fmin;
+	unsigned int msmsdcc_fmid;
+	unsigned int msmsdcc_fmax;
+	bool nonremovable;
+	bool pclk_src_dfab;
+	unsigned int mpm_sdiowakeup_int;
+	unsigned int wpswitch_gpio;
+	unsigned char wpswitch_polarity;
+	struct msm_mmc_slot_reg_data *vreg_data;
+	int is_sdio_al_client;
+	unsigned int *sup_clk_table;
+	unsigned char sup_clk_cnt;
+	struct msm_mmc_pin_data *pin_data;
+	bool disable_bam;
+	bool disable_runtime_pm;
+	bool disable_cmd23;
+	u32 cpu_dma_latency;
+	struct msm_mmc_bus_voting_data *msm_bus_voting_data;
 };
 
 #endif
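
Trimmed sketch of a board file filling in the new fields (supply name, voltage levels and clock limits are illustrative, not from this patch):

#include <asm/mach/mmc.h>

static struct msm_mmc_reg_data example_vdd = {
	.name		 = "sdc_vdd",		/* hypothetical supply */
	.low_vol_level	 = 2950000,
	.high_vol_level	 = 2950000,
	.lpm_uA		 = 9000,
	.hpm_uA		 = 200000,
	.set_voltage_sup = true,
};

static struct msm_mmc_slot_reg_data example_slot_vreg = {
	.vdd_data = &example_vdd,
};

static struct mmc_platform_data example_sdc1_data = {
	.ocr_mask	= MMC_VDD_27_28 | MMC_VDD_28_29,
	.vreg_data	= &example_slot_vreg,
	.nonremovable	= true,
	.msmsdcc_fmin	= 400000,
	.msmsdcc_fmax	= 48000000,
};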
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index fcb5757..17b7b31 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -280,6 +280,13 @@
 #define arch_is_coherent()		0
 #endif
 
+/*
+ * Set if the architecture speculatively fetches data into cache.
+ */
+#ifndef arch_has_speculative_dfetch
+#define arch_has_speculative_dfetch()	0
+#endif
+
 #endif
 
 #include <asm-generic/memory_model.h>
diff --git a/arch/arm/include/asm/mmu_writeable.h b/arch/arm/include/asm/mmu_writeable.h
new file mode 100644
index 0000000..96d348c
--- /dev/null
+++ b/arch/arm/include/asm/mmu_writeable.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _MMU_WRITEABLE_H
+#define _MMU_WRITEABLE_H
+
+#ifdef CONFIG_STRICT_MEMORY_RWX
+void mem_text_writeable_spinlock(unsigned long *flags);
+void mem_text_address_writeable(unsigned long);
+void mem_text_address_restore(void);
+void mem_text_writeable_spinunlock(unsigned long *flags);
+#else
+static inline void mem_text_writeable_spinlock(unsigned long *flags) {};
+static inline void mem_text_address_writeable(unsigned long addr) {};
+static inline void mem_text_address_restore(void) {};
+static inline void mem_text_writeable_spinunlock(unsigned long *flags) {};
+#endif
+
+void mem_text_write_kernel_word(unsigned long *addr, unsigned long word);
+
+#endif
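
Usage sketch for the text-patching helper (the patch site symbol is hypothetical):

#include <asm/mmu_writeable.h>

/* Hypothetical word somewhere in kernel text that we want to patch. */
extern unsigned long example_patch_site;

static void example_patch_text(unsigned long new_word)
{
	/*
	 * Per the helpers above: the containing text page is temporarily
	 * made writeable, the word is written, and protections restored.
	 */
	mem_text_write_kernel_word(&example_patch_site, new_word);
}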
diff --git a/arch/arm/include/asm/mutex.h b/arch/arm/include/asm/mutex.h
index 93226cf..fd3f17e 100644
--- a/arch/arm/include/asm/mutex.h
+++ b/arch/arm/include/asm/mutex.h
@@ -41,6 +41,8 @@
 	__res |= __ex_flag;
 	if (unlikely(__res != 0))
 		fail_fn(count);
+	else
+		smp_rmb();
 }
 
 static inline int
@@ -61,6 +63,9 @@
 	__res |= __ex_flag;
 	if (unlikely(__res != 0))
 		__res = fail_fn(count);
+	else
+		smp_rmb();
+
 	return __res;
 }
 
@@ -74,6 +79,7 @@
 {
 	int __ex_flag, __res, __orig;
 
+	smp_wmb();
 	__asm__ (
 
 		"ldrex	%0, [%3]	\n\t"
@@ -119,6 +125,8 @@
 		: "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
 		: "r" (&count->counter)
 		: "cc", "memory" );
+	if (__orig)
+		smp_rmb();
 
 	return __orig;
 }
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index 5838361..afc1ca7 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -167,6 +167,11 @@
 extern int pfn_valid(unsigned long);
 #endif
 
+#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
+extern int _early_pfn_valid(unsigned long);
+#define early_pfn_valid(pfn) (_early_pfn_valid(pfn))
+#endif
+
 #include <asm/memory.h>
 
 #endif /* !__ASSEMBLY__ */
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index 00cbe10..2fecc60 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -23,6 +23,9 @@
 	ARM_PERF_PMU_ID_CA5,
 	ARM_PERF_PMU_ID_CA15,
 	ARM_PERF_PMU_ID_CA7,
+	ARM_PERF_PMU_ID_SCORPION,
+	ARM_PERF_PMU_ID_SCORPIONMP,
+	ARM_PERF_PMU_ID_KRAIT,
 	ARM_NUM_PMU_IDS,
 };
 
diff --git a/arch/arm/include/asm/perftypes.h b/arch/arm/include/asm/perftypes.h
new file mode 100644
index 0000000..8d21dcd
--- /dev/null
+++ b/arch/arm/include/asm/perftypes.h
@@ -0,0 +1,33 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+**  perftypes.h
+**  DESCRIPTION
+**  ksapi.ko function hooks header file
+*/
+
+#ifndef __PERFTYPES_H__
+#define __PERFTYPES_H__
+
+typedef void (*VPVF)(void);
+typedef void (*VPULF)(unsigned long);
+typedef void (*VPULULF)(unsigned long, unsigned long);
+
+extern VPVF pp_interrupt_out_ptr;
+extern VPVF pp_interrupt_in_ptr;
+extern VPULF pp_process_remove_ptr;
+extern void perf_mon_interrupt_in(void);
+extern void perf_mon_interrupt_out(void);
+
+#endif
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index f66626d..7b6f42a 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -103,16 +103,30 @@
 #define pgprot_stronglyordered(prot) \
 	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)
 
+#define pgprot_device(prot) \
+	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_DEV_NONSHARED)
+
+#define pgprot_writethroughcache(prot) \
+	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_WRITETHROUGH)
+
+#define pgprot_writebackcache(prot) \
+	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_WRITEBACK)
+
+#define pgprot_writebackwacache(prot) \
+	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_WRITEALLOC)
+
 #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
 #define pgprot_dmacoherent(prot) \
 	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN)
 #define __HAVE_PHYS_MEM_ACCESS_PROT
+#define COHERENT_IS_NORMAL 1
 struct file;
 extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 				     unsigned long size, pgprot_t vma_prot);
 #else
 #define pgprot_dmacoherent(prot) \
 	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
+#define COHERENT_IS_NORMAL 0
 #endif
 
 #endif /* __ASSEMBLY__ */
@@ -306,7 +320,7 @@
  * into virtual address `from'
  */
 #define io_remap_pfn_range(vma,from,pfn,size,prot) \
-		remap_pfn_range(vma, from, pfn, size, prot)
+	remap_pfn_range(vma,from,pfn,size,prot)
 
 #define pgtable_cache_init() do { } while (0)
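
The new pgprot helpers are aimed at driver mmap paths that want a specific memory type; a sketch (the mmap handler itself is illustrative):

#include <linux/fs.h>
#include <linux/mm.h>
#include <asm/pgtable.h>

/* Illustrative driver mmap handler, not from this patch. */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Ask for a write-through cached mapping instead of the default. */
	vma->vm_page_prot = pgprot_writethroughcache(vma->vm_page_prot);

	return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				  vma->vm_end - vma->vm_start,
				  vma->vm_page_prot);
}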
 
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index 90114fa..1e54b58 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -21,6 +21,7 @@
  */
 enum arm_pmu_type {
 	ARM_PMU_DEVICE_CPU	= 0,
+	ARM_PMU_DEVICE_L2	= 1,
 	ARM_NUM_PMU_DEVICES,
 };
 
@@ -108,7 +109,9 @@
 	cpumask_t	active_irqs;
 	const char	*name;
 	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
-	void		(*enable)(struct hw_perf_event *evt, int idx);
+	int     	(*request_pmu_irq)(int irq, irq_handler_t *irq_h);
+	void    	(*free_pmu_irq)(int irq);
+	void		(*enable)(struct hw_perf_event *evt, int idx, int cpu);
 	void		(*disable)(struct hw_perf_event *evt, int idx);
 	int		(*get_event_idx)(struct pmu_hw_events *hw_events,
 					 struct hw_perf_event *hwc);
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index 5ac8d3d..07209d7 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -29,6 +29,8 @@
 #define STACK_TOP_MAX	TASK_SIZE
 #endif
 
+extern unsigned int boot_reason;
+
 struct debug_info {
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 	struct perf_event	*hbp[ARM_MAX_HBP_SLOTS];
diff --git a/arch/arm/include/asm/remote_spinlock.h b/arch/arm/include/asm/remote_spinlock.h
new file mode 100644
index 0000000..702b669
--- /dev/null
+++ b/arch/arm/include/asm/remote_spinlock.h
@@ -0,0 +1,18 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __ASM_REMOTE_SPINLOCK_H
+#define __ASM_REMOTE_SPINLOCK_H
+
+#include <mach/remote_spinlock.h>
+
+#endif /* __ASM_REMOTE_SPINLOCK_H */
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index 23ebc0c..d1f9709 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -211,7 +211,8 @@
 	for (iter = 0; iter < (mi)->nr_banks; iter++)
 
 #define bank_pfn_start(bank)	__phys_to_pfn((bank)->start)
-#define bank_pfn_end(bank)	__phys_to_pfn((bank)->start + (bank)->size)
+#define bank_pfn_end(bank)	(__phys_to_pfn((bank)->start) + \
+						__phys_to_pfn((bank)->size))
 #define bank_pfn_size(bank)	((bank)->size >> PAGE_SHIFT)
 #define bank_phys_start(bank)	(bank)->start
 #define bank_phys_end(bank)	((bank)->start + (bank)->size)
@@ -221,6 +222,18 @@
 extern void early_print(const char *str, ...);
 extern void dump_machine_table(void);
 
+/*
+ * Early command line parameters.
+ */
+struct early_params {
+	const char *arg;
+	void (*fn)(char **p);
+};
+
+#define __early_param(name,fn)					\
+static struct early_params __early_##fn __used			\
+__attribute__((__section__(".early_param.init"))) = { name, fn }
+
 #endif  /*  __KERNEL__  */
 
 #endif
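
Usage sketch for __early_param(); the option name and handler are made up:

#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/setup.h>

static unsigned long example_reserve_size;

/* Handles a hypothetical "example_mem=<size>" command-line option. */
static void __init early_example_mem(char **p)
{
	example_reserve_size = memparse(*p, p);
}
__early_param("example_mem=", early_example_mem);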
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 65fa3c8..582c9b3 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -58,6 +58,7 @@
 #endif
 }
 
+#ifndef CONFIG_ARM_TICKET_LOCKS
 /*
  * ARMv6 Spin-locking.
  *
@@ -126,6 +127,131 @@
 
 	dsb_sev();
 }
+#else
+/*
+ * ARM Ticket spin-locking
+ *
+ * Ticket locks are conceptually two parts, one indicating the current head of
+ * the queue, and the other indicating the current tail. The lock is acquired
+ * by atomically noting the tail and incrementing it by one (thus adding
+ * ourselves to the queue and noting our position), then waiting until the head
+ * becomes equal to the initial value of the tail.
+ *
+ * Unlocked value: 0
+ * Locked value: now_serving != next_ticket
+ *
+ *   31                    16 15                      0
+ *  +------------------------+-------------------------+
+ *  |      now_serving       |      next_ticket        |
+ *  +------------------------+-------------------------+
+ */
+
+#define TICKET_SHIFT	16
+#define TICKET_BITS	16
+#define	TICKET_MASK	0xFFFF
+
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+	unsigned long tmp, ticket, next_ticket;
+
+	/* Grab the next ticket and wait for it to be "served" */
+	__asm__ __volatile__(
+"1:	ldrex	%[ticket], [%[lockaddr]]\n"
+"	uadd16	%[next_ticket], %[ticket], %[val1]\n"
+"	strex	%[tmp], %[next_ticket], [%[lockaddr]]\n"
+"	teq	%[tmp], #0\n"
+"	bne	1b\n"
+"	uxth	%[ticket], %[ticket]\n"
+"2:\n"
+#ifdef CONFIG_CPU_32v6K
+"	wfene\n"
+#endif
+"	ldr	%[tmp], [%[lockaddr]]\n"
+"	cmp	%[ticket], %[tmp], lsr #16\n"
+"	bne	2b"
+	: [ticket]"=&r" (ticket), [tmp]"=&r" (tmp), [next_ticket]"=&r" (next_ticket)
+	: [lockaddr]"r" (&lock->lock), [val1]"r" (1)
+	: "cc");
+	smp_mb();
+}
+
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+	unsigned long tmp, ticket, next_ticket;
+
+	/* Grab lock if now_serving == next_ticket and access is exclusive */
+	__asm__ __volatile__(
+"	ldrex	%[ticket], [%[lockaddr]]\n"
+"	ror	%[tmp], %[ticket], #16\n"
+"	eors	%[tmp], %[tmp], %[ticket]\n"
+"	bne	1f\n"
+"	uadd16	%[next_ticket], %[ticket], %[val1]\n"
+"	strex	%[tmp], %[next_ticket], [%[lockaddr]]\n"
+"1:"
+	: [ticket]"=&r" (ticket), [tmp]"=&r" (tmp),
+	  [next_ticket]"=&r" (next_ticket)
+	: [lockaddr]"r" (&lock->lock), [val1]"r" (1)
+	: "cc");
+	if (!tmp)
+		smp_mb();
+	return !tmp;
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+	unsigned long ticket, tmp;
+
+	smp_mb();
+
+	/* Bump now_serving by 1 */
+	__asm__ __volatile__(
+"1:	ldrex	%[ticket], [%[lockaddr]]\n"
+"	uadd16	%[ticket], %[ticket], %[serving1]\n"
+"	strex	%[tmp], %[ticket], [%[lockaddr]]\n"
+"	teq	%[tmp], #0\n"
+"	bne	1b"
+	: [ticket]"=&r" (ticket), [tmp]"=&r" (tmp)
+	: [lockaddr]"r" (&lock->lock), [serving1]"r" (0x00010000)
+	: "cc");
+	dsb_sev();
+}
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+	unsigned long ticket;
+
+	/* Wait for now_serving == next_ticket */
+	__asm__ __volatile__(
+#ifdef CONFIG_CPU_32v6K
+"	cmpne	%[lockaddr], %[lockaddr]\n"
+"1:	wfene\n"
+#else
+"1:\n"
+#endif
+"	ldr	%[ticket], [%[lockaddr]]\n"
+"	eor	%[ticket], %[ticket], %[ticket], lsr #16\n"
+"	uxth	%[ticket], %[ticket]\n"
+"	cmp	%[ticket], #0\n"
+"	bne	1b"
+	: [ticket]"=&r" (ticket)
+	: [lockaddr]"r" (&lock->lock)
+	: "cc");
+}
+
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+	unsigned long tmp = ACCESS_ONCE(lock->lock);
+	return (((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK) != 0;
+}
+
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
+{
+	unsigned long tmp = ACCESS_ONCE(lock->lock);
+	return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
+}
+#endif
 
 /*
  * RWLOCKS
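
For intuition, the ticket encoding above behaves like this plain-C model (not the asm fast path; uadd16 is what keeps next_ticket from carrying into now_serving):

#include <stdbool.h>
#include <stdint.h>

#define MODEL_TICKET_SHIFT	16
#define MODEL_TICKET_MASK	0xFFFFu

/* now_serving lives in bits 31:16, next_ticket in bits 15:0. */
static bool ticket_is_locked(uint32_t lock)
{
	return (((lock >> MODEL_TICKET_SHIFT) ^ lock) & MODEL_TICKET_MASK) != 0;
}

static uint32_t ticket_take(uint32_t lock, uint16_t *my_ticket)
{
	*my_ticket = lock & MODEL_TICKET_MASK;	/* our place in the queue */
	/* Bump next_ticket without ever carrying into now_serving. */
	return (lock & ~MODEL_TICKET_MASK) | ((lock + 1) & MODEL_TICKET_MASK);
}

static bool ticket_is_served(uint32_t lock, uint16_t my_ticket)
{
	return (uint16_t)(lock >> MODEL_TICKET_SHIFT) == my_ticket;
}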
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 85fe61e..d44d33f 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -409,7 +409,7 @@
 	tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", uaddr);
 	tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", uaddr);
 	tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", uaddr);
-#ifdef CONFIG_ARM_ERRATA_720789
+#if defined(CONFIG_ARM_ERRATA_720789) || defined(CONFIG_ARCH_MSM8X60)
 	tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 3", uaddr & PAGE_MASK);
 #else
 	tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", uaddr);
@@ -439,7 +439,11 @@
 	tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", kaddr);
 	tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", kaddr);
 	tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", kaddr);
+#ifdef CONFIG_ARCH_MSM8X60
+	tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 3", kaddr);
+#else
 	tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", kaddr);
+#endif
 
 	if (tlb_flag(TLB_BARRIER)) {
 		dsb();
diff --git a/arch/arm/include/asm/vfp.h b/arch/arm/include/asm/vfp.h
index f4ab34f..5a1e789 100644
--- a/arch/arm/include/asm/vfp.h
+++ b/arch/arm/include/asm/vfp.h
@@ -21,7 +21,7 @@
 #define FPSID_FORMAT_MASK	(0x3  << FPSID_FORMAT_BIT)
 #define FPSID_NODOUBLE		(1<<20)
 #define FPSID_ARCH_BIT		(16)
-#define FPSID_ARCH_MASK		(0xF  << FPSID_ARCH_BIT)
+#define FPSID_ARCH_MASK		(0x7F  << FPSID_ARCH_BIT)
 #define FPSID_PART_BIT		(8)
 #define FPSID_PART_MASK		(0xFF << FPSID_PART_BIT)
 #define FPSID_VARIANT_BIT	(4)
@@ -82,3 +82,8 @@
 #define VFPOPDESC_UNUSED_BIT	(24)
 #define VFPOPDESC_UNUSED_MASK	(0xFF << VFPOPDESC_UNUSED_BIT)
 #define VFPOPDESC_OPDESC_MASK	(~(VFPOPDESC_LENGTH_MASK | VFPOPDESC_UNUSED_MASK))
+
+#ifndef __ASSEMBLY__
+int vfp_pm_suspend(void);
+void vfp_pm_resume(void);
+#endif